index
int64
repo_id
string
file_path
string
content
string
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/EnumUtils.java
package water.util; import water.H2O; import water.nbhm.NonBlockingHashMap; import java.util.Arrays; import java.util.Optional; /** * Utilities to deal with Java enums. */ public class EnumUtils { /** * Memoizer for {@link #valueOf(Class, String)} */ private static NonBlockingHashMap<Class<? extends Enum>, NonBlockingHashMap<String, Enum>> enumMappings = new NonBlockingHashMap<>(150); /** * Return an array of Strings of all the enum levels. * <p> * Taken from http://stackoverflow.com/questions/13783295/getting-all-names-in-an-enum-as-a-string. */ public static String[] getNames(Class<? extends Enum<?>> e) { return Arrays.toString(e.getEnumConstants()).replaceAll("^.|.$", "").split(", "); } /** * This is like Enum.valueOf() only better: it matches enum constants very loosely: case-insensitive and disregarding * any non-alphanumeric characters (e.g. "_"). For example, if Enum declares constant LOG_NORMAL, then all of the * following would also match to this constant: * log_normal, logNormal, LogNormal, __LoGnOrmaL___, "LogNormal", $Log.Normal, lognormal, etc. * * @param <T> The enum type whose constant is to be returned * @param clz the {@code Class} object of the enum type from which to return a constant * @param name the name of the constant to return * @return the enum constant of the specified enum type with the specified name */ public static <T extends Enum<T>> T valueOf(Class<T> clz, String name) { NonBlockingHashMap<String, Enum> map = enumMappings.get(clz); if (map == null) { T[] enumValues = clz.getEnumConstants(); map = new NonBlockingHashMap<>(enumValues.length * 2); for (Enum item : enumValues) { String origName = item.name(); String unifName = origName.toUpperCase().replaceAll("[^0-9A-Z]", ""); if (map.containsKey(origName)) throw H2O.fail("Unexpected key " + origName + " in enum " + clz); if (map.containsKey(unifName)) throw H2O.fail("Non-unique key " + unifName + " in enum " + clz); map.put(origName, item); map.put(unifName, item); } // Put the map into {enumMappings} no sooner than it is fully constructed. If there are multiple threads // accessing the same enum mapping, then it is possible they'll begin constructing the map simultaneously and // then overwrite each other's results. This is harmless. // However it would be an error to put the {map} into {enumMappings} before it is filled, because then the // other thread would think that the map is complete, and may not find some of the legitimate keys. enumMappings.put(clz, map); } Enum value = map.get(name); if (value == null && name != null) { String unifName = name.toUpperCase().replaceAll("[^0-9A-Z]", ""); value = map.get(unifName); // Save the mapping name -> value, so that subsequent requests with the same name will be faster. if (value != null) map.put(name, value); } if (value == null) throw new IllegalArgumentException("No enum constant " + clz.getCanonicalName() + "." + name); if (name == null) throw new NullPointerException("Name is null"); // noinspection unchecked return (T) value; } /** * @param enumeration Enumeration to search in * @param value String to search fore * @param <T> Class of the Enumeration to search in * @return */ public static <T extends Enum<?>> Optional<T> valueOfIgnoreCase(Class<T> enumeration, String value) { final T[] enumConstants = enumeration.getEnumConstants(); for (T field : enumConstants) { if (field.name().compareToIgnoreCase(value) == 0) { return Optional.of(field); } } return Optional.empty(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/ExportFileFormat.java
package water.util; public enum ExportFileFormat { csv, parquet }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/FileIntegrityChecker.java
package water.util; import java.io.File; import java.util.ArrayList; import water.*; import water.fvec.*; import water.persist.PersistNFS; public class FileIntegrityChecker extends MRTask<FileIntegrityChecker> { final String[] _files; // File names found locally final long [] _sizes; // File sizes found locally int[] _ok; // OUTPUT: files which are globally compatible @Override public void setupLocal() { _ok = new int[_files.length]; for( int i = 0; i < _files.length; ++i ) { File f = new File(_files[i]); if( f.exists() && (f.length()==_sizes[i]) ) _ok[i] = 1; } } @Override public void reduce( FileIntegrityChecker o ) { ArrayUtils.add(_ok,o._ok); } private void addFolder(File path, ArrayList<File> filesInProgress ) { if( !path.canRead() ) return; File[] files = path.listFiles(); if( files != null ) { //path is a dir, and these are the files for( File f : files ) { if( !f.canRead() ) continue; // Ignore unreadable files if( f.length() == 0 ) continue; // Ignore 0-byte files if( f.isHidden() && !path.isHidden() ) continue; // Do not dive into hidden dirs unless asked if (f.isDirectory()) addFolder(f,filesInProgress); else filesInProgress.add(f); } } else if (path.length() > 0) { //path is a non-zero byte file filesInProgress.add(path); } } public static FileIntegrityChecker check(File r) { return new FileIntegrityChecker(r).doAllNodes(); } public FileIntegrityChecker(File root) { super(H2O.GUI_PRIORITY); ArrayList<File> filesInProgress = new ArrayList<>(); addFolder(root,filesInProgress); _files = new String[filesInProgress.size()]; _sizes = new long[filesInProgress.size()]; for( int i = 0; i < _files.length; ++i ) { File f = filesInProgress.get(i); _files[i] = f.getAbsolutePath(); _sizes[i] = f.length(); } } public int size() { return _files.length; } // Sync this directory with H2O. Record all files that appear to be visible // to the entire cloud, and give their Keys. List also all files which appear // on this H2O instance but are not consistent around the cluster, and Keys // which match the directory name but are not on disk. public Key syncDirectory(ArrayList<String> files, ArrayList<String> keys, ArrayList<String> fails, ArrayList<String> dels) { Futures fs = new Futures(); Key k = null; // Find all Keys which match ... for( int i = 0; i < _files.length; ++i ) { if( _ok[i] < H2O.CLOUD.size() ) { if( fails != null ) fails.add(_files[i]); } else { File f = new File(_files[i]); // Do not call getCanonicalFile - which resolves symlinks - breaks test harness // try { f = f.getCanonicalFile(); _files[i] = f.getPath(); } // Attempt to canonicalize // catch( IOException ignore ) {} k = PersistNFS.decodeFile(f); if( files != null ) files.add(_files[i]); if( keys != null ) keys .add(k.toString()); if( DKV.get(k) != null ) dels.add(k.toString()); Key lockOwner = Key.make(); new Frame(k).delete_and_lock(lockOwner); // Lock before making the NFS; avoids racing ImportFiles creating same Frame NFSFileVec nfs = NFSFileVec.make(f, fs); new Frame(k,new String[]{"C1"}, new Vec[]{nfs}).update(lockOwner).unlock(lockOwner); } } fs.blockForPending(); return k; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/FileUtils.java
package water.util; import water.Key; import java.io.Closeable; import java.io.EOFException; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.net.URI; import java.util.Objects; import java.util.Optional; import java.util.regex.Matcher; import java.util.regex.Pattern; /** * File utilities. */ public class FileUtils { public static boolean makeSureDirExists(String dir) { File f = new File(dir); if (!f.exists()) { return f.mkdirs(); } else { return true; } } /** * Silently close given files. * * @param closeable files to close */ public static void closeSilently(Closeable...closeable) { for(Closeable c : closeable) try { if( c != null ) c.close(); } catch( IOException xe ) { } } /** * Closes given files, logging exceptions thrown during the process of closing. * * @param closeable files to close */ public static void close(Closeable...closeable) { for(Closeable c : closeable) try { if( c != null ) c.close(); } catch( IOException ex ) { Log.err(ex); } } public static void copyStream(InputStream is, OutputStream os, final int buffer_size) { try { byte[] bytes=new byte[buffer_size]; while( is.available() > 0 ) { int count=is.read(bytes, 0, buffer_size); if(count<=0) break; os.write(bytes, 0, count); } } catch(EOFException eofe) { // no problem } catch(Exception ex) { throw new RuntimeException(ex); } } public static URI getURI(String path) { boolean windowsPath = path.matches("^[a-zA-Z]:.*$"); if (windowsPath) { return new File(path).toURI(); } else if (path.contains(":/")) { // Seems like return URI.create(path); } else { return new File(path).toURI(); } } public static boolean delete(File file) { if (file.isFile()) file.delete(); else if (file.isDirectory()) { File[] files = file.listFiles(); for (File f: files) { if (f.isDirectory()) { delete(f); } else { f.delete(); } } // Delete top-level directory return file.delete(); } return false; } /** Hunt for files in likely places. Null if cannot find. * @param fname filename * @return Found file or null */ public static File locateFile(String fname) { // Search in pre-defined path, when active, overrides all other, including direct file lookup final Optional<File> fileInPredefinedPath = findFileInPredefinedPath(fname); if (fileInPredefinedPath.isPresent()) return fileInPredefinedPath.get(); File file = new File(fname); if (file.exists()) return file; file = new File("target/" + fname); if (!file.exists()) file = new File("../" + fname); if (!file.exists()) file = new File("../../" + fname); if (!file.exists()) file = new File("../../../" + fname); if (!file.exists()) file = new File("../target/" + fname); if (!file.exists()) file = new File(StringUtils.expandPath(fname)); if (!file.exists()) file = null; return file; } /** * @param fileName File name/path to search for in pre-defined search path * @return An {@link Optional} with the file inside, if the H2O_FILES_SEARCH_PATH is defined and the file exists. * Otherwise an empty {@link Optional}. Never null. */ private static Optional<File> findFileInPredefinedPath(final String fileName) { Objects.requireNonNull(fileName); final String searchPath = System.getenv("H2O_FILES_SEARCH_PATH"); if (searchPath == null) return Optional.empty(); final StringBuilder localizedFileNameBuilder = new StringBuilder(searchPath); if (!searchPath.endsWith("/")) { localizedFileNameBuilder.append('/'); } // If the file starts with {"./", ".\", "../", "..\"} (or multiple instances of these), strip it. 
// Does not match relative paths from top of the filesystem tree (starting with "/"). final Pattern pattern = Pattern.compile("(\\.+[\\/]{1})+(.*)"); final Matcher matcher = pattern.matcher(fileName); if (matcher.matches()) { localizedFileNameBuilder.append(matcher.group(2)); } else if (fileName.startsWith("/")) { return Optional.empty(); // The "/" at the beginning indicates absolute path, except for Windows. Do not attempt. } else { localizedFileNameBuilder.append(fileName); } final File file = new File(localizedFileNameBuilder.toString()); if (file.exists()) { return Optional.of(file); } else { return Optional.empty(); } } private static void check(boolean cond, String msg) throws IOException { if (!cond) throw new IOException(msg); } private static void checkFileEntry(String name, File file) throws IOException { check(file != null, "File not found: " + name); check(file.exists(), "File should exist: " + name); } public static void checkFile(File file, String name) throws IOException { checkFileEntry(name, file); check(file.isFile(), "Expected a file: " + name); check(file.canRead(), "Expected a readable file: " + name); } public static File getFile(String fname) throws IOException { File f = locateFile(fname); checkFile(f, fname); return f; } public static File[] contentsOf(File folder, String name) throws IOException { checkFileEntry(name, folder); if (!folder.isDirectory()) throw new IOException("Expected a folder: " + name); File[] files = folder.listFiles(); if (files == null) throw new IOException("Cannot read folder: " + folder); return files; } /** Transform given key to a string which can be used as a file name. */ public static String keyToFileName(Key k) { return k.toString().replaceAll("[^a-zA-Z0-9_\\-\\.]", "_"); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/FrameUtils.java
package water.util; import hex.Interaction; import hex.Model; import hex.ToEigenVec; import jsr166y.CountedCompleter; import org.apache.commons.io.IOUtils; import water.*; import water.fvec.*; import water.parser.BufferedString; import water.parser.ParseDataset; import water.parser.ParseSetup; import water.persist.PersistManager; import java.io.*; import java.net.URI; import java.net.URL; import java.util.*; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.atomic.AtomicInteger; public class FrameUtils { public static final int MAX_VEC_NUM_ROWS_FOR_ARRAY_EXPORT = 100_000; /** Parse given file(s) into the form of single frame represented by the given key. * * @param okey destination key for parsed frame * @param files files to parse * @return a new frame */ public static Frame parseFrame(Key okey, File ...files) throws IOException { if (files == null || files.length == 0) { throw new IllegalArgumentException("List of files is empty!"); } for (File f : files) { if (!f.exists()) throw new FileNotFoundException("File not found " + f); } // Create output key if it is not given if(okey == null) okey = Key.make(files[0].getName()); Key[] inKeys = new Key[files.length]; for (int i=0; i<files.length; i++) inKeys[i] = NFSFileVec.make(files[i])._key; return ParseDataset.parse(okey, inKeys); } /** Parse given set of URIs and produce a frame's key representing output. * * @param okey key for ouput frame. Can be null * @param uris array of URI (file://, hdfs://, s3n://, s3a://, s3://, http://, https:// ...) to parse * @return a frame which is saved into DKV under okey * @throws IOException in case of parse error. */ public static Frame parseFrame(Key okey, URI ...uris) throws IOException { return parseFrame(okey, null, uris); } public static Key eagerLoadFromHTTP(String path) throws IOException { return eagerLoadFromURL(path, new URL(path)); } public static Key<?> eagerLoadFromURL(String sourceId, URL url) throws IOException { try (InputStream is = url.openStream()) { return eagerLoadFromInputStream(sourceId, is); } } private static Key<?> eagerLoadFromInputStream(String sourceId, InputStream is) throws IOException { Key<?> destination_key = Key.make(sourceId); UploadFileVec.ReadPutStats stats = new UploadFileVec.ReadPutStats(); UploadFileVec.readPut(destination_key, is, stats); return destination_key; } public static Frame parseFrame(Key okey, ParseSetup parseSetup, URI ...uris) throws IOException { if (uris == null || uris.length == 0) { throw new IllegalArgumentException("List of uris is empty!"); } if(okey == null) okey = Key.make(uris[0].toString()); Key[] inKeys = new Key[uris.length]; for (int i = 0; i < uris.length; i++){ if ("http".equals(uris[i].getScheme()) || "https".equals(uris[i].getScheme())) { inKeys[i] = eagerLoadFromHTTP(uris[i].toString()); } else{ inKeys[i] = H2O.getPM().anyURIToKey(uris[i]); } } // Return result return parseSetup != null ? 
ParseDataset.parse(okey, inKeys, true, ParseSetup.guessSetup(inKeys, parseSetup)) : ParseDataset.parse(okey, inKeys); } public static Frame categoricalEncoder(Frame dataset, String[] skipCols, Model.Parameters.CategoricalEncodingScheme scheme, ToEigenVec tev, int maxLevels) { switch (scheme) { case AUTO: case Enum: case SortByResponse: //the work is done in ModelBuilder - the domain is all we need to change once, adaptTestTrain takes care of test set adaptation case OneHotInternal: return dataset; //leave as is - most algos do their own internal default handling of enums case OneHotExplicit: return new CategoricalOneHotEncoder(dataset, skipCols).exec().get(); case Binary: return new CategoricalBinaryEncoder(dataset, skipCols).exec().get(); case EnumLimited: return new CategoricalEnumLimitedEncoder(maxLevels, dataset, skipCols).exec().get(); case Eigen: return new CategoricalEigenEncoder(tev, dataset, skipCols).exec().get(); case LabelEncoder: return new CategoricalLabelEncoder(dataset, skipCols).exec().get(); default: throw H2O.unimpl(); } } public static void printTopCategoricalLevels(Frame fr, boolean warn, int topK) { String[][] domains = fr.domains(); String[] names = fr.names(); int len = domains.length; int[] levels = new int[len]; for (int i = 0; i < len; ++i) levels[i] = domains[i] != null ? domains[i].length : 0; Arrays.sort(levels); if (levels[len - 1] > 0) { int levelcutoff = levels[len - 1 - Math.min(topK, len - 1)]; int count = 0; for (int i = 0; i < len && count < topK; ++i) { if (domains[i] != null && domains[i].length >= levelcutoff) { if (warn) Log.warn("Categorical feature '" + names[i] + "' has cardinality " + domains[i].length + "."); else Log.info("Categorical feature '" + names[i] + "' has cardinality " + domains[i].length + "."); } count++; } } } public static class Vec2ArryTsk extends MRTask<Vec2ArryTsk> { final int N; public double [] res; public Vec2ArryTsk(int N){this.N = N;} @Override public void setupLocal(){ res = MemoryManager.malloc8d(N); } @Override public void map(Chunk c){ final int off = (int)c.start(); for(int i = 0; i < c._len; i = c.nextNZ(i)) res[off+i] = c.atd(i); } @Override public void reduce(Vec2ArryTsk other){ if(res != other.res) { for(int i = 0; i < res.length; ++i) { assert res[i] == 0 || other.res[i] == 0; res[i] += other.res[i]; // assuming only one nonzero } } } } public static class Vecs2ArryTsk extends MRTask<Vecs2ArryTsk> { final int dim1; // treat as row final int dim2; // treat as column public double [][] res; public Vecs2ArryTsk(int dim1, int dim2) { this.dim1 = dim1; this.dim2 = dim2; } @Override public void setupLocal(){ res = MemoryManager.malloc8d(dim1, dim2); } @Override public void map(Chunk[] c){ final int off = (int)c[0].start(); for (int colIndex = 0; colIndex < dim2; colIndex++) { for (int rowIndex = 0; rowIndex < dim1; rowIndex++) { res[off+rowIndex][colIndex] = c[colIndex].atd(rowIndex); } } } @Override public void reduce(Vecs2ArryTsk other){ ArrayUtils.add(res, other.res); } } public static double [] asDoubles(Vec v){ return new Vec2ArryTsk((int)v.length()).doAll(v).res; } public static double [][] asDoubles(Frame frame){ if (frame.numRows() > MAX_VEC_NUM_ROWS_FOR_ARRAY_EXPORT) throw new IllegalArgumentException("Frame is too big to be extracted into array"); double [][] frameArray = new double[frame.numCols()][]; for (int i = 0; i < frame.numCols(); i++) { Vec v = frame.vec(i); frameArray[i] = new Vec2ArryTsk((int)v.length()).doAll(v).res; } return frameArray; } private static class Vec2IntArryTsk extends 
MRTask<Vec2IntArryTsk> { final int N; public int [] res; public Vec2IntArryTsk(int N){this.N = N;} @Override public void setupLocal(){ res = MemoryManager.malloc4(N); } @Override public void map(Chunk c){ final int off = (int)c.start(); for(int i = 0; i < c._len; i = c.nextNZ(i)) res[off+i] = (int)c.at8(i); } @Override public void reduce(Vec2IntArryTsk other){ if(res != other.res) { for(int i = 0; i < res.length; ++i) { assert res[i] == 0 || other.res[i] == 0; res[i] += other.res[i]; // assuming only one nonzero } } } } public static int [] asInts(Vec v){ if(v.length() > 100000) throw new IllegalArgumentException("Vec is too big to be extracted into array"); return new Vec2IntArryTsk((int)v.length()).doAll(v).res; } /** * Compute a chunk summary (how many chunks of each type, relative size, total size) * @param fr * @return chunk summary */ public static ChunkSummary chunkSummary(Frame fr) { return new ChunkSummary().doAll(fr); } /** Generate given numbers of keys by suffixing key by given numbered suffix. */ public static Key[] generateNumKeys(Key mk, int num) { return generateNumKeys(mk, num, "_part"); } public static Key[] generateNumKeys(Key mk, int num, String delim) { Key[] ks = new Key[num]; String n = mk!=null ? mk.toString() : "noname"; String suffix = ""; if (n.endsWith(".hex")) { n = n.substring(0, n.length()-4); // be nice suffix = ".hex"; } for (int i=0; i<num; i++) ks[i] = Key.make(n+delim+i+suffix); return ks; } /** * Helper to insert missing values into a Frame */ public static class MissingInserter extends Iced { Job<Frame> _job; final Key<Frame> _dataset; final double _fraction; final long _seed; public MissingInserter(Key<Frame> frame, long seed, double frac){ _dataset = frame; _seed = seed; _fraction = frac; } /** * Driver for MissingInserter */ class MissingInserterDriver extends H2O.H2OCountedCompleter { transient final Frame _frame; MissingInserterDriver(Frame frame) {_frame = frame; } @Override public void compute2() { new MRTask() { @Override public void map (Chunk[]cs){ final Random rng = RandomUtils.getRNG(0); for (int c = 0; c < cs.length; c++) { for (int r = 0; r < cs[c]._len; r++) { rng.setSeed(_seed + 1234 * c ^ 1723 * (cs[c].start() + r)); if (rng.nextDouble() < _fraction) cs[c].setNA(r); } } _job.update(1); } }.doAll(_frame); tryComplete(); } } public Job<Frame> execImpl() { _job = new Job<>(_dataset, Frame.class.getName(), "MissingValueInserter"); if (DKV.get(_dataset) == null) throw new IllegalArgumentException("Invalid Frame key " + _dataset + " (Frame doesn't exist)."); if (_fraction < 0 || _fraction > 1 ) throw new IllegalArgumentException("fraction must be between 0 and 1."); final Frame frame = DKV.getGet(_dataset); MissingInserterDriver mid = new MissingInserterDriver(frame); int work = frame.vecs()[0].nChunks(); return _job.start(mid, work); } } /** * compute fraction of sparse chunks in this array. * @param chks * @return */ public static double sparseRatio(Chunk [] chks) { double cnt = 0; double reg = 1.0/chks.length; for(Chunk c :chks) if(c.isSparseNA()){ cnt += c.sparseLenNA()/(double)c.len(); } else if(c.isSparseZero()){ cnt += c.sparseLenZero()/(double)c.len(); } else cnt += 1; return cnt * reg; } public static double sparseRatio(Frame fr) { double reg = 1.0/fr.numCols(); double res = 0; for(Vec v:fr.vecs()) res += v.sparseRatio(); return res * reg; } public static class WeightedMean extends MRTask<WeightedMean> { private double _wresponse; private double _wsum; public double weightedMean() { return _wsum == 0 ? 
0 : _wresponse / _wsum; } @Override public void map(Chunk response, Chunk weight, Chunk offset) { for (int i=0;i<response._len;++i) { if (response.isNA(i)) continue; double w = weight.atd(i); if (w == 0) continue; _wresponse += w*(response.atd(i)-offset.atd(i)); _wsum += w; } } @Override public void reduce(WeightedMean mrt) { _wresponse += mrt._wresponse; _wsum += mrt._wsum; } } public static class ExportTaskDriver extends H2O.H2OCountedCompleter<ExportTaskDriver> { private static int BUFFER_SIZE = 8 * 1024 * 1024; private static long DEFAULT_TARGET_PART_SIZE = 134217728L; // 128MB, default HDFS block size private static int AUTO_PARTS_MAX = 128; // maximum number of parts if automatic determination is enabled final Frame _frame; final String _path; final String _frameName; final boolean _overwrite; final Job _j; int _nParts; boolean _parallel; final CompressionFactory _compressor; final Frame.CSVStreamParams _csv_parms; public ExportTaskDriver(Frame frame, String path, String frameName, boolean overwrite, Job j, int nParts, boolean perChunk, CompressionFactory compressor, Frame.CSVStreamParams csvParms) { _frame = frame; _path = path; _frameName = frameName; _overwrite = overwrite; _j = j; _nParts = nParts; _parallel = perChunk; _compressor = compressor; _csv_parms = csvParms; } @Override public void compute2() { _frame.read_lock(_j._key); if (_parallel && _nParts == 1) { _nParts = _frame.anyVec().nChunks(); int processed = 0; final String compression = H2O.getSysProperty("export.csv.cache.compression", "none"); final CompressionFactory compressor = CompressionFactory.make(compression); final DecompressionFactory decompressor = DecompressionFactory.make(compression); final String cacheStorage = H2O.getSysProperty("export.csv.cache.storage", "memory"); final CsvChunkCache cache = "memory".equals(cacheStorage) ? 
new DkvCsvChunkCache() : new FileSystemCsvChunkCache(); Log.info("Using compression=`" + compressor.getName() + "` and cache=`" + cache.getName() + "` for interim partial CSV export files."); final ChunkExportTask chunkExportTask = cache.makeExportTask(_frame, _csv_parms, compressor); H2O.submitTask(new LocalMR(chunkExportTask, H2O.NUMCPUS)); try (FileOutputStream os = new FileOutputStream(_path)) { byte[] buffer = new byte[BUFFER_SIZE]; final boolean[] isChunkCompleted = new boolean[_nParts + 1]; while (processed != _nParts) { final int cid = chunkExportTask._completed.take(); isChunkCompleted[cid] = true; while (isChunkCompleted[processed]) { try (final InputStream rawInputStream = cache.getChunkCsvStream(chunkExportTask, processed); final InputStream is = decompressor.wrapInputStream(rawInputStream)) { IOUtils.copyLarge(is, os, buffer); } finally { cache.releaseCache(chunkExportTask, processed); } processed++; _j.update(1); } } } catch (IOException e) { throw new RuntimeException("File export failed", e); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException("File export interrupted", e); } tryComplete(); } else if (_nParts == 1) { // Single file export, the file should be created by the node that was asked to export the data // (this is for non-distributed filesystems, we want the file to go to the local filesystem of the node) final Frame.CSVStream is = new Frame.CSVStream(_frame, _csv_parms); exportCSVStream(is, _path, 0); tryComplete(); } else { // Multi-part export if (_nParts < 0) { _nParts = calculateNParts(_csv_parms); assert _nParts > 0; } final int nChunksPerPart = ((_frame.anyVec().nChunks() - 1) / _nParts) + 1; new PartExportTask(this, _frame._names, nChunksPerPart, _csv_parms).dfork(_frame); } } private interface CsvChunkCache { String getName(); ChunkExportTask makeExportTask(Frame f, Frame.CSVStreamParams csvParams, CompressionFactory compressor); InputStream getChunkCsvStream(ChunkExportTask task, int cid) throws IOException; void releaseCache(ChunkExportTask task, int cid); } private class FileSystemCsvChunkCache implements CsvChunkCache { @Override public String getName() { return "FileSystem"; } @Override public ChunkExportTask makeExportTask(Frame f, Frame.CSVStreamParams csvParams, CompressionFactory compressor) { return new ChunkExportTask(f, f._names, csvParams, compressor); } @Override public InputStream getChunkCsvStream(ChunkExportTask task, int cid) throws IOException { File chunkFile = new File(task.getChunkPath(cid)); return new FileInputStream(chunkFile); } @Override public void releaseCache(ChunkExportTask task, int cid) { File chunkFile = new File(task.getChunkPath(cid)); if (! 
chunkFile.delete()) { Log.warn("Temporary file " + chunkFile.getAbsoluteFile() + " couldn't be deleted."); } } } private class DkvCsvChunkCache implements CsvChunkCache { private final Key<?> _vecKey; public DkvCsvChunkCache() { _vecKey = Vec.newKey(); } @Override public String getName() { return "DKV"; } @Override public ChunkExportTask makeExportTask(Frame f, Frame.CSVStreamParams csvParams, CompressionFactory compressor) { return new InMemoryChunkExportTask(_vecKey, f, f._names, csvParams, compressor); } @Override public InputStream getChunkCsvStream(ChunkExportTask task, int cid) { Key<?> ck = Vec.chunkKey(_vecKey, cid); PersistManager pm = H2O.getPM(); return pm.open(pm.toHexPath(ck)); } @Override public void releaseCache(ChunkExportTask task, int cid) { Key<?> ck = Vec.chunkKey(_vecKey, cid); DKV.remove(ck); } } @Override public void onCompletion(CountedCompleter caller) { _frame.unlock(_j); } @Override public boolean onExceptionalCompletion(Throwable t, CountedCompleter caller) { _frame.unlock(_j); return super.onExceptionalCompletion(t, caller); } private int calculateNParts(Frame.CSVStreamParams parms) { EstimateSizeTask estSize = new EstimateSizeTask(parms).dfork(_frame).getResult(); Log.debug("Estimator result: ", estSize); // the goal is to not to create too small part files (and too many files), ideal part file size is one HDFS block int nParts = Math.max((int) (estSize._size / DEFAULT_TARGET_PART_SIZE), H2O.CLOUD.size() + 1); if (nParts > AUTO_PARTS_MAX) { Log.debug("Recommended number of part files (" + nParts + ") exceeds maximum limit " + AUTO_PARTS_MAX + ". " + "Number of part files is limited to avoid slow downs when importing back to H2O."); // @tomk nParts = AUTO_PARTS_MAX; } Log.info("For file of estimated size " + estSize + "B determined number of parts: " + _nParts); return nParts; } /** * Trivial CSV file size estimator. Uses the first line of each non-empty chunk to estimate the size of the chunk. * The total estimated size is the total of the estimated chunk sizes. 
*/ static class EstimateSizeTask extends MRTask<EstimateSizeTask> { // IN private final Frame.CSVStreamParams _parms; // OUT int _nNonEmpty; long _size; public EstimateSizeTask(Frame.CSVStreamParams parms) { _parms = parms; } @Override public void map(Chunk[] cs) { if (cs[0]._len == 0) return; try (Frame.CSVStream is = new Frame.CSVStream(cs, null, 1, _parms)) { _nNonEmpty++; _size += (long) is.getCurrentRowSize() * cs[0]._len; } catch (IOException e) { throw new RuntimeException(e); } } @Override public void reduce(EstimateSizeTask mrt) { _nNonEmpty += mrt._nNonEmpty; _size += mrt._size; } @Override public String toString() { return "EstimateSizeTask{_nNonEmpty=" + _nNonEmpty + ", _size=" + _size + '}'; } } private long copyCSVStream(Frame.CSVStream is, OutputStream os, int firstChkIdx) throws IOException { long len = 0; byte[] bytes = new byte[BUFFER_SIZE]; int curChkIdx = firstChkIdx; for (;;) { int count = is.read(bytes, 0, BUFFER_SIZE); if (count <= 0) { break; } len += count; os.write(bytes, 0, count); int workDone = is._curChkIdx - curChkIdx; if (workDone > 0) { if (_j.stop_requested()) throw new Job.JobCancelledException(_j); _j.update(workDone); curChkIdx = is._curChkIdx; } } return len; } private void exportCSVStream(Frame.CSVStream is, String path, int firstChkIdx) { exportCSVStream(is, path, firstChkIdx, _compressor); } private void exportCSVStream(Frame.CSVStream is, String path, int firstChkIdx, CompressionFactory compressor) { OutputStream os = null; long written = -1; try { os = H2O.getPM().create(path, _overwrite); if (compressor != null) { os = compressor.wrapOutputStream(os); } written = copyCSVStream(is, os, firstChkIdx); } catch (IOException e) { throw new RuntimeException(e); } finally { if (os != null) { try { os.flush(); // Seems redundant, but seeing a short-file-read on windows sometimes os.close(); Log.info("Written " + written + " bytes of key '" + _frameName + "' to " + _path + "."); } catch (Exception e) { Log.err(e); } } try { is.close(); } catch (Exception e) { Log.err(e); } } } class PartExportTask extends MRTask<PartExportTask> { final String[] _colNames; final int _length; final Frame.CSVStreamParams _csv_parms; PartExportTask(H2O.H2OCountedCompleter<?> completer, String[] colNames, int length, Frame.CSVStreamParams csvParms) { super(completer); _colNames = colNames; _length = length; _csv_parms = csvParms; } @Override public void map(Chunk[] cs) { Chunk anyChunk = cs[0]; if (anyChunk.cidx() % _length > 0) { return; } int partIdx = anyChunk.cidx() / _length; String partPath = _path + "/part-m-" + String.valueOf(100000 + partIdx).substring(1); Frame.CSVStream is = new Frame.CSVStream(cs, _colNames, _length, _csv_parms); exportCSVStream(is, partPath, anyChunk.cidx()); } @Override protected void setupLocal() { boolean created = H2O.getPM().mkdirs(_path); if (! 
created) Log.warn("Path ", _path, " was not created."); } } class ChunkExportTask extends MrFun<ChunkExportTask> { private final transient AtomicInteger _chunkIndex = new AtomicInteger(-1); private final transient BlockingQueue<Integer> _completed = new LinkedBlockingQueue<>(); final Frame _fr; final String[] _colNames; final Frame.CSVStreamParams _csv_parms; final CompressionFactory _compressor; ChunkExportTask(Frame fr, String[] colNames, Frame.CSVStreamParams csvParms, CompressionFactory compressor) { _fr = fr; _colNames = colNames; _csv_parms = csvParms; _compressor = compressor; } @Override protected void map(int id) { final int nChunks = _fr.anyVec().nChunks(); int cid; while ((cid = _chunkIndex.incrementAndGet()) < nChunks) { Chunk[] cs = new Chunk[_fr.numCols()]; for (int i = 0; i < cs.length; i++) { Vec v = _fr.vec(i); cs[i] = v.chunkForChunkIdx(cid); } String chunkPath = getChunkPath(cid); Frame.CSVStream is = new Frame.CSVStream(cs, cid == 0 ? _colNames : null, 1, _csv_parms); exportCSVStream(is, chunkPath, cid, _compressor); _completed.add(cid); } } String getChunkPath(int cid) { return _path + ".chunk-" + String.valueOf(100000 + cid).substring(1); } } class InMemoryChunkExportTask extends ChunkExportTask { private final Key<?> _k; InMemoryChunkExportTask(Key<?> k, Frame fr, String[] colNames, Frame.CSVStreamParams csvParms, CompressionFactory compressor) { super(fr, colNames, csvParms, compressor); _k = k; } @Override String getChunkPath(int cid) { return H2O.getPM().toHexPath(Vec.chunkKey(_k, cid)); } } } public static class CategoricalOneHotEncoder extends Iced { final Frame _frame; Job<Frame> _job; final String[] _skipCols; public CategoricalOneHotEncoder(Frame dataset, String[] skipCols) { _frame = dataset; _skipCols = skipCols; } /** * Driver for CategoricalOneHotEncoder */ class CategoricalOneHotEncoderDriver extends H2O.H2OCountedCompleter { final Frame _frame; final Key<Frame> _destKey; final String[] _skipCols; CategoricalOneHotEncoderDriver(Frame frame, Key<Frame> destKey, String[] skipCols) { _frame = frame; _destKey = destKey; _skipCols = skipCols; } class OneHotConverter extends MRTask<OneHotConverter> { int[] _categorySizes; public OneHotConverter(int[] categorySizes) { _categorySizes = categorySizes; } @Override public void map(Chunk[] cs, NewChunk[] ncs) { int targetColOffset = 0; for (int iCol = 0; iCol < cs.length; ++iCol) { Chunk col = cs[iCol]; int numTargetColumns = _categorySizes[iCol]; for (int iRow = 0; iRow < col._len; ++iRow) { long val = col.isNA(iRow)? numTargetColumns-1 : col.at8(iRow); for (int j = 0; j < numTargetColumns; ++j) { ncs[targetColOffset + j].addNum(val==j ? 
1 : 0, 0); } } targetColOffset += numTargetColumns; } } } @Override public void compute2() { Vec[] frameVecs = _frame.vecs(); int numCategoricals = 0; for (int i=0;i<frameVecs.length;++i) if (frameVecs[i].isCategorical() && ArrayUtils.find(_skipCols, _frame._names[i])==-1) numCategoricals++; Vec[] extraVecs = new Vec[_skipCols.length]; for (int i=0; i< extraVecs.length; ++i) { Vec v = _frame.vec(_skipCols[i]); //can be null if (v!=null) extraVecs[i] = v; } Frame categoricalFrame = new Frame(); Frame outputFrame = new Frame(_destKey); int[] categorySizes = new int[numCategoricals]; int numOutputColumns = 0; List<String> catnames= new ArrayList<>(); for (int i = 0, j = 0; i < frameVecs.length; ++i) { if (ArrayUtils.find(_skipCols, _frame._names[i])>=0) continue; int numCategories = frameVecs[i].cardinality(); // Returns -1 if non-categorical variable if (numCategories > 0) { categoricalFrame.add(_frame.name(i), frameVecs[i]); categorySizes[j] = numCategories + 1/* for NAs */; numOutputColumns += categorySizes[j]; for (int k=0;k<categorySizes[j]-1;++k) catnames.add(_frame.name(i) + "." + _frame.vec(i).domain()[k]); catnames.add(_frame.name(i) + ".missing(NA)"); ++j; } else { outputFrame.add(_frame.name(i), frameVecs[i].makeCopy()); } } OneHotConverter mrtask = new OneHotConverter(categorySizes); Frame binaryCols = mrtask.doAll(numOutputColumns, Vec.T_NUM, categoricalFrame).outputFrame(); binaryCols.setNames(catnames.toArray(new String[0])); outputFrame.add(binaryCols); for (int i=0;i<extraVecs.length;++i) { if (extraVecs[i]!=null) outputFrame.add(_skipCols[i], extraVecs[i].makeCopy()); } DKV.put(outputFrame); tryComplete(); } } public Job<Frame> exec() { if (_frame == null) throw new IllegalArgumentException("Frame doesn't exist."); Key<Frame> destKey = Key.makeSystem(Key.make().toString()); _job = new Job<>(destKey, Frame.class.getName(), "CategoricalOneHotEncoder"); int workAmount = _frame.lastVec().nChunks(); return _job.start(new CategoricalOneHotEncoderDriver(_frame, destKey, _skipCols), workAmount); } } public static class CategoricalLabelEncoder extends Iced { final Frame _frame; Job<Frame> _job; final String[] _skipCols; public CategoricalLabelEncoder(Frame dataset, String[] skipCols) { _frame = dataset; _skipCols = skipCols; } /** * Driver for CategoricalLabelEncoder */ class CategoricalLabelEncoderDriver extends H2O.H2OCountedCompleter { final Frame _frame; final Key<Frame> _destKey; final String[] _skipCols; CategoricalLabelEncoderDriver(Frame frame, Key<Frame> destKey, String[] skipCols) { _frame = frame; _destKey = destKey; _skipCols = skipCols; } @Override public void compute2() { Vec[] frameVecs = _frame.vecs(); Vec[] extraVecs = _skipCols==null?null:new Vec[_skipCols.length]; if (extraVecs!=null) { for (int i = 0; i < extraVecs.length; ++i) { Vec v = _frame.vec(_skipCols[i]); //can be null if (v != null) extraVecs[i] = v; } } Frame outputFrame = new Frame(_destKey); for (int i = 0, j = 0; i < frameVecs.length; ++i) { if (_skipCols!=null && ArrayUtils.find(_skipCols, _frame._names[i])>=0) continue; int numCategories = frameVecs[i].cardinality(); // Returns -1 if non-categorical variable if (numCategories > 0) { outputFrame.add(_frame.name(i), frameVecs[i].toNumericVec()); } else outputFrame.add(_frame.name(i), frameVecs[i].makeCopy()); } if (_skipCols!=null) { for (int i = 0; i < extraVecs.length; ++i) { if (extraVecs[i] != null) outputFrame.add(_skipCols[i], extraVecs[i].makeCopy()); } } DKV.put(outputFrame); tryComplete(); } } public Job<Frame> exec() { if (_frame == null) 
throw new IllegalArgumentException("Frame doesn't exist."); Key<Frame> destKey = Key.makeSystem(Key.make().toString()); _job = new Job<>(destKey, Frame.class.getName(), "CategoricalLabelEncoder"); int workAmount = _frame.lastVec().nChunks(); return _job.start(new CategoricalLabelEncoderDriver(_frame, destKey, _skipCols), workAmount); } } /** * Helper to convert a categorical variable into a "binary" encoding format. In this format each categorical value is * first assigned an integer value, then that integer is written in binary, and each bit column is converted into a * separate column. This is intended as an improvement to an existing one-hot transformation. * For each categorical variable we assume that the number of categories is 1 + domain cardinality, the extra * category is reserved for NAs. * See http://www.willmcginnis.com/2015/11/29/beyond-one-hot-an-exploration-of-categorical-variables/ */ public static class CategoricalBinaryEncoder extends Iced { final Frame _frame; Job<Frame> _job; final String[] _skipCols; public CategoricalBinaryEncoder(Frame dataset, String[] skipCols) { _frame = dataset; _skipCols = skipCols; } /** * Driver for CategoricalBinaryEncoder */ class CategoricalBinaryEncoderDriver extends H2O.H2OCountedCompleter { final Frame _frame; final Key<Frame> _destKey; final String[] _skipCols; CategoricalBinaryEncoderDriver(Frame frame, Key<Frame> destKey, String[] skipCols) { _frame = frame; _destKey = destKey; _skipCols = skipCols; } class BinaryConverter extends MRTask<BinaryConverter> { int[] _categorySizes; public BinaryConverter(int[] categorySizes) { _categorySizes = categorySizes; } @Override public void map(Chunk[] cs, NewChunk[] ncs) { int targetColOffset = 0; for (int iCol = 0; iCol < cs.length; ++iCol) { Chunk col = cs[iCol]; int numTargetColumns = _categorySizes[iCol]; for (int iRow = 0; iRow < col._len; ++iRow) { long val = col.isNA(iRow)? 
0 : 1 + col.at8(iRow); for (int j = 0; j < numTargetColumns; ++j) { ncs[targetColOffset + j].addNum(val & 1, 0); val >>>= 1; } assert val == 0 : ""; } targetColOffset += numTargetColumns; } } } @Override public void compute2() { Vec[] frameVecs = _frame.vecs(); int numCategoricals = 0; for (int i=0;i<frameVecs.length;++i) if (frameVecs[i].isCategorical() && (_skipCols==null || ArrayUtils.find(_skipCols, _frame._names[i])==-1)) numCategoricals++; Vec[] extraVecs = _skipCols==null?null:new Vec[_skipCols.length]; if (extraVecs!=null) { for (int i = 0; i < extraVecs.length; ++i) { Vec v = _frame.vec(_skipCols[i]); //can be null if (v != null) extraVecs[i] = v; } } Frame categoricalFrame = new Frame(); Frame outputFrame = new Frame(_destKey); int[] binaryCategorySizes = new int[numCategoricals]; int numOutputColumns = 0; for (int i = 0, j = 0; i < frameVecs.length; ++i) { if (_skipCols!=null && ArrayUtils.find(_skipCols, _frame._names[i])>=0) continue; int numCategories = frameVecs[i].cardinality(); // Returns -1 if non-categorical variable if (numCategories > 0) { categoricalFrame.add(_frame.name(i), frameVecs[i]); binaryCategorySizes[j] = 1 + MathUtils.log2(numCategories - 1 + 1/* for NAs */); numOutputColumns += binaryCategorySizes[j]; ++j; } else outputFrame.add(_frame.name(i), frameVecs[i].makeCopy()); } BinaryConverter mrtask = new BinaryConverter(binaryCategorySizes); Frame binaryCols = mrtask.doAll(numOutputColumns, Vec.T_NUM, categoricalFrame).outputFrame(); // change names of binaryCols so that they reflect the original names of the categories for (int i = 0, j = 0; i < binaryCategorySizes.length; j += binaryCategorySizes[i++]) { for (int k = 0; k < binaryCategorySizes[i]; ++k) { binaryCols._names[j + k] = categoricalFrame.name(i) + ":" + k; } } outputFrame.add(binaryCols); if (_skipCols!=null) { for (int i = 0; i < extraVecs.length; ++i) { if (extraVecs[i] != null) outputFrame.add(_skipCols[i], extraVecs[i].makeCopy()); } } DKV.put(outputFrame); tryComplete(); } } public Job<Frame> exec() { if (_frame == null) throw new IllegalArgumentException("Frame doesn't exist."); Key<Frame> destKey = Key.makeSystem(Key.make().toString()); _job = new Job<>(destKey, Frame.class.getName(), "CategoricalBinaryEncoder"); int workAmount = _frame.lastVec().nChunks(); return _job.start(new CategoricalBinaryEncoderDriver(_frame, destKey, _skipCols), workAmount); } } /** * Helper to convert a categorical variable into the first eigenvector of the dummy-expanded matrix. 
*/ public static class CategoricalEnumLimitedEncoder { final Frame _frame; Job<Frame> _job; final String[] _skipCols; final int _maxLevels; public CategoricalEnumLimitedEncoder(int maxLevels, Frame dataset, String[] skipCols) { _frame = dataset; _skipCols = skipCols; _maxLevels = maxLevels; } /** * Driver for CategoricalEnumLimited */ class CategoricalEnumLimitedDriver extends H2O.H2OCountedCompleter { final Frame _frame; final Key<Frame> _destKey; final String[] _skipCols; CategoricalEnumLimitedDriver(Frame frame, Key<Frame> destKey, String[] skipCols) { _frame = frame; _destKey = destKey; _skipCols = skipCols; } @Override public void compute2() { Vec[] frameVecs = _frame.vecs(); Vec[] extraVecs = new Vec[_skipCols==null?0:_skipCols.length]; for (int i=0; i< extraVecs.length; ++i) { Vec v = _skipCols==null||_skipCols.length<=i?null:_frame.vec(_skipCols[i]); //can be null if (v!=null) extraVecs[i] = v; } // Log.info(_frame.toTwoDimTable(0, (int)_frame.numRows())); Frame outputFrame = new Frame(_destKey); for (int i = 0; i < frameVecs.length; ++i) { Vec src = frameVecs[i]; if (_skipCols!=null && ArrayUtils.find(_skipCols, _frame._names[i])>=0) continue; if (src.cardinality() > _maxLevels && !(src.isDomainTruncated(_maxLevels))) { //avoid double-encoding by checking it was not previously truncated on first encoding Key<Frame> source = Key.make(); Key<Frame> dest = Key.make(); Frame train = new Frame(source, new String[]{"enum"}, new Vec[]{src}); DKV.put(train); Log.info("Reducing the cardinality of a categorical column with " + src.cardinality() + " levels to " + _maxLevels); train = Interaction.getInteraction(train._key, train.names(), _maxLevels).execImpl(dest).get(); outputFrame.add(_frame.name(i) + ".top_" + _maxLevels + "_levels", train.anyVec().makeCopy()); train.remove(); DKV.remove(source); } else { outputFrame.add(_frame.name(i), frameVecs[i].makeCopy()); } } for (int i=0;i<extraVecs.length;++i) { if (extraVecs[i]!=null) outputFrame.add(_skipCols[i], extraVecs[i].makeCopy()); } // Log.info(outputFrame.toTwoDimTable(0, (int)outputFrame.numRows())); DKV.put(outputFrame); tryComplete(); } } public Job<Frame> exec() { if (_frame == null) throw new IllegalArgumentException("Frame doesn't exist."); Key<Frame> destKey = Key.makeSystem(Key.make().toString()); _job = new Job<>(destKey, Frame.class.getName(), "CategoricalEnumLimited"); int workAmount = _frame.lastVec().nChunks(); return _job.start(new CategoricalEnumLimitedDriver(_frame, destKey, _skipCols), workAmount); } } /** * Helper to convert a categorical variable into the first eigenvector of the dummy-expanded matrix. 
*/ public static class CategoricalEigenEncoder { final Frame _frame; Job<Frame> _job; final String[] _skipCols; final ToEigenVec _tev; public CategoricalEigenEncoder(ToEigenVec tev, Frame dataset, String[] skipCols) { _frame = dataset; _skipCols = skipCols; _tev = tev; } /** * Driver for CategoricalEigenEncoder */ class CategoricalEigenEncoderDriver extends H2O.H2OCountedCompleter { final Frame _frame; final Key<Frame> _destKey; final String[] _skipCols; final ToEigenVec _tev; CategoricalEigenEncoderDriver(ToEigenVec tev, Frame frame, Key<Frame> destKey, String[] skipCols) { _tev = tev; _frame = frame; _destKey = destKey; _skipCols = skipCols; assert _tev!=null : "Override toEigenVec for this Algo!"; } @Override public void compute2() { Vec[] frameVecs = _frame.vecs(); Vec[] extraVecs = new Vec[_skipCols==null?0:_skipCols.length]; for (int i=0; i< extraVecs.length; ++i) { Vec v = _skipCols==null||_skipCols.length<=i?null:_frame.vec(_skipCols[i]); //can be null if (v!=null) extraVecs[i] = v; } Frame outputFrame = new Frame(_destKey); for (int i = 0; i < frameVecs.length; ++i) { if (_skipCols!=null && ArrayUtils.find(_skipCols, _frame._names[i])>=0) continue; if (frameVecs[i].isCategorical()) outputFrame.add(_frame.name(i) + ".Eigen", _tev.toEigenVec(frameVecs[i])); else outputFrame.add(_frame.name(i), frameVecs[i].makeCopy()); } for (int i=0;i<extraVecs.length;++i) { if (extraVecs[i]!=null) outputFrame.add(_skipCols[i], extraVecs[i].makeCopy()); } DKV.put(outputFrame); tryComplete(); } } public Job<Frame> exec() { if (_frame == null) throw new IllegalArgumentException("Frame doesn't exist."); Key<Frame> destKey = Key.makeSystem(Key.make().toString()); _job = new Job<>(destKey, Frame.class.getName(), "CategoricalEigenEncoder"); int workAmount = _frame.lastVec().nChunks(); return _job.start(new CategoricalEigenEncoderDriver(_tev, _frame, destKey, _skipCols), workAmount); } } static public void cleanUp(IcedHashMap<Key, String> toDelete) { if (toDelete == null) { return; } Futures fs = new Futures(); for (Key k : toDelete.keySet()) { Keyed.remove(k, fs, true); } fs.blockForPending(); toDelete.clear(); } /** * reduce the domains of all categorical columns to the actually observed subset * @param frameToModifyInPlace */ static public void shrinkDomainsToObservedSubset(Frame frameToModifyInPlace) { for (Vec v : frameToModifyInPlace.vecs()) { if (v.isCategorical()) { long[] uniques = (v.min() >= 0 && v.max() < Integer.MAX_VALUE - 4) ? new VecUtils.CollectDomainFast((int)v.max()).doAll(v).domain() : new VecUtils.CollectIntegerDomain().doAll(v).domain(); String[] newDomain = new String[uniques.length]; final int[] fromTo = new int[(int)ArrayUtils.maxValue(uniques)+1]; for (int i=0;i<newDomain.length;++i) { newDomain[i] = v.domain()[(int) uniques[i]]; fromTo[(int)uniques[i]] = i; //helper for value mapping } new MRTask() { @Override public void map(Chunk c) { for (int i=0;i<c._len;++i) { if (c.isNA(i)) continue; else c.set(i, fromTo[(int)c.at8(i)]); } } }.doAll(v); v.setDomain(newDomain); } } } public static void delete(Lockable ...frs) { for (Lockable l : frs) { if (l != null) l.delete(); } } /** * This class will calculate the weighted mean and standard deviatioin of a target column of a data frame * with the weights specified in another column. 
* * For the weighted mean, it is calculated as (sum from i=1 to N wi*xi)/(sum from i=1 to N wi) * For the weigthed std, it is calculated as * (sum from i=1 to N wi*(xi-weightedMean)*(xi-weightedMean))/(C *sum from i=1 to N wi) * where C = (M-1)/M and M is the number of nonzero weights. * */ public static class CalculateWeightMeanSTD extends MRTask<CalculateWeightMeanSTD> { public double _weightedEleSum; public double _weightedEleSqSum; public double _weightedCount; public double _weightedMean; public double _weightedSigma; public long _nonZeroWeightsNum; @Override public void map(Chunk pcs, Chunk wcs) { _weightedEleSum = 0; _weightedEleSqSum = 0; _weightedCount = 0; _nonZeroWeightsNum = 0; assert pcs._len==wcs._len:"Prediction and weight chunk should have the same length."; // 0 contains prediction, 1 columns weight for (int rindex = 0; rindex < pcs._len; rindex++) { double weight = wcs.atd(rindex); double pvalue = pcs.atd(rindex); if ((!Double.isNaN(pvalue)) && (Math.abs(weight) > 0) && (!Double.isNaN(pvalue))) { double v1 = pvalue * wcs.atd(rindex); _weightedEleSum += v1; _weightedEleSqSum += v1 * pvalue; _weightedCount += wcs.atd(rindex); _nonZeroWeightsNum++; } } } @Override public void reduce(CalculateWeightMeanSTD other) { _weightedEleSum += other._weightedEleSum; _weightedEleSqSum += other._weightedEleSqSum; _weightedCount += other._weightedCount; _nonZeroWeightsNum += other._nonZeroWeightsNum; } @Override public void postGlobal() { _weightedMean = _weightedCount==0?Double.NaN:_weightedEleSum/_weightedCount; // return NaN for bad input double scale = _nonZeroWeightsNum==1?_nonZeroWeightsNum*1.0:(_nonZeroWeightsNum-1.0); double scaling = _nonZeroWeightsNum*1.0/scale; _weightedSigma = _weightedCount==0?Double.NaN: Math.sqrt((_weightedEleSqSum/_weightedCount-_weightedMean*_weightedMean)*scaling); // return NaN for bad input } public double getWeightedMean() { return _weightedMean; } public double getWeightedSigma() { return _weightedSigma; } } /** * Labels frame's rows with a sequence starting with 1 & sending with total number of rows in the frame. * A vector is added to the frame given, no data are duplicated. 
* * @param frame Frame to label * @param labelColumnName Name of the label column */ public static void labelRows(final Frame frame, final String labelColumnName) { final Vec labelVec = Vec.makeSeq(1, frame.numRows()); frame.add(labelColumnName, labelVec); } private static String getCurrConstraintName(int id, Vec constraintsNames, BufferedString tmpStr) { String currConstraintName; if (constraintsNames.isString()) currConstraintName = constraintsNames.atStr(tmpStr, id).toString(); else if (constraintsNames.isCategorical()) currConstraintName = constraintsNames.domain()[id]; else throw new IllegalArgumentException("Illegal beta constraints file, names column expected to contain column names (strings)"); return currConstraintName; } private static void writeNewRow(String name, Frame betaConstraints, NewChunk[] nc, int id) { nc[0].addStr(name); for (int k = 1; k < nc.length; k++) { nc[k].addNum(betaConstraints.vec(k).at(id)); } } public static class ExpandCatBetaConstraints extends MRTask<ExpandCatBetaConstraints> { public final Frame _trainFrame; public final Frame _betaCS; public ExpandCatBetaConstraints(Frame betaCs, Frame train) { _trainFrame = train; _betaCS = betaCs; } @Override public void map(Chunk[] chunks, NewChunk[] newChunks) { int chkLen = chunks[0]._len; int chkCol = chunks.length; BufferedString tempStr = new BufferedString(); List<String> trainColNames = Arrays.asList(_trainFrame.names()); String[] colTypes = _trainFrame.typesStr(); for (int rowIndex=0; rowIndex<chkLen; rowIndex++) { String cName = chunks[0].atStr(tempStr, rowIndex).toString(); int trainColNumber = trainColNames.indexOf(cName); if (trainColNumber < 0 && "intercept".equals(cName)) throw new IllegalArgumentException("beta constraints cannot be applied to the intercept"); String csTypes = colTypes[trainColNumber]; if ("Enum".equals(csTypes)) { String[] domains = _trainFrame.vec(trainColNumber).domain(); int domainLen = domains.length; for (int repIndex = 0; repIndex < domainLen; repIndex++) { String newCSName = cName+'.'+domains[repIndex]; newChunks[0].addStr(newCSName); for (int colIndex = 1; colIndex < chkCol; colIndex++) { newChunks[colIndex].addNum(chunks[colIndex].atd(rowIndex)); } } } else { // copy over non-enum beta constraints newChunks[0].addStr(chunks[0].atStr(tempStr, rowIndex).toString()); for (int colIndex = 1; colIndex < chkCol; colIndex++) { newChunks[colIndex].addNum(chunks[colIndex].atd(rowIndex)); } } } } } public static Frame encodeBetaConstraints(Key key, String[] coefNames, String[] coefOriginalNames, Frame betaConstraints) { int ncols = betaConstraints.numCols(); AppendableVec[] appendableVecs = new AppendableVec[ncols]; NewChunk ncs[] = new NewChunk[ncols]; Key keys[] = Vec.VectorGroup.VG_LEN1.addVecs(ncols); for (int i = 0; i < appendableVecs.length; i++) { appendableVecs[i] = new AppendableVec(keys[i], i == 0 ? 
Vec.T_STR : betaConstraints.vec(i).get_type()); } Futures fs = new Futures(); int chunknum = 0; if (ncs[0] == null) { for (int i = 0; i < ncols; i++) { ncs[i] = new NewChunk(appendableVecs[i],chunknum); } } Vec constraintsNames = betaConstraints.vec(0); BufferedString tmpStr = new BufferedString(); for (int i = 0; i < constraintsNames.length(); i++) { String currConstraintName = getCurrConstraintName(i, constraintsNames, tmpStr); for (int j = 0; j < coefNames.length; j++) { if (coefNames[j].equals(currConstraintName)) { writeNewRow(currConstraintName, betaConstraints, ncs, i); } else if (!Arrays.asList(coefNames).contains(currConstraintName) && Arrays.asList(coefOriginalNames).contains(currConstraintName) && coefNames[j].startsWith(currConstraintName)) { writeNewRow(coefNames[j], betaConstraints, ncs, i); } } } if (ncs[0] != null) { for (int i = 0; i < ncols; i++) { ncs[i].close(chunknum,fs); } ncs[0] = null; } Vec[] vecs = new Vec[ncols]; final int rowLayout = appendableVecs[0].compute_rowLayout(); for (int i = 0; i < appendableVecs.length; i++) { vecs[i] = appendableVecs[i].close(rowLayout,fs); } fs.blockForPending(); Frame fr = new Frame(key, betaConstraints.names(), vecs); if (key != null) { DKV.put(fr); } return fr; } public static Chunk[] extractChunks(Frame fr, int chunkId, boolean runLocal) { final Vec v0 = fr.anyVec(); final Vec[] vecs = fr.vecs(); final Chunk[] chunks = new Chunk[vecs.length]; for (int i = 0; i < vecs.length; i++) { if (vecs[i] != null) { assert runLocal || vecs[i].chunkKey(chunkId).home() : "Chunk=" + chunkId + " v0=" + v0 + ", k=" + v0.chunkKey(chunkId) + " v[" + i + "]=" + vecs[i] + ", k=" + vecs[i].chunkKey(chunkId); chunks[i] = vecs[i].chunkForChunkIdx(chunkId); } } return chunks; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/GetLogsFromNode.java
package water.util; import water.*; import java.io.*; /** * Get zipped log directory data from a node. * The intent here is to return a binary blob with the data for saving as a file. * This is used as part of the "/3/Logs/download" REST API. */ public class GetLogsFromNode extends Iced { static final int MB = 1 << 20; static final int MAX_SIZE = 25 * MB; public GetLogsFromNode(int nodeidx, LogArchiveContainer container) { this.nodeidx = nodeidx; this.container = container; } // Input /** * Node number to get logs from (starting at 0). */ private final int nodeidx; private final LogArchiveContainer container; // Output /** * Byte array containing a archived file with the entire log directory. */ public byte[] bytes; /** * Do the work. */ public void doIt() { if (nodeidx == -1) { GetLogsTask t = new GetLogsTask(container); t.doIt(); bytes = t._bytes; } else { H2ONode node = H2O.CLOUD._memary[nodeidx]; GetLogsTask t = new GetLogsTask(container); Log.trace("GetLogsTask starting to node " + nodeidx + "..."); // Synchronous RPC call to get ticks from remote (possibly this) node. new RPC<>(node, t).call().get(); Log.trace("GetLogsTask completed to node " + nodeidx); bytes = t._bytes; } } private static class GetLogsTask extends DTask<GetLogsTask> { // IN private final LogArchiveContainer _container; // OUT private byte[] _bytes; public GetLogsTask(LogArchiveContainer container) { super(H2O.MIN_HI_PRIORITY); _container = container; _bytes = null; } public void doIt() { try (ByteArrayOutputStream baos = new ByteArrayOutputStream(); LogArchiveWriter archiveWriter = _container.createLogArchiveWriter(baos)) { String archiveRoot = String.format("h2ologs_node%d_%s_%d", H2O.SELF.index(), H2O.SELF_ADDRESS.getHostAddress(), H2O.API_PORT); archiveDir(Log.getLogDir(), archiveRoot, baos, archiveWriter); archiveWriter.close(); // need to close before we extract the bytes _bytes = baos.toByteArray(); } catch (Exception e) { _bytes = StringUtils.toBytes(e); } } @Override public void compute2() { doIt(); tryComplete(); } //here is the code for the method private void archiveDir(String dir, String pathInArchive, ByteArrayOutputStream baos, LogArchiveWriter writer) { try { //convert paths represented as strings into File instances File sourceDir = new File(dir); File destinationDir = new File(pathInArchive); //get a listing of the directory content String[] dirList = sourceDir.list(); if (dirList == null) return; byte[] readBuffer = new byte[4096]; //loop through dirList, and archive the files for(int i=0; i<dirList.length; i++) { File sourceFile = new File(sourceDir, dirList[i]); File destinationFile = new File(destinationDir, dirList[i]); if(sourceFile.isDirectory()) { //if the File object is a directory, call this archiveDir(sourceFile.getPath(), destinationFile.getPath(), baos, writer); //loop again continue; } // In the Sparkling Water case, when running in the local-cluster configuration, // there are jar files in the log directory too. Ignore them. 
if (sourceFile.toString().endsWith(".jar")) { continue; } //if we reached here, the File object f was not a directory //create a FileInputStream on top of f FileInputStream fis = new FileInputStream(sourceFile.getPath()); //create a new archive entry LogArchiveWriter.ArchiveEntry anEntry = new LogArchiveWriter.ArchiveEntry(destinationFile.getPath(), sourceFile.lastModified()); //place the archive entry writer.putNextEntry(anEntry); //now add the content of the file to the archive boolean stopEarlyBecauseTooMuchData = false; int bytesIn; while((bytesIn = fis.read(readBuffer)) != -1) { writer.write(readBuffer, 0, bytesIn); if (baos.size() > MAX_SIZE) { stopEarlyBecauseTooMuchData = true; break; } } //close the Stream fis.close(); writer.closeEntry(); if (stopEarlyBecauseTooMuchData) { Log.warn("GetLogsTask stopEarlyBecauseTooMuchData"); break; } } } catch(Exception e) { Log.warn(e); } } } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/HttpResponseStatus.java
// Taken from the netty project (Apache V2 license) package water.util; /** * The response code and its description of HTTP or its derived protocols, such as * <a href="http://en.wikipedia.org/wiki/Real_Time_Streaming_Protocol">RTSP</a> and * <a href="http://en.wikipedia.org/wiki/Internet_Content_Adaptation_Protocol">ICAP</a>. * @apiviz.exclude */ public class HttpResponseStatus implements Comparable<HttpResponseStatus> { /** * 100 Continue */ public static final HttpResponseStatus CONTINUE = new HttpResponseStatus(100, "Continue"); /** * 101 Switching Protocols */ public static final HttpResponseStatus SWITCHING_PROTOCOLS = new HttpResponseStatus(101, "Switching Protocols"); /** * 102 Processing (WebDAV, RFC2518) */ public static final HttpResponseStatus PROCESSING = new HttpResponseStatus(102, "Processing"); /** * 200 OK */ public static final HttpResponseStatus OK = new HttpResponseStatus(200, "OK"); /** * 201 Created */ public static final HttpResponseStatus CREATED = new HttpResponseStatus(201, "Created"); /** * 202 Accepted */ public static final HttpResponseStatus ACCEPTED = new HttpResponseStatus(202, "Accepted"); /** * 203 Non-Authoritative Information (since HTTP/1.1) */ public static final HttpResponseStatus NON_AUTHORITATIVE_INFORMATION = new HttpResponseStatus(203, "Non-Authoritative Information"); /** * 204 No Content */ public static final HttpResponseStatus NO_CONTENT = new HttpResponseStatus(204, "No Content"); /** * 205 Reset Content */ public static final HttpResponseStatus RESET_CONTENT = new HttpResponseStatus(205, "Reset Content"); /** * 206 Partial Content */ public static final HttpResponseStatus PARTIAL_CONTENT = new HttpResponseStatus(206, "Partial Content"); /** * 207 Multi-Status (WebDAV, RFC2518) */ public static final HttpResponseStatus MULTI_STATUS = new HttpResponseStatus(207, "Multi-Status"); /** * 300 Multiple Choices */ public static final HttpResponseStatus MULTIPLE_CHOICES = new HttpResponseStatus(300, "Multiple Choices"); /** * 301 Moved Permanently */ public static final HttpResponseStatus MOVED_PERMANENTLY = new HttpResponseStatus(301, "Moved Permanently"); /** * 302 Found */ public static final HttpResponseStatus FOUND = new HttpResponseStatus(302, "Found"); /** * 303 See Other (since HTTP/1.1) */ public static final HttpResponseStatus SEE_OTHER = new HttpResponseStatus(303, "See Other"); /** * 304 Not Modified */ public static final HttpResponseStatus NOT_MODIFIED = new HttpResponseStatus(304, "Not Modified"); /** * 305 Use Proxy (since HTTP/1.1) */ public static final HttpResponseStatus USE_PROXY = new HttpResponseStatus(305, "Use Proxy"); /** * 307 Temporary Redirect (since HTTP/1.1) */ public static final HttpResponseStatus TEMPORARY_REDIRECT = new HttpResponseStatus(307, "Temporary Redirect"); /** * 400 Bad Request */ public static final HttpResponseStatus BAD_REQUEST = new HttpResponseStatus(400, "Bad Request"); /** * 401 Unauthorized */ public static final HttpResponseStatus UNAUTHORIZED = new HttpResponseStatus(401, "Unauthorized"); /** * 402 Payment Required */ public static final HttpResponseStatus PAYMENT_REQUIRED = new HttpResponseStatus(402, "Payment Required"); /** * 403 Forbidden */ public static final HttpResponseStatus FORBIDDEN = new HttpResponseStatus(403, "Forbidden"); /** * 404 Not Found */ public static final HttpResponseStatus NOT_FOUND = new HttpResponseStatus(404, "Not Found"); /** * 405 Method Not Allowed */ public static final HttpResponseStatus METHOD_NOT_ALLOWED = new HttpResponseStatus(405, "Method Not Allowed"); /** * 
406 Not Acceptable */ public static final HttpResponseStatus NOT_ACCEPTABLE = new HttpResponseStatus(406, "Not Acceptable"); /** * 407 Proxy Authentication Required */ public static final HttpResponseStatus PROXY_AUTHENTICATION_REQUIRED = new HttpResponseStatus(407, "Proxy Authentication Required"); /** * 408 Request Timeout */ public static final HttpResponseStatus REQUEST_TIMEOUT = new HttpResponseStatus(408, "Request Timeout"); /** * 409 Conflict */ public static final HttpResponseStatus CONFLICT = new HttpResponseStatus(409, "Conflict"); /** * 410 Gone */ public static final HttpResponseStatus GONE = new HttpResponseStatus(410, "Gone"); /** * 411 Length Required */ public static final HttpResponseStatus LENGTH_REQUIRED = new HttpResponseStatus(411, "Length Required"); /** * 412 Precondition Failed */ public static final HttpResponseStatus PRECONDITION_FAILED = new HttpResponseStatus(412, "Precondition Failed"); /** * 413 Request Entity Too Large */ public static final HttpResponseStatus REQUEST_ENTITY_TOO_LARGE = new HttpResponseStatus(413, "Request Entity Too Large"); /** * 414 Request-URI Too Long */ public static final HttpResponseStatus REQUEST_URI_TOO_LONG = new HttpResponseStatus(414, "Request-URI Too Long"); /** * 415 Unsupported Media Type */ public static final HttpResponseStatus UNSUPPORTED_MEDIA_TYPE = new HttpResponseStatus(415, "Unsupported Media Type"); /** * 416 Requested Range Not Satisfiable */ public static final HttpResponseStatus REQUESTED_RANGE_NOT_SATISFIABLE = new HttpResponseStatus(416, "Requested Range Not Satisfiable"); /** * 417 Expectation Failed */ public static final HttpResponseStatus EXPECTATION_FAILED = new HttpResponseStatus(417, "Expectation Failed"); /** * 422 Unprocessable Entity (WebDAV, RFC4918) */ public static final HttpResponseStatus UNPROCESSABLE_ENTITY = new HttpResponseStatus(422, "Unprocessable Entity"); /** * 423 Locked (WebDAV, RFC4918) */ public static final HttpResponseStatus LOCKED = new HttpResponseStatus(423, "Locked"); /** * 424 Failed Dependency (WebDAV, RFC4918) */ public static final HttpResponseStatus FAILED_DEPENDENCY = new HttpResponseStatus(424, "Failed Dependency"); /** * 425 Unordered Collection (WebDAV, RFC3648) */ public static final HttpResponseStatus UNORDERED_COLLECTION = new HttpResponseStatus(425, "Unordered Collection"); /** * 426 Upgrade Required (RFC2817) */ public static final HttpResponseStatus UPGRADE_REQUIRED = new HttpResponseStatus(426, "Upgrade Required"); /** * 500 Internal Server Error */ public static final HttpResponseStatus INTERNAL_SERVER_ERROR = new HttpResponseStatus(500, "Internal Server Error"); /** * 501 Not Implemented */ public static final HttpResponseStatus NOT_IMPLEMENTED = new HttpResponseStatus(501, "Not Implemented"); /** * 502 Bad Gateway */ public static final HttpResponseStatus BAD_GATEWAY = new HttpResponseStatus(502, "Bad Gateway"); /** * 503 Service Unavailable */ public static final HttpResponseStatus SERVICE_UNAVAILABLE = new HttpResponseStatus(503, "Service Unavailable"); /** * 504 Gateway Timeout */ public static final HttpResponseStatus GATEWAY_TIMEOUT = new HttpResponseStatus(504, "Gateway Timeout"); /** * 505 HTTP Version Not Supported */ public static final HttpResponseStatus HTTP_VERSION_NOT_SUPPORTED = new HttpResponseStatus(505, "HTTP Version Not Supported"); /** * 506 Variant Also Negotiates (RFC2295) */ public static final HttpResponseStatus VARIANT_ALSO_NEGOTIATES = new HttpResponseStatus(506, "Variant Also Negotiates"); /** * 507 Insufficient Storage (WebDAV, RFC4918) 
*/ public static final HttpResponseStatus INSUFFICIENT_STORAGE = new HttpResponseStatus(507, "Insufficient Storage"); /** * 510 Not Extended (RFC2774) */ public static final HttpResponseStatus NOT_EXTENDED = new HttpResponseStatus(510, "Not Extended"); /** * Returns the {@link HttpResponseStatus} represented by the specified code. * If the specified code is a standard HTTP status code, a cached instance * will be returned. Otherwise, a new instance will be returned. */ public static HttpResponseStatus valueOf(int code) { switch (code) { case 100: return CONTINUE; case 101: return SWITCHING_PROTOCOLS; case 102: return PROCESSING; case 200: return OK; case 201: return CREATED; case 202: return ACCEPTED; case 203: return NON_AUTHORITATIVE_INFORMATION; case 204: return NO_CONTENT; case 205: return RESET_CONTENT; case 206: return PARTIAL_CONTENT; case 207: return MULTI_STATUS; case 300: return MULTIPLE_CHOICES; case 301: return MOVED_PERMANENTLY; case 302: return FOUND; case 303: return SEE_OTHER; case 304: return NOT_MODIFIED; case 305: return USE_PROXY; case 307: return TEMPORARY_REDIRECT; case 400: return BAD_REQUEST; case 401: return UNAUTHORIZED; case 402: return PAYMENT_REQUIRED; case 403: return FORBIDDEN; case 404: return NOT_FOUND; case 405: return METHOD_NOT_ALLOWED; case 406: return NOT_ACCEPTABLE; case 407: return PROXY_AUTHENTICATION_REQUIRED; case 408: return REQUEST_TIMEOUT; case 409: return CONFLICT; case 410: return GONE; case 411: return LENGTH_REQUIRED; case 412: return PRECONDITION_FAILED; case 413: return REQUEST_ENTITY_TOO_LARGE; case 414: return REQUEST_URI_TOO_LONG; case 415: return UNSUPPORTED_MEDIA_TYPE; case 416: return REQUESTED_RANGE_NOT_SATISFIABLE; case 417: return EXPECTATION_FAILED; case 422: return UNPROCESSABLE_ENTITY; case 423: return LOCKED; case 424: return FAILED_DEPENDENCY; case 425: return UNORDERED_COLLECTION; case 426: return UPGRADE_REQUIRED; case 500: return INTERNAL_SERVER_ERROR; case 501: return NOT_IMPLEMENTED; case 502: return BAD_GATEWAY; case 503: return SERVICE_UNAVAILABLE; case 504: return GATEWAY_TIMEOUT; case 505: return HTTP_VERSION_NOT_SUPPORTED; case 506: return VARIANT_ALSO_NEGOTIATES; case 507: return INSUFFICIENT_STORAGE; case 510: return NOT_EXTENDED; } final String reasonPhrase; if (code < 100) { reasonPhrase = "Unknown Status"; } else if (code < 200) { reasonPhrase = "Informational"; } else if (code < 300) { reasonPhrase = "Successful"; } else if (code < 400) { reasonPhrase = "Redirection"; } else if (code < 500) { reasonPhrase = "Client Error"; } else if (code < 600) { reasonPhrase = "Server Error"; } else { reasonPhrase = "Unknown Status"; } return new HttpResponseStatus(code, reasonPhrase + " (" + code + ')'); } private final int code; private final String reasonPhrase; /** * Creates a new instance with the specified {@code code} and its * {@code reasonPhrase}. */ public HttpResponseStatus(int code, String reasonPhrase) { if (code < 0) { throw new IllegalArgumentException( "code: " + code + " (expected: 0+)"); } if (reasonPhrase == null) { throw new NullPointerException("reasonPhrase"); } for (int i = 0; i < reasonPhrase.length(); i ++) { char c = reasonPhrase.charAt(i); // Check prohibited characters. switch (c) { case '\n': case '\r': throw new IllegalArgumentException( "reasonPhrase contains one of the following prohibited characters: " + "\\r\\n: " + reasonPhrase); } } this.code = code; this.reasonPhrase = reasonPhrase; } /** * Returns the code of this status. 
*/ public int getCode() { return code; } /** * Returns the reason phrase of this status. */ public String getReasonPhrase() { return reasonPhrase; } @Override public int hashCode() { return getCode(); } @Override public boolean equals(Object o) { if (!(o instanceof HttpResponseStatus)) { return false; } return getCode() == ((HttpResponseStatus) o).getCode(); } public int compareTo(HttpResponseStatus o) { return getCode() - o.getCode(); } @Override public String toString() { StringBuilder buf = new StringBuilder(reasonPhrase.length() + 5); buf.append(code); buf.append(' '); buf.append(reasonPhrase); return buf.toString(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/IcedAtomicInt.java
package water.util; import water.AutoBuffer; import water.Freezable; import water.H2O; import water.TypeMap; import java.util.concurrent.atomic.AtomicInteger; /** * Created by tomas on 3/13/17. */ public final class IcedAtomicInt extends AtomicInteger implements Freezable { private static volatile int _frozeType = 0; public IcedAtomicInt(){super(0);} public IcedAtomicInt(int val){super(val);} @Override public final AutoBuffer write(AutoBuffer ab) { ab.put4(get()); return ab; } @Override public final IcedAtomicInt read(AutoBuffer ab) { set(ab.get4()); return this; } @Override public AutoBuffer writeJSON(AutoBuffer ab) { return ab.putJSON4(get()); } @Override public Freezable readJSON(AutoBuffer ab) { throw H2O.unimpl(); } @Override public int frozenType() { if(_frozeType != 0) return _frozeType; return (_frozeType = TypeMap.getIcer(this).frozenType()); } @Override public byte [] asBytes(){ return write(new AutoBuffer()).buf(); } @Override public IcedAtomicInt reloadFromBytes(byte [] ary){ return read(new AutoBuffer(ary)); } @Override public Freezable clone() { return new IcedAtomicInt(get()); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/IcedBitSet.java
package water.util; import water.AutoBuffer; import water.Iced; /** BitSet - Iced, meaning cheaply serialized over the wire. * * <p>The bits are numbered starting at _offset - so there is an implicit * offset built-in as a service; the offset can be zero. This allows for an * efficient representation if there is a known zero prefix of bits. * * <p>The number of bits (after the zero offset) is also required - meaning * this is a fixed-size (not-auto-sizing) bitset, and this bit offset is * removed from all bit-indices. * * <p>A number of bytes in the byte[] can be skipped also; this is value skips * <em>bytes</em>, not bit indices, and is intended to allow an IcedBitSet to * be embedded inside a large byte array containing unrelated data. */ public class IcedBitSet extends Iced { private byte[] _val; // Holder of the bits, perhaps also holding other unrelated data private int _byteoff; // Number of bytes skipped before starting to count bits private int _nbits; // Number of bits-in-a-row private int _bitoff; // Number of bits discarded from beginning (inclusive min) public IcedBitSet(int nbits) { this(nbits, 0); } public IcedBitSet(int nbits, int bitoff) { // For small bitsets, just use a no-offset fixed-length format if( bitoff+nbits <= 32 ) { bitoff = 0; nbits = 32; } int nbytes = bytes(nbits); fill(nbits <= 0 ? null : new byte[nbytes], 0, nbits, bitoff); } // Fill in fields, with the bytes coming from some other large backing byte // array, which also contains other unrelated bits. public void fill(byte[] v, int byteoff, int nbits, int bitoff) { if( nbits < 0 ) throw new NegativeArraySizeException("nbits < 0: " + nbits ); if( byteoff < 0 ) throw new IndexOutOfBoundsException("byteoff < 0: "+ byteoff); if( bitoff < 0 ) throw new IndexOutOfBoundsException("bitoff < 0: " + bitoff ); assert(v.length >= bytes(nbits)); assert byteoff+bytes(nbits) <= v.length; _val = v; _nbits = nbits; _bitoff = bitoff; _byteoff = byteoff; } public boolean isInRange(int idx) { idx -= _bitoff; return idx >= 0 && idx < _nbits; } public boolean contains(int idx) { idx -= _bitoff; assert (idx >= 0 && idx < _nbits): "Must have "+_bitoff+" <= idx <= " + (_bitoff+_nbits-1) + ": " + idx; return (_val[_byteoff+(idx >> 3)] & ((byte)1 << (idx & 7))) != 0; } /** * Activate the bit specified by the integer (must be from 0 ... 
_nbits-1) * @param idx - */ public void set(int idx) { idx -= _bitoff; assert (idx >= 0 && idx < _nbits): "Must have "+_bitoff+" <= idx <= " + (_bitoff+_nbits-1) + ": " + idx; _val[_byteoff+(idx >> 3)] |= ((byte)1 << (idx & 7)); } public void clear(int idx) { idx -= _bitoff; assert (idx >= 0 && idx < _nbits) : "Must have 0 <= idx <= " + (_nbits - 1) + ": " + idx; _val[_byteoff + (idx >> 3)] &= ~((byte)1 << (idx & 7)); } public int cardinality() { int nbits = 0; int bytes = numBytes(); for(int i = 0; i < bytes; i++) { nbits += Integer.bitCount(0xFF&_val[_byteoff + i]); } return nbits; } public int size() { return _nbits; } private static int bytes(int nbits) { return ((nbits-1) >> 3) + 1; } public int numBytes() { return bytes(_nbits); } public int max() { return _bitoff+_nbits; } // 1 larger than the largest bit allowed // Smaller compression format: just exactly 4 bytes public void compress2( AutoBuffer ab ) { assert max() <= 32; // Expect a larger format assert _byteoff == 0; // This is only set on loading a pre-existing IcedBitSet assert _val.length==4; ab.putA1(_val,4); } public void fill2( byte[] bits, AutoBuffer ab ) { fill(bits,ab.position(),32,0); ab.skip(4); // Skip inline bitset } // Larger compression format: dump down bytes into the AutoBuffer. public void compress3( AutoBuffer ab ) { assert max() > 32; // Expect a larger format assert _byteoff == 0; // This is only set on loading a pre-existing IcedBitSet assert _val.length==numBytes(); ab.put2((char)_bitoff); ab.put4(_nbits); ab.putA1(_val,_val.length); } // Reload IcedBitSet from AutoBuffer public void fill3( byte[] bits, AutoBuffer ab ) { int bitoff = ab.get2(); int nbits = ab.get4(); fill(bits,ab.position(),nbits,bitoff); ab.skip(bytes(nbits)); // Skip inline bitset } @Override public String toString() { return toString(new SB()).toString(); } public SB toString(SB sb) { sb.p("{"); if (_bitoff > 0) { sb.p("...").p(_bitoff).p(" 0-bits... "); } int limit = _nbits; final int bytes = bytes(_nbits); for(int i = 0; i < bytes; i++) { if (i > 0){ sb.p(' '); } sb.p(byteToBinaryString(_val[_byteoff + i], limit)); limit -= 8; } return sb.p("}"); } /** * Converts a byte into its binary representation (with at most 8 digits). * @param b the byte to be converted * @param limit the maximal length of returned string - it will never exceed 8 anyway * @return binary representation, lowest bit (weight 1) goes first */ static String byteToBinaryString(byte b, int limit) { final StringBuilder sb = new StringBuilder(); if (limit > 8) { limit = 8; } for (int i = 0; i < limit; i++) { sb.append((char)('0' + (b&1))); b>>=1; } return sb.toString(); } public String toStrArray() { final StringBuilder sb = new StringBuilder(); sb.append("{").append(_val[_byteoff]); final int bytes = bytes(_nbits); for(int i = 1; i < bytes; i++) { sb.append(", ").append(_val[_byteoff+i]); } sb.append("}"); return sb.toString(); } public void toJava( SB sb, String varname, int col) { sb.p("!GenModel.bitSetContains(").p(varname).p(", ").p(_nbits).p(", ").p(_bitoff).p(", data[").p(col).p("])"); } public void toJavaRangeCheck(SB sb, int col) { sb.p("GenModel.bitSetIsInRange(").p(_nbits).p(", ").p(_bitoff).p(", data[").p(col).p("])"); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/IcedDouble.java
package water.util; import water.Iced; public class IcedDouble extends Iced<IcedDouble> { public double _val; public IcedDouble() { this(Double.NaN); } public IcedDouble(double v){_val = v;} @Override public boolean equals( Object o ) { return o instanceof IcedDouble && ((IcedDouble) o)._val == _val; } @Override public int hashCode() { long h = Double.doubleToLongBits(_val); // Doubles are lousy hashes; mix up the bits some h ^= (h >>> 20) ^ (h >>> 12); h ^= (h >>> 7) ^ (h >>> 4); return (int) ((h ^ (h >> 32)) & 0x7FFFFFFF); } @Override public String toString() { return Double.toString(_val); } public IcedDouble setVal(double atd) { _val = atd; return this; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/IcedHashMap.java
package water.util; import water.AutoBuffer; import water.Freezable; import water.H2O; import water.Iced; import water.nbhm.NonBlockingHashMap; import java.util.Map; import java.util.concurrent.ConcurrentMap; /** Iced / Freezable NonBlockingHashMap. Delegates to a NonBlockingHashMap for * all its operations. Inspired by water.parser.Categorical. */ public class IcedHashMap<K, V> extends IcedHashMapBase<K,V> implements ConcurrentMap<K, V> { transient NonBlockingHashMap<K,V> _map; public IcedHashMap() { init(); } @Override protected Map<K, V> map() { return _map; } @Override protected Map<K, V> init() { return _map = new NonBlockingHashMap<>(); } public V putIfAbsent(K key, V value) { return _map.putIfAbsent(key, value); } public boolean remove(Object key, Object value) { return _map.remove(key, value); } public boolean replace(K key, V oldValue, V newValue) { return _map.replace(key, oldValue, newValue); } public V replace(K key, V value) { return _map.replace(key, value); } public K getk(K key) { return _map.getk(key); } // Map-writing optimized for NBHM @Override protected void writeMap(AutoBuffer ab, byte mode) { // For faster K/V store walking get the NBHM raw backing array, // and walk it directly. Object[] kvs = _map.raw_array(); KeyType keyType = keyType(mode); ValueType valueType = valueType(mode); ArrayType valueArrayType = arrayType(mode); // Start the walk at slot 2, because slots 0,1 hold meta-data // In the raw backing array, Keys and Values alternate in slots // Ignore tombstones and Primes and null's for (int i=2; i < kvs.length; i+=2) { K key = (K) kvs[i]; if (!isValidKey(key, keyType)) continue; V value = (V) kvs[i+1]; if (!isValidValue(value, valueType, valueArrayType)) continue; writeKey(ab, keyType, key); writeValue(ab, valueType, valueArrayType, value); } } @Override protected boolean writeable() { return true; // we are backed by NonBlockingHashMap, serialization is thus safe even while the map is being modified // because we are working with a snapshot of NBHM } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/IcedHashMapBase.java
package water.util; import water.AutoBuffer; import water.Freezable; import water.H2O; import water.Iced; import java.io.Serializable; import java.lang.reflect.Array; import java.util.Arrays; import java.util.Collection; import java.util.Map; import java.util.Set; import java.util.stream.Stream; import static org.apache.commons.lang.ArrayUtils.toObject; import static org.apache.commons.lang.ArrayUtils.toPrimitive; /** * Iced / Freezable NonBlockingHashMap abstract base class. */ public abstract class IcedHashMapBase<K, V> extends Iced implements Map<K, V>, Cloneable, Serializable { public enum KeyType { String(String.class), Freezable(Freezable.class), ; Class _clazz; KeyType(Class clazz) { _clazz = clazz; } } public enum ValueType { String(String.class), Freezable(Freezable.class), Boolean(Boolean.class, boolean.class), Integer(Integer.class, int.class), Long(Long.class, long.class), Float(Float.class, float.class), Double(Double.class, double.class), ; Class _clazz; Class _arrayClazz; Class _primitiveArrayClazz; ValueType(Class clazz) { this(clazz, Void.class); } ValueType(Class clazz, Class primitiveClazz) { _clazz = clazz; _arrayClazz = Array.newInstance(_clazz, 0).getClass(); _primitiveArrayClazz = Array.newInstance(primitiveClazz, 0).getClass(); } } public enum ArrayType { None, Array, PrimitiveArray } private transient volatile boolean _write_lock; abstract protected Map<K,V> map(); public int size() { return map().size(); } public boolean isEmpty() { return map().isEmpty(); } public boolean containsKey(Object key) { return map().containsKey(key); } public boolean containsValue(Object value) { return map().containsValue(value); } public V get(Object key) { return (V)map().get(key); } public V put(K key, V value) { assert writeable(); return (V)map().put(key, value);} public V remove(Object key) { assert writeable(); return map().remove(key); } public void putAll(Map<? extends K, ? 
extends V> m) { assert writeable(); map().putAll(m); } public void clear() { assert writeable(); map().clear(); } public Set<K> keySet() { return map().keySet(); } public Collection<V> values() { return map().values(); } public Set<Entry<K, V>> entrySet() { return map().entrySet(); } @Override public boolean equals(Object o) { return map().equals(o); } @Override public int hashCode() { return map().hashCode(); } @Override public String toString() { return map().toString(); } private static final byte empty_map = -1; // for future extensions, -1 becomes valid mode if for all enums there's an entry corresponding to their max allocated bits static KeyType keyType(byte mode) { return KeyType.values()[mode & 0x3]; } // first 2 bits encode key type static ValueType valueType(byte mode) { return ValueType.values()[mode>>>2 & 0xF];} // 3rd to 6th bit encodes value type static ArrayType arrayType(byte mode) { return ArrayType.values()[mode>>>6 & 0x3]; } // 7th to 8th bit encodes array type private static byte getMode(KeyType keyType, ValueType valueType, ArrayType arrayType) { return (byte) ((arrayType.ordinal() << 6) | (valueType.ordinal() << 2) | (keyType.ordinal())); } private KeyType getKeyType(K key) { assert key != null; return Stream.of(KeyType.values()) .filter(t -> isValidKey(key, t)) .findFirst() .orElseThrow(() -> new IllegalArgumentException("keys of type "+key.getClass().getTypeName()+" are not supported")); } private ValueType getValueType(V value) { ArrayType arrayType = getArrayType(value); return Stream.of(ValueType.values()) .filter(t -> isValidValue(value, t, arrayType)) .findFirst() .orElseThrow(() -> new IllegalArgumentException("values of type "+value.getClass().getTypeName()+" are not supported")); } private ArrayType getArrayType(V value) { if (value != null && value.getClass().isArray()) { if (value.getClass().getComponentType().isPrimitive()) return ArrayType.PrimitiveArray; return ArrayType.Array; } return ArrayType.None; } boolean isValidKey(K key, KeyType keyType) { return keyType._clazz.isInstance(key); } boolean isValidValue(V value, ValueType valueType, ArrayType arrayType) { if (value == null) return false; switch (arrayType) { case None: return valueType._clazz.isInstance(value); case Array: return valueType._arrayClazz.isInstance(value); case PrimitiveArray: return valueType._primitiveArrayClazz.isInstance(value); default: return false; } } // This comment is stolen from water.parser.Categorical: // // Since this is a *concurrent* hashtable, writing it whilst its being // updated is tricky. If the table is NOT being updated, then all is written // as expected. If the table IS being updated we only promise to write the // Keys that existed at the time the table write began. If elements are // being deleted, they may be written anyways. If the Values are changing, a // random Value is written. public final AutoBuffer write_impl( AutoBuffer ab ) { _write_lock = true; try { if (map().size() == 0) return ab.put1(empty_map); Entry<K, V> entry = map().entrySet().iterator().next(); K key = entry.getKey(); V val = entry.getValue(); assert key != null && val != null; byte mode = getMode(getKeyType(key), getValueType(val), getArrayType(val)); ab.put1(mode); // Type of hashmap being serialized writeMap(ab, mode); // Do the hard work of writing the map switch (keyType(mode)) { case String: return ab.putStr(null); case Freezable: default: return ab.put(null); } } catch (Throwable t) { throw H2O.fail("Iced hash map serialization failed!" 
+ t.toString() + ", msg = " + t.getMessage(), t); } finally { _write_lock = false; } } abstract protected Map<K,V> init(); /** * Can the map be modified? * * By default we don't make any assumptions about the implementation of the backing Map and we will write-lock * the map when we are trying to serialize it. However, if the specific implementation knows it is safe to modify * the map when it is being written, it can bypass the write-lock by overriding this method. * * @return true if map can be modified */ protected boolean writeable() { return !_write_lock; } protected void writeMap(AutoBuffer ab, byte mode) { KeyType keyType = keyType(mode); ValueType valueType = valueType(mode); ArrayType arrayType = arrayType(mode); for( Entry<K, V> e : map().entrySet() ) { K key = e.getKey(); assert key != null; V val = e.getValue(); assert val != null; writeKey(ab, keyType, key); writeValue(ab, valueType, arrayType, val); } } protected void writeKey(AutoBuffer ab, KeyType keyType, K key) { switch (keyType) { case String: ab.putStr((String)key); break; case Freezable: ab.put((Freezable)key); break; } } protected void writeValue(AutoBuffer ab, ValueType valueType, ArrayType arrayType, V value) { switch (arrayType) { case None: switch (valueType) { case String: ab.putStr((String)value); break; case Freezable: ab.put((Freezable)value); break; case Boolean: ab.put1((Boolean)value ? 1 : 0); break; case Integer: ab.put4((Integer)value); break; case Long: ab.put8((Long)value); break; case Float: ab.put4f((Float)value); break; case Double: ab.put8d((Double)value); break; } break; case Array: switch (valueType) { case String: ab.putAStr((String[])value); break; case Freezable: ab.putA((Freezable[])value); break; case Boolean: ab.putA1(bools2bytes(toPrimitive((Boolean[])value))); break; case Integer: ab.putA4(toPrimitive((Integer[])value)); break; case Long: ab.putA8(toPrimitive((Long[])value)); break; case Float: ab.putA4f(toPrimitive((Float[])value)); break; case Double: ab.putA8d(toPrimitive((Double[])value)); break; } break; case PrimitiveArray: switch (valueType) { case Boolean: ab.putA1(bools2bytes((boolean[])value)); break; case Integer: ab.putA4((int[])value); break; case Long: ab.putA8((long[])value); break; case Float: ab.putA4f((float[])value); break; case Double: ab.putA8d((double[])value); break; } break; } } @SuppressWarnings("unchecked") protected K readKey(AutoBuffer ab, KeyType keyType) { switch (keyType) { case String: return (K) ab.getStr(); case Freezable: return ab.get(); default: return null; } } @SuppressWarnings("unchecked") protected V readValue(AutoBuffer ab, ValueType valueType, ArrayType arrayType) { switch (arrayType) { case None: switch (valueType) { case String: return (V) ab.getStr(); case Freezable: return (V) ab.get(); case Boolean: return (V) Boolean.valueOf(ab.get1() == 1); case Integer: return (V) Integer.valueOf(ab.get4()); case Long: return (V) Long.valueOf(ab.get8()); case Float: return (V) Float.valueOf(ab.get4f()); case Double: return (V) Double.valueOf(ab.get8d()); default: return null; } case Array: switch (valueType) { case String: return (V) ab.getAStr(); case Freezable: return (V) ab.getA(Freezable.class); case Boolean: return (V) toObject(bytes2bools(ab.getA1())); case Integer: return (V) toObject(ab.getA4()); case Long: return (V) toObject(ab.getA8()); case Float: return (V) toObject(ab.getA4f()); case Double: return (V) toObject(ab.getA8d()); default: return null; } case PrimitiveArray: switch (valueType) { case Boolean: return (V) bytes2bools(ab.getA1()); 
case Integer: return (V) ab.getA4(); case Long: return (V) ab.getA8(); case Float: return (V) ab.getA4f(); case Double: return (V) ab.getA8d(); default: return null; } default: return null; } } /** * Helper for serialization - fills the mymap() from K-V pairs in the AutoBuffer object * @param ab Contains the serialized K-V pairs */ public final IcedHashMapBase read_impl(AutoBuffer ab) { try { assert map() == null || map().isEmpty(); // Fresh from serializer, no constructor has run Map<K, V> map = init(); byte mode = ab.get1(); if (mode == empty_map) return this; KeyType keyType = keyType(mode); ValueType valueType = valueType(mode); ArrayType arrayType = arrayType(mode); while (true) { K key = readKey(ab, keyType); if (key == null) break; V val = readValue(ab, valueType, arrayType); map.put(key, val); } return this; } catch(Throwable t) { if (null == t.getCause()) { throw H2O.fail("IcedHashMap deserialization failed! + " + t.toString() + ", msg = " + t.getMessage() + ", cause: null", t); } else { throw H2O.fail("IcedHashMap deserialization failed! + " + t.toString() + ", msg = " + t.getMessage() + ", cause: " + t.getCause().toString() + ", cause msg: " + t.getCause().getMessage() + ", cause stacktrace: " + java.util.Arrays.toString(t.getCause().getStackTrace())); } } } public final IcedHashMapBase readJSON_impl( AutoBuffer ab ) {throw H2O.unimpl();} public final AutoBuffer writeJSON_impl( AutoBuffer ab ) { boolean first = true; for (Entry<K, V> entry : map().entrySet()) { K key = entry.getKey(); V value = entry.getValue(); KeyType keyType = getKeyType(key); assert keyType == KeyType.String: "JSON format supports only String keys"; ValueType valueType = getValueType(value); ArrayType arrayType = getArrayType(value); if (first) { first = false; } else {ab.put1(',').put1(' '); } String name = (String) key; switch (arrayType) { case None: switch (valueType) { case String: ab.putJSONStr(name, (String) value); break; case Freezable: ab.putJSON(name, (Freezable) value); break; case Boolean: ab.putJSONStrUnquoted(name, Boolean.toString((Boolean)value)); break; case Integer: ab.putJSON4(name, (Integer) value); break; case Long: ab.putJSON8(name, (Long) value); break; case Float: ab.putJSON4f(name, (Float) value); break; case Double: ab.putJSON8d(name, (Double) value); break; } break; case Array: switch (valueType) { case String: ab.putJSONAStr(name, (String[]) value); break; case Freezable: ab.putJSONA(name, (Freezable[]) value); break; case Boolean: ab.putJSONStrUnquoted(name, Arrays.toString(toPrimitive((Boolean[]) value))); break; case Integer: ab.putJSONA4(name, toPrimitive((Integer[]) value)); break; case Long: ab.putJSONA8(name, toPrimitive((Long[]) value)); break; case Float: ab.putJSONA4f(name, toPrimitive((Float[]) value)); break; case Double: ab.putJSONA8d(name, toPrimitive((Double[]) value)); break; } break; case PrimitiveArray: switch (valueType) { case Boolean: ab.putJSONStrUnquoted(name, Arrays.toString((boolean[]) value)); break; case Integer: ab.putJSONA4(name, (int[]) value); break; case Long: ab.putJSONA8(name, (long[]) value); break; case Float: ab.putJSONA4f(name, (float[]) value); break; case Double: ab.putJSONA8d(name, (double[]) value); break; } break; } } return ab; } private static byte[] bools2bytes(boolean[] bools) { byte[] bytes = new byte[bools.length]; for (int i=0; i<bools.length; i++) bytes[i] = bools[i] ? 
(byte)1 : 0; return bytes; } private static boolean[] bytes2bools(byte[] bytes) { boolean[] bools = new boolean[bytes.length]; for(int i=0; i<bytes.length; i++) bools[i] = bytes[i] == 1; return bools; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/IcedHashMapGeneric.java
package water.util; import water.AutoBuffer; import water.Freezable; import water.H2O; import water.Iced; import water.nbhm.NonBlockingHashMap; import java.io.Serializable; import java.util.Collection; import java.util.Map; import java.util.Set; /** * * Generalization of standard IcedHashMap (Iced NBHM wrapper) with relaxed restrictions on K/V pairs. * * K/V pairs do not have to follow the same mode, each K/V pair is independent and can be one of: * * String | Freezable -> Integer | String | Freezable | Freezable[]. * * Values are type checked during put operation. * */ public class IcedHashMapGeneric<K, V> extends Iced implements Map<K, V>, Cloneable, Serializable { public boolean isSupportedKeyType(Object K) { return (K instanceof Freezable[] || K instanceof Freezable || K instanceof String); } public boolean isSupportedValType(Object V) { return (V instanceof Freezable[] || V instanceof Freezable || V instanceof String || V instanceof Integer || V instanceof Boolean || V instanceof Float || V instanceof Double); } public IcedHashMapGeneric(){init();} private transient volatile boolean _write_lock; transient NonBlockingHashMap<K,V> _map; protected Map<K,V> map(){return _map;} public int size() { return map().size(); } public boolean isEmpty() { return map().isEmpty(); } public boolean containsKey(Object key) { return map().containsKey(key); } public boolean containsValue(Object value) { return map().containsValue(value); } public V get(Object key) { return (V)map().get(key); } public V put(K key, V val) { assert !_write_lock; if(!isSupportedKeyType(key)) throw new IllegalArgumentException("given key type is not supported: " + key.getClass().getName()); if(!isSupportedValType(val)) throw new IllegalArgumentException("given val type is not supported: " + val.getClass().getName()); return (V)map().put(key, val); } public V remove(Object key) { assert !_write_lock; return map().remove(key); } public void putAll(Map<? extends K, ? extends V> m) { assert !_write_lock; for(Entry<? extends K, ? extends V> e:m.entrySet()) put(e.getKey(),e.getValue()); } public void clear() { assert !_write_lock; map().clear(); } public Set<K> keySet() { return map().keySet(); } public Collection<V> values() { return map().values(); } public Set<Entry<K, V>> entrySet() { return map().entrySet(); } public boolean equals(Object o) { return map().equals(o); } public int hashCode() { return map().hashCode(); } private boolean isStringKey(int mode){ return mode % 2 == 1; } private boolean isStringVal(int mode){return mode == 1 || mode == 2;} private boolean isFreezeVal(int mode){return mode == 3 || mode == 4;} private boolean isFArrayVal(int mode){return mode == 5 || mode == 6;} private boolean isIntegrVal(int mode){return mode == 7 || mode == 8;} private boolean isBoolVal(int mode){return mode == 9 || mode == 10;} private boolean isFloatVal(int mode){return mode == 11 || mode == 12;} private boolean isDoubleVal(int mode){return mode == 13 || mode == 14;} // This comment is stolen from water.parser.Categorical: // // Since this is a *concurrent* hashtable, writing it whilst its being // updated is tricky. If the table is NOT being updated, then all is written // as expected. If the table IS being updated we only promise to write the // Keys that existed at the time the table write began. If elements are // being deleted, they may be written anyways. If the Values are changing, a // random Value is written. 
public final AutoBuffer write_impl( AutoBuffer ab ) { _write_lock = true; try{ for( Entry<K, V> e : map().entrySet() ) { K key = e.getKey(); assert key != null; V val = e.getValue(); assert val != null; int mode = 0; if (key instanceof String) { if (val instanceof String) { mode = 1; } else if(val instanceof Freezable){ mode = 3; } else if(val instanceof Freezable[]) { mode = 5; } else if( val instanceof Integer ){ mode = 7; } else if( val instanceof Boolean ){ mode = 9; } else if( val instanceof Float ){ mode = 11; } else if( val instanceof Double ){ mode = 13; } else { throw new IllegalArgumentException("unsupported value class " + val.getClass().getName()); } } else { if(!(key instanceof Iced)) throw new IllegalArgumentException("key must be String or Freezable, got " + key.getClass().getName()); if (val instanceof String) { mode = 2; } else if(val instanceof Freezable) { mode = 4; } else if(val instanceof Freezable[]) { mode = 6; } else if (val instanceof Integer){ mode = 8; } else if (val instanceof Boolean){ mode = 10; } else if( val instanceof Float ){ mode = 12; } else if( val instanceof Double ){ mode = 14; } else { throw new IllegalArgumentException("unsupported value class " + val.getClass().getName()); } } ab.put1(mode); // Type of hashmap being serialized // put key if (isStringKey(mode)) ab.putStr((String) key); else ab.put((Freezable) key); // put value if (isStringVal(mode)) ab.putStr((String) val); else if(isFreezeVal(mode)) ab.put((Freezable) val); else if (isFArrayVal(mode)) { ab.put4(((Freezable[]) val).length); for (Freezable v : (Freezable[]) val) ab.put(v); } else if(isIntegrVal(mode)) ab.put4((Integer)val); else if (isBoolVal(mode)) ab.put1((Boolean)val ? 1 : 0); else if (isFloatVal(mode)) ab.put4f((Float)val); else if (isDoubleVal(mode)) ab.put8d((Double)val); else throw H2O.fail(); } ab.put1(-1); } catch(Throwable t){ throw H2O.fail("Iced hash map serialization failed!" + t.toString() + ", msg = " + t.getMessage(), t); } finally { _write_lock = false; } return ab; } protected Map<K, V> init() { return _map = new NonBlockingHashMap<>(); } /** * Helper for serialization - fills the mymap() from K-V pairs in the AutoBuffer object * @param ab Contains the serialized K-V pairs */ @SuppressWarnings("unchecked") public final IcedHashMapGeneric read_impl(AutoBuffer ab) { try { assert map() == null || map().isEmpty(); // Fresh from serializer, no constructor has run Map<K, V> map = init(); K key; V val; int mode; while ((mode = ab.get1()) != -1) { key = isStringKey(mode)?(K)ab.getStr():(K)ab.get(); if (isStringVal(mode)) val = (V)ab.getStr(); else if(isFreezeVal(mode)) val = (V)ab.get(); else if (isFArrayVal(mode)) { Freezable[] vals = new Freezable[ab.get4()]; for (int i = 0; i < vals.length; ++i) vals[i] = ab.get(); val = (V)vals; } else if(isIntegrVal(mode)) val = (V) ((Integer) ab.get4()); else if(isBoolVal(mode)) val = (V) (ab.get1() == 1 ? Boolean.TRUE : Boolean.FALSE); else if (isFloatVal(mode)) val = (V) ((Float) ab.get4f()); else if (isDoubleVal(mode)) val = (V) ((Double) ab.get8d()); else throw H2O.fail(); map.put(key,val); } return this; } catch(Throwable t) { throw H2O.fail("IcedHashMap deserialization failed! 
+ " + t.toString() + ", msg = " + t.getMessage(), t); } } public final IcedHashMapGeneric readJSON_impl(AutoBuffer ab ) {throw H2O.unimpl();} public final AutoBuffer writeJSON_impl( AutoBuffer ab ) { boolean first = true; for (Entry<K, V> entry : map().entrySet()) { K key = entry.getKey(); V value = entry.getValue(); assert entry.getKey() instanceof String; assert value instanceof String || value instanceof String[] || value instanceof Integer || value instanceof Boolean || value instanceof Float || value instanceof Double || value instanceof Freezable || value instanceof Freezable[]; if (first) { first = false; } else {ab.put1(',').put1(' '); } ab.putJSONName((String) key); ab.put1(':'); if (value instanceof String) ab.putJSONName((String) value); else if (value instanceof String[]) ab.putJSONAStr((String[]) value); else if (value instanceof Integer) ab.putJSON4((Integer) value); else if (value instanceof Boolean) ab.putJSONZ((Boolean) value); else if (value instanceof Float) ab.putJSON4f((Float) value); else if (value instanceof Double) ab.putJSON8d((Double) value); else if (value instanceof Freezable) ab.putJSON((Freezable) value); else if (value instanceof Freezable[]) ab.putJSONA((Freezable[]) value); } // ab.put1('}'); // NOTE: the serialization framework adds this automagically return ab; } // Subtypes which allow us to determine the type parameters at runtime, for generating schema metadata. public static class IcedHashMapStringString extends IcedHashMapGeneric<String, String> {} public static class IcedHashMapStringObject extends IcedHashMapGeneric<String, Object> {} }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/IcedHashSet.java
package water.util; import water.AutoBuffer; import water.Freezable; import water.H2O; import water.Iced; import water.nbhm.NonBlockingHashMap; import java.io.Serializable; import java.util.*; /** Iced / Freezable hash set. Delegates to a NonBlockingHashMap for * all its operations. Implementation based on IcedHashMapBase. * * Only supports Freezables as values. */ public class IcedHashSet<V extends Freezable<V>> extends Iced<IcedHashSet<V>> implements Set<V>, Cloneable, Serializable { private transient volatile boolean _write_lock; // not an actual lock - used in asserts only to catch issues in development private transient NonBlockingHashMap<V, V> _map; // the backing NonBlockingHashMap public IcedHashSet() { init(); } private Map<V, V> init() { return (_map = makeBackingMap()); } private NonBlockingHashMap<V, V> makeBackingMap() { return new NonBlockingHashMap<>(); } public V addIfAbsent(V value) { assert ! _write_lock; return _map.putIfAbsent(value, value); } public V get(V value) { assert ! _write_lock; return _map.getk(value); } @Override public int size() { return _map.size(); } @Override public boolean isEmpty() { return _map.isEmpty(); } @Override public boolean contains(Object value) { return _map.containsKey(value); } @Override public Iterator<V> iterator() { return _map.values().iterator(); } @Override public Object[] toArray() { return _map.values().toArray(); } @Override public <T> T[] toArray(T[] a) { Objects.requireNonNull(a); return _map.values().toArray(a); } @Override public boolean add(V v) { assert ! _write_lock; return _map.putIfAbsent(v, v) == null; } @Override public boolean remove(Object o) { assert ! _write_lock; return _map.remove(o, o); } @Override public boolean containsAll(Collection<?> c) { return _map.keySet().containsAll(c); } @Override public boolean addAll(Collection<? extends V> c) { assert ! _write_lock; boolean added = false; for (V item : c) { added |= _map.putIfAbsent(item, item) == null; } return added; } @Override public boolean retainAll(Collection<?> c) { throw new UnsupportedOperationException("Operation retainAll is not yet supported on IcedHashSet"); } @Override public boolean removeAll(Collection<?> c) { throw new UnsupportedOperationException("Operation removeAll is not yet supported on IcedHashSet"); } @Override public void clear() { assert ! _write_lock; _map.clear(); } // Optimized for a set structure represented by NBHM - only values are written private void writeMap(AutoBuffer ab) { // For faster K/V store walking get the NBHM raw backing array, // and walk it directly. Object[] kvs = _map.raw_array(); // Start the walk at slot 2, because slots 0,1 hold meta-data // In the raw backing array, Keys and Values alternate in slots // Ignore tombstones and Primes and null's for (int i = 2; i < kvs.length; i += 2) if (kvs[i+1] instanceof Iced) ab.put((Freezable)kvs[i+1]); } // Since this is a *concurrent* hashtable, writing it whilst its being // updated is tricky. If the table is NOT being updated, then all is written // as expected. If the table IS being updated we only promise to write the // Keys that existed at the time the table write began. If elements are // being deleted, they may be written anyways. If the Values are changing, a // random Value is written. 
public final AutoBuffer write_impl(AutoBuffer ab) { _write_lock = true; try { if (_map.size() == 0) return ab.put1(0); // empty set ab.put1(1); // mark non-empty writeMap(ab); // Do the hard work of writing the date return ab.put(null); } catch (Exception e) { throw new RuntimeException("IcedHashSet serialization failed!", e); } finally { _write_lock = false; } } public final IcedHashSet read_impl(AutoBuffer ab) { try { assert _map == null || _map.isEmpty(); // Fresh from serializer, no constructor has run Map<V, V> map = init(); if (ab.get1() == 0) return this; V val; while ((val = ab.get()) != null) { map.put(val, val); } return this; } catch (Exception e) { throw new RuntimeException("IcedHashSet deserialization failed!", e); } } public final IcedHashSet readJSON_impl(AutoBuffer ab) { throw H2O.unimpl(); } public final AutoBuffer writeJSON_impl(AutoBuffer ab) { boolean first = true; for (V value : _map.values()) { if (! first) ab.put1(',').put1(' '); else first = false; if (value != null) ab.putJSON(value); } // ab.put1('}'); // NOTE: the serialization framework adds this auto-magically return ab; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; IcedHashSet<?> that = (IcedHashSet<?>) o; return _map != null ? _map.equals(that._map) : that._map == null; } @Override public int hashCode() { return _map != null ? _map.hashCode() : 0; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/IcedInt.java
package water.util; import water.H2O.H2OCountedCompleter; import water.Iced; import water.TAtomic; public class IcedInt extends Iced<IcedInt> { public int _val; public IcedInt(int v){_val = v;} @Override public boolean equals( Object o ) { return o instanceof IcedInt && ((IcedInt) o)._val == _val; } @Override public int hashCode() { return _val; } @Override public String toString() { return Integer.toString(_val); } public static class AtomicIncrementAndGet extends TAtomic<IcedInt> { public AtomicIncrementAndGet(H2OCountedCompleter cc) {super(cc);} public int _val; @Override protected IcedInt atomic(IcedInt old) { return new IcedInt(_val = old._val + 1); } } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/IcedLong.java
package water.util; import water.H2O; import water.Iced; import water.Key; import water.TAtomic; public class IcedLong extends Iced { public long _val; public IcedLong(long v){_val = v;} @Override public boolean equals( Object o ) { return o instanceof IcedLong && ((IcedLong) o)._val == _val; } @Override public int hashCode() { return (int)(_val ^ (_val >>> 32)); } @Override public String toString() { return Long.toString(_val); } public static IcedLong valueOf(long value) { return new IcedLong(value); } public static long incrementAndGet(Key key) { return ((AtomicIncrementAndGet) new AtomicIncrementAndGet().invoke(key))._val; } public static class AtomicIncrementAndGet extends TAtomic<IcedLong> { public AtomicIncrementAndGet() { this(null); } public AtomicIncrementAndGet(H2O.H2OCountedCompleter cc) { super(cc); } // OUT public long _val; @Override protected IcedLong atomic(IcedLong old) { return new IcedLong(_val = old._val + 1); } } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/IcedSortedHashMap.java
package water.util; import java.util.Map; import java.util.TreeMap; /** Iced / Freezable Sorted HashMap. Delegates to a TreeMap for * all its operations. */ public class IcedSortedHashMap<K, V> extends IcedHashMapBase<K,V> { transient TreeMap<K,V> _map; public IcedSortedHashMap() { init(); } @Override protected Map<K, V> map() { return _map; } @Override protected Map<K, V> init() { return _map = new TreeMap<>(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/JCodeGen.java
package water.util; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.lang.reflect.Method; import java.net.URI; import java.util.Arrays; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.Set; import javax.tools.*; import water.H2O; import water.exceptions.JCodeSB; /** Internal utility for pretty-printing Models as Java code */ public class JCodeGen { public static <T extends JCodeSB> T toStaticVar(T sb, String varname, int value, String comment) { if (comment!=null) sb.ip("// ").p(comment).nl(); return (T) sb.ip("public static final int ").p(varname).p(" = ").p(value).p(';').nl(); } public static JCodeSB toStaticVar(JCodeSB sb, String varname, String[] values, String comment) { if (comment!=null) sb.ip("// ").p(comment).nl(); sb.ip("public static final String[] ").p(varname).p(" = "); if (values == null) return sb.p("null;").nl(); sb.p("new String[]{").p("\""+values[0]+"\""); for (int i = 1; i < values.length; ++i) sb.p(",").p("\""+values[i]+"\""); return sb.p("};").nl(); } public static JCodeSB toStaticVar(JCodeSB sb, String varname, float[] values, String comment) { if (comment!=null) sb.ip("// ").p(comment).nl(); sb.ip("public static final float[] ").p(varname).p(" = "); if (values == null) return sb.p("null;").nl(); sb.p("{").pj(values[0]); for (int i = 1; i < values.length; ++i) sb.p(",").pj(values[i]); return sb.p("};").nl(); } public static JCodeSB toStaticVarZeros(JCodeSB sb, String varname, double[] values, String comment) { if (comment!=null) sb.ip("// ").p(comment).nl(); sb.ip("public static final double[] ").p(varname).p(" = new double[" + values.length + "];"); return sb.nl(); } public static JCodeSB toStaticVar(JCodeSB sb, String varname, double[] values, String comment) { if (comment!=null) sb.ip("// ").p(comment).nl(); sb.ip("public static final double[] ").p(varname).p(" = "); if (values == null) return sb.p("null;").nl(); sb.p("{").pj(values[0]); for (int i = 1; i < values.length; ++i) sb.p(",").pj(values[i]); return sb.p("};").nl(); } public static JCodeSB toStaticVar(JCodeSB sb, String varname, int[] values, String comment) { if (comment!=null) sb.ip("// ").p(comment).nl(); sb.ip("public static final int[] ").p(varname).p(" = "); if (values == null) return sb.p("null;").nl(); sb.p("{").p(values[0]); for (int i = 1; i < values.length; ++i) sb.p(",").p(values[i]); return sb.p("};").nl(); } public static JCodeSB toStaticVar(JCodeSB sb, String varname, double[][] values, String comment) { if (comment!=null) sb.ip("// ").p(comment).nl(); sb.ip("public static final double[][] ").p(varname).p(" = "); return sb.toJavaStringInit(values).p(';').nl(); } public static JCodeSB toStaticVar(JCodeSB sb, String varname, double[][][] values, String comment) { if (comment!=null) sb.ip("// ").p(comment).nl(); sb.ip("public static final double[][][] ").p(varname).p(" = "); return sb.toJavaStringInit(values).p(';').nl(); } public static JCodeSB toStaticVar(JCodeSB sb, String varname, boolean[] values, String comment) { if (comment!=null) sb.ip("// ").p(comment).nl(); sb.ip("public static final boolean[] ").p(varname).p(" = "); if (values == null) return sb.p("null;").nl(); sb.p("{").p(values[0]); for (int i = 1; i < values.length; ++i) sb.p(",").p(values[i]); return sb.p("};").nl(); } /** * Generates a new class with one static member called <em>VALUES</em> which * is filled by values of given array. 
* <p>The generator can generate more classes to avoid limit of class constant * pool holding all generated literals</p>. * * @param sb output * @param className name of generated class * @param values array holding values which should be hold in generated field VALUES. * @param comment comment to prefix the class with * @return output buffer */ public static JCodeSB toClassWithArray(JCodeSB sb, String modifiers, String className, String[] values, String comment) { if (comment != null) { sb.p("// ").p(comment).nl(); } sb.ip(modifiers!=null ? modifiers+" ": "").p("class ").p(className).p(" implements java.io.Serializable {").nl().ii(1); sb.ip("public static final String[] VALUES = "); if (values==null) sb.p("null;").nl(); else { sb.p("new String[").p(values.length).p("];").nl(); // Static part int s = 0; int remain = values.length; int its = 0; SB sb4fillers = new SB().ci(sb); sb.ip("static {").ii(1).nl(); while (remain>0) { String subClzName = className + "_" + its++; int len = Math.min(MAX_STRINGS_IN_CONST_POOL, remain); toClassWithArrayFill(sb4fillers, subClzName, values, s, len); sb.ip(subClzName).p(".fill(VALUES);").nl(); s += len; remain -= len; } sb.di(1).ip("}").nl(); sb.p(sb4fillers); } return sb.di(1).p("}").nl(); } public static JCodeSB toClassWithArray(JCodeSB sb, String modifiers, String className, String[] values) { return toClassWithArray(sb, modifiers, className, values, null); } public static JCodeSB toClassWithArray(JCodeSB sb, String modifiers, String className, double[] values, String comment) { if (comment != null) { sb.p("// ").p(comment).nl(); } sb.ip(modifiers!=null ? modifiers+" ": "").p("class ").p(className).p(" implements java.io.Serializable {").nl().ii(1); sb.ip("public static final double[] VALUES = "); if (values==null) sb.p("null;").nl(); else { sb.p("new double[").p(values.length).p("];").nl(); // Static part int s = 0; int remain = values.length; int its = 0; SB sb4fillers = new SB().ci(sb); sb.ip("static {").ii(1).nl(); while (remain>0) { String subClzName = className + "_" + its++; int len = Math.min(MAX_STRINGS_IN_CONST_POOL, remain); toClassWithArrayFill(sb4fillers, subClzName, values, s, len); sb.ip(subClzName).p(".fill(VALUES);").nl(); s += len; remain -= len; } sb.di(1).ip("}").nl(); sb.p(sb4fillers); } return sb.di(1).p("}").nl(); } public static JCodeSB toClassWithArray(JCodeSB sb, String modifiers, String className, double[] values) { return toClassWithArray(sb, modifiers, className, values, null); } public static JCodeSB toClassWithArray(JCodeSB sb, String modifiers, String className, float[] values, String comment) { if (comment != null) { sb.p("// ").p(comment).nl(); } sb.ip(modifiers!=null ? 
modifiers+" ": "").p("class ").p(className).p(" implements java.io.Serializable {").nl().ii(1); sb.ip("public static final float[] VALUES = "); if (values==null) sb.p("null;").nl(); else { sb.p("new float[").p(values.length).p("];").nl(); // Static part int s = 0; int remain = values.length; int its = 0; SB sb4fillers = new SB().ci(sb); sb.ip("static {").ii(1).nl(); while (remain>0) { String subClzName = className + "_" + its++; int len = Math.min(MAX_STRINGS_IN_CONST_POOL, remain); toClassWithArrayFill(sb4fillers, subClzName, values, s, len); sb.ip(subClzName).p(".fill(VALUES);").nl(); s += len; remain -= len; } sb.di(1).ip("}").nl(); sb.p(sb4fillers); } return sb.di(1).p("}").nl(); } public static JCodeSB toClassWithArray(JCodeSB sb, String modifiers, String className, float[] values) { return toClassWithArray(sb, modifiers, className, values, null); } public static JCodeSB toClassWithArray(JCodeSB sb, String modifiers, String className, int[] values, String comment) { if (comment != null) { sb.p("// ").p(comment).nl(); } sb.ip(modifiers!=null ? modifiers+" ": "").p("class ").p(className).p(" implements java.io.Serializable {").nl().ii(1); sb.ip("public static final int[] VALUES = "); if (values==null) sb.p("null;").nl(); else { sb.p("new int[").p(values.length).p("];").nl(); // Static part int s = 0; int remain = values.length; int its = 0; SB sb4fillers = new SB().ci(sb); sb.ip("static {").ii(1).nl(); while (remain>0) { String subClzName = className + "_" + its++; int len = Math.min(MAX_STRINGS_IN_CONST_POOL, remain); toClassWithArrayFill(sb4fillers, subClzName, values, s, len); sb.ip(subClzName).p(".fill(VALUES);").nl(); s += len; remain -= len; } sb.di(1).ip("}").nl(); sb.p(sb4fillers); } return sb.di(1).p("}").nl(); } public static JCodeSB toClassWithArray(JCodeSB sb, String modifiers, String className, int[] values) { return toClassWithArray(sb, modifiers, className, values, null); } public static JCodeSB toClassWithArray(JCodeSB sb, String modifiers, String className, double[][] values, String comment) { if (comment != null) { sb.p("// ").p(comment).nl(); } sb.ip(modifiers!=null ? modifiers+" ": "").p("class ").p(className).p(" implements java.io.Serializable {").nl().ii(1); sb.ip("public static final double[][] VALUES = "); if (values == null) sb.p("null;").nl(); else { sb.p("new double[").p(values.length).p("][];").nl(); // Static part int s = 0; int remain = values.length; int its = 0; SB sb4fillers = new SB().ci(sb); sb.ip("static {").ii(1).nl(); while (remain>0) { String subClzName = className + "_" + its++; int len = Math.min(MAX_STRINGS_IN_CONST_POOL, remain); toClassWithArrayFill(sb4fillers, subClzName, values, s, len); sb.ip(subClzName).p(".fill(VALUES);").nl(); s += len; remain -= len; } sb.di(1).ip("}").nl(); sb.p(sb4fillers); } return sb.di(1).p("}").nl(); } public static JCodeSB toClassWithArray(JCodeSB sb, String modifiers, String className, double[][] values) { return toClassWithArray(sb, modifiers, className, values, null); } public static JCodeSB toClassWithArray(JCodeSB sb, String modifiers, String className, double[][][] values, String comment) { if (comment != null) { sb.p("// ").p(comment).nl(); } sb.ip(modifiers!=null ? 
modifiers+" ": "").p("class ").p(className).p(" implements java.io.Serializable {").nl().ii(1); sb.ip("public static final double[][][] VALUES = "); if (values == null) sb.p("null;").nl(); else { sb.p("new double[").p(values.length).p("][][];").nl(); // Static part int s = 0; int remain = values.length; int its = 0; SB sb4fillers = new SB().ci(sb); sb.ip("static {").ii(1).nl(); while (remain>0) { String subClzName = className + "_" + its++; int len = Math.min(MAX_STRINGS_IN_CONST_POOL, remain); toClassWithArrayFill(sb4fillers, subClzName, values, s, len); sb.ip(subClzName).p(".fill(VALUES);").nl(); s += len; remain -= len; } sb.di(1).ip("}").nl(); sb.p(sb4fillers); } return sb.di(1).p("}").nl(); } /** Maximum number of string generated per class (static initializer) */ public static int MAX_STRINGS_IN_CONST_POOL = 3000; public static JCodeSB toClassWithArrayFill(JCodeSB sb, String clzName, String[] values, int start, int len) { sb.ip("static final class ").p(clzName).p(" implements java.io.Serializable {").ii(1).nl(); sb.ip("static final void fill(String[] sa) {").ii(1).nl(); for (int i=0; i<len; i++) { sb.ip("sa[").p(start+i).p("] = ").ps(values[start+i]).p(";").nl(); } sb.di(1).ip("}").nl(); sb.di(1).ip("}").nl(); return sb; } public static JCodeSB toClassWithArrayFill(JCodeSB sb, String clzName, float[] values, int start, int len) { sb.ip("static final class ").p(clzName).p(" implements java.io.Serializable {").ii(1).nl(); sb.ip("static final void fill(float[] sa) {").ii(1).nl(); for (int i=0; i<len; i++) { sb.ip("sa[").p(start+i).p("] = ").pj(values[start+i]).p(";").nl(); } sb.di(1).ip("}").nl(); sb.di(1).ip("}").nl(); return sb; } public static JCodeSB toClassWithArrayFill(JCodeSB sb, String clzName, double[] values, int start, int len) { sb.ip("static final class ").p(clzName).p(" implements java.io.Serializable {").ii(1).nl(); sb.ip("static final void fill(double[] sa) {").ii(1).nl(); for (int i=0; i<len; i++) { sb.ip("sa[").p(start+i).p("] = ").pj(values[start+i]).p(";").nl(); } sb.di(1).ip("}").nl(); sb.di(1).ip("}").nl(); return sb; } public static JCodeSB toClassWithArrayFill(JCodeSB sb, String clzName, int[] values, int start, int len) { sb.ip("static final class ").p(clzName).p(" implements java.io.Serializable {").ii(1).nl(); sb.ip("static final void fill(int[] sa) {").ii(1).nl(); for (int i=0; i<len; i++) { sb.ip("sa[").p(start+i).p("] = ").p(values[start + i]).p(";").nl(); } sb.di(1).ip("}").nl(); sb.di(1).ip("}").nl(); return sb; } public static JCodeSB toClassWithArrayFill(JCodeSB sb, String clzName, double[][] values, int start, int len) { for (int i = 0; i < len; i++) { int idx = start + i; toClassWithArray(sb, "static", clzName + "_" + idx, values[i + start]); } sb.ip("static final class ").p(clzName).p(" implements java.io.Serializable {").ii(1).nl(); sb.ip("static final void fill(double[][] sa) {").ii(1).nl(); for (int i=0; i<len; i++) { int idx = start + i; sb.ip("sa[").p(start+i).p("] = ").p(clzName + "_" + idx).p(".VALUES;").nl(); } sb.di(1).ip("}").nl(); sb.di(1).ip("}").nl(); return sb; } public static JCodeSB toClassWithArrayFill(JCodeSB sb, String clzName, double[][][] values, int start, int len) { for (int i = 0; i < len; i++) { int idx = start + i; toClassWithArray(sb, "static", clzName + "_" + idx, values[i + start]); } sb.ip("static final class ").p(clzName).p(" implements java.io.Serializable {").ii(1).nl(); sb.ip("static final void fill(double[][][] sa) {").ii(1).nl(); for (int i=0; i<len; i++) { int idx = start + i; sb.ip("sa[").p(start+i).p("] = 
").p(clzName + "_" + idx).p(".VALUES;").nl(); } sb.di(1).ip("}").nl(); sb.di(1).ip("}").nl(); return sb; } /** Transform given string to legal java Identifier (see Java grammar http://docs.oracle.com/javase/specs/jls/se7/html/jls-3.html#jls-3.8) */ public static String toJavaId(String s) { // Note that the leading 4 backslashes turn into 2 backslashes in the // string - which turn into a single backslash in the REGEXP. // "+-*/ !@#$%^&()={}[]|\\;:'\"<>,.?/" return s.replaceAll("[+\\-* !@#$%^&()={}\\[\\]|;:'\"<>,.?/]", "_"); } // Compiler loaded??? public static boolean canCompile() { return COMPILER!=null; } public static Class compile(String class_name, String java_text) throws Exception { return compile(class_name, java_text, true); } public static Class compile(String class_name, String java_text, boolean failureIsFatal) throws Exception { if( COMPILER==null ) throw new UnsupportedOperationException("Unable to launch an internal instance of javac"); // Wrap input string up as a file-like java source thing JavaFileObject file = new JavaSourceFromString(class_name, java_text); // Capture all output class "files" as simple ByteArrayOutputStreams JavacFileManager jfm = new JavacFileManager(COMPILER.getStandardFileManager(null, null, null)); // Invoke javac if( !COMPILER.getTask(null, jfm, null, /*javac options*/null, null, Arrays.asList(file)).call() ) if (failureIsFatal) throw H2O.fail("Internal POJO compilation failed."); else throw new IllegalStateException("Internal POJO compilation failed."); // Load POJO classes via a separated classloader to separate POJO namespace ClassLoader cl = new TestPojoCL(Thread.currentThread().getContextClassLoader()); for( Map.Entry<String, ByteArrayOutputStream> entry : jfm._buffers.entrySet()) { byte[] bits = entry.getValue().toByteArray(); // Call classLoader.defineClass("className",byte[]) DEFINE_CLASS_METHOD.invoke(cl, entry.getKey(), bits, 0, bits.length); } return Class.forName(class_name, true, cl); // Return the original top-level class } /** * A private pojo classloader to separate each pojo namespace and * avoid collisions in loading */ private static class TestPojoCL extends ClassLoader { public TestPojoCL(ClassLoader parent) { super(parent); } } // Parts of this code are shamelessly robbed from: // OpenHFT/Java-Runtime-Compiler/blob/master/compiler/src/main/java/net/openhft/compiler // Then a lot of extra stuff is tossed out. private static final Method DEFINE_CLASS_METHOD; private static final JavaCompiler COMPILER = ToolProvider.getSystemJavaCompiler(); // These lines rely on tools.jar in the test-set of jars, and may allow some // Windows java installs to run the POJO tests that otherwise fail because an // internal instance of javac cannot be launched. Untested; this code works // on my Windows machine & on the Ubuntu Jenkins machines, but not the // Jenkins Windows VM. //import com.sun.tools.javac.api.JavacTool; //private static final JavaCompiler COMPILER = COMPILER1==null ? 
JavacTool.create() : COMPILER1; static { try { DEFINE_CLASS_METHOD = ClassLoader.class.getDeclaredMethod("defineClass", String.class, byte[].class, int.class, int.class); DEFINE_CLASS_METHOD.setAccessible(true); } catch (NoSuchMethodException e) { throw new AssertionError(e); } } // Simple declaration of a string as a file-like thing static class JavaSourceFromString extends javax.tools.SimpleJavaFileObject { final String _code; JavaSourceFromString(String name, String code) { super(URI.create("string:///" + name.replace('.','/') + Kind.SOURCE.extension),Kind.SOURCE); _code = code; } @Override public CharSequence getCharContent(boolean ignoreEncodingErrors) { return _code; } } // Manage all "files" being manipulated by javac - the input files are really // Strings, the output files are simple byte[]'s holding the classes. Things // other than Java source strings are routed through the standard fileManager // so javac can look up related class files. static class JavacFileManager extends ForwardingJavaFileManager<JavaFileManager> { private final StandardJavaFileManager _fileManager; final HashMap<String, ByteArrayOutputStream> _buffers = new HashMap<>(); JavacFileManager(StandardJavaFileManager fileManager) { super(fileManager); _fileManager = fileManager; } public ClassLoader getClassLoader(Location location) { return _fileManager.getClassLoader(location); } public String inferBinaryName(Location location, JavaFileObject file) { return _fileManager.inferBinaryName(location, file); } public boolean isSameFile(FileObject a, FileObject b) { return _fileManager.isSameFile(a, b); } public boolean hasLocation(Location location) { return _fileManager.hasLocation(location); } public JavaFileObject getJavaFileForInput(Location location, String className, JavaFileObject.Kind kind) throws IOException { if( location == StandardLocation.CLASS_OUTPUT && _buffers.containsKey(className) && kind == JavaFileObject.Kind.CLASS ) { final byte[] bytes = _buffers.get(className).toByteArray(); return new SimpleJavaFileObject(URI.create(className), kind) { public InputStream openInputStream() { return new ByteArrayInputStream(bytes); } }; } return _fileManager.getJavaFileForInput(location, className, kind); } public JavaFileObject getJavaFileForOutput(Location location, final String className, JavaFileObject.Kind kind, FileObject sibling) throws IOException { return new SimpleJavaFileObject(URI.create(className), kind) { public OutputStream openOutputStream() { ByteArrayOutputStream baos = new ByteArrayOutputStream(); _buffers.put(className, baos); return baos; } }; } public FileObject getFileForInput(Location location, String packageName, String relativeName) throws IOException { return _fileManager.getFileForInput(location, packageName, relativeName); } public FileObject getFileForOutput(Location location, String packageName, String relativeName, FileObject sibling) throws IOException { return _fileManager.getFileForOutput(location, packageName, relativeName, sibling); } public void flush() throws IOException { _fileManager.flush(); } public void close() throws IOException { _fileManager.close(); } public int isSupportedOption(String option) { return _fileManager.isSupportedOption(option); } } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/JProfile.java
package water.util; import water.H2O; import water.Iced; public class JProfile extends Iced { public static class ProfileSummary extends Iced { public ProfileSummary( String name, ProfileCollectorTask.NodeProfile profile) { this.name=name; this.profile=profile; } public final String name; public final ProfileCollectorTask.NodeProfile profile; } public final String node_name; public final long timestamp; public final int depth; public JProfile(int d) { depth = d; node_name = H2O.getIpPortString(); timestamp = System.currentTimeMillis(); } public ProfileSummary nodes[]; public JProfile execImpl(boolean print) { ProfileCollectorTask.NodeProfile profiles[] = new ProfileCollectorTask(depth).doAllNodes()._result; nodes = new ProfileSummary[H2O.CLOUD.size()]; for( int i=0; i<nodes.length; i++ ) { assert(profiles[i] != null); nodes[i] = new ProfileSummary(H2O.CLOUD._memary[i].toString(), profiles[i]); } if( !print ) return this; for( int i=0; i<nodes.length; i++ ) { Log.info(nodes[i].name); for (int j = 0; j < nodes[i].profile.counts.length; ++j) { Log.info(nodes[i].profile.counts[j]); Log.info(nodes[i].profile.stacktraces[j]); } } return this; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/JSONUtils.java
package water.util; import com.google.gson.Gson; import water.nbhm.NonBlockingHashMap; import java.util.Properties; public class JSONUtils { public static NonBlockingHashMap<String, Object> parse(String json) { return new Gson().fromJson(json, NonBlockingHashMap.class); } public static Properties parseToProperties(String json) { return new Gson().fromJson(json, Properties.class); } public static <T> T parse(String json, Class<T> type) { return new Gson().fromJson(json, type); } public static String toJSON(Object o) { return new Gson().toJson(o); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/JSONValue.java
package water.util; import water.Iced; import water.api.Schema; import water.api.Schema.AutoParseable; import java.lang.reflect.Array; import java.util.Map; import java.util.Objects; /** * CLass providing encapsulation for json values, especially for parts of json objects with polymorphic values. * * Idea is to store json (or part of json object) as json string + class for serialization. * It can then later be parsed dynamically using the provided `value` and `valueAs` methods. * @param <V> */ public class JSONValue<V> extends Iced { @SuppressWarnings("unchecked") public static <V> JSONValue<V> fromValue(V v) { return new JSONValue(JSONUtils.toJSON(v), v.getClass()); } protected String _json; protected Class<V> _clazz; public JSONValue(String json) { this(json, null); } public JSONValue(String json, Class<V> clazz) { _json = json; _clazz = clazz; } public V value() { return valueAs(_clazz); } @SuppressWarnings("unchecked") public <T> T valueAs(Class<T> clazz) { if (clazz == null) return (T)JSONUtils.parse(_json); return JSONUtils.parse(_json, clazz); } public <T extends Iced, S extends Schema<T, S>> T valueAs(Class<T> clazz, Class<S> schema) { return valueAsSchema(schema).createAndFillImpl(); } @SuppressWarnings("unchecked") public <T extends Iced, S extends Schema<T, S>> T[] valueAsArray(Class<T[]> clazz, Class<S[]> schema) { final S[] ss = valueAsSchemas(schema); final Class<T> tClazz = (Class<T>)clazz.getComponentType(); final T[] ts = (T[])Array.newInstance(tClazz, ss.length); for (int i=0; i<ss.length; i++) { ts[i] = ss[i].createAndFillImpl(); } return ts; } public <S extends Schema> S valueAsSchema(Class<S> schema) { final S s; if (AutoParseable.class.isAssignableFrom(schema)) { s = valueAs(schema); } else { s = Schema.newInstance(schema); PojoUtils.fillFromJson(s, _json); } return s; } @SuppressWarnings("unchecked") public <S extends Schema> S[] valueAsSchemas(Class<S[]> schema) { final Class<S> sClazz = (Class<S>)schema.getComponentType(); final S[] ss; if (AutoParseable.class.isAssignableFrom(sClazz)) { ss = valueAs(schema); } else { final Map[] maps = valueAs(Map[].class); ss = (S[]) Array.newInstance(sClazz, maps.length); for (int i=0; i<ss.length; i++) { ss[i] = JSONValue.fromValue(maps[i]).valueAsSchema(sClazz); } } return ss; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; JSONValue<?> jsonValue = (JSONValue<?>) o; return Objects.equals(_json, jsonValue._json) && Objects.equals(_clazz, jsonValue._clazz); } @Override public int hashCode() { return Objects.hash(_json, _clazz); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/JStack.java
package water.util; import water.Iced; import water.H2O; public class JStack extends Iced { public JStackCollectorTask.DStackTrace _traces[]; public JStack execImpl() { _traces = new JStackCollectorTask().doAllNodes()._traces; return this; // flow coding } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/JStackCollectorTask.java
package water.util; import water.H2O; import water.Iced; import water.MRTask; import java.util.ArrayList; import java.util.Map; import java.util.Map.Entry; import java.util.TreeMap; public class JStackCollectorTask extends MRTask<JStackCollectorTask> { JStackCollectorTask() { super(H2O.MIN_HI_PRIORITY); } public static class DStackTrace extends Iced { public final String _node; // Node name public final long _time; // Unix epoch time public final String[] _thread_traces; // One per thread DStackTrace( String[] traces ) { _node = H2O.getIpPortString(); _time = System.currentTimeMillis(); _thread_traces = traces; } } public DStackTrace _traces[]; // One per Node @Override public void reduce(JStackCollectorTask that) { for( int i=0; i<_traces.length; ++i ) if( _traces[i] == null ) _traces[i] = that._traces[i]; } private static class ThreadInfo { int _parked; int _active; int _blocked; int _unknown; public ThreadInfo add(ThreadInfo ti) { _parked += ti._parked; _active += ti._active; _blocked += ti._blocked; _unknown += ti._unknown; return this; } public double [] toDoubleArray(){ return new double[]{_active + _unknown, _blocked, _parked, _active + _unknown + _blocked + _parked}; } public boolean hasAny(){return _parked + _active + _blocked + _unknown > 0;} } enum ThreadType {HTTP_REQUEST, FJ, OTHER, TCP, JETTY, HADOOP} private static class ThreadKey implements Comparable<ThreadKey> { ThreadType _type; @Override public int compareTo(ThreadKey o) { return _type.ordinal() - o._type.ordinal(); } @Override public String toString() {return _type.toString();} } // bruteforce search for H2O Servlet, don't call until other obvious cases were filtered out private int isH2OHTTPRequestThread(StackTraceElement [] elms){ for(int i = 0; i < elms.length; ++i) if(elms[i].getClassName().equals("....JettyHTTPD$H2oDefaultServlet")) //TODO FIXME! No such class(H2oDefaultServlet) exists there now! Use class comparison if another one took the role. 
return i; return elms.length; } @Override public void setupLocal() { _traces = new DStackTrace[H2O.CLOUD.size()]; if( H2O.SELF.isClient() ) return; // Clients are not in the cloud, and do not get stack traces Map<Thread, StackTraceElement[]> allStackTraces = Thread.getAllStackTraces(); // Known to be interesting ArrayList<String> http_traces = new ArrayList<>(); http_traces.add("HttpReq traces"); ArrayList<String> fj_traces = new ArrayList<>(); fj_traces.add("FJ traces"); // unknown - possibly interesting ArrayList<String> other_traces = new ArrayList<>(); other_traces.add("'other' traces"); // Most likely uninteresting ArrayList<String> tcp_traces = new ArrayList<>(); tcp_traces.add("TCP traces"); ArrayList<String> system_traces = new ArrayList<>(); system_traces.add("system traces"); ArrayList<String> jetty_traces = new ArrayList<>(); jetty_traces.add("Jetty traces"); ArrayList<String> h2o_sys_traces = new ArrayList<>(); h2o_sys_traces.add("H2O System traces"); Map<Integer,ThreadInfo> fjThreadSummary = new TreeMap<>(); ThreadInfo threadSum = new ThreadInfo(); ThreadInfo httpReqs = new ThreadInfo(); ThreadInfo tcpThreads = new ThreadInfo(); ThreadInfo otherThreads = new ThreadInfo(); ThreadInfo jettythreads = new ThreadInfo(); ThreadInfo h2oSysThreads = new ThreadInfo(); ThreadInfo systemThreads = new ThreadInfo(); for( Entry<Thread,StackTraceElement[]> el : allStackTraces.entrySet() ) { StackTraceElement [] elms = el.getValue(); Thread t = el.getKey(); int idx = elms.length; ArrayList<String> trace = null; ThreadInfo tinfo = null; if(elms.length == 0) continue; if(t.getName().startsWith("FJ-") && elms[elms.length-1].getClassName().contains("ForkJoinWorkerThread")) { // H2O specific FJ Thread trace = fj_traces; Integer fjq = Integer.parseInt(t.getName().substring(3, t.getName().indexOf('-', 3))); if (!fjThreadSummary.containsKey(fjq)) fjThreadSummary.put(fjq, new ThreadInfo()); tinfo = fjThreadSummary.get(fjq); } else if(elms[elms.length-1].getClassName().equals("water.TCPReceiverThread$TCPReaderThread")) { if (elms[elms.length - 2].getClassName().equals("water.AutoBuffer") && elms[elms.length - 2].getMethodName().equals("<init>")) { tcpThreads._parked++; continue; } trace = tcp_traces; tinfo = tcpThreads; } else if(elms[elms.length-1].getClassName().equals("water.MultiReceiverThread") || elms[elms.length-1].getClassName().equals("water.TCPReceiverThread") || elms[elms.length-1].getClassName().equals("water.HeartBeatThread")){ trace = h2o_sys_traces; tinfo = h2oSysThreads; } else if(elms.length > 1 && elms[elms.length-2].getClassName().startsWith("java.util.concurrent.ThreadPoolExecutor") || elms[elms.length-1].getClassName().startsWith("java.lang.ref.Finalizer") || elms[elms.length-1].getClassName().startsWith("java.lang.ref.Reference")) { trace = system_traces; tinfo = systemThreads; }else if((idx = isH2OHTTPRequestThread(elms)) < elms.length) { // h2o HTTP request trace = http_traces; tinfo = httpReqs; } else if(elms.length > 1 && elms[elms.length-2].getClassName().startsWith("org.eclipse.jetty")){ trace = jetty_traces; tinfo = jettythreads; } else { trace = other_traces; tinfo = otherThreads; } if(elms[0].getClassName().equals("sun.misc.Unsafe") && elms[0].getMethodName().equals("park")) { ++tinfo._parked; // don't include parked stacktraces continue; } if(t.getState().toString().equals("RUNNABLE")) { ++tinfo._active; } else if(t.getState().toString().contains("WAITING")) { ++tinfo._blocked; } else { ++tinfo._unknown; System.out.println("UNKNOWN STATE: " + t.getState()); } SB sb = new 
SB().p('"').p(t.getName()).p('"'); if (t.isDaemon()) sb.p(" daemon"); sb.p(" prio=").p(t.getPriority()); sb.p(" tid=").p(t.getId()); sb.p(" java.lang.Thread.State: ").p(t.getState().toString()); sb.nl(); for( int j = 0; j < idx; ++j) sb.p("\tat ").p(elms[j].toString()).nl(); trace.add(sb.toString()); } // get the summary of idle threads // String tableHeader, String tableDescription, String[] rowHeaders, String[] colHeaders, String[] colTypes, // String[] colFormats, String colHeaderForRowHeaders, String[][] strCellValues, double[][] dblCellValues ArrayList<String> rowNames = new ArrayList<>(); ArrayList<double[]> cellVals = new ArrayList<>(); if(httpReqs.hasAny()) { rowNames.add("HttpReq"); cellVals.add(httpReqs.toDoubleArray()); } for(Entry<Integer,ThreadInfo> e:fjThreadSummary.entrySet()) { rowNames.add("FJ-" + e.getKey()); ThreadInfo fjt = e.getValue(); threadSum.add(fjt); cellVals.add(fjt.toDoubleArray()); } if(otherThreads.hasAny()) { rowNames.add("other"); cellVals.add(otherThreads.toDoubleArray()); } if(tcpThreads.hasAny()) { rowNames.add("TCP"); cellVals.add(tcpThreads.toDoubleArray()); } if(h2oSysThreads.hasAny()) { rowNames.add("h2osys"); cellVals.add(h2oSysThreads.toDoubleArray()); } if(systemThreads.hasAny()) { rowNames.add("system"); cellVals.add(systemThreads.toDoubleArray()); } if(jettythreads.hasAny()) { rowNames.add("jetty"); cellVals.add(jettythreads.toDoubleArray()); } rowNames.add("TOTAL"); cellVals.add(threadSum.add(httpReqs).add(otherThreads).add(tcpThreads).add(systemThreads).add(jettythreads).toDoubleArray()); TwoDimTable td = new TwoDimTable("Thread Summary", "Summary of running threads", rowNames.toArray(new String[0]), new String[] {"active","blocked","idle","TOTAL"}, new String[]{"int","int","int","int"}, new String[]{"%d","%d","%d","%d"}, "Thread",new String[cellVals.size()][],cellVals.toArray(new double[0][0])); // todo - sort FJ traces? String [] traces = new String[1+ http_traces.size() + fj_traces.size() + other_traces.size() + tcp_traces.size() + h2o_sys_traces.size() + system_traces.size() + jetty_traces.size()]; int ii = 1; for(String t:http_traces) { traces[ii++] = t; Log.info(t); } for(String t:fj_traces) { traces[ii++] = t; Log.info(t); } for(String t:other_traces) { traces[ii++] = t; Log.info(t); } for(String t:tcp_traces) { traces[ii++] = t; Log.info(t); } for(String t:h2o_sys_traces) { traces[ii++] = t; Log.info(t); } for(String t:system_traces) { traces[ii++] = t; Log.info(t); } for(String t:jetty_traces) { traces[ii++] = t; Log.info(t); } traces[0] = td.toString(); Log.info(traces[0]); _traces[H2O.SELF.index()] = new DStackTrace(traces); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/Java7.java
package water.util; /** * The following code replaces Java 7 Objects class, while Java 7 * is not always available. * * Created by vpatryshev on 3/1/17. */ public class Java7 { public static final class Objects { public static boolean equals(Object x, Object y) { return x == y || (x != null && x.equals(y)); } public static int hashCode(Object a) { return a == null ? 0 : a.hashCode(); } } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/LineLimitOutputStreamWrapper.java
package water.util; import java.io.IOException; import java.io.OutputStream; /** Simple {@code OutputStream} wrapper limiting * number of rows outputed into a given stream. * * It delegates all calls to underlying output stream, * but counts number of lines passed trough. * * Note: new line is detected only based on '\n' character! */ public class LineLimitOutputStreamWrapper extends OutputStream { /** Output stream to delegate writes */ private final OutputStream os; /** Number of lines to output. */ private final int lineLimit; /** Number of lines in output */ private int linesCnt = 0; public LineLimitOutputStreamWrapper(OutputStream os, int lineLimit) { this.os = os; this.lineLimit = lineLimit; } @Override public void write(int b) throws IOException { if (linesCnt < lineLimit) { os.write(b); if (b == '\n') linesCnt++; } } @Override public void write(byte[] b, int off, int len) throws IOException { if (linesCnt < lineLimit) { for (int i = 0; i < len; i++) { if (b[off + i] == '\n') linesCnt++; if (linesCnt == lineLimit) { len = off + i; break; } } os.write(b, off, len); } } @Override public void write(byte[] b) throws IOException { os.write(b); } @Override public void flush() throws IOException { os.flush(); } @Override public void close() throws IOException { os.close(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/LinuxProcFileReader.java
package water.util; import java.io.*; import java.util.ArrayList; import java.util.regex.Matcher; import java.util.regex.Pattern; /** * Linux /proc file reader. * * Read tick information for the system and the current process in order to provide * stats on the cloud page about CPU utilization. * * Tick counts are monotonically increasing since boot. * * Find definitions of /proc file info here. * http://man7.org/linux/man-pages/man5/proc.5.html */ public class LinuxProcFileReader { private String _systemData; private String _processData; private String _processStatus; private String _pid; private long _systemIdleTicks = -1; private long _systemTotalTicks = -1; private long _processTotalTicks = -1; private long _processRss = -1; private int _processCpusAllowed = -1; private int _processNumOpenFds = -1; private ArrayList<long[]> _cpuTicks = null; /** * Constructor. */ public LinuxProcFileReader() { } /** * @return whether this java process is running in Windows Subsystem for Linux environment. */ public boolean isWsl() { try { if (! new File("/proc/version").exists()) { return false; } String s = readFile(new File("/proc/version")); if (! s.contains("Microsoft")) { return false; } return true; } catch (Exception e) { return false; } } /** * @return ticks the system was idle. in general: idle + busy == 100% */ public long getSystemIdleTicks() { assert _systemIdleTicks > 0; return _systemIdleTicks; } /** * @return ticks the system was up. */ public long getSystemTotalTicks() { assert _systemTotalTicks > 0; return _systemTotalTicks; } /** * @return ticks this process was running. */ public long getProcessTotalTicks() { assert _processTotalTicks > 0; return _processTotalTicks; } /** * Array of ticks. * [cpu number][tick type] * * tick types are: * * [0] user ticks * [1] system ticks * [2] other ticks (i/o) * [3] idle ticks * * @return ticks array for each cpu of the system. */ public long[][] getCpuTicks() { assert _cpuTicks != null; return _cpuTicks.toArray(new long[0][0]); } /** * @return resident set size (RSS) of this process. */ public long getProcessRss() { assert _processRss > 0; return _processRss; } static private boolean isOSNameMatch(final String osName, final String osNamePrefix) { if (osName == null) { return false; } return osName.startsWith(osNamePrefix); } private static boolean getOSMatchesName(final String osNamePrefix) { String osName = System.getProperty("os.name"); return isOSNameMatch(osName, osNamePrefix); } private static boolean IS_OS_LINUX() { return getOSMatchesName("Linux") || getOSMatchesName("LINUX"); } /** * @return number of CPUs allowed by this process. */ public int getProcessCpusAllowed() { return getProcessCpusAllowed(IS_OS_LINUX()); } int getProcessCpusAllowed(boolean isLinux) { if (! isLinux) { return getProcessCpusAllowedFallback(); } // _processCpusAllowed is not available on CentOS 5 and earlier. // In this case, just return availableProcessors. if (_processCpusAllowed < 0) { return getProcessCpusAllowedFallback(); } return _processCpusAllowed; } int getProcessCpusAllowedFallback() { // Note: We use H2ORuntime#availableProcessors everywhere else - here we report the actual #cpus JVM is allowed to see return Runtime.getRuntime().availableProcessors(); } /** * @return number of currently open fds of this process. */ public int getProcessNumOpenFds() { assert _processNumOpenFds > 0; return _processNumOpenFds; } /** * @return process id for this node as a String. 
*/ public String getProcessID() { return _pid; } /** * Read and parse data from /proc/stat and /proc/&lt;pid&gt;/stat. * If this doesn't work for some reason, the values will be -1. */ public void read() { String pid = "-1"; try { pid = getProcessId(); _pid = pid; } catch (Exception ignore) {} File f = new File ("/proc/stat"); if (! f.exists()) { return; } try { readSystemProcFile(); readProcessProcFile(pid); readProcessNumOpenFds(pid); readProcessStatusFile(pid); parseSystemProcFile(_systemData); parseProcessProcFile(_processData); parseProcessStatusFile(_processStatus); } catch (Exception ignore) {} } /** * @return true if all the values are ok to use; false otherwise. */ public boolean valid() { return ((_systemIdleTicks >= 0) && (_systemTotalTicks >= 0) && (_processTotalTicks >= 0) && (_processNumOpenFds >= 0)); } /** * @return number of set bits in hexadecimal string (chars must be 0-F) */ public static int numSetBitsHex(String s) { // Look-up table for num set bits in 4-bit char final int[] bits_set = {0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4}; int nset = 0; for(int i = 0; i < s.length(); i++) { Character ch = s.charAt(i); if (ch == ',') { continue; } int x = Integer.parseInt(ch.toString(), 16); nset += bits_set[x]; } return nset; } private static String getProcessId() throws Exception { // Note: may fail in some JVM implementations // therefore fallback has to be provided // something like '<pid>@<hostname>', at least in SUN / Oracle JVMs final String jvmName = java.lang.management.ManagementFactory.getRuntimeMXBean().getName(); final int index = jvmName.indexOf('@'); if (index < 1) { // part before '@' empty (index = 0) / '@' not found (index = -1) throw new Exception ("Can't get process Id"); } return Long.toString(Long.parseLong(jvmName.substring(0, index))); } private String readFile(File f) throws Exception { char[] buffer = new char[16 * 1024]; FileReader fr = new FileReader(f); int bytesRead = 0; while (true) { int n = fr.read(buffer, bytesRead, buffer.length - bytesRead); if (n < 0) { fr.close(); return new String (buffer, 0, bytesRead); } else if (n == 0) { // This is weird. fr.close(); throw new Exception("LinuxProcFileReader readFile read 0 bytes"); } bytesRead += n; if (bytesRead >= buffer.length) { fr.close(); throw new Exception("LinuxProcFileReader readFile unexpected buffer full"); } } } private void readSystemProcFile() { try { _systemData = readFile(new File("/proc/stat")); } catch (Exception ignore) {} } /** * @param s String containing contents of proc file. */ void parseSystemProcFile(String s) { if (s == null) return; try { BufferedReader reader = new BufferedReader(new StringReader(s)); String line = reader.readLine(); // Read aggregate cpu values { Pattern p = Pattern.compile("cpu\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+).*"); Matcher m = p.matcher(line); boolean b = m.matches(); if (!b) { return; } long systemUserTicks = Long.parseLong(m.group(1)); long systemNiceTicks = Long.parseLong(m.group(2)); long systemSystemTicks = Long.parseLong(m.group(3)); _systemIdleTicks = Long.parseLong(m.group(4)); _systemTotalTicks = systemUserTicks + systemNiceTicks + systemSystemTicks + _systemIdleTicks; } // Read individual cpu values _cpuTicks = new ArrayList<long[]>(); line = reader.readLine(); while (line != null) { Pattern p = Pattern.compile("cpu(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+).*"); Matcher m = p.matcher(line); boolean b = m.matches(); if (! 
b) { break; } // Copying algorithm from http://gee.cs.oswego.edu/dl/code/ // See perfbar.c in gtk_perfbar package. // int cpuNum = Integer.parseInt(m.group(1)); long cpuUserTicks = 0; long cpuSystemTicks = 0; long cpuOtherTicks = 0; long cpuIdleTicks = 0; cpuUserTicks += Long.parseLong(m.group(2)); cpuOtherTicks += Long.parseLong(m.group(3)); cpuSystemTicks += Long.parseLong(m.group(4)); cpuIdleTicks += Long.parseLong(m.group(5)); cpuOtherTicks += Long.parseLong(m.group(6)); cpuSystemTicks += Long.parseLong(m.group(7)); cpuSystemTicks += Long.parseLong(m.group(8)); long[] oneCpuTicks = {cpuUserTicks, cpuSystemTicks, cpuOtherTicks, cpuIdleTicks}; _cpuTicks.add(oneCpuTicks); line = reader.readLine(); } } catch (Exception ignore) {} } private void readProcessProcFile(String pid) { try { String s = "/proc/" + pid + "/stat"; _processData = readFile(new File(s)); } catch (Exception ignore) {} } void parseProcessProcFile(String s) { if (s == null) return; try { BufferedReader reader = new BufferedReader(new StringReader(s)); String line = reader.readLine(); Pattern p = Pattern.compile( "(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)" + "\\s+" + "(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)" + "\\s+" + "(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)" + "\\s+" + "(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)" + "\\s+" + "(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)" + ".*"); Matcher m = p.matcher(line); boolean b = m.matches(); if (! b) { return; } long processUserTicks = Long.parseLong(m.group(14)); long processSystemTicks = Long.parseLong(m.group(15)); _processTotalTicks = processUserTicks + processSystemTicks; _processRss = Long.parseLong(m.group(24)); } catch (Exception ignore) {} } private void readProcessNumOpenFds(String pid) { try { String s = "/proc/" + pid + "/fd"; File f = new File(s); String[] arr = f.list(); if (arr != null) { _processNumOpenFds = arr.length; } } catch (Exception ignore) {} } private void readProcessStatusFile(String pid) { try { String s = "/proc/" + pid + "/status"; _processStatus = readFile(new File(s)); } catch (Exception ignore) {} } void parseProcessStatusFile(String s) { if(s == null) return; try { Pattern p = Pattern.compile("Cpus_allowed:\\s+([A-Fa-f0-9,]+)"); Matcher m = p.matcher(s); boolean b = m.find(); if (! b) { return; } _processCpusAllowed = numSetBitsHex(m.group(1)); } catch (Exception ignore) {} } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/Log.java
package water.util; import org.apache.log4j.Logger; import water.H2O; import water.persist.PersistManager; import java.io.File; import java.util.ArrayList; import java.util.List; import static water.util.LoggerBackend.L4J_LVLS; import static water.util.StringUtils.fixedLength; /** * Log for H2O. * * OOME: when the VM is low on memory, OutOfMemoryError can be thrown in the * logging framework while it is trying to print a message. In this case the * first message that fails is recorded for later printout, and a number of * messages can be discarded. The framework will attempt to print the recorded * message later, and report the number of dropped messages, but this done in * a best effort and lossy manner. Basically when an OOME occurs during * logging, no guarantees are made about the messages. **/ abstract public class Log { public static final byte FATAL= 0; public static final byte ERRR = 1; public static final byte WARN = 2; public static final byte INFO = 3; public static final byte DEBUG= 4; public static final byte TRACE= 5; public static final String[] LVLS = { "FATAL", "ERRR", "WARN", "INFO", "DEBUG", "TRACE" }; private static final String PROP_MAX_PID_LENGTH = H2O.OptArgs.SYSTEM_PROP_PREFIX + "log.max.pid.length"; private static int _level = INFO; private static boolean _quiet = false; private static Logger _logger = null; private static boolean _bufferMessages = true; private static String _logDir = null; private static String _maxLogFileSize = "3MB"; // A little bit of startup buffering private static class BufferedMsg { private final int lvl; private final String msg; private final Throwable t; BufferedMsg(int l, String m, Throwable t) { this.lvl = l; this.msg = m; this.t = t; } } private static ArrayList<BufferedMsg> INIT_MSGS = new ArrayList<>(); public static byte valueOf( String slvl ) { if( slvl == null ) return -1; slvl = slvl.toLowerCase(); if( slvl.startsWith("fatal") ) return FATAL; if( slvl.startsWith("err" ) ) return ERRR; if( slvl.startsWith("warn" ) ) return WARN; if( slvl.startsWith("info" ) ) return INFO; if( slvl.startsWith("debug") ) return DEBUG; if( slvl.startsWith("trace") ) return TRACE; return -1; } public static void init(String sLvl, boolean quiet, String maxLogFileSize) { int lvl = valueOf(sLvl); if( lvl != -1 ) _level = lvl; _quiet = quiet; _logger = null; if (maxLogFileSize != null) { _maxLogFileSize = maxLogFileSize; } } public static void notifyAboutNetworkingInitialized() { _bufferMessages = false; // at this point we can create the log files and use a correct prefix ip:port for each log message assert H2O.SELF_ADDRESS != null && H2O.H2O_PORT != 0; } public static void notifyAboutProcessExiting() { // make sure we write out whatever we have right now Log.flushBufferedMessages(); // if there are any other log messages after this call, we want to preserve them as well if (_quiet) { _quiet = false; _logger = null; } INIT_MSGS = null; } public static void setLogLevel(String level, boolean quiet) { init(level, quiet, null); } public static void setLogLevel(String level) { setLogLevel(level, true); } public static void trace( Object... objs ) { log(TRACE,objs); } public static void debug( Object... objs ) { log(DEBUG,objs); } public static void info ( Object... objs ) { log(INFO ,objs); } public static void warn ( Object... objs ) { log(WARN ,objs); } public static void err ( Object... objs ) { log(ERRR ,objs); } public static void fatal( Object... objs ) { log(FATAL,objs); } public static void log ( int level, Object... 
objs ) { write(level, objs); } // This call *throws* an unchecked exception and never returns (after logging). public static RuntimeException throwErr( Throwable e ) { err(e); // Log it throw e instanceof RuntimeException ? (RuntimeException)e : new RuntimeException(e); // Throw it } private static void write(int lvl, Object[] objs) { write0(lvl, objs); } private static void write0(int lvl, Object[] objs) { StringBuilder msgBuff = new StringBuilder(); Throwable t = null; for (int i = 0; i < objs.length - 1; i++) msgBuff.append(objs[i]); if (objs.length > 0 && objs[objs.length - 1] instanceof Throwable) { t = (Throwable) objs[objs.length-1]; } else if (objs.length > 0) { msgBuff.append(objs[objs.length-1]); } String msg = msgBuff.toString(); if (_bufferMessages) { // Oops, need to buffer until we can do a proper header INIT_MSGS.add(new BufferedMsg(lvl, msg, t)); return; } flushBufferedMessages(); write0(lvl, msg, t); } private static void write0(int lvl, String s, Throwable t) { Logger log = (_logger != null ? _logger : createLog4j()); if (s.contains("\n")) { for (String line : s.split("\n")) { log.log(L4J_LVLS[lvl], line); } if (t != null) { log.log(L4J_LVLS[lvl], t); } } else { log.log(L4J_LVLS[lvl], s, t); } } private static List<BufferedMsg> consumeBufferedMessages() { List<BufferedMsg> buff = null; if (INIT_MSGS != null) { buff = INIT_MSGS; INIT_MSGS = null; } return buff; } public static void flushBufferedMessages() { List<BufferedMsg> buff = consumeBufferedMessages(); if (buff != null) for (BufferedMsg m : buff) write0(m.lvl, m.msg, m.t); } public static void flushBufferedMessagesToStdout() { List<BufferedMsg> buff = consumeBufferedMessages(); if (buff != null) for (BufferedMsg m : buff) { System.out.println(m.msg); if (m.t != null) m.t.printStackTrace(); } } public static int getLogLevel(){ return _level; } public static boolean isLoggingFor(int level) { if (level == -1) { // in case of invalid log level return false return false; } return _level >= level; } public static boolean isLoggingFor(String strLevel){ int level = valueOf(strLevel); return isLoggingFor(level); } /** * Get the directory where the logs are stored. */ public static String getLogDir() { if (_logDir == null) { throw new RuntimeException("LOG_DIR not yet defined"); } return _logDir; } private static String getLogFileNamePrefix() { if (H2O.SELF_ADDRESS == null) { throw new RuntimeException("H2O.SELF_ADDRESS not yet defined"); } if (H2O.H2O_PORT == 0) { throw new RuntimeException("H2O.H2O_PORT is not yet determined"); } String ip = H2O.SELF_ADDRESS.getHostAddress(); int port = H2O.API_PORT; String portString = Integer.toString(port); return "h2o_" + ip + "_" + portString; } private static File determineLogDir() { File dir; if (H2O.ARGS.log_dir != null) { dir = new File(H2O.ARGS.log_dir); } else { boolean windowsPath = H2O.ICE_ROOT.toString().matches("^[a-zA-Z]:.*"); // Use ice folder if local, or default if (windowsPath) dir = new File(H2O.ICE_ROOT.toString()); else if (H2O.ICE_ROOT.getScheme() == null || PersistManager.Schemes.FILE.equals(H2O.ICE_ROOT.getScheme())) dir = new File(H2O.ICE_ROOT.getPath()); else dir = new File(H2O.DEFAULT_ICE_ROOT()); dir = new File(dir, "h2ologs"); } return dir; } /** * Get log file name without the path for particular log level. 
*/ public static String getLogFileName(String level) { return getLogFileNamePrefix() + getLogFileNameSuffix(level); } /** Get suffix of the log file name specific to particular log level */ private static String getLogFileNameSuffix(String level){ switch (level) { case "trace": return "-1-trace.log"; case "debug": return "-2-debug.log"; case "info": return "-3-info.log"; case "warn": return "-4-warn.log"; case "error": return "-5-error.log"; case "fatal": return "-6-fatal.log"; case "httpd": return "-httpd.log"; default: throw new RuntimeException("Unknown level " + level); } } /** Get full path to a specific log file*/ public static String getLogFilePath(String level) { return getLogDir() + File.separator + getLogFileName(level); } private static String getHostPortPid() { String host = H2O.SELF_ADDRESS.getHostAddress(); return fixedLength(host + ":" + H2O.API_PORT + " ", 22) + fixedLength(H2O.PID + " ", maximumPidLength() + 2); } // set sys.ai.h2o.log.max.pid.length to avoid h2o-3 trimming PID in the logs private static int maximumPidLength() { String maxPidPropertyValue = System.getProperty(PROP_MAX_PID_LENGTH); return maxPidPropertyValue != null ? Integer.parseInt(maxPidPropertyValue) : 4; } private static synchronized Logger createLog4j() { if (_logger == null) { // Test again under lock _logDir = determineLogDir().toString(); LoggerBackend lb = new LoggerBackend(); lb._launchedWithHadoopJar = H2O.ARGS.launchedWithHadoopJar(); lb._haveInheritedLog4jConfiguration = H2O.haveInheritedLog4jConfiguration(); lb._prefix = getHostPortPid(); lb._maxLogFileSize = _maxLogFileSize; lb._level = _level; lb._getLogFilePath = Log::getLogFilePath; Logger logger = lb.createLog4j(); if (logger == null) { H2O.exit(1); throw new IllegalStateException("Shouldn't reach this - exit should exit the application"); } _logger = logger; } return _logger; } public static void ignore(Throwable e) { ignore(e,"[h2o] Problem ignored: "); } public static void ignore(Throwable e, String msg) { ignore(e, msg, true); } public static void ignore(Throwable e, String msg, boolean printException) { debug(msg + (printException? e.toString() : "")); } //----------------------------------------------------------------- // POST support for debugging embedded configurations. //----------------------------------------------------------------- /** * POST stands for "Power on self test". * Stamp a POST code to /tmp. * This is for bringup, when no logging or stdout I/O is reliable. * (Especially when embedded, such as in hadoop mapreduce, for example.) * * @param n POST code. * @param s String to emit. */ public static void POST(int n, String s) { System.out.println("POST " + n + ": " + s); } public static void POST(int n, Exception e) { if (e.getMessage() != null) { POST(n, e.getMessage()); } POST(n, e.toString()); StackTraceElement[] els = e.getStackTrace(); for (StackTraceElement el : els) { POST(n, el.toString()); } } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/LogArchiveContainer.java
package water.util; import water.api.RequestServer; import java.io.ByteArrayOutputStream; public enum LogArchiveContainer { ZIP(RequestServer.MIME_DEFAULT_BINARY) { @Override public LogArchiveWriter createLogArchiveWriter(ByteArrayOutputStream baos) { return new ZipLogArchiveWriter(baos); } }, LOG(RequestServer.MIME_PLAINTEXT) { @Override public LogArchiveWriter createLogArchiveWriter(ByteArrayOutputStream baos) { return new ConcatenatedLogArchiveWriter(baos); } }; private final String _mime_type; LogArchiveContainer(String mimeType) { _mime_type = mimeType; } public abstract LogArchiveWriter createLogArchiveWriter(ByteArrayOutputStream baos); public String getFileExtension() { return name().toLowerCase(); } public String getMimeType() { return _mime_type; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/LogArchiveWriter.java
package water.util; import java.io.Closeable; import java.io.IOException; import java.io.OutputStream; import java.util.Date; import java.util.zip.ZipEntry; import java.util.zip.ZipOutputStream; /** * Abstract layer over different types of log containers (zip, txt,...) */ public abstract class LogArchiveWriter implements Closeable { final OutputStream _os; LogArchiveWriter(OutputStream os) { _os = os; } public abstract void putNextEntry(ArchiveEntry entry) throws IOException; public abstract void closeEntry() throws IOException; public void write(byte[] b, int off, int len) throws IOException { _os.write(b, off, len); } public final void write(byte[] b) throws IOException { write(b, 0, b.length); } @Override public void close() throws IOException { _os.close(); } public static class ArchiveEntry { final String _name; final long _time; public ArchiveEntry(String name, Date date) { this(name, date.getTime()); } ArchiveEntry(String name, long time) { _name = name; _time = time; } @Override public String toString() { return _name + " (" + new Date(_time) + ")"; } } } class ZipLogArchiveWriter extends LogArchiveWriter { private final ZipOutputStream _zos; ZipLogArchiveWriter(OutputStream os) { this(new ZipOutputStream(os)); } private ZipLogArchiveWriter(ZipOutputStream zos) { super(zos); _zos = zos; } @Override public void putNextEntry(ArchiveEntry entry) throws IOException { ZipEntry ze = new ZipEntry(entry._name); ze.setTime(entry._time); _zos.putNextEntry(ze); } @Override public void closeEntry() throws IOException { _zos.closeEntry(); } } class ConcatenatedLogArchiveWriter extends LogArchiveWriter { ConcatenatedLogArchiveWriter(OutputStream baos) { super(baos); } @Override public void putNextEntry(ArchiveEntry entry) throws IOException { String entryStr = entry.toString(); String header = "\n" + entryStr + ":\n" + org.apache.commons.lang.StringUtils.repeat("=", entryStr.length() + 1) + "\n"; _os.write(StringUtils.toBytes(header)); } @Override public void closeEntry() { // noop } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/MRUtils.java
package water.util; import water.H2O; import water.Key; import water.MRTask; import water.fvec.*; import water.parser.BufferedString; import java.util.Arrays; import java.util.Random; import static water.util.RandomUtils.getRNG; public class MRUtils { /** * Sample rows from a frame. * Can be unlucky for small sampling fractions - will continue calling itself until at least 1 row is returned. * @param fr Input frame * @param rows Approximate number of rows to sample (across all chunks) * @param seed Seed for RNG * @return Sampled frame */ public static Frame sampleFrame(Frame fr, final long rows, final long seed) { return sampleFrame(fr, rows, null, seed); } /** * Row-wise shuffle of a frame (only shuffles rows inside of each chunk) * @param fr Input frame * @return Shuffled frame */ public static Frame shuffleFramePerChunk(Frame fr, final long seed) { return new MRTask() { @Override public void map(Chunk[] cs, NewChunk[] ncs) { int[] idx = new int[cs[0]._len]; for (int r=0; r<idx.length; ++r) idx[r] = r; ArrayUtils.shuffleArray(idx, getRNG(seed)); for (long anIdx : idx) { for (int i = 0; i < ncs.length; i++) { if (cs[i] instanceof CStrChunk) { ncs[i].addStr(cs[i],cs[i].start()+anIdx); } else { ncs[i].addNum(cs[i].atd((int) anIdx)); } } } } }.doAll(fr.types(), fr).outputFrame(fr.names(), fr.domains()); } /** * Compute the class distribution from a class label vector * (not counting missing values) * * Usage 1: Label vector is categorical * ------------------------------------ * Vec label = ...; * assert(label.isCategorical()); * double[] dist = new ClassDist(label).doAll(label).dist(); * * Usage 2: Label vector is numerical * ---------------------------------- * Vec label = ...; * int num_classes = ...; * assert(label.isInt()); * double[] dist = new ClassDist(num_classes).doAll(label).dist(); * */ public static class ClassDist extends MRTask<ClassDist> { final int _nclass; protected double[] _ys; public ClassDist(final Vec label) { _nclass = label.domain().length; } public ClassDist(int n) { _nclass = n; } public final double[] dist() { return _ys; } public final double[] relDist() { final double sum = ArrayUtils.sum(_ys); // due to CV and weights there can be sum == 0 return sum == 0 ? _ys : ArrayUtils.div(Arrays.copyOf(_ys, _ys.length), sum); } @Override public void map(Chunk ys) { _ys = new double[_nclass]; for( int i=0; i<ys._len; i++ ) if (!ys.isNA(i)) _ys[(int) ys.at8(i)]++; } @Override public void map(Chunk ys, Chunk ws) { _ys = new double[_nclass]; for( int i=0; i<ys._len; i++ ) if (!ys.isNA(i)) _ys[(int) ys.at8(i)] += ws.atd(i); } @Override public void reduce( ClassDist that ) { ArrayUtils.add(_ys,that._ys); } } /** * Compute the class distribution for qusibinomial distribution from a class label vector * (not counting missing values) */ public static class ClassDistQuasibinomial extends MRTask<ClassDistQuasibinomial> { final int _nclass; private double[] _ys; private String[] _domain; private double _firstDoubleDomain; public ClassDistQuasibinomial(String[] domain) { _nclass = 2; _domain = domain; _firstDoubleDomain = Double.valueOf(domain[0]); } public final double[] dist() { return _ys; } public final double[] relDist() { final double sum = ArrayUtils.sum(_ys); // due to CV and weights there can be sum == 0 return sum == 0 ? 
_ys : ArrayUtils.div(Arrays.copyOf(_ys, _ys.length), sum); } public final String[] domains(){ return _domain; } @Override public void map(Chunk ys) { _ys = new double[_nclass]; for( int i=0; i<ys._len; i++ ) if (!ys.isNA(i)) { int index = ys.atd(i) == _firstDoubleDomain ? 0 : 1; _ys[index]++; } } @Override public void map(Chunk ys, Chunk ws) { _ys = new double[_nclass]; for( int i=0; i<ys._len; i++ ) if (!ys.isNA(i)) { int index = ys.atd(i) == _firstDoubleDomain? 0 : 1; _ys[index] += ws.atd(i); } } @Override public void reduce(ClassDistQuasibinomial that) { ArrayUtils.add(_ys,that._ys); } } public static class Dist extends MRTask<Dist> { private IcedHashMap<IcedDouble,IcedAtomicInt> _dist; @Override public void map(Chunk ys) { _dist = new IcedHashMap<>(); IcedDouble d = new IcedDouble(0); for( int row=0; row< ys._len; row++ ) if( !ys.isNA(row) ) { d._val = ys.atd(row); IcedAtomicInt oldV = _dist.get(d); if(oldV == null) oldV = _dist.putIfAbsent(new IcedDouble(d._val), new IcedAtomicInt(1)); if(oldV != null) oldV.incrementAndGet(); } } @Override public void reduce(Dist mrt) { if( _dist != mrt._dist ) { IcedHashMap<IcedDouble,IcedAtomicInt> l = _dist; IcedHashMap<IcedDouble,IcedAtomicInt> r = mrt._dist; if( l.size() < r.size() ) { l=r; r=_dist; } for( IcedDouble v: r.keySet() ) { IcedAtomicInt oldVal = l.putIfAbsent(v, r.get(v)); if( oldVal!=null ) oldVal.addAndGet(r.get(v).get()); } _dist=l; mrt._dist=null; } } public double[] dist() { int i=0; double[] dist = new double[_dist.size()]; for( IcedAtomicInt v: _dist.values() ) dist[i++] = v.get(); return dist; } public double[] keys() { int i=0; double[] keys = new double[_dist.size()]; for( IcedDouble k: _dist.keySet() ) keys[i++] = k._val; return keys; } } /** * Stratified sampling for classifiers - FIXME: For weights, this is not accurate, as the sampling is done with uniform weights * @param fr Input frame * @param label Label vector (must be categorical) * @param weights Weights vector, can be null * @param sampling_ratios Optional: array containing the requested sampling ratios per class (in order of domains), will be overwritten if it contains all 0s * @param maxrows Maximum number of rows in the returned frame * @param seed RNG seed for sampling * @param allowOversampling Allow oversampling of minority classes * @param verbose Whether to print verbose info * @return Sampled frame, with approximately the same number of samples from each class (or given by the requested sampling ratios) */ public static Frame sampleFrameStratified(final Frame fr, Vec label, Vec weights, float[] sampling_ratios, long maxrows, final long seed, final boolean allowOversampling, final boolean verbose) { return sampleFrameStratified(fr, label, weights, sampling_ratios, maxrows, seed, allowOversampling, verbose, null); } /** * Stratified sampling for classifiers - FIXME: For weights, this is not accurate, as the sampling is done with uniform weights * @param fr Input frame * @param label Label vector (must be categorical) * @param weights Weights vector, can be null * @param sampling_ratios Optional: array containing the requested sampling ratios per class (in order of domains), will be overwritten if it contains all 0s * @param maxrows Maximum number of rows in the returned frame * @param seed RNG seed for sampling * @param allowOversampling Allow oversampling of minority classes * @param verbose Whether to print verbose info * @param quasibinomialDomain quasibinomial domain * @return Sampled frame, with approximately the same number of samples from each class 
(or given by the requested sampling ratios) */ public static Frame sampleFrameStratified(final Frame fr, Vec label, Vec weights, float[] sampling_ratios, long maxrows, final long seed, final boolean allowOversampling, final boolean verbose, String[] quasibinomialDomain) { if (fr == null) return null; assert(label.isCategorical()); if (maxrows < label.domain().length) { Log.warn("Attempting to do stratified sampling to fewer samples than there are class labels - automatically increasing to #rows == #labels (" + label.domain().length + ")."); maxrows = label.domain().length; } double[] dist; if(quasibinomialDomain != null){ dist = weights != null ? new ClassDistQuasibinomial(quasibinomialDomain).doAll(label, weights).dist() : new ClassDistQuasibinomial(quasibinomialDomain).doAll(label).dist(); } else { dist = weights != null ? new ClassDist(label).doAll(label, weights).dist() : new ClassDist(label).doAll(label).dist(); } assert(dist.length > 0); Log.info("Doing stratified sampling for data set containing " + fr.numRows() + " rows from " + dist.length + " classes. Oversampling: " + (allowOversampling ? "on" : "off")); if (verbose) for (int i=0; i<dist.length;++i) Log.info("Class " + label.factor(i) + ": count: " + dist[i] + " prior: " + (float)dist[i]/fr.numRows()); // create sampling_ratios for class balance with max. maxrows rows (fill // existing array if not null). Make a defensive copy. sampling_ratios = sampling_ratios == null ? new float[dist.length] : sampling_ratios.clone(); assert sampling_ratios.length == dist.length; if( ArrayUtils.minValue(sampling_ratios) == 0 && ArrayUtils.maxValue(sampling_ratios) == 0 ) { // compute sampling ratios to achieve class balance for (int i=0; i<dist.length;++i) sampling_ratios[i] = ((float)fr.numRows() / label.domain().length) / (float)dist[i]; // prior^-1 / num_classes final float inv_scale = ArrayUtils.minValue(sampling_ratios); //majority class has lowest required oversampling factor to achieve balance if (!Float.isNaN(inv_scale) && !Float.isInfinite(inv_scale)) ArrayUtils.div(sampling_ratios, inv_scale); //want sampling_ratio 1.0 for majority class (no downsampling) } if (!allowOversampling) for (int i=0; i<sampling_ratios.length; ++i) sampling_ratios[i] = Math.min(1.0f, sampling_ratios[i]); // given these sampling ratios, and the original class distribution, this is the expected number of resulting rows float numrows = 0; for (int i=0; i<sampling_ratios.length; ++i) { numrows += sampling_ratios[i] * dist[i]; } if (Float.isNaN(numrows)) { Log.err("Total number of sampled rows was NaN. " + "Sampling ratios: " + Arrays.toString(sampling_ratios) + "; Dist: " + Arrays.toString(dist)); throw new IllegalArgumentException("Error during sampling - too few points?"); } final long actualnumrows = Math.min(maxrows, Math.round(numrows)); //cap #rows at maxrows assert(actualnumrows >= 0); //can have no matching rows in case of sparse data where we had to fill in a makeZero() vector Log.info("Stratified sampling to a total of " + String.format("%,d", actualnumrows) + " rows" + (actualnumrows < numrows ? 
" (limited by max_after_balance_size).":".")); if (actualnumrows != numrows) { ArrayUtils.mult(sampling_ratios, (float)actualnumrows/numrows); //adjust the sampling_ratios by the global rescaling factor if (verbose) Log.info("Downsampling majority class by " + (float)actualnumrows/numrows + " to limit number of rows to " + String.format("%,d", maxrows)); } for (int i=0;i<label.domain().length;++i) { Log.info("Class '" + label.domain()[i] + "' sampling ratio: " + sampling_ratios[i]); } return sampleFrameStratified(fr, label, weights, sampling_ratios, seed, verbose, quasibinomialDomain); } /** * Stratified sampling * @param fr Input frame * @param label Label vector (from the input frame) * @param weights Weight vector (from the input frame), can be null * @param sampling_ratios Given sampling ratios for each class, in order of domains * @param seed RNG seed * @param debug Whether to print debug info * @param quasibinomialDomain quasibinomial domain * @return Stratified frame */ public static Frame sampleFrameStratified(final Frame fr, Vec label, Vec weights, final float[] sampling_ratios, final long seed, final boolean debug, String[] quasibinomialDomain) { return sampleFrameStratified(fr, label, weights, sampling_ratios, seed, debug, 0, quasibinomialDomain); } // internal version with repeat counter // currently hardcoded to do up to 10 tries to get a row from each class, which can be impossible for certain wrong sampling ratios private static Frame sampleFrameStratified(final Frame fr, Vec label, Vec weights, final float[] sampling_ratios, final long seed, final boolean debug, int count, String[] quasibinomialDomain) { if (fr == null) return null; assert(label.isCategorical()); assert(sampling_ratios != null && sampling_ratios.length == label.domain().length); final int labelidx = fr.find(label); //which column is the label? assert(labelidx >= 0); final int weightsidx = fr.find(weights); //which column is the weight? final boolean poisson = false; //beta feature //FIXME - this is doing uniform sampling, even if the weights are given Frame r = new MRTask() { @Override public void map(Chunk[] cs, NewChunk[] ncs) { final Random rng = getRNG(seed); for (int r = 0; r < cs[0]._len; r++) { if (cs[labelidx].isNA(r)) continue; //skip missing labels rng.setSeed(cs[0].start()+r+seed); final int label = (int)cs[labelidx].at8(r); assert(sampling_ratios.length > label && label >= 0); int sampling_reps; if (poisson) { throw H2O.unimpl(); // sampling_reps = ArrayUtils.getPoisson(sampling_ratios[label], rng); } else { final float remainder = sampling_ratios[label] - (int)sampling_ratios[label]; sampling_reps = (int)sampling_ratios[label] + (rng.nextFloat() < remainder ? 1 : 0); } for (int i = 0; i < ncs.length; i++) { if (cs[i] instanceof CStrChunk) { for (int j = 0; j < sampling_reps; ++j) { ncs[i].addStr(cs[i],cs[0].start()+r); } } else { for (int j = 0; j < sampling_reps; ++j) { ncs[i].addNum(cs[i].atd(r)); } } } } } }.doAll(fr.types(), fr).outputFrame(fr.names(), fr.domains()); // Confirm the validity of the distribution Vec lab = r.vecs()[labelidx]; Vec wei = weightsidx != -1 ? r.vecs()[weightsidx] : null; double[] dist; if(quasibinomialDomain != null){ dist = wei != null ? new ClassDistQuasibinomial(quasibinomialDomain).doAll(lab, wei).dist() : new ClassDistQuasibinomial(quasibinomialDomain).doAll(lab).dist(); } else { dist = wei != null ? 
new ClassDist(lab).doAll(lab, wei).dist() : new ClassDist(lab).doAll(lab).dist(); } // if there are no training labels in the test set, then there is no point in sampling the test set if (dist == null) return fr; if (debug) { double sumdist = ArrayUtils.sum(dist); Log.info("After stratified sampling: " + sumdist + " rows."); for (int i=0; i<dist.length;++i) { Log.info("Class " + r.vecs()[labelidx].factor(i) + ": count: " + dist[i] + " sampling ratio: " + sampling_ratios[i] + " actual relative frequency: " + (float)dist[i] / sumdist * dist.length); } } // Re-try if we didn't get at least one example from each class if (ArrayUtils.minValue(dist) == 0 && count < 10) { Log.info("Re-doing stratified sampling because not all classes were represented (unlucky draw)."); r.remove(); return sampleFrameStratified(fr, label, weights, sampling_ratios, seed+1, debug, ++count, quasibinomialDomain); } // shuffle intra-chunk Frame shuffled = shuffleFramePerChunk(r, seed+0x580FF13); r.remove(); return shuffled; } /** * Sample rows from a frame with weight column. * Weights are used in the following manner: a row that has n-times higher weight should be n-times more likely to be picked. * Can be unlucky for small sampling fractions - will continue calling itself until at least 1 row is returned. * @param fr Input frame * @param rows Approximate number of rows to sample (across all chunks) * @param weightColumn Weight column name * @param seed Seed for RNG * @return Sampled frame */ public static Frame sampleFrame(Frame fr, final long rows, final String weightColumn, final long seed) { if (fr == null) return null; final int weightIdx = fr.find(weightColumn); final double fractionOfWeights; if (weightIdx < 0) { fractionOfWeights = rows > 0 ? (double)rows / fr.numRows(): 1.f; } else { final double meanWeight = fr.vec(weightIdx).mean(); fractionOfWeights = rows > 0 ? (double)rows / (fr.numRows() * meanWeight): 1.f; } if (fractionOfWeights >= 1.f) return fr; Key newKey = fr._key != null ? Key.make(fr._key.toString() + (fr._key.toString().contains("temporary") ? ".sample." : ".temporary.sample.") + PrettyPrint.formatPct(fractionOfWeights).replace(" ","")) : null; Frame r = new MRTask() { @Override public void map(Chunk[] cs, NewChunk[] ncs) { final Random rng = getRNG(0); final BufferedString bStr = new BufferedString(); int count = 0; for (int r = 0; r < cs[0]._len; r++) { rng.setSeed(seed+r+cs[0].start()); final double threshold = weightIdx < 0 ? fractionOfWeights : (fractionOfWeights * cs[weightIdx].atd(r)); // A row with n-times higher weight should be n-times more likely to be picked if (rng.nextFloat() < threshold || (count == 0 && r == cs[0]._len-1) ) { count++; for (int i = 0; i < ncs.length; i++) { if (cs[i].isNA(r)) ncs[i].addNA(); else if (cs[i] instanceof CStrChunk) ncs[i].addStr(cs[i].atStr(bStr,r)); else if (cs[i] instanceof C16Chunk) ncs[i].addUUID(cs[i].at16l(r),cs[i].at16h(r)); else ncs[i].addNum(cs[i].atd(r)); } } } } }.doAll(fr.types(), fr).outputFrame(newKey, fr.names(), fr.domains()); if (r.numRows() == 0) { Log.warn("You asked for " + rows + " rows (out of " + fr.numRows() + "), but you got none (seed=" + seed + ")."); Log.warn("Let's try again. You've gotta ask yourself a question: \"Do I feel lucky?\""); return sampleFrame(fr, rows, seed+1); } return r; } /** * Sample small number of rows from a frame. Doesn't support weights. * * Meaning of "small" is relative, it shouldn't be more that 10k of rows. 
* * @param fr Input frame * @param rows Exact number of rows to sample * @param seed Seed for RNG * @return Sampled frame, guaranteed to have exactly specified #rows (as long as the frame is large enough) */ public static Frame sampleFrameSmall(Frame fr, final int rows, final long seed) { return sampleFrameSmall(fr, rows, getRNG(seed)); } /** * Sample small number of rows from a frame. Doesn't support weights. * * Meaning of "small" is relative, it shouldn't be more that 10k of rows. * * @param fr Input frame * @param rows Exact number of rows to sample * @param rand Random Generator * @return Sampled frame, guaranteed to have exactly specified #rows (as long as the frame is large enough) */ public static Frame sampleFrameSmall(Frame fr, final int rows, final Random rand) { if (rows >= fr.numRows()) return fr; return fr.deepSlice(ArrayUtils.distinctLongs(rows, fr.numRows(), rand), null); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/MarkdownBuilder.java
package water.util; /** * Small helper class for creating Markdown in a StringBuffer. */ public class MarkdownBuilder { private StringBuffer sb; public MarkdownBuilder() { sb = new StringBuffer(); } public StringBuffer append(StringBuffer s) { sb.append(s); return sb; } public StringBuffer append(String s) { sb.append(s); return sb; } public StringBuffer paragraph(String paragraph) { sb.append(paragraph).append("\n\n"); return sb; } public StringBuffer hline() { sb.append("---\n"); return sb; } private StringBuffer append(String separator, boolean addNewline, String... strings) { int i = 0; for (String string : strings) { if (i++ > 0) sb.append(separator); sb.append(string); } if (addNewline) sb.append("\n"); return sb; } public StringBuffer comment(String... comment) { sb.append("[//]: # ("); this.append(" ", false, comment); sb.append(")\n"); return sb; } public StringBuffer heading1(String... heading) { sb.append("# "); this.append(" ", true, heading); return sb; } public StringBuffer heading2(String... heading) { sb.append("## "); this.append(" ", true, heading); return sb; } public StringBuffer tableRow(String... cols) { this.append(" | ", true, cols); return sb; } public StringBuffer tableHeader(String... cols) { tableRow(cols); int i = 0; for (String col : cols) { if (i++ > 0) sb.append(" | "); sb.append("---"); } sb.append("\n"); return sb; } public StringBuffer stringBuffer() { return sb; } @Override public String toString() { return sb.toString(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/MathUtils.java
package water.util; import hex.quantile.Quantile; import hex.quantile.QuantileModel; import org.jtransforms.dct.DoubleDCT_1D; import org.jtransforms.dct.DoubleDCT_2D; import org.jtransforms.dct.DoubleDCT_3D; import pl.edu.icm.jlargearrays.ConcurrencyUtils; import water.*; import water.exceptions.H2OIllegalArgumentException; import water.fvec.Chunk; import water.fvec.Frame; import water.fvec.NewChunk; import water.fvec.Vec; import java.math.BigInteger; import java.util.Arrays; public class MathUtils { /** * Euler–Mascheroni constant (also called Euler's constant) */ public static final double EULER_MASCHERONI_CONSTANT = 0.5772156649; public static double weightedSigma(long nobs, double wsum, double xSum, double xxSum) { double reg = 1.0/wsum; return nobs <= 1? 0 : Math.sqrt(xxSum*reg - (xSum*xSum) * reg * reg); } public static double logFactorial(long y) { if(y <= 100) { double l = 0; for (long i = 2; i <= y; ++i) l += Math.log(i); return l; } return y * Math.log(y) - y + .5*Math.log(2*Math.PI*y); } public static int combinatorial(int num, int d) { if (num < 0 || d < 0) throw new H2OIllegalArgumentException("argument to combinatorial must be >= 0!"); int denom1 = num-d; int maxDenom = Math.max(d, denom1); int minDenom = Math.min(d, denom1); int prodNum = 1; for (int index = maxDenom+1; index <= num; index++) prodNum *= index; int prodDenom = 1; for (int index = 1; index <= minDenom; index++) prodDenom *= index; return (prodNum/prodDenom); } static public double computeWeightedQuantile(Vec weight, Vec values, double alpha) { QuantileModel.QuantileParameters parms = new QuantileModel.QuantileParameters(); Frame tempFrame = weight == null ? new Frame(Key.<Frame>make(), new String[]{"y"}, new Vec[]{values}) : new Frame(Key.<Frame>make(), new String[]{"y","w"}, new Vec[]{values, weight}); DKV.put(tempFrame); parms._train = tempFrame._key; parms._probs = new double[]{alpha}; parms._weights_column = weight == null ? 
null : "w"; Job<QuantileModel> job = new Quantile(parms).trainModel(); QuantileModel kmm = job.get(); double value = kmm._output._quantiles[0/*col*/][0/*quantile*/]; assert(!Double.isNaN(value)); Log.debug("weighted " + alpha + "-quantile: " + value); job.remove(); kmm.remove(); DKV.remove(tempFrame._key); return value; } static public class ComputeAbsDiff extends MRTask<ComputeAbsDiff> { @Override public void map(Chunk chks[], NewChunk nc[]) { for (int i=0; i<chks[0].len(); ++i) nc[0].addNum(Math.abs(chks[0].atd(i) - chks[1].atd(i))); } } /** * Wrapper around weighted paralell basic stats computation (mean, variance) */ public static final class BasicStats extends Iced { private final double[] _mean; private final double[] _m2; double[] _wsums; transient double[] _nawsums; long [] _naCnt; double[] _var; double[] _sd; public double _wsum = Double.NaN; public long[] _nzCnt; long _nobs = -1; public BasicStats(int n) { _mean = MemoryManager.malloc8d(n); _m2 = MemoryManager.malloc8d(n); _wsums = MemoryManager.malloc8d(n); _nzCnt = MemoryManager.malloc8(n); _nawsums = MemoryManager.malloc8d(n); _naCnt = MemoryManager.malloc8(n); } public void add(double x, double w, int i) { if(Double.isNaN(x)) { _nawsums[i] += w; _naCnt[i]++; } else if (w != 0) { double wsum = _wsums[i] + w; double delta = x - _mean[i]; double R = delta * w / wsum; _mean[i] += R; _m2[i] += _wsums[i] * delta * R; _wsums[i] = wsum; ++_nzCnt[i]; } } public void add(double[] x, double w) { for (int i = 0; i < x.length; ++i) add(x[i], w, i); } public void setNobs(long nobs, double wsum) { _nobs = nobs; _wsum = wsum; } public void fillSparseZeros(int i) { int zeros = (int)(_nobs - _nzCnt[i]); if(zeros > 0) { double muReg = 1.0 / (_wsum - _nawsums[i]); double zeromean = 0; double delta = _mean[i] - zeromean; double zerowsum = _wsum - _wsums[i] - _nawsums[i]; _mean[i] *= _wsums[i] * muReg; _m2[i] += delta * delta * _wsums[i] * zerowsum * muReg; //this is the variance*(N-1), will do sqrt(_sigma/(N-1)) later in postGlobal _wsums[i] += zerowsum; } } public void fillSparseNAs(int i) {_naCnt[i] = (int)(_nobs - _nzCnt[i]);} public void reduce(BasicStats bs) { ArrayUtils.add(_nzCnt, bs._nzCnt); ArrayUtils.add(_naCnt, bs._naCnt); for (int i = 0; i < _mean.length; ++i) { double wsum = _wsums[i] + bs._wsums[i]; if(wsum != 0) { double delta = bs._mean[i] - _mean[i]; _mean[i] = (_wsums[i] * _mean[i] + bs._wsums[i] * bs._mean[i]) / wsum; _m2[i] += bs._m2[i] + delta * delta * _wsums[i] * bs._wsums[i] / wsum; } _wsums[i] = wsum; } _nobs += bs._nobs; _wsum += bs._wsum; } private double[] variance(double[] res) { for (int i = 0; i < res.length; ++i) { long nobs = _nobs - _naCnt[i]; res[i] = nobs == 0?0:(nobs / (nobs - 1.0)) * _m2[i] / _wsums[i]; } return res; } public double variance(int i){return variance()[i];} public double[] variance() { // if(sparse()) throw new UnsupportedOperationException("Can not do single pass sparse variance computation"); if (_var != null) return _var; return _var = variance(MemoryManager.malloc8d(_mean.length)); } public double sigma(int i){return sigma()[i];} public double[] sigma() { if(_sd != null) return _sd; double[] res = variance().clone(); for (int i = 0; i < res.length; ++i) res[i] = Math.sqrt(res[i]); return _sd = res; } public double[] mean() {return _mean;} public double mean(int i) {return _mean[i];} public long nobs() {return _nobs;} public boolean isSparse(int col) {return _nzCnt[col] < _nobs;} } public static final class SimpleStats extends Iced { double[] _wsums; double[] _xsumsqs; double[] _xsums; 
double[] _var; double[] _sd; double[] _mean; public SimpleStats(int n) { _xsumsqs = MemoryManager.malloc8d(n); _xsums = MemoryManager.malloc8d(n); _wsums = MemoryManager.malloc8d(n); } public void add(double x, double w, int i) { if(!Double.isNaN(x) && w!=0) { _xsums[i] += x*w; _xsumsqs[i] += x*x*w; _wsums[i] = _wsums[i] + w; } } public void add(double[] x, double w) { for (int i = 0; i < x.length; ++i) add(x[i], w, i); } private double[] variance(double[] res) { if (_mean == null) mean(); // calculate mean if it is not done already for (int i = 0; i < res.length; ++i) { double v1 = _wsums[i]; double oneOv1M1 = 1.0/(v1-1); res[i] = (v1 == 0 || v1==1)?0:(_xsumsqs[i]-v1*_mean[i]*_mean[i])*oneOv1M1; } return _var=res; } public double[] mean() { if (_mean!=null) return _mean; int len = _xsums.length; _mean = MemoryManager.malloc8d(len); for (int index=0; index<len; index++) { _mean[index] = _xsums[index]/_wsums[index]; } return _mean; } public double variance(int i){return variance()[i];} public double[] variance() { if (_var != null) return _var; return _var = variance(MemoryManager.malloc8d(_mean.length)); } public double sigma(int i){return sigma()[i];} public double[] sigma() { if(_sd != null) return _sd; double[] res = variance().clone(); for (int i = 0; i < res.length; ++i) res[i] = Math.sqrt(res[i]); return _sd = res; } } /** Fast approximate sqrt * @return sqrt(x) with up to 5% relative error */ public static double approxSqrt(double x) { return Double.longBitsToDouble(((Double.doubleToLongBits(x) >> 32) + 1072632448) << 31); } public static BigInteger convertDouble2BigInteger(double x) { long tempValue = Double.doubleToRawLongBits(x); return ((tempValue>>63)==0)?BigInteger.valueOf(tempValue).setBit(63):BigInteger.valueOf(tempValue^(-1)); } /** Fast approximate sqrt * @return sqrt(x) with up to 5% relative error */ public static float approxSqrt(float x) { return Float.intBitsToFloat(532483686 + (Float.floatToRawIntBits(x) >> 1)); } /** Fast approximate 1./sqrt * @return 1./sqrt(x) with up to 2% relative error */ public static double approxInvSqrt(double x) { double xhalf = 0.5d*x; x = Double.longBitsToDouble(0x5fe6ec85e7de30daL - (Double.doubleToLongBits(x)>>1)); return x*(1.5d - xhalf*x*x); } /** Fast approximate 1./sqrt * @return 1./sqrt(x) with up to 2% relative error */ public static float approxInvSqrt(float x) { float xhalf = 0.5f*x; x = Float.intBitsToFloat(0x5f3759df - (Float.floatToIntBits(x)>>1)); return x*(1.5f - xhalf*x*x); } /** Fast approximate exp * @return exp(x) with up to 5% relative error */ public static double approxExp(double x) { return Double.longBitsToDouble(((long)(1512775 * x + 1072632447)) << 32); } /** Fast approximate log for values greater than 1, otherwise exact * @return log(x) with up to 0.1% relative error */ public static double approxLog(double x){ if (x > 1) return ((Double.doubleToLongBits(x) >> 32) - 1072632447d) / 1512775d; else return Math.log(x); } /** Fast calculation of log base 2 for integers. 
* @return log base 2 of n */ public static int log2(int n) { if (n <= 0) throw new IllegalArgumentException(); return 31 - Integer.numberOfLeadingZeros(n); } public static int log2(long n) { return 63 - Long.numberOfLeadingZeros(n); } private static final double LOG2 = Math.log(2); public static double log2(double x) { return Math.log(x) / LOG2; } public static float[] div(float[] nums, float n) { assert !Float.isInfinite(n) : "Trying to divide " + Arrays.toString(nums) + " by " + n; // Almost surely not what you want for (int i=0; i<nums.length; i++) nums[i] /= n; return nums; } public static double[] div(double[] nums, double n) { assert !Double.isInfinite(n) : "Trying to divide " + Arrays.toString(nums) + " by " + n; // Almost surely not what you want for (int i=0; i<nums.length; i++) nums[i] /= n; return nums; } public static float sum(final float[] from) { float result = 0; for (float d: from) result += d; return result; } public static double sum(final double[] from) { double result = 0; for (double d: from) result += d; return result; } public static float sumSquares(final float[] a) { return sumSquares(a, 0, a.length); } /** * Approximate sumSquares * @param a Array with numbers * @param from starting index (inclusive) * @param to ending index (exclusive) * @return approximate sum of squares based on a sample somewhere in the middle of the array (pos determined by bits of a[0]) */ public static float approxSumSquares(final float[] a, int from, int to) { final int len = to-from; final int samples = Math.max(len / 16, 1); final int offset = from + Math.abs(Float.floatToIntBits(a[0])) % (len-samples); assert(offset+samples <= to); return sumSquares(a, offset, offset + samples) * (float)len / (float)samples; } public static float sumSquares(final float[] a, int from, int to) { assert(from >= 0 && to <= a.length); float result = 0; final int cols = to-from; final int extra=cols-cols%8; final int multiple = (cols/8)*8-1; float psum1 = 0, psum2 = 0, psum3 = 0, psum4 = 0; float psum5 = 0, psum6 = 0, psum7 = 0, psum8 = 0; for (int c = from; c < from + multiple; c += 8) { psum1 += a[c ]*a[c ]; psum2 += a[c+1]*a[c+1]; psum3 += a[c+2]*a[c+2]; psum4 += a[c+3]*a[c+3]; psum5 += a[c+4]*a[c+4]; psum6 += a[c+5]*a[c+5]; psum7 += a[c+6]*a[c+6]; psum8 += a[c+7]*a[c+7]; } result += psum1 + psum2 + psum3 + psum4; result += psum5 + psum6 + psum7 + psum8; for (int c = from + extra; c < to; ++c) { result += a[c]*a[c]; } return result; } /** * Compare two numbers to see if they are within one ulp of the smaller decade. * Order of the arguments does not matter. * * @param a First number * @param b Second number * @return true if a and b are essentially equal, false otherwise. 
*/ public static boolean equalsWithinOneSmallUlp(float a, float b) { if (Double.isNaN(a) && Double.isNaN(b)) return true; float ulp_a = Math.ulp(a); float ulp_b = Math.ulp(b); float small_ulp = Math.min(ulp_a, ulp_b); float absdiff_a_b = Math.abs(a - b); // subtraction order does not matter, due to IEEE 754 spec return absdiff_a_b <= small_ulp; } public static boolean equalsWithinOneSmallUlp(double a, double b) { if (Double.isNaN(a) && Double.isNaN(b)) return true; double ulp_a = Math.ulp(a); double ulp_b = Math.ulp(b); double small_ulp = Math.min(ulp_a, ulp_b); double absdiff_a_b = Math.abs(a - b); // subtraction order does not matter, due to IEEE 754 spec return absdiff_a_b <= small_ulp; } // Section 4.2: Error bound on recursive sum from Higham, Accuracy and Stability of Numerical Algorithms, 2nd Ed // |E_n| <= (n-1) * u * \sum_i^n |x_i| + P(u^2) public static boolean equalsWithinRecSumErr(double actual, double expected, int n, double absum) { return Math.abs(actual - expected) <= (n-1) * Math.ulp(actual) * absum; } /** * Compare 2 doubles within a tolerance. True iff the numbers difference within a absolute tolerance or * within an relative tolerance. * * @param a double * @param b double * @param absoluteTolerance - Absolute allowed tolerance * @param relativeTolerance - Relative allowed tolerance * @return true if equal within tolerances */ public static boolean compare(double a, double b, double absoluteTolerance, double relativeTolerance) { assert absoluteTolerance >= 0; assert relativeTolerance >= 0; final boolean equal = Double.compare(a, b) == 0; if (equal) { return true; } final double absoluteError = Math.abs(a - b); if (absoluteError <= absoluteTolerance) { return true; } final double relativeError = Math.abs(absoluteError / Math.max(Math.abs(a), Math.abs(b))); return relativeError < relativeTolerance; } // some common Vec ops public static double innerProduct(double [] x, double [] y){ double result = 0; for (int i = 0; i < x.length; i++) result += x[i] * y[i]; return result; } public static double l2norm2(double [] x){ double sum = 0; for(double d:x) sum += d*d; return sum; } public static double l1norm(double [] x){ double sum = 0; for(double d:x) sum += d >= 0?d:-d; return sum; } public static double l2norm(double [] x){ return Math.sqrt(l2norm2(x)); } public static double [] wadd(double [] x, double [] y, double w){ for(int i = 0; i < x.length; ++i) x[i] += w*y[i]; return x; } // Random 1000 larger primes public static final long[] PRIMES = { 709887397L, 98016697L, 85080053L, 56490571L, 385003067, 57525611L, 191172517L, 707389223L, 38269029L, 971065009L, 969012193L, 932573549L, 88277861L, 557977913L, 186530489L, 971846399L, 93684557L, 568491823L, 374500471L, 260955337L, 98748991L, 571124921L, 268388903L, 931975097L, 80137923L, 378339371L, 191476231L, 982164353L, 96991951L, 193488247L, 186331151L, 186059399L, 99717967L, 714703333L, 195765091L, 934873301L, 33844087L, 392819423L, 709242049L, 975098351L, 15814261L, 846357791L, 973645069L, 968987629L, 27247177L, 939785537L, 714611087L, 846883019L, 98514157L, 851126069L, 180055321L, 378662957L, 97312573L, 553353439L, 268057183L, 554327167L, 24890223L, 180650339L, 964569689L, 565633303L, 52962097L, 931225723L, 556700413L, 570525509L, 99233241L, 270892441L, 185716603L, 928527371L, 21286513L, 561435671L, 561547303L, 696202733L, 53624617L, 930346357L, 567779323L, 973736227L, 91898247L, 560750693L, 187256227L, 373704811L, 35668549L, 191257589L, 934128313L, 698681153L, 81768851L, 378742241L, 971211347L, 848250443L, 57148391L, 
844575103L, 976095787L, 193706609L, 12680637L, 929060857L, 973363793L, 979803301L, 59840627L, 923478557L, 262430459L, 970229543L, 77980417L, 924763579L, 703130651L, 263613989L, 88115473L, 695202203L, 378625519L, 850417619L, 37875123L, 696088793L, 553766351L, 381382453L, 90515451L, 570302171L, 962465983L, 923407679L, 19931057L, 856231703L, 941060833L, 971397239L, 10339277L, 379853059L, 845156227L, 187980707L, 87821407L, 938344853L, 380122333L, 270054377L, 83320839L, 261180221L, 192697819L, 839701211L, 12564821L, 556717591L, 848036339L, 374151047L, 97257047L, 936281293L, 188681027L, 195149543L, 87704907L, 927976717L, 844819139L, 273676181L, 39585799L, 706129079L, 384034087L, 933489013L, 59297633L, 268994839L, 981927539L, 195840863L, 67345573L, 967452049L, 560096107L, 381740743L, 30924129L, 924804943L, 856120231L, 378647363L, 80385621L, 697508593L, 274289269L, 193688753L, 73891551L, 271848133L, 932057111L, 257551951L, 91279349L, 938126183L, 555432523L, 981016831L, 30805159L, 196382603L, 706893793L, 933713923L, 24244231L, 378590591L, 710972333L, 269517089L, 16916897L, 562526791L, 183312523L, 189463201L, 38989417L, 391893721L, 972826333L, 386610647L, 64896971L, 926400467L, 932555329L, 850558381L, 89064649L, 714662899L, 384851339L, 265636697L, 91508059L, 275418673L, 559709609L, 922161403L, 10531101L, 857303261L, 853919329L, 558603317L, 55745273L, 856595459L, 923077957L, 841009783L, 16850687L, 708322837L, 184264963L, 696558959L, 93682079L, 375977179L, 974002649L, 849803629L, 97926061L, 968610047L, 844793123L, 384591617L, 55237313L, 935336407L, 559316999L, 554674333L, 14130253L, 846839069L, 931726963L, 696160733L, 75174581L, 557994317L, 838168543L, 966852493L, 77072929L, 970159979L, 964704397L, 189568151L, 86268653L, 855284593L, 850048289L, 191313583L, 93713647L, 191142043L, 388880231L, 553249517L, 30195511L, 387150937L, 849836231L, 970592537L, 28652147L, 268424399L, 558866377L, 186814247L, 39044643L, 976912063L, 845625881L, 711967423L, 50662731L, 386395531L, 188849761L, 711490979L, 15549633L, 979839541L, 559484329L, 563433161L, 59397379L, 920856857L, 192399139L, 187354667L, 55056687L, 196880249L, 558354787L, 967650823L, 94294149L, 389784139L, 180486277L, 565918721L, 20466667L, 268413349L, 267469649L, 936151193L, 72346123L, 979276561L, 695068741L, 699857383L, 54711473L, 182608813L, 183270007L, 702031919L, 97944489L, 387586607L, 381249059L, 376605809L, 77319227L, 556347787L, 701093269L, 192346391L, 90335227L, 256723087L, 962532569L, 266508769L, 17739193L, 937662653L, 847160927L, 555998467L, 88295583L, 857415067L, 261917263L, 385579793L, 51141643L, 373631119L, 705996133L, 973170461L, 55331307L, 967455763L, 938587709L, 706688057L, 21297597L, 922065379L, 185517257L, 187628431L, 96410283L, 563376631L, 570763741L, 936993961L, 52224149L, 979458331L, 392576593L, 700887227L, 68821447L, 979730771L, 980082293L, 273639451L, 50288347L, 378934783L, 571910639L, 557914661L, 96941061L, 260494543L, 711310849L, 192637969L, 22890911L, 963887479L, 554730437L, 922265609L, 78772921L, 696207877L, 570249107L, 393007129L, 86456451L, 385480783L, 926825371L, 267285527L, 22092111L, 713561533L, 393315437L, 856347343L, 93146269L, 855525691L, 939838357L, 708335053L, 93532607L, 714598517L, 853725269L, 844167949L, 21977701L, 270958973L, 192136349L, 375609701L, 19897797L, 966888187L, 932260729L, 383532827L, 25237737L, 272543773L, 392590733L, 853665451L, 21725587L, 700887881L, 194074883L, 981838607L, 80417439L, 704312201L, 553750697L, 980933669L, 74528743L, 179675627L, 383340833L, 709235897L, 90741063L, 192309673L, 571935391L, 
194902511L, 94110553L, 924261131L, 191984729L, 269236567L, 58470623L, 182656571L, 849099131L, 569471723L, 11961733L, 851046631L, 262712029L, 193922059L, 51451747L, 854728031L, 264981697L, 842532959L, 11163561L, 967373513L, 857689213L, 971242631L, 91159577L, 376996001L, 561336649L, 709380197L, 53406409L, 963273559L, 273184829L, 559905089L, 80983593L, 570001207L, 181289533L, 846881023L, 28890767L, 845688421L, 555569233L, 189620681L, 78793177L, 854935111L, 572712211L, 965532551L, 37847349L, 262570873L, 963609191L, 926753309L, 58346681L, 189095527L, 842218019L, 265500401L, 58861247L, 389674489L, 390095639L, 841892383L, 85054659L, 191505641L, 712111369L, 841407407L, 91256717L, 930216869L, 196419757L, 714269687L, 27174241L, 572612297L, 191433857L, 180735229L, 55107853L, 183312203L, 981881179L, 185146877L, 82402047L, 187382323L, 274363207L, 191076499L, 57751437L, 187785713L, 924689923L, 393190717L, 71161873L, 197227729L, 180143683L, 381192601L, 15005641L, 376847017L, 567605161L, 838240673L, 80153253L, 965992537L, 857310253L, 261754247L, 36064557L, 267898751L, 967090921L, 937570097L, 12337347L, 712318247L, 978577751L, 568905091L, 94257099L, 842182967L, 374004977L, 381257309L, 96791961L, 921781121L, 557889977L, 192185387L, 93247459L, 193216277L, 700322947L, 970295303L, 13157043L, 377418233L, 938901113L, 380496409L, 27278997L, 980067787L, 921546019L, 182505511L, 80115941L, 934837181L, 926914847L, 259623571L, 28102691L, 562673513L, 967105907L, 926710639L, 94210853L, 920748757L, 391684499L, 387247697L, 57752203L, 839753723L, 566374183L, 569364071L, 91244107L, 701970299L, 183147761L, 192938983L, 57579247L, 387206317L, 938222833L, 270174413L, 80376961L, 923378317L, 383078257L, 191690461L, 96389807L, 267712741L, 850101353L, 970424239L, 34699577L, 707392033L, 846517769L, 572099873L, 80426597L, 980129011L, 846324977L, 571031159L, 93248107L, 567629729L, 192701459L, 375630173L, 97379631L, 558891877L, 385348591L, 708982787L, 99143939L, 181841897L, 192597829L, 854675441L, 71312189L, 383257489L, 382600903L, 714164239L, 14287911L, 555130057L, 970321717L, 570861703L, 25868783L, 559474921L, 269746163L, 934658899L, 11042893L, 188907143L, 933254173L, 275577487L, 22606051L, 570314989L, 706436851L, 382812809L, 20093987L, 383146817L, 258516589L, 180236977L, 70049377L, 929492677L, 704664187L, 185934289L, 58575211L, 392996663L, 856628287L, 197998483L, 95194827L, 980551813L, 927882983L, 391326917L, 24153433L, 378212663L, 849772571L, 382378159L, 69371443L, 259661527L, 380291797L, 970105957L, 39696727L, 931108069L, 557712577L, 706204777L, 90975487L, 377724973L, 976364429L, 258731423L, 32280277L, 966276109L, 392993767L, 922543927L, 35895501L, 843852797L, 842395019L, 938078633L, 80021733L, 180972413L, 972384389L, 257708257L, 11399039L, 699607547L, 179571479L, 381531497L, 95577441L, 967694027L, 703939237L, 560134033L, 10374449L, 969953659L, 570804607L, 188228603L, 98870849L, 695911061L, 179866429L, 566537623L, 18741029L, 572525543L, 705109633L, 374728357L, 66409487L, 857997661L, 969932363L, 271021117L, 87386813L, 924659837L, 930064451L, 699659099L, 92722127L, 940860467L, 381665183L, 979952719L, 27144841L, 274646369L, 936578021L, 559210007L, 16684763L, 196169173L, 926404139L, 192762901L, 17681727L, 189521161L, 181515617L, 858437443L, 23552873L, 258885643L, 572831971L, 973561471L, 59372601L, 181459769L, 566285441L, 965442013L, 93491029L, 180786043L, 929988151L, 845756941L, 35529257L, 699442283L, 853078201L, 390950671L, 15958801L, 712435631L, 387157913L, 976160347L, 68684279L, 179988047L, 389090791L, 699322219L, 10307823L, 
259064219L, 377097319L, 850345549L, 66881839L, 933108151L, 266299519L, 260426339L, 72105031L, 931087667L, 973797767L, 392582221L, 66105353L, 843357917L, 965549551L, 555596219L, 98867657L, 973871617L, 928572781L, 965246651L, 73876453L, 934831181L, 940948433L, 570264209L, 71210171L, 847592843L, 262149649L, 555835717L, 17468753L, 388931927L, 260194087L, 970748903L, 39762147L, 181554757L, 711884729L, 261162977L, 35297709L, 856201667L, 380186867L, 180397589L, 11201441L, 922615327L, 376981837L, 554670449L, 34089477L, 964124867L, 569139349L, 853955087L, 95490287L, 709207027L, 572850679L, 566624309L, 39946727L, 968467037L, 840315521L, 923008613L, 96636383L, 570123877L, 695094643L, 695377961L, 85046823L, 698062327L, 840797417L, 197750629L, 88399737L, 389835253L, 939584969L, 923130347L, 71023647L, 981863369L, 696543251L, 375409421L, 13752431L, 855538433L, 269223991L, 980951861L, 17976011L, 383342473L, 696386767L, 383000213L, 38001763L, 260224427L, 969142787L, 924409687L, 92289037L, 705677339L, 854639273L, 709648501L, 51602861L, 927498401L, 963151939L, 257969059L, 99942561L, 702552397L, 378807467L, 843849547L, 20636249L, 838174921L, 921188483L, 697743737L, 55171601L, 963313399L, 969542537L, 268784609L, 10638293L, 554031749L, 257309069L, 856356289L, 272064581L, 193518863L, 272811667L, 382857571L, 705293539L, 94434307L, 841390831L, 378434863L, 22644091L, 933591301L, 263483903L, 937305671L, 92030791L, 855482651L, 706132187L, 703258151L, 34513681L, 262886671L, 193130321L, 977976803L, 51169839L, 934495231L, 266741317L, 974393971L, 22079491L, 700151497L, 705291473L, 568384493L, 93712889L, 851253661L, 265654027L, 393268147L, 56217787L, 850416367L, 857303827L, 391728109L, 98810113L, 191962153L, 268291579L, 181466911L, 94017901L, 921053269L, 186716597L, 963617209L, 59349733L, 192916351L, 853395997L, 181896479L, 54769193L, 186653633L, 841422889L, 560707079L, 92365467L, 703592261L, 982412807L, 982243111L, 78892241L, 927464383L, 930534359L, 268636259L, 94549379L, 712074763L, 559450939L, 857428151L, 71670509L, 256671463L, 936352111L, 980141417L, 36271839L, 186475811L, 925100521L, 972243169L, 91920501L, 696389069L, 928678631L, 381418831L, 12023729L, 844714907L, 857426887L, 846161201L, 99505771L, 386542469L, 856860959L, 572063227L, 56038117L, 385629949L, 979920607L, 258498697L, 81234773L, 389956109L, 556370957L, 379944343L, 50730109L, 565321789L, 981670519L, 974403491L, 96057349L, 711469903L, 979604279L, 265069711L, 35443673L, 197595613L, 925185959L, 940443347L, 17173331L, 854818409L, 707162809L, 557260003L, 12290843L, 973388453L, 713357609L, 379834097L, 16945751L, 272464273L, 853795783L, 975641603L, 20326481L, 271093661L, 560031733L, 563000783L, 89785227L, 381224603L, 389678899L, 382372531L, 93398507L, 713755909L, 379280107L, 849555587L, 12726569L, 713067799L, 386762897L, 699452197L, 68249743L, 921329677L, 969662999L, 708401153L, 92343817L, 695690659L, 376186373L, 971774849L, 68191267L, 559122461L, 846282403L, 928908247L, 36511479L, 921516097L, 270107843L, 568075631L, 87827469L, 844675283L, 562808263L, 191356681L, 14927579L, 840652927L, 553679459L, 558298787L, 89230059L, 980861633L, 266720513L, 566820913L, 69320183L, 554150749L, 970182487L, 196312381L, 13836923L, 927087017L, 269236103L, 197279059L, 27011321L, 190280689L, 844923689L, 708889619L, 35296049L, 383543333L, 971450659L, 932468473L, 94659689L, 569153671L, 378633757L, 972685003L, 94676831L, 383130073L, 184098373L, 848604173L, 57587529L, 383922947L, 257719843L, 377849887L, 94816741L, 974841787L, 851800231L, 386896033L, 28408719L, 852139663L, 975564299L, 
268145221L, 11937199L, 386365229L, 190900637L, 187768367L, }; public static double roundToNDigits(double d, int n) { if(d == 0)return d; int log = (int)Math.log10(d); int exp = n; exp -= log; int ival = (int)(Math.round(d * Math.pow(10,exp))); return ival/Math.pow(10,exp); } public enum Norm {L1,L2,L2_2,L_Infinite} public static double[] min_max_mean_stddev(long[] counts) { double min = Float.MAX_VALUE; double max = Float.MIN_VALUE; double mean = 0; for (long tmp : counts) { min = Math.min(tmp, min); max = Math.max(tmp, max); mean += tmp; } mean /= counts.length; double stddev = 0; for (long tmp : counts) { stddev += Math.pow(tmp - mean, 2); } stddev /= counts.length; stddev = Math.sqrt(stddev); return new double[] {min,max,mean,stddev}; } public static double sign(double d) { if(d == 0)return 0; return d < 0?-1:1; } public static class DCT { public static void initCheck(Frame input, int width, int height, int depth) { ConcurrencyUtils.setNumberOfThreads(1); if (width < 1 || height < 1 || depth < 1) throw new H2OIllegalArgumentException("dimensions must be >= 1"); if (width*height*depth != input.numCols()) throw new H2OIllegalArgumentException("dimensions HxWxD must match the # columns of the frame"); for (Vec v : input.vecs()) { if (v.naCnt() > 0) throw new H2OIllegalArgumentException("DCT can not be computed on rows with missing values"); if (!v.isNumeric()) throw new H2OIllegalArgumentException("DCT can only be computed on numeric columns"); } } /** * Compute the 1D discrete cosine transform for each row in the given Frame, and return a new Frame * * @param input Frame containing numeric columns with data samples * @param N Number of samples (must be less or equal than number of columns) * @param inverse Whether to compute the inverse * @return Frame containing 1D (inverse) DCT of each row (same dimensionality) */ public static Frame transform1D(Frame input, final int N, final boolean inverse) { initCheck(input, N, 1, 1); return new MRTask() { @Override public void map(Chunk[] cs, NewChunk[] ncs) { double[] a = new double[N]; for (int row = 0; row < cs[0]._len; ++row) { // fill 1D array for (int i = 0; i < N; ++i) a[i] = cs[i].atd(row); // compute DCT for each row if (!inverse) new DoubleDCT_1D(N).forward(a, true); else new DoubleDCT_1D(N).inverse(a, true); // write result to NewChunk for (int i = 0; i < N; ++i) ncs[i].addNum(a[i]); } } }.doAll(input.numCols(), Vec.T_NUM, input).outputFrame(); } /** * Compute the 2D discrete cosine transform for each row in the given Frame, and return a new Frame * * @param input Frame containing numeric columns with data samples * @param height height * @param width width * @param inverse Whether to compute the inverse * @return Frame containing 2D DCT of each row (same dimensionality) */ public static Frame transform2D(Frame input, final int height, final int width, final boolean inverse) { initCheck(input, height, width, 1); return new MRTask() { @Override public void map(Chunk[] cs, NewChunk[] ncs) { double[][] a = new double[height][width]; // each row is a 2D sample for (int row = 0; row < cs[0]._len; ++row) { for (int i = 0; i < height; ++i) for (int j = 0; j < width; ++j) a[i][j] = cs[i * width + j].atd(row); // compute 2D DCT if (!inverse) new DoubleDCT_2D(height, width).forward(a, true); else new DoubleDCT_2D(height, width).inverse(a, true); // write result to NewChunk for (int i = 0; i < height; ++i) for (int j = 0; j < width; ++j) ncs[i * width + j].addNum(a[i][j]); } } }.doAll(height * width, Vec.T_NUM, input).outputFrame(); } /** * Compute 
the 3D discrete cosine transform for each row in the given Frame, and return a new Frame * * @param input Frame containing numeric columns with data samples * @param height height * @param width width * @param depth depth * @param inverse Whether to compute the inverse * @return Frame containing 3D DCT of each row (same dimensionality) */ public static Frame transform3D(Frame input, final int height, final int width, final int depth, final boolean inverse) { initCheck(input, height, width, depth); return new MRTask() { @Override public void map(Chunk[] cs, NewChunk[] ncs) { double[][][] a = new double[height][width][depth]; // each row is a 3D sample for (int row = 0; row < cs[0]._len; ++row) { for (int i = 0; i < height; ++i) for (int j = 0; j < width; ++j) for (int k = 0; k < depth; ++k) a[i][j][k] = cs[i*(width*depth) + j*depth + k].atd(row); // compute 3D DCT if (!inverse) new DoubleDCT_3D(height, width, depth).forward(a, true); else new DoubleDCT_3D(height, width, depth).inverse(a, true); // write result to NewChunk for (int i = 0; i < height; ++i) for (int j = 0; j < width; ++j) for (int k = 0; k < depth; ++k) ncs[i*(width*depth) + j*depth + k].addNum(a[i][j][k]); } } }.doAll(height*width*depth, Vec.T_NUM, input).outputFrame(); } } public static class SquareError extends MRTask<SquareError> { public double _sum; @Override public void map( Chunk resp, Chunk pred ) { double sum = 0; for( int i=0; i<resp._len; i++ ) { double err = resp.atd(i)-pred.atd(i); sum += err*err; } _sum = sum; } @Override public void reduce( SquareError ce ) { _sum += ce._sum; } } public static double y_log_y(double y, double mu) { if(y == 0)return 0; if(mu < Double.MIN_NORMAL) mu = Double.MIN_NORMAL; return y * Math.log(y / mu); } /** Compare signed longs */ public static int compare(long x, long y) { return (x < y) ? -1 : ((x == y) ? 0 : 1); } /** Copmarision of unsigned longs. */ public static int compareUnsigned(long a, long b) { // Just map [0, 2^64-1] to [-2^63, 2^63-1] return compare(a^0x8000000000000000L, b^0x8000000000000000L); } /** Comparision of 128bit unsigned values represented by 2 longs */ public static int compareUnsigned(long hiA, long loA, long hiB, long loB) { int resHi = compareUnsigned(hiA, hiB); int resLo = compareUnsigned(loA, loB); return resHi != 0 ? resHi : resLo; } /** * Logloss * @param err prediction error (between 0 and 1) * @return logloss */ public static double logloss(double err) { assert(err >= 0 && err <= 1) : "Logloss is only defined for values in 0...1, but got " + err; return Math.min(MAXLL, -Math.log(1.0-err)); } final static double MAXLL = -Math.log(1e-15); //34.53878 public static double[][] arrayTranspose(double[][] arr) { assert arr!=null:"null array"; assert arr[0] != null:"null array"; int length1 = arr.length; int length2 = arr[0].length; double[][] transposed = new double[length2][]; for (int ind1 = 0; ind1 < length2; ind1++) { for (int ind2 = 0; ind2 < length1; ind2++) { transposed[ind1][ind2] = arr[ind2][ind1]; } } return transposed; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/NetworkUtils.java
package water.util; import java.io.IOException; import java.net.InetAddress; import java.net.NetworkInterface; import java.net.SocketException; import java.net.UnknownHostException; import static water.H2O.OptArgs.SYSTEM_PROP_PREFIX; import static water.util.ArrayUtils.toByteArray; /** * Utilities to support networking code. * * See: * - http://www.tcpipguide.com/free/diagrams/ipv6scope.png for IPV6 Scope explanations * - https://en.wikipedia.org/wiki/Multicast_address */ public class NetworkUtils { // Google DNS https://developers.google.com/speed/public-dns/docs/using#important_before_you_start public static byte[] GOOGLE_DNS_IPV4 = new byte[] {8, 8 , 8, 8}; public static byte[] GOOGLE_DNS_IPV6 = toByteArray(new int[] {0x20, 0x01, 0x48, 0x60, 0x48, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88, 0x88 }); /** Override IPv6 scope by a defined value */ private static String H2O_SYSTEM_SCOPE_PARAM = SYSTEM_PROP_PREFIX + "network.ipv6.scope"; /** Define timeout in ms to figure out if local ip is reachable */ private static String H2O_SYSTEM_LOCAL_IP_PING_TIMEOUT = SYSTEM_PROP_PREFIX + "network.ip.ping.timeout"; // See IPv6 Multicast scopes: public static long SCOPE_IFACE_LOCAL = 0x0001000000000000L; public static long SCOPE_LINK_LOCAL = 0x0002000000000000L; public static long SCOPE_SITE_LOCAL = 0x0005000000000000L; public static long SCOPE_ORG_LOCAL = 0x0008000000000000L; public static long SCOPE_GLOBAL_LOCAL = 0x000e000000000000L; public static long SCOPE_MASK = ~0x000f000000000000L; public static int[] IPV4_MULTICAST_ALLOCATION_RANGE = new int[] { /* low */ 0xE1000000, /* high */ 0xEFFFFFFF }; // The preconfigured scopes of IPv6 multicast groups - see https://en.wikipedia.org/wiki/Multicast_address#IPv6 public static long[][] IPV6_MULTICAST_ALLOCATION_RANGE = new long[][] { /* low */ new long[] {0xff08000000000000L, 0x0L}, // T-flag for transient, 8 = organization scope (will be replace by real scope /* high */ new long[] {0xff08FFFFFFFFFFFFL, 0xFFFFFFFFFFFFFFFFL}}; public static boolean isIPv6Preferred() { return Boolean.parseBoolean(System.getProperty("java.net.preferIPv6Addresses", "false")) || (System.getProperty("java.net.preferIPv4Addresses") != null && !Boolean.parseBoolean(System.getProperty("java.net.preferIPv4Addresses"))); } public static boolean isIPv4Preferred() { return Boolean.parseBoolean(System.getProperty("java.net.preferIPv4Addresses", "true")); } public static InetAddress getIPv4MulticastGroup(int hash) throws UnknownHostException { return getIPv4MulticastGroup(hash, IPV4_MULTICAST_ALLOCATION_RANGE[0], IPV4_MULTICAST_ALLOCATION_RANGE[1]); } public static InetAddress getIPv4MulticastGroup(int hash, int lowIp, int highIp) throws UnknownHostException { hash = hash & 0x7fffffff; // delete sign int port = (hash % (highIp-lowIp+1)) + lowIp; byte[] ip = new byte[4]; for( int i=0; i<4; i++ ) ip[i] = (byte)(port>>>((3-i)<<3)); return InetAddress.getByAddress(ip); } public static InetAddress getIPv6MulticastGroup(int hash, long scope) throws UnknownHostException { return getIPv6MulticastGroup(hash, IPV6_MULTICAST_ALLOCATION_RANGE[0], IPV6_MULTICAST_ALLOCATION_RANGE[1], scope); } public static InetAddress getIPv6MulticastGroup(int hash, long[] lowIp, long[] highIp, long scope) throws UnknownHostException { hash = hash & 0x7fffffff; // delete sign byte[] ip = ArrayUtils.toByteArray(((lowIp[0] & SCOPE_MASK) | scope) | hash, lowIp[1] | hash); // Simple encoding of the hash into multicast group return InetAddress.getByAddress(ip); } public static int getMulticastPort(int 
hash) { hash = hash & 0x7fffffff; // delete sign int port = (hash % (0xF0000000-0xE1000000))+0xE1000000; return port>>>16; } /** Return IPv6 scope for given IPv6 address. */ public static long getIPv6Scope(InetAddress ip) { Long value = OSUtils.getLongProperty(H2O_SYSTEM_SCOPE_PARAM) != null ? OSUtils.getLongProperty(H2O_SYSTEM_SCOPE_PARAM) : OSUtils.getLongProperty(H2O_SYSTEM_SCOPE_PARAM, 16); if (value != null && ArrayUtils.equalsAny(value, SCOPE_IFACE_LOCAL, SCOPE_LINK_LOCAL, SCOPE_SITE_LOCAL, SCOPE_ORG_LOCAL, SCOPE_GLOBAL_LOCAL)) return value; if (ip.isLoopbackAddress()) return SCOPE_IFACE_LOCAL; if (ip.isLinkLocalAddress()) return SCOPE_LINK_LOCAL; if (ip.isSiteLocalAddress()) return SCOPE_SITE_LOCAL; return SCOPE_ORG_LOCAL; } public static boolean isUp(NetworkInterface iface) { try { return iface.isUp(); } catch (SocketException e) { return false; } } public static boolean isReachable(NetworkInterface iface, InetAddress address, int timeout) { try { return address.isReachable(iface, 0, timeout); } catch (IOException e) { return false; } } public static int getLocalIpPingTimeout() { String value = System.getProperty(H2O_SYSTEM_LOCAL_IP_PING_TIMEOUT, "150" /* ms */); try { return Integer.valueOf(value); } catch (NumberFormatException e) { return 150; } } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/OSUtils.java
package water.util; import java.lang.management.ManagementFactory; import javax.management.MBeanServer; import javax.management.ObjectName; public class OSUtils { /** Safe call to obtain size of total physical memory. * * <p>It is platform dependent and returns size of machine physical * memory in bytes</p> * * @return total size of machine physical memory in bytes or -1 if the attribute is not available. */ public static long getTotalPhysicalMemory() { try { MBeanServer mBeanServer = ManagementFactory.getPlatformMBeanServer(); Object attribute = mBeanServer.getAttribute(new ObjectName("java.lang","type","OperatingSystem"), "TotalPhysicalMemorySize"); return (Long) attribute; } catch (Exception e) { Log.err(e); return -1; } } public static Long getLongProperty(String name) { return getLongProperty(name, 10); } public static Long getLongProperty(String name, int radix) { String value = System.getProperty(name); try { return value == null ? null : longValueOf(value, radix); } catch (NumberFormatException nfe) { return null; } } public static long longValueOf(String value, int radix) { if (radix == 16 && value.startsWith("0x")) { return Long.valueOf(value.substring(2), radix); } else { return Long.valueOf(value, radix); } } public static String getOsName() { return System.getProperty("os.name"); } public static boolean isLinux() { return getOsName().toLowerCase().startsWith("linux"); } public static boolean isMac() { return getOsName().toLowerCase().startsWith("mac"); } public static boolean isWindows() { return getOsName().toLowerCase().startsWith("windows"); } public static boolean isWsl() { LinuxProcFileReader lpfr = new LinuxProcFileReader(); return lpfr.isWsl(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/Pair.java
package water.util; import java.util.ArrayList; import java.util.List; import water.util.Java7.Objects; /** Pair class with a clearer name than AbstractMap.SimpleEntry. */ public class Pair<X, Y> { private X x; private Y y; public Pair(X x, Y y) { this.x = x; this.y = y; } public X _1() { return x; } public Y _2() { return y; } @SuppressWarnings("unchecked") @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof Pair)) return false; Pair<X, Y> q = (Pair<X,Y>) o; return Objects.equals(x, q._1()) && Objects.equals(y, q._2()); } @Override public int hashCode() { return Objects.hashCode(x)*67 + Objects.hashCode(y); } @Override public String toString() { return "Pair(" + x + ", " + y + ')'; } static public <X,Y> List<Pair<X,Y>> product(X[] xs, Y[] ys) { List<Pair<X,Y>> out = new ArrayList<>(xs.length*ys.length); for (X x : xs) for (Y y : ys) out.add(new Pair<>(x,y)); return out; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/PojoUtils.java
package water.util; import water.*; import water.api.Schema; import water.api.SchemaServer; import water.api.schemas3.FrameV3; import water.api.schemas3.KeyV3; import water.exceptions.H2OIllegalArgumentException; import water.exceptions.H2ONotFoundArgumentException; import java.lang.reflect.Array; import java.lang.reflect.Field; import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.regex.Pattern; /** * POJO utilities which cover cases similar to but not the same as Apache Commons PojoUtils. */ public class PojoUtils { public enum FieldNaming { CONSISTENT { @Override String toDest(String origin) { return origin; } @Override String toOrigin(String dest) { return dest; } }, DEST_HAS_UNDERSCORES { @Override String toDest(String origin) { return "_" + origin; } @Override String toOrigin(String dest) { return dest.substring(1); } }, ORIGIN_HAS_UNDERSCORES { @Override String toDest(String origin) { return origin.substring(1); } @Override String toOrigin(String dest) { return "_" + dest; } }; /** * Return destination name based on origin name. * @param origin name of origin argument * @return return a name of destination argument. */ abstract String toDest(String origin); /** * Return name of origin parameter derived from name of origin parameter. * @param dest name of destination argument. * @return return a name of origin argument. */ abstract String toOrigin(String dest); } /** * Copy properties "of the same name" from one POJO to the other. If the fields are * named consistently (both sides have fields named "_foo" and/or "bar") this acts like * Apache Commons PojoUtils.copyProperties(). If one side has leading underscores and * the other does not then the names are conformed according to the field_naming * parameter. * * It is also able to map fields between external types like Schema to their corresponding * internal types. * * @param dest Destination POJO * @param origin Origin POJO * @param field_naming Are the fields named consistently, or does one side have underscores? */ public static void copyProperties(Object dest, Object origin, FieldNaming field_naming) { copyProperties(dest, origin, field_naming, null); } /** * Copy properties "of the same name" from one POJO to the other. If the fields are * named consistently (both sides have fields named "_foo" and/or "bar") this acts like * Apache Commons PojoUtils.copyProperties(). If one side has leading underscores and * the other does not then the names are conformed according to the field_naming * parameter. * * @param dest Destination POJO * @param origin Origin POJO * @param field_naming Are the fields named consistently, or does one side have underscores? * @param skip_fields Array of origin or destination field names to skip */ public static void copyProperties(Object dest, Object origin, FieldNaming field_naming, String[] skip_fields) { copyProperties(dest, origin, field_naming, skip_fields, null); } /** * Copy properties "of the same name" from one POJO to the other. If the fields are * named consistently (both sides have fields named "_foo" and/or "bar") this acts like * Apache Commons PojoUtils.copyProperties(). If one side has leading underscores and * the other does not then the names are conformed according to the field_naming * parameter. * * @param dest Destination POJO * @param origin Origin POJO * @param field_naming Are the fields named consistently, or does one side have underscores? 
* @param skip_fields Array of origin or destination field names to skip * @param only_fields Array of origin or destination field names to include; ones not in this list will be skipped */ public static void copyProperties(Object dest, Object origin, FieldNaming field_naming, String[] skip_fields, String[] only_fields) { if (null == dest || null == origin) return; Field[] dest_fields = Weaver.getWovenFields(dest .getClass()); Field[] orig_fields = Weaver.getWovenFields(origin.getClass()); for (Field orig_field : orig_fields) { String origin_name = orig_field.getName(); String dest_name = field_naming.toDest(origin_name); if (skip_fields != null && (ArrayUtils.contains(skip_fields, origin_name) || ArrayUtils.contains(skip_fields, dest_name))) continue; if (only_fields != null && ! (ArrayUtils.contains(only_fields, origin_name) || ArrayUtils.contains(only_fields, dest_name))) continue; try { Field dest_field = null; for( Field fd : dest_fields ) { if (fd.getName().equals(dest_name)) { dest_field = fd; break; } } if( dest_field != null ) { dest_field.setAccessible(true); orig_field.setAccessible(true); // Log.info("PojoUtils.copyProperties, origin field: " + orig_field + "; destination field: " + dest_field); if (null == orig_field.get(origin)) { // // Assigning null to dest. // dest_field.set(dest, null); } else if (dest_field.getType().isArray() && orig_field.getType().isArray() && (dest_field.getType().getComponentType() != orig_field.getType().getComponentType())) { // // Assigning an array to another array. // // You can't use reflection to set an int[] with an Integer[]. Argh. // TODO: other types of arrays. . . if (dest_field.getType().getComponentType().isAssignableFrom(orig_field.getType().getComponentType())) { // //Assigning an T[] to an U[] if T extends U // dest_field.set(dest, orig_field.get(origin)); } else if (dest_field.getType().getComponentType() == double.class && orig_field.getType().getComponentType() == Double.class) { // // Assigning an Double[] to an double[] // double[] copy = (double[]) orig_field.get(origin); dest_field.set(dest, copy); } else if (dest_field.getType().getComponentType() == Double.class && orig_field.getType().getComponentType() == double.class) { // // Assigning an double[] to an Double[] // Double[] copy = (Double[]) orig_field.get(origin); dest_field.set(dest, copy); } else if (dest_field.getType().getComponentType() == int.class && orig_field.getType().getComponentType() == Integer.class) { // // Assigning an Integer[] to an int[] // int[] copy = (int[]) orig_field.get(origin); dest_field.set(dest, copy); } else if (dest_field.getType().getComponentType() == Integer.class && orig_field.getType().getComponentType() == int.class) { // // Assigning an int[] to an Integer[] // Integer[] copy = (Integer[]) orig_field.get(origin); dest_field.set(dest, copy); } else if (Schema.class.isAssignableFrom(dest_field.getType().getComponentType()) && (Schema.getImplClass((Class<?extends Schema>)dest_field.getType().getComponentType())).isAssignableFrom(orig_field.getType().getComponentType())) { // // Assigning an array of impl fields to an array of schema fields, e.g. a DeepLearningParameters[] into a DeepLearningParametersV2[] // Class dest_component_class = dest_field.getType().getComponentType(); // NOTE: there can be a race on the source array, so shallow copy it. // If it has shrunk the elements might have dangling references. 
Iced[] orig_array = (Iced[]) orig_field.get(origin); int length = orig_array.length; Iced[] orig_array_copy = Arrays.copyOf(orig_array, length); // Will null pad if it has shrunk since calling length Schema[] translation = (Schema[]) Array.newInstance(dest_component_class, length); int version = ((Schema)dest).getSchemaVersion(); // Look up the schema for each element of the array; if not found fall back to the schema for the base class. for (int i = 0; i < length; i++) { Iced impl = orig_array_copy[i]; if (null == impl) { translation[i++] = null; // also can happen if the array shrank between .length and the copy } else { Schema s = null; try { s = SchemaServer.schema(version, impl); } catch (H2ONotFoundArgumentException e) { s = ((Schema) dest_field.getType().getComponentType().newInstance()); } translation[i] = s.fillFromImpl(impl); } } dest_field.set(dest, translation); } else if (Schema.class.isAssignableFrom(orig_field.getType().getComponentType()) && Iced.class.isAssignableFrom(dest_field.getType().getComponentType())) { // // Assigning an array of schema fields to an array of impl fields, e.g. a DeepLearningParametersV2[] into a DeepLearningParameters[] // // We can't check against the actual impl class I, because we can't instantiate the schema base classes to get the impl class from an instance: // dest_field.getType().getComponentType().isAssignableFrom(((Schema)f.getType().getComponentType().newInstance()).getImplClass())) { Class dest_component_class = dest_field.getType().getComponentType(); Schema[] orig_array = (Schema[]) orig_field.get(origin); int length = orig_array.length; Schema[] orig_array_copy = Arrays.copyOf(orig_array, length); Iced[] translation = (Iced[]) Array.newInstance(dest_component_class, length); for (int i = 0; i < length; i++) { Schema s = orig_array_copy[i]; translation[i] = s == null ? null : s.createAndFillImpl(); } dest_field.set(dest, translation); } else { throw H2O.fail("Don't know how to cast an array of: " + orig_field.getType().getComponentType() + " to an array of: " + dest_field.getType().getComponentType()); } // end of array handling } else if (dest_field.getType() == Key.class && Keyed.class.isAssignableFrom(orig_field.getType())) { // // Assigning a Keyed (e.g., a Frame or Model) to a Key. // dest_field.set(dest, ((Keyed) orig_field.get(origin))._key); } else if (orig_field.getType() == Key.class && Keyed.class.isAssignableFrom(dest_field.getType())) { // // Assigning a Key (for e.g., a Frame or Model) to a Keyed (e.g., a Frame or Model). // Value v = DKV.get((Key) orig_field.get(origin)); dest_field.set(dest, (null == v ? null : v.get())); } else if (KeyV3.class.isAssignableFrom(dest_field.getType()) && Keyed.class.isAssignableFrom(orig_field.getType())) { // // Assigning a Keyed (e.g., a Frame or Model) to a KeyV1. // dest_field.set(dest, KeyV3.make(((Class<? extends KeyV3>) dest_field.getType()), ((Keyed) orig_field.get(origin))._key)); } else if (KeyV3.class.isAssignableFrom(orig_field.getType()) && Keyed.class.isAssignableFrom(dest_field.getType())) { // // Assigning a KeyV1 (for e.g., a Frame or Model) to a Keyed (e.g., a Frame or Model). // KeyV3 k = (KeyV3)orig_field.get(origin); Value v = DKV.get(Key.make(k.name)); dest_field.set(dest, (null == v ? null : v.get())); } else if (KeyV3.class.isAssignableFrom(dest_field.getType()) && Key.class.isAssignableFrom(orig_field.getType())) { // // Assigning a Key to a KeyV1. // dest_field.set(dest, KeyV3.make(((Class<? 
extends KeyV3>) dest_field.getType()), (Key) orig_field.get(origin))); } else if (KeyV3.class.isAssignableFrom(orig_field.getType()) && Key.class.isAssignableFrom(dest_field.getType())) { // // Assigning a KeyV1 to a Key. // KeyV3 k = (KeyV3)orig_field.get(origin); dest_field.set(dest, (null == k.name ? null : Key.make(k.name))); } else if (dest_field.getType() == Pattern.class && String.class.isAssignableFrom(orig_field.getType())) { // // Assigning a String to a Pattern. // dest_field.set(dest, Pattern.compile((String) orig_field.get(origin))); } else if (orig_field.getType() == Pattern.class && String.class.isAssignableFrom(dest_field.getType())) { // // We are assigning a Pattern to a String. // dest_field.set(dest, orig_field.get(origin).toString()); } else if (dest_field.getType() == FrameV3.ColSpecifierV3.class && String.class.isAssignableFrom(orig_field.getType())) { // // Assigning a String to a ColSpecifier. Note that we currently support only the colname, not a frame name too. // dest_field.set(dest, new FrameV3.ColSpecifierV3((String) orig_field.get(origin))); } else if (orig_field.getType() == FrameV3.ColSpecifierV3.class && String.class.isAssignableFrom(dest_field.getType())) { // // We are assigning a ColSpecifierV2 to a String. The column_name gets copied. // dest_field.set(dest, ((FrameV3.ColSpecifierV3)orig_field.get(origin)).column_name); } else if (Enum.class.isAssignableFrom(dest_field.getType()) && String.class.isAssignableFrom(orig_field.getType())) { // // Assigning a String into an enum field. // Class<Enum> dest_class = (Class<Enum>)dest_field.getType(); dest_field.set(dest, EnumUtils.valueOfIgnoreCase(dest_class, (String) orig_field.get(origin))); } else if (Enum.class.isAssignableFrom(orig_field.getType()) && String.class.isAssignableFrom(dest_field.getType())) { // // Assigning an enum field into a String. // Object o = orig_field.get(origin); dest_field.set(dest, (o == null ? null : o.toString())); } else if (Schema.class.isAssignableFrom(dest_field.getType()) && Schema.getImplClass((Class<? extends Schema>) dest_field.getType()).isAssignableFrom(orig_field.getType())) { // // Assigning an impl field into a schema field, e.g. a DeepLearningParameters into a DeepLearningParametersV2. // int dest_version = Schema.extractVersionFromSchemaName(dest_field.getType().getSimpleName()); Iced ori = (Iced)orig_field.get(origin); dest_field.set(dest, SchemaServer.schema(dest_version, ori.getClass()).fillFromImpl(ori)); } else if (Schema.class.isAssignableFrom(orig_field.getType()) && Schema.getImplClass((Class<? extends Schema>)orig_field.getType()).isAssignableFrom(dest_field.getType())) { // // Assigning a schema field into an impl field, e.g. a DeepLearningParametersV2 into a DeepLearningParameters. // Schema s = ((Schema)orig_field.get(origin)); dest_field.set(dest, s.fillImpl(s.createImpl())); } else if ((Schema.class.isAssignableFrom(dest_field.getType()) && Key.class.isAssignableFrom(orig_field.getType()))) { // // Assigning an impl field fetched via a Key into a schema field, e.g. a DeepLearningParameters into a DeepLearningParametersV2. // Note that unlike the cases above we don't know the type of the impl class until we fetch in the body of the if. // Key origin_key = (Key) orig_field.get(origin); Value v = DKV.get(origin_key); if (null == v || null == v.get()) { dest_field.set(dest, null); } else { if (((Schema)dest_field.get(dest)).getImplClass().isAssignableFrom(v.get().getClass())) { //FIXME: dest_field.get(dest) can be null! 
Schema s = ((Schema)dest_field.get(dest)); dest_field.set(dest, SchemaServer.schema(s.getSchemaVersion(), s.getImplClass()).fillFromImpl(v.get())); } else { Log.err("Can't fill Schema of type: " + dest_field.getType() + " with value of type: " + v.getClass() + " fetched from Key: " + origin_key); dest_field.set(dest, null); } } } else if (Schema.class.isAssignableFrom(orig_field.getType()) && Keyed.class.isAssignableFrom(dest_field.getType())) { // // Assigning a schema field into a Key field, e.g. a DeepLearningV2 into a (DeepLearningParameters) key. // Schema s = ((Schema)orig_field.get(origin)); dest_field.set(dest, ((Keyed)s.fillImpl(s.createImpl()))._key); } else { // // Normal case: not doing any type conversion. // dest_field.set(dest, orig_field.get(origin)); } } } catch (IllegalAccessException e) { Log.err("Illegal access exception trying to copy field: " + origin_name + " of class: " + origin.getClass() + " to field: " + dest_name + " of class: " + dest.getClass()); } catch (InstantiationException e) { Log.err("Instantiation exception trying to copy field: " + origin_name + " of class: " + origin.getClass() + " to field: " + dest_name + " of class: " + dest.getClass()); } catch (Exception e) { Log.err(e.getClass().getCanonicalName() + " Exception: " + origin_name + " of class: " + origin.getClass() + " to field: " + dest_name + " of class: " + dest.getClass()); throw e; } } } /** * Null out fields in this schema and its children as specified by parameters __exclude_fields and __include_fields. * <b>NOTE: modifies the scheme tree in place.</b> */ public static void filterFields(Object o, String includes, String excludes) { if (null == o) return; if (null == excludes || "".equals(excludes)) return; if (null != includes) // not yet implemented throw new H2OIllegalArgumentException("_include_fields", "filterFields", includes); String[] exclude_paths = excludes.split(","); for (String path : exclude_paths) { // for each path. . . 
int slash = path.indexOf("/"); if (-1 == slash || slash == path.length()) { // NOTE: handles trailing slash // we've hit the end: null the field, if it exists Field f = ReflectionUtils.findNamedField(o, path); if (null == f) throw new H2OIllegalArgumentException("_exclude_fields", "filterFields", path); try { f.set(o, null); } catch (IllegalAccessException e) { throw new H2OIllegalArgumentException("_exclude_fields", "filterFields", path); } } // hit the end of the path else { String first = path.substring(0, slash); String rest = path.substring(slash + 1); Field f = ReflectionUtils.findNamedField(o, first); if (null == f) throw new H2OIllegalArgumentException("_exclude_fields", "filterFields", path); if (f.getType().isArray() && Object.class.isAssignableFrom(f.getType().getComponentType())) { // recurse into the children with the "rest" of the path try { Object[] field_value = (Object[]) f.get(o); for (Object child : field_value) { filterFields(child, null, rest); } } catch (IllegalAccessException e) { throw new H2OIllegalArgumentException("_exclude_fields", "filterFields", path); } } else if (Object.class.isAssignableFrom(f.getType())) { // recurse into the child with the "rest" of the path try { Object field_value = f.get(o); filterFields(field_value, null, rest); } catch (IllegalAccessException e) { throw new H2OIllegalArgumentException("_exclude_fields", "filterFields", path); } } else { throw new H2OIllegalArgumentException("_exclude_fields", "filterFields", path); } } // need to recurse } // foreach exclude_paths } public static boolean equals(Object a, Field fa, Object b, Field fb) { try { Object va = fa.get(a); Object vb = fb.get(b); if (va == null) return vb == null; Class clazz = va.getClass(); if (clazz.isArray()) { if (clazz.equals(vb.getClass())) { switch (fa.getType().getSimpleName()) { case "double[]": return Arrays.equals((double[]) va, (double[])vb); case "int[]": return Arrays.equals((int[]) va, (int[]) vb); case "byte[]": return Arrays.equals((byte[]) va, (byte[]) vb); case "char[]": return Arrays.equals((char[]) va, (char[]) vb); case "float[]": return Arrays.equals((float[]) va, (float[]) vb); case "long[]": return Arrays.equals((long[]) va, (long[]) vb); case "short[]": return Arrays.equals((short[]) va, (short[]) vb); case "boolean[]": return Arrays.equals((boolean[]) va, (boolean[]) vb); default: return Arrays.equals((Object[]) va, (Object[]) vb); } } return false; } return va.equals(vb); } catch (IllegalAccessException e) { Log.err(e); return false; } } public static void setField(Object o, String fieldName, Object value, FieldNaming objectNamingConvention) { String destFieldName = null; switch (objectNamingConvention) { case CONSISTENT: destFieldName = fieldName; break; case DEST_HAS_UNDERSCORES: if (fieldName.startsWith("_")) destFieldName = fieldName; else destFieldName = "_" + fieldName; break; case ORIGIN_HAS_UNDERSCORES: if (fieldName.startsWith("_")) destFieldName = fieldName.substring(1); else throw new IllegalArgumentException("Wrong combination of options!"); break; } setField(o, destFieldName, value); } /** * Set given field to given value on given object. 
* * @param o object to modify * @param fieldName name of field to set * @param value value to write the the field */ public static void setField(Object o, String fieldName, Object value) { try { Field f = PojoUtils.getFieldEvenInherited(o, fieldName); f.setAccessible(true); if (null == value) { f.set(o, null); return; } if (List.class.isAssignableFrom(value.getClass()) && f.getType().isArray() && f.getType().getComponentType() == String.class) { // convert ArrayList to array and try again setField(o, fieldName, ((List)value).toArray(new String[0])); return; } // If it doesn't know any better, Gson deserializes all numeric types as doubles. // If our target is an integer type, cast. if (f.getType().isPrimitive() && f.getType() != value.getClass()) { // Log.debug("type conversion"); if (f.getType() == int.class && (value.getClass() == Double.class || value.getClass() == Float.class)) f.set(o, ((Double) value).intValue()); else if (f.getType() == long.class && (value.getClass() == Double.class || value.getClass() == Float.class)) f.set(o, ((Double) value).longValue()); else if (f.getType() == int.class && value.getClass() == Integer.class) f.set(o, ((Integer) value).intValue()); else if (f.getType() == long.class && (value.getClass() == Long.class || value.getClass() == Integer.class)) f.set(o, ((Long) value).longValue()); else if (f.getType() == float.class && (value instanceof Number)) f.set(o, ((Number) value).floatValue()); else { // Double -> double, Integer -> int will work: f.set(o, value); } } else if (f.getType().isArray() && (value.getClass().isArray()) || value instanceof List) { final Class<?> valueComponentType; if (value instanceof List) { List<?> valueList = (List<?>) value; if (valueList.isEmpty()) { valueComponentType = f.getType().getComponentType(); value = java.lang.reflect.Array.newInstance(valueComponentType, 0); } else { value = valueList.toArray(); valueComponentType = valueList.get(0).getClass(); } } else { valueComponentType = value.getClass().getComponentType(); } if (f.getType().getComponentType() == valueComponentType) { // array of the same type on both sides f.set(o, value); } else if (f.getType().getComponentType() == int.class && valueComponentType == Integer.class) { Object[] valuesTyped = ((Object[])value); int[] valuesCast = new int[valuesTyped.length]; for (int i = 0; i < valuesTyped.length; i++) valuesCast[i] = ((Number) valuesTyped[i]).intValue(); f.set(o, valuesCast); } else if (f.getType().getComponentType() == long.class && valueComponentType == Long.class) { Object[] valuesTyped = ((Object[])value); long[] valuesCast = new long[valuesTyped.length]; for (int i = 0; i < valuesTyped.length; i++) valuesCast[i] = ((Number) valuesTyped[i]).longValue(); f.set(o, valuesCast); } else if (f.getType().getComponentType() == double.class && (valueComponentType == Float.class || valueComponentType == Double.class || valueComponentType == Integer.class || valueComponentType == Long.class)) { Object[] valuesTyped = ((Object[])value); double[] valuesCast = new double[valuesTyped.length]; for (int i = 0; i < valuesTyped.length; i++) valuesCast[i] = ((Number) valuesTyped[i]).doubleValue(); f.set(o, valuesCast); } else if (f.getType().getComponentType() == float.class && (valueComponentType == Float.class || valueComponentType == Double.class || valueComponentType == Integer.class || valueComponentType == Long.class)) { Object[] valuesTyped = ((Object[]) value); float[] valuesCast = new float[valuesTyped.length]; for (int i = 0; i < valuesTyped.length; i++) 
valuesCast[i] = ((Number) valuesTyped[i]).floatValue(); f.set(o, valuesCast); } else if(f.getType().getComponentType().isEnum()) { final Object[] valuesTyped = ((Object[]) value); Enum[] valuesCast = (Enum[]) Array.newInstance(f.getType().getComponentType(), valuesTyped.length); for (int i = 0; i < valuesTyped.length; i++) { String v = (String) valuesTyped[i]; Enum enu = EnumUtils.valueOfIgnoreCase((Class<Enum>)f.getType().getComponentType(), v) .orElseThrow(() -> new IllegalArgumentException("Field = " + fieldName + " element cannot be set to value = " + Arrays.toString(valuesTyped))); valuesCast[i] = enu; } f.set(o, valuesCast); } else if (Schema.class.isAssignableFrom(f.getType().getComponentType())) { Object[] valuesTyped = ((Object[])value); Schema[] valuesCast = (Schema[]) Array.newInstance(f.getType().getComponentType(), valuesTyped.length);; try { for (int i = 0; i < valuesTyped.length; i++) { Schema v = (Schema)f.getType().getComponentType().newInstance(); valuesCast[i] = v.fillFromAny(valuesTyped[i]); } f.set(o, valuesCast); } catch (InstantiationException e) { throw new IllegalArgumentException("Field = " + fieldName + " element cannot be set to value = " + value, e); } } else { throw new IllegalArgumentException("setField can't yet convert an array of: " + value.getClass().getComponentType() + " to an array of: " + f.getType().getComponentType()); } } else if (f.getType().isEnum() && value instanceof String){ final IllegalArgumentException fieldNotSettablException = new IllegalArgumentException("Field = " + fieldName + " cannot be set to value = " + value); Enum enu = EnumUtils.valueOfIgnoreCase((Class<Enum>)f.getType(), (String) value) .orElseThrow(() -> fieldNotSettablException); f.set(o, enu); } else if (! f.getType().isPrimitive() && ! f.getType().isAssignableFrom(value.getClass())) { // TODO: pull the auto-type-conversion stuff out of copyProperties so we don't have limited copy-paste code here throw new IllegalArgumentException("setField can't yet convert a: " + value.getClass() + " to a: " + f.getType()); } else { // not casting a primitive type f.set(o, value); } } catch (NoSuchFieldException e) { throw new IllegalArgumentException("Field " + fieldName + " not found!", e); } catch (IllegalAccessException e) { throw new IllegalArgumentException("Field=" + fieldName + " cannot be set to value=" + value, e); } } /** * Gets a public, protected or private Field of an object, even if it's inherited. Neither Class.getField nor * Class.getDeclaredField do this. NOTE: the caller must call f.setAccessible(true) if they want to make private * fields accessible. */ public static Field getFieldEvenInherited(Object o, String name) throws NoSuchFieldException, SecurityException { Class clazz = o.getClass(); while (clazz != Object.class) { try { return clazz.getDeclaredField(name); } catch (Exception e) { // ignore } clazz = clazz.getSuperclass(); } throw new NoSuchFieldException("Failed to find field: " + name + " in object: " + o); } /** * Returns field value. * * @param o object to read field value from * @param name name of field to read * @return returns field value * * @throws java.lang.IllegalArgumentException when o is <code>null</code>, or field is not found, * or field cannot be read. 
*/ public static Object getFieldValue(Object o, String name, FieldNaming fieldNaming) { if (o == null) throw new IllegalArgumentException("Cannot get the field from null object!"); String destName = fieldNaming.toDest(name); try { Field f = PojoUtils.getFieldEvenInherited(o, destName); // failing with fields declared in superclasses return f.get(o); } catch (NoSuchFieldException e) { throw new IllegalArgumentException("Field not found: '" + name + "/" + destName + "' on object " + o); } catch (IllegalAccessException e) { throw new IllegalArgumentException("Cannot get value of the field: '" + name + "/" + destName + "' on object " + o); } } /** * Take a object which potentially has default values for some fields and set * only those fields which are in the supplied JSON string. NOTE: Doesn't handle array fields yet. */ public static <T> T fillFromJson(T o, String json) { Map<String, Object> setFields = JSONUtils.parse(json); return fillFromMap(o, setFields); } /** * Fill the fields of an Object from the corresponding fields in a Map. * @see #fillFromJson(Object, String) */ private static <T> T fillFromMap(T o, Map<String, Object> setFields) { for (String key : setFields.keySet()) { Field f; try { f = PojoUtils.getFieldEvenInherited(o, key); f.setAccessible(true); } catch (NoSuchFieldException e) { throw new IllegalArgumentException("Field not found: '" + key + "' on object " + o); } Object value = setFields.get(key); if (JSONValue.class == f.getType()) { setField(o, key, JSONValue.fromValue(value)); } else if (value instanceof Map) { // handle nested objects try { // In some cases, the target object has children already (e.g., defaults), while in other cases it doesn't. if (null == f.get(o)) f.set(o, f.getType().newInstance()); fillFromMap(f.get(o), (Map<String, Object>) value); } catch (IllegalAccessException e) { throw new IllegalArgumentException("Cannot get value of the field: '" + key + "' on object " + o); } catch (InstantiationException e) { try { throw new IllegalArgumentException("Cannot create new child object of type: " + PojoUtils.getFieldEvenInherited(o, key).getClass().getCanonicalName() + " for field: '" + key + "' on object " + o); } catch (NoSuchFieldException ee) { // Can't happen: we've already checked for this. throw new IllegalArgumentException("Cannot create new child object of type for field: '" + key + "' on object " + o); } } } else if (value instanceof List) { List values = (List)value; if (f.getType().isArray() && Schema.class.isAssignableFrom(f.getType().getComponentType()) && values.size() > 0 && values.stream().allMatch(Map.class::isInstance)) { // nested json filling Schema[] valuesDest = (Schema[]) Array.newInstance(f.getType().getComponentType(), values.size()); try { int i = 0; for (Map<String, Object> valueMap : (List<Map<String, Object>>)values) { Schema v = (Schema) f.getType().getComponentType().newInstance(); valuesDest[i++] = fillFromMap(v, valueMap); } f.set(o, valuesDest); } catch (IllegalAccessException e) { throw new IllegalArgumentException("Cannot get value of the field: '" + key + "' on object " + o); } catch (InstantiationException e) { throw new IllegalArgumentException("Field = " + key + " element cannot be set to value = " + value, e); } } else { setField(o, key, value); } } else { // Scalar or String, possibly with an automagic type conversion as copyProperties does. 
if (f.getType().isAssignableFrom(FrameV3.ColSpecifierV3.class)) { setField(o, key, new FrameV3.ColSpecifierV3((String) value)); } else if (KeyV3.class.isAssignableFrom(f.getType())) { setField(o, key, KeyV3.make((Class<? extends KeyV3>)f.getType(), Key.make((String) value))); } else { setField(o, key, value); } } // else not a nested object } // for all fields in the map return o; } /** * Helper for Arrays.equals(). */ public static boolean arraysEquals(Object a, Object b) { if (a == null || ! a.getClass().isArray()) throw new H2OIllegalArgumentException("a", "arraysEquals", a); if (b == null || ! b.getClass().isArray()) throw new H2OIllegalArgumentException("b", "arraysEquals", b); if (a.getClass().getComponentType() != b.getClass().getComponentType()) throw new H2OIllegalArgumentException("Can't compare arrays of different types: " + a.getClass().getComponentType() + " and: " + b.getClass().getComponentType()); if (a.getClass().getComponentType() == boolean.class) return Arrays.equals((boolean[])a, (boolean[])b); if (a.getClass().getComponentType() == Boolean.class) return Arrays.equals((Boolean[])a, (Boolean[])b); if (a.getClass().getComponentType() == char.class) return Arrays.equals((char[])a, (char[])b); if (a.getClass().getComponentType() == short.class) return Arrays.equals((short[])a, (short[])b); if (a.getClass().getComponentType() == Short.class) return Arrays.equals((Short[])a, (Short[])b); if (a.getClass().getComponentType() == int.class) return Arrays.equals((int[])a, (int[])b); if (a.getClass().getComponentType() == Integer.class) return Arrays.equals((Integer[])a, (Integer[])b); if (a.getClass().getComponentType() == float.class) return Arrays.equals((float[])a, (float[])b); if (a.getClass().getComponentType() == Float.class) return Arrays.equals((Float[])a, (Float[])b); if (a.getClass().getComponentType() == double.class) return Arrays.equals((double[])a, (double[])b); if (a.getClass().getComponentType() == Double.class) return Arrays.equals((Double[])a, (Double[])b); return Arrays.deepEquals((Object[])a, (Object[])b); } public static String toJavaDoubleArray(double[] array) { if (array == null) { return "null"; } SB sb = new SB(); sb.p("new double[] {"); for (int i = 0; i < array.length; i++) { sb.p(" "); sb.p(array[i]); if (i < array.length - 1) sb.p(","); } sb.p("}"); return sb.getContent(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/PrettyPrint.java
package water.util; import org.joda.time.DurationFieldType; import org.joda.time.Period; import org.joda.time.PeriodType; import org.joda.time.format.PeriodFormat; import org.joda.time.format.PeriodFormatter; import water.fvec.C1SChunk; import water.fvec.C2SChunk; import water.fvec.C4SChunk; import water.fvec.Chunk; import static java.lang.Double.isNaN; import java.util.ArrayList; import java.util.Date; import java.util.concurrent.TimeUnit; public class PrettyPrint { public static String msecs(long msecs, boolean truncate) { final long hr = TimeUnit.MILLISECONDS.toHours (msecs); msecs -= TimeUnit.HOURS .toMillis(hr); final long min = TimeUnit.MILLISECONDS.toMinutes(msecs); msecs -= TimeUnit.MINUTES.toMillis(min); final long sec = TimeUnit.MILLISECONDS.toSeconds(msecs); msecs -= TimeUnit.SECONDS.toMillis(sec); final long ms = TimeUnit.MILLISECONDS.toMillis (msecs); if( !truncate ) return String.format("%02d:%02d:%02d.%03d", hr, min, sec, ms); if( hr != 0 ) return String.format("%2d:%02d:%02d.%03d", hr, min, sec, ms); if( min != 0 ) return String.format("%2d min %2d.%03d sec", min, sec, ms); return String.format("%2d.%03d sec", sec, ms); } public static String usecs(long usecs) { final long hr = TimeUnit.MICROSECONDS.toHours (usecs); usecs -= TimeUnit.HOURS .toMicros(hr); final long min = TimeUnit.MICROSECONDS.toMinutes(usecs); usecs -= TimeUnit.MINUTES.toMicros(min); final long sec = TimeUnit.MICROSECONDS.toSeconds(usecs); usecs -= TimeUnit.SECONDS.toMicros(sec); final long ms = TimeUnit.MICROSECONDS.toMillis(usecs); usecs -= TimeUnit.MILLISECONDS.toMicros(ms); if( hr != 0 ) return String.format("%2d:%02d:%02d.%03d", hr, min, sec, ms); if( min != 0 ) return String.format("%2d min %2d.%03d sec", min, sec, ms); if( sec != 0 ) return String.format("%2d.%03d sec", sec, ms); if( ms != 0 ) return String.format("%3d.%03d msec", ms, usecs); return String.format("%3d usec", usecs); } public static String toAge(Date from, Date to) { if (from == null || to == null) return "N/A"; final Period period = new Period(from.getTime(), to.getTime()); DurationFieldType[] dtf = new ArrayList<DurationFieldType>() {{ add(DurationFieldType.years()); add(DurationFieldType.months()); add(DurationFieldType.days()); if (period.getYears() == 0 && period.getMonths() == 0 && period.getDays() == 0) { add(DurationFieldType.hours()); add(DurationFieldType.minutes()); } }}.toArray(new DurationFieldType[0]); PeriodFormatter pf = PeriodFormat.getDefault(); return pf.print(period.normalizedStandard(PeriodType.forFields(dtf))); } // Return X such that (bytes < 1L<<(X*10)) static int byteScale(long bytes) { if (bytes<0) return -1; for( int i=0; i<6; i++ ) if( bytes < 1L<<(i*10) ) return i; return 6; } static double bytesScaled(long bytes, int scale) { if( scale <= 0 ) return bytes; return bytes / (double)(1L<<((scale-1)*10)); } static final String[] SCALE = new String[] {"N/A (-ve)","Zero ","%4.0f B","%.1f KB","%.1f MB","%.2f GB","%.3f TB","%.3f PB"}; public static String bytes(long bytes) { return bytes(bytes,byteScale(bytes)); } static String bytes(long bytes, int scale) { return String.format(SCALE[scale+1],bytesScaled(bytes,scale)); } public static String bytesPerSecond(long bytes) { if( bytes < 0 ) return "N/A"; return bytes(bytes)+"/S"; } static double [] powers10 = new double[]{ 0.0000000001, 0.000000001, 0.00000001, 0.0000001, 0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, 10000000.0, 100000000.0, 1000000000.0, 10000000000.0, }; static public long [] powers10i = new 
long[]{ 1l, 10l, 100l, 1000l, 10000l, 100000l, 1000000l, 10000000l, 100000000l, 1000000000l, 10000000000l, 100000000000l, 1000000000000l, 10000000000000l, 100000000000000l, 1000000000000000l, 10000000000000000l, 100000000000000000l, 1000000000000000000l, }; public static double pow10(long m, int e){ return e < 0?m/pow10(-e):m*pow10(e); } private static double pow10(int exp){ return ((exp >= -10 && exp <= 10)?powers10[exp+10]:Math.pow(10, exp)); } public static long pow10i(int exp){ return ((exp > -1 && exp < 19)?powers10i[exp]:(long)Math.pow(10, exp)); } public static boolean fitsIntoInt(double d) { return Math.abs((int)d - d) < 1e-8; } // About as clumsy and random as a blaster... public static String UUID( long lo, long hi ) { long lo0 = (lo>>32)&0xFFFFFFFFL; long lo1 = (lo>>16)&0xFFFFL; long lo2 = (lo>> 0)&0xFFFFL; long hi0 = (hi>>48)&0xFFFFL; long hi1 = (hi>> 0)&0xFFFFFFFFFFFFL; return String.format("%08X-%04X-%04X-%04X-%012X",lo0,lo1,lo2,hi0,hi1); } public static String uuid(java.util.UUID uuid) { return uuid == null ? "(N/A)" : UUID(uuid.getLeastSignificantBits(), uuid.getMostSignificantBits()); } public static String number(Chunk chk, double d, int precision) { long l = (long)d; if( (double)l == d ) return Long.toString(l); if( precision > 0 ) return x2(d,PrettyPrint.pow10(-precision)); Class chunkClass = chk.getClass(); if( chunkClass == C1SChunk.class ) return x2(d,((C1SChunk)chk).scale()); if( chunkClass == C2SChunk.class ) return x2(d,((C2SChunk)chk).scale()); if( chunkClass == C4SChunk.class ) return x2(d,((C4SChunk)chk).scale()); return Double.toString(d); } private static String x2( double d, double scale ) { String s = Double.toString(d); // Double math roundoff error means sometimes we get very long trailing // strings of junk 0's with 1 digit at the end... when we *know* the data // has only "scale" digits. Chop back to actual digits int ex = (int)Math.log10(scale); int x = s.indexOf('.'); int y = x+1+(-ex); if( x != -1 && y < s.length() ) s = s.substring(0,x+1+(-ex)); while( s.charAt(s.length()-1)=='0' ) s = s.substring(0,s.length()-1); return s; } public static String formatPct(double pct) { String s = "N/A"; if( !isNaN(pct) ) s = String.format("%5.2f %%", 100 * pct); return s; } /** * This method takes a number, and returns the * string form of the number with the proper * ordinal indicator attached (e.g. 1->1st, and 22->22nd) * @param i - number to have ordinal indicator attached * @return string form of number along with ordinal indicator as a suffix */ public static String withOrdinalIndicator(long i) { String ord; // Grab second to last digit int d = (int) (Math.abs(i) / Math.pow(10, 1)) % 10; if (d == 1) ord = "th"; //teen values all end in "th" else { // not a weird teen number d = (int) (Math.abs(i) / Math.pow(10, 0)) % 10; switch (d) { case 1: ord = "st"; break; case 2: ord = "nd"; break; case 3: ord = "rd"; break; default: ord = "th"; } } return i+ord; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/ProfileCollectorTask.java
package water.util; import water.H2O; import water.Iced; import water.MRTask; import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.TreeMap; public class ProfileCollectorTask extends MRTask<ProfileCollectorTask> { // helper class to store per-node profiles public static class NodeProfile extends Iced { NodeProfile(int len) { stacktraces = new String[len]; counts = new int[len]; } public String node_name; public long timestamp; public String[] stacktraces; public int[] counts; } public ProfileCollectorTask(int stack_depth) { super(H2O.GUI_PRIORITY); _stack_depth = stack_depth; } // input public final int _stack_depth; // output public NodeProfile[] _result; @Override public void reduce(ProfileCollectorTask that) { for (int i=0; i<_result.length; ++i) if (_result[i] == null) _result[i] = that._result[i]; } /** * This runs on each node in the cluster. */ @Override public void setupLocal() { int idx = H2O.SELF.index(); _result = new NodeProfile[H2O.CLOUD.size()]; Map<String, Integer> countedStackTraces = new HashMap<>(); final int repeats = 100; for (int i=0; i<repeats; ++i) { Map<Thread, StackTraceElement[]> allStackTraces = Thread.getAllStackTraces(); for (Map.Entry<Thread, StackTraceElement[]> el : allStackTraces.entrySet()) { StringBuilder sb = new StringBuilder(); int j=0; for (StackTraceElement ste : el.getValue()) { String val = ste.toString(); // filter out unimportant stuff if( j==0 && ( val.equals("sun.misc.Unsafe.park(Native Method)") || val.equals("java.lang.Object.wait(Native Method)") || val.equals("java.lang.Thread.sleep(Native Method)") || val.equals("java.lang.Thread.yield(Native Method)") || val.equals("java.net.PlainSocketImpl.socketAccept(Native Method)") || val.equals("sun.nio.ch.ServerSocketChannelImpl.accept0(Native Method)") || val.equals("sun.nio.ch.DatagramChannelImpl.receive0(Native Method)") || val.equals("java.lang.Thread.dumpThreads(Native Method)") ) ) { break; } sb.append(ste.toString()); sb.append("\n"); j++; if (j==_stack_depth) break; } String st = sb.toString(); boolean found = false; for (Map.Entry<String, Integer> entry : countedStackTraces.entrySet()) { if (entry.getKey().equals(st)) { entry.setValue(entry.getValue() + 1); found = true; break; } } if (!found) countedStackTraces.put(st, 1); } try { Thread.sleep(1); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } } int i=0; _result[idx] = new NodeProfile(countedStackTraces.size()); _result[idx].node_name = H2O.getIpPortString(); _result[idx].timestamp = System.currentTimeMillis(); for (Map.Entry<String, Integer> entry : countedStackTraces.entrySet()) { _result[idx].stacktraces[i] = entry.getKey(); _result[idx].counts[i] = entry.getValue(); i++; } // sort it Map<Integer, String> sorted = new TreeMap<>(Collections.reverseOrder()); for (int j=0; j<_result[idx].counts.length; ++j) { if (_result[idx].stacktraces[j] != null && _result[idx].stacktraces[j].length() > 0) sorted.put(_result[idx].counts[j], _result[idx].stacktraces[j]); } // overwrite results String[] sorted_stacktraces = new String[sorted.entrySet().size()]; int[] sorted_counts = new int[sorted.entrySet().size()]; i=0; for (Map.Entry<Integer, String> e : sorted.entrySet()) { sorted_stacktraces[i] = e.getValue(); sorted_counts[i] = e.getKey(); i++; } _result[idx].stacktraces = sorted_stacktraces; _result[idx].counts = sorted_counts; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/RString.java
package water.util; import java.io.IOException; import java.net.URLEncoder; import java.util.*; import java.util.Map.Entry; import water.Key; import water.H2O; /** * A replaceable string that allows very easy and simple replacements. * * %placeholder is normally inserted * * %$placeholder is inserted in URL encoding for UTF-8 charset. This should be * used for all hrefs. * */ public class RString { // A placeholder information with replcement group and start and end labels. private static class Placeholder { LabelledStringList.Label start; LabelledStringList.Label end; RString group; // Creates new placeholder private Placeholder(LabelledStringList.Label start, LabelledStringList.Label end) { this.start = start; this.end = end; this.group = null; } // Creates new placeholder for replacement group private Placeholder(LabelledStringList.Label start, LabelledStringList.Label end, String from) { this.start = start; this.end = end; this.group = new RString(from, this); } } // Placeholders MMHashMap<String, Placeholder> _placeholders; // Parts of the final string (replacements and originals together). LabelledStringList _parts; // Parent placeholder if the RString is a replacement group. Placeholder _parent; // Passes only valid placeholder name characters static private boolean isIdentChar(char x) { return (x == '$') || ((x >= 'a') && (x <= 'z')) || ((x >= 'A') && (x <= 'Z')) || ((x >= '0') && (x <= '9')) || (x == '_'); } // Creates a string that is itself a replacement group. private RString(final String from, Placeholder parent) { this(from); _parent = parent; } // Creates the RString from given normal string. The placeholders must begin // with % sign and if the placeholder name is followed by { }, the placeholder // is treated as a replacement group. Replacement groups cannot be tested. // Only letters, numbers and underscore can form a placeholder name. In the // constructor the string is parsed into parts and placeholders so that all // replacements in the future are very quick (hashmap lookup in fact). public RString(final String from) { _parts = new LabelledStringList(); _placeholders = new MMHashMap<>(); LabelledStringList.Label cur = _parts.begin(); int start = 0; int end = 0; while( true ) { start = from.indexOf("%", end); if( start == -1 ) { cur.insertAndAdvance(from.substring(end, from.length())); break; } ++start; if( start == from.length() ) { throw new ArrayIndexOutOfBoundsException(); } if( from.charAt(start) == '%' ) { cur.insertAndAdvance(from.substring(end, start)); end = start + 1; } else { cur.insertAndAdvance(from.substring(end, start - 1)); end = start; while( (end < from.length()) && (isIdentChar(from.charAt(end))) ) { ++end; } String pname = from.substring(start, end); if( (end == from.length()) || (from.charAt(end) != '{') ) { // it is a normal placeholder _placeholders.put2(pname, new Placeholder(cur.clone(), cur.clone())); } else { // it is another RString start = end + 1; end = from.indexOf("}", end); if( end == -1 ) { throw new ArrayIndexOutOfBoundsException("Missing } after replacement group"); } _placeholders.put2(pname, new Placeholder(cur.clone(), cur.clone(), from.substring(start, end))); ++end; } } } } // Returns the string with all replaced material. @Override public String toString() { return _parts.toString(); } // Removes all replacements from the string (keeps the placeholders so that // they can be used again. 
private void clear() { //for( Placeholder p : _placeholders.values() ) { // p.start.removeTill(p.end); //} throw H2O.unimpl(); } public void replace(String what, Key key) { replace(what, key.user_allowed() ? key.toString() : "<code>"+key.toString()+"</code>"); } // Replaces the given placeholder with an object. On a single placeholder, // multiple replaces can be called in which case they are appended one after // another in order. public void replace(String what, Object with) { if (what.charAt(0)=='$') throw new RuntimeException("$ is now control char that denotes URL encoding!"); for (Placeholder p : _placeholders.get(what)) p.end.insertAndAdvance(with.toString()); ArrayList<Placeholder> ar = _placeholders.get("$"+what); if( ar == null ) return; for (Placeholder p : ar) try { p.end.insertAndAdvance(URLEncoder.encode(with.toString(),"UTF-8")); } catch (IOException e) { p.end.insertAndAdvance(e.toString()); } } // Returns a replacement group of the given name and clears it so that it // can be filled again. private RString restartGroup(String what) { List<Placeholder> all = _placeholders.get(what); assert all.size() == 1; Placeholder result = all.get(0); if( result.group == null ) { throw new NoSuchElementException("Element " + what + " is not a group."); } result.group.clear(); return result.group; } // If the RString itself is a replacement group, adds its contents to the // placeholder. private void append() { if( _parent == null ) { throw new UnsupportedOperationException("Cannot append if no parent is specified."); } _parent.end.insertAndAdvance(toString()); } private static class MMHashMap<K,V> extends HashMap<K,ArrayList<V>> { void put2( K key, V val ) { ArrayList<V> ar = get(key); if( ar==null ) put(key,ar = new ArrayList<>()); ar.add(val); } } } /** * List that has labels to it (something like copyable iterators) and some very * basic functionality for it. * * Since it is not a private class only the things we require are filled in. The * labels do not expect or deal with improper use, so make sure you know what * you are doing when using directly this class. */ class LabelledStringList { // Inner item of the list, single linked private static class Item { String value; Item next; Item(String value, Item next) { this.value = value; this.next = next; } } // Label to the list, which acts as a restricted form of an iterator. Notably // a label can be used to add items in the middle of the list and also to // delete all items in between two labels. class Label { // element before the label Item _prev; // Creates the label from given inner list item so that the label points // right after it. If null, label points at the very beginnig of the list. Label(Item prev) { _prev = prev; } // Creates a new copy of the label that points to the same place @Override protected Label clone() { return new Label(_prev); } // Inserts new string after the label private void insert(String value) { if( _prev == null ) { _begin = new Item(value, _begin); } else { _prev.next = new Item(value, _prev.next); } ++_noOfElements; _length += value.length(); } // Inserts new string after the label and then advances the label. Thus in // theory inserting before the label. void insertAndAdvance(String value) { insert(value); if( _prev == null ) { _prev = _begin; } else { _prev = _prev.next; } } // Removes the element after the label. 
private void remove() throws NoSuchElementException { if( _prev == null ) { if( _begin == null ) { throw new NoSuchElementException(); } _length -= _begin.value.length(); _begin = _begin.next; } else { if( _prev.next == null ) { throw new NoSuchElementException(); } _length -= _prev.next.value.length(); _prev.next = _prev.next.next; } --_noOfElements; } // Removes all elements between the label and the other label. The other // label must come after the first label, otherwise everything after the // label will be deleted. private void removeTill(Label other) { if( _prev == null ) { if( other._prev == null ) { return; } while( ((_begin != null) && (_begin.next != other._prev.next)) ) { _length -= _begin.value.length(); _begin = _begin.next; --_noOfElements; } } else { if( other._prev == null ) { clear(); _prev = null; } else { Item end = other._prev.next; while( (_prev.next != null) && (_prev.next != end) ) { remove(); } } } other._prev = _prev; } } // first item private Item _begin; // length in characters of the total stored string private int _length; // number of String elemets stored private int _noOfElements; // Creates an empty string list LabelledStringList() { _length = 0; _noOfElements = 0; } // Returns a label to the first item Label begin() { return new Label(null); } // Returns the number of elements stored in the list private int length() { return _noOfElements; } // Clears all elements in the list (all labels should be cleared by the // user when calling this method). private void clear() { _begin = null; _length = 0; _noOfElements = 0; } // Concatenates all parts of the string and returns them as single string @Override public String toString() { StringBuilder s = new StringBuilder(_length); Item i = _begin; while( i != null ) { s.append(i.value); i = i.next; } return s.toString(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/RandomBase.java
package water.util; import java.util.Random; import java.util.stream.DoubleStream; import java.util.stream.IntStream; import java.util.stream.LongStream; public class RandomBase extends Random { protected RandomBase() { super(); } protected RandomBase(long seed) { super(seed); } /** * Returns a pseudorandom, uniformly distributed value * between 0 (inclusive) and the specified value (exclusive). * * @see jsr166y.ThreadLocalRandom#nextLong(long) * * @param n the bound on the random number to be returned. Must be * positive. * @return the next value * @throws IllegalArgumentException if n is not positive */ public long nextLong(long n) { if (n <= 0) throw new IllegalArgumentException("n must be positive"); // Divide n by two until small enough for nextInt. On each // iteration (at most 31 of them but usually much less), // randomly choose both whether to include high bit in result // (offset) and whether to continue with the lower vs upper // half (which makes a difference only if odd). long offset = 0; while (n >= Integer.MAX_VALUE) { int bits = next(2); long half = n >>> 1; long nextn = ((bits & 2) == 0) ? half : n - half; if ((bits & 1) == 0) offset += n - nextn; n = nextn; } return offset + nextInt((int) n); } @Override public final IntStream ints(long streamSize) { throw new UnsupportedOperationException("Please avoid using Stream API on Random - it introduces different behavior on different Java versions"); } @Override public final IntStream ints() { throw new UnsupportedOperationException("Please avoid using Stream API on Random - it introduces different behavior on different Java versions"); } @Override public final IntStream ints(long streamSize, int randomNumberOrigin, int randomNumberBound) { throw new UnsupportedOperationException("Please avoid using Stream API on Random - it introduces different behavior on different Java versions"); } @Override public final IntStream ints(int randomNumberOrigin, int randomNumberBound) { throw new UnsupportedOperationException("Please avoid using Stream API on Random - it introduces different behavior on different Java versions"); } @Override public final LongStream longs(long streamSize) { throw new UnsupportedOperationException("Please avoid using Stream API on Random - it introduces different behavior on different Java versions"); } @Override public final LongStream longs() { throw new UnsupportedOperationException("Please avoid using Stream API on Random - it introduces different behavior on different Java versions"); } @Override public final LongStream longs(long streamSize, long randomNumberOrigin, long randomNumberBound) { throw new UnsupportedOperationException("Please avoid using Stream API on Random - it introduces different behavior on different Java versions"); } @Override public final LongStream longs(long randomNumberOrigin, long randomNumberBound) { throw new UnsupportedOperationException("Please avoid using Stream API on Random - it introduces different behavior on different Java versions"); } @Override public final DoubleStream doubles(long streamSize) { throw new UnsupportedOperationException("Please avoid using Stream API on Random - it introduces different behavior on different Java versions"); } @Override public final DoubleStream doubles() { throw new UnsupportedOperationException("Please avoid using Stream API on Random - it introduces different behavior on different Java versions"); } @Override public final DoubleStream doubles(long streamSize, double randomNumberOrigin, double randomNumberBound) { throw new 
UnsupportedOperationException("Please avoid using Stream API on Random - it introduces different behavior on different Java versions"); } @Override public final DoubleStream doubles(double randomNumberOrigin, double randomNumberBound) { throw new UnsupportedOperationException("Please avoid using Stream API on Random - it introduces different behavior on different Java versions"); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/RandomUtils.java
package water.util; import water.H2O; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantLock; public class RandomUtils { public enum RNGType { PCGRNG, MersenneTwisterRNG, JavaRNG, XorShiftRNG } private static RNGType _rngType = RNGType.PCGRNG; /* Returns the configured random generator */ public static RandomBase getRNG(long... seed) { switch(_rngType) { case JavaRNG: return new H2ORandomRNG(seed[0]); case XorShiftRNG: return new XorShiftRNG (seed[0]); case PCGRNG: return new PCGRNG (seed[0],seed.length > 1 ? seed[1] : 1); case MersenneTwisterRNG: // Do not copy the seeds - use them, and initialize the first two ints by // seeds based given argument. The call is locked, and also // MersenneTwisterRNG will just copy the seeds into its datastructures return new MersenneTwisterRNG(ArrayUtils.unpackInts(seed)); } throw H2O.fail(); } // Converted to Java from the C /* * PCG Random Number Generation for C. * * Copyright 2014 Melissa O'Neill <oneill@pcg-random.org> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * For additional information about the PCG random number generation scheme, * including its license and other licensing options, visit * * http://www.pcg-random.org */ public static class PCGRNG extends RandomBase { private long _state; // Random state private long _inc; // Fixed sequence, always odd // Seed the rng. Specified in two parts, state initializer and a sequence // selection constant (a.k.a. stream id) public PCGRNG(long seed, long seq) { //need to call a super-class constructor - ugly by-product of inheritance //required for reproducibility - super() would call time-based setSeed() and modify _state to non-zero super(0); assert(_state == 0); _inc = (seq<<1)|1; nextInt(); _state += seed; nextInt(); } @Override public synchronized void setSeed(long seed) { _state = 0; nextInt(); _state += seed; nextInt(); } // CNC: PCG expects the output to be an *unsigned* int which Java does not // support. Instead we're returning a signed int, and the caller has to // figure out the sign-extension. @Override public int nextInt() { long oldstate = _state; _state = oldstate * 6364136223846793005L + _inc; int xorshifted = (int)(((oldstate >>> 18) ^ oldstate) >>> 27); int rot = (int)(oldstate >>> 59); return (xorshifted >>> rot) | (xorshifted << ((-rot) & 31)); } @Override public long nextLong() { return (((long)nextInt())<<32) | (((long)nextInt())&0xFFFFFFFFL); } @Override protected int next(int bits) { long nextseed = nextLong(); return (int) (nextseed & ((1L << bits) - 1)); } // Generate a uniformly distributed number, r, where 0 <= r < bound @Override public int nextInt(int bound) { // To avoid bias, we need to make the range of the RNG a multiple of // bound, which we do by dropping output less than a threshold. A naive // scheme to calculate the threshold would be to do // // uint32_t threshold = 0x100000000ull % bound; // // but 64-bit div/mod is slower than 32-bit div/mod (especially on 32-bit // platforms). 
In essence, we do // // uint32_t threshold = (0x100000000ull-bound) % bound; // // because this version will calculate the same modulus, but the LHS // value is less than 2^32. long threshold = (-(long)bound % (long)bound)&0xFFFFFFFFL; // Uniformity guarantees that this loop will terminate. In practice, it // should usually terminate quickly; on average (assuming all bounds are // equally likely), 82.25% of the time, we can expect it to require just // one iteration. In the worst case, someone passes a bound of 2^31 + 1 // (i.e., 2147483649), which invalidates almost 50% of the range. In // practice, bounds are typically small and only a tiny amount of the // range is eliminated. for (;;) { long r = ((long)nextInt()) & 0xFFFFFFFFL; if (r >= threshold) return (int)(r % bound); } } } /** Stock Java RNG, but force the initial seed to have no zeros in either the * low 32 or high 32 bits - leading to well known really bad behavior. */ public static class H2ORandomRNG extends RandomBase { public H2ORandomRNG(long seed) { super(); if ((seed >>> 32) < 0x0000ffffL) seed |= 0x5b93000000000000L; if (((seed << 32) >>> 32) < 0x0000ffffL) seed |= 0xdb910000L; setSeed(seed); } } /** Simple XorShiftRNG. * Note: According to RF benchmarks it does not provide so accurate results * as {@link java.util.Random}, however it can be used as an alternative. */ public static class XorShiftRNG extends RandomBase { private AtomicLong _seed; public XorShiftRNG (long seed) { _seed = new AtomicLong(seed); } @Override public long nextLong() { long oldseed, nextseed; AtomicLong seed = this._seed; do { oldseed = seed.get(); nextseed = xorShift(oldseed); } while (!seed.compareAndSet(oldseed, nextseed)); return nextseed; } @Override public int nextInt() { return nextInt(Integer.MAX_VALUE); } @Override public int nextInt(int n) { int r = (int) (nextLong() % n); return r > 0 ? r : -r; } @Override protected int next(int bits) { long nextseed = nextLong(); return (int) (nextseed & ((1L << bits) - 1)); } private long xorShift(long x) { x ^= (x << 21); x ^= (x >>> 35); x ^= (x << 4); return x; } } /** * <p> * Random number generator based on the <a * href="http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html" * target="_top">Mersenne Twister</a> algorithm developed by Makoto Matsumoto * and Takuji Nishimura. * </p> * * <p> * This is a very fast random number generator with good statistical properties * (it passes the full DIEHARD suite). This is the best RNG for most * experiments. If a non-linear generator is required, use the slower * <code>AESCounterRNG</code> RNG. * </p> * * <p> * This PRNG is deterministic, which can be advantageous for testing purposes * since the output is repeatable. If multiple instances of this class are * created with the same seed they will all have identical output. * </p> * * <p> * This code is translated from the original C version and assumes that we will * always seed from an array of bytes. I don't pretend to know the meanings of * the magic numbers or how it works, it just does. * </p> * * <p> * <em>NOTE: Because instances of this class require 128-bit seeds, it is not * possible to seed this RNG using the {@link #setSeed(long)} method inherited * from {@link java.util.Random}. Calls to this method will have no effect. * Instead the seed must be set by a constructor.</em> * </p> * * @author Makoto Matsumoto and Takuji Nishimura (original C version) * @author Daniel Dyer (Java port) */ public static class MersenneTwisterRNG extends RandomBase { // Magic numbers from original C version. 
private static final int N = 624; private static final int M = 397; private static final int[] MAG01 = { 0, 0x9908b0df }; private static final int UPPER_MASK = 0x80000000; private static final int LOWER_MASK = 0x7fffffff; private static final int BOOTSTRAP_SEED = 19650218; private static final int BOOTSTRAP_FACTOR = 1812433253; private static final int SEED_FACTOR1 = 1664525; private static final int SEED_FACTOR2 = 1566083941; private static final int GENERATE_MASK1 = 0x9d2c5680; private static final int GENERATE_MASK2 = 0xefc60000; // Lock to prevent concurrent modification of the RNG's internal state. private final ReentrantLock lock = new ReentrantLock(); /* State vector */ private final int[] mt = new int[N]; /* Index into state vector */ private int mtIndex = 0; // public MersenneTwisterRNG(long... seeds){ // this(unpackInts(seeds)); // } /** * Creates an RNG and seeds it with the specified seed data. * * @param seedInts The seed data used to initialise the RNG. */ public MersenneTwisterRNG(int... seedInts) { // This section is translated from the init_genrand code in the C version. mt[0] = BOOTSTRAP_SEED; for( mtIndex = 1; mtIndex < N; mtIndex++ ) { mt[mtIndex] = (BOOTSTRAP_FACTOR * (mt[mtIndex - 1] ^ (mt[mtIndex - 1] >>> 30)) + mtIndex); } // This section is translated from the init_by_array code in the C version. int i = 1; int j = 0; for( int k = Math.max(N, SEEDS.length); k > 0; k-- ) { int jseeds = (j == 0 || j == 1) ? seedInts[j] : SEEDS[j]; mt[i] = (mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >>> 30)) * SEED_FACTOR1)) + jseeds + j; i++; j++; if( i >= N ) { mt[0] = mt[N - 1]; i = 1; } if( j >= SEEDS.length ) { j = 0; } } for( int k = N - 1; k > 0; k-- ) { mt[i] = (mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >>> 30)) * SEED_FACTOR2)) - i; i++; if( i >= N ) { mt[0] = mt[N - 1]; i = 1; } } mt[0] = UPPER_MASK; // Most significant bit is 1 - guarantees non-zero // initial array. } @Override protected final int next(int bits) { int y; try { lock.lock(); if( mtIndex >= N ) // Generate N ints at a time. { int kk; for( kk = 0; kk < N - M; kk++ ) { y = (mt[kk] & UPPER_MASK) | (mt[kk + 1] & LOWER_MASK); mt[kk] = mt[kk + M] ^ (y >>> 1) ^ MAG01[y & 0x1]; } for( ; kk < N - 1; kk++ ) { y = (mt[kk] & UPPER_MASK) | (mt[kk + 1] & LOWER_MASK); mt[kk] = mt[kk + (M - N)] ^ (y >>> 1) ^ MAG01[y & 0x1]; } y = (mt[N - 1] & UPPER_MASK) | (mt[0] & LOWER_MASK); mt[N - 1] = mt[M - 1] ^ (y >>> 1) ^ MAG01[y & 0x1]; mtIndex = 0; } y = mt[mtIndex++]; } finally { lock.unlock(); } // Tempering y ^= (y >>> 11); y ^= (y << 7) & GENERATE_MASK1; y ^= (y << 15) & GENERATE_MASK2; y ^= (y >>> 18); return y >>> (32 - bits); } /* 624 int seeds generated from /dev/random * * SEEDS[0], and SEEDS[1] are reserved for MersenneTwister initialization in hex.rf.Utils. * They can obtain any value! * * Note: SEEDS are modified at this place. The user has to ensure proper locking. 
*/ public static final int[] SEEDS = new int[] { 0x00000000, 0x00000000, 0x8a885b28, 0xcb618e3c, 0x6812fe78, 0xca8ca770, 0xf2a19ffd, 0xb6821eaa, 0xd1fa32c7, 0xc6dbee65, 0xd9534b7f, 0xa8e765a6, 0x2da3c864, 0xb5a7766a, 0x2bc7e671, 0xf80571d0, 0xa7174754, 0xf3234de2, 0x4e7cc080, 0x1140d082, 0x5fad93ab, 0x8cce5b9f, 0x1872465a, 0x6b42ecd3, 0x2c8c9653, 0x453a2eef, 0xcc508838, 0x5a85a0e1, 0x3b7a05e9, 0x2ac09cfd, 0x88aa58c6, 0xd9680c83, 0x061c1189, 0xc5ce6f21, 0x0acff61d, 0x3f550f57, 0xfce253ce, 0x72f39c54, 0x1772831b, 0x7f61413f, 0x5971d316, 0x38306f1e, 0xe4102ecc, 0xe64f0fc5, 0x3bc7ba66, 0x739ef534, 0x1379892e, 0x8f608758, 0x4828e965, 0xf4ac7b9a, 0xa8ddaba3, 0x50f8b1cb, 0xfec0f9d0, 0x842537e7, 0x5e6231bf, 0xef3ae390, 0x420f8f3a, 0xeedd75cc, 0xe3c10283, 0x5c38cbd6, 0x662c8b91, 0x2cd589d5, 0xe28522a7, 0xda03a7b4, 0xb29877dc, 0x45a109fb, 0x99c3021e, 0x0af14661, 0xe85d6e6e, 0xbdaa929b, 0x940e053d, 0x861e7d7d, 0x73ae673f, 0x8491c460, 0xc01be6a4, 0x06e0818c, 0x142f7399, 0xc80a6a41, 0x45600653, 0x1c0516d5, 0xd2ff0694, 0xb1cb723d, 0x73f355e0, 0x076cb63a, 0x7db7190f, 0x35ea0b80, 0xa36f646b, 0xb9ebfa2f, 0x3844839b, 0x58d80a19, 0x1f3d8746, 0x229bb12e, 0x0ac3846d, 0xd2f43715, 0x04aaeb46, 0xacc87633, 0x7dd5b268, 0xba3651fc, 0xd76801e6, 0x9e413be6, 0xb31b71c5, 0x5fd36451, 0x4041662e, 0x8e87487b, 0x03126116, 0x6574b757, 0x7717d040, 0x1d15c783, 0x7a167e9c, 0x8e4ec7a0, 0x749bc3e5, 0xfa2ea1b1, 0x25df2c84, 0xf9e7ae19, 0xe071597a, 0x6ae0fb27, 0x12380f69, 0xf672e42f, 0x5425f6f6, 0xed6e16b7, 0x36b29279, 0x24cbd8fb, 0x4d682009, 0x0e17116c, 0x10428b6b, 0xe463f573, 0x2c5ff8d0, 0x1102b138, 0xc544907c, 0xcf403704, 0x2565d0ec, 0x67e3111c, 0xc5097632, 0xe3505d2d, 0xb0a31246, 0x55cbffb3, 0xf2b662cb, 0x944ba74f, 0xf64a1136, 0x67628af5, 0x1d442a18, 0x31c8c7d4, 0x648a701b, 0x563930c4, 0x28ecd115, 0x9959be3f, 0x9afa938d, 0x0c40f581, 0x8ec73f72, 0x20dbf8a1, 0x2c2ca035, 0xb81f414c, 0xfc16c15c, 0xec386121, 0x41d8bd3a, 0x60eab9ce, 0x9f4b093d, 0x56e5bb7c, 0x0d60cd53, 0x3238a405, 0xa159ab87, 0xdadaaed3, 0xc86b574f, 0x9ed3b528, 0x3137e717, 0x028012fc, 0x8477ea87, 0x6477d097, 0x06b6e294, 0x1dd29c4e, 0x5c732920, 0xc760bcec, 0x5d40a29a, 0xc581f784, 0x13b46a5e, 0xf6761ea7, 0x1b4ee8c3, 0x1637d570, 0x0c00569a, 0xd01cb95e, 0x87343e82, 0x17190e4c, 0x357078a3, 0x3b59246c, 0xdf11b5e7, 0x68971c7a, 0xcc3d497e, 0x21659527, 0x2c211ba2, 0xf34aa1ee, 0x4a07f67e, 0x7ae0eacd, 0xe05bdc85, 0xfe2347a7, 0xebc4be3f, 0x1f033044, 0x82e2a46e, 0x75c66f49, 0x56c50b1e, 0xc20f0644, 0x798ec011, 0x9eba0c81, 0x0fe34e70, 0x28061a7f, 0x26536ace, 0x6541a948, 0x305edffe, 0x25eaa0a9, 0xef64db75, 0xe1f4d734, 0xe27e22de, 0x3b68a4b3, 0x8917d09f, 0x402f7e99, 0xe9b3e3e7, 0x9a95e6fb, 0x42a5725c, 0x00d9f288, 0x9e893c59, 0x3771df6d, 0xbfb39333, 0x9039fd17, 0x3d574609, 0xb8a44bc4, 0xe12f34ad, 0x7f165a6c, 0x8e13ec33, 0xa8d935be, 0x00ac09d8, 0x3ffff87b, 0xda94be75, 0x8b1804d5, 0xd1ac4301, 0xc2b4101d, 0xb8dae770, 0x3062dbf0, 0xc5defd8d, 0xa791e2aa, 0x678f3924, 0xec4ea145, 0x457c82b5, 0x6698be3c, 0xfbd4913f, 0xff52ad6d, 0x54c7f66d, 0x7d6ec779, 0x9ce9d1d9, 0x384dd1eb, 0xb4b4d565, 0xa5736588, 0x33ae82b2, 0x051221b0, 0x11a8775f, 0xd2ed52ea, 0xdf99b00b, 0xa0425a1a, 0xd6b32a9b, 0xfa162152, 0x4de98efb, 0xb0d5553e, 0xdd9d7239, 0x05be808d, 0x438f6f74, 0xdf28fc47, 0xb6fcd76d, 0x58375c21, 0x1a88eae6, 0x1ce15ca9, 0x46304120, 0xc2a8c9ee, 0xa2eaf06e, 0xf548a76c, 0xd288b960, 0xec1c7cb5, 0x6e59f189, 0x3424b4eb, 0x521220db, 0x9d2f797d, 0x8561d680, 0x63eda823, 0x7f406b58, 0x31104105, 0x1a457dc1, 0x3a94cec4, 0xed5a24b7, 0xa11766a2, 0xefd011e1, 0x10806e51, 0x5519474f, 0x08d1a66f, 0xc83ac414, 0xf9dad4f5, 
0xfa64b469, 0x6cbfd6a3, 0xb2e787ce, 0x63eb2f8e, 0xe0d36a89, 0xe232fe8f, 0xd0d28011, 0xd198ab29, 0x1e5aa524, 0x05ae372d, 0x314fb7fb, 0x7e263de0, 0x61e8d239, 0x2f76e5b6, 0xaf2af828, 0x4146a159, 0x3626bccf, 0x308a82ed, 0x1e5527a3, 0xe540898d, 0xb2e944de, 0x010007fd, 0xaabb40cc, 0xa119fd6b, 0xefca25a8, 0xd1389d26, 0x15b65a4b, 0xf1323150, 0x3798f801, 0xf5787776, 0xcd069f96, 0x91da0117, 0xb603eaa4, 0xb068125e, 0x346216d5, 0xcb0af099, 0xad8131db, 0x1c5ce132, 0x3a094b8a, 0x68d20e3f, 0x6f62b0b9, 0x5b2da8a9, 0x11530b9a, 0x5c340608, 0x9b23c1d9, 0xf175fcba, 0x70fddd5e, 0x9c554ec4, 0xfc0cb505, 0x5249997f, 0xc42f151f, 0xee9f506f, 0x8fb2cd27, 0xb799db4b, 0x4c5c0eeb, 0x37278283, 0x8183b362, 0x928b4cc7, 0x6c895352, 0x9b0a8270, 0xc5cb93da, 0xf8268a31, 0x09fd1af6, 0xbc6e89fc, 0x5a614eb8, 0xe55b1348, 0x992a69ee, 0x55b0ffb7, 0x4eb5db62, 0x5cde9e6b, 0xad9b186d, 0xa5006f43, 0xc82c2c7f, 0x822fa75f, 0xa3a4cb06, 0x6d05edda, 0x5bf76fb7, 0x846a54f8, 0xca7ce73c, 0x43c1a8d1, 0x1b4c79a7, 0x85cb66c7, 0xc541b4ad, 0x07e69a11, 0xffb1e304, 0xe585f233, 0x506773a5, 0xc7adaa3c, 0xf980d0c6, 0xa3d90125, 0xfbce4232, 0xfe6fed8f, 0xe17f437a, 0x29c45214, 0xa0ea1046, 0xc025f727, 0x820202ca, 0x554f4e76, 0x5389096c, 0x7d58de96, 0xe32295b8, 0x689b5fbe, 0xdfefacf1, 0xd4facb70, 0x0cf3703e, 0x78fec105, 0x57b53e14, 0x54bcd2ef, 0x335f4d0d, 0x58552c2e, 0xf64df202, 0x0e5c3565, 0xa4cb22c5, 0xd91c91c1, 0x7827bb3f, 0x37b456e3, 0x84950a9e, 0x273edcd7, 0xddaa5ebd, 0xb1f46855, 0xe0052b20, 0xcfb04082, 0xa449e49b, 0xfd95e21c, 0xa9f477c0, 0xacf0be15, 0x611d1edc, 0xb3dca16a, 0x781efb9a, 0x6480c096, 0x4e545269, 0xbc836952, 0xd511b539, 0xdf6248b4, 0x8ff7da61, 0x0756106d, 0x92f04a17, 0xee649e83, 0x14e35780, 0x6dc76815, 0x0fe032bb, 0x1fd66462, 0x0f4be990, 0x1627c658, 0xb95f902d, 0xa6f9e4e9, 0xb7b9aa16, 0x6a0a31d5, 0x647129e6, 0x071f89b7, 0xe4033ca9, 0xd81b3f59, 0x74f8a887, 0xc44bc880, 0xf1c2d04c, 0xf9e246c9, 0x529f9c45, 0x14d322e7, 0x8c3305b1, 0x8dd9a988, 0x8a92b883, 0x47574eb3, 0x7b5779f4, 0x759a4eb6, 0xc8ed6a11, 0x42a4e0ee, 0xf4603b1d, 0x790d9126, 0xa261034e, 0x94569718, 0x5f57c893, 0xa1c2486a, 0x6727618f, 0xcfb7c5b3, 0xa4c2f232, 0x33b5e051, 0x9ed6c2d0, 0x16f3ec37, 0x5c7c96ba, 0x3a16185f, 0x361d6c17, 0xa179808b, 0xb6751231, 0xc8486729, 0x873fc8ab, 0xe7f78a78, 0x2fd3093b, 0x489efe89, 0x83628cd1, 0x67ad9faa, 0x623cbc2f, 0x3f01e8c4, 0xfdad453f, 0x2ccfb969, 0x5d2a3806, 0x9e3df87a, 0x04700155, 0xab7b57ef, 0x262d746b, 0x737aa3e3, 0x949c724c, 0xa4120c39, 0xb0d6fc26, 0xf627a213, 0xc0a0bc60, 0x24d6564a, 0x34d460dd, 0x785b0656, 0x9376f6a5, 0x25ebee5b, 0x5a0a5018, 0x84d02b01, 0xa2b3658a, 0xad0d1cce, 0x38271683, 0x9f491585, 0x8ba28247, 0x40d5a42e, 0x7780e82e, 0x4211ccc3, 0x99da0844, 0xb85f9474, 0xbdb158b6, 0xf8194c71, 0x6339f3ec, 0x4cd66cf7, 0xb636aa4f, 0x4068c56c, 0xe41080a1, 0x55740173, 0x95903235, 0x90f39f69, 0x3f10a4e2, 0x3192a79b, 0x0590a944, 0xc9058c4f, 0x6f05a8eb, 0xdb326d13, 0xfcefbcee, 0xa699db05, 0xd819d477, 0x610f7e52, 0xfa0a4aca, 0x0e6b3f1d, 0x7a8da290, 0x6d12a9ef, 0xa12642d5, 0xebdedcff, 0x175ed926, 0xa094363a, 0xb3a07e30, 0x34fa8d2c, 0xbc16e646, 0x3e6de94d, 0xd5288754, 0x204e5283, 0xc61106f6, 0x299835e0, 0xe04e7a38, 0x2e2c1e34, 0xc069ea80, 0x5c2117cf, 0xd8fc2947, 0x10a40dc9, 0xb40dacd9, 0xfbdac86b, 0x2a8383cb, 0x46d86dc1, 0x0a1f3958, 0x0f7e59ea, 0x5c10a118, 0xea13bfc8, 0xc82c0da5, 0x4cd40dd7, 0xdaa5dfe9, 0x8c2cc0a3, 0x8dc15a64, 0x241b160c, 0xc44f573b, 0x3eb3155f, 0x284ba3fc, 0x1ece8db4, 0x03eaf07f, 0x7cbd99fb, 0x7d313b45, 0xe7ea83a7, 0x6d339d60, 0x0ef002cb, 0x92a04b40, 0x510d79bc, 0x6440e050, 0x33916596, 0xa11c5df3, 0xb582a3de, 0x031001c1, 0x85951218, 
0xbe538ada, 0xe3aec1d2, 0x7fb67836, 0xc2d9ab84, 0xb1841ad9, 0x1e64cc5f, 0xa3fe111d, 0xd081d6bb, 0xf8ae6c3b, 0x3b12ae4c, 0x9ba5eb58, 0x22931b18, 0xf99b2e61, 0x628f1252, 0x2fce9aa0, 0xf99a04fb, 0x21577d22, 0x9d474c81, 0x7350e54a, 0xf88c8ac6, 0x94f38853, 0x0b6333fe, 0x8875045e, 0x90c23689, 0x6b08a34b, 0x3fb742ea, 0xa8a9466a, 0xd543807d, 0xbf12e26e, 0x10211c25, 0x068852e1, 0xf1d8f035, 0x012a5782, 0xe84cbf5f, 0xee35a87a, 0x8bfa2f09, }; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/ReflectionUtils.java
package water.util; import water.H2O; import water.Iced; import java.lang.reflect.*; public class ReflectionUtils { /** * Reflection helper which returns the actual class for a type parameter, even if itself is parameterized. */ public static <T> Class<T> findActualClassParameter(Class clz, int parm) { Class parm_class = null; if (clz.getGenericSuperclass() instanceof ParameterizedType) { Type[] handler_type_parms = ((ParameterizedType) (clz.getGenericSuperclass())).getActualTypeArguments(); if (handler_type_parms[parm] instanceof Class) { // The handler's Iced class is not parameterized (the normal case): parm_class = (Class) handler_type_parms[parm]; // E.g., for a Schema [0] is the impl (Iced) type; [1] is the Schema type } else if (handler_type_parms[parm] instanceof TypeVariable) { // The handler's Iced class is parameterized, e.g. to handle multiple layers of Schema classes as in ModelsHandler: TypeVariable v = (TypeVariable) (handler_type_parms[parm]); Type t = v.getBounds()[0]; // [0] or [parm] ? if (t instanceof Class) parm_class = (Class) t; else if (t instanceof ParameterizedType) parm_class = (Class) ((ParameterizedType) t).getRawType(); } else if (handler_type_parms[parm] instanceof ParameterizedType) { // The handler's Iced class is parameterized, e.g. to handle multiple layers of Schema classes as in ModelsHandler: parm_class = (Class) ((ParameterizedType) (handler_type_parms[parm])).getRawType(); // For a Key<Frame> this returns Key.class; see also getActualTypeArguments() } else { String msg = "Iced parameter for handler: " + clz + " uses a type parameterization scheme that we don't yet handle: " + handler_type_parms[parm]; Log.warn(msg); throw H2O.fail(msg); } } else { // Superclass is not a ParameterizedType, so we just have Iced. parm_class = Iced.class; // If the handler isn't parameterized on the Iced class then this has to be Iced. } return (Class<T>) parm_class; } /** * Reflection helper which returns the actual class for a method's parameter. */ public static Class findMethodParameterClass(Method method, int parm) { Class[] clzes = method.getParameterTypes(); if (clzes.length <= parm) throw H2O.fail("Asked for the class of parameter number: " + parm + " of method: " + method + ", which only has: " + clzes.length + " parameters."); return clzes[parm]; } /** * Reflection helper which returns the actual class for a method's parameter. */ public static Class findMethodOutputClass(Method method) { return method.getReturnType(); } /** * Reflection helper which returns the actual class for a field which has a parameterized type. * E.g., DeepLearningV2's "parameters" class is in parent ModelBuilderSchema, and is parameterized * by type parameter P. */ public static Class findActualFieldClass(Class clz, Field f) { // schema.getClass().getGenericSuperclass() instanceof ParameterizedType Type generic_type = f.getGenericType(); if (! (generic_type instanceof TypeVariable)) return f.getType(); // field is a parameterized type // ((TypeVariable)schema.getClass().getField("parameters").getGenericType()) TypeVariable[] tvs = clz.getSuperclass().getTypeParameters(); TypeVariable tv = (TypeVariable)generic_type; String type_param_name = tv.getName(); int which_tv = -1; for(int i = 0; i < tvs.length; i++) if (type_param_name.equals(tvs[i].getName())) which_tv = i; if (-1 == which_tv) { // We topped out in the type heirarchy, so just use the type from f. // E.g., this happens when getting the metadata for the parameters field of ModelSchemaV3. 
// It has no generic parent, so we need to use the base class. return f.getType(); } ParameterizedType generic_super = (ParameterizedType)clz.getGenericSuperclass(); if (generic_super.getActualTypeArguments()[which_tv] instanceof Class) return (Class)generic_super.getActualTypeArguments()[which_tv]; return findActualFieldClass(clz.getSuperclass(), f); } // Best effort conversion from an Object to a double public static double asDouble( Object o ) { if( o == null ) return Double.NaN; if( o instanceof Integer ) return ((Integer)o); if( o instanceof Long ) return ((Long)o); if( o instanceof Float ) return ((Float)o); if( o instanceof Double ) return ((Double)o); if( o instanceof Enum ) return ((Enum)o).ordinal(); System.out.println("Do not know how to convert a "+o.getClass()+" to a double"); throw H2O.fail(); } /** * Return the Field for the specified name. * <p> * Java reflection will either give you all the public fields all the way up the class hierarchy (getField()), * or will give you all the private/protected/public only in the single class (getDeclaredField()). * This method uses the latter but walks up the class hierarchy. */ public static Field findNamedField(Object o, String field_name) { Class clz = o.getClass(); Field f = null; do { try { f = clz.getDeclaredField(field_name); f.setAccessible(true); return f; } catch (NoSuchFieldException e) { // fall through and try our parent } clz = clz.getSuperclass(); } while (clz != Object.class); return null; } public static <T> T getFieldValue(Object o, String fieldName) { Field f = findNamedField(o, fieldName); if (f == null) { return null; } else { try { return (T) f.get(o); } catch (IllegalAccessException e) { return null; } } } @SuppressWarnings("unchecked") public static <T> T newInstance(String className, Class<T> instanceOf) throws ClassNotFoundException, IllegalAccessException, InstantiationException { Class<?> cls = Class.forName(className); if (! instanceOf.isAssignableFrom(cls)) { throw new IllegalStateException("Class " + className + " is not an instance of " + instanceOf.getName() + "."); } return (T) cls.newInstance(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/ReproducibilityInformationUtils.java
package water.util; import org.joda.time.DateTimeZone; import water.*; import water.api.RestApiExtension; import water.parser.ParseTime; import java.lang.management.ManagementFactory; import java.util.*; public class ReproducibilityInformationUtils { public static TwoDimTable createNodeInformationTable() { List<String> colHeaders = new ArrayList<>(); List<String> colTypes = new ArrayList<>(); List<String> colFormat = new ArrayList<>(); colHeaders.add("node"); colTypes.add("int"); colFormat.add("%d"); colHeaders.add("h2o"); colTypes.add("string"); colFormat.add("%s"); colHeaders.add("healthy"); colTypes.add("string"); colFormat.add("%s"); colHeaders.add("last_ping"); colTypes.add("string"); colFormat.add("%s"); colHeaders.add("num_cpus"); colTypes.add("int"); colFormat.add("%d"); colHeaders.add("sys_load"); colTypes.add("double"); colFormat.add("%.5f"); colHeaders.add("mem_value_size"); colTypes.add("long"); colFormat.add("%d"); colHeaders.add("free_mem"); colTypes.add("long"); colFormat.add("%d"); colHeaders.add("pojo_mem"); colTypes.add("long"); colFormat.add("%d"); colHeaders.add("swap_mem"); colTypes.add("long"); colFormat.add("%d"); colHeaders.add("free_disc"); colTypes.add("long"); colFormat.add("%d"); colHeaders.add("max_disc"); colTypes.add("long"); colFormat.add("%d"); colHeaders.add("pid"); colTypes.add("int"); colFormat.add("%d"); colHeaders.add("num_keys"); colTypes.add("int"); colFormat.add("%d"); colHeaders.add("tcps_active"); colTypes.add("string"); colFormat.add("%s"); colHeaders.add("open_fds"); colTypes.add("int"); colFormat.add("%d"); colHeaders.add("rpcs_active"); colTypes.add("string"); colFormat.add("%s"); colHeaders.add("nthreads"); colTypes.add("int"); colFormat.add("%d"); colHeaders.add("is_leader"); colTypes.add("string"); colFormat.add("%s"); colHeaders.add("total_mem"); colTypes.add("long"); colFormat.add("%d"); colHeaders.add("max_mem"); colTypes.add("long"); colFormat.add("%d"); colHeaders.add("java_version"); colTypes.add("string"); colFormat.add("%s"); colHeaders.add("jvm_launch_parameters"); colTypes.add("string"); colFormat.add("%s"); colHeaders.add("os_version"); colTypes.add("string"); colFormat.add("%s"); colHeaders.add("machine_physical_mem"); colTypes.add("long"); colFormat.add("%d"); colHeaders.add("machine_locale"); colTypes.add("string"); colFormat.add("%s"); H2ONode[] members = H2O.CLOUD.members(); final int rows = members.length; TwoDimTable table = new TwoDimTable( "Node Information", null, new String[rows], colHeaders.toArray(new String[0]), colTypes.toArray(new String[0]), colFormat.toArray(new String[0]), ""); NodeInfoTask info = new NodeInfoTask().doAllNodes(); for (int row = 0; row < rows; row++) { int col = 0; table.set(row, col++, members[row].index()); table.set(row, col++, members[row].getIpPortString()); table.set(row, col++, Boolean.toString(members[row].isHealthy())); table.set(row, col++, members[row]._last_heard_from); table.set(row, col++, (int) members[row]._heartbeat._num_cpus); table.set(row, col++, members[row]._heartbeat._system_load_average); table.set(row, col++, members[row]._heartbeat.get_kv_mem()); table.set(row, col++, members[row]._heartbeat.get_free_mem()); table.set(row, col++, members[row]._heartbeat.get_pojo_mem()); table.set(row, col++, members[row]._heartbeat.get_swap_mem()); table.set(row, col++, members[row]._heartbeat.get_free_disk()); table.set(row, col++, members[row]._heartbeat.get_max_disk()); table.set(row, col++, members[row]._heartbeat._pid); table.set(row, col++, members[row]._heartbeat._keys); 
table.set(row, col++, members[row]._heartbeat._tcps_active); table.set(row, col++, members[row]._heartbeat._process_num_open_fds); table.set(row, col++, members[row]._heartbeat._rpcs); table.set(row, col++, (int) members[row]._heartbeat._nthreads); table.set(row, col++, Boolean.toString(row == H2O.CLOUD.leader().index() ? true : false)); for (int i=0; i < rows; i++) { if (members[row].index() == info.index[i]) { table.set(row, col++, info.totalMem[i]); table.set(row, col++, info.maxMem[i]); table.set(row, col++, info.javaVersion[i]); table.set(row, col++, info.jvmLaunchParameters[i]); table.set(row, col++, info.osVersion[i]); table.set(row, col++, info.machinePhysicalMem[i]); table.set(row, col++, info.machineLocale[i]); break; } } } return table; } public static class NodeInfoTask extends MRTask<NodeInfoTask> { private long[] totalMem; private long[] maxMem; private String[] javaVersion; private String[] jvmLaunchParameters; private String[] osVersion; private long[] machinePhysicalMem; private String[] machineLocale; private int[] index; @Override public void setupLocal() { totalMem = new long[H2O.CLOUD.size()]; maxMem = new long[H2O.CLOUD.size()]; Arrays.fill(totalMem, -1); Arrays.fill(maxMem, -1); javaVersion = new String[H2O.CLOUD.size()]; jvmLaunchParameters = new String[H2O.CLOUD.size()]; osVersion = new String[H2O.CLOUD.size()]; machinePhysicalMem = new long[H2O.CLOUD.size()]; machineLocale = new String[H2O.CLOUD.size()]; index = new int[H2O.CLOUD.size()]; Arrays.fill(index, -1); if (H2O.ARGS.client) { // do not fill node info on client node return; } Runtime runtime = Runtime.getRuntime(); totalMem[H2O.SELF.index()] = runtime.totalMemory(); maxMem[H2O.SELF.index()] = runtime.maxMemory(); javaVersion[H2O.SELF.index()] = "Java " + System.getProperty("java.version") + " (from " + System.getProperty("java.vendor") + ")"; jvmLaunchParameters[H2O.SELF.index()] = ManagementFactory.getRuntimeMXBean().getInputArguments().toString(); osVersion[H2O.SELF.index()] = System.getProperty("os.name") + " " + System.getProperty("os.version") + " (" + System.getProperty("os.arch") + ")"; machinePhysicalMem[H2O.SELF.index()] = OSUtils.getTotalPhysicalMemory(); machineLocale[H2O.SELF.index()] = Locale.getDefault().toString(); index[H2O.SELF.index()] = H2O.SELF.index(); } @Override public void reduce(final NodeInfoTask other) { for (int i = 0; i < H2O.CLOUD.size(); i++) { if (other.index[i] > -1) index[i] = other.index[i]; if (other.totalMem[i] > -1) totalMem[i] = other.totalMem[i]; if (other.maxMem[i] > -1) maxMem[i] = other.maxMem[i]; if (other.javaVersion != null) javaVersion[i] = other.javaVersion[i]; if (other.jvmLaunchParameters[i] != null) jvmLaunchParameters[i] = other.jvmLaunchParameters[i]; if (other.osVersion != null) osVersion[i] = other.osVersion[i]; if (other.machinePhysicalMem[i] > -1) machinePhysicalMem[i] = other.machinePhysicalMem[i]; if (other.machineLocale[i] != null) machineLocale[i] = other.machineLocale[i]; } } } public static TwoDimTable createClusterConfigurationTable() { List<String> colHeaders = new ArrayList<>(); List<String> colTypes = new ArrayList<>(); List<String> colFormat = new ArrayList<>(); colHeaders.add("H2O cluster uptime"); colTypes.add("long"); colFormat.add("%d"); colHeaders.add("H2O cluster timezone"); colTypes.add("string"); colFormat.add("%s"); colHeaders.add("H2O data parsing timezone"); colTypes.add("string"); colFormat.add("%s"); colHeaders.add("H2O cluster version"); colTypes.add("string"); colFormat.add("%s"); colHeaders.add("H2O cluster version age"); 
colTypes.add("string"); colFormat.add("%s"); colHeaders.add("H2O cluster name"); colTypes.add("string"); colFormat.add("%s"); colHeaders.add("H2O cluster total nodes"); colTypes.add("int"); colFormat.add("%d"); colHeaders.add("H2O cluster free memory"); colTypes.add("long"); colFormat.add("%d"); colHeaders.add("H2O cluster total cores"); colTypes.add("int"); colFormat.add("%d"); colHeaders.add("H2O cluster allowed cores"); colTypes.add("int"); colFormat.add("%d"); colHeaders.add("H2O cluster status"); colTypes.add("string"); colFormat.add("%s"); colHeaders.add("H2O internal security"); colTypes.add("string"); colFormat.add("%s"); colHeaders.add("H2O API Extensions"); colTypes.add("string"); colFormat.add("%s"); H2ONode[] members = H2O.CLOUD.members(); long freeMem = 0; int totalCores = 0; int clusterAllowedCores = 0; int unhealthlyNodes = 0; boolean locked = Paxos._cloudLocked; for (int i = 0; i < members.length; i++) { freeMem += members[i]._heartbeat.get_free_mem(); totalCores += members[i]._heartbeat._num_cpus; clusterAllowedCores += members[i]._heartbeat._cpus_allowed; if (!members[i].isHealthy()) unhealthlyNodes++; } String status = locked ? "locked" : "accepting new members"; status += unhealthlyNodes > 0 ? ", " + unhealthlyNodes + " nodes are not healthly" : ", healthly"; String apiExtensions = ""; for (RestApiExtension ext : ExtensionManager.getInstance().getRestApiExtensions()) { if (apiExtensions.isEmpty()) apiExtensions += ext.getName(); else apiExtensions += ", " + ext.getName(); } final int rows = 1; TwoDimTable table = new TwoDimTable( "Cluster Configuration", null, new String[rows], colHeaders.toArray(new String[0]), colTypes.toArray(new String[0]), colFormat.toArray(new String[0]), ""); int row = 0; int col = 0; table.set(row, col++, System.currentTimeMillis() - H2O.START_TIME_MILLIS.get()); table.set(row, col++, DateTimeZone.getDefault().toString()); table.set(row, col++, ParseTime.getTimezone().toString()); table.set(row, col++, H2O.ABV.projectVersion()); table.set(row, col++, PrettyPrint.toAge(H2O.ABV.compiledOnDate(), new Date())); table.set(row, col++, H2O.ARGS.name); table.set(row, col++, H2O.CLOUD.size()); table.set(row, col++, freeMem); table.set(row, col++, totalCores); table.set(row, col++, clusterAllowedCores); table.set(row, col++, status); table.set(row, col++, Boolean.toString(H2OSecurityManager.instance().securityEnabled)); table.set(row, col++, apiExtensions); return table; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/RowDataUtils.java
package water.util; import hex.genmodel.easy.RowData; import water.fvec.Chunk; import water.fvec.Vec; import water.parser.BufferedString; public class RowDataUtils { public static void extractChunkRow(Chunk[] cs, String[] names, byte[] types, int row, RowData rowData) { BufferedString str = new BufferedString(); for (int col = 0; col < cs.length; col++) { final Object value; final byte type = types[col]; final Chunk chk = cs[col]; if (type == Vec.T_CAT || type == Vec.T_STR) { if (cs[col].isNA(row)) { value = Double.NaN; } else if (type == Vec.T_CAT) { value = chk.vec().domain()[(int) chk.at8(row)]; } else { value = chk.atStr(str, row).toString(); } } else if (type == Vec.T_NUM || type == Vec.T_TIME){ value = cs[col].atd(row); } else { throw new UnsupportedOperationException("Cannot convert column of type " + Vec.TYPE_STR[type]); } rowData.put(names[col], value); } } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/SB.java
package water.util; import java.util.regex.Matcher; import java.util.regex.Pattern; import water.H2O; import water.exceptions.JCodeSB; /** Tight/tiny StringBuilder wrapper. * Short short names on purpose; so they don't obscure the printing. * Can't believe this wasn't done long long ago. */ public final class SB implements JCodeSB<SB> { public final boolean _outputDoubles = H2O.getSysBoolProperty("java.output.doubles", false); public final StringBuilder _sb; int _indent = 0; public SB( ) { _sb = new StringBuilder( ); } public SB(String s) { _sb = new StringBuilder(s); } public SB ps( String s ) { _sb.append("\""); pj(s); _sb.append("\""); return this; } public SB p( String s ) { _sb.append(s); return this; } private SB p( float s ) { if( Float.isNaN(s) ) _sb.append( "Float.NaN"); else if( Float.isInfinite(s) ) { _sb.append(s > 0 ? "Float.POSITIVE_INFINITY" : "Float.NEGATIVE_INFINITY"); } else _sb.append(s); return this; } public SB p( double s ) { if( Double.isNaN(s) ) _sb.append("Double.NaN"); else if( Double.isInfinite(s) ) { _sb.append(s > 0 ? "Double.POSITIVE_INFINITY" : "Double.NEGATIVE_INFINITY"); } else _sb.append(s); return this; } public SB p( char s ) { _sb.append(s); return this; } public SB p( int s ) { _sb.append(s); return this; } public SB p( long s ) { _sb.append(s); return this; } public SB p( boolean s) { _sb.append(s); return this; } // Not spelled "p" on purpose: too easy to accidentally say "p(1.0)" and // suddenly call the the autoboxed version. public SB pobj( Object s ) { _sb.append(s.toString()); return this; } public SB i( int d ) { for( int i=0; i<d+_indent; i++ ) p(" "); return this; } public SB i( ) { return i(0); } public SB ip(String s) { return i().p(s); } public SB s() { _sb.append(' '); return this; } // Java specific append of double public SB pj( double s ) { if (Double.isInfinite(s)) _sb.append("Double.").append(s>0? "POSITIVE_INFINITY" : "NEGATIVE_INFINITY"); else if (Double.isNaN(s)) _sb.append("Double.NaN"); else _sb.append(s); return this; } // Java specific append of float public SB pj( float s ) { if (_outputDoubles) { return pj((double) s); } if (Float.isInfinite(s)) _sb.append("Float.").append(s>0? 
"POSITIVE_INFINITY" : "NEGATIVE_INFINITY"); else if (Float.isNaN(s)) _sb.append("Float.NaN"); else _sb.append(s).append('f'); return this; } /* Append Java string - escape all " and \ */ public SB pj( String s ) { _sb.append(escapeJava(s)); return this; } @Override public SB pj(String objectName, String fieldName) { _sb.append(objectName).append('.').append(fieldName); return this; } public SB p( IcedBitSet ibs ) { return ibs.toString(this); } // Increase indentation public SB ii( int i) { _indent += i; return this; } // Decrease indentation public SB di( int i) { _indent -= i; return this; } @Override public SB ci(JCodeSB sb) { _indent = sb.getIndent(); return this; } // Copy indent from given string buffer public SB nl( ) { return p('\n'); } // Convert a String[] into a valid Java String initializer public SB toJavaStringInit( String[] ss ) { if (ss==null) return p("null"); p('{'); for( int i=0; i<ss.length-1; i++ ) p('"').pj(ss[i]).p("\","); if( ss.length > 0 ) p('"').pj(ss[ss.length-1]).p('"'); return p('}'); } public SB toJavaStringInit( float[] ss ) { if (ss==null) return p("null"); p('{'); for( int i=0; i<ss.length-1; i++ ) pj(ss[i]).p(','); if( ss.length > 0 ) pj(ss[ss.length-1]); return p('}'); } public SB toJavaStringInit( double[] ss ) { if (ss==null) return p("null"); p('{'); for( int i=0; i<ss.length-1; i++ ) pj(ss[i]).p(','); if( ss.length > 0 ) pj(ss[ss.length-1]); return p('}'); } public SB toJavaStringInit( double[][] ss ) { if (ss==null) return p("null"); p('{'); for( int i=0; i<ss.length-1; i++ ) toJavaStringInit(ss[i]).p(','); if( ss.length > 0 ) toJavaStringInit(ss[ss.length-1]); return p('}'); } public SB toJavaStringInit( double[][][] ss ) { if (ss==null) return p("null"); p('{'); for( int i=0; i<ss.length-1; i++ ) toJavaStringInit(ss[i]).p(','); if( ss.length > 0 ) toJavaStringInit(ss[ss.length-1]); return p('}'); } public SB toJSArray(float[] nums) { p('['); for (int i=0; i<nums.length; i++) { if (i>0) p(','); p(nums[i]); } return p(']'); } public SB toJSArray(String[] ss) { p('['); for (int i=0; i<ss.length; i++) { if (i>0) p(','); p('"').p(ss[i]).p('"'); } return p(']'); } @Override public int getIndent() { return _indent; } // Mostly a fail, since we should just dump into the same SB. public SB p(JCodeSB sb) { _sb.append(sb.getContent()); return this; } @Override public String toString() { return _sb.toString(); } /** Java-string illegal characters which need to be escaped */ public static final Pattern[] ILLEGAL_CHARACTERS = new Pattern[] { Pattern.compile("\\",Pattern.LITERAL), Pattern.compile("\"",Pattern.LITERAL) }; public static final String[] REPLACEMENTS = new String [] { "\\\\\\\\", "\\\\\"" }; /** Escape all " and \ characters to provide a proper Java-like string * Does not escape unicode characters. */ public static String escapeJava(String s) { assert ILLEGAL_CHARACTERS.length == REPLACEMENTS.length; for (int i=0; i<ILLEGAL_CHARACTERS.length; i++ ) { Matcher m = ILLEGAL_CHARACTERS[i].matcher(s); s = m.replaceAll(REPLACEMENTS[i]); } return s; } @Override public String getContent() { return _sb.toString(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/SBPrintStream.java
package water.util; import java.io.OutputStream; import java.io.PrintStream; import java.io.UnsupportedEncodingException; import water.H2O; import water.exceptions.JCodeSB; /** * A simple stream mimicing API of {@link SB}. */ public class SBPrintStream extends PrintStream implements JCodeSB<SBPrintStream> { private final boolean _outputDoubles = H2O.getSysBoolProperty("java.output.doubles", false); private int _indent = 0; public SBPrintStream(OutputStream out) { super(out); } public SBPrintStream(OutputStream out, boolean autoFlush) { super(out, autoFlush); } public SBPrintStream(OutputStream out, boolean autoFlush, String encoding) throws UnsupportedEncodingException { super(out, autoFlush, encoding); } public SBPrintStream ps(String s) { append("\""); pj(s); append("\""); return this; } @Override public SBPrintStream p(JCodeSB s) { return p(s.getContent()); } public SBPrintStream p(String s) { append(s); return this; } private SBPrintStream p(float s) { if (Float.isNaN(s)) { append("Float.NaN"); } else if (Float.isInfinite(s)) { append(s > 0 ? "Float.POSITIVE_INFINITY" : "Float.NEGATIVE_INFINITY"); } else { append(s); } return this; } public SBPrintStream p(double s) { if (Double.isNaN(s)) { append("Double.NaN"); } else if (Double.isInfinite(s)) { append(s > 0 ? "Double.POSITIVE_INFINITY" : "Double.NEGATIVE_INFINITY"); } else { append(s); } return this; } public SBPrintStream p(char s) { append(s); return this; } public SBPrintStream p(int s) { append(s); return this; } public SBPrintStream p(long s) { append(s); return this; } public SBPrintStream p(boolean s) { append(Boolean.toString(s)); return this; } // Not spelled "p" on purpose: too easy to accidentally say "p(1.0)" and // suddenly call the the autoboxed version. public SBPrintStream pobj(Object s) { append(s.toString()); return this; } public SBPrintStream i(int d) { for (int i = 0; i < d + _indent; i++) { p(" "); } return this; } public SBPrintStream i() { return i(0); } public SBPrintStream ip(String s) { return i().p(s); } public SBPrintStream s() { append(' '); return this; } // Java specific append of double public SBPrintStream pj(double s) { if (Double.isInfinite(s)) { append("Double.").append(s > 0 ? "POSITIVE_INFINITY" : "NEGATIVE_INFINITY"); } else if (Double.isNaN(s)) { append("Double.NaN"); } else { append(s); } return this; } // Java specific append of float public SBPrintStream pj(float s) { if (_outputDoubles) { return pj((double) s); } if (Float.isInfinite(s)) { append("Float.").append(s > 0 ? 
"POSITIVE_INFINITY" : "NEGATIVE_INFINITY"); } else if (Float.isNaN(s)) { append("Float.NaN"); } else { append(s).append('f'); } return this; } /* Append Java string - escape all " and \ */ public SBPrintStream pj(String s) { append(SB.escapeJava(s)); return this; } @Override public SBPrintStream pj(String objectName, String fieldName) { append(objectName).append('.').append(fieldName); return this; } public SBPrintStream p(IcedBitSet ibs) { SB sb = new SB(); sb = ibs.toString(sb); return append(sb); } public SBPrintStream p(SB sb) { return append(sb); } // Increase indentation public SBPrintStream ii(int i) { _indent += i; return this; } // Decrease indentation public SBPrintStream di(int i) { _indent -= i; return this; } // Copy indent from given string buffer public SBPrintStream ci(JCodeSB sb) { _indent = sb.getIndent(); return this; } public SBPrintStream nl() { return p('\n'); } // Convert a String[] into a valid Java String initializer public SBPrintStream toJavaStringInit(String[] ss) { if (ss == null) { return p("null"); } p('{'); for (int i = 0; i < ss.length - 1; i++) { p('"').pj(ss[i]).p("\","); } if (ss.length > 0) { p('"').pj(ss[ss.length - 1]).p('"'); } return p('}'); } public SBPrintStream toJavaStringInit(float[] ss) { if (ss == null) { return p("null"); } p('{'); for (int i = 0; i < ss.length - 1; i++) { pj(ss[i]).p(','); } if (ss.length > 0) { pj(ss[ss.length - 1]); } return p('}'); } public SBPrintStream toJavaStringInit(double[] ss) { if (ss == null) { return p("null"); } p('{'); for (int i = 0; i < ss.length - 1; i++) { pj(ss[i]).p(','); } if (ss.length > 0) { pj(ss[ss.length - 1]); } return p('}'); } public SBPrintStream toJavaStringInit(double[][] ss) { if (ss == null) { return p("null"); } p('{'); for (int i = 0; i < ss.length - 1; i++) { toJavaStringInit(ss[i]).p(','); } if (ss.length > 0) { toJavaStringInit(ss[ss.length - 1]); } return p('}'); } public SBPrintStream toJavaStringInit(double[][][] ss) { if (ss == null) { return p("null"); } p('{'); for (int i = 0; i < ss.length - 1; i++) { toJavaStringInit(ss[i]).p(','); } if (ss.length > 0) { toJavaStringInit(ss[ss.length - 1]); } return p('}'); } public SBPrintStream toJSArray(float[] nums) { p('['); for (int i = 0; i < nums.length; i++) { if (i > 0) { p(','); } p(nums[i]); } return p(']'); } public SBPrintStream toJSArray(String[] ss) { p('['); for (int i = 0; i < ss.length; i++) { if (i > 0) { p(','); } p('"').p(ss[i]).p('"'); } return p(']'); } @Override public int getIndent() { return _indent; } @Override public String getContent() { throw new UnsupportedOperationException("Cannot get content of stream!"); } // // Copied from AbstractStringBuilder // FIXME: optimize that // public SBPrintStream append(float f) { append(Float.toString(f)); return this; } public SBPrintStream append(double d) { append(Double.toString(d)); return this; } public SBPrintStream append(int i) { append(Integer.toString(i)); return this; } public SBPrintStream append(long l) { append(Long.toString(l)); return this; } public SBPrintStream append(SB sb) { append(sb.toString()); return this; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/SetOfBytes.java
package water.util; import java.util.Arrays; /** * Stores an immutable set of bytes, for fast evaluation. * * Created by vpatryshev on 1/13/17. */ public class SetOfBytes { private boolean[] bits = new boolean[256]; public SetOfBytes(byte[] bytes) { for (byte b : bytes) bits[0xff&b] = true; } public SetOfBytes(String s) { this(s.getBytes()); } public boolean contains(int b) { return b < 256 && b > -129 && bits[0xff&b];} public boolean equals(Object other) { return other instanceof SetOfBytes && Arrays.equals(bits, ((SetOfBytes)other).bits); } public int hashCode() { return Arrays.hashCode(bits); } public int size() { int n = 0; for (int b = 0; b < 256; b++) if (bits[b]) n++; return n; } public byte[] getBytes() { byte[] out = new byte[size()]; int i = 0; for (int b = 0; b < 256; b++) if (bits[b]) out[i++] = (byte)b; return out; } @Override public String toString() { return "SetOfBytes(" + Arrays.toString(getBytes()) + ")"; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/StringUtils.java
package water.util; import water.parser.BufferedString; import java.io.PrintWriter; import java.io.StringWriter; import java.nio.charset.Charset; import java.util.*; import static java.util.Collections.unmodifiableMap; import static water.util.CollectionUtils.createMap; /** * String manipulation utilities. */ public class StringUtils { private static final Map<Character, Integer> HEX_CODE = unmodifiableMap(createMap( toCharacterArray("0123456789abcdefABCDEF"), new Integer[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 10, 11, 12, 13, 14, 15} )); /** * Print exception stack trace into a string. * * @param t an exception * @return string containing pretty printed exception */ public static String toString(Throwable t) { StringWriter sw = new StringWriter(); PrintWriter pw = new PrintWriter(sw); t.printStackTrace(pw); return sw.toString(); } /** * Convenience function to test whether a string is empty. * @param s String to test * @return True if the string is either null or empty, false otherwise */ public static boolean isNullOrEmpty(String s) { return s == null || s.isEmpty(); } public static boolean isNullOrEmpty(BufferedString s) { return s == null || s.length() == 0; } /** * Expand ~ to user.home * @param path that can (but doesn't have to) contain a tilde (~) * @return expanded path */ public static String expandPath(String path) { return path.replaceFirst("^~", System.getProperty("user.home")); } public static String cleanString(String s) { //Tokenization/string cleaning for all datasets except for SST. // Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py String string = s; string = string.replaceAll("[^A-Za-z0-9(),!?\\'\\`]", " "); string = string.replaceAll("'s", " 's"); string = string.replaceAll("'ve", " 've"); string = string.replaceAll("n't", " n't"); string = string.replaceAll("'re", " 're"); string = string.replaceAll("'d", " 'd"); string = string.replaceAll("'ll", " 'll"); string = string.replaceAll(",", " , "); string = string.replaceAll("!", " ! "); string = string.replaceAll("\\(", " ( "); string = string.replaceAll("\\)", " ) "); string = string.replaceAll("\\?", " ? 
"); string = string.replaceAll("\\s{2,}", " "); return string.trim().toLowerCase(); } public static String[] tokenize(String text) { // System.out.println(cleanString(text)); return cleanString(text).split(" "); } public static int[] tokensToArray(String[] tokens, int padToLength, Map<String, Integer> dict) { assert(dict!=null); int len = tokens.length; int pad = padToLength - len; int[] data = new int[padToLength]; int ix = 0; for (String t : tokens) { Integer val = dict.get(t); int index; if (val == null) { index = dict.size(); dict.put(t, index); } else { index = val; } data[ix] = index; ix += 1; } for (int i = 0; i < pad; i++) { int index = dict.get(PADDING_SYMBOL); data[ix] = index; ix += 1; } return data; } public static String PADDING_SYMBOL = "</s>"; public static ArrayList<int[]> texts2array(List<String> texts) { int maxlen = 0; int index = 0; Map<String, Integer> dict = new HashMap<>(); dict.put(PADDING_SYMBOL, index); index += 1; for (String text : texts) { String[] tokens = tokenize(text); for (String token : tokens) { if (!dict.containsKey(token)) { dict.put(token, index); index += 1; } } int len = tokens.length; if (len > maxlen) maxlen = len; } // System.out.println(dict); // System.out.println("maxlen " + maxlen); // System.out.println("dict size " + dict.size()); ArrayList<int[]> array = new ArrayList<>(); for (String text: texts) { int[] data = tokensToArray(tokenize(text), maxlen, dict); // System.out.println(text); // System.out.println(Arrays.toString(data)); array.add(data); } return array; } /** * Join the array with the given delimiter, and return it as a string. * * @param delimiter string to be used as a separator between array elements * @param arr the array to join * @return a single string containing all elements in `arr` joined together */ public static String join(String delimiter, String[] arr) { return join(delimiter, Arrays.asList(arr)); } /** * Join the array with the given delimiter, and return it as a string. * * @param delimiter string to be used as a separator between array elements * @param strings the strings to join * @return a single string containing all elements in `strings` joined together */ public static String join(String delimiter, Iterable<String> strings) { StringBuilder sb = new StringBuilder(); for (String item : strings) { if (sb.length() > 0) sb.append(delimiter); sb.append(item); } return sb.toString(); } /** * Convert a string into the set of its characters. 
* * @param src Source string * @return Set of characters within the source string */ public static HashSet<Character> toCharacterSet(String src) { int n = src.length(); HashSet<Character> res = new HashSet<>(n); for (int i = 0; i < n; i++) res.add(src.charAt(i)); return res; } public static Character[] toCharacterArray(String src) { return ArrayUtils.box(src.toCharArray()); } public static int unhex(String str) { int res = 0; for (char c : str.toCharArray()) { if (!HEX_CODE.containsKey(c)) throw new NumberFormatException("Not a hexademical character " + c); res = (res << 4) + HEX_CODE.get(c); } return res; } public static byte[] bytesOf(CharSequence str) { return str.toString().getBytes(Charset.forName("UTF-8")); } public static byte[] toBytes(Object value) { return bytesOf(String.valueOf(value)); } public static String toString(byte[] bytes, int from, int length) { return new String(bytes, from, length, Charset.forName("UTF-8")); } public static String sanitizeIdentifier(String id) { char[] cs = id.toCharArray(); for( int i=1; i<cs.length; i++ ) if( !Character.isJavaIdentifierPart(cs[i]) ) cs[i] = '_'; return new String(cs); } public static String fixedLength(String s, int length) { String r = padRight(s, length); if (r.length() > length) { int a = Math.max(r.length() - length + 1, 0); int b = Math.max(a, r.length()); r = "#" + r.substring(a, b); } return r; } private static String padRight(String stringToPad, int size) { StringBuilder strb = new StringBuilder(stringToPad); while (strb.length() < size) if (strb.length() < size) strb.append(' '); return strb.toString(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/Tabulate.java
package water.util; import hex.Interaction; import water.*; import water.exceptions.H2OIllegalArgumentException; import water.fvec.Chunk; import water.fvec.Frame; import water.fvec.Vec; /** * Simple Co-Occurrence based tabulation of X vs Y, where X and Y are two Vecs in a given dataset * Uses histogram of given resolution in X and Y * Handles numerical/categorical data and missing values * Supports observation weights * * Fills up two double[][] arrays: * _countData[xbin][ybin] contains the sum of observation weights (or 1) for co-occurrences in bins xbin/ybin * _responseData[xbin][2] contains the mean value of Y and the sum of observation weights for a given bin for X */ public class Tabulate extends Keyed<Tabulate> { public final Job<Tabulate> _job; public Frame _dataset; public Key[] _vecs = new Key[2]; public String _predictor; public String _response; public String _weight; int _nbins_predictor = 20; int _nbins_response = 10; // result double[][] _count_data; double[][] _response_data; public TwoDimTable _count_table; public TwoDimTable _response_table; // helper to speed up stuff static private class Stats extends Iced { Stats(Vec v) { _min = v.min(); _max = v.max(); _isCategorical = v.isCategorical(); _isInt = v.isInt(); _cardinality = v.cardinality(); _missing = v.naCnt() > 0 ? 1 : 0; _domain = v.domain(); } final double _min; final double _max; final boolean _isCategorical; final boolean _isInt; final int _cardinality; final int _missing; //0 or 1 final String[] _domain; } final private Stats[] _stats = new Stats[2]; public Tabulate() { _job = new Job(Key.<Tabulate>make(), Tabulate.class.getName(), "Tabulate job"); } private int bins(int v) { return v==1 ? _nbins_response : _nbins_predictor; } private int res(final int v) { final int missing = _stats[v]._missing; if (_stats[v]._isCategorical) return _stats[v]._cardinality + missing; return bins(v) + missing; } private int bin(final int v, final double val) { if (Double.isNaN(val)) { return 0; } int b; int bins = bins(v); if (_stats[v]._isCategorical) { assert((int)val == val); b = (int) val; } else { double d = (_stats[v]._max - _stats[v]._min) / bins; b = (int) ((val - _stats[v]._min) / d); assert(b>=0 && b<= bins); b = Math.min(b, bins -1);//avoid AIOOBE at upper bound } return b+_stats[v]._missing; } private String labelForBin(final int v, int b) { int missing = _stats[v]._missing; if (missing == 1 && b==0) return "missing(NA)"; if (missing == 1) b--; if (_stats[v]._isCategorical) return _stats[v]._domain[b]; int bins = bins(v); if (_stats[v]._isInt && (_stats[v]._max - _stats[v]._min + 1) <= bins) return Integer.toString((int)(_stats[v]._min + b)); double d = (_stats[v]._max - _stats[v]._min)/bins; return String.format("%5f", _stats[v]._min + (b + 0.5) * d); } public Tabulate execImpl() { if (_dataset == null) throw new H2OIllegalArgumentException("Dataset not found"); if (_nbins_predictor < 1) throw new H2OIllegalArgumentException("Number of bins for predictor must be >= 1"); if (_nbins_response < 1) throw new H2OIllegalArgumentException("Number of bins for response must be >= 1"); Vec x = _dataset.vec(_predictor); if (x == null) throw new H2OIllegalArgumentException("Predictor column " + _predictor + " not found"); if (x.cardinality() > _nbins_predictor) { Interaction in = new Interaction(); in._source_frame = _dataset._key; in._factor_columns = new String[]{_predictor}; in._max_factors = _nbins_predictor -1; in.execImpl(null); x = in._job._result.get().anyVec(); } else if (x.isInt() && (x.max() - x.min() + 1) <= 
_nbins_predictor) { x = x.toCategoricalVec(); } Vec y = _dataset.vec(_response); if (y == null) throw new H2OIllegalArgumentException("Response column " + _response + " not found"); if (y.cardinality() > _nbins_response) { Interaction in = new Interaction(); in._source_frame = _dataset._key; in._factor_columns = new String[]{_response}; in._max_factors = _nbins_response -1; in.execImpl(null); y = in._job._result.get().anyVec(); } else if (y.isInt() && (y.max() - y.min() + 1) <= _nbins_response) { y = y.toCategoricalVec(); } if (y!=null && y.cardinality() > 2) Log.warn("Response column has more than two factor levels - mean response depends on lexicographic order of factors!"); Vec w = _dataset.vec(_weight); //can be null if (w != null && (!w.isNumeric() && w.min() < 0)) throw new H2OIllegalArgumentException("Observation weights must be numeric with values >= 0"); if (x!=null) { _vecs[0] = x._key; _stats[0] = new Stats(x); } if (y!=null) { _vecs[1] = y._key; _stats[1] = new Stats(y); } Tabulate sp = w != null ? new CoOccurrence(this).doAll(x, y, w)._sp : new CoOccurrence(this).doAll(x, y)._sp; _count_table = sp.tabulationTwoDimTable(); _response_table = sp.responseCharTwoDimTable(); Log.info(_count_table.toString(2, false)); Log.info(_response_table.toString(2, false)); return sp; } private static class CoOccurrence extends MRTask<CoOccurrence> { final Tabulate _sp; CoOccurrence(Tabulate sp) {_sp = sp;} @Override protected void setupLocal() { _sp._count_data = new double[_sp.res(0)][_sp.res(1)]; _sp._response_data = new double[_sp.res(0)][2]; } @Override public void map(Chunk x, Chunk y) { map(x, y, (Chunk)null); } @Override public void map(Chunk x, Chunk y, Chunk w) { for (int r=0; r<x.len(); ++r) { int xbin = _sp.bin(0, x.atd(r)); int ybin = _sp.bin(1, y.atd(r)); double weight = w!=null?w.atd(r):1; if (Double.isNaN(weight)) continue; AtomicUtils.DoubleArray.add(_sp._count_data[xbin], ybin, weight); //increment co-occurrence count by w if (!y.isNA(r)) { AtomicUtils.DoubleArray.add(_sp._response_data[xbin], 0, weight * y.atd(r)); //add to mean response for x AtomicUtils.DoubleArray.add(_sp._response_data[xbin], 1, weight); //increment total for x } } } @Override public void reduce(CoOccurrence mrt) { if (_sp._response_data == mrt._sp._response_data) return; ArrayUtils.add(_sp._response_data, mrt._sp._response_data); } @Override protected void postGlobal() { //compute mean response for (int i=0; i<_sp._response_data.length; ++i) { _sp._response_data[i][0] /= _sp._response_data[i][1]; } } } public TwoDimTable tabulationTwoDimTable() { if (_response_data == null) return null; int predN = _count_data.length; int respN = _count_data[0].length; String tableHeader = "(Weighted) co-occurrence counts of '" + _predictor + "' and '" + _response + "'"; String[] rowHeaders = new String[predN * respN]; String[] colHeaders = new String[3]; //predictor response wcount String[] colTypes = new String[colHeaders.length]; String[] colFormats = new String[colHeaders.length]; colHeaders[0] = _predictor; colHeaders[1] = _response; colTypes[0] = "string"; colFormats[0] = "%s"; colTypes[1] = "string"; colFormats[1] = "%s"; colHeaders[2] = "counts"; colTypes[2] = "double"; colFormats[2] = "%f"; TwoDimTable table = new TwoDimTable( tableHeader, null/*tableDescription*/, rowHeaders, colHeaders, colTypes, colFormats, null); for (int p=0; p<predN; ++p) { String plabel = labelForBin(0, p); for (int r=0; r<respN; ++r) { String rlabel = labelForBin(1, r); for (int c=0; c<3; ++c) { table.set(r*predN + p, 0, plabel); 
table.set(r*predN + p, 1, rlabel); table.set(r*predN + p, 2, _count_data[p][r]); } } } return table; } public TwoDimTable responseCharTwoDimTable() { if (_response_data == null) return null; String tableHeader = "Mean value of '" + _response + "' and (weighted) counts for '" + _predictor + "' values"; int predN = _count_data.length; String[] rowHeaders = new String[predN]; //X String[] colHeaders = new String[3]; //Y String[] colTypes = new String[colHeaders.length]; String[] colFormats = new String[colHeaders.length]; colHeaders[0] = _predictor; colTypes[0] = "string"; colFormats[0] = "%s"; colHeaders[1] = "mean " + _response; colTypes[2] = "double"; colFormats[2] = "%f"; colHeaders[2] = "counts"; colTypes[1] = "double"; colFormats[1] = "%f"; TwoDimTable table = new TwoDimTable( tableHeader, null/*tableDescription*/, rowHeaders, colHeaders, colTypes, colFormats, null); for (int p=0; p<predN; ++p) { String plabel = labelForBin(0, p); table.set(p, 0, plabel); table.set(p, 1, _response_data[p][0]); table.set(p, 2, _response_data[p][1]); } return table; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/Timer.java
package water.util; import org.joda.time.format.DateTimeFormat; import org.joda.time.format.DateTimeFormatter; /** * Simple Timer class. **/ public class Timer { private static final DateTimeFormatter longFormat = DateTimeFormat.forPattern("dd-MMM HH:mm:ss.SSS"); private static final DateTimeFormatter shortFormat= DateTimeFormat.forPattern( "HH:mm:ss.SSS"); final long _start = System.currentTimeMillis(); final long _nanos = System.nanoTime(); /**Return the difference between when the timer was created and the current time. */ public long time() { return System.currentTimeMillis() - _start; } public long nanos(){ return System.nanoTime() - _nanos; } /** Return the difference between when the timer was created and the current * time as a string along with the time of creation in date format. */ @Override public String toString() { final long now = System.currentTimeMillis(); return PrettyPrint.msecs(now - _start, false) + " (Wall: " + longFormat.print(now) + ") "; } /** return the start time of this timer.**/ String startAsString() { return longFormat.print(_start); } /** return the start time of this timer.**/ String startAsShortString() { return shortFormat.print(_start); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/Triple.java
package water.util; import java.util.ArrayList; import java.util.List; import water.util.Java7.Objects; /** Pair class with a clearer name than AbstractMap.SimpleEntry. */ // TODO(vlad): add proper comment, have three params public class Triple<V> { public V v1; public V v2; public V v3; public Triple(V v1, V v2, V v3) { this.v1=v1; this.v2=v2; this.v3=v3; } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof Triple)) return false; Triple<?> triple = (Triple<?>) o; return Objects.equals(v1, triple.v1) && Objects.equals(v2, triple.v2) && Objects.equals(v3, triple.v3); } @Override public int hashCode() { return Objects.hashCode(v1)*2017+Objects.hashCode(v2)*79+Objects.hashCode(v3); } @Override public String toString() { return "Triple(" + v1 +", " + v2 + ", " + v3 + ')'; } static public <V> List<Triple<V>> product(V[] v1s, V[] v2s, V[] v3s) { List<Triple<V>> out = new ArrayList<>(v1s.length*v2s.length*v3s.length); for (V v1 : v1s) for (V v2 : v2s) for (V v3 : v3s) out.add(new Triple<>(v1,v2,v3)); return out; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/TwoDimTable.java
package water.util; import water.AutoBuffer; import water.Iced; import water.IcedWrapper; import water.Key; import water.fvec.Frame; import water.fvec.Vec; import java.util.Arrays; /** * Serializable 2D Table containing Strings or doubles * Table can be named * Columns and Rows can be named * Fields can be empty */ public class TwoDimTable extends Iced { private String tableHeader; private final String tableDescription; private final String[] rowHeaders; private final String[] colHeaders; private final String[] colTypes; private final String[] colFormats; private final IcedWrapper[][] cellValues; private final String colHeaderForRowHeaders; //public static final double emptyDouble = Double.longBitsToDouble(0x7ff8000000000100L); //also a NaN, but not Double.NaN (0x7ff8000000000000) public static final double emptyDouble = Double.MIN_VALUE*2; //Some unlikely value /** * Check whether a double value is considered an "empty field". * @param d a double value * @return true iff d represents an empty field */ public static boolean isEmpty(final double d) { return Double.doubleToRawLongBits(d) == Double.doubleToRawLongBits(emptyDouble); } /** * Constructor for TwoDimTable (R rows, C columns) * @param tableHeader the table header * @param tableDescription the table description * @param rowHeaders R-dim array for row headers * @param colHeaders C-dim array for column headers * @param colTypes C-dim array for column types * @param colFormats C-dim array with printf format strings for each column * @param colHeaderForRowHeaders column header for row headers */ public TwoDimTable(String tableHeader, String tableDescription, String[] rowHeaders, String[] colHeaders, String[] colTypes, String[] colFormats, String colHeaderForRowHeaders) { if (tableHeader == null) tableHeader = ""; if (tableDescription == null) tableDescription = ""; this.colHeaderForRowHeaders = colHeaderForRowHeaders; if (rowHeaders == null) throw new IllegalArgumentException("rowHeaders is null"); else { for (int r = 0; r < rowHeaders.length; ++r) if (rowHeaders[r] == null) rowHeaders[r] = ""; } if (colHeaders == null) throw new IllegalArgumentException("colHeaders is null"); else { for (int c = 0; c < colHeaders.length; ++c) if (colHeaders[c] == null) colHeaders[c] = ""; } final int rowDim = rowHeaders.length; final int colDim = colHeaders.length; if (colTypes == null) { colTypes = new String[colDim]; Arrays.fill(colTypes, "string"); } else if (colTypes.length != colDim) throw new IllegalArgumentException("colTypes must have the same length as colHeaders"); else { for (int c = 0; c < colDim; ++c) { colTypes[c] = colTypes[c].toLowerCase(); if (!(colTypes[c].equals("double") || colTypes[c].equals("float") || colTypes[c].equals("int") || colTypes[c].equals("long") || colTypes[c].equals("string"))) throw new IllegalArgumentException(String.format("colTypes values must be one of \"double\", \"float\", \"int\", \"long\", or \"string\". 
Received \"%s\" as ColType %d", colTypes[c], c)); } } if (colFormats == null) { colFormats = new String[colDim]; Arrays.fill(colFormats, "%s"); } else if (colFormats.length != colDim) throw new IllegalArgumentException("colFormats must have the same length as colHeaders"); this.tableHeader = tableHeader; this.tableDescription = tableDescription; this.rowHeaders = rowHeaders; this.colHeaders = colHeaders; this.colTypes = colTypes; this.colFormats = colFormats; this.cellValues = new IcedWrapper[rowDim][colDim]; } /** * Constructor for TwoDimTable (R rows, C columns) * @param tableHeader the table header * @param tableDescription the table description * @param rowHeaders R-dim array for row headers * @param colHeaders C-dim array for column headers * @param colTypes C-dim array for column types * @param colFormats C-dim array with printf format strings for each column * @param colHeaderForRowHeaders column header for row headers * @param strCellValues String[R][C] array for string cell values, can be null (can provide String[R][], for example) * @param dblCellValues double[R][C] array for double cell values, can be empty (marked with emptyDouble - happens when initialized with double[R][]) */ public TwoDimTable(String tableHeader, String tableDescription, String[] rowHeaders, String[] colHeaders, String[] colTypes, String[] colFormats, String colHeaderForRowHeaders, String[][] strCellValues, double[][] dblCellValues) { this(tableHeader, tableDescription, rowHeaders, colHeaders, colTypes, colFormats, colHeaderForRowHeaders); assert (isEmpty(emptyDouble)); assert (!Arrays.equals(new AutoBuffer().put8d(emptyDouble).buf(), new AutoBuffer().put8d(Double.NaN).buf())); final int rowDim = rowHeaders.length; final int colDim = colHeaders.length; for (int c = 0; c < colDim; ++c) { if (colTypes[c].equalsIgnoreCase("string")) { for (String[] vec : strCellValues) { if (vec == null) throw new IllegalArgumentException("Null string in strCellValues"); if (vec.length != colDim) throw new IllegalArgumentException("Each row in strCellValues must have the same length as colHeaders"); } break; } } for (int c = 0; c < colDim; ++c) { if (!colTypes[c].equalsIgnoreCase("string")) { for (double[] vec : dblCellValues) { if (vec.length != colDim) throw new IllegalArgumentException("Each row in dblCellValues must have the same length as colHeaders"); } break; } } for (int r = 0; r < rowDim; ++r) { for (int c = 0; c < colDim; ++c) { if (strCellValues[r] != null && strCellValues[r][c] != null && dblCellValues[r] != null && !isEmpty(dblCellValues[r][c])) throw new IllegalArgumentException("Cannot provide both a String and a Double at row " + r + " and column " + c + "."); } } for (int c = 0; c < colDim; ++c) { switch (colTypes[c]) { case "double": case "float": for (int r = 0; r < rowDim; ++r) set(r, c, dblCellValues[r][c]); break; case "int": case "long": for (int r = 0; r < rowDim; ++r) { double val = dblCellValues[r][c]; if (isEmpty(val)) set(r, c, Double.NaN); else if ((long)val==val) set(r, c, (long)val); else set(r, c, val); } break; case "string": for (int r = 0; r < rowDim; ++r) set(r, c, strCellValues[r][c]); break; default: throw new IllegalArgumentException("Column type " + colTypes[c] + " is not supported."); } } } /** * Accessor for table cells * @param row a row index * @param col a column index * @return Object (either String or Double or Float or Integer or Long) */ public Object get(final int row, final int col) { return cellValues[row][col] == null ? 
null : cellValues[row][col].get(); } public String getTableHeader() { return tableHeader; } public String getTableDescription() { return tableDescription; } public String[] getRowHeaders() { return rowHeaders; } public String[] getColHeaders() { return colHeaders; } public String getColHeaderForRowHeaders() { return colHeaderForRowHeaders; } public String[] getColTypes() { return colTypes; } public String[] getColFormats() { return colFormats; } public IcedWrapper[][] getCellValues() { return cellValues; } /** * Get row dimension * @return int */ public int getRowDim() { return rowHeaders.length; } /** * Get col dimension * @return int */ public int getColDim() { return colHeaders.length; } /** * Need to change table header when we are calling GLRM from PCA. * * @param newHeader: String containing new table header. */ public void setTableHeader(String newHeader) { if (!StringUtils.isNullOrEmpty(newHeader)) { this.tableHeader = newHeader; } } /** * Setter for table cells * @param row a row index * @param col a column index * @param o Object value */ public void set(final int row, final int col, final Object o) { if (o == null) cellValues[row][col] = new IcedWrapper(null); else if (o instanceof Double && Double.isNaN((double)o)) cellValues[row][col] = new IcedWrapper(Double.NaN); else if (o instanceof int[]) cellValues[row][col] = new IcedWrapper(Arrays.toString((int[])o)); else if (o instanceof long[]) cellValues[row][col] = new IcedWrapper(Arrays.toString((long[])o)); else if (o instanceof float[]) cellValues[row][col] = new IcedWrapper(Arrays.toString((float[])o)); else if (o instanceof double[]) cellValues[row][col] = new IcedWrapper(Arrays.toString((double[])o)); else if (colTypes[col]=="string") cellValues[row][col] = new IcedWrapper(o.toString()); else cellValues[row][col] = new IcedWrapper(o); } /** * Print table to String, using 2 spaces for padding between columns * @return String containing the ASCII version of the table */ public String toString() { return toString(2, true); } /** * Print table to String, using user-given padding * @param pad number of spaces for padding between columns * @return String containing the ASCII version of the table */ public String toString(final int pad) { return toString(pad, true); } private static final int PRINTOUT_ROW_LIMIT = 20; private boolean skip(int row) { assert(PRINTOUT_ROW_LIMIT % 2 == 0); if (getRowDim() <= PRINTOUT_ROW_LIMIT) return false; if (row <= PRINTOUT_ROW_LIMIT/2) return false; return row < getRowDim() - PRINTOUT_ROW_LIMIT / 2; } /** * Print table to String, using user-given padding * @param pad number of spaces for padding between columns * @param full whether to print the full table (otherwise top 5 and bottom 5 rows only) * @return String containing the ASCII version of the table */ public String toString(final int pad, boolean full) { if (pad < 0) throw new IllegalArgumentException("pad must be a non-negative integer"); final int rowDim = getRowDim(); final int colDim = getColDim(); final int actualRowDim = full ? rowDim : Math.min(PRINTOUT_ROW_LIMIT+1, rowDim); final String[][] cellStrings = new String[actualRowDim + 1][colDim + 1]; for (String[] row: cellStrings) Arrays.fill(row, ""); cellStrings[0][0] = colHeaderForRowHeaders != null ? 
colHeaderForRowHeaders : ""; int row = 0; for (int r = 0; r < rowDim; ++r) { if (!full && skip(r)) continue; cellStrings[row+1][0] = rowHeaders[r]; row++; } for (int c = 0; c < colDim; ++c) cellStrings[0][c+1] = colHeaders[c]; for (int c = 0; c < colDim; ++c) { final String formatString = colFormats[c]; row = 0; for (int r = 0; r < rowDim; ++r) { if (!full && skip(r)) continue; Object o = get(r,c); if ((o == null) || o instanceof Double && isEmpty((double)o)){ cellStrings[row + 1][c + 1] = ""; row++; continue; } else if (o instanceof Double && Double.isNaN((double)o)) { cellStrings[row + 1][c + 1] = "NaN"; row++; continue; } try { if (o instanceof Double) cellStrings[row + 1][c + 1] = String.format(formatString, o); else if (o instanceof Float) cellStrings[row + 1][c + 1] = String.format(formatString, o); else if (o instanceof Integer) cellStrings[row + 1][c + 1] = String.format(formatString, o); else if (o instanceof Long) cellStrings[row + 1][c + 1] = String.format(formatString, o); else if (o instanceof String) cellStrings[row + 1][c + 1] = (String)o; else cellStrings[row + 1][c + 1] = String.format(formatString, cellValues[r][c]); } catch(Throwable t) { cellStrings[row + 1][c + 1] = o.toString(); } row++; } } final int[] colLen = new int[colDim + 1]; for (int c = 0; c <= colDim; ++c) { for (int r = 0; r <= actualRowDim; ++r) { colLen[c] = Math.max(colLen[c], cellStrings[r][c].length()); } } final StringBuilder sb = new StringBuilder(); if (tableHeader.length() > 0) { sb.append(tableHeader); } if (tableDescription.length() > 0) { sb.append(" (").append(tableDescription).append(")"); } sb.append(":\n"); for (int r = 0; r <= actualRowDim; ++r) { int len = colLen[0]; if (actualRowDim != rowDim && r - 1 == PRINTOUT_ROW_LIMIT/2) { assert(!full); sb.append("---"); } else { if (len > 0) sb.append(String.format("%" + colLen[0] + "s", cellStrings[r][0])); for (int c = 1; c <= colDim; ++c) { len = colLen[c]; if (len > 0) sb.append(String.format("%" + (len + pad) + "s", cellStrings[r][c].equals("null") ? "" : cellStrings[r][c])); } } sb.append("\n"); } return sb.toString(); } public Frame asFrame(Key frameKey) { String[] colNames = new String[getColDim()]; System.arraycopy(getColHeaders(), 0, colNames, 0, getColDim()); Vec[] vecs = new Vec[colNames.length]; vecs[0] = Vec.makeVec(getRowHeaders(), Vec.newKey()); for (int j = 0; j < this.getColDim(); j++) { switch (getColTypes()[j]){ case "string": String[] strRow = new String[getRowDim()]; for (int i = 0; i < getRowDim(); i++) { strRow[i] = (String) get(i, j); } vecs[j] = Vec.makeVec(strRow, Vec.newKey()); break; case "int": case "long": double[] longRow = new double[getRowDim()]; for (int i = 0; i < getRowDim(); i++) { longRow[i] = ((Number) get(i, j)).longValue(); } vecs[j] = Vec.makeVec(longRow, Vec.newKey()); break; case "float": case "double": double[] dblRow = new double[getRowDim()]; for (int i = 0; i < getRowDim(); i++) { dblRow[i] = (double) get(i, j); } vecs[j] = Vec.makeVec(dblRow, Vec.newKey()); break; } } Frame fr = new Frame(frameKey, colNames, vecs); return fr; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/UnsafeUtils.java
package water.util; import sun.misc.Unsafe; import water.nbhm.UtilUnsafe; public class UnsafeUtils { private static final Unsafe _unsafe = UtilUnsafe.getUnsafe(); private static final long _Bbase = _unsafe.arrayBaseOffset(byte[].class); public static byte get1 ( byte[] buf, int off ) { return _unsafe.getByte (buf, _Bbase+off); } public static int get2 ( byte[] buf, int off ) { return _unsafe.getShort (buf, _Bbase+off); } public static int get4 ( byte[] buf, int off ) { return _unsafe.getInt (buf, _Bbase+off); } public static long get8 ( byte[] buf, int off ) { return _unsafe.getLong (buf, _Bbase+off); } public static float get4f( byte[] buf, int off ) { return _unsafe.getFloat (buf, _Bbase+off); } public static double get8d( byte[] buf, int off ) { return _unsafe.getDouble(buf, _Bbase+off); } public static int set1 (byte[] buf, int off, byte x ) {_unsafe.putByte (buf, _Bbase+off, x); return 1;} public static int set2 (byte[] buf, int off, short x ) {_unsafe.putShort (buf, _Bbase+off, x); return 2;} public static int set4 (byte[] buf, int off, int x ) {_unsafe.putInt (buf, _Bbase+off, x); return 4;} public static int set4f(byte[] buf, int off, float f ) {_unsafe.putFloat (buf, _Bbase+off, f); return 4;} public static int set8 (byte[] buf, int off, long x ) {_unsafe.putLong (buf, _Bbase+off, x); return 8;} public static int set8d(byte[] buf, int off, double x) {_unsafe.putDouble(buf, _Bbase+off, x); return 8;} public static void copyMemory( byte[] srcBase, long srcOff, byte[] dstBase, long dstOff, long len ) { _unsafe.copyMemory(srcBase,_Bbase+srcOff,dstBase,_Bbase+dstOff,len); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/VecUtils.java
package water.util; import water.*; import water.exceptions.H2OIllegalArgumentException; import water.exceptions.H2OIllegalValueException; import water.fvec.*; import water.nbhm.NonBlockingHashMapLong; import water.parser.BufferedString; import water.parser.Categorical; import java.util.*; import static water.util.RandomUtils.getRNG; public class VecUtils { /** * Create a new {@link Vec} of categorical values from an existing {@link Vec}. * * This method accepts all {@link Vec} types as input. The original Vec is not mutated. * * If src is a categorical {@link Vec}, a copy is returned. * * If src is a numeric {@link Vec}, the values are converted to strings used as domain * values. * * For all other types, an exception is currently thrown. These need to be replaced * with appropriate conversions. * * Throws H2OIllegalArgumentException() if the resulting domain exceeds * Categorical.MAX_CATEGORICAL_COUNT. * * @param src A {@link Vec} whose values will be used as the basis for a new categorical {@link Vec} * @return the resulting categorical Vec */ public static Vec toCategoricalVec(Vec src) { switch (src.get_type()) { case Vec.T_CAT: return src.makeCopy(src.domain()); case Vec.T_NUM: case Vec.T_BAD: return numericToCategorical(src); case Vec.T_STR: // PUBDEV-2204 return stringToCategorical(src); case Vec.T_TIME: // PUBDEV-2205 throw new H2OIllegalArgumentException("Changing time/date columns to a categorical" + " column has not been implemented yet."); case Vec.T_UUID: throw new H2OIllegalArgumentException("Changing UUID columns to a categorical" + " column has not been implemented yet."); default: throw new H2OIllegalArgumentException("Unrecognized column type " + src.get_type_str() + " given to toCategoricalVec()"); } } /** * Create a new {@link Vec} of categorical values from string {@link Vec}. * * FIXME: implement in more efficient way with Brandon's primitives for BufferedString manipulation * * @param vec a string {@link Vec} * @return a categorical {@link Vec} */ public static Vec stringToCategorical(Vec vec) { final String[] vecDomain = new CollectStringVecDomain().domain(vec); MRTask task = new MRTask() { transient private java.util.HashMap<String, Integer> lookupTable; @Override protected void setupLocal() { lookupTable = new java.util.HashMap<>(vecDomain.length); for (int i = 0; i < vecDomain.length; i++) { // FIXME: boxing lookupTable.put(vecDomain[i], i); } } @Override public void map(Chunk c, NewChunk nc) { BufferedString bs = new BufferedString(); for (int row = 0; row < c.len(); row++) { if (c.isNA(row)) { nc.addNA(); } else { c.atStr(bs, row); String strRepresentation = bs.toString(); if (strRepresentation.contains("\uFFFD")) { nc.addNum(lookupTable.get(bs.toSanitizedString()), 0); } else { nc.addNum(lookupTable.get(strRepresentation), 0); } } } } }; // Invoke tasks - one input vector, one ouput vector task.doAll(new byte[] {Vec.T_CAT}, vec); // Return result return task.outputFrame(null, null, new String[][] {vecDomain}).vec(0); } /** * Create a new {@link Vec} of categorical values from a numeric {@link Vec}. * * This currently only ingests a {@link Vec} of integers. * * Handling reals is PUBDEV-2207 * * @param src a numeric {@link Vec} * @return a categorical {@link Vec} */ public static Vec numericToCategorical(Vec src) { if (src.isInt()) { int min = (int) src.min(), max = (int) src.max(); // try to do the fast domain collection long dom[] = (min >= 0 && max < Integer.MAX_VALUE - 4) ? 
new CollectDomainFast(max).doAll(src).domain() : new CollectIntegerDomain().doAll(src).domain(); if (dom.length > Categorical.MAX_CATEGORICAL_COUNT) throw new H2OIllegalArgumentException("Column domain is too large to be represented as an categorical: " + dom.length + " > " + Categorical.MAX_CATEGORICAL_COUNT); return copyOver(src, Vec.T_CAT, dom); } else if(src.isNumeric()){ final double [] dom = new CollectDoubleDomain(null,10000).doAll(src).domain(); String [] strDom = new String[dom.length]; for(int i = 0; i < dom.length; ++i) strDom[i] = String.valueOf(dom[i]); Vec dst = src.makeZero(strDom); new MRTask(){ @Override public void map(Chunk c0, Chunk c1){ for(int r = 0; r < c0._len; ++r){ double d = c0.atd(r); if(Double.isNaN(d)) c1.setNA(r); else c1.set(r,Arrays.binarySearch(dom,d)); } } }.doAll(new Vec[]{src,dst}); assert dst.min() == 0; assert dst.max() == dom.length-1; return dst; } else throw new IllegalArgumentException("calling numericToCategorical conversion on a non numeric column"); } /** * Create a new {@link Vec} of numeric values from an existing {@link Vec}. * * This method accepts all {@link Vec} types as input. The original Vec is not mutated. * * If src is a categorical {@link Vec}, a copy is returned. * * If src is a string {@link Vec}, all values that can be are parsed into reals or integers, and all * others become NA. See stringToNumeric for parsing details. * * If src is a numeric {@link Vec}, a copy is made. * * If src is a time {@link Vec}, the milliseconds since the epoch are used to populate the new Vec. * * If src is a UUID {@link Vec}, the existing numeric storage is used to populate the new Vec. * * Throws H2OIllegalArgumentException() if the resulting domain exceeds * Categorical.MAX_CATEGORICAL_COUNT. * * @param src A {@link Vec} whose values will be used as the basis for a new numeric {@link Vec} * @return the resulting numeric {@link Vec} */ public static Vec toNumericVec(Vec src) { switch (src.get_type()) { case Vec.T_CAT: return categoricalToInt(src); case Vec.T_STR: return stringToNumeric(src); case Vec.T_NUM: case Vec.T_TIME: case Vec.T_UUID: return src.makeCopy(null, Vec.T_NUM); default: throw new H2OIllegalArgumentException("Unrecognized column type " + src.get_type_str() + " given to toNumericVec()"); } } public static Vec toIntegerVec(Vec src) { switch (src.get_type()) { case Vec.T_CAT: return categoricalToInt(src); case Vec.T_STR: return stringToInteger(src); case Vec.T_NUM: return numToInteger(src); default: throw new H2OIllegalArgumentException("Unrecognized column type " + src.get_type_str() + " given to toNumericVec()"); } } public static Vec numToInteger(final Vec src) { Vec res = new MRTask() { @Override public void map(Chunk srcV, NewChunk destV) { int cLen = srcV._len; for (int index=0; index<cLen; index++) { if (!srcV.isNA(index)) destV.addNum(Math.round(srcV.atd(index))); else destV.addNA(); } } }.doAll(Vec.T_NUM, src).outputFrame().anyVec(); return res; } /** * Create a new {@link Vec} of numeric values from a string {@link Vec}. Any rows that cannot be * converted to a number are set to NA. * * Currently only does basic numeric formats. No exponents, or hex values. Doesn't * even like commas or spaces. :( Needs love. 
Handling more numeric * representations is PUBDEV-2209 * * @param src a string {@link Vec} * @return a numeric {@link Vec} */ public static Vec stringToNumeric(Vec src) { if(!src.isString()) throw new H2OIllegalArgumentException("stringToNumeric conversion only works on string columns"); Vec res = new MRTask() { @Override public void map(Chunk chk, NewChunk newChk){ if (chk instanceof C0DChunk) { // all NAs for (int i=0; i < chk._len; i++) newChk.addNA(); } else { BufferedString tmpStr = new BufferedString(); for (int i=0; i < chk._len; i++) { if (!chk.isNA(i)) { tmpStr = chk.atStr(tmpStr, i); switch (tmpStr.getNumericType()) { case BufferedString.NA: newChk.addNA(); break; case BufferedString.INT: newChk.addNum(Long.parseLong(tmpStr.toString()),0); break; case BufferedString.REAL: newChk.addNum(Double.parseDouble(tmpStr.toString())); break; default: throw new H2OIllegalValueException("Received unexpected type when parsing a string to a number.", this); } } else newChk.addNA(); } } } }.doAll(Vec.T_NUM, src).outputFrame().anyVec(); assert res != null; return res; } public static Vec stringToInteger(Vec src) { if(!src.isString()) throw new H2OIllegalArgumentException("stringToNumeric conversion only works on string columns"); Vec res = new MRTask() { @Override public void map(Chunk chk, NewChunk newChk){ if (chk instanceof C0DChunk) { // all NAs for (int i=0; i < chk._len; i++) newChk.addNA(); } else { BufferedString tmpStr = new BufferedString(); for (int i=0; i < chk._len; i++) { if (!chk.isNA(i)) { tmpStr = chk.atStr(tmpStr, i); switch (tmpStr.getNumericType()) { case BufferedString.NA: newChk.addNA(); break; case BufferedString.INT: newChk.addNum(Long.parseLong(tmpStr.toString()),0); break; case BufferedString.REAL: long temp = Math.round(Double.parseDouble(tmpStr.toString())); newChk.addNum(temp); break; default: throw new H2OIllegalValueException("Received unexpected type when parsing a string to a number.", this); } } else newChk.addNA(); } } } }.doAll(Vec.T_NUM, src).outputFrame().anyVec(); assert res != null; return res; } /** * Create a new {@link Vec} of numeric values from a categorical {@link Vec}. * * If the first value in the domain of the src Vec is a stringified ints, * then it will use those ints. Otherwise, it will use the raw enumeration level mapping. * If the domain is stringified ints, then all of the domain must be able to be parsed as * an int. If it cannot be parsed as such, a NumberFormatException will be caught and * rethrown as an H2OIllegalArgumentException that declares the illegal domain value. * Otherwise, the this pointer is copied to a new Vec whose domain is null. * * The magic of this method should be eliminated. It should just use enumeration level * maps. If the user wants domains to be used, call categoricalDomainsToNumeric(). * PUBDEV-2209 * * @param src a categorical {@link Vec} * @return a numeric {@link Vec} */ public static Vec categoricalToInt(final Vec src) { if( src.isInt() && (src.domain()==null || src.domain().length == 0)) return copyOver(src, Vec.T_NUM, null); if( !src.isCategorical() ) throw new IllegalArgumentException("categoricalToInt conversion only works on categorical columns."); // check if the 1st lvl of the domain can be parsed as int boolean useDomain=false; Vec newVec = copyOver(src, Vec.T_NUM, null); try { Integer.parseInt(src.domain()[0]); useDomain=true; } catch (NumberFormatException e) { // makeCopy and return... 
} if( useDomain ) { new MRTask() { @Override public void map(Chunk c) { for (int i=0;i<c._len;++i) if( !c.isNA(i) ) c.set(i, Integer.parseInt(src.domain()[(int)c.at8(i)])); } }.doAll(newVec); } return newVec; } /** * Create a new {@link Vec} of string values from an existing {@link Vec}. * * This method accepts all {@link Vec} types as input. The original Vec is not mutated. * * If src is a string {@link Vec}, a copy of the {@link Vec} is made. * * If src is a categorical {@link Vec}, levels are dropped, and the {@link Vec} only records the string. * * For all numeric {@link Vec}s, the number is converted to a string. * * For all UUID {@link Vec}s, the hex representation is stored as a string. * * @param src A {@link Vec} whose values will be used as the basis for a new string {@link Vec} * @return the resulting string {@link Vec} */ public static Vec toStringVec(Vec src) { switch (src.get_type()) { case Vec.T_STR: return src.makeCopy(); case Vec.T_CAT: return categoricalToStringVec(src); case Vec.T_UUID: return UUIDToStringVec(src); case Vec.T_TIME: case Vec.T_NUM: case Vec.T_BAD: return numericToStringVec(src); default: throw new H2OIllegalArgumentException("Unrecognized column type " + src.get_type_str() + " given to toStringVec()."); } } /** * Create a new {@link Vec} of string values from a categorical {@link Vec}. * * Transformation is done by a {@link Categorical2StrChkTask} which provides a mapping * between values - without copying the underlying data. * * @param src a categorical {@link Vec} * @return a string {@link Vec} */ public static Vec categoricalToStringVec(Vec src) { if( !src.isCategorical() ) throw new H2OIllegalValueException("Can not convert a non-categorical column" + " using categoricalToStringVec().",src); return new Categorical2StrChkTask(src.domain()).doAll(Vec.T_STR,src).outputFrame().anyVec(); } private static class Categorical2StrChkTask extends MRTask<Categorical2StrChkTask> { final String[] _domain; Categorical2StrChkTask(String[] domain) { _domain=domain; } @Override public void map(Chunk c, NewChunk nc) { for(int i=0;i<c._len;++i) if (!c.isNA(i)) nc.addStr(_domain == null ? "" + c.at8(i) : _domain[(int) c.at8(i)]); else nc.addNA(); } } /** * Create a new {@link Vec} of string values from a numeric {@link Vec}. * * Currently only uses a default pretty printer. Would be better if * it accepted a format string PUBDEV-2211 * * @param src a numeric {@link Vec} * @return a string {@link Vec} */ public static Vec numericToStringVec(Vec src) { if (src.isCategorical() || src.isUUID()) throw new H2OIllegalValueException("Cannot convert a non-numeric column" + " using numericToStringVec() ",src); Vec res = new MRTask() { @Override public void map(Chunk chk, NewChunk newChk) { if (chk instanceof C0DChunk) { // all NAs for (int i=0; i < chk._len; i++) newChk.addNA(); } else { for (int i=0; i < chk._len; i++) { if (!chk.isNA(i)) newChk.addStr(PrettyPrint.number(chk, chk.atd(i), 4)); else newChk.addNA(); } } } }.doAll(Vec.T_STR, src).outputFrame().anyVec(); assert res != null; return res; } /** * Create a new {@link Vec} of string values from a UUID {@link Vec}. * * String {@link Vec} is the standard hexadecimal representations of a UUID. 
* * @param src a UUID {@link Vec} * @return a string {@link Vec} */ public static Vec UUIDToStringVec(Vec src) { if( !src.isUUID() ) throw new H2OIllegalArgumentException("UUIDToStringVec() conversion only works on UUID columns"); Vec res = new MRTask() { @Override public void map(Chunk chk, NewChunk newChk) { if (chk instanceof C0DChunk) { // all NAs for (int i=0; i < chk._len; i++) newChk.addNA(); } else { for (int i=0; i < chk._len; i++) { if (!chk.isNA(i)) newChk.addStr(PrettyPrint.UUID(chk.at16l(i), chk.at16h(i))); else newChk.addNA(); } } } }.doAll(Vec.T_STR,src).outputFrame().anyVec(); assert res != null; return res; } /** * Create a new {@link Vec} of numeric values from a categorical {@link Vec}. * * Numeric values are generated explicitly from the domain values, and not the * enumeration levels. If a domain value cannot be translated as a number, that * domain and all values for that domain will be NA. * * @param src a categorical {@link Vec} * @return a numeric {@link Vec} */ public static Vec categoricalDomainsToNumeric(final Vec src) { if( !src.isCategorical() ) throw new H2OIllegalArgumentException("categoricalToNumeric() conversion only works on categorical columns"); // check if the 1st lvl of the domain can be parsed as int return new MRTask() { @Override public void map(Chunk c) { for (int i=0;i<c._len;++i) if( !c.isNA(i) ) c.set(i, Integer.parseInt(src.domain()[(int)c.at8(i)])); } }.doAll(Vec.T_NUM, src).outputFrame().anyVec(); } public static class CollectDoubleDomain extends MRTask<CollectDoubleDomain> { final double [] _sortedKnownDomain; private IcedHashMap<IcedDouble,IcedInt> _uniques; // new uniques final int _maxDomain; final IcedInt _placeHolder = new IcedInt(1); public CollectDoubleDomain(double [] knownDomain, int maxDomainSize) { _maxDomain = maxDomainSize; _sortedKnownDomain = knownDomain == null?null:knownDomain.clone(); if(_sortedKnownDomain != null && !ArrayUtils.isSorted(knownDomain)) Arrays.sort(_sortedKnownDomain); } @Override public void setupLocal(){ _uniques = new IcedHashMap<>(); } public double [] domain(){ double [] res = MemoryManager.malloc8d(_uniques.size()); int i = 0; for(IcedDouble v:_uniques.keySet()) res[i++] = v._val; Arrays.sort(res); return res; } public String[] stringDomain(boolean integer){ double[] domain = domain(); String[] stringDomain = new String[domain.length]; for(int i=0; i < domain.length; i++){ if(integer) { stringDomain[i] = String.valueOf((int) domain[i]); } else { stringDomain[i] = String.valueOf(domain[i]); } } return stringDomain; } private IcedDouble addValue(IcedDouble val){ if(Double.isNaN(val._val)) return val; if(_sortedKnownDomain != null && Arrays.binarySearch(_sortedKnownDomain,val._val) >= 0) return val; // already known value if (!_uniques.containsKey(val)) { _uniques.put(val,_placeHolder); val = new IcedDouble(0); if(_uniques.size() > _maxDomain) onMaxDomainExceeded(_maxDomain, _uniques.size()); } return val; } @Override public void map(Chunk ys) { IcedDouble val = new IcedDouble(0); for( int row=ys.nextNZ(-1); row< ys._len; row = ys.nextNZ(row) ) val = addValue(val.setVal(ys.atd(row))); if(ys.isSparseZero()) addValue(val.setVal(0)); } @Override public void reduce(CollectDoubleDomain mrt) { if( _uniques != mrt._uniques ) _uniques.putAll(mrt._uniques); if(_uniques.size() > _maxDomain) onMaxDomainExceeded(_maxDomain, _uniques.size()); } protected void onMaxDomainExceeded(int maxDomainSize, int currentSize) { throw new RuntimeException("Too many unique values. 
Expected |uniques| < " + maxDomainSize + ", already got " + currentSize); } } /** Collect numeric domain of given {@link Vec} * A map-reduce task to collect up the unique values of an integer {@link Vec} * and returned as the domain for the {@link Vec}. * */ public static class CollectIntegerDomain extends MRTask<CollectIntegerDomain> { transient NonBlockingHashMapLong<String> _uniques; @Override protected void setupLocal() { _uniques = new NonBlockingHashMapLong<>(); } @Override public void map(Chunk ys) { for( int row=0; row< ys._len; row++ ) if( !ys.isNA(row) ) _uniques.put(ys.at8(row), ""); } @Override public void reduce(CollectIntegerDomain mrt) { if( _uniques != mrt._uniques ) _uniques.putAll(mrt._uniques); } public final AutoBuffer write_impl( AutoBuffer ab ) { return ab.putA8(_uniques==null ? null : _uniques.keySetLong()); } public final CollectIntegerDomain read_impl(AutoBuffer ab ) { long ls[] = ab.getA8(); assert _uniques == null || _uniques.size()==0; // Only receiving into an empty (shared) NBHM _uniques = new NonBlockingHashMapLong<>(); if( ls != null ) for( long l : ls ) _uniques.put(l, ""); return this; } @Override public final void copyOver(CollectIntegerDomain that) { _uniques = that._uniques; } /** Returns exact numeric domain of given {@link Vec} computed by this task. * The domain is always sorted. Hence: * domain()[0] - minimal domain value * domain()[domain().length-1] - maximal domain value */ public long[] domain() { long[] dom = _uniques.keySetLong(); Arrays.sort(dom); return dom; } } /** * Create a new categorical {@link Vec} with deduplicated domains from a categorical {@link Vec}. * * Categoricals may have the same values after munging, and should have the same domain index in the numerical chunk * representation. Unify categoricals that are the same by remapping their domain indices. * * Could be more efficient with a vec copy and replace domain indices as needed. PUBDEV-2587 */ public static class DomainDedupe extends MRTask<DomainDedupe> { private final HashMap<Integer, Integer> _oldToNewDomainIndex; public DomainDedupe(HashMap<Integer, Integer> oldToNewDomainIndex) {_oldToNewDomainIndex = oldToNewDomainIndex; } @Override public void map(Chunk c, NewChunk nc) { for( int row=0; row < c._len; row++) { if ( !c.isNA(row) ) { int oldDomain = (int) c.at8(row); nc.addNum(_oldToNewDomainIndex.get(oldDomain)); } else { nc.addNA(); } } } public static Vec domainDeduper(Vec vec, HashMap<String, ArrayList<Integer>> substringToOldDomainIndices) { HashMap<Integer, Integer> oldToNewDomainIndex = new HashMap<>(); int newDomainIndex = 0; SortedSet<String> alphabetizedSubstrings = new TreeSet<>(substringToOldDomainIndices.keySet()); for (String sub : alphabetizedSubstrings) { for (int oldDomainIndex : substringToOldDomainIndices.get(sub)) { oldToNewDomainIndex.put(oldDomainIndex, newDomainIndex); } newDomainIndex++; } VecUtils.DomainDedupe domainDedupe = new VecUtils.DomainDedupe(oldToNewDomainIndex); String[][] dom2D = {Arrays.copyOf(alphabetizedSubstrings.toArray(), alphabetizedSubstrings.size(), String[].class)}; return domainDedupe.doAll(new byte[]{Vec.T_CAT}, vec).outputFrame(null, null, dom2D).anyVec(); } } // >11x faster than CollectIntegerDomain /** (Optimized for positive ints) Collect numeric domain of given {@link Vec} * A map-reduce task to collect up the unique values of an integer {@link Vec} * and returned as the domain for the {@link Vec}. 
* */ public static class CollectDomainFast extends MRTask<CollectDomainFast> { private final int _s; private boolean[] _u; private long[] _d; public CollectDomainFast(int s) { _s=s; } @Override protected void setupLocal() { _u= MemoryManager.mallocZ(_s + 1); } @Override public void map(Chunk ys) { for( int row=0; row< ys._len; row++ ) if( !ys.isNA(row) ) _u[(int)ys.at8(row)]=true; } @Override public void reduce(CollectDomainFast mrt) { if( _u != mrt._u ) ArrayUtils.or(_u, mrt._u);} @Override protected void postGlobal() { int c=0; for (boolean b : _u) if(b) c++; _d=MemoryManager.malloc8(c); int id=0; for (int i = 0; i < _u.length;++i) if (_u[i]) _d[id++]=i; Arrays.sort(_d); //is this necessary? } /** Returns exact numeric domain of given {@link Vec} computed by this task. * The domain is always sorted. Hence: * domain()[0] - minimal domain value * domain()[domain().length-1] - maximal domain value */ public long[] domain() { return _d; } } /** * Collects current domain of a categorical vector in an optimized way. Original vector's domain is not modified. * * @param vec A categorical vector to collect domain of. * @return An array of String with the domain of given vector - possibly empty if the domain is empty. Never null. * @throws IllegalArgumentException If the given vector is not categorical */ public static String[] collectDomainFast(final Vec vec) throws IllegalArgumentException { if (!vec.isCategorical()) throw new IllegalArgumentException("Unable to collect domain on a non-categorical vector."); // Indices of the new, reduced domain. Still point to the original domain. final long[] newDomainIndices = new VecUtils.CollectDomainFast((int) vec.max()) .doAll(vec) .domain(); final String[] originalDomain = vec.domain(); final String[] newDomain = new String[newDomainIndices.length]; for (int i = 0; i < newDomain.length; ++i) { newDomain[i] = originalDomain[(int) newDomainIndices[i]]; } return newDomain; } public static class CollectDomainWeights extends MRTask<CollectDomainWeights> { private final int _s; // OUT private double[] _d; public CollectDomainWeights(int s) { _s = s; } @Override public void map(Chunk c, Chunk weights) { _d = MemoryManager.malloc8d(_s + 1); for (int row = 0; row < c._len; row++) if (!c.isNA(row)) { double weight = weights != null ? weights.atd(row) : 1; int level = (int) c.at8(row); _d[level] += weight; } } @Override public void map(Chunk c) { map(c, (Chunk) null); } @Override public void reduce(CollectDomainWeights mrt) { if (mrt._d != null) { ArrayUtils.add(_d, mrt._d); } } } /** * Collect the frequencies of each level in a categorical Vec. 
* * @param vec categorical Vec * @param weights optional weight Vec * @return (weighted) frequencies of each level of the input Vec */ public static double[] collectDomainWeights(final Vec vec, final Vec weights) { if (!vec.isCategorical()) throw new IllegalArgumentException("Unable to collect domain on a non-categorical vector."); final CollectDomainWeights cdw = new CollectDomainWeights((int) vec.max()); if (weights != null) { if (weights.naCnt() > 0) { throw new IllegalArgumentException("The vector of weights cannot contain any NAs"); } if (weights.min() < 0) { throw new IllegalArgumentException("Negative weights are not allowed."); } return cdw.doAll(vec, weights)._d; } else { return cdw.doAll(vec)._d; } } public static void deleteVecs(Vec[] vs, int cnt) { Futures f = new Futures(); for (int i =0; i < cnt; i++) vs[cnt].remove(f); f.blockForPending(); } private static Vec copyOver(Vec src, byte type, long[] domain) { String[][] dom = new String[1][]; dom[0]=domain==null?null:ArrayUtils.toString(domain); return new CPTask(domain).doAll(type, src).outputFrame(null,dom).anyVec(); } private static class CPTask extends MRTask<CPTask> { private final long[] _domain; CPTask(long[] domain) { _domain = domain;} @Override public void map(Chunk c, NewChunk nc) { for(int i=0;i<c._len;++i) { if( c.isNA(i) ) { nc.addNA(); continue; } if( _domain == null ) nc.addNum(c.at8(i)); else { long num = Arrays.binarySearch(_domain,c.at8(i)); // ~24 hits in worst case for 10M levels if( num < 0 ) throw new IllegalArgumentException("Could not find the categorical value!"); nc.addNum(num); } } } } private static class CollectStringVecDomain extends MRTask<CollectStringVecDomain> { private IcedHashMap<String, IcedInt> _uniques = null; private final IcedInt _placeHolder = new IcedInt(1); @Override protected void setupLocal() { _uniques = new IcedHashMap<>(); } @Override public void map(Chunk c) { BufferedString bs = new BufferedString(); for (int i = 0; i < c.len(); i++) { if (!c.isNA(i)) { c.atStr(bs, i); final String strRepresentation = bs.toString(); if (strRepresentation.contains("\uFFFD")) { _uniques.put(bs.toSanitizedString(), _placeHolder); } else { _uniques.put(strRepresentation, _placeHolder); } } } } @Override public void reduce(CollectStringVecDomain mrt) { if (_uniques != mrt._uniques) { // this is not local reduce _uniques.putAll(mrt._uniques); } } public String[] domain(Vec vec) { assert vec.isString() : "String vector expected. 
Unsupported vector type: " + vec.get_type_str(); this.doAll(vec); return domain(); } public String[] domain() { String[] dom = _uniques.keySet().toArray(new String[_uniques.size()]); Arrays.sort(dom); return dom; } } public static int [] getLocalChunkIds(Vec v){ if(v._cids != null) return v._cids; int [] res = new int[Math.max(v.nChunks()/H2O.CLOUD.size(),1)]; int j = 0; for(int i = 0; i < v.nChunks(); ++i){ if(v.isHomedLocally(i)) { if(res.length == j) res = Arrays.copyOf(res,2*res.length); res[j++] = i; } } return (v._cids = j == res.length?res:Arrays.copyOf(res,j)); } /** * Compute the mean (weighted) response per categorical level * Skip NA values (those are already a separate bucket in the tree building histograms, for which this is designed) */ public static class MeanResponsePerLevelTask extends MRTask<MeanResponsePerLevelTask> { // OUTPUT public double[] meanWeightedResponse; public double meanOverallWeightedResponse; // Internal private double[] wcounts; private int _len; public MeanResponsePerLevelTask(int len) { _len = len; } @Override public void map(Chunk c, Chunk w, Chunk r) { wcounts = new double[_len]; // no larger than 1M elements, so OK to replicate per thread (faster) meanWeightedResponse = new double[_len]; for (int i=0; i<c._len; ++i) { if (c.isNA(i)) continue; int level = (int)c.at8(i); if (w.isNA(i)) continue; double weight = w.atd(i); if (weight == 0) continue; if (r.isNA(i)) continue; double response = r.atd(i); wcounts[level] += weight; meanWeightedResponse[level] += weight*response; } } @Override public void reduce(MeanResponsePerLevelTask mrt) { ArrayUtils.add(wcounts, mrt.wcounts); ArrayUtils.add(meanWeightedResponse, mrt.meanWeightedResponse); mrt.wcounts = null; mrt.meanWeightedResponse = null; } @Override protected void postGlobal() { meanOverallWeightedResponse = 0; double sum = 0; for (int i = 0; i< meanWeightedResponse.length; ++i) { if (wcounts[i] != 0) { meanWeightedResponse[i] = meanWeightedResponse[i] / wcounts[i]; meanOverallWeightedResponse += meanWeightedResponse[i]; sum += wcounts[i]; } } meanOverallWeightedResponse /= sum; } } /** * Reorder an integer (such as Enum storage) Vec using an int -> int mapping */ public static class ReorderTask extends MRTask<ReorderTask> { private int[] _map; public ReorderTask(int[] mapping) { _map = mapping; } @Override public void map(Chunk c, NewChunk nc) { for (int i=0;i<c._len;++i) { if (c.isNA(i)) nc.addNA(); else nc.addNum(_map[(int)c.at8(i)], 0); } } } /** * Remaps vec's current domain levels to a new set of values. The cardinality of the new set of domain values might be * less than or equal to the cardinality of current domain values. The new domain set is automatically extracted from * the given mapping array. * <p> * Changes are made to this very vector, no copying is done. If you need the original vector to remain unmodified, * please make sure to copy it first. * * @param newDomainValues An array of new domain values. For each old domain value, there must be a new value in * this array. The value at each index of newDomainValues array represents the new mapping for * this very index. May not be null. * @param originalVec Vector with values corresponding to the original domain to be remapped. Remains unmodified. * @return A new instance of categorical {@link Vec} with exactly the same length as the original vector supplied. * Its domain values are re-mapped. 
* @throws UnsupportedOperationException When invoked on non-categorical vector * @throws IllegalArgumentException Length of newDomainValues must be equal to length of current domain values of * this vector */ public static Vec remapDomain(final String[] newDomainValues, final Vec originalVec) throws UnsupportedOperationException, IllegalArgumentException { // Sanity checks Objects.requireNonNull(newDomainValues); if (originalVec.domain() == null) throw new UnsupportedOperationException("Unable to remap domain values on a non-categorical vector."); if (newDomainValues.length != originalVec.domain().length) { throw new IllegalArgumentException(String.format("For each of original domain levels, there must be a new mapping." + "There are %o domain levels, however %o mappings were supplied.", originalVec.domain().length, newDomainValues.length)); } // Create a map of new domain values pointing to indices in the array of old domain values in this vec final Map<String, Set<Integer>> map = new HashMap<>(); for (int i = 0; i < newDomainValues.length; i++) { Set<Integer> indices = map.get(newDomainValues[i]); if (indices == null) { indices = new HashSet<>(1); indices.add(i); map.put(newDomainValues[i], indices); } else { indices.add(i); } } // Map from the old domain to the new domain // There might actually be less domain levels after the transformation final int[] indicesMap = MemoryManager.malloc4(originalVec.domain().length); final String[] reducedDomain = new String[map.size()]; int reducedDomainIdx = 0; for (String e : map.keySet()) { final Set<Integer> oldDomainIndices = map.get(e); reducedDomain[reducedDomainIdx] = e; for (int idx : oldDomainIndices) { indicesMap[idx] = reducedDomainIdx; } reducedDomainIdx++; } final RemapDomainTask remapDomainTask = new RemapDomainTask(indicesMap) .doAll(new byte[]{Vec.T_CAT}, originalVec); // Out of the mist of the RemapDomainTask comes a vector with remapped domain values assert remapDomainTask.outputFrame().numCols() == 1; Vec remappedVec = remapDomainTask.outputFrame().vec(0); remappedVec.setDomain(reducedDomain); return remappedVec; } /** * Maps old categorical values (indices to old array of domain levels) to new categorical values * (indices to a new array with new domain levels). 
Uses a simple array for mapping, */ private static class RemapDomainTask extends MRTask<RemapDomainTask> { private final int[] _domainIndicesMap; public RemapDomainTask(int[] domainIndicesMap) { _domainIndicesMap = domainIndicesMap; } @Override public void map(Chunk c, NewChunk nc) { for (int i = 0; i < c.len(); i++) { nc.addCategorical(_domainIndicesMap[(int) c.at8(i)]); } } } /** * DotProduct of two Vecs of the same length */ public static class DotProduct extends MRTask<DotProduct> { public double result; @Override public void map(Chunk[] bvs) { result = 0; int len = bvs[0]._len; for (int i = 0; i < len; i++) { result += bvs[0].atd(i) * bvs[1].atd(i); } } @Override public void reduce(DotProduct mrt) { result += mrt.result; } } public static class SequenceProduct extends MRTask<SequenceProduct> { @Override public void map(Chunk[] c, NewChunk[] nc) { for (int i = 0; i < c[0]._len; i++) { nc[0].addNum(c[0].atd(i) * c[1].atd(i)); } } } /** * Randomly shuffle a Vec using Fisher Yates shuffle * https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle */ public static class ShuffleVecTask extends MRTask<ShuffleVecTask> { private final long _seed; private final Vec _vec; private transient int[] _localChunks; private transient int[] _permutatedChunks; public ShuffleVecTask(final Vec vec, final long seed) { super(); _seed = seed; _vec = vec; } @Override protected void setupLocal() { _localChunks = VecUtils.getLocalChunkIds(_vec); _permutatedChunks = _localChunks.clone(); permute(_permutatedChunks, null); } private void permute(int[] arr, Random rng) { if (null == rng) rng = getRNG(_seed); for (int i = arr.length - 1; i > 0; i--) { int j = rng.nextInt(i + 1); final int old = arr[i]; arr[i] = arr[j]; arr[j] = old; } } @Override public void map(Chunk _cs, NewChunk nc) { Random rng = getRNG(_seed + _cs.start()); Chunk cs = _vec.chunkForChunkIdx(_permutatedChunks[Arrays.binarySearch(_localChunks, _cs.cidx())]); int[] permutedRows = ArrayUtils.seq(0, cs._len); permute(permutedRows, rng); for (int row : permutedRows) { if (cs.isNA(row)) { nc.addNA(); } else { switch (_cs.vec().get_type()) { case Vec.T_BAD: break; case Vec.T_UUID: nc.addUUID(cs, row); break; case Vec.T_STR: nc.addStr(cs, row); break; case Vec.T_NUM: nc.addNum(cs.atd(row)); break; case Vec.T_CAT: nc.addCategorical((int) cs.at8(row)); break; case Vec.T_TIME: nc.addNum(cs.at8(row), 0); break; default: throw new IllegalArgumentException("Unsupported vector type: " + cs.vec().get_type()); } } } } } /** * Randomly shuffle a Vec. 
* @param origVec original Vec * @param seed seed for random generator * @return shuffled Vec */ public static Vec shuffleVec(final Vec origVec, final long seed) { Vec v = new ShuffleVecTask(origVec, seed).doAll( origVec.get_type(), origVec).outputFrame().anyVec(); if (origVec.isCategorical()) v.setDomain(origVec.domain()); return v; } public static class MinMaxTask extends MRTask<MinMaxTask> { public double _min = Double.POSITIVE_INFINITY; public double _max = Double.NEGATIVE_INFINITY; @Override public void map(Chunk xs, Chunk weights) { double min = _min; double max = _max; for (int i = 0; i < xs._len; i++) { if (weights.atd(i) != 0) { double value = xs.atd(i); if (value < min) min = value; if (value > max) max = value; } } _min = min; _max = max; } @Override public void reduce(MinMaxTask mrt) { if (!mrt.isEmpty()) { _min = Math.min(_min, mrt._min); _max = Math.max(_max, mrt._max); } } private boolean isEmpty() { return _min == Double.POSITIVE_INFINITY && _max == Double.NEGATIVE_INFINITY; } } public static MinMaxTask findMinMax(Vec numVec, Vec weightVec) { return new MinMaxTask().doAll(numVec, weightVec); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/WaterLogger.java
package water.util; import water.logging.Logger; public class WaterLogger implements Logger { @Override public void trace(String message) { Log.trace(message); } @Override public void debug(String message) { Log.debug(message); } @Override public void info(String message) { Log.info(message); } @Override public void warn(String message) { Log.warn(message); } @Override public void error(String message) { Log.err(message); } @Override public void fatal(String message) { Log.fatal(message); } @Override public boolean isTraceEnabled() { return (Log.getLogLevel() >= Log.TRACE ? true : false); } @Override public boolean isDebugEnabled() { return (Log.getLogLevel() >= Log.DEBUG ? true : false); } @Override public boolean isInfoEnabled() { return (Log.getLogLevel() >= Log.INFO ? true : false); } @Override public boolean isWarnEnabled() { return (Log.getLogLevel() >= Log.WARN ? true : false); } @Override public boolean isErrorEnabled() { return (Log.getLogLevel() >= Log.ERRR ? true : false); } @Override public boolean isFatalEnabled() { return (Log.getLogLevel() >= Log.FATAL ? true : false); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/WaterMeterCpuTicks.java
package water.util; import water.*; public class WaterMeterCpuTicks extends Iced { // Input public int nodeidx; // Output public long[][] cpu_ticks; public void doIt() { H2ONode node = H2O.CLOUD._memary[nodeidx]; GetTicksTask ppt = new GetTicksTask(); Log.trace("GetTicksTask starting to node " + nodeidx + "..."); // Synchronous RPC call to get ticks from remote (possibly this) node. new RPC<>(node, ppt).call().get(); Log.trace("GetTicksTask completed to node " + nodeidx); cpu_ticks = ppt._cpuTicks; } private static class GetTicksTask extends DTask<GetTicksTask> { private long[][] _cpuTicks; public GetTicksTask() { super(H2O.GUI_PRIORITY); _cpuTicks = null; } @Override public void compute2() { LinuxProcFileReader lpfr = new LinuxProcFileReader(); lpfr.read(); if (lpfr.valid()) { _cpuTicks = lpfr.getCpuTicks(); } else { // In the case where there isn't any tick information, the client receives a json // response object containing an array of length 0. // // e.g. // { cpuTicks: [] } _cpuTicks = new long[0][0]; } tryComplete(); } } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/WaterMeterIo.java
package water.util; import water.*; import water.api.API; import water.api.schemas3.SchemaV3; import water.persist.PersistManager; public class WaterMeterIo extends Iced { public static class IoStatsEntry extends SchemaV3<Iced, IoStatsEntry> { @API(help="Back end type", direction = API.Direction.OUTPUT) public String backend; @API(help="Number of store events", direction = API.Direction.OUTPUT) public long store_count; @API(help="Cumulative stored bytes", direction = API.Direction.OUTPUT) public long store_bytes; @API(help="Number of delete events", direction = API.Direction.OUTPUT) public long delete_count; @API(help="Number of load events", direction = API.Direction.OUTPUT) public long load_count; @API(help="Cumulative loaded bytes", direction = API.Direction.OUTPUT) public long load_bytes; } // Input public int nodeidx; // Output public IoStatsEntry persist_stats[]; public void doIt(boolean aggregateAllNodes) { if (! aggregateAllNodes) { doIt(nodeidx); return; } for (int i = 0; i < H2O.CLOUD.size(); i++) { WaterMeterIo io = new WaterMeterIo(); io.doIt(i); if (i == 0) { persist_stats = new IoStatsEntry[io.persist_stats.length]; for (int j = 0; j < persist_stats.length; j++) { persist_stats[j] = new IoStatsEntry(); persist_stats[j].backend = io.persist_stats[j].backend; } } for (int j = 0; j < persist_stats.length; j++) { persist_stats[j].store_count += io.persist_stats[j].store_count; persist_stats[j].store_bytes += io.persist_stats[j].store_bytes; persist_stats[j].delete_count += io.persist_stats[j].delete_count; persist_stats[j].load_count += io.persist_stats[j].load_count; persist_stats[j].load_bytes += io.persist_stats[j].load_bytes; } } } private void doIt(int idx) { H2ONode node = H2O.CLOUD._memary[idx]; GetTask t = new GetTask(); Log.trace("IO GetTask starting to node " + idx + "..."); // Synchronous RPC call to get ticks from remote (possibly this) node. new RPC<>(node, t).call().get(); Log.trace("IO GetTask completed to node " + idx); persist_stats = t._persist_stats; } private static class GetTask extends DTask<GetTask> { private IoStatsEntry _persist_stats[]; public GetTask() { super(H2O.MIN_HI_PRIORITY); _persist_stats = null; } @Override public void compute2() { PersistManager.PersistStatsEntry s[] = H2O.getPM().getStats(); int[] backendsToQuery = new int[] {Value.NFS, Value.HDFS, Value.S3, Value.ICE}; _persist_stats = new IoStatsEntry[backendsToQuery.length]; for (int i = 0; i < _persist_stats.length; i++) { int j = backendsToQuery[i]; _persist_stats[i] = new IoStatsEntry(); IoStatsEntry dest_e = _persist_stats[i]; switch (j) { case Value.ICE: dest_e.backend = "ice"; break; case Value.HDFS: dest_e.backend = "hdfs"; break; case Value.S3: dest_e.backend = "s3"; break; case Value.NFS: dest_e.backend = "local"; break; default: throw H2O.fail(); } PersistManager.PersistStatsEntry src_e = s[j]; dest_e.store_count = src_e.store_count.get(); dest_e.store_bytes = src_e.store_bytes.get(); dest_e.delete_count = src_e.delete_count.get(); dest_e.load_count = src_e.load_count.get(); dest_e.load_bytes = src_e.load_bytes.get(); } int[] backendsToZeroCheck = new int[] {0, 5, 6, 7}; for (int j : backendsToZeroCheck) { PersistManager.PersistStatsEntry src_e = s[j]; assert(src_e.store_count.get() == 0); assert(src_e.store_bytes.get() == 0); assert(src_e.delete_count.get() == 0); assert(src_e.load_count.get() == 0); assert(src_e.load_bytes.get() == 0); } tryComplete(); } } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/annotations/IgnoreJRERequirement.java
package water.util.annotations; /** * The file was copied from animal-sniffer-annotations project: * https://github.com/mojohaus/animal-sniffer/ to avoid another dependency * to the h2o-core project. * * The animal-sniffer-annotations is release under MIT licence. * */ import java.lang.annotation.Documented; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.Target; import static java.lang.annotation.RetentionPolicy.CLASS; @Retention(CLASS) @Documented @Target({ElementType.METHOD, ElementType.CONSTRUCTOR, ElementType.TYPE}) public @interface IgnoreJRERequirement { }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/fp/FP.java
package water.util.fp; import java.util.*; /** * Elements of Functional Programming (known as FP) in Java * * @see <a href="https://en.wikipedia.org/wiki/Functional_programming">Wikipedia</a> * for details */ public class FP { // the following two borrowed from Java 7 library. public static boolean equal(Object a, Object b) { return (a == b) || (a != null && a.equals(b)); } public static int hashCode(Object o) { return o != null ? o.hashCode() : 0; } interface Option<T> extends Iterable<T> { boolean isEmpty(); boolean nonEmpty(); <U> Option<U> flatMap(Function<T, Option<U>> f); } public final static Option<?> None = new Option<Object>() { @Override public boolean isEmpty() { return true; } @Override public boolean nonEmpty() { return false; } @SuppressWarnings("unchecked") @Override public <U> Option<U> flatMap(Function<Object, Option<U>> f) { return (Option<U>) None; } @Override public Iterator<Object> iterator() { return Collections.emptyList().iterator(); } @Override public String toString() { return "None"; } @Override public int hashCode() { return -1; } }; public final static class Some<T> implements Option<T> { private List<T> contents; public Some(T t) { contents = Collections.singletonList(t); } @Override public boolean isEmpty() { return false; } @Override public boolean nonEmpty() { return true; } @Override public <U> Option<U> flatMap(Function<T, Option<U>> f) { return f.apply(get()); } @Override public Iterator<T> iterator() { return contents.iterator(); } @SuppressWarnings("unchecked") public T get() { return contents.get(0); } @Override public String toString() { return "Some(" + get() + ")"; } @Override public boolean equals(Object o) { return this == o || (o instanceof Some && equal(get(), (((Some<?>) o).get()))); } @Override public int hashCode() { return FP.hashCode(get()); } } public static <T> Option<T> Some(T t) { return new Some<>(t); } @SuppressWarnings("unchecked") public static <T> Option<T> Option(T t) { return t == null ? (Option<T>)None : new Some(t); } @SuppressWarnings("unchecked") public static <T> Option<T> flatten(Option<Option<T>> optOptT) { return optOptT.isEmpty() ? (Option<T>)None : ((Some<Option<T>>)optOptT).get(); } public static <T> Option<T> headOption(Iterator<T> it) { return Option(it.hasNext() ? it.next() : null); } public static <T> Option<T> headOption(Iterable<T> ts) { return headOption(ts.iterator()); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/fp/Foldable.java
package water.util.fp; import java.io.Serializable; /** * Represents a folding operation applicable to streams or collection * * Initial value of type Y is the value that is returned on an empty collection. * Apply is used on a pair of values to produce the next value. * Apply takes a value of argument type X and a value of result type Y. * * Having this, you can define reduction on a collection or a stream. * This is the core of map/reduce. * * @see <a href="https://en.wikipedia.org/wiki/Fold_(higher-order_function)">wikipedia</a> for details. */ public interface Foldable<X, Y> extends Serializable { Y initial(); Y apply(Y y, X x); }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/fp/Function.java
package water.util.fp; import java.io.Serializable; /** * Represents a single-argument function * * We could as well use Google guava library, but Guava's functions are not serializable. * We need serializable functions, to be able to pass them over the cloud. * * A function, in abstract settings, is something that takes a value of a given type (X) and * returns a value of (another) given type (Y). * @see <a href="https://en.wikipedia.org/wiki/Function_(mathematics)">wikipedia</a> for details. * */ public interface Function<X, Y> extends Serializable { Y apply(X x); }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/fp/Function2.java
package water.util.fp; import java.io.Serializable; /** * Represents a two-argument function * * We could as well use Google guava library, but Guava's functions are not serializable. * We need serializable functions, to be able to pass them over the cloud. * * A two-argument function, in abstract settings, is something that takes values of given type (X and Y) and returns a value of a given type (Z). * @see <a href="https://en.wikipedia.org/wiki/Function_(mathematics)">wikipedia</a> for details. */ public interface Function2<X, Y, Z> extends Serializable { Z apply(X x, Y y); }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/fp/Function3.java
package water.util.fp; import java.io.Serializable; /** * Represents a three-argument function * * We could as well use Google guava library, but Guava's functions are not serializable. * We need serializable functions, to be able to pass them over the cloud. * * A three-argument function, in abstract settings, is something that takes values of given type (X, Y and Z) and returns a value of a given type (T). * @see <a href="https://en.wikipedia.org/wiki/Function_(mathematics)">wikipedia</a> for details. */ public interface Function3<X, Y, Z, T> extends Serializable { T apply(X x, Y y, Z z); }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/fp/Functions.java
package water.util.fp; import java.util.*; /** * Operations on functions */ public class Functions { static class Composition<X,Y,Z> implements Function<X,Z> { private final Function<X, Y> f; private final Function<Y, Z> g; Composition(final Function<X,Y> f, final Function<Y, Z> g) { this.f = f; this.g = g; } @Override public int hashCode() { return f.hashCode() * 211 + g.hashCode() * 79; } @Override public boolean equals(Object obj) { if (!(obj instanceof Composition)) return false; Composition other = (Composition) obj; return Objects.equals(f, other.f) && Objects.equals(g, other.g); } @Override public Z apply(X x) { return g.apply(f.apply(x)); } } public static <X,Y,Z> Function<X, Z> compose(final Function<Y, Z> g, final Function<X,Y> f) { return new Composition<>(f, g); } public static <X> Function<X, X> identity() { return new Function<X, X>() { @Override public X apply(X x) { return x; } }; } public static <T> Function<Long, T> onList(final List<T> list) { return new Function<Long, T>() { public T apply(Long i) { return list.get(i.intValue()); } }; } public static <X, Y> Iterable<Y> map(Iterable<X> xs, Function<X, Y> f) { List<Y> ys = new LinkedList<>(); for (X x : xs) ys.add(f.apply(x)); return ys; } public static <X,Y> Function<X,Y> constant(final Y y) { return new Function<X, Y>() { public Y apply(X x) { return y; } }; } static class StringSplitter implements Unfoldable<String, String> { private final String separator; StringSplitter(String separator) { this.separator = separator; } @Override public List<String> apply(String s) { return Arrays.asList(s.split(separator)); } @Override public int hashCode() { return 211 + separator.hashCode() * 7; } @Override public boolean equals(Object obj) { if (!(obj instanceof StringSplitter)) return false; StringSplitter other = (StringSplitter) obj; return Objects.equals(separator, other.separator); } } public static Unfoldable<String, String> splitBy(final String separator) { return new StringSplitter(separator); } /** * Integrates "area under curve" (assuming it exists), * that is, for a parametric curve specified by functions x and y, * defined on integer domain [from, to], calculate the area * between x[from], x[to], horizontal axis, and the curve. * @param x x-component of the curve * @param y y-component of the curve * @param from min value of the curve range * @param to max value of the curve range * @return the area under curve, the result of integrating x*y' over [from,to]. */ public static double integrate(Function<Integer, Double> x, Function<Integer, Double> y, int from, int to) { double s = 0; double x0 = x.apply(from); double y0 = y.apply(from); for (int i = from + 1; i <= to; i++) { double x1 = x.apply(i); double y1 = y.apply(i); s += (y1+y0)*(x1-x0)*.5; x0 = x1; y0 = y1; } return s; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/fp/JustCode.java
package water.util.fp; import java.io.Serializable; /** * An abstraction for the entities that don't have any data, * so they are equal if they are of the same class. * * Comparison is done by comparing canonical class names. This ensures that * classes loaded by different classloaders still are comparable. */ public abstract class JustCode implements Serializable { @Override public int hashCode() { return getClass().getName().hashCode(); } @Override public boolean equals(Object other) { return other != null && getClass().getName().equals(other.getClass().getName()); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/fp/Predicate.java
package water.util.fp; import java.util.LinkedList; import java.util.List; /** * Represents a single-argument function */ public abstract class Predicate<X> implements Function<X, Boolean> { public static Predicate<Object> NOT_NULL = new Predicate<Object>() { @Override public Boolean apply(Object x) { return x != null; } }; public <Y extends X> List<Y> filter(List<Y> xs) { List<Y> result = new LinkedList<>(); for (Y x : xs) if (apply(x)) result.add(x); return result; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/fp/PureFunctions.java
package water.util.fp; /** * Stores stock pure functions, that is those that don't keep any context. * Pure functions have this feature that their equals() only compares classes. */ public class PureFunctions extends Functions { public static final Function<Double, Double> SQUARE = new Function<Double, Double>() { @Override public Double apply(Double x) { return x*x; } }; public static final Function2<Double, Double, Double> PLUS = new Function2<Double, Double, Double>() { @Override public Double apply(Double x, Double y) { return x+y; } }; public static final Function2<Double, Double, Double> PROD = new Function2<Double, Double, Double>() { @Override public Double apply(Double x, Double y) { return x*y; } }; public static final Function2<Double, Double, Double> X2_PLUS_Y2 = new Function2<Double, Double, Double>() { @Override public Double apply(Double x, Double y) { return x*x + y*y; } }; public static final Function3<Double, Double, Double, Double> X2_PLUS_Y2_PLUS_Z2 = new Function3<Double, Double, Double, Double>() { @Override public Double apply(Double x, Double y, Double z) { return x*x + y*y + z*z; } }; public static final Foldable<Double, Double> SUM = new Foldable<Double, Double>() { @Override public Double initial() { return 0.; } @Override public Double apply(Double sum, Double x) { return sum == null || x == null ? null : sum+x; } }; public static final Foldable<Double, Double> SUM_OF_SQUARES = new Foldable<Double, Double>() { @Override public Double initial() { return 0.; } @Override public Double apply(Double sum, Double x) { return sum == null || x == null ? null : sum+x*x; } }; public static final Foldable<Double, Double> PRODUCT = new Foldable<Double, Double>() { @Override public Double initial() { return 1.; } @Override public Double apply(Double sum, Double x) { return sum == null || x == null ? null : sum*x; } }; public static <X,Y,Z> Function<X, Z> compose(final Function<X,Y> f, final Function<Y, Z> g) { return new Function<X, Z>() { @Override public Z apply(X x) { return g.apply(f.apply(x)); } }; } abstract static class Function<X,Y> extends JustCode implements water.util.fp.Function<X, Y> {} abstract static class Function2<X,Y,Z> extends JustCode implements water.util.fp.Function2<X, Y, Z> {} abstract static class Function3<X,Y,Z,T> extends JustCode implements water.util.fp.Function3<X, Y, Z, T> {} abstract static class Foldable<X, Y> extends JustCode implements water.util.fp.Foldable<X, Y> { } abstract static class Unfoldable<X, Y> extends JustCode implements water.util.fp.Unfoldable<X, Y> { } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/fp/Unfoldable.java
package water.util.fp;

import java.util.List;

/**
 * Takes a value of type X, produces a multitude of values of type Y.
 */
public interface Unfoldable<X, Y> extends Function<X, List<Y>> {}
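A hypothetical Unfoldable for illustration: one input string unfolds into the list of its comma-separated fields.

import java.util.Arrays;
import java.util.List;

Unfoldable<String, String> splitCsv = new Unfoldable<String, String>() {
  @Override
  public List<String> apply(String line) {
    return Arrays.asList(line.split(","));
  }
};
splitCsv.apply("a,b,c");  // ["a", "b", "c"]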
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/webserver/CoreServletProvider.java
package water.webserver;

import water.api.*;
import water.server.ServletMeta;
import water.server.ServletProvider;

import java.util.*;

public class CoreServletProvider implements ServletProvider {

  private static final List<ServletMeta> SERVLETS = Collections.unmodifiableList(Arrays.asList(
      new ServletMeta("/3/NodePersistentStorage.bin/*", NpsBinServlet.class),
      new ServletMeta("/3/PostFile.bin", PostFileServlet.class),
      new ServletMeta("/3/PostFile", PostFileServlet.class),
      new ServletMeta("/3/DownloadDataset", DatasetServlet.class),
      new ServletMeta("/3/DownloadDataset.bin", DatasetServlet.class),
      new ServletMeta("/3/PutKey.bin", PutKeyServlet.class),
      new ServletMeta("/3/PutKey", PutKeyServlet.class),
      new ServletMeta("/", RequestServer.class)
  ));

  @Override
  public List<ServletMeta> servlets() {
    return SERVLETS;
  }

  @Override
  public int priority() {
    return 0;
  }
}
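A hedged sketch of what an additional provider could look like, reusing only the ServletProvider/ServletMeta API visible above. MyStatusServlet and the priority value are hypothetical, and the ordering semantics of priority() are not documented in this file:

import water.server.ServletMeta;
import water.server.ServletProvider;

import java.util.Collections;
import java.util.List;

public class MyServletProvider implements ServletProvider {

  /** Hypothetical servlet, for illustration only. */
  public static class MyStatusServlet extends javax.servlet.http.HttpServlet {}

  private static final List<ServletMeta> SERVLETS = Collections.singletonList(
      new ServletMeta("/3/MyStatus", MyStatusServlet.class));

  @Override
  public List<ServletMeta> servlets() {
    return SERVLETS;
  }

  @Override
  public int priority() {
    return 1;  // hypothetical value; ordering semantics are not shown in this file
  }
}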
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/webserver/H2OHttpViewImpl.java
package water.webserver;

import org.apache.commons.io.IOUtils;
import water.ExtensionManager;
import water.api.RequestServer;
import water.server.ServletService;
import water.server.ServletUtils;
import water.util.Log;
import water.webserver.iface.*;

import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Collection;
import java.util.LinkedHashMap;

/**
 * This is intended to be a singleton per H2O node.
 */
public class H2OHttpViewImpl implements H2OHttpView {

  private static volatile boolean _acceptRequests = false;
  private final H2OHttpConfig config;

  public H2OHttpViewImpl(H2OHttpConfig config) {
    this.config = config;
  }

  public void acceptRequests() {
    _acceptRequests = true;
  }

  /**
   * @return URI scheme
   */
  public String getScheme() {
    return config.jks == null ? "http" : "https";
  }

  @Override
  public LinkedHashMap<String, Class<? extends HttpServlet>> getServlets() {
    return ServletService.INSTANCE.getAllServlets();
  }

  @Override
  public LinkedHashMap<String, Class<? extends H2OWebsocketServlet>> getWebsockets() {
    return ServletService.INSTANCE.getAllWebsockets();
  }

  @Override
  public boolean authenticationHandler(HttpServletRequest request, HttpServletResponse response) throws IOException {
    if (!config.loginType.needToCheckUserName()) {
      return false;
    }
    if (request.getUserPrincipal() == null) {
      throw new IllegalStateException("authenticationHandler called but request.getUserPrincipal() is null");
    }
    final String loginName = request.getUserPrincipal().getName();
    if (loginName.equals(config.user_name)) {
      return false;
    } else {
      Log.warn("Login name (" + loginName + ") does not match cluster owner name (" + config.user_name + ")");
      ServletUtils.sendResponseError(response, HttpServletResponse.SC_UNAUTHORIZED,
          "Login name does not match cluster owner name");
      return true;
    }
  }

  @Override
  public boolean gateHandler(HttpServletRequest request, HttpServletResponse response) {
    ServletUtils.startRequestLifecycle();
    while (!isAcceptingRequests()) {
      try { Thread.sleep(100); } catch (Exception ignore) {}
    }
    boolean isXhrRequest = false;
    if (request != null) {
      if (ServletUtils.isTraceRequest(request)) {
        ServletUtils.setResponseStatus(response, HttpServletResponse.SC_METHOD_NOT_ALLOWED);
        return true;
      }
      isXhrRequest = ServletUtils.isXhrRequest(request);
    }
    ServletUtils.setCommonResponseHttpHeaders(response, isXhrRequest);
    return false;
  }

  protected boolean isAcceptingRequests() {
    return _acceptRequests;
  }

  @Override
  public H2OHttpConfig getConfig() {
    return config;
  }

  @Override
  public Collection<RequestAuthExtension> getAuthExtensions() {
    return ExtensionManager.getInstance().getAuthExtensions();
  }

  // normal login handler part
  // TODO: consider using mostly the same code as in the proxy part below
  @Override
  public boolean loginHandler(String target, HttpServletRequest request, HttpServletResponse response) throws IOException {
    if (!isLoginTarget(target)) {
      return false;
    }
    if (isPageRequest(request)) {
      sendLoginForm(request, response);
    } else {
      ServletUtils.sendResponseError(response, HttpServletResponse.SC_UNAUTHORIZED, "Access denied. Please login.");
    }
    return true;
  }

  private static void sendLoginForm(HttpServletRequest request, HttpServletResponse response) {
    final String uri = ServletUtils.getDecodedUri(request);
    try {
      byte[] bytes;
      try (InputStream resource = water.init.JarHash.getResource2("/login.html")) {
        if (resource == null) {
          throw new IllegalStateException("Login form not found");
        }
        final ByteArrayOutputStream baos = new ByteArrayOutputStream();
        water.util.FileUtils.copyStream(resource, baos, 2048);
        bytes = baos.toByteArray();
      }
      response.setContentType(RequestServer.MIME_HTML);
      response.setContentLength(bytes.length);
      ServletUtils.setResponseStatus(response, HttpServletResponse.SC_OK);
      final OutputStream os = response.getOutputStream();
      water.util.FileUtils.copyStream(new ByteArrayInputStream(bytes), os, 2048);
    } catch (Exception e) {
      ServletUtils.sendErrorResponse(response, e, uri);
    } finally {
      ServletUtils.logRequest("GET", request, response);
    }
  }

  private static boolean isPageRequest(HttpServletRequest request) {
    String accept = request.getHeader("Accept");
    return (accept != null) && accept.contains(RequestServer.MIME_HTML);
  }

  private static boolean isLoginTarget(String target) {
    return target.equals("/login") || target.equals("/loginError");
  }

  // proxy login handler part
  @Override
  public boolean proxyLoginHandler(String target, HttpServletRequest request, HttpServletResponse response) throws IOException {
    if (!isLoginTarget(target)) {
      return false;
    }
    if (isPageRequest(request)) {
      proxySendLoginForm(response);
    } else {
      response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "Access denied. Please login.");
    }
    return true;
  }

  private static byte[] proxyLoadLoginFormResource() throws IOException {
    final InputStream loginFormStream = H2OHttpView.class.getResourceAsStream("/www/login.html");
    if (loginFormStream == null) {
      throw new IllegalStateException("Login form resource is missing.");
    }
    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    IOUtils.copy(loginFormStream, baos);
    return baos.toByteArray();
  }

  private byte[] proxyLoginFormData;

  private void proxySendLoginForm(HttpServletResponse response) throws IOException {
    if (proxyLoginFormData == null) {
      proxyLoginFormData = proxyLoadLoginFormResource();
    }
    response.setContentType("text/html");
    response.setContentLength(proxyLoginFormData.length);
    response.setStatus(HttpServletResponse.SC_OK);
    IOUtils.write(proxyLoginFormData, response.getOutputStream());
  }
}
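The three handlers above are designed to be consulted in sequence by the embedding web server, each returning true once it has fully handled (or rejected) the request. A hedged sketch of that dispatch order; the adapter shape is assumed, since the real request-dispatch machinery lives outside this file:

boolean handle(H2OHttpView view, String target,
               HttpServletRequest req, HttpServletResponse resp) throws IOException {
  if (view.gateHandler(req, resp)) return true;            // also blocks until acceptRequests()
  if (view.loginHandler(target, req, resp)) return true;   // serves /login and /loginError
  if (view.authenticationHandler(req, resp)) return true;  // rejects non-owner principals
  return false;  // fall through to regular servlet dispatch
}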
0
java-sources/ai/h2o/h2o-ext-authsupport/3.22.0.5/ai/h2o/org/eclipse/jetty/plus/jaas
java-sources/ai/h2o/h2o-ext-authsupport/3.22.0.5/ai/h2o/org/eclipse/jetty/plus/jaas/spi/LdapLoginModule.java
package ai.h2o.org.eclipse.jetty.plus.jaas.spi;

/**
 * LdapLoginModule is relocated in Sparkling Water to the package
 * ai.h2o.org.eclipse.jetty.plus.jaas.spi. This class lets users define a login module
 * that works both for H2O and Sparkling Water (put
 * "ai.h2o.org.eclipse.jetty.plus.jaas.spi.LdapLoginModule required" in the login conf).
 */
public class LdapLoginModule extends org.eclipse.jetty.plus.jaas.spi.LdapLoginModule {
  /* empty */
}
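An illustrative JAAS login configuration entry referencing the relocated class, as the javadoc above suggests. The realm name and all option values are placeholders, and the option names follow Jetty's stock LdapLoginModule, so they may vary with the Jetty version in use:

ldaploginmodule {
    ai.h2o.org.eclipse.jetty.plus.jaas.spi.LdapLoginModule required
    debug="true"
    contextFactory="com.sun.jndi.ldap.LdapCtxFactory"
    hostname="ldap.example.com"
    port="389"
    bindDn="cn=admin,dc=example,dc=com"
    bindPassword="secret"
    userBaseDn="ou=people,dc=example,dc=com"
    userIdAttribute="uid";
};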
0
java-sources/ai/h2o/h2o-ext-jython-cfunc/3.46.0.7/org/python
java-sources/ai/h2o/h2o-ext-jython-cfunc/3.46.0.7/org/python/core/imp.java
// Copyright (c) Corporation for National Research Initiatives package org.python.core; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; import java.util.Date; import java.util.Map; import java.util.concurrent.locks.ReentrantLock; import java.util.logging.Level; import java.util.logging.Logger; import org.python.compiler.Module; import org.python.core.util.FileUtil; import org.python.core.util.LimitedCache; import org.python.core.util.PlatformUtil; /** * Utility functions for "import" support. * * Note that this class tries to match the names of the corresponding functions from CPython's * Python/import.c. In these cases we use CPython's function naming style (underscores and all * lowercase) instead of Java's typical camelCase style so that it's easier to compare with * import.c. */ public class imp { private static Logger logger = Logger.getLogger("org.python.import"); private static final String UNKNOWN_SOURCEFILE = "<unknown>"; private static final int APIVersion = 39; public static final int NO_MTIME = -1; // This should change to Python 3.x; note that 2.7 allows relative // imports unless `from __future__ import absolute_import` public static final int DEFAULT_LEVEL = -1; private static final boolean IS_OSX = PySystemState.getNativePlatform().equals("darwin"); /** * A bundle of a file name, the file's content and a last modified time, with no behaviour. As * used here, the file is a class file and the last modified time is that of the matching * source, while the filename is taken from the annotation in the class file. See * {@link imp#readCodeData(String, InputStream, boolean, long)}. */ public static class CodeData { private final byte[] bytes; private final long mtime; private final String filename; public CodeData(byte[] bytes, long mtime, String filename) { this.bytes = bytes; this.mtime = mtime; this.filename = filename; } public byte[] getBytes() { return bytes; } public long getMTime() { return mtime; } public String getFilename() { return filename; } } /** * A two-way selector given to * {@link imp#createFromPyClass(String, InputStream, boolean, String, String, long, CodeImport)} * that tells it whether the source file name to give the module-class constructor, and which * ends up in {@code co_filename} attribute of the module's {@code PyCode}, should be from the * caller or the compiled file. */ static enum CodeImport { /** Take the filename from the {@code sourceName} argument */ source, /** Take filename from the compiler annotation */ compiled_only; } /** A non-empty fromlist for __import__'ing sub-modules. */ private static final PyObject nonEmptyFromlist = new PyTuple(PyString.fromInterned("__doc__")); public static ClassLoader getSyspathJavaLoader() { return Py.getSystemState().getSyspathJavaLoader(); } /** * Selects the parent class loader for Jython, to be used for dynamically loaded classes and * resources. Chooses between the current and context class loader based on the following * criteria: * * <ul> * <li>If both are the same class loader, return that class loader. * <li>If either is null, then the non-null one is selected. * <li>If both are not null, and a parent/child relationship can be determined, then the child * is selected. 
* <li>If both are not null and not on a parent/child relationship, then the current class * loader is returned (since it is likely for the context class loader to <b>not</b> see the * Jython classes) * </ul> * * @return the parent class loader for Jython or null if both the current and context class * loaders are null. */ public static ClassLoader getParentClassLoader() { ClassLoader current = imp.class.getClassLoader(); ClassLoader context = Thread.currentThread().getContextClassLoader(); if (context == current || context == null) { return current; } else if (current == null) { return context; } else if (isAncestor(context, current)) { return current; } else if (isAncestor(current, context)) { return context; } else { return current; } } /** True iff a {@code possibleAncestor} is the ancestor of the {@code subject}. */ private static boolean isAncestor(ClassLoader possibleAncestor, ClassLoader subject) { try { ClassLoader parent = subject.getParent(); if (possibleAncestor == parent) { return true; } else if (parent == null || parent == subject) { // The subject is the boot class loader return false; } else { return isAncestor(possibleAncestor, parent); } } catch (SecurityException e) { return false; } } private imp() {} // Prevent instantiation. /** * If the given name is found in sys.modules, the entry from there is returned. Otherwise a new * {@link PyModule} is created for the name and added to {@code sys.modules}. Creating the * module does not execute the body of the module to initialise its attributes. * * @param name fully-qualified name of the module * @return created {@code PyModule} */ public static PyModule addModule(String name) { name = name.intern(); PyObject modules = Py.getSystemState().modules; PyModule module = (PyModule) modules.__finditem__(name); if (module != null) { return module; } module = new PyModule(name, null); PyModule __builtin__ = (PyModule) modules.__finditem__("__builtin__"); PyObject __dict__ = module.__getattr__("__dict__"); __dict__.__setitem__("__builtins__", __builtin__.__getattr__("__dict__")); __dict__.__setitem__("__package__", Py.None); modules.__setitem__(name, module); return module; } /** * Remove name from sys.modules if present. * * @param name the module name */ private static void removeModule(String name) { name = name.intern(); PyObject modules = Py.getSystemState().modules; if (modules.__finditem__(name) != null) { try { modules.__delitem__(name); } catch (PyException pye) { // another thread may have deleted it if (!pye.match(Py.KeyError)) { throw pye; } } } } /** * Read a stream as a new byte array and close the stream. * * @param fp to read * @return bytes read */ private static byte[] readBytes(InputStream fp) { try { return FileUtil.readBytes(fp); } catch (IOException ioe) { throw Py.IOError(ioe); } finally { try { fp.close(); } catch (IOException e) { throw Py.IOError(e); } } } /** Open a file, raising a {@code PyException} on error. */ private static InputStream makeStream(File file) { try { return new FileInputStream(file); } catch (IOException ioe) { throw Py.IOError(ioe); } } /** * As {@link #createFromPyClass(String, InputStream, boolean, String, String, long, CodeImport)} * but always constructs the named class using {@code sourceName} as argument and makes no check * on the last-modified time. 
* * @param name module name on which to base the class name as {@code name + "$py"} * @param fp stream from which to read class file (closed when read) * @param testing if {@code true}, failures are signalled by a {@code null} not an exception * @param sourceName used for identification in messages and the constructor of the named class. * @param compiledName used for identification in messages and {@code __file__}. * @return the module or {@code null} on failure (if {@code testing}). * @throws PyException {@code ImportError} on API mismatch or i/o error. */ static PyObject createFromPyClass(String name, InputStream fp, boolean testing, String sourceName, String compiledName) { return createFromPyClass(name, fp, testing, sourceName, compiledName, NO_MTIME); } /** * As {@link #createFromPyClass(String, InputStream, boolean, String, String, long, CodeImport)} * but always constructs the named class using {@code sourceName} as argument. * * @param name module name on which to base the class name as {@code name + "$py"} * @param fp stream from which to read class file (closed when read) * @param testing if {@code true}, failures are signalled by a {@code null} not an exception * @param sourceName used for identification in messages and the constructor of the named class. * @param compiledName used for identification in messages and {@code __file__}. * @param sourceLastModified time expected to match {@code MTime} annotation in the class file * @return the module or {@code null} on failure (if {@code testing}). * @throws PyException {@code ImportError} on API or last-modified time mismatch or i/o error. */ static PyObject createFromPyClass(String name, InputStream fp, boolean testing, String sourceName, String compiledName, long sourceLastModified) { return createFromPyClass(name, fp, testing, sourceName, compiledName, sourceLastModified, CodeImport.source); } /** * Create a Python module from its compiled form, reading the class file from the open input * stream passed in (which is closed once read). The method may be used in a "testing" mode in * which the module is imported (if possible), but error conditions return {@code null}, or in a * non-testing mode where they throw. The caller may choose whether the source file name to give * the module-class constructor, and which ends up in {@code co_filename} attribute of the * module's {@code PyCode}, should be {@code sourceName} or the compiled file (See * {@link CodeImport}.) * * @param name module name on which to base the class name as {@code name + "$py"} * @param fp stream from which to read class file (closed when read) * @param testing if {@code true}, failures are signalled by a {@code null} not an exception * @param sourceName used for identification in messages. * @param compiledName used for identification in messages and {@code __file__}. * @param sourceLastModified time expected to match {@code MTime} annotation in the class file * @param source choose what to use as the file name when initialising the class * @return the module or {@code null} on failure (if {@code testing}). * @throws PyException {@code ImportError} on API or last-modified time mismatch or i/o error. 
*/ static PyObject createFromPyClass(String name, InputStream fp, boolean testing, String sourceName, String compiledName, long sourceLastModified, CodeImport source) { // Get the contents of a compiled ($py.class) file and some meta-data CodeData data = null; try { data = readCodeData(compiledName, fp, testing, sourceLastModified); } catch (IOException ioe) { if (!testing) { throw Py.ImportError(ioe.getMessage() + "[name=" + name + ", source=" + sourceName + ", compiled=" + compiledName + "]"); } } if (testing && data == null) { return null; } // Instantiate the class and have it produce its PyCode object. PyCode code; try { // Choose which file name to provide to the module-class constructor String display = source == CodeImport.compiled_only ? data.getFilename() : sourceName; code = BytecodeLoader.makeCode(name + "$py", data.getBytes(), display); } catch (Throwable t) { if (testing) { return null; } else { throw Py.JavaError(t); } } // Execute the PyCode object (run the module body) to populate the module __dict__ logger.log(Level.CONFIG, "import {0} # precompiled from {1}", new Object[] {name, compiledName}); return createFromCode(name, code, compiledName); } /** * As {@link #readCodeData(String, InputStream, boolean, long)} but do not check last-modified * time and return only the class file bytes as an array. * * @param name of source file (used for identification in error/log messages) * @param fp stream from which to read class file (closed when read) * @param testing if {@code true}, failures are signalled by a {@code null} not an exception * @return the class file bytes as an array or {@code null} on failure (if {@code testing}). * @throws PyException {@code ImportError} on API or last-modified time mismatch * @throws IOException from read failures */ public static byte[] readCode(String name, InputStream fp, boolean testing) throws IOException { return readCode(name, fp, testing, NO_MTIME); } /** * As {@link #readCodeData(String, InputStream, boolean, long)} but return only the class file * bytes as an array. * * @param name of source file (used for identification in error/log messages) * @param fp stream from which to read class file (closed when read) * @param testing if {@code true}, failures are signalled by a {@code null} not an exception * @param sourceLastModified time expected to match {@code MTime} annotation in the class file * @return the class file bytes as an array or {@code null} on failure (if {@code testing}). * @throws PyException {@code ImportError} on API or last-modified time mismatch * @throws IOException from read failures */ public static byte[] readCode(String name, InputStream fp, boolean testing, long sourceLastModified) throws IOException { CodeData data = readCodeData(name, fp, testing, sourceLastModified); if (data == null) { return null; } else { return data.getBytes(); } } /** * As {@link #readCodeData(String, InputStream, boolean, long)} but do not check last-modified * time. * * @param name of source file (used for identification in error/log messages) * @param fp stream from which to read class file (closed when read) * @param testing if {@code true}, failures are signalled by a {@code null} not an exception * @return the {@code CodeData} bundle or {@code null} on failure (if {@code testing}). 
* @throws PyException {@code ImportError} on API mismatch * @throws IOException from read failures */ public static CodeData readCodeData(String name, InputStream fp, boolean testing) throws IOException { return readCodeData(name, fp, testing, NO_MTIME); } /** * Create a {@link CodeData} object bundling the contents of a class file (given as a stream), * source-last-modified time supplied, and the name of the file taken from annotations on the * class. On the way, the method checks the API version annotation matches the current process, * and that the {@code org.python.compiler.MTime} annotation matches the source-last-modified * time passed in. * * @param name of source file (used for identification in error/log messages) * @param fp stream from which to read class file (closed when read) * @param testing if {@code true}, failures are signalled by a {@code null} not an exception * @param sourceLastModified time expected to match {@code MTime} annotation in the class file * @return the {@code CodeData} bundle or {@code null} on failure (if {@code testing}). * @throws PyException {@code ImportError} on API or last-modified time mismatch * @throws IOException from read failures */ public static CodeData readCodeData(String name, InputStream fp, boolean testing, long sourceLastModified) throws IOException, PyException { byte[] classFileData = readBytes(fp); AnnotationReader ar = new AnnotationReader(classFileData); // Check API version fossilised in the class file against that expected int api = ar.getVersion(); if (api != APIVersion) { if (testing) { return null; } else { String fmt = "compiled unit contains version %d code (%d required): %.200s"; throw Py.ImportError(String.format(fmt, api, APIVersion, name)); } } /* * The source-last-modified time is fossilised in the class file. The source may have been * installed from a JAR, and this will have resulted in rounding of the last-modified time * down (see build.xml::jar-sources) to the nearest 2 seconds. */ if (testing && sourceLastModified != NO_MTIME) { long diff = ar.getMTime() - sourceLastModified; if (diff > 2000L) { // = 2000 milliseconds logger.log(Level.FINE, "# {0} time is {1} ms later than source", new Object[] {name, diff}); return null; } } // All well: make the bundle. return new CodeData(classFileData, sourceLastModified, ar.getFilename()); } /** * Compile Python source in file to a class file represented by a byte array. * * @param name of module (class name will be name$py) * @param source file containing the source * @return Java byte code as array */ public static byte[] compileSource(String name, File source) { return compileSource(name, source, null); } /** * Compile Python source in file to a class file represented by a byte array. * * @param name of module (class name will be name$py) * @param source file containing the source * @param filename explicit source file name (or {@code null} to use that in source) * @return Java byte code as array */ public static byte[] compileSource(String name, File source, String filename) { if (filename == null) { filename = source.toString(); } long mtime = source.lastModified(); return compileSource(name, makeStream(source), filename, mtime); } /** * Compile Python source in file to a class file represented by a byte array. * * @param name of module (class name will be name$py) * @param source file containing the source * @param sourceFilename explicit source file name (or {@code null} to use that in source) * @param compiledFilename ignored (huh?) 
* @return Java byte code as array * @deprecated Use {@link #compileSource(String, File, String)} instead. */ @Deprecated public static byte[] compileSource(String name, File source, String sourceFilename, String compiledFilename) { return compileSource(name, source, sourceFilename); } /** Remove the last three characters of a file name and add the compiled suffix "$py.class". */ public static String makeCompiledFilename(String filename) { return filename.substring(0, filename.length() - 3) + "$py.class"; } /** * Stores the bytes in compiledSource in compiledFilename. * * If compiledFilename is null, it's set to the results of makeCompiledFilename(sourcefileName). * * If sourceFilename is null or set to UNKNOWN_SOURCEFILE, then null is returned. * * @return the compiledFilename eventually used; or null if a compiledFilename couldn't be * determined or if an error was thrown while writing to the cache file. */ public static String cacheCompiledSource(String sourceFilename, String compiledFilename, byte[] compiledSource) { if (compiledFilename == null) { if (sourceFilename == null || sourceFilename.equals(UNKNOWN_SOURCEFILE)) { return null; } compiledFilename = makeCompiledFilename(sourceFilename); } FileOutputStream fop = null; try { SecurityManager man = System.getSecurityManager(); if (man != null) { man.checkWrite(compiledFilename); } fop = new FileOutputStream(FileUtil.makePrivateRW(compiledFilename)); fop.write(compiledSource); fop.close(); return compiledFilename; } catch (IOException | SecurityException exc) { // If we can't write the cache file, just log and continue logger.log(Level.FINE, "Unable to write to source cache file ''{0}'' due to {1}", new Object[] {compiledFilename, exc}); return null; } finally { if (fop != null) { try { fop.close(); } catch (IOException e) { logger.log(Level.FINE, "Unable to close source cache file ''{0}'' due to {1}", new Object[] {compiledFilename, e}); } } } } /** * Compile Python source to a class file represented by a byte array. * * @param name of module (class name will be name$py) * @param source open input stream (will be closed) * @param filename of source (or {@code null} if unknown) * @return Java byte code as array */ public static byte[] compileSource(String name, InputStream source, String filename) { return compileSource(name, source, filename, NO_MTIME); } /** * Compile Python source to a class file represented by a byte array. 
* * @param name of module (class name will be name$py) * @param source open input stream (will be closed) * @param filename of source (or {@code null} if unknown) * @param mtime last-modified time of source, to annotate class * @return Java byte code as array */ public static byte[] compileSource(String name, InputStream source, String filename, long mtime) { ByteArrayOutputStream ofp = new ByteArrayOutputStream(); try { if (filename == null) { filename = UNKNOWN_SOURCEFILE; } org.python.antlr.base.mod node; try { // Compile source to AST node = ParserFacade.parse(source, CompileMode.exec, filename, new CompilerFlags()); } finally { source.close(); } // Generate code Module.compile(node, ofp, name + "$py", filename, true, false, null, mtime); return ofp.toByteArray(); } catch (Throwable t) { throw ParserFacade.fixParseError(null, t, filename); } } public static PyObject createFromSource(String name, InputStream fp, String filename) { return createFromSource(name, fp, filename, null, NO_MTIME); } public static PyObject createFromSource(String name, InputStream fp, String filename, String outFilename) { return createFromSource(name, fp, filename, outFilename, NO_MTIME); } /** * Compile Jython source (as an {@code InputStream}) to a module. * * @param name of the module to create (class will be name$py) * @param fp opened on the (Jython) source to compile (will be closed) * @param filename of the source backing {@code fp} (to embed in class as data) * @param outFilename in which to write the compiled class * @param mtime last modified time of the file backing {@code fp} * @return created module */ public static PyObject createFromSource(String name, InputStream fp, String filename, String outFilename, long mtime) { byte[] bytes = compileSource(name, fp, filename, mtime); if (!Py.getSystemState().dont_write_bytecode) { outFilename = cacheCompiledSource(filename, outFilename, bytes); } logger.log(Level.CONFIG, "import {0} # from {1}", new Object[]{name, filename}); PyCode code = BytecodeLoader.makeCode(name + "$py", bytes, filename); return createFromCode(name, code, filename); } /** * Returns a module with the given name whose contents are the results of running c. __file__ is * set to whatever is in c. */ public static PyObject createFromCode(String name, PyCode c) { return createFromCode(name, c, null); } /** * Return a Python module with the given {@code name} whose attributes are the result of running * {@code PyCode c}. If {@code moduleLocation != null} it is used to set {@code __file__ }. * <p> * In normal circumstances, if {@code c} comes from a local {@code .py} file or compiled * {@code $py.class} class the caller should should set {@code moduleLocation} to something like * {@code new File(moduleLocation).getAbsolutePath()}. If {@code c} comes from a remote file or * is a JAR, {@code moduleLocation} should be the full URI for that source or class. 
* * @param name fully-qualified name of the module * @param c code supplying the module * @param moduleLocation to become {@code __file__} if not {@code null} * @return the module object */ public static PyObject createFromCode(String name, PyCode c, String moduleLocation) { checkName(name); PyModule module = addModule(name); PyBaseCode code = null; if (c instanceof PyBaseCode) { code = (PyBaseCode) c; } if (moduleLocation != null) { // Standard library expects __file__ to be encoded bytes module.__setattr__("__file__", Py.fileSystemEncode(moduleLocation)); } else if (module.__findattr__("__file__") == null) { // Should probably never happen (but maybe with an odd custom builtins, or // Java Integration) logger.log(Level.WARNING, "{0} __file__ is unknown", name); } ReentrantLock importLock = Py.getSystemState().getImportLock(); importLock.lock(); try { PyFrame f = new PyFrame(code, module.__dict__, module.__dict__, null); code.call(Py.getThreadState(), f); return module; } catch (RuntimeException t) { removeModule(name); throw t; } finally { importLock.unlock(); } } @SuppressWarnings("unchecked") static PyObject createFromClass(String name, Class<?> c) { // Two choices. c implements PyRunnable or c is Java package if (PyRunnable.class.isAssignableFrom(c)) { try { if (ContainsPyBytecode.class.isAssignableFrom(c)) { BytecodeLoader.fixPyBytecode((Class<? extends ContainsPyBytecode>) c); } return createFromCode(name, ((PyRunnable) c.getDeclaredConstructor().newInstance()).getMain()); } catch (ReflectiveOperationException | SecurityException | IllegalArgumentException | IOException e) { throw Py.JavaError(e); } } return PyType.fromClass(c); } public static PyObject getImporter(PyObject p) { PySystemState sys = Py.getSystemState(); return getPathImporter(sys.path_importer_cache, sys.path_hooks, p); } /** * Return an importer object for an element of {@code sys.path} or of a package's * {@code __path__}, possibly by fetching it from the {@code cache}. If it wasn’t yet cached, * traverse {@code hooks} until a hook is found that can handle the path item. Return * {@link Py#None} if no hook could do so. This tells our caller it should fall back to the * built-in import mechanism. Cache the result in {@code cache}. Return a new reference to the * importer object. * <p> * This is the "path hooks" mechanism first described in PEP 302 * * @param cache normally {@code sys.path_importer_cache} * @param hooks normally (@code sys.path_hooks} * @param p an element of {@code sys.path} or of a package's {@code __path__} * @return the importer object for the path element or {@code Py.None} for "fall-back". */ static PyObject getPathImporter(PyObject cache, PyList hooks, PyObject p) { // Is it in the cache? PyObject importer = cache.__finditem__(p); if (importer != null) { return importer; } // Nothing in the cache, so check all hooks. PyObject iter = hooks.__iter__(); for (PyObject hook; (hook = iter.__iternext__()) != null;) { try { importer = hook.__call__(p); break; } catch (PyException e) { if (!e.match(Py.ImportError)) { throw e; } } } if (importer == null) { // No hook claims to handle the location p, so add an imp.NullImporter try { importer = new PyNullImporter(p); } catch (PyException e) { if (!e.match(Py.ImportError)) { throw e; } } } if (importer != null) { // We found an importer. Cache it for next time. cache.__setitem__(p, importer); } else { // Caller will fall-back to built-in mechanisms. 
importer = Py.None; } return importer; } /** * Try to load a Python module from {@code sys.meta_path}, as a built-in module, or from either * the {@code __path__} of the enclosing package or {@code sys.path} if the module is being * sought at the top level. * * @param name simple name of the module. * @param moduleName fully-qualified (dotted) name of the module (ending in {@code name}). * @param path {@code __path__} of the enclosing package (or {@code null} if top level). * @return the module if we can load it (or {@code null} if we can't). */ static PyObject find_module(String name, String moduleName, PyList path) { PyObject loader = Py.None; PySystemState sys = Py.getSystemState(); PyObject metaPath = sys.meta_path; // Check for importers along sys.meta_path for (PyObject importer : metaPath.asIterable()) { PyObject findModule = importer.__getattr__("find_module"); loader = findModule.__call__(new PyObject[] { // new PyString(moduleName), path == null ? Py.None : path}); if (loader != Py.None) { return loadFromLoader(loader, moduleName); } } // Attempt to load from (prepared) builtins in sys.builtins. PyObject ret = loadBuiltin(moduleName); if (ret != null) { return ret; } // Note the path here may be sys.path or the search path of a Python package. path = path == null ? sys.path : path; for (int i = 0; ret == null && i < path.__len__(); i++) { PyObject p = path.__getitem__(i); // Is there a path-specific importer? PyObject importer = getPathImporter(sys.path_importer_cache, sys.path_hooks, p); if (importer != Py.None) { // A specific importer is defined. Try its finder method. PyObject findModule = importer.__getattr__("find_module"); loader = findModule.__call__(new PyObject[] {new PyString(moduleName)}); if (loader != Py.None) { return loadFromLoader(loader, moduleName); } } // p could be a unicode or bytes object (in the file system encoding) String pathElement = fileSystemDecode(p, false); if (pathElement != null) { ret = loadFromSource(sys, name, moduleName, pathElement); } } return ret; } /** * Load a built-in module by reference to {@link PySystemState#builtins}, which maps Python * module names to class names. Special treatment is given to the modules {@code sys} and * {@code __builtin__}. * * @param name fully-qualified name of the module * @return the module named */ private static PyObject loadBuiltin(String name) { final String MSG = "import {0} # builtin"; if (name == "sys") { logger.log(Level.CONFIG, MSG, name); return Py.java2py(Py.getSystemState()); } if (name == "__builtin__") { logger.log(Level.CONFIG, MSG, new Object[] {name, name}); return new PyModule("__builtin__", Py.getSystemState().builtins); } String mod = PySystemState.getBuiltin(name); if (mod != null) { Class<?> c = Py.findClassEx(mod, "builtin module"); if (c != null) { logger.log(Level.CONFIG, "import {0} # builtin {1}", new Object[] {name, mod}); try { if (PyObject.class.isAssignableFrom(c)) { // xxx ok? 
return PyType.fromClass(c); } return createFromClass(name, c); } catch (NoClassDefFoundError e) { throw Py.ImportError( "Cannot import " + name + ", missing class " + c.getName()); } } } return null; } static PyObject loadFromLoader(PyObject importer, String name) { checkName(name); PyObject load_module = importer.__getattr__("load_module"); ReentrantLock importLock = Py.getSystemState().getImportLock(); importLock.lock(); try { return load_module.__call__(new PyObject[] {new PyString(name)}); } finally { importLock.unlock(); } } public static PyObject loadFromCompiled(String name, InputStream stream, String sourceName, String compiledName) { return createFromPyClass(name, stream, false, sourceName, compiledName); } /** * Import a module defined in Python by loading it from source (or a compiled * {@code name$pyclass}) file in the specified location (often an entry from {@code sys.path}, * or a sub-directory of it named for the {@code modName}. For example, if {@code name} is * "pkg1" and the {@code modName} is "pkg.pkg1", {@code location} might be "mylib/pkg". * * @param sys the sys module of the interpreter importing the module. * @param name by which to look for files or a directory representing the module. * @param modName name by which to enter the module in {@code sys.modules}. * @param location where to look for the {@code name}. * @return the module if we can load it (or {@code null} if we can't). */ static PyObject loadFromSource(PySystemState sys, String name, String modName, String location) { File sourceFile; // location/name/__init__.py or location/name.py File compiledFile; // location/name/__init__$py.class or location/name$py.class boolean haveSource = false, haveCompiled = false; // display* names are for mainly identification purposes (e.g. __file__) String displayLocation = (location.equals("") || location.equals(",")) ? null : location; String displaySourceName, displayCompiledName; try { /* * Distinguish package and module cases by choosing File objects sourceFile and * compiledFile based on name/__init__ or name. haveSource and haveCompiled are set true * if the corresponding source or compiled files exist, and this is what steers the * loading in the second part of the process. */ String dirName = sys.getPath(location); File dir = new File(dirName, name); if (dir.isDirectory()) { // This should be a package: location/name File displayDir = new File(displayLocation, name); // Source is location/name/__init__.py String sourceName = "__init__.py"; sourceFile = new File(dir, sourceName); displaySourceName = new File(displayDir, sourceName).getPath(); // Compiled is location/name/__init__$py.class String compiledName = makeCompiledFilename(sourceName); compiledFile = new File(dir, compiledName); displayCompiledName = new File(displayDir, compiledName).getPath(); // Check the directory name is ok according to case-matching option and platform. if (caseok(dir, name)) { haveSource = sourceFile.isFile() && Files.isReadable(sourceFile.toPath()); haveCompiled = compiledFile.isFile() && Files.isReadable(compiledFile.toPath()); } if (haveSource || haveCompiled) { // Create a PyModule (uninitialised) for name, called modName in sys.modules PyModule m = addModule(modName); PyString filename = Py.fileSystemEncode(displayDir.getPath()); m.__dict__.__setitem__("__path__", new PyList(new PyObject[] {filename})); } else { /* * There is neither source nor compiled code for __init__.py. In Jython, this * message warning is premature, as there may be a Java package by this name. 
*/ String printDirName = PyString.encode_UnicodeEscape(dir.getPath(), '\''); Py.warning(Py.ImportWarning, String.format( "Not importing directory %s: missing __init__.py", printDirName)); } } else { // This is a (non-package) module: location/name // Source is location/name.py String sourceName = name + ".py"; sourceFile = new File(dirName, sourceName); // location/name.py displaySourceName = new File(displayLocation, sourceName).getPath(); // Compiled is location/name$py.class String compiledName = makeCompiledFilename(sourceName); compiledFile = new File(dirName, compiledName); // location/name$py.class displayCompiledName = new File(displayLocation, compiledName).getPath(); // Check file names exist (and readable) and ok according to case-matching option and platform. haveSource = sourceFile.isFile() && caseok(sourceFile, sourceName) && Files.isReadable(sourceFile.toPath()); haveCompiled = compiledFile.isFile() && caseok(compiledFile, compiledName) && Files.isReadable(compiledFile.toPath()); } /* * Now we are ready to load and execute the module in sourceFile or compiledFile, from * its compiled or source form, as directed by haveSource and haveCompiled. */ if (haveSource) { // Try to create the module from source or an existing compiled class. long pyTime = sourceFile.lastModified(); if (haveCompiled) { // We have the compiled file and will use that if it is not out of date logger.log(Level.FINE, "# trying precompiled {0}", compiledFile.getPath()); long classTime = compiledFile.lastModified(); if (classTime >= pyTime) { // The compiled file does not appear out of date relative to the source. PyObject ret = createFromPyClass(modName, makeStream(compiledFile), // true, // OK to fail here as we have the source displaySourceName, displayCompiledName, pyTime); if (ret != null) { return ret; } } else { logger.log(Level.FINE, "# {0} dated ({1,date} {1,time,long}) < ({2,date} {2,time,long})", new Object[] {name, new Date(classTime), new Date(pyTime)}); } } // The compiled class is not present, is out of date, or using it failed somehow. logger.log(Level.FINE, "# trying source {0}", sourceFile.getPath()); return createFromSource(modName, makeStream(sourceFile), displaySourceName, compiledFile.getPath(), pyTime); } else if (haveCompiled) { // There is no source, try loading compiled logger.log(Level.FINE, "# trying precompiled with no source {0}", compiledFile.getPath()); return createFromPyClass(modName, makeStream(compiledFile), // false, // throw ImportError here if this fails displaySourceName, displayCompiledName, NO_MTIME, CodeImport.compiled_only); } } catch (SecurityException e) { // We were prevented from reading some essential file, so pretend we didn't find it. } return null; } /** * Check that the canonical name of {@code file} matches {@code filename}, case-sensitively, * even when the OS platform is case-insensitive. This is used to obtain as a check during * import on platforms (Windows) that may be case-insensitive regarding file open. It is assumed * that {@code file} was derived from attempting to find {@code filename}, so it returns * {@code true} on a case-sensitive platform. 
* <p> * Algorithmically, we return {@code true} if any of the following is true: * <ul> * <li>{@link Options#caseok} is {@code true} (default is {@code false}).</li> * <li>The platform is case sensitive (according to * {@link PlatformUtil#isCaseInsensitive()})</li> * <li>The name part of the canonical path of {@code file} starts with {@code filename}</li> * <li>The name of any sibling (in the same directory as) {@code file} equals * {@code filename}</li> * </ul> * and false otherwise. * * @param file to be tested * @param filename to be matched * @return {@code file} matches {@code filename} */ public static boolean caseok(File file, String filename) { if (Options.caseok || !PlatformUtil.isCaseInsensitive()) { return true; } try { File canFile = new File(file.getCanonicalPath()); boolean match = filename.regionMatches(0, canFile.getName(), 0, filename.length()); if (!match) { // Get parent and look for exact match in listdir(). This is horrible, but rare. for (String c : file.getParentFile().list()) { if (c.equals(filename)) { return true; } } } return match; } catch (IOException exc) { return false; } } /** * Load the module by name. Upon loading the module it will be added to sys.modules. * * @param name the name of the module to load * @return the loaded module */ public static PyObject load(String name) { checkName(name); ReentrantLock importLock = Py.getSystemState().getImportLock(); importLock.lock(); try { return import_first(name, new StringBuilder()); } finally { importLock.unlock(); } } /** * Find the parent package name for a module. * <p> * If __name__ does not exist in the module or if level is <code>0</code>, then the parent is * <code>null</code>. If __name__ does exist and is not a package name, the containing package * is located. If no such package exists and level is <code>-1</code>, the parent is * <code>null</code>. If level is <code>-1</code>, the parent is the current name. Otherwise, * <code>level-1</code> dotted parts are stripped from the current name. For example, the * __name__ <code>"a.b.c"</code> and level <code>2</code> would return <code>"a.b"</code>, if * <code>c</code> is a package and would return <code>"a"</code>, if <code>c</code> is not a * package. * * @param dict the __dict__ of a loaded module that is the context of the import statement * @param level used for relative and absolute imports. -1 means try both, 0 means absolute * only, positive ints represent the level to look upward for a relative path (1 * means current package, 2 means one level up). See PEP 328 at * http://www.python.org/dev/peps/pep-0328/ * * @return the parent name for a module */ private static String get_parent(PyObject dict, int level) { String modname; int orig_level = level; if ((dict == null && level == -1) || level == 0) { // try an absolute import return null; } PyObject tmp = dict.__finditem__("__package__"); if (tmp != null && tmp != Py.None) { if (!Py.isInstance(tmp, PyString.TYPE)) { throw Py.ValueError("__package__ set to non-string"); } modname = ((PyString) tmp).getString(); } else { // __package__ not set, so figure it out and set it. tmp = dict.__finditem__("__name__"); if (tmp == null) { return null; } modname = tmp.toString(); // locate the current package tmp = dict.__finditem__("__path__"); if (tmp instanceof PyList) { // __path__ is set, so modname is already the package name. dict.__setitem__("__package__", new PyString(modname)); } else { // __name__ is not a package name, try one level upwards. 
int dot = modname.lastIndexOf('.'); if (dot == -1) { if (level <= -1) { // there is no package, perform an absolute search dict.__setitem__("__package__", Py.None); return null; } throw Py.ValueError("Attempted relative import in non-package"); } // modname should be the package name. modname = modname.substring(0, dot); dict.__setitem__("__package__", new PyString(modname)); } } // walk upwards if required (level >= 2) while (level-- > 1) { int dot = modname.lastIndexOf('.'); if (dot == -1) { throw Py.ValueError("Attempted relative import beyond toplevel package"); } modname = modname.substring(0, dot); } if (Py.getSystemState().modules.__finditem__(modname) == null) { if (orig_level < 1) { if (modname.length() > 0) { Py.warning(Py.RuntimeWarning, String.format( "Parent module '%.200s' not found " + "while handling absolute import", modname)); } } else { throw Py.SystemError(String.format( "Parent module '%.200s' not loaded, " + "cannot perform relative import", modname)); } } return modname.intern(); } /** * Try to import the module named by <i>parentName.name</i>. The method tries 3 ways, accepting * the first that * succeeds: * <ol> * <li>Check for the module (by its fully-qualified name) in {@code sys.modules}.</li> * <li>If {@code mod==null}, try to load the module via * {@link #find_module(String, String, PyList)}. If {@code mod!=null}, find it as an attribute * of {@code mod} via its {@link PyObject#impAttr(String)} method (which then falls back to * {@code find_module} if {@code mod} has a {@code __path__}). Either way, add the loaded module * to {@code sys.modules}.</li> * <li>Try to load the module as a Java package by the name {@code outerFullName} * {@link JavaImportHelper#tryAddPackage(String, PyObject)}.</li> * </ol> * If a module by the given name already exists in {@code sys.modules}, * it will be returned from there directly. Otherwise, when {@code mod==null} (the frequent case), it * will be looked for via {@link #find_module(String, String, PyList)}. * <p> * The case {@code mod!=null} supports circumstances in which the module sought may be found as * an attribute of a parent module. * * @param mod if not {@code null}, a package where the module may be an attribute. * @param parentName parent name of the module. (The buffer is appended with "." and {@code name}.) * @param name the (simple) name of the module to load * @param outerFullName name to use with the {@code JavaImportHelper}. * @param fromlist if not {@code null} the import is {@code from <module> import <fromlist>} * @return the imported module (or {@code null} or {@link Py#None} on failure). */ private static PyObject import_next(PyObject mod, StringBuilder parentName, String name, String outerFullName, PyObject fromlist) { // Concatenate the parent name and module name *modifying the parent name buffer* if (parentName.length() > 0 && name != null && name.length() > 0) { parentName.append('.'); } String fullName = parentName.append(name).toString().intern(); // Check if already in sys.modules (possibly Py.None). 
PyObject modules = Py.getSystemState().modules; PyObject ret = modules.__finditem__(fullName); if (ret != null) { return ret; } if (mod == null) { // We are looking for a top-level module ret = find_module(fullName, name, null); } else { // Look within mod as enclosing package ret = mod.impAttr(name.intern()); } if (ret == null || ret == Py.None) { // Maybe this is a Java package: full name from the import and maybe classes to import if (JavaImportHelper.tryAddPackage(outerFullName, fromlist)) { // The action has already added it to sys.modules ret = modules.__finditem__(fullName); } return ret; } // The find operation may have added to sys.modules the module object we seek. if (modules.__finditem__(fullName) == null) { modules.__setitem__(fullName, ret); // Nope, add it } else { ret = modules.__finditem__(fullName); // Yep, return that instead } // On OSX we currently have to monkeypatch setuptools.command.easy_install. if (IS_OSX && fullName.equals("setuptools.command")) { // See http://bugs.jython.org/issue2570 load("_fix_jython_setuptools_osx"); } return ret; } /** * Top of the import logic in the case of a simple {@code import a.b.c.m}. * * @param name fully-qualified name of module to import {@code import a.b.c.m} * @param parentName used as a workspace as the search descends the package hierarchy * @return the named module (never {@code null} or {@code None}) * @throws PyException {@code ImportError} if not found */ private static PyObject import_first(String name, StringBuilder parentName) throws PyException { PyObject ret = import_next(null, parentName, name, null, null); if (ret == null || ret == Py.None) { throw Py.ImportError("No module named " + name); } return ret; } /** * Top of the import logic in the case of a complex {@code from a.b.c.m import n1, n2, n3}. * * @param name fully-qualified name of module to import {@code a.b.c.m}. * @param parentName used as a workspace as the search descends the package hierarchy * @param fullName the "outer" name by which the module is known {@code a.b.c.m}. * @param fromlist names to import from the module {@code n1, n2, n3}. * @return the named module (never returns {@code null} or {@code None}) * @throws PyException {@code ImportError} if not found */ private static PyObject import_first(String name, StringBuilder parentName, String fullName, PyObject fromlist) throws PyException { // Try the "normal" Python import process PyObject ret = import_next(null, parentName, name, fullName, fromlist); // If unsuccessful try importing as a Java package if (ret == null || ret == Py.None) { if (JavaImportHelper.tryAddPackage(fullName, fromlist)) { ret = import_next(null, parentName, name, fullName, fromlist); } } // If still unsuccessful, it's an error if (ret == null || ret == Py.None) { throw Py.ImportError("No module named " + name); } return ret; } /** * Iterate through the components (after the first) of a fully-qualified module name * {@code a.b.c.m} finding the corresponding modules {@code a.b}, {@code a.b.c}, and * {@code a.b.c.m}, importing them if necessary. This is a helper to * {@link #import_module_level(String, boolean, PyObject, PyObject, int)}, used when the module * name involves more than one level. * <p> * This method may be called in support of (effectively) of a simple import statement like * {@code import a.b.c.m} or a complex statement {@code from a.b.c.m import n1, n2, n3}. This * method always returns the "deepest" name, in the example, the module {@code m} whose full * name is {@code a.b.c.m}. 
* * @param mod top module of the import * @param parentName used as a workspace as the search descends the package hierarchy * @param restOfName {@code b.c.m} * @param fullName {@code a.b.c.m} * @param fromlist names to import from the module {@code n1, n2, n3}. * @return the last named module (never {@code null} or {@code None}) * @throws PyException {@code ImportError} if not found */ // ??pending: check if result is really a module/jpkg/jclass? private static PyObject import_logic(PyObject mod, StringBuilder parentName, String restOfName, String fullName, PyObject fromlist) throws PyException { int dot = 0; int start = 0; do { // Extract the name that starts at restOfName[start:] up to next dot. String name; dot = restOfName.indexOf('.', start); if (dot == -1) { name = restOfName.substring(start); } else { name = restOfName.substring(start, dot); } PyJavaPackage jpkg = null; if (mod instanceof PyJavaPackage) { jpkg = (PyJavaPackage) mod; } // Find (and maybe import) the package/module corresponding to this new segment. mod = import_next(mod, parentName, name, fullName, fromlist); // Try harder when importing as a Java package :/ if (jpkg != null && (mod == null || mod == Py.None)) { // try again -- under certain circumstances a PyJavaPackage may // have been added as a side effect of the last import_next // attempt. see Lib/test_classpathimport.py#test_bug1126 mod = import_next(jpkg, parentName, name, fullName, fromlist); } // If still unsuccessful, it's an error if (mod == null || mod == Py.None) { throw Py.ImportError("No module named " + name); } // Next module/package simple-name starts just after the last dot we found start = dot + 1; } while (dot != -1); return mod; } /** * Import a module by name. This supports the default {@code __import__()} function * {@code __builtin__.__import__}. (Called with the import system locked.) * * @param name qualified name of the package/module to import (may be relative) * @param top if true, return the top module in the name, otherwise the last * @param modDict the __dict__ of the importing module (used to navigate a relative import) * @param fromlist list of names being imported * @param level 0=absolute, n&gt;0=relative levels to go up - 1, -1=try relative then absolute. * @return an imported module (Java or Python) */ private static PyObject import_module_level(String name, boolean top, PyObject modDict, PyObject fromlist, int level) { // Check for basic invalid call if (name.length() == 0) { if (level == 0 || modDict == null) { throw Py.ValueError("Empty module name"); } else { PyObject moduleName = modDict.__findattr__("__name__"); // XXX: should this test be for "__main__"? if (moduleName != null && moduleName.toString().equals("__name__")) { throw Py.ValueError("Attempted relative import in non-package"); } } } // Seek the module (in sys.modules) that the import is relative to. PyObject modules = Py.getSystemState().modules; PyObject pkgMod = null; String pkgName = null; if (modDict != null && modDict.isMappingType()) { pkgName = get_parent(modDict, level); pkgMod = modules.__finditem__(pkgName); if (pkgMod != null && !(pkgMod instanceof PyModule)) { pkgMod = null; } } // Extract the first element of the (fully qualified) name. int dot = name.indexOf('.'); String firstName; if (dot == -1) { firstName = name; } else { firstName = name.substring(0, dot); } // Import the first-named module, relative to pkgMod (which may be null) StringBuilder parentName = new StringBuilder(pkgMod != null ? 
pkgName : ""); PyObject topMod = import_next(pkgMod, parentName, firstName, name, fromlist); if (topMod == Py.None || topMod == null) { // The first attempt failed. parentName = new StringBuilder(""); // could throw ImportError if (level > 0) { // Import relative to context. pkgName was already computed from level. topMod = import_first(pkgName + "." + firstName, parentName, name, fromlist); } else { // Absolute import topMod = import_first(firstName, parentName, name, fromlist); } } PyObject mod = topMod; if (dot != -1) { // This is a dotted name: work through the remaining name elements. mod = import_logic(topMod, parentName, name.substring(dot + 1), name, fromlist); } if (top) { return topMod; } if (fromlist != null && fromlist != Py.None) { ensureFromList(mod, fromlist, name); } return mod; } /** Defend against attempt to import by filename (withdrawn feature). */ private static void checkNotFile(String name){ if (name.indexOf(File.separatorChar) != -1) { throw Py.ImportError("Import by filename is not supported."); } } /** * Enforce ASCII module name, as a guard on module names supplied as an argument. The parser * guarantees the name from an actual import statement is a valid identifier. */ private static void checkName(String name) { for (int i = name.length(); i > 0;) { if (name.charAt(--i) > 255) { throw Py.ImportError("No module named " + name); } } } /** * This cache supports {@link #fileSystemDecode(PyObject)} and * {@link #fileSystemDecode(PyObject, boolean)}. Observation shows the import mechanism converts * the same file name hundreds of times during any use of Jython, so we use this to remember the * conversions of recent file names. */ // 20 is plenty private static LimitedCache<PyObject, String> fileSystemDecodeCache = new LimitedCache<>(20); /** * A wrapper for {@link Py#fileSystemDecode(PyObject)} for <b>project internal use</b> within * the import mechanism to convert decoding errors that occur during import to either * {@code null} or {@link Py#ImportError(String)} calls (and a log message), which usually * results in quiet failure. * * @param p assumed to be a (partial) file path * @param raiseImportError if true and {@code p} cannot be decoded raise {@code ImportError}. * @return String form of the object {@code p} (or {@code null}). */ public static String fileSystemDecode(PyObject p, boolean raiseImportError) { try { String decoded = fileSystemDecodeCache.get(p); if (decoded == null) { decoded = Py.fileSystemDecode(p); fileSystemDecodeCache.add(p, decoded); } return decoded; } catch (PyException e) { if (e.match(Py.UnicodeDecodeError)) { // p is bytes we cannot convert to a String using the FS encoding if (raiseImportError) { logger.log(Level.CONFIG, "Cannot decode path entry {0}", p.__repr__()); throw Py.ImportError("cannot decode"); } return null; } else { // Any other kind of exception continues as itself throw e; } } } /** * For <b>project internal use</b>, equivalent to {@code fileSystemDecode(p, true)} (see * {@link #fileSystemDecode(PyObject, boolean)}). * * @param p assumed to be a (partial) file path * @return String form of the object {@code p}. */ public static String fileSystemDecode(PyObject p) { return fileSystemDecode(p, true); } /** * Ensure that the items mentioned in the from-list of an import are actually present, even if * they are modules we have not imported yet. 
* * @param mod module we are importing from * @param fromlist tuple of names to import * @param name of module we are importing from (as given, may be relative) */ private static void ensureFromList(PyObject mod, PyObject fromlist, String name) { ensureFromList(mod, fromlist, name, false); } /** * Ensure that the items mentioned in the from-list of an import are actually present, even if * they are modules we have not imported yet. * * @param mod module we are importing from * @param fromlist tuple of names to import * @param name of module we are importing from (as given, may be relative) * @param recursive true, when this method calls itself */ private static void ensureFromList(PyObject mod, PyObject fromlist, String name, boolean recursive) { // THE ONLY CUSTOM CHANGE MADE IN THIS FILE // The last Jython version that contains this "if" statement is 2.7.1b3. The newer versions throw an exception // on line 1495 "Item in from list not a string". The failing type [None] is created in the library, not in H2O // code. if (mod.__findattr__("__path__") == null) { return; } // THE END OF THE CUSTOM CHANGE // This can happen with imports like "from . import foo" if (name.length() == 0) { name = mod.__findattr__("__name__").toString(); } StringBuilder modNameBuffer = new StringBuilder(name); for (PyObject item : fromlist.asIterable()) { if (!Py.isInstance(item, PyBaseString.TYPE)) { throw Py.TypeError("Item in ``from list'' not a string"); } if (item.toString().equals("*")) { if (recursive) { // Avoid endless recursion continue; } PyObject all; if ((all = mod.__findattr__("__all__")) != null) { ensureFromList(mod, all, name, true); } } if (mod.__findattr__((PyString)item) == null) { String fullName = modNameBuffer.toString() + "." + item.toString(); import_next(mod, modNameBuffer, item.toString(), fullName, null); } } } /** * Import a module by name. * * @param name the name of the package to import * @param top if true, return the top module in the name, otherwise the last * @return an imported module (Java or Python) */ public static PyObject importName(String name, boolean top) { checkNotFile(name); checkName(name); ReentrantLock importLock = Py.getSystemState().getImportLock(); importLock.lock(); try { return import_module_level(name, top, null, null, DEFAULT_LEVEL); } finally { importLock.unlock(); } } /** * Import a module by name. This supports the default {@code __import__()} function * {@code __builtin__.__import__}. Locks the import system while it operates. * * @param name the fully-qualified name of the package/module to import * @param top if true, return the top module in the name, otherwise the last * @param modDict the __dict__ of the importing module (used for name in relative import) * @param fromlist list of names being imported * @param level 0=absolute, n&gt;0=relative levels to go up, -1=try relative then absolute. * @return an imported module (Java or Python) */ public static PyObject importName(String name, boolean top, PyObject modDict, PyObject fromlist, int level) { checkNotFile(name); checkName(name); ReentrantLock importLock = Py.getSystemState().getImportLock(); importLock.lock(); try { return import_module_level(name, top, modDict, fromlist, level); } finally { importLock.unlock(); } } /** * Called from jython generated code when a statement like "import spam" is executed. 
*/ @Deprecated public static PyObject importOne(String mod, PyFrame frame) { return importOne(mod, frame, imp.DEFAULT_LEVEL); } /** * Called from jython generated code when a statement like "import spam" is executed. */ public static PyObject importOne(String mod, PyFrame frame, int level) { PyObject module = __builtin__.__import__(mod, frame.f_globals, frame.getLocals(), Py.None, level); return module; } /** * Called from jython generated code when a statement like "import spam as foo" is executed. */ @Deprecated public static PyObject importOneAs(String mod, PyFrame frame) { return importOneAs(mod, frame, imp.DEFAULT_LEVEL); } /** * Called from jython generated code when a statement like "import spam as foo" is executed. */ public static PyObject importOneAs(String mod, PyFrame frame, int level) { PyObject module = __builtin__.__import__(mod, frame.f_globals, frame.getLocals(), Py.None, level); int dot = mod.indexOf('.'); while (dot != -1) { int dot2 = mod.indexOf('.', dot + 1); String name; if (dot2 == -1) { name = mod.substring(dot + 1); } else { name = mod.substring(dot + 1, dot2); } module = module.__getattr__(name); dot = dot2; } return module; } /** * replaced by importFrom with level param. Kept for backwards compatibility. * * @deprecated use importFrom with level param. */ @Deprecated public static PyObject[] importFrom(String mod, String[] names, PyFrame frame) { return importFromAs(mod, names, null, frame, DEFAULT_LEVEL); } /** * Called from jython generated code when a statement like "from spam.eggs import foo, bar" is * executed. */ public static PyObject[] importFrom(String mod, String[] names, PyFrame frame, int level) { return importFromAs(mod, names, null, frame, level); } /** * replaced by importFromAs with level param. Kept for backwards compatibility. * * @deprecated use importFromAs with level param. */ @Deprecated public static PyObject[] importFromAs(String mod, String[] names, PyFrame frame) { return importFromAs(mod, names, null, frame, DEFAULT_LEVEL); } /** * Called from jython generated code when a statement like "from spam.eggs import foo as spam" * is executed. */ public static PyObject[] importFromAs(String mod, String[] names, String[] asnames, PyFrame frame, int level) { PyObject[] pyNames = new PyObject[names.length]; for (int i = 0; i < names.length; i++) { pyNames[i] = Py.newString(names[i]); } PyObject module = __builtin__.__import__(mod, frame.f_globals, frame.getLocals(), new PyTuple(pyNames), level); PyObject[] submods = new PyObject[names.length]; for (int i = 0; i < names.length; i++) { PyObject submod = module.__findattr__(names[i]); // XXX: Temporary fix for http://bugs.jython.org/issue1900 if (submod == null) { submod = module.impAttr(names[i]); } // end temporary fix. if (submod == null) { throw Py.ImportError("cannot import name " + names[i]); } submods[i] = submod; } return submods; } private final static PyTuple all = new PyTuple(Py.newString('*')); /** * Called from jython generated code when a statement like "from spam.eggs import *" is * executed. 
*/ public static void importAll(String mod, PyFrame frame, int level) { PyObject module = __builtin__.__import__(mod, frame.f_globals, frame.getLocals(), all, level); importAll(module, frame); } @Deprecated public static void importAll(String mod, PyFrame frame) { importAll(mod, frame, DEFAULT_LEVEL); } public static void importAll(PyObject module, PyFrame frame) { PyObject names; boolean filter = true; if (module instanceof PyJavaPackage) { names = ((PyJavaPackage)module).fillDir(); } else { PyObject __all__ = module.__findattr__("__all__"); if (__all__ != null) { names = __all__; filter = false; } else { names = module.__dir__(); } } loadNames(names, module, frame.getLocals(), filter); } /** * From a module, load the attributes found in <code>names</code> into locals. * * @param filter if true, if the name starts with an underscore '_' do not add it to locals * @param locals the namespace into which names will be loaded * @param names the names to load from the module * @param module the fully imported module */ private static void loadNames(PyObject names, PyObject module, PyObject locals, boolean filter) { for (PyObject name : names.asIterable()) { String sname = ((PyString)name).internedString(); if (filter && sname.startsWith("_")) { continue; } else { try { PyObject value = module.__findattr__(sname); if (value == null) { PyObject nameObj = module.__findattr__("__name__"); if (nameObj != null) { String submodName = nameObj.__str__().toString() + '.' + sname; value = __builtin__ .__import__(submodName, null, null, nonEmptyFromlist); } } locals.__setitem__(sname, value); } catch (Exception exc) { continue; } } } } static PyObject reload(PyModule m) { PySystemState sys = Py.getSystemState(); PyObject modules = sys.modules; Map<String, PyModule> modules_reloading = sys.modules_reloading; ReentrantLock importLock = Py.getSystemState().getImportLock(); importLock.lock(); try { return _reload(m, modules, modules_reloading); } finally { modules_reloading.clear(); importLock.unlock(); } } private static PyObject _reload(PyModule m, PyObject modules, Map<String, PyModule> modules_reloading) { String name = m.__getattr__("__name__").toString().intern(); PyModule nm = (PyModule)modules.__finditem__(name); if (nm == null || !nm.__getattr__("__name__").toString().equals(name)) { throw Py.ImportError("reload(): module " + name + " not in sys.modules"); } PyModule existing_module = modules_reloading.get(name); if (existing_module != null) { // Due to a recursive reload, this module is already being reloaded. return existing_module; } // Since we are already in a re-entrant lock, // this test & set is guaranteed to be atomic modules_reloading.put(name, nm); PyList path = Py.getSystemState().path; String modName = name; int dot = name.lastIndexOf('.'); if (dot != -1) { String iname = name.substring(0, dot).intern(); PyObject pkg = modules.__finditem__(iname); if (pkg == null) { throw Py.ImportError("reload(): parent not in sys.modules"); } path = (PyList)pkg.__getattr__("__path__"); name = name.substring(dot + 1, name.length()).intern(); } nm.__setattr__("__name__", new PyString(modName)); // FIXME necessary?! try { PyObject ret = find_module(name, modName, path); modules.__setitem__(modName, ret); return ret; } catch (RuntimeException t) { // Need to restore module, due to the semantics of addModule, which removed it // Fortunately we are in a module import lock modules.__setitem__(modName, nm); throw t; } } public static int getAPIVersion() { return APIVersion; } }
0
java-sources/ai/h2o/h2o-ext-jython-cfunc/3.46.0.7/water
java-sources/ai/h2o/h2o-ext-jython-cfunc/3.46.0.7/water/udf/JythonCFuncLoader.java
package water.udf; import org.python.core.Py; import org.python.core.PySystemState; /** * Custom function loader, which can instantiate * functions written in Python. * * The provider internally uses Jython. * * Note: Jython caches the loaded python programs. That means * changing underlying function definition (i.e, Python code) is not * reflected! */ public class JythonCFuncLoader extends CFuncLoader { @Override public String getLang() { return "python"; } @Override public <F> F load(String jfuncName, Class<? extends F> targetKlazz, ClassLoader classLoader) { int idxLastDot = jfuncName.lastIndexOf('.'); String module = jfuncName.substring(0, idxLastDot); String clsName = jfuncName.substring(idxLastDot+1); ClassLoader savedCtxCl = Thread.currentThread().getContextClassLoader(); PySystemState savedSystemState = Py.getSystemState(); // Get a system state for the current thread try { Thread.currentThread().setContextClassLoader(classLoader); PySystemState newSystemState = new PySystemState(); newSystemState.setClassLoader(classLoader); Py.setSystemState(newSystemState); // Assign a new system state with a specific classloader to the current thread. return new JythonObjectFactory(targetKlazz, module, clsName).createObject(); } finally { Py.setSystemState(savedSystemState); Thread.currentThread().setContextClassLoader(savedCtxCl); } } }
0
java-sources/ai/h2o/h2o-ext-jython-cfunc/3.46.0.7/water
java-sources/ai/h2o/h2o-ext-jython-cfunc/3.46.0.7/water/udf/JythonObjectFactory.java
package water.udf; import org.python.core.Py; import org.python.core.PyArray; import org.python.core.PyClass; import org.python.core.PyObject; import org.python.core.PySystemState; public class JythonObjectFactory { private final Class interfaceType; private final PyObject klass; // Constructor obtains a reference to the importer, module, and the class name public JythonObjectFactory(Class interfaceType, String moduleName, String className) { this.interfaceType = interfaceType; PyObject importer = Py.getSystemState().getBuiltins().__getitem__(Py.newString("__import__")); PyObject module = importer.__call__(new PyObject[] {Py.newString(moduleName), PyArray.zeros(1, String.class)}, new String[] {"fromlist"} ); // Reload module definition - this is important to enable iterative updates of function definitions // from interactive environments module = org.python.core.__builtin__.reload(module); klass = module.__getattr__(className); } // All of the followng methods return // a coerced Jython object based upon the pieces of information // that were passed into the factory. The differences are // between them are the number of arguments that can be passed // in as arguents to the object. public <T> T createObject() { return (T) klass.__call__().__tojava__(interfaceType); } }
0
java-sources/ai/h2o/h2o-ext-krbstandalone/3.46.0.7/hex
java-sources/ai/h2o/h2o-ext-krbstandalone/3.46.0.7/hex/security/KerberosExtension.java
package hex.security; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import water.AbstractH2OExtension; import water.H2O; import water.init.StandaloneKerberosComponent; import water.persist.PersistHdfs; import water.persist.security.HdfsDelegationTokenRefresher; import water.util.Log; import java.io.IOException; import java.time.Duration; import java.util.List; import java.util.stream.Collectors; /** * Authenticates the H2O Node to access secured Hadoop cluster in a standalone mode. * * This extension assumes that if Hadoop configuration is present, and it has Kerberos enabled * the user will likely want to read data from HDFS even though H2O is running in a standalone mode (not on Hadoop). * The extension attempts to authenticate the user using an existing Kerberos ticket. This means the Kerberos ticket * needs to be manually acquired by the user on each node before the H2O instance is started. * * The extension fails gracefully if the user cannot be authenticated and doesn't stop H2O start-up. The failure * will be logged as an error. */ public class KerberosExtension extends AbstractH2OExtension { public static String NAME = "KrbStandalone"; private final H2O.OptArgs _args; @SuppressWarnings("unused") public KerberosExtension() { this(H2O.ARGS); } KerberosExtension(H2O.OptArgs args) { _args = args; } @Override public String getExtensionName() { return NAME; } @Override public boolean isEnabled() { // Enabled if running in Standalone mode only (regardless if launched from h2o.jar or java -cp h2odriver.jar water.H2OApp) return isStandalone(); } private boolean isStandalone() { return !_args.launchedWithHadoopJar(); } @Override public void onLocalNodeStarted() { Configuration conf = PersistHdfs.CONF; if (conf == null) return; // this is theoretically possible although unlikely if (isKerberosEnabled(conf)) { UserGroupInformation.setConfiguration(conf); final UserGroupInformation ugi; if (_args.keytab_path != null || _args.principal != null) { if (_args.keytab_path == null) { throw new RuntimeException("Option keytab_path needs to be specified when option principal is given."); } if (_args.principal == null) { throw new RuntimeException("Option principal needs to be specified when option keytab_path is given."); } Log.debug("Kerberos enabled in Hadoop configuration. Trying to login user from keytab."); ugi = loginUserFromKeytab(_args.principal, _args.keytab_path); } else { Log.debug("Kerberos enabled in Hadoop configuration. Trying to login the (default) user."); ugi = loginDefaultUser(); } if (ugi != null) { Log.info("Kerberos subsystem initialized. 
Using user '" + ugi.getShortUserName() + "'."); } if (_args.hdfs_token_refresh_interval != null) { long refreshIntervalSecs = parseRefreshIntervalToSecs(_args.hdfs_token_refresh_interval); Log.info("HDFS token will be refreshed every " + refreshIntervalSecs + "s (user specified " + _args.hdfs_token_refresh_interval + ")."); HdfsDelegationTokenRefresher.startRefresher(conf, _args.principal, _args.keytab_path, refreshIntervalSecs); } initComponents(conf, _args); } else { Log.info("Kerberos not configured"); if (_args.hdfs_token_refresh_interval != null) { Log.warn("Option hdfs_token_refresh_interval ignored because Kerberos is not configured."); } if (_args.keytab_path != null) { Log.warn("Option keytab_path ignored because Kerberos is not configured."); } if (_args.principal != null) { Log.warn("Option principal ignored because Kerberos is not configured."); } } } static void initComponents(Configuration conf, H2O.OptArgs args) { List<StandaloneKerberosComponent> components = StandaloneKerberosComponent.loadAll(); List<String> componentNames = components.stream().map(StandaloneKerberosComponent::name).collect(Collectors.toList()); Log.info("Standalone Kerberos components: " + componentNames); for (StandaloneKerberosComponent component : components) { boolean active = component.initComponent(conf, args); String statusMsg = active ? "successfully initialized" : "not active"; Log.info("Component " + component.name() + " " + statusMsg + "."); } } private long parseRefreshIntervalToSecs(String refreshInterval) { try { if (!refreshInterval.contains("P")) { // convenience - allow user to specify just "10M", instead of requiring "PT10M" refreshInterval = "PT" + refreshInterval; } return Duration.parse(refreshInterval.toLowerCase()).getSeconds(); } catch (Exception e) { throw new IllegalArgumentException("Unable to parse refresh interval, got " + refreshInterval + ". Example of correct specification '4H' (token will be refreshed every 4 hours).", e); } } private UserGroupInformation loginDefaultUser() { try { UserGroupInformation.loginUserFromSubject(null); return UserGroupInformation.getCurrentUser(); } catch (IOException e) { Log.err("Kerberos initialization FAILED. Kerberos ticket needs to be acquired before starting H2O (run kinit).", e); return null; } } private static UserGroupInformation loginUserFromKeytab(String authPrincipal, String authKeytabPath) { try { UserGroupInformation.loginUserFromKeytab(authPrincipal, authKeytabPath); return UserGroupInformation.getCurrentUser(); } catch (IOException e) { throw new RuntimeException("Failed to login user " + authPrincipal + " from keytab " + authKeytabPath); } } private static boolean isKerberosEnabled(Configuration conf) { return "kerberos".equals(conf.get("hadoop.security.authentication")); } }
0
java-sources/ai/h2o/h2o-ext-mojo-pipeline/3.46.0.7/hex
java-sources/ai/h2o/h2o-ext-mojo-pipeline/3.46.0.7/hex/api/AssemblyToMojoPipelineExportHandler.java
package hex.api; import hex.mojopipeline.H2OAssemblyToMojoPipelineConverter; import hex.mojopipeline.ProtobufPipelineWriter; import mojo.spec.PipelineOuterClass; import water.DKV; import water.api.Handler; import water.api.StreamingSchema; import water.api.schemas99.AssemblyV99; import water.rapids.Assembly; public class AssemblyToMojoPipelineExportHandler extends Handler { @SuppressWarnings("unused") public StreamingSchema fetchMojoPipeline(final int version, final AssemblyV99 ass) { Assembly assembly = DKV.getGet(ass.assembly_id); if (assembly == null) { throw new IllegalArgumentException("Assembly doesn't exist in DKV. It must be fitted first."); } PipelineOuterClass.Pipeline pipeline = H2OAssemblyToMojoPipelineConverter.convertToProtoBufPipeline(assembly); return new StreamingSchema(new ProtobufPipelineWriter(pipeline), ass.file_name + ".mojo"); } }
0
java-sources/ai/h2o/h2o-ext-mojo-pipeline/3.46.0.7/hex
java-sources/ai/h2o/h2o-ext-mojo-pipeline/3.46.0.7/hex/api/MojoPipelineApiRegister.java
package hex.api; import water.api.AbstractRegister; import water.api.RestApiContext; public class MojoPipelineApiRegister extends AbstractRegister { @Override public void registerEndPoints(RestApiContext context) { context.registerEndpoint( "_assembly_fetch_mojo_pipeline", "GET /99/Assembly.fetch_mojo_pipeline/{assembly_id}/{file_name}", AssemblyToMojoPipelineExportHandler.class, "fetchMojoPipeline", "Generate a MOJO 2 pipeline artifact from the Assembly"); } @Override public String getName() { return "Mojo 2 pipeline extensions"; } }
0
java-sources/ai/h2o/h2o-ext-mojo-pipeline/3.46.0.7/hex
java-sources/ai/h2o/h2o-ext-mojo-pipeline/3.46.0.7/hex/mojopipeline/H2OAssemblyToMojoPipelineConverter.java
package hex.mojopipeline; import hex.genmodel.mojopipeline.transformers.*; import mojo.spec.Custom; import mojo.spec.PipelineOuterClass; import water.fvec.ByteVec; import water.fvec.NFSFileVec; import water.parser.ParseTime; import water.rapids.Assembly; import water.rapids.ast.AstParameter; import water.rapids.ast.params.AstId; import water.rapids.ast.params.AstNum; import water.rapids.ast.params.AstStr; import water.rapids.ast.params.AstStrList; import water.rapids.transforms.*; import mojo.spec.PipelineOuterClass.Pipeline; import mojo.spec.PipelineOuterClass.Transformation; import mojo.spec.PipelineOuterClass.Frame; import mojo.spec.ColumnOuterClass.Column; import mojo.spec.ColumnOuterClass; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.util.Arrays; import java.util.Map; public class H2OAssemblyToMojoPipelineConverter { public static Pipeline convertToProtoBufPipeline(Assembly assembly) { final Transform[] stages = assembly.steps(); final Transform firstStage = stages[0]; final Transform lastStage = stages[stages.length - 1]; Pipeline.Builder pipelineBuilder = Pipeline.newBuilder(); Column[] inputColumns = convertColumns(firstStage.getInputNames(), firstStage.getInputTypes()); pipelineBuilder.setFeatures(frame(inputColumns)); Frame.Builder interimsFrameBuilder = Frame.newBuilder(); InplaceOperationSimulator inplaceOperationSimulator = new InplaceOperationSimulator(); for (Transform stage : stages) { Transformation transformation = convertStage(stage, inplaceOperationSimulator); pipelineBuilder.addTransformations(transformation); if (!stage.isInPlace() &&stage.getNewNames().length > 0) { Column[] tempColumns = convertColumns(stage.getNewNames(), stage.getNewTypes()); interimsFrameBuilder.addAllColumns(Arrays.asList(tempColumns)); } } Column[] replacementColumns = convertColumns( inplaceOperationSimulator.getReplacementColumnNames(), inplaceOperationSimulator.getReplacementColumnTypes()); interimsFrameBuilder.addAllColumns(Arrays.asList(replacementColumns)); pipelineBuilder.setInterims(interimsFrameBuilder); setOutputColumns(pipelineBuilder, lastStage, inplaceOperationSimulator); Pipeline pipeline = pipelineBuilder.build(); return pipeline; } public static MojoPipeline convert(Assembly assembly) throws IOException { Pipeline pipeline = convertToProtoBufPipeline(assembly); File tempFile = File.createTempFile("Pipeline", ".mojo"); tempFile.deleteOnExit(); ProtobufPipelineWriter writer = new ProtobufPipelineWriter(pipeline); try (FileOutputStream outputStream = new FileOutputStream(tempFile)) { writer.writeTo(outputStream); } ByteVec mojoData = NFSFileVec.make(tempFile); return new MojoPipeline(mojoData); } private static Column convertColumn(String name, String type) { Column.Builder builder = Column.newBuilder(); builder = builder.setName(name); if (type.equals("Numeric")) { builder.setFloat64Type(ColumnOuterClass.Float64Type.newBuilder().build()); } else { builder.setStrType(ColumnOuterClass.StrType.newBuilder().build()); } return builder.build(); } private static Column[] convertColumns(String[] names, String[] types) { if (names.length != types.length) { throw new IllegalArgumentException( String.format("The length of names and types must be the same, " + "but length of names is %d and length of types is %d.", names.length, types.length)); } Column[] result = new Column[names.length]; for (int i = 0; i < result.length; i++) { result[i] = convertColumn(names[i], types[i]); } return result; } private static Transformation 
convertStage(Transform stage, InplaceOperationSimulator inplaceOperationSimulator){ if (stage instanceof H2OColSelect) { return convertColSelect((H2OColSelect)stage, inplaceOperationSimulator); } else if (stage instanceof H2OBinaryOp) { return convertBinaryOp((H2OBinaryOp)stage, inplaceOperationSimulator); } else if (stage instanceof H2OColOp) { return convertColOp((H2OColOp)stage, inplaceOperationSimulator); } else { throw new UnsupportedOperationException( String.format("Stage conversion of type %s is not supported yet.", stage.getClass().getName())); } } private static Transformation convertColSelect(H2OColSelect stage, InplaceOperationSimulator inplaceOperationSimulator){ Transformation.Builder builder = Transformation.newBuilder(); builder.setIdentityOp(PipelineOuterClass.IdentityOp.newBuilder()); for (String outputColumn : stage.getOutputNames()) { String updatedColumn = inplaceOperationSimulator.updateColumn(outputColumn); builder.addInputs(updatedColumn); builder.addOutputs(updatedColumn); } return builder.build(); } private static void setOutputColumns( Pipeline.Builder pipelineBuilder, Transform lastStage, InplaceOperationSimulator inplaceOperationSimulator) { Transformation.Builder builder = Transformation.newBuilder(); builder.setIdentityOp(PipelineOuterClass.IdentityOp.newBuilder()); for (String outputColumn : lastStage.getOutputNames()) { String inputColumn = inplaceOperationSimulator.updateColumn(outputColumn); builder.addInputs(inputColumn); builder.addOutputs("assembly_" + outputColumn); } Transformation extraIdentity = builder.build(); pipelineBuilder.addTransformations(extraIdentity); Column[] outputColumns = convertColumns( extraIdentity.getOutputsList().toArray(new String[0]), lastStage.getOutputTypes()); pipelineBuilder.setOutputs(frame(outputColumns)); } private static Transformation convertColOp(H2OColOp stage, InplaceOperationSimulator inplaceOperationSimulator){ Transformation.Builder builder = Transformation.newBuilder(); String functionName = stage.getAst()._asts[0].str(); Custom.CustomParam functionParam = Custom.CustomParam.newBuilder() .setName("function") .setStringParam(functionName) .build(); Custom.CustomParam timezoneParam = Custom.CustomParam.newBuilder() .setName("timezone") .setStringParam(ParseTime.getTimezone().getID()) .build(); Custom.CustomOp.Builder customOpBuilder = Custom.CustomOp.newBuilder(); customOpBuilder.addParams(functionParam); customOpBuilder.addParams(timezoneParam); convertParameters(stage, customOpBuilder); if (MathUnaryTransform.Factory.functionExists(functionName)) { customOpBuilder.setTransformerName(MathUnaryTransform.Factory.TRANSFORMER_ID); } else if (StringUnaryTransform.Factory.functionExists(functionName)) { customOpBuilder.setTransformerName(StringUnaryTransform.Factory.TRANSFORMER_ID); } else if (StringPropertiesUnaryTransform.Factory.functionExists(functionName)) { customOpBuilder.setTransformerName(StringPropertiesUnaryTransform.Factory.TRANSFORMER_ID); } else if (StringGrepTransform.Factory.functionExists(functionName)) { customOpBuilder.setTransformerName(StringGrepTransform.Factory.TRANSFORMER_ID); } else if (StringSplitTransform.Factory.functionExists(functionName)) { customOpBuilder.setTransformerName(StringSplitTransform.Factory.TRANSFORMER_ID); } else if (TimeUnaryTransform.Factory.functionExists(functionName)) { customOpBuilder.setTransformerName(TimeUnaryTransform.Factory.TRANSFORMER_ID); } else if (ToStringConversion.Factory.functionExists(functionName)) { 
customOpBuilder.setTransformerName(ToStringConversion.Factory.TRANSFORMER_ID); } else if (ToNumericConversion.Factory.functionExists(functionName)) { customOpBuilder.setTransformerName(ToNumericConversion.Factory.TRANSFORMER_ID); } else { throw new UnsupportedOperationException( String.format("The function '%s' in the stage '%s' is not supported.", functionName, stage.name())); } builder.setCustomOp(customOpBuilder.build()); for (String inputColumn : stage.getOldNames()) { String updatedColumn = inplaceOperationSimulator.updateColumn(inputColumn); builder.addInputs(updatedColumn); } if (stage.isInPlace()) { String[] oldNames = stage.getOldNames(); for (int i = 0; i < oldNames.length; i++) { String oldName = oldNames[i]; String newName = "temp_" + oldName + "_" + stage.name(); inplaceOperationSimulator.setNewReplacement(oldName, newName, stage.getNewTypes()[i]); builder.addOutputs(newName); } } else { for (String outputColumn : stage.getNewNames()) { builder.addOutputs(outputColumn); } } return builder.build(); } private static void convertParameters(H2OColOp stage, Custom.CustomOp.Builder builder) { for (Map.Entry<String,AstParameter> entry: stage.getParams().entrySet()) { String name = entry.getKey(); AstParameter value = entry.getValue(); Custom.CustomParam.Builder paramBuilder = Custom.CustomParam.newBuilder().setName(name); if (value instanceof AstNum) { AstNum parameter = (AstNum) value; paramBuilder.setFloat64Param(parameter.getNum()); } else if (value instanceof AstStr) { AstStr parameter = (AstStr) value; paramBuilder.setStringParam(parameter.getStr()); } else if (value instanceof AstStrList) { AstStrList parameter = (AstStrList) value; String joined = String.join("`````", parameter._strs); paramBuilder.setStringParam(joined); } else if (value instanceof AstId) { AstId parameter = (AstId) value; paramBuilder.setStringParam(parameter.str()); } builder.addParams(paramBuilder.build()); } } private static Transformation convertBinaryOp(H2OBinaryOp stage, InplaceOperationSimulator inplaceOperationSimulator){ Transformation.Builder builder = Transformation.newBuilder(); String functionName = stage.getAst()._asts[0].str(); Custom.CustomOp.Builder customOpBuilder = Custom.CustomOp.newBuilder(); customOpBuilder.addParams( Custom.CustomParam.newBuilder() .setName("function") .setStringParam(functionName) .build()); customOpBuilder.addParams( Custom.CustomParam.newBuilder() .setName("isLeftCol") .setBoolParam(stage.getIsLeftColumn()) .build()); customOpBuilder.addParams( Custom.CustomParam.newBuilder() .setName("isRightCol") .setBoolParam(stage.getIsRightColumn()) .build()); if(!stage.getIsLeftColumn()) { customOpBuilder.addParams( Custom.CustomParam.newBuilder() .setName("constValue") .setFloat64Param(stage.getAst()._asts[1].exec(null).getNum()) .build()); } if(!stage.getIsRightColumn()) { customOpBuilder.addParams( Custom.CustomParam.newBuilder() .setName("constValue") .setFloat64Param(stage.getAst()._asts[2].exec(null).getNum()) .build()); } convertParameters(stage, customOpBuilder); if (MathBinaryTransform.Factory.functionExists(functionName)) { customOpBuilder.setTransformerName(MathBinaryTransform.Factory.TRANSFORMER_ID); } else if (StringPropertiesBinaryTransform.Factory.functionExists(functionName)) { customOpBuilder.setTransformerName(StringPropertiesBinaryTransform.Factory.TRANSFORMER_ID); } else { throw new UnsupportedOperationException( String.format("The function '%s' in the stage '%s' is not supported.", functionName, stage.name())); } 
builder.setCustomOp(customOpBuilder.build()); for (String inputColumn : stage.getOldNames()) { String updatedColumn = inplaceOperationSimulator.updateColumn(inputColumn); builder.addInputs(updatedColumn); } if (stage.isInPlace()) { String[] oldNames = stage.getOldNames(); String oldName = oldNames[0]; String newName = "temp_" + oldName + "_" + stage.name(); inplaceOperationSimulator.setNewReplacement(oldName, newName, stage.getNewTypes()[0]); builder.addOutputs(newName); } else { for (String outputColumn : stage.getNewNames()) { builder.addOutputs(outputColumn); } } return builder.build(); } private static Frame frame(Column[] cols) { return Frame.newBuilder().addAllColumns(Arrays.asList(cols)).build(); } }
0
java-sources/ai/h2o/h2o-ext-mojo-pipeline/3.46.0.7/hex
java-sources/ai/h2o/h2o-ext-mojo-pipeline/3.46.0.7/hex/mojopipeline/InplaceOperationSimulator.java
package hex.mojopipeline; import java.util.ArrayList; import java.util.HashMap; import java.util.List; // MOJO2 doesn't support inplace conversions. The inplace function will be simulated via additional temporary columns public class InplaceOperationSimulator { HashMap<String, String> _originalToReplacementMapping = new HashMap<>(); List<String> _replacementNames = new ArrayList<>(); List<String> _replacementTypes = new ArrayList<>(); public String updateColumn(String column) { String tempColumn = _originalToReplacementMapping.get(column); if (tempColumn == null) { return column; } else { return tempColumn; } } public void setNewReplacement(String originalName, String replacementName, String replacementType) { _replacementNames.add(replacementName); _replacementTypes.add(replacementType); _originalToReplacementMapping.put(originalName, replacementName); } public String[] getReplacementColumnNames() { return _replacementNames.toArray(new String[0]);} public String[] getReplacementColumnTypes() { return _replacementTypes.toArray(new String[0]);} }
0
java-sources/ai/h2o/h2o-ext-mojo-pipeline/3.46.0.7/hex
java-sources/ai/h2o/h2o-ext-mojo-pipeline/3.46.0.7/hex/mojopipeline/MojoPipeline.java
package hex.mojopipeline; import ai.h2o.mojos.runtime.api.MojoPipelineService; import ai.h2o.mojos.runtime.api.PipelineLoader; import ai.h2o.mojos.runtime.api.PipelineLoaderFactory; import ai.h2o.mojos.runtime.api.backend.MemoryReaderBackend; import ai.h2o.mojos.runtime.api.backend.ReaderBackend; import ai.h2o.mojos.runtime.frame.*; import ai.h2o.mojos.runtime.lic.LicenseException; import ai.h2o.mojos.runtime.frame.MojoColumn.Type; import mojo.spec.PipelineOuterClass; import water.*; import water.fvec.*; import water.parser.BufferedString; import java.io.*; import java.sql.Date; import java.sql.Timestamp; import java.text.DateFormat; import java.text.SimpleDateFormat; import java.util.Arrays; public class MojoPipeline extends Iced<MojoPipeline> { private ByteVec _mojoData; private transient MojoPipelineMeta _mojoPipelineMeta; public MojoPipeline(ByteVec mojoData) { _mojoData = mojoData; _mojoPipelineMeta = readPipelineMeta(_mojoData); } public Frame transform(Frame f, boolean allowTimestamps) { Frame adaptedFrame = adaptFrame(f, allowTimestamps); byte[] types = outputTypes(); return new MojoPipelineTransformer(_mojoData._key).doAll(types, adaptedFrame) .outputFrame(null, _mojoPipelineMeta.outputFrameMeta.getColumnNames(), null); } private byte[] outputTypes() { MojoFrameMeta outputMeta = _mojoPipelineMeta.outputFrameMeta; byte[] types = new byte[outputMeta.size()]; int i = 0; for (Type type : outputMeta.getColumnTypes()) { types[i++] = type.isnumeric || type == Type.Bool ? Vec.T_NUM : Vec.T_STR; } return types; } private Frame adaptFrame(Frame f, boolean allowTimestamps) { return adaptFrame(f, _mojoPipelineMeta.inputFrameMeta, allowTimestamps); } private static Frame adaptFrame(Frame f, MojoFrameMeta inputMeta, boolean allowTimestamps) { Frame adaptedFrame = new Frame(); for (int colIdx = 0; colIdx < inputMeta.size(); colIdx++) { String colName = inputMeta.getColumnName(colIdx); Vec v = f.vec(colName); if (v == null) { throw new IllegalArgumentException("Input frame is missing a column: " + colName); } if (v.get_type() == Vec.T_BAD || v.get_type() == Vec.T_UUID) { throw new UnsupportedOperationException("Columns of type " + v.get_type_str() + " are currently not supported."); } if (! allowTimestamps && v.get_type() == Vec.T_TIME && inputMeta.getColumnType(colName) == Type.Str) { throw new IllegalArgumentException("MOJO Pipelines currently do not support datetime columns represented as timestamps. " + "Please parse your dataset again and make sure column '" + colName + "' is parsed as String instead of Timestamp. " + "You can also enable implicit timestamp conversion in your client. 
Please refer to documentation of the transform function."); } adaptedFrame.add(colName, v); } return adaptedFrame; } private static ai.h2o.mojos.runtime.MojoPipeline readPipeline(ByteVec mojoData) { try { try (InputStream input = mojoData.openStream(null); ReaderBackend reader = MemoryReaderBackend.fromZipStream(input)) { return MojoPipelineService.loadPipeline(reader); } } catch (IOException | LicenseException e) { throw new RuntimeException(e); } } private static MojoPipelineMeta readPipelineMeta(ByteVec mojoData) { try { try (InputStream input = mojoData.openStream(null); ReaderBackend reader = MemoryReaderBackend.fromZipStream(input)) { final PipelineLoaderFactory factory = MojoPipelineService.INSTANCE.get(reader); final PipelineLoader loader = factory.createLoader(reader, null); return new MojoPipelineMeta(loader.getInput(), loader.getOutput()); } } catch (IOException | LicenseException e) { throw new RuntimeException(e); } } private static class MojoPipelineMeta { final MojoFrameMeta inputFrameMeta; final MojoFrameMeta outputFrameMeta; private MojoPipelineMeta(MojoFrameMeta inputFrameMeta, MojoFrameMeta outputFrameMeta) { this.inputFrameMeta = inputFrameMeta; this.outputFrameMeta = outputFrameMeta; } } private static class MojoPipelineTransformer extends MRTask<MojoPipelineTransformer> { private final Key<Vec> _mojoDataKey; private transient ai.h2o.mojos.runtime.MojoPipeline _pipeline; private MojoPipelineTransformer(Key<Vec> mojoDataKey) { _mojoDataKey = mojoDataKey; } @Override protected void setupLocal() { ByteVec mojoData = DKV.getGet(_mojoDataKey); _pipeline = readPipeline(mojoData); } @Override public void map(Chunk[] cs, NewChunk[] ncs) { assert cs.length == _pipeline.getInputMeta().size(); MojoFrameBuilder frameBuilder = _pipeline.getInputFrameBuilder(); MojoRowBuilder rowBuilder = frameBuilder.getMojoRowBuilder(); MojoChunkConverter[] conv = new MojoChunkConverter[cs.length]; MojoFrameMeta meta = _pipeline.getInputMeta(); for (int col = 0; col < cs.length; col++) { final Type type = meta.getColumnType(_fr.name(col)); final int idx = meta.getColumnIndex(_fr.name(col)); conv[col] = makeConverter(cs[col], idx, type); } // Convert chunks to a MojoFrame for (int i = 0; i < cs[0]._len; i++) { for (int col = 0; col < cs.length; col++) { Chunk c = cs[col]; if (! c.isNA(i)) { conv[col].convertValue(i, rowBuilder); } } frameBuilder.addRow(rowBuilder); } MojoFrame input = frameBuilder.toMojoFrame(); // Transform whole chunk at once MojoFrame transformed = _pipeline.transform(input); // Write to NewChunks for (int col = 0; col < ncs.length; col++) { NewChunk nc = ncs[col]; MojoColumn column = transformed.getColumn(col); assert column.size() == cs[0].len(); switch (column.getType()) { case Str: for (String s : (String[]) column.getData()) { nc.addStr(s); } break; case Bool: for (byte d : (byte[]) column.getData()) { nc.addNum(d, 0); } break; case Int32: for (int d : (int[]) column.getData()) { nc.addNum(d, 0); } break; case Int64: for (long d : (long[]) column.getData()) { nc.addNum(d, 0); } break; case Float32: for (float d : (float[]) column.getData()) { nc.addNum(d); } break; case Float64: for (double d : (double[]) column.getData()) { nc.addNum(d); } break; default: throw new UnsupportedOperationException("Output type " + column.getType() + " is currently not supported for MOJO2. 
See https://github.com/h2oai/h2o-3/issues/7898"); } } } } private static MojoChunkConverter makeConverter(Chunk c, int col, Type type) { switch (c.vec().get_type()) { case Vec.T_NUM: if (type == Type.Str) return new MojoChunkConverter(c, col) { @Override void convertValue(int i, MojoRowBuilder target) { // This is best effort - we might convert the double to an incorrect format (example: 1000 vs 1e3) final double val = _c.atd(i); target.setString(_col, String.valueOf(val)); } }; else if (type == Type.Bool) return new MojoChunkConverter(c, col) { @Override void convertValue(int i, MojoRowBuilder target) { final long val = _c.at8(i); target.setBool(_col, val == 1L); } }; else if (type.isfloat) return new MojoChunkConverter(c, col) { @Override void convertValue(int i, MojoRowBuilder target) { target.setDouble(_col, _c.atd(i)); } }; else if (type == Type.Int32) return new MojoChunkConverter(c, col) { @Override void convertValue(int i, MojoRowBuilder target) { target.setInt(_col, (int)_c.at8(i)); } }; else return new MojoChunkConverter(c, col) { @Override void convertValue(int i, MojoRowBuilder target) { target.setLong(_col, _c.at8(i)); } }; case Vec.T_CAT: return new MojoChunkConverter(c, col) { @Override void convertValue(int i, MojoRowBuilder target) { target.setValue(_col, _c.vec().domain()[(int) _c.at8(i)]); } }; case Vec.T_STR: if (type == Type.Str) return new MojoChunkConverter(c, col) { @Override void convertValue(int i, MojoRowBuilder target) { target.setString(_col, _c.atStr(new BufferedString(), i).toString()); } }; else return new MojoChunkConverter(c, col) { @Override void convertValue(int i, MojoRowBuilder target) { target.setValue(_col, _c.atStr(new BufferedString(), i).toString()); } }; case Vec.T_TIME: if (type == Type.Time64) return new MojoChunkConverter(c, col) { @Override void convertValue(int i, MojoRowBuilder target) { final long timestamp = _c.at8(i); target.setTimestamp(_col, new Timestamp(timestamp)); } }; else { final DateFormat dateFormatter = dateFormatter(); return new MojoChunkConverter(c, col) { @Override void convertValue(int i, MojoRowBuilder target) { final long timestamp = _c.at8(i); target.setValue(_col, dateFormatter.format(new Date(timestamp))); // Not ideal, would be better to pass directly } }; } default: throw new IllegalStateException("Unexpected column type: " + c.vec().get_type_str()); } } private static DateFormat dateFormatter() { return new SimpleDateFormat("MM/dd/yyyy'T'hh:mm:ss.sss"); } private static abstract class MojoChunkConverter { final int _col; final Chunk _c; private MojoChunkConverter(Chunk c, int col) { _c = c; _col = col; } abstract void convertValue(int i, MojoRowBuilder target); } }
0
java-sources/ai/h2o/h2o-ext-mojo-pipeline/3.46.0.7/hex
java-sources/ai/h2o/h2o-ext-mojo-pipeline/3.46.0.7/hex/mojopipeline/MojoPipelineExtension.java
package hex.mojopipeline; import ai.h2o.mojos.runtime.api.PipelineLoaderFactory; import water.AbstractH2OExtension; import water.util.Log; import java.util.ServiceLoader; public class MojoPipelineExtension extends AbstractH2OExtension { private static String NAME = "MojoPipeline"; private boolean _initialized; private boolean _enabled; @Override public String getExtensionName() { return NAME; } @Override public void onLocalNodeStarted() { } @Override public boolean isEnabled() { if (! _initialized) { initialize(); } return _enabled; } private synchronized void initialize() { _enabled = hasMojoRuntime(); _initialized = true; if (! _enabled) { Log.debug("MOJO Runtime not found"); } } private boolean hasMojoRuntime() { // relying on implementation - need to improve MOJO2 API try { Class clazz = Class.forName("ai.h2o.mojos.runtime.api.PipelineLoaderFactory"); return ServiceLoader.load(clazz).iterator().hasNext(); } catch (ClassNotFoundException e) { return false; } } }
0
java-sources/ai/h2o/h2o-ext-mojo-pipeline/3.46.0.7/hex
java-sources/ai/h2o/h2o-ext-mojo-pipeline/3.46.0.7/hex/mojopipeline/ProtobufPipelineWriter.java
package hex.mojopipeline; import mojo.spec.PipelineOuterClass; import water.api.StreamWriteOption; import water.api.StreamWriter; import java.io.IOException; import java.io.OutputStream; import java.util.zip.ZipEntry; import java.util.zip.ZipOutputStream; public class ProtobufPipelineWriter implements StreamWriter { PipelineOuterClass.Pipeline _pipeline; public ProtobufPipelineWriter(PipelineOuterClass.Pipeline pipeline) { _pipeline = pipeline; } @Override public void writeTo(OutputStream os, StreamWriteOption... options) { ZipOutputStream zos = new ZipOutputStream(os); try { zos.putNextEntry(new ZipEntry("mojo/")); zos.putNextEntry(new ZipEntry("mojo/pipeline.pb")); _pipeline.writeTo(zos); zos.closeEntry(); zos.close(); } catch (IOException e) { throw new RuntimeException(e); } } }
0
java-sources/ai/h2o/h2o-ext-mojo-pipeline/3.46.0.7/hex/mojopipeline
java-sources/ai/h2o/h2o-ext-mojo-pipeline/3.46.0.7/hex/mojopipeline/rapids/AstPipelineTransform.java
package hex.mojopipeline.rapids; import water.DKV; import water.fvec.ByteVec; import water.fvec.Frame; import water.rapids.Env; import water.rapids.Val; import water.rapids.ast.AstPrimitive; import water.rapids.ast.AstRoot; import water.rapids.vals.ValFrame; import hex.mojopipeline.MojoPipeline; public class AstPipelineTransform extends AstPrimitive { @Override public String[] args() { return new String[]{"pipeline", "frame"}; } @Override public int nargs() { return 1 + 3; } // (mojo.pipeline.transform pipeline frame allowTimestamps) @Override public String str() { return "mojo.pipeline.transform"; } @Override public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) { Val mojoDataVal = stk.track(asts[1].exec(env)); Frame mojoDataFrame = mojoDataVal.isFrame() ? mojoDataVal.getFrame() : (Frame) DKV.getGet(mojoDataVal.getStr()); Frame frame = stk.track(asts[2].exec(env)).getFrame(); boolean allowTimestamps = stk.track(asts[3].exec(env)).getNum() != 0; ByteVec mojoData = (ByteVec) mojoDataFrame.anyVec(); Frame transformed = new MojoPipeline(mojoData).transform(frame, allowTimestamps); return new ValFrame(transformed); } }
0
java-sources/ai/h2o/h2o-ext-steam/3.46.0.7/hex
java-sources/ai/h2o/h2o-ext-steam/3.46.0.7/hex/steam/SteamHelloMessenger.java
package hex.steam; import org.apache.log4j.Logger; import water.H2O; import java.io.IOException; import java.util.HashMap; import java.util.Map; public class SteamHelloMessenger implements SteamMessenger { Logger LOG = Logger.getLogger(SteamHelloMessenger.class); private SteamMessageSender sender; @Override public void onConnectionStateChange(SteamMessageSender sender) { this.sender = sender; } @Override public void onMessage(Map<String, String> message) { if ("hello".equals(message.get(TYPE))) { assert sender != null : "Received message but sender is null"; Map<String, String> response = new HashMap<>(); response.put(TYPE, "hello_response"); response.put(ID, message.get(ID) + "_response"); response.put("version", H2O.ABV.projectVersion()); response.put("branch", H2O.ABV.branchName()); response.put("hash", H2O.ABV.lastCommitHash()); response.put("cloud_size", String.valueOf(H2O.CLOUD.size())); try { sender.sendMessage(response); } catch (IOException e) { LOG.error("Failed to send response to hello.", e); } } } }
0
java-sources/ai/h2o/h2o-ext-steam/3.46.0.7/hex
java-sources/ai/h2o/h2o-ext-steam/3.46.0.7/hex/steam/SteamMessageExchange.java
package hex.steam; import com.google.gson.Gson; import com.google.gson.reflect.TypeToken; import org.apache.log4j.Logger; import java.io.IOException; import java.lang.reflect.Type; import java.util.*; import static hex.steam.SteamMessenger.TYPE; public class SteamMessageExchange implements SteamMessageSender { private static final Logger LOG = Logger.getLogger(SteamMessageExchange.class); private static final Type MAP_TYPE = new TypeToken<Map<String, String>>() {}.getType(); private final Gson gson = new Gson(); private final SteamWebsocketServlet servlet; private final List<SteamMessenger> messengers; SteamMessageExchange(SteamWebsocketServlet servlet) { this.servlet = servlet; List<SteamMessenger> res = new ArrayList<>(); for (SteamMessenger l : ServiceLoader.load(SteamMessenger.class)) { res.add(l); } messengers = Collections.unmodifiableList(res); } public void onConnected() { for (SteamMessenger listener : messengers) { try { listener.onConnectionStateChange(this); } catch (Exception e) { LOG.error("Error while onConnected event.", e); } } } public void onConnectionClosed() { for (SteamMessenger listener : messengers) { try { listener.onConnectionStateChange(null); } catch (Exception e) { LOG.error("Error while onConnectionClosed event.", e); } } } public void distributeMessage(String message) { Map<String, String> parsedMessage = Collections.unmodifiableMap(gson.fromJson(message, MAP_TYPE)); for (SteamMessenger listener : messengers) { if (parsedMessage.get(TYPE) != null) { try { listener.onMessage(parsedMessage); } catch (Exception e) { LOG.error("Error while processing steam message.", e); } } else { LOG.error("Received message from steam without type: " + message); } } } @Override public void sendMessage(Map<String, String> message) throws IOException { servlet.sendMessage(gson.toJson(message)); } }
0
java-sources/ai/h2o/h2o-ext-steam/3.46.0.7/hex
java-sources/ai/h2o/h2o-ext-steam/3.46.0.7/hex/steam/SteamMessageSender.java
package hex.steam; import java.io.IOException; import java.util.Map; /** * Send messages from H2O to Steam */ public interface SteamMessageSender { /** * Delivers message to Steam * * @param message message to be sent * @throws IOException when sending of the message fails for some reason */ void sendMessage(Map<String, String> message) throws IOException; }
0
java-sources/ai/h2o/h2o-ext-steam/3.46.0.7/hex
java-sources/ai/h2o/h2o-ext-steam/3.46.0.7/hex/steam/SteamMessenger.java
package hex.steam; import java.util.Map; /** * Receives messages from Steam and is notified of connection state changes. */ public interface SteamMessenger { String ID = "_id"; String TYPE = "_type"; /** * Called by the servlet when connection state has changed. * * @param sender when connected it is set to a SteamMessageSender object which this messenger can use later to * send messages to steam. Once disconnected this method will be called with null to notify the * messenger that steam is not currently connected. */ void onConnectionStateChange(SteamMessageSender sender); /** * Called for every full message received from steam. All messages are passed to all listeners. * * @param message Parsed message from Steam. */ void onMessage(Map<String, String> message); }
0
java-sources/ai/h2o/h2o-ext-steam/3.46.0.7/hex
java-sources/ai/h2o/h2o-ext-steam/3.46.0.7/hex/steam/SteamWebsocketServlet.java
package hex.steam; import water.webserver.iface.WebsocketConnection; import water.webserver.iface.WebsocketHandler; import water.webserver.iface.H2OWebsocketServlet; import java.io.IOException; public class SteamWebsocketServlet implements H2OWebsocketServlet { private static final Object CONNECTION_LOCK = new Object(); private WebsocketConnection currentConnection; private final SteamMessageExchange exchange; public SteamWebsocketServlet() { exchange = new SteamMessageExchange(this); } private class Handler implements WebsocketHandler { @Override public void onClose(WebsocketConnection connection) { synchronized (CONNECTION_LOCK) { if (currentConnection == connection) { currentConnection = null; SteamWebsocketServlet.this.exchange.onConnectionClosed(); } } } @Override public void onMessage(String message) { SteamWebsocketServlet.this.exchange.distributeMessage(message); } } @Override public WebsocketHandler onConnect(WebsocketConnection conn) { synchronized (CONNECTION_LOCK) { currentConnection = conn; SteamWebsocketServlet.this.exchange.onConnected(); } return new Handler(); } public void sendMessage(String message) throws IOException { WebsocketConnection conn; synchronized (CONNECTION_LOCK) { conn = currentConnection; } if (conn == null) { throw new IOException("Connection currently not available"); } else { conn.sendMessage(message); } } }
0
java-sources/ai/h2o/h2o-ext-steam/3.46.0.7/hex
java-sources/ai/h2o/h2o-ext-steam/3.46.0.7/hex/steam/SteamWebsocketServletProvider.java
package hex.steam; import water.server.ServletMeta; import water.server.ServletProvider; import water.server.WebsocketMeta; import java.util.Arrays; import java.util.Collections; import java.util.List; public class SteamWebsocketServletProvider implements ServletProvider { private static final List<WebsocketMeta> SERVLETS = Collections.unmodifiableList(Arrays.asList( new WebsocketMeta("/3/Steam.websocket", SteamWebsocketServlet.class) )); @Override public List<ServletMeta> servlets() { return Collections.emptyList(); } @Override public List<WebsocketMeta> websockets() { return SERVLETS; } @Override public int priority() { return 0; } }
0
java-sources/ai/h2o/h2o-ext-target-encoder/3.46.0.7/ai/h2o
java-sources/ai/h2o/h2o-ext-target-encoder/3.46.0.7/ai/h2o/targetencoding/BlendingParams.java
package ai.h2o.targetencoding; import water.Iced; public class BlendingParams extends Iced<BlendingParams> { private double _inflectionPoint; private double _smoothing; public BlendingParams(double inflectionPoint, double smoothing) { _inflectionPoint = inflectionPoint; _smoothing = smoothing; } /** * The inflection point of the sigmoid used as a shrinking factor. * For a given category, if the sample size is greater than this value, then the posterior weight is greater than the prior. * @return the inflection point of the sigmoid function. */ public double getInflectionPoint() { return _inflectionPoint; } /** * The smoothing factor, i.e. the inverse of the slope of the sigmoid at the inflection point. * When it tends to 0, the sigmoid becomes a step function. * @return the smoothing factor of the sigmoid function. */ public double getSmoothing() { return _smoothing; } }
0
java-sources/ai/h2o/h2o-ext-target-encoder/3.46.0.7/ai/h2o
java-sources/ai/h2o/h2o-ext-target-encoder/3.46.0.7/ai/h2o/targetencoding/ColumnsMapping.java
package ai.h2o.targetencoding; import water.Iced; public class ColumnsMapping extends Iced { private String[] _from; private String[] _to; public ColumnsMapping(String[] from, String[] to) { _from = from; _to = to; } public String[] from() { return _from; } public String[] to() { return _to; } } class ColumnsToSingleMapping extends ColumnsMapping { private String[] _toDomain; public ColumnsToSingleMapping(String[] from, String to, String[] toDomain) { super(from, new String[]{to}); _toDomain = toDomain; } public String toSingle() { return to()[0]; } public String[] toDomain() { return _toDomain; } }
0
java-sources/ai/h2o/h2o-ext-target-encoder/3.46.0.7/ai/h2o
java-sources/ai/h2o/h2o-ext-target-encoder/3.46.0.7/ai/h2o/targetencoding/TargetEncoder.java
package ai.h2o.targetencoding; import ai.h2o.targetencoding.TargetEncoderModel.DataLeakageHandlingStrategy; import ai.h2o.targetencoding.TargetEncoderModel.TargetEncoderOutput; import ai.h2o.targetencoding.TargetEncoderModel.TargetEncoderParameters; import ai.h2o.targetencoding.interaction.InteractionSupport; import hex.ModelBuilder; import hex.ModelCategory; import org.apache.log4j.Logger; import water.DKV; import water.Key; import water.Scope; import water.exceptions.H2OModelBuilderIllegalArgumentException; import water.fvec.Frame; import water.fvec.Vec; import water.util.IcedHashMap; import java.util.*; import static ai.h2o.targetencoding.TargetEncoderHelper.*; import static ai.h2o.targetencoding.TargetEncoderModel.NA_POSTFIX; public class TargetEncoder extends ModelBuilder<TargetEncoderModel, TargetEncoderParameters, TargetEncoderOutput> { private static final Logger LOG = Logger.getLogger(TargetEncoder.class); private TargetEncoderModel _targetEncoderModel; private String[][] _columnsToEncode; public TargetEncoder(TargetEncoderParameters parms) { super(parms); init(false); } public TargetEncoder(TargetEncoderParameters parms, Key<TargetEncoderModel> key) { super(parms, key); init(false); } public TargetEncoder(final boolean startupOnce) { super(new TargetEncoderParameters(), startupOnce); } @Override public void init(boolean expensive) { disableIgnoreConstColsFeature(expensive); ignoreUnusedColumns(expensive); super.init(expensive); assert _parms._nfolds == 0 : "nfolds usage forbidden in TargetEncoder"; if (expensive) { if (_parms._data_leakage_handling == null) _parms._data_leakage_handling = DataLeakageHandlingStrategy.None; if (_parms._data_leakage_handling == DataLeakageHandlingStrategy.KFold && _parms._fold_column == null) error("_fold_column", "Fold column is required when using KFold leakage handling strategy."); final Frame train = train(); _columnsToEncode = _parms._columns_to_encode; if (_columnsToEncode == null) { // detects columns that can be encoded final List<String> nonPredictors = Arrays.asList(_parms.getNonPredictors()); final List<String[]> columnsToEncode = new ArrayList<>(train.numCols()); for (int i = 0; i < train.numCols(); i++) { String colName = train.name(i); if (nonPredictors.contains(colName)) continue; if (!train.vec(i).isCategorical()) { warn("_train", "Column `" + colName + "` is not categorical and will therefore be ignored by target encoder."); continue; } columnsToEncode.add(new String[] {colName}); } _columnsToEncode = columnsToEncode.toArray(new String[0][]); } else { // validates column groups (which can be single columns) Set<String> validated = new HashSet<>(); for (String[] colGroup: _columnsToEncode) { if (colGroup.length != new HashSet<>(Arrays.asList(colGroup)).size()) { error("_columns_to_encode", "Columns interaction "+Arrays.toString(colGroup)+" contains duplicate columns."); } for (String col: colGroup) { if (!validated.contains(col)) { Vec vec = train.vec(col); if (vec == null) error("_columns_to_encode", "Column `"+col+"` from interaction "+Arrays.toString(colGroup)+" is not categorical or is missing from the training frame."); else if (!vec.isCategorical()) error("_columns_to_encode", "Column `"+col+"` from interaction "+Arrays.toString(colGroup)+" must first be converted into categorical to be used by target encoder."); validated.add(col); } } } } } } private void disableIgnoreConstColsFeature(boolean expensive) { _parms._ignore_const_cols = false; if (expensive && LOG.isInfoEnabled()) LOG.info("We don't want to ignore any columns 
during target encoding transformation " + "therefore `_ignore_const_cols` parameter was set to `false`"); } /** * autosets _ignored_columns when using _columns_to_encode param. * This ensures consistency when using the second param, * otherwise the metadata saved in the model (_domains, _names...) can be different, * and the score/predict result frame is also adapted differently. * @param expensive */ private void ignoreUnusedColumns(boolean expensive) { if (!expensive || _parms._columns_to_encode == null || _parms.train() == null) return; Set<String> usedColumns = new HashSet<>(Arrays.asList(_parms.getNonPredictors())); for (String[] colGroup: _parms._columns_to_encode) usedColumns.addAll(Arrays.asList(colGroup)); Set<String> unusedColumns = new HashSet<>(Arrays.asList(_parms.train()._names)); unusedColumns.removeAll(usedColumns); Set<String> ignoredColumns = _parms._ignored_columns == null ? new HashSet<>() : new HashSet<>(Arrays.asList(_parms._ignored_columns)); // ensures consistency when _ignored_columns is provided, `init` will then validate that columns listed in `_columns_to_encode` were not ignored. unusedColumns.addAll(ignoredColumns); _parms._ignored_columns = unusedColumns.toArray(new String[0]); } private class TargetEncoderDriver extends Driver { @Override public void computeImpl() { _targetEncoderModel = null; try { init(true); if (error_count() > 0) throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(TargetEncoder.this); TargetEncoderOutput output = new TargetEncoderOutput(TargetEncoder.this); TargetEncoderModel model = new TargetEncoderModel(dest(), _parms, output); _targetEncoderModel = model.delete_and_lock(_job); // and clear & write-lock it (smashing any prior) Frame workingFrame = new Frame(train()); ColumnsToSingleMapping[] columnsToEncodeMapping = new ColumnsToSingleMapping[_columnsToEncode.length]; for (int i=0; i < columnsToEncodeMapping.length; i++) { String[] colGroup = _columnsToEncode[i]; int interactionCol = InteractionSupport.addFeatureInteraction(workingFrame, colGroup); String[] interactionDomain = workingFrame.vec(interactionCol).domain(); columnsToEncodeMapping[i] = new ColumnsToSingleMapping(colGroup, workingFrame.name(interactionCol), interactionDomain); } String[] singleColumnsToEncode = Arrays.stream(columnsToEncodeMapping).map(ColumnsToSingleMapping::toSingle).toArray(String[]::new); IcedHashMap<String, Frame> _targetEncodingMap = prepareEncodingMap(workingFrame, singleColumnsToEncode); for (Map.Entry<String, Frame> entry : _targetEncodingMap.entrySet()) { Frame encodings = entry.getValue(); Scope.untrack(encodings); } output.init(_targetEncodingMap, columnsToEncodeMapping); _job.update(1); } catch (Exception e) { if (_targetEncoderModel != null) { Scope.track_generic(_targetEncoderModel); } throw e; } finally { if (_targetEncoderModel != null) { _targetEncoderModel.update(_job); _targetEncoderModel.unlock(_job); } } } //TODO We might want to introduce parameter that will change this behaviour. We can treat NA's as extra class. private Frame filterOutNAsFromTargetColumn(Frame data, int targetColumnIndex) { return filterOutNAsInColumn(data, targetColumnIndex); } private IcedHashMap<String, Frame> prepareEncodingMap(Frame fr, String[] columnsToEncode) { Frame workingFrame = null; try { int targetIdx = fr.find(_parms._response_column); int foldColIdx = _parms._fold_column == null ? 
-1 : fr.find(_parms._fold_column); workingFrame = filterOutNAsFromTargetColumn(fr, targetIdx); IcedHashMap<String, Frame> columnToEncodings = new IcedHashMap<>(); for (String columnToEncode : columnsToEncode) { // TODO: parallelize int colIdx = workingFrame.find(columnToEncode); imputeCategoricalColumn(workingFrame, colIdx, columnToEncode + NA_POSTFIX); Frame encodings = buildEncodingsFrame(workingFrame, colIdx, targetIdx, foldColIdx, nclasses()); Frame finalEncodings = applyLeakageStrategyToEncodings( encodings, columnToEncode, _parms._data_leakage_handling, _parms._fold_column ); encodings.delete(); encodings = finalEncodings; if (encodings._key != null) DKV.remove(encodings._key); encodings._key = Key.make(_result.toString()+"_encodings_"+columnToEncode); DKV.put(encodings); columnToEncodings.put(columnToEncode, encodings); } return columnToEncodings; } finally { if (workingFrame != null) workingFrame.delete(); } } private Frame applyLeakageStrategyToEncodings(Frame encodings, String columnToEncode, DataLeakageHandlingStrategy leakageHandlingStrategy, String foldColumn) { Frame groupedEncodings = null; int encodingsTEColIdx = encodings.find(columnToEncode); try { Scope.enter(); switch (leakageHandlingStrategy) { case KFold: long[] foldValues = getUniqueColumnValues(encodings, encodings.find(foldColumn)); for (long foldValue : foldValues) { Frame outOfFoldEncodings = getOutOfFoldEncodings(encodings, foldColumn, foldValue); Scope.track(outOfFoldEncodings); Frame tmpEncodings = register(groupEncodingsByCategory(outOfFoldEncodings, encodingsTEColIdx)); Scope.track(tmpEncodings); addCon(tmpEncodings, foldColumn, foldValue); //groupEncodingsByCategory always removes the foldColumn, so we can reuse the same name immediately if (groupedEncodings == null) { groupedEncodings = tmpEncodings; } else { Frame newHoldoutEncodings = rBind(groupedEncodings, tmpEncodings); groupedEncodings.delete(); groupedEncodings = newHoldoutEncodings; } Scope.track(groupedEncodings); } break; case LeaveOneOut: case None: groupedEncodings = groupEncodingsByCategory(encodings, encodingsTEColIdx, foldColumn != null); break; default: throw new IllegalStateException("null or unsupported leakageHandlingStrategy"); } Scope.untrack(groupedEncodings); } finally { Scope.exit(); } return groupedEncodings; } private Frame getOutOfFoldEncodings(Frame encodingsFrame, String foldColumn, long foldValue) { int foldColumnIdx = encodingsFrame.find(foldColumn); return filterNotByValue(encodingsFrame, foldColumnIdx, foldValue); } } /** * Never do traditional cross-validation for Target Encoder Model. The {@link TargetEncoderHelper} class handles * fold column on it's own. * * @return Always false */ @Override public boolean nFoldCV() { return false; } @Override protected Driver trainModelImpl() { // We can use Model.Parameters to configure Target Encoder return new TargetEncoderDriver(); } @Override public ModelCategory[] can_build() { return new ModelCategory[]{ ModelCategory.TargetEncoder}; } @Override public boolean isSupervised() { return true; } @Override public BuilderVisibility builderVisibility() { return BuilderVisibility.Stable; } @Override public boolean haveMojo() { return true; } }
0
java-sources/ai/h2o/h2o-ext-target-encoder/3.46.0.7/ai/h2o
java-sources/ai/h2o/h2o-ext-target-encoder/3.46.0.7/ai/h2o/targetencoding/TargetEncoderBroadcastJoin.java
package ai.h2o.targetencoding; import water.MRTask; import water.MemoryManager; import water.fvec.CategoricalWrappedVec; import water.fvec.Chunk; import water.fvec.Frame; class TargetEncoderBroadcastJoin { /** * @param leftFrame frame for which we want to keep the rows order. * @param leftCatColumnsIdxs indices of the categorical columns from `leftFrame` for which we want to calculate encodings. * Only one categorical column is currently supported. * @param leftFoldColumnIdx index of the fold column from `leftFrame` or `-1` if we don't use folds. * @param rightFrame supposedly small frame that we will broadcast to all nodes and use as a lookup table for joining. * @param rightCatColumnsIdxs indices of the categorical columns from `rightFrame`. * Only one categorical column is currently supported. * @param rightFoldColumnIdx index of the fold column from `rightFrame` or `-1` if we don't use folds. * @param maxFoldValue the highest fold value (e.g. 4 if 5 folds). * @return the `leftFrame` with joined numerators and denominators. */ static Frame join(Frame leftFrame, int[] leftCatColumnsIdxs, int leftFoldColumnIdx, Frame rightFrame, int[] rightCatColumnsIdxs, int rightFoldColumnIdx, int maxFoldValue) { int rightNumeratorIdx = rightFrame.find(TargetEncoderHelper.NUMERATOR_COL); int rightDenominatorIdx = rightFrame.find(TargetEncoderHelper.DENOMINATOR_COL); // currently supporting only one categorical column assert leftCatColumnsIdxs.length == 1; assert rightCatColumnsIdxs.length == 1; int leftCatColumnIdx = leftCatColumnsIdxs[0]; int rightCatColumnIdx = rightCatColumnsIdxs[0]; int rightCatCardinality = rightFrame.vec(rightCatColumnIdx).cardinality(); if (rightFoldColumnIdx != -1 && rightFrame.vec(rightFoldColumnIdx).max() > Integer.MAX_VALUE) throw new IllegalArgumentException("Fold value should be a non-negative integer (i.e. should belong to [0, Integer.MAX_VALUE] range)"); int[][] levelMappings = { CategoricalWrappedVec.computeMap( leftFrame.vec(leftCatColumnIdx).domain(), rightFrame.vec(rightCatColumnIdx).domain() ) }; double[][] encodingData = encodingsToArray( rightFrame, rightCatColumnIdx, rightFoldColumnIdx, rightNumeratorIdx, rightDenominatorIdx, rightCatCardinality, maxFoldValue ); // BroadcastJoiner is currently modifying the frame in-place, so we need to provide the numerator and denominator columns. Frame resultFrame = new Frame(leftFrame); resultFrame.add(TargetEncoderHelper.NUMERATOR_COL, resultFrame.anyVec().makeCon(0)); resultFrame.add(TargetEncoderHelper.DENOMINATOR_COL, resultFrame.anyVec().makeCon(0)); new BroadcastJoiner(leftCatColumnsIdxs, leftFoldColumnIdx, encodingData, levelMappings, rightCatCardinality-1) .doAll(resultFrame); return resultFrame; } static double[][] encodingsToArray(Frame encodingsFrame, int categoricalColIdx, int foldColIdx, int numColIdx, int denColIdx, int categoricalColCardinality, int maxFoldValue) { return new FrameWithEncodingDataToArray(categoricalColIdx, foldColIdx, numColIdx, denColIdx, categoricalColCardinality, maxFoldValue) .doAll(encodingsFrame) .getEncodingDataArray(); } private static class FrameWithEncodingDataToArray extends MRTask<FrameWithEncodingDataToArray> { /** * Numerators and denominators are being stored in the same row-array of the 2D array. * * _encodingDataPerNode[k][2*i] will be storing numerators * _encodingDataPerNode[k][2*i+1] will be storing denominators * (easier to inspect when debugging). 
* * where k is a current fold value (folds = {0,1,2,3...k}) or 0 when _foldColumnIdx == -1; */ final double[][] _encodingDataPerNode; final int _categoricalColumnIdx, _foldColumnIdx, _numeratorIdx, _denominatorIdx, _cardinalityOfCatCol; FrameWithEncodingDataToArray(int categoricalColumnIdx, int foldColumnIdx, int numeratorIdx, int denominatorIdx, int cardinalityOfCatCol, int maxFoldValue) { _categoricalColumnIdx = categoricalColumnIdx; _foldColumnIdx = foldColumnIdx; _numeratorIdx = numeratorIdx; _denominatorIdx = denominatorIdx; _cardinalityOfCatCol = cardinalityOfCatCol; if (foldColumnIdx == -1) { _encodingDataPerNode = MemoryManager.malloc8d(1, _cardinalityOfCatCol * 2); } else { assert maxFoldValue >= 1 : "There should be at least two folds in the fold column"; assert _cardinalityOfCatCol > 0 && _cardinalityOfCatCol < (Integer.MAX_VALUE / 2) : "Cardinality of categ. column should be within range (0, Integer.MAX_VALUE / 2 )"; _encodingDataPerNode = MemoryManager.malloc8d(maxFoldValue + 1,_cardinalityOfCatCol * 2); } } @Override public void map(Chunk[] cs) { Chunk categoricalChunk = cs[_categoricalColumnIdx]; Chunk numeratorChunk = cs[_numeratorIdx]; Chunk denominatorChunk = cs[_denominatorIdx]; for (int i = 0; i < categoricalChunk.len(); i++) { int level = (int) categoricalChunk.at8(i); // We are allowed to do casting to `int` as we have validation before submitting this MRTask int foldValue = _foldColumnIdx != -1 ? (int) cs[_foldColumnIdx].at8(i) : 0; double[] numDenArray = _encodingDataPerNode[foldValue]; numDenArray[2*level] = numeratorChunk.atd(i); numDenArray[2*level+1] = denominatorChunk.at8(i); } } @Override public void reduce(FrameWithEncodingDataToArray mrt) { double[][] leftArr = getEncodingDataArray(); double[][] rightArr = mrt.getEncodingDataArray(); // Note: we need to add `leftArr != rightArr` check due to the following circumstances: // 1. MRTasks are being only shallow cloned i.e. all internal fields are references to the same objects in memory // 2. MRTasks' shallow copies, that were also sent to other nodes, will become a deep copies of the original shallow copies (due to serialisation/deserialisation) // 3. there is a chance that reduce phase will start before map phase is finished: // // In the following example we are only concerned with what is happening within a single node (due to 2.). // // T(r) // / \ // / \ // Tl(r) Tr(r) // / \ / \ // Tll(m) Tlr(m) Trl(m) Trr(m) // , where (r) stands for reduce phase only, (m) - map phase only // // // Tll(m) and Tlr(m) manipulate the same array (due to 1.), but not the same cells -> no race condition. // The race arises because, for example, Tl(r) can occur in parallel with Trl(m), Tr(r) or both. // Once both Tl(r) and Tr(r) are completed, T(r) itself is safe. 
// // Steps that led to the race in a FrameWithEncodingDataToArray.reduce without `leftArr != rightArr` check: // - Tl(r); Math.max for a particular cell Cij was computed to be 0 (not yet assigning result to Cij cell) // - Trl(m) & Trr(m); Cij was updated during map phase with non-zero value of 42 // - Tr(r); Math.max for Cij was computed to be 42 and assigned to Cij cell of 2D array // - Tl(r); assigned to Cij cell of 2D array value of 0 and effectively overriding previously assigned value of 42 // if (leftArr != rightArr) { for (int rowIdx = 0; rowIdx < leftArr.length; rowIdx++) { for (int colIdx = 0; colIdx < leftArr[rowIdx].length; colIdx++) { double valueFromLeftArr = leftArr[rowIdx][colIdx]; double valueFromRIghtArr = rightArr[rowIdx][colIdx]; leftArr[rowIdx][colIdx] = Math.max(valueFromLeftArr, valueFromRIghtArr); } } } } double[][] getEncodingDataArray() { return _encodingDataPerNode; } } private static class BroadcastJoiner extends MRTask<BroadcastJoiner> { int _categoricalColumnIdx, _foldColumnIdx, _maxKnownCatLevel; double[][] _encodingDataArray; int[][] _levelMappings; BroadcastJoiner(int[] categoricalColumnsIdxs, int foldColumnIdx, double[][] encodingDataArray, int[][] levelMappings, int maxKnownCatLevel) { assert categoricalColumnsIdxs.length == 1 : "Only single column target encoding (i.e. one categorical column is used to produce its encodings) is supported for now"; assert levelMappings.length == 1; _categoricalColumnIdx = categoricalColumnsIdxs[0]; _foldColumnIdx = foldColumnIdx; _encodingDataArray = encodingDataArray; _levelMappings = levelMappings; _maxKnownCatLevel = maxKnownCatLevel; } @Override public void map(Chunk[] cs) { int[] levelMapping = _levelMappings[0]; //see constraint in constructor Chunk categoricalChunk = cs[_categoricalColumnIdx]; Chunk num = cs[cs.length - 2]; // numerator and denominator columns are appended in #join method. Chunk den = cs[cs.length - 1]; for (int i = 0; i < num.len(); i++) { int level = (int) categoricalChunk.at8(i); if (level >= levelMapping.length) { // should never happen: when joining, level is a category in the left frame, and levelMapping.length == size of the domain on that frame setEncodingComponentsToNAs(num, den, i); continue; } int mappedLevel = levelMapping[level]; int foldValue = _foldColumnIdx >= 0 ? (int)cs[_foldColumnIdx].at8(i) : 0; double[] numDenArray = _encodingDataArray[foldValue]; if (mappedLevel > _maxKnownCatLevel) { // level not in encodings (unseen in training data) -> NA setEncodingComponentsToNAs(num, den, i); } else { double denominator = numDenArray[2*mappedLevel+1]; if (denominator == 0) { // no occurrence of current level for this fold -> NA setEncodingComponentsToNAs(num, den, i); } else { double numerator = numDenArray[2*mappedLevel]; num.set(i, numerator); den.set(i, denominator); } } } } // Note: Later - in `TargetEncodingHelper.ApplyEncodings` task - NAs will be imputed by prior private void setEncodingComponentsToNAs(Chunk num, Chunk den, int i) { num.setNA(i); den.setNA(i); } } }
0
java-sources/ai/h2o/h2o-ext-target-encoder/3.46.0.7/ai/h2o
java-sources/ai/h2o/h2o-ext-target-encoder/3.46.0.7/ai/h2o/targetencoding/TargetEncoderHelper.java
package ai.h2o.targetencoding; import water.*; import water.fvec.*; import water.fvec.task.FillNAWithLongValueTask; import water.fvec.task.FilterByValueTask; import water.fvec.task.IsNotNaTask; import water.fvec.task.UniqTask; import org.apache.log4j.Logger; import water.rapids.Rapids; import water.rapids.Val; import water.rapids.ast.prims.advmath.AstKFold; import water.rapids.ast.prims.mungers.AstGroup; import water.rapids.ast.prims.mungers.AstGroup.NAHandling; import water.rapids.ast.prims.mungers.AstMelt; import water.rapids.vals.ValFrame; import water.rapids.vals.ValNum; import water.rapids.vals.ValStr; import water.rapids.vals.ValStrs; import water.util.*; import java.util.*; /** * This is a helper class for target encoding related logic, * grouping mainly distributed tasks or other utility functions needed to generate and apply the target encoding maps. * */ public class TargetEncoderHelper extends Iced<TargetEncoderHelper>{ static final String NUMERATOR_COL = "numerator"; static final String DENOMINATOR_COL = "denominator"; static final String TARGETCLASS_COL = "targetclass"; static final int NO_TARGET_CLASS = -1; // value used as a substitute for the target class in regression problems. private static final Logger LOG = Logger.getLogger(TargetEncoderHelper.class); private TargetEncoderHelper() {} /** * @param frame * @param name name of the fold column * @param nfolds number of folds * @param seed * @return the index of the new column */ public static int addKFoldColumn(Frame frame, String name, int nfolds, long seed) { Vec foldVec = frame.anyVec().makeZero(); frame.add(name, AstKFold.kfoldColumn(foldVec, nfolds, seed == -1 ? new Random().nextLong() : seed)); return frame.numCols() - 1; } static double computePriorMean(Frame encodings) { assert encodings.find(TARGETCLASS_COL) < 0; return computePriorMean(encodings, NO_TARGET_CLASS); } static double computePriorMean(Frame encodings, int targetClass) { int tcIdx = encodings.find(TARGETCLASS_COL); assert (targetClass == NO_TARGET_CLASS) == (tcIdx < 0); Frame fr = null; try { fr = tcIdx < 0 ? encodings : filterByValue(encodings, tcIdx, targetClass); Vec numeratorVec = fr.vec(NUMERATOR_COL); Vec denominatorVec = fr.vec(DENOMINATOR_COL); assert numeratorVec!=null; assert denominatorVec!=null; return numeratorVec.mean() / denominatorVec.mean(); } finally { if (fr != null && fr != encodings) fr.delete(); } } /** * If a fold column is provided, this produces a frame of shape * (unique(col, fold_col), 4) with columns [{col}, {fold_col}, numerator, denominator] * Otherwise, it produces a frame of shape * (unique(col), 3) with columns [{col}, numerator, denominator] * @param fr * @param columnToEncodeIdx * @param targetIdx * @param foldColumnIdx * @param nclasses: 1 for regression, 2 for binary, N for multiclass. * @return the frame used to compute TE posteriors for a given column to encode. */ static Frame buildEncodingsFrame(Frame fr, int columnToEncodeIdx, int targetIdx, int foldColumnIdx, int nclasses) { try { Scope.enter(); Frame result; AstGroup.AGG[] aggs; int[] groupBy = foldColumnIdx < 0 ? 
new int[]{columnToEncodeIdx} : new int[]{columnToEncodeIdx, foldColumnIdx}; if (nclasses > 2) { // multiclass String targetName = fr.name(targetIdx); Vec targetVec = fr.vec(targetIdx); // transform the target into multiple columns that each will be interpreted as a target // used to generate the new {targetclass}_te features Frame targetFr = new Frame(new String[]{targetName}, new Vec[]{targetVec}); Frame oheTarget = new FrameUtils.CategoricalOneHotEncoder(targetFr, new String[]{}).exec().get(); Scope.track(oheTarget); Frame expandedFr = new Frame(fr).add(oheTarget); // printFrame(expandedFr); // add one sum aggregator per targetclass -> this will produce a {targetclass} numerator. // add one single nrow aggregator for the shared denominator. aggs = new AstGroup.AGG[oheTarget.numCols() + 1]; for (int i = 0; i < oheTarget.numCols(); i++) { int partialTargetIdx = fr.numCols() + i; aggs[i] = new AstGroup.AGG(AstGroup.FCN.sum, partialTargetIdx, NAHandling.ALL, -1); } aggs[aggs.length - 1] = new AstGroup.AGG(AstGroup.FCN.nrow, targetIdx, NAHandling.ALL, -1); result = new AstGroup().performGroupingWithAggregations(expandedFr, groupBy, aggs).getFrame(); Scope.track(result); // renaming all those aggregation columns: // targetclass numerators get temporarily renamed into just the targetclass. // the denominator column gets its final name. String[] targetVals = new String[oheTarget.numCols()]; for (int i = 0; i < oheTarget.names().length; i++) { String oheCol = oheTarget.name(i); String targetVal = oheCol.replaceFirst(targetName + ".", ""); renameColumn(result, "sum_" + oheCol, targetVal); targetVals[i] = targetVal; } renameColumn(result, "nrow", DENOMINATOR_COL); // we don't want to carry around all those numerator columns, // so, melting them into a single numerator column + a targetclass column holding the corresponding target values. String[] idVars= foldColumnIdx < 0 ? new String[]{fr.name(columnToEncodeIdx), DENOMINATOR_COL} : new String[]{fr.name(columnToEncodeIdx), fr.name(foldColumnIdx), DENOMINATOR_COL}; result = melt(result, idVars, targetVals, TARGETCLASS_COL, NUMERATOR_COL, true); // convert targetclass column to ensure it has the same domain as target CategoricalWrappedVec.updateDomain(result.vec(TARGETCLASS_COL), targetVec.domain()); // printFrame(result); } else { // works for both binary and regression aggs = new AstGroup.AGG[2]; aggs[0] = new AstGroup.AGG(AstGroup.FCN.sum, targetIdx, NAHandling.ALL, -1); aggs[1] = new AstGroup.AGG(AstGroup.FCN.nrow, targetIdx, NAHandling.ALL, -1); result = new AstGroup().performGroupingWithAggregations(fr, groupBy, aggs).getFrame(); // change the default column names assigned by the aggregation task renameColumn(result, "sum_" + fr.name(targetIdx), NUMERATOR_COL); renameColumn(result, "nrow", DENOMINATOR_COL); // printFrame(result); } Scope.untrack(result); return result; } finally { Scope.exit(); } } /** * Group encodings by category (summing on all folds present in the frame). * Produces a frame of shape (unique(col), 3) with columns [{col}, numerator, denominator]. * @param encodingsFrame * @param teColumnIdx * @return */ static Frame groupEncodingsByCategory(Frame encodingsFrame, int teColumnIdx) { int numeratorIdx = encodingsFrame.find(NUMERATOR_COL); assert numeratorIdx >= 0; int denominatorIdx = encodingsFrame.find(DENOMINATOR_COL); assert denominatorIdx >= 0; int classesIdx = encodingsFrame.find(TARGETCLASS_COL); int [] groupBy = classesIdx < 0 ? 
new int[]{teColumnIdx} : new int[]{teColumnIdx, classesIdx}; AstGroup.AGG[] aggs = new AstGroup.AGG[2]; aggs[0] = new AstGroup.AGG(AstGroup.FCN.sum, numeratorIdx, NAHandling.ALL, -1); aggs[1] = new AstGroup.AGG(AstGroup.FCN.sum, denominatorIdx, NAHandling.ALL, -1); Frame result = new AstGroup().performGroupingWithAggregations(encodingsFrame, groupBy, aggs).getFrame(); //change the default column names assigned by the aggregation task renameColumn(result, "sum_"+ NUMERATOR_COL, NUMERATOR_COL); renameColumn(result, "sum_"+ DENOMINATOR_COL, DENOMINATOR_COL); return result; } static Frame groupEncodingsByCategory(Frame encodingsFrame, int teColumnIdx, boolean hasFolds) { if (hasFolds) { return groupEncodingsByCategory(encodingsFrame, teColumnIdx); } else { return encodingsFrame.deepCopy(Key.make().toString()); // XXX: is this really necessary? } } static void imputeCategoricalColumn(Frame data, int columnIdx, String naCategory) { Vec currentVec = data.vec(columnIdx); int indexForNACategory = currentVec.cardinality(); // Warn: Cardinality returns int but it could be larger than int for big datasets FillNAWithLongValueTask task = new FillNAWithLongValueTask(columnIdx, indexForNACategory); task.doAll(data); if (task._imputationHappened) { String[] oldDomain = currentVec.domain(); String[] newDomain = new String[indexForNACategory + 1]; System.arraycopy(oldDomain, 0, newDomain, 0, oldDomain.length); newDomain[indexForNACategory] = naCategory; updateColumnDomain(data, columnIdx, newDomain); } } private static void updateColumnDomain(Frame fr, int columnIdx, String[] domain) { fr.write_lock(); Vec updatedVec = fr.vec(columnIdx); // CategoricalWrappedVec.updateDomain(updatedVec, domain); // safer? remapping should be unnecessary in our use-case though updatedVec.setDomain(domain); DKV.put(updatedVec); fr.update(); fr.unlock(); } static long[] getUniqueColumnValues(Frame data, int columnIndex) { Vec uniqueValues = uniqueValuesBy(data, columnIndex).vec(0); long numberOfUniqueValues = uniqueValues.length(); assert numberOfUniqueValues <= Integer.MAX_VALUE : "Number of unique values exceeded Integer.MAX_VALUE"; int length = (int) numberOfUniqueValues; // We assume that the column should not have that many different values and will fit into node's memory. long[] uniqueValuesArr = MemoryManager.malloc8(length); for (int i = 0; i < uniqueValues.length(); i++) { uniqueValuesArr[i] = uniqueValues.at8(i); } uniqueValues.remove(); return uniqueValuesArr; } /** * Computes the blended prior and posterior probabilities:<pre>Pᵢ = 𝝺(nᵢ) ȳᵢ + (1 - 𝝺(nᵢ)) ȳ</pre> * Note that in case of regression problems, these prior/posterior values should be simply read as mean values without the need to change the formula. * The shrinkage factor lambda is a parametric logistic function defined as <pre>𝝺(n) = 1 / ( 1 + e^((k - n)/f) )</pre> * @param posteriorMean the posterior mean ( ȳᵢ ) for a given category. * @param priorMean the prior mean ( ȳ ). * @param numberOfRowsForCategory (nᵢ). * @param blendingParams the parameters (k and f) for the shrinkage function. 
* @return */ static double getBlendedValue(double posteriorMean, double priorMean, long numberOfRowsForCategory, BlendingParams blendingParams) { double lambda = 1.0 / (1 + Math.exp((blendingParams.getInflectionPoint() - numberOfRowsForCategory) / blendingParams.getSmoothing())); return lambda * posteriorMean + (1 - lambda) * priorMean; } /** merge the encodings by TE column */ static Frame mergeEncodings(Frame leftFrame, Frame encodingsFrame, int leftTEColumnIdx, int encodingsTEColumnIdx) { return mergeEncodings(leftFrame, encodingsFrame, leftTEColumnIdx, -1, encodingsTEColumnIdx, -1, 0); } /** merge the encodings by TE column + fold column */ static Frame mergeEncodings(Frame leftFrame, Frame encodingsFrame, int leftTEColumnIdx, int leftFoldColumnIdx, int encodingsTEColumnIdx, int encodingsFoldColumnIdx, int maxFoldValue) { return TargetEncoderBroadcastJoin.join( leftFrame, new int[]{leftTEColumnIdx}, leftFoldColumnIdx, encodingsFrame, new int[]{encodingsTEColumnIdx}, encodingsFoldColumnIdx, maxFoldValue); } /** * * @param fr the frame * @param newEncodedColumnName the new encoded column to compute and append to the original frame. * @param priorMean the global mean. * @param blendingParams if provided, those params are used to blend the prior and posterior values when calculating the encoded value. * @return the index of the new encoded column */ static int applyEncodings(Frame fr, String newEncodedColumnName, double priorMean, final BlendingParams blendingParams) { int numeratorIdx = fr.find(NUMERATOR_COL); assert numeratorIdx >= 0; int denominatorIdx = numeratorIdx + 1; // enforced by the Broadcast join Vec zeroVec = fr.anyVec().makeCon(0); fr.add(newEncodedColumnName, zeroVec); int encodedColumnIdx = fr.numCols() - 1; new ApplyEncodings(encodedColumnIdx, numeratorIdx, denominatorIdx, priorMean, blendingParams).doAll(fr); return encodedColumnIdx; } /** * Distributed task setting the encoded value on a specific column, * given 2 numerator and denominator columns already present on the frame * and additional pre-computations needed to compute the encoded value. * * Note that the encoded value will use blending iff `blendingParams` are provided. */ private static class ApplyEncodings extends MRTask<ApplyEncodings> { private int _encodedColIdx; private int _numeratorIdx; private int _denominatorIdx; private double _priorMean; private BlendingParams _blendingParams; ApplyEncodings(int encodedColIdx, int numeratorIdx, int denominatorIdx, double priorMean, BlendingParams blendingParams) { _encodedColIdx = encodedColIdx; _numeratorIdx = numeratorIdx; _denominatorIdx = denominatorIdx; _priorMean = priorMean; _blendingParams = blendingParams; } @Override public void map(Chunk cs[]) { Chunk num = cs[_numeratorIdx]; Chunk den = cs[_denominatorIdx]; Chunk encoded = cs[_encodedColIdx]; boolean useBlending = _blendingParams != null; for (int i = 0; i < num._len; i++) { if (num.isNA(i) || den.isNA(i)) { // 2 cases: category unseen during training, or not present in a given fold, shouldn't we make the distinction? encoded.setNA(i); } else if (den.at8(i) == 0) { //should never happen according to BroadcastJoiner, except after substracting target in LOO strategy. if (LOG.isDebugEnabled()) LOG.debug("Denominator is zero for column index = " + _encodedColIdx + ". 
Imputing with _priorMean = " + _priorMean); encoded.set(i, _priorMean); } else { double posteriorMean = num.atd(i) / den.atd(i); double encodedValue; if (useBlending) { long numberOfRowsInCurrentCategory = den.at8(i); // works for all type of problems encodedValue = getBlendedValue(posteriorMean, _priorMean, numberOfRowsInCurrentCategory, _blendingParams); } else { encodedValue = posteriorMean; } encoded.set(i, encodedValue); } } } } static void addNoise(Frame fr, int columnIdx, double noiseLevel, long seed) { if (seed == -1) seed = new Random().nextLong(); Vec zeroVec = fr.anyVec().makeCon(0); Vec randomVec = zeroVec.makeRand(seed); try { fr.add("runIf", randomVec); int runifIdx = fr.numCols() - 1; new AddNoiseTask(columnIdx, runifIdx, noiseLevel).doAll(fr); fr.remove(runifIdx); // Vec[] vecs = ArrayUtils.append(fr.vecs(), randomVec); // return new AddNoiseTask(columnIndex, fr.numCols(), noiseLevel).doAll(vecs).outputFrame(); } finally { randomVec.remove(); zeroVec.remove(); } } private static class AddNoiseTask extends MRTask<AddNoiseTask> { private int _columnIdx; private int _runifIdx; private double _noiseLevel; public AddNoiseTask(int columnIdx, int runifIdx, double noiseLevel) { _columnIdx = columnIdx; _runifIdx = runifIdx; _noiseLevel = noiseLevel; } @Override public void map(Chunk cs[]) { Chunk column = cs[_columnIdx]; Chunk runifCol = cs[_runifIdx]; for (int i = 0; i < column._len; i++) { if (!column.isNA(i)) { column.set(i, column.atd(i) + (runifCol.atd(i) * 2 - 1) * _noiseLevel); } } } } /** * @param fr the frame with a numerator and denominator columns, which will be modified based on the value in the target column. * @param targetColumn the name of the target column. * @param targetClass for regression use {@value NO_TARGET_CLASS}, * for classification this is the target value to match in order to decrement the numerator. */ static void subtractTargetValueForLOO(Frame fr, String targetColumn, int targetClass) { int numeratorIndex = fr.find(NUMERATOR_COL); int denominatorIndex = fr.find(DENOMINATOR_COL); int targetIndex = fr.find(targetColumn); assert numeratorIndex >= 0; assert denominatorIndex >= 0; assert targetIndex >= 0; new SubtractCurrentRowForLeaveOneOutTask(numeratorIndex, denominatorIndex, targetIndex, targetClass).doAll(fr); } private static class SubtractCurrentRowForLeaveOneOutTask extends MRTask<SubtractCurrentRowForLeaveOneOutTask> { private int _numeratorIdx; private int _denominatorIdx; private int _targetIdx; private int _targetClass; public SubtractCurrentRowForLeaveOneOutTask(int numeratorIdx, int denominatorIdx, int targetIdx, int targetClass) { _numeratorIdx = numeratorIdx; _denominatorIdx = denominatorIdx; _targetIdx = targetIdx; _targetClass = targetClass; } @Override public void map(Chunk cs[]) { Chunk num = cs[_numeratorIdx]; Chunk den = cs[_denominatorIdx]; Chunk target = cs[_targetIdx]; for (int i = 0; i < num._len; i++) { if (!target.isNA(i)) { double ti = target.atd(i); if (_targetClass == NO_TARGET_CLASS) // regression num.set(i, num.atd(i) - target.atd(i)); else if (_targetClass == ti) // classification num.set(i, num.atd(i) - 1); den.set(i, den.atd(i) - 1); } } } } static Frame melt(Frame fr, String[] idVars, String[] valueVars, String varCol, String valueCol, boolean skipNA) { Frame melted = new AstMelt().exec(new Val[]{ null, new ValFrame(fr), new ValStrs(idVars), new ValStrs(valueVars), new ValStr(varCol), new ValStr(valueCol), new ValNum(skipNA ? 
1 : 0) }).getFrame(); return register(melted); } static Frame rBind(Frame a, Frame b) { if (a == null) { assert b != null; return b; } else { String tree = String.format("(rbind %s %s)", a._key, b._key); return execRapidsAndGetFrame(tree); } } private static Frame execRapidsAndGetFrame(String astTree) { Val val = Rapids.exec(astTree); return register(val.getFrame()); } /** * expand the frame with constant vector Frame * @return the index of the new vector. **/ static int addCon(Frame fr, String newColumnName, long constant) { Vec constVec = fr.anyVec().makeCon(constant); fr.add(newColumnName, constVec); return fr.numCols() - 1; } /** * @return frame without rows with NAs in `columnIndex` column */ static Frame filterOutNAsInColumn(Frame fr, int columnIndex) { Frame oneColumnFrame = new Frame(fr.vec(columnIndex)); Frame noNaPredicateFrame = new IsNotNaTask().doAll(1, Vec.T_NUM, oneColumnFrame).outputFrame(); Frame filtered = selectByPredicate(fr, noNaPredicateFrame); noNaPredicateFrame.delete(); return filtered; } /** * @return frame with all the rows except for those whose value in the `columnIndex' column equals to `value` */ static Frame filterNotByValue(Frame fr, int columnIndex, double value) { return filterByValueBase(fr, columnIndex, value, true); } /** * @return frame with all the rows whose value in the `columnIndex' column equals to `value` */ static Frame filterByValue(Frame fr, int columnIndex, double value) { return filterByValueBase(fr, columnIndex, value,false); } private static Frame filterByValueBase(Frame fr, int columnIndex, double value, boolean isInverted) { Frame predicateFrame = new FilterByValueTask(value, isInverted).doAll(1, Vec.T_NUM, new Frame(fr.vec(columnIndex))).outputFrame(); Frame filtered = selectByPredicate(fr, predicateFrame); predicateFrame.delete(); return filtered; } private static Frame selectByPredicate(Frame fr, Frame predicateFrame) { Vec predicate = predicateFrame.anyVec(); Vec[] vecs = ArrayUtils.append(fr.vecs(), predicate); return new Frame.DeepSelect().doAll(fr.types(), vecs).outputFrame(Key.make(), fr._names, fr.domains()); } /** return a frame with unique values from the specified column */ static Frame uniqueValuesBy(Frame fr, int columnIndex) { Vec vec0 = fr.vec(columnIndex); Vec v; if (vec0.isCategorical()) { v = Vec.makeSeq(0, vec0.domain().length, true); v.setDomain(vec0.domain()); DKV.put(v); } else { v = new UniqTask().doAll(vec0).toVec(); } return new Frame(v); } static void renameColumn(Frame fr, int colIndex, String newName) { String[] newNames = fr.names(); newNames[colIndex] = newName; fr.setNames(newNames); } static void renameColumn(Frame fr, String oldName, String newName) { renameColumn(fr, fr.find(oldName), newName); } static Map<String, Integer> nameToIndex(Frame fr) { return nameToIndex(fr.names()); } static Map<String, Integer> nameToIndex(String[] columns) { Map<String, Integer> nameToIdx = new HashMap<>(columns.length); for (int i = 0; i < columns.length; i++) { nameToIdx.put(columns[i], i); } return nameToIdx; } /** * @return Frame that is registered in DKV */ static Frame register(Frame frame) { frame._key = Key.make(); DKV.put(frame); return frame; } static void printFrame(Frame fr) { TwoDimTable twoDimTable = fr.toTwoDimTable(0, (int) fr.numRows(), false); System.out.println(twoDimTable.toString(2, true)); } }
0
java-sources/ai/h2o/h2o-ext-target-encoder/3.46.0.7/ai/h2o
java-sources/ai/h2o/h2o-ext-target-encoder/3.46.0.7/ai/h2o/targetencoding/TargetEncoderModel.java
package ai.h2o.targetencoding; import hex.Model; import hex.ModelCategory; import hex.ModelMetrics; import water.*; import water.exceptions.H2OIllegalArgumentException; import water.fvec.Frame; import water.fvec.Vec; import water.fvec.task.FillNAWithDoubleValueTask; import org.apache.log4j.Logger; import water.udf.CFuncRef; import water.util.*; import java.util.*; import java.util.PrimitiveIterator.OfInt; import java.util.stream.IntStream; import java.util.stream.Stream; import static ai.h2o.targetencoding.TargetEncoderHelper.*; import static ai.h2o.targetencoding.interaction.InteractionSupport.addFeatureInteraction; public class TargetEncoderModel extends Model<TargetEncoderModel, TargetEncoderModel.TargetEncoderParameters, TargetEncoderModel.TargetEncoderOutput> { public static final String ALGO_NAME = "TargetEncoder"; public static final int NO_FOLD = -1; static final String NA_POSTFIX = "_NA"; static final String TMP_COLUMN_POSTFIX = "_tmp"; static final String ENCODED_COLUMN_POSTFIX = "_te"; static final BlendingParams DEFAULT_BLENDING_PARAMS = new BlendingParams(10, 20); private static final Logger LOG = Logger.getLogger(TargetEncoderModel.class); public enum DataLeakageHandlingStrategy { LeaveOneOut, KFold, None, } public static class TargetEncoderParameters extends Model.Parameters { public String[][] _columns_to_encode; public boolean _blending = false; public double _inflection_point = DEFAULT_BLENDING_PARAMS.getInflectionPoint(); public double _smoothing = DEFAULT_BLENDING_PARAMS.getSmoothing(); public DataLeakageHandlingStrategy _data_leakage_handling = DataLeakageHandlingStrategy.None; public double _noise = 0.01; public boolean _keep_original_categorical_columns = true; // not a good default, but backwards compatible. boolean _keep_interaction_columns = false; // not exposed: convenient for testing. @Override public String algoName() { return ALGO_NAME; } @Override public String fullName() { return "TargetEncoder"; } @Override public String javaName() { return TargetEncoderModel.class.getName(); } @Override public long progressUnits() { return 1; } public BlendingParams getBlendingParameters() { return _blending ? _inflection_point!=0 && _smoothing!=0 ? new BlendingParams(_inflection_point, _smoothing) : DEFAULT_BLENDING_PARAMS : null; } @Override protected boolean defaultDropConsCols() { return false; } } public static class TargetEncoderOutput extends Model.Output { public final TargetEncoderParameters _parms; public final int _nclasses; public ColumnsToSingleMapping[] _input_to_encoding_column; // maps input columns (or groups of columns) to the single column being effectively encoded (= key in _target_encoding_map). public ColumnsMapping[] _input_to_output_columns; // maps input columns (or groups of columns) to their corresponding encoded one(s). 
public IcedHashMap<String, Frame> _target_encoding_map; public IcedHashMap<String, Boolean> _te_column_to_hasNAs; //XXX: Map is a wrong choice for this, IcedHashSet would be perfect though public TargetEncoderOutput(TargetEncoder te) { super(te); _parms = te._parms; _nclasses = te.nclasses(); } void init(IcedHashMap<String, Frame> teMap, ColumnsToSingleMapping[] columnsToEncodeMapping) { _target_encoding_map = teMap; _input_to_encoding_column = columnsToEncodeMapping; _input_to_output_columns = buildInOutColumnsMapping(); _te_column_to_hasNAs = buildCol2HasNAsMap(); _model_summary = constructSummary(); } /** * builds the name of encoded columns */ private ColumnsMapping[] buildInOutColumnsMapping() { ColumnsMapping[] encMapping = new ColumnsMapping[_input_to_encoding_column.length]; for (int i=0; i < encMapping.length; i++) { ColumnsToSingleMapping toEncode = _input_to_encoding_column[i]; String[] groupCols = toEncode.from(); String columnToEncode = toEncode.toSingle(); Frame encodings = _target_encoding_map.get(columnToEncode); String[] encodedColumns = listUsedTargetClasses().mapToObj(tc -> encodedColumnName(columnToEncode, tc, encodings.vec(TARGETCLASS_COL)) ).toArray(String[]::new); encMapping[i] = new ColumnsMapping(groupCols, encodedColumns); } return encMapping; } private IcedHashMap<String, Boolean> buildCol2HasNAsMap() { final IcedHashMap<String, Boolean> col2HasNAs = new IcedHashMap<>(); for (Map.Entry<String, Frame> entry : _target_encoding_map.entrySet()) { String teColumn = entry.getKey(); Frame encodingsFrame = entry.getValue(); int teColCard = _parms.train().vec(teColumn) == null ? -1 // justifies the >0 test below: if teColumn is a transient (generated for interactions and therefore not in train), it can't have NAs as they're all already encoded. : _parms.train().vec(teColumn).cardinality(); boolean hasNAs = teColCard > 0 && teColCard < encodingsFrame.vec(teColumn).cardinality(); //XXX: _parms.train().vec(teColumn).naCnt() > 0 ? col2HasNAs.put(teColumn, hasNAs); } return col2HasNAs; } private IntStream listUsedTargetClasses() { return _nclasses == 1 ? IntStream.of(NO_TARGET_CLASS) // regression : _nclasses == 2 ? 
IntStream.of(1) // binary (use only positive target) : IntStream.range(1, _nclasses); // multiclass (skip only the 0 target for symmetry with binary) } private TwoDimTable constructSummary(){ TwoDimTable summary = new TwoDimTable( "Target Encoder model summary", "Summary for target encoder model", new String[_input_to_output_columns.length], new String[]{"Original name(s)", "Encoded column name(s)"}, new String[]{"string", "string"}, null, null ); for (int i = 0; i < _input_to_output_columns.length; i++) { ColumnsMapping mapping = _input_to_output_columns[i]; summary.set(i, 0, String.join(", ", mapping.from())); summary.set(i, 1, String.join(", ", mapping.to())); } return summary; } @Override public ModelCategory getModelCategory() { return ModelCategory.TargetEncoder; } } private static String encodedColumnName(String columnToEncode, int targetClass, Vec targetCol) { if (targetClass == NO_TARGET_CLASS || targetCol == null) { return columnToEncode + ENCODED_COLUMN_POSTFIX; } else { String targetClassName = targetCol.domain()[targetClass]; return columnToEncode + "_" + StringUtils.sanitizeIdentifier(targetClassName) + ENCODED_COLUMN_POSTFIX; } } /******* Start TargetEncoderModel per se *******/ public TargetEncoderModel(Key<TargetEncoderModel> selfKey, TargetEncoderParameters parms, TargetEncoderOutput output) { super(selfKey, parms, output); } @Override public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) { throw H2O.unimpl("No Model Metrics for TargetEncoder."); } public Frame transformTraining(Frame fr) { return transformTraining(fr, NO_FOLD); } public Frame transformTraining(Frame fr, int outOfFold) { assert outOfFold == NO_FOLD || _parms._data_leakage_handling == DataLeakageHandlingStrategy.KFold; return transform(fr, true, outOfFold, _parms.getBlendingParameters(), _parms._noise); } public Frame transform(Frame fr) { return transform(fr, _parms.getBlendingParameters(), _parms._noise); } public Frame transform(Frame fr, BlendingParams blendingParams, double noiseLevel) { return transform(fr, false, NO_FOLD, blendingParams, noiseLevel); } /** * Applies target encoding to unseen data during training. * This means that DataLeakageHandlingStrategy is enforced to None. * * In the context of Target Encoding, {@link #transform(Frame, BlendingParams, double)} should be used to encode new data. * Whereas {@link #transformTraining(Frame)} should be used to encode training data. * * @param fr Data to transform * @param asTraining true iff transforming training data. * @param outOfFold if provided (if not = {@value NO_FOLD}), if asTraining=true, and if the model was trained with Kfold strategy, * then the frame will be encoded by aggregating encodings from all folds except this one. * This is mainly used during cross-validation. * @param blendingParams Parameters for blending. If null, blending parameters from models parameters are loaded. * If those are not set, DEFAULT_BLENDING_PARAMS from TargetEncoder class are used. * @param noiseLevel Level of noise applied (use -1 for default noise level, 0 to disable noise). * @return An instance of {@link Frame} with transformed fr, registered in DKV. 
*/ public Frame transform(Frame fr, boolean asTraining, int outOfFold, BlendingParams blendingParams, double noiseLevel) { if (!canApplyTargetEncoding(fr)) return fr; try (Scope.Safe safe = Scope.safe(fr)) { Frame adaptFr = adaptForEncoding(fr); return Scope.untrack(applyTargetEncoding( adaptFr, asTraining, outOfFold, blendingParams, noiseLevel, null )); } } @Override protected double[] score0(double data[], double preds[]){ throw new UnsupportedOperationException("TargetEncoderModel doesn't support scoring on raw data. Use transform() or score() instead."); } /** * {@link #score(Frame)} always encodes as if the data were new (ie. not training data). */ @Override public Frame score(Frame fr, String destination_key, Job j, boolean computeMetrics, CFuncRef customMetricFunc) throws IllegalArgumentException { if (!canApplyTargetEncoding(fr)) { Frame res = new Frame(Key.make(destination_key), fr.names(), fr.vecs()); DKV.put(res); return res; } try (Scope.Safe safe = Scope.safe(fr)) { Frame adaptFr = adaptForEncoding(fr); return Scope.untrack(applyTargetEncoding( adaptFr, false, NO_FOLD, _parms.getBlendingParameters(), _parms._noise, Key.make(destination_key) )); } } private Frame adaptForEncoding(Frame fr) { Frame adaptFr = new Frame(fr); Map<String, Integer> nameToIdx = nameToIndex(fr); for (int i=0; i<_output._names.length; i++) { String col = _output._names[i]; String[] domain = _output._domains[i]; int toAdaptIdx; if (domain != null && (toAdaptIdx = nameToIdx.getOrDefault(col, -1)) >= 0) { Vec toAdapt = adaptFr.vec(toAdaptIdx); if (!Arrays.equals(toAdapt.domain(), domain)) { Vec adapted = toAdapt.adaptTo(domain); adaptFr.replace(toAdaptIdx, adapted); } } } return adaptFr; } private boolean canApplyTargetEncoding(Frame fr) { Set<String> frColumns = new HashSet<>(Arrays.asList(fr.names())); boolean canApply = Arrays.stream(_output._input_to_encoding_column) .map(m -> Arrays.asList(m.from())) .anyMatch(frColumns::containsAll); if (!canApply) { LOG.info("Frame "+fr._key+" has no columns to encode with TargetEncoder, skipping it: " + "columns="+Arrays.toString(fr.names())+", " + "target encoder columns="+Arrays.deepToString(Arrays.stream(_output._input_to_encoding_column).map(ColumnsMapping::from).toArray(String[][]::new)) ); } return canApply; } /** * Core method for applying pre-calculated encodings to the dataset. * * @param data dataset that will be used as a base for creation of encodings . * @param asTraining is true, the original dataLeakageStrategy is applied, otherwise this is forced to {@link DataLeakageHandlingStrategy#None}. * @param blendingParams this provides parameters allowing to mitigate the effect * caused by small observations of some categories when computing their encoded value. * Use null to disable blending. * @param noise amount of noise to add to the final encodings. * Use 0 to disable noise. * Use -1 to use the default noise level computed from the target. * @param resultKey key of the result frame * @return a new frame with the encoded columns. */ Frame applyTargetEncoding(Frame data, boolean asTraining, int outOfFold, BlendingParams blendingParams, double noise, Key<Frame> resultKey) { final String targetColumn = _parms._response_column; final String foldColumn = _parms._fold_column; final DataLeakageHandlingStrategy dataLeakageHandlingStrategy = asTraining ? 
_parms._data_leakage_handling : DataLeakageHandlingStrategy.None; final long seed = _parms._seed; assert outOfFold == NO_FOLD || dataLeakageHandlingStrategy == DataLeakageHandlingStrategy.KFold; // early check on frame requirements switch (dataLeakageHandlingStrategy) { case KFold: if (data.find(foldColumn) < 0) throw new H2OIllegalArgumentException("KFold strategy requires a fold column `"+_parms._fold_column+"` like the one used during training."); break; case LeaveOneOut: if (data.find(targetColumn) < 0) // Note: for KFold strategy we don't need targetColumn so that we can exclude values from // current fold - as everything is already precomputed and stored in encoding map. // This is not the case with LeaveOneOut when we need to subtract current row's value and for that // we need to make sure that response column is provided and is a binary categorical column. throw new H2OIllegalArgumentException("LeaveOneOut strategy requires a response column `"+_parms._response_column+"` like the one used during training."); break; } // applying defaults if (noise < 0 ) { noise = defaultNoiseLevel(data, data.find(targetColumn)); LOG.warn("No noise level specified, using default noise level: "+noise); } if (resultKey == null){ resultKey = Key.make(); } EncodingStrategy strategy; switch (dataLeakageHandlingStrategy) { case KFold: strategy = new KFoldEncodingStrategy(foldColumn, outOfFold, blendingParams, noise, seed); break; case LeaveOneOut: strategy = new LeaveOneOutEncodingStrategy(targetColumn, blendingParams, noise, seed); break; case None: default: strategy = new DefaultEncodingStrategy(blendingParams, noise, seed); break; } List<Keyed> tmps = new ArrayList<>(); Frame workingFrame = null; Key<Frame> tmpKey; try { workingFrame = makeWorkingFrame(data);; tmpKey = workingFrame._key; for (ColumnsToSingleMapping columnsToEncode: _output._input_to_encoding_column) { // TODO: parallelize this, should mainly require change in naming of num/den columns String[] colGroup = columnsToEncode.from(); String columnToEncode = columnsToEncode.toSingle(); Frame encodings = _output._target_encoding_map.get(columnToEncode); assert encodings != null; // passing the interaction domain obtained during training: // - this ensures that the interaction column will have the same domain as in training (no need to call adaptTo on the new Vec). // - this improves speed when creating the interaction column (no need to extract the domain). // - unseen values/interactions are however represented as NAs in the new column, which is acceptable as TE encodes them in the same way anyway. int colIdx = addFeatureInteraction(workingFrame, colGroup, columnsToEncode.toDomain()); if (colIdx < 0) { LOG.warn("Column "+Arrays.toString(colGroup)+" is missing in frame "+data._key); continue; } assert workingFrame.name(colIdx).equals(columnToEncode); // if not applying encodings to training data, then get rid of the foldColumn in encodings. 
if (dataLeakageHandlingStrategy != DataLeakageHandlingStrategy.KFold && encodings.find(foldColumn) >= 0) { encodings = groupEncodingsByCategory(encodings, encodings.find(columnToEncode)); tmps.add(encodings); } imputeCategoricalColumn(workingFrame, colIdx, columnToEncode + NA_POSTFIX); for (OfInt it = _output.listUsedTargetClasses().iterator(); it.hasNext(); ) { int tc = it.next(); try { workingFrame = strategy.apply(workingFrame, columnToEncode, encodings, tc); } finally { DKV.remove(tmpKey); tmpKey = workingFrame._key; } } // end for each target if (!_parms._keep_interaction_columns && colGroup.length > 1) tmps.add(workingFrame.remove(colIdx)); } // end for each columnToEncode if (!_parms._keep_original_categorical_columns) { Set<String> removed = new HashSet<>(); for (ColumnsMapping columnsToEncode: _output._input_to_encoding_column) { for (String col: columnsToEncode.from()) { if (removed.contains(col)) continue; tmps.add(workingFrame.remove(col)); removed.add(col); } } } DKV.remove(tmpKey); workingFrame._key = resultKey; reorderColumns(workingFrame); DKV.put(workingFrame); return workingFrame; } catch (Exception e) { if (workingFrame != null) workingFrame.delete(); throw e; } finally { for (Keyed tmp : tmps) tmp.remove(); } } private double defaultNoiseLevel(Frame fr, int targetIndex) { double defaultNoiseLevel = 0.01; double noiseLevel = 0.0; // When noise is not provided and there is no response column in the `data` frame -> no noise will be added to transformations if (targetIndex >= 0) { Vec targetVec = fr.vec(targetIndex); noiseLevel = targetVec.isNumeric() ? defaultNoiseLevel * (targetVec.max() - targetVec.min()) : defaultNoiseLevel; } return noiseLevel; } /** * Ideally there should be no need to deep copy columns that are not listed as input in _input_to_output_columns. * However if we keep the original columns in the output, then they are deleted in the model integration: {@link hex.ModelBuilder#trackEncoded}. * On the other side, if copied as a "ShallowVec" (extending WrappedVec) to prevent deletion of data in trackEncoded, * then we expose WrappedVec to the client it all non-integration use cases, which is strongly discouraged. * Catch-22 situation, so keeping the deepCopy for now as is occurs only for predictions, so the data are usually smaller. * @param fr * @return the working frame used to make predictions */ private Frame makeWorkingFrame(Frame fr) { return fr.deepCopy(Key.make().toString()); } /** * For model integration, we need to ensure that columns are offered to the model in a consistent order. * After TE encoding, columns are always in the following order: * <ol> * <li>non-categorical predictors present in training frame</li> * <li>TE-encoded predictors</li> * <li>remaining categorical predictors present in training frame</li> * <li>remaining predictors not present in training frame</li> * <li>non-predictors</li> * </ol> * This way, categorical encoder can later encode the remaining categorical predictors * without changing the index of TE cols: somehow necessary when integrating TE in the model Mojo. * * @param fr the frame whose columns need to be reordered. 
*/ private void reorderColumns(Frame fr) { String[] toTheEnd = _parms.getNonPredictors(); Map<String, Integer> nameToIdx = nameToIndex(fr); List<Integer> toAppendAfterNumericals = new ArrayList<>(); String[] trainColumns = _output._names; Set<String> trainCols = new HashSet<>(Arrays.asList(trainColumns)); String[] notInTrainColumns = Arrays.stream(fr.names()) .filter(c -> !trainCols.contains(c)) .toArray(String[]::new); int[] newOrder = new int[fr.numCols()]; int offset = 0; for (String col : trainColumns) { if (nameToIdx.containsKey(col) && !ArrayUtils.contains(toTheEnd, col)) { int idx = nameToIdx.get(col); if (fr.vec(idx).isCategorical()) { toAppendAfterNumericals.add(idx); //first appending categoricals } else { newOrder[offset++] = idx; //adding all non-categoricals first } } } String[] encodedColumns = Arrays.stream(_output._input_to_output_columns) .flatMap(m -> Stream.of(m.to())) .toArray(String[]::new); Set<String> encodedCols = new HashSet<>(Arrays.asList(encodedColumns)); for (String col : encodedColumns) { // TE-encoded cols if (nameToIdx.containsKey(col)) newOrder[offset++] = nameToIdx.get(col); } for (String col : notInTrainColumns) { if (!encodedCols.contains(col)) toAppendAfterNumericals.add(nameToIdx.get(col)); // appending columns only in fr } for (String col : toTheEnd) { // then appending the trailing columns if (nameToIdx.containsKey(col)) toAppendAfterNumericals.add(nameToIdx.get(col)); } for (int idx : toAppendAfterNumericals) newOrder[offset++] = idx; fr.reOrder(newOrder); } @Override public TargetEncoderMojoWriter getMojo() { return new TargetEncoderMojoWriter(this); } @Override protected Futures remove_impl(Futures fs, boolean cascade) { if (_output._target_encoding_map != null) { for (Frame encodings : _output._target_encoding_map.values()) { encodings.delete(); } } return super.remove_impl(fs, cascade); } private static abstract class EncodingStrategy { BlendingParams _blendingParams; double _noise; long _seed; public EncodingStrategy(BlendingParams blendingParams, double noise, long seed) { _blendingParams = blendingParams; _noise = noise; _seed = seed; } public Frame apply(Frame fr, String columnToEncode, Frame encodings, int targetClass) { try { Scope.enter(); Frame appliedEncodings; int tcIdx = encodings.find(TARGETCLASS_COL); if (tcIdx < 0) { appliedEncodings = encodings; } else { appliedEncodings = filterByValue(encodings, tcIdx, targetClass); Scope.track(appliedEncodings); appliedEncodings.remove(TARGETCLASS_COL); } String encodedColumn = encodedColumnName(columnToEncode, targetClass, tcIdx < 0 ? null : encodings.vec(tcIdx)); Frame encoded = doApply(fr, columnToEncode, appliedEncodings, encodedColumn, targetClass); Scope.untrack(encoded); return encoded; } finally { Scope.exit(); } } public abstract Frame doApply(Frame fr, String columnToEncode, Frame encodings, String encodedColumn, int targetClass); protected void applyNoise(Frame frame, int columnIdx, double noiseLevel, long seed) { if (noiseLevel > 0) addNoise(frame, columnIdx, noiseLevel, seed); } /** FIXME: this method is modifying the original fr column in-place, one of the reasons why we currently need a complete deep-copy of the training frame... 
*/ protected void imputeMissingValues(Frame fr, int columnIndex, double imputedValue) { Vec vec = fr.vec(columnIndex); assert vec.get_type() == Vec.T_NUM : "Imputation of missing value is supported only for numerical vectors."; if (vec.naCnt() > 0) { new FillNAWithDoubleValueTask(columnIndex, imputedValue).doAll(fr); if (LOG.isInfoEnabled()) LOG.info(String.format("Frame with id = %s was imputed with posterior value = %f ( %d rows were affected)", fr._key, imputedValue, vec.naCnt())); } } //XXX: usage of this method is confusing: // - if there was no NAs during training, we can only impute missing values on encoded column with priorMean (there's no posterior to blend with). // - if there was NAs during training, then encodings must contain entries for the NA category, so true NAs will be properly encoded at this point. // Therefore, we impute only unseen categories, which we can decide to encode with priorMean (after all we don't have posterior for this new category), // or as if these were true NAs: this is what the code below does, but it doesn't look fully justified, except maybe to reduce the leakage from priorMean. // If this is useful to reduce leakage, shouldn't we also use it in LOO + KFold strategies? protected double valueForImputation(String columnToEncode, Frame encodings, double priorMean, BlendingParams blendingParams) { assert encodings.name(0).equals(columnToEncode); int nRows = (int) encodings.numRows(); String lastDomain = encodings.domains()[0][nRows - 1]; boolean hadMissingValues = lastDomain.equals(columnToEncode + NA_POSTFIX); double numeratorNA = encodings.vec(NUMERATOR_COL).at(nRows - 1); long denominatorNA = encodings.vec(DENOMINATOR_COL).at8(nRows - 1); double posteriorNA = numeratorNA / denominatorNA; boolean useBlending = blendingParams != null; return !hadMissingValues ? priorMean // no NA during training, so no posterior, the new (unseen) cat can only be encoded using priorMean. : useBlending ? getBlendedValue(posteriorNA, priorMean, denominatorNA, blendingParams) // consider new (unseen) cat as a true NA + apply blending. : posteriorNA; // consider new (unseen) cat as a true NA + no blending. 
} protected void removeNumeratorAndDenominatorColumns(Frame fr) { Vec removedNumerator = fr.remove(NUMERATOR_COL); removedNumerator.remove(); Vec removedDenominator = fr.remove(DENOMINATOR_COL); removedDenominator.remove(); } } private static class KFoldEncodingStrategy extends EncodingStrategy { String _foldColumn; int _outOfFold; public KFoldEncodingStrategy(String foldColumn, int outOfFold, BlendingParams blendingParams, double noise, long seed) { super(blendingParams, noise, seed); _foldColumn = foldColumn; _outOfFold = outOfFold; } @Override public Frame doApply(Frame fr, String columnToEncode, Frame encodings, String encodedColumn, int targetClass) { Frame workingFrame = fr; int teColumnIdx = fr.find(columnToEncode); int foldColIdx; if (_outOfFold== NO_FOLD) { foldColIdx = fr.find(_foldColumn); } else { workingFrame = new Frame(fr); Vec tmpFoldCol = workingFrame.anyVec().makeCon(_outOfFold); Scope.track(tmpFoldCol); workingFrame.add(new String[] {_foldColumn+TMP_COLUMN_POSTFIX}, new Vec[]{tmpFoldCol}); foldColIdx = workingFrame.numCols()-1; } int encodingsFoldColIdx = encodings.find(_foldColumn); int encodingsTEColIdx = encodings.find(columnToEncode); long[] foldValues = getUniqueColumnValues(encodings, encodingsFoldColIdx); int maxFoldValue = (int) ArrayUtils.maxValue(foldValues); double priorMean = computePriorMean(encodings); //FIXME: we want prior for the outOfFold encodings Frame joinedFrame = mergeEncodings( workingFrame, encodings, teColumnIdx, foldColIdx, encodingsTEColIdx, encodingsFoldColIdx, maxFoldValue ); Scope.track(joinedFrame); if (_outOfFold!= NO_FOLD) { joinedFrame.remove(foldColIdx); } //XXX: the priorMean here is computed on the entire training set, regardless of the folding structure, therefore it introduces a data leakage. // Shouldn't we instead provide a priorMean per fold? // We should be able to compute those k priorMeans directly from the encodings Frame, so it doesn't require any change in the Mojo. // However, applyEncodings would also need an additional arg for the foldColumn. int encodedColIdx = applyEncodings(joinedFrame, encodedColumn, priorMean, _blendingParams); applyNoise(joinedFrame, encodedColIdx, _noise, _seed); // Cases when we can introduce NAs in the encoded column: // - if the column to encode contains categories unseen during training (including NA): // however this is very unlikely as KFold strategy is usually used when applying TE on the training frame. // - if during training, a category is present only in one fold, // then this couple (category, fold) will be missing in the encodings frame, // and mergeEncodings will put NAs for both num and den, turning into a NA in the encoded column after applyEncodings. imputeMissingValues(joinedFrame, encodedColIdx, priorMean); //XXX: same concern as above regarding priorMean. 
removeNumeratorAndDenominatorColumns(joinedFrame); return joinedFrame; } } private static class LeaveOneOutEncodingStrategy extends EncodingStrategy { String _targetColumn; public LeaveOneOutEncodingStrategy(String targetColumn, BlendingParams blendingParams, double noise, long seed) { super(blendingParams, noise, seed); _targetColumn = targetColumn; } @Override public Frame doApply(Frame fr, String columnToEncode, Frame encodings, String encodedColumn, int targetClass) { int teColumnIdx = fr.find(columnToEncode); int encodingsTEColIdx = encodings.find(columnToEncode); double priorMean = computePriorMean(encodings); Frame joinedFrame = mergeEncodings(fr, encodings, teColumnIdx, encodingsTEColIdx); Scope.track(joinedFrame); subtractTargetValueForLOO(joinedFrame, _targetColumn, targetClass); int encodedColIdx = applyEncodings(joinedFrame, encodedColumn, priorMean, _blendingParams); applyNoise(joinedFrame, encodedColIdx, _noise, _seed); // Cases when we can introduce NAs in the encoded column: // - only when the column to encode contains categories unseen during training (including NA). imputeMissingValues(joinedFrame, encodedColIdx, priorMean); removeNumeratorAndDenominatorColumns(joinedFrame); return joinedFrame; } } private static class DefaultEncodingStrategy extends EncodingStrategy { public DefaultEncodingStrategy(BlendingParams blendingParams, double noise, long seed) { super(blendingParams, noise, seed); } @Override public Frame doApply(Frame fr, String columnToEncode, Frame encodings, String encodedColumn, int targetClass) { int teColumnIdx = fr.find(columnToEncode); int encodingsTEColIdx = encodings.find(columnToEncode); double priorMean = computePriorMean(encodings); Frame joinedFrame = mergeEncodings(fr, encodings, teColumnIdx, encodingsTEColIdx); Scope.track(joinedFrame); int encodedColIdx = applyEncodings(joinedFrame, encodedColumn, priorMean, _blendingParams); applyNoise(joinedFrame, encodedColIdx, _noise, _seed); // Cases when we can introduce NAs in the encoded column: // - only when the column to encode contains categories unseen during training (including NA). // We impute NAs with mean computed from training set, which is a data leakage. // Note: In case of creating encoding map based on the holdout set we'd better use stratified sampling. // Maybe even choose size of holdout taking into account size of the minimal set that represents all levels. // Otherwise there are higher chances to get NA's for unseen categories. double valueForImputation = valueForImputation(columnToEncode, encodings, priorMean, _blendingParams); imputeMissingValues(joinedFrame, encodedColIdx, valueForImputation); removeNumeratorAndDenominatorColumns(joinedFrame); return joinedFrame; } } }
0
java-sources/ai/h2o/h2o-ext-target-encoder/3.46.0.7/ai/h2o
java-sources/ai/h2o/h2o-ext-target-encoder/3.46.0.7/ai/h2o/targetencoding/TargetEncoderMojoWriter.java
package ai.h2o.targetencoding; import ai.h2o.targetencoding.TargetEncoderModel.TargetEncoderOutput; import ai.h2o.targetencoding.TargetEncoderModel.TargetEncoderParameters; import hex.ModelMojoWriter; import water.fvec.Frame; import water.fvec.Vec; import water.util.IcedHashMap; import java.io.IOException; import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Objects; import java.util.stream.Collectors; import static ai.h2o.targetencoding.TargetEncoderHelper.*; import static hex.genmodel.algos.targetencoder.TargetEncoderMojoReader.*; public class TargetEncoderMojoWriter extends ModelMojoWriter<TargetEncoderModel, TargetEncoderParameters, TargetEncoderOutput> { @SuppressWarnings("unused") // Called through reflection in ModelBuildersHandler public TargetEncoderMojoWriter() { } public TargetEncoderMojoWriter(TargetEncoderModel model) { super(model); } @Override public String mojoVersion() { return "1.00"; } @Override protected void writeModelData() throws IOException { writeTargetEncodingInfo(); writeTargetEncodingMap(); } /** * Writes target encoding's extra info */ private void writeTargetEncodingInfo() throws IOException { TargetEncoderOutput output = model._output; TargetEncoderParameters teParams = output._parms; writekv("keep_original_categorical_columns", teParams._keep_original_categorical_columns); writekv("with_blending", teParams._blending); if (teParams._blending) { writekv("inflection_point", teParams._inflection_point); writekv("smoothing", teParams._smoothing); } List<String> nonPredictors = Arrays.stream(new String[]{ model._output.weightsName(), model._output.offsetName(), model._output.foldName(), model._output.responseName() }).filter(Objects::nonNull).collect(Collectors.toList()); writekv("non_predictors", String.join(";", nonPredictors)); writeColumnsHasNAs(output._te_column_to_hasNAs, MISSING_VALUES_PRESENCE_MAP_PATH); writeColumnsMapping(output._input_to_encoding_column, INPUT_ENCODING_COLUMNS_MAPPING_PATH); writeColumnsMapping(output._input_to_output_columns, INPUT_OUTPUT_COLUMNS_MAPPING_PATH); } /** * Writes encoding map into the file line by line */ private void writeTargetEncodingMap() throws IOException { TargetEncoderOutput targetEncoderOutput = model._output; int nclasses = model._output._nclasses; Map<String, Frame> targetEncodingMap = targetEncoderOutput._target_encoding_map; groupEncodingsByFoldColumnIfNeeded(targetEncoderOutput, targetEncodingMap); startWritingTextFile(ENCODING_MAP_PATH); for (Entry<String, Frame> encodingsEntry : targetEncodingMap.entrySet()) { String column = encodingsEntry.getKey(); Frame encodings = encodingsEntry.getValue(); Vec.Reader catRead = encodings.vec(0).new Reader(); Vec.Reader numRead = encodings.vec(NUMERATOR_COL).new Reader(); Vec.Reader denRead = encodings.vec(DENOMINATOR_COL).new Reader(); Vec.Reader tcRead = nclasses > 2 ? encodings.vec(TARGETCLASS_COL).new Reader() : null; writeln("[" + column + "]"); for (int i=0; i<catRead.length(); i++) { String category = Long.toString(catRead.at8(i)); String[] components = tcRead == null ? new String[] {Double.toString(numRead.at(i)), Double.toString(denRead.at(i))} : new String[] {Double.toString(numRead.at(i)), Double.toString(denRead.at(i)), Long.toString(tcRead.at8(i))}; writelnkv(category, String.join(" ", components)); } } finishWritingTextFile(); } /** * For transforming (making predictions) non-training data we don't need `te folds` in our encoding maps. 
*/ private void groupEncodingsByFoldColumnIfNeeded(TargetEncoderOutput targetEncoderOutput, Map<String, Frame> targetEncodingMap) { String foldColumn = targetEncoderOutput._parms._fold_column; if (foldColumn != null) { try { for (Entry<String, Frame> encodingMapEntry : targetEncodingMap.entrySet()) { String teColumn = encodingMapEntry.getKey(); Frame encodingsWithFolds = encodingMapEntry.getValue(); Frame encodingsWithoutFolds = groupEncodingsByCategory(encodingsWithFolds, encodingsWithFolds.find(teColumn) , true); targetEncodingMap.put(teColumn, encodingsWithoutFolds); encodingsWithFolds.delete(); } } catch (Exception ex) { throw new IllegalStateException("Failed to group encoding maps by fold column", ex); } } } //XXX: additional file unnecessary, we could just write the list/set of columns with NAs private void writeColumnsHasNAs(IcedHashMap<String, Boolean> col2HasNAs, String fileName) throws IOException { startWritingTextFile(fileName); for(Entry<String, Boolean> entry: col2HasNAs.entrySet()) { writelnkv(entry.getKey(), entry.getValue() ? "1" : "0"); } finishWritingTextFile(); } private void writeColumnsMapping(ColumnsMapping[] mapping, String fileName) throws IOException { startWritingTextFile(fileName); for (ColumnsMapping entry : mapping) { writeln("[from]"); for (String s : entry.from()) writeln(s); writeln("[to]"); for (String s : entry.to()) writeln(s); if (entry instanceof ColumnsToSingleMapping && entry.from().length > 1) { //write to_domain only if the to column is new (interaction col). writeln("[to_domain]"); for (String s : ((ColumnsToSingleMapping) entry).toDomain()) writeln(s); } } finishWritingTextFile(); } }
0
java-sources/ai/h2o/h2o-ext-target-encoder/3.46.0.7/ai/h2o
java-sources/ai/h2o/h2o-ext-target-encoder/3.46.0.7/ai/h2o/targetencoding/TargetEncoderPreprocessor.java
package ai.h2o.targetencoding; import hex.Model; import hex.ModelPreprocessor; import water.DKV; import water.Futures; import water.Key; import water.fvec.Frame; import java.util.Objects; import static ai.h2o.targetencoding.TargetEncoderModel.DataLeakageHandlingStrategy.*; public class TargetEncoderPreprocessor extends ModelPreprocessor<TargetEncoderPreprocessor> { private TargetEncoderModel _targetEncoder; public TargetEncoderPreprocessor(TargetEncoderModel targetEncoder) { super(Key.make(Objects.toString(targetEncoder._key)+"_preprocessor")); this._targetEncoder = targetEncoder; DKV.put(this); } @Override public Frame processTrain(Frame fr, Model.Parameters params) { if (useFoldTransform(params)) { return _targetEncoder.transformTraining(fr, params._cv_fold); } else { return _targetEncoder.transformTraining(fr); } } @Override public Frame processValid(Frame fr, Model.Parameters params) { if (useFoldTransform(params)) { return _targetEncoder.transformTraining(fr); } else { return _targetEncoder.transform(fr); } } @Override public Frame processScoring(Frame fr, Model model) { return _targetEncoder.transform(fr); } @Override public Model asModel() { return _targetEncoder; } @Override protected Futures remove_impl(Futures fs, boolean cascade) { if (cascade && _targetEncoder != null) _targetEncoder.remove(); return super.remove_impl(fs, cascade); } private boolean useFoldTransform(Model.Parameters params) { return params._is_cv_model && _targetEncoder._parms._data_leakage_handling == KFold; } }
0
java-sources/ai/h2o/h2o-ext-target-encoder/3.46.0.7/ai/h2o/targetencoding
java-sources/ai/h2o/h2o-ext-target-encoder/3.46.0.7/ai/h2o/targetencoding/interaction/CreateInteractionTask.java
package ai.h2o.targetencoding.interaction; import water.MRTask; import water.fvec.Chunk; import water.fvec.NewChunk; import java.util.Arrays; import java.util.HashMap; import java.util.Map; class CreateInteractionTask extends MRTask<CreateInteractionTask> { final InteractionsEncoder _encoder; final long[] _interactionDomain; // sorted by construction (see createInteractionColumn), or null private transient Map<Long, Integer> _interactionValueToCategoricalValue; public CreateInteractionTask(InteractionsEncoder encoder, String[] interactionDomain) { _encoder = encoder; _interactionDomain = interactionDomain==null ? null : Arrays.stream(interactionDomain).mapToLong(Long::parseLong).toArray(); } @Override protected void setupLocal() { if (_interactionDomain != null) { _interactionValueToCategoricalValue = new HashMap<>(); for (int i = 0; i < _interactionDomain.length; i++) { _interactionValueToCategoricalValue.put(_interactionDomain[i], i); } } } @Override public void map(Chunk[] cs, NewChunk nc) { for (int row = 0; row < cs[0].len(); row++) { int[] interactingValues = new int[cs.length]; for (int i = 0; i < cs.length; i++) { interactingValues[i] = cs[i].isNA(row) ? -1:(int) cs[i].at8(row); } long val = _encoder.encode(interactingValues); if (val < 0) { nc.addNA(); } else if (_interactionDomain==null) { nc.addNum(val); } else { int catVal = _interactionValueToCategoricalValue.getOrDefault(val, -1); if (catVal < 0) nc.addNA(); else nc.addCategorical(catVal); } } } }
0
java-sources/ai/h2o/h2o-ext-target-encoder/3.46.0.7/ai/h2o/targetencoding
java-sources/ai/h2o/h2o-ext-target-encoder/3.46.0.7/ai/h2o/targetencoding/interaction/InteractionSupport.java
package ai.h2o.targetencoding.interaction; import water.fvec.Frame; import water.fvec.Vec; import water.util.VecUtils; import java.util.Arrays; import java.util.HashSet; public class InteractionSupport { private static final String COL_INTERACTION_SEPARATOR = ":"; /** * @return the index of the interaction column for the group, or the index of the column if the group has only one. */ public static int addFeatureInteraction(Frame fr, String[] colGroup) { return addFeatureInteraction(fr, colGroup, null); } /** * @param interactionDomain the domain of the generated interaction column, if already known, * for example when computing interaction for predictions. * @return the index of the interaction column for the group, * or the index of the column if the group has only one, * or -1 if one column in the interaction group is missing. */ public static int addFeatureInteraction(Frame fr, String[] colGroup, String[] interactionDomain) { if (colGroup.length == 1) { return fr.find(colGroup[0]); } else if (new HashSet<>(Arrays.asList(fr.names())).containsAll(Arrays.asList(colGroup))) { return addInteractionColumn(fr, colGroup, interactionDomain); } else { // skip interaction if one col is missing (we could also replace missing columns with a col of NAs, but we don't do this today when a simple cat column is missing) return -1; } } private static int addInteractionColumn(Frame fr, String[] interactingColumns, String[] interactionDomain) { String interactionColName = String.join(COL_INTERACTION_SEPARATOR, interactingColumns); // any limit to col name length? int[] cols = Arrays.stream(interactingColumns).mapToInt(fr::find).toArray(); Vec interactionCol = createInteractionColumn(fr, cols, interactionDomain); fr.add(interactionColName, interactionCol); return fr.numCols()-1; } static Vec createInteractionColumn(Frame fr, int[] interactingColumnsIdx, String[] interactionDomain) { String[][] interactingDomains = new String[interactingColumnsIdx.length][]; Vec[] interactingVecs = new Vec[interactingColumnsIdx.length]; for (int i=0; i<interactingColumnsIdx.length; i++) { Vec vec = fr.vec(interactingColumnsIdx[i]); interactingVecs[i] = vec; interactingDomains[i] = vec.domain(); } final InteractionsEncoder encoder = new InteractionsEncoder(interactingDomains, true); byte interactionType = interactionDomain == null ? Vec.T_NUM : Vec.T_CAT; Vec interactionCol = new CreateInteractionTask(encoder, interactionDomain) .doAll(new byte[] {interactionType}, interactingVecs) .outputFrame(null, null, new String[][]{interactionDomain}) .lastVec(); if (interactionType != Vec.T_CAT) interactionCol = VecUtils.toCategoricalVec(interactionCol); // the domain is obtained from CollectDoubleDomain, so it is sorted by numerical value, and then converted to String return interactionCol; } }
0
java-sources/ai/h2o/h2o-ext-target-encoder/3.46.0.7/ai/h2o/targetencoding
java-sources/ai/h2o/h2o-ext-target-encoder/3.46.0.7/ai/h2o/targetencoding/interaction/InteractionsEncoder.java
package ai.h2o.targetencoding.interaction; import water.Iced; import water.util.ArrayUtils; /** * The interaction value is simply encoded as: * val = val1 + (val2 * card1) + … + (valN * card1 * … * cardN-1) * where val1, val2, …, valN are the interacting values * and card1, …, cardN are the extended domain cardinalities (taking NAs into account) for interacting columns. */ class InteractionsEncoder extends Iced { static final String UNSEEN = "_UNSEEN_"; static final String NA = "_NA_"; private boolean _encodeUnseenAsNA; private String[][] _interactingDomains; private long[] _encodingFactors; InteractionsEncoder(String[][] interactingDomains, boolean encodeUnseenAsNA) { _encodeUnseenAsNA = encodeUnseenAsNA; _interactingDomains = interactingDomains; _encodingFactors = createEncodingFactors(); } long encode(int[] interactingValues) { long value = 0; for (int i = 0; i < interactingValues.length; i++) { int domainCard = _interactingDomains[i].length; long interactionFactor = _encodingFactors[i]; int ival = interactingValues[i]; if (ival >= domainCard) ival = domainCard; // unseen value during training if (ival < 0) ival = _encodeUnseenAsNA ? domainCard : (domainCard + 1); // NA value += ival * interactionFactor; } return value; } long encodeStr(String[] interactingValues) { int[] values = new int[interactingValues.length]; for (int i = 0; i < interactingValues.length; i++) { String[] domain = _interactingDomains[i]; String val = interactingValues[i]; int ival = val==null ? -1 : ArrayUtils.find(domain, val); if (ival < 0 && val != null) { //emulates distinction between NA and unseen. values[i] = domain.length; } else { values[i] = ival; } } return encode(values); } int[] decode(long interactionValue) { int[] values = new int[_encodingFactors.length]; long value = interactionValue; for (int i = _encodingFactors.length - 1; i >= 0; i--) { long factor = _encodingFactors[i]; values[i] = (int)(value / factor); value %= factor; } return values; } String[] decodeStr(long interactionValue) { int[] values = decode(interactionValue); String[] catValues = new String[values.length]; for (int i = 0; i < values.length; i++) { String[] domain = _interactingDomains[i]; int val = values[i]; catValues[i] = val < domain.length ? domain[val] : i==domain.length ? (_encodeUnseenAsNA ? null : UNSEEN) : null; } return catValues; } private long[] createEncodingFactors() { long[] factors = new long[_interactingDomains.length]; long multiplier = 1; for (int i = 0; i < _interactingDomains.length; i++) { int domainCard = _interactingDomains[i].length; int interactionFactor = _encodeUnseenAsNA ? (domainCard + 1) : (domainCard + 2); // +1 for NAs, +1 for potential unseen values factors[i] = multiplier; multiplier *= interactionFactor; } return factors; } }