java-sources/ai/h2o/h2o-core/3.46.0.7/hex/grid/GridSearch.java
package hex.grid; import hex.*; import hex.faulttolerance.Recovery; import hex.grid.HyperSpaceWalker.BaseWalker; import jsr166y.CountedCompleter; import water.*; import water.exceptions.H2OConcurrentModificationException; import water.exceptions.H2OGridException; import water.exceptions.H2OIllegalArgumentException; import water.fvec.Frame; import water.util.Log; import water.util.PojoUtils; import java.util.*; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; /** * Grid search job. * * This job represents a generic interface to launch "any" hyper space * search. It triggers sub-jobs for each point in hyper space. It produces * <code>Grid</code> object which contains a list of build models. A triggered * model builder job can fail! * * Grid search is parametrized by hyper space walk strategy ({@link * HyperSpaceWalker} which defines how the space of hyper parameters * is traversed. * * The job is started by the <code>startGridSearch</code> method which create a new grid search, put * representation of Grid into distributed KV store, and for each parameter in hyper space of * possible parameters, it launches a separated model building job. The launch of jobs is sequential * and blocking. So after finish the last model, whole grid search job is done as well. * * By default, the grid search invokes cartesian grid search, but it can be * modified by passing explicit hyper space walk strategy via the * {@link #startGridSearch(Key, Key, HyperSpaceWalker, Recovery, int)} method. * * If any of forked jobs fails then the failure is ignored, and grid search * normally continue in traversing the hyper space. * * Typical usage from Java is: * <pre>{@code * // Create initial parameters and fill them by references to data * GBMModel.GBMParameters params = new GBMModel.GBMParameters(); * params._train = fr._key; * params._response_column = "cylinders"; * * // Define hyper-space to search * HashMap<String,Object[]> hyperParms = new HashMap<>(); * hyperParms.put("_ntrees", new Integer[]{1, 2}); * hyperParms.put("_distribution",new DistributionFamily[] {DistributionFamily.multinomial}); * hyperParms.put("_max_depth",new Integer[]{1,2,5}); * hyperParms.put("_learn_rate",new Float[]{0.01f,0.1f,0.3f}); * * // Launch grid search job creating GBM models * GridSearch gridSearchJob = GridSearch.startGridSearch(params, hyperParms, GBM_MODEL_FACTORY); * * // Block till the end of the job and get result * Grid grid = gridSearchJob.get() * * // Get built models * Model[] models = grid.getModels() * }</pre> * * @see HyperSpaceWalker * @see #startGridSearch(Key, Key, HyperSpaceWalker, Recovery, int) */ public final class GridSearch<MP extends Model.Parameters> { public static <MP extends Model.Parameters> Builder<MP> create(Key<Grid> destKey, HyperSpaceWalker<MP, ?> walker) { assert walker != null; return new Builder<>(destKey, walker); } public static <MP extends Model.Parameters> Builder<MP> create(Key<Grid> destKey, final MP params, final Map<String, Object[]> hyperParams) { assert params != null; assert hyperParams != null; HyperSpaceWalker<MP, ?> walker = BaseWalker.WalkerFactory.create( params, hyperParams, new SimpleParametersBuilderFactory<>(), new HyperSpaceSearchCriteria.CartesianSearchCriteria() ); return create(destKey, walker); } public static class Builder<MP extends Model.Parameters> { private final GridSearch<MP> _gridSearch; private Builder(Key<Grid> destKey, HyperSpaceWalker<MP, ?> hyperSpaceWalker) { assert hyperSpaceWalker != null; if (destKey == null) { MP 
params = hyperSpaceWalker.getParams(); destKey = gridKeyName(params.algoName(), params.train()); } _gridSearch = new GridSearch<>(destKey, hyperSpaceWalker); } public Builder<MP> withParallelism(int parallelism) { _gridSearch._parallelism = parallelism; return this; } public Builder<MP> withMaxConsecutiveFailures(int maxConsecutiveFailures) { _gridSearch._maxConsecutiveFailures = maxConsecutiveFailures; return this; } public Builder<MP> withJob(Key<Job> jobKey) { return withRecoverableJob(jobKey, null); } public Builder<MP> withRecoverableJob(Key<Job> jobKey, Recovery recovery) { assert _gridSearch._job == null; String algoName = _gridSearch._hyperSpaceWalker.getParams().algoName(); final boolean recoverable = recovery != null && H2O.ARGS.auto_recovery_dir != null; _gridSearch._recovery = recovery; _gridSearch._job = new Job<>(jobKey, _gridSearch._result, Grid.class.getName(), algoName+" Grid Search", recoverable); return this; } public Job<Grid> start() { if (_gridSearch._job == null) withJob(null); return _gridSearch.start(); } } private Job<Grid> _job; private Recovery<Grid> _recovery; private int _parallelism = SEQUENTIAL_MODEL_BUILDING; private int _maxConsecutiveFailures = Integer.MAX_VALUE; // for now, disabled by default private final Key<Grid> _result; /** Walks hyper space and for each point produces model parameters. It is * used only locally to fire new model builders. */ private final transient HyperSpaceWalker<MP, ?> _hyperSpaceWalker; private GridSearch(Key<Grid> gridKey, HyperSpaceWalker<MP, ?> hyperSpaceWalker) { assert hyperSpaceWalker != null : "Grid search needs to know how to walk around hyper space!"; _hyperSpaceWalker = hyperSpaceWalker; _result = gridKey; } Job<Grid> start() { final long gridSize = _hyperSpaceWalker.getMaxHyperSpaceSize(); Log.info("Starting gridsearch: estimated size of search space = " + gridSize); // Create grid object and lock it // Creation is done here, since we would like make sure that after leaving // this function the grid object is in DKV and accessible. final Grid<MP> grid = getOrCreateGrid(); long gridWork = _hyperSpaceWalker.estimateGridWork(maxModels()); // Install this as job functions return _job.start(new H2O.H2OCountedCompleter() { @Override public void compute2() { try { beforeGridStart(grid); if (_parallelism == 1) { gridSearch(grid); } else if (_parallelism > 1) { parallelGridSearch(grid); } else { throw new IllegalArgumentException(String.format("Grid search parallelism level must be >= 1. Give value is '%d'.", _parallelism)); } } finally { grid.unlock(_job); } afterGridCompleted(grid); tryComplete(); } @Override public boolean onExceptionalCompletion(Throwable ex, CountedCompleter caller) { Log.warn("GridSearch job " + _job._description + " completed with exception: " + ex); try { // user (or AutoML) may want to cancel a grid search in the middle of the search for various reasons // without wanting to throw away the grid itself with all the models previously trained. if (Grid.isJobCanceled(ex) && !_job.isCrashing()) Log.info("Keeping incomplete grid "+_job._result+" after cancellation of job "+_job._description); else Keyed.remove(_job._result); // ensure that grid is cleaned up if it was completed abnormally. 
} catch (Exception logged) { Log.warn("Exception thrown when removing result from job " + _job._description, logged); } return true; } }, gridWork, maxRuntimeSecs()); } private Grid getOrCreateGrid() { Grid grid = loadFromDKV(); if (grid == null) { grid = createNewGrid(); } return grid; } private Grid loadFromDKV() { Keyed keyed = DKV.getGet(_result); if (keyed == null) return null; if (!(keyed instanceof Grid)) throw new H2OIllegalArgumentException("Name conflict: tried to create a Grid using the ID of a non-Grid object that's already in H2O: " + _job._result + "; it is a: " + keyed.getClass()); Grid grid = (Grid) keyed; grid.clearNonRelatedFailures(); Frame specTrainFrame = _hyperSpaceWalker.getParams().train(); Frame oldTrainFrame = grid.getTrainingFrame(); if (oldTrainFrame != null && !specTrainFrame._key.equals(oldTrainFrame._key) || oldTrainFrame != null && specTrainFrame.checksum() != oldTrainFrame.checksum()) throw new H2OIllegalArgumentException("training_frame", "grid", "Cannot append new models to a grid with different training input"); grid.write_lock(_job); return grid; } private Grid createNewGrid() { Grid grid = new Grid<>(_result, _hyperSpaceWalker, _parallelism); grid.delete_and_lock(_job); return grid; } /** * Returns expected number of models in resulting Grid object. * * The number can differ from final number of models due to visiting duplicate points in hyper * space. * * @return expected number of models produced by this grid search */ public long getModelCount() { return _hyperSpaceWalker.getMaxHyperSpaceSize(); } private long maxModels() { return _hyperSpaceWalker.search_criteria().stoppingCriteria() == null ? 0 : _hyperSpaceWalker.search_criteria().stoppingCriteria().getMaxModels(); } private double maxRuntimeSecs() { return _hyperSpaceWalker.search_criteria().stoppingCriteria() == null ? 0 : _hyperSpaceWalker.search_criteria().stoppingCriteria().getMaxRuntimeSecs(); } private double remainingTimeSecs() { return _job != null && _job._max_runtime_msecs > 0 // compute only if a time limit was assigned to the job ? (_job.start_time() + _job._max_runtime_msecs - System.currentTimeMillis()) / 1000. : Double.MAX_VALUE; } private ScoreKeeper.StoppingMetric sortingMetric() { return _hyperSpaceWalker.search_criteria().stoppingCriteria() == null ? 
ScoreKeeper.StoppingMetric.AUTO : _hyperSpaceWalker.search_criteria().stoppingCriteria().getStoppingMetric(); } private class ModelFeeder extends ParallelModelBuilder.ParallelModelBuilderCallback<ModelFeeder> { private final HyperSpaceWalker.HyperSpaceIterator<MP> hyperspaceIterator; private final Grid grid; private final Lock parallelSearchGridLock = new ReentrantLock(); public ModelFeeder(HyperSpaceWalker.HyperSpaceIterator<MP> hyperspaceIterator, Grid grid) { this.hyperspaceIterator = hyperspaceIterator; this.grid = grid; } @Override public void onBuildSuccess(final Model finishedModel, final ParallelModelBuilder parallelModelBuilder) { try { parallelSearchGridLock.lock(); constructScoringInfo(finishedModel); onModel(grid, finishedModel._input_parms.checksum(IGNORED_FIELDS_PARAM_HASH), finishedModel._key); _job.update(1); grid.update(_job); attemptGridSave(grid); } finally { parallelSearchGridLock.unlock(); } attemptBuildNextModel(parallelModelBuilder, finishedModel); } @Override public void onBuildFailure(final ParallelModelBuilder.ModelBuildFailure modelBuildFailure, final ParallelModelBuilder parallelModelBuilder) { parallelSearchGridLock.lock(); Throwable ex = modelBuildFailure.getThrowable(); try { grid.appendFailedModelParameters(null, modelBuildFailure.getParameters(), ex); } finally { parallelSearchGridLock.unlock(); } if (grid.countTotalFailures() > _maxConsecutiveFailures && grid.getModelCount() == 0 && !Grid.isJobCanceled(ex)) { _job.fail(new H2OGridException("Aborting Grid search after too many consecutive model failures.", ex)); } else { attemptBuildNextModel(parallelModelBuilder, null); } } private void attemptBuildNextModel(final ParallelModelBuilder parallelModelBuilder, final Model previousModel) { // Attempt to train next model try { parallelSearchGridLock.lock(); final MP nextModelParams = getNextModelParams(hyperspaceIterator, previousModel, grid); if (nextModelParams != null && isThereEnoughTime() && !_job.stop_requested() && !_hyperSpaceWalker.stopEarly(previousModel, grid.getScoringInfos()) ) { reconcileMaxRuntime(grid._key, nextModelParams); parallelModelBuilder.run(Collections.singletonList(ModelBuilder.make(nextModelParams))); } } finally { parallelSearchGridLock.unlock(); } } private void constructScoringInfo(final Model model) { ScoringInfo scoringInfo = new ScoringInfo(); scoringInfo.time_stamp_ms = System.currentTimeMillis(); model.fillScoringInfo(scoringInfo); grid.setScoringInfos(ScoringInfo.prependScoringInfo(scoringInfo, grid.getScoringInfos())); ScoringInfo.sort(grid.getScoringInfos(), sortingMetric()); } private boolean isThereEnoughTime() { final boolean enoughTime = remainingTimeSecs() > 0; if (!enoughTime) { Log.info("Grid max_runtime_secs of " + maxRuntimeSecs() + " secs has expired; stopping early."); } return enoughTime; } private MP getNextModelParams(final HyperSpaceWalker.HyperSpaceIterator<MP> hyperSpaceIterator, final Model model, final Grid grid){ MP params = null; while (params == null) { if (hyperSpaceIterator.hasNext()) { params = hyperSpaceIterator.nextModelParameters(); final Key modelKey = grid.getModelKey(params.checksum(IGNORED_FIELDS_PARAM_HASH)); if (modelKey != null) { params = null; } } else { break; } } return params; } } /** * Searches the hyperspace and builds models in a parallel way - building the models in parallel. 
* * @param grid Grid to add models to */ private void parallelGridSearch(final Grid<MP> grid) { final HyperSpaceWalker.HyperSpaceIterator<MP> iterator = _hyperSpaceWalker.iterator(); final ModelFeeder modelFeeder = new ModelFeeder(iterator, grid); final ParallelModelBuilder parallelModelBuilder = new ParallelModelBuilder(modelFeeder); List<ModelBuilder> startModels = new ArrayList<>(); while (startModels.size() < _parallelism && iterator.hasNext()) { final MP nextModelParameters = iterator.nextModelParameters(); final long checksum = nextModelParameters.checksum(IGNORED_FIELDS_PARAM_HASH); if (grid.getModelKey(checksum) == null) { startModels.add(ModelBuilder.make(nextModelParameters)); } } if(!startModels.isEmpty()) { parallelModelBuilder.run(startModels); parallelModelBuilder.join(); // Warning: keep in mind this is not being executed as a F/J task, it just looks like one } grid.update(_job); attemptGridSave(grid); if (_job.stop_requested()) { throw new Job.JobCancelledException(_job); } } /** * Invokes grid search based on specified hyper space walk strategy. * * It updates passed grid object in distributed store. * * @param grid grid object to save results; grid already locked */ private void gridSearch(Grid<MP> grid) { final String protoModelKey = grid._key + "_model_"; // Get iterator to traverse hyper space HyperSpaceWalker.HyperSpaceIterator<MP> it = _hyperSpaceWalker.iterator(); // Number of traversed model parameters int counter = grid.getModelCount(); while (it.hasNext()) { Model model = null; if (_job.stop_requested()) throw new Job.JobCancelledException(_job); // Handle end-user cancel request try { // Get parameters for next model MP params = it.nextModelParameters(); // Sequential model building, should never propagate // exception up, just mark combination of model parameters as wrong reconcileMaxRuntime(grid._key, params); try { ScoringInfo scoringInfo = new ScoringInfo(); scoringInfo.time_stamp_ms = System.currentTimeMillis(); //// build the model! model = buildModel(params, grid, ++counter, protoModelKey); if (model != null) { model.fillScoringInfo(scoringInfo); grid.setScoringInfos(ScoringInfo.prependScoringInfo(scoringInfo, grid.getScoringInfos())); ScoringInfo.sort(grid.getScoringInfos(), sortingMetric()); } } catch (RuntimeException e) { // Catch everything grid.appendFailedModelParameters(model != null ? model._key : null, params, e); if (Job.isCancelledException(e)) { assert model == null; final long checksum = params.checksum(IGNORED_FIELDS_PARAM_HASH); final Key<Model>[] modelKeys = findModelsByChecksum(checksum); if (modelKeys.length == 1) { Keyed.removeQuietly(modelKeys[0]); } else if (modelKeys.length > 1) { Log.warn("Checksum " + checksum + " " + "identified more than one model to clean-up, keeping all: " + Arrays.toString(modelKeys) + ". This could lead to a memory leak."); } else Log.debug("Model with param checksum " + checksum + " was cancelled before it was installed in DKV."); } else { Log.warn("Grid search: model builder for parameters " + params + " failed! Exception: ", e); if (grid.countTotalFailures() > _maxConsecutiveFailures && grid.getModelCount() == 0) { _job.fail(new H2OGridException("Aborting Grid search after too many consecutive model failures.", e)); } } } } catch (IllegalArgumentException e) { Log.warn("Grid search: construction of model parameters failed! Exception: ", e); // Model parameters cannot be constructed for some reason final Key<Model> failedModelKey = model != null ? 
model._key : null; it.onModelFailure(model, failedHyperParams -> grid.appendFailedModelParameters(failedModelKey, failedHyperParams, e)); } finally { // Update progress by 1 increment _job.update(1); // Always update grid in DKV after model building attempt grid.update(_job); attemptGridSave(grid); } // finally if (model != null && grid.getScoringInfos() != null && // did model build and scoringInfo creation succeed? _hyperSpaceWalker.stopEarly(model, grid.getScoringInfos())) { Log.info("Convergence detected based on simple moving average of the loss function. Grid building completed."); break; } } // while (it.hasNext(model)) Log.info("For grid: " + grid._key + " built: " + grid.getModelCount() + " models."); } /** * see {@code RandomDiscreteValueSearchCriteria.max_runtime_secs} for reconciliation logic */ private void reconcileMaxRuntime(Key<Grid<MP>> gridKey, Model.Parameters params) { double grid_max_runtime_secs = _job._max_runtime_msecs / 1000.; double time_remaining_secs = remainingTimeSecs(); if (grid_max_runtime_secs > 0) { Log.info("Grid time is limited to: " + grid_max_runtime_secs + " for grid: " + gridKey + ". Remaining time is: " + time_remaining_secs); if (time_remaining_secs < 0) { Log.info("Grid max_runtime_secs of " + grid_max_runtime_secs + " secs has expired; stopping early."); throw new Job.JobCancelledException(_job); } } if (params._max_runtime_secs > 0 ) { double was = params._max_runtime_secs; params._max_runtime_secs = Math.min(params._max_runtime_secs, time_remaining_secs); Log.info("Due to the grid time limit, changing model max runtime from: " + was + " secs to: " + params._max_runtime_secs + " secs."); } else { // params._max_runtime_secs == 0 params._max_runtime_secs = time_remaining_secs == Double.MAX_VALUE ? 0 : time_remaining_secs; //use standard 0 for no time limit. Log.info("Due to the grid time limit, changing model max runtime to: " + params._max_runtime_secs + " secs."); } } private void beforeGridStart(Grid grid) { if (_recovery != null) { _recovery.onStart(grid, _job); } } private void afterGridCompleted(Grid grid) { if (_recovery != null) { _recovery.onDone(grid); } } private void onModel(Grid grid, long checksum, Key<Model> modelKey) { grid.putModel(checksum, modelKey); if (_recovery != null) { _recovery.onModel(grid, modelKey); } } /** * Saves the grid, if folder for export is defined, otherwise does nothing. * * @param grid Grid to save. */ private void attemptGridSave(final Grid grid) { final String checkpointsDir = _hyperSpaceWalker.getParams()._export_checkpoints_dir; if (checkpointsDir == null) return; grid.exportBinary(checkpointsDir, false); } static final Set<String> IGNORED_FIELDS_PARAM_HASH = new HashSet<>(Arrays.asList( "_export_checkpoints_dir", "_max_runtime_secs" // We are modifying ourselves in Grid Search code )); /** * Build a model based on specified parameters and save it to resulting Grid object. * * Returns a model run with these parameters, typically built on demand and cached - expected to * be an expensive operation. If the model in question is "in progress", a 2nd build will NOT be * kicked off. This is a blocking call. * * If a new model is created, then the Grid object is updated in distributed store. If a model for * given parameters already exists, it is directly returned without updating the Grid object. If * model building fails then the Grid object is not updated and the method returns * <code>null</code>. 
* * @param params parameters for a new model * @param grid grid object holding created models * @param paramsIdx index of generated model parameter * @param protoModelKey prototype of model key * @return return a new model if it does not exist */ private Model buildModel(final MP params, Grid<MP> grid, int paramsIdx, String protoModelKey) { // Make sure that the model is not yet built (can be case of duplicated hyper parameters). // We first look in the grid _models cache, then we look in the DKV. // FIXME: get checksum here since model builder will modify instance of params!!! // Grid search might be continued over the very exact hyperspace, but with autoexporting disabled. // To prevent final long checksum = params.checksum(IGNORED_FIELDS_PARAM_HASH); Key<Model> key = grid.getModelKey(checksum); if (key != null) { if (DKV.get(key) == null) { // We know about a model that's been removed; rebuild. Log.info("GridSearch.buildModel(): model with these parameters was built but removed, rebuilding; checksum: " + checksum); } else { Log.info("GridSearch.buildModel(): model with these parameters already exists, skipping; checksum: " + checksum); return key.get(); } } // Is there a model with the same params in the DKV? Key<Model>[] modelKeys = findModelsByChecksum(checksum); if (modelKeys.length > 0) { onModel(grid, checksum, modelKeys[0]); return modelKeys[0].get(); } // Modify model key to have nice version with counter // Note: Cannot create it before checking the cache since checksum would differ for each model Key<Model> result = Key.make(protoModelKey + paramsIdx); // Build a new model assert grid.getModel(params) == null; Model m = ModelBuilder.trainModelNested(_job, result, params, null); assert checksum == m._input_parms.checksum(IGNORED_FIELDS_PARAM_HASH) : "Model checksum different from original params"; onModel(grid, checksum, result); return m; } @SuppressWarnings("unchecked") static Key<Model>[] findModelsByChecksum(final long checksum) { return KeySnapshot.globalSnapshot().filter(new KeySnapshot.KVFilter() { @Override public boolean filter(KeySnapshot.KeyInfo k) { if (! Value.isSubclassOf(k._type, Model.class)) return false; Model m = ((Model)k._key.get()); if ((m == null) || (m._parms == null)) return false; try { return m._parms.checksum(IGNORED_FIELDS_PARAM_HASH) == checksum; } catch (H2OConcurrentModificationException e) { // We are inspecting model parameters that doesn't belong to us - they might be modified (or deleted) while // checksum is being calculated: we skip them (see PUBDEV-5286) Log.warn("GridSearch encountered concurrent modification while searching DKV", e); return false; } catch (final RuntimeException e) { Throwable ex = e; boolean concurrentModification = false; while (ex.getCause() != null) { ex = ex.getCause(); if (ex instanceof H2OConcurrentModificationException) { concurrentModification = true; break; } } if (! concurrentModification) throw e; Log.warn("GridSearch encountered concurrent modification while searching DKV", e); return false; } } }).keys(); } /** * Defines a key for a new Grid object holding results of grid search. * * @return a grid key for a particular modeling class and frame. * @throws java.lang.IllegalArgumentException if frame is not saved to distributed store. 
*/ protected static Key<Grid> gridKeyName(String modelName, Frame fr) { if (fr == null || fr._key == null) { throw new IllegalArgumentException("The frame being grid-searched over must have a Key"); } return Key.make("Grid_" + modelName + "_" + fr._key.toString() + H2O.calcNextUniqueModelId("")); } /** * Start a new grid search job. This is the method that gets called by GridSearchHandler.do_train(). * <p> * This method launches a "classical" grid search traversing cartesian grid of parameters * point-by-point, <b>or</b> a random hyperparameter search, depending on the value of the <i>strategy</i> * parameter. * * @param destKey A key to store result of grid search under. * @param params Default parameters for model builder. This object is used to create * a specific model parameters for a combination of hyper parameters. * @param hyperParams A set of arrays of hyper parameter values, used to specify a simple * fully-filled-in grid search. * @param paramsBuilderFactory defines a strategy for creating a new model parameters based on * common parameters and list of hyper-parameters * @param parallelism Level of model-building parallelism * @return GridSearch Job, with models run with these parameters, built as needed - expected to be * an expensive operation. If the models in question are "in progress", a 2nd build will NOT be * kicked off. This is a non-blocking call. * * @deprecated Prefer use of {@link GridSearch#create(Key, HyperSpaceWalker)} */ @Deprecated public static <MP extends Model.Parameters> Job<Grid> startGridSearch( final Key<Grid> destKey, final MP params, final Map<String, Object[]> hyperParams, final ModelParametersBuilderFactory<MP> paramsBuilderFactory, final HyperSpaceSearchCriteria searchCriteria, final int parallelism) { return startGridSearch( null, destKey, params, hyperParams, paramsBuilderFactory, searchCriteria, null, parallelism ); } /** * Start a new grid search job. This is the method that gets called by GridSearchHandler.do_train(). * <p> * This method launches a "classical" grid search traversing cartesian grid of parameters * point-by-point, <b>or</b> a random hyperparameter search, depending on the value of the <i>strategy</i> * parameter. * * @param destKey A key to store result of grid search under. * @param params Default parameters for model builder. This object is used to create * a specific model parameters for a combination of hyper parameters. * @param hyperParams A set of arrays of hyper parameter values, used to specify a simple * fully-filled-in grid search. * @param paramsBuilderFactory defines a strategy for creating a new model parameters based on * common parameters and list of hyper-parameters * @param recovery Defines recovery strategy for when the cluster crashes while grid is * training models. * @param parallelism Level of model-building parallelism * @return GridSearch Job, with models run with these parameters, built as needed - expected to be * an expensive operation. If the models in question are "in progress", a 2nd build will NOT be * kicked off. This is a non-blocking call. 
* * @deprecated Prefer use of {@link GridSearch#create(Key, HyperSpaceWalker)} */ @Deprecated public static <MP extends Model.Parameters> Job<Grid> startGridSearch( final Key<Job> jobKey, final Key<Grid> destKey, final MP params, final Map<String, Object[]> hyperParams, final ModelParametersBuilderFactory<MP> paramsBuilderFactory, final HyperSpaceSearchCriteria searchCriteria, final Recovery<Grid> recovery, final int parallelism) { return startGridSearch( jobKey, destKey, BaseWalker.WalkerFactory.create(params, hyperParams, paramsBuilderFactory, searchCriteria), recovery, parallelism ); } /** * Start a new sequential grid search job. * * <p>This method launches "classical" grid search traversing cartesian grid of parameters * point-by-point. For more advanced hyperparameter search behavior call the referenced method. * * @param destKey A key to store result of grid search under. * @param params Default parameters for model builder. This object is used to create a * specific model parameters for a combination of hyper parameters. * @param hyperParams A set of arrays of hyper parameter values, used to specify a simple * fully-filled-in grid search. * @return GridSearch Job, with models run with these parameters, built as needed - expected to be * an expensive operation. If the models in question are "in progress", a 2nd build will NOT be * kicked off. This is a non-blocking call. * * @deprecated Prefer use of {@link GridSearch#create(Key, Model.Parameters, Map)})} */ @Deprecated public static <MP extends Model.Parameters> Job<Grid> startGridSearch( final Key<Grid> destKey, final MP params, final Map<String, Object[]> hyperParams) { return GridSearch.create(destKey, params, hyperParams).start(); } /** * Start a new grid search job. * * <p>This method launches "classical" grid search traversing cartesian grid of parameters * point-by-point. For more advanced hyperparameter search behavior call the referenced method. * * @param destKey A key to store result of grid search under. * @param params Default parameters for model builder. This object is used to create a * specific model parameters for a combination of hyper parameters. * @param hyperParams A set of arrays of hyper parameter values, used to specify a simple * fully-filled-in grid search. * @param parallelism Level of parallelism during the process of building this grid's models * @return GridSearch Job, with models run with these parameters, built as needed - expected to be * an expensive operation. If the models in question are "in progress", a 2nd build will NOT be * kicked off. This is a non-blocking call. * * @deprecated Prefer use of {@link GridSearch#create(Key, Model.Parameters, Map)})} */ @Deprecated public static <MP extends Model.Parameters> Job<Grid> startGridSearch( final Key<Grid> destKey, final MP params, final Map<String, Object[]> hyperParams, final int parallelism ) { return GridSearch.create(destKey, params, hyperParams) .withParallelism(parallelism) .start(); } /** * Start a new grid search job. <p> This method launches any grid search traversing space of hyper * parameters based on specified strategy. * * @param destKey A key to store result of grid search under. * @param hyperSpaceWalker Defines a strategy for traversing a hyper space. The object itself * holds definition of hyper space. * @param parallelism Level of parallelism during the process of building of the grid models * @return GridSearch Job, with models run with these parameters, built as needed - expected to be * an expensive operation. 
If the models in question are "in progress", a 2nd build will NOT be * kicked off. This is a non-blocking call. * * @deprecated Prefer use of {@link GridSearch#create(Key, HyperSpaceWalker)} */ @Deprecated public static <MP extends Model.Parameters> Job<Grid> startGridSearch( final Key<Grid> destKey, final HyperSpaceWalker<MP, ?> hyperSpaceWalker, final int parallelism ) { return GridSearch.create(destKey, hyperSpaceWalker) .withParallelism(parallelism) .start(); } /** * Start a new grid search job. <p> This method launches any grid search traversing space of hyper * parameters based on specified strategy. * * @param destKey A key to store result of grid search under. * @param hyperSpaceWalker Defines a strategy for traversing a hyper space. The object itself * holds definition of hyper space. * @param recovery Defines recovery strategy for when the cluster crashes while grid is * training models. * @param parallelism Level of parallelism during the process of building of the grid models * @return GridSearch Job, with models run with these parameters, built as needed - expected to be * an expensive operation. If the models in question are "in progress", a 2nd build will NOT be * kicked off. This is a non-blocking call. * * @deprecated Prefer use of {@link GridSearch#create(Key, HyperSpaceWalker)} */ @Deprecated public static <MP extends Model.Parameters> Job<Grid> startGridSearch( final Key<Job> jobKey, final Key<Grid> destKey, final HyperSpaceWalker<MP, ?> hyperSpaceWalker, final Recovery<Grid> recovery, final int parallelism ) { return GridSearch.create(destKey, hyperSpaceWalker) .withParallelism(parallelism) .withRecoverableJob(jobKey, recovery) .start(); } public static <MP extends Model.Parameters> Job<Grid> resumeGridSearch( final Key<Job> jobKey, final Grid<MP> grid, final ModelParametersBuilderFactory<MP> paramsBuilderFactory, final Recovery<Grid> recovery ) { return startGridSearch( jobKey, ((Grid) grid)._key, grid.getParams(), grid.getHyperParams(), paramsBuilderFactory, grid.getSearchCriteria(), recovery, grid.getParallelism() ); } /** * The factory is producing a parameters builder which uses reflection to setup field values. * * @param <MP> type of model parameters object */ public static class SimpleParametersBuilderFactory<MP extends Model.Parameters> implements ModelParametersBuilderFactory<MP> { @Override public ModelParametersBuilder<MP> get(MP initialParams) { return new SimpleParamsBuilder<>(initialParams); } @Override public PojoUtils.FieldNaming getFieldNamingStrategy() { return PojoUtils.FieldNaming.CONSISTENT; } /** * The builder modifies initial model parameters directly by reflection. * * Usage: * <pre>{@code * GBMModel.GBMParameters params = * new SimpleParamsBuilder(initialParams) * .set("_ntrees", 30).set("_learn_rate", 0.01).build() * }</pre> * * @param <MP> type of model parameters object */ public static class SimpleParamsBuilder<MP extends Model.Parameters> implements ModelParametersBuilder<MP> { final private MP params; public SimpleParamsBuilder(MP initialParams) { params = initialParams; } @Override public ModelParametersBuilder<MP> set(String name, Object value) { PojoUtils.setField(params, name, value, PojoUtils.FieldNaming.CONSISTENT); return this; } @Override public MP build() { return params; } } } /** * Constant for adaptive parallelism level - number of models built in parallel is decided by H2O. 
*/ public static final int ADAPTIVE_PARALLELISM_LEVEL = 0; public static final int SEQUENTIAL_MODEL_BUILDING = 1; /** * Converts user-given number representing parallelism level and regime to a final number, representing the number of models * built in parallel. * * @param parallelism User-desired parallelism, the frontend/client API representation. * @return An integer >= 1, representing the final number of models to be built in parallel. 1 effectively means sequential * (no parallelism). */ public static int getParallelismLevel(final int parallelism) { if (parallelism < 0) { throw new IllegalArgumentException(String.format("Grid search parallelism level must be >= 0. Give value is '%d'.", parallelism)); } if (parallelism == 0) { return getAdaptiveParallelism(); } else { return parallelism; } } /** * @return An integer with dynamically calculated level of parallelism based on Cluster's properties. */ public static int getAdaptiveParallelism() { return 2 * H2O.NUMCPUS; } }
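The non-deprecated entry point to the class above is the Builder returned by GridSearch.create(...). A minimal usage sketch follows, assuming a parsed Frame fr is already in the DKV and that GBM (which lives in h2o-algos, not h2o-core) is on the classpath; the hyperparameter names and values are the same illustrative ones used in the class javadoc.

// Base model parameters shared by every point in the hyper space.
GBMModel.GBMParameters params = new GBMModel.GBMParameters();
params._train = fr._key;                         // fr: assumed, already-parsed Frame
params._response_column = "cylinders";

// Hyperparameter lists to walk; keys must match field names of the Model.Parameters subclass.
Map<String, Object[]> hyperParms = new HashMap<>();
hyperParms.put("_ntrees", new Integer[]{1, 2});
hyperParms.put("_max_depth", new Integer[]{1, 2, 5});
hyperParms.put("_learn_rate", new Float[]{0.01f, 0.1f, 0.3f});

// A null destination key would let the Builder derive one via gridKeyName(algoName, trainFrame).
Key<Grid> destKey = Key.make("gbm_grid_example");

Job<Grid> gridJob = GridSearch.create(destKey, params, hyperParms)
    .withParallelism(GridSearch.SEQUENTIAL_MODEL_BUILDING)  // or getParallelismLevel(0) for adaptive parallelism
    .start();

Grid grid = gridJob.get();            // blocks until the grid search job completes
Model[] models = grid.getModels();    // only the models that built successfully

The deprecated startGridSearch(...) overloads above route through this same Builder, so new callers can go straight to create(...).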
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/grid/HyperParameters.java
package hex.grid; import water.AutoBuffer; import water.H2O; import water.Iced; import java.util.Map; import java.util.TreeMap; /** * Wrapper class to make hyper-parameters serializable with Icer */ public class HyperParameters extends Iced<HyperParameters> { private transient Map<String, Object[]> values; public HyperParameters(Map<String, Object[]> values) { this.values = values; } public Map<String, Object[]> getValues() { return values; } public final AutoBuffer write_impl(AutoBuffer ab) { writeHyperParamsMap(ab, values); return ab; } private void writeHyperParamsMap(AutoBuffer ab, Map<String, Object[]> params) { ab.putInt(params.keySet().size()); for (String key : params.keySet()) { ab.putStr(key); Object[] vals = params.get(key); if (vals.length > 0 && vals[0] instanceof Map) { ab.putInt(vals.length); for (int j = 0; j < vals.length; j++) { writeHyperParamsMap(ab, (Map<String, Object[]>) vals[j]); } } else { ab.putInt(-1); ab.putASer(vals); } } } public final HyperParameters read_impl(AutoBuffer ab) { return new HyperParameters(readHyperParamsMap(ab)); } private Map<String, Object[]> readHyperParamsMap(AutoBuffer ab) { Map<String, Object[]> map = new TreeMap<>(); int len = ab.getInt(); for (int i = 0; i < len; i++) { String key = ab.getStr(); int subMapsCount = ab.getInt(); Object[] vals; if (subMapsCount >= 0) { vals = new Object[subMapsCount]; for (int j = 0; j < subMapsCount; j++) { vals[j] = readHyperParamsMap(ab); } } else { vals = ab.getASer(Object.class); } map.put(key, vals); } return map; } public final AutoBuffer writeJSON_impl(AutoBuffer ab) { throw H2O.unimpl(); } public final HyperParameters readJSON_impl(AutoBuffer ab) { throw H2O.unimpl(); } }
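A short sketch of the kind of map this wrapper is meant to carry. The nested "subspaces" entry follows the convention used by HyperSpaceWalker, the AutoBuffer read/write methods are driven by the Icer machinery rather than called directly, and the parameter names here are only placeholders.

// Plain value arrays are serialized via putASer; an array whose elements are Maps
// (such as the "subspaces" entry) triggers the recursive writeHyperParamsMap branch instead.
Map<String, Object[]> subspace = new TreeMap<>();
subspace.put("_ntrees", new Integer[]{50, 100});

Map<String, Object[]> hyperParams = new TreeMap<>();
hyperParams.put("_max_depth", new Integer[]{3, 5, 7});
hyperParams.put("subspaces", new Map[]{subspace});

HyperParameters wrapped = new HyperParameters(hyperParams);
Map<String, Object[]> values = wrapped.getValues();   // returns the wrapped map unchanged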
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/grid/HyperSpaceSearchCriteria.java
package hex.grid; import hex.ScoreKeeper.StoppingMetric; import water.Iced; import water.exceptions.H2OIllegalArgumentException; import water.fvec.Frame; /** * Search criteria for a hyperparameter search including directives for how to search and * when to stop the search. */ public class HyperSpaceSearchCriteria extends Iced { public enum Strategy { Unknown, Cartesian, RandomDiscrete, Sequential }; // search strategy public static HyperSpaceSearchCriteria make(Strategy strategy) { switch (strategy) { case Cartesian: return new CartesianSearchCriteria(); case RandomDiscrete: return new RandomDiscreteValueSearchCriteria(); case Sequential: return new SequentialSearchCriteria(); default: throw new H2OIllegalArgumentException("strategy", strategy.toString()); } } public static class StoppingCriteria extends Iced { public static class Builder { private StoppingCriteria _criteria = new StoppingCriteria(); public Builder maxModels(int maxModels) { _criteria._max_models = maxModels; return this; } public Builder maxRuntimeSecs(double maxRuntimeSecs) { _criteria._max_runtime_secs = maxRuntimeSecs; return this; } public Builder stoppingMetric(StoppingMetric stoppingMetric) { _criteria._stopping_metric = stoppingMetric; return this; } public Builder stoppingRounds(int stoppingRounds) { _criteria._stopping_rounds = stoppingRounds; return this; } public Builder stoppingTolerance(double stoppingTolerance) { _criteria._stopping_tolerance = stoppingTolerance; return this; } public StoppingCriteria build() { return _criteria; } } public static Builder create() { return new Builder(); } // keeping those private fields in snake-case so that they can be set from Schema through reflection private int _max_models = 0; // no limit private double _max_runtime_secs = 0; // no time limit private StoppingMetric _stopping_metric = StoppingMetric.AUTO; private int _stopping_rounds = 0; private double _stopping_tolerance = 1e-3; // = Model.Parameters.defaultStoppingTolerance() public StoppingCriteria() {} public int getMaxModels() { return _max_models; } public double getMaxRuntimeSecs() { return _max_runtime_secs; } public int getStoppingRounds() { return _stopping_rounds; } public StoppingMetric getStoppingMetric() { return _stopping_metric; } public double getStoppingTolerance() { return _stopping_tolerance; } } public final Strategy _strategy; public final Strategy strategy() { return _strategy; } public StoppingCriteria stoppingCriteria() { return null; } public HyperSpaceSearchCriteria(Strategy strategy) { this._strategy = strategy; } /** * Search criteria for an exhaustive Cartesian hyperparameter search. */ public static final class CartesianSearchCriteria extends HyperSpaceSearchCriteria { public CartesianSearchCriteria() { super(Strategy.Cartesian); } } /** * Search criteria for a hyperparameter search including directives for how to search and * when to stop the search. * <p> * NOTE: client ought to call set_default_stopping_tolerance_for_frame(Frame) to get a reasonable stopping tolerance, especially for small N. */ public static final class RandomDiscreteValueSearchCriteria extends HyperSpaceSearchCriteria { private long _seed = -1; // -1 means true random private StoppingCriteria _stoppingCriteria; public RandomDiscreteValueSearchCriteria() { super(Strategy.RandomDiscrete); _stoppingCriteria = new StoppingCriteria(); } @Override public StoppingCriteria stoppingCriteria() { return _stoppingCriteria; } /** Seed for the random choices of hyperparameter values. 
Set to a value other than -1 to get a repeatable pseudorandom sequence. */ public long seed() { return _seed; } /** Max number of models to build. */ public int max_models() { return _stoppingCriteria._max_models; } /** * Max runtime for the entire grid, in seconds. Set to 0 to disable. Can be combined with <i>max_runtime_secs</i> in the model parameters. If * <i>max_runtime_secs</i> is not set in the model parameters then each model build is launched with a limit equal to * the remainder of the grid time. If <i>max_runtime_secs</i> <b>is</b> set in the mode parameters each build is launched * with a limit equal to the minimum of the model time limit and the remaining time for the grid. */ public double max_runtime_secs() { return _stoppingCriteria._max_runtime_secs; } /** * Early stopping based on convergence of stopping_metric. * Stop if simple moving average of the stopping_metric does not improve by stopping_tolerance for * k scoring events. * Can only trigger after at least 2k scoring events. Use 0 to disable. */ public int stopping_rounds() { return _stoppingCriteria._stopping_rounds; } /** Metric to use for convergence checking; only for _stopping_rounds > 0 */ public StoppingMetric stopping_metric() { return _stoppingCriteria._stopping_metric; } /** Relative tolerance for metric-based stopping criterion: stop if relative improvement is not at least this much. */ public double stopping_tolerance() { return _stoppingCriteria._stopping_tolerance; } /** Calculate a reasonable stopping tolerance for the Frame. * Currently uses only the NA percentage and nrows, but later * can take into account the response distribution, response variance, etc. * <p> * <pre>1/Math.sqrt((1 - frame.naFraction()) * frame.numRows())</pre> */ public static double default_stopping_tolerance_for_frame(Frame frame) { return Math.min(0.05, Math.max(0.001, 1/Math.sqrt((1 - frame.naFraction()) * frame.numRows()))); } public void set_default_stopping_tolerance_for_frame(Frame frame) { set_stopping_tolerance(default_stopping_tolerance_for_frame(frame)); } public void set_seed(long seed) { this._seed = seed; } public void set_max_models(int max_models) { cloneStoppingCriteria()._max_models = max_models; } public void set_max_runtime_secs(double max_runtime_secs) { cloneStoppingCriteria()._max_runtime_secs = max_runtime_secs; } public void set_stopping_rounds(int stopping_rounds) { cloneStoppingCriteria()._stopping_rounds = stopping_rounds; } public void set_stopping_metric(StoppingMetric stopping_metric) { cloneStoppingCriteria()._stopping_metric = stopping_metric; } public void set_stopping_tolerance(double stopping_tolerance) { cloneStoppingCriteria()._stopping_tolerance = stopping_tolerance; } private StoppingCriteria cloneStoppingCriteria() { _stoppingCriteria = (StoppingCriteria) _stoppingCriteria.clone(); return _stoppingCriteria; } } public static final class SequentialSearchCriteria extends HyperSpaceSearchCriteria { private StoppingCriteria _stoppingCriteria; private boolean _early_stopping; public SequentialSearchCriteria() { this(new StoppingCriteria(), true); } public SequentialSearchCriteria(StoppingCriteria stoppingCriteria) { this(stoppingCriteria, true); } public SequentialSearchCriteria(StoppingCriteria stoppingCriteria, boolean earlyStopping) { super(Strategy.Sequential); _stoppingCriteria = stoppingCriteria; _early_stopping = earlyStopping; } public boolean earlyStoppingEnabled() { return _early_stopping; } @Override public StoppingCriteria stoppingCriteria() { return _stoppingCriteria; } } }
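For reference, a sketch of how these criteria are typically assembled; it assumes the nested classes and StoppingMetric are imported, trainFrame is an already-loaded Frame, and the numeric values are placeholders rather than recommendations.

// Random search: each set_* call clones the internal StoppingCriteria before mutating it.
RandomDiscreteValueSearchCriteria random = new RandomDiscreteValueSearchCriteria();
random.set_seed(42);                          // any value other than -1 makes the walk repeatable
random.set_max_models(20);
random.set_max_runtime_secs(3600);
random.set_stopping_rounds(3);
random.set_stopping_metric(StoppingMetric.AUTO);
random.set_default_stopping_tolerance_for_frame(trainFrame);  // min(0.05, max(0.001, 1/sqrt((1 - naFraction) * nrows)))

// Sequential search: stopping criteria are supplied up front via the fluent StoppingCriteria.Builder.
StoppingCriteria stopping = StoppingCriteria.create()
    .maxModels(10)
    .maxRuntimeSecs(600)
    .stoppingRounds(2)
    .stoppingMetric(StoppingMetric.AUTO)
    .stoppingTolerance(1e-3)
    .build();
SequentialSearchCriteria sequential = new SequentialSearchCriteria(stopping, true /* early stopping enabled */);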
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/grid/HyperSpaceWalker.java
package hex.grid; import hex.Model; import hex.ModelParametersBuilderFactory; import hex.ScoreKeeper; import hex.ScoringInfo; import hex.grid.HyperSpaceSearchCriteria.CartesianSearchCriteria; import hex.grid.HyperSpaceSearchCriteria.RandomDiscreteValueSearchCriteria; import hex.grid.HyperSpaceSearchCriteria.Strategy; import water.exceptions.H2OIllegalArgumentException; import water.util.ArrayUtils; import water.util.PojoUtils; import java.util.*; import java.util.function.Consumer; import java.util.stream.Stream; import static hex.grid.HyperSpaceWalker.BaseWalker.SUBSPACES; public interface HyperSpaceWalker<MP extends Model.Parameters, C extends HyperSpaceSearchCriteria> { interface HyperSpaceIterator<MP extends Model.Parameters> { /** * Get next model parameters. * * <p>It should return model parameters for next point in hyper space. * Throws {@link java.util.NoSuchElementException} if there is no remaining point in space * to explore.</p> * * <p>The method can optimize based on previousModel, but should be * able to handle null-value.</p> * * @return model parameters for next point in hyper space or null if there is no such point. * * @throws IllegalArgumentException when model parameters cannot be constructed * @throws java.util.NoSuchElementException if the iteration has no more elements */ MP nextModelParameters(); /** * Returns true if the iterator can continue. Takes into account strategy-specific stopping criteria, if any. * @return true if the iterator can produce one more model parameters configuration. */ boolean hasNext(); /** * Inform the Iterator that a model build failed in case it needs to adjust its internal state. * Implementations are expected to consume the {@code withFailedModelHyperParams} callback with the hyperParams used to create the failed model. * @param failedModel: the model whose training failed. * @param withFailedModelHyperParams: consumes the "raw" hyperparameters values used for the failed model. */ void onModelFailure(Model failedModel, Consumer<Object[]> withFailedModelHyperParams); } // interface HyperSpaceIterator /** * Search criteria for the hyperparameter search including directives for how to search and * when to stop the search. */ C search_criteria(); /** Based on the last model, the given array of ScoringInfo, and our stopping criteria should we stop early? */ boolean stopEarly(Model model, ScoringInfo[] sk); /** * Returns an iterator to traverse this hyper-space. * * @return an iterator */ HyperSpaceIterator<MP> iterator(); /** * Returns hyper parameters names which are used for walking the hyper parameters space. * * The names have to match the names of attributes in model parameters MP. * * @return names of used hyper parameters */ String[] getHyperParamNames(); String[] getAllHyperParamNamesInSubspaces(); default String[] getAllHyperParamNames() { String[] hyperNames = getHyperParamNames(); String[] allHyperNames = hyperNames; String[] hyperParamNamesSubspace = getAllHyperParamNamesInSubspaces(); if (hyperParamNamesSubspace.length > 0) { allHyperNames = ArrayUtils.append(ArrayUtils.remove(hyperNames, SUBSPACES), hyperParamNamesSubspace); } return allHyperNames; } Map<String, Object[]> getHyperParams(); /** * Return estimated maximum size of hyperspace, not subject to any early stopping criteria. * * Can return -1 if estimate is not available. * * @return size of hyper space to explore */ long getMaxHyperSpaceSize(); /** * Return initial model parameters for search. 
* @return return model parameters */ MP getParams(); /** * Return estimated grid work. * Can return Long.MAX_VALUE if no estimate is available. * @param maxModels * @return estimate of grid work */ default long estimateGridWork(long maxModels) { HyperSpaceWalker.HyperSpaceIterator<MP> it = iterator(); long gridWork = 0; // if total grid space is known, walk it all and count up models to be built (not subject to time-based or converge-based early stopping) // skip it if no model limit it specified as the entire hyperspace can be extremely large. if (getMaxHyperSpaceSize() > 0 && maxModels > 0) { while (it.hasNext()) { try { Model.Parameters parms = it.nextModelParameters(); gridWork += (parms._nfolds > 0 ? (parms._nfolds + 1/*main model*/) : 1) * parms.progressUnits(); } catch (Throwable ex) { //swallow invalid combinations } } } else { gridWork = Long.MAX_VALUE; } return gridWork; } ModelParametersBuilderFactory<MP> getParametersBuilderFactory(); /** * Superclass for for all hyperparameter space walkers. * <p> * The external Grid / Hyperparameter search API uses a HashMap<String,Object> to describe a set of hyperparameter * values, where the String is a valid field name in the corresponding Model.Parameter, and the Object is * the field value (boxed as needed). */ abstract class BaseWalker<MP extends Model.Parameters, C extends HyperSpaceSearchCriteria> implements HyperSpaceWalker<MP, C> { /** * @see #search_criteria() */ final protected C _search_criteria; public static final String SUBSPACES = "subspaces"; /** * Search criteria for the hyperparameter search including directives for how to search and * when to stop the search. */ public C search_criteria() { return _search_criteria; } /** Based on the last model, the given array of ScoringInfo, and our stopping criteria should we stop early? */ @Override public boolean stopEarly(Model model, ScoringInfo[] sk) { return false; } /** * Parameters builder factory to create new instance of parameters. */ final transient ModelParametersBuilderFactory<MP> _paramsBuilderFactory; /** * Used "base" model parameters for this grid search. * The object is used as a prototype to create model parameters * for each point in hyper space. */ final MP _params; final MP _defaultParams; /** * Hyper space description - in this case only dimension and possible values. */ final protected Map<String, Object[]> _hyperParams; long model_number = 0l; // denote model number /** * Cached names of used hyper parameters. */ final protected String[] _hyperParamNames; final protected String[] _hyperParamNamesSubspace; // model parameters specified in subspaces of hyper parameters protected Map<String, Object[]>[] _hyperParamSubspaces; /** * Compute max size of hyper space to walk. May include duplicates if points in space are specified multiple * times. */ final protected long _maxHyperSpaceSize; /** * Java hackery so we can have a factory method on a class with type params. */ public static class WalkerFactory<MP extends Model.Parameters, C extends HyperSpaceSearchCriteria> { /** * Factory method to create an instance based on the given HyperSpaceSearchCriteria instance. */ public static <MP extends Model.Parameters, C extends HyperSpaceSearchCriteria> HyperSpaceWalker<MP, ? 
extends HyperSpaceSearchCriteria> create(MP params, Map<String, Object[]> hyperParams, ModelParametersBuilderFactory<MP> paramsBuilderFactory, C search_criteria) { Strategy strategy = search_criteria.strategy(); switch (strategy) { case Cartesian: return new HyperSpaceWalker.CartesianWalker<>(params, hyperParams, paramsBuilderFactory, (CartesianSearchCriteria) search_criteria); case RandomDiscrete: return new HyperSpaceWalker.RandomDiscreteValueWalker<>(params, hyperParams, paramsBuilderFactory, (RandomDiscreteValueSearchCriteria) search_criteria); case Sequential: return new SequentialWalker<>(params, hyperParams, paramsBuilderFactory, (HyperSpaceSearchCriteria.SequentialSearchCriteria) search_criteria); default: throw new H2OIllegalArgumentException("strategy", "GridSearch", strategy); } } } /** * * @param paramsBuilderFactory * @param hyperParams */ public BaseWalker(MP params, Map<String, Object[]> hyperParams, ModelParametersBuilderFactory<MP> paramsBuilderFactory, C search_criteria) { _params = params; _hyperParams = hyperParams; _paramsBuilderFactory = paramsBuilderFactory; _hyperParamNames = hyperParams.keySet().toArray(new String[0]); _hyperParamSubspaces = extractSubspaces(); _hyperParamNamesSubspace = extractSubspaceNames(); _hyperParams.remove(SUBSPACES); _search_criteria = search_criteria; _maxHyperSpaceSize = computeMaxSizeOfHyperSpace(); // Sanity check the hyperParams map, and check it against the params object try { _defaultParams = (MP) params.getClass().newInstance(); } catch (Exception e) { throw new H2OIllegalArgumentException("Failed to instantiate a new Model.Parameters object to get the default values."); } validateParams(_hyperParams, false); Arrays.stream(_hyperParamSubspaces).forEach(subspace -> validateParams(subspace, true)); } // BaseWalker() @Override public String[] getHyperParamNames() { return _hyperParamNames; } public String[] getAllHyperParamNamesInSubspaces() { return _hyperParamNamesSubspace; } @Override public Map<String, Object[]> getHyperParams() { return _hyperParams; } @Override public long getMaxHyperSpaceSize() { return _maxHyperSpaceSize; } @Override public MP getParams() { return _params; } @Override public ModelParametersBuilderFactory<MP> getParametersBuilderFactory() { return _paramsBuilderFactory; } private Map<String, Object[]>[] extractSubspaces() { if(!_hyperParams.containsKey(SUBSPACES)) { return new Map[0]; } return (Map<String, Object[]>[])_hyperParams.get(SUBSPACES); } private String[] extractSubspaceNames() { return Stream.of(_hyperParamSubspaces) .flatMap(m -> m.keySet().stream()) .toArray(String[]::new); } protected MP getModelParams(MP params, Object[] hyperParams, String[] hyperParamNames) { ModelParametersBuilderFactory.ModelParametersBuilder<MP> paramsBuilder = _paramsBuilderFactory.get(params); for (int i = 0; i < hyperParamNames.length; i++) { String paramName = hyperParamNames[i]; Object paramValue = hyperParams[i]; if (paramName.equals("valid")) { // change paramValue to key<Frame> for validation_frame paramName = "validation_frame"; // @#$, paramsSchema is still using validation_frame and training_frame } paramsBuilder.set(paramName, paramValue); } return paramsBuilder.build(); } protected long computeMaxSizeOfHyperSpace() { long work = 0; long free_param_combos = 1; for (Map<String, Object[]> subspace : _hyperParamSubspaces) { long subspace_param_combos = 1; for (Object[] o : subspace.values()) { subspace_param_combos *= o.length; } work += subspace_param_combos; } // work will be zero if there is no subspaces in hyper 
parameters for (Object[] p : _hyperParams.values()) { free_param_combos *= p.length; } work = work == 0 ? free_param_combos : free_param_combos * work; return work; } protected Map<String, Object[]> mergeHashMaps(Map<String, Object[]> hyperparams, Map<String, Object[]> subspace) { if(subspace == null) { return hyperparams; } Map<String, Object[]> m = new HashMap<>(); m.putAll(hyperparams); m.putAll(subspace); return m; } /** Given a list of indices for the hyperparameter values return an Object[] of the actual values. */ protected Object[] hypers(Map<String, Object[]> hyperParams, String[] hyperParamNames, int[] hidx) { Object[] hypers = new Object[hyperParamNames.length]; for (int i = 0; i < hidx.length; i++) { hypers[i] = hyperParams.get(hyperParamNames[i])[hidx[i]]; } return hypers; } protected int integerHash(Map<String, Object[]> hyperParams, String[] hyperParamNames, int[] ar, int subspaceNum) { Integer[] hashMe = new Integer[ar.length + 1]; for (int i = 0; i < ar.length; i++) hashMe[i] = ar[i] * hyperParams.get(hyperParamNames[i]).length; hashMe[ar.length] = subspaceNum; return Arrays.deepHashCode(hashMe); } private void validateParams(Map<String, Object[]> params, boolean isSubspace) { // if a parameter is specified in both model parameter and hyper-parameter, this is only allowed if the // parameter value is set to be default. Otherwise, an exception will be thrown. for (String key : params.keySet()) { // Throw if the user passed an empty value list: Object[] values = params.get(key); if (0 == values.length) throw new H2OIllegalArgumentException("Grid search hyperparameter value list is empty for hyperparameter: " + key); if ("seed".equals(key) || "_seed".equals(key)) continue; // initialized to the wall clock if (isSubspace && _hyperParams.containsKey(key)) { throw new H2OIllegalArgumentException("Grid search model parameter '" + key + "' is set in " + "both the subspaces and in the hyperparameters map. This is ambiguous; set it in one place" + " or the other, not both."); } validateParamVals(key); // Ugh. Java callers, like the JUnits or Sparkling Water users, use a leading _. REST users don't. } } private void validateParamVals(String key) { String prefix = (key.startsWith("_") ? "" : "_"); // Throw if params has a non-default value which is not in the hyperParams map Object defaultVal = PojoUtils.getFieldValue(_defaultParams, prefix + key, PojoUtils.FieldNaming.CONSISTENT); Object actualVal = PojoUtils.getFieldValue(_params, prefix + key, PojoUtils.FieldNaming.CONSISTENT); if (defaultVal != null && actualVal != null) { // both are not set to null if (defaultVal.getClass().isArray() && // array !PojoUtils.arraysEquals(defaultVal, actualVal)) { throw new H2OIllegalArgumentException("Grid search model parameter '" + key + "' is set in both the model parameters and in the hyperparameters map. This is ambiguous; set it in one place or the other, not both."); } // array if (!defaultVal.getClass().isArray() && // ! array !defaultVal.equals(actualVal)) { throw new H2OIllegalArgumentException("Grid search model parameter '" + key + "' is set in both the model parameters and in the hyperparameters map. This is ambiguous; set it in one place or the other, not both."); } // ! 
array } // both are set: defaultVal != null && actualVal != null // defaultVal is null but actualVal is not, raise exception if (defaultVal == null && !(actualVal == null)) { // only actual is set throw new H2OIllegalArgumentException("Grid search model parameter '" + key + "' is set in both the model parameters and in the hyperparameters map. This is ambiguous; set it in one place or the other, not both."); } } } /** * Hyperparameter space walker which visits each combination of hyperparameters in order. */ class CartesianWalker<MP extends Model.Parameters> extends BaseWalker<MP, CartesianSearchCriteria> { public CartesianWalker(MP params, Map<String, Object[]> hyperParams, ModelParametersBuilderFactory<MP> paramsBuilderFactory, CartesianSearchCriteria search_criteria) { super(params, hyperParams, paramsBuilderFactory, search_criteria); } @Override public HyperSpaceIterator<MP> iterator() { return new HyperSpaceIterator<MP>() { /** Hyper params permutation. */ private int[] _currentHyperparamIndices = null; private int _currentSubspace = _hyperParamSubspaces.length == 0 ? -1 : 0; private Map<String, Object[]> _currentHyperParams = _hyperParamSubspaces.length == 0 ? _hyperParams : mergeHashMaps(_hyperParams, _hyperParamSubspaces[0]); private String[] _currentHyperParamNames = _currentHyperParams.keySet().toArray(new String[0]); @Override public MP nextModelParameters() { _currentHyperparamIndices = _currentHyperparamIndices == null ? new int[_currentHyperParamNames.length] : nextModelIndices(_currentHyperparamIndices); if(_currentSubspace < _hyperParamSubspaces.length - 1 && _currentHyperparamIndices == null) { // getting to next subspaces here _currentHyperParams = mergeHashMaps(_hyperParams, _hyperParamSubspaces[++_currentSubspace]); _currentHyperParamNames = _currentHyperParams.keySet().toArray(new String[0]); _currentHyperparamIndices = new int[_currentHyperParamNames.length]; } if (_currentHyperparamIndices != null) { // Fill array of hyper-values Object[] hypers = hypers(_currentHyperParams, _currentHyperParamNames, _currentHyperparamIndices); // Get clone of parameters MP commonModelParams = (MP) _params.clone(); // Fill model parameters MP params = getModelParams(commonModelParams, hypers, _currentHyperParamNames); return params; } else { throw new NoSuchElementException("No more elements to explore in hyper-space!"); } } @Override public boolean hasNext() { // Checks to see that there is another valid combination of hyper parameters left in the hyperspace. if (_currentHyperparamIndices != null) { int[] hyperParamIndicesCopy = new int[_currentHyperparamIndices.length]; System.arraycopy(_currentHyperparamIndices, 0, hyperParamIndicesCopy, 0, _currentHyperparamIndices.length); if (nextModelIndices(hyperParamIndicesCopy) == null) { if(_currentSubspace == _hyperParamSubspaces.length - 1) { return false; } } } return true; } @Override public void onModelFailure(Model failedModel, Consumer<Object[]> withFailedModelHyperParams) { // FIXME: when using parallel grid search, there's no good reason to think that the current hyperparam indices where the ones used for the failed model withFailedModelHyperParams.accept(hypers(_currentHyperParams, _currentHyperParamNames, _currentHyperparamIndices)); } /** * Cartesian iteration over the hyper-parameter space, varying one hyperparameter at a * time. Mutates the indices that are passed in and returns them. Returns NULL when * the entire space has been traversed. 
*/ private int[] nextModelIndices(int[] hyperparamIndices) { // Find the next parm to flip int i; for (i = 0; i < hyperparamIndices.length; i++) { if (hyperparamIndices[i] + 1 < _currentHyperParams.get(_currentHyperParamNames[i]).length) { break; } } if (i == hyperparamIndices.length) { return null; // All done, report null } // Flip indices for (int j = 0; j < i; j++) { hyperparamIndices[j] = 0; } hyperparamIndices[i]++; return hyperparamIndices; } }; // anonymous HyperSpaceIterator class } // iterator() } // class CartesianWalker /** * Hyperparameter space walker which visits random combinations of hyperparameters whose possible values are * given in explicit lists as they are with CartesianWalker. */ class RandomDiscreteValueWalker<MP extends Model.Parameters> extends BaseWalker<MP, RandomDiscreteValueSearchCriteria> { // Used by HyperSpaceIterator.nextModelIndices to ensure that the space is explored enough before giving up private static final double MIN_NUMBER_OF_SAMPLES = 1e4; private Random _random; private boolean _set_model_seed_from_search_seed; // true if model parameter seed is set to default value and false otherwise public RandomDiscreteValueWalker(MP params, Map<String, Object[]> hyperParams, ModelParametersBuilderFactory<MP> paramsBuilderFactory, RandomDiscreteValueSearchCriteria search_criteria) { super(params, hyperParams, paramsBuilderFactory, search_criteria); // seed the models using the search seed if it is the only one specified long defaultSeed = _defaultParams._seed; long actualSeed = _params._seed; long gridSeed = search_criteria.seed(); _set_model_seed_from_search_seed = defaultSeed == actualSeed && defaultSeed != gridSeed; _random = gridSeed == defaultSeed ? new Random() : new Random(gridSeed); } /** Based on the last model, the given array of ScoringInfo, and our stopping criteria should we stop early? */ @Override public boolean stopEarly(Model model, ScoringInfo[] sk) { return ScoreKeeper.stopEarly(ScoringInfo.scoreKeepers(sk), search_criteria().stopping_rounds(), ScoreKeeper.ProblemType.forSupervised(model._output.isClassifier()), search_criteria().stopping_metric(), search_criteria().stopping_tolerance(), "grid's best", true); } @Override public HyperSpaceIterator<MP> iterator() { return new HyperSpaceIterator<MP>() { /** All visited hyper params permutations, including the current one. */ private final List<int[]> _visitedPermutations = new ArrayList<>(); private final Set<Integer> _visitedPermutationHashes = new LinkedHashSet<>(); // for fast dupe lookup /** Current hyper params permutation. */ private int[] _currentHyperparamIndices = null; /** One-based count of the permutations we've visited, primarily used as an index into _visitedHyperparamIndices. */ private int _currentPermutationNum = 0; private int _currentSubspace = -1; private Map<String, Object[]> _currentHyperParams = _hyperParams; private String[] _currentHyperParamNames = _hyperParamNames; private boolean _exhausted = false; // TODO: override into a common subclass: @Override public MP nextModelParameters() { // NOTE: nextModel checks _visitedHyperparamIndices and does not return a duplicate set of indices. // NOTE: in RandomDiscreteValueWalker nextModelIndices() returns a new array each time, rather than // mutating the last one. 
_currentHyperparamIndices = nextModelIndices(); if (_currentHyperparamIndices != null) { _visitedPermutations.add(_currentHyperparamIndices); _visitedPermutationHashes.add(integerHash(_currentHyperParams, _currentHyperParamNames, _currentHyperparamIndices, _currentSubspace)); _currentPermutationNum++; // NOTE: 1-based counting // Fill array of hyper-values Object[] hypers = hypers(_currentHyperParams, _currentHyperParamNames, _currentHyperparamIndices); // Get clone of parameters MP commonModelParams = (MP) _params.clone(); // Fill model parameters MP params = getModelParams(commonModelParams, hypers, _currentHyperParamNames); // add max_runtime_secs in search criteria into params if applicable if (_search_criteria != null && _search_criteria.strategy() == Strategy.RandomDiscrete) { // ToDo: model seed setting will be different for parallel model building. // ToDo: This implementation only works for sequential model building. if (_set_model_seed_from_search_seed) { // set model seed = search_criteria.seed+(0, 1, 2,..., model number) params._seed = _search_criteria.seed() + (model_number++); } } return params; } else { throw new NoSuchElementException("No more elements to explore in hyper-space!"); } } @Override public boolean hasNext() { // Note: we compare _currentPermutationNum to max_models, because it counts successfully created models, but // we compare _visitedPermutationHashes.size() to _maxHyperSpaceSize because we want to stop when we have attempted each combo. // // _currentPermutationNum is 1-based return (_visitedPermutationHashes.size() < _maxHyperSpaceSize && (search_criteria().max_models() == 0 || _currentPermutationNum < search_criteria().max_models()) && !_exhausted ); } @Override public void onModelFailure(Model failedModel, Consumer<Object[]> withFailedModelHyperParams) { // FIXME: when using parallel grid search, there's no good reason to think that the current hyperparam indices where the ones used for the failed model _currentPermutationNum--; withFailedModelHyperParams.accept(hypers(_currentHyperParams, _currentHyperParamNames, _currentHyperparamIndices)); } /** * Random iteration over the hyper-parameter space. Does not repeat * previously-visited combinations. Returns NULL when we've hit the stopping * criteria. 
*/ private int[] nextModelIndices() { int[] hyperparamIndices = new int[_currentHyperParamNames.length]; // To get a new hyper-parameter configuration: // Sample the space until a new configuration is found or stop if none was found // within max(MIN_NUMBER_OF_SAMPLES, _maxHyperSpaceSize) steps for (int j = 0; j < Math.max(MIN_NUMBER_OF_SAMPLES, _maxHyperSpaceSize); j++) { if (_hyperParamSubspaces.length != 0) { _currentSubspace = _random.nextInt(_hyperParamSubspaces.length); _currentHyperParams = mergeHashMaps(_hyperParams, _hyperParamSubspaces[_currentSubspace]); _currentHyperParamNames = _currentHyperParams.keySet().toArray(new String[0]); hyperparamIndices = new int[_currentHyperParamNames.length]; } for (int i = 0; i < _currentHyperParamNames.length; i++) { hyperparamIndices[i] = _random.nextInt(_currentHyperParams.get(_currentHyperParamNames[i]).length); } // check for aliases and loop if we've visited this combo before if (!_visitedPermutationHashes.contains(integerHash(_currentHyperParams, _currentHyperParamNames, hyperparamIndices, _currentSubspace))) return hyperparamIndices; } _exhausted = true; return null; } // nextModel }; // anonymous HyperSpaceIterator class } // iterator() @Override public long estimateGridWork(long maxModels) { // We don't want to randomly sample the whole hyperspace return Long.MAX_VALUE; } } // RandomDiscreteValueWalker }
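For reference, the following is a minimal standalone sketch of the odometer-style index stepping used by CartesianWalker.nextModelIndices above: find the first position that can still be incremented, zero everything below it, and stop when nothing can move. Class, parameter names and values are illustrative only, and the snippet does not depend on any H2O classes.

import java.util.*;

public class CartesianOdometerSketch {
    // Advance indices odometer-style; return null once the whole space has been visited.
    static int[] next(int[] idx, int[] lengths) {
        int i;
        for (i = 0; i < idx.length; i++) {
            if (idx[i] + 1 < lengths[i]) break;   // first position that can still move
        }
        if (i == idx.length) return null;          // all positions exhausted
        for (int j = 0; j < i; j++) idx[j] = 0;    // reset the lower positions
        idx[i]++;
        return idx;
    }

    public static void main(String[] args) {
        Map<String, Object[]> hyperParams = new LinkedHashMap<>();
        hyperParams.put("_max_depth", new Object[]{1, 2, 5});
        hyperParams.put("_learn_rate", new Object[]{0.01, 0.1});

        String[] names = hyperParams.keySet().toArray(new String[0]);
        int[] lengths = Arrays.stream(names).mapToInt(n -> hyperParams.get(n).length).toArray();

        int[] idx = new int[names.length];         // the first combination is all zeros
        do {
            StringBuilder sb = new StringBuilder();
            for (int i = 0; i < names.length; i++)
                sb.append(names[i]).append("=").append(hyperParams.get(names[i])[idx[i]]).append(" ");
            System.out.println(sb.toString().trim());
        } while ((idx = next(idx, lengths)) != null);  // 3 * 2 = 6 combinations in total
    }
}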
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/grid/SequentialWalker.java
package hex.grid; import hex.Model; import hex.ModelParametersBuilderFactory; import hex.ScoreKeeper; import hex.ScoringInfo; import hex.grid.HyperSpaceSearchCriteria.SequentialSearchCriteria; import hex.grid.HyperSpaceSearchCriteria.StoppingCriteria; import water.H2O; import java.util.Arrays; import java.util.HashMap; import java.util.Map; import java.util.function.Consumer; import java.util.stream.Stream; public class SequentialWalker<MP extends Model.Parameters> implements HyperSpaceWalker<MP, SequentialSearchCriteria> { private final MP _params; private final Object[][] _hyperParams; private final String[] _hyperParamNames; private final ModelParametersBuilderFactory _paramsBuilderFactory; private final SequentialSearchCriteria _searchCriteria; public SequentialWalker(MP params, Object[][] hyperParams, ModelParametersBuilderFactory<MP> paramsBuilderFactory, SequentialSearchCriteria searchCriteria) { assert hyperParams.length > 1; assert Stream.of(hyperParams[0]).allMatch(c -> c instanceof String) : "first row of hyperParams must contains hyper-parameter names"; _params = params; _hyperParamNames = new String[hyperParams[0].length]; System.arraycopy(hyperParams[0], 0, _hyperParamNames, 0, _hyperParamNames.length); _hyperParams = Arrays.copyOfRange(hyperParams, 1, hyperParams.length); _paramsBuilderFactory = paramsBuilderFactory; _searchCriteria = searchCriteria; } public SequentialWalker(MP params, Map<String, Object[]> hyperParams, ModelParametersBuilderFactory<MP> paramsBuilderFactory, SequentialSearchCriteria searchCriteria) { assert hyperParams.size() > 1; _params = params; _paramsBuilderFactory = paramsBuilderFactory; _searchCriteria = searchCriteria; int paramsLength = hyperParams.entrySet().iterator().next().getValue().length; int counter = 0; _hyperParamNames = new String[hyperParams.size()]; _hyperParams = new Object[paramsLength][hyperParams.size()]; for(Map.Entry<String, Object[]> entry: hyperParams.entrySet()) { assert entry.getValue().length == paramsLength; _hyperParamNames[counter] = entry.getKey(); for (int i = 0; i < entry.getValue().length; i++) { _hyperParams[i][counter] = entry.getValue()[i]; } counter ++; } } @Override public SequentialSearchCriteria search_criteria() { return _searchCriteria; } @Override public String[] getHyperParamNames() { return _hyperParamNames; } @Override public String[] getAllHyperParamNamesInSubspaces() { return new String[0]; } @Override public Map<String, Object[]> getHyperParams() { Map<String, Object[]> result = new HashMap<>(); for (int i = 0; i < _hyperParamNames.length; i++) { Object[] values = new Object[_hyperParams.length]; for (int j = 0; j < _hyperParams.length; j++) values[j] = _hyperParams[j][i]; result.put(_hyperParamNames[i], values); } return result; } @Override public long getMaxHyperSpaceSize() { return _hyperParams.length; } @Override public MP getParams() { return _params; } @Override public ModelParametersBuilderFactory<MP> getParametersBuilderFactory() { return _paramsBuilderFactory; } @Override public boolean stopEarly(Model model, ScoringInfo[] sk) { if (!search_criteria().earlyStoppingEnabled()) return false; StoppingCriteria stoppingCriteria = search_criteria().stoppingCriteria(); return ScoreKeeper.stopEarly( ScoringInfo.scoreKeepers(sk), stoppingCriteria.getStoppingRounds(), ScoreKeeper.ProblemType.forSupervised(model._output.isClassifier()), stoppingCriteria.getStoppingMetric(), stoppingCriteria.getStoppingTolerance(), "grid's best", true ); } private MP getModelParams(MP params, Object[] hyperParams) { 
ModelParametersBuilderFactory.ModelParametersBuilder<MP> paramsBuilder = _paramsBuilderFactory.get(params.clone()); for (int i = 0; i < _hyperParamNames.length; i++) { String paramName = _hyperParamNames[i]; Object paramValue = hyperParams[i]; if (paramValue != null) paramsBuilder.set(paramName, paramValue); } return paramsBuilder.build(); } @Override public HyperSpaceIterator<MP> iterator() { return new HyperSpaceIterator<MP>() { private int _index = -1; @Override public MP nextModelParameters() { return getModelParams(_params, _hyperParams[++_index]); } @Override public boolean hasNext() { if (search_criteria().stoppingCriteria().getMaxModels() > 0 && _index >= search_criteria().stoppingCriteria().getMaxModels() - 1) return false; return _index+1 < getMaxHyperSpaceSize(); } @Override public void onModelFailure(Model failedModel, Consumer<Object[]> withFailedModelHyperParams) { withFailedModelHyperParams.accept(_hyperParams[_index]); //TODO: identify index of failedModel } }; } }
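SequentialWalker accepts either a row-oriented Object[][] (first row holds the hyper-parameter names, every later row is one model) or a Map of equally long value arrays that its second constructor transposes into rows. The sketch below shows the two equivalent layouts and the transposition; parameter names and values are made up and the snippet is independent of H2O.

import java.util.*;

public class SequentialSpaceSketch {
    public static void main(String[] args) {
        // Row-oriented layout: first row holds the names, every later row is one model.
        Object[][] table = new Object[][]{
            {"_ntrees", "_learn_rate"},   // hyper-parameter names
            {50,        0.1},             // model 1
            {100,       0.05},            // model 2
            {200,       0.01},            // model 3
        };

        // Equivalent column-oriented layout: one value array per name, all of equal length.
        Map<String, Object[]> byName = new LinkedHashMap<>();
        byName.put("_ntrees",     new Object[]{50, 100, 200});
        byName.put("_learn_rate", new Object[]{0.1, 0.05, 0.01});

        // Transpose the map into rows, mirroring what the Map-based constructor does.
        int nModels = byName.values().iterator().next().length;
        Object[][] rows = new Object[nModels][byName.size()];
        int col = 0;
        for (Map.Entry<String, Object[]> e : byName.entrySet()) {
            for (int row = 0; row < nModels; row++) rows[row][col] = e.getValue()[row];
            col++;
        }
        System.out.println(Arrays.deepToString(rows));  // [[50, 0.1], [100, 0.05], [200, 0.01]]
        System.out.println(Arrays.deepToString(Arrays.copyOfRange(table, 1, table.length)));
    }
}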
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/leaderboard/AlgoName.java
package hex.leaderboard; import hex.Model; import water.Iced; import water.Key; public class AlgoName extends Iced<AlgoName> implements LeaderboardCell<String, AlgoName> { public static final LeaderboardColumn COLUMN = new LeaderboardColumn("algo", "string", "%s"); final Key<Model> _modelId; private String _algo; public AlgoName(Model model) { this._modelId = model._key; this._algo = model._parms.algoName(); } @Override public LeaderboardColumn getColumn() { return COLUMN; } @Override public Key<Model> getModelId() { return _modelId; } @Override public String getValue() { return _algo; } @Override public void setValue(String value) { throw new UnsupportedOperationException(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/leaderboard/Leaderboard.java
package hex.leaderboard; import hex.Model; import hex.ModelCategory; import hex.ModelContainer; import hex.ModelMetrics; import water.*; import water.exceptions.H2OIllegalArgumentException; import water.fvec.Frame; import water.logging.Logger; import water.logging.LoggerFactory; import water.util.*; import java.util.*; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; import java.util.stream.Stream; /** * Utility to track all the models built for a given dataset type. * <p> * Note that if a new Leaderboard is made for the same project_name it'll * keep using the old model list, which allows us to run AutoML multiple * times and keep adding to the leaderboard. * <p> * The models are returned sorted by either an appropriate default metric * for the model category (auc, mean per class error, or mean residual deviance), * or by a metric that's set via #setMetricAndDirection. * <p> * TODO: make this robust against removal of models from the DKV. */ public class Leaderboard extends Lockable<Leaderboard> implements ModelContainer<Model> { /** * What data should be used to generate leaderboard metrics. * "auto "is the default used by AutoML which can lead to some * models having metrics calculated on xval and others on train/valid. */ public enum ScoreData{ auto, xval, train, valid } /** * @param project_name * @return a Leaderboard id for the project name */ public static String idForProject(String project_name) { return "Leaderboard_" + project_name; } /** * @param project_name * @param score_data what metrics should be reported * @return a Leaderboard id for the project name, when score_data == auto use idForProject(String) to generate id */ public static String idForProject(String project_name, ScoreData score_data) { if (ScoreData.auto.equals(score_data)) return idForProject(project_name); return "Leaderboard_" + project_name + "_for_" + score_data.toString(); } /** * @param metric * @return true iff the metric is a loss function */ public static boolean isLossFunction(String metric) { return metric != null && !Arrays.asList("auc", "aucpr").contains(metric.toLowerCase()); } /** * Retrieves a leaderboard from DKV * * @param leaderboardKey * @param logger * @return an existing leaderboard if there's already one in DKV for this project, or null. */ public static Leaderboard getInstance(Key leaderboardKey, Logger logger) { Leaderboard leaderboard = DKV.getGet(leaderboardKey); if (null != leaderboard) { // set the logger if (leaderboard._eventLogger == null) { leaderboard._eventLogger = logger == null ? log : logger; } } return leaderboard; } /** * Retrieves a leaderboard from DKV * * @param projectName * @param logger * @param leaderboardFrame * @param sortMetric * @param scoreData * @return an existing leaderboard if there's already one in DKV for this project, or null. 
*/ public static Leaderboard getInstance(String projectName, Logger logger, Frame leaderboardFrame, String sortMetric, ScoreData scoreData) { Leaderboard leaderboard = getInstance(Key.make(idForProject(projectName, scoreData)), logger); if (null != leaderboard) { if (leaderboardFrame != null && (!leaderboardFrame._key.equals(leaderboard._leaderboard_frame_key) || leaderboardFrame.checksum() != leaderboard._leaderboard_frame_checksum)) { throw new H2OIllegalArgumentException("Cannot use leaderboard "+projectName+" with a new leaderboard frame" +" (existing leaderboard frame: "+leaderboard._leaderboard_frame_key+")."); } if (sortMetric != null && !sortMetric.equals(leaderboard._sort_metric)) { leaderboard._sort_metric = sortMetric.toLowerCase(); if (leaderboard.getLeader() != null) leaderboard.setDefaultMetrics(leaderboard.getLeader()); //reinitialize } } return leaderboard; } /** * Retrieves a leaderboard from DKV or creates a fresh one and add it to DKV. * * Note that if the leaderboard is reused to add new models, we have to use the same leaderboard frame. * * IMPORTANT! * if the leaderboard is created without leaderboardFrame, the models will be sorted according to their default metrics * (in order of availability: cross-validation metrics, validation metrics, training metrics). * Therefore, if some models were trained with/without cross-validation, or with different training or validation frames, * then we can't guarantee the fairness of the leaderboard ranking. * * @param projectName * @param logger * @param leaderboardFrame * @param sortMetric * @param scoreData * @return an existing leaderboard if there's already one in DKV for this project, or a new leaderboard added to DKV. */ public static Leaderboard getOrMake(String projectName, Logger logger, Frame leaderboardFrame, String sortMetric, ScoreData scoreData) { Leaderboard leaderboard = getInstance(projectName, logger, leaderboardFrame, sortMetric, scoreData); if (null == leaderboard) { leaderboard = new Leaderboard(projectName, logger, leaderboardFrame, sortMetric, scoreData); } DKV.put(leaderboard); return leaderboard; } /** * @see #getOrMake(String, Logger, Frame, String, ScoreData) */ public static Leaderboard getOrMake(String projectName, Logger logger, Frame leaderboardFrame, String sortMetric){ return getOrMake(projectName, logger, leaderboardFrame, sortMetric, ScoreData.auto); } private static final Logger log = LoggerFactory.getLogger(Leaderboard.class); private transient Logger _eventLogger; // Used for event log when used from AutoML /** * Identifier for models that should be grouped together in the leaderboard * (e.g., "airlines" and "iris"). */ private final String _project_name; /** * List of models for this leaderboard, sorted by metric so that the best is first, * according to the standard metric for the given model type. * <p> * Updated inside addModels(). */ private Key<Model>[] _model_keys = new Key[0]; /** * Leaderboard/test set ModelMetrics objects for the models. * <p> * Updated inside addModels(). 
*/ private final IcedHashMap<Key<ModelMetrics>, ModelMetrics> _leaderboard_model_metrics = new IcedHashMap<>(); /** * Map providing for a given metric name, the list of metric values in the same order as the models */ private IcedHashMap<String, double[]> _metric_values = new IcedHashMap<>(); private LeaderboardExtensionsProvider _extensionsProvider; /** * Map listing the leaderboard extensions per model */ private LeaderboardCell[] _extensions_cells = new LeaderboardCell[0]; /** * Metric used to sort this leaderboard. */ private String _sort_metric; /** * One of "auto", "xval", "valid", "train"; */ private ScoreData _score_data = ScoreData.auto; /** * Metrics reported in leaderboard * Regression metrics: mean_residual_deviance, rmse, mse, mae, rmsle * Binomial metrics: auc, logloss, aucpr, mean_per_class_error, rmse, mse * Multinomial metrics: logloss, mean_per_class_error, rmse, mse */ private String[] _metrics; /** * Frame for which we return the metrics, by default. */ private final Key<Frame> _leaderboard_frame_key; /** * Checksum for the Frame for which we return the metrics, by default. */ private final long _leaderboard_frame_checksum; private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); /** * Constructs a new leaderboard (doesn't put it in DKV). * @param projectName * @param logger * @param leaderboardFrame * @param sortMetric * @param scoreData */ public Leaderboard(String projectName, Logger logger, Frame leaderboardFrame, String sortMetric, ScoreData scoreData) { super(Key.make(idForProject(projectName, scoreData))); _project_name = projectName; _eventLogger = logger == null ? log : logger; _leaderboard_frame_key = leaderboardFrame == null ? null : leaderboardFrame._key; _leaderboard_frame_checksum = leaderboardFrame == null ? 0 : leaderboardFrame.checksum(); _sort_metric = sortMetric == null ? null : sortMetric.toLowerCase(); _score_data = scoreData; } /** * Assign a {@link LeaderboardExtensionsProvider} to this leaderboard instance. * @param provider the provider used to generate the optional extension columns from the leaderboard. * @see LeaderboardExtensionsProvider */ public void setExtensionsProvider(LeaderboardExtensionsProvider provider) { _extensionsProvider = provider; } public String getProject() { return _project_name; } /** * If no sort metric is provided when creating the leaderboard, * then a default sort metric will be automatically chosen based on the problem type: * <li> * <ul>binomial classification: auc</ul> * <ul>multinomial classification: logloss</ul> * <ul>regression: mean_residual_deviance</ul> * </li> * @return the metric used to sort the models in the leaderboard. */ public String getSortMetric() { return _sort_metric; } /** * The sort metric is always the first element in the list of metrics. * * @return the full list of metrics available in the leaderboard. */ public String[] getMetrics() { return _metrics == null ? (_sort_metric == null ? new String[0] : new String[]{_sort_metric}) : _metrics; } /** * Note: If no leaderboard was provided, then the models are sorted according to metrics obtained during training * in the following priority order depending on availability: * <li> * <ol>cross-validation metrics</ol> * <ol>validation metrics</ol> * <ol>training metrics</ol> * </li> * @return the frame (if any) used to score the models in the leaderboard. */ public Frame leaderboardFrame() { return _leaderboard_frame_key == null ? 
null : _leaderboard_frame_key.get(); } /** * @return list of keys of models sorted by the default metric for the model category, fetched from the DKV */ @Override public Key<Model>[] getModelKeys() { return _model_keys; } /** Return the number of models in this Leaderboard. */ @Override public int getModelCount() { return getModelKeys() == null ? 0 : getModelKeys().length; } /** * @return list of models sorted by the default metric for the model category */ @Override public Model[] getModels() { if (getModelCount() == 0) return new Model[0]; return getModelsFromKeys(getModelKeys()); } /** * @return list of models sorted by the given metric */ public Model[] getModelsSortedByMetric(String metric) { if (getModelCount() == 0) return new Model[0]; return getModelsFromKeys(sortModelKeys(getModelKeys(), metric)); } /** * @return the model with the best sort metric value. * @see #getSortMetric() */ public Model getLeader() { if (getModelCount() == 0) return null; return getModelKeys()[0].get(); } /** * @param modelKey * @return the rank for the given model key, according to the sort metric ranking (leader has rank 1). */ public int getModelRank(Key<Model> modelKey) { return ArrayUtils.find(getModelKeys(), modelKey) + 1; } /** * @return the ordered values (asc or desc depending if sort metric is a loss function or not) for the sort metric. * @see #getSortMetric() * @see #isLossFunction(String) */ public double[] getSortMetricValues() { return _sort_metric == null ? null : _metric_values.get(_sort_metric); } /** * * @param metricName * @return the metric values for the models in the same order as {@link #getModels()} */ public double[] getMetricValues(String metricName) { return _metric_values.get(metricName); } private void setDefaultMetrics(Model m) { write_lock(); String[] metrics = defaultMetricsForModel(m); if (_sort_metric == null) { _sort_metric = metrics.length > 0 ? metrics[0] : "mse"; // default to a metric "universally" available } // ensure metrics is ordered in such a way that sortMetric is the first metric, and without duplicates. int sortMetricIdx = ArrayUtils.find(metrics, _sort_metric); if (sortMetricIdx > 0) { metrics = ArrayUtils.remove(metrics, sortMetricIdx); metrics = ArrayUtils.prepend(metrics, _sort_metric); } else if (sortMetricIdx < 0){ metrics = ArrayUtils.append(new String[]{_sort_metric}, metrics); } _metrics = metrics; update(); unlock(); } public ModelMetrics getOrCreateModelMetrics(Key<Model> modelKey) { return getOrCreateModelMetrics(modelKey, getExtensionsAsMap()); } private ModelMetrics getModelMetrics(Model model) { switch (_score_data) { case auto: return ModelMetrics.defaultModelMetrics(model); case xval: return model._output._cross_validation_metrics; case valid: return model._output._validation_metrics; case train: return model._output._training_metrics; default: throw new H2OIllegalArgumentException("Unsupported score data argument: " + _score_data + ". 
Use one of: auto, xval, valid, train."); } } private ModelMetrics getOrCreateModelMetrics(Key<Model> modelKey, Map<Key<Model>, LeaderboardCell[]> extensions) { final Frame leaderboardFrame = leaderboardFrame(); ModelMetrics mm; Model model = modelKey.get(); if (leaderboardFrame == null) { // If leaderboardFrame is null, use default model metrics instead mm = getModelMetrics(model); } else { mm = ModelMetrics.getFromDKV(model, leaderboardFrame); if (mm == null) { // metrics haven't been computed yet (should occur max once per model) // optimization: as we need to score leaderboard, score from the scoring time extension if provided. LeaderboardCell scoringTimePerRow = getExtension(modelKey, ScoringTimePerRow.COLUMN.getName(), extensions); if (scoringTimePerRow != null && scoringTimePerRow.getValue() == null) { scoringTimePerRow.fetch(); mm = ModelMetrics.getFromDKV(model, leaderboardFrame); } } if (mm == null) { // last resort //scores and magically stores the metrics where we're looking for it on the next line model.score(leaderboardFrame).delete(); mm = ModelMetrics.getFromDKV(model, leaderboardFrame); } } return mm; } /** * Add the given models to the leaderboard. * Note that to make this easier to use from Grid, which returns its models in random order, * we allow the caller to add the same model multiple times and we eliminate the duplicates here. * @param modelKeys */ public void addModels(final Key<Model>[] modelKeys) { if (modelKeys == null || modelKeys.length == 0) return; if (null == _key) throw new H2OIllegalArgumentException("Can't add models to a Leaderboard which isn't in the DKV."); final Key<Model>[] oldModelKeys = _model_keys; final Key<Model> oldLeaderKey = (oldModelKeys == null || 0 == oldModelKeys.length) ? null : oldModelKeys[0]; // eliminate duplicates final Set<Key<Model>> uniques = new HashSet<>(Arrays.asList(ArrayUtils.append(oldModelKeys, modelKeys))); final List<Key<Model>> allModelKeys = new ArrayList<>(uniques); final Set<Key<Model>> newModelKeys = new HashSet<>(uniques); newModelKeys.removeAll(Arrays.asList(oldModelKeys)); // In case we're just re-adding existing models if (newModelKeys.isEmpty()) return; allModelKeys.forEach(DKV::prefetch); final ModelCategory[] allowedModelCategories = new ModelCategory[] { ModelCategory.Binomial, ModelCategory.Multinomial, ModelCategory.Regression, }; for (Key<Model> k : newModelKeys) { Model m = k.get(); if (m == null) continue; // warning handled in next loop below assert m.isSupervised(): "Leaderboard supports only supervised models!"; assert ArrayUtils.contains(allowedModelCategories, m._output.getModelCategory()) : "Leaderboard doesn't support " + m._output.getModelCategory() + " model category!"; _eventLogger.debug("Adding model "+k+" to leaderboard "+_key+"." + " Training time: model=" + Math.round(m._output._run_time / 1000.) + "s," + " total=" + Math.round(m._output._total_run_time / 1000.) 
+ "s"); } final List<ModelMetrics> modelMetrics = new ArrayList<>(); final Map<Key<Model>, LeaderboardCell[]> extensions = new HashMap<>(); final List<Key<Model>> badKeys = new ArrayList<>(); for (Key<Model> modelKey : allModelKeys) { // fully rebuilding modelMetrics, so we loop through all keys, not only new ones Model model = modelKey.get(); if (model == null) { badKeys.add(modelKey); _eventLogger.warn("Model `"+modelKey+"` has unexpectedly been deleted from H2O: ignoring the model and/or removing it from the leaderboard."); continue; } if (_extensionsProvider != null) { extensions.put(modelKey, _extensionsProvider.createExtensions(model)); } ModelMetrics mm = getOrCreateModelMetrics(modelKey, extensions); assert mm != null: "Missing metrics for model "+modelKey; if (mm == null) { badKeys.add(modelKey); _eventLogger.warn("Metrics for model `"+modelKey+"` are missing: ignoring the model and/or removing it from the leaderboard."); continue; } modelMetrics.add(mm); } if (_metrics == null) { // lazily set to default for this model category Model model = null; String cm = modelKeys[0].get()._parms._custom_metric_func; String[] metricsFirst = defaultMetricsForModel(modelKeys[0].get()); for (Key<Model> k : modelKeys) { final String[] metrics = defaultMetricsForModel(model = k.get()); if (metrics.length != metricsFirst.length || !Arrays.equals(metricsFirst, metrics)) throw new H2OIllegalArgumentException("Models don't have the same metrics (e.g. model \"" + modelKeys[0].toString()+"\" and model \""+k+"\")."); if (!Objects.equals(cm, k.get()._parms._custom_metric_func)) throw new H2OIllegalArgumentException("Models don't have the same custom metrics (e.g. model \"" + modelKeys[0].toString()+"\" and model \""+k+"\")."); } setDefaultMetrics(model); } for (Key<Model> key : badKeys) { // keep everything clean for the update allModelKeys.remove(key); extensions.remove(key); } atomicUpdate(() -> { _leaderboard_model_metrics.clear(); modelMetrics.forEach(this::addModelMetrics); updateModels(allModelKeys.toArray(new Key[0])); _extensions_cells = new LeaderboardCell[0]; extensions.forEach(this::addExtensions); }, null); if (oldLeaderKey == null || !oldLeaderKey.equals(_model_keys[0])) { _eventLogger.info("New leader: "+_model_keys[0]+", "+ _sort_metric +": "+ _metric_values.get(_sort_metric)[0]); } } // addModels /** * @param modelKeys the keys of the models to be removed from this leaderboard. * @param cascade if true, the model itself and its dependencies will be completely removed from the backend. 
*/ public void removeModels(final Key<Model>[] modelKeys, boolean cascade) { if (modelKeys == null || modelKeys.length == 0 || Arrays.stream(modelKeys).noneMatch(k -> ArrayUtils.contains(_model_keys, k))) return; Arrays.stream(modelKeys).filter(k -> ArrayUtils.contains(_model_keys, k)).forEach(k -> { _eventLogger.debug("Removing model "+k+" from leaderboard "+_key); }); Key<Model>[] remainingKeys = Arrays.stream(_model_keys).filter(k -> !ArrayUtils.contains(modelKeys, k)).toArray(Key[]::new); atomicUpdate(() -> { _model_keys = new Key[0]; addModels(remainingKeys); }, null); if (cascade) { for (Key<Model> key : modelKeys) { Keyed.remove(key); } } } public void ensureSorted() { atomicUpdate(() -> { updateModels(_model_keys); }, null); } private void updateModels(Key<Model>[] modelKeys) { final Key<Model>[] sortedModelKeys = sortModelKeys(modelKeys, _sort_metric); final Model[] sortedModels = getModelsFromKeys(sortedModelKeys); final IcedHashMap<String, double[]> metricValues = new IcedHashMap<>(); for (String metric : _metrics) { metricValues.put(metric, getMetrics(metric, sortedModels)); } _metric_values = metricValues; _model_keys = sortedModelKeys; } private void atomicUpdate(Runnable update, Key<Job> jobKey) { DKVUtils.atomicUpdate(this, update, jobKey, lock); } /** * @see #addModels(Key[]) */ @SuppressWarnings("unchecked") public <M extends Model> void addModel(final Key<M> key) { if (key == null) return; addModels(new Key[] {key}); } /** * @param key the key of the model to be removed from the leaderboard. * @param cascade if true, the model itself and it's dependencies will be completely removed from the backend. */ @SuppressWarnings("unchecked") public <M extends Model> void removeModel(final Key<M> key, boolean cascade) { if (key == null) return; removeModels(new Key[] {key}, cascade); } private void addModelMetrics(ModelMetrics modelMetrics) { if (modelMetrics != null) _leaderboard_model_metrics.put(modelMetrics._key, modelMetrics); } private <M extends Model> void addExtensions(final Key<M> key, LeaderboardCell... 
extensions) { if (key == null) return; assert ArrayUtils.contains(_model_keys, key); LeaderboardCell[] toAdd = Stream.of(extensions) .filter(lc -> getExtension(key, lc.getColumn().getName()) == null) .toArray(LeaderboardCell[]::new); _extensions_cells = ArrayUtils.append(_extensions_cells, toAdd); } public Map<Key<Model>, LeaderboardCell[]> getExtensionsAsMap() { return Arrays.stream(_extensions_cells).collect(Collectors.toMap( c -> c.getModelId(), c -> new LeaderboardCell[]{c}, (lhs, rhs) -> ArrayUtils.append(lhs, rhs) )); } private <M extends Model> LeaderboardCell[] getExtensions(final Key<M> key) { return Stream.of(_extensions_cells) .filter(c -> c.getModelId().equals(key)) .toArray(LeaderboardCell[]::new); } private <M extends Model> LeaderboardCell getExtension(final Key<M> key, String extName) { return getExtension(key, extName, Collections.singletonMap((Key<Model>)key, getExtensions(key))); } private <M extends Model> LeaderboardCell getExtension(final Key<M> key, String extName, Map<Key<Model>, LeaderboardCell[]> extensions) { if (extensions != null && extensions.containsKey(key)) { return Stream.of(extensions.get(key)) .filter(le -> le.getColumn().getName().equals(extName)) .findFirst() .orElse(null); } return null; } private static Model[] getModelsFromKeys(Key<Model>[] modelKeys) { Model[] models = new Model[modelKeys.length]; int i = 0; for (Key<Model> modelKey : modelKeys) models[i++] = DKV.getGet(modelKey); return models; } /** * Sort by metric on the leaderboard/test set or default model metrics. */ private Key<Model>[] sortModelKeys(Key<Model>[] modelKeys, String sortMetric) { boolean sortDecreasing = !isLossFunction(sortMetric); Comparator<Key<Model>> cmp = Comparator.comparingDouble(mk -> getMetric(sortMetric, mk.get())); if (sortDecreasing) cmp = cmp.reversed(); modelKeys = modelKeys.clone(); Arrays.sort(modelKeys, cmp); return modelKeys; } private double getMetric(String metric, Model model) { // If leaderboard frame exists, get metrics from there if (leaderboardFrame() != null) { return ModelMetrics.getMetricFromModelMetric( _leaderboard_model_metrics.get(ModelMetrics.buildKey(model, leaderboardFrame())), metric ); } else { // otherwise use default model metrics return ModelMetrics.getMetricFromModelMetric( getModelMetrics(model), metric ); } } private double[] getMetrics(String metric, Model[] models) { double[] metrics = new double[models.length]; int i = 0; for (Model m : models) { metrics[i++] = getMetric(metric, m); } return metrics; } /** * Delete object and its dependencies from DKV, including models. 
*/ @Override protected Futures remove_impl(Futures fs, boolean cascade) { log.debug("Cleaning up leaderboard from models "+Arrays.toString(_model_keys)); if (cascade) { for (Key<Model> m : _model_keys) { Keyed.remove(m, fs, true); } } for (Key k : _leaderboard_model_metrics.keySet()) Keyed.remove(k, fs, true); return super.remove_impl(fs, cascade); } private static String[] defaultMetricsForModel(Model m) { ArrayList<String> result = new ArrayList<>(); if (m._output.isBinomialClassifier()) { //binomial Collections.addAll(result, "auc", "logloss", "aucpr", "mean_per_class_error", "rmse", "mse"); } else if (m._output.isMultinomialClassifier()) { // multinomial Collections.addAll(result, "mean_per_class_error", "logloss", "rmse", "mse"); } else if (m._output.isSupervised()) { // regression Collections.addAll(result, "rmse", "mse", "mae", "rmsle", "mean_residual_deviance"); } if (m._parms._custom_metric_func != null) result.add("custom"); return result.toArray(new String[0]); } private double[] getModelMetricValues(int rank) { assert rank >= 0 && rank < getModelKeys().length: "invalid rank"; if (_metrics == null) return new double[0]; final double[] values = new double[_metrics.length]; for (int i=0; i < _metrics.length; i++) { values[i] = _metric_values.get(_metrics[i])[rank]; } return values; } public String rankTsv() { String lineSeparator = "\n"; StringBuilder sb = new StringBuilder(); sb.append("Error").append(lineSeparator); for (int i = getModelKeys().length - 1; i >= 0; i--) { // TODO: allow the metric to be passed in. Note that this assumes the validation (or training) frame is the same. sb.append(Arrays.toString(getModelMetricValues(i))); sb.append(lineSeparator); } return sb.toString(); } private TwoDimTable makeTwoDimTable(String tableHeader, int nrows, LeaderboardColumn... columns) { assert columns.length > 0; assert _sort_metric != null || nrows == 0 : "sort_metrics needs to be always not-null for non-empty array!"; String description = nrows > 0 ? "models sorted in order of "+_sort_metric+", best first" : "no models in this leaderboard"; String[] rowHeaders = new String[nrows]; for (int i = 0; i < nrows; i++) rowHeaders[i] = ""+i; String[] colHeaders = Stream.of(columns).map(LeaderboardColumn::getName).toArray(String[]::new); String[] colTypes = Stream.of(columns).map(LeaderboardColumn::getType).toArray(String[]::new); String[] colFormats = Stream.of(columns).map(LeaderboardColumn::getFormat).toArray(String[]::new); String colHeaderForRowHeader = nrows > 0 ? "#" : "-"; return new TwoDimTable( tableHeader, description, rowHeaders, colHeaders, colTypes, colFormats, colHeaderForRowHeader ); } private void addTwoDimTableRow(TwoDimTable table, int row, String modelID, String[] metrics, LeaderboardCell[] extensions) { int col = 0; table.set(row, col++, modelID); for (String metric : metrics) { double value = _metric_values.get(metric)[row]; table.set(row, col++, value); } for (LeaderboardCell extension: extensions) { if (extension != null) { Object value = extension.getValue() == null ? extension.fetch() : extension.getValue(); // for costly extensions, only fetch value on-demand if (!extension.isNA()) { table.set(row, col, value); } } col++; } } /** * Creates a {@link TwoDimTable} representation of the leaderboard. * If no extensions are provided, then the representation will only contain the model ids and the scoring metrics. 
* Each extension name will be represented in the table * if and only if it was also made available to the leaderboard by the {@link LeaderboardExtensionsProvider}, * otherwise it will just be ignored. * @param extensions optional columns for the leaderboard representation. * @return a {@link TwoDimTable} representation of the current leaderboard. * @see LeaderboardExtensionsProvider * @see LeaderboardColumn */ public TwoDimTable toTwoDimTable(String... extensions) { return toTwoDimTable("Leaderboard for project " + _project_name, false, extensions); } private TwoDimTable toTwoDimTable(String tableHeader, boolean leftJustifyModelIds, String... extensions) { final Lock readLock = lock.readLock(); if (readLock.tryLock()) { try { final Key<Model>[] modelKeys = _model_keys.clone(); // leaderboard can be retrieved when AutoML is still running: freezing current models state. final List<LeaderboardColumn> columns = getDefaultTableColumns(); final List<LeaderboardColumn> extColumns = new ArrayList<>(); if (getModelCount() > 0) { final Key<Model> leader = getModelKeys()[0]; LeaderboardCell[] extCells = (extensions.length > 0 && LeaderboardExtensionsProvider.ALL.equalsIgnoreCase(extensions[0])) ? Stream.of(getExtensions(leader)).filter(cell -> !cell.getColumn().isHidden()).toArray(LeaderboardCell[]::new) : Stream.of(extensions).map(e -> getExtension(leader, e)).toArray(LeaderboardCell[]::new); Stream.of(extCells).filter(Objects::nonNull).forEach(e -> extColumns.add(e.getColumn())); } columns.addAll(extColumns); TwoDimTable table = makeTwoDimTable(tableHeader, modelKeys.length, columns.toArray(new LeaderboardColumn[0])); int maxModelIdLen = Stream.of(modelKeys).mapToInt(k -> k.toString().length()).max().orElse(0); final String[] modelIDsFormatted = new String[modelKeys.length]; for (int i = 0; i < modelKeys.length; i++) { Key<Model> key = modelKeys[i]; if (leftJustifyModelIds) { // %-s doesn't work in TwoDimTable.toString(), so fake it here: modelIDsFormatted[i] = org.apache.commons.lang.StringUtils.rightPad(key.toString(), maxModelIdLen); } else { modelIDsFormatted[i] = key.toString(); } addTwoDimTableRow(table, i, modelIDsFormatted[i], getMetrics(), extColumns.stream().map(ext -> getExtension(key, ext.getName())).toArray(LeaderboardCell[]::new) ); } return table; } finally { readLock.unlock(); } } else { return makeTwoDimTable(tableHeader, 0, getDefaultTableColumns().toArray(new LeaderboardColumn[0])); } } private List<LeaderboardColumn> getDefaultTableColumns() { final List<LeaderboardColumn> columns = new ArrayList<>(); columns.add(ModelId.COLUMN); for (String metric : getMetrics()) { columns.add(MetricScore.getColumn(metric)); } return columns; } private String toString(String fieldSeparator, String lineSeparator, boolean includeTitle, boolean includeHeader) { final StringBuilder sb = new StringBuilder(); if (includeTitle) { sb.append("Leaderboard for project \"") .append(_project_name) .append("\": "); if (_model_keys.length == 0) { sb.append("<empty>"); return sb.toString(); } sb.append(lineSeparator); } boolean printedHeader = false; for (int i = 0; i < _model_keys.length; i++) { final Key<Model> key = _model_keys[i]; if (includeHeader && ! printedHeader) { sb.append("model_id"); sb.append(fieldSeparator); String [] metrics = _metrics != null ? _metrics : new String[0]; sb.append(Arrays.toString(metrics)); sb.append(lineSeparator); printedHeader = true; } sb.append(key.toString()); sb.append(fieldSeparator); double[] values = _metrics != null ? 
getModelMetricValues(i) : new double[0]; sb.append(Arrays.toString(values)); sb.append(lineSeparator); } return sb.toString(); } @Override public String toString() { return toString(" ; ", " | ", true, true); } public String toLogString() { return toTwoDimTable("Leaderboard for project "+_project_name, true).toString(); } }
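The ordering rule applied by Leaderboard.sortModelKeys boils down to: sort by the metric value and reverse the comparator when the metric is not a loss function (auc and aucpr are "higher is better"). Below is a standalone sketch of that rule using plain strings in place of model keys; all names and scores are illustrative.

import java.util.*;
import java.util.stream.Collectors;

public class LeaderboardSortSketch {
    // Mirrors Leaderboard.isLossFunction: everything except auc/aucpr is treated as a loss.
    static boolean isLossFunction(String metric) {
        return metric != null && !Arrays.asList("auc", "aucpr").contains(metric.toLowerCase());
    }

    static List<String> rank(Map<String, Double> scores, String metric) {
        Comparator<String> cmp = Comparator.comparingDouble(scores::get);
        if (!isLossFunction(metric)) cmp = cmp.reversed();  // higher auc/aucpr is better
        return scores.keySet().stream().sorted(cmp).collect(Collectors.toList());
    }

    public static void main(String[] args) {
        Map<String, Double> auc = new HashMap<>();
        auc.put("gbm_1", 0.91); auc.put("drf_1", 0.88); auc.put("glm_1", 0.93);
        System.out.println(rank(auc, "auc"));          // [glm_1, gbm_1, drf_1] - best first

        Map<String, Double> logloss = new HashMap<>();
        logloss.put("gbm_1", 0.30); logloss.put("drf_1", 0.35); logloss.put("glm_1", 0.28);
        System.out.println(rank(logloss, "logloss"));  // [glm_1, gbm_1, drf_1] - lowest loss first
    }
}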
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/leaderboard/LeaderboardCell.java
package hex.leaderboard; import hex.Model; import water.Freezable; import water.Key; public interface LeaderboardCell<V, SELF extends LeaderboardCell> extends Freezable<SELF> { /** * @return the column to which this cell belongs. */ LeaderboardColumn getColumn(); /** * @return the key of the model (leaderboard row) to which this cell belongs. */ Key<Model> getModelId(); /** * Gets the current value of the cell. * If the value is not immediately available, this should return null, so that the client code can decide to call {@link #fetch()}. * This is an accessor, it is safe to call {@link #getValue()} multiple times without triggering any side effect. * @return the current cell value. */ V getValue(); /** * Sets the cell value. * This can be useful for optimization, when the value is expensive to compute and available at some point during the automl run. * @param value */ void setValue(V value); /** * @return true if the value is not available */ default boolean isNA() { return getValue() == null; } /** * Fetch the value if necessary: this may be a long running task. * @return */ default V fetch() { return getValue(); } }
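Any extra leaderboard column is just another LeaderboardCell implementation, following the same pattern as the AlgoName and ModelId cells elsewhere in this package. Below is a hedged sketch of a hypothetical cell holding a free-form note per model; the class name, column name and hidden flag are illustrative, and only the LeaderboardCell contract above plus the Iced/Key types already used by the other cells are assumed.

package hex.leaderboard;

import hex.Model;
import water.Iced;
import water.Key;

/**
 * Illustrative sketch of a custom leaderboard cell: a free-form text note attached to a model.
 */
public class ModelNote extends Iced<ModelNote> implements LeaderboardCell<String, ModelNote> {
    // Hidden by default, so it only shows up when explicitly requested as an extension.
    public static final LeaderboardColumn COLUMN = new LeaderboardColumn("note", "string", "%s", true);

    private final Key<Model> _modelId;
    private String _note;

    public ModelNote(Model model, String note) {
        _modelId = model._key;
        _note = note;
    }

    @Override public LeaderboardColumn getColumn() { return COLUMN; }
    @Override public Key<Model> getModelId() { return _modelId; }
    @Override public String getValue() { return _note; }           // null means "not available" (isNA)
    @Override public void setValue(String value) { _note = value; }
}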
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/leaderboard/LeaderboardColumn.java
package hex.leaderboard; /** * Meta info for a leaderboard column. */ public class LeaderboardColumn { private final boolean _hidden; private final String _name; private final String _type; private final String _format; public LeaderboardColumn(String name, String type, String format) { this(name, type, format, false); } public LeaderboardColumn(String name, String type, String format, boolean hidden) { _name = name; _type = type; _format = format; _hidden = hidden; } public String getName() { return _name; } public String getType() { return _type; } public String getFormat() { return _format; } public boolean isHidden() { return _hidden; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/leaderboard/LeaderboardExtensionsProvider.java
package hex.leaderboard; import hex.Model; import water.Iced; /** * This provider allows a model generator (e.g. {@link ai.h2o.automl.AutoML}) to produce * optional columns for the leaderboard created from the models. */ public abstract class LeaderboardExtensionsProvider extends Iced<LeaderboardExtensionsProvider> { /** * Alias for "all extensions" where a list of extension names is required. */ public static final String ALL = "ALL"; /** * Generates the extension cells for a given model. * It is expected that all cells associated with a model are from a different extension, * i.e. they should all have a different {@link LeaderboardCell#getColumn()}. * @param model * @return an array of {@link LeaderboardCell} for the given model. */ public abstract LeaderboardCell[] createExtensions(Model model); }
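A model generator supplies the optional columns by handing the leaderboard a LeaderboardExtensionsProvider. The following hedged sketch wires up the TrainingTime and ScoringTimePerRow cells defined elsewhere in this package; the provider class name and the captured frame key are illustrative.

package hex.leaderboard;

import hex.Model;
import water.Key;
import water.fvec.Frame;

/**
 * Illustrative provider exposing training time and per-row scoring time for every model
 * added to the leaderboard.
 */
public class TimingExtensionsProvider extends LeaderboardExtensionsProvider {
    private final Key<Frame> _leaderboardFrameKey;   // frame used to time per-row scoring

    public TimingExtensionsProvider(Key<Frame> leaderboardFrameKey) {
        _leaderboardFrameKey = leaderboardFrameKey;
    }

    @Override
    public LeaderboardCell[] createExtensions(Model model) {
        return new LeaderboardCell[]{
            new TrainingTime(model),
            new ScoringTimePerRow(model._key, _leaderboardFrameKey),
        };
    }
}

A leaderboard would typically receive such a provider via setExtensionsProvider(...) before any models are added, so that createExtensions runs for each model as it arrives.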
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/leaderboard/MetricScore.java
package hex.leaderboard; import hex.Model; import water.Iced; import water.Key; import java.util.HashMap; import java.util.Map; /** * A cell for a scoring metric column. */ public class MetricScore extends Iced<MetricScore> implements LeaderboardCell<Double, MetricScore> { private static final Map<String, LeaderboardColumn> COLUMNS = new HashMap<>(); public static LeaderboardColumn getColumn(String metric) { if (!COLUMNS.containsKey(metric)) { COLUMNS.put(metric, new LeaderboardColumn(metric, "double", "%.6f")); } return COLUMNS.get(metric); } private final Key<Model> _modelId; private final String _metric; private Double _score; public MetricScore(Key<Model> modelId, String metric, Double score) { _modelId = modelId; _metric = metric; _score = score; } @Override public LeaderboardColumn getColumn() { return MetricScore.getColumn(_metric); } @Override public Key<Model> getModelId() { return _modelId; } @Override public Double getValue() { return _score; } @Override public void setValue(Double value) { _score = value; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/leaderboard/ModelId.java
package hex.leaderboard; import hex.Model; import water.Iced; import water.Key; /** * A cell for the model id column. */ public class ModelId extends Iced<ModelId> implements LeaderboardCell<String, ModelId> { public static final LeaderboardColumn COLUMN = new LeaderboardColumn("model_id", "string", "%s"); private final Key<Model> _modelId; public ModelId(Key<Model> modelId) { _modelId = modelId; } @Override public LeaderboardColumn getColumn() { return COLUMN; } @Override public Key<Model> getModelId() { return _modelId; } @Override public String getValue() { return _modelId.toString(); } @Override public void setValue(String value) { throw new UnsupportedOperationException(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/leaderboard/ScoringTimePerRow.java
package hex.leaderboard; import hex.Model; import water.Iced; import water.Key; import water.exceptions.H2OIllegalArgumentException; import water.fvec.Frame; /** * A cell computing lazily the average time needed to score a single row with the model. * If there is a leaderboard frame available, this average time will be computed by scoring the entire frame. * Otherwise, the training frame will be used. */ public class ScoringTimePerRow extends Iced<ScoringTimePerRow> implements LeaderboardCell<Double, ScoringTimePerRow> { public static final LeaderboardColumn COLUMN = new LeaderboardColumn("predict_time_per_row_ms", "double", "%.6f"); private final Key<Model> _modelId; private final Key<Frame> _leaderboardFrameId; private Double _scoringTimePerRowMillis; public ScoringTimePerRow(Model model, Frame leaderboardFrame) { this(model._key, leaderboardFrame == null ? null : leaderboardFrame._key ); } public ScoringTimePerRow(Key<Model> modelId, Key<Frame> leaderboardFrameId) { _modelId = modelId; _leaderboardFrameId = leaderboardFrameId; } @Override public LeaderboardColumn getColumn() { return COLUMN; } @Override public Key<Model> getModelId() { return _modelId; } @Override public Double getValue() { if (_scoringTimePerRowMillis == null && _leaderboardFrameId == null) { throw new H2OIllegalArgumentException("predict_time_per_row_ms requires a leaderboard frame!"); } return _scoringTimePerRowMillis; } @Override public void setValue(Double value) { _scoringTimePerRowMillis = value; } @Override public boolean isNA() { return getValue() == null || getValue() < 0; } @Override public Double fetch() { if (getValue() == null) { try { Model model = _modelId.get(); Frame scoringFrame = _leaderboardFrameId != null ? _leaderboardFrameId.get() : null; if (scoringFrame != null) { long nrows = scoringFrame.numRows(); long start = System.nanoTime(); model.score(scoringFrame).delete(); long stop = System.nanoTime(); setValue((stop-start)/nrows/1e6); } else { setValue(-1d); } } catch (Exception e) { setValue(-1d); } } return getValue(); } }
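The milliseconds-per-row figure reported by fetch() above is simply (stop - start) / nrows / 1e6 with nanosecond timestamps. A tiny arithmetic check with made-up numbers:

public class ScoringTimeSketch {
    public static void main(String[] args) {
        long start = 0L;                      // illustrative timestamps in nanoseconds
        long stop  = 2_500_000_000L;          // 2.5 s spent scoring the whole frame
        long nrows = 1_000_000;               // rows in the scoring frame
        double msPerRow = (double) (stop - start) / nrows / 1e6;
        System.out.println(msPerRow);         // 0.0025 ms per row (2.5 microseconds)
    }
}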
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/leaderboard/TrainingTime.java
package hex.leaderboard; import hex.Model; import water.Iced; import water.Key; import water.util.Log; /** * A cell providing the time needed to train the model. */ public class TrainingTime extends Iced<TrainingTime> implements LeaderboardCell<Long, TrainingTime> { public static final LeaderboardColumn COLUMN = new LeaderboardColumn("training_time_ms", "long", "%s"); private final Key<Model> _modelId; private Long _trainingTimeMillis; public TrainingTime(Model model) { _modelId = model._key; _trainingTimeMillis = model._output._run_time; } public TrainingTime(Key<Model> modelId) { _modelId = modelId; } @Override public LeaderboardColumn getColumn() { return COLUMN; } @Override public Key<Model> getModelId() { return _modelId; } @Override public Long getValue() { return _trainingTimeMillis; } @Override public void setValue(Long value) { _trainingTimeMillis = value; } @Override public boolean isNA() { return getValue() == null || getValue() < 0; } @Override public Long fetch() { if (getValue() == null) { try { setValue(_modelId.get()._output._run_time); } catch (Exception e) { Log.err("Could not retrieve training time for model "+_modelId, e); setValue(-1L); } } return getValue(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/quantile/Quantile.java
package hex.quantile; import hex.ModelBuilder; import hex.ModelCategory; import water.*; import water.fvec.*; import water.util.ArrayUtils; import water.util.Log; import java.util.Arrays; /** * Quantile model builder... building a simple QuantileModel */ public class Quantile extends ModelBuilder<QuantileModel,QuantileModel.QuantileParameters,QuantileModel.QuantileOutput> { private int _ncols; @Override protected boolean logMe() { return false; } @Override public boolean isSupervised() { return false; } // Called from Nano thread; start the Quantile Job on a F/J thread public Quantile( QuantileModel.QuantileParameters parms ) { super(parms); init(false); } public Quantile( QuantileModel.QuantileParameters parms, Job job ) { super(parms, job); init(false); } @Override public Driver trainModelImpl() { return new QuantileDriver(); } @Override public ModelCategory[] can_build() { return new ModelCategory[]{ModelCategory.Unknown}; } // any number of chunks is fine - don't rebalance - it's not worth it for a few passes over the data (at most) @Override protected int desiredChunks(final Frame original_fr, boolean local) { return 1; } /** Initialize the ModelBuilder, validating all arguments and preparing the * training frame. This call is expected to be overridden in the subclasses * and each subclass will start with "super.init();". This call is made * by the front-end whenever the GUI is clicked, and needs to be fast; * heavy-weight prep needs to wait for the trainModel() call. * * Validate the probs. */ @Override public void init(boolean expensive) { super.init(expensive); for( double p : _parms._probs ) if( p < 0.0 || p > 1.0 ) error("_probs","Probabilities must be between 0 and 1"); _ncols = train().numCols()-numSpecialCols(); //offset/weights/nfold - should only ever be weights if ( numSpecialCols() == 1 && _weights == null) throw new IllegalArgumentException("The only special Vec that is supported for Quantiles is observation weights."); if ( numSpecialCols() >1 ) throw new IllegalArgumentException("Cannot handle more than 1 special vec (weights)"); } private static class SumWeights extends MRTask<SumWeights> { double sum; @Override public void map(Chunk c, Chunk w) { for (int i=0;i<c.len();++i) if (!c.isNA(i)) { double wt = w.atd(i); // For now: let the user give small weights, results are probably not very good (same as for wtd.quantile in R) // if (wt > 0 && wt < 1) throw new H2OIllegalArgumentException("Quantiles only accepts weights that are either 0 or >= 1."); sum += wt; } } @Override public void reduce(SumWeights mrt) { sum+=mrt.sum; } } // ---------------------- private class QuantileDriver extends Driver { @Override public void computeImpl() { QuantileModel model = null; try { init(true); // The model to be built model = new QuantileModel(dest(), _parms, new QuantileModel.QuantileOutput(Quantile.this)); model._output._parameters = _parms; model._output._quantiles = new double[_ncols][_parms._probs.length]; model.delete_and_lock(_job); // --- // Run the main Quantile Loop Vec vecs[] = train().vecs(); for( int n=0; n<_ncols; n++ ) { if( stop_requested() ) return; // Stopped/cancelled Vec vec = vecs[n]; if (vec.isBad() || vec.isCategorical() || vec.isString() || vec.isTime() || vec.isUUID()) { model._output._quantiles[n] = new double[_parms._probs.length]; Arrays.fill(model._output._quantiles[n], Double.NaN); continue; } double sumRows=_weights == null ? 
vec.length()-vec.naCnt() : new SumWeights().doAll(vec, _weights).sum; // Compute top-level histogram Histo h1 = new Histo(vec.min(),vec.max(),0,sumRows,vec.isInt()); h1 = _weights==null ? h1.doAll(vec) : h1.doAll(vec, _weights); // For each probability, see if we have it exactly - or else run // passes until we do. for( int p = 0; p < _parms._probs.length; p++ ) { double prob = _parms._probs[p]; Histo h = h1; // Start from the first global histogram model._output._iterations++; // At least one iter per-prob-per-column while( Double.isNaN(model._output._quantiles[n][p] = h.findQuantile(prob,_parms._combine_method)) ) { h = _weights == null ? h.refinePass(prob).doAll(vec) : h.refinePass(prob).doAll(vec, _weights); // Full pass at higher resolution model._output._iterations++; // also count refinement iterations } // Update the model model.update(_job); // Update model in K/V store _job.update(0); // One unit of work } StringBuilder sb = new StringBuilder(); sb.append("Quantile: iter: ").append(model._output._iterations).append(" Qs=").append(Arrays.toString(model._output._quantiles[n])); Log.debug(sb); } } finally { if( model != null ) model.unlock(_job); } } } public static class StratifiedQuantilesTask extends H2O.H2OCountedCompleter<StratifiedQuantilesTask> { // INPUT final double _prob; final Vec _response; //vec to compute quantile for final Vec _weights; //obs weights final Vec _strata; //continuous integer range mapping into the _quantiles[id][] final QuantileModel.CombineMethod _combine_method; // OUTPUT public double[/*strata*/] _quantiles; public int[] _nids; public StratifiedQuantilesTask(H2O.H2OCountedCompleter cc, double prob, Vec response, // response Vec weights, // obs weights Vec strata, // stratification QuantileModel.CombineMethod combine_method) { super(cc); _response = response; _prob=prob; _combine_method=combine_method; _weights=weights; _strata=strata; } @Override public void compute2() { final int strataMin = (int)_strata.min(); final int strataMax = (int)_strata.max(); if (strataMin < 0 && strataMax < 0) { Log.warn("No quantiles can be computed since there are no non-OOB rows."); tryComplete(); return; } final int nstrata = strataMax - strataMin + 1; Log.info("Computing quantiles for (up to) " + nstrata + " different strata."); _quantiles = new double[nstrata]; _nids = new int[nstrata]; Arrays.fill(_quantiles,Double.NaN); Vec weights = _weights != null ? 
_weights : _response.makeCon(1); for (int i=0;i<nstrata;++i) { //loop over nodes Vec newWeights = weights.makeCopy(); //only keep weights for this stratum (node), set rest to 0 if (_strata!=null) { _nids[i] = strataMin+i; new KeepOnlyOneStrata(_nids[i]).doAll(_strata, newWeights); } double sumRows = new SumWeights().doAll(_response, newWeights).sum; if (sumRows>0) { Histo h = new Histo(_response.min(), _response.max(), 0, sumRows, _response.isInt()); h.doAll(_response, newWeights); while (Double.isNaN(_quantiles[i] = h.findQuantile(_prob, _combine_method))) h = h.refinePass(_prob).doAll(_response, newWeights); newWeights.remove(); //sanity check quantiles assert (_quantiles[i] <= _response.max() + 1e-6); assert (_quantiles[i] >= _response.min() - 1e-6); } } if (_weights != weights) weights.remove(); tryComplete(); } private static class KeepOnlyOneStrata extends MRTask<KeepOnlyOneStrata> { KeepOnlyOneStrata(int stratumToKeep) { this.stratumToKeep = stratumToKeep; } int stratumToKeep; @Override public void map(Chunk strata, Chunk newW) { for (int i=0; i<strata._len; ++i) { // Log.info("NID:" + ((int) strata.at8(i))); if ((int) strata.at8(i) != stratumToKeep) newW.set(i, 0); } } } } // ------------------------------------------------------------------------- private final static class Histo extends MRTask<Histo> { private static final int NBINS = 1024; // Default bin count private final int _nbins; // Actual bin count private final double _lb; // Lower bound of bin[0] private final double _step; // Step-size per-bin private final double _start_row; // Starting cumulative count of weighted rows for this lower-bound private final double _nrows; // Total datasets (weighted) rows private final boolean _isInt; // Column only holds ints // Big Data output result double _bins[/*nbins*/]; // Weighted count of rows in each bin double _mins[/*nbins*/]; // Smallest element in bin double _maxs[/*nbins*/]; // Largest element in bin private Histo(double lb, double ub, double start_row, double nrows, boolean isInt) { boolean is_int = (isInt && (ub - lb < NBINS)); _nbins = is_int ? (int) (ub - lb + 1) : NBINS; _lb = lb; double ulp = Math.ulp(Math.max(Math.abs(lb), Math.abs(ub))); _step = is_int ? 1 : (ub + ulp - lb) / _nbins; _start_row = start_row; _nrows = nrows; _isInt = isInt; } @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("range : " + _lb + " ... 
" + (_lb + _nbins * _step)); sb.append("\npsum0 : " + _start_row); sb.append("\ncounts: " + Arrays.toString(_bins)); sb.append("\nmaxs : " + Arrays.toString(_maxs)); sb.append("\nmins : " + Arrays.toString(_mins)); sb.append("\n"); return sb.toString(); } @Override public void map(Chunk chk, Chunk weight) { _bins = new double[_nbins]; _mins = new double[_nbins]; _maxs = new double[_nbins]; Arrays.fill(_mins, Double.MAX_VALUE); Arrays.fill(_maxs, -Double.MAX_VALUE); double d; for (int row = 0; row < chk._len; row++) { double w = weight.atd(row); if (w == 0) continue; if (!Double.isNaN(d = chk.atd(row))) { // na.rm=true double idx = (d - _lb) / _step; if (!(0.0 <= idx && idx < _bins.length)) continue; int i = (int) idx; if (_bins[i] == 0) _mins[i] = _maxs[i] = d; // Capture unique value else { if (d < _mins[i]) _mins[i] = d; if (d > _maxs[i]) _maxs[i] = d; } _bins[i] += w; // Bump row counts by row weight } } } @Override public void map(Chunk chk) { map(chk, new C0DChunk(1, chk.len())); } @Override public void reduce(Histo h) { for (int i = 0; i < _nbins; i++) { // Keep min/max if (_mins[i] > h._mins[i]) _mins[i] = h._mins[i]; if (_maxs[i] < h._maxs[i]) _maxs[i] = h._maxs[i]; } ArrayUtils.add(_bins, h._bins); } /** @return Quantile for probability prob, or NaN if another pass is needed. */ double findQuantile( double prob, QuantileModel.CombineMethod method ) { double p2 = prob*(_nrows-1); // Desired fractional row number for this probability long r2 = (long)p2; int loidx = findBin(r2); // Find bin holding low value double lo = (loidx == _nbins) ? binEdge(_nbins) : _maxs[loidx]; if( loidx<_nbins && r2==p2 && _mins[loidx]==lo ) return lo; // Exact row number, exact bin? Then quantile is exact long r3 = r2+1; int hiidx = findBin(r3); // Find bin holding high value double hi = (hiidx == _nbins) ? binEdge(_nbins) : _mins[hiidx]; if( loidx==hiidx ) // Somewhere in the same bin? return (lo==hi) ? lo : Double.NaN; // Only if bin is constant, otherwise must refine the bin // Split across bins - the interpolate between the hi of the lo bin, and // the lo of the hi bin return computeQuantile(lo,hi,r2,_nrows,prob,method); } private double binEdge( int idx ) { return _lb+_step*idx; } // bin for row; can be _nbins if just off the end (normally expect 0 to nbins-1) // row == position in (weighted) population private int findBin( double row ) { long sum = (long)_start_row; for( int i=0; i<_nbins; i++ ) if( (long)row < (sum += _bins[i]) ) return i; return _nbins; } // Run another pass over the data, with refined endpoints, to home in on // the exact elements for this probability. Histo refinePass( double prob ) { double prow = prob*(_nrows-1); // Desired fractional row number for this probability long lorow = (long)prow; // Lower integral row number int loidx = findBin(lorow); // Find bin holding low value // If loidx is the last bin, then high must be also the last bin - and we // have an exact quantile (equal to the high bin) and we didn't need // another refinement pass assert loidx < _nbins; double lo = _mins[loidx]; // Lower end of range to explore // If probability does not hit an exact row, we need the elements on // either side - so the next row up from the low row long hirow = lorow==prow ? lorow : lorow+1; int hiidx = findBin(hirow); // Find bin holding high value // Upper end of range to explore - except at the very high end cap double hi = hiidx==_nbins ? 
binEdge(_nbins) : _maxs[hiidx]; long sum = (long)_start_row; for( int i=0; i<loidx; i++ ) sum += _bins[i]; return new Histo(lo,hi,sum,_nrows,_isInt); } } /** Compute the correct final quantile from these 4 values. If the lo and hi * elements are equal, use them. However if they differ, then there is no * single value which exactly matches the desired quantile. There are * several well-accepted definitions in this case - including picking either * the lo or the hi, or averaging them, or doing a linear interpolation. * @param lo the highest element less than or equal to the desired quantile * @param hi the lowest element greater than or equal to the desired quantile * @param row row number (zero based) of the lo element; high element is +1 * @return desired quantile. */ static double computeQuantile( double lo, double hi, double row, double nrows, double prob, QuantileModel.CombineMethod method ) { if( lo==hi ) return lo; // Equal; pick either if( method == null ) method= QuantileModel.CombineMethod.INTERPOLATE; switch( method ) { case INTERPOLATE: return linearInterpolate(lo,hi,row,nrows,prob); case AVERAGE: return 0.5*(hi+lo); case LOW: return lo; case HIGH: return hi; default: Log.info("Unknown even sample size quantile combination type: " + method + ". Doing linear interpolation."); return linearInterpolate(lo,hi,row,nrows,prob); } } private static double linearInterpolate(double lo, double hi, double row, double nrows, double prob) { // Unequal, linear interpolation double plo = (row+0)/(nrows-1); // Note that row numbers are inclusive on the end point, means we need a -1 double phi = (row+1)/(nrows-1); // Passed in the row number for the low value, high is the next row, so +1 assert plo <= prob && prob <= phi; return lo + (hi-lo)*(prob-plo)/(phi-plo); // Classic linear interpolation } @Override public boolean haveMojo() { return false; } @Override public boolean havePojo() { return false; } public static double calcQuantile(Vec v, double quantile) { Frame fr = new Frame(Key.make(), new String[]{"V"}, new Vec[]{v}); try { DKV.put(fr._key, fr); QuantileModel.QuantileParameters parms = new QuantileModel.QuantileParameters(); parms._train = fr._key; parms._probs = new double[]{quantile}; QuantileModel kmm = new Quantile(parms).trainModelNested(null); kmm.delete(); return kmm._output._quantiles[0][0]; } finally { DKV.remove(fr._key); } } }
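A note on the INTERPOLATE combine method above: it is the classic "type-7" linear interpolation between the two neighbouring order statistics. Below is a minimal standalone sketch (plain Java, hypothetical class name, operating on a small fully sorted in-memory sample rather than the multi-pass histogram used by Quantile) that reproduces the same arithmetic as computeQuantile/linearInterpolate.

import java.util.Arrays;

public class QuantileInterpolationSketch {
  // Mirrors linearInterpolate(...) for a fully sorted sample held in memory.
  static double quantile(double[] sorted, double prob) {
    double fracRow = prob * (sorted.length - 1); // desired fractional row number
    int lorow = (int) fracRow;                   // row of the lower neighbour
    double lo = sorted[lorow];
    if (lorow == fracRow) return lo;             // probability hits a row exactly
    double hi = sorted[lorow + 1];
    double plo = (double) lorow / (sorted.length - 1);
    double phi = (double) (lorow + 1) / (sorted.length - 1);
    return lo + (hi - lo) * (prob - plo) / (phi - plo); // classic linear interpolation
  }

  public static void main(String[] args) {
    double[] xs = {5, 3, 1, 4, 2};
    Arrays.sort(xs);                             // {1, 2, 3, 4, 5}
    System.out.println(quantile(xs, 0.3));       // ~2.2: between the 2nd and 3rd order statistics
  }
}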
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/quantile/QuantileModel.java
package hex.quantile; import hex.Model; import hex.ModelCategory; import hex.ModelMetrics; import water.H2O; import water.Key; public class QuantileModel extends Model<QuantileModel,QuantileModel.QuantileParameters,QuantileModel.QuantileOutput> { public enum CombineMethod { INTERPOLATE, AVERAGE, LOW, HIGH } public static class QuantileParameters extends Model.Parameters { // Set of probabilities to compute public double _probs[/*Q*/] = new double[]{0.001,0.01,0.1,0.25,0.333,0.50,0.667,0.75,0.9,0.99,0.999}; public CombineMethod _combine_method = CombineMethod.INTERPOLATE; protected boolean defaultDropConsCols() { return false; } public String algoName() { return "Quantiles"; } public String fullName() { return "Quantiles"; } public String javaName() { return QuantileModel.class.getName(); } @Override public long progressUnits() { return train().numCols()*_probs.length; } } public static class QuantileOutput extends Model.Output { public QuantileParameters _parameters; // Model parameters public int _iterations; // Iterations executed public double _quantiles[/*N*/][/*Q*/]; // Our N columns, Q quantiles reported public QuantileOutput( Quantile b ) { super(b); } @Override public ModelCategory getModelCategory() { return ModelCategory.Unknown; } } QuantileModel( Key selfKey, QuantileParameters parms, QuantileOutput output) { super(selfKey,parms,output); } @Override public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) { throw H2O.unimpl("No model metrics for Quantile."); } @Override protected double[] score0(double data[/*ncols*/], double preds[/*nclasses+1*/]) { throw H2O.unimpl(); } }
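A hedged usage sketch for these parameters, in the spirit of the builder examples elsewhere in this source tree. The frame variable fr (already published to the DKV) and the blocking trainModel().get() call pattern are assumptions about the caller's context, not part of this file.

// Sketch only: assumes `fr` is a Frame already in the DKV.
QuantileModel.QuantileParameters parms = new QuantileModel.QuantileParameters();
parms._train = fr._key;
parms._probs = new double[]{0.25, 0.5, 0.75};
parms._combine_method = QuantileModel.CombineMethod.INTERPOLATE;
QuantileModel qm = new hex.quantile.Quantile(parms).trainModel().get(); // block until done
double[][] qs = qm._output._quantiles;                                  // indexed [column][prob]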
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/schemas/ClusteringModelBuilderSchema.java
package hex.schemas; import hex.ClusteringModelBuilder; import water.api.schemas3.ClusteringModelParametersSchemaV3; public class ClusteringModelBuilderSchema<B extends ClusteringModelBuilder, S extends ClusteringModelBuilderSchema<B,S,P>, P extends ClusteringModelParametersSchemaV3> extends ModelBuilderSchema<B,S,P> { }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/schemas/GridSchemaV99.java
package hex.schemas; import hex.Model; import hex.ModelMetrics; import hex.grid.Grid; import water.DKV; import water.Key; import water.api.*; import water.api.schemas3.*; import water.exceptions.H2OIllegalArgumentException; import water.util.TwoDimTable; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Set; /** * REST endpoint representing single grid object. * * FIXME: Grid should contain also grid definition - model parameters and definition of hyper parameters. */ public class GridSchemaV99 extends SchemaV3<Grid, GridSchemaV99> { // // Inputs // @API(help = "Grid id") public KeyV3.GridKeyV3 grid_id; @API(help = "Model performance metric to sort by. Examples: logloss, residual_deviance, mse, rmse, mae,rmsle, auc, r2, f1, recall, precision, accuracy, mcc, err, err_count, lift_top_group, max_per_class_error", required = false, direction = API.Direction.INOUT) public String sort_by; @API(help = "Specify whether sort order should be decreasing.", required = false, direction = API.Direction.INOUT) public boolean decreasing; // // Outputs // @API(help = "Model IDs built by a grid search") public KeyV3.ModelKeyV3[] model_ids; @API(help = "Used hyper parameters.", direction = API.Direction.OUTPUT) public String[] hyper_names; @API(help = "List of failed parameters", direction = API.Direction.OUTPUT) public ModelParametersSchemaV3[] failed_params; // Using common ancestor of XXXParamsV3 @API(help = "List of detailed warning messages", direction = API.Direction.OUTPUT) public String[] warning_details; @API(help = "List of detailed failure messages", direction = API.Direction.OUTPUT) public String[] failure_details; @API(help = "List of detailed failure stack traces", direction = API.Direction.OUTPUT) public String[] failure_stack_traces; @API(help = "List of raw parameters causing model building failure", direction = API.Direction.OUTPUT) public String[][] failed_raw_params; @API(help = "Training model metrics for the returned models; only returned if sort_by is set", direction = API.Direction.OUTPUT) public ModelMetricsBaseV3[] training_metrics; @API(help = "Validation model metrics for the returned models; only returned if sort_by is set", direction = API.Direction.OUTPUT) public ModelMetricsBaseV3[] validation_metrics; @API(help = "Cross validation model metrics for the returned models; only returned if sort_by is set", direction = API.Direction.OUTPUT) public ModelMetricsBaseV3[] cross_validation_metrics; @API(help = "Cross validation model metrics summary for the returned models; only returned if sort_by is set", direction = API.Direction.OUTPUT) public TwoDimTableV3[] cross_validation_metrics_summary; @API(help = "Directory for Grid automatic checkpointing", direction = API.Direction.OUTPUT) public String export_checkpoints_dir; @API(help="Summary", direction=API.Direction.OUTPUT) TwoDimTableV3 summary_table; @API(help="Scoring history", direction=API.Direction.OUTPUT, level=API.Level.secondary) TwoDimTableV3 scoring_history; @Override public Grid createImpl() { return Grid.GRID_PROTO; } @Override public GridSchemaV99 fillFromImpl(Grid grid) { Key<Model>[] gridModelKeys = grid.getModelKeys(); // Return only keys which are referencing to existing objects in DKV // However, here is still implicit race, since we are sending // keys to client, but referenced models can be deleted in meantime // Hence, client has to be responsible for handling this situation // - call getModel and check for null model List<Key<Model>> modelKeys = new 
ArrayList<>(gridModelKeys.length); // pre-allocate for (Key k : gridModelKeys) { if (k != null && DKV.get(k) != null) { modelKeys.add(k); } } // Default sort order -- TODO: Outsource if (sort_by == null && modelKeys.size() > 0 && modelKeys.get(0) != null) { Model m = DKV.getGet(modelKeys.get(0)); Model.GridSortBy sortBy = m != null ? m.getDefaultGridSortBy() : null; if (sortBy != null) { sort_by = sortBy._name; decreasing = sortBy._decreasing; } } // Check that we have a valid metric // If not, show all possible metrics if (modelKeys.size() > 0 && sort_by != null) { Set<String> possibleMetrics = ModelMetrics.getAllowedMetrics(modelKeys.get(0)); if (!possibleMetrics.contains(sort_by.toLowerCase())) { throw new H2OIllegalArgumentException("Invalid argument for sort_by specified. Must be one of: " + Arrays.toString(possibleMetrics.toArray(new String[0]))); } } // Are we sorting by model metrics? if (null != sort_by && ! sort_by.isEmpty()) { // sort the model keys modelKeys = ModelMetrics.sortModelsByMetric(sort_by, decreasing, modelKeys); // fill the metrics arrays training_metrics = new ModelMetricsBaseV3[modelKeys.size()]; validation_metrics = new ModelMetricsBaseV3[modelKeys.size()]; cross_validation_metrics = new ModelMetricsBaseV3[modelKeys.size()]; cross_validation_metrics_summary = new TwoDimTableV3[modelKeys.size()]; for (int i = 0; i < modelKeys.size(); i++) { Model m = DKV.getGet(modelKeys.get(i)); if (m != null) { Model.Output o = m._output; if (null != o._training_metrics) training_metrics[i] = (ModelMetricsBaseV3) SchemaServer.schema(3, o._training_metrics).fillFromImpl(o ._training_metrics); if (null != o._validation_metrics) validation_metrics[i] = (ModelMetricsBaseV3) SchemaServer.schema(3, o ._validation_metrics).fillFromImpl(o._validation_metrics); if (null != o._cross_validation_metrics) cross_validation_metrics[i] = (ModelMetricsBaseV3) SchemaServer .schema(3, o._cross_validation_metrics).fillFromImpl(o._cross_validation_metrics); if (o._cross_validation_metrics_summary != null) cross_validation_metrics_summary[i] = new TwoDimTableV3(o._cross_validation_metrics_summary); } } } KeyV3.ModelKeyV3[] modelIds = new KeyV3.ModelKeyV3[modelKeys.size()]; Key<Model>[] keys = new Key[modelKeys.size()]; for (int i = 0; i < modelIds.length; i++) { modelIds[i] = new KeyV3.ModelKeyV3(modelKeys.get(i)); keys[i] = modelIds[i].key(); } grid_id = new KeyV3.GridKeyV3(grid._key); model_ids = modelIds; hyper_names = grid.getHyperNames(); final Grid.SearchFailure failures = grid.getFailures(); failed_params = toModelParametersSchema(failures.getFailedParameters()); failure_details = failures.getFailureDetails(); failure_stack_traces = failures.getFailureStackTraces(); failed_raw_params = failures.getFailedRawParameters(); warning_details = failures.getWarningDetails(); export_checkpoints_dir = grid.getParams() != null ? 
grid.getParams()._export_checkpoints_dir : null; TwoDimTable t = grid.createSummaryTable(keys, sort_by, decreasing); if (t!=null) summary_table = new TwoDimTableV3().fillFromImpl(t); TwoDimTable h = grid.createScoringHistoryTable(); if (h != null) scoring_history = new TwoDimTableV3().fillFromImpl(h); return this; } private ModelParametersSchemaV3[] toModelParametersSchema(Model.Parameters[] modelParameters) { if (modelParameters==null) return null; ModelParametersSchemaV3[] result = new ModelParametersSchemaV3[modelParameters.length]; for (int i = 0; i < modelParameters.length; i++) { if (modelParameters[i] != null) { result[i] = (ModelParametersSchemaV3) SchemaServer.schema(SchemaServer.getLatestVersion(), modelParameters[i]) .fillFromImpl(modelParameters[i]); } else { result[i] = null; } } return result; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/schemas/GridSearchSchema.java
package hex.schemas; import hex.Model; import hex.grid.Grid; import hex.grid.HyperSpaceSearchCriteria; import water.H2O; import water.Key; import water.api.API; import water.api.schemas3.JobV3; import water.api.schemas3.KeyV3; import water.api.schemas3.ModelParametersSchemaV3; import water.api.schemas3.SchemaV3; import water.exceptions.H2OIllegalArgumentException; import water.util.IcedHashMap; import java.util.*; import static hex.grid.HyperSpaceWalker.BaseWalker.SUBSPACES; import static water.api.API.Direction.INOUT; import static water.api.API.Direction.INPUT; /** * This is a common grid search schema composed of two parameters: default parameters for a builder * and hyper parameters which are given as a mapping from parameter name to list of possible * values. * <p> * TODO: this needs a V99 subclass for bindings generation. * * @param <G> a specific implementation type for GridSearch holding results of grid search (model list) * @param <S> self type * @param <MP> actual model parameters type * @param <P> a specific model builder parameters schema, since we cannot derive it from P */ public class GridSearchSchema<G extends Grid<MP>, S extends GridSearchSchema<G, S, MP, P>, MP extends Model.Parameters, P extends ModelParametersSchemaV3> extends SchemaV3<G, S> { // // Inputs // @API(help = "Basic model builder parameters.", direction = INPUT) public P parameters; @API(help = "Grid search parameters.", direction = INOUT) public IcedHashMap<String, Object[]> hyper_parameters; @API(help = "Destination id for this grid; auto-generated if not specified.", direction = INOUT) public KeyV3.GridKeyV3 grid_id; @API(help="Hyperparameter search criteria, including strategy and early stopping directives. If it is not given, " + "exhaustive Cartesian is used.", direction = INOUT) public HyperSpaceSearchCriteriaV99 search_criteria; @API(help = "Level of parallelism during grid model building. 1 = sequential building (default). 0 for adaptive " + "parallelism. Any number > 1 sets the exact number of models built in parallel.") public int parallelism; @API(help= "Path to a directory where grid will save everything necessary to resume training after cluster crash.", direction = INPUT) public String recovery_dir; @API(help= "Key to use for the Job handling this GridSearch (internal use only).", direction = INPUT) public KeyV3.JobKeyV3 job_id; // // Outputs // @API(help = "Number of all models generated by grid search.", direction = API.Direction.OUTPUT) public int total_models; @API(help = "Job Key.", direction = API.Direction.OUTPUT) public JobV3 job; private static final int SEQUENTIAL_GRID_SEARCH = 1; // 1 model built at a time = sequential :) private static Map<String, Object[]> paramValuesToArray(Map<String, Object> params) { Map<String, Object[]> result = new HashMap<>(); for (Map.Entry<String, Object> e : params.entrySet()) { String k = e.getKey(); Object v = e.getValue(); Object[] arr = SUBSPACES.equals(k) ? ((List) v).stream().map(x -> paramValuesToArray((Map<String, Object>) x)).toArray(Map[]::new) : v instanceof List ? 
((List) v).toArray() : new Object[]{v}; result.put(k, arr); } return result; } @Override public S fillFromParms(Properties parms) { if( parms.containsKey("hyper_parameters") ) { Map<String, Object> m; try { m = water.util.JSONUtils.parse(parms.getProperty("hyper_parameters")); // Convert lists and singletons into arrays hyper_parameters.putAll(paramValuesToArray(m)); } catch (Exception e) { // usually JsonSyntaxException, but can also be things like IllegalStateException or NumberFormatException throw new H2OIllegalArgumentException("Can't parse the hyper_parameters dictionary; got error: " + e.getMessage() + " for raw value: " + parms.getProperty("hyper_parameters")); } parms.remove("hyper_parameters"); } if( parms.containsKey("search_criteria") ) { Properties p; try { p = water.util.JSONUtils.parseToProperties(parms.getProperty("search_criteria")); if (! p.containsKey("strategy")) { throw new H2OIllegalArgumentException("search_criteria.strategy", "null"); } HyperSpaceSearchCriteria.Strategy strategy = HyperSpaceSearchCriteria.Strategy.valueOf((String) p.get("strategy")); search_criteria = HyperSpaceSearchCriteriaV99.make(strategy); if (p.containsKey("max_runtime_secs") && Double.parseDouble((String) p.get("max_runtime_secs"))<0) { throw new H2OIllegalArgumentException("max_runtime_secs must be >= 0 (0 for unlimited time)", strategy.toString()); } if (p.containsKey("max_models") && Integer.parseInt((String) p.get("max_models"))<0) { throw new H2OIllegalArgumentException("max_models must be >= 0 (0 for all models)", strategy.toString()); } search_criteria.fillWithDefaults(); search_criteria.fillFromParms(p); } catch (Exception e) { // usually JsonSyntaxException, but can also be things like IllegalStateException or NumberFormatException throw new H2OIllegalArgumentException("Can't parse the search_criteria dictionary; got error: " + e.getMessage() + " for raw value: " + parms.getProperty("search_criteria")); } parms.remove("search_criteria"); } else { // Fall back to Cartesian if there's no search_criteria specified. search_criteria = new HyperSpaceSearchCriteriaV99.CartesianSearchCriteriaV99(); } if (parms.containsKey("grid_id")) { grid_id = new KeyV3.GridKeyV3(Key.make(parms.getProperty("grid_id"))); parms.remove("grid_id"); } if (parms.containsKey("parallelism")) { final String parallelismProperty = parms.getProperty("parallelism"); try { this.parallelism = Integer.parseInt(parallelismProperty); if (this.parallelism < 0) { throw new IllegalArgumentException(String.format("Parallelism level must be >= 0. Given value: '%d'", parallelism)); } } catch (NumberFormatException e) { final String errorMessage = String.format("Could not parse given parallelism value: '%s' - not a number.", parallelismProperty); throw new IllegalArgumentException(errorMessage, e); } parms.remove("parallelism"); } else { this.parallelism = SEQUENTIAL_GRID_SEARCH; } if (parms.containsKey("recovery_dir")) { this.recovery_dir = parms.getProperty("recovery_dir"); parms.remove("recovery_dir"); } if (parms.containsKey("job_id")) { this.job_id = new KeyV3.JobKeyV3(Key.make(parms.getProperty("job_id"))); parms.remove("job_id"); } // Do not check validity of parameters, GridSearch is tolerant of bad // parameters (on purpose, many hyper-param points in the grid might be // illegal for whatever reason). 
this.parameters.fillFromParms(parms, false); return (S) this; } @Override public S fillFromImpl(G impl) { throw H2O.unimpl(); //S s = super.fillFromImpl(impl); //s.parameters = createParametersSchema(); //s.parameters.fillFromImpl((MP) parameters.createImpl()); //return s; } }
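For illustration only (parameter names and values here are hypothetical), the hyper_parameters dictionary handled above ends up as a map from parameter name to an array of candidate values; a sketch of the equivalent structure built directly in Java:

// Mirrors what paramValuesToArray produces from {"ntrees": [50, 100], "max_depth": 5}
java.util.Map<String, Object[]> hyper = new java.util.HashMap<>();
hyper.put("ntrees", new Object[]{50, 100});  // a JSON list becomes an Object[]
hyper.put("max_depth", new Object[]{5});     // a JSON scalar becomes a singleton array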
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/schemas/HyperSpaceSearchCriteriaV99.java
package hex.schemas; import hex.ScoreKeeper.StoppingMetric; import hex.grid.HyperSpaceSearchCriteria; import hex.grid.HyperSpaceSearchCriteria.RandomDiscreteValueSearchCriteria; import water.api.API; import water.api.EnumValuesProvider; import water.api.schemas3.SchemaV3; import water.exceptions.H2OIllegalArgumentException; import water.util.PojoUtils; /** * Search criteria for a hyperparameter search including directives for how to search and * when to stop the search. */ public class HyperSpaceSearchCriteriaV99<I extends HyperSpaceSearchCriteria, S extends HyperSpaceSearchCriteriaV99<I,S>> extends SchemaV3<I, S> { @API(help = "Hyperparameter space search strategy.", required = true, valuesProvider = StrategyValuesProvider.class, direction = API.Direction.INOUT) public HyperSpaceSearchCriteria.Strategy strategy; public static HyperSpaceSearchCriteriaV99 make(HyperSpaceSearchCriteria.Strategy strategy){ switch (strategy) { case Cartesian: return new CartesianSearchCriteriaV99(); case RandomDiscrete: return new RandomDiscreteValueSearchCriteriaV99(); case Sequential: return new SequentialSearchCriteriaV99(); default: throw new H2OIllegalArgumentException("search_criteria.strategy", strategy.toString()); } } /** * Search criteria for an exhaustive Cartesian hyperparameter search. */ public static class CartesianSearchCriteriaV99 extends HyperSpaceSearchCriteriaV99<HyperSpaceSearchCriteria.CartesianSearchCriteria, CartesianSearchCriteriaV99> { public CartesianSearchCriteriaV99() { strategy = HyperSpaceSearchCriteria.Strategy.Cartesian; } } /** * Search criteria for random hyperparameter search using hyperparameter values given by * lists. Includes directives for how to search and when to stop the search. */ public static class RandomDiscreteValueSearchCriteriaV99 extends HyperSpaceSearchCriteriaV99<RandomDiscreteValueSearchCriteria, RandomDiscreteValueSearchCriteriaV99> { public RandomDiscreteValueSearchCriteriaV99() { strategy = HyperSpaceSearchCriteria.Strategy.RandomDiscrete; } public RandomDiscreteValueSearchCriteriaV99(long seed, int max_models, int max_runtime_secs) { strategy = HyperSpaceSearchCriteria.Strategy.RandomDiscrete; this.seed = seed; this.max_models = max_models; this.max_runtime_secs = max_runtime_secs; } @API(help = "Seed for random number generator; set to a value other than -1 for reproducibility.", direction = API.Direction.INOUT) public long seed; @API(help = "Maximum number of models to build (optional).", direction = API.Direction.INOUT) public int max_models; @API(help = "Maximum time to spend building models (optional).", direction = API.Direction.INOUT) public double max_runtime_secs; @API(help = "Early stopping based on convergence of stopping_metric. 
Stop if simple moving average of length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0 to disable)", level = API.Level.secondary, direction=API.Direction.INOUT, gridable = true) public int stopping_rounds; @API(help = "Metric to use for early stopping (AUTO: logloss for classification, deviance for regression)", valuesProvider = RandomSearchStoppingMetricValuesProvider.class, level = API.Level.secondary, direction=API.Direction.INOUT, gridable = true) public StoppingMetric stopping_metric; @API(help = "Relative tolerance for metric-based stopping criterion (stop if relative improvement is not at least this much)", level = API.Level.secondary, direction=API.Direction.INOUT, gridable = true) public double stopping_tolerance; @Override public RandomDiscreteValueSearchCriteria fillImpl(RandomDiscreteValueSearchCriteria impl) { RandomDiscreteValueSearchCriteria filledImpl = super.fillImpl(impl); PojoUtils.copyProperties(filledImpl.stoppingCriteria(), this, PojoUtils.FieldNaming.DEST_HAS_UNDERSCORES); return filledImpl; } @Override public RandomDiscreteValueSearchCriteriaV99 fillFromImpl(RandomDiscreteValueSearchCriteria impl) { RandomDiscreteValueSearchCriteriaV99 schema = super.fillFromImpl(impl); PojoUtils.copyProperties(this, impl.stoppingCriteria(), PojoUtils.FieldNaming.ORIGIN_HAS_UNDERSCORES); return schema; } } public static class RandomSearchStoppingMetricValuesProvider extends EnumValuesProvider<StoppingMetric> { public RandomSearchStoppingMetricValuesProvider() { super(StoppingMetric.class, new StoppingMetric[]{ // non-supported stopping metrics in grid search, cf. ScoringInfo.metric StoppingMetric.custom, StoppingMetric.custom_increasing, }); } } /** * Search criteria for a Sequential hyperparameter search. */ public static class SequentialSearchCriteriaV99 extends HyperSpaceSearchCriteriaV99<HyperSpaceSearchCriteria.SequentialSearchCriteria, SequentialSearchCriteriaV99> { public SequentialSearchCriteriaV99() { strategy = HyperSpaceSearchCriteria.Strategy.Sequential; } @API(help = "Maximum number of models to build (optional).", direction = API.Direction.INOUT) public int max_models; @API(help = "Maximum time to spend building models (optional).", direction = API.Direction.INOUT) public double max_runtime_secs; @API(help = "Early stopping based on convergence of stopping_metric. 
Stop if simple moving average of length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0 to disable)", level = API.Level.secondary, direction=API.Direction.INOUT, gridable = true) public int stopping_rounds; @API(help = "Metric to use for early stopping (AUTO: logloss for classification, deviance for regression)", valuesProvider = RandomSearchStoppingMetricValuesProvider.class, level = API.Level.secondary, direction=API.Direction.INOUT, gridable = true) public StoppingMetric stopping_metric; @API(help = "Relative tolerance for metric-based stopping criterion (stop if relative improvement is not at least this much)", level = API.Level.secondary, direction=API.Direction.INOUT, gridable = true) public double stopping_tolerance; @API(help = "Use early stopping", level = API.Level.secondary, direction=API.Direction.INOUT, gridable = true) public boolean early_stopping; @Override public HyperSpaceSearchCriteria.SequentialSearchCriteria fillImpl(HyperSpaceSearchCriteria.SequentialSearchCriteria impl) { HyperSpaceSearchCriteria.SequentialSearchCriteria filledImpl = super.fillImpl(impl); PojoUtils.copyProperties(filledImpl.stoppingCriteria(), this, PojoUtils.FieldNaming.DEST_HAS_UNDERSCORES); return filledImpl; } @Override public SequentialSearchCriteriaV99 fillFromImpl(HyperSpaceSearchCriteria.SequentialSearchCriteria impl) { SequentialSearchCriteriaV99 schema = super.fillFromImpl(impl); PojoUtils.copyProperties(this, impl.stoppingCriteria(), PojoUtils.FieldNaming.ORIGIN_HAS_UNDERSCORES); return schema; } } public static class StrategyValuesProvider extends EnumValuesProvider<HyperSpaceSearchCriteria.Strategy> { public StrategyValuesProvider() { super(HyperSpaceSearchCriteria.Strategy.class); } } /** * Fill with the default values from the corresponding Iced object. */ public S fillWithDefaults() { HyperSpaceSearchCriteria defaults = HyperSpaceSearchCriteria.make(strategy); fillFromImpl((I)defaults); return (S) this; } }
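A hedged sketch of configuring random-search criteria through the public constructor and fields declared above; the concrete numbers (seed, model count, runtime budget, stopping settings) are arbitrary examples.

// Random search: fixed seed, at most 50 models, one hour budget (values are arbitrary)
HyperSpaceSearchCriteriaV99.RandomDiscreteValueSearchCriteriaV99 criteria =
    new HyperSpaceSearchCriteriaV99.RandomDiscreteValueSearchCriteriaV99(1234L, 50, 3600);
criteria.stopping_rounds = 3;                               // stop after 3 non-improving scoring events
criteria.stopping_metric = hex.ScoreKeeper.StoppingMetric.AUTO;
criteria.stopping_tolerance = 1e-3;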
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/schemas/ModelBuilderSchema.java
package hex.schemas; import hex.Model; import hex.ModelBuilder; import hex.ModelCategory; import water.AutoBuffer; import water.H2O; import water.api.API; import water.api.SpecifiesHttpResponseCode; import water.api.schemas3.*; import water.util.IcedSortedHashMap; import water.util.ReflectionUtils; import java.util.Properties; public class ModelBuilderSchema<B extends ModelBuilder, S extends ModelBuilderSchema<B,S,P>, P extends ModelParametersSchemaV3> extends RequestSchemaV3<B,S> implements SpecifiesHttpResponseCode { // NOTE: currently ModelBuilderSchema has its own JSON serializer. // If you add more fields here you MUST add them to writeJSON_impl() below. public static class IcedHashMapStringModelBuilderSchema extends IcedSortedHashMap<String, ModelBuilderSchema> {} // Input fields @API(help="Model builder parameters.") public P parameters; // Output fields @API(help="The algo name for this ModelBuilder.", direction=API.Direction.OUTPUT) public String algo; @API(help="The pretty algo name for this ModelBuilder (e.g., Generalized Linear Model, rather than GLM).", direction=API.Direction.OUTPUT) public String algo_full_name; @API(help="Model categories this ModelBuilder can build.", values={ "Unknown", "Binomial", "Ordinal", "Multinomial", "Regression", "Clustering", "AutoEncoder", "DimReduction" }, direction = API.Direction.OUTPUT) public ModelCategory[] can_build; @API(help="Indicator whether the model is supervised or not.", direction=API.Direction.OUTPUT) public boolean supervised; @API(help="Should the builder always be visible, be marked as beta, or only visible if the user starts up with the experimental flag?", values = { "Experimental", "Beta", "AlwaysVisible" }, direction = API.Direction.OUTPUT) public ModelBuilder.BuilderVisibility visibility; @API(help = "Job Key", direction = API.Direction.OUTPUT) public JobV3 job; @API(help="Parameter validation messages", direction=API.Direction.OUTPUT) public ValidationMessageV3 messages[]; @API(help="Count of parameter validation errors", direction=API.Direction.OUTPUT) public int error_count; @API(help="HTTP status to return for this build.", json = false, direction=API.Direction.OUTPUT) public int __http_status; // The handler sets this to 400 if we're building and error_count > 0, else 200. public ModelBuilderSchema() { this.parameters = createParametersSchema(); } public void setHttpStatus(int status) { __http_status = status; } public int httpStatus() { return __http_status; } /** Factory method to create the model-specific parameters schema. */ final public P createParametersSchema() { // special case, because ModelBuilderSchema is the top of the tree and is parameterized differently if (ModelBuilderSchema.class == this.getClass()) { return (P)new ModelParametersSchemaV3(); } try { Class<? extends ModelParametersSchemaV3> parameters_class = ReflectionUtils.findActualClassParameter(this.getClass(), 2); return (P)parameters_class.newInstance(); } catch (Exception e) { throw H2O.fail("Caught exception trying to instantiate a builder instance for ModelBuilderSchema: " + this + ": " + e, e); } } public S fillFromParms(Properties parms) { this.parameters.fillFromParms(parms); return (S)this; } /** Create the corresponding impl object, as well as its parameters object. 
*/ @Override final public B createImpl() { return ModelBuilder.make(getSchemaType(), null, null); } @Override public B fillImpl(B impl) { super.fillImpl(impl); parameters.fillImpl(impl._parms); impl.init(false); // validate parameters return impl; } // Generic filling from the impl @Override public S fillFromImpl(B builder) { // DO NOT, because it can already be running: builder.init(false); // check params this.algo = builder._parms.algoName().toLowerCase(); this.algo_full_name = builder._parms.fullName(); this.supervised = builder.isSupervised(); this.can_build = builder.can_build(); this.visibility = builder.builderVisibility(); job = builder._job == null ? null : new JobV3(builder._job); // In general, you can ask about a builder in-progress, and the error // message list can be growing - so you have to be prepared to read it // racily. Common for Grid searches exploring with broken parameter // choices. final ModelBuilder.ValidationMessage[] msgs = builder._messages; // Racily growing; read only once if( msgs != null ) { this.messages = new ValidationMessageV3[msgs.length]; int i = 0; for (ModelBuilder.ValidationMessage vm : msgs) { if( vm != null ) this.messages[i++] = new ValidationMessageV3().fillFromImpl(vm); // TODO: version // Note: does default field_name mapping } // default fieldname hacks ValidationMessageV3.mapValidationMessageFieldNames(this.messages, new String[]{"_train", "_valid"}, new String[]{"training_frame", "validation_frame"}); } this.error_count = builder.error_count(); parameters = createParametersSchema(); parameters.fillFromImpl(builder._parms); parameters.model_id = builder.dest() == null ? null : new KeyV3.ModelKeyV3(builder.dest()); return (S)this; } // TODO: Drop this writeJSON_impl and use the default one. // TODO: Pull out the help text & metadata into the ParameterSchema for the front-end to display. public final AutoBuffer writeJSON_impl( AutoBuffer ab ) { ab.putJSON("job", job); ab.put1(','); ab.putJSONStr("algo", algo); ab.put1(','); ab.putJSONStr("algo_full_name", algo_full_name); ab.put1(','); ab.putJSONAEnum("can_build", can_build); ab.put1(','); ab.putJSONEnum("visibility", visibility); ab.put1(','); ab.putJSONZ("supervised", supervised); ab.put1(','); ab.putJSONA("messages", messages); ab.put1(','); ab.putJSON4("error_count", error_count); ab.put1(','); // Builds ModelParameterSchemaV2 objects for each field, and then calls writeJSON on the array ModelParametersSchemaV3.writeParametersJSON(ab, parameters, null, createParametersSchema().fillFromImpl((Model.Parameters)parameters.createImpl()), "parameters"); return ab; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/schemas/QuantileV3.java
package hex.schemas; import hex.quantile.Quantile; import hex.quantile.QuantileModel; import water.api.API; import water.api.schemas3.ModelParametersSchemaV3; public class QuantileV3 extends ModelBuilderSchema<Quantile,QuantileV3,QuantileV3.QuantileParametersV3> { public static final class QuantileParametersV3 extends ModelParametersSchemaV3<QuantileModel.QuantileParameters, QuantileParametersV3> { static public String[] own_fields = new String[] {"probs", "combine_method", "weights_column"}; // Input fields @API(help="Probabilities for quantiles") public double probs[]; @API(help="How to combine quantiles for even sample sizes", values={"INTERPOLATE", "AVERAGE", "LOW", "HIGH"}) public QuantileModel.CombineMethod combine_method; } // QuantileParametersV3 }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/segments/LocalSequentialSegmentModelsBuilder.java
package hex.segments; import hex.Model; import hex.ModelBuilder; import water.*; import water.fvec.Chunk; import water.fvec.Frame; import water.fvec.NewChunk; import water.fvec.Vec; import water.util.Log; import java.util.Arrays; class LocalSequentialSegmentModelsBuilder extends Iced<LocalSequentialSegmentModelsBuilder> { private final Job<SegmentModels> _job; private final Model.Parameters _blueprint_parms; private final Frame _segments; private final Frame _full_train; private final Frame _full_valid; private final WorkAllocator _allocator; LocalSequentialSegmentModelsBuilder(Job<SegmentModels> job, Model.Parameters blueprint_parms, Frame segments, Frame fullTrain, Frame fullValid, WorkAllocator allocator) { _job = job; _blueprint_parms = blueprint_parms; _segments = segments; _full_train = fullTrain; _full_valid = fullValid; _allocator = allocator; } SegmentModelsStats buildModels(SegmentModels segmentModels) { Vec.Reader[] segmentVecReaders = new Vec.Reader[_segments.numCols()]; for (int i = 0; i < segmentVecReaders.length; i++) segmentVecReaders[i] = _segments.vec(i).new Reader(); SegmentModelsStats stats = new SegmentModelsStats(); for (long segmentIdx = _allocator.getNextWorkItem(); segmentIdx < _allocator.getMaxWork(); segmentIdx = _allocator.getNextWorkItem()) { if (_job.stop_requested()) throw new Job.JobCancelledException(); // Handle end-user cancel request double[] segmentVals = readRow(segmentVecReaders, segmentIdx); final ModelBuilder builder = makeBuilder(segmentIdx, segmentVals); Exception failure = null; try { builder.init(false); if (builder.error_count() == 0) { builder.trainModel().get(); } } catch (Exception e) { failure = e; } finally { _job.update(1); SegmentModels.SegmentModelResult result = segmentModels.addResult(segmentIdx, builder, failure); Log.info("Finished building a model for segment id=", segmentIdx, ", result: ", result); cleanUp(builder); if (result.isSuccessful()) stats._succeeded++; else stats._failed++; } } return stats; } private void cleanUp(ModelBuilder builder) { Futures fs = new Futures(); Keyed.remove(builder._parms._train, fs, true); Keyed.remove(builder._parms._valid, fs, true); fs.blockForPending(); } private ModelBuilder makeBuilder(long segmentIdx, double[] segmentVals) { Key<Model> mKey = SegmentModelsUtils.makeUniqueModelKey(_job._result, segmentIdx); ModelBuilder builder = ModelBuilder.make(_blueprint_parms, mKey); builder._parms._train = makeSegmentFrame(_full_train, segmentIdx, segmentVals); builder._parms._valid = makeSegmentFrame(_full_valid, segmentIdx, segmentVals); return builder; } private Key<Frame> makeSegmentFrame(Frame f, long segmentIdx, double[] segmentVals) { if (f == null) return null; Key<Frame> segmentFrameKey = Key.make(f.toString() + "_segment_" + segmentIdx); Frame segmentFrame = new MakeSegmentFrame(segmentVals) .doAll(f.types(), f) .outputFrame(segmentFrameKey, f.names(), f.domains()); assert segmentFrameKey.equals(segmentFrame._key); return segmentFrameKey; } private static double[] readRow(Vec.Reader[] vecReaders, long r) { double[] row = new double[vecReaders.length]; for (int i = 0; i < row.length; i++) row[i] = vecReaders[i].isNA(r) ? 
Double.NaN : vecReaders[i].at(r); return row; } private static class MakeSegmentFrame extends MRTask<MakeSegmentFrame> { private final double[] _match_row; MakeSegmentFrame(double[] matchRow) { _match_row = matchRow; } @Override public void map(Chunk[] cs, NewChunk[] ncs) { int cnt = 0; int rows[] = new int[cs[0]._len]; each_row: for (int row = 0; row < rows.length; row++) { for (int i = 0; i < _match_row.length; i++) { if (Double.isNaN(_match_row[i]) && !cs[i].isNA(row)) continue each_row; if (_match_row[i] != cs[i].atd(row)) continue each_row; } rows[cnt++] = row; } if (cnt == 0) return; rows = cnt == rows.length ? rows : Arrays.copyOf(rows, cnt); for (int i = 0; i < cs.length; i++) { cs[i].extractRows(ncs[i], rows); } } } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/segments/SegmentModels.java
package hex.segments; import hex.Model; import hex.ModelBuilder; import water.*; import water.api.schemas3.KeyV3; import water.fvec.Chunk; import water.fvec.Frame; import water.fvec.NewChunk; import water.fvec.Vec; import water.parser.BufferedString; import water.util.ArrayUtils; import water.util.StringUtils; /** * Collection of Segment Models */ public class SegmentModels extends Keyed<SegmentModels> { private final Frame _segments; private final Vec _results; /** * Initialize the Segment Models structure, allocates keys for each SegmentModelResult * * @param key destination key * @param segments segments * @return instance of SegmentModels */ public static SegmentModels make(Key<SegmentModels> key, Frame segments) { SegmentModels segmentModels = new SegmentModels(key, segments); DKV.put(segmentModels); return segmentModels; } private SegmentModels(Key<SegmentModels> key, Frame segments) { super(key); _results = new MakeResultKeys().doAll(Vec.T_STR, segments).outputFrame().vec(0); _segments = segments.deepCopy(Key.makeUserHidden(Key.make().toString()).toString()); } SegmentModelResult addResult(long segmentIdx, ModelBuilder mb, Exception e) { Key<SegmentModelResult> resultKey = Key.make(_results.atStr(new BufferedString(), segmentIdx).toString()); SegmentModelResult result = new SegmentModelResult(resultKey, mb, e); DKV.put(result); return result; } /** * Converts the collection of Segment Models to a Frame representation * * @return Frame with segment column, followed by model key, job status, error and warning columns */ public Frame toFrame() { Frame result = _segments.deepCopy(null); // never expose the underlying Segments Frame (someone could delete it) Frame models = new ToFrame().doAll(new byte[]{Vec.T_STR, Vec.T_CAT, Vec.T_STR, Vec.T_STR}, new Frame(_results)) .outputFrame( new String[]{"model", "status", "errors", "warnings"}, new String[][]{null, Job.JobStatus.domain(), null, null} ); result.add(models); return result; } @Override public Class<? 
extends KeyV3> makeSchema() { return KeyV3.SegmentModelsKeyV3.class; } static class SegmentModelResult extends Keyed<SegmentModelResult> { final Key<Model> _model; final Job.JobStatus _status; final String[] _errors; final String[] _warns; @SuppressWarnings("unchecked") SegmentModelResult(Key<SegmentModelResult> selfKey, ModelBuilder mb, Exception e) { this(selfKey, mb.dest(), getConsolidatedStatus(mb), getErrors(mb, e), mb._job.warns()); } private SegmentModelResult(Key<SegmentModelResult> key, Key<Model> model, Job.JobStatus status, String[] errors, String[] warns) { super(key); _model = model; _status = status; _errors = errors; _warns = warns; } private static Job.JobStatus getConsolidatedStatus(ModelBuilder mb) { if (mb.error_count() > 0) return Job.JobStatus.FAILED; // do not get status from the job because was not even started (PENDING state) else return mb._job.getStatus(); } private static String[] getErrors(ModelBuilder mb, Exception e) { if (mb.error_count() == 0 && e == null) return null; String[] errors = new String[0]; if (mb.error_count() > 0) errors = ArrayUtils.append(errors, mb.validationErrors()); if (e != null) errors = ArrayUtils.append(errors, StringUtils.toString(e)); return errors; } public boolean isSuccessful() { return _status == Job.JobStatus.SUCCEEDED; } @Override public String toString() { return "model=" + _model + ", status=" + _status; } } private static class MakeResultKeys extends MRTask<MakeResultKeys> { @Override public void map(Chunk[] cs, NewChunk nc) { for (int i = 0; i < cs[0]._len; i++) nc.addStr(Key.makeUserHidden(Key.make().toString()).toString()); } } static class ToFrame extends MRTask<ToFrame> { @Override public void map(Chunk[] cs, NewChunk[] ncs) { assert cs.length == 1; Chunk c = cs[0]; BufferedString bs = new BufferedString(); for (int i = 0; i < c._len; i++) { SegmentModelResult result = DKV.getGet(Key.make(c.atStr(bs, i).toString())); if (result == null) { for (NewChunk nc : ncs) nc.addNA(); } else { int col = 0; ncs[col++].addStr(result._model.toString()); ncs[col++].addNum(result._status.ordinal()); if (result._errors != null) ncs[col++].addStr(String.join("\n", result._errors)); else ncs[col++].addNA(); if (result._warns != null) ncs[col++].addStr(String.join("\n", result._warns)); else ncs[col++].addNA(); assert col == ncs.length; } } } } @Override protected Futures remove_impl(Futures fs, boolean cascade) { if (_segments != null) { _segments.remove(fs, cascade); } if (_results != null) { fs.add(new CleanUpSegmentResults().dfork(_results)); } return fs; } static class CleanUpSegmentResults extends MRTask<CleanUpSegmentResults> { @Override public void map(Chunk c) { BufferedString bs = new BufferedString(); Futures fs = new Futures(); for (int i = 0; i < c._len; i++) Keyed.remove(Key.make(c.atStr(bs, i).toString()), fs, true); fs.blockForPending(); } @Override protected void postGlobal() { _fr.remove(); } } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/segments/SegmentModelsBuilder.java
package hex.segments; import hex.Model; import water.*; import water.fvec.Frame; import water.rapids.ast.prims.mungers.AstGroup; import water.util.Log; import java.util.List; import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; import java.util.stream.Stream; public class SegmentModelsBuilder { private static final AtomicLong nextSegmentModelsNum = new AtomicLong(0); private final SegmentModelsParameters _parms; private final Model.Parameters _blueprint_parms; public SegmentModelsBuilder(SegmentModelsParameters parms, Model.Parameters blueprintParms) { _parms = parms; _blueprint_parms = blueprintParms; } public Job<SegmentModels> buildSegmentModels() { if (_parms._parallelism <= 0) { throw new IllegalArgumentException("Parameter `parallelism` has to be a positive number, received=" + _parms._parallelism); } final Frame segments; if (_parms._segments != null) { segments = validateSegmentsFrame(_parms._segments, _parms._segment_columns); } else { segments = makeSegmentsFrame(_blueprint_parms._train, _parms._segment_columns); } final Job<SegmentModels> job = new Job<>(makeDestKey(), SegmentModels.class.getName(), _blueprint_parms.algoName()); SegmentModelsBuilderTask segmentBuilder = new SegmentModelsBuilderTask( job, segments, _blueprint_parms._train, _blueprint_parms._valid, _parms._parallelism); return job.start(segmentBuilder, segments.numRows()); } private Frame makeSegmentsFrame(Key<Frame> trainKey, String[] segmentColumns) { Frame train = validateSegmentsFrame(trainKey, segmentColumns); return new AstGroup() .performGroupingWithAggregations(train, train.find(segmentColumns), new AstGroup.AGG[0]) .getFrame(); } private Key<SegmentModels> makeDestKey() { if (_parms._segment_models_id != null) return _parms._segment_models_id; String id = H2O.calcNextUniqueObjectId("segment_models", nextSegmentModelsNum, _blueprint_parms.algoName()); return Key.make(id); } private static Frame validateSegmentsFrame(Key<Frame> segmentsKey, String[] segmentColumns) { Frame segments = segmentsKey.get(); if (segments == null) { throw new IllegalStateException("Frame `" + segmentsKey + "` doesn't exist."); } List<String> invalidColumns = Stream.of(segmentColumns != null ? 
segmentColumns : segments.names()) .filter(name -> !segments.vec(name).isCategorical() && !segments.vec(name).isInt()) .collect(Collectors.toList()); if (!invalidColumns.isEmpty()) { throw new IllegalStateException( "Columns to segment-by can only be categorical and integer of type, invalid columns: " + invalidColumns); } return segments; } private class SegmentModelsBuilderTask extends H2O.H2OCountedCompleter<SegmentModelsBuilderTask> { private final Job<SegmentModels> _job; private final Frame _segments; private final Frame _full_train; private final Frame _full_valid; private final Key _counter_key; private final int _parallelism; private SegmentModelsBuilderTask(Job<SegmentModels> job, Frame segments, Key<Frame> train, Key<Frame> valid, int parallelism) { _job = job; _segments = segments; _full_train = reorderColumns(train); _full_valid = reorderColumns(valid); _counter_key = Key.make(); _parallelism = parallelism; } @Override public void compute2() { try { _blueprint_parms.read_lock_frames(_job); SegmentModels segmentModels = SegmentModels.make(_job._result, _segments); WorkAllocator allocator = new WorkAllocator(_counter_key, _segments.numRows()); LocalSequentialSegmentModelsBuilder localBuilder = new LocalSequentialSegmentModelsBuilder( _job, _blueprint_parms, _segments, _full_train, _full_valid, allocator); SegmentModelsStats stats = new MultiNodeRunner(localBuilder, segmentModels, _parallelism).doAllNodes()._stats; Log.info("Finished per-segment model building; summary: ", stats); } finally { _blueprint_parms.read_unlock_frames(_job); if (_segments._key == null) { // segments frame was auto-generated _segments.remove(); } DKV.remove(_counter_key); } tryComplete(); } private Frame reorderColumns(Key<Frame> key) { if (key == null) return null; Frame f = key.get(); if (f == null) { throw new IllegalStateException("Key " + key + " doesn't point to an existing Frame."); } Frame mutating = new Frame(f); Frame reordered = new Frame(_segments.names(), mutating.vecs(_segments.names())) .add(mutating.remove(_segments.names())); reordered._key = f._key; return reordered; } } private static class MultiNodeRunner extends MRTask<MultiNodeRunner> { final LocalSequentialSegmentModelsBuilder _builder; final SegmentModels _segment_models; final int _parallelism; // OUT SegmentModelsStats _stats; private MultiNodeRunner(LocalSequentialSegmentModelsBuilder builder, SegmentModels segmentModels, int parallelism) { _builder = builder; _segment_models = segmentModels; _parallelism = parallelism; } @Override protected void setupLocal() { if (_parallelism == 1) { _stats = _builder.buildModels(_segment_models); } else { ExecutorService executor = Executors.newFixedThreadPool(_parallelism); _stats = Stream.<Callable<SegmentModelsStats>>generate( () -> (() -> _builder.clone().buildModels(_segment_models))) .limit(_parallelism) .map(executor::submit) .map(future -> { try { return future.get(); } catch (ExecutionException | InterruptedException e) { throw new RuntimeException("Failed to build segment-models", e); } }).reduce((a, b) -> { a.reduce(b); return a; }).get(); } Log.info("Finished per-segment model building on node ", H2O.SELF, "; summary: ", _stats); } @Override public void reduce(MultiNodeRunner mrt) { _stats.reduce(mrt._stats); } } public static class SegmentModelsParameters extends Iced<SegmentModelsParameters> { Key<SegmentModels> _segment_models_id; Key<Frame> _segments; String[] _segment_columns; int _parallelism = 1; } }
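A hedged usage sketch for the builder above. Because the SegmentModelsParameters fields are package-private, a snippet like this would live inside the hex.segments package; blueprintParms (an already configured Model.Parameters with _train set) and the "region" column name are assumptions.

// Sketch (inside hex.segments): build one model per distinct value of a "region" column.
SegmentModelsBuilder.SegmentModelsParameters smParms = new SegmentModelsBuilder.SegmentModelsParameters();
smParms._segment_columns = new String[]{"region"};   // hypothetical segment-by column
smParms._parallelism = 2;                            // two segments trained concurrently
Job<SegmentModels> job = new SegmentModelsBuilder(smParms, blueprintParms).buildSegmentModels();
SegmentModels sm = job.get();                        // block until all segments are done
Frame overview = sm.toFrame();                       // one row per segment: model key, status, errors, warnings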
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/segments/SegmentModelsStats.java
package hex.segments; import water.Iced; class SegmentModelsStats extends Iced<SegmentModelsStats> { int _succeeded; int _failed; void reduce(SegmentModelsStats other) { _succeeded += other._succeeded; _failed += other._failed; } @Override public String toString() { return "succeeded=" + _succeeded + ", failed=" + _failed; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/segments/SegmentModelsUtils.java
package hex.segments; import hex.Model; import water.Key; public class SegmentModelsUtils { static Key<Model> makeUniqueModelKey(Key<SegmentModels> smKey, long segmentIdx) { return Key.make(smKey.toString() + "_" + segmentIdx); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/segments/WorkAllocator.java
package hex.segments; import water.DKV; import water.Iced; import water.Key; import water.util.IcedLong; class WorkAllocator extends Iced<WorkAllocator> { private final Key _counter_key; private final long _max_work; WorkAllocator(Key counterKey, long maxWork) { _counter_key = counterKey; _max_work = maxWork; DKV.put(_counter_key, new IcedLong(-1)); } long getNextWorkItem() { return IcedLong.incrementAndGet(_counter_key); } long getMaxWork() { return _max_work; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/tfidf/DocumentFrequencyTask.java
package hex.tfidf; import water.Scope; import water.fvec.Frame; import water.rapids.ast.prims.mungers.AstGroup; /** * Task class using map-reduce to compute document frequency values for words in given documents. */ public class DocumentFrequencyTask { /** * Column names to be used for output frame. */ private static final String[] OUTPUT_FRAME_COL_NAMES = new String[] { "Word", "DF" }; /** * Computes document frequency values for given words in documents. * * @param wordFrame input frame of <b>unique</b> words (unique per document) for which document frequency values * should be computed. Expected row format: <code>docID, word</code> * * @return frame containing document frequency values for given words. */ public static Frame compute(final Frame wordFrame) { Scope.enter(); Frame dfFrame; try { AstGroup.AGG[] aggs = new AstGroup.AGG[1]; aggs[0] = new AstGroup.AGG(AstGroup.FCN.nrow, 0, AstGroup.NAHandling.ALL, -1); int[] groupByColumns = new int[]{ 1 }; // Construct final frame with DF values dfFrame = new AstGroup().performGroupingWithAggregations(wordFrame, groupByColumns, aggs).getFrame(); Scope.untrack(dfFrame.keys()); dfFrame.setNames(OUTPUT_FRAME_COL_NAMES); } finally { Scope.exit(); } return dfFrame; } }
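A plain-Java illustration of the same grouping on hypothetical data: given unique (doc, word) pairs (0,"cat"), (1,"cat"), (1,"dog"), the document frequencies are DF(cat)=2 and DF(dog)=1.

// Counts each word once per document across the unique (doc, word) pairs above.
String[] uniqueWordColumn = {"cat", "cat", "dog"};               // word column of the unique pairs
java.util.Map<String, Long> df = new java.util.HashMap<>();
for (String w : uniqueWordColumn) df.merge(w, 1L, Long::sum);
System.out.println(df);                                          // DF per word, e.g. {cat=2, dog=1}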
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/tfidf/InverseDocumentFrequencyTask.java
package hex.tfidf; import water.MRTask; import water.fvec.Chunk; import water.fvec.NewChunk; /** * Map-reduce task computing inverse document frequency values for words in given documents. */ public class InverseDocumentFrequencyTask extends MRTask<InverseDocumentFrequencyTask> { // IN /** * Number of documents in given corpus. */ private final long _documentsCnt; public InverseDocumentFrequencyTask(long documentsCnt) { _documentsCnt = documentsCnt; } /** * Computes Inverse Document Frequency value for a word with given Document Frequency. * * @param documentFrequency DF value of a word for which IDF should be computed. * * @return Inverse Document Frequency value for a word with given Document Frequency. */ private double idf(long documentFrequency) { return Math.log(((double)(_documentsCnt + 1)) / (documentFrequency + 1)); } @Override public void map(Chunk cs, NewChunk nc) { for (int row = 0; row < cs._len; row++) { if (cs.isNA(row)) continue; // Ignore NAs nc.addNum(idf(cs.at8(row))); } } }
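A quick standalone check of the smoothed IDF formula above, with arbitrary counts: 10 documents in the corpus and a word present in 3 of them.

long documentsCnt = 10, documentFrequency = 3;   // arbitrary example counts
double idf = Math.log((double) (documentsCnt + 1) / (documentFrequency + 1));
System.out.println(idf);                         // ln(11/4) ~= 1.0116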
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/tfidf/TermFrequencyTask.java
package hex.tfidf; import water.fvec.Frame; import water.rapids.ast.prims.mungers.AstGroup; /** * Task class using map-reduce to compute term frequency values for words in given documents. */ public class TermFrequencyTask { /** * Column names to be used for output frame. */ private static final String[] OUTPUT_FRAME_COL_NAMES = new String[] { "DocID", "Word", "TF" }; /** * Computes term frequency values for given words in documents. * * @param wordFrame input frame of words for which term frequency * values should be computed. For exact format * see {@link TfIdfPreprocessorTask}. * * @return frame containing term frequency values for given words. */ public static Frame compute(final Frame wordFrame) { AstGroup.AGG[] aggs = new AstGroup.AGG[1]; aggs[0] = new AstGroup.AGG(AstGroup.FCN.nrow, 0, AstGroup.NAHandling.ALL, -1); int[] groupByColumns = new int[]{ 0, 1 }; Frame tfFrame = new AstGroup().performGroupingWithAggregations(wordFrame, groupByColumns, aggs).getFrame(); tfFrame.setNames(OUTPUT_FRAME_COL_NAMES); return tfFrame; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/tfidf/TfIdfPreprocessorTask.java
package hex.tfidf; import water.MRTask; import water.fvec.Chunk; import water.fvec.NewChunk; import water.parser.BufferedString; /** * Map-Reduce task for pre-processing data before computing TF-IDF.<br> * <br> * * <p> * Input format - 2 columns: <code>documentID, documentString</code> * </p> * * <p> * Output row format: <code>documentID, word</code> * </p> */ public class TfIdfPreprocessorTask extends MRTask<TfIdfPreprocessorTask> { /** * Words delimiter regex in documents. */ private static final String WORDS_DELIMITER_REGEX = "\\s+"; // IN /** * Index of a column containing document IDs in given input frame. */ private final int _docIdsColIdx; /** * Index of a column containing document contents in given input frame. */ private final int _docContentsColIdx; public TfIdfPreprocessorTask(int docIdsColIdx, int docContentsColIdx) { _docIdsColIdx = docIdsColIdx; _docContentsColIdx = docContentsColIdx; } @Override public void map(Chunk[] cs, NewChunk[] ncs) { Chunk inputDocumentIds = cs[_docIdsColIdx]; Chunk inputDocs = cs[_docContentsColIdx]; NewChunk outputDocumentIds = ncs[0]; NewChunk outputTokens = ncs[1]; for (int row = 0; row < inputDocs._len; row++) { if (inputDocs.isNA(row)) continue; // Ignore NAs String document = inputDocs.atStr(new BufferedString(), row).toString(); long documentId = inputDocumentIds.at8(row); String[] words = document.split(WORDS_DELIMITER_REGEX); for (String word : words) { outputDocumentIds.addNum(documentId); outputTokens.addStr(word); } } } }
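To make the row format concrete (example data is arbitrary): an input row (docID=0, "hello world hello") expands into three output rows (0, "hello"), (0, "world"), (0, "hello"). A minimal plain-Java sketch of the same whitespace split, outside the MRTask machinery:

long documentId = 0;                               // arbitrary example document id
String document = "hello world hello";
for (String word : document.split("\\s+")) {       // same delimiter regex as WORDS_DELIMITER_REGEX
  System.out.println(documentId + "\t" + word);    // one (docID, word) pair per token
}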
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/jsr166y/CountedCompleter.java
/* * Written by Doug Lea with assistance from members of JCP JSR-166 * Expert Group and released to the public domain, as explained at * http://creativecommons.org/publicdomain/zero/1.0/ */ package jsr166y; /** * A resultless {@link ForkJoinTask} with a completion action * performed when triggered and there are no remaining pending * actions. Uses of CountedCompleter are similar to those of other * completion based components (such as {@link * java.nio.channels.CompletionHandler}) except that multiple * <em>pending</em> completions may be necessary to trigger the {@link * #onCompletion} action, not just one. Unless initialized otherwise, * the {@link #getPendingCount pending count} starts at zero, but may * be (atomically) changed using methods {@link #setPendingCount}, * {@link #addToPendingCount}, and {@link * #compareAndSetPendingCount}. Upon invocation of {@link * #tryComplete}, if the pending action count is nonzero, it is * decremented; otherwise, the completion action is performed, and if * this completer itself has a completer, the process is continued * with its completer. As is the case with related synchronization * components such as {@link Phaser} and {@link * java.util.concurrent.Semaphore} these methods affect only internal * counts; they do not establish any further internal bookkeeping. In * particular, the identities of pending tasks are not maintained. As * illustrated below, you can create subclasses that do record some or * all pended tasks or their results when needed. * * <p>A concrete CountedCompleter class must define method {@link * #compute}, that should, in almost all use cases, invoke {@code * tryComplete()} once before returning. The class may also optionally * override method {@link #onCompletion} to perform an action upon * normal completion, and method {@link #onExceptionalCompletion} to * perform an action upon any exception. * * <p>A CountedCompleter that does not itself have a completer (i.e., * one for which {@link #getCompleter} returns {@code null}) can be * used as a regular ForkJoinTask with this added functionality. * However, any completer that in turn has another completer serves * only as an internal helper for other computations, so its own task * status (as reported in methods such as {@link ForkJoinTask#isDone}) * is arbitrary; this status changes only upon explicit invocations of * {@link #complete}, {@link ForkJoinTask#cancel}, {@link * ForkJoinTask#completeExceptionally} or upon exceptional completion * of method {@code compute}. Upon any exceptional completion, the * exception may be relayed to a task's completer (and its completer, * and so on), if one exists and it has not otherwise already * completed. * * <p><b>Sample Usages.</b> * * <p><b>Parallel recursive decomposition.</b> CountedCompleters may * be arranged in trees similar to those often used with {@link * RecursiveAction}s, although the constructions involved in setting * them up typically vary. Even though they entail a bit more * bookkeeping, CountedCompleters may be better choices when applying * a possibly time-consuming operation (that cannot be further * subdivided) to each element of an array or collection; especially * when the operation takes a significantly different amount of time * to complete for some elements than others, either because of * intrinsic variation (for example IO) or auxiliary effects such as * garbage collection. Because CountedCompleters provide their own * continuations, other threads need not block waiting to perform * them. 
* * <p> For example, here is an initial version of a class that uses * divide-by-two recursive decomposition to divide work into single * pieces (leaf tasks). Even when work is split into individual calls, * tree-based techniques are usually preferable to directly forking * leaf tasks, because they reduce inter-thread communication and * improve load balancing. In the recursive case, the second of each * pair of subtasks to finish triggers completion of its parent * (because no result combination is performed, the default no-op * implementation of method {@code onCompletion} is not overridden). A * static utility method sets up the base task and invokes it: * * <pre> {@code * class MyOperation<E> { void apply(E e) { ... } } * * class ForEach<E> extends CountedCompleter { * * public static <E> void forEach(ForkJoinPool pool, E[] array, MyOperation<E> op) { * pool.invoke(new ForEach<E>(null, array, op, 0, array.length)); * } * * final E[] array; final MyOperation<E> op; final int lo, hi; * ForEach(CountedCompleter p, E[] array, MyOperation<E> op, int lo, int hi) { * super(p); * this.array = array; this.op = op; this.lo = lo; this.hi = hi; * } * * public void compute() { // version 1 * if (hi - lo >= 2) { * int mid = (lo + hi) >>> 1; * setPendingCount(2); // must set pending count before fork * new ForEach(this, array, op, mid, hi).fork(); // right child * new ForEach(this, array, op, lo, mid).fork(); // left child * } * else if (hi > lo) * op.apply(array[lo]); * tryComplete(); * } * } }</pre> * * This design can be improved by noticing that in the recursive case, * the task has nothing to do after forking its right task, so can * directly invoke its left task before returning. (This is an analog * of tail recursion removal.) Also, because the task returns upon * executing its left task (rather than falling through to invoke * tryComplete) the pending count is set to one: * * <pre> {@code * class ForEach<E> ... * public void compute() { // version 2 * if (hi - lo >= 2) { * int mid = (lo + hi) >>> 1; * setPendingCount(1); // only one pending * new ForEach(this, array, op, mid, hi).fork(); // right child * new ForEach(this, array, op, lo, mid).compute(); // direct invoke * } * else { * if (hi > lo) * op.apply(array[lo]); * tryComplete(); * } * } * }</pre> * * As a further improvement, notice that the left task need not even * exist. Instead of creating a new one, we can iterate using the * original task, and add a pending count for each fork: * * <pre> {@code * class ForEach<E> ... * public void compute() { // version 3 * int l = lo, h = hi; * while (h - l >= 2) { * int mid = (l + h) >>> 1; * addToPendingCount(1); * new ForEach(this, array, op, mid, h).fork(); // right child * h = mid; * } * if (h > l) * op.apply(array[l]); * tryComplete(); * } * }</pre> * * Additional improvements of such classes might entail precomputing * pending counts so that they can be established in constructors, * specializing classes for leaf steps, subdividing by say, four, * instead of two per iteration, and using an adaptive threshold * instead of always subdividing down to single elements. * * <p><b>Recording subtasks.</b> CountedCompleter tasks that combine * results of multiple subtasks usually need to access these results * in method {@link #onCompletion}. 
As illustrated in the following * class (that performs a simplified form of map-reduce where mappings * and reductions are all of type {@code E}), one way to do this in * divide and conquer designs is to have each subtask record its * sibling, so that it can be accessed in method {@code onCompletion}. * For clarity, this class uses explicit left and right subtasks, but * variants of other streamlinings seen in the above example may also * apply. * * <pre> {@code * class MyMapper<E> { E apply(E v) { ... } } * class MyReducer<E> { E apply(E x, E y) { ... } } * class MapReducer<E> extends CountedCompleter { * final E[] array; final MyMapper<E> mapper; * final MyReducer<E> reducer; final int lo, hi; * MapReducer sibling; * E result; * MapReducer(CountedCompleter p, E[] array, MyMapper<E> mapper, * MyReducer<E> reducer, int lo, int hi) { * super(p); * this.array = array; this.mapper = mapper; * this.reducer = reducer; this.lo = lo; this.hi = hi; * } * public void compute() { * if (hi - lo >= 2) { * int mid = (lo + hi) >>> 1; * MapReducer<E> left = new MapReducer(this, array, mapper, reducer, lo, mid); * MapReducer<E> right = new MapReducer(this, array, mapper, reducer, mid, hi); * left.sibling = right; * right.sibling = left; * setPendingCount(1); // only right is pending * right.fork(); * left.compute(); // directly execute left * } * else { * if (hi > lo) * result = mapper.apply(array[lo]); * tryComplete(); * } * } * public void onCompletion(CountedCompleter caller) { * if (caller != this) { * MapReducer<E> child = (MapReducer<E>)caller; * MapReducer<E> sib = child.sibling; * if (sib == null || sib.result == null) * result = child.result; * else * result = reducer.apply(child.result, sib.result); * } * } * * public static <E> E mapReduce(ForkJoinPool pool, E[] array, * MyMapper<E> mapper, MyReducer<E> reducer) { * MapReducer<E> mr = new MapReducer<E>(null, array, mapper, * reducer, 0, array.length); * pool.invoke(mr); * return mr.result; * } * } }</pre> * * <p><b>Triggers.</b> Some CountedCompleters are themselves never * forked, but instead serve as bits of plumbing in other designs; * including those in which the completion of one of more async tasks * triggers another async task. For example: * * <pre> {@code * class HeaderBuilder extends CountedCompleter { ... } * class BodyBuilder extends CountedCompleter { ... } * class PacketSender extends CountedCompleter { * PacketSender(...) { super(null, 1); ... } // trigger on second completion * public void compute() { } // never called * public void onCompletion(CountedCompleter caller) { sendPacket(); } * } * // sample use: * PacketSender p = new PacketSender(); * new HeaderBuilder(p, ...).fork(); * new BodyBuilder(p, ...).fork(); * }</pre> * * @since 1.8 * @author Doug Lea */ public abstract class CountedCompleter extends ForkJoinTask<Void> { private static final long serialVersionUID = 5232453752276485070L; /** This task's completer, or null if none */ /*final*/ CountedCompleter completer; /** The number of pending tasks until completion */ volatile int pending; /** * Creates a new CountedCompleter with the given completer * and initial pending count. * * @param completer this tasks completer, or {@code null} if none * @param initialPendingCount the initial pending count */ protected CountedCompleter(CountedCompleter completer, int initialPendingCount) { this.completer = completer; this.pending = initialPendingCount; } /** * Creates a new CountedCompleter with the given completer * and an initial pending count of zero. 
* * @param completer this task's completer, or {@code null} if none */ protected CountedCompleter(CountedCompleter completer) { this.completer = completer; } /** * Creates a new CountedCompleter with no completer * and an initial pending count of zero. */ protected CountedCompleter() { this.completer = null; } /** * The main computation performed by this task. */ public abstract void compute(); /** * Performs an action when method {@link #tryComplete} is invoked * and there are no pending counts, or when the unconditional * method {@link #complete} is invoked. By default, this method * does nothing. * * @param caller the task invoking this method (which may * be this task itself). */ public void onCompletion(CountedCompleter caller) { } /** * Performs an action when method {@link #completeExceptionally} * is invoked or method {@link #compute} throws an exception, and * this task has not otherwise already completed normally. On * entry to this method, this task {@link * ForkJoinTask#isCompletedAbnormally}. The return value of this * method controls further propagation: If {@code true} and this * task has a completer, then this completer is also completed * exceptionally. The default implementation of this method does * nothing except return {@code true}. * * @param ex the exception * @param caller the task invoking this method (which may * be this task itself). * @return true if this exception should be propagated to this * task's completer, if one exists. */ public boolean onExceptionalCompletion(Throwable ex, CountedCompleter caller) { return true; } /** * Returns the completer established in this task's constructor, * or {@code null} if none. * * @return the completer */ public final CountedCompleter getCompleter() { return completer; } // Cliff Click's Horrible Hack // I must 'clone' or 'newInstance' these things... so to avoid a // reflective Constructor call to set the parent I made the 'completer' // field non-final. This happens immediately after clone/newInstance and a // following volatile set of the pending() field will make the change // visible to other threads. Example: old.clone().setCompleter(completer) public final void setCompleter( CountedCompleter x ) { completer = x; } /** * Returns the current pending count. * * @return the current pending count */ public final int getPendingCount() { return pending; } /** * Sets the pending count to the given value. * * @param count the count */ public final void setPendingCount(int count) { pending = count; } /** * Adds (atomically) the given value to the pending count. * * @param delta the value to add */ public final void addToPendingCount(int delta) { int c; // note: can replace with intrinsic in jdk8 do {} while (!U.compareAndSwapInt(this, PENDING, c = pending, c+delta)); } /** * Sets (atomically) the pending count to the given count only if * it currently holds the given expected value. * * @param expected the expected value * @param count the new value * @return true if successful */ public final boolean compareAndSetPendingCount(int expected, int count) { return U.compareAndSwapInt(this, PENDING, expected, count); } /** * If the pending count is nonzero, decrements the count; * otherwise invokes {@link #onCompletion} and then similarly * tries to complete this task's completer, if one exists, * else marks this task as complete. 
*/ public final void tryComplete() { __tryComplete(this); // CountedCompleter a = this, s = a; // for (int c;;) { // if ((c = a.pending) == 0) { // a.onCompletion(s); // if ((a = (s = a).completer) == null) { // s.quietlyComplete(); // return; // } // } // else if (U.compareAndSwapInt(a, PENDING, c, c - 1)) // return; // } } /** * H2O hack to get distributed FJ behavior closer to the behavior on a local node. * It allows us to continue a "completion propagation" interrupted by a remote task. * Should *not* be called by anyone outside of the RPC mechanism. * * In standard FJ, tryComplete is always called by the task itself and the task is thus its own caller. * Afterwards, the FJ framework will start propagating the completion up the task tree, walking up the list of completers (parents), * each time calling onCompletion of the parent with the current node (the child which triggered the completion) being passed as the "caller" argument. * * When there is a distributed task in the chain, the sequence is broken as the task is completed on a remote node. * We want to be able to continue the completion chain, i.e. the remote task should now call onCompletion of its parent with itself passed as the caller argument. * We cannot call tryComplete on the task, since it has already been called on the remote node. * Instead, we explicitly set the caller argument in this overloaded tryComplete call. * * Example: * * new RPC(node,task).addCompletor(f(x) {...}) * * When we receive the response, we want to pass task as x to f. * We call f.__tryComplete(task) * * @param caller - The child task completing this */ public final void __tryComplete(CountedCompleter caller) { CountedCompleter a = this, s = caller; for (int c;;) { if((c = a.pending) == 0) { a.onCompletion(s); if ((a = (s = a).completer) == null) { s.quietlyComplete(); return; } } else if (U.compareAndSwapInt(a, PENDING, c, c - 1)) return; } } /** * Regardless of pending count, invokes {@link #onCompletion}, * marks this task as complete with a {@code null} return value, * and further triggers {@link #tryComplete} on this task's * completer, if one exists. This method may be useful when * forcing completion as soon as any one (versus all) of several * subtask results are obtained. * * @param mustBeNull the {@code null} completion value */ public void complete(Void mustBeNull) { CountedCompleter p; onCompletion(this); quietlyComplete(); if ((p = completer) != null) p.tryComplete(); } /** * Support for FJT exception propagation */ void internalPropagateException(Throwable ex) { CountedCompleter a = this, s = a; while (a.onExceptionalCompletion(ex, s) && (a = (s = a).completer) != null && a.status >= 0) a.recordExceptionalCompletion(ex); } /** * Implements execution conventions for CountedCompleters */ protected final boolean exec() { compute(); return false; } /** * Always returns {@code null}. * * @return {@code null} always */ public final Void getRawResult() { return null; } /** * Requires null completion value. */ protected final void setRawResult(Void mustBeNull) { } // Unsafe mechanics private static final sun.misc.Unsafe U; private static final long PENDING; static { try { U = getUnsafe(); PENDING = U.objectFieldOffset (CountedCompleter.class.getDeclaredField("pending")); } catch (Exception e) { throw new Error(e); } } /** * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. * Replace with a simple call to Unsafe.getUnsafe when integrating * into a jdk. 
* * @return a sun.misc.Unsafe */ private static sun.misc.Unsafe getUnsafe() { try { return sun.misc.Unsafe.getUnsafe(); } catch (SecurityException se) { try { return java.security.AccessController.doPrivileged (new java.security .PrivilegedExceptionAction<sun.misc.Unsafe>() { public sun.misc.Unsafe run() throws Exception { java.lang.reflect.Field f = sun.misc .Unsafe.class.getDeclaredField("theUnsafe"); f.setAccessible(true); return (sun.misc.Unsafe) f.get(null); }}); } catch (java.security.PrivilegedActionException e) { throw new RuntimeException("Could not initialize intrinsics", e.getCause()); } } } }
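To make the pending-count contract concrete, here is a small, self-contained sketch in the spirit of the Javadoc examples above; it is illustrative only and not part of h2o-core. Each internal node records its children so onCompletion can fold both partial results once its pending count has reached zero.

// Illustrative only: parallel sum over a long[] using the documented
// setPendingCount / fork / tryComplete / onCompletion protocol.
class ArraySum extends CountedCompleter {
    final long[] a; final int lo, hi;
    ArraySum left, right;   // recorded so onCompletion can read both partial results
    long result;
    ArraySum(ArraySum parent, long[] a, int lo, int hi) {
        super(parent); this.a = a; this.lo = lo; this.hi = hi;
    }
    @Override public void compute() {
        if (hi - lo >= 2) {
            int mid = (lo + hi) >>> 1;
            left  = new ArraySum(this, a, lo, mid);
            right = new ArraySum(this, a, mid, hi);
            setPendingCount(1);   // only the forked right child is pending
            right.fork();
            left.compute();       // run the left half directly (tail-recursion removal)
        } else {
            if (hi > lo) result = a[lo];
            tryComplete();        // leaf: propagate completion up the completer chain
        }
    }
    @Override public void onCompletion(CountedCompleter caller) {
        if (left != null) result = left.result + right.result;  // internal node: fold the children
    }
}
// Usage sketch:
//   ArraySum root = new ArraySum(null, data, 0, data.length);
//   new ForkJoinPool().invoke(root);
//   long total = root.result;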
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/jsr166y/ForkJoinPool.java
/* * Written by Doug Lea with assistance from members of JCP JSR-166 * Expert Group and released to the public domain, as explained at * http://creativecommons.org/publicdomain/zero/1.0/ */ package jsr166y; import water.H2ORuntime; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.concurrent.AbstractExecutorService; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.RunnableFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.AbstractQueuedSynchronizer; import java.util.concurrent.locks.Condition; /** * An {@link ExecutorService} for running {@link ForkJoinTask}s. * A {@code ForkJoinPool} provides the entry point for submissions * from non-{@code ForkJoinTask} clients, as well as management and * monitoring operations. * * <p>A {@code ForkJoinPool} differs from other kinds of {@link * ExecutorService} mainly by virtue of employing * <em>work-stealing</em>: all threads in the pool attempt to find and * execute tasks submitted to the pool and/or created by other active * tasks (eventually blocking waiting for work if none exist). This * enables efficient processing when most tasks spawn other subtasks * (as do most {@code ForkJoinTask}s), as well as when many small * tasks are submitted to the pool from external clients. Especially * when setting <em>asyncMode</em> to true in constructors, {@code * ForkJoinPool}s may also be appropriate for use with event-style * tasks that are never joined. * * <p>A {@code ForkJoinPool} is constructed with a given target * parallelism level; by default, equal to the number of available * processors. The pool attempts to maintain enough active (or * available) threads by dynamically adding, suspending, or resuming * internal worker threads, even if some tasks are stalled waiting to * join others. However, no such adjustments are guaranteed in the * face of blocked IO or other unmanaged synchronization. The nested * {@link ManagedBlocker} interface enables extension of the kinds of * synchronization accommodated. * * <p>In addition to execution and lifecycle control methods, this * class provides status check methods (for example * {@link #getStealCount}) that are intended to aid in developing, * tuning, and monitoring fork/join applications. Also, method * {@link #toString} returns indications of pool state in a * convenient form for informal monitoring. * * <p> As is the case with other ExecutorServices, there are three * main task execution methods summarized in the following table. * These are designed to be used primarily by clients not already * engaged in fork/join computations in the current pool. The main * forms of these methods accept instances of {@code ForkJoinTask}, * but overloaded forms also allow mixed execution of plain {@code * Runnable}- or {@code Callable}- based activities as well. However, * tasks that are already executing in a pool should normally instead * use the within-computation forms listed in the table unless using * async event-style tasks that are not usually joined, in which case * there is little difference among choice of methods. 
* * <table BORDER CELLPADDING=3 CELLSPACING=1 summary=""> * <tr> * <td></td> * <td ALIGN=CENTER> <b>Call from non-fork/join clients</b></td> * <td ALIGN=CENTER> <b>Call from within fork/join computations</b></td> * </tr> * <tr> * <td> <b>Arrange async execution</b></td> * <td> {@link #execute(ForkJoinTask)}</td> * <td> {@link ForkJoinTask#fork}</td> * </tr> * <tr> * <td> <b>Await and obtain result</b></td> * <td> {@link #invoke(ForkJoinTask)}</td> * <td> {@link ForkJoinTask#invoke}</td> * </tr> * <tr> * <td> <b>Arrange exec and obtain Future</b></td> * <td> {@link #submit(ForkJoinTask)}</td> * <td> {@link ForkJoinTask#fork} (ForkJoinTasks <em>are</em> Futures)</td> * </tr> * </table> * * <p><b>Sample Usage.</b> Normally a single {@code ForkJoinPool} is * used for all parallel task execution in a program or subsystem. * Otherwise, use would not usually outweigh the construction and * bookkeeping overhead of creating a large set of threads. For * example, a common pool could be used for the {@code SortTasks} * illustrated in {@link RecursiveAction}. Because {@code * ForkJoinPool} uses threads in {@linkplain java.lang.Thread#isDaemon * daemon} mode, there is typically no need to explicitly {@link * #shutdown} such a pool upon program exit. * * <pre> {@code * static final ForkJoinPool mainPool = new ForkJoinPool(); * ... * public void sort(long[] array) { * mainPool.invoke(new SortTask(array, 0, array.length)); * }}</pre> * * <p><b>Implementation notes</b>: This implementation restricts the * maximum number of running threads to 32767. Attempts to create * pools with greater than the maximum number result in * {@code IllegalArgumentException}. * * <p>This implementation rejects submitted tasks (that is, by throwing * {@link RejectedExecutionException}) only when the pool is shut down * or internal resources have been exhausted. * * @since 1.7 * @author Doug Lea */ public class ForkJoinPool extends AbstractExecutorService { /* * Implementation Overview * * This class and its nested classes provide the main * functionality and control for a set of worker threads: * Submissions from non-FJ threads enter into submission queues. * Workers take these tasks and typically split them into subtasks * that may be stolen by other workers. Preference rules give * first priority to processing tasks from their own queues (LIFO * or FIFO, depending on mode), then to randomized FIFO steals of * tasks in other queues. * * WorkQueues * ========== * * Most operations occur within work-stealing queues (in nested * class WorkQueue). These are special forms of Deques that * support only three of the four possible end-operations -- push, * pop, and poll (aka steal), under the further constraints that * push and pop are called only from the owning thread (or, as * extended here, under a lock), while poll may be called from * other threads. (If you are unfamiliar with them, you probably * want to read Herlihy and Shavit's book "The Art of * Multiprocessor programming", chapter 16 describing these in * more detail before proceeding.) The main work-stealing queue * design is roughly similar to those in the papers "Dynamic * Circular Work-Stealing Deque" by Chase and Lev, SPAA 2005 * (http://research.sun.com/scalable/pubs/index.html) and * "Idempotent work stealing" by Michael, Saraswat, and Vechev, * PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186). 
* The main differences ultimately stem from GC requirements that * we null out taken slots as soon as we can, to maintain as small * a footprint as possible even in programs generating huge * numbers of tasks. To accomplish this, we shift the CAS * arbitrating pop vs poll (steal) from being on the indices * ("base" and "top") to the slots themselves. So, both a * successful pop and poll mainly entail a CAS of a slot from * non-null to null. Because we rely on CASes of references, we * do not need tag bits on base or top. They are simple ints as * used in any circular array-based queue (see for example * ArrayDeque). Updates to the indices must still be ordered in a * way that guarantees that top == base means the queue is empty, * but otherwise may err on the side of possibly making the queue * appear nonempty when a push, pop, or poll have not fully * committed. Note that this means that the poll operation, * considered individually, is not wait-free. One thief cannot * successfully continue until another in-progress one (or, if * previously empty, a push) completes. However, in the * aggregate, we ensure at least probabilistic non-blockingness. * If an attempted steal fails, a thief always chooses a different * random victim target to try next. So, in order for one thief to * progress, it suffices for any in-progress poll or new push on * any empty queue to complete. (This is why we normally use * method pollAt and its variants that try once at the apparent * base index, else consider alternative actions, rather than * method poll.) * * This approach also enables support of a user mode in which local * task processing is in FIFO, not LIFO order, simply by using * poll rather than pop. This can be useful in message-passing * frameworks in which tasks are never joined. However neither * mode considers affinities, loads, cache localities, etc, so * rarely provide the best possible performance on a given * machine, but portably provide good throughput by averaging over * these factors. (Further, even if we did try to use such * information, we do not usually have a basis for exploiting it. * For example, some sets of tasks profit from cache affinities, * but others are harmed by cache pollution effects.) * * WorkQueues are also used in a similar way for tasks submitted * to the pool. We cannot mix these tasks in the same queues used * for work-stealing (this would contaminate lifo/fifo * processing). Instead, we loosely associate submission queues * with submitting threads, using a form of hashing. The * ThreadLocal Submitter class contains a value initially used as * a hash code for choosing existing queues, but may be randomly * repositioned upon contention with other submitters. In * essence, submitters act like workers except that they never * take tasks, and they are multiplexed on to a finite number of * shared work queues. However, classes are set up so that future * extensions could allow submitters to optionally help perform * tasks as well. Insertion of tasks in shared mode requires a * lock (mainly to protect in the case of resizing) but we use * only a simple spinlock (using bits in field runState), because * submitters encountering a busy queue move on to try or create * other queues -- they block only when creating and registering * new queues. * * Management * ========== * * The main throughput advantages of work-stealing stem from * decentralized control -- workers mostly take tasks from * themselves or each other. 
We cannot negate this in the * implementation of other management responsibilities. The main * tactic for avoiding bottlenecks is packing nearly all * essentially atomic control state into two volatile variables * that are by far most often read (not written) as status and * consistency checks. * * Field "ctl" contains 64 bits holding all the information needed * to atomically decide to add, inactivate, enqueue (on an event * queue), dequeue, and/or re-activate workers. To enable this * packing, we restrict maximum parallelism to (1<<15)-1 (which is * far in excess of normal operating range) to allow ids, counts, * and their negations (used for thresholding) to fit into 16bit * fields. * * Field "runState" contains 32 bits needed to register and * deregister WorkQueues, as well as to enable shutdown. It is * only modified under a lock (normally briefly held, but * occasionally protecting allocations and resizings) but even * when locked remains available to check consistency. * * Recording WorkQueues. WorkQueues are recorded in the * "workQueues" array that is created upon pool construction and * expanded if necessary. Updates to the array while recording * new workers and unrecording terminated ones are protected from * each other by a lock but the array is otherwise concurrently * readable, and accessed directly. To simplify index-based * operations, the array size is always a power of two, and all * readers must tolerate null slots. Shared (submission) queues * are at even indices, worker queues at odd indices. Grouping * them together in this way simplifies and speeds up task * scanning. * * All worker thread creation is on-demand, triggered by task * submissions, replacement of terminated workers, and/or * compensation for blocked workers. However, all other support * code is set up to work with other policies. To ensure that we * do not hold on to worker references that would prevent GC, ALL * accesses to workQueues are via indices into the workQueues * array (which is one source of some of the messy code * constructions here). In essence, the workQueues array serves as * a weak reference mechanism. Thus for example the wait queue * field of ctl stores indices, not references. Access to the * workQueues in associated methods (for example signalWork) must * both index-check and null-check the IDs. All such accesses * ignore bad IDs by returning out early from what they are doing, * since this can only be associated with termination, in which * case it is OK to give up. All uses of the workQueues array * also check that it is non-null (even if previously * non-null). This allows nulling during termination, which is * currently not necessary, but remains an option for * resource-revocation-based shutdown schemes. It also helps * reduce JIT issuance of uncommon-trap code, which tends to * unnecessarily complicate control flow in some methods. * * Event Queuing. Unlike HPC work-stealing frameworks, we cannot * let workers spin indefinitely scanning for tasks when none can * be found immediately, and we cannot start/resume workers unless * there appear to be tasks available. On the other hand, we must * quickly prod them into action when new tasks are submitted or * generated. In many usages, ramp-up time to activate workers is * the main limiting factor in overall performance (this is * compounded at program start-up by JIT compilation and * allocation). So we try to streamline this as much as possible. 
* We park/unpark workers after placing in an event wait queue * when they cannot find work. This "queue" is actually a simple * Treiber stack, headed by the "id" field of ctl, plus a 15bit * counter value (that reflects the number of times a worker has * been inactivated) to avoid ABA effects (we need only as many * version numbers as worker threads). Successors are held in * field WorkQueue.nextWait. Queuing deals with several intrinsic * races, mainly that a task-producing thread can miss seeing (and * signalling) another thread that gave up looking for work but * has not yet entered the wait queue. We solve this by requiring * a full sweep of all workers (via repeated calls to method * scan()) both before and after a newly waiting worker is added * to the wait queue. During a rescan, the worker might release * some other queued worker rather than itself, which has the same * net effect. Because enqueued workers may actually be rescanning * rather than waiting, we set and clear the "parker" field of * WorkQueues to reduce unnecessary calls to unpark. (This * requires a secondary recheck to avoid missed signals.) Note * the unusual conventions about Thread.interrupts surrounding * parking and other blocking: Because interrupts are used solely * to alert threads to check termination, which is checked anyway * upon blocking, we clear status (using Thread.interrupted) * before any call to park, so that park does not immediately * return due to status being set via some other unrelated call to * interrupt in user code. * * Signalling. We create or wake up workers only when there * appears to be at least one task they might be able to find and * execute. When a submission is added or another worker adds a * task to a queue that previously had fewer than two tasks, they * signal waiting workers (or trigger creation of new ones if * fewer than the given parallelism level -- see signalWork). * These primary signals are buttressed by signals during rescans; * together these cover the signals needed in cases when more * tasks are pushed but untaken, and improve performance compared * to having one thread wake up all workers. * * Trimming workers. To release resources after periods of lack of * use, a worker starting to wait when the pool is quiescent will * time out and terminate if the pool has remained quiescent for * SHRINK_RATE nanosecs. This will slowly propagate, eventually * terminating all workers after long periods of non-use. * * Shutdown and Termination. A call to shutdownNow atomically sets * a runState bit and then (non-atomically) sets each worker's * runState status, cancels all unprocessed tasks, and wakes up * all waiting workers. Detecting whether termination should * commence after a non-abrupt shutdown() call requires more work * and bookkeeping. We need consensus about quiescence (i.e., that * there is no more work). The active count provides a primary * indication but non-abrupt shutdown still requires a rechecking * scan for any workers that are inactive but not queued. * * Joining Tasks * ============= * * Any of several actions may be taken when one worker is waiting * to join a task stolen (or always held) by another. Because we * are multiplexing many tasks on to a pool of workers, we can't * just let them block (as in Thread.join). 
We also cannot just * reassign the joiner's run-time stack with another and replace * it later, which would be a form of "continuation", that even if * possible is not necessarily a good idea since we sometimes need * both an unblocked task and its continuation to progress. * Instead we combine two tactics: * * Helping: Arranging for the joiner to execute some task that it * would be running if the steal had not occurred. * * Compensating: Unless there are already enough live threads, * method tryCompensate() may create or re-activate a spare * thread to compensate for blocked joiners until they unblock. * * A third form (implemented in tryRemoveAndExec and * tryPollForAndExec) amounts to helping a hypothetical * compensator: If we can readily tell that a possible action of a * compensator is to steal and execute the task being joined, the * joining thread can do so directly, without the need for a * compensation thread (although at the expense of larger run-time * stacks, but the tradeoff is typically worthwhile). * * The ManagedBlocker extension API can't use helping so relies * only on compensation in method awaitBlocker. * * The algorithm in tryHelpStealer entails a form of "linear" * helping: Each worker records (in field currentSteal) the most * recent task it stole from some other worker. Plus, it records * (in field currentJoin) the task it is currently actively * joining. Method tryHelpStealer uses these markers to try to * find a worker to help (i.e., steal back a task from and execute * it) that could hasten completion of the actively joined task. * In essence, the joiner executes a task that would be on its own * local deque had the to-be-joined task not been stolen. This may * be seen as a conservative variant of the approach in Wagner & * Calder "Leapfrogging: a portable technique for implementing * efficient futures" SIGPLAN Notices, 1993 * (http://portal.acm.org/citation.cfm?id=155354). It differs in * that: (1) We only maintain dependency links across workers upon * steals, rather than use per-task bookkeeping. This sometimes * requires a linear scan of workQueues array to locate stealers, * but often doesn't because stealers leave hints (that may become * stale/wrong) of where to locate them. A stealHint is only a * hint because a worker might have had multiple steals and the * hint records only one of them (usually the most current). * Hinting isolates cost to when it is needed, rather than adding * to per-task overhead. (2) It is "shallow", ignoring nesting * and potentially cyclic mutual steals. (3) It is intentionally * racy: field currentJoin is updated only while actively joining, * which means that we miss links in the chain during long-lived * tasks, GC stalls etc (which is OK since blocking in such cases * is usually a good idea). (4) We bound the number of attempts * to find work (see MAX_HELP) and fall back to suspending the * worker and if necessary replacing it with another. * * It is impossible to keep exactly the target parallelism number * of threads running at any given time. Determining the * existence of conservatively safe helping targets, the * availability of already-created spares, and the apparent need * to create new spares are all racy, so we rely on multiple * retries of each. 
Compensation in the apparent absence of * helping opportunities is challenging to control on JVMs, where * GC and other activities can stall progress of tasks that in * turn stall out many other dependent tasks, without us being * able to determine whether they will ever require compensation. * Even though work-stealing otherwise encounters little * degradation in the presence of more threads than cores, * aggressively adding new threads in such cases entails risk of * unwanted positive feedback control loops in which more threads * cause more dependent stalls (as well as delayed progress of * unblocked threads to the point that we know they are available) * leading to more situations requiring more threads, and so * on. This aspect of control can be seen as an (analytically * intractable) game with an opponent that may choose the worst * (for us) active thread to stall at any time. We take several * precautions to bound losses (and thus bound gains), mainly in * methods tryCompensate and awaitJoin: (1) We only try * compensation after attempting enough helping steps (measured * via counting and timing) that we have already consumed the * estimated cost of creating and activating a new thread. (2) We * allow up to 50% of threads to be blocked before initially * adding any others, and unless completely saturated, check that * some work is available for a new worker before adding. Also, we * create up to only 50% more threads until entering a mode that * only adds a thread if all others are possibly blocked. All * together, this means that we might be half as fast to react, * and create half as many threads as possible in the ideal case, * but present vastly fewer anomalies in all other cases compared * to both more aggressive and more conservative alternatives. * * Style notes: There is a lot of representation-level coupling * among classes ForkJoinPool, ForkJoinWorkerThread, and * ForkJoinTask. The fields of WorkQueue maintain data structures * managed by ForkJoinPool, so are directly accessed. There is * little point trying to reduce this, since any associated future * changes in representations will need to be accompanied by * algorithmic changes anyway. Several methods intrinsically * sprawl because they must accumulate sets of consistent reads of * volatiles held in local variables. Methods signalWork() and * scan() are the main bottlenecks, so are especially heavily * micro-optimized/mangled. There are lots of inline assignments * (of form "while ((local = field) != 0)") which are usually the * simplest way to ensure the required read orderings (which are * sometimes critical). This leads to a "C"-like style of listing * declarations of these locals at the heads of methods or blocks. * There are several occurrences of the unusual "do {} while * (!cas...)" which is the simplest way to force an update of a * CAS'ed variable. There are also other coding oddities that help * some methods perform reasonably even when interpreted (not * compiled). * * The order of declarations in this file is: * (1) Static utility functions * (2) Nested (static) classes * (3) Static fields * (4) Fields, along with constants used when unpacking some of them * (5) Internal control methods * (6) Callbacks and other support for ForkJoinTask methods * (7) Exported methods * (8) Static block initializing statics in minimally dependent order */ // Static utilities /** * If there is a security manager, makes sure caller has * permission to modify threads. 
*/ private static void checkPermission() { SecurityManager security = System.getSecurityManager(); if (security != null) security.checkPermission(modifyThreadPermission); } // Nested classes /** * Factory for creating new {@link ForkJoinWorkerThread}s. * A {@code ForkJoinWorkerThreadFactory} must be defined and used * for {@code ForkJoinWorkerThread} subclasses that extend base * functionality or initialize threads with different contexts. */ public static interface ForkJoinWorkerThreadFactory { /** * Returns a new worker thread operating in the given pool. * * @param pool the pool this thread works in * @throws NullPointerException if the pool is null */ public ForkJoinWorkerThread newThread(ForkJoinPool pool); } /** * Default ForkJoinWorkerThreadFactory implementation; creates a * new ForkJoinWorkerThread. */ static class DefaultForkJoinWorkerThreadFactory implements ForkJoinWorkerThreadFactory { public ForkJoinWorkerThread newThread(ForkJoinPool pool) { return new ForkJoinWorkerThread(pool); } } /** * A simple non-reentrant lock used for exclusion when managing * queues and workers. We use a custom lock so that we can readily * probe lock state in constructions that check among alternative * actions. The lock is normally only very briefly held, and * sometimes treated as a spinlock, but other usages block to * reduce overall contention in those cases where locked code * bodies perform allocation/resizing. */ static final class Mutex extends AbstractQueuedSynchronizer { public final boolean tryAcquire(int ignore) { return compareAndSetState(0, 1); } public final boolean tryRelease(int ignore) { setState(0); return true; } public final void lock() { acquire(0); } public final void unlock() { release(0); } public final boolean isHeldExclusively() { return getState() == 1; } public final Condition newCondition() { return new ConditionObject(); } } /** * Class for artificial tasks that are used to replace the target * of local joins if they are removed from an interior queue slot * in WorkQueue.tryRemoveAndExec. We don't need the proxy to * actually do anything beyond having a unique identity. */ static final class EmptyTask extends ForkJoinTask<Void> { EmptyTask() { status = ForkJoinTask.NORMAL; } // force done public final Void getRawResult() { return null; } public final void setRawResult(Void x) {} public final boolean exec() { return true; } } /** * Queues supporting work-stealing as well as external task * submission. See above for main rationale and algorithms. * Implementation relies heavily on "Unsafe" intrinsics * and selective use of "volatile": * * Field "base" is the index (mod array.length) of the least valid * queue slot, which is always the next position to steal (poll) * from if nonempty. Reads and writes require volatile orderings * but not CAS, because updates are only performed after slot * CASes. * * Field "top" is the index (mod array.length) of the next queue * slot to push to or pop from. It is written only by owner thread * for push, or under lock for trySharedPush, and accessed by * other threads only after reading (volatile) base. Both top and * base are allowed to wrap around on overflow, but (top - base) * (or more commonly -(base - top) to force volatile read of base * before top) still estimates size. * * The array slots are read and written using the emulation of * volatiles/atomics provided by Unsafe. 
Insertions must in * general use putOrderedObject as a form of releasing store to * ensure that all writes to the task object are ordered before * its publication in the queue. (Although we can avoid one case * of this when locked in trySharedPush.) All removals entail a * CAS to null. The array is always a power of two. To ensure * safety of Unsafe array operations, all accesses perform * explicit null checks and implicit bounds checks via * power-of-two masking. * * In addition to basic queuing support, this class contains * fields described elsewhere to control execution. It turns out * to work better memory-layout-wise to include them in this * class rather than a separate class. * * Performance on most platforms is very sensitive to placement of * instances of both WorkQueues and their arrays -- we absolutely * do not want multiple WorkQueue instances or multiple queue * arrays sharing cache lines. (It would be best for queue objects * and their arrays to share, but there is nothing available to * help arrange that). Unfortunately, because they are recorded * in a common array, WorkQueue instances are often moved to be * adjacent by garbage collectors. To reduce impact, we use field * padding that works OK on common platforms; this effectively * trades off slightly slower average field access for the sake of * avoiding really bad worst-case access. (Until better JVM * support is in place, this padding is dependent on transient * properties of JVM field layout rules.) We also take care in * allocating, sizing and resizing the array. Non-shared queue * arrays are initialized (via method growArray) by workers before * use. Others are allocated on first use. */ static final class WorkQueue { /** * Capacity of work-stealing queue array upon initialization. * Must be a power of two; at least 4, but should be larger to * reduce or eliminate cacheline sharing among queues. * Currently, it is much larger, as a partial workaround for * the fact that JVMs often place arrays in locations that * share GC bookkeeping (especially cardmarks) such that * per-write accesses encounter serious memory contention. */ static final int INITIAL_QUEUE_CAPACITY = 1 << 13; /** * Maximum size for queue arrays. Must be a power of two less * than or equal to 1 << (31 - width of array entry) to ensure * lack of wraparound of index calculations, but defined to a * value a bit less than this to help users trap runaway * programs before saturating systems. 
*/ static final int MAXIMUM_QUEUE_CAPACITY = 1 << 26; // 64M volatile long totalSteals; // cumulative number of steals int seed; // for random scanning; initialize nonzero volatile int eventCount; // encoded inactivation count; < 0 if inactive int nextWait; // encoded record of next event waiter int rescans; // remaining scans until block int nsteals; // top-level task executions since last idle final int mode; // lifo, fifo, or shared int poolIndex; // index of this queue in pool (or 0) int stealHint; // index of most recent known stealer volatile int runState; // 1: locked, -1: terminate; else 0 volatile int base; // index of next slot for poll int top; // index of next slot for push ForkJoinTask<?>[] array; // the elements (initially unallocated) final ForkJoinPool pool; // the containing pool (may be null) final ForkJoinWorkerThread owner; // owning thread or null if shared volatile Thread parker; // == owner during call to park; else null volatile ForkJoinTask<?> currentJoin; // task being joined in awaitJoin ForkJoinTask<?> currentSteal; // current non-local task being executed // Heuristic padding to ameliorate unfortunate memory placements Object p00, p01, p02, p03, p04, p05, p06, p07; Object p08, p09, p0a, p0b, p0c, p0d, p0e; WorkQueue(ForkJoinPool pool, ForkJoinWorkerThread owner, int mode) { this.mode = mode; this.pool = pool; this.owner = owner; // Place indices in the center of array (that is not yet allocated) base = top = INITIAL_QUEUE_CAPACITY >>> 1; } /** * Returns the approximate number of tasks in the queue. */ final int queueSize() { int n = base - top; // non-owner callers must read base first return (n >= 0) ? 0 : -n; // ignore transient negative } /** * Provides a more accurate estimate of whether this queue has * any tasks than does queueSize, by checking whether a * near-empty queue has at least one unclaimed task. */ final boolean isEmpty() { ForkJoinTask<?>[] a; int m, s; int n = base - (s = top); return (n >= 0 || (n == -1 && ((a = array) == null || (m = a.length - 1) < 0 || U.getObjectVolatile (a, ((m & (s - 1)) << ASHIFT) + ABASE) == null))); } /** * Pushes a task. Call only by owner in unshared queues. * * @param task the task. Caller must ensure non-null. * @throw RejectedExecutionException if array cannot be resized */ final void push(ForkJoinTask<?> task) { ForkJoinTask<?>[] a; ForkJoinPool p; int s = top, m, n; if ((a = array) != null) { // ignore if queue removed U.putOrderedObject (a, (((m = a.length - 1) & s) << ASHIFT) + ABASE, task); if ((n = (top = s + 1) - base) <= 2) { if ((p = pool) != null) p.signalWork(); } else if (n >= m) growArray(true); } } /** * Pushes a task if lock is free and array is either big * enough or can be resized to be big enough. * * @param task the task. Caller must ensure non-null. * @return true if submitted */ final boolean trySharedPush(ForkJoinTask<?> task) { boolean submitted = false; if (runState == 0 && U.compareAndSwapInt(this, RUNSTATE, 0, 1)) { ForkJoinTask<?>[] a = array; int s = top; try { if ((a != null && a.length > s + 1 - base) || (a = growArray(false)) != null) { // must presize int j = (((a.length - 1) & s) << ASHIFT) + ABASE; U.putObject(a, (long)j, task); // don't need "ordered" top = s + 1; submitted = true; } } finally { runState = 0; // unlock } } return submitted; } /** * Takes next task, if one exists, in LIFO order. Call only * by owner in unshared queues. (We do not have a shared * version of this method because it is never needed.) 
*/ final ForkJoinTask<?> pop() { ForkJoinTask<?>[] a; ForkJoinTask<?> t; int m; if ((a = array) != null && (m = a.length - 1) >= 0) { for (int s; (s = top - 1) - base >= 0;) { long j = ((m & s) << ASHIFT) + ABASE; if ((t = (ForkJoinTask<?>)U.getObject(a, j)) == null) break; if (U.compareAndSwapObject(a, j, t, null)) { top = s; return t; } } } return null; } /** * Takes a task in FIFO order if b is base of queue and a task * can be claimed without contention. Specialized versions * appear in ForkJoinPool methods scan and tryHelpStealer. */ final ForkJoinTask<?> pollAt(int b) { ForkJoinTask<?> t; ForkJoinTask<?>[] a; if ((a = array) != null) { int j = (((a.length - 1) & b) << ASHIFT) + ABASE; if ((t = (ForkJoinTask<?>)U.getObjectVolatile(a, j)) != null && base == b && U.compareAndSwapObject(a, j, t, null)) { base = b + 1; return t; } } return null; } /** * Takes next task, if one exists, in FIFO order. */ final ForkJoinTask<?> poll() { ForkJoinTask<?>[] a; int b; ForkJoinTask<?> t; while ((b = base) - top < 0 && (a = array) != null) { int j = (((a.length - 1) & b) << ASHIFT) + ABASE; t = (ForkJoinTask<?>)U.getObjectVolatile(a, j); if (t != null) { if (base == b && U.compareAndSwapObject(a, j, t, null)) { base = b + 1; return t; } } else if (base == b) { if (b + 1 == top) break; Thread.yield(); // wait for lagging update } } return null; } /** * Takes next task, if one exists, in order specified by mode. */ final ForkJoinTask<?> nextLocalTask() { return mode == 0 ? pop() : poll(); } /** * Returns next task, if one exists, in order specified by mode. */ final ForkJoinTask<?> peek() { ForkJoinTask<?>[] a = array; int m; if (a == null || (m = a.length - 1) < 0) return null; int i = mode == 0 ? top - 1 : base; int j = ((i & m) << ASHIFT) + ABASE; return (ForkJoinTask<?>)U.getObjectVolatile(a, j); } /** * Pops the given task only if it is at the current top. */ final boolean tryUnpush(ForkJoinTask<?> t) { ForkJoinTask<?>[] a; int s; if ((a = array) != null && (s = top) != base && U.compareAndSwapObject (a, (((a.length - 1) & --s) << ASHIFT) + ABASE, t, null)) { top = s; return true; } return false; } /** * Polls the given task only if it is at the current base. */ final boolean pollFor(ForkJoinTask<?> task) { ForkJoinTask<?>[] a; int b; if ((b = base) - top < 0 && (a = array) != null) { int j = (((a.length - 1) & b) << ASHIFT) + ABASE; if (U.getObjectVolatile(a, j) == task && base == b && U.compareAndSwapObject(a, j, task, null)) { base = b + 1; return true; } } return false; } /** * Initializes or doubles the capacity of array. Call either * by owner or with lock held -- it is OK for base, but not * top, to move while resizings are in progress. * * @param rejectOnFailure if true, throw exception if capacity * exceeded (relayed ultimately to user); else return null. */ final ForkJoinTask<?>[] growArray(boolean rejectOnFailure) { ForkJoinTask<?>[] oldA = array; int size = oldA != null ? 
oldA.length << 1 : INITIAL_QUEUE_CAPACITY; if (size <= MAXIMUM_QUEUE_CAPACITY) { int oldMask, t, b; ForkJoinTask<?>[] a = array = new ForkJoinTask<?>[size]; if (oldA != null && (oldMask = oldA.length - 1) >= 0 && (t = top) - (b = base) > 0) { int mask = size - 1; do { ForkJoinTask<?> x; int oldj = ((b & oldMask) << ASHIFT) + ABASE; int j = ((b & mask) << ASHIFT) + ABASE; x = (ForkJoinTask<?>)U.getObjectVolatile(oldA, oldj); if (x != null && U.compareAndSwapObject(oldA, oldj, x, null)) U.putObjectVolatile(a, j, x); } while (++b != t); } return a; } else if (!rejectOnFailure) return null; else throw new RejectedExecutionException("Queue capacity exceeded"); } /** * Removes and cancels all known tasks, ignoring any exceptions. */ final void cancelAll() { ForkJoinTask.cancelIgnoringExceptions(currentJoin); ForkJoinTask.cancelIgnoringExceptions(currentSteal); for (ForkJoinTask<?> t; (t = poll()) != null; ) ForkJoinTask.cancelIgnoringExceptions(t); } /** * Computes next value for random probes. Scans don't require * a very high quality generator, but also not a crummy one. * Marsaglia xor-shift is cheap and works well enough. Note: * This is manually inlined in its usages in ForkJoinPool to * avoid writes inside busy scan loops. */ final int nextSeed() { int r = seed; r ^= r << 13; r ^= r >>> 17; return seed = r ^= r << 5; } // Execution methods /** * Pops and runs tasks until empty. */ private void popAndExecAll() { // A bit faster than repeated pop calls ForkJoinTask<?>[] a; int m, s; long j; ForkJoinTask<?> t; while ((a = array) != null && (m = a.length - 1) >= 0 && (s = top - 1) - base >= 0 && (t = ((ForkJoinTask<?>) U.getObject(a, j = ((m & s) << ASHIFT) + ABASE))) != null) { if (U.compareAndSwapObject(a, j, t, null)) { top = s; t.doExec(); } } } /** * Polls and runs tasks until empty. */ private void pollAndExecAll() { for (ForkJoinTask<?> t; (t = poll()) != null;) t.doExec(); } /** * If present, removes from queue and executes the given task, or * any other cancelled task. Returns (true) immediately on any CAS * or consistency check failure so caller can retry. * * @return 0 if no progress can be made, else positive * (this unusual convention simplifies use with tryHelpStealer.) */ final int tryRemoveAndExec(ForkJoinTask<?> task) { int stat = 1; boolean removed = false, empty = true; ForkJoinTask<?>[] a; int m, s, b, n; if ((a = array) != null && (m = a.length - 1) >= 0 && (n = (s = top) - (b = base)) > 0) { for (ForkJoinTask<?> t;;) { // traverse from s to b int j = ((--s & m) << ASHIFT) + ABASE; t = (ForkJoinTask<?>)U.getObjectVolatile(a, j); if (t == null) // inconsistent length break; else if (t == task) { if (s + 1 == top) { // pop if (!U.compareAndSwapObject(a, j, task, null)) break; top = s; removed = true; } else if (base == b) // replace with proxy removed = U.compareAndSwapObject(a, j, task, new EmptyTask()); break; } else if (t.status >= 0) empty = false; else if (s + 1 == top) { // pop and throw away if (U.compareAndSwapObject(a, j, t, null)) top = s; break; } if (--n == 0) { if (!empty && base == b) stat = 0; break; } } } if (removed) task.doExec(); return stat; } /** * Executes a top-level task and any local tasks remaining * after execution. */ final void runTask(ForkJoinTask<?> t) { if (t != null) { currentSteal = t; t.doExec(); if (top != base) { // process remaining local tasks if (mode == 0) popAndExecAll(); else pollAndExecAll(); } ++nsteals; currentSteal = null; } } /** * Executes a non-top-level (stolen) task. 
*/ final void runSubtask(ForkJoinTask<?> t) { if (t != null) { ForkJoinTask<?> ps = currentSteal; currentSteal = t; t.doExec(); currentSteal = ps; } } /** * Returns true if owned and not known to be blocked. */ final boolean isApparentlyUnblocked() { Thread wt; Thread.State s; return (eventCount >= 0 && (wt = owner) != null && (s = wt.getState()) != Thread.State.BLOCKED && s != Thread.State.WAITING && s != Thread.State.TIMED_WAITING); } /** * If this owned and is not already interrupted, try to * interrupt and/or unpark, ignoring exceptions. */ final void interruptOwner() { Thread wt, p; if ((wt = owner) != null && !wt.isInterrupted()) { try { wt.interrupt(); } catch (SecurityException ignore) { } } if ((p = parker) != null) U.unpark(p); } // Unsafe mechanics private static final sun.misc.Unsafe U; private static final long RUNSTATE; private static final int ABASE; private static final int ASHIFT; static { int s; try { U = getUnsafe(); Class<?> k = WorkQueue.class; Class<?> ak = ForkJoinTask[].class; RUNSTATE = U.objectFieldOffset (k.getDeclaredField("runState")); ABASE = U.arrayBaseOffset(ak); s = U.arrayIndexScale(ak); } catch (Exception e) { throw new Error(e); } if ((s & (s-1)) != 0) throw new Error("data type scale not a power of two"); ASHIFT = 31 - Integer.numberOfLeadingZeros(s); } } /** * Per-thread records for threads that submit to pools. Currently * holds only pseudo-random seed / index that is used to choose * submission queues in method doSubmit. In the future, this may * also incorporate a means to implement different task rejection * and resubmission policies. * * Seeds for submitters and workers/workQueues work in basically * the same way but are initialized and updated using slightly * different mechanics. Both are initialized using the same * approach as in class ThreadLocal, where successive values are * unlikely to collide with previous values. This is done during * registration for workers, but requires a separate AtomicInteger * for submitters. Seeds are then randomly modified upon * collisions using xorshifts, which requires a non-zero seed. */ static final class Submitter { int seed; Submitter() { int s = nextSubmitterSeed.getAndAdd(SEED_INCREMENT); seed = (s == 0) ? 1 : s; // ensure non-zero } } /** ThreadLocal class for Submitters */ static final class ThreadSubmitter extends ThreadLocal<Submitter> { public Submitter initialValue() { return new Submitter(); } } // static fields (initialized in static initializer below) /** * Creates a new ForkJoinWorkerThread. This factory is used unless * overridden in ForkJoinPool constructors. */ public static final ForkJoinWorkerThreadFactory defaultForkJoinWorkerThreadFactory; /** * Generator for assigning sequence numbers as pool names. */ private static final AtomicInteger poolNumberGenerator; /** * Generator for initial hashes/seeds for submitters. Accessed by * Submitter class constructor. */ static final AtomicInteger nextSubmitterSeed; /** * Permission required for callers of methods that may start or * kill threads. */ private static final RuntimePermission modifyThreadPermission; /** * Per-thread submission bookeeping. Shared across all pools * to reduce ThreadLocal pollution and because random motion * to avoid contention in one pool is likely to hold for others. */ private static final ThreadSubmitter submitters; // static constants /** * The wakeup interval (in nanoseconds) for a worker waiting for a * task when the pool is quiescent to instead try to shrink the * number of workers. 
The exact value does not matter too * much. It must be short enough to release resources during * sustained periods of idleness, but not so short that threads * are continually re-created. */ private static final long SHRINK_RATE = 1L * 1000L * 1000L * 1000L; // 1 seconds /** * The timeout value for attempted shrinkage, includes * some slop to cope with system timer imprecision. */ private static final long SHRINK_TIMEOUT = SHRINK_RATE - (SHRINK_RATE / 10); /** * The maximum stolen->joining link depth allowed in method * tryHelpStealer. Must be a power of two. This value also * controls the maximum number of times to try to help join a task * without any apparent progress or change in pool state before * giving up and blocking (see awaitJoin). Depths for legitimate * chains are unbounded, but we use a fixed constant to avoid * (otherwise unchecked) cycles and to bound staleness of * traversal parameters at the expense of sometimes blocking when * we could be helping. */ private static final int MAX_HELP = 64; /** * Secondary time-based bound (in nanosecs) for helping attempts * before trying compensated blocking in awaitJoin. Used in * conjunction with MAX_HELP to reduce variance due to different * polling rates associated with different helping options. The * value should roughly approximate the time required to create * and/or activate a worker thread. */ private static final long COMPENSATION_DELAY = 1L << 18; // ~0.25 millisec /** * Increment for seed generators. See class ThreadLocal for * explanation. */ private static final int SEED_INCREMENT = 0x61c88647; /** * Bits and masks for control variables * * Field ctl is a long packed with: * AC: Number of active running workers minus target parallelism (16 bits) * TC: Number of total workers minus target parallelism (16 bits) * ST: true if pool is terminating (1 bit) * EC: the wait count of top waiting thread (15 bits) * ID: poolIndex of top of Treiber stack of waiters (16 bits) * * When convenient, we can extract the upper 32 bits of counts and * the lower 32 bits of queue state, u = (int)(ctl >>> 32) and e = * (int)ctl. The ec field is never accessed alone, but always * together with id and st. The offsets of counts by the target * parallelism and the positionings of fields makes it possible to * perform the most common checks via sign tests of fields: When * ac is negative, there are not enough active workers, when tc is * negative, there are not enough total workers, and when e is * negative, the pool is terminating. To deal with these possibly * negative fields, we use casts in and out of "short" and/or * signed shifts to maintain signedness. * * When a thread is queued (inactivated), its eventCount field is * set negative, which is the only way to tell if a worker is * prevented from executing tasks, even though it must continue to * scan for them to avoid queuing races. Note however that * eventCount updates lag releases so usage requires care. * * Field runState is an int packed with: * SHUTDOWN: true if shutdown is enabled (1 bit) * SEQ: a sequence number updated upon (de)registering workers (30 bits) * INIT: set true after workQueues array construction (1 bit) * * The sequence number enables simple consistency checks: * Staleness of read-only operations on the workQueues array can * be checked by comparing runState before vs after the reads. 
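     *
     * <pre> {@code
     * // Illustrative decode of the packed ctl word (sketch only, not in the
     * // original comment; it mirrors expressions used elsewhere in this class):
     * int u  = (int)(ctl >>> 32);        // upper word: AC and TC fields
     * int ac = u >> UAC_SHIFT;           // active count offset; negative => too few active
     * int tc = (short)(u >>> UTC_SHIFT); // total count offset; negative => too few total
     * int e  = (int)ctl;                 // lower word: ST, EC and ID fields
     * boolean terminating = e < 0;       // STOP_BIT set => pool is terminating
     * }</pre>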
*/ // bit positions/shifts for fields private static final int AC_SHIFT = 48; private static final int TC_SHIFT = 32; private static final int ST_SHIFT = 31; private static final int EC_SHIFT = 16; // bounds private static final int SMASK = 0xffff; // short bits private static final int MAX_CAP = 0x7fff; // max #workers - 1 private static final int SQMASK = 0xfffe; // even short bits private static final int SHORT_SIGN = 1 << 15; private static final int INT_SIGN = 1 << 31; // masks private static final long STOP_BIT = 0x0001L << ST_SHIFT; private static final long AC_MASK = ((long)SMASK) << AC_SHIFT; private static final long TC_MASK = ((long)SMASK) << TC_SHIFT; // units for incrementing and decrementing private static final long TC_UNIT = 1L << TC_SHIFT; private static final long AC_UNIT = 1L << AC_SHIFT; // masks and units for dealing with u = (int)(ctl >>> 32) private static final int UAC_SHIFT = AC_SHIFT - 32; private static final int UTC_SHIFT = TC_SHIFT - 32; private static final int UAC_MASK = SMASK << UAC_SHIFT; private static final int UTC_MASK = SMASK << UTC_SHIFT; private static final int UAC_UNIT = 1 << UAC_SHIFT; private static final int UTC_UNIT = 1 << UTC_SHIFT; // masks and units for dealing with e = (int)ctl private static final int E_MASK = 0x7fffffff; // no STOP_BIT private static final int E_SEQ = 1 << EC_SHIFT; // runState bits private static final int SHUTDOWN = 1 << 31; // access mode for WorkQueue static final int LIFO_QUEUE = 0; static final int FIFO_QUEUE = 1; static final int SHARED_QUEUE = -1; // Instance fields /* * Field layout order in this class tends to matter more than one * would like. Runtime layout order is only loosely related to * declaration order and may differ across JVMs, but the following * empirically works OK on current JVMs. */ volatile long ctl; // main pool control final int parallelism; // parallelism level final int localMode; // per-worker scheduling mode final int submitMask; // submit queue index bound int nextSeed; // for initializing worker seeds volatile int runState; // shutdown status and seq WorkQueue[] workQueues; // main registry final Mutex lock; // for registration final Condition termination; // for awaitTermination final ForkJoinWorkerThreadFactory factory; // factory for new workers final Thread.UncaughtExceptionHandler ueh; // per-worker UEH final AtomicLong stealCount; // collect counts when terminated final AtomicInteger nextWorkerNumber; // to create worker name string final String workerNamePrefix; // to create worker name string // Creating, registering, and deregistering workers /** * Tries to create and start a worker */ private void addWorker() { Throwable ex = null; ForkJoinWorkerThread wt = null; try { if ((wt = factory.newThread(this)) != null) { wt.start(); return; } } catch (Throwable e) { ex = e; } deregisterWorker(wt, ex); // adjust counts etc on failure } /** * Callback from ForkJoinWorkerThread constructor to assign a * public name. This must be separate from registerWorker because * it is called during the "super" constructor call in * ForkJoinWorkerThread. */ final String nextWorkerName() { return workerNamePrefix.concat (Integer.toString(nextWorkerNumber.addAndGet(1))); } /** * Callback from ForkJoinWorkerThread constructor to establish its * poolIndex and record its WorkQueue. To avoid scanning bias due * to packing entries in front of the workQueues array, we treat * the array as a simple power-of-two hash table using per-thread * seed as hash, expanding as needed. 
* * @param w the worker's queue */ final void registerWorker(WorkQueue w) { Mutex lock = this.lock; lock.lock(); try { WorkQueue[] ws = workQueues; if (w != null && ws != null) { // skip on shutdown/failure int rs, n = ws.length, m = n - 1; int s = nextSeed += SEED_INCREMENT; // rarely-colliding sequence w.seed = (s == 0) ? 1 : s; // ensure non-zero seed int r = (s << 1) | 1; // use odd-numbered indices if (ws[r &= m] != null) { // collision int probes = 0; // step by approx half size int step = (n <= 4) ? 2 : ((n >>> 1) & SQMASK) + 2; while (ws[r = (r + step) & m] != null) { if (++probes >= n) { workQueues = ws = Arrays.copyOf(ws, n <<= 1); m = n - 1; probes = 0; } } } w.eventCount = w.poolIndex = r; // establish before recording ws[r] = w; // also update seq runState = ((rs = runState) & SHUTDOWN) | ((rs + 2) & ~SHUTDOWN); } } finally { lock.unlock(); } } /** * Final callback from terminating worker, as well as upon failure * to construct or start a worker in addWorker. Removes record of * worker from array, and adjusts counts. If pool is shutting * down, tries to complete termination. * * @param wt the worker thread or null if addWorker failed * @param ex the exception causing failure, or null if none */ final void deregisterWorker(ForkJoinWorkerThread wt, Throwable ex) { Mutex lock = this.lock; WorkQueue w = null; if (wt != null && (w = wt.workQueue) != null) { w.runState = -1; // ensure runState is set stealCount.getAndAdd(w.totalSteals + w.nsteals); int idx = w.poolIndex; lock.lock(); try { // remove record from array WorkQueue[] ws = workQueues; if (ws != null && idx >= 0 && idx < ws.length && ws[idx] == w) ws[idx] = null; } finally { lock.unlock(); } } long c; // adjust ctl counts do {} while (!U.compareAndSwapLong (this, CTL, c = ctl, (((c - AC_UNIT) & AC_MASK) | ((c - TC_UNIT) & TC_MASK) | (c & ~(AC_MASK|TC_MASK))))); if (!tryTerminate(false, false) && w != null) { w.cancelAll(); // cancel remaining tasks if (w.array != null) // suppress signal if never ran signalWork(); // wake up or create replacement if (ex == null) // help clean refs on way out ForkJoinTask.helpExpungeStaleExceptions(); } if (ex != null) // rethrow U.throwException(ex); } // Submissions /** * Unless shutting down, adds the given task to a submission queue * at submitter's current queue index (modulo submission * range). If no queue exists at the index, one is created. If * the queue is busy, another index is randomly chosen. The * submitMask bounds the effective number of queues to the * (nearest power of two for) parallelism level. * * @param task the task. Caller must ensure non-null. 
*/ private void doSubmit(ForkJoinTask<?> task) { Submitter s = submitters.get(); for (int r = s.seed, m = submitMask;;) { WorkQueue[] ws; WorkQueue q; int k = r & m & SQMASK; // use only even indices if (runState < 0 || (ws = workQueues) == null || ws.length <= k) throw new RejectedExecutionException(); // shutting down else if ((q = ws[k]) == null) { // create new queue WorkQueue nq = new WorkQueue(this, null, SHARED_QUEUE); Mutex lock = this.lock; // construct outside lock lock.lock(); try { // recheck under lock int rs = runState; // to update seq if (ws == workQueues && ws[k] == null) { ws[k] = nq; runState = ((rs & SHUTDOWN) | ((rs + 2) & ~SHUTDOWN)); } } finally { lock.unlock(); } } else if (q.trySharedPush(task)) { signalWork(); return; } else if (m > 1) { // move to a different index r ^= r << 13; // same xorshift as WorkQueues r ^= r >>> 17; s.seed = r ^= r << 5; } else Thread.yield(); // yield if no alternatives } } // Maintaining ctl counts /** * Increments active count; mainly called upon return from blocking. */ final void incrementActiveCount() { long c; do {} while (!U.compareAndSwapLong(this, CTL, c = ctl, c + AC_UNIT)); } /** * Tries to activate or create a worker if too few are active. */ final void signalWork() { long c; int u; while ((u = (int)((c = ctl) >>> 32)) < 0) { // too few active WorkQueue[] ws = workQueues; int e, i; WorkQueue w; Thread p; if ((e = (int)c) > 0) { // at least one waiting if (ws != null && (i = e & SMASK) < ws.length && (w = ws[i]) != null && w.eventCount == (e | INT_SIGN)) { long nc = (((long)(w.nextWait & E_MASK)) | ((long)(u + UAC_UNIT) << 32)); if (U.compareAndSwapLong(this, CTL, c, nc)) { w.eventCount = (e + E_SEQ) & E_MASK; if ((p = w.parker) != null) U.unpark(p); // activate and release break; } } else break; } else if (e == 0 && (u & SHORT_SIGN) != 0) { // too few total long nc = (long)(((u + UTC_UNIT) & UTC_MASK) | ((u + UAC_UNIT) & UAC_MASK)) << 32; if (U.compareAndSwapLong(this, CTL, c, nc)) { addWorker(); break; } } else break; } } // Scanning for tasks /** * Top-level runloop for workers, called by ForkJoinWorkerThread.run. */ final void runWorker(WorkQueue w) { w.growArray(false); // initialize queue array in this thread do { w.runTask(scan(w)); } while (w.runState >= 0); } /** * Scans for and, if found, returns one task, else possibly * inactivates the worker. This method operates on single reads of * volatile state and is designed to be re-invoked continuously, * in part because it returns upon detecting inconsistencies, * contention, or state changes that indicate possible success on * re-invocation. * * The scan searches for tasks across a random permutation of * queues (starting at a random index and stepping by a random * relative prime, checking each at least once). The scan * terminates upon either finding a non-empty queue, or completing * the sweep. If the worker is not inactivated, it takes and * returns a task from this queue. On failure to find a task, we * take one of the following actions, after which the caller will * retry calling this method unless terminated. * * * If pool is terminating, terminate the worker. * * * If not a complete sweep, try to release a waiting worker. If * the scan terminated because the worker is inactivated, then the * released worker will often be the calling worker, and it can * succeed obtaining a task on the next call. Or maybe it is * another worker, but with same net effect. Releasing in other * cases as well ensures that we have enough workers running. 
* * * If not already enqueued, try to inactivate and enqueue the * worker on wait queue. Or, if inactivating has caused the pool * to be quiescent, relay to idleAwaitWork to check for * termination and possibly shrink pool. * * * If already inactive, and the caller has run a task since the * last empty scan, return (to allow rescan) unless others are * also inactivated. Field WorkQueue.rescans counts down on each * scan to ensure eventual inactivation and blocking. * * * If already enqueued and none of the above apply, park * awaiting signal, * * @param w the worker (via its WorkQueue) * @return a task or null of none found */ private final ForkJoinTask<?> scan(WorkQueue w) { WorkQueue[] ws; // first update random seed int r = w.seed; r ^= r << 13; r ^= r >>> 17; w.seed = r ^= r << 5; int rs = runState, m; // volatile read order matters if ((ws = workQueues) != null && (m = ws.length - 1) > 0) { int ec = w.eventCount; // ec is negative if inactive int step = (r >>> 16) | 1; // relative prime for (int j = (m + 1) << 2; ; r += step) { WorkQueue q; ForkJoinTask<?> t; ForkJoinTask<?>[] a; int b; if ((q = ws[r & m]) != null && (b = q.base) - q.top < 0 && (a = q.array) != null) { // probably nonempty int i = (((a.length - 1) & b) << ASHIFT) + ABASE; t = (ForkJoinTask<?>)U.getObjectVolatile(a, i); if (q.base == b && ec >= 0 && t != null && U.compareAndSwapObject(a, i, t, null)) { if (q.top - (q.base = b + 1) > 1) signalWork(); // help pushes signal return t; } else if (ec < 0 || j <= m) { rs = 0; // mark scan as imcomplete break; // caller can retry after release } } if (--j < 0) break; } long c = ctl; int e = (int)c, a = (int)(c >> AC_SHIFT), nr, ns; if (e < 0) // decode ctl on empty scan w.runState = -1; // pool is terminating else if (rs == 0 || rs != runState) { // incomplete scan WorkQueue v; Thread p; // try to release a waiter if (e > 0 && a < 0 && w.eventCount == ec && (v = ws[e & m]) != null && v.eventCount == (e | INT_SIGN)) { long nc = ((long)(v.nextWait & E_MASK) | ((c + AC_UNIT) & (AC_MASK|TC_MASK))); if (ctl == c && U.compareAndSwapLong(this, CTL, c, nc)) { v.eventCount = (e + E_SEQ) & E_MASK; if ((p = v.parker) != null) U.unpark(p); } } } else if (ec >= 0) { // try to enqueue/inactivate long nc = (long)ec | ((c - AC_UNIT) & (AC_MASK|TC_MASK)); w.nextWait = e; w.eventCount = ec | INT_SIGN; // mark as inactive if (ctl != c || !U.compareAndSwapLong(this, CTL, c, nc)) w.eventCount = ec; // unmark on CAS failure else { if ((ns = w.nsteals) != 0) { w.nsteals = 0; // set rescans if ran task w.rescans = (a > 0) ? 0 : a + parallelism; w.totalSteals += ns; } if (a == 1 - parallelism) // quiescent idleAwaitWork(w, nc, c); } } else if (w.eventCount < 0) { // already queued if ((nr = w.rescans) > 0) { // continue rescanning int ac = a + parallelism; if (((w.rescans = (ac < nr) ? ac : nr - 1) & 3) == 0) Thread.yield(); // yield before block } else { Thread.interrupted(); // clear status Thread wt = Thread.currentThread(); U.putObject(wt, PARKBLOCKER, this); w.parker = wt; // emulate LockSupport.park if (w.eventCount < 0) // recheck U.park(false, 0L); w.parker = null; U.putObject(wt, PARKBLOCKER, null); } } } return null; } /** * If inactivating worker w has caused the pool to become * quiescent, checks for pool termination, and, so long as this is * not the only worker, waits for event for up to SHRINK_RATE * nanosecs. On timeout, if ctl has not changed, terminates the * worker, which will in turn wake up another worker to possibly * repeat this process. 
* * @param w the calling worker * @param currentCtl the ctl value triggering possible quiescence * @param prevCtl the ctl value to restore if thread is terminated */ private void idleAwaitWork(WorkQueue w, long currentCtl, long prevCtl) { if (w.eventCount < 0 && !tryTerminate(false, false) && (int)prevCtl != 0 && !hasQueuedSubmissions() && ctl == currentCtl) { Thread wt = Thread.currentThread(); Thread.yield(); // yield before block while (ctl == currentCtl) { long startTime = System.nanoTime(); Thread.interrupted(); // timed variant of version in scan() U.putObject(wt, PARKBLOCKER, this); w.parker = wt; if (ctl == currentCtl) U.park(false, SHRINK_RATE); w.parker = null; U.putObject(wt, PARKBLOCKER, null); if (ctl != currentCtl) break; if (System.nanoTime() - startTime >= SHRINK_TIMEOUT && U.compareAndSwapLong(this, CTL, currentCtl, prevCtl)) { w.eventCount = (w.eventCount + E_SEQ) | E_MASK; w.runState = -1; // shrink break; } } } } /** * Tries to locate and execute tasks for a stealer of the given * task, or in turn one of its stealers, Traces currentSteal -> * currentJoin links looking for a thread working on a descendant * of the given task and with a non-empty queue to steal back and * execute tasks from. The first call to this method upon a * waiting join will often entail scanning/search, (which is OK * because the joiner has nothing better to do), but this method * leaves hints in workers to speed up subsequent calls. The * implementation is very branchy to cope with potential * inconsistencies or loops encountering chains that are stale, * unknown, or so long that they are likely cyclic. * * @param joiner the joining worker * @param task the task to join * @return 0 if no progress can be made, negative if task * known complete, else positive */ private int tryHelpStealer(WorkQueue joiner, ForkJoinTask<?> task) { int stat = 0, steps = 0; // bound to avoid cycles if (joiner != null && task != null) { // hoist null checks restart: for (;;) { ForkJoinTask<?> subtask = task; // current target for (WorkQueue j = joiner, v;;) { // v is stealer of subtask WorkQueue[] ws; int m, s, h; if ((s = task.status) < 0) { stat = s; break restart; } if ((ws = workQueues) == null || (m = ws.length - 1) <= 0) break restart; // shutting down if ((v = ws[h = (j.stealHint | 1) & m]) == null || v.currentSteal != subtask) { for (int origin = h;;) { // find stealer if (((h = (h + 2) & m) & 15) == 1 && (subtask.status < 0 || j.currentJoin != subtask)) continue restart; // occasional staleness check if ((v = ws[h]) != null && v.currentSteal == subtask) { j.stealHint = h; // save hint break; } if (h == origin) break restart; // cannot find stealer } } for (;;) { // help stealer or descend to its stealer ForkJoinTask[] a; int b; if (subtask.status < 0) // surround probes with continue restart; // consistency checks if ((b = v.base) - v.top < 0 && (a = v.array) != null) { int i = (((a.length - 1) & b) << ASHIFT) + ABASE; ForkJoinTask<?> t = (ForkJoinTask<?>)U.getObjectVolatile(a, i); if (subtask.status < 0 || j.currentJoin != subtask || v.currentSteal != subtask) continue restart; // stale stat = 1; // apparent progress if (t != null && v.base == b && U.compareAndSwapObject(a, i, t, null)) { v.base = b + 1; // help stealer joiner.runSubtask(t); } else if (v.base == b && ++steps == MAX_HELP) break restart; // v apparently stalled } else { // empty -- try to descend ForkJoinTask<?> next = v.currentJoin; if (subtask.status < 0 || j.currentJoin != subtask || v.currentSteal != subtask) continue restart; // stale else 
if (next == null || ++steps == MAX_HELP) break restart; // dead-end or maybe cyclic else { subtask = next; j = v; break; } } } } } } return stat; } /** * If task is at base of some steal queue, steals and executes it. * * @param joiner the joining worker * @param task the task */ private void tryPollForAndExec(WorkQueue joiner, ForkJoinTask<?> task) { WorkQueue[] ws; if ((ws = workQueues) != null) { for (int j = 1; j < ws.length && task.status >= 0; j += 2) { WorkQueue q = ws[j]; if (q != null && q.pollFor(task)) { joiner.runSubtask(task); break; } } } } /** * Tries to decrement active count (sometimes implicitly) and * possibly release or create a compensating worker in preparation * for blocking. Fails on contention or termination. Otherwise, * adds a new thread if no idle workers are available and either * pool would become completely starved or: (at least half * starved, and fewer than 50% spares exist, and there is at least * one task apparently available). Even though the availability * check requires a full scan, it is worthwhile in reducing false * alarms. * * @param task if non-null, a task being waited for * @param blocker if non-null, a blocker being waited for * @return true if the caller can block, else should recheck and retry */ final boolean tryCompensate(ForkJoinTask<?> task, ManagedBlocker blocker) { int pc = parallelism, e; long c = ctl; WorkQueue[] ws = workQueues; if ((e = (int)c) >= 0 && ws != null) { int u, a, ac, hc; int tc = (short)((u = (int)(c >>> 32)) >>> UTC_SHIFT) + pc; boolean replace = false; if ((a = u >> UAC_SHIFT) <= 0) { if ((ac = a + pc) <= 1) replace = true; else if ((e > 0 || (task != null && ac <= (hc = pc >>> 1) && tc < pc + hc))) { WorkQueue w; for (int j = 0; j < ws.length; ++j) { if ((w = ws[j]) != null && !w.isEmpty()) { replace = true; break; // in compensation range and tasks available } } } } if ((task == null || task.status >= 0) && // recheck need to block (blocker == null || !blocker.isReleasable()) && ctl == c) { if (!replace) { // no compensation long nc = ((c - AC_UNIT) & AC_MASK) | (c & ~AC_MASK); if (U.compareAndSwapLong(this, CTL, c, nc)) return true; } else if (e != 0) { // release an idle worker WorkQueue w; Thread p; int i; if ((i = e & SMASK) < ws.length && (w = ws[i]) != null) { long nc = ((long)(w.nextWait & E_MASK) | (c & (AC_MASK|TC_MASK))); if (w.eventCount == (e | INT_SIGN) && U.compareAndSwapLong(this, CTL, c, nc)) { w.eventCount = (e + E_SEQ) & E_MASK; if ((p = w.parker) != null) U.unpark(p); return true; } } } else if (tc < MAX_CAP) { // create replacement long nc = ((c + TC_UNIT) & TC_MASK) | (c & ~TC_MASK); if (U.compareAndSwapLong(this, CTL, c, nc)) { addWorker(); return true; } } } } return false; } /** * Helps and/or blocks until the given task is done. * * @param joiner the joining worker * @param task the task * @return task status on exit */ final int awaitJoin(WorkQueue joiner, ForkJoinTask<?> task) { int s; if ((s = task.status) >= 0) { ForkJoinTask<?> prevJoin = joiner.currentJoin; joiner.currentJoin = task; long startTime = 0L; for (int k = 0;;) { if ((s = (joiner.isEmpty() ? 
// try to help tryHelpStealer(joiner, task) : joiner.tryRemoveAndExec(task))) == 0 && (s = task.status) >= 0) { if (k == 0) { startTime = System.nanoTime(); tryPollForAndExec(joiner, task); // check uncommon case } else if ((k & (MAX_HELP - 1)) == 0 && System.nanoTime() - startTime >= COMPENSATION_DELAY && tryCompensate(task, null)) { if (task.trySetSignal()) { synchronized (task) { if (task.status >= 0) { try { // see ForkJoinTask task.wait(); // for explanation } catch (InterruptedException ie) { } } else task.notifyAll(); } } long c; // re-activate do {} while (!U.compareAndSwapLong (this, CTL, c = ctl, c + AC_UNIT)); } } if (s < 0 || (s = task.status) < 0) { joiner.currentJoin = prevJoin; break; } else if ((k++ & (MAX_HELP - 1)) == MAX_HELP >>> 1) Thread.yield(); // for politeness } } return s; } /** * Stripped-down variant of awaitJoin used by timed joins. Tries * to help join only while there is continuous progress. (Caller * will then enter a timed wait.) * * @param joiner the joining worker * @param task the task * @return task status on exit */ final int helpJoinOnce(WorkQueue joiner, ForkJoinTask<?> task) { int s; while ((s = task.status) >= 0 && (joiner.isEmpty() ? tryHelpStealer(joiner, task) : joiner.tryRemoveAndExec(task)) != 0) ; return s; } /** * Returns a (probably) non-empty steal queue, if one is found * during a random, then cyclic scan, else null. This method must * be retried by caller if, by the time it tries to use the queue, * it is empty. */ private WorkQueue findNonEmptyStealQueue(WorkQueue w) { // Similar to loop in scan(), but ignoring submissions int r = w.seed; r ^= r << 13; r ^= r >>> 17; w.seed = r ^= r << 5; int step = (r >>> 16) | 1; for (WorkQueue[] ws;;) { int rs = runState, m; if ((ws = workQueues) == null || (m = ws.length - 1) < 1) return null; for (int j = (m + 1) << 2; ; r += step) { WorkQueue q = ws[((r << 1) | 1) & m]; if (q != null && !q.isEmpty()) return q; else if (--j < 0) { if (runState == rs) return null; break; } } } } /** * Runs tasks until {@code isQuiescent()}. We piggyback on * active count ctl maintenance, but rather than blocking * when tasks cannot be found, we rescan until all others cannot * find tasks either. */ final void helpQuiescePool(WorkQueue w) { for (boolean active = true;;) { ForkJoinTask<?> localTask; // exhaust local queue while ((localTask = w.nextLocalTask()) != null) localTask.doExec(); WorkQueue q = findNonEmptyStealQueue(w); if (q != null) { ForkJoinTask<?> t; int b; if (!active) { // re-establish active count long c; active = true; do {} while (!U.compareAndSwapLong (this, CTL, c = ctl, c + AC_UNIT)); } if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null) w.runSubtask(t); } else { long c; if (active) { // decrement active count without queuing active = false; do {} while (!U.compareAndSwapLong (this, CTL, c = ctl, c -= AC_UNIT)); } else c = ctl; // re-increment on exit if ((int)(c >> AC_SHIFT) + parallelism == 0) { do {} while (!U.compareAndSwapLong (this, CTL, c = ctl, c + AC_UNIT)); break; } } } } /** * Gets and removes a local or stolen task for the given worker. 
* * @return a task, if available */ final ForkJoinTask<?> nextTaskFor(WorkQueue w) { for (ForkJoinTask<?> t;;) { WorkQueue q; int b; if ((t = w.nextLocalTask()) != null) return t; if ((q = findNonEmptyStealQueue(w)) == null) return null; if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null) return t; } } /** * Returns the approximate (non-atomic) number of idle threads per * active thread to offset steal queue size for method * ForkJoinTask.getSurplusQueuedTaskCount(). */ final int idlePerActive() { // Approximate at powers of two for small values, saturate past 4 int p = parallelism; int a = p + (int)(ctl >> AC_SHIFT); return (a > (p >>>= 1) ? 0 : a > (p >>>= 1) ? 1 : a > (p >>>= 1) ? 2 : a > (p >>>= 1) ? 4 : 8); } // Termination /** * Possibly initiates and/or completes termination. The caller * triggering termination runs three passes through workQueues: * (0) Setting termination status, followed by wakeups of queued * workers; (1) cancelling all tasks; (2) interrupting lagging * threads (likely in external tasks, but possibly also blocked in * joins). Each pass repeats previous steps because of potential * lagging thread creation. * * @param now if true, unconditionally terminate, else only * if no work and no active workers * @param enable if true, enable shutdown when next possible * @return true if now terminating or terminated */ private boolean tryTerminate(boolean now, boolean enable) { Mutex lock = this.lock; for (long c;;) { if (((c = ctl) & STOP_BIT) != 0) { // already terminating if ((short)(c >>> TC_SHIFT) == -parallelism) { lock.lock(); // don't need try/finally termination.signalAll(); // signal when 0 workers lock.unlock(); } return true; } if (runState >= 0) { // not yet enabled if (!enable) return false; lock.lock(); runState |= SHUTDOWN; lock.unlock(); } if (!now) { // check if idle & no tasks if ((int)(c >> AC_SHIFT) != -parallelism || hasQueuedSubmissions()) return false; // Check for unqueued inactive workers. One pass suffices. WorkQueue[] ws = workQueues; WorkQueue w; if (ws != null) { for (int i = 1; i < ws.length; i += 2) { if ((w = ws[i]) != null && w.eventCount >= 0) return false; } } } if (U.compareAndSwapLong(this, CTL, c, c | STOP_BIT)) { for (int pass = 0; pass < 3; ++pass) { WorkQueue[] ws = workQueues; if (ws != null) { WorkQueue w; int n = ws.length; for (int i = 0; i < n; ++i) { if ((w = ws[i]) != null) { w.runState = -1; if (pass > 0) { w.cancelAll(); if (pass > 1) w.interruptOwner(); } } } // Wake up workers parked on event queue int i, e; long cc; Thread p; while ((e = (int)(cc = ctl) & E_MASK) != 0 && (i = e & SMASK) < n && (w = ws[i]) != null) { long nc = ((long)(w.nextWait & E_MASK) | ((cc + AC_UNIT) & AC_MASK) | (cc & (TC_MASK|STOP_BIT))); if (w.eventCount == (e | INT_SIGN) && U.compareAndSwapLong(this, CTL, cc, nc)) { w.eventCount = (e + E_SEQ) & E_MASK; w.runState = -1; if ((p = w.parker) != null) U.unpark(p); } } } } } } } // Exported methods // Constructors /** * Creates a {@code ForkJoinPool} with parallelism equal to {@link * java.lang.Runtime#availableProcessors}, using the {@linkplain * #defaultForkJoinWorkerThreadFactory default thread factory}, * no UncaughtExceptionHandler, and non-async LIFO processing mode. 
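     *
     * <p>For example (illustrative sketch, not part of the original javadoc;
     * shown only to spell out the defaults described above):
     *
     * <pre> {@code
     * ForkJoinPool pool = new ForkJoinPool();
     * // is roughly the same as the explicit form
     * ForkJoinPool explicit = new ForkJoinPool(
     *     Runtime.getRuntime().availableProcessors(),
     *     ForkJoinPool.defaultForkJoinWorkerThreadFactory,
     *     null,     // no UncaughtExceptionHandler
     *     false);   // non-async LIFO mode
     * }</pre>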
* * @throws SecurityException if a security manager exists and * the caller is not permitted to modify threads * because it does not hold {@link * java.lang.RuntimePermission}{@code ("modifyThread")} */ public ForkJoinPool() { this(H2ORuntime.availableProcessors(), defaultForkJoinWorkerThreadFactory, null, false); } /** * Creates a {@code ForkJoinPool} with the indicated parallelism * level, the {@linkplain * #defaultForkJoinWorkerThreadFactory default thread factory}, * no UncaughtExceptionHandler, and non-async LIFO processing mode. * * @param parallelism the parallelism level * @throws IllegalArgumentException if parallelism less than or * equal to zero, or greater than implementation limit * @throws SecurityException if a security manager exists and * the caller is not permitted to modify threads * because it does not hold {@link * java.lang.RuntimePermission}{@code ("modifyThread")} */ public ForkJoinPool(int parallelism) { this(parallelism, defaultForkJoinWorkerThreadFactory, null, false); } /** * Creates a {@code ForkJoinPool} with the given parameters. * * @param parallelism the parallelism level. For default value, * use {@link java.lang.Runtime#availableProcessors}. * @param factory the factory for creating new threads. For default value, * use {@link #defaultForkJoinWorkerThreadFactory}. * @param handler the handler for internal worker threads that * terminate due to unrecoverable errors encountered while executing * tasks. For default value, use {@code null}. * @param asyncMode if true, * establishes local first-in-first-out scheduling mode for forked * tasks that are never joined. This mode may be more appropriate * than default locally stack-based mode in applications in which * worker threads only process event-style asynchronous tasks. * For default value, use {@code false}. * @throws IllegalArgumentException if parallelism less than or * equal to zero, or greater than implementation limit * @throws NullPointerException if the factory is null * @throws SecurityException if a security manager exists and * the caller is not permitted to modify threads * because it does not hold {@link * java.lang.RuntimePermission}{@code ("modifyThread")} */ public ForkJoinPool(int parallelism, ForkJoinWorkerThreadFactory factory, Thread.UncaughtExceptionHandler handler, boolean asyncMode) { checkPermission(); if (factory == null) throw new NullPointerException(); if (parallelism <= 0 || parallelism > MAX_CAP) throw new IllegalArgumentException(); this.parallelism = parallelism; this.factory = factory; this.ueh = handler; this.localMode = asyncMode ? FIFO_QUEUE : LIFO_QUEUE; long np = (long)(-parallelism); // offset ctl counts this.ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK); // Use nearest power 2 for workQueues size. See Hackers Delight sec 3.2. int n = parallelism - 1; n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16; int size = (n + 1) << 1; // #slots = 2*#workers this.submitMask = size - 1; // room for max # of submit queues this.workQueues = new WorkQueue[size]; this.termination = (this.lock = new Mutex()).newCondition(); this.stealCount = new AtomicLong(); this.nextWorkerNumber = new AtomicInteger(); int pn = poolNumberGenerator.incrementAndGet(); StringBuilder sb = new StringBuilder("FJ-"); sb.append(Integer.toString(pn)); sb.append("-"); this.workerNamePrefix = sb.toString(); lock.lock(); this.runState = 1; // set init flag lock.unlock(); } // Execution methods /** * Performs the given task, returning its result upon completion. 
* If the computation encounters an unchecked Exception or Error, * it is rethrown as the outcome of this invocation. Rethrown * exceptions behave in the same way as regular exceptions, but, * when possible, contain stack traces (as displayed for example * using {@code ex.printStackTrace()}) of both the current thread * as well as the thread actually encountering the exception; * minimally only the latter. * * @param task the task * @return the task's result * @throws NullPointerException if the task is null * @throws RejectedExecutionException if the task cannot be * scheduled for execution */ public <T> T invoke(ForkJoinTask<T> task) { if (task == null) throw new NullPointerException(); doSubmit(task); return task.join(); } /** * Arranges for (asynchronous) execution of the given task. * * @param task the task * @throws NullPointerException if the task is null * @throws RejectedExecutionException if the task cannot be * scheduled for execution */ public void execute(ForkJoinTask<?> task) { if (task == null) throw new NullPointerException(); doSubmit(task); } // AbstractExecutorService methods /** * @throws NullPointerException if the task is null * @throws RejectedExecutionException if the task cannot be * scheduled for execution */ public void execute(Runnable task) { if (task == null) throw new NullPointerException(); ForkJoinTask<?> job; if (task instanceof ForkJoinTask<?>) // avoid re-wrap job = (ForkJoinTask<?>) task; else job = new ForkJoinTask.AdaptedRunnableAction(task); doSubmit(job); } /** * Submits a ForkJoinTask for execution. * * @param task the task to submit * @return the task * @throws NullPointerException if the task is null * @throws RejectedExecutionException if the task cannot be * scheduled for execution */ public <T> ForkJoinTask<T> submit(ForkJoinTask<T> task) { if (task == null) throw new NullPointerException(); doSubmit(task); return task; } /** * @throws NullPointerException if the task is null * @throws RejectedExecutionException if the task cannot be * scheduled for execution */ public <T> ForkJoinTask<T> submit(Callable<T> task) { ForkJoinTask<T> job = new ForkJoinTask.AdaptedCallable<T>(task); doSubmit(job); return job; } /** * @throws NullPointerException if the task is null * @throws RejectedExecutionException if the task cannot be * scheduled for execution */ public <T> ForkJoinTask<T> submit(Runnable task, T result) { ForkJoinTask<T> job = new ForkJoinTask.AdaptedRunnable<T>(task, result); doSubmit(job); return job; } /** * @throws NullPointerException if the task is null * @throws RejectedExecutionException if the task cannot be * scheduled for execution */ public ForkJoinTask<?> submit(Runnable task) { if (task == null) throw new NullPointerException(); ForkJoinTask<?> job; if (task instanceof ForkJoinTask<?>) // avoid re-wrap job = (ForkJoinTask<?>) task; else job = new ForkJoinTask.AdaptedRunnableAction(task); doSubmit(job); return job; } /** * @throws NullPointerException {@inheritDoc} * @throws RejectedExecutionException {@inheritDoc} */ public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) { // In previous versions of this class, this method constructed // a task to run ForkJoinTask.invokeAll, but now external // invocation of multiple tasks is at least as efficient. List<ForkJoinTask<T>> fs = new ArrayList<ForkJoinTask<T>>(tasks.size()); // Workaround needed because method wasn't declared with // wildcards in return type but should have been. 
@SuppressWarnings({"unchecked", "rawtypes"}) List<Future<T>> futures = (List<Future<T>>) (List) fs; boolean done = false; try { for (Callable<T> t : tasks) { ForkJoinTask<T> f = new ForkJoinTask.AdaptedCallable<T>(t); doSubmit(f); fs.add(f); } for (ForkJoinTask<T> f : fs) f.quietlyJoin(); done = true; return futures; } finally { if (!done) for (ForkJoinTask<T> f : fs) f.cancel(false); } } /** * Returns the factory used for constructing new workers. * * @return the factory used for constructing new workers */ public ForkJoinWorkerThreadFactory getFactory() { return factory; } /** * Returns the handler for internal worker threads that terminate * due to unrecoverable errors encountered while executing tasks. * * @return the handler, or {@code null} if none */ public Thread.UncaughtExceptionHandler getUncaughtExceptionHandler() { return ueh; } /** * Returns the targeted parallelism level of this pool. * * @return the targeted parallelism level of this pool */ public int getParallelism() { return parallelism; } /** * Returns the number of worker threads that have started but not * yet terminated. The result returned by this method may differ * from {@link #getParallelism} when threads are created to * maintain parallelism when others are cooperatively blocked. * * @return the number of worker threads */ public int getPoolSize() { return parallelism + (short)(ctl >>> TC_SHIFT); } /** * Returns {@code true} if this pool uses local first-in-first-out * scheduling mode for forked tasks that are never joined. * * @return {@code true} if this pool uses async mode */ public boolean getAsyncMode() { return localMode != 0; } /** * Returns an estimate of the number of worker threads that are * not blocked waiting to join tasks or for other managed * synchronization. This method may overestimate the * number of running threads. * * @return the number of worker threads */ public int getRunningThreadCount() { int rc = 0; WorkQueue[] ws; WorkQueue w; if ((ws = workQueues) != null) { for (int i = 1; i < ws.length; i += 2) { if ((w = ws[i]) != null && w.isApparentlyUnblocked()) ++rc; } } return rc; } /** * Returns an estimate of the number of threads that are currently * stealing or executing tasks. This method may overestimate the * number of active threads. * * @return the number of active threads */ public int getActiveThreadCount() { int r = parallelism + (int)(ctl >> AC_SHIFT); return (r <= 0) ? 0 : r; // suppress momentarily negative values } /** * Returns {@code true} if all worker threads are currently idle. * An idle worker is one that cannot obtain a task to execute * because none are available to steal from other threads, and * there are no pending submissions to the pool. This method is * conservative; it might not return {@code true} immediately upon * idleness of all threads, but will eventually become true if * threads remain inactive. * * @return {@code true} if all threads are currently idle */ public boolean isQuiescent() { return (int)(ctl >> AC_SHIFT) + parallelism == 0; } /** * Returns an estimate of the total number of tasks stolen from * one thread's work queue by another. The reported value * underestimates the actual total number of steals when the pool * is not quiescent. This value may be useful for monitoring and * tuning fork/join programs: in general, steal counts should be * high enough to keep threads busy, but low enough to avoid * overhead and contention across threads. 
* * @return the number of steals */ public long getStealCount() { long count = stealCount.get(); WorkQueue[] ws; WorkQueue w; if ((ws = workQueues) != null) { for (int i = 1; i < ws.length; i += 2) { if ((w = ws[i]) != null) count += w.totalSteals; } } return count; } /** * Returns an estimate of the total number of tasks currently held * in queues by worker threads (but not including tasks submitted * to the pool that have not begun executing). This value is only * an approximation, obtained by iterating across all threads in * the pool. This method may be useful for tuning task * granularities. * * @return the number of queued tasks */ public long getQueuedTaskCount() { long count = 0; WorkQueue[] ws; WorkQueue w; if ((ws = workQueues) != null) { for (int i = 1; i < ws.length; i += 2) { if ((w = ws[i]) != null) count += w.queueSize(); } } return count; } /** * Returns an estimate of the number of tasks submitted to this * pool that have not yet begun executing. This method may take * time proportional to the number of submissions. * * @return the number of queued submissions */ public int getQueuedSubmissionCount() { int count = 0; WorkQueue[] ws; WorkQueue w; if ((ws = workQueues) != null) { for (int i = 0; i < ws.length; i += 2) { if ((w = ws[i]) != null) count += w.queueSize(); } } return count; } /** * Returns {@code true} if there are any tasks submitted to this * pool that have not yet begun executing. * * @return {@code true} if there are any queued submissions */ public boolean hasQueuedSubmissions() { WorkQueue[] ws; WorkQueue w; if ((ws = workQueues) != null) { for (int i = 0; i < ws.length; i += 2) { if ((w = ws[i]) != null && !w.isEmpty()) return true; } } return false; } /** * Removes and returns the next unexecuted submission if one is * available. This method may be useful in extensions to this * class that re-assign work in systems with multiple pools. * * @return the next submission, or {@code null} if none */ protected ForkJoinTask<?> pollSubmission() { WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t; if ((ws = workQueues) != null) { for (int i = 0; i < ws.length; i += 2) { if ((w = ws[i]) != null && (t = w.poll()) != null) return t; } } return null; } /** * Removes all available unexecuted submitted and forked tasks * from scheduling queues and adds them to the given collection, * without altering their execution status. These may include * artificially generated or wrapped tasks. This method is * designed to be invoked only when the pool is known to be * quiescent. Invocations at other times may not remove all * tasks. A failure encountered while attempting to add elements * to collection {@code c} may result in elements being in * neither, either or both collections when the associated * exception is thrown. The behavior of this operation is * undefined if the specified collection is modified while the * operation is in progress. * * @param c the collection to transfer elements into * @return the number of elements transferred */ protected int drainTasksTo(Collection<? super ForkJoinTask<?>> c) { int count = 0; WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t; if ((ws = workQueues) != null) { for (int i = 0; i < ws.length; ++i) { if ((w = ws[i]) != null) { while ((t = w.poll()) != null) { c.add(t); ++count; } } } } return count; } /** * Returns a string identifying this pool, as well as its state, * including indications of run state, parallelism level, and * worker and task counts. 
* * @return a string identifying this pool, as well as its state */ public String toString() { // Use a single pass through workQueues to collect counts long qt = 0L, qs = 0L; int rc = 0; long st = stealCount.get(); long c = ctl; WorkQueue[] ws; WorkQueue w; if ((ws = workQueues) != null) { for (int i = 0; i < ws.length; ++i) { if ((w = ws[i]) != null) { int size = w.queueSize(); if ((i & 1) == 0) qs += size; else { qt += size; st += w.totalSteals; if (w.isApparentlyUnblocked()) ++rc; } } } } int pc = parallelism; int tc = pc + (short)(c >>> TC_SHIFT); int ac = pc + (int)(c >> AC_SHIFT); if (ac < 0) // ignore transient negative ac = 0; String level; if ((c & STOP_BIT) != 0) level = (tc == 0) ? "Terminated" : "Terminating"; else level = runState < 0 ? "Shutting down" : "Running"; return super.toString() + "[" + level + ", parallelism = " + pc + ", size = " + tc + ", active = " + ac + ", running = " + rc + ", steals = " + st + ", tasks = " + qt + ", submissions = " + qs + "]"; } /** * Initiates an orderly shutdown in which previously submitted * tasks are executed, but no new tasks will be accepted. * Invocation has no additional effect if already shut down. * Tasks that are in the process of being submitted concurrently * during the course of this method may or may not be rejected. * * @throws SecurityException if a security manager exists and * the caller is not permitted to modify threads * because it does not hold {@link * java.lang.RuntimePermission}{@code ("modifyThread")} */ public void shutdown() { checkPermission(); tryTerminate(false, true); } /** * Attempts to cancel and/or stop all tasks, and reject all * subsequently submitted tasks. Tasks that are in the process of * being submitted or executed concurrently during the course of * this method may or may not be rejected. This method cancels * both existing and unexecuted tasks, in order to permit * termination in the presence of task dependencies. So the method * always returns an empty list (unlike the case for some other * Executors). * * @return an empty list * @throws SecurityException if a security manager exists and * the caller is not permitted to modify threads * because it does not hold {@link * java.lang.RuntimePermission}{@code ("modifyThread")} */ public List<Runnable> shutdownNow() { checkPermission(); tryTerminate(true, true); return Collections.emptyList(); } /** * Returns {@code true} if all tasks have completed following shut down. * * @return {@code true} if all tasks have completed following shut down */ public boolean isTerminated() { long c = ctl; return ((c & STOP_BIT) != 0L && (short)(c >>> TC_SHIFT) == -parallelism); } /** * Returns {@code true} if the process of termination has * commenced but not yet completed. This method may be useful for * debugging. A return of {@code true} reported a sufficient * period after shutdown may indicate that submitted tasks have * ignored or suppressed interruption, or are waiting for IO, * causing this executor not to properly terminate. (See the * advisory notes for class {@link ForkJoinTask} stating that * tasks should not normally entail blocking operations. But if * they do, they must abort them on interrupt.) * * @return {@code true} if terminating but not yet terminated */ public boolean isTerminating() { long c = ctl; return ((c & STOP_BIT) != 0L && (short)(c >>> TC_SHIFT) != -parallelism); } /** * Returns {@code true} if this pool has been shut down. 
* * @return {@code true} if this pool has been shut down */ public boolean isShutdown() { return runState < 0; } /** * Blocks until all tasks have completed execution after a shutdown * request, or the timeout occurs, or the current thread is * interrupted, whichever happens first. * * @param timeout the maximum time to wait * @param unit the time unit of the timeout argument * @return {@code true} if this executor terminated and * {@code false} if the timeout elapsed before termination * @throws InterruptedException if interrupted while waiting */ public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { long nanos = unit.toNanos(timeout); final Mutex lock = this.lock; lock.lock(); try { for (;;) { if (isTerminated()) return true; if (nanos <= 0) return false; nanos = termination.awaitNanos(nanos); } } finally { lock.unlock(); } } /** * Interface for extending managed parallelism for tasks running * in {@link ForkJoinPool}s. * * <p>A {@code ManagedBlocker} provides two methods. Method * {@code isReleasable} must return {@code true} if blocking is * not necessary. Method {@code block} blocks the current thread * if necessary (perhaps internally invoking {@code isReleasable} * before actually blocking). These actions are performed by any * thread invoking {@link ForkJoinPool#managedBlock}. The * unusual methods in this API accommodate synchronizers that may, * but don't usually, block for long periods. Similarly, they * allow more efficient internal handling of cases in which * additional workers may be, but usually are not, needed to * ensure sufficient parallelism. Toward this end, * implementations of method {@code isReleasable} must be amenable * to repeated invocation. * * <p>For example, here is a ManagedBlocker based on a * ReentrantLock: * <pre> {@code * class ManagedLocker implements ManagedBlocker { * final ReentrantLock lock; * boolean hasLock = false; * ManagedLocker(ReentrantLock lock) { this.lock = lock; } * public boolean block() { * if (!hasLock) * lock.lock(); * return true; * } * public boolean isReleasable() { * return hasLock || (hasLock = lock.tryLock()); * } * }}</pre> * * <p>Here is a class that possibly blocks waiting for an * item on a given queue: * <pre> {@code * class QueueTaker<E> implements ManagedBlocker { * final BlockingQueue<E> queue; * volatile E item = null; * QueueTaker(BlockingQueue<E> q) { this.queue = q; } * public boolean block() throws InterruptedException { * if (item == null) * item = queue.take(); * return true; * } * public boolean isReleasable() { * return item != null || (item = queue.poll()) != null; * } * public E getItem() { // call after pool.managedBlock completes * return item; * } * }}</pre> */ public static interface ManagedBlocker { /** * Possibly blocks the current thread, for example waiting for * a lock or condition. * * @return {@code true} if no additional blocking is necessary * (i.e., if isReleasable would return true) * @throws InterruptedException if interrupted while waiting * (the method is not required to do so, but is allowed to) */ boolean block() throws InterruptedException; /** * Returns {@code true} if blocking is unnecessary. */ boolean isReleasable(); } /** * Blocks in accord with the given blocker. If the current thread * is a {@link ForkJoinWorkerThread}, this method possibly * arranges for a spare thread to be activated if necessary to * ensure sufficient parallelism while the current thread is blocked. 
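     *
     * <p>Illustrative sketch (not part of the original javadoc), using the
     * {@code ManagedLocker} example from the {@link ManagedBlocker}
     * documentation above:
     *
     * <pre> {@code
     * ManagedLocker locker = new ManagedLocker(lock); // lock is a ReentrantLock
     * ForkJoinPool.managedBlock(locker);              // may activate a spare worker while parked
     * // the lock is held once managedBlock returns
     * }</pre>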
* * <p>If the caller is not a {@link ForkJoinTask}, this method is * behaviorally equivalent to * <pre> {@code * while (!blocker.isReleasable()) * if (blocker.block()) * return; * }</pre> * * If the caller is a {@code ForkJoinTask}, then the pool may * first be expanded to ensure parallelism, and later adjusted. * * @param blocker the blocker * @throws InterruptedException if blocker.block did so */ public static void managedBlock(ManagedBlocker blocker) throws InterruptedException { Thread t = Thread.currentThread(); ForkJoinPool p = ((t instanceof ForkJoinWorkerThread) ? ((ForkJoinWorkerThread)t).pool : null); while (!blocker.isReleasable()) { if (p == null || p.tryCompensate(null, blocker)) { try { do {} while (!blocker.isReleasable() && !blocker.block()); } finally { if (p != null) p.incrementActiveCount(); } break; } } } // AbstractExecutorService overrides. These rely on undocumented // fact that ForkJoinTask.adapt returns ForkJoinTasks that also // implement RunnableFuture. protected <T> RunnableFuture<T> newTaskFor(Runnable runnable, T value) { return new ForkJoinTask.AdaptedRunnable<T>(runnable, value); } protected <T> RunnableFuture<T> newTaskFor(Callable<T> callable) { return new ForkJoinTask.AdaptedCallable<T>(callable); } // Unsafe mechanics private static final sun.misc.Unsafe U; private static final long CTL; private static final long PARKBLOCKER; private static final int ABASE; private static final int ASHIFT; static { poolNumberGenerator = new AtomicInteger(); nextSubmitterSeed = new AtomicInteger(0x55555555); modifyThreadPermission = new RuntimePermission("modifyThread"); defaultForkJoinWorkerThreadFactory = new DefaultForkJoinWorkerThreadFactory(); submitters = new ThreadSubmitter(); int s; try { U = getUnsafe(); Class<?> k = ForkJoinPool.class; Class<?> ak = ForkJoinTask[].class; CTL = U.objectFieldOffset (k.getDeclaredField("ctl")); Class<?> tk = Thread.class; PARKBLOCKER = U.objectFieldOffset (tk.getDeclaredField("parkBlocker")); ABASE = U.arrayBaseOffset(ak); s = U.arrayIndexScale(ak); } catch (Exception e) { throw new Error(e); } if ((s & (s-1)) != 0) throw new Error("data type scale not a power of two"); ASHIFT = 31 - Integer.numberOfLeadingZeros(s); } /** * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. * Replace with a simple call to Unsafe.getUnsafe when integrating * into a jdk. * * @return a sun.misc.Unsafe */ private static sun.misc.Unsafe getUnsafe() { try { return sun.misc.Unsafe.getUnsafe(); } catch (SecurityException se) { try { return java.security.AccessController.doPrivileged (new java.security .PrivilegedExceptionAction<sun.misc.Unsafe>() { public sun.misc.Unsafe run() throws Exception { java.lang.reflect.Field f = sun.misc .Unsafe.class.getDeclaredField("theUnsafe"); f.setAccessible(true); return (sun.misc.Unsafe) f.get(null); }}); } catch (java.security.PrivilegedActionException e) { throw new RuntimeException("Could not initialize intrinsics", e.getCause()); } } } }
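A minimal driver can tie together the pool API defined above (construction, invoke, shutdown, awaitTermination). The sketch below is illustrative only and is not part of the bundled sources: the class name PoolUsageSketch and the trivial task body are hypothetical, while ForkJoinPool, RecursiveTask, shutdown and awaitTermination are the jsr166y types and methods declared in this package.

import java.util.concurrent.TimeUnit;
import jsr166y.ForkJoinPool;
import jsr166y.RecursiveTask;

public class PoolUsageSketch {
    public static void main(String[] args) throws InterruptedException {
        ForkJoinPool pool = new ForkJoinPool();            // default parallelism, LIFO mode
        Integer answer = pool.invoke(new RecursiveTask<Integer>() {
            @Override protected Integer compute() {        // hypothetical trivial computation
                return 6 * 7;
            }
        });
        System.out.println(answer);                        // prints 42
        pool.shutdown();                                   // stop accepting new tasks
        pool.awaitTermination(10, TimeUnit.SECONDS);       // wait for orderly termination
    }
}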
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/jsr166y/ForkJoinTask.java
/* * Written by Doug Lea with assistance from members of JCP JSR-166 * Expert Group and released to the public domain, as explained at * http://creativecommons.org/publicdomain/zero/1.0/ */ package jsr166y; import java.io.Serializable; import java.util.Collection; import java.util.List; import java.util.RandomAccess; import java.lang.ref.WeakReference; import java.lang.ref.ReferenceQueue; import java.util.concurrent.Callable; import java.util.concurrent.CancellationException; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.RunnableFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.locks.ReentrantLock; import java.lang.reflect.Constructor; /** * Abstract base class for tasks that run within a {@link ForkJoinPool}. * A {@code ForkJoinTask} is a thread-like entity that is much * lighter weight than a normal thread. Huge numbers of tasks and * subtasks may be hosted by a small number of actual threads in a * ForkJoinPool, at the price of some usage limitations. * * <p>A "main" {@code ForkJoinTask} begins execution when submitted * to a {@link ForkJoinPool}. Once started, it will usually in turn * start other subtasks. As indicated by the name of this class, * many programs using {@code ForkJoinTask} employ only methods * {@link #fork} and {@link #join}, or derivatives such as {@link * #invokeAll(ForkJoinTask...) invokeAll}. However, this class also * provides a number of other methods that can come into play in * advanced usages, as well as extension mechanics that allow * support of new forms of fork/join processing. * * <p>A {@code ForkJoinTask} is a lightweight form of {@link Future}. * The efficiency of {@code ForkJoinTask}s stems from a set of * restrictions (that are only partially statically enforceable) * reflecting their main use as computational tasks calculating pure * functions or operating on purely isolated objects. The primary * coordination mechanisms are {@link #fork}, that arranges * asynchronous execution, and {@link #join}, that doesn't proceed * until the task's result has been computed. Computations should * ideally avoid {@code synchronized} methods or blocks, and should * minimize other blocking synchronization apart from joining other * tasks or using synchronizers such as Phasers that are advertised to * cooperate with fork/join scheduling. Subdividable tasks should also * not perform blocking IO, and should ideally access variables that * are completely independent of those accessed by other running * tasks. These guidelines are loosely enforced by not permitting * checked exceptions such as {@code IOExceptions} to be * thrown. However, computations may still encounter unchecked * exceptions, that are rethrown to callers attempting to join * them. These exceptions may additionally include {@link * RejectedExecutionException} stemming from internal resource * exhaustion, such as failure to allocate internal task * queues. Rethrown exceptions behave in the same way as regular * exceptions, but, when possible, contain stack traces (as displayed * for example using {@code ex.printStackTrace()}) of both the thread * that initiated the computation as well as the thread actually * encountering the exception; minimally only the latter. 
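 *
 * <p>For example (an illustrative sketch, not from the original javadoc), an
 * unchecked exception thrown inside a task's computation is recorded and
 * rethrown to the caller that joins or invokes the task:
 * <pre> {@code
 * RecursiveAction failing = new RecursiveAction() {
 *   protected void compute() { throw new IllegalStateException("boom"); }
 * };
 * // invoke() (or fork() followed by join()) rethrows the IllegalStateException here
 * new ForkJoinPool().invoke(failing);
 * }</pre>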
* * <p>It is possible to define and use ForkJoinTasks that may block, * but doing so requires three further considerations: (1) Completion * of few if any <em>other</em> tasks should be dependent on a task * that blocks on external synchronization or IO. Event-style async * tasks that are never joined (for example, those subclassing {@link * CountedCompleter}) often fall into this category. (2) To minimize * resource impact, tasks should be small; ideally performing only the * (possibly) blocking action. (3) Unless the {@link * ForkJoinPool.ManagedBlocker} API is used, or the number of possibly * blocked tasks is known to be less than the pool's {@link * ForkJoinPool#getParallelism} level, the pool cannot guarantee that * enough threads will be available to ensure progress or good * performance. * * <p>The primary method for awaiting completion and extracting * results of a task is {@link #join}, but there are several variants: * The {@link Future#get} methods support interruptible and/or timed * waits for completion and report results using {@code Future} * conventions. Method {@link #invoke} is semantically * equivalent to {@code fork(); join()} but always attempts to begin * execution in the current thread. The "<em>quiet</em>" forms of * these methods do not extract results or report exceptions. These * may be useful when a set of tasks are being executed, and you need * to delay processing of results or exceptions until all complete. * Method {@code invokeAll} (available in multiple versions) * performs the most common form of parallel invocation: forking a set * of tasks and joining them all. * * <p>In the most typical usages, a fork-join pair act like a call * (fork) and return (join) from a parallel recursive function. As is * the case with other forms of recursive calls, returns (joins) * should be performed innermost-first. For example, {@code a.fork(); * b.fork(); b.join(); a.join();} is likely to be substantially more * efficient than joining {@code a} before {@code b}. * * <p>The execution status of tasks may be queried at several levels * of detail: {@link #isDone} is true if a task completed in any way * (including the case where a task was cancelled without executing); * {@link #isCompletedNormally} is true if a task completed without * cancellation or encountering an exception; {@link #isCancelled} is * true if the task was cancelled (in which case {@link #getException} * returns a {@link java.util.concurrent.CancellationException}); and * {@link #isCompletedAbnormally} is true if a task was either * cancelled or encountered an exception, in which case {@link * #getException} will return either the encountered exception or * {@link java.util.concurrent.CancellationException}. * * <p>The ForkJoinTask class is not usually directly subclassed. * Instead, you subclass one of the abstract classes that support a * particular style of fork/join processing, typically {@link * RecursiveAction} for most computations that do not return results, * {@link RecursiveTask} for those that do, and {@link * CountedCompleter} for those in which completed actions trigger * other actions. Normally, a concrete ForkJoinTask subclass declares * fields comprising its parameters, established in a constructor, and * then defines a {@code compute} method that somehow uses the control * methods supplied by this base class.
While these methods have * {@code public} access (to allow instances of different task * subclasses to call each other's methods), some of them may only be * called from within other ForkJoinTasks (as may be determined using * method {@link #inForkJoinPool}). Attempts to invoke them in other * contexts result in exceptions or errors, possibly including {@code * ClassCastException}. * * <p>Method {@link #join} and its variants are appropriate for use * only when completion dependencies are acyclic; that is, the * parallel computation can be described as a directed acyclic graph * (DAG). Otherwise, executions may encounter a form of deadlock as * tasks cyclically wait for each other. However, this framework * supports other methods and techniques (for example the use of * {@link Phaser}, {@link #helpQuiesce}, and {@link #complete}) that * may be of use in constructing custom subclasses for problems that * are not statically structured as DAGs. To support such usages a * ForkJoinTask may be atomically <em>tagged</em> with a {@code short} * value using {@link #setForkJoinTaskTag} or {@link * #compareAndSetForkJoinTaskTag} and checked using {@link * #getForkJoinTaskTag}. The ForkJoinTask implementation does not use * these {@code protected} methods or tags for any purpose, but they * may be of use in the construction of specialized subclasses. For * example, parallel graph traversals can use the supplied methods to * avoid revisiting nodes/tasks that have already been processed. * (Method names for tagging are bulky in part to encourage definition * of methods that reflect their usage patterns.) * * <p>Most base support methods are {@code final}, to prevent * overriding of implementations that are intrinsically tied to the * underlying lightweight task scheduling framework. Developers * creating new basic styles of fork/join processing should minimally * implement {@code protected} methods {@link #exec}, {@link * #setRawResult}, and {@link #getRawResult}, while also introducing * an abstract computational method that can be implemented in its * subclasses, possibly relying on other {@code protected} methods * provided by this class. * * <p>ForkJoinTasks should perform relatively small amounts of * computation. Large tasks should be split into smaller subtasks, * usually via recursive decomposition. As a very rough rule of thumb, * a task should perform more than 100 and less than 10000 basic * computational steps, and should avoid indefinite looping. If tasks * are too big, then parallelism cannot improve throughput. If too * small, then memory and internal task maintenance overhead may * overwhelm processing. * * <p>This class provides {@code adapt} methods for {@link Runnable} * and {@link Callable}, that may be of use when mixing execution of * {@code ForkJoinTasks} with other kinds of tasks. When all tasks are * of this form, consider using a pool constructed in <em>asyncMode</em>. * * <p>ForkJoinTasks are {@code Serializable}, which enables them to be * used in extensions such as remote execution frameworks. It is * sensible to serialize tasks only before or after, but not during, * execution. Serialization is not relied on during execution itself. * * @since 1.7 * @author Doug Lea */ public abstract class ForkJoinTask<V> implements Future<V>, Serializable { /* * See the internal documentation of class ForkJoinPool for a * general implementation overview. 
ForkJoinTasks are mainly * responsible for maintaining their "status" field amidst relays * to methods in ForkJoinWorkerThread and ForkJoinPool. * * The methods of this class are more-or-less layered into * (1) basic status maintenance * (2) execution and awaiting completion * (3) user-level methods that additionally report results. * This is sometimes hard to see because this file orders exported * methods in a way that flows well in javadocs. */ /* * The status field holds run control status bits packed into a * single int to minimize footprint and to ensure atomicity (via * CAS). Status is initially zero, and takes on nonnegative * values until completed, upon which status (anded with * DONE_MASK) holds value NORMAL, CANCELLED, or EXCEPTIONAL. Tasks * undergoing blocking waits by other threads have the SIGNAL bit * set. Completion of a stolen task with SIGNAL set awakens any * waiters via notifyAll. Even though suboptimal for some * purposes, we use basic builtin wait/notify to take advantage of * "monitor inflation" in JVMs that we would otherwise need to * emulate to avoid adding further per-task bookkeeping overhead. * We want these monitors to be "fat", i.e., not use biasing or * thin-lock techniques, so use some odd coding idioms that tend * to avoid them, mainly by arranging that every synchronized * block performs a wait, notifyAll or both. * * These control bits occupy only (some of) the upper half (16 * bits) of status field. The lower bits are used for user-defined * tags. */ /** The run status of this task */ volatile int status; // accessed directly by pool and workers static final int DONE_MASK = 0xf0000000; // mask out non-completion bits static final int NORMAL = 0xf0000000; // must be negative static final int CANCELLED = 0xc0000000; // must be < NORMAL static final int EXCEPTIONAL = 0x80000000; // must be < CANCELLED static final int SIGNAL = 0x00010000; // must be >= 1 << 16 static final int SMASK = 0x0000ffff; // short bits for tags /** * Marks completion and wakes up threads waiting to join this * task. * * @param completion one of NORMAL, CANCELLED, EXCEPTIONAL * @return completion status on exit */ private int setCompletion(int completion) { for (int s;;) { if ((s = status) < 0) return s; if (U.compareAndSwapInt(this, STATUS, s, s | completion)) { if ((s >>> 16) != 0) synchronized (this) { notifyAll(); } return completion; } } } /** * Primary execution method for stolen tasks. Unless done, calls * exec and records status if completed, but doesn't wait for * completion otherwise. * * @return status on exit from this method */ final int doExec() { int s; boolean completed; if ((s = status) >= 0) { try { completed = exec(); } catch (Throwable rex) { return setExceptionalCompletion(rex); } if (completed) s = setCompletion(NORMAL); } return s; } /** * Tries to set SIGNAL status unless already completed. Used by * ForkJoinPool. Other variants are directly incorporated into * externalAwaitDone etc. * * @return true if successful */ final boolean trySetSignal() { int s = status; return s >= 0 && U.compareAndSwapInt(this, STATUS, s, s | SIGNAL); } /** * Blocks a non-worker-thread until completion. 
* @return status upon completion */ private int externalAwaitDone() { boolean interrupted = false; int s; while ((s = status) >= 0) { if (U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) { synchronized (this) { if (status >= 0) { try { wait(); } catch (InterruptedException ie) { interrupted = true; } } else notifyAll(); } } } if (interrupted) Thread.currentThread().interrupt(); return s; } /** * Blocks a non-worker-thread until completion or interruption. */ private int externalInterruptibleAwaitDone() throws InterruptedException { int s; if (Thread.interrupted()) throw new InterruptedException(); while ((s = status) >= 0) { if (U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) { synchronized (this) { if (status >= 0) wait(); else notifyAll(); } } } return s; } /** * Implementation for join, get, quietlyJoin. Directly handles * only cases of already-completed, external wait, and * unfork+exec. Others are relayed to ForkJoinPool.awaitJoin. * * @return status upon completion */ private int doJoin() { int s; Thread t; ForkJoinWorkerThread wt; ForkJoinPool.WorkQueue w; if ((s = status) >= 0) { if (((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)) { if (!(w = (wt = (ForkJoinWorkerThread)t).workQueue). tryUnpush(this) || (s = doExec()) >= 0) s = wt.pool.awaitJoin(w, this); } else s = externalAwaitDone(); } return s; } /** * Implementation for invoke, quietlyInvoke. * * @return status upon completion */ private int doInvoke() { int s; Thread t; ForkJoinWorkerThread wt; if ((s = doExec()) >= 0) { if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) s = (wt = (ForkJoinWorkerThread)t).pool.awaitJoin(wt.workQueue, this); else s = externalAwaitDone(); } return s; } // Exception table support /** * Table of exceptions thrown by tasks, to enable reporting by * callers. Because exceptions are rare, we don't directly keep * them with task objects, but instead use a weak ref table. Note * that cancellation exceptions don't appear in the table, but are * instead recorded as status values. * * Note: These statics are initialized below in static block. */ private static final ExceptionNode[] exceptionTable; private static final ReentrantLock exceptionTableLock; private static final ReferenceQueue<Object> exceptionTableRefQueue; /** * Fixed capacity for exceptionTable. */ private static final int EXCEPTION_MAP_CAPACITY = 32; /** * Key-value nodes for exception table. The chained hash table * uses identity comparisons, full locking, and weak references * for keys. The table has a fixed capacity because it only * maintains task exceptions long enough for joiners to access * them, so should never become very large for sustained * periods. However, since we do not know when the last joiner * completes, we must use weak references and expunge them. We do * so on each operation (hence full locking). Also, some thread in * any ForkJoinPool will call helpExpungeStaleExceptions when its * pool becomes isQuiescent. */ static final class ExceptionNode extends WeakReference<ForkJoinTask<?>> { final Throwable ex; ExceptionNode next; final long thrower; // use id not ref to avoid weak cycles ExceptionNode(ForkJoinTask<?> task, Throwable ex, ExceptionNode next) { super(task, exceptionTableRefQueue); this.ex = ex; this.next = next; this.thrower = Thread.currentThread().getId(); } } /** * Records exception and sets status. 
* * @return status on exit */ final int recordExceptionalCompletion(Throwable ex) { int s; if ((s = status) >= 0) { int h = System.identityHashCode(this); final ReentrantLock lock = exceptionTableLock; lock.lock(); try { expungeStaleExceptions(); ExceptionNode[] t = exceptionTable; int i = h & (t.length - 1); for (ExceptionNode e = t[i]; ; e = e.next) { if (e == null) { t[i] = new ExceptionNode(this, ex, t[i]); break; } if (e.get() == this) // already present break; } } finally { lock.unlock(); } s = setCompletion(EXCEPTIONAL); } return s; } /** * Records exception and possibly propagates * * @return status on exit */ private int setExceptionalCompletion(Throwable ex) { int s = recordExceptionalCompletion(ex); if ((s & DONE_MASK) == EXCEPTIONAL) internalPropagateException(ex); return s; } /** * Hook for exception propagation support for tasks with completers. */ void internalPropagateException(Throwable ex) { } /** * Cancels, ignoring any exceptions thrown by cancel. Used during * worker and pool shutdown. Cancel is spec'ed not to throw any * exceptions, but if it does anyway, we have no recourse during * shutdown, so guard against this case. */ static final void cancelIgnoringExceptions(ForkJoinTask<?> t) { if (t != null && t.status >= 0) { try { t.cancel(false); } catch (Throwable ignore) { } } } /** * Removes exception node and clears status */ private void clearExceptionalCompletion() { int h = System.identityHashCode(this); final ReentrantLock lock = exceptionTableLock; lock.lock(); try { ExceptionNode[] t = exceptionTable; int i = h & (t.length - 1); ExceptionNode e = t[i]; ExceptionNode pred = null; while (e != null) { ExceptionNode next = e.next; if (e.get() == this) { if (pred == null) t[i] = next; else pred.next = next; break; } pred = e; e = next; } expungeStaleExceptions(); status = 0; } finally { lock.unlock(); } } /** * Returns a rethrowable exception for the given task, if * available. To provide accurate stack traces, if the exception * was not thrown by the current thread, we try to create a new * exception of the same type as the one thrown, but with the * recorded exception as its cause. If there is no such * constructor, we instead try to use a no-arg constructor, * followed by initCause, to the same effect. If none of these * apply, or any fail due to other exceptions, we return the * recorded exception, which is still correct, although it may * contain a misleading stack trace. * * @return the exception, or null if none */ private Throwable getThrowableException() { if ((status & DONE_MASK) != EXCEPTIONAL) return null; int h = System.identityHashCode(this); ExceptionNode e; final ReentrantLock lock = exceptionTableLock; lock.lock(); try { expungeStaleExceptions(); ExceptionNode[] t = exceptionTable; e = t[h & (t.length - 1)]; while (e != null && e.get() != this) e = e.next; } finally { lock.unlock(); } Throwable ex; if (e == null || (ex = e.ex) == null) return null; if (false && e.thrower != Thread.currentThread().getId()) { Class<? 
extends Throwable> ec = ex.getClass(); try { Constructor<?> noArgCtor = null; Constructor<?>[] cs = ec.getConstructors();// public ctors only for (int i = 0; i < cs.length; ++i) { Constructor<?> c = cs[i]; Class<?>[] ps = c.getParameterTypes(); if (ps.length == 0) noArgCtor = c; else if (ps.length == 1 && ps[0] == Throwable.class) return (Throwable)(c.newInstance(ex)); } if (noArgCtor != null) { Throwable wx = (Throwable)(noArgCtor.newInstance()); wx.initCause(ex); return wx; } } catch (Exception ignore) { } } return ex; } /** * Poll stale refs and remove them. Call only while holding lock. */ private static void expungeStaleExceptions() { for (Object x; (x = exceptionTableRefQueue.poll()) != null;) { if (x instanceof ExceptionNode) { ForkJoinTask<?> key = ((ExceptionNode)x).get(); ExceptionNode[] t = exceptionTable; int i = System.identityHashCode(key) & (t.length - 1); ExceptionNode e = t[i]; ExceptionNode pred = null; while (e != null) { ExceptionNode next = e.next; if (e == x) { if (pred == null) t[i] = next; else pred.next = next; break; } pred = e; e = next; } } } } /** * If lock is available, poll stale refs and remove them. * Called from ForkJoinPool when pools become quiescent. */ static final void helpExpungeStaleExceptions() { final ReentrantLock lock = exceptionTableLock; if (lock.tryLock()) { try { expungeStaleExceptions(); } finally { lock.unlock(); } } } /** * Throws exception, if any, associated with the given status. */ private void reportException(int s) { Throwable ex = ((s == CANCELLED) ? new CancellationException() : (s == EXCEPTIONAL) ? getThrowableException() : null); if (ex != null) U.throwException(ex); } // public methods /** * Arranges to asynchronously execute this task. While it is not * necessarily enforced, it is a usage error to fork a task more * than once unless it has completed and been reinitialized. * Subsequent modifications to the state of this task or any data * it operates on are not necessarily consistently observable by * any thread other than the one executing it unless preceded by a * call to {@link #join} or related methods, or a call to {@link * #isDone} returning {@code true}. * * <p>This method may be invoked only from within {@code * ForkJoinPool} computations (as may be determined using method * {@link #inForkJoinPool}). Attempts to invoke in other contexts * result in exceptions or errors, possibly including {@code * ClassCastException}. * * @return {@code this}, to simplify usage */ public final ForkJoinTask<V> fork() { ((ForkJoinWorkerThread)Thread.currentThread()).workQueue.push(this); return this; } /** * Returns the result of the computation when it {@link #isDone is * done}. This method differs from {@link #get()} in that * abnormal completion results in {@code RuntimeException} or * {@code Error}, not {@code ExecutionException}, and that * interrupts of the calling thread do <em>not</em> cause the * method to abruptly return by throwing {@code * InterruptedException}. * * @return the computed result */ public final V join() { int s; if ((s = doJoin() & DONE_MASK) != NORMAL) reportException(s); return getRawResult(); } /** * Commences performing this task, awaits its completion if * necessary, and returns its result, or throws an (unchecked) * {@code RuntimeException} or {@code Error} if the underlying * computation did so. 
* * @return the computed result */ public final V invoke() { int s; if ((s = doInvoke() & DONE_MASK) != NORMAL) reportException(s); return getRawResult(); } /** * Forks the given tasks, returning when {@code isDone} holds for * each task or an (unchecked) exception is encountered, in which * case the exception is rethrown. If more than one task * encounters an exception, then this method throws any one of * these exceptions. If any task encounters an exception, the * other may be cancelled. However, the execution status of * individual tasks is not guaranteed upon exceptional return. The * status of each task may be obtained using {@link * #getException()} and related methods to check if they have been * cancelled, completed normally or exceptionally, or left * unprocessed. * * <p>This method may be invoked only from within {@code * ForkJoinPool} computations (as may be determined using method * {@link #inForkJoinPool}). Attempts to invoke in other contexts * result in exceptions or errors, possibly including {@code * ClassCastException}. * * @param t1 the first task * @param t2 the second task * @throws NullPointerException if any task is null */ public static void invokeAll(ForkJoinTask<?> t1, ForkJoinTask<?> t2) { int s1, s2; t2.fork(); if ((s1 = t1.doInvoke() & DONE_MASK) != NORMAL) t1.reportException(s1); if ((s2 = t2.doJoin() & DONE_MASK) != NORMAL) t2.reportException(s2); } /** * Forks the given tasks, returning when {@code isDone} holds for * each task or an (unchecked) exception is encountered, in which * case the exception is rethrown. If more than one task * encounters an exception, then this method throws any one of * these exceptions. If any task encounters an exception, others * may be cancelled. However, the execution status of individual * tasks is not guaranteed upon exceptional return. The status of * each task may be obtained using {@link #getException()} and * related methods to check if they have been cancelled, completed * normally or exceptionally, or left unprocessed. * * <p>This method may be invoked only from within {@code * ForkJoinPool} computations (as may be determined using method * {@link #inForkJoinPool}). Attempts to invoke in other contexts * result in exceptions or errors, possibly including {@code * ClassCastException}. * * @param tasks the tasks * @throws NullPointerException if any task is null */ public static void invokeAll(ForkJoinTask<?>... tasks) { Throwable ex = null; int last = tasks.length - 1; for (int i = last; i >= 0; --i) { ForkJoinTask<?> t = tasks[i]; if (t == null) { if (ex == null) ex = new NullPointerException(); } else if (i != 0) t.fork(); else if (t.doInvoke() < NORMAL && ex == null) ex = t.getException(); } for (int i = 1; i <= last; ++i) { ForkJoinTask<?> t = tasks[i]; if (t != null) { if (ex != null) t.cancel(false); else if (t.doJoin() < NORMAL) ex = t.getException(); } } if (ex != null) U.throwException(ex); } /** * Forks all tasks in the specified collection, returning when * {@code isDone} holds for each task or an (unchecked) exception * is encountered, in which case the exception is rethrown. If * more than one task encounters an exception, then this method * throws any one of these exceptions. If any task encounters an * exception, others may be cancelled. However, the execution * status of individual tasks is not guaranteed upon exceptional * return. 
The status of each task may be obtained using {@link * #getException()} and related methods to check if they have been * cancelled, completed normally or exceptionally, or left * unprocessed. * * <p>This method may be invoked only from within {@code * ForkJoinPool} computations (as may be determined using method * {@link #inForkJoinPool}). Attempts to invoke in other contexts * result in exceptions or errors, possibly including {@code * ClassCastException}. * * @param tasks the collection of tasks * @return the tasks argument, to simplify usage * @throws NullPointerException if tasks or any element are null */ public static <T extends ForkJoinTask<?>> Collection<T> invokeAll(Collection<T> tasks) { if (!(tasks instanceof RandomAccess) || !(tasks instanceof List<?>)) { invokeAll(tasks.toArray(new ForkJoinTask<?>[tasks.size()])); return tasks; } @SuppressWarnings("unchecked") List<? extends ForkJoinTask<?>> ts = (List<? extends ForkJoinTask<?>>) tasks; Throwable ex = null; int last = ts.size() - 1; for (int i = last; i >= 0; --i) { ForkJoinTask<?> t = ts.get(i); if (t == null) { if (ex == null) ex = new NullPointerException(); } else if (i != 0) t.fork(); else if (t.doInvoke() < NORMAL && ex == null) ex = t.getException(); } for (int i = 1; i <= last; ++i) { ForkJoinTask<?> t = ts.get(i); if (t != null) { if (ex != null) t.cancel(false); else if (t.doJoin() < NORMAL) ex = t.getException(); } } if (ex != null) U.throwException(ex); return tasks; } /** * Attempts to cancel execution of this task. This attempt will * fail if the task has already completed or could not be * cancelled for some other reason. If successful, and this task * has not started when {@code cancel} is called, execution of * this task is suppressed. After this method returns * successfully, unless there is an intervening call to {@link * #reinitialize}, subsequent calls to {@link #isCancelled}, * {@link #isDone}, and {@code cancel} will return {@code true} * and calls to {@link #join} and related methods will result in * {@code CancellationException}. * * <p>This method may be overridden in subclasses, but if so, must * still ensure that these properties hold. In particular, the * {@code cancel} method itself must not throw exceptions. * * <p>This method is designed to be invoked by <em>other</em> * tasks. To terminate the current task, you can just return or * throw an unchecked exception from its computation method, or * invoke {@link #completeExceptionally}. * * @param mayInterruptIfRunning this value has no effect in the * default implementation because interrupts are not used to * control cancellation. * * @return {@code true} if this task is now cancelled */ public boolean cancel(boolean mayInterruptIfRunning) { return (setCompletion(CANCELLED) & DONE_MASK) == CANCELLED; } public final boolean isDone() { return status < 0; } public final boolean isCancelled() { return (status & DONE_MASK) == CANCELLED; } /** * Returns {@code true} if this task threw an exception or was cancelled. * * @return {@code true} if this task threw an exception or was cancelled */ public final boolean isCompletedAbnormally() { return status < NORMAL; } /** * Returns {@code true} if this task completed without throwing an * exception and was not cancelled. 
* * @return {@code true} if this task completed without throwing an * exception and was not cancelled */ public final boolean isCompletedNormally() { return (status & DONE_MASK) == NORMAL; } /** * Returns the exception thrown by the base computation, or a * {@code CancellationException} if cancelled, or {@code null} if * none or if the method has not yet completed. * * @return the exception, or {@code null} if none */ public final Throwable getException() { int s = status & DONE_MASK; return ((s >= NORMAL) ? null : (s == CANCELLED) ? new CancellationException() : getThrowableException()); } /** * Completes this task abnormally, and if not already aborted or * cancelled, causes it to throw the given exception upon * {@code join} and related operations. This method may be used * to induce exceptions in asynchronous tasks, or to force * completion of tasks that would not otherwise complete. Its use * in other situations is discouraged. This method is * overridable, but overridden versions must invoke {@code super} * implementation to maintain guarantees. * * @param ex the exception to throw. If this exception is not a * {@code RuntimeException} or {@code Error}, the actual exception * thrown will be a {@code RuntimeException} with cause {@code ex}. */ public void completeExceptionally(Throwable ex) { setExceptionalCompletion((ex instanceof RuntimeException) || (ex instanceof Error) ? ex : new RuntimeException(ex)); } /** * Completes this task, and if not already aborted or cancelled, * returning the given value as the result of subsequent * invocations of {@code join} and related operations. This method * may be used to provide results for asynchronous tasks, or to * provide alternative handling for tasks that would not otherwise * complete normally. Its use in other situations is * discouraged. This method is overridable, but overridden * versions must invoke {@code super} implementation to maintain * guarantees. * * @param value the result value for this task */ public void complete(V value) { try { setRawResult(value); } catch (Throwable rex) { setExceptionalCompletion(rex); return; } setCompletion(NORMAL); } /** * Completes this task normally without setting a value. The most * recent value established by {@link #setRawResult} (or {@code * null} by default) will be returned as the result of subsequent * invocations of {@code join} and related operations. * * @since 1.8 */ public final void quietlyComplete() { setCompletion(NORMAL); } /** * Waits if necessary for the computation to complete, and then * retrieves its result. * * @return the computed result * @throws CancellationException if the computation was cancelled * @throws ExecutionException if the computation threw an * exception * @throws InterruptedException if the current thread is not a * member of a ForkJoinPool and was interrupted while waiting */ public final V get() throws InterruptedException, ExecutionException { int s = (Thread.currentThread() instanceof ForkJoinWorkerThread) ? doJoin() : externalInterruptibleAwaitDone(); Throwable ex; if ((s &= DONE_MASK) == CANCELLED) throw new CancellationException(); if (s == EXCEPTIONAL && (ex = getThrowableException()) != null) throw new ExecutionException(ex); return getRawResult(); } /** * Waits if necessary for at most the given time for the computation * to complete, and then retrieves its result, if available. 
* * @param timeout the maximum time to wait * @param unit the time unit of the timeout argument * @return the computed result * @throws CancellationException if the computation was cancelled * @throws ExecutionException if the computation threw an * exception * @throws InterruptedException if the current thread is not a * member of a ForkJoinPool and was interrupted while waiting * @throws TimeoutException if the wait timed out */ public final V get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { return get(timeout, unit, false); } /** * Waits if necessary for at most the given time for the computation * to complete, and then retrieves its result, if available. * * This version of the method allows forcing blocking wait even * for F/J threads. * * @see #get * @param timeout the maximum time to wait * @param unit the time unit of the timeout argument * @param canBlock if enabled allows even a F/J thread to wait for * the task to complete * (just like it would for a regular thread) * @return the computed result * @throws CancellationException if the computation was cancelled * @throws ExecutionException if the computation threw an * exception * @throws InterruptedException if the current thread is not a * member of a ForkJoinPool and was interrupted while waiting * @throws TimeoutException if the wait timed out */ public final V get(long timeout, TimeUnit unit, boolean canBlock) throws InterruptedException, ExecutionException, TimeoutException { if (Thread.interrupted()) throw new InterruptedException(); // Messy in part because we measure in nanosecs, but wait in millisecs int s; long ns, ms; if ((s = status) >= 0 && (ns = unit.toNanos(timeout)) > 0L) { long deadline = System.nanoTime() + ns; ForkJoinPool p = null; ForkJoinPool.WorkQueue w = null; Thread t = Thread.currentThread(); if (!canBlock && t instanceof ForkJoinWorkerThread) { ForkJoinWorkerThread wt = (ForkJoinWorkerThread)t; p = wt.pool; w = wt.workQueue; s = p.helpJoinOnce(w, this); // no retries on failure } boolean interrupted = false; try { while ((s = status) >= 0) { if (w != null && w.runState < 0) cancelIgnoringExceptions(this); else if (!canBlock) { if (p == null || p.tryCompensate(this, null)) canBlock = true; } else { if ((ms = TimeUnit.NANOSECONDS.toMillis(ns)) > 0L && U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) { synchronized (this) { if (status >= 0) { try { wait(ms); } catch (InterruptedException ie) { if (p == null) interrupted = true; } } else notifyAll(); } } if ((s = status) < 0 || interrupted || (ns = deadline - System.nanoTime()) <= 0L) break; } } } finally { if (p != null && canBlock) p.incrementActiveCount(); } if (interrupted) throw new InterruptedException(); } if ((s &= DONE_MASK) != NORMAL) { Throwable ex; if (s == CANCELLED) throw new CancellationException(); if (s != EXCEPTIONAL) throw new TimeoutException(); if ((ex = getThrowableException()) != null) throw new ExecutionException(ex); } return getRawResult(); } /** * Joins this task, without returning its result or throwing its * exception. This method may be useful when processing * collections of tasks when some have been cancelled or otherwise * known to have aborted. */ public final void quietlyJoin() { doJoin(); } /** * Commences performing this task and awaits its completion if * necessary, without returning its result or throwing its * exception. 
*/ public final void quietlyInvoke() { doInvoke(); } /** * Possibly executes tasks until the pool hosting the current task * {@link ForkJoinPool#isQuiescent is quiescent}. This method may * be of use in designs in which many tasks are forked, but none * are explicitly joined, instead executing them until all are * processed. * * <p>This method may be invoked only from within {@code * ForkJoinPool} computations (as may be determined using method * {@link #inForkJoinPool}). Attempts to invoke in other contexts * result in exceptions or errors, possibly including {@code * ClassCastException}. */ public static void helpQuiesce() { ForkJoinWorkerThread wt = (ForkJoinWorkerThread)Thread.currentThread(); wt.pool.helpQuiescePool(wt.workQueue); } /** * Resets the internal bookkeeping state of this task, allowing a * subsequent {@code fork}. This method allows repeated reuse of * this task, but only if reuse occurs when this task has either * never been forked, or has been forked, then completed and all * outstanding joins of this task have also completed. Effects * under any other usage conditions are not guaranteed. * This method may be useful when executing * pre-constructed trees of subtasks in loops. * * <p>Upon completion of this method, {@code isDone()} reports * {@code false}, and {@code getException()} reports {@code * null}. However, the value returned by {@code getRawResult} is * unaffected. To clear this value, you can invoke {@code * setRawResult(null)}. */ public void reinitialize() { if ((status & DONE_MASK) == EXCEPTIONAL) clearExceptionalCompletion(); else status = 0; } /** * Returns the pool hosting the current task execution, or null * if this task is executing outside of any ForkJoinPool. * * @see #inForkJoinPool * @return the pool, or {@code null} if none */ public static ForkJoinPool getPool() { Thread t = Thread.currentThread(); return (t instanceof ForkJoinWorkerThread) ? ((ForkJoinWorkerThread) t).pool : null; } /** * Returns {@code true} if the current thread is a {@link * ForkJoinWorkerThread} executing as a ForkJoinPool computation. * * @return {@code true} if the current thread is a {@link * ForkJoinWorkerThread} executing as a ForkJoinPool computation, * or {@code false} otherwise */ public static boolean inForkJoinPool() { return Thread.currentThread() instanceof ForkJoinWorkerThread; } /** * Tries to unschedule this task for execution. This method will * typically succeed if this task is the most recently forked task * by the current thread, and has not commenced executing in * another thread. This method may be useful when arranging * alternative local processing of tasks that could have been, but * were not, stolen. * * <p>This method may be invoked only from within {@code * ForkJoinPool} computations (as may be determined using method * {@link #inForkJoinPool}). Attempts to invoke in other contexts * result in exceptions or errors, possibly including {@code * ClassCastException}. * * @return {@code true} if unforked */ public boolean tryUnfork() { return ((ForkJoinWorkerThread)Thread.currentThread()) .workQueue.tryUnpush(this); } /** * Returns an estimate of the number of tasks that have been * forked by the current worker thread but not yet executed. This * value may be useful for heuristic decisions about whether to * fork other tasks. * * <p>This method may be invoked only from within {@code * ForkJoinPool} computations (as may be determined using method * {@link #inForkJoinPool}). 
Attempts to invoke in other contexts * result in exceptions or errors, possibly including {@code * ClassCastException}. * * @return the number of tasks */ public static int getQueuedTaskCount() { return ((ForkJoinWorkerThread) Thread.currentThread()) .workQueue.queueSize(); } /** * Returns an estimate of how many more locally queued tasks are * held by the current worker thread than there are other worker * threads that might steal them. This value may be useful for * heuristic decisions about whether to fork other tasks. In many * usages of ForkJoinTasks, at steady state, each worker should * aim to maintain a small constant surplus (for example, 3) of * tasks, and to process computations locally if this threshold is * exceeded. * * <p>This method may be invoked only from within {@code * ForkJoinPool} computations (as may be determined using method * {@link #inForkJoinPool}). Attempts to invoke in other contexts * result in exceptions or errors, possibly including {@code * ClassCastException}. * * @return the surplus number of tasks, which may be negative */ public static int getSurplusQueuedTaskCount() { /* * The aim of this method is to return a cheap heuristic guide * for task partitioning when programmers, frameworks, tools, * or languages have little or no idea about task granularity. * In essence by offering this method, we ask users only about * tradeoffs in overhead vs expected throughput and its * variance, rather than how finely to partition tasks. * * In a steady state strict (tree-structured) computation, * each thread makes available for stealing enough tasks for * other threads to remain active. Inductively, if all threads * play by the same rules, each thread should make available * only a constant number of tasks. * * The minimum useful constant is just 1. But using a value of * 1 would require immediate replenishment upon each steal to * maintain enough tasks, which is infeasible. Further, * partitionings/granularities of offered tasks should * minimize steal rates, which in general means that threads * nearer the top of computation tree should generate more * than those nearer the bottom. In perfect steady state, each * thread is at approximately the same level of computation * tree. However, producing extra tasks amortizes the * uncertainty of progress and diffusion assumptions. * * So, users will want to use values larger, but not much * larger than 1 to both smooth over transient shortages and * hedge against uneven progress; as traded off against the * cost of extra task overhead. We leave the user to pick a * threshold value to compare with the results of this call to * guide decisions, but recommend values such as 3. * * When all threads are active, it is on average OK to * estimate surplus strictly locally. In steady-state, if one * thread is maintaining say 2 surplus tasks, then so are * others. So we can just use estimated queue length. * However, this strategy alone leads to serious mis-estimates * in some non-steady-state conditions (ramp-up, ramp-down, * other stalls). We can detect many of these by further * considering the number of "idle" threads, that are known to * have zero queued tasks, so compensate by a factor of * (#idle/#active) threads. 
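 *
 * Illustrative sketch (not from the original comment): a compute() method
 * typically applies this heuristic roughly as
 *
 *   if (problemIsSmall() || getSurplusQueuedTaskCount() >= 3)
 *       solveSequentially();   // enough local work is queued already
 *   else
 *       splitAndFork();        // keep other workers supplied
 *
 * where problemIsSmall, solveSequentially and splitAndFork are hypothetical
 * helpers and 3 is the surplus threshold recommended above.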
*/ ForkJoinWorkerThread wt = (ForkJoinWorkerThread)Thread.currentThread(); return wt.workQueue.queueSize() - wt.pool.idlePerActive(); } // Extension methods /** * Returns the result that would be returned by {@link #join}, even * if this task completed abnormally, or {@code null} if this task * is not known to have been completed. This method is designed * to aid debugging, as well as to support extensions. Its use in * any other context is discouraged. * * @return the result, or {@code null} if not completed */ public abstract V getRawResult(); /** * Forces the given value to be returned as a result. This method * is designed to support extensions, and should not in general be * called otherwise. * * @param value the value */ protected abstract void setRawResult(V value); /** * Immediately performs the base action of this task and returns * true if, upon return from this method, this task is guaranteed * to have completed normally. This method may return false * otherwise, to indicate that this task is not necessarily * complete (or is not known to be complete), for example in * asynchronous actions that require explicit invocations of * completion methods. This method may also throw an (unchecked) * exception to indicate abnormal exit. This method is designed to * support extensions, and should not in general be called * otherwise. * * @return {@code true} if this task is known to have completed normally */ protected abstract boolean exec(); /** * Returns, but does not unschedule or execute, a task queued by * the current thread but not yet executed, if one is immediately * available. There is no guarantee that this task will actually * be polled or executed next. Conversely, this method may return * null even if a task exists but cannot be accessed without * contention with other threads. This method is designed * primarily to support extensions, and is unlikely to be useful * otherwise. * * <p>This method may be invoked only from within {@code * ForkJoinPool} computations (as may be determined using method * {@link #inForkJoinPool}). Attempts to invoke in other contexts * result in exceptions or errors, possibly including {@code * ClassCastException}. * * @return the next task, or {@code null} if none are available */ protected static ForkJoinTask<?> peekNextLocalTask() { return ((ForkJoinWorkerThread) Thread.currentThread()).workQueue.peek(); } /** * Unschedules and returns, without executing, the next task * queued by the current thread but not yet executed. This method * is designed primarily to support extensions, and is unlikely to * be useful otherwise. * * <p>This method may be invoked only from within {@code * ForkJoinPool} computations (as may be determined using method * {@link #inForkJoinPool}). Attempts to invoke in other contexts * result in exceptions or errors, possibly including {@code * ClassCastException}. * * @return the next task, or {@code null} if none are available */ protected static ForkJoinTask<?> pollNextLocalTask() { return ((ForkJoinWorkerThread) Thread.currentThread()) .workQueue.nextLocalTask(); } /** * Unschedules and returns, without executing, the next task * queued by the current thread but not yet executed, if one is * available, or if not available, a task that was forked by some * other thread, if available. Availability may be transient, so a * {@code null} result does not necessarily imply quiescence * of the pool this task is operating in. This method is designed * primarily to support extensions, and is unlikely to be useful * otherwise. 
* * <p>This method may be invoked only from within {@code * ForkJoinPool} computations (as may be determined using method * {@link #inForkJoinPool}). Attempts to invoke in other contexts * result in exceptions or errors, possibly including {@code * ClassCastException}. * * @return a task, or {@code null} if none are available */ protected static ForkJoinTask<?> pollTask() { ForkJoinWorkerThread wt = (ForkJoinWorkerThread)Thread.currentThread(); return wt.pool.nextTaskFor(wt.workQueue); } // tag operations /** * Returns the tag for this task. * * @return the tag for this task * @since 1.8 */ public final short getForkJoinTaskTag() { return (short)status; } /** * Atomically sets the tag value for this task. * * @param tag the tag value * @return the previous value of the tag * @since 1.8 */ public final short setForkJoinTaskTag(short tag) { for (int s;;) { if (U.compareAndSwapInt(this, STATUS, s = status, (s & ~SMASK) | (tag & SMASK))) return (short)s; } } /** * Atomically conditionally sets the tag value for this task. * Among other applications, tags can be used as visit markers * in tasks operating on graphs, as in methods that check: {@code * if (task.compareAndSetForkJoinTaskTag((short)0, (short)1))} * before processing, otherwise exiting because the node has * already been visited. * * @param e the expected tag value * @param tag the new tag value * @return true if successful; i.e., the current value was * equal to e and is now tag. * @since 1.8 */ public final boolean compareAndSetForkJoinTaskTag(short e, short tag) { for (int s;;) { if ((short)(s = status) != e) return false; if (U.compareAndSwapInt(this, STATUS, s, (s & ~SMASK) | (tag & SMASK))) return true; } } /** * Adaptor for Runnables. This implements RunnableFuture * to be compliant with AbstractExecutorService constraints * when used in ForkJoinPool. */ static final class AdaptedRunnable<T> extends ForkJoinTask<T> implements RunnableFuture<T> { final Runnable runnable; T result; AdaptedRunnable(Runnable runnable, T result) { if (runnable == null) throw new NullPointerException(); this.runnable = runnable; this.result = result; // OK to set this even before completion } public final T getRawResult() { return result; } public final void setRawResult(T v) { result = v; } public final boolean exec() { runnable.run(); return true; } public final void run() { invoke(); } private static final long serialVersionUID = 5232453952276885070L; } /** * Adaptor for Runnables without results */ static final class AdaptedRunnableAction extends ForkJoinTask<Void> implements RunnableFuture<Void> { final Runnable runnable; AdaptedRunnableAction(Runnable runnable) { if (runnable == null) throw new NullPointerException(); this.runnable = runnable; } public final Void getRawResult() { return null; } public final void setRawResult(Void v) { } public final boolean exec() { runnable.run(); return true; } public final void run() { invoke(); } private static final long serialVersionUID = 5232453952276885070L; } /** * Adaptor for Callables */ static final class AdaptedCallable<T> extends ForkJoinTask<T> implements RunnableFuture<T> { final Callable<? extends T> callable; T result; AdaptedCallable(Callable<? 
extends T> callable) { if (callable == null) throw new NullPointerException(); this.callable = callable; } public final T getRawResult() { return result; } public final void setRawResult(T v) { result = v; } public final boolean exec() { try { result = callable.call(); return true; } catch (Error err) { throw err; } catch (RuntimeException rex) { throw rex; } catch (Exception ex) { throw new RuntimeException(ex); } } public final void run() { invoke(); } private static final long serialVersionUID = 2838392045355241008L; } /** * Returns a new {@code ForkJoinTask} that performs the {@code run} * method of the given {@code Runnable} as its action, and returns * a null result upon {@link #join}. * * @param runnable the runnable action * @return the task */ public static ForkJoinTask<?> adapt(Runnable runnable) { return new AdaptedRunnableAction(runnable); } /** * Returns a new {@code ForkJoinTask} that performs the {@code run} * method of the given {@code Runnable} as its action, and returns * the given result upon {@link #join}. * * @param runnable the runnable action * @param result the result upon completion * @return the task */ public static <T> ForkJoinTask<T> adapt(Runnable runnable, T result) { return new AdaptedRunnable<T>(runnable, result); } /** * Returns a new {@code ForkJoinTask} that performs the {@code call} * method of the given {@code Callable} as its action, and returns * its result upon {@link #join}, translating any checked exceptions * encountered into {@code RuntimeException}. * * @param callable the callable action * @return the task */ public static <T> ForkJoinTask<T> adapt(Callable<? extends T> callable) { return new AdaptedCallable<T>(callable); } // Serialization support private static final long serialVersionUID = -7721805057305804111L; /** * Saves this task to a stream (that is, serializes it). * * @serialData the current run status and the exception thrown * during execution, or {@code null} if none */ private void writeObject(java.io.ObjectOutputStream s) throws java.io.IOException { s.defaultWriteObject(); s.writeObject(getException()); } /** * Reconstitutes this task from a stream (that is, deserializes it). */ private void readObject(java.io.ObjectInputStream s) throws java.io.IOException, ClassNotFoundException { s.defaultReadObject(); Object ex = s.readObject(); if (ex != null) setExceptionalCompletion((Throwable)ex); } // Unsafe mechanics private static final sun.misc.Unsafe U; private static final long STATUS; static { exceptionTableLock = new ReentrantLock(); exceptionTableRefQueue = new ReferenceQueue<Object>(); exceptionTable = new ExceptionNode[EXCEPTION_MAP_CAPACITY]; try { U = getUnsafe(); STATUS = U.objectFieldOffset (ForkJoinTask.class.getDeclaredField("status")); } catch (Exception e) { throw new Error(e); } } /** * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. * Replace with a simple call to Unsafe.getUnsafe when integrating * into a jdk. 
* * @return a sun.misc.Unsafe */ private static sun.misc.Unsafe getUnsafe() { try { return sun.misc.Unsafe.getUnsafe(); } catch (SecurityException se) { try { return java.security.AccessController.doPrivileged (new java.security .PrivilegedExceptionAction<sun.misc.Unsafe>() { public sun.misc.Unsafe run() throws Exception { java.lang.reflect.Field f = sun.misc .Unsafe.class.getDeclaredField("theUnsafe"); f.setAccessible(true); return (sun.misc.Unsafe) f.get(null); }}); } catch (java.security.PrivilegedActionException e) { throw new RuntimeException("Could not initialize intrinsics", e.getCause()); } } } }
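/*
 * Illustrative sketch, not part of the original jsr166y sources: a small
 * RecursiveTask following the guidance in the ForkJoinTask javadoc above --
 * recursive decomposition, forking one half while computing the other half
 * in the current thread, then joining the forked half. The class name and
 * the threshold value are assumptions chosen only for this example.
 */
class SumTaskExample extends RecursiveTask<Long> {
    static final int THRESHOLD = 1000;       // rough cutoff for direct computation
    final long[] a; final int lo, hi;
    SumTaskExample(long[] a, int lo, int hi) { this.a = a; this.lo = lo; this.hi = hi; }
    protected Long compute() {
        if (hi - lo <= THRESHOLD) {          // small enough: sum sequentially
            long s = 0;
            for (int i = lo; i < hi; i++) s += a[i];
            return s;
        }
        int mid = (lo + hi) >>> 1;
        SumTaskExample left  = new SumTaskExample(a, lo, mid);
        SumTaskExample right = new SumTaskExample(a, mid, hi);
        left.fork();                         // run the left half asynchronously
        long rightSum = right.compute();     // compute the right half in this thread
        return rightSum + left.join();       // join the outstanding fork last
    }
    public static void main(String[] args) {
        long[] data = new long[10000];
        for (int i = 0; i < data.length; i++) data[i] = i;
        Long total = new ForkJoinPool().invoke(new SumTaskExample(data, 0, data.length));
        System.out.println(total);           // prints 49995000
    }
}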
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/jsr166y/LinkedTransferQueue.java
/* * Written by Doug Lea with assistance from members of JCP JSR-166 * Expert Group and released to the public domain, as explained at * http://creativecommons.org/publicdomain/zero/1.0/ */ package jsr166y; import water.H2ORuntime; import java.util.AbstractQueue; import java.util.Collection; import java.util.Iterator; import java.util.NoSuchElementException; import java.util.Queue; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.LockSupport; /** * An unbounded {@link TransferQueue} based on linked nodes. * This queue orders elements FIFO (first-in-first-out) with respect * to any given producer. The <em>head</em> of the queue is that * element that has been on the queue the longest time for some * producer. The <em>tail</em> of the queue is that element that has * been on the queue the shortest time for some producer. * * <p>Beware that, unlike in most collections, the {@code size} method * is <em>NOT</em> a constant-time operation. Because of the * asynchronous nature of these queues, determining the current number * of elements requires a traversal of the elements, and so may report * inaccurate results if this collection is modified during traversal. * Additionally, the bulk operations {@code addAll}, * {@code removeAll}, {@code retainAll}, {@code containsAll}, * {@code equals}, and {@code toArray} are <em>not</em> guaranteed * to be performed atomically. For example, an iterator operating * concurrently with an {@code addAll} operation might view only some * of the added elements. * * <p>This class and its iterator implement all of the * <em>optional</em> methods of the {@link Collection} and {@link * Iterator} interfaces. * * <p>Memory consistency effects: As with other concurrent * collections, actions in a thread prior to placing an object into a * {@code LinkedTransferQueue} * <a href="package-summary.html#MemoryVisibility"><i>happen-before</i></a> * actions subsequent to the access or removal of that element from * the {@code LinkedTransferQueue} in another thread. * * <p>This class is a member of the * <a href="{@docRoot}/../technotes/guides/collections/index.html"> * Java Collections Framework</a>. * * @since 1.7 * @author Doug Lea * @param <E> the type of elements held in this collection */ public class LinkedTransferQueue<E> extends AbstractQueue<E> implements TransferQueue<E>, java.io.Serializable { private static final long serialVersionUID = -3223113410248163686L; /* * *** Overview of Dual Queues with Slack *** * * Dual Queues, introduced by Scherer and Scott * (http://www.cs.rice.edu/~wns1/papers/2004-DISC-DDS.pdf) are * (linked) queues in which nodes may represent either data or * requests. When a thread tries to enqueue a data node, but * encounters a request node, it instead "matches" and removes it; * and vice versa for enqueuing requests. Blocking Dual Queues * arrange that threads enqueuing unmatched requests block until * other threads provide the match. Dual Synchronous Queues (see * Scherer, Lea, & Scott * http://www.cs.rochester.edu/u/scott/papers/2009_Scherer_CACM_SSQ.pdf) * additionally arrange that threads enqueuing unmatched data also * block. Dual Transfer Queues support all of these modes, as * dictated by callers. * * A FIFO dual queue may be implemented using a variation of the * Michael & Scott (M&S) lock-free queue algorithm * (http://www.cs.rochester.edu/u/scott/papers/1996_PODC_queues.pdf). 
* It maintains two pointer fields, "head", pointing to a * (matched) node that in turn points to the first actual * (unmatched) queue node (or null if empty); and "tail" that * points to the last node on the queue (or again null if * empty). For example, here is a possible queue with four data * elements: * * head tail * | | * v v * M -> U -> U -> U -> U * * The M&S queue algorithm is known to be prone to scalability and * overhead limitations when maintaining (via CAS) these head and * tail pointers. This has led to the development of * contention-reducing variants such as elimination arrays (see * Moir et al http://portal.acm.org/citation.cfm?id=1074013) and * optimistic back pointers (see Ladan-Mozes & Shavit * http://people.csail.mit.edu/edya/publications/OptimisticFIFOQueue-journal.pdf). * However, the nature of dual queues enables a simpler tactic for * improving M&S-style implementations when dual-ness is needed. * * In a dual queue, each node must atomically maintain its match * status. While there are other possible variants, we implement * this here as: for a data-mode node, matching entails CASing an * "item" field from a non-null data value to null upon match, and * vice-versa for request nodes, CASing from null to a data * value. (Note that the linearization properties of this style of * queue are easy to verify -- elements are made available by * linking, and unavailable by matching.) Compared to plain M&S * queues, this property of dual queues requires one additional * successful atomic operation per enq/deq pair. But it also * enables lower cost variants of queue maintenance mechanics. (A * variation of this idea applies even for non-dual queues that * support deletion of interior elements, such as * j.u.c.ConcurrentLinkedQueue.) * * Once a node is matched, its match status can never again * change. We may thus arrange that the linked list of them * contain a prefix of zero or more matched nodes, followed by a * suffix of zero or more unmatched nodes. (Note that we allow * both the prefix and suffix to be zero length, which in turn * means that we do not use a dummy header.) If we were not * concerned with either time or space efficiency, we could * correctly perform enqueue and dequeue operations by traversing * from a pointer to the initial node; CASing the item of the * first unmatched node on match and CASing the next field of the * trailing node on appends. (Plus some special-casing when * initially empty). While this would be a terrible idea in * itself, it does have the benefit of not requiring ANY atomic * updates on head/tail fields. * * We introduce here an approach that lies between the extremes of * never versus always updating queue (head and tail) pointers. * This offers a tradeoff between sometimes requiring extra * traversal steps to locate the first and/or last unmatched * nodes, versus the reduced overhead and contention of fewer * updates to queue pointers. For example, a possible snapshot of * a queue is: * * head tail * | | * v v * M -> M -> U -> U -> U -> U * * The best value for this "slack" (the targeted maximum distance * between the value of "head" and the first unmatched node, and * similarly for "tail") is an empirical matter. We have found * that using very small constants in the range of 1-3 work best * over a range of platforms. Larger values introduce increasing * costs of cache misses and risks of long traversal chains, while * smaller values increase CAS contention and overhead. 
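     *
     * As a concrete illustration of keeping slack bounded after a match
     * (a simplified sketch only, not the exact code that appears later in
     * xfer), advancing the head past matched nodes when the matched node p
     * was reached from head h looks roughly like:
     *
     *   Node n = p.next;
     *   if (p != h && casHead(h, n == null ? p : n))  // hop head by >= 2
     *       h.forgetNext();       // self-link the old head; see below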
* * Dual queues with slack differ from plain M&S dual queues by * virtue of only sometimes updating head or tail pointers when * matching, appending, or even traversing nodes; in order to * maintain a targeted slack. The idea of "sometimes" may be * operationalized in several ways. The simplest is to use a * per-operation counter incremented on each traversal step, and * to try (via CAS) to update the associated queue pointer * whenever the count exceeds a threshold. Another, that requires * more overhead, is to use random number generators to update * with a given probability per traversal step. * * In any strategy along these lines, because CASes updating * fields may fail, the actual slack may exceed targeted * slack. However, they may be retried at any time to maintain * targets. Even when using very small slack values, this * approach works well for dual queues because it allows all * operations up to the point of matching or appending an item * (hence potentially allowing progress by another thread) to be * read-only, thus not introducing any further contention. As * described below, we implement this by performing slack * maintenance retries only after these points. * * As an accompaniment to such techniques, traversal overhead can * be further reduced without increasing contention of head * pointer updates: Threads may sometimes shortcut the "next" link * path from the current "head" node to be closer to the currently * known first unmatched node, and similarly for tail. Again, this * may be triggered with using thresholds or randomization. * * These ideas must be further extended to avoid unbounded amounts * of costly-to-reclaim garbage caused by the sequential "next" * links of nodes starting at old forgotten head nodes: As first * described in detail by Boehm * (http://portal.acm.org/citation.cfm?doid=503272.503282) if a GC * delays noticing that any arbitrarily old node has become * garbage, all newer dead nodes will also be unreclaimed. * (Similar issues arise in non-GC environments.) To cope with * this in our implementation, upon CASing to advance the head * pointer, we set the "next" link of the previous head to point * only to itself; thus limiting the length of connected dead lists. * (We also take similar care to wipe out possibly garbage * retaining values held in other Node fields.) However, doing so * adds some further complexity to traversal: If any "next" * pointer links to itself, it indicates that the current thread * has lagged behind a head-update, and so the traversal must * continue from the "head". Traversals trying to find the * current tail starting from "tail" may also encounter * self-links, in which case they also continue at "head". * * It is tempting in slack-based scheme to not even use CAS for * updates (similarly to Ladan-Mozes & Shavit). However, this * cannot be done for head updates under the above link-forgetting * mechanics because an update may leave head at a detached node. * And while direct writes are possible for tail updates, they * increase the risk of long retraversals, and hence long garbage * chains, which can be much more costly than is worthwhile * considering that the cost difference of performing a CAS vs * write is smaller when they are not triggered on each operation * (especially considering that writes and CASes equally require * additional GC bookkeeping ("write barriers") that are sometimes * more costly than the writes themselves because of contention). 
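     *
     * One practical consequence of the self-linking scheme above is the
     * small traversal helper used throughout this file (shown here only for
     * orientation; it appears verbatim further down as succ):
     *
     *   Node next = p.next;
     *   return (p == next) ? head : next;   // self-linked => restart at head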
* * *** Overview of implementation *** * * We use a threshold-based approach to updates, with a slack * threshold of two -- that is, we update head/tail when the * current pointer appears to be two or more steps away from the * first/last node. The slack value is hard-wired: a path greater * than one is naturally implemented by checking equality of * traversal pointers except when the list has only one element, * in which case we keep slack threshold at one. Avoiding tracking * explicit counts across method calls slightly simplifies an * already-messy implementation. Using randomization would * probably work better if there were a low-quality dirt-cheap * per-thread one available, but even ThreadLocalRandom is too * heavy for these purposes. * * With such a small slack threshold value, it is not worthwhile * to augment this with path short-circuiting (i.e., unsplicing * interior nodes) except in the case of cancellation/removal (see * below). * * We allow both the head and tail fields to be null before any * nodes are enqueued; initializing upon first append. This * simplifies some other logic, as well as providing more * efficient explicit control paths instead of letting JVMs insert * implicit NullPointerExceptions when they are null. While not * currently fully implemented, we also leave open the possibility * of re-nulling these fields when empty (which is complicated to * arrange, for little benefit.) * * All enqueue/dequeue operations are handled by the single method * "xfer" with parameters indicating whether to act as some form * of offer, put, poll, take, or transfer (each possibly with * timeout). The relative complexity of using one monolithic * method outweighs the code bulk and maintenance problems of * using separate methods for each case. * * Operation consists of up to three phases. The first is * implemented within method xfer, the second in tryAppend, and * the third in method awaitMatch. * * 1. Try to match an existing node * * Starting at head, skip already-matched nodes until finding * an unmatched node of opposite mode, if one exists, in which * case matching it and returning, also if necessary updating * head to one past the matched node (or the node itself if the * list has no other unmatched nodes). If the CAS misses, then * a loop retries advancing head by two steps until either * success or the slack is at most two. By requiring that each * attempt advances head by two (if applicable), we ensure that * the slack does not grow without bound. Traversals also check * if the initial head is now off-list, in which case they * start at the new head. * * If no candidates are found and the call was untimed * poll/offer, (argument "how" is NOW) return. * * 2. Try to append a new node (method tryAppend) * * Starting at current tail pointer, find the actual last node * and try to append a new node (or if head was null, establish * the first node). Nodes can be appended only if their * predecessors are either already matched or are of the same * mode. If we detect otherwise, then a new node with opposite * mode must have been appended during traversal, so we must * restart at phase 1. The traversal and update steps are * otherwise similar to phase 1: Retrying upon CAS misses and * checking for staleness. In particular, if a self-link is * encountered, then we can safely jump to a node on the list * by continuing the traversal at current head. * * On successful append, if the call was ASYNC, return. * * 3. 
Await match or cancellation (method awaitMatch) * * Wait for another thread to match node; instead cancelling if * the current thread was interrupted or the wait timed out. On * multiprocessors, we use front-of-queue spinning: If a node * appears to be the first unmatched node in the queue, it * spins a bit before blocking. In either case, before blocking * it tries to unsplice any nodes between the current "head" * and the first unmatched node. * * Front-of-queue spinning vastly improves performance of * heavily contended queues. And so long as it is relatively * brief and "quiet", spinning does not much impact performance * of less-contended queues. During spins threads check their * interrupt status and generate a thread-local random number * to decide to occasionally perform a Thread.yield. While * yield has underdefined specs, we assume that it might help, * and will not hurt, in limiting impact of spinning on busy * systems. We also use smaller (1/2) spins for nodes that are * not known to be front but whose predecessors have not * blocked -- these "chained" spins avoid artifacts of * front-of-queue rules which otherwise lead to alternating * nodes spinning vs blocking. Further, front threads that * represent phase changes (from data to request node or vice * versa) compared to their predecessors receive additional * chained spins, reflecting longer paths typically required to * unblock threads during phase changes. * * * ** Unlinking removed interior nodes ** * * In addition to minimizing garbage retention via self-linking * described above, we also unlink removed interior nodes. These * may arise due to timed out or interrupted waits, or calls to * remove(x) or Iterator.remove. Normally, given a node that was * at one time known to be the predecessor of some node s that is * to be removed, we can unsplice s by CASing the next field of * its predecessor if it still points to s (otherwise s must * already have been removed or is now offlist). But there are two * situations in which we cannot guarantee to make node s * unreachable in this way: (1) If s is the trailing node of list * (i.e., with null next), then it is pinned as the target node * for appends, so can only be removed later after other nodes are * appended. (2) We cannot necessarily unlink s given a * predecessor node that is matched (including the case of being * cancelled): the predecessor may already be unspliced, in which * case some previous reachable node may still point to s. * (For further explanation see Herlihy & Shavit "The Art of * Multiprocessor Programming" chapter 9). Although, in both * cases, we can rule out the need for further action if either s * or its predecessor are (or can be made to be) at, or fall off * from, the head of list. * * Without taking these into account, it would be possible for an * unbounded number of supposedly removed nodes to remain * reachable. Situations leading to such buildup are uncommon but * can occur in practice; for example when a series of short timed * calls to poll repeatedly time out but never otherwise fall off * the list because of an untimed call to take at the front of the * queue. * * When these cases arise, rather than always retraversing the * entire list to find an actual predecessor to unlink (which * won't help for case (1) anyway), we record a conservative * estimate of possible unsplice failures (in "sweepVotes"). 
* We trigger a full sweep when the estimate exceeds a threshold * ("SWEEP_THRESHOLD") indicating the maximum number of estimated * removal failures to tolerate before sweeping through, unlinking * cancelled nodes that were not unlinked upon initial removal. * We perform sweeps by the thread hitting threshold (rather than * background threads or by spreading work to other threads) * because in the main contexts in which removal occurs, the * caller is already timed-out, cancelled, or performing a * potentially O(n) operation (e.g. remove(x)), none of which are * time-critical enough to warrant the overhead that alternatives * would impose on other threads. * * Because the sweepVotes estimate is conservative, and because * nodes become unlinked "naturally" as they fall off the head of * the queue, and because we allow votes to accumulate even while * sweeps are in progress, there are typically significantly fewer * such nodes than estimated. Choice of a threshold value * balances the likelihood of wasted effort and contention, versus * providing a worst-case bound on retention of interior nodes in * quiescent queues. The value defined below was chosen * empirically to balance these under various timeout scenarios. * * Note that we cannot self-link unlinked interior nodes during * sweeps. However, the associated garbage chains terminate when * some successor ultimately falls off the head of the list and is * self-linked. */ /** True if on multiprocessor */ private static final boolean MP = H2ORuntime.availableProcessors() > 1; /** * The number of times to spin (with randomly interspersed calls * to Thread.yield) on multiprocessor before blocking when a node * is apparently the first waiter in the queue. See above for * explanation. Must be a power of two. The value is empirically * derived -- it works pretty well across a variety of processors, * numbers of CPUs, and OSes. */ private static final int FRONT_SPINS = 1 << 7; /** * The number of times to spin before blocking when a node is * preceded by another node that is apparently spinning. Also * serves as an increment to FRONT_SPINS on phase changes, and as * base average frequency for yielding during spins. Must be a * power of two. */ private static final int CHAINED_SPINS = FRONT_SPINS >>> 1; /** * The maximum number of estimated removal failures (sweepVotes) * to tolerate before sweeping through the queue unlinking * cancelled nodes that were not unlinked upon initial * removal. See above for explanation. The value must be at least * two to avoid useless sweeps when removing trailing nodes. */ static final int SWEEP_THRESHOLD = 32; /** * Queue nodes. Uses Object, not E, for items to allow forgetting * them after use. Relies heavily on Unsafe mechanics to minimize * unnecessary ordering constraints: Writes that are intrinsically * ordered wrt other accesses or CASes use simple relaxed forms. */ static final class Node { final boolean isData; // false if this is a request node volatile Object item; // initially non-null if isData; CASed to match volatile Node next; volatile Thread waiter; // null until waiting // CAS methods for fields final boolean casNext(Node cmp, Node val) { return UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val); } final boolean casItem(Object cmp, Object val) { // assert cmp == null || cmp.getClass() != Node.class; return UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val); } /** * Constructs a new node. Uses relaxed write because item can * only be seen after publication via casNext. 
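     * (For orientation, the publishing sequence -- slightly simplified from
     * xfer/tryAppend below -- is:
     *
     *   Node s = new Node(e, haveData);   // relaxed write of item in the ctor
     *   ...
     *   p.casNext(null, s);               // CAS on next publishes s, hence item
     *
     * so no other thread can reach s.item before that CAS.)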
*/ Node(Object item, boolean isData) { UNSAFE.putObject(this, itemOffset, item); // relaxed write this.isData = isData; } /** * Links node to itself to avoid garbage retention. Called * only after CASing head field, so uses relaxed write. */ final void forgetNext() { UNSAFE.putObject(this, nextOffset, this); } /** * Sets item to self and waiter to null, to avoid garbage * retention after matching or cancelling. Uses relaxed writes * because order is already constrained in the only calling * contexts: item is forgotten only after volatile/atomic * mechanics that extract items. Similarly, clearing waiter * follows either CAS or return from park (if ever parked; * else we don't care). */ final void forgetContents() { UNSAFE.putObject(this, itemOffset, this); UNSAFE.putObject(this, waiterOffset, null); } /** * Returns true if this node has been matched, including the * case of artificial matches due to cancellation. */ final boolean isMatched() { Object x = item; return (x == this) || ((x == null) == isData); } /** * Returns true if this is an unmatched request node. */ final boolean isUnmatchedRequest() { return !isData && item == null; } /** * Returns true if a node with the given mode cannot be * appended to this node because this node is unmatched and * has opposite data mode. */ final boolean cannotPrecede(boolean haveData) { boolean d = isData; Object x; return d != haveData && (x = item) != this && (x != null) == d; } /** * Tries to artificially match a data node -- used by remove. */ final boolean tryMatchData() { // assert isData; Object x = item; if (x != null && x != this && casItem(x, null)) { LockSupport.unpark(waiter); return true; } return false; } private static final long serialVersionUID = -3375979862319811754L; // Unsafe mechanics private static final sun.misc.Unsafe UNSAFE; private static final long itemOffset; private static final long nextOffset; private static final long waiterOffset; static { try { UNSAFE = getUnsafe(); Class<?> k = Node.class; itemOffset = UNSAFE.objectFieldOffset (k.getDeclaredField("item")); nextOffset = UNSAFE.objectFieldOffset (k.getDeclaredField("next")); waiterOffset = UNSAFE.objectFieldOffset (k.getDeclaredField("waiter")); } catch (Exception e) { throw new Error(e); } } } /** head of the queue; null until first enqueue */ transient volatile Node head; /** tail of the queue; null until first append */ private transient volatile Node tail; /** The number of apparent failures to unsplice removed nodes */ private transient volatile int sweepVotes; // CAS methods for fields private boolean casTail(Node cmp, Node val) { return UNSAFE.compareAndSwapObject(this, tailOffset, cmp, val); } private boolean casHead(Node cmp, Node val) { return UNSAFE.compareAndSwapObject(this, headOffset, cmp, val); } private boolean casSweepVotes(int cmp, int val) { return UNSAFE.compareAndSwapInt(this, sweepVotesOffset, cmp, val); } /* * Possible values for "how" argument in xfer method. */ private static final int NOW = 0; // for untimed poll, tryTransfer private static final int ASYNC = 1; // for offer, put, add private static final int SYNC = 2; // for transfer, take private static final int TIMED = 3; // for timed poll, tryTransfer @SuppressWarnings("unchecked") static <E> E cast(Object item) { // assert item == null || item.getClass() != Node.class; return (E) item; } /** * Implements all queuing methods. See above for explanation. 
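     *
     * For orientation, the public methods map onto this single method as
     * follows (derived from the callers defined later in this file):
     *
     *   offer(e), put(e), add(e)   -> xfer(e,    true,  ASYNC, 0)
     *   tryTransfer(e)             -> xfer(e,    true,  NOW,   0)
     *   transfer(e)                -> xfer(e,    true,  SYNC,  0)
     *   tryTransfer(e, t, unit)    -> xfer(e,    true,  TIMED, nanos)
     *   poll()                     -> xfer(null, false, NOW,   0)
     *   take()                     -> xfer(null, false, SYNC,  0)
     *   poll(t, unit)              -> xfer(null, false, TIMED, nanos)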
* * @param e the item or null for take * @param haveData true if this is a put, else a take * @param how NOW, ASYNC, SYNC, or TIMED * @param nanos timeout in nanosecs, used only if mode is TIMED * @return an item if matched, else e * @throws NullPointerException if haveData mode but e is null */ private E xfer(E e, boolean haveData, int how, long nanos) { if (haveData && (e == null)) throw new NullPointerException(); Node s = null; // the node to append, if needed retry: for (;;) { // restart on append race for (Node h = head, p = h; p != null;) { // find & match first node boolean isData = p.isData; Object item = p.item; if (item != p && (item != null) == isData) { // unmatched if (isData == haveData) // can't match break; if (p.casItem(item, e)) { // match for (Node q = p; q != h;) { Node n = q.next; // update by 2 unless singleton if (head == h && casHead(h, n == null ? q : n)) { h.forgetNext(); break; } // advance and retry if ((h = head) == null || (q = h.next) == null || !q.isMatched()) break; // unless slack < 2 } LockSupport.unpark(p.waiter); return LinkedTransferQueue.<E>cast(item); } } Node n = p.next; p = (p != n) ? n : (h = head); // Use head if p offlist } if (how != NOW) { // No matches available if (s == null) s = new Node(e, haveData); Node pred = tryAppend(s, haveData); if (pred == null) continue retry; // lost race vs opposite mode if (how != ASYNC) return awaitMatch(s, pred, e, (how == TIMED), nanos); } return e; // not waiting } } /** * Tries to append node s as tail. * * @param s the node to append * @param haveData true if appending in data mode * @return null on failure due to losing race with append in * different mode, else s's predecessor, or s itself if no * predecessor */ private Node tryAppend(Node s, boolean haveData) { for (Node t = tail, p = t;;) { // move p to last node and append Node n, u; // temps for reads of next & tail if (p == null && (p = head) == null) { if (casHead(null, s)) return s; // initialize } else if (p.cannotPrecede(haveData)) return null; // lost race vs opposite mode else if ((n = p.next) != null) // not last; keep traversing p = p != t && t != (u = tail) ? (t = u) : // stale tail (p != n) ? n : null; // restart if off list else if (!p.casNext(null, s)) p = p.next; // re-read on CAS failure else { if (p != t) { // update if slack now >= 2 while ((tail != t || !casTail(t, s)) && (t = tail) != null && (s = t.next) != null && // advance and retry (s = s.next) != null && s != t); } return p; } } } /** * Spins/yields/blocks until node s is matched or caller gives up. * * @param s the waiting node * @param pred the predecessor of s, or s itself if it has no * predecessor, or null if unknown (the null case does not occur * in any current calls but may in possible future extensions) * @param e the comparison value for checking match * @param timed if true, wait only until timeout elapses * @param nanos timeout in nanosecs, used only if timed is true * @return matched item, or e if unmatched on interrupt or timeout */ private E awaitMatch(Node s, Node pred, E e, boolean timed, long nanos) { long lastTime = timed ? 
System.nanoTime() : 0L; Thread w = Thread.currentThread(); int spins = -1; // initialized after first item and cancel checks ThreadLocalRandom randomYields = null; // bound if needed for (;;) { Object item = s.item; if (item != e) { // matched // assert item != s; s.forgetContents(); // avoid garbage return LinkedTransferQueue.<E>cast(item); } if ((w.isInterrupted() || (timed && nanos <= 0)) && s.casItem(e, s)) { // cancel unsplice(pred, s); return e; } if (spins < 0) { // establish spins at/near front if ((spins = spinsFor(pred, s.isData)) > 0) randomYields = ThreadLocalRandom.current(); } else if (spins > 0) { // spin --spins; if (randomYields.nextInt(CHAINED_SPINS) == 0) Thread.yield(); // occasionally yield } else if (s.waiter == null) { s.waiter = w; // request unpark then recheck } else if (timed) { long now = System.nanoTime(); if ((nanos -= now - lastTime) > 0) LockSupport.parkNanos(this, nanos); lastTime = now; } else { LockSupport.park(this); } } } /** * Returns spin/yield value for a node with given predecessor and * data mode. See above for explanation. */ private static int spinsFor(Node pred, boolean haveData) { if (MP && pred != null) { if (pred.isData != haveData) // phase change return FRONT_SPINS + CHAINED_SPINS; if (pred.isMatched()) // probably at front return FRONT_SPINS; if (pred.waiter == null) // pred apparently spinning return CHAINED_SPINS; } return 0; } /* -------------- Traversal methods -------------- */ /** * Returns the successor of p, or the head node if p.next has been * linked to self, which will only be true if traversing with a * stale pointer that is now off the list. */ final Node succ(Node p) { Node next = p.next; return (p == next) ? head : next; } /** * Returns the first unmatched node of the given mode, or null if * none. Used by methods isEmpty, hasWaitingConsumer. */ private Node firstOfMode(boolean isData) { for (Node p = head; p != null; p = succ(p)) { if (!p.isMatched()) return (p.isData == isData) ? p : null; } return null; } /** * Returns the item in the first unmatched node with isData; or * null if none. Used by peek. */ private E firstDataItem() { for (Node p = head; p != null; p = succ(p)) { Object item = p.item; if (p.isData) { if (item != null && item != p) return LinkedTransferQueue.<E>cast(item); } else if (item == null) return null; } return null; } /** * Traverses and counts unmatched nodes of the given mode. * Used by methods size and getWaitingConsumerCount. */ private int countOfMode(boolean data) { int count = 0; for (Node p = head; p != null; ) { if (!p.isMatched()) { if (p.isData != data) return 0; if (++count == Integer.MAX_VALUE) // saturated break; } Node n = p.next; if (n != p) p = n; else { count = 0; p = head; } } return count; } final class Itr implements Iterator<E> { private Node nextNode; // next node to return item for private E nextItem; // the corresponding item private Node lastRet; // last returned node, to support remove private Node lastPred; // predecessor to unlink lastRet /** * Moves to next node after prev, or first node if prev null. */ private void advance(Node prev) { /* * To track and avoid buildup of deleted nodes in the face * of calls to both Queue.remove and Itr.remove, we must * include variants of unsplice and sweep upon each * advance: Upon Itr.remove, we may need to catch up links * from lastPred, and upon other removes, we might need to * skip ahead from stale nodes and unsplice deleted ones * found while advancing. 
*/ Node r, b; // reset lastPred upon possible deletion of lastRet if ((r = lastRet) != null && !r.isMatched()) lastPred = r; // next lastPred is old lastRet else if ((b = lastPred) == null || b.isMatched()) lastPred = null; // at start of list else { Node s, n; // help with removal of lastPred.next while ((s = b.next) != null && s != b && s.isMatched() && (n = s.next) != null && n != s) b.casNext(s, n); } this.lastRet = prev; for (Node p = prev, s, n;;) { s = (p == null) ? head : p.next; if (s == null) break; else if (s == p) { p = null; continue; } Object item = s.item; if (s.isData) { if (item != null && item != s) { nextItem = LinkedTransferQueue.<E>cast(item); nextNode = s; return; } } else if (item == null) break; // assert s.isMatched(); if (p == null) p = s; else if ((n = s.next) == null) break; else if (s == n) p = null; else p.casNext(s, n); } nextNode = null; nextItem = null; } Itr() { advance(null); } public final boolean hasNext() { return nextNode != null; } public final E next() { Node p = nextNode; if (p == null) throw new NoSuchElementException(); E e = nextItem; advance(p); return e; } public final void remove() { final Node lastRet = this.lastRet; if (lastRet == null) throw new IllegalStateException(); this.lastRet = null; if (lastRet.tryMatchData()) unsplice(lastPred, lastRet); } } /* -------------- Removal methods -------------- */ /** * Unsplices (now or later) the given deleted/cancelled node with * the given predecessor. * * @param pred a node that was at one time known to be the * predecessor of s, or null or s itself if s is/was at head * @param s the node to be unspliced */ final void unsplice(Node pred, Node s) { s.forgetContents(); // forget unneeded fields /* * See above for rationale. Briefly: if pred still points to * s, try to unlink s. If s cannot be unlinked, because it is * trailing node or pred might be unlinked, and neither pred * nor s are head or offlist, add to sweepVotes, and if enough * votes have accumulated, sweep. */ if (pred != null && pred != s && pred.next == s) { Node n = s.next; if (n == null || (n != s && pred.casNext(s, n) && pred.isMatched())) { for (;;) { // check if at, or could be, head Node h = head; if (h == pred || h == s || h == null) return; // at head or list empty if (!h.isMatched()) break; Node hn = h.next; if (hn == null) return; // now empty if (hn != h && casHead(h, hn)) h.forgetNext(); // advance head } if (pred.next != pred && s.next != s) { // recheck if offlist for (;;) { // sweep now if enough votes int v = sweepVotes; if (v < SWEEP_THRESHOLD) { if (casSweepVotes(v, v + 1)) break; } else if (casSweepVotes(v, 0)) { sweep(); break; } } } } } } /** * Unlinks matched (typically cancelled) nodes encountered in a * traversal from head. 
*/ private void sweep() { for (Node p = head, s, n; p != null && (s = p.next) != null; ) { if (!s.isMatched()) // Unmatched nodes are never self-linked p = s; else if ((n = s.next) == null) // trailing node is pinned break; else if (s == n) // stale // No need to also check for p == s, since that implies s == n p = head; else p.casNext(s, n); } } /** * Main implementation of remove(Object) */ private boolean findAndRemove(Object e) { if (e != null) { for (Node pred = null, p = head; p != null; ) { Object item = p.item; if (p.isData) { if (item != null && item != p && e.equals(item) && p.tryMatchData()) { unsplice(pred, p); return true; } } else if (item == null) break; pred = p; if ((p = p.next) == pred) { // stale pred = null; p = head; } } } return false; } /** * Creates an initially empty {@code LinkedTransferQueue}. */ public LinkedTransferQueue() { } /** * Creates a {@code LinkedTransferQueue} * initially containing the elements of the given collection, * added in traversal order of the collection's iterator. * * @param c the collection of elements to initially contain * @throws NullPointerException if the specified collection or any * of its elements are null */ public LinkedTransferQueue(Collection<? extends E> c) { this(); addAll(c); } /** * Inserts the specified element at the tail of this queue. * As the queue is unbounded, this method will never block. * * @throws NullPointerException if the specified element is null */ public void put(E e) { xfer(e, true, ASYNC, 0); } /** * Inserts the specified element at the tail of this queue. * As the queue is unbounded, this method will never block or * return {@code false}. * * @return {@code true} (as specified by * {@link java.util.concurrent.BlockingQueue#offer(Object,long,TimeUnit) * BlockingQueue.offer}) * @throws NullPointerException if the specified element is null */ public boolean offer(E e, long timeout, TimeUnit unit) { xfer(e, true, ASYNC, 0); return true; } /** * Inserts the specified element at the tail of this queue. * As the queue is unbounded, this method will never return {@code false}. * * @return {@code true} (as specified by {@link Queue#offer}) * @throws NullPointerException if the specified element is null */ public boolean offer(E e) { xfer(e, true, ASYNC, 0); return true; } /** * Inserts the specified element at the tail of this queue. * As the queue is unbounded, this method will never throw * {@link IllegalStateException} or return {@code false}. * * @return {@code true} (as specified by {@link Collection#add}) * @throws NullPointerException if the specified element is null */ public boolean add(E e) { xfer(e, true, ASYNC, 0); return true; } /** * Transfers the element to a waiting consumer immediately, if possible. * * <p>More precisely, transfers the specified element immediately * if there exists a consumer already waiting to receive it (in * {@link #take} or timed {@link #poll(long,TimeUnit) poll}), * otherwise returning {@code false} without enqueuing the element. * * @throws NullPointerException if the specified element is null */ public boolean tryTransfer(E e) { return xfer(e, true, NOW, 0) == null; } /** * Transfers the element to a consumer, waiting if necessary to do so. * * <p>More precisely, transfers the specified element immediately * if there exists a consumer already waiting to receive it (in * {@link #take} or timed {@link #poll(long,TimeUnit) poll}), * else inserts the specified element at the tail of this queue * and waits until the element is received by a consumer. 
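     *
     * <p>For example (an illustrative fragment only; {@code queue} is not
     * defined in this file), with no consumer yet waiting:
     *
     * <pre> {@code
     * queue.offer(x);    // returns immediately; x is simply enqueued
     * queue.transfer(y); // enqueues y, then blocks until a consumer receives it}</pre>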
* * @throws NullPointerException if the specified element is null */ public void transfer(E e) throws InterruptedException { if (xfer(e, true, SYNC, 0) != null) { Thread.interrupted(); // failure possible only due to interrupt throw new InterruptedException(); } } /** * Transfers the element to a consumer if it is possible to do so * before the timeout elapses. * * <p>More precisely, transfers the specified element immediately * if there exists a consumer already waiting to receive it (in * {@link #take} or timed {@link #poll(long,TimeUnit) poll}), * else inserts the specified element at the tail of this queue * and waits until the element is received by a consumer, * returning {@code false} if the specified wait time elapses * before the element can be transferred. * * @throws NullPointerException if the specified element is null */ public boolean tryTransfer(E e, long timeout, TimeUnit unit) throws InterruptedException { if (xfer(e, true, TIMED, unit.toNanos(timeout)) == null) return true; if (!Thread.interrupted()) return false; throw new InterruptedException(); } public E take() throws InterruptedException { E e = xfer(null, false, SYNC, 0); if (e != null) return e; Thread.interrupted(); throw new InterruptedException(); } public E poll(long timeout, TimeUnit unit) throws InterruptedException { E e = xfer(null, false, TIMED, unit.toNanos(timeout)); if (e != null || !Thread.interrupted()) return e; throw new InterruptedException(); } public E poll() { return xfer(null, false, NOW, 0); } /** * @throws NullPointerException {@inheritDoc} * @throws IllegalArgumentException {@inheritDoc} */ public int drainTo(Collection<? super E> c) { if (c == null) throw new NullPointerException(); if (c == this) throw new IllegalArgumentException(); int n = 0; for (E e; (e = poll()) != null;) { c.add(e); ++n; } return n; } /** * @throws NullPointerException {@inheritDoc} * @throws IllegalArgumentException {@inheritDoc} */ public int drainTo(Collection<? super E> c, int maxElements) { if (c == null) throw new NullPointerException(); if (c == this) throw new IllegalArgumentException(); int n = 0; for (E e; n < maxElements && (e = poll()) != null;) { c.add(e); ++n; } return n; } /** * Returns an iterator over the elements in this queue in proper sequence. * The elements will be returned in order from first (head) to last (tail). * * <p>The returned iterator is a "weakly consistent" iterator that * will never throw {@link java.util.ConcurrentModificationException * ConcurrentModificationException}, and guarantees to traverse * elements as they existed upon construction of the iterator, and * may (but is not guaranteed to) reflect any modifications * subsequent to construction. * * @return an iterator over the elements in this queue in proper sequence */ public Iterator<E> iterator() { return new Itr(); } public E peek() { return firstDataItem(); } /** * Returns {@code true} if this queue contains no elements. * * @return {@code true} if this queue contains no elements */ public boolean isEmpty() { for (Node p = head; p != null; p = succ(p)) { if (!p.isMatched()) return !p.isData; } return true; } public boolean hasWaitingConsumer() { return firstOfMode(false) != null; } /** * Returns the number of elements in this queue. If this queue * contains more than {@code Integer.MAX_VALUE} elements, returns * {@code Integer.MAX_VALUE}. * * <p>Beware that, unlike in most collections, this method is * <em>NOT</em> a constant-time operation. 
Because of the * asynchronous nature of these queues, determining the current * number of elements requires an O(n) traversal. * * @return the number of elements in this queue */ public int size() { return countOfMode(true); } public int getWaitingConsumerCount() { return countOfMode(false); } /** * Removes a single instance of the specified element from this queue, * if it is present. More formally, removes an element {@code e} such * that {@code o.equals(e)}, if this queue contains one or more such * elements. * Returns {@code true} if this queue contained the specified element * (or equivalently, if this queue changed as a result of the call). * * @param o element to be removed from this queue, if present * @return {@code true} if this queue changed as a result of the call */ public boolean remove(Object o) { return findAndRemove(o); } /** * Returns {@code true} if this queue contains the specified element. * More formally, returns {@code true} if and only if this queue contains * at least one element {@code e} such that {@code o.equals(e)}. * * @param o object to be checked for containment in this queue * @return {@code true} if this queue contains the specified element */ public boolean contains(Object o) { if (o == null) return false; for (Node p = head; p != null; p = succ(p)) { Object item = p.item; if (p.isData) { if (item != null && item != p && o.equals(item)) return true; } else if (item == null) break; } return false; } /** * Always returns {@code Integer.MAX_VALUE} because a * {@code LinkedTransferQueue} is not capacity constrained. * * @return {@code Integer.MAX_VALUE} (as specified by * {@link java.util.concurrent.BlockingQueue#remainingCapacity() * BlockingQueue.remainingCapacity}) */ public int remainingCapacity() { return Integer.MAX_VALUE; } /** * Saves the state to a stream (that is, serializes it). * * @serialData All of the elements (each an {@code E}) in * the proper order, followed by a null * @param s the stream */ private void writeObject(java.io.ObjectOutputStream s) throws java.io.IOException { s.defaultWriteObject(); for (E e : this) s.writeObject(e); // Use trailing null as sentinel s.writeObject(null); } /** * Reconstitutes the Queue instance from a stream (that is, * deserializes it). * * @param s the stream */ private void readObject(java.io.ObjectInputStream s) throws java.io.IOException, ClassNotFoundException { s.defaultReadObject(); for (;;) { @SuppressWarnings("unchecked") E item = (E) s.readObject(); if (item == null) break; else offer(item); } } // Unsafe mechanics private static final sun.misc.Unsafe UNSAFE; private static final long headOffset; private static final long tailOffset; private static final long sweepVotesOffset; static { try { UNSAFE = getUnsafe(); Class<?> k = LinkedTransferQueue.class; headOffset = UNSAFE.objectFieldOffset (k.getDeclaredField("head")); tailOffset = UNSAFE.objectFieldOffset (k.getDeclaredField("tail")); sweepVotesOffset = UNSAFE.objectFieldOffset (k.getDeclaredField("sweepVotes")); } catch (Exception e) { throw new Error(e); } } /** * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. * Replace with a simple call to Unsafe.getUnsafe when integrating * into a jdk. 
* * @return a sun.misc.Unsafe */ static sun.misc.Unsafe getUnsafe() { try { return sun.misc.Unsafe.getUnsafe(); } catch (SecurityException se) { try { return java.security.AccessController.doPrivileged (new java.security .PrivilegedExceptionAction<sun.misc.Unsafe>() { public sun.misc.Unsafe run() throws Exception { java.lang.reflect.Field f = sun.misc .Unsafe.class.getDeclaredField("theUnsafe"); f.setAccessible(true); return (sun.misc.Unsafe) f.get(null); }}); } catch (java.security.PrivilegedActionException e) { throw new RuntimeException("Could not initialize intrinsics", e.getCause()); } } } }
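/*
 * A minimal hand-off sketch using the public API defined above. This is an
 * illustration only: the surrounding method and exception handling are
 * assumed, and the names "q" and "consumer" are not part of this file.
 *
 *   LinkedTransferQueue<String> q = new LinkedTransferQueue<>();
 *
 *   Thread consumer = new Thread(() -> {
 *       try {
 *           System.out.println(q.take());  // blocks until an element is available
 *       } catch (InterruptedException ie) {
 *           Thread.currentThread().interrupt();
 *       }
 *   });
 *   consumer.start();
 *
 *   q.transfer("hello");   // returns only after the consumer has received it
 *   consumer.join();       // both calls may throw InterruptedException
 */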
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/jsr166y/Phaser.java
/* * Written by Doug Lea with assistance from members of JCP JSR-166 * Expert Group and released to the public domain, as explained at * http://creativecommons.org/publicdomain/zero/1.0/ */ package jsr166y; import water.H2ORuntime; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.LockSupport; /** * A reusable synchronization barrier, similar in functionality to * {@link java.util.concurrent.CyclicBarrier CyclicBarrier} and * {@link java.util.concurrent.CountDownLatch CountDownLatch} * but supporting more flexible usage. * * <p> <b>Registration.</b> Unlike the case for other barriers, the * number of parties <em>registered</em> to synchronize on a phaser * may vary over time. Tasks may be registered at any time (using * methods {@link #register}, {@link #bulkRegister}, or forms of * constructors establishing initial numbers of parties), and * optionally deregistered upon any arrival (using {@link * #arriveAndDeregister}). As is the case with most basic * synchronization constructs, registration and deregistration affect * only internal counts; they do not establish any further internal * bookkeeping, so tasks cannot query whether they are registered. * (However, you can introduce such bookkeeping by subclassing this * class.) * * <p> <b>Synchronization.</b> Like a {@code CyclicBarrier}, a {@code * Phaser} may be repeatedly awaited. Method {@link * #arriveAndAwaitAdvance} has effect analogous to {@link * java.util.concurrent.CyclicBarrier#await CyclicBarrier.await}. Each * generation of a phaser has an associated phase number. The phase * number starts at zero, and advances when all parties arrive at the * phaser, wrapping around to zero after reaching {@code * Integer.MAX_VALUE}. The use of phase numbers enables independent * control of actions upon arrival at a phaser and upon awaiting * others, via two kinds of methods that may be invoked by any * registered party: * * <ul> * * <li> <b>Arrival.</b> Methods {@link #arrive} and * {@link #arriveAndDeregister} record arrival. These methods * do not block, but return an associated <em>arrival phase * number</em>; that is, the phase number of the phaser to which * the arrival applied. When the final party for a given phase * arrives, an optional action is performed and the phase * advances. These actions are performed by the party * triggering a phase advance, and are arranged by overriding * method {@link #onAdvance(int, int)}, which also controls * termination. Overriding this method is similar to, but more * flexible than, providing a barrier action to a {@code * CyclicBarrier}. * * <li> <b>Waiting.</b> Method {@link #awaitAdvance} requires an * argument indicating an arrival phase number, and returns when * the phaser advances to (or is already at) a different phase. * Unlike similar constructions using {@code CyclicBarrier}, * method {@code awaitAdvance} continues to wait even if the * waiting thread is interrupted. Interruptible and timeout * versions are also available, but exceptions encountered while * tasks wait interruptibly or with timeout do not change the * state of the phaser. If necessary, you can perform any * associated recovery within handlers of those exceptions, * often after invoking {@code forceTermination}. Phasers may * also be used by tasks executing in a {@link ForkJoinPool}, * which will ensure sufficient parallelism to execute tasks * when others are blocked waiting for a phase to advance. 
* * </ul> * * <p> <b>Termination.</b> A phaser may enter a <em>termination</em> * state, that may be checked using method {@link #isTerminated}. Upon * termination, all synchronization methods immediately return without * waiting for advance, as indicated by a negative return value. * Similarly, attempts to register upon termination have no effect. * Termination is triggered when an invocation of {@code onAdvance} * returns {@code true}. The default implementation returns {@code * true} if a deregistration has caused the number of registered * parties to become zero. As illustrated below, when phasers control * actions with a fixed number of iterations, it is often convenient * to override this method to cause termination when the current phase * number reaches a threshold. Method {@link #forceTermination} is * also available to abruptly release waiting threads and allow them * to terminate. * * <p> <b>Tiering.</b> Phasers may be <em>tiered</em> (i.e., * constructed in tree structures) to reduce contention. Phasers with * large numbers of parties that would otherwise experience heavy * synchronization contention costs may instead be set up so that * groups of sub-phasers share a common parent. This may greatly * increase throughput even though it incurs greater per-operation * overhead. * * <p>In a tree of tiered phasers, registration and deregistration of * child phasers with their parent are managed automatically. * Whenever the number of registered parties of a child phaser becomes * non-zero (as established in the {@link #Phaser(Phaser,int)} * constructor, {@link #register}, or {@link #bulkRegister}), the * child phaser is registered with its parent. Whenever the number of * registered parties becomes zero as the result of an invocation of * {@link #arriveAndDeregister}, the child phaser is deregistered * from its parent. * * <p><b>Monitoring.</b> While synchronization methods may be invoked * only by registered parties, the current state of a phaser may be * monitored by any caller. At any given moment there are {@link * #getRegisteredParties} parties in total, of which {@link * #getArrivedParties} have arrived at the current phase ({@link * #getPhase}). When the remaining ({@link #getUnarrivedParties}) * parties arrive, the phase advances. The values returned by these * methods may reflect transient states and so are not in general * useful for synchronization control. Method {@link #toString} * returns snapshots of these state queries in a form convenient for * informal monitoring. * * <p><b>Sample usages:</b> * * <p>A {@code Phaser} may be used instead of a {@code CountDownLatch} * to control a one-shot action serving a variable number of parties. 
* The typical idiom is for the method setting this up to first * register, then start the actions, then deregister, as in: * * <pre> {@code * void runTasks(List<Runnable> tasks) { * final Phaser phaser = new Phaser(1); // "1" to register self * // create and start threads * for (final Runnable task : tasks) { * phaser.register(); * new Thread() { * public void run() { * phaser.arriveAndAwaitAdvance(); // await all creation * task.run(); * } * }.start(); * } * * // allow threads to start and deregister self * phaser.arriveAndDeregister(); * }}</pre> * * <p>One way to cause a set of threads to repeatedly perform actions * for a given number of iterations is to override {@code onAdvance}: * * <pre> {@code * void startTasks(List<Runnable> tasks, final int iterations) { * final Phaser phaser = new Phaser() { * protected boolean onAdvance(int phase, int registeredParties) { * return phase >= iterations || registeredParties == 0; * } * }; * phaser.register(); * for (final Runnable task : tasks) { * phaser.register(); * new Thread() { * public void run() { * do { * task.run(); * phaser.arriveAndAwaitAdvance(); * } while (!phaser.isTerminated()); * } * }.start(); * } * phaser.arriveAndDeregister(); // deregister self, don't wait * }}</pre> * * If the main task must later await termination, it * may re-register and then execute a similar loop: * <pre> {@code * // ... * phaser.register(); * while (!phaser.isTerminated()) * phaser.arriveAndAwaitAdvance();}</pre> * * <p>Related constructions may be used to await particular phase numbers * in contexts where you are sure that the phase will never wrap around * {@code Integer.MAX_VALUE}. For example: * * <pre> {@code * void awaitPhase(Phaser phaser, int phase) { * int p = phaser.register(); // assumes caller not already registered * while (p < phase) { * if (phaser.isTerminated()) * // ... deal with unexpected termination * else * p = phaser.arriveAndAwaitAdvance(); * } * phaser.arriveAndDeregister(); * }}</pre> * * * <p>To create a set of {@code n} tasks using a tree of phasers, you * could use code of the following form, assuming a Task class with a * constructor accepting a {@code Phaser} that it registers with upon * construction. After invocation of {@code build(new Task[n], 0, n, * new Phaser())}, these tasks could then be started, for example by * submitting to a pool: * * <pre> {@code * void build(Task[] tasks, int lo, int hi, Phaser ph) { * if (hi - lo > TASKS_PER_PHASER) { * for (int i = lo; i < hi; i += TASKS_PER_PHASER) { * int j = Math.min(i + TASKS_PER_PHASER, hi); * build(tasks, i, j, new Phaser(ph)); * } * } else { * for (int i = lo; i < hi; ++i) * tasks[i] = new Task(ph); * // assumes new Task(ph) performs ph.register() * } * }}</pre> * * The best value of {@code TASKS_PER_PHASER} depends mainly on * expected synchronization rates. A value as low as four may * be appropriate for extremely small per-phase task bodies (thus * high rates), or up to hundreds for extremely large ones. * * <p><b>Implementation notes</b>: This implementation restricts the * maximum number of parties to 65535. Attempts to register additional * parties result in {@code IllegalStateException}. However, you can and * should create tiered phasers to accommodate arbitrarily large sets * of participants. * * @since 1.7 * @author Doug Lea */ public class Phaser { /* * This class implements an extension of X10 "clocks". Thanks to * Vijay Saraswat for the idea, and to Vivek Sarkar for * enhancements to extend functionality. 
*/ /** * Primary state representation, holding four bit-fields: * * unarrived -- the number of parties yet to hit barrier (bits 0-15) * parties -- the number of parties to wait (bits 16-31) * phase -- the generation of the barrier (bits 32-62) * terminated -- set if barrier is terminated (bit 63 / sign) * * Except that a phaser with no registered parties is * distinguished by the otherwise illegal state of having zero * parties and one unarrived parties (encoded as EMPTY below). * * To efficiently maintain atomicity, these values are packed into * a single (atomic) long. Good performance relies on keeping * state decoding and encoding simple, and keeping race windows * short. * * All state updates are performed via CAS except initial * registration of a sub-phaser (i.e., one with a non-null * parent). In this (relatively rare) case, we use built-in * synchronization to lock while first registering with its * parent. * * The phase of a subphaser is allowed to lag that of its * ancestors until it is actually accessed -- see method * reconcileState. */ private volatile long state; private static final int MAX_PARTIES = 0xffff; private static final int MAX_PHASE = Integer.MAX_VALUE; private static final int PARTIES_SHIFT = 16; private static final int PHASE_SHIFT = 32; private static final int UNARRIVED_MASK = 0xffff; // to mask ints private static final long PARTIES_MASK = 0xffff0000L; // to mask longs private static final long COUNTS_MASK = 0xffffffffL; private static final long TERMINATION_BIT = 1L << 63; // some special values private static final int ONE_ARRIVAL = 1; private static final int ONE_PARTY = 1 << PARTIES_SHIFT; private static final int ONE_DEREGISTER = ONE_ARRIVAL|ONE_PARTY; private static final int EMPTY = 1; // The following unpacking methods are usually manually inlined private static int unarrivedOf(long s) { int counts = (int)s; return (counts == EMPTY) ? 0 : (counts & UNARRIVED_MASK); } private static int partiesOf(long s) { return (int)s >>> PARTIES_SHIFT; } private static int phaseOf(long s) { return (int)(s >>> PHASE_SHIFT); } private static int arrivedOf(long s) { int counts = (int)s; return (counts == EMPTY) ? 0 : (counts >>> PARTIES_SHIFT) - (counts & UNARRIVED_MASK); } /** * The parent of this phaser, or null if none */ private final Phaser parent; /** * The root of phaser tree. Equals this if not in a tree. */ private final Phaser root; /** * Heads of Treiber stacks for waiting threads. To eliminate * contention when releasing some threads while adding others, we * use two of them, alternating across even and odd phases. * Subphasers share queues with root to speed up releases. */ private final AtomicReference<QNode> evenQ; private final AtomicReference<QNode> oddQ; private AtomicReference<QNode> queueFor(int phase) { return ((phase & 1) == 0) ? evenQ : oddQ; } /** * Returns message string for bounds exceptions on arrival. */ private String badArrive(long s) { return "Attempted arrival of unregistered party for " + stateToString(s); } /** * Returns message string for bounds exceptions on registration. */ private String badRegister(long s) { return "Attempt to register more than " + MAX_PARTIES + " parties for " + stateToString(s); } /** * Main implementation for methods arrive and arriveAndDeregister. * Manually tuned to speed up and minimize race windows for the * common case of just decrementing unarrived field. 
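     *
     * As a worked illustration of the state encoding described above (the
     * numbers are made up): a root phaser at phase 5 with 3 registered
     * parties, 1 of which has already arrived, is encoded as
     *
     *   state == (5L << PHASE_SHIFT) | (3L << PARTIES_SHIFT) | 2
     *
     * so that unarrivedOf(state) == 2, partiesOf(state) == 3,
     * phaseOf(state) == 5, and arrivedOf(state) == 1; in the common case an
     * arrive() simply CASes state down by ONE_ARRIVAL.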
* * @param adjust value to subtract from state; * ONE_ARRIVAL for arrive, * ONE_DEREGISTER for arriveAndDeregister */ private int doArrive(int adjust) { final Phaser root = this.root; for (;;) { long s = (root == this) ? state : reconcileState(); int phase = (int)(s >>> PHASE_SHIFT); if (phase < 0) return phase; int counts = (int)s; int unarrived = (counts == EMPTY) ? 0 : (counts & UNARRIVED_MASK); if (unarrived <= 0) throw new IllegalStateException(badArrive(s)); if (UNSAFE.compareAndSwapLong(this, stateOffset, s, s-=adjust)) { if (unarrived == 1) { long n = s & PARTIES_MASK; // base of next state int nextUnarrived = (int)n >>> PARTIES_SHIFT; if (root == this) { if (onAdvance(phase, nextUnarrived)) n |= TERMINATION_BIT; else if (nextUnarrived == 0) n |= EMPTY; else n |= nextUnarrived; int nextPhase = (phase + 1) & MAX_PHASE; n |= (long)nextPhase << PHASE_SHIFT; UNSAFE.compareAndSwapLong(this, stateOffset, s, n); releaseWaiters(phase); } else if (nextUnarrived == 0) { // propagate deregistration phase = parent.doArrive(ONE_DEREGISTER); UNSAFE.compareAndSwapLong(this, stateOffset, s, s | EMPTY); } else phase = parent.doArrive(ONE_ARRIVAL); } return phase; } } } /** * Implementation of register, bulkRegister * * @param registrations number to add to both parties and * unarrived fields. Must be greater than zero. */ private int doRegister(int registrations) { // adjustment to state long adjust = ((long)registrations << PARTIES_SHIFT) | registrations; final Phaser parent = this.parent; int phase; for (;;) { long s = (parent == null) ? state : reconcileState(); int counts = (int)s; int parties = counts >>> PARTIES_SHIFT; int unarrived = counts & UNARRIVED_MASK; if (registrations > MAX_PARTIES - parties) throw new IllegalStateException(badRegister(s)); phase = (int)(s >>> PHASE_SHIFT); if (phase < 0) break; if (counts != EMPTY) { // not 1st registration if (parent == null || reconcileState() == s) { if (unarrived == 0) // wait out advance root.internalAwaitAdvance(phase, null); else if (UNSAFE.compareAndSwapLong(this, stateOffset, s, s + adjust)) break; } } else if (parent == null) { // 1st root registration long next = ((long)phase << PHASE_SHIFT) | adjust; if (UNSAFE.compareAndSwapLong(this, stateOffset, s, next)) break; } else { synchronized (this) { // 1st sub registration if (state == s) { // recheck under lock phase = parent.doRegister(1); if (phase < 0) break; // finish registration whenever parent registration // succeeded, even when racing with termination, // since these are part of the same "transaction". while (!UNSAFE.compareAndSwapLong (this, stateOffset, s, ((long)phase << PHASE_SHIFT) | adjust)) { s = state; phase = (int)(root.state >>> PHASE_SHIFT); // assert (int)s == EMPTY; } break; } } } } return phase; } /** * Resolves lagged phase propagation from root if necessary. * Reconciliation normally occurs when root has advanced but * subphasers have not yet done so, in which case they must finish * their own advance by setting unarrived to parties (or if * parties is zero, resetting to unregistered EMPTY state). * * @return reconciled state */ private long reconcileState() { final Phaser root = this.root; long s = state; if (root != this) { int phase, p; // CAS to root phase with current parties, tripping unarrived while ((phase = (int)(root.state >>> PHASE_SHIFT)) != (int)(s >>> PHASE_SHIFT) && !UNSAFE.compareAndSwapLong (this, stateOffset, s, s = (((long)phase << PHASE_SHIFT) | ((phase < 0) ? (s & COUNTS_MASK) : (((p = (int)s >>> PARTIES_SHIFT) == 0) ? 
EMPTY : ((s & PARTIES_MASK) | p)))))) s = state; } return s; } /** * Creates a new phaser with no initially registered parties, no * parent, and initial phase number 0. Any thread using this * phaser will need to first register for it. */ public Phaser() { this(null, 0); } /** * Creates a new phaser with the given number of registered * unarrived parties, no parent, and initial phase number 0. * * @param parties the number of parties required to advance to the * next phase * @throws IllegalArgumentException if parties less than zero * or greater than the maximum number of parties supported */ public Phaser(int parties) { this(null, parties); } /** * Equivalent to {@link #Phaser(Phaser, int) Phaser(parent, 0)}. * * @param parent the parent phaser */ public Phaser(Phaser parent) { this(parent, 0); } /** * Creates a new phaser with the given parent and number of * registered unarrived parties. When the given parent is non-null * and the given number of parties is greater than zero, this * child phaser is registered with its parent. * * @param parent the parent phaser * @param parties the number of parties required to advance to the * next phase * @throws IllegalArgumentException if parties less than zero * or greater than the maximum number of parties supported */ public Phaser(Phaser parent, int parties) { if (parties >>> PARTIES_SHIFT != 0) throw new IllegalArgumentException("Illegal number of parties"); int phase = 0; this.parent = parent; if (parent != null) { final Phaser root = parent.root; this.root = root; this.evenQ = root.evenQ; this.oddQ = root.oddQ; if (parties != 0) phase = parent.doRegister(1); } else { this.root = this; this.evenQ = new AtomicReference<>(); this.oddQ = new AtomicReference<>(); } this.state = (parties == 0) ? (long)EMPTY : ((long)phase << PHASE_SHIFT) | ((long)parties << PARTIES_SHIFT) | ((long)parties); } /** * Adds a new unarrived party to this phaser. If an ongoing * invocation of {@link #onAdvance} is in progress, this method * may await its completion before returning. If this phaser has * a parent, and this phaser previously had no registered parties, * this child phaser is also registered with its parent. If * this phaser is terminated, the attempt to register has * no effect, and a negative value is returned. * * @return the arrival phase number to which this registration * applied. If this value is negative, then this phaser has * terminated, in which case registration has no effect. * @throws IllegalStateException if attempting to register more * than the maximum supported number of parties */ public int register() { return doRegister(1); } /** * Adds the given number of new unarrived parties to this phaser. * If an ongoing invocation of {@link #onAdvance} is in progress, * this method may await its completion before returning. If this * phaser has a parent, and the given number of parties is greater * than zero, and this phaser previously had no registered * parties, this child phaser is also registered with its parent. * If this phaser is terminated, the attempt to register has no * effect, and a negative value is returned. * * @param parties the number of additional parties required to * advance to the next phase * @return the arrival phase number to which this registration * applied. If this value is negative, then this phaser has * terminated, in which case registration has no effect. 
* @throws IllegalStateException if attempting to register more * than the maximum supported number of parties * @throws IllegalArgumentException if {@code parties < 0} */ public int bulkRegister(int parties) { if (parties < 0) throw new IllegalArgumentException(); if (parties == 0) return getPhase(); return doRegister(parties); } /** * Arrives at this phaser, without waiting for others to arrive. * * <p>It is a usage error for an unregistered party to invoke this * method. However, this error may result in an {@code * IllegalStateException} only upon some subsequent operation on * this phaser, if ever. * * @return the arrival phase number, or a negative value if terminated * @throws IllegalStateException if not terminated and the number * of unarrived parties would become negative */ public int arrive() { return doArrive(ONE_ARRIVAL); } /** * Arrives at this phaser and deregisters from it without waiting * for others to arrive. Deregistration reduces the number of * parties required to advance in future phases. If this phaser * has a parent, and deregistration causes this phaser to have * zero parties, this phaser is also deregistered from its parent. * * <p>It is a usage error for an unregistered party to invoke this * method. However, this error may result in an {@code * IllegalStateException} only upon some subsequent operation on * this phaser, if ever. * * @return the arrival phase number, or a negative value if terminated * @throws IllegalStateException if not terminated and the number * of registered or unarrived parties would become negative */ public int arriveAndDeregister() { return doArrive(ONE_DEREGISTER); } /** * Arrives at this phaser and awaits others. Equivalent in effect * to {@code awaitAdvance(arrive())}. If you need to await with * interruption or timeout, you can arrange this with an analogous * construction using one of the other forms of the {@code * awaitAdvance} method. If instead you need to deregister upon * arrival, use {@code awaitAdvance(arriveAndDeregister())}. * * <p>It is a usage error for an unregistered party to invoke this * method. However, this error may result in an {@code * IllegalStateException} only upon some subsequent operation on * this phaser, if ever. * * @return the arrival phase number, or the (negative) * {@linkplain #getPhase() current phase} if terminated * @throws IllegalStateException if not terminated and the number * of unarrived parties would become negative */ public int arriveAndAwaitAdvance() { // Specialization of doArrive+awaitAdvance eliminating some reads/paths final Phaser root = this.root; for (;;) { long s = (root == this) ? state : reconcileState(); int phase = (int)(s >>> PHASE_SHIFT); if (phase < 0) return phase; int counts = (int)s; int unarrived = (counts == EMPTY) ? 
0 : (counts & UNARRIVED_MASK); if (unarrived <= 0) throw new IllegalStateException(badArrive(s)); if (UNSAFE.compareAndSwapLong(this, stateOffset, s, s -= ONE_ARRIVAL)) { if (unarrived > 1) return root.internalAwaitAdvance(phase, null); if (root != this) return parent.arriveAndAwaitAdvance(); long n = s & PARTIES_MASK; // base of next state int nextUnarrived = (int)n >>> PARTIES_SHIFT; if (onAdvance(phase, nextUnarrived)) n |= TERMINATION_BIT; else if (nextUnarrived == 0) n |= EMPTY; else n |= nextUnarrived; int nextPhase = (phase + 1) & MAX_PHASE; n |= (long)nextPhase << PHASE_SHIFT; if (!UNSAFE.compareAndSwapLong(this, stateOffset, s, n)) return (int)(state >>> PHASE_SHIFT); // terminated releaseWaiters(phase); return nextPhase; } } } /** * Awaits the phase of this phaser to advance from the given phase * value, returning immediately if the current phase is not equal * to the given phase value or this phaser is terminated. * * @param phase an arrival phase number, or negative value if * terminated; this argument is normally the value returned by a * previous call to {@code arrive} or {@code arriveAndDeregister}. * @return the next arrival phase number, or the argument if it is * negative, or the (negative) {@linkplain #getPhase() current phase} * if terminated */ public int awaitAdvance(int phase) { final Phaser root = this.root; long s = (root == this) ? state : reconcileState(); int p = (int)(s >>> PHASE_SHIFT); if (phase < 0) return phase; if (p == phase) return root.internalAwaitAdvance(phase, null); return p; } /** * Awaits the phase of this phaser to advance from the given phase * value, throwing {@code InterruptedException} if interrupted * while waiting, or returning immediately if the current phase is * not equal to the given phase value or this phaser is * terminated. * * @param phase an arrival phase number, or negative value if * terminated; this argument is normally the value returned by a * previous call to {@code arrive} or {@code arriveAndDeregister}. * @return the next arrival phase number, or the argument if it is * negative, or the (negative) {@linkplain #getPhase() current phase} * if terminated * @throws InterruptedException if thread interrupted while waiting */ public int awaitAdvanceInterruptibly(int phase) throws InterruptedException { final Phaser root = this.root; long s = (root == this) ? state : reconcileState(); int p = (int)(s >>> PHASE_SHIFT); if (phase < 0) return phase; if (p == phase) { QNode node = new QNode(this, phase, true, false, 0L); p = root.internalAwaitAdvance(phase, node); if (node.wasInterrupted) throw new InterruptedException(); } return p; } /** * Awaits the phase of this phaser to advance from the given phase * value or the given timeout to elapse, throwing {@code * InterruptedException} if interrupted while waiting, or * returning immediately if the current phase is not equal to the * given phase value or this phaser is terminated. * * @param phase an arrival phase number, or negative value if * terminated; this argument is normally the value returned by a * previous call to {@code arrive} or {@code arriveAndDeregister}. 
* @param timeout how long to wait before giving up, in units of * {@code unit} * @param unit a {@code TimeUnit} determining how to interpret the * {@code timeout} parameter * @return the next arrival phase number, or the argument if it is * negative, or the (negative) {@linkplain #getPhase() current phase} * if terminated * @throws InterruptedException if thread interrupted while waiting * @throws TimeoutException if timed out while waiting */ public int awaitAdvanceInterruptibly(int phase, long timeout, TimeUnit unit) throws InterruptedException, TimeoutException { long nanos = unit.toNanos(timeout); final Phaser root = this.root; long s = (root == this) ? state : reconcileState(); int p = (int)(s >>> PHASE_SHIFT); if (phase < 0) return phase; if (p == phase) { QNode node = new QNode(this, phase, true, true, nanos); p = root.internalAwaitAdvance(phase, node); if (node.wasInterrupted) throw new InterruptedException(); else if (p == phase) throw new TimeoutException(); } return p; } /** * Forces this phaser to enter termination state. Counts of * registered parties are unaffected. If this phaser is a member * of a tiered set of phasers, then all of the phasers in the set * are terminated. If this phaser is already terminated, this * method has no effect. This method may be useful for * coordinating recovery after one or more tasks encounter * unexpected exceptions. */ public void forceTermination() { // Only need to change root state final Phaser root = this.root; long s; while ((s = root.state) >= 0) { if (UNSAFE.compareAndSwapLong(root, stateOffset, s, s | TERMINATION_BIT)) { // signal all threads releaseWaiters(0); // Waiters on evenQ releaseWaiters(1); // Waiters on oddQ return; } } } /** * Returns the current phase number. The maximum phase number is * {@code Integer.MAX_VALUE}, after which it restarts at * zero. Upon termination, the phase number is negative, * in which case the prevailing phase prior to termination * may be obtained via {@code getPhase() + Integer.MIN_VALUE}. * * @return the phase number, or a negative value if terminated */ public final int getPhase() { return (int)(root.state >>> PHASE_SHIFT); } /** * Returns the number of parties registered at this phaser. * * @return the number of parties */ public int getRegisteredParties() { return partiesOf(state); } /** * Returns the number of registered parties that have arrived at * the current phase of this phaser. If this phaser has terminated, * the returned value is meaningless and arbitrary. * * @return the number of arrived parties */ public int getArrivedParties() { return arrivedOf(reconcileState()); } /** * Returns the number of registered parties that have not yet * arrived at the current phase of this phaser. If this phaser has * terminated, the returned value is meaningless and arbitrary. * * @return the number of unarrived parties */ public int getUnarrivedParties() { return unarrivedOf(reconcileState()); } /** * Returns the parent of this phaser, or {@code null} if none. * * @return the parent of this phaser, or {@code null} if none */ public Phaser getParent() { return parent; } /** * Returns the root ancestor of this phaser, which is the same as * this phaser if it has no parent. * * @return the root ancestor of this phaser */ public Phaser getRoot() { return root; } /** * Returns {@code true} if this phaser has been terminated. 
* * @return {@code true} if this phaser has been terminated */ public boolean isTerminated() { return root.state < 0L; } /** * Overridable method to perform an action upon impending phase * advance, and to control termination. This method is invoked * upon arrival of the party advancing this phaser (when all other * waiting parties are dormant). If this method returns {@code * true}, this phaser will be set to a final termination state * upon advance, and subsequent calls to {@link #isTerminated} * will return true. Any (unchecked) Exception or Error thrown by * an invocation of this method is propagated to the party * attempting to advance this phaser, in which case no advance * occurs. * * <p>The arguments to this method provide the state of the phaser * prevailing for the current transition. The effects of invoking * arrival, registration, and waiting methods on this phaser from * within {@code onAdvance} are unspecified and should not be * relied on. * * <p>If this phaser is a member of a tiered set of phasers, then * {@code onAdvance} is invoked only for its root phaser on each * advance. * * <p>To support the most common use cases, the default * implementation of this method returns {@code true} when the * number of registered parties has become zero as the result of a * party invoking {@code arriveAndDeregister}. You can disable * this behavior, thus enabling continuation upon future * registrations, by overriding this method to always return * {@code false}: * * <pre> {@code * Phaser phaser = new Phaser() { * protected boolean onAdvance(int phase, int parties) { return false; } * }}</pre> * * @param phase the current phase number on entry to this method, * before this phaser is advanced * @param registeredParties the current number of registered parties * @return {@code true} if this phaser should terminate */ protected boolean onAdvance(int phase, int registeredParties) { return registeredParties == 0; } /** * Returns a string identifying this phaser, as well as its * state. The state, in brackets, includes the String {@code * "phase = "} followed by the phase number, {@code "parties = "} * followed by the number of registered parties, and {@code * "arrived = "} followed by the number of arrived parties. * * @return a string identifying this phaser, as well as its state */ public String toString() { return stateToString(reconcileState()); } /** * Implementation of toString and string-based error messages */ private String stateToString(long s) { return super.toString() + "[phase = " + phaseOf(s) + " parties = " + partiesOf(s) + " arrived = " + arrivedOf(s) + "]"; } // Waiting mechanics /** * Removes and signals threads from queue for phase. */ private void releaseWaiters(int phase) { QNode q; // first element of queue Thread t; // its thread AtomicReference<QNode> head = (phase & 1) == 0 ? evenQ : oddQ; while ((q = head.get()) != null && q.phase != (int)(root.state >>> PHASE_SHIFT)) { if (head.compareAndSet(q, q.next) && (t = q.thread) != null) { q.thread = null; LockSupport.unpark(t); } } } /** * Variant of releaseWaiters that additionally tries to remove any * nodes no longer waiting for advance due to timeout or * interrupt. Currently, nodes are removed only if they are at * head of queue, which suffices to reduce memory footprint in * most usages. * * @return current phase on exit */ private int abortWait(int phase) { AtomicReference<QNode> head = (phase & 1) == 0 ? 
evenQ : oddQ; for (;;) { Thread t; QNode q = head.get(); int p = (int)(root.state >>> PHASE_SHIFT); if (q == null || ((t = q.thread) != null && q.phase == p)) return p; if (head.compareAndSet(q, q.next) && t != null) { q.thread = null; LockSupport.unpark(t); } } } /** The number of CPUs, for spin control */ private static final int NCPU = H2ORuntime.availableProcessors(); /** * The number of times to spin before blocking while waiting for * advance, per arrival while waiting. On multiprocessors, fully * blocking and waking up a large number of threads all at once is * usually a very slow process, so we use rechargeable spins to * avoid it when threads regularly arrive: When a thread in * internalAwaitAdvance notices another arrival before blocking, * and there appear to be enough CPUs available, it spins * SPINS_PER_ARRIVAL more times before blocking. The value trades * off good-citizenship vs big unnecessary slowdowns. */ static final int SPINS_PER_ARRIVAL = (NCPU < 2) ? 1 : 1 << 8; /** * Possibly blocks and waits for phase to advance unless aborted. * Call only on root phaser. * * @param phase current phase * @param node if non-null, the wait node to track interrupt and timeout; * if null, denotes noninterruptible wait * @return current phase */ private int internalAwaitAdvance(int phase, QNode node) { // assert root == this; releaseWaiters(phase-1); // ensure old queue clean boolean queued = false; // true when node is enqueued int lastUnarrived = 0; // to increase spins upon change int spins = SPINS_PER_ARRIVAL; long s; int p; while ((p = (int)((s = state) >>> PHASE_SHIFT)) == phase) { if (node == null) { // spinning in noninterruptible mode int unarrived = (int)s & UNARRIVED_MASK; if (unarrived != lastUnarrived && (lastUnarrived = unarrived) < NCPU) spins += SPINS_PER_ARRIVAL; boolean interrupted = Thread.interrupted(); if (interrupted || --spins < 0) { // need node to record intr node = new QNode(this, phase, false, false, 0L); node.wasInterrupted = interrupted; } } else if (node.isReleasable()) // done or aborted break; else if (!queued) { // push onto queue AtomicReference<QNode> head = (phase & 1) == 0 ? evenQ : oddQ; QNode q = node.next = head.get(); if ((q == null || q.phase == phase) && (int)(state >>> PHASE_SHIFT) == phase) // avoid stale enq queued = head.compareAndSet(q, node); } else { try { ForkJoinPool.managedBlock(node); } catch (InterruptedException ie) { node.wasInterrupted = true; } } } if (node != null) { if (node.thread != null) node.thread = null; // avoid need for unpark() if (node.wasInterrupted && !node.interruptible) Thread.currentThread().interrupt(); if (p == phase && (p = (int)(state >>> PHASE_SHIFT)) == phase) return abortWait(phase); // possibly clean up on abort } releaseWaiters(phase); return p; } /** * Wait nodes for Treiber stack representing wait queue */ static final class QNode implements ForkJoinPool.ManagedBlocker { final Phaser phaser; final int phase; final boolean interruptible; final boolean timed; boolean wasInterrupted; long nanos; long lastTime; volatile Thread thread; // nulled to cancel wait QNode next; QNode(Phaser phaser, int phase, boolean interruptible, boolean timed, long nanos) { this.phaser = phaser; this.phase = phase; this.interruptible = interruptible; this.nanos = nanos; this.timed = timed; this.lastTime = timed ? 
System.nanoTime() : 0L; thread = Thread.currentThread(); } public boolean isReleasable() { if (thread == null) return true; if (phaser.getPhase() != phase) { thread = null; return true; } if (Thread.interrupted()) wasInterrupted = true; if (wasInterrupted && interruptible) { thread = null; return true; } if (timed) { if (nanos > 0L) { long now = System.nanoTime(); nanos -= now - lastTime; lastTime = now; } if (nanos <= 0L) { thread = null; return true; } } return false; } public boolean block() { if (isReleasable()) return true; else if (!timed) LockSupport.park(this); else if (nanos > 0) LockSupport.parkNanos(this, nanos); return isReleasable(); } } // Unsafe mechanics private static final sun.misc.Unsafe UNSAFE; private static final long stateOffset; static { try { UNSAFE = getUnsafe(); Class<?> k = Phaser.class; stateOffset = UNSAFE.objectFieldOffset (k.getDeclaredField("state")); } catch (Exception e) { throw new Error(e); } } /** * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. * Replace with a simple call to Unsafe.getUnsafe when integrating * into a jdk. * * @return a sun.misc.Unsafe */ private static sun.misc.Unsafe getUnsafe() { try { return sun.misc.Unsafe.getUnsafe(); } catch (SecurityException se) { try { return java.security.AccessController.doPrivileged (new java.security .PrivilegedExceptionAction<sun.misc.Unsafe>() { public sun.misc.Unsafe run() throws Exception { java.lang.reflect.Field f = sun.misc .Unsafe.class.getDeclaredField("theUnsafe"); f.setAccessible(true); return (sun.misc.Unsafe) f.get(null); }}); } catch (java.security.PrivilegedActionException e) { throw new RuntimeException("Could not initialize intrinsics", e.getCause()); } } } }
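A minimal usage sketch of the barrier API documented above: parties register via the constructor, synchronize with arriveAndAwaitAdvance, drop out with arriveAndDeregister, and an onAdvance override keeps the phaser alive once all parties leave. The example targets java.util.concurrent.Phaser, whose public methods match the fork shown here; the task count and printed messages are illustrative only, not taken from this source.

import java.util.concurrent.Phaser;

public class PhaserBarrierSketch {
  public static void main(String[] args) {
    final int nTasks = 3;
    // Register one party per worker plus one for the main thread.
    final Phaser phaser = new Phaser(nTasks + 1) {
      @Override protected boolean onAdvance(int phase, int registeredParties) {
        return false; // keep the phaser alive even after all parties deregister
      }
    };
    for (int i = 0; i < nTasks; i++) {
      final int id = i;
      new Thread(() -> {
        System.out.println("task " + id + " arrived at phase " + phaser.getPhase());
        phaser.arriveAndAwaitAdvance();  // block until all registered parties arrive
        System.out.println("task " + id + " passed the barrier");
        phaser.arriveAndDeregister();    // reduce the party count for future phases
      }).start();
    }
    phaser.arriveAndAwaitAdvance();      // main thread joins the phase-0 barrier
    phaser.arriveAndDeregister();        // and leaves before the next phase
  }
}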
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/AbstractH2OExtension.java
package water; import water.init.AbstractBuildVersion; import water.util.Log; public abstract class AbstractH2OExtension { /** * @return The name of this extension. */ public abstract String getExtensionName(); /** * Any up-front initialization that needs to happen before H2O is started. * This is called in {@code H2OApp} before {@code H2O.main()} is called. */ public void init() {} /** * Called during the start up process of {@code H2OApp}, after the local * network connections are opened. */ public void onLocalNodeStarted() {} /** * Print stuff (into System.out) for {@code java -jar h2o.jar -help} */ public void printHelp() {} /** * To be called by parseArguments() on a failure. * @param message Message to give to the user. */ public static void parseFailed(String message) { H2O.parseFailed(message); } /** * Parse arguments used by this extension. * Call parseFailed() above on a failure, which will exit H2O. * * @param args List of arguments this extension might want to consume. * @return Modified list with the ones consumed by this extension removed. */ public String[] parseArguments(String[] args) { return args; } /** * Validate arguments used by this extension. */ public void validateArguments() {} /** * Get extension-specific build information. * * @return build information. */ public AbstractBuildVersion getBuildVersion() { return AbstractBuildVersion.UNKNOWN_VERSION; } /** * Print a short message when the extension finishes initializing. */ public void printInitialized() { Log.info(getExtensionName() + " extension initialized"); } /** Is this extension enabled? */ public boolean isEnabled() { return true; } @Override public final String toString() { return getExtensionName(); } }
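A hypothetical extension illustrating the hook points above: consume one custom command-line flag in parseArguments, fail fast via parseFailed, and log once the local node is up. The class name, flag, and log message are invented for illustration and are not part of h2o-core.

import water.AbstractH2OExtension;
import water.util.Log;

import java.util.ArrayList;
import java.util.List;

public class ExampleExtension extends AbstractH2OExtension {
  private String _myFlag;

  @Override public String getExtensionName() { return "Example"; }

  @Override public String[] parseArguments(String[] args) {
    List<String> remaining = new ArrayList<>();
    for (int i = 0; i < args.length; i++) {
      if ("-myFlag".equals(args[i])) {
        if (i + 1 == args.length)
          parseFailed("-myFlag requires a value"); // exits H2O with a usage message
        else
          _myFlag = args[++i];
      } else {
        remaining.add(args[i]); // leave everything else for other extensions and H2O itself
      }
    }
    return remaining.toArray(new String[0]);
  }

  @Override public void onLocalNodeStarted() {
    Log.info("Example extension running with myFlag=" + _myFlag);
  }
}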
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/AbstractKey.java
package water; /** * Created by tomas on 4/25/16. */ public class AbstractKey { }
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/AnyThrow.java
package water; /** * Helps you avoid wrapping an exception into RTE. Just throws whatever you like. */ public class AnyThrow { public static void throwUnchecked(Throwable e) { AnyThrow.<RuntimeException>throwAny(e); } @SuppressWarnings("unchecked") private static <E extends Throwable> void throwAny(Throwable e) throws E { throw (E)e; } }
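A small usage sketch, assuming the typical pattern: rethrow a checked exception from a method whose signature declares none, without wrapping it in a RuntimeException. The helper method and stream handling are illustrative.

import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;

public class AnyThrowSketch {
  // No "throws IOException" in the signature, yet the caller still sees the original exception.
  static byte[] readSome(InputStream is) {
    try {
      byte[] buf = new byte[16];
      int n = is.read(buf);
      return Arrays.copyOf(buf, Math.max(n, 0));
    } catch (IOException ioe) {
      water.AnyThrow.throwUnchecked(ioe); // always throws; the compiler just doesn't know it
      return null;                        // unreachable, but required by javac
    }
  }
}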
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/Atomic.java
package water; /** * Atomic update of a Key * * @author <a href="mailto:cliffc@h2o.ai"></a> * @version 1.0 */ abstract public class Atomic<T extends Atomic> extends DTask<T> { protected Key _key; // Transaction key public Atomic(){ super(H2O.ATOMIC_PRIORITY); } public Atomic(H2O.H2OCountedCompleter completer){super(completer,H2O.ATOMIC_PRIORITY);} // User's function to be run atomically. The Key's Value is fetched from the // home STORE and passed in. The returned Value is atomically installed as // the new Value (and the function is retried until it runs atomically). The // original Value is supposed to be read-only. If the original Key misses // (no Value), one is created with 0 length and wrong Value._type to allow // the Key to be passed in (as part of the Value) abstract protected Value atomic( Value val ); /** Executed on the transaction key's <em>home</em> node after any successful * atomic update. Override this if you need to perform some action after * the update succeeds (eg cleanup). */ protected void onSuccess( Value old ){} /** Block until it completes, even if run remotely */ public final Atomic<T> invoke( Key key ) { RPC<Atomic<T>> rpc = fork(key); return (rpc == null ? this : rpc.get()); // Block for it } // Fork off public final RPC<Atomic<T>> fork(Key key) { _key = key; if( key.home() ) { // Key is home? compute2(); // Also, run it blocking/now return null; } else { // Else run it remotely return RPC.call(key.home_node(),this); } } // The (remote) workhorse: @Override public final void compute2() { assert _key.home() : "Atomic on wrong node; SELF="+H2O.SELF+ ", key_home="+_key.home_node()+", key_is_home="+_key.home()+", class="+getClass(); Futures fs = new Futures(); // Must block on all invalidates eventually Value val1 = DKV.get(_key); while( true ) { // Run users' function. This is supposed to read-only from val1 and // return new val2 to atomically install. Value val2 = atomic(val1); if( val2 == null ) { // ABORT: they gave up // Strongly order XTNs on same key, EVEN if aborting. Generally abort // means some interesting condition is already met, but perhaps met by // the exactly preceding XTN whose invalidates are still roaming about // the system. If we do not block, the Atomic.invoke might complete // before the invalidates, and the invoker might then do a DKV.get() // and get his original value - instead of inval & fetching afresh. if (val1 != null) val1.blockTillNoReaders(); // Prior XTN that made val1 may not yet have settled out; block for it break; } assert val1 != val2; // No returning the same Value // Attempt atomic update Value res = DKV.DputIfMatch(_key,val2,val1,fs); if( res == val1 ) { // Success? fs.blockForPending(); // Block for any pending invalidates on the atomic update onSuccess(val1); // Call user's post-XTN function break; } val1 = res; // Otherwise try again with the current value } // and retry _key = null; // No need for key no more, don't send it back tryComplete(); } }
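A sketch of the intended usage under stated assumptions: subclass Atomic, implement atomic(Value) as a read-only function of the old Value, and invoke it against a homed Key. The CounterIce class and the Value(Key, Iced) constructor used here are assumptions for illustration; they are not defined in this file.

import water.Atomic;
import water.Iced;
import water.Key;
import water.Value;

// Hypothetical atomic increment of a small counter stored under a Key in the K/V store.
public class AtomicCounterSketch {
  public static class CounterIce extends Iced<CounterIce> { long _n; }

  public static class IncrementCounter extends Atomic<IncrementCounter> {
    @Override protected Value atomic(Value old) {
      CounterIce prev = (old == null) ? new CounterIce() : (CounterIce) old.get();
      CounterIce next = new CounterIce();  // never mutate the old Value; build a fresh one
      next._n = prev._n + 1;
      return new Value(_key, next);        // installed only if nobody raced us; else retried
    }
  }

  public static void bump(Key key) {
    new IncrementCounter().invoke(key);    // blocks until the update (or retry loop) completes
  }
}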
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/AutoBuffer.java
package water; import java.io.*; import java.lang.reflect.Array; import java.net.*; import java.nio.*; import java.nio.channels.*; import java.util.ArrayList; import java.util.Random; import water.network.SocketChannelUtils; import water.util.Log; import water.util.StringUtils; import water.util.TwoDimTable; import static water.H2O.OptArgs.SYSTEM_PROP_PREFIX; /** A ByteBuffer backed mixed Input/Output streaming class, using Iced serialization. * * Reads/writes empty/fill the ByteBuffer as needed. When it is empty/full it * we go to the ByteChannel for more/less. Because DirectByteBuffers are * expensive to make, we keep a few pooled. * * When talking to a remote H2O node, switches between UDP and TCP transport * protocols depending on the message size. The TypeMap is not included, and * is assumed to exist on the remote H2O node. * * Supports direct NIO FileChannel read/write to disk, used during user-mode * swapping. The TypeMap is not included on write, and is assumed to be the * current map on read. * * Support read/write from byte[] - and this defeats the purpose of a * Streaming protocol, but is frequently handy for small structures. The * TypeMap is not included, and is assumed to be the current map on read. * * Supports read/write from a standard Stream, which by default assumes it is * NOT going in and out of the same Cloud, so the TypeMap IS included. The * serialized object can only be read back into the same minor version of H2O. * * @author <a href="mailto:cliffc@h2o.ai"></a> */ public final class AutoBuffer implements AutoCloseable { // Maximum size of an array we allow to allocate (the value is designed // to mimic the behavior of OpenJDK libraries) private static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; private static String H2O_SYSTEM_SERIALIZATION_IGNORE_VERSION = SYSTEM_PROP_PREFIX + "serialization.ignore.version"; // The direct ByteBuffer for schlorping data about. // Set to null to indicate the AutoBuffer is closed. ByteBuffer _bb; public String sourceName = "???"; public boolean isClosed() { return _bb == null ; } // The ByteChannel for moving data in or out. Could be a SocketChannel (for // a TCP connection) or a FileChannel (spill-to-disk) or a DatagramChannel // (for a UDP connection). Null on closed AutoBuffers. Null on initial // remote-writing AutoBuffers which are still deciding UDP vs TCP. Not-null // for open AutoBuffers doing file i/o or reading any TCP/UDP or having // written at least one buffer to TCP/UDP. private Channel _chan; // A Stream for moving data in. Null unless this AutoBuffer is // stream-based, in which case _chan field is null. This path supports // persistance: reading and writing objects from different H2O cluster // instances (but exactly the same H2O revision). The only required // similarity is same-classes-same-fields; changes here will probably // silently crash. If the fields are named the same but the semantics // differ, then again the behavior is probably silent crash. private InputStream _is; private short[] _typeMap; // Mapping from input stream map to current map, or null // If we need a SocketChannel, raise the priority so we get the I/O over // with. Do not want to have some TCP socket open, blocking the TCP channel // and then have the thread stalled out. If we raise the priority - be sure // to lower it again. Note this is for TCP channels ONLY, and only because // we are blocking another Node with I/O. 
private int _oldPrior = -1; // Where to send or receive data via TCP or UDP (choice made as we discover // how big the message is); used to lazily create a Channel. If NULL, then // _chan should be a pre-existing Channel, such as a FileChannel. final H2ONode _h2o; // TRUE for read-mode. FALSE for write-mode. Can be flipped for rapid turnaround. private boolean _read; // TRUE if this AutoBuffer has never advanced past the first "page" of data. // The UDP-flavor, port# and task fields are only valid until we read over // them when flipping the ByteBuffer to the next chunk of data. Used in // asserts all over the place. private boolean _firstPage; // Total size written out from 'new' to 'close'. Only updated when actually // reading or writing data, or after close(). For profiling only. int _size; //int _zeros, _arys; // More profiling: start->close msec, plus nano's spent in blocking I/O // calls. The difference between (close-start) and i/o msec is the time the // i/o thread spends doing other stuff (e.g. allocating Java objects or // (de)serializing). long _time_start_ms, _time_close_ms, _time_io_ns; // I/O persistence flavor: Value.ICE, NFS, HDFS, S3, TCP. Used to record I/O time. final byte _persist; // The assumed max UDP packetsize static final int MTU = 1500-8/*UDP packet header size*/; // Enable this to test random TCP fails on open or write static final Random RANDOM_TCP_DROP = null; //new Random(); static final java.nio.charset.Charset UTF_8 = java.nio.charset.Charset.forName("UTF-8"); /** Incoming TCP request. Make a read-mode AutoBuffer from the open Channel, * in this case without additional arguments, the requests comes from outside the H2O cluster * * */ public AutoBuffer(ByteChannel sock ) { this(sock, null, (short) 0); } /** Incoming TCP request. Make a read-mode AutoBuffer from the open Channel, * figure the originating H2ONode from the first few bytes read. * * remoteAddress set to null means that the communication is originating from non-h2o node, non-null value * represents the case where the communication is coming from h2o node. * */ public AutoBuffer( ByteChannel sock, InetAddress remoteAddress, short timestamp ) { _chan = sock; raisePriority(); // Make TCP priority high _bb = BBP_BIG.make(); // Get a big / TPC-sized ByteBuffer _bb.flip(); _read = true; // Reading by default _firstPage = true; // Read Inet from socket, port from the stream, figure out H2ONode if(remoteAddress!=null) { assert timestamp != 0; _h2o = H2ONode.intern(remoteAddress, getPort(), timestamp); }else{ // In case the communication originates from non-h2o node, we set _h2o node to null. // It is done for 2 reasons: // - H2ONode.intern creates a new thread and if there's a lot of connections // from non-h2o environment, it could end up with too many open files exception. // - H2OIntern also reads port (getPort()) and additional information which we do not send // in communication originating from non-h2o nodes _h2o = null; } _firstPage = true; // Yes, must reset this. _time_start_ms = System.currentTimeMillis(); _persist = Value.TCP; } /** Make an AutoBuffer to write to an H2ONode. Requests for full buffer will * open a TCP socket and roll through writing to the target. Smaller * requests will send via UDP. Small requests get ordered by priority, so * that e.g. NACK and ACKACK messages have priority over most anything else. * This helps in UDP floods to shut down flooding senders. 
*/ private byte _msg_priority; AutoBuffer( H2ONode h2o, byte priority ) { // If UDP goes via TCP, we write into a HBB up front, because this will be copied again // into a large outgoing buffer. _bb = ByteBuffer.wrap(new byte[16]).order(ByteOrder.nativeOrder()); _chan = null; // Channel made lazily only if we write alot _h2o = h2o; _read = false; // Writing by default _firstPage = true; // Filling first page assert _h2o != null; _time_start_ms = System.currentTimeMillis(); _persist = Value.TCP; _msg_priority = priority; } /** Spill-to/from-disk request. */ public AutoBuffer( FileChannel fc, boolean read, byte persist ) { _bb = BBP_BIG.make(); // Get a big / TPC-sized ByteBuffer _chan = fc; // Write to read/write _h2o = null; // File Channels never have an _h2o _read = read; // Mostly assert reading vs writing if( read ) _bb.flip(); _time_start_ms = System.currentTimeMillis(); _persist = persist; // One of Value.ICE, NFS, S3, HDFS } /** Read from UDP multicast. Same as the byte[]-read variant, except there is an H2O. */ AutoBuffer( DatagramPacket pack ) { _size = pack.getLength(); _bb = ByteBuffer.wrap(pack.getData(), 0, pack.getLength()).order(ByteOrder.nativeOrder()); _bb.position(0); _read = true; _firstPage = true; _chan = null; _h2o = H2ONode.intern(pack.getAddress(), getPort(), getTimestamp()); _persist = 0; // No persistance } /** Read from a UDP_TCP buffer; could be in the middle of a large buffer */ AutoBuffer( H2ONode h2o, byte[] buf, int off, int len ) { assert buf != null : "null fed to ByteBuffer.wrap"; _h2o = h2o; _bb = ByteBuffer.wrap(buf,off,len).order(ByteOrder.nativeOrder()); _chan = null; _read = true; _firstPage = true; _persist = 0; // No persistance _size = len; } /** Read from a fixed byte[]; should not be closed. */ public AutoBuffer( byte[] buf ) { this(null,buf,0, buf.length); } /** Write to an ever-expanding byte[]. Instead of calling {@link #close()}, * call {@link #buf()} to retrieve the final byte[]. */ public AutoBuffer( ) { _bb = ByteBuffer.wrap(new byte[16]).order(ByteOrder.nativeOrder()); _chan = null; _h2o = null; _read = false; _firstPage = true; _persist = 0; // No persistance } /** Write to a known sized byte[]. Instead of calling close(), call * {@link #bufClose()} to retrieve the final byte[]. */ public AutoBuffer( int len ) { _bb = ByteBuffer.wrap(MemoryManager.malloc1(len)).order(ByteOrder.nativeOrder()); _chan = null; _h2o = null; _read = false; _firstPage = true; _persist = 0; // No persistance } /** Write to a persistent Stream, including all TypeMap info to allow later * reloading (by the same exact rev of H2O). */ public AutoBuffer( OutputStream os, boolean persist ) { _bb = ByteBuffer.wrap(MemoryManager.malloc1(BBP_BIG._size)).order(ByteOrder.nativeOrder()); _read = false; _chan = Channels.newChannel(os); _h2o = null; _firstPage = true; _persist = 0; if( persist ) { String[] typeMap = (H2O.CLOUD.leader() == H2O.SELF) ? TypeMap.CLAZZES : FetchClazzes.fetchClazzes(); put1(0x1C).put1(0xED).putStr(H2O.ABV.projectVersion()).putAStr(typeMap); } else put1(0); } /** Read from a persistent Stream (including all TypeMap info) into same * exact rev of H2O). 
*/ public AutoBuffer( InputStream is ) { this(is, null); } public AutoBuffer(InputStream is, String[] typeMap) { _chan = null; _h2o = null; _firstPage = true; _persist = 0; _read = true; _bb = ByteBuffer.wrap(MemoryManager.malloc1(BBP_BIG._size)).order(ByteOrder.nativeOrder()); _bb.flip(); _is = is; int b = get1U(); if (typeMap == null) { if (b == 0) return; // No persistence info int magic = get1U(); if (b != 0x1C || magic != 0xED) throw new IllegalArgumentException("Missing magic number 0x1CED at stream start"); checkVersion(getStr()); typeMap = getAStr(); assert typeMap != null; } else { if (b != 0) throw new IllegalStateException("Corrupted communication stream: zero byte expected at the beginning."); } _typeMap = new short[typeMap.length]; for (int i = 0; i < _typeMap.length; i++) _typeMap[i] = (short) (typeMap[i] == null ? 0 : TypeMap.onIce(typeMap[i])); } private void checkVersion(String version) { final boolean ignoreVersion = Boolean.getBoolean(H2O_SYSTEM_SERIALIZATION_IGNORE_VERSION); if (! version.equals(H2O.ABV.projectVersion())) { final String msg = H2O.technote(14,"Found version "+version+", but running version "+H2O.ABV.projectVersion() ); if (ignoreVersion) Log.warn("Loading data from a different version! " + msg); else throw new IllegalArgumentException(msg); } } @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("[AB ").append(_read ? "read " : "write "); sb.append(_firstPage?"first ":"2nd ").append(_h2o); sb.append(" ").append(Value.nameOfPersist(_persist)); if( _bb != null ) sb.append(" 0 <= ").append(_bb.position()).append(" <= ").append(_bb.limit()); if( _bb != null ) sb.append(" <= ").append(_bb.capacity()); return sb.append("]").toString(); } // Fetch a DBB from an object pool... they are fairly expensive to make // because a native call is required to get the backing memory. I've // included BB count tracking code to help track leaks. As of 12/17/2012 the // leaks are under control, but figure this may happen again so keeping these // counters around. // // We use 2 pool sizes: lots of small UDP packet-sized buffers and fewer // larger TCP-sized buffers. 
private static final boolean DEBUG = Boolean.getBoolean("h2o.find-ByteBuffer-leaks"); private static long HWM=0; static class BBPool { long _made, _cached, _freed; long _numer, _denom, _goal=4*H2O.NUMCPUS, _lastGoal; final ArrayList<ByteBuffer> _bbs = new ArrayList<>(); final int _size; // Big or small size of ByteBuffers BBPool( int sz) { _size=sz; } private ByteBuffer stats( ByteBuffer bb ) { if( !DEBUG ) return bb; if( ((_made+_cached)&255)!=255 ) return bb; // Filter printing to 1 in 256 long now = System.currentTimeMillis(); if( now < HWM ) return bb; HWM = now+1000; water.util.SB sb = new water.util.SB(); sb.p("BB").p(this==BBP_BIG?1:0).p(" made=").p(_made).p(" -freed=").p(_freed).p(", cache hit=").p(_cached).p(" ratio=").p(_numer/_denom).p(", goal=").p(_goal).p(" cache size=").p(_bbs.size()).nl(); for( int i=0; i<H2O.MAX_PRIORITY; i++ ) { int x = H2O.getWrkQueueSize(i); if( x > 0 ) sb.p('Q').p(i).p('=').p(x).p(' '); } Log.warn(sb.nl().toString()); return bb; } ByteBuffer make() { while( true ) { // Repeat loop for DBB OutOfMemory errors ByteBuffer bb=null; synchronized(_bbs) { int sz = _bbs.size(); if( sz > 0 ) { bb = _bbs.remove(sz-1); _cached++; _numer++; } } if( bb != null ) return stats(bb); // Cache empty; go get one from C/Native memory try { bb = ByteBuffer.allocateDirect(_size).order(ByteOrder.nativeOrder()); synchronized(this) { _made++; _denom++; _goal = Math.max(_goal,_made-_freed); _lastGoal=System.nanoTime(); } // Goal was too low, raise it return stats(bb); } catch( OutOfMemoryError oome ) { // java.lang.OutOfMemoryError: Direct buffer memory if( !"Direct buffer memory".equals(oome.getMessage()) ) throw oome; System.out.println("OOM DBB - Sleeping & retrying"); try { Thread.sleep(100); } catch( InterruptedException ignore ) { } } } } void free(ByteBuffer bb) { // Heuristic: keep the ratio of BB's made to cache-hits at a fixed level. // Free to GC if ratio is high, free to internal cache if low. long ratio = _numer/(_denom+1); synchronized(_bbs) { if( ratio < 100 || _bbs.size() < _goal ) { // low hit/miss ratio or below goal bb.clear(); // Clear-before-add _bbs.add(bb); } else _freed++; // Toss the extras (above goal & ratio) long now = System.nanoTime(); if( now-_lastGoal > 1000000000L ) { // Once/sec, drop goal by 10% _lastGoal = now; if( ratio > 110 ) // If ratio is really high, lower goal _goal=Math.max(4*H2O.NUMCPUS,(long)(_goal*0.99)); // Once/sec, lower numer/denom... means more recent activity outweighs really old stuff long denom = (long) (0.99 * _denom); // Proposed reduction if( denom > 10 ) { // Keep a little precision _numer = (long) (0.99 * _numer); // Keep ratio between made & cached the same _denom = denom; // ... by lowering both by 10% } } } } static int FREE( ByteBuffer bb ) { if(bb.isDirect()) (bb.capacity()==BBP_BIG._size ? BBP_BIG : BBP_SML).free(bb); return 0; // Flow coding } } static BBPool BBP_SML = new BBPool( 2*1024); // Bytebuffer "common small size", for UDP static BBPool BBP_BIG = new BBPool(64*1024); // Bytebuffer "common big size", for TCP public static int TCP_BUF_SIZ = BBP_BIG._size; private int bbFree() { if(_bb != null && _bb.isDirect()) BBPool.FREE(_bb); _bb = null; return 0; // Flow-coding } // You thought TCP was a reliable protocol, right? WRONG! Fails 100% of the // time under heavy network load. Connection-reset-by-peer & connection // timeouts abound, even after a socket open and after a 1st successful // ByteBuffer write. 
It *appears* that the reader is unaware that a writer // was told "go ahead and write" by the TCP stack, so all these fails are // only on the writer-side. public static class AutoBufferException extends RuntimeException { public final IOException _ioe; AutoBufferException( IOException ioe ) { super(ioe); _ioe = ioe; } } // For reads, just assert all was read and close and release resources. // (release ByteBuffer back to the common pool). For writes, force any final // bytes out. If the write is to an H2ONode and is short, send via UDP. // AutoBuffer close calls order; i.e. a reader close() will block until the // writer does a close(). @Override public final void close() { //if( _size > 2048 ) System.out.println("Z="+_zeros+" / "+_size+", A="+_arys); if( isClosed() ) return; // Already closed assert _h2o != null || _chan != null || _is != null; // Byte-array backed should not be closed try { if( _chan == null ) { // No channel? if( _read ) { if( _is != null ) _is.close(); return; } else { // Write // For small-packet write, send via UDP. Since nothing is sent until // now, this close() call trivially orders - since the reader will not // even start (much less close()) until this packet is sent. if( _bb.position() < MTU) { udpSend(); return; } // oops - Big Write, switch to TCP and finish out there } } // Force AutoBuffer 'close' calls to order; i.e. block readers until // writers do a 'close' - by writing 1 more byte in the close-call which // the reader will have to wait for. if( hasTCP()) { // TCP connection? try { if( _read ) { // Reader? int x = get1U(); // Read 1 more byte assert x == 0xab : "AB.close instead of 0xab sentinel got "+x+", "+this; assert _chan != null; // chan set by incoming reader, since we KNOW it is a TCP // Write the reader-handshake-byte. SocketChannelUtils.underlyingSocketChannel(_chan).socket().getOutputStream().write(0xcd); // do not close actually reader socket; recycle it in TCPReader thread } else { // Writer? put1(0xab); // Write one-more byte ; might set _chan from null to not-null sendPartial(); // Finish partial writes; might set _chan from null to not-null assert _chan != null; // _chan is set not-null now! // Read the writer-handshake-byte. int x = SocketChannelUtils.underlyingSocketChannel(_chan).socket().getInputStream().read(); // either TCP con was dropped or other side closed connection without reading/confirming (e.g. task was cancelled). if( x == -1 ) throw new IOException("Other side closed connection before handshake byte read"); assert x == 0xcd : "Handshake; writer expected a 0xcd from reader but got "+x; } } catch( IOException ioe ) { try { _chan.close(); } catch( IOException ignore ) {} // Silently close _chan = null; // No channel now, since i/o error throw ioe; // Rethrow after close } finally { if( !_read ) _h2o.freeTCPSocket((ByteChannel) _chan); // Recycle writable TCP channel restorePriority(); // And if we raised priority, lower it back } } else { // FileChannel if( !_read ) sendPartial(); // Finish partial file-system writes _chan.close(); _chan = null; // Closed file channel } } catch( IOException e ) { // Dunno how to handle so crash-n-burn throw new AutoBufferException(e); } finally { bbFree(); _time_close_ms = System.currentTimeMillis(); // TimeLine.record_IOclose(this,_persist); // Profile AutoBuffer connections assert isClosed(); } } // Need a sock for a big read or write operation. // See if we got one already, else open a new socket. 
private void tcpOpen() throws IOException { assert _firstPage && _bb.limit() >= 1+2+2+4; // At least something written assert _chan == null; // assert _bb.position()==0; _chan = _h2o.getTCPSocket(); raisePriority(); } // Just close the channel here without reading anything. Without the task // object at hand we do not know what (how many bytes) should we read from // the channel. And since the other side will try to read confirmation from // us before closing the channel, we can not read till the end. So we just // close the channel and let the other side to deal with it and figure out // the task has been cancelled (still sending ack ack back). void drainClose() { if( isClosed() ) return; // Already closed final Channel chan = _chan; // Read before closing assert _h2o != null || chan != null; // Byte-array backed should not be closed if( chan != null ) { // Channel assumed sick from prior IOException try { chan.close(); } catch( IOException ignore ) {} // Silently close _chan = null; // No channel now! if( !_read && SocketChannelUtils.isSocketChannel(chan)) _h2o.freeTCPSocket((ByteChannel) chan); // Recycle writable TCP channel } restorePriority(); // And if we raised priority, lower it back bbFree(); _time_close_ms = System.currentTimeMillis(); // TimeLine.record_IOclose(this,_persist); // Profile AutoBuffer connections assert isClosed(); } // True if we opened a TCP channel, or will open one to close-and-send boolean hasTCP() { assert !isClosed(); return SocketChannelUtils.isSocketChannel(_chan) || (_h2o!=null && _bb.position() >= MTU); } // Size in bytes sent, after a close() int size() { return _size; } //int zeros() { return _zeros; } public int position () { return _bb.position(); } public AutoBuffer position(int p) {_bb.position(p); return this;} /** Skip over some bytes in the byte buffer. Caller is responsible for not * reading off end of the bytebuffer; generally this is easy for * array-backed autobuffers and difficult for i/o-backed bytebuffers. */ public void skip(int skip) { _bb.position(_bb.position()+skip); } // Return byte[] from a writable AutoBuffer public final byte[] buf() { assert _h2o==null && _chan==null && !_read && !_bb.isDirect(); return MemoryManager.arrayCopyOfRange(_bb.array(), _bb.arrayOffset(), _bb.position()); } public final byte[] bufClose() { byte[] res = _bb.array(); bbFree(); return res; } // For TCP sockets ONLY, raise the thread priority. We assume we are // blocking other Nodes with our network I/O, so try to get the I/O // over with. private void raisePriority() { if(_oldPrior == -1){ assert SocketChannelUtils.isSocketChannel(_chan); _oldPrior = Thread.currentThread().getPriority(); Thread.currentThread().setPriority(Thread.MAX_PRIORITY-1); } } private void restorePriority() { if( _oldPrior == -1 ) return; Thread.currentThread().setPriority(_oldPrior); _oldPrior = -1; } // Send via UDP socket. Unlike eg TCP sockets, we only need one for sending // so we keep a global one. Also, we do not close it when done, and we do // not connect it up-front to a target - but send the entire packet right now. 
private int udpSend() throws IOException { assert _chan == null; TimeLine.record_send(this,false); _size = _bb.position(); assert _size < AutoBuffer.BBP_SML._size; _bb.flip(); // Flip for sending if( _h2o==H2O.SELF ) { // SELF-send is the multi-cast signal water.init.NetworkInit.multicast(_bb, _msg_priority); } else { // Else single-cast send // Send via bulk TCP _h2o.sendMessage(_bb, _msg_priority); } return 0; // Flow-coding } // Flip to write-mode AutoBuffer clearForWriting(byte priority) { assert _read; _read = false; _msg_priority = priority; _bb.clear(); _firstPage = true; return this; } // Flip to read-mode public AutoBuffer flipForReading() { assert !_read; _read = true; _bb.flip(); _firstPage = true; return this; } /** Ensure the buffer has space for sz more bytes */ private ByteBuffer getSp( int sz ) { return sz > _bb.remaining() ? getImpl(sz) : _bb; } /** Ensure buffer has at least sz bytes in it. * - Also, set position just past this limit for future reading. */ private ByteBuffer getSz(int sz) { assert _firstPage : "getSz() is only valid for early UDP bytes"; if( sz > _bb.limit() ) getImpl(sz); _bb.position(sz); return _bb; } private ByteBuffer getImpl( int sz ) { assert _read : "Reading from a buffer in write mode"; _bb.compact(); // Move remaining unread bytes to start of buffer; prep for reading // Its got to fit or we asked for too much assert _bb.position()+sz <= _bb.capacity() : "("+_bb.position()+"+"+sz+" <= "+_bb.capacity()+")"; long ns = System.nanoTime(); while( _bb.position() < sz ) { // Read until we got enuf try { int res = readAnInt(); // Read more // Readers are supposed to be strongly typed and read the exact expected bytes. // However, if a TCP connection fails mid-read we'll get a short-read. // This is indistinguishable from a mis-alignment between the writer and reader! if( res <= 0 ) throw new AutoBufferException(new EOFException("Reading "+sz+" bytes, AB="+this)); if( _is != null ) _bb.position(_bb.position()+res); // Advance BB for Streams manually _size += res; // What we read } catch( IOException e ) { // Dunno how to handle so crash-n-burn // Linux/Ubuntu message for a reset-channel if( e.getMessage().equals("An existing connection was forcibly closed by the remote host") ) throw new AutoBufferException(e); // Windows message for a reset-channel if( e.getMessage().equals("An established connection was aborted by the software in your host machine") ) throw new AutoBufferException(e); throw Log.throwErr(e); } } _time_io_ns += (System.nanoTime()-ns); _bb.flip(); // Prep for handing out bytes //for( int i=0; i < _bb.limit(); i++ ) if( _bb.get(i)==0 ) _zeros++; _firstPage = false; // First page of data is gone gone gone return _bb; } private int readAnInt() throws IOException { if (_is == null) return ((ReadableByteChannel) _chan).read(_bb); final byte[] array = _bb.array(); final int position = _bb.position(); final int remaining = _bb.remaining(); try { return _is.read(array, position, remaining); } catch (IOException ioe) { throw new IOException("Failed reading " + remaining + " bytes into buffer[" + array.length + "] at " + position + " from " + sourceName + " " + _is, ioe); } } /** Put as needed to keep from overflowing the ByteBuffer. 
*/ private ByteBuffer putSp( int sz ) { assert !_read; if (sz > _bb.remaining()) { if ((_h2o == null && _chan == null) || (_bb.hasArray() && _bb.capacity() < BBP_BIG._size)) expandByteBuffer(sz); else sendPartial(); assert sz <= _bb.remaining(); } return _bb; } // Do something with partial results, because the ByteBuffer is full. // If we are doing I/O, ship the bytes we have now and flip the ByteBuffer. private ByteBuffer sendPartial() { // Doing I/O with the full ByteBuffer - ship partial results _size += _bb.position(); if( _chan == null ) TimeLine.record_send(this, true); _bb.flip(); // Prep for writing. try { if( _chan == null ) tcpOpen(); // This is a big operation. Open a TCP socket as-needed. //for( int i=0; i < _bb.limit(); i++ ) if( _bb.get(i)==0 ) _zeros++; long ns = System.nanoTime(); while( _bb.hasRemaining() ) { ((WritableByteChannel) _chan).write(_bb); if( RANDOM_TCP_DROP != null && SocketChannelUtils.isSocketChannel(_chan) && RANDOM_TCP_DROP.nextInt(100) == 0 ) throw new IOException("Random TCP Write Fail"); } _time_io_ns += (System.nanoTime()-ns); } catch( IOException e ) { // Some kind of TCP fail? // Change to an unchecked exception (so we don't have to annotate every // frick'n put1/put2/put4/read/write call). Retry & recovery happens at // a higher level. AutoBuffers are used for many things including e.g. // disk i/o & UDP writes; this exception only happens on a failed TCP // write - and we don't want to make the other AutoBuffer users have to // declare (and then ignore) this exception. throw new AutoBufferException(e); } _firstPage = false; _bb.clear(); return _bb; } // Called when the byte buffer doesn't have enough room // If buffer is array backed, and the needed room is small, // increase the size of the backing array, // otherwise dump into a large direct buffer private ByteBuffer expandByteBuffer(int sizeHint) { final long needed = (long) sizeHint - _bb.remaining() + _bb.capacity(); // Max needed is 2G if ((_h2o==null && _chan == null) || (_bb.hasArray() && needed < MTU)) { if (needed > MAX_ARRAY_SIZE) { throw new IllegalArgumentException("Cannot allocate more than 2GB array: sizeHint="+sizeHint+", " + "needed="+needed + ", bb.remaining()=" + _bb.remaining() + ", bb.capacity()="+_bb.capacity()); } byte[] ary = _bb.array(); // just get twice what is currently needed but not more then max array size (2G) // Be careful not to overflow because of integer math! 
int newLen = (int) Math.min(1L << (water.util.MathUtils.log2(needed)+1), MAX_ARRAY_SIZE); int oldpos = _bb.position(); _bb = ByteBuffer.wrap(MemoryManager.arrayCopyOfRange(ary,0,newLen),oldpos,newLen-oldpos) .order(ByteOrder.nativeOrder()); } else if (_bb.capacity() != BBP_BIG._size) { //avoid expanding existing BBP items int oldPos = _bb.position(); _bb.flip(); _bb = BBP_BIG.make().put(_bb); _bb.position(oldPos); } return _bb; } @SuppressWarnings("unused") public String getStr(int off, int len) { return new String(_bb.array(), _bb.arrayOffset()+off, len, UTF_8); } // ----------------------------------------------- // Utility functions to get various Java primitives @SuppressWarnings("unused") public boolean getZ() { return get1()!=0; } @SuppressWarnings("unused") public byte get1 () { return getSp(1).get (); } @SuppressWarnings("unused") public int get1U() { return get1() & 0xFF; } @SuppressWarnings("unused") public char get2 () { return getSp(2).getChar (); } @SuppressWarnings("unused") public short get2s () { return getSp(2).getShort (); } @SuppressWarnings("unused") public int get3 () { getSp(3); return get1U() | get1U() << 8 | get1U() << 16; } @SuppressWarnings("unused") public int get4 () { return getSp(4).getInt (); } @SuppressWarnings("unused") public float get4f() { return getSp(4).getFloat (); } @SuppressWarnings("unused") public long get8 () { return getSp(8).getLong (); } @SuppressWarnings("unused") public double get8d() { return getSp(8).getDouble(); } int get1U(int off) { return _bb.get (off)&0xFF; } int get4 (int off) { return _bb.getInt (off); } long get8 (int off) { return _bb.getLong(off); } @SuppressWarnings("unused") public AutoBuffer putZ (boolean b){ return put1(b?1:0); } @SuppressWarnings("unused") public AutoBuffer put1 ( int b) { assert b >= -128 && b <= 255 : ""+b+" is not a byte"; putSp(1).put((byte)b); return this; } @SuppressWarnings("unused") public AutoBuffer put2 ( char c) { putSp(2).putChar (c); return this; } @SuppressWarnings("unused") public AutoBuffer put2 ( short s) { putSp(2).putShort (s); return this; } @SuppressWarnings("unused") public AutoBuffer put2s ( short s) { return put2(s); } @SuppressWarnings("unused") public AutoBuffer put3( int x ) { assert (-1<<24) <= x && x < (1<<24); return put1((x)&0xFF).put1((x >> 8)&0xFF).put1(x >> 16); } @SuppressWarnings("unused") public AutoBuffer put4 ( int i) { putSp(4).putInt (i); return this; } @SuppressWarnings("unused") public AutoBuffer put4f( float f) { putSp(4).putFloat (f); return this; } @SuppressWarnings("unused") public AutoBuffer put8 ( long l) { putSp(8).putLong (l); return this; } @SuppressWarnings("unused") public AutoBuffer put8d(double d) { putSp(8).putDouble(d); return this; } public AutoBuffer put(Freezable f) { if( f == null ) return putInt(TypeMap.NULL); assert f.frozenType() > 0 : "No TypeMap for "+f.getClass().getName(); putInt(f.frozenType()); return f.write(this); } public <T extends Freezable> T get() { return getFreezable(null); } public <T extends Freezable> T get(Class<T> tc) { if (tc == null) throw new IllegalArgumentException("Class argument cannot be null"); return getFreezable(tc); } @SuppressWarnings("unchecked") private <T extends Freezable> T getFreezable(Class<T> tc) { int id = getInt(); if( id == TypeMap.NULL ) return null; if( _is!=null ) { id = remapFrozenId(id); } return (T) TypeMap .newFreezable(id, tc) .read(this); } private int remapFrozenId(int id) { assert _typeMap != null; if (id >= _typeMap.length) throw new IllegalStateException("Class with frozenType=" + id + " 
cannot be deserialized because it is not part of the TypeMap."); return _typeMap[id]; } // Write Key's target IFF the Key is not null; target can be null. public AutoBuffer putKey(Key k) { if( k==null ) return this; // Key is null ==> write nothing Keyed kd = DKV.getGet(k); put(kd); return kd == null ? this : kd.writeAll_impl(this); } public Keyed getKey(Key k, Futures fs) { return k==null ? null : getKey(fs); // Key is null ==> read nothing } public Keyed getKey(Futures fs) { Keyed kd = get(Keyed.class); if( kd == null ) return null; DKV.put(kd,fs); return kd.readAll_impl(this,fs); } // Put a (compressed) integer. Specifically values in the range -1 to ~250 // will take 1 byte, values near a Short will take 1+2 bytes, values near an // Int will take 1+4 bytes, and bigger values 1+8 bytes. This compression is // optimized for small integers (including -1 which is often used as a "array // is null" flag when passing the array length). public AutoBuffer putInt(int x) { if( 0 <= (x+1)&& (x+1) <= 253 ) return put1(x+1); if( Short.MIN_VALUE <= x && x <= Short.MAX_VALUE ) return put1(255).put2((short)x); return put1(254).put4(x); } // Get a (compressed) integer. See above for the compression strategy and reasoning. public int getInt( ) { int x = get1U(); if( x <= 253 ) return x-1; if( x==255 ) return (short)get2(); assert x==254; return get4(); } // Put a zero-compressed array. Compression is: // If null : putInt(-1) // Else // putInt(# of leading nulls) // putInt(# of non-nulls) // If # of non-nulls is > 0, putInt( # of trailing nulls) long putZA( Object[] A ) { if( A==null ) { putInt(-1); return 0; } int x=0; for( ; x<A.length; x++ ) if( A[x ]!=null ) break; int y=A.length; for( ; y>x; y-- ) if( A[y-1]!=null ) break; putInt(x); // Leading zeros to skip putInt(y-x); // Mixed non-zero guts in middle if( y > x ) // If any trailing nulls putInt(A.length-y); // Trailing zeros return ((long)x<<32)|(y-x); // Return both leading zeros, and middle non-zeros } // Get the lengths of a zero-compressed array. // Returns -1 if null. // Returns a long of (leading zeros | middle non-zeros). // If there are non-zeros, caller has to read the trailing zero-length. long getZA( ) { int x=getInt(); // Length of leading zeros if( x == -1 ) return -1; // or a null int nz=getInt(); // Non-zero in the middle return ((long)x<<32)|(long)nz; // Return both ints } // TODO: untested. . . @SuppressWarnings("unused") public AutoBuffer putAEnum(Enum[] enums) { //_arys++; long xy = putZA(enums); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putEnum(enums[i]); return this; } @SuppressWarnings("unused") public <E extends Enum> E[] getAEnum(E[] values) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 
0 : getInt(); // Trailing nulls E[] ts = (E[]) Array.newInstance(values.getClass().getComponentType(), x+y+z); for( int i = x; i < x+y; ++i ) ts[i] = getEnum(values); return ts; } @SuppressWarnings("unused") public AutoBuffer putA(Freezable[] fs) { //_arys++; long xy = putZA(fs); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) put(fs[i]); return this; } public AutoBuffer putAA(Freezable[][] fs) { //_arys++; long xy = putZA(fs); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putA(fs[i]); return this; } @SuppressWarnings("unused") public AutoBuffer putAAA(Freezable[][][] fs) { //_arys++; long xy = putZA(fs); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putAA(fs[i]); return this; } public <T extends Freezable> T[] getA(Class<T> tc) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls T[] ts = (T[]) Array.newInstance(tc, x+y+z); for( int i = x; i < x+y; ++i ) ts[i] = get(tc); return ts; } public <T extends Freezable> T[][] getAA(Class<T> tc) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls Class<T[]> tcA = (Class<T[]>) Array.newInstance(tc, 0).getClass(); T[][] ts = (T[][]) Array.newInstance(tcA, x+y+z); for( int i = x; i < x+y; ++i ) ts[i] = getA(tc); return ts; } @SuppressWarnings("unused") public <T extends Freezable> T[][][] getAAA(Class<T> tc) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls Class<T[] > tcA = (Class<T[] >) Array.newInstance(tc , 0).getClass(); Class<T[][]> tcAA = (Class<T[][]>) Array.newInstance(tcA, 0).getClass(); T[][][] ts = (T[][][]) Array.newInstance(tcAA, x+y+z); for( int i = x; i < x+y; ++i ) ts[i] = getAA(tc); return ts; } public AutoBuffer putAStr(String[] fs) { //_arys++; long xy = putZA(fs); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putStr(fs[i]); return this; } public String[] getAStr() { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls String[] ts = new String[x+y+z]; for( int i = x; i < x+y; ++i ) ts[i] = getStr(); return ts; } @SuppressWarnings("unused") public AutoBuffer putAAStr(String[][] fs) { //_arys++; long xy = putZA(fs); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putAStr(fs[i]); return this; } @SuppressWarnings("unused") public String[][] getAAStr() { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls String[][] ts = new String[x+y+z][]; for( int i = x; i < x+y; ++i ) ts[i] = getAStr(); return ts; } // Read the smaller of _bb.remaining() and len into buf. // Return bytes read, which could be zero. int read( byte[] buf, int off, int len ) { int sz = Math.min(_bb.remaining(),len); _bb.get(buf,off,sz); return sz; } // ----------------------------------------------- // Utility functions to handle common UDP packet tasks. 
// Get the 1st control byte int getCtrl( ) { return getSz(1).get(0)&0xFF; } // Get node timestamp in next 2 bytes // the timestamp is on purpose where port was previously. // In code, getPort is used to skip bytes before the value and the bytes for port itself. // This ensures getPort will still have the same side-effect except we skip also the timestamp which is desired short getTimestamp() { return getSz(1+2).getShort(1);} // Get the port in next 2 bytes // Same port extraction is done in portPack method in TimelineSnapshot. If 'getPort' method is changed, // there is a big chance that 'portPack' method needs to be changed as well to be consistent. int getPort( ) { return getSz(1+2+2).getChar(1+2); } // Get the task# in the next 4 bytes int getTask( ) { return getSz(1+2+2+4).getInt(1+2+2); } // Get the flag in the next 1 byte int getFlag( ) { return getSz(1+2+2+4+1).get(1+2+2+4); } /** * Write UDP into the ByteBuffer with custom sender's port number * * This method sets the ctrl, port, task. * Ready to write more bytes afterwards * * @param type type of the UDP datagram * @param senderPort port of the sender of the datagram */ AutoBuffer putUdp(UDP.udp type, int senderPort){ return putUdp(type, senderPort, H2O.SELF.getTimestamp()); } AutoBuffer putUdp(UDP.udp type, int senderPort, short timestamp){ assert _bb.position() == 0; putSp(_bb.position()+1+2+2); _bb.put ((byte)type.ordinal()); _bb.putShort(timestamp); _bb.putChar((char)senderPort); return this; } /** * Write UDP into the ByteBuffer with the current node as the sender. * * This method sets the ctrl, port, task. * Ready to write more bytes afterwards * * @param type type of the UDP datagram */ AutoBuffer putUdp(UDP.udp type) { return putUdp(type, H2O.H2O_PORT); } AutoBuffer putTask(UDP.udp type, int tasknum) { return putUdp(type).put4(tasknum); } AutoBuffer putTask(int ctrl, int tasknum) { assert _bb.position() == 0; putSp(_bb.position()+1+2+2+4); _bb.put((byte)ctrl).putShort(H2O.SELF.getTimestamp()).putChar((char)H2O.H2O_PORT).putInt(tasknum); return this; } // ----------------------------------------------- // Utility functions to read & write arrays public boolean[] getAZ() { int len = getInt(); if (len == -1) return null; boolean[] r = new boolean[len]; for (int i=0;i<len;++i) r[i] = getZ(); return r; } public byte[] getA1( ) { //_arys++; int len = getInt(); return len == -1 ? 
null : getA1(len); } public byte[] getA1( int len ) { byte[] buf = MemoryManager.malloc1(len); int sofar = 0; while( sofar < len ) { int more = Math.min(_bb.remaining(), len - sofar); _bb.get(buf, sofar, more); sofar += more; if( sofar < len ) getSp(Math.min(_bb.capacity(), len-sofar)); } return buf; } public short[] getA2( ) { //_arys++; int len = getInt(); if( len == -1 ) return null; short[] buf = MemoryManager.malloc2(len); int sofar = 0; while( sofar < buf.length ) { ShortBuffer as = _bb.asShortBuffer(); int more = Math.min(as.remaining(), len - sofar); as.get(buf, sofar, more); sofar += more; _bb.position(_bb.position() + as.position()*2); if( sofar < len ) getSp(Math.min(_bb.capacity()-1, (len-sofar)*2)); } return buf; } public int[] getA4( ) { //_arys++; int len = getInt(); if( len == -1 ) return null; int[] buf = MemoryManager.malloc4(len); int sofar = 0; while( sofar < buf.length ) { IntBuffer as = _bb.asIntBuffer(); int more = Math.min(as.remaining(), len - sofar); as.get(buf, sofar, more); sofar += more; _bb.position(_bb.position() + as.position()*4); if( sofar < len ) getSp(Math.min(_bb.capacity()-3, (len-sofar)*4)); } return buf; } public float[] getA4f( ) { //_arys++; int len = getInt(); if( len == -1 ) return null; float[] buf = MemoryManager.malloc4f(len); int sofar = 0; while( sofar < buf.length ) { FloatBuffer as = _bb.asFloatBuffer(); int more = Math.min(as.remaining(), len - sofar); as.get(buf, sofar, more); sofar += more; _bb.position(_bb.position() + as.position()*4); if( sofar < len ) getSp(Math.min(_bb.capacity()-3, (len-sofar)*4)); } return buf; } public long[] getA8( ) { //_arys++; // Get the lengths of lead & trailing zero sections, and the non-zero // middle section. int x = getInt(); if( x == -1 ) return null; int y = getInt(); // Non-zero in the middle int z = y==0 ? 0 : getInt();// Trailing zeros long[] buf = MemoryManager.malloc8(x+y+z); switch( get1U() ) { // 1,2,4 or 8 for how the middle section is passed case 1: for( int i=x; i<x+y; i++ ) buf[i] = get1U(); return buf; case 2: for( int i=x; i<x+y; i++ ) buf[i] = (short)get2(); return buf; case 4: for( int i=x; i<x+y; i++ ) buf[i] = get4(); return buf; case 8: break; default: throw H2O.fail(); } int sofar = x; while( sofar < x+y ) { LongBuffer as = _bb.asLongBuffer(); int more = Math.min(as.remaining(), x+y - sofar); as.get(buf, sofar, more); sofar += more; _bb.position(_bb.position() + as.position()*8); if( sofar < x+y ) getSp(Math.min(_bb.capacity()-7, (x+y-sofar)*8)); } return buf; } public double[] getA8d( ) { //_arys++; int len = getInt(); if( len == -1 ) return null; double[] buf = MemoryManager.malloc8d(len); int sofar = 0; while( sofar < len ) { DoubleBuffer as = _bb.asDoubleBuffer(); int more = Math.min(as.remaining(), len - sofar); as.get(buf, sofar, more); sofar += more; _bb.position(_bb.position() + as.position()*8); if( sofar < len ) getSp(Math.min(_bb.capacity()-7, (len-sofar)*8)); } return buf; } @SuppressWarnings("unused") public byte[][] getAA1( ) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls byte[][] ary = new byte[x+y+z][]; for( int i=x; i<x+y; i++ ) ary[i] = getA1(); return ary; } @SuppressWarnings("unused") public short[][] getAA2( ) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 
0 : getInt(); // Trailing nulls short[][] ary = new short[x+y+z][]; for( int i=x; i<x+y; i++ ) ary[i] = getA2(); return ary; } public int[][] getAA4( ) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls int[][] ary = new int[x+y+z][]; for( int i=x; i<x+y; i++ ) ary[i] = getA4(); return ary; } @SuppressWarnings("unused") public float[][] getAA4f( ) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls float[][] ary = new float[x+y+z][]; for( int i=x; i<x+y; i++ ) ary[i] = getA4f(); return ary; } public long[][] getAA8( ) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls long[][] ary = new long[x+y+z][]; for( int i=x; i<x+y; i++ ) ary[i] = getA8(); return ary; } @SuppressWarnings("unused") public double[][] getAA8d( ) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls double[][] ary = new double[x+y+z][]; for( int i=x; i<x+y; i++ ) ary[i] = getA8d(); return ary; } @SuppressWarnings("unused") public int[][][] getAAA4( ) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls int[][][] ary = new int[x+y+z][][]; for( int i=x; i<x+y; i++ ) ary[i] = getAA4(); return ary; } @SuppressWarnings("unused") public long[][][] getAAA8( ) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls long[][][] ary = new long[x+y+z][][]; for( int i=x; i<x+y; i++ ) ary[i] = getAA8(); return ary; } public double[][][] getAAA8d( ) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls double[][][] ary = new double[x+y+z][][]; for( int i=x; i<x+y; i++ ) ary[i] = getAA8d(); return ary; } public String getStr( ) { int len = getInt(); return len == -1 ? null : new String(getA1(len), UTF_8); } public <E extends Enum> E getEnum(E[] values ) { int idx = get1(); return idx == -1 ? 
null : values[idx]; } public AutoBuffer putAZ( boolean[] ary ) { if( ary == null ) return putInt(-1); putInt(ary.length); for (boolean anAry : ary) putZ(anAry); return this; } public AutoBuffer putA1( byte[] ary ) { //_arys++; if( ary == null ) return putInt(-1); putInt(ary.length); return putA1(ary,ary.length); } public AutoBuffer putA1( byte[] ary, int length ) { return putA1(ary,0,length); } public AutoBuffer putA1( byte[] ary, int sofar, int length ) { if (length - sofar > _bb.remaining()) expandByteBuffer(length-sofar); while( sofar < length ) { int len = Math.min(length - sofar, _bb.remaining()); _bb.put(ary, sofar, len); sofar += len; if( sofar < length ) sendPartial(); } return this; } AutoBuffer putA2( short[] ary ) { //_arys++; if( ary == null ) return putInt(-1); putInt(ary.length); if (ary.length*2 > _bb.remaining()) expandByteBuffer(ary.length*2); int sofar = 0; while( sofar < ary.length ) { ShortBuffer sb = _bb.asShortBuffer(); int len = Math.min(ary.length - sofar, sb.remaining()); sb.put(ary, sofar, len); sofar += len; _bb.position(_bb.position() + sb.position()*2); if( sofar < ary.length ) sendPartial(); } return this; } public AutoBuffer putA4( int[] ary ) { //_arys++; if( ary == null ) return putInt(-1); putInt(ary.length); // Note: based on Brandon commit this should improve performance during parse (7d950d622ee3037555ecbab0e39404f8f0917652) if (ary.length*4 > _bb.remaining()) { expandByteBuffer(ary.length*4); // Try to expand BB buffer to fit input array } int sofar = 0; while( sofar < ary.length ) { IntBuffer ib = _bb.asIntBuffer(); int len = Math.min(ary.length - sofar, ib.remaining()); ib.put(ary, sofar, len); sofar += len; _bb.position(_bb.position() + ib.position()*4); if( sofar < ary.length ) sendPartial(); } return this; } public AutoBuffer putA8( long[] ary ) { //_arys++; if( ary == null ) return putInt(-1); // Trim leading & trailing zeros. Pass along the length of leading & // trailing zero sections, and the non-zero section in the middle. int x=0; for( ; x<ary.length; x++ ) if( ary[x ]!=0 ) break; int y=ary.length; for( ; y>x; y-- ) if( ary[y-1]!=0 ) break; int nzlen = y-x; putInt(x); putInt(nzlen); if( nzlen > 0 ) // If any trailing nulls putInt(ary.length-y); // Trailing zeros // Size trim the NZ section: pass as bytes or shorts if possible. 
long min=Long.MAX_VALUE, max=Long.MIN_VALUE; for( int i=x; i<y; i++ ) { if( ary[i]<min ) min=ary[i]; if( ary[i]>max ) max=ary[i]; } if( 0 <= min && max < 256 ) { // Ship as unsigned bytes put1(1); for( int i=x; i<y; i++ ) put1((int)ary[i]); return this; } if( Short.MIN_VALUE <= min && max < Short.MAX_VALUE ) { // Ship as shorts put1(2); for( int i=x; i<y; i++ ) put2((short)ary[i]); return this; } if( Integer.MIN_VALUE <= min && max < Integer.MAX_VALUE ) { // Ship as ints put1(4); for( int i=x; i<y; i++ ) put4((int)ary[i]); return this; } put1(8); // Ship as full longs int sofar = x; if ((y-sofar)*8 > _bb.remaining()) expandByteBuffer(nzlen*8); while( sofar < y ) { LongBuffer lb = _bb.asLongBuffer(); int len = Math.min(y - sofar, lb.remaining()); lb.put(ary, sofar, len); sofar += len; _bb.position(_bb.position() + lb.position() * 8); if( sofar < y ) sendPartial(); } return this; } public AutoBuffer putA4f( float[] ary ) { //_arys++; if( ary == null ) return putInt(-1); putInt(ary.length); if (ary.length*4 > _bb.remaining()) expandByteBuffer(ary.length*4); int sofar = 0; while( sofar < ary.length ) { FloatBuffer fb = _bb.asFloatBuffer(); int len = Math.min(ary.length - sofar, fb.remaining()); fb.put(ary, sofar, len); sofar += len; _bb.position(_bb.position() + fb.position()*4); if( sofar < ary.length ) sendPartial(); } return this; } public AutoBuffer putA8d( double[] ary ) { //_arys++; if( ary == null ) return putInt(-1); putInt(ary.length); if (ary.length*8 > _bb.remaining()) expandByteBuffer(ary.length*8); int sofar = 0; while( sofar < ary.length ) { DoubleBuffer db = _bb.asDoubleBuffer(); int len = Math.min(ary.length - sofar, db.remaining()); db.put(ary, sofar, len); sofar += len; _bb.position(_bb.position() + db.position()*8); if( sofar < ary.length ) sendPartial(); } return this; } public AutoBuffer putAA1( byte[][] ary ) { //_arys++; long xy = putZA(ary); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putA1(ary[i]); return this; } @SuppressWarnings("unused") AutoBuffer putAA2( short[][] ary ) { //_arys++; long xy = putZA(ary); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putA2(ary[i]); return this; } public AutoBuffer putAA4( int[][] ary ) { //_arys++; long xy = putZA(ary); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putA4(ary[i]); return this; } @SuppressWarnings("unused") public AutoBuffer putAA4f( float[][] ary ) { //_arys++; long xy = putZA(ary); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putA4f(ary[i]); return this; } public AutoBuffer putAA8( long[][] ary ) { //_arys++; long xy = putZA(ary); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putA8(ary[i]); return this; } @SuppressWarnings("unused") public AutoBuffer putAA8d( double[][] ary ) { //_arys++; long xy = putZA(ary); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putA8d(ary[i]); return this; } public AutoBuffer putAAA4( int[][][] ary ) { //_arys++; long xy = putZA(ary); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putAA4(ary[i]); return this; } public AutoBuffer putAAA8( long[][][] ary ) { //_arys++; long xy = putZA(ary); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putAA8(ary[i]); return this; } public AutoBuffer putAAA8d( double[][][] ary ) { //_arys++; long xy = 
putZA(ary); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putAA8d(ary[i]); return this; } // Put a String as bytes (not chars!) public AutoBuffer putStr( String s ) { if( s==null ) return putInt(-1); return putA1(StringUtils.bytesOf(s)); } @SuppressWarnings("unused") public AutoBuffer putEnum( Enum x ) { return put1(x==null ? -1 : x.ordinal()); } public static void writeToChannel(AutoBuffer ab, ByteChannel channel) throws IOException { ab.flipForReading(); channel.write(ab._bb); ab.clearForWriting(H2O.MAX_PRIORITY); } /** * Serializes a BootstrapFreezable into a byte array. Because BootstrapFreezables * have known ids - there is no need to also serialize the TypeMap. * @param o a BootstrapFreezable to serialize * @return byte array representing the object */ public static byte[] serializeBootstrapFreezable(BootstrapFreezable<?> o) { ByteArrayOutputStream result; try (ByteArrayOutputStream baos = new ByteArrayOutputStream(); AutoBuffer ab = new AutoBuffer(baos, false)) { ab.put(o); result = baos; } catch (IOException e) { throw Log.throwErr(e); } return result.toByteArray(); } public static BootstrapFreezable<?> deserializeBootstrapFreezable(byte[] bytes) { try (ByteArrayInputStream bais = new ByteArrayInputStream(bytes); AutoBuffer ab = new AutoBuffer(bais, TypeMap.bootstrapClasses())) { return ab.get(); } catch (IOException e) { throw Log.throwErr(e); } } public static byte[] javaSerializeWritePojo(Object o) { ByteArrayOutputStream bos = new ByteArrayOutputStream(); ObjectOutputStream out = null; try { out = new ObjectOutputStream(bos); out.writeObject(o); out.close(); return bos.toByteArray(); } catch (IOException e) { throw Log.throwErr(e); } } public static Object javaSerializeReadPojo(byte [] bytes) { try { final ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes)); Object o = ois.readObject(); return o; } catch (IOException e) { String className = nameOfClass(bytes); throw Log.throwErr(new RuntimeException("Failed to deserialize " + className, e)); } catch (ClassNotFoundException e) { throw Log.throwErr(e); } } static String nameOfClass(byte[] bytes) { if (bytes == null) return "(null)"; if (bytes.length < 11) return "(no name)"; int nameSize = Math.min(40, Math.max(3, bytes[7])); return new String(bytes, 8, Math.min(nameSize, bytes.length - 8)); } // ========================================================================== // Java Serializable objects // Note: These are heck-a-lot more expensive than their Freezable equivalents. @SuppressWarnings("unused") public AutoBuffer putSer( Object obj ) { if (obj == null) return putA1(null); return putA1(javaSerializeWritePojo(obj)); } @SuppressWarnings("unused") public AutoBuffer putASer(Object[] fs) { //_arys++; long xy = putZA(fs); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putSer(fs[i]); return this; } @SuppressWarnings("unused") public AutoBuffer putAASer(Object[][] fs) { //_arys++; long xy = putZA(fs); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putASer(fs[i]); return this; } @SuppressWarnings("unused") public AutoBuffer putAAASer(Object[][][] fs) { //_arys++; long xy = putZA(fs); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putAASer(fs[i]); return this; } @SuppressWarnings("unused") public Object getSer() { byte[] ba = getA1(); return ba == null ? 
null : javaSerializeReadPojo(ba); } @SuppressWarnings("unused") public <T> T getSer(Class<T> tc) { return (T)getSer(); } @SuppressWarnings("unused") public <T> T[] getASer(Class<T> tc) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls T[] ts = (T[]) Array.newInstance(tc, x+y+z); for( int i = x; i < x+y; ++i ) ts[i] = getSer(tc); return ts; } @SuppressWarnings("unused") public <T> T[][] getAASer(Class<T> tc) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls T[][] ts = (T[][]) Array.newInstance(tc, x+y+z); for( int i = x; i < x+y; ++i ) ts[i] = getASer(tc); return ts; } @SuppressWarnings("unused") public <T> T[][][] getAAASer(Class<T> tc) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls T[][][] ts = (T[][][]) Array.newInstance(tc, x+y+z); for( int i = x; i < x+y; ++i ) ts[i] = getAASer(tc); return ts; } // ========================================================================== // JSON AutoBuffer printers public AutoBuffer putJNULL( ) { return put1('n').put1('u').put1('l').put1('l'); } // Escaped JSON string private AutoBuffer putJStr( String s ) { byte[] b = StringUtils.bytesOf(s); int off=0; for( int i=0; i<b.length; i++ ) { if( b[i] == '\\' || b[i] == '"') { // Double up backslashes, escape quotes putA1(b,off,i); // Everything so far (no backslashes) put1('\\'); // The extra backslash off=i; // Advance the "so far" variable } // Handle remaining special cases in JSON // if( b[i] == '/' ) { putA1(b,off,i); put1('\\'); put1('/'); off=i+1; continue;} if( b[i] == '\b' ) { putA1(b,off,i); put1('\\'); put1('b'); off=i+1; continue;} if( b[i] == '\f' ) { putA1(b,off,i); put1('\\'); put1('f'); off=i+1; continue;} if( b[i] == '\n' ) { putA1(b,off,i); put1('\\'); put1('n'); off=i+1; continue;} if( b[i] == '\r' ) { putA1(b,off,i); put1('\\'); put1('r'); off=i+1; continue;} if( b[i] == '\t' ) { putA1(b,off,i); put1('\\'); put1('t'); off=i+1; continue;} // ASCII Control characters if( b[i] == 127 ) { putA1(b,off,i); put1('\\'); put1('u'); put1('0'); put1('0'); put1('7'); put1('f'); off=i+1; continue;} if( b[i] >= 0 && b[i] < 32 ) { String hexStr = Integer.toHexString(b[i]); putA1(b, off, i); put1('\\'); put1('u'); for (int j = 0; j < 4 - hexStr.length(); j++) put1('0'); for (int j = 0; j < hexStr.length(); j++) put1(hexStr.charAt(hexStr.length()-j-1)); off=i+1; } } return putA1(b,off,b.length); } public AutoBuffer putJSONStrUnquoted ( String s ) { return s==null ? putJNULL() : putJStr(s); } public AutoBuffer putJSONStrUnquoted ( String name, String s ) { return s==null ? putJSONStr(name).put1(':').putJNULL() : putJSONStr(name).put1(':').putJStr(s); } public AutoBuffer putJSONName( String s ) { return put1('"').putJStr(s).put1('"'); } public AutoBuffer putJSONStr ( String s ) { return s==null ? 
putJNULL() : putJSONName(s); } public AutoBuffer putJSONAStr(String[] ss) { if( ss == null ) return putJNULL(); put1('['); for( int i=0; i<ss.length; i++ ) { if( i>0 ) put1(','); putJSONStr(ss[i]); } return put1(']'); } public AutoBuffer putJSONAAStr( String[][] sss) { if( sss == null ) return putJNULL(); put1('['); for( int i=0; i<sss.length; i++ ) { if( i>0 ) put1(','); putJSONAStr(sss[i]); } return put1(']'); } @SuppressWarnings("unused") public AutoBuffer putJSONStr (String name, String s ) { return putJSONStr(name).put1(':').putJSONStr(s); } @SuppressWarnings("unused") public AutoBuffer putJSONAStr (String name, String[] ss ) { return putJSONStr(name).put1(':').putJSONAStr(ss); } @SuppressWarnings("unused") public AutoBuffer putJSONAAStr(String name, String[][]sss) { return putJSONStr(name).put1(':').putJSONAAStr(sss); } @SuppressWarnings("unused") public AutoBuffer putJSONSer (String name, Object o ) { return putJSONStr(name).put1(':').putJNULL(); } @SuppressWarnings("unused") public AutoBuffer putJSONASer (String name, Object[] oo ) { return putJSONStr(name).put1(':').putJNULL(); } @SuppressWarnings("unused") public AutoBuffer putJSONAASer (String name, Object[][] ooo ) { return putJSONStr(name).put1(':').putJNULL(); } @SuppressWarnings("unused") public AutoBuffer putJSONAAASer(String name, Object[][][] oooo) { return putJSONStr(name).put1(':').putJNULL(); } public AutoBuffer putJSONAZ( String name, boolean[] f) { return putJSONStr(name).put1(':').putJSONAZ(f); } public AutoBuffer putJSON(Freezable ice) { return ice == null ? putJNULL() : ice.writeJSON(this); } public AutoBuffer putJSONA( Freezable fs[] ) { if( fs == null ) return putJNULL(); put1('['); for( int i=0; i<fs.length; i++ ) { if( i>0 ) put1(','); putJSON(fs[i]); } return put1(']'); } public AutoBuffer putJSONAA( Freezable fs[][]) { if( fs == null ) return putJNULL(); put1('['); for( int i=0; i<fs.length; i++ ) { if( i>0 ) put1(','); putJSONA(fs[i]); } return put1(']'); } public AutoBuffer putJSONAAA( Freezable fs[][][]) { if( fs == null ) return putJNULL(); put1('['); for( int i=0; i<fs.length; i++ ) { if( i>0 ) put1(','); putJSONAA(fs[i]); } return put1(']'); } @SuppressWarnings("unused") public AutoBuffer putJSON ( String name, Freezable f ) { return putJSONStr(name).put1(':').putJSON (f); } public AutoBuffer putJSONA ( String name, Freezable f[] ) { return putJSONStr(name).put1(':').putJSONA (f); } @SuppressWarnings("unused") public AutoBuffer putJSONAA( String name, Freezable f[][]){ return putJSONStr(name).put1(':').putJSONAA(f); } @SuppressWarnings("unused") public AutoBuffer putJSONAAA( String name, Freezable f[][][]){ return putJSONStr(name).put1(':').putJSONAAA(f); } @SuppressWarnings("unused") public AutoBuffer putJSONZ( String name, boolean value ) { return putJSONStr(name).put1(':').putJStr("" + value); } private AutoBuffer putJSONAZ(boolean [] b) { if (b == null) return putJNULL(); put1('['); for( int i = 0; i < b.length; ++i) { if (i > 0) put1(','); putJStr(""+b[i]); } return put1(']'); } // Most simple integers private AutoBuffer putJInt( int i ) { byte b[] = StringUtils.toBytes(i); return putA1(b,b.length); } public AutoBuffer putJSON1( byte b ) { return putJInt(b); } public AutoBuffer putJSONA1( byte ary[] ) { if( ary == null ) return putJNULL(); put1('['); for( int i=0; i<ary.length; i++ ) { if( i>0 ) put1(','); putJSON1(ary[i]); } return put1(']'); } private AutoBuffer putJSONAA1(byte ary[][]) { if( ary == null ) return putJNULL(); put1('['); for( int i=0; i<ary.length; i++ ) { if( i>0 ) put1(','); 
putJSONA1(ary[i]); } return put1(']'); } @SuppressWarnings("unused") public AutoBuffer putJSON1 (String name, byte b ) { return putJSONStr(name).put1(':').putJSON1(b); } @SuppressWarnings("unused") public AutoBuffer putJSONA1 (String name, byte b[] ) { return putJSONStr(name).put1(':').putJSONA1(b); } @SuppressWarnings("unused") public AutoBuffer putJSONAA1(String name, byte b[][]) { return putJSONStr(name).put1(':').putJSONAA1(b); } public AutoBuffer putJSONAEnum(String name, Enum[] enums) { return putJSONStr(name).put1(':').putJSONAEnum(enums); } public AutoBuffer putJSONAEnum( Enum[] enums ) { if( enums == null ) return putJNULL(); put1('['); for( int i=0; i<enums.length; i++ ) { if( i>0 ) put1(','); putJSONEnum(enums[i]); } return put1(']'); } AutoBuffer putJSON2( char c ) { return putJSON4(c); } AutoBuffer putJSON2( String name, char c ) { return putJSONStr(name).put1(':').putJSON2(c); } AutoBuffer putJSON2( short c ) { return putJSON4(c); } AutoBuffer putJSON2( String name, short c ) { return putJSONStr(name).put1(':').putJSON2(c); } public AutoBuffer putJSONA2( String name, short ary[] ) { return putJSONStr(name).put1(':').putJSONA2(ary); } AutoBuffer putJSONA2( short ary[] ) { if( ary == null ) return putJNULL(); put1('['); for( int i=0; i<ary.length; i++ ) { if( i>0 ) put1(','); putJSON2(ary[i]); } return put1(']'); } AutoBuffer putJSON8 ( long l ) { return putJStr(Long.toString(l)); } AutoBuffer putJSONA8( long ary[] ) { if( ary == null ) return putJNULL(); put1('['); for( int i=0; i<ary.length; i++ ) { if( i>0 ) put1(','); putJSON8(ary[i]); } return put1(']'); } AutoBuffer putJSONAA8( long ary[][] ) { if( ary == null ) return putJNULL(); put1('['); for( int i=0; i<ary.length; i++ ) { if( i>0 ) put1(','); putJSONA8(ary[i]); } return put1(']'); } AutoBuffer putJSONAAA8( long ary[][][] ) { if( ary == null ) return putJNULL(); put1('['); for( int i=0; i<ary.length; i++ ) { if( i>0 ) put1(','); putJSONAA8(ary[i]); } return put1(']'); } AutoBuffer putJSONEnum( Enum e ) { return e==null ? 
putJNULL() : put1('"').putJStr(e.toString()).put1('"'); } public AutoBuffer putJSON8 ( String name, long l ) { return putJSONStr(name).put1(':').putJSON8(l); } public AutoBuffer putJSONEnum( String name, Enum e ) { return putJSONStr(name).put1(':').putJSONEnum(e); } public AutoBuffer putJSONA8( String name, long ary[] ) { return putJSONStr(name).put1(':').putJSONA8(ary); } public AutoBuffer putJSONAA8( String name, long ary[][] ) { return putJSONStr(name).put1(':').putJSONAA8(ary); } public AutoBuffer putJSONAAA8( String name, long ary[][][] ) { return putJSONStr(name).put1(':').putJSONAAA8(ary); } public AutoBuffer putJSONZ(boolean b) { return putJStr(Boolean.toString(b)); } public AutoBuffer putJSON4(int i) { return putJStr(Integer.toString(i)); } AutoBuffer putJSONA4( int[] a) { if( a == null ) return putJNULL(); put1('['); for( int i=0; i<a.length; i++ ) { if( i>0 ) put1(','); putJSON4(a[i]); } return put1(']'); } AutoBuffer putJSONAA4( int[][] a ) { if( a == null ) return putJNULL(); put1('['); for( int i=0; i<a.length; i++ ) { if( i>0 ) put1(','); putJSONA4(a[i]); } return put1(']'); } AutoBuffer putJSONAAA4( int[][][] a ) { if( a == null ) return putJNULL(); put1('['); for( int i=0; i<a.length; i++ ) { if( i>0 ) put1(','); putJSONAA4(a[i]); } return put1(']'); } public AutoBuffer putJSON4 ( String name, int i ) { return putJSONStr(name).put1(':').putJSON4(i); } public AutoBuffer putJSONA4( String name, int[] a) { return putJSONStr(name).put1(':').putJSONA4(a); } public AutoBuffer putJSONAA4( String name, int[][] a ) { return putJSONStr(name).put1(':').putJSONAA4(a); } public AutoBuffer putJSONAAA4( String name, int[][][] a ) { return putJSONStr(name).put1(':').putJSONAAA4(a); } public AutoBuffer putJSON4f ( float f ) { return f==Float.POSITIVE_INFINITY?putJSONStr(JSON_POS_INF):(f==Float.NEGATIVE_INFINITY?putJSONStr(JSON_NEG_INF):(Float.isNaN(f)?putJSONStr(JSON_NAN):putJStr(Float .toString(f)))); } public AutoBuffer putJSON4f ( String name, float f ) { return putJSONStr(name).put1(':').putJSON4f(f); } AutoBuffer putJSONA4f( float[] a ) { if( a == null ) return putJNULL(); put1('['); for( int i=0; i<a.length; i++ ) { if( i>0 ) put1(','); putJSON4f(a[i]); } return put1(']'); } public AutoBuffer putJSONA4f(String name, float[] a) { putJSONStr(name).put1(':'); return putJSONA4f(a); } AutoBuffer putJSONAA4f(String name, float[][] a) { putJSONStr(name).put1(':'); if( a == null ) return putJNULL(); put1('['); for( int i=0; i<a.length; i++ ) { if( i>0 ) put1(','); putJSONA4f(a[i]); } return put1(']'); } public AutoBuffer putJSON8d( double d ) { if (TwoDimTable.isEmpty(d)) return putJNULL(); return d==Double.POSITIVE_INFINITY?putJSONStr(JSON_POS_INF):(d==Double.NEGATIVE_INFINITY?putJSONStr(JSON_NEG_INF):(Double.isNaN(d)?putJSONStr(JSON_NAN):putJStr(Double.toString(d)))); } public AutoBuffer putJSON8d( String name, double d ) { return putJSONStr(name).put1(':').putJSON8d(d); } public AutoBuffer putJSONA8d( String name, double[] a ) { return putJSONStr(name).put1(':').putJSONA8d(a); } public AutoBuffer putJSONAA8d( String name, double[][] a) { return putJSONStr(name).put1(':').putJSONAA8d(a); } public AutoBuffer putJSONAAA8d( String name, double[][][] a) { return putJSONStr(name).put1(':').putJSONAAA8d(a); } public AutoBuffer putJSONA8d( double[] a ) { if( a == null ) return putJNULL(); put1('['); for( int i=0; i<a.length; i++ ) { if( i>0 ) put1(','); putJSON8d(a[i]); } return put1(']'); } public AutoBuffer putJSONAA8d( double[][] a ) { if( a == null ) return putJNULL(); put1('['); for( int 
i=0; i<a.length; i++ ) { if( i>0 ) put1(','); putJSONA8d(a[i]); } return put1(']'); } AutoBuffer putJSONAAA8d( double ary[][][] ) { if( ary == null ) return putJNULL(); put1('['); for( int i=0; i<ary.length; i++ ) { if( i>0 ) put1(','); putJSONAA8d(ary[i]); } return put1(']'); } static final String JSON_NAN = "NaN"; static final String JSON_POS_INF = "Infinity"; static final String JSON_NEG_INF = "-Infinity"; }
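The putInt/getInt pair in AutoBuffer above implements the small-integer compression described in its comment: values from -1 to 252 fit in a single byte, short-range values take a 255 tag plus two bytes, and everything else takes a 254 tag plus four bytes. The following standalone sketch is our own illustration of that scheme, not part of H2O; it uses a plain java.nio.ByteBuffer rather than an AutoBuffer, and simply round-trips a few sample values.

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// Standalone illustration of the variable-length int encoding described in the
// putInt/getInt comments above. Names here are made up for the example.
public class CompressedIntSketch {

  static void putInt(ByteBuffer bb, int x) {
    if (0 <= (x + 1) && (x + 1) <= 253) {       // small values, including -1
      bb.put((byte) (x + 1));
    } else if (Short.MIN_VALUE <= x && x <= Short.MAX_VALUE) {
      bb.put((byte) 255).putShort((short) x);   // 255 tag + 2-byte payload
    } else {
      bb.put((byte) 254).putInt(x);             // 254 tag + 4-byte payload
    }
  }

  static int getInt(ByteBuffer bb) {
    int tag = bb.get() & 0xFF;
    if (tag <= 253) return tag - 1;             // undo the +1 bias
    if (tag == 255) return bb.getShort();
    assert tag == 254;
    return bb.getInt();
  }

  public static void main(String[] args) {
    ByteBuffer bb = ByteBuffer.allocate(32).order(ByteOrder.nativeOrder());
    int[] samples = {-1, 0, 252, 300, -40000, 1 << 20};
    for (int v : samples) putInt(bb, v);
    bb.flip();
    for (int v : samples) {
      int r = getInt(bb);
      System.out.println(v + " -> " + r + (v == r ? " (ok)" : " (MISMATCH)"));
    }
  }
}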
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/BootstrapFreezable.java
package water; /** * Marker interface - can be used to annotate Freezables that are part of the bootstrap class collection. * BootstrapFreezables can be used to exchange H2O-serialized data between different cluster instances * without a need to remap type ids, because type ids are fixed. * * This interface also limits deserialization to known classes with safe behavior. * * @param <T> */ public interface BootstrapFreezable<T extends BootstrapFreezable<T>> extends Freezable<T> { }
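Because BootstrapFreezables carry fixed type ids, they can be moved between cluster instances with the serializeBootstrapFreezable/deserializeBootstrapFreezable helpers defined in AutoBuffer above. A minimal sketch follows, assuming some concrete BootstrapFreezable implementation is available (none is shown in this file).

import water.AutoBuffer;
import water.BootstrapFreezable;

// Sketch only: `value` stands for any concrete BootstrapFreezable implementation.
public class BootstrapRoundTripSketch {

  // Serialize without shipping a TypeMap - the fixed ids make the payload
  // usable across cluster instances as-is.
  static byte[] toBytes(BootstrapFreezable<?> value) {
    return AutoBuffer.serializeBootstrapFreezable(value);
  }

  // Deserialization is restricted to the bootstrap class collection,
  // which limits what can be instantiated from the incoming bytes.
  static BootstrapFreezable<?> fromBytes(byte[] payload) {
    return AutoBuffer.deserializeBootstrapFreezable(payload);
  }
}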
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/ChunkSplitter.java
package water; import water.fvec.Chunk; import water.fvec.NewChunk; import water.util.Log; /** Helper to provide access to package-hidden methods and attributes. */ public class ChunkSplitter { /** Extract a portion of the given chunk into the given output chunk. */ public static void extractChunkPart(Chunk ic, Chunk oc, int startRow, int nrows, Futures fs) { try { NewChunk dst = new NewChunk(oc); dst._len = dst._sparseLen = 0; // Iterate over the values, skipping all zeros ic.extractRows(dst, startRow,startRow+nrows); // Handle the case when the last added value is followed by zeros up to startRow+nrows assert dst._len == oc._len : "NewChunk.dst.len = " + dst._len + ", oc._len = " + oc._len; dst.close(dst.cidx(), fs); } catch(RuntimeException t){ Log.err("got exception in chunkSplitter, ic = " + ic + ", oc = " + oc + " startRow = " + startRow + " nrows = " + nrows); throw t; } } }
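A minimal usage sketch for extractChunkPart follows. It assumes the caller already holds an input chunk ic and an output chunk oc sized to exactly nrows rows (how those chunks are created is outside this helper and not shown here).

import water.ChunkSplitter;
import water.Futures;
import water.fvec.Chunk;

// Hypothetical wrapper around ChunkSplitter.extractChunkPart; the chunks are
// assumed to come from elsewhere, and `oc` must already have length `nrows`
// (the assert inside extractChunkPart checks exactly that).
public class ChunkSplitSketch {
  static void copyPart(Chunk ic, Chunk oc, int startRow, int nrows) {
    Futures fs = new Futures();
    ChunkSplitter.extractChunkPart(ic, oc, startRow, nrows, fs);
    fs.blockForPending();  // wait for the destination chunk to be closed
  }
}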
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/Cleaner.java
package water; import java.io.FileNotFoundException; import java.io.IOException; import java.util.Arrays; import water.fvec.Chunk; import water.util.Log; import water.util.PrettyPrint; /** Store Cleaner: User-Mode Swap-To-Disk */ class Cleaner extends Thread { // msec time at which the STORE was dirtied. // Long.MAX_VALUE if clean. static private volatile long _dirty; // When was store dirtied static long dirty() { return _dirty; } // exposed for testing only static void dirty_store() { dirty_store(System.currentTimeMillis()); } static void dirty_store( long x ) { // Keep earliest dirty time seen if( x < _dirty ) _dirty = x; } static volatile long HEAP_USED_AT_LAST_GC; static volatile long KV_USED_AT_LAST_GC; static volatile long TIME_AT_LAST_GC=System.currentTimeMillis(); static final Cleaner THE_CLEANER = new Cleaner(); static void kick_store_cleaner() { synchronized(THE_CLEANER) { THE_CLEANER.notifyAll(); } } private static void block_store_cleaner() { synchronized(THE_CLEANER) { try { THE_CLEANER.wait(5000); } catch (InterruptedException ignore) { } } } volatile boolean _did_sweep; static void block_for_test() throws InterruptedException { THE_CLEANER._did_sweep = false; synchronized(THE_CLEANER) { while( !THE_CLEANER._did_sweep ) THE_CLEANER.wait(); } } // Desired cache level. Set by the MemoryManager asynchronously. static volatile long DESIRED; Cleaner() { super("MemCleaner"); setDaemon(true); setPriority(MAX_PRIORITY-2); _dirty = Long.MAX_VALUE; // Set to clean-store Histo.current(true); // Build/allocate a first histogram Histo.current(true); // Force a recompute with a good eldest MemoryManager.set_goals("init",false); } public static boolean isDiskFull() { // free disk space < 5K? long space = availableDiskSpace(); return space >= 0 && space < (5 << 10); } public static long availableDiskSpace() { return H2O.getPM().getIce().getUsableSpace(); } // Cleaner thread runs in a forever loop. (This call cannot be synchronized, // lest we hold the lock during a (very long) clean process - and various // async callbacks attempt to "kick" the Cleaner awake - which will require // taking the lock... blocking the kicking thread for the duration. @Override /*synchronized*/ public void run() { boolean diskFull = false; while( true ) { // Sweep the K/V store, writing out Values (cleaning) and free'ing // - Clean all "old" values (lazily, optimistically) // - Clean and free old values if above the desired cache level // Do not let optimistic cleaning get in the way of emergency cleaning. // Get a recent histogram, computing one as needed Histo h = Histo.current(false); long now = System.currentTimeMillis(); long dirty = _dirty; // When things first got dirtied // Start cleaning if: "dirty" was set a "long" time ago, or we beyond // the desired cache levels. Inverse: go back to sleep if the cache // is below desired levels & nothing has been dirty awhile. if( h._cached < DESIRED && // Cache is low and (now-dirty < 5000) ) { // not dirty a long time // Block asleep, waking every 5 secs to check for stuff, or when poked block_store_cleaner(); continue; // Awoke; loop back and re-check histogram. } now = System.currentTimeMillis(); _dirty = Long.MAX_VALUE; // Reset, since we are going write stuff out MemoryManager.set_goals("preclean",false); // The age beyond which we need to toss out things to hit the desired // caching levels. If forced, be exact (toss out the minimal amount). // If lazy, store-to-disk things down to 1/2 the desired cache level // and anything older than 5 secs. 
boolean force = (h._cached >= DESIRED || !MemoryManager.CAN_ALLOC); // Forced to clean if( force && diskFull ) // Try to clean the diskFull flag diskFull = isDiskFull(); long clean_to_age = h.clean_to(force ? DESIRED : (DESIRED>>1)); // If not forced cleaning, expand the cleaning age to allows Values // more than 5sec old if( !force ) clean_to_age = Math.max(clean_to_age,now-5000); if( DESIRED == -1 ) clean_to_age = now; // Test mode: clean all // No logging if under memory pressure: can deadlock the cleaner thread String s = h+" DESIRED="+(DESIRED>>20)+"M dirtysince="+(now-dirty)+" force="+force+" clean2age="+(now-clean_to_age); if( MemoryManager.canAlloc() ) Log.debug(s); else System.err.println(s); long cleaned = 0; // Disk i/o bytes long freed = 0; // memory freed bytes long io_ns = 0; // i/o ns writing // For faster K/V store walking get the NBHM raw backing array, // and walk it directly. Object[] kvs = H2O.STORE.raw_array(); // Start the walk at slot 2, because slots 0,1 hold meta-data for( int i=2; i<kvs.length; i += 2 ) { // In the raw backing array, Keys and Values alternate in slots Object ok = kvs[i], ov = kvs[i+1]; if( !(ok instanceof Key ) ) continue; // Ignore tombstones and Primes and null's if( !(ov instanceof Value) ) continue; // Ignore tombstones and Primes and null's Value val = (Value)ov; byte[] m = val.rawMem(); Object p = val.rawPOJO(); if( m == null && p == null ) continue; // Nothing to throw out if( val.isLockable() ) continue; // we do not want to throw out Lockables. boolean isChunk = p instanceof Chunk && !((Chunk)p).isVolatile(); // Ignore things younger than the required age. In particular, do // not spill-to-disk all dirty things we find. long touched = val._lastAccessedTime; if( touched > clean_to_age ) { // Too recently touched? // But can toss out a byte-array if already deserialized & on disk // (no need for both forms). Note no savings for Chunks, for which m==p._mem if( val.isPersisted() && m != null && p != null && !isChunk ) { val.freeMem(); // Toss serialized form, since can rebuild from POJO freed += val._max; } dirty_store(touched); // But may write it out later continue; // Too young } // Spiller turned off? if( !H2O.ARGS.cleaner ) continue; // CNC - Memory cleaning turned off, except for Chunks // Too many POJOs are written to dynamically; cannot spill & reload // them without losing changes. // Should I write this value out to disk? // Should I further force it from memory? if( isChunk && !val.isPersisted() && !diskFull && ((Key)ok).home() ) { // && (force || (lazyPersist() && lazy_clean(key)))) { long now_ns = System.nanoTime(); try { val.storePersist(); } // Write to disk catch( FileNotFoundException fnfe ) { continue; } // Can happen due to racing key delete/remove catch( IOException e ) { Log.warn( isDiskFull() ? "Disk full! Disabling swapping to disk." + (force?" Memory low! Please free some space in " + H2O.ICE_ROOT + "!":"") : "Disk swapping failed! " + e.getMessage()); // Something is wrong so mark disk as full anyways so we do not // attempt to write again. 
(will retry next run when memory is low) diskFull = true; } if( m == null ) m = val.rawMem(); if( m != null ) cleaned += m.length; // Accumulate i/o bytes io_ns += System.nanoTime() - now_ns; // Accumulate i/o time } // And, under pressure, free all if( isChunk && force && (val.isPersisted() || !((Key)ok).home()) ) { val.freeMem (); if( m != null ) freed += val._max; m = null; val.freePOJO(); if( p != null ) freed += val._max; p = null; if( isChunk ) freed -= val._max; // Double-counted freed mem for Chunks since val._pojo._mem & val._mem are the same. } // If we have both forms, toss the byte[] form - can be had by // serializing again. if( m != null && p != null && !isChunk ) { val.freeMem(); freed += val._max; } // If a GC cycle happened and we can no longer alloc, start forcing // from RAM as we go force = (h._cached >= DESIRED || !MemoryManager.CAN_ALLOC); // Forced to clean } String s1 = "Cleaner pass took: "+PrettyPrint.msecs(System.currentTimeMillis()-now,true)+ ", spilled "+PrettyPrint.bytes(cleaned)+" in "+PrettyPrint.usecs(io_ns>>10); h = Histo.current(true); // Force a new histogram MemoryManager.set_goals("postclean",false); // No logging if under memory pressure: can deadlock the cleaner thread String s2 = h+" diski_o="+PrettyPrint.bytes(cleaned)+", freed="+(freed>>20)+"M, DESIRED="+(DESIRED>>20)+"M"; if( MemoryManager.canAlloc() ) Log.debug(s1,s2); else System.err.println(s1+"\n"+s2); // For testing thread synchronized(this) { _did_sweep = true; if( DESIRED == -1 ) DESIRED = 0; // Turn off test-mode after 1 sweep notifyAll(); // Wake up testing thread } } } // Histogram class static class Histo { // Current best histogram static private volatile Histo H; // Return the current best histogram, recomputing in-place if it is getting // stale. Synchronized so the same histogram can be called into here and // will be only computed into one-at-a-time. synchronized static Histo current( boolean force ) { final Histo h = H; // Grab current best histogram if( !force && System.currentTimeMillis() < h._when+2000 ) return h; // It is recent; use it if( h != null && h._clean && _dirty==Long.MAX_VALUE ) return h; // No change to the K/V store, so no point // Use last oldest value for computing the next histogram in-place return (H = new Histo(h==null ? 0 : h._oldest)); // Record current best histogram & return it } // Latest best-effort cached amount, without forcing a histogram to be // built nor blocking for one being in-progress. static long cached() { return H._cached; } static long swapped(){ return H._swapped;} final long[] _hs = new long[128]; long _oldest; // Time of the oldest K/V discovered this pass long _eldest; // Time of the eldest K/V found in some prior pass long _hStep; // Histogram step: (now-eldest)/histogram.length long _cached; // Total alive data in the histogram long _total; // Total data in local K/V long _when; // When was this histogram computed long _swapped;// On-disk stuff Value _vold; // For assertions: record the oldest Value boolean _clean; // Was "clean" K/V when built? 
// Compute a histogram Histo( long eldest ) { Arrays.fill(_hs, 0); _when = System.currentTimeMillis(); _eldest = eldest; // Eldest seen in some prior pass _hStep = Math.max(1,(_when-eldest)/_hs.length); boolean clean = _dirty==Long.MAX_VALUE; // Compute the hard way Object[] kvs = H2O.STORE.raw_array(); long cached = 0; // Total K/V cached in ram long total = 0; // Total K/V in local node long swapped=0; // Total K/V persisted long oldest = Long.MAX_VALUE; // K/V with the longest time since being touched Value vold = null; // Start the walk at slot 2, because slots 0,1 hold meta-data for( int i=2; i<kvs.length; i += 2 ) { // In the raw backing array, Keys and Values alternate in slots Object ok = kvs[i], ov = kvs[i+1]; if( !(ok instanceof Key ) ) continue; // Ignore tombstones and Primes and null's if( !(ov instanceof Value) ) continue; // Ignore tombstones and Primes and null's Value val = (Value)ov; if( val.isNull() ) { Value.STORE_get(val._key); continue; } // Another flavor of NULL total += val._max; if( val.isPersisted() ) swapped += val._max; int len = 0; byte[] m = val.rawMem(); Object p = val.rawPOJO(); if( m != null ) len += val._max; if( p != null ) len += val._max; if( m != null && p instanceof Chunk ) len -= val._max; // Do not double-count Chunks if( len == 0 ) continue; cached += len; // Accumulate total amount of cached keys if( val._lastAccessedTime < oldest ) { // Found an older Value? vold = val; // Record oldest Value seen oldest = val._lastAccessedTime; } // Compute histogram bucket int idx = (int)((val._lastAccessedTime - eldest)/_hStep); if( idx < 0 ) idx = 0; else if( idx >= _hs.length ) idx = _hs.length-1; _hs[idx] += len; // Bump histogram bucket } _cached = cached; // Total cached; NOTE: larger than sum of histogram buckets _total = total; // Total used data _swapped = swapped; _oldest = oldest; // Oldest seen in this pass _vold = vold; _clean = clean && _dirty==Long.MAX_VALUE; // Looks like a clean K/V the whole time? } // Compute the time (in msec) for which we need to throw out things // to throw out enough things to hit the desired cached memory level. long clean_to( long desired ) { long age = _eldest; // Age of bucket zero if( _cached < desired ) return age; // Already there; nothing to remove long s = 0; // Total amount toss out for( long t : _hs ) { // For all buckets... s += t; // Raise amount tossed out age += _hStep; // Raise age beyond which you need to go if( _cached - s < desired ) break; } return age; } // Pretty print @Override public String toString() { long x = _eldest; long now = System.currentTimeMillis(); return "H(cached:"+(_cached>>20)+"M, eldest:"+x+"L < +"+(_oldest-x)+"ms <...{"+_hStep+"ms}...< +"+(_hStep*_hs.length)+"ms < +"+(now-x)+")"; } } }
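The Histo.clean_to method above picks the age threshold for eviction by walking the histogram buckets from oldest to youngest until enough cached bytes would be freed to reach the desired cache size. The standalone sketch below (invented names and numbers, not the Cleaner class itself) shows the same walk on a small example.

// Standalone illustration of the clean_to idea: accumulate evictable bytes
// bucket by bucket and stop once the cache would drop below the target.
public class CleanToSketch {
  static long cleanTo(long[] buckets, long bucketMsec, long eldest,
                      long cached, long desired) {
    long age = eldest;                  // age of bucket zero
    if (cached < desired) return age;   // already under the target
    long freed = 0;
    for (long bytesInBucket : buckets) {
      freed += bytesInBucket;           // evict everything at least this old
      age += bucketMsec;                // threshold moves one bucket younger
      if (cached - freed < desired) break;
    }
    return age;                         // evict values last touched before `age`
  }

  public static void main(String[] args) {
    long[] buckets = {100, 400, 300, 200};   // bytes cached per age bucket
    long threshold = cleanTo(buckets, 1000, 0, 1000, 600);
    System.out.println("evict values older than t=" + threshold + "ms");
  }
}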
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/ClientDisconnectCheckThread.java
package water; class ClientDisconnectCheckThread extends Thread { public ClientDisconnectCheckThread() { super("ClientDisconnectCheckThread"); setDaemon(true); } private boolean isTimeoutExceeded(H2ONode client, long timeout) { return (System.currentTimeMillis() - client._last_heard_from) >= timeout; } /** * Handles a client that is considered disconnected from this node due to some problem, such as the client or the network * being unreachable. */ static void handleClientDisconnect(H2ONode client) { if(client != H2O.SELF) { if (H2O.isFlatfileEnabled()) { H2O.removeNodeFromFlatfile(client); } H2O.removeClient(client); } } @Override public void run() { while (true) { for(H2ONode client: H2O.getClients()){ if(isTimeoutExceeded(client, H2O.ARGS.clientDisconnectTimeout)){ handleClientDisconnect(client); } } try { Thread.sleep(H2O.ARGS.clientDisconnectTimeout); } catch (InterruptedException ignore) {} } } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/ClientRandomDisconnectThread.java
package water; import water.util.Log; import java.util.Random; // Emulates Random Client disconnects public class ClientRandomDisconnectThread extends Thread { public ClientRandomDisconnectThread() { super("ClientRandomDisconnectThread"); setDaemon(true); } @Override public void run() { Log.warn("-----------------------------------------------------------"); Log.warn("| Random Client Disconnect Attack - for development only! |"); Log.warn("-----------------------------------------------------------"); try { Thread.sleep(H2O.ARGS.clientDisconnectTimeout); } catch (InterruptedException ignore) {} Random r = new Random(); while (true) { final int timeout = r.nextInt((int) H2O.ARGS.clientDisconnectTimeout / 10); Log.warn("Random Attack: Clients will get killed in " + timeout + "ms."); try { Thread.sleep(timeout); } catch (InterruptedException ignore) {} for (H2ONode client: H2O.getClients()) { if (client != H2O.SELF) { Log.warn("Random Attack: Emulating client disconnect: " + client._key); ClientDisconnectCheckThread.handleClientDisconnect(client); } } } } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/DKV.java
package water; /** A Distributed Key/Value Store. * <p> * Functions to Get and Put Values into the K/V store by Key. * <p> * The <em>Java Memory Model</em> is observed for all operations. Reads/Gets * will block until the data is available, and will pull from the local cache * is possible. * <p> * Writes/Puts do not block directly, but take a Futures argument. Typically * a Put requires some kind of coherency traffic and perhaps multiple network * hops. The Futures argument can be used to tell when when a given Put (or a * collection of them) has completed. Calls to Put without a Futures merely * make one internally and block till completion. * <p> * <em><b>Performance Concerns</b></em> * <p> * Keys can be cached locally, or not. Cached reads take no more time than a * NonBlockingHashMap lookup (typically a hundred nanos or so). Remote reads * require the serialized POJO to pass over the network, plus a little bit of * management logic; time is typically completely determined by network speeds * and object size. * <p> * Local Puts (one where the Key is homed on this Node) update directly in the * local K/V store, taking no more time than a NonBlockingHashMap write. * Remote Puts will serialize and ship data over the wire, taking time related * to object size and network speed. * <p> * Blocking for a Put to complete takes longer, requiring all invalidates to * have happened and perhaps a response from the home node (multiple * network-hop latencies); the invalidates and response are typically a single * UDP packet, but must make a round-trip. * <p> * Puts to unrelated Keys can all proceed in parallel, and will typically be * network bound, and can be blocked for in bulk by a single Futures argument. * <p> * Puts to the same Key will be serialized (the first Put will fully complete, * including all invalidates, before a 2nd Put to the same Key from the same * Node can proceed). Assuming no other Node does a Get on this Key, no * invalidates will be required for the 2nd and later Puts and they will need * only the single round-trip. * <p> * Note that this class works on one Key at a time, and does not understand * composite Key structures (such as a {@link water.fvec.Vec} Key and all its related * {@link water.fvec.Chunk} Keys - instead it serves as the building block for such * structures. * <p> * @author <a href="mailto:cliffc@h2o.ai"></a> * @version 1.0 */ public abstract class DKV { /** Make the mapping <em>key -&gt; v</em>. Blocking, caching. */ static public Value put( Key key, Iced v ) { return put(key,new Value(key,v)); } /** Make the mapping <em>key -&gt; v</em>. Caching. */ static public Value put( Key key, Iced v, Futures fs ) { return put(key,new Value(key,v),fs); } /** Make the mapping <em>key -&gt; v</em>. */ static public Value put( Key key, Iced v, Futures fs,boolean dontCache ) { return put(key,new Value(key,v),fs,dontCache); } /** Make the mapping <em>keyed._key -&gt; keyed</em>. Blocking, caching. */ static public Value put( Keyed keyed ) { return put(keyed._key,new Value(keyed._key,keyed)); } /** Make the mapping <em>keyed._key -&gt; keyed</em>. Caching. */ static public Value put( Keyed keyed, Futures fs ) { return put(keyed._key,new Value(keyed._key,keyed),fs); } /** Make the mapping <em>key -&gt; val</em>. Blocking, caching. */ static public Value put( Key key, Value val ) { Futures fs = new Futures(); Value old = put(key,val,fs); fs.blockForPending(); return old; } /** Make the mapping <em>key -&gt; val</em>. Caching. 
*/ static public Value put( Key key, Value val, Futures fs ) { return put(key,val,fs,false); } /** Make the mapping <em>key -&gt; val</em>. */ static public Value put( Key key, Value val, Futures fs, boolean dontCache ) { assert key != null; assert val==null || val._key == key:"non-matching keys " + key + " != " + val._key; while( true ) { Value old = Value.STORE_get(key); // Raw-get: do not lazy-manifest if overwriting Value res = DputIfMatch(key,val,old,fs,dontCache); if( res == old ) return old; // PUT is globally visible now? if( val != null && val._key != key ) key = val._key; } } /** Remove any mapping for <em>key</em>. Blocking. */ static public Value remove( Key key ) { return put(key,null); } /** Remove any mapping for <em>key</em>. */ static public Value remove( Key key, Futures fs ) { return put(key,null,fs); } /** Default caching call to {@link #DputIfMatch(Key,Value,Value,Futures,boolean)} */ static public Value DputIfMatch( Key key, Value val, Value old, Futures fs) { return DputIfMatch(key, val, old, fs, false); } /** Update the mapping for Key <em>key</em>, from Value <em>old</em> to Value * <em>val</em>. Fails if the Key is not mapped to <em>old</em>, returning * the Value it IS mapped to. Takes a required {@link Futures}, which can * be used to note when the operation has completed globally. If the * <em>dontCache</em> hint is passed in, the Value <em>val</em> is NOT * cached locally, useful streaming a large dataset through and expecting * most of the data to eventually be homed remotely. * <p> * Additionally, this operation <em>locks</em> the Cloud to the current size. * No new Nodes may join after a Key is successfully entered into the DKV. * <p> * @return The Value this Key used to be mapped to; if the returned * Value.equals(old) then the update succeeded, else it failed. */ static public Value DputIfMatch( Key key, Value val, Value old, Futures fs, boolean dontCache ) { // For debugging where keys are created from // try { System.err.flush(); System.err.println(key); Thread.dumpStack(); System.err.flush(); } catch (Throwable t) {} // First: I must block repeated remote PUTs to the same Key until all prior // ones complete - the home node needs to see these PUTs in order. // Repeated PUTs on the home node are already ordered. if( old != null && !key.home() ) old.startRemotePut(); // local update first, since this is a weak update if( val == null && key.home() ) val = Value.makeNull(key); Value res = H2O.putIfMatch(key,val,old); if( res != old ) // Failed? return res; // Return fail value // Check for trivial success: no need to invalidate remotes if the new // value equals the old. if( old != null && old == val ) { System.out.println("No invalidate, new==old"); return old; // Trivial success? } if( old != null && val != null && val.equals(old) ) { System.out.println("No invalidate, new.equals(old)"); return old; // Less trivial success, but no network i/o } // Before we start doing distributed writes... block until the cloud // stabilizes. After we start doing distributed writes, it is an error to // change cloud shape - the distributed writes will be in the wrong place. Paxos.lockCloud(key); // The 'D' part of DputIfMatch: do Distribution. // If PUT is on HOME, invalidate remote caches // If PUT is on non-HOME, replicate/push to HOME if( key.home() ) { // On HOME? if( old != null ) old.lockAndInvalidate(H2O.SELF,val,fs); else val.lowerActiveGetCount(null); // Remove initial read-lock, accounting for pending inv counts } else { // On non-HOME? 
// Start a write, but do not block for it TaskPutKey.put(key.home_node(),key,val,fs, dontCache); } return old; } // Stall until all existing writes have completed. // Used to order successive writes. static void write_barrier() { for( H2ONode h2o : H2O.CLOUD._memary ) for( RPC rpc : h2o.tasks() ) if( rpc._dt instanceof TaskPutKey || rpc._dt instanceof Atomic ) rpc.get(); } static public <T extends Iced> T getGet(String key) { return key == null ? null : (T)getGet(Key.make(key)); } static public <T extends Iced> T getGet(Key key) { if (null == key) return null; Value v = get(key); if (null == v) return null; return v.get(); } /** Return the {@link Value} mapped to Key <em>key</em>, or null if no * mapping. Blocks till data available, always caches. * @return The {@link Value} mapped to Key <em>key</em>, or null if no * mapping. */ static public Value get ( Key key ) { return get(key,true ); } /** Prefetch and cache the Value for Key <em>key</em>. Non-blocking. */ static public void prefetch( Key key ) { get(key,false); } /** Return the {@link Value} mapped to Key formed by <em>key_name</em>, or * null if no mapping. Blocks till data available, always caches. * @return The {@link Value} mapped to Key formed by <em>key_name</em>, or * null if no mapping. */ static public Value get ( String key_name) { return get(Key.make(key_name),true ); } /** Prefetch and cache the Value for Key formed by <em>key_name</em>. * Non-blocking. */ static public void prefetch( String key_name ) { get(Key.make(key_name),false); } static private Value get( Key key, boolean blocking ) { // Read the Cloud once per put-attempt, to keep a consistent snapshot. H2O cloud = H2O.CLOUD; Value val = Value.STORE_get(key); // Hit in local cache? if( val != null ) { if( val.rawMem() != null || val.rawPOJO() != null || val.isPersisted() ) return val; assert !key.home(); // Master must have *something*; we got nothing & need to fetch } // While in theory we could read from any replica, we always need to // inform the home-node that his copy has been Shared... in case it // changes and he needs to issue an invalidate. For now, always and only // fetch from the Home node. H2ONode home = cloud._memary[key.home(cloud)]; // If we missed in the cache AND we are the home node, then there is // no V for this K (or we have a disk failure). if( home == H2O.SELF ) return null; // Pending write to same key from this node? Take that write instead. // Moral equivalent of "peeking into the cpu store buffer". Can happen, // e.g., because a prior 'put' of a null (i.e. a remove) is still mid- // send to the remote, so the local get has missed above, but a remote // get still might 'win' because the remote 'remove' is still in-progress. TaskPutKey tpk = home.pendingPutKey(key); if( tpk != null ) return tpk._xval == null || tpk._xval.isNull() ? null : tpk._xval; // Get data "the hard way" RPC<TaskGetKey> tgk = TaskGetKey.start(home,key); return blocking ? TaskGetKey.get(tgk) : null; } }
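// Usage sketch (illustrative only) of the DKV API defined above. `DkvUsageSketch`
// and `MyCounter` are hypothetical names introduced for this example; the DKV,
// Key, Futures and Iced calls are the ones declared in this file.
import water.DKV;
import water.Futures;
import water.Iced;
import water.Key;

class DkvUsageSketch {
  // A minimal user POJO; any Iced subclass can be stored in the DKV.
  static class MyCounter extends Iced<MyCounter> { int _n; }

  static void example() {
    Key k = Key.make("my_counter");
    DKV.put(k, new MyCounter());        // blocking put: globally visible on return

    Futures fs = new Futures();
    MyCounter c = new MyCounter();
    c._n = 42;
    DKV.put(k, c, fs);                  // non-blocking put, completion tracked in fs
    fs.blockForPending();               // block once for the whole batch

    DKV.prefetch(k);                    // warm the local cache, non-blocking
    MyCounter back = DKV.getGet(k);     // blocking fetch straight to the POJO
    System.out.println("fetched _n=" + back._n);
    DKV.remove(k);                      // blocking remove
  }
}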
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/DKVManager.java
package water; import hex.Model; import water.fvec.Frame; import water.fvec.Vec; import java.util.*; public class DKVManager { public static void clear() { // Bulk brainless key removal. Completely wipes all Keys without regard. new MRTask(H2O.MIN_HI_PRIORITY) { @Override public void setupLocal() { H2O.raw_clear(); water.fvec.Vec.ESPC.clear(); } }.doAllNodes(); // Wipe the backing store without regard as well H2O.getPM().getIce().cleanUp(); H2O.updateNotIdle(); } /** * Clears keys in all H2O nodes, except for the ones marked as retained. * Only Model and Frame keys may be retained; providing an existing key of any other type results in an IllegalArgumentException. * <p> * Model's training and validation frames are retained automatically with the specified model. However, cross validation models are NOT retained. * * @param retainedKeys Keys of {@link Frame}s and {@link Model}s to be retained. Only Frame and Model keys are accepted. */ public static void retain(final Key... retainedKeys) { final Set<Key> retainedSet = new HashSet<>(retainedKeys.length); retainedSet.addAll(Arrays.asList(retainedKeys)); // Frames and models have multiple nested keys. Those must be extracted and kept from deletion as well. extractNestedKeys(retainedSet); final Key[] allRetainedKeys = retainedSet.toArray(new Key[retainedSet.size()]); final NodeKeysRemovalTask nodeKeysRemovalTask = new NodeKeysRemovalTask(allRetainedKeys); for (final H2ONode node : H2O.CLOUD.members()) { H2O.runOnH2ONode(node, nodeKeysRemovalTask); } } /** * Iterates through the keys provided by the user, dropping any keys that are not a Model key or a Frame key. * Afterwards, extracts the keys nested inside the retained Frames (their Vec keys) so these are kept from deletion as well. * * @param retainedKeys A {@link Set} of retained keys to insert the extracted {@link Frame} and {@link Model} keys to. * Should contain user-specified keys to retain in order to extract anything. * @throws IllegalArgumentException If any of the keys given to be retained is not a Model key nor a Frame key */ private static void extractNestedKeys(final Set<Key> retainedKeys) throws IllegalArgumentException { final Iterator<Key> keysIterator = retainedKeys.iterator(); // Traverse keys provided by the user only. final Set<Key> newKeys = new HashSet<>(); // Avoid concurrent modification of retainedKeys set + avoid introducing locking & internally synchronized set structures while (keysIterator.hasNext()) { final Key key = keysIterator.next(); final Value value = DKV.get(key); if (value == null || value.isNull()) { continue; // Ignore missing values } else if (!value.isFrame() && !value.isModel()) { throw new IllegalArgumentException(String.format("Given key %s is of type %d. Please provide only Model and Frame keys.", key.toString(), value.type())); } else if (value.isFrame()) { extractFrameKeys(newKeys, value.get()); } } retainedKeys.addAll(newKeys); // Add the newly found keys to the original retainedKeys set after the iteration to avoid concurrent modification } /** * Extracts keys a {@link Frame} points to. * * @param retainedkeys A set of retained keys to insert the extracted {@link Frame} keys to. * @param frame An instance of {@link Frame} to extract the keys from.
*/ private static void extractFrameKeys(final Set<Key> retainedkeys, final Frame frame) { Objects.requireNonNull(frame); final Key<Vec>[] frameKeys = frame.keys(); for (Key k : frameKeys) { retainedkeys.add(k); } } private static final class NodeKeysRemovalTask extends H2O.RemoteRunnable<NodeKeysRemovalTask> { private final Key[] _ignoredKeys; private NodeKeysRemovalTask(final Key[] retainedKeys) { _ignoredKeys = retainedKeys; } @Override public void run() { final Set<Key> keys = H2O.localKeySet(); final Set<Key> ignoredSet = new HashSet<>(); final Futures futures = new Futures(); for (final Key ignoredKey : _ignoredKeys) { ignoredSet.add(ignoredKey); } for (final Key key : keys) { if (ignoredSet.contains(key)) continue; // Do not perform DKV.get at all if the key is to be ignored final Value value = DKV.get(key); if (value == null || value.isNull()) continue; if (value.isModel()) { Keyed.remove(key, futures, false); } else if (value.isFrame()) { final Frame frame = value.get(); frame.retain(futures, ignoredSet); } futures.blockForPending(); // Delete one key at a time. } } } }
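// Usage sketch (illustrative only) for DKVManager above. The key names
// "train.hex" and "my_model" are placeholders for existing Frame and Model
// keys; retain() accepts only Frame and Model keys and removes everything else.
import water.DKVManager;
import water.Key;

class DkvManagerUsageSketch {
  static void example() {
    Key trainFrame = Key.make("train.hex");   // assumed to name an existing Frame
    Key model      = Key.make("my_model");    // assumed to name an existing Model
    // Keep these two objects (plus the Frame's nested Vec keys, which are
    // extracted automatically); drop every other key on every node.
    DKVManager.retain(trainFrame, model);
    // Or wipe the whole distributed store:
    // DKVManager.clear();
  }
}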
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/DTask.java
package water; import jsr166y.CountedCompleter; import water.H2O.H2OCountedCompleter; import water.util.DistributedException; import java.io.*; /** Objects which are passed and {@link #dinvoke} is remotely executed.<p> * <p> * Efficient serialization methods for subclasses will be automatically * generated, but explicit ones can be provided. Transient fields will * <em>not</em> be mirrored between the VMs. * <ol> * <li>On the local vm, this task will be serialized and sent to a remote.</li> * <li>On the remote, the task will be deserialized.</li> * <li>On the remote, the {@link #dinvoke(H2ONode)} method will be executed.</li> * <li>On the remote, the task will be serialized and sent to the local vm</li> * <li>On the local vm, the task will be deserialized * <em>into the original instance</em></li> * <li>On the local vm, the {@link #onAck()} method will be executed.</li> * <li>On the remote, the {@link #onAckAck()} method will be executed.</li> * </ol> * */ public abstract class DTask<T extends DTask> extends H2OCountedCompleter<T> { protected DTask(H2OCountedCompleter completer, byte prior){super(completer,prior);} protected DTask(H2OCountedCompleter completer){super(completer);} protected DTask(byte prior) { super(prior); } protected DTask() { super(); } /** A distributable exception object, thrown by {@link #dinvoke}. */ protected byte[] _ex; /** True if {@link #dinvoke} threw an exception. * @return True if _ex is non-null */ public final boolean hasException() { return _ex != null; } /** Capture the first exception in _ex. Later setException attempts are ignored. */ public synchronized void setException(Throwable ex) { if(_ex == null) { _ex = AutoBuffer.javaSerializeWritePojo(((ex instanceof DistributedException) ? (DistributedException) ex : new DistributedException(ex,false /* don't want this setException(ex) call in the stacktrace */))); } } /** The _ex field as a RuntimeException or null. * @return The _ex field as a RuntimeException or null. */ public Throwable getDException() {return _ex == null?null:(Throwable)AutoBuffer.javaSerializeReadPojo(_ex);} // Track if the reply came via TCP - which means a timeout on ACKing the TCP // result does NOT need to get the entire result again, just that the client // needs more time to process the TCP result. transient boolean _repliedTcp; // Any return/reply/result was sent via TCP /** Top-level remote execution hook. Called on the <em>remote</em>. */ public void dinvoke( H2ONode sender ) { // note: intentionally using H2O.submit here instead of direct compute2 call here to preserve FJ behavior // such as exceptions being caught and handled via onExceptionalCompletion // can't use fork() to keep correct priority level H2O.submitTask(this); } /** 2nd top-level execution hook. After the primary task has received a * result (ACK) and before we have sent an ACKACK, this method is executed on * the <em>local vm</em>. Transients from the local vm are available here. */ public void onAck() {} /** 3rd top-level execution hook. After the original vm sent an ACKACK, this * method is executed on the <em>remote</em>. Transients from the remote vm * are available here. */ public void onAckAck() {} /** Override to remove 2 lines of logging per RPC. 0.5M RPC's will lead to * 1M lines of logging at about 50 bytes/line produces 50M of log file, * which will swamp all other logging output. 
*/ public boolean logVerbose() { return true; } // For MRTasks, we need to copyOver protected void copyOver( T src ) { icer().copyOver((T)this,src); } /** Task to be executed at the home node of the given key. * Basically a wrapper around DTask which enables us to bypass * remote/local distinction (RPC versus submitTask). */ public static abstract class DKeyTask<T extends DKeyTask,V extends Keyed> extends DTask<DKeyTask>{ private final Key _key; public DKeyTask(H2OCountedCompleter cmp,final Key k) { super(cmp); _key = k; } /** Override map(); will be run on Key's home node */ protected abstract void map(V v); @Override public final void compute2(){ if(_key.home()){ Value val = Value.STORE_get(_key); if( val != null ) map(val.<V>get()); // Call map locally tryComplete(); } else { // Else call remotely new RPC(_key.home_node(),this).addCompleter(this).call(); } } // onCompletion must be empty here, may be invoked twice (on remote and local) @Override public final void onCompletion(CountedCompleter cc){} /** Convenience non-blocking submit to work queues */ public void submitTask() {H2O.submitTask(this);} /** Convenience blocking submit to work queues */ public T invokeTask() { H2O.submitTask(this); join(); return (T)this; } } /** Task to cleanly remove a value from the K/V (calling its remove() * destructor) without the need to fetch it locally first. */ public static class RemoveCall extends DKeyTask { public RemoveCall(H2OCountedCompleter cmp, Key k) { super(cmp, k);} @Override protected void map(Keyed val) { val.remove();} } }
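// Usage sketch (illustrative only) of the DKeyTask helper defined above.
// `TouchTask` is a hypothetical subclass; its map() runs on the Key's home
// node whether that node is this JVM or a remote one.
import water.DTask;
import water.Key;
import water.Keyed;

class TouchTask extends DTask.DKeyTask<TouchTask, Keyed> {
  TouchTask(Key k) { super(null, k); }            // no parent completer, same as RemoveCall is often used

  @Override protected void map(Keyed v) {
    // Executes on the Key's home node with the locally stored value.
    System.out.println("Touched " + v._key);
  }
}
// Blocking use:      new TouchTask(someKey).invokeTask();
// Fire-and-forget:   new TouchTask(someKey).submitTask();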
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/ExtensionManager.java
package water; import water.api.RequestServer; import water.api.RestApiExtension; import water.api.SchemaServer; import water.util.Log; import water.util.StringUtils; import water.webserver.iface.RequestAuthExtension; import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.ServiceLoader; public class ExtensionManager { private static ExtensionManager extManager = new ExtensionManager(); /** System property to force enable/disable named REST API extension */ private static String PROP_TOGGLE_REST_EXT = H2O.OptArgs.SYSTEM_PROP_PREFIX + "ext.rest.toggle."; /** System property to force enable/disable named Core extension */ private static String PROP_TOGGLE_CORE_EXT = H2O.OptArgs.SYSTEM_PROP_PREFIX + "ext.core.toggle."; /** System property to force enable/disable named Auth extension */ private static String PROP_TOGGLE_AUTH_EXT = H2O.OptArgs.SYSTEM_PROP_PREFIX + "ext.auth.toggle."; private ExtensionManager(){ } public static ExtensionManager getInstance(){ return extManager; } private HashMap<String, AbstractH2OExtension> coreExtensions = new HashMap<>(); private HashMap<String, RestApiExtension> restApiExtensions = new HashMap<>(); private HashMap<String, H2OListenerExtension> listenerExtensions = new HashMap<>(); private HashMap<String, RequestAuthExtension> authExtensions = new HashMap<>(); private long registerCoreExtensionsMillis = 0; private long registerListenerExtensionsMillis = 0; private long registerAuthExtensionsMillis = 0; // Be paranoid and check that this doesn't happen twice. private boolean extensionsRegistered = false; private boolean restApiExtensionsRegistered = false; private boolean listenerExtensionsRegistered = false; private boolean authExtensionsRegistered = false; public StringBuilder makeExtensionReport(StringBuilder sb) { try { // Core String[] coreExts = getCoreExtensionNames().clone(); for (int i = 0; i < coreExts.length; i++) if (! isCoreExtensionEnabled(coreExts[i])) coreExts[i] = coreExts[i] + "(disabled)"; sb.append("Core Extensions: ").append(StringUtils.join(",", coreExts)).append("; "); // Rest String[] restExts = getRestApiExtensionNames().clone(); for (int i = 0; i < restExts.length; i++) { RestApiExtension restExt = restApiExtensions.get(restExts[i]); if (restExt == null) restExts[i] = restExts[i] + "(???)"; else if (! isEnabled(restExt)) restExts[i] = restExts[i] + "(disabled)"; } sb.append("Rest Extensions: ").append(StringUtils.join(",", restExts)).append("; "); // Listeners String[] listernerExts = getListenerExtensionNames(); sb.append("Listener Extensions: ").append(StringUtils.join(",", listernerExts)).append("; "); } catch (Exception e) { Log.err("Failed to generate the extension report", e); sb.append(e.getMessage()); } return sb; } public AbstractH2OExtension getCoreExtension(String extensionName) { return coreExtensions.get(extensionName); } public Collection<AbstractH2OExtension> getCoreExtensions() { return coreExtensions.values(); } public boolean isCoreExtensionsEnabled(String extensionName) { AbstractH2OExtension ext = getCoreExtension(extensionName); return ext != null && ext.isEnabled(); } /** * Register H2O extensions. * <p/> * Use SPI to find all classes that extends water.AbstractH2OExtension * and call H2O.addCoreExtension() for each. 
*/ public void registerCoreExtensions() { if (extensionsRegistered) { throw H2O.fail("Extensions already registered"); } long before = System.currentTimeMillis(); ServiceLoader<AbstractH2OExtension> extensionsLoader = ServiceLoader.load(AbstractH2OExtension.class); for (AbstractH2OExtension ext : extensionsLoader) { if (isEnabled(ext)) { ext.init(); coreExtensions.put(ext.getExtensionName(), ext); } } extensionsRegistered = true; registerCoreExtensionsMillis = System.currentTimeMillis() - before; } public Collection<RestApiExtension> getRestApiExtensions(){ return restApiExtensions.values(); } private boolean areDependantCoreExtensionsEnabled(List<String> names){ for(String name: names){ AbstractH2OExtension ext = coreExtensions.get(name); if(ext == null || !ext.isEnabled()){ return false; } } return true; } /** * Register REST API routes. * * Use reflection to find all classes that inherit from {@link water.api.AbstractRegister} * and call the register() method for each. * */ public void registerRestApiExtensions() { if (restApiExtensionsRegistered) { throw H2O.fail("APIs already registered"); } // Log core extension registrations here so the message is grouped in the right spot. for (AbstractH2OExtension e : getCoreExtensions()) { e.printInitialized(); } Log.info("Registered " + coreExtensions.size() + " core extensions in: " + registerCoreExtensionsMillis + "ms"); Log.info("Registered H2O core extensions: " + Arrays.toString(getCoreExtensionNames())); if(listenerExtensions.size() > 0) { Log.info("Registered: " + listenerExtensions.size() + " listener extensions in: " + registerListenerExtensionsMillis + "ms"); Log.info("Registered Listeners extensions: " + Arrays.toString(getListenerExtensionNames())); } if(authExtensions.size() > 0) { Log.info("Registered: " + authExtensions.size() + " auth extensions in: " + registerAuthExtensionsMillis + "ms"); Log.info("Registered Auth extensions: " + Arrays.toString(getAuthExtensionNames())); } long before = System.currentTimeMillis(); RequestServer.DummyRestApiContext dummyRestApiContext = new RequestServer.DummyRestApiContext(); ServiceLoader<RestApiExtension> restApiExtensionLoader = ServiceLoader.load(RestApiExtension.class); for (RestApiExtension r : restApiExtensionLoader) { try { if (isEnabled(r)) { r.registerEndPoints(dummyRestApiContext); r.registerSchemas(dummyRestApiContext); restApiExtensions.put(r.getName(), r); } } catch (Exception e) { Log.err("Cannot register extension: " + r + ". Skipping it...", e); } } restApiExtensionsRegistered = true; long registerApisMillis = System.currentTimeMillis() - before; Log.info("Registered: " + RequestServer.numRoutes() + " REST APIs in: " + registerApisMillis + "ms"); Log.info("Registered REST API extensions: " + Arrays.toString(getRestApiExtensionNames())); // Register all schemas SchemaServer.registerAllSchemasIfNecessary(dummyRestApiContext.getAllSchemas()); } private boolean isEnabled(RestApiExtension r) { String forceToggle = System.getProperty(PROP_TOGGLE_REST_EXT + r.getName()); return forceToggle != null ? Boolean.parseBoolean(forceToggle) : areDependantCoreExtensionsEnabled(r.getRequiredCoreExtensions()); } private boolean isEnabled(AbstractH2OExtension r) { String forceToggle = System.getProperty(PROP_TOGGLE_CORE_EXT + r.getExtensionName()); return forceToggle != null ? Boolean.parseBoolean(forceToggle) : r.isEnabled(); } private boolean isEnabled(RequestAuthExtension r) { String forceToggle = System.getProperty(PROP_TOGGLE_AUTH_EXT + r.getName()); return forceToggle != null ? 
Boolean.parseBoolean(forceToggle) : r.isEnabled(); } private String[] getRestApiExtensionNames(){ return restApiExtensions.keySet().toArray(new String[restApiExtensions.keySet().size()]); } private String[] getCoreExtensionNames(){ return coreExtensions.keySet().toArray(new String[coreExtensions.keySet().size()]); } private String[] getListenerExtensionNames(){ return listenerExtensions.keySet().toArray(new String[listenerExtensions.keySet().size()]); } private String[] getAuthExtensionNames(){ return authExtensions.keySet().toArray(new String[authExtensions.keySet().size()]); } public boolean isCoreExtensionEnabled(String name) { if (coreExtensions.containsKey(name)) { return coreExtensions.get(name).isEnabled(); } else { return false; } } /** * Register various listener extensions * * Use reflection to find all classes that inherit from {@link water.api.AbstractRegister} * and call the register() method for each. * */ public void registerListenerExtensions() { if (listenerExtensionsRegistered) { throw H2O.fail("Listeners already registered"); } long before = System.currentTimeMillis(); ServiceLoader<H2OListenerExtension> extensionsLoader = ServiceLoader.load(H2OListenerExtension.class); for (H2OListenerExtension ext : extensionsLoader) { ext.init(); listenerExtensions.put(ext.getName(), ext); } listenerExtensionsRegistered = true; registerListenerExtensionsMillis = System.currentTimeMillis() - before; } public Collection<H2OListenerExtension> getListenerExtensions(){ return listenerExtensions.values(); } public void registerAuthExtensions() { if (authExtensionsRegistered) { throw H2O.fail("Auth extensions already registered"); } long before = System.currentTimeMillis(); ServiceLoader<RequestAuthExtension> extensionsLoader = ServiceLoader.load(RequestAuthExtension.class); for (RequestAuthExtension ext : extensionsLoader) { if (isEnabled(ext)) { authExtensions.put(ext.getName(), ext); } } authExtensionsRegistered = true; registerAuthExtensionsMillis = System.currentTimeMillis() - before; } public Collection<RequestAuthExtension> getAuthExtensions(){ return authExtensions.values(); } }
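// Usage sketch (illustrative only) of the force-toggle system properties read
// by the isEnabled(...) overloads above. "XGBoost" and "MyAuth" stand in for
// whatever name an extension reports via getExtensionName()/getName(); set the
// properties before the register*Extensions() calls run at node startup (e.g.
// via -D on the JVM command line).
class ExtensionToggleSketch {
  static void example() {
    // Force-disable a core extension regardless of its own isEnabled():
    System.setProperty("sys.ai.h2o.ext.core.toggle.XGBoost", "false");
    // Force-enable a REST API extension even if its required core extensions are off:
    System.setProperty("sys.ai.h2o.ext.rest.toggle.XGBoost", "true");
    // Force-disable an authentication extension:
    System.setProperty("sys.ai.h2o.ext.auth.toggle.MyAuth", "false");
  }
}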
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/FJPacket.java
package water; import water.H2O.H2OCountedCompleter; import water.UDP.udp; import water.util.Log; /** * A class to handle the work of a received UDP packet. Typically we'll do a * small amount of work based on the packet contents (such as returning a Value * requested by another Node, or recording a heartbeat). * * @author <a href="mailto:cliffc@h2o.ai"></a> * @version 1.0 */ class FJPacket extends H2OCountedCompleter { final AutoBuffer _ab; final int _ctrl; // 1st byte of packet FJPacket( AutoBuffer ab, int ctrl ) { // Run at max priority until we decrypt the packet enough to get priorities out super(UDP.udp.UDPS[ctrl]._prior); _ab = ab; _ctrl = ctrl; assert 0 < _ctrl && _ctrl < udp.UDPS.length; assert udp.UDPS[_ctrl]._udp != null:"missing message handler " + _ctrl; } @Override public void compute2() { _ab.getPort(); // skip past the port if( _ctrl <= UDP.udp.nack.ordinal() ) { AutoBuffer ab = UDP.udp.UDPS[_ctrl]._udp.call(_ab); if(ab != null && !ab.isClosed()) ab.close(); } else RPC.remote_exec(_ab); tryComplete(); } /** Exceptional completion path; mostly does printing if the exception was * not handled earlier in the stack. */ @Override public boolean onExceptionalCompletion(Throwable ex, jsr166y.CountedCompleter caller) { Log.err("onExCompletion for " + this, ex); return true; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/FetchClazz.java
package water; // Helper to fetch classForName strings from IDs from the leader class FetchClazz extends DTask<FetchClazz> { private final int _id; // OUT String _clazz; private FetchClazz(int id) { super(H2O.FETCH_ACK_PRIORITY); _id=id; } /** * Fetch class name for a given id from the leader * @param id class id * @return class name or null if leader doesn't have the id mapping */ static String fetchClazz(int id) { return fetchClazz(H2O.CLOUD.leader(), id); } private static String fetchClazz(H2ONode node, int id) { return RPC.call(node, new FetchClazz(id)).get()._clazz; } @Override public void compute2() { _clazz = TypeMap.classNameLocal(_id); tryComplete(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/FetchClazzes.java
package water; class FetchClazzes extends DTask<FetchClazzes> { // OUT String[] _clazzes; private FetchClazzes() { super(); } public static String[] fetchClazzes() { String[] clazzes = RPC.call(H2O.CLOUD.leader(), new FetchClazzes()).get()._clazzes; assert clazzes != null; return clazzes; } @Override public void compute2() { _clazzes = TypeMap.CLAZZES; tryComplete(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/FetchId.java
package water; // Helper to fetch class IDs from class Strings from the leader class FetchId extends DTask<FetchId> { final String _clazz; int _id; private FetchId(String s) { super(H2O.FETCH_ACK_PRIORITY); _clazz=s; } static public int fetchId(String s) { return RPC.call(H2O.CLOUD.leader(), new FetchId(s)).get()._id; } @Override public void compute2() { _id = TypeMap.onIce(_clazz); tryComplete(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/FrameSizeMonitor.java
package water; import org.apache.log4j.Logger; import water.fvec.Frame; import water.fvec.NewChunk; import water.parser.FVecParseWriter; import java.util.HashSet; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.function.Consumer; public class FrameSizeMonitor implements Runnable, Thread.UncaughtExceptionHandler { private static final Logger LOG = Logger.getLogger(FrameSizeMonitor.class); private static final String ENABLED_PROP = "util.frameSizeMonitor.enabled"; private static final String SAFE_COEF_PROP = "util.frameSizeMonitor.safetyCoefficient"; private static final String SAFE_FREE_MEM_DEFAULT_COEF = "0.2"; private static final boolean ENABLED; private static final float SAFE_FREE_MEM_COEF; private static final int SLEEP_MS = 100; private static final int MB = 1024 * 1024; private static final float FIRST_CHECK_PROGRESS = 0.02f; private static final ConcurrentMap<Key<Job>, FrameSizeMonitor> registry = new ConcurrentHashMap<>(); static { ENABLED = H2O.getSysBoolProperty(ENABLED_PROP, false); SAFE_FREE_MEM_COEF = Float.parseFloat(H2O.getSysProperty(SAFE_COEF_PROP, SAFE_FREE_MEM_DEFAULT_COEF)); } private final Key<Job> jobKey; private final Set<FVecParseWriter> writers = new HashSet<>(); private final long totalMemory = getTotalMemory(); private long committedMemory = 0; FrameSizeMonitor(Key<Job> jobKey) { this.jobKey = jobKey; } public static void get(Key<Job> jobKey, Consumer<FrameSizeMonitor> c) { if (!ENABLED) return; FrameSizeMonitor monitor = registry.computeIfAbsent(jobKey, key -> { if (jobKey.get().stop_requested()) { // throw an exception to stop the parsing throw new IllegalStateException("Memory is running low. Forcefully terminating."); } else { FrameSizeMonitor m = new FrameSizeMonitor(jobKey); Thread t = new Thread(m, "FrameSizeMonitor-" + jobKey.get()._result); t.setUncaughtExceptionHandler(m); t.start(); return m; } }); c.accept(monitor); } private static void finish(Key<Job> jobKey) { synchronized (registry) { registry.remove(jobKey); } } @Override public void run() { float nextProgress = FIRST_CHECK_PROGRESS; Job<Frame> job = jobKey.get(); while (job.isRunning() && nextProgress < 1f) { if (!MemoryManager.canAlloc()) { LOG.info("FrameSizeMonitor: MemoryManager is running low on memory, stopping job " + jobKey + " writing frame " + job._result); job.fail(new RuntimeException("Aborting due to critically low memory.")); break; } float currentProgress = job.progress(); if (currentProgress >= nextProgress) { if (isMemoryUsageOverLimit() && isFrameSizeOverLimit(currentProgress, job)) { job.fail(new RuntimeException("Aborting due to projected memory usage too high.")); break; } else if (nextProgress < 0.1f) { nextProgress = currentProgress + 0.01f; } else { nextProgress = currentProgress + 0.1f; } } else if (LOG.isDebugEnabled()) { LOG.debug("FrameSizeMonitor: waiting for progress " + currentProgress + " to jump over " + nextProgress); } synchronized (this) { try { wait(SLEEP_MS); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } } } if (LOG.isDebugEnabled()) { if (!job.isStopped()) { job.get(); // wait for job to finish } if (job.isDone()) { LOG.debug("FrameSizeMonitor: finished monitoring job " + jobKey + ", final frame size is " + (job._result.get().byteSize() / MB) + " MB"); } } finish(jobKey); } @Override public void uncaughtException(Thread t, Throwable e) { LOG.error(e); finish(jobKey); } private boolean isMemoryUsageOverLimit() { long availableMemory = 
getAvailableMemory(); long minimumAvailableMemory = (long) (totalMemory * 2 * SAFE_FREE_MEM_COEF); if (availableMemory < minimumAvailableMemory) { LOG.debug("FrameSizeMonitor: Checking output of job " + jobKey + " because the available memory " + (availableMemory / MB) + " MB is lower than threshold " + (minimumAvailableMemory / MB) + " MB " + "(" + SAFE_FREE_MEM_COEF + " of " + (totalMemory / MB) + " MB total memory)"); return true; } else { LOG.debug("FrameSizeMonitor: Overall memory usage is ok, still have " + (availableMemory / MB) + " MB available of " + (minimumAvailableMemory / MB) + " MB required."); return false; } } private boolean isFrameSizeOverLimit(float progress, Job job) { long currentCommittedMemory = committedMemory; long currentInProgressMemory = getInProgressMemory(); long projectedTotalFrameSize = (long) (currentInProgressMemory + (currentCommittedMemory / progress)); long projectedAdditionalFrameSize = projectedTotalFrameSize - currentCommittedMemory - currentInProgressMemory; long availableMemory = getAvailableMemory(); long usableMemory = (long) (availableMemory - (totalMemory * SAFE_FREE_MEM_COEF)); if (LOG.isDebugEnabled()) { LOG.debug("FrameSizeMonitor: Frame " + job._result + ": \n" + " committed: " + (currentCommittedMemory / MB) + " MB\n" + " loading: " + (currentInProgressMemory / MB) + " MB\n" + " progress: " + progress + "\n" + " projected additional: " + (projectedAdditionalFrameSize / MB) + " MB\n" + " projected total: " + (projectedTotalFrameSize / MB) + " MB\n" + " availableMemory: " + (availableMemory / MB) + " MB\n" + " totalMemory: " + (totalMemory / MB) + " MB\n" + " usableMemory: " + (usableMemory / MB) + " MB\n" + " enough: " + (projectedAdditionalFrameSize <= usableMemory)); } if (projectedAdditionalFrameSize > usableMemory) { LOG.error("FrameSizeMonitor: Stopping job " + jobKey + " writing frame " + job._result + " because the projected size of " + (projectedAdditionalFrameSize / MB) + " MB " + " does not safely fit in " + (availableMemory / MB) + " MB of available memory."); return true; } else { if (LOG.isDebugEnabled()) { LOG.debug("FrameSizeMonitor: Projected memory " + (projectedAdditionalFrameSize / MB) + "MB for frame " + job._result + " fits safely into " + (availableMemory / MB) + " MB of available memory."); } return false; } } private long getInProgressMemory() { long usedMemory = 0; synchronized (writers) { for (FVecParseWriter writer : writers) { NewChunk[] nvs = writer.getNvs(); if (nvs != null) { usedMemory += getUsedMemory(nvs); } } } return usedMemory; } private long getUsedMemory(NewChunk[] nvs) { long usedMemory = 0; for (NewChunk nv : nvs) { if (nv != null) { usedMemory += nv.byteSize(); } } return usedMemory; } private long getTotalMemory() { return H2O.SELF._heartbeat.get_kv_mem() + H2O.SELF._heartbeat.get_pojo_mem() + H2O.SELF._heartbeat.get_free_mem(); } private long getAvailableMemory() { return H2O.SELF._heartbeat.get_free_mem(); } public static void register(Key<Job> jobKey, FVecParseWriter writer) { get(jobKey, t -> t.register(writer)); } public void register(FVecParseWriter writer) { synchronized (writers) { writers.add(writer); } } public static void closed(Key<Job> jobKey, FVecParseWriter writer, long mem) { get(jobKey, t -> t.closed(writer, mem)); } public void closed(FVecParseWriter writer, long mem) { synchronized (writers) { writers.remove(writer); committedMemory += mem; } } }
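// Worked sketch (illustrative numbers only) of the projection used by
// isFrameSizeOverLimit() above: the final frame size is extrapolated from the
// bytes already committed and the parse job's current progress.
class FrameSizeProjectionSketch {
  static void example() {
    long MB = 1024 * 1024;
    long committed  = 600 * MB;   // chunks already closed by parser writers
    long inProgress = 100 * MB;   // chunks still being filled
    float progress  = 0.30f;      // parse job reports 30% done

    long projectedTotal      = (long) (inProgress + committed / progress);  // ~2100 MB
    long projectedAdditional = projectedTotal - committed - inProgress;     // ~1400 MB

    // The parse job is failed when projectedAdditional exceeds
    // availableMemory - totalMemory * SAFE_FREE_MEM_COEF (the coefficient defaults
    // to 0.2). Assuming H2O's usual "sys.ai.h2o." property prefix, the monitor is
    // off unless sys.ai.h2o.util.frameSizeMonitor.enabled=true, and the coefficient
    // is tuned via sys.ai.h2o.util.frameSizeMonitor.safetyCoefficient.
    System.out.println("Projected additional bytes: " + projectedAdditional);
  }
}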
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/Freezable.java
package water; /** * Auto-serializer interface using a delegator pattern (the faster option is * to byte-code gen directly in all Iced classes, but this requires all Iced * classes go through a ClassLoader). * <p> * Freezable is a marker interface, and {@link Iced} is the companion marker * class. Marked classes have a 2-byte integer type associated with them, and * an auto-genned delegate class created to actually do serialization. * Serialization is extremely dense (includes various compressions), and * typically memory-bandwidth bound to generate. Note that the first object implementing Freezable * is the start of the serialization; serialization for parent objects is not generated, and the user needs to provide * custom serialization for them in that case. * <p> * H2O uses Iced classes as the primary means of moving Java Objects around * the cluster. * <p> * Default serialization behavior can be overridden by the user by implementing their own serialization methods. (NOTE: All custom serialization methods must be declared as either final or static!) * * If a given Freezable class contains a custom serialization method, it is used instead of the default autogenerated one (i.e. no auto-serialization happens for this class!), * however, all Freezable parents are still going to be serialized automatically. * * The serialization behavior for a given freezable class F can be described in the following steps (shown here for serialization into bytes; the other methods are analogous): * H2O will generate F$Icer extends ((parent of F)$Icer if freezable, water.Icer otherwise). * * F$Icer.write( F f, AutoBuffer ab) { super.write(f,ab); * // if F has custom serialization defined: * return f.write_impl(ab) (or return F.write_impl(f,ab), depending on the flavor of implemented custom serialization); * // otherwise auto-serialize all non-static non-transient members of F and return ab * } * * The default serialization behavior can be overridden for a given class by implementing one or more of the following custom serialization methods: * * 1) to override serialization into AutoBuffer, provide either * * public final AutoBuffer write_impl(AutoBuffer ab); * or * public static AutoBuffer write_impl(AutoBuffer ab, T t); * * 2) to override deserialization from AutoBuffer, provide either * * public final T read_impl(AutoBuffer ab); * or * public static T read_impl(AutoBuffer ab, T t); * * 3) to override serialization into JSON, provide either * * public final AutoBuffer writeJSON_impl(AutoBuffer ab); * or * public static AutoBuffer writeJSON_impl(AutoBuffer ab, T t); * * 4) to override deserialization from JSON, provide either * * public final T readJSON_impl(AutoBuffer ab); * or * public static T readJSON_impl(AutoBuffer ab, T t); * * 5) to override serialization into an array of bytes: * useful for Freezables directly backed by a byte array (e.g. for memory efficiency reasons), @see Chunk * * provide @Override byte[] asBytes() * * 6) to override de-serialization from an array of bytes containing exactly the bytes of the freezable and nothing more: * useful for Freezables directly backed by a byte array (e.g. for memory efficiency reasons), @see Chunk * * provide @Override T reloadFromBytes(byte[] ary) * * </p> * */ public interface Freezable<T extends Freezable> extends Cloneable { /** Standard "write thyself into the AutoBuffer" call, using the fast Iced * protocol. Real work is in the delegate {@link Icer} classes. * @param ab <code>AutoBuffer</code> to write this object to.
* @return Returns the original {@link AutoBuffer} for flow-coding. */ AutoBuffer write(AutoBuffer ab); /** Standard "read thyself from the AutoBuffer" call, using the fast Iced protocol. Real work * is in the delegate {@link Icer} classes. * @param ab <code>AutoBuffer</code> to read this object from. * @return Returns a new instance of object reconstructed from AutoBuffer. */ T read(AutoBuffer ab); /** Standard "write thyself into the AutoBuffer" call, using JSON. Real work * is in the delegate {@link Icer} classes. * @param ab <code>AutoBuffer</code> to write this object to. * @return Returns the original {@link AutoBuffer} for flow-coding. */ AutoBuffer writeJSON(AutoBuffer ab); /** Standard "read thyself from the AutoBuffer" call, using JSON. Real work * is in the delegate {@link Icer} classes. * @param ab <code>AutoBuffer</code> to read this object from. * @return Returns an instance of object reconstructed from JSON data. */ T readJSON(AutoBuffer ab); /** Returns a small dense integer, which is cluster-wide unique per-class. * Useful as an array index. * @return Small integer, unique per-type */ int frozenType(); /** Return serialized version of self as a byte array. * Useful for Freezables directly supported by byte array (@see Chunk) * In most cases, just use the Autobuffer version. * @return serialized bytes */ byte [] asBytes(); /** * Replace yourself with deserialized version from the given bytes. * Useful for Freezables directly supported by byte array (@see Chunk). * In most cases, just use the Autobuffer version. * @param ary byte array containing exactly (i.e. nothing else) the serialized version of the Freezable * @return this freshly reloaded from the given bytes. * */ T reloadFromBytes(byte [] ary); /** Make clone public, but without the annoying exception. * @return Returns this object cloned. */ public T clone(); }
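// Sketch (illustrative only) of the custom-serialization hooks described in the
// Freezable javadoc above: `PointIce` is a hypothetical Iced subclass providing
// its own AutoBuffer write/read pair instead of the auto-generated one.
import water.AutoBuffer;
import water.Iced;

class PointIce extends Iced<PointIce> {
  double _x, _y;

  // Custom serialization methods must be final (or static); this replaces the
  // auto-generated write for this class, while Freezable parents still
  // serialize automatically.
  public final AutoBuffer write_impl(AutoBuffer ab) {
    return ab.put8d(_x).put8d(_y);
  }

  public final PointIce read_impl(AutoBuffer ab) {
    _x = ab.get8d();
    _y = ab.get8d();
    return this;
  }
}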
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/Futures.java
package water; import java.util.Arrays; import java.util.concurrent.CancellationException; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import water.util.Log; /** A collection of Futures that can be extended, or blocked on the whole * collection. Undefined if you try to add Futures while blocking. * <p> * Used as a service to sub-tasks, collect pending-but-not-yet-done future * tasks that need to complete prior to *this* task completing... or if the * caller of this task is knowledgeable, pass these pending tasks along to him * to block on before he completes. * <p> * Highly efficient under a high load of short-completion-time Futures. Safe * to call with e.g. millions of Futures per second, as long as they all * complete in roughly the same rate. */ public class Futures { // implemented as an exposed array mostly because ArrayList doesn't offer // synchronization and constant-time removal. Future[] _pending = new Future[1]; int _pending_cnt; private Throwable _ex; private void waitAndCheckForException(Future f) { try { f.get(); } catch(CancellationException ex){ // ignore cancelled tasks } catch(Throwable t) { if(_ex == null) _ex = t instanceof ExecutionException?t.getCause():t; } } /** Some Future task which needs to complete before this Futures completes */ synchronized public Futures add( Future f ) { if( f == null ) return this; if(f.isDone()) { waitAndCheckForException(f); return this; } // NPE here if this Futures has already been added to some other Futures // list, and should be added to again. if( _pending_cnt == _pending.length ) { cleanCompleted(); if( _pending_cnt == _pending.length ) _pending = Arrays.copyOf(_pending,_pending_cnt<<1); } _pending[_pending_cnt++] = f; return this; } /** Clean out from the list any pending-tasks which are already done. Note * that this drops the algorithm from O(n) to O(1) in practice, since mostly * things clean out as fast as new ones are added and the list never gets * very large. */ synchronized private void cleanCompleted(){ for( int i=0; i<_pending_cnt; i++ ) if( _pending[i].isDone() ) {// Done? waitAndCheckForException(_pending[i]); // Do cheap array compression to remove from list _pending[i--] = _pending[--_pending_cnt]; } } /** Merge pending-task lists (often as part of doing a 'reduce' step) */ public void add( Futures fs ) { if( fs == null ) return; assert fs != this; // No recursive death, please for( int i=0; i<fs._pending_cnt; i++ ) add(fs._pending[i]); // NPE here if using a dead Future fs._pending = null; // You are dead, should never be inserted into again } /** Block until all pending futures have completed or canceled. */ public final void blockForPending() { // Block until the last Future finishes. while (true) { Future f; synchronized (this) { if (_pending_cnt == 0) break; f = _pending[--_pending_cnt]; } waitAndCheckForException(f); } if (_ex != null) throw new RuntimeException(_ex); } }
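// Usage sketch (illustrative only): batching many non-blocking DKV puts behind
// one Futures and blocking once for the whole collection, as described above.
// `FuturesUsageSketch` and its parameters are hypothetical.
import water.DKV;
import water.Futures;
import water.Iced;
import water.Key;

class FuturesUsageSketch {
  static void putAll(Key[] keys, Iced[] values) {
    Futures fs = new Futures();
    for (int i = 0; i < keys.length; i++)
      DKV.put(keys[i], values[i], fs);   // each put records its pending work in fs
    fs.blockForPending();                // wait for every put; rethrows any failure
  }
}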
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/H2O.java
package water; import hex.ModelBuilder; import jsr166y.CountedCompleter; import jsr166y.ForkJoinPool; import jsr166y.ForkJoinWorkerThread; import org.apache.log4j.LogManager; import org.apache.log4j.PropertyConfigurator; import water.UDPRebooted.ShutdownTsk; import water.api.LogsHandler; import water.api.RequestServer; import water.exceptions.H2OFailException; import water.exceptions.H2OIllegalArgumentException; import water.init.*; import water.nbhm.NonBlockingHashMap; import water.parser.DecryptionTool; import water.parser.ParserService; import water.persist.PersistManager; import water.server.ServletUtils; import water.util.*; import water.webserver.iface.WebServer; import java.io.BufferedWriter; import java.io.File; import java.io.FileWriter; import java.io.IOException; import java.lang.management.ManagementFactory; import java.lang.management.RuntimeMXBean; import java.lang.reflect.Field; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.net.*; import java.nio.file.FileSystems; import java.nio.file.PathMatcher; import java.util.*; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; /** * Start point for creating or joining an <code>H2O</code> Cloud. * * @author <a href="mailto:cliffc@h2o.ai"></a> * @version 1.0 */ final public class H2O { public static final String DEFAULT_JKS_PASS = "h2oh2o"; public static final int H2O_DEFAULT_PORT = 54321; public static final Map<Integer, Integer> GITHUB_DISCUSSIONS = createMap(); static Map<Integer, Integer> createMap() { Integer[] GHDiscussion = new Integer[]{15512, 15513, 15514, 15515, 15516, 15517, 15518, 15519, 15520, 15521, 15522, 15523, 15524, 15525}; Integer[] techNoteNumber = new Integer[]{1,2,3,4,5,7,9,10,11,12,13,14,15,16}; Map<Integer, Integer> mapTNToGH = new HashMap<>(); int mapLen = GHDiscussion.length; for (int index=0; index<mapLen; index++) mapTNToGH.put(techNoteNumber[index], GHDiscussion[index]); return mapTNToGH; } //------------------------------------------------------------------------------------------------------------------- // Command-line argument parsing and help //------------------------------------------------------------------------------------------------------------------- /** * Print help about command line arguments. */ public static void printHelp() { String defaultFlowDirMessage; if (DEFAULT_FLOW_DIR() == null) { // If you start h2o on Hadoop, you must set -flow_dir. // H2O doesn't know how to guess a good one. // user.home doesn't make sense. 
defaultFlowDirMessage = " (The default is none; saving flows not available.)\n"; } else { defaultFlowDirMessage = " (The default is '" + DEFAULT_FLOW_DIR() + "'.)\n"; } String s = "\n" + "Usage: java [-Xmx<size>] -jar h2o.jar [options]\n" + " (Note that every option has a default and is optional.)\n" + "\n" + " -h | -help\n" + " Print this help.\n" + "\n" + " -version\n" + " Print version info and exit.\n" + "\n" + " -name <h2oCloudName>\n" + " Cloud name used for discovery of other nodes.\n" + " Nodes with the same cloud name will form an H2O cloud\n" + " (also known as an H2O cluster).\n" + "\n" + " -flatfile <flatFileName>\n" + " Configuration file explicitly listing H2O cloud node members.\n" + "\n" + " -ip <ipAddressOfNode>\n" + " IP address of this node.\n" + "\n" + " -port <port>\n" + " Port number for this node (note: port+1 is also used by default).\n" + " (The default port is " + ARGS.port + ".)\n" + "\n" + " -network <IPv4network1Specification>[,<IPv4network2Specification> ...]\n" + " The IP address discovery code will bind to the first interface\n" + " that matches one of the networks in the comma-separated list.\n" + " Use instead of -ip when a broad range of addresses is legal.\n" + " (Example network specification: '10.1.2.0/24' allows 256 legal\n" + " possibilities.)\n" + "\n" + " -ice_root <fileSystemPath>\n" + " The directory where H2O spills temporary data to disk.\n" + "\n" + " -log_dir <fileSystemPath>\n" + " The directory where H2O writes logs to disk.\n" + " (This usually has a good default that you need not change.)\n" + "\n" + " -log_level <TRACE,DEBUG,INFO,WARN,ERRR,FATAL>\n" + " Write messages at this logging level, or above. Default is INFO.\n" + "\n" + " -max_log_file_size\n" + " Maximum size of INFO and DEBUG log files. The file is rolled over after a specified size has been reached.\n" + " (The default is 3MB. Minimum is 1MB and maximum is 99999MB)\n" + "\n" + " -flow_dir <server side directory or HDFS directory>\n" + " The directory where H2O stores saved flows.\n" + defaultFlowDirMessage + "\n" + " -nthreads <#threads>\n" + " Maximum number of threads in the low priority batch-work queue.\n" + " (The default is " + (char)H2ORuntime.availableProcessors() + ".)\n" + "\n" + " -client\n" + " Launch H2O node in client mode.\n" + "\n" + " -notify_local <fileSystemPath>\n" + " Specifies a file to write when the node is up. The file contains one line with the IP and\n" + " port of the embedded web server. e.g. 
192.168.1.100:54321\n" + "\n" + " -context_path <context_path>\n" + " The context path for jetty.\n" + "\n" + "Authentication options:\n" + "\n" + " -jks <filename>\n" + " Java keystore file\n" + "\n" + " -jks_pass <password>\n" + " (Default is '" + DEFAULT_JKS_PASS + "')\n" + "\n" + " -jks_alias <alias>\n" + " (Optional, use if the keystore has multiple certificates and you want to use a specific one.)\n" + "\n" + " -hostname_as_jks_alias\n" + " (Optional, use if you want to use the machine hostname as your certificate alias.)\n" + "\n" + " -hash_login\n" + " Use Jetty HashLoginService\n" + "\n" + " -ldap_login\n" + " Use Jetty Ldap login module\n" + "\n" + " -kerberos_login\n" + " Use Jetty Kerberos login module\n" + "\n" + " -spnego_login\n" + " Use Jetty SPNEGO login service\n" + "\n" + " -pam_login\n" + " Use Jetty PAM login module\n" + "\n" + " -login_conf <filename>\n" + " LoginService configuration file\n" + "\n" + " -spnego_properties <filename>\n" + " SPNEGO login module configuration file\n" + "\n" + " -form_auth\n" + " Enables Form-based authentication for Flow (default is Basic authentication)\n" + "\n" + " -session_timeout <minutes>\n" + " Specifies the number of minutes that a session can remain idle before the server invalidates\n" + " the session and requests a new login. Requires '-form_auth'. Default is no timeout\n" + "\n" + " -internal_security_conf <filename>\n" + " Path (absolute or relative) to a file containing all internal security related configurations\n" + "\n" + "Cloud formation behavior:\n" + "\n" + " New H2O nodes join together to form a cloud at startup time.\n" + " Once a cloud is given work to perform, it locks out new members\n" + " from joining.\n" + "\n" + "Examples:\n" + "\n" + " Start an H2O node with 4GB of memory and a default cloud name:\n" + " $ java -Xmx4g -jar h2o.jar\n" + "\n" + " Start an H2O node with 6GB of memory and a specify the cloud name:\n" + " $ java -Xmx6g -jar h2o.jar -name MyCloud\n" + "\n" + " Start an H2O cloud with three 2GB nodes and a default cloud name:\n" + " $ java -Xmx2g -jar h2o.jar &\n" + " $ java -Xmx2g -jar h2o.jar &\n" + " $ java -Xmx2g -jar h2o.jar &\n" + "\n"; System.out.print(s); for (AbstractH2OExtension e : extManager.getCoreExtensions()) { e.printHelp(); } } /** * Singleton ARGS instance that contains the processed arguments. */ public static final OptArgs ARGS = new OptArgs(); /** * A class containing all of the authentication arguments for H2O. 
*/ public static class BaseArgs { //----------------------------------------------------------------------------------- // Authentication & Security //----------------------------------------------------------------------------------- /** -jks is Java KeyStore file on local filesystem */ public String jks = null; /** -jks_pass is Java KeyStore password; default is 'h2oh2o' */ public String jks_pass = DEFAULT_JKS_PASS; /** -jks_alias if the keystore has multiple certificates and you want to use a specific one */ public String jks_alias = null; /** -hostname_as_jks_alias if you want to use the machine hostname as your certificate alias */ public boolean hostname_as_jks_alias = false; /** -hash_login enables HashLoginService */ public boolean hash_login = false; /** -ldap_login enables ldaploginmodule */ public boolean ldap_login = false; /** -kerberos_login enables krb5loginmodule */ public boolean kerberos_login = false; /** -kerberos_login enables SpnegoLoginService */ public boolean spnego_login = false; /** -pam_login enables pamloginmodule */ public boolean pam_login = false; /** -login_conf is login configuration service file on local filesystem */ public String login_conf = null; /** -spnego_properties is SPNEGO configuration file on local filesystem */ public String spnego_properties = null; /** -form_auth enables Form-based authentication */ public boolean form_auth = false; /** -session_timeout maximum duration of session inactivity in minutes **/ String session_timeout_spec = null; // raw value specified by the user public int session_timeout = 0; // parsed value (in minutes) /** -user_name=user_name; Set user name */ public String user_name = System.getProperty("user.name"); /** -internal_security_conf path (absolute or relative) to a file containing all internal security related configurations */ public String internal_security_conf = null; /** -internal_security_conf_rel_paths interpret paths of internal_security_conf relative to the main config file */ public boolean internal_security_conf_rel_paths = false; /** -internal_security_enabled is a boolean that indicates if internal communication paths are secured*/ public boolean internal_security_enabled = false; /** -allow_insecure_xgboost is a boolean that allows xgboost to run in a secured cluster */ public boolean allow_insecure_xgboost = false; /** -use_external_xgboost; invoke XGBoost on external cluster started by Steam */ public boolean use_external_xgboost = false; /** -decrypt_tool specifies the DKV key where a default decrypt tool will be installed*/ public String decrypt_tool = null; //----------------------------------------------------------------------------------- // Kerberos //----------------------------------------------------------------------------------- public String principal = null; public String keytab_path = null; public String hdfs_token_refresh_interval = null; //----------------------------------------------------------------------------------- // Networking //----------------------------------------------------------------------------------- /** -port=####; Specific Browser/API/HTML port */ public int port; /** -baseport=####; Port to start upward searching from. */ public int baseport = H2O_DEFAULT_PORT; /** -port_offset=####; Offset between the API(=web) port and the internal communication port; api_port + port_offset = h2o_port */ public int port_offset = 1; /** -web_ip=ip4_or_ip6; IP used for web server. By default it listen to all interfaces. 
*/ public String web_ip = null; /** -ip=ip4_or_ip6; Named IP4/IP6 address instead of the default */ public String ip; /** -network=network; Network specification for acceptable interfaces to bind to */ public String network; /** -context_path=jetty_context_path; the context path for jetty */ public String context_path = ""; public KeyValueArg[] extra_headers = new KeyValueArg[0]; public PathMatcher file_deny_glob = FileSystems.getDefault().getPathMatcher("glob:{/bin/*,/etc/*,/var/*,/usr/*,/proc/*,**/.**}"); } public static class KeyValueArg { public final String _key; public final String _value; private KeyValueArg(String key, String value) { _key = key; _value = value; } } /** * A class containing all of the arguments for H2O. */ public static class OptArgs extends BaseArgs { // Prefix of hidden system properties public static final String SYSTEM_PROP_PREFIX = "sys.ai.h2o."; public static final String SYSTEM_DEBUG_CORS = H2O.OptArgs.SYSTEM_PROP_PREFIX + "debug.cors"; //----------------------------------------------------------------------------------- // Help and info //----------------------------------------------------------------------------------- /** -help, -help=true; print help and exit*/ public boolean help = false; /** -version, -version=true; print version and exit */ public boolean version = false; //----------------------------------------------------------------------------------- // Clouding //----------------------------------------------------------------------------------- /** -name=name; Set cloud name */ public String name = System.getProperty("user.name"); // Cloud name /** -flatfile=flatfile; Specify a list of cluster IP addresses */ public String flatfile; //----------------------------------------------------------------------------------- // Node configuration //----------------------------------------------------------------------------------- /** -ice_root=ice_root; ice root directory; where temp files go */ public String ice_root; /** -cleaner; enable user-mode spilling of big data to disk in ice_root */ public boolean cleaner = false; /** -nthreads=nthreads; Max number of F/J threads in the low-priority batch queue */ public short nthreads= (short)H2ORuntime.availableProcessors(); /** -log_dir=/path/to/dir; directory to save logs in */ public String log_dir; /** -flow_dir=/path/to/dir; directory to save flows in */ public String flow_dir; /** -disable_web; disable Jetty and REST API interface */ public boolean disable_web = false; /** -disable_net; do not listen to incoming traffic and do not try to discover other nodes, for single node deployments */ public boolean disable_net = false; /** -disable_flow; disable access to H2O Flow, keep REST API interface available to clients */ public boolean disable_flow = false; /** -client, -client=true; Client-only; no work; no homing of Keys (but can cache) */ public boolean client; /** -allow_clients, -allow_clients=true; Enable clients to connect to this H2O node - disabled by default */ public boolean allow_clients = false; public boolean allow_unsupported_java = false; /** If this timeout is set to non 0 value, stop the cluster if there hasn't been any rest api request to leader * node after the given timeout. Unit is milliseconds. 
*/ public int rest_api_ping_timeout = 0; /** specifies a file to write when the node is up */ public String notify_local; /** what the is ratio of available off-heap memory to maximum JVM heap memory */ public double off_heap_memory_ratio = 0; //----------------------------------------------------------------------------------- // HDFS & AWS //----------------------------------------------------------------------------------- /** -hdfs_config=hdfs_config; configuration file of the HDFS */ public String[] hdfs_config = null; /** -hdfs_skip=hdfs_skip; used by Hadoop driver to not unpack and load any HDFS jar file at runtime. */ public boolean hdfs_skip = false; /** -aws_credentials=aws_credentials; properties file for aws credentials */ public String aws_credentials = null; /** -configure_s3_using_s3a; use S3A(FileSystem) to configure S3 client */ public boolean configure_s3_using_s3a = false; /** --ga_hadoop_ver=ga_hadoop_ver; Version string for Hadoop */ public String ga_hadoop_ver = null; /** -Hkey=value; additional configuration to merge into the Hadoop Configuration */ public final Properties hadoop_properties = new Properties(); //----------------------------------------------------------------------------------- // Recovery //----------------------------------------------------------------------------------- /** -auto_recovery_dir=hdfs://path/to/recovery; Where to store {@link hex.faulttolerance.Recoverable} job data */ public String auto_recovery_dir; //----------------------------------------------------------------------------------- // Debugging //----------------------------------------------------------------------------------- /** -log_level=log_level; One of DEBUG, INFO, WARN, ERRR. Default is INFO. */ public String log_level; /** -max_log_file_size=max_log_file_size; Maximum size of log file. The file is rolled over after a specified size has been reached.*/ public String max_log_file_size; /** -random_udp_drop, -random_udp_drop=true; test only, randomly drop udp incoming */ public boolean random_udp_drop; /** -md5skip, -md5skip=true; test-only; Skip the MD5 Jar checksum; allows jars from different builds to mingle in the same cloud */ public boolean md5skip = false; /** -quiet Enable quiet mode and avoid any prints to console, useful for client embedding */ public boolean quiet = false; /** Timeout specifying how long to wait before we check if the client has disconnected from this node */ public long clientDisconnectTimeout = HeartBeatThread.CLIENT_TIMEOUT * 20; /** -embedded; when running embedded into another application (eg. Sparkling Water) - enforce all threads to be daemon threads */ public boolean embedded = false; /** * Optionally disable algorithms marked as beta or experimental. * Everything is on by default. 
*/ public ModelBuilder.BuilderVisibility features_level = ModelBuilder.BuilderVisibility.Experimental; @Override public String toString() { StringBuilder result = new StringBuilder(); //determine fields declared in this class only (no fields of superclass) Field[] fields = this.getClass().getDeclaredFields(); //print field names paired with their values result.append("[ "); for (Field field : fields) { try { result.append(field.getName()); result.append(": "); //requires access to private field: result.append(field.get(this)); result.append(", "); } catch (IllegalAccessException ex) { Log.err(ex, ex); } } result.deleteCharAt(result.length() - 2); result.deleteCharAt(result.length() - 1); result.append(" ]"); return result.toString(); } /** * Whether this H2O instance was launched on hadoop (using 'hadoop jar h2odriver.jar') or not. */ public boolean launchedWithHadoopJar() { return hdfs_skip; } } public static void parseFailed(String message) { System.out.println(""); System.out.println("ERROR: " + message); System.out.println(""); printHelp(); H2O.exitQuietly(1); // argument parsing failed -> we might have inconsistent ARGS and not be able to initialize logging } /** * Use when given arguments are incompatible for cluster to run. * Log is flushed into stdout to show important debugging information */ public static void clusterInitializationFailed() { Log.flushBufferedMessagesToStdout(); H2O.exitQuietly(1); } public static class OptString { final String _s; String _lastMatchedFor; public OptString(String s) { _s = s; } public boolean matches(String s) { _lastMatchedFor = s; if (_s.equals("-" + s)) return true; if (_s.equals("--" + s)) return true; return false; } public int incrementAndCheck(int i, String[] args) { i = i + 1; if (i >= args.length) parseFailed(_lastMatchedFor + " not specified"); return i; } public int parseInt(String a) { try { return Integer.parseInt(a); } catch (Exception e) { } parseFailed("Argument " + _lastMatchedFor + " must be an integer (was given '" + a + "')" ); return 0; } public int parsePort(String portString){ int portNum = parseInt(portString); if(portNum < 0 || portNum > 65535){ parseFailed("Argument " + _lastMatchedFor + " must be an integer between 0 and 65535"); return 0; }else{ return portNum; } } public String checkFileSize(String fileSizeString){ int length = fileSizeString.length(); if(length > 2 && length < 8 && fileSizeString.substring(length-2, length).equals("MB")){ try { Integer.parseInt(fileSizeString.substring(0, length-2)); return fileSizeString; } catch (NumberFormatException ex){ parseFailed("Argument " + _lastMatchedFor + " must be String value from 1MB to 99999MB."); return null; } } parseFailed("Argument " + _lastMatchedFor + " must be String value from 1MB to 99999MB."); return null; } @Override public String toString() { return _s; } } /** * Dead stupid argument parser. 
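   *
   * It only understands plain {@code -flag}/{@code --flag} switches and {@code -flag value} pairs,
   * e.g. (argument values are illustrative only):
   * <pre>{@code
   * parseArguments(new String[]{"-name", "mycloud", "-nthreads", "4", "-log_level", "INFO"});
   * }</pre>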
*/ static void parseArguments(String[] args) { for (AbstractH2OExtension e : extManager.getCoreExtensions()) { args = e.parseArguments(args); } parseH2OArgumentsTo(args, ARGS); } public static OptArgs parseH2OArgumentsTo(String[] args, OptArgs trgt) { for (int i = 0; i < args.length; i++) { OptString s = new OptString(args[i]); if (s.matches("h") || s.matches("help")) { trgt.help = true; } else if (s.matches("version")) { trgt.version = true; } else if (s.matches("name")) { i = s.incrementAndCheck(i, args); trgt.name = args[i]; } else if (s.matches("flatfile")) { i = s.incrementAndCheck(i, args); trgt.flatfile = args[i]; } else if (s.matches("port")) { i = s.incrementAndCheck(i, args); trgt.port = s.parsePort(args[i]); } else if (s.matches("baseport")) { i = s.incrementAndCheck(i, args); trgt.baseport = s.parsePort(args[i]); } else if (s.matches("port_offset")) { i = s.incrementAndCheck(i, args); trgt.port_offset = s.parsePort(args[i]); // port offset has the same properties as a port, we don't allow negative offsets } else if (s.matches("ip")) { i = s.incrementAndCheck(i, args); trgt.ip = args[i]; } else if (s.matches("web_ip")) { i = s.incrementAndCheck(i, args); trgt.web_ip = args[i]; } else if (s.matches("network")) { i = s.incrementAndCheck(i, args); trgt.network = args[i]; } else if (s.matches("client")) { trgt.client = true; } else if (s.matches("allow_clients")) { trgt.allow_clients = true; } else if (s.matches("allow_unsupported_java")) { trgt.allow_unsupported_java = true; } else if (s.matches("rest_api_ping_timeout")) { i = s.incrementAndCheck(i, args); trgt.rest_api_ping_timeout = s.parseInt(args[i]); } else if (s.matches("notify_local")) { i = s.incrementAndCheck(i, args); trgt.notify_local = args[i]; } else if (s.matches("off_heap_memory_ratio")) { i = s.incrementAndCheck(i, args); trgt.off_heap_memory_ratio = Double.parseDouble(args[i]); } else if (s.matches("user_name")) { i = s.incrementAndCheck(i, args); trgt.user_name = args[i]; } else if (s.matches("ice_root")) { i = s.incrementAndCheck(i, args); trgt.ice_root = args[i]; } else if (s.matches("log_dir")) { i = s.incrementAndCheck(i, args); trgt.log_dir = args[i]; } else if (s.matches("flow_dir")) { i = s.incrementAndCheck(i, args); trgt.flow_dir = args[i]; } else if (s.matches("disable_web")) { trgt.disable_web = true; } else if (s.matches("disable_net")) { trgt.disable_net = true; } else if (s.matches("disable_flow")) { trgt.disable_flow = true; } else if (s.matches("context_path")) { i = s.incrementAndCheck(i, args); String value = args[i]; trgt.context_path = value.startsWith("/") ? value.trim().length() == 1 ? 
"" : value : "/" + value; } else if (s.matches("nthreads")) { i = s.incrementAndCheck(i, args); int nthreads = s.parseInt(args[i]); if (nthreads >= 1) { //otherwise keep default (all cores) if (nthreads > Short.MAX_VALUE) throw H2O.unimpl("Can't handle more than " + Short.MAX_VALUE + " threads."); trgt.nthreads = (short) nthreads; } } else if (s.matches("hdfs_config")) { i = s.incrementAndCheck(i, args); trgt.hdfs_config = ArrayUtils.append(trgt.hdfs_config, args[i]); } else if (s.matches("hdfs_skip")) { trgt.hdfs_skip = true; } else if (s.matches("H")) { i = s.incrementAndCheck(i, args); String key = args[i]; i = s.incrementAndCheck(i, args); String value = args[i]; trgt.hadoop_properties.setProperty(key, value); } else if (s.matches("aws_credentials")) { i = s.incrementAndCheck(i, args); trgt.aws_credentials = args[i]; } else if (s.matches("configure_s3_using_s3a")) { trgt.configure_s3_using_s3a = true; } else if (s.matches("ga_hadoop_ver")) { i = s.incrementAndCheck(i, args); trgt.ga_hadoop_ver = args[i]; } else if (s.matches("ga_opt_out")) { // JUnits pass this as a system property, but it usually a flag without an arg if (i+1 < args.length && args[i+1].equals("yes")) i++; } else if (s.matches("auto_recovery_dir")) { i = s.incrementAndCheck(i, args); trgt.auto_recovery_dir = args[i]; } else if (s.matches("log_level")) { i = s.incrementAndCheck(i, args); trgt.log_level = args[i]; } else if (s.matches("max_log_file_size")) { i = s.incrementAndCheck(i, args); trgt.max_log_file_size = s.checkFileSize(args[i]); } else if (s.matches("random_udp_drop")) { trgt.random_udp_drop = true; } else if (s.matches("md5skip")) { trgt.md5skip = true; } else if (s.matches("quiet")) { trgt.quiet = true; } else if(s.matches("cleaner")) { trgt.cleaner = true; } else if (s.matches("jks")) { i = s.incrementAndCheck(i, args); trgt.jks = args[i]; } else if (s.matches("jks_pass")) { i = s.incrementAndCheck(i, args); trgt.jks_pass = args[i]; } else if (s.matches("jks_alias")) { i = s.incrementAndCheck(i, args); trgt.jks_alias = args[i]; } else if (s.matches("hostname_as_jks_alias")) { trgt.hostname_as_jks_alias = true; } else if (s.matches("hash_login")) { trgt.hash_login = true; } else if (s.matches("ldap_login")) { trgt.ldap_login = true; } else if (s.matches("kerberos_login")) { trgt.kerberos_login = true; } else if (s.matches("spnego_login")) { trgt.spnego_login = true; } else if (s.matches("pam_login")) { trgt.pam_login = true; } else if (s.matches("login_conf")) { i = s.incrementAndCheck(i, args); trgt.login_conf = args[i]; } else if (s.matches("spnego_properties")) { i = s.incrementAndCheck(i, args); trgt.spnego_properties = args[i]; } else if (s.matches("form_auth")) { trgt.form_auth = true; } else if (s.matches("session_timeout")) { i = s.incrementAndCheck(i, args); trgt.session_timeout_spec = args[i]; try { trgt.session_timeout = Integer.parseInt(args[i]); } catch (Exception e) { /* ignored */ } } else if (s.matches("internal_security_conf")) { i = s.incrementAndCheck(i, args); trgt.internal_security_conf = args[i]; } else if (s.matches("internal_security_conf_rel_paths")) { trgt.internal_security_conf_rel_paths = true; } else if (s.matches("allow_insecure_xgboost")) { trgt.allow_insecure_xgboost = true; } else if (s.matches("use_external_xgboost")) { trgt.use_external_xgboost = true; } else if (s.matches("decrypt_tool")) { i = s.incrementAndCheck(i, args); trgt.decrypt_tool = args[i]; } else if (s.matches("principal")) { i = s.incrementAndCheck(i, args); trgt.principal = args[i]; } else if 
(s.matches("keytab")) { i = s.incrementAndCheck(i, args); trgt.keytab_path = args[i]; } else if (s.matches("hdfs_token_refresh_interval")) { i = s.incrementAndCheck(i, args); trgt.hdfs_token_refresh_interval = args[i]; } else if (s.matches("no_latest_check")) { // ignored Log.trace("Invoked with 'no_latest_check' option (NOOP in current release)."); } else if(s.matches(("client_disconnect_timeout"))){ i = s.incrementAndCheck(i, args); int clientDisconnectTimeout = s.parseInt(args[i]); if (clientDisconnectTimeout <= 0) { throw new IllegalArgumentException("Interval for checking if client is disconnected has to be positive (milliseconds)."); } trgt.clientDisconnectTimeout = clientDisconnectTimeout; } else if (s.matches("useUDP")) { Log.warn("Support for UDP communication was removed from H2O, using TCP."); } else if (s.matches("watchdog_client_retry_timeout")) { warnWatchdogRemoved("watchdog_client_retry_timeout"); } else if (s.matches("watchdog_client")) { warnWatchdogRemoved("watchdog_client"); } else if (s.matches("watchdog_client_connect_timeout")) { warnWatchdogRemoved("watchdog_client_connect_timeout"); } else if (s.matches("watchdog_stop_without_client")) { warnWatchdogRemoved("watchdog_stop_without_client"); } else if (s.matches("features")) { i = s.incrementAndCheck(i, args); trgt.features_level = ModelBuilder.BuilderVisibility.valueOfIgnoreCase(args[i]); Log.info(String.format("Limiting algorithms available to level: %s", trgt.features_level.name())); } else if (s.matches("add_http_header")) { i = s.incrementAndCheck(i, args); String key = args[i]; i = s.incrementAndCheck(i, args); String value = args[i]; trgt.extra_headers = ArrayUtils.append(trgt.extra_headers, new KeyValueArg(key, value)); } else if (s.matches("file_deny_glob")) { i = s.incrementAndCheck(i, args); String key = args[i]; try { trgt.file_deny_glob = FileSystems.getDefault().getPathMatcher("glob:" + key); } catch (Exception e) { throw new IllegalArgumentException("Error parsing file_deny_glob parameter"); } } else if(s.matches("embedded")) { trgt.embedded = true; } else { parseFailed("Unknown argument (" + s + ")"); } } return trgt; } private static void warnWatchdogRemoved(String param) { Log.warn("Support for watchdog client communication was removed and '" + param + "' argument has no longer any effect. " + "It will be removed in the next major release 3.30."); } private static void validateArguments() { if (ARGS.jks != null) { if (! new File(ARGS.jks).exists()) { parseFailed("File does not exist: " + ARGS.jks); } } if (ARGS.jks_alias != null && ARGS.hostname_as_jks_alias) { parseFailed("Options -jks_alias and -hostname_as_jks_alias are mutually exclusive, specify only one of them"); } if (ARGS.login_conf != null) { if (! new File(ARGS.login_conf).exists()) { parseFailed("File does not exist: " + ARGS.login_conf); } } int login_arg_count = 0; if (ARGS.hash_login) login_arg_count++; if (ARGS.ldap_login) login_arg_count++; if (ARGS.kerberos_login) login_arg_count++; if (ARGS.spnego_login) login_arg_count++; if (ARGS.pam_login) login_arg_count++; if (login_arg_count > 1) { parseFailed("Can only specify one of -hash_login, -ldap_login, -kerberos_login, -spnego_login and -pam_login"); } if (ARGS.hash_login || ARGS.ldap_login || ARGS.kerberos_login || ARGS.pam_login || ARGS.spnego_login) { if (H2O.ARGS.login_conf == null) { parseFailed("Must specify -login_conf argument"); } } else { if (H2O.ARGS.form_auth) { parseFailed("No login method was specified. 
Form-based authentication can only be used in conjunction with a LoginService.\n" + "Pick a LoginService by specifying '-<method>_login' option."); } } if (ARGS.spnego_login) { if (H2O.ARGS.spnego_properties == null) { parseFailed("Must specify -spnego_properties argument"); } if (H2O.ARGS.form_auth) { parseFailed("Form-based authentication not supported when SPNEGO login is enabled."); } } if (ARGS.session_timeout_spec != null) { if (! ARGS.form_auth) { parseFailed("Session timeout can only be enabled for Form-based authentication (use -form_auth)"); } if (ARGS.session_timeout <= 0) parseFailed("Invalid session timeout specification (" + ARGS.session_timeout + ")"); } if (ARGS.rest_api_ping_timeout < 0) { parseFailed(String.format("rest_api_ping_timeout needs to be 0 or higher, was (%d)", ARGS.rest_api_ping_timeout)); } // Validate extension arguments for (AbstractH2OExtension e : extManager.getCoreExtensions()) { e.validateArguments(); } } //------------------------------------------------------------------------------------------------------------------- // Embedded configuration for a full H2O node to be implanted in another // piece of software (e.g. Hadoop mapper task). //------------------------------------------------------------------------------------------------------------------- public static volatile AbstractEmbeddedH2OConfig embeddedH2OConfig; /** * Register embedded H2O configuration object with H2O instance. */ public static void setEmbeddedH2OConfig(AbstractEmbeddedH2OConfig c) { embeddedH2OConfig = c; } /** * Returns an instance of {@link AbstractEmbeddedH2OConfig}. The embedded config may either come * from directly setting the embeddedH2OConfig field via the setEmbeddedH2OConfig setter, or be dynamically provided via * the service loader. A directly set {@link AbstractEmbeddedH2OConfig} is always prioritized. ServiceLoader lookup is only * performed if no config is previously set. * <p> * The result of the first ServiceLoader lookup is also considered final - once a service is found, dynamic lookup is not * performed any further. * * @return An instance of {@link AbstractEmbeddedH2OConfig}, if set or dynamically provided. Otherwise null * @author Michal Kurka */ public static AbstractEmbeddedH2OConfig getEmbeddedH2OConfig() { if (embeddedH2OConfig != null) { return embeddedH2OConfig; } embeddedH2OConfig = discoverEmbeddedConfigProvider() .map(embeddedConfigProvider -> { Log.info(String.format("Dynamically loaded '%s' as AbstractEmbeddedH2OConfigProvider.", embeddedConfigProvider.getName())); return embeddedConfigProvider.getConfig(); }).orElse(null); return embeddedH2OConfig; } /** * Uses {@link ServiceLoader} to discover active instances of {@link EmbeddedConfigProvider}. Only one provider * may be active at a time. If more providers are detected, {@link IllegalStateException} is thrown. * * @return An {@link Optional} of {@link EmbeddedConfigProvider}, if a single active provider is found. Otherwise * an empty optional. * @throws IllegalStateException When multiple active instances of {@link EmbeddedConfigProvider} are discovered.
*/ private static Optional<EmbeddedConfigProvider> discoverEmbeddedConfigProvider() throws IllegalStateException { final ServiceLoader<EmbeddedConfigProvider> configProviders = ServiceLoader.load(EmbeddedConfigProvider.class); EmbeddedConfigProvider provider = null; for (final EmbeddedConfigProvider candidateProvider : configProviders) { candidateProvider.init(); if (!candidateProvider.isActive()) continue; if (provider != null) { throw new IllegalStateException("Multiple active EmbeddedH2OConfig providers: " + provider.getName() + " and " + candidateProvider.getName() + " (possibly others as well)."); } provider = candidateProvider; } return Optional.ofNullable(provider); } /** * Tell the embedding software that this H2O instance belongs to * a cloud of a certain size. * This may be non-blocking. * * @param ip IP address this H2O can be reached at. * @param port Port this H2O can be reached at (for REST API and browser). * @param size Number of H2O instances in the cloud. */ public static void notifyAboutCloudSize(InetAddress ip, int port, InetAddress leaderIp, int leaderPort, int size) { if (ARGS.notify_local != null && !ARGS.notify_local.trim().isEmpty()) { final File notifyFile = new File(ARGS.notify_local); final File parentDir = notifyFile.getParentFile(); if (parentDir != null && !parentDir.isDirectory()) { if (!parentDir.mkdirs()) { Log.err("Cannot make parent dir for notify file."); H2O.exit(-1); } } try(BufferedWriter output = new BufferedWriter(new FileWriter(notifyFile))) { output.write(SELF_ADDRESS.getHostAddress()); output.write(':'); output.write(Integer.toString(API_PORT)); output.flush(); } catch (IOException e) { Log.err("Unable to write notify file."); H2O.exit(-1); } } if (embeddedH2OConfig != null) { embeddedH2OConfig.notifyAboutCloudSize(ip, port, leaderIp, leaderPort, size); } } public static void closeAll() { try { H2O.getWebServer().stop(); } catch( Exception ignore ) { } try { NetworkInit.close(); } catch( IOException ignore ) { } PersistManager PM = H2O.getPM(); if( PM != null ) PM.getIce().cleanUp(); } /** Notify the embedding software that this H2O instance wants to exit. Shuts down a single Node. * @param status H2O's requested process exit value. */ public static void exit(int status) { // Log subsystem might still be caching messages, let it know to flush the cache and start logging even if we don't have SELF yet Log.notifyAboutProcessExiting(); exitQuietly(status); } /** * Notify the embedding software that this H2O instance wants to exit. Shuts down a single Node. * Exits without logging any buffered messages, invoked when H2O arguments are not correctly parsed * and we might thus not be able to successfully initialize the logging subsystem. * * @param status H2O's requested process exit value. */ private static void exitQuietly(int status) { // Embedded H2O path (e.g. inside Hadoop mapper task). if( embeddedH2OConfig != null ) embeddedH2OConfig.exit(status); // Standalone H2O path, or if the embedded config does not exit System.exit(status); } /** Shut the cluster down by sending a shutdown UDP packet.
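   * With {@code status == 0} an orderly shutdown of the whole cluster is attempted first; any
   * non-zero status skips that step. Illustrative call:
   * <pre>{@code
   * H2O.shutdown(0); // request an orderly, cluster-wide shutdown
   * }</pre>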
*/ public static void shutdown(int status) { if(status == 0) H2O.orderlyShutdown(); UDPRebooted.T.error.send(H2O.SELF); H2O.exit(status); } /** Orderly shutdown with infinite timeout for confirmations from the nodes in the cluster */ public static int orderlyShutdown() { return orderlyShutdown(-1); } public static int orderlyShutdown(int timeout) { boolean [] confirmations = new boolean[H2O.CLOUD.size()]; if (H2O.SELF.index() >= 0) { // Do not wait for clients to shutdown confirmations[H2O.SELF.index()] = true; } Futures fs = new Futures(); for(H2ONode n:H2O.CLOUD._memary) { if(n != H2O.SELF) fs.add(new RPC(n, new ShutdownTsk(H2O.SELF,n.index(), 1000, confirmations, 0)).call()); } if(timeout > 0) try { Thread.sleep(timeout); } catch (Exception ignore) {} else fs.blockForPending(); // todo, should really have block for pending with a timeout int failedToShutdown = 0; // shutdown failed for(boolean b:confirmations) if(!b) failedToShutdown++; return failedToShutdown; } private static volatile boolean _shutdownRequested = false; public static void requestShutdown() { _shutdownRequested = true; } public static boolean getShutdownRequested() { return _shutdownRequested; } //------------------------------------------------------------------------------------------------------------------- public static final AbstractBuildVersion ABV = AbstractBuildVersion.getBuildVersion(); //------------------------------------------------------------------------------------------------------------------- private static boolean _haveInheritedLog4jConfiguration = false; public static boolean haveInheritedLog4jConfiguration() { return _haveInheritedLog4jConfiguration; } public static void configureLogging() { if (LogManager.getCurrentLoggers().hasMoreElements()) { _haveInheritedLog4jConfiguration = true; return; } else if (System.getProperty("log4j.configuration") != null) { _haveInheritedLog4jConfiguration = true; return; } // Disable logging from a few specific classes at startup. // (These classes may (or may not) be re-enabled later on.) // // The full logger initialization is done by setLog4jProperties() in class water.util.Log. // The trick is the output path / file isn't known until the H2O API PORT is chosen, // so real logger initialization has to happen somewhat late in the startup lifecycle. java.util.Properties p = new java.util.Properties(); p.setProperty("log4j.rootCategory", "WARN, console"); p.setProperty("log4j.logger.org.eclipse.jetty", "WARN"); p.setProperty("log4j.appender.console", "org.apache.log4j.ConsoleAppender"); p.setProperty("log4j.appender.console.layout", "org.apache.log4j.PatternLayout"); p.setProperty("log4j.appender.console.layout.ConversionPattern", "%m%n"); PropertyConfigurator.configure(p); System.setProperty("org.eclipse.jetty.LEVEL", "WARN"); // Log jetty stuff to stdout for now. // TODO: figure out how to wire this into log4j. 
System.setProperty("org.eclipse.jetty.util.log.class", "org.eclipse.jetty.util.log.StdErrLog"); } //------------------------------------------------------------------------------------------------------------------- public static class AboutEntry { private String name; private String value; public String getName() { return name; } public String getValue() { return value; } AboutEntry(String n, String v) { name = n; value = v; } } private static ArrayList<AboutEntry> aboutEntries = new ArrayList<>(); @SuppressWarnings("unused") public static void addAboutEntry(String name, String value) { AboutEntry e = new AboutEntry(name, value); aboutEntries.add(e); } @SuppressWarnings("unused") public static ArrayList<AboutEntry> getAboutEntries() { return aboutEntries; } //------------------------------------------------------------------------------------------------------------------- private static final AtomicLong nextModelNum = new AtomicLong(0); /** * Calculate a unique model id that includes User-Agent info (if it can be discovered). * For the user agent info to be discovered, this needs to be called from a Jetty thread. * * This lets us distinguish models created from R vs. other front-ends, for example. * At some future point, it could make sense to include a sessionId here. * * The algorithm is: * descModel_[userAgentPrefixIfKnown_]cloudId_monotonicallyIncreasingInteger * * Right now because of the way the REST API works, a bunch of numbers are created and * thrown away. So the values are monotonically increasing but not contiguous. * * @param desc Model description. * @return The suffix. */ public static String calcNextUniqueModelId(String desc) { return calcNextUniqueObjectId("model", nextModelNum, desc); } synchronized public static String calcNextUniqueObjectId(String type, AtomicLong sequenceSource, String desc) { StringBuilder sb = new StringBuilder(); sb.append(desc).append('_').append(type).append('_'); // Append user agent string if we can figure it out. String source = ServletUtils.getUserAgent(); if (source != null) { StringBuilder ua = new StringBuilder(); if (source.contains("Safari")) { ua.append("safari"); } else if (source.contains("Python")) { ua.append("python"); } else { for (int i = 0; i < source.length(); i++) { char c = source.charAt(i); if (c >= 'a' && c <= 'z') { ua.append(c); continue; } else if (c >= 'A' && c <= 'Z') { ua.append(c); continue; } break; } } if (ua.toString().length() > 0) { sb.append(ua.toString()).append("_"); } } // REST API needs some refactoring to avoid burning lots of extra numbers. // // I actually tried only doing the addAndGet only for POST requests (and junk UUID otherwise), // but that didn't eliminate the gaps. long n = sequenceSource.addAndGet(1); sb.append(CLUSTER_ID).append("_").append(n); return sb.toString(); } //------------------------------------------------------------------------------------------------------------------- // This piece of state is queried by Steam. // It's used to inform the Admin user the last time each H2O instance did something. // Admins can take this information and decide whether to kill idle clusters to reclaim tied up resources. private static volatile long lastTimeSomethingHappenedMillis = System.currentTimeMillis(); private static volatile AtomicInteger activeRapidsExecs = new AtomicInteger(); /** * Get the number of milliseconds the H2O cluster has been idle. * @return milliseconds since the last interesting thing happened. 
*/ public static long getIdleTimeMillis() { long latestEndTimeMillis = -1; // If there are any running rapids queries, consider that not idle. if (activeRapidsExecs.get() > 0) { updateNotIdle(); } else { // If there are any running jobs, consider that not idle. // Remember the latest job ending time as well. Job[] jobs = Job.jobs(); for (int i = jobs.length - 1; i >= 0; i--) { Job j = jobs[i]; if (j.isRunning()) { updateNotIdle(); break; } if (j.end_time() > latestEndTimeMillis) { latestEndTimeMillis = j.end_time(); } } } long latestTimeMillis = Math.max(latestEndTimeMillis, lastTimeSomethingHappenedMillis); // Calculate milliseconds and clamp at zero. long now = System.currentTimeMillis(); long deltaMillis = now - latestTimeMillis; if (deltaMillis < 0) { deltaMillis = 0; } return deltaMillis; } /** * Update the last time that something happened to reset the idle timer. * This is meant to be callable safely from almost anywhere. */ public static void updateNotIdle() { lastTimeSomethingHappenedMillis = System.currentTimeMillis(); } /** * Increment the current number of active Rapids exec calls. */ public static void incrementActiveRapidsCounter() { updateNotIdle(); activeRapidsExecs.incrementAndGet(); } /** * Decrement the current number of active Rapids exec calls. */ public static void decrementActiveRapidsCounter() { updateNotIdle(); activeRapidsExecs.decrementAndGet(); } //------------------------------------------------------------------------------------------------------------------- // Atomically set once during startup. Guards against repeated startups. public static final AtomicLong START_TIME_MILLIS = new AtomicLong(); // When did main() run // Used to gate default worker threadpool sizes public static final int NUMCPUS = H2ORuntime.availableProcessors(); // Best-guess process ID public static final long PID; static { PID = getCurrentPID(); } // Extension Manager instance private static final ExtensionManager extManager = ExtensionManager.getInstance(); /** * Retrieves a value of an H2O system property. * * H2O system properties have {@link OptArgs#SYSTEM_PROP_PREFIX} prefix. * * @param name property name * @param def default value * @return value of the system property or default value if property was not defined */ public static String getSysProperty(String name, String def) { return System.getProperty(H2O.OptArgs.SYSTEM_PROP_PREFIX + name, def); } /** * Retrieves a boolean value of an H2O system property. * * H2O system properties have {@link OptArgs#SYSTEM_PROP_PREFIX} prefix. * * @param name property name * @param def default value * @return value of the system property as boolean or default value if property was not defined. False returned if * the system property value is set but it is not "true" or any upper/lower case variant of it. */ public static boolean getSysBoolProperty(String name, boolean def) { return Boolean.parseBoolean(getSysProperty(name, String.valueOf(def))); } /** * Throw an exception that will cause the request to fail, but the cluster to continue. * @see #fail(String, Throwable) * @return never returns */ public static H2OIllegalArgumentException unimpl() { return new H2OIllegalArgumentException("unimplemented"); } /** * Throw an exception that will cause the request to fail, but the cluster to continue. 
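   * For example (the message is illustrative):
   * <pre>{@code
   * throw H2O.unimpl("quantiles for this column type");
   * }</pre>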
* @see #unimpl(String) * @see #fail(String, Throwable) * @return never returns */ public static H2OIllegalArgumentException unimpl(String msg) { return new H2OIllegalArgumentException("unimplemented: " + msg); } /** * H2O.fail is intended to be used in code where something should never happen, and if * it does it's a coding error that needs to be addressed immediately. Examples are: * AutoBuffer serialization for an object you're trying to serialize isn't available; * there's a typing error on your schema; your switch statement didn't cover all the AstRoot * subclasses available in Rapids. * <p> * It should *not* be used when only the single request should fail, it should *only* be * used if the error means that someone needs to go add some code right away. * * @param msg Message to Log.fatal() * @param cause Optional cause exception to Log.fatal() * @return never returns; calls System.exit(-1) */ public static H2OFailException fail(String msg, Throwable cause) { Log.fatal(msg); if (null != cause) Log.fatal(cause); Log.fatal("Stacktrace: "); Log.fatal(new Exception(msg)); // H2O fail() exists because of coding errors - but what if usage of fail() was itself a coding error? // Property "suppress.shutdown.on.failure" can be used in the case when someone is seeing shutdowns on production // because a developer incorrectly used fail() instead of just throwing a (recoverable) exception boolean suppressShutdown = getSysBoolProperty("suppress.shutdown.on.failure", false); if (! suppressShutdown) { H2O.shutdown(-1); } else { throw new IllegalStateException("Suppressed shutdown for failure: " + msg, cause); } // unreachable return new H2OFailException(msg); } /** * @see #fail(String, Throwable) * @return never returns */ public static H2OFailException fail() { return H2O.fail("Unknown code failure"); } /** * @see #fail(String, Throwable) * @return never returns */ public static H2OFailException fail(String msg) { return H2O.fail(msg, null); } /** * Return an error message with an accompanying URL to help the user get more detailed information. * * @param number H2O tech note number. * @param message Message to present to the user. * @return A longer message including a URL. */ public static String technote(int number, String message) { return message + "\n\n" + "For more information visit:\n" + " https://github.com/h2oai/h2o-3/discussions/" + GITHUB_DISCUSSIONS.get(number); } /** * Return an error message with an accompanying list of URLs to help the user get more detailed information. * * @param numbers H2O tech note numbers. * @param message Message to present to the user. * @return A longer message including a list of URLs. */ public static String technote(int[] numbers, String message) { StringBuilder sb = new StringBuilder() .append(message) .append("\n") .append("\n") .append("For more information visit:\n"); for (int number : numbers) { sb.append(" https://github.com/h2oai/h2o-3/discussions/").append(GITHUB_DISCUSSIONS.get(number)).append("\n"); } return sb.toString(); } // -------------------------------------------------------------------------- // The worker pools - F/J pools with different priorities. // These priorities are carefully ordered and asserted for... modify with // care. The real problem here is that we can get into cyclic deadlock // unless we spawn a thread of priority "X+1" in order to allow progress // on a queue which might be flooded with a large number of "<=X" tasks. 
// // Example of deadlock: suppose TaskPutKey and the Invalidate ran at the same // priority on a 2-node cluster. Both nodes flood their own queues with // writes to unique keys, which require invalidates to run on the other node. // Suppose the flooding depth exceeds the thread-limit (e.g. 99); then each // node might have all 99 worker threads blocked in TaskPutKey, awaiting // remote invalidates - but the other nodes' threads are also all blocked // awaiting invalidates! // // We fix this by being willing to always spawn a thread working on jobs at // priority X+1, and guaranteeing there are no jobs above MAX_PRIORITY - // i.e., jobs running at MAX_PRIORITY cannot block, and when those jobs are // done, the next lower level jobs get unblocked, etc. public static final byte MAX_PRIORITY = Byte.MAX_VALUE-1; public static final byte ACK_ACK_PRIORITY = MAX_PRIORITY; //126 public static final byte FETCH_ACK_PRIORITY = MAX_PRIORITY-1; //125 public static final byte ACK_PRIORITY = MAX_PRIORITY-2; //124 public static final byte DESERIAL_PRIORITY = MAX_PRIORITY-3; //123 public static final byte INVALIDATE_PRIORITY = MAX_PRIORITY-3; //123 public static final byte GET_KEY_PRIORITY = MAX_PRIORITY-4; //122 public static final byte PUT_KEY_PRIORITY = MAX_PRIORITY-5; //121 public static final byte ATOMIC_PRIORITY = MAX_PRIORITY-6; //120 public static final byte GUI_PRIORITY = MAX_PRIORITY-7; //119 public static final byte MIN_HI_PRIORITY = MAX_PRIORITY-7; //119 public static final byte MIN_PRIORITY = 0; // F/J threads that remember the priority of the last task they started // working on. // made public for ddply public static class FJWThr extends ForkJoinWorkerThread { public int _priority; FJWThr(ForkJoinPool pool) { super(pool); _priority = ((PrioritizedForkJoinPool)pool)._priority; setPriority( _priority == Thread.MIN_PRIORITY ? Thread.NORM_PRIORITY-1 : Thread. MAX_PRIORITY-1 ); setName("FJ-"+_priority+"-"+getPoolIndex()); } } // Factory for F/J threads, with cap's that vary with priority. static class FJWThrFact implements ForkJoinPool.ForkJoinWorkerThreadFactory { private final int _cap; FJWThrFact( int cap ) { _cap = cap; } @Override public ForkJoinWorkerThread newThread(ForkJoinPool pool) { int cap = _cap==-1 ? 4 * NUMCPUS : _cap; return pool.getPoolSize() <= cap ? new FJWThr(pool) : null; } } // A standard FJ Pool, with an expected priority level. private static class PrioritizedForkJoinPool extends ForkJoinPool { final int _priority; private PrioritizedForkJoinPool(int p, int cap) { super((ARGS.nthreads <= 0) ? NUMCPUS : ARGS.nthreads, new FJWThrFact(cap), null, p>=MIN_HI_PRIORITY /* low priority FJQs should use the default FJ settings to use LIFO order of thread private queues. */); _priority = p; } private H2OCountedCompleter poll2() { return (H2OCountedCompleter)pollSubmission(); } } // Hi-priority work, sorted into individual queues per-priority. // Capped at a small number of threads per pool. private static final PrioritizedForkJoinPool FJPS[] = new PrioritizedForkJoinPool[MAX_PRIORITY+1]; static { // Only need 1 thread for the AckAck work, as it cannot block FJPS[ACK_ACK_PRIORITY] = new PrioritizedForkJoinPool(ACK_ACK_PRIORITY,1); for( int i=MIN_HI_PRIORITY+1; i<MAX_PRIORITY; i++ ) FJPS[i] = new PrioritizedForkJoinPool(i,4); // All CPUs, but no more for blocking purposes FJPS[GUI_PRIORITY] = new PrioritizedForkJoinPool(GUI_PRIORITY,2); } // Easy peeks at the FJ queues static int getWrkQueueSize (int i) { return FJPS[i]==null ? 
-1 : FJPS[i].getQueuedSubmissionCount();} static int getWrkThrPoolSize(int i) { return FJPS[i]==null ? -1 : FJPS[i].getPoolSize(); } // For testing purposes (verifying API work exceeds grunt model-build work) // capture the class of any submitted job lower than this priority; static public int LOW_PRIORITY_API_WORK; static public String LOW_PRIORITY_API_WORK_CLASS; // Submit to the correct priority queue public static <T extends H2OCountedCompleter> T submitTask( T task ) { int priority = task.priority(); if( priority < LOW_PRIORITY_API_WORK ) LOW_PRIORITY_API_WORK_CLASS = task.getClass().toString(); assert MIN_PRIORITY <= priority && priority <= MAX_PRIORITY:"priority " + priority + " is out of range, expected range is < " + MIN_PRIORITY + "," + MAX_PRIORITY + ">"; if( FJPS[priority]==null ) synchronized( H2O.class ) { if( FJPS[priority] == null ) FJPS[priority] = new PrioritizedForkJoinPool(priority,-1); } FJPS[priority].submit(task); return task; } /** * Executes a runnable on a regular H2O Node (= not on a client). * If the current H2O Node is a regular node, the runnable will be executed directly (RemoteRunnable#run will be invoked). * If the current H2O Node is a client node, the runnable will be send to a leader node of the cluster and executed there. * The caller shouldn't make any assumptions on where the code will be run. * @param runnable code to be executed * @param <T> RemoteRunnable * @return executed runnable (will be a different instance if executed remotely). */ public static <T extends RemoteRunnable> T runOnH2ONode(T runnable) { H2ONode node = H2O.ARGS.client ? H2O.CLOUD.leader() : H2O.SELF; return runOnH2ONode(node, runnable); } public static <T extends RemoteRunnable> T runOnLeaderNode(T runnable) { return runOnH2ONode(H2O.CLOUD.leader(), runnable); } // package-private for unit tests static <T extends RemoteRunnable> T runOnH2ONode(H2ONode node, T runnable) { if (node == H2O.SELF) { // run directly runnable.run(); return runnable; } else { RunnableWrapperTask<T> task = new RunnableWrapperTask<>(runnable); try { return new RPC<>(node, task).call().get()._runnable; } catch (DistributedException e) { Log.trace("Exception in calling runnable on a remote node", e); Throwable cause = e.getCause(); throw cause instanceof RuntimeException ? (RuntimeException) cause : e; } } } private static class RunnableWrapperTask<T extends RemoteRunnable> extends DTask<RunnableWrapperTask<T>> { private final T _runnable; private RunnableWrapperTask(T runnable) { _runnable = runnable; } @Override public void compute2() { _runnable.setupOnRemote(); _runnable.run(); tryComplete(); } } public abstract static class RemoteRunnable<T extends RemoteRunnable> extends Iced<T> { public void setupOnRemote() {} public abstract void run(); } /** Simple wrapper over F/J {@link CountedCompleter} to support priority * queues. F/J queues are simple unordered (and extremely light weight) * queues. However, we frequently need priorities to avoid deadlock and to * promote efficient throughput (e.g. failure to respond quickly to {@link * TaskGetKey} can block an entire node for lack of some small piece of * data). So each attempt to do lower-priority F/J work starts with an * attempt to work and drain the higher-priority queues. 
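   *
   * A minimal usage sketch (class and field names are illustrative only):
   * <pre>{@code
   * class MyWork extends H2O.H2OCountedCompleter<MyWork> {
   *   long _result;
   *   public void compute2() {
   *     _result = 42;   // the actual work goes here
   *     tryComplete();  // let the F/J framework know we are done
   *   }
   * }
   * MyWork w = H2O.submitTask(new MyWork()); // queued according to the task's priority()
   * w.join();                                // block the caller until the work completes
   * }</pre>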
*/ public static abstract class H2OCountedCompleter<T extends H2OCountedCompleter> extends CountedCompleter implements Cloneable, Freezable<T> { @Override public byte [] asBytes(){return new AutoBuffer().put(this).buf();} @Override public T reloadFromBytes(byte [] ary){ return read(new AutoBuffer(ary));} private /*final*/ byte _priority; // Without a completer, we expect this task will be blocked on - so the // blocking thread is not available in the current thread pool, so the // launched task needs to run at a higher priority. public H2OCountedCompleter( ) { this(null); } // With a completer, this task will NOT be blocked on and the the current // thread is available for executing it... so the priority can remain at // the current level. static private byte computePriority( H2OCountedCompleter completer ) { int currThrPrior = currThrPriority(); // If there's no completer, then current thread will block on this task // at the current priority, possibly filling up the current-priority // thread pool - so the task has to run at the next higher priority. if( completer == null ) return (byte)(currThrPrior+1); // With a completer - no thread blocks on this task, so no thread pool // gets filled-up with blocked threads. We can run at the current // priority (or the completer's priority if it's higher). return (byte)Math.max(currThrPrior,completer.priority()); } protected H2OCountedCompleter(H2OCountedCompleter completer) { this(completer,computePriority(completer)); } // Special for picking GUI priorities protected H2OCountedCompleter( byte prior ) { this(null,prior); } protected H2OCountedCompleter(H2OCountedCompleter completer, byte prior) { super(completer); _priority = prior; } /** Used by the F/J framework internally to do work. Once per F/J task, * drain the high priority queue before doing any low priority work. * Calls {@link #compute2} which contains actual work. */ @Override public final void compute() { FJWThr t = (FJWThr)Thread.currentThread(); int pp = ((PrioritizedForkJoinPool)t.getPool())._priority; // Drain the high priority queues before the normal F/J queue H2OCountedCompleter h2o = null; boolean set_t_prior = false; try { assert priority() == pp:" wrong priority for task " + getClass().getSimpleName() + ", expected " + priority() + ", but got " + pp; // Job went to the correct queue? assert t._priority <= pp; // Thread attempting the job is only a low-priority? final int p2 = Math.max(pp,MIN_HI_PRIORITY); for( int p = MAX_PRIORITY; p > p2; p-- ) { if( FJPS[p] == null ) continue; h2o = FJPS[p].poll2(); if( h2o != null ) { // Got a hi-priority job? t._priority = p; // Set & do it now! t.setPriority(Thread.MAX_PRIORITY-1); set_t_prior = true; h2o.compute2(); // Do it ahead of normal F/J work p++; // Check again the same queue } } } catch( Throwable ex ) { // If the higher priority job popped an exception, complete it // exceptionally... but then carry on and do the lower priority job. 
if( h2o != null ) h2o.completeExceptionally(ex); else { ex.printStackTrace(); throw ex; } } finally { t._priority = pp; if( pp == MIN_PRIORITY && set_t_prior ) t.setPriority(Thread.NORM_PRIORITY-1); } // Now run the task as planned if( this instanceof DTask ) icer().compute1(this); else compute2(); } public void compute1() { compute2(); } /** Override compute2() with actual work without having to worry about tryComplete() */ public void compute2() {} // In order to prevent deadlock, threads that block waiting for a reply // from a remote node, need the remote task to run at a higher priority // than themselves. This field tracks the required priority. protected final byte priority() { return _priority; } @Override public final T clone(){ try { return (T)super.clone(); } catch( CloneNotSupportedException e ) { throw Log.throwErr(e); } } /** If this is a F/J thread, return its priority - used to lift the * priority of a blocking remote call, so the remote node runs it at a * higher priority - so we don't deadlock when we burn the local * thread. */ protected static byte currThrPriority() { Thread cThr = Thread.currentThread(); return (byte)((cThr instanceof FJWThr) ? ((FJWThr)cThr)._priority : MIN_PRIORITY); } // The serialization flavor / delegate. Lazily set on first use. private short _ice_id; /** Find the serialization delegate for a subclass of this class */ protected Icer<T> icer() { int id = _ice_id; if(id != 0) { int tyid; if (id != 0) assert id == (tyid = TypeMap.onIce(this)) : "incorrectly cached id " + id + ", typemap has " + tyid + ", type = " + getClass().getName(); } return TypeMap.getIcer(id!=0 ? id : (_ice_id=(short)TypeMap.onIce(this)),this); } @Override final public AutoBuffer write (AutoBuffer ab) { return icer().write (ab,(T)this); } @Override final public AutoBuffer writeJSON(AutoBuffer ab) { return icer().writeJSON(ab,(T)this); } @Override final public T read (AutoBuffer ab) { return icer().read (ab,(T)this); } @Override final public T readJSON(AutoBuffer ab) { return icer().readJSON(ab,(T)this); } @Override final public int frozenType() { return icer().frozenType(); } } public static abstract class H2OCallback<T extends H2OCountedCompleter> extends H2OCountedCompleter{ public H2OCallback(){} public H2OCallback(H2OCountedCompleter cc){super(cc);} @Override public void compute2(){throw H2O.fail();} @Override public void onCompletion(CountedCompleter caller){callback((T) caller);} public abstract void callback(T t); } public static int H2O_PORT; // H2O TCP Port public static int API_PORT; // RequestServer and the API HTTP port /** * @return String of the form ipaddress:port */ public static String getIpPortString() { return H2O.ARGS.disable_web? "" : H2O.SELF_ADDRESS.getHostAddress() + ":" + H2O.API_PORT; } public static String getURL(String schema) { return getURL(schema, H2O.SELF_ADDRESS, H2O.API_PORT, H2O.ARGS.context_path); } public static String getURL(String schema, InetAddress address, int port, String contextPath) { return String.format(address instanceof Inet6Address ?
"%s://[%s]:%d%s" : "%s://%s:%d%s", schema, address.getHostAddress(), port, contextPath); } public static String getURL(String schema, String hostname, int port, String contextPath) { return String.format("%s://%s:%d%s", schema, hostname, port, contextPath); } // The multicast discovery port public static MulticastSocket CLOUD_MULTICAST_SOCKET; public static NetworkInterface CLOUD_MULTICAST_IF; public static InetAddress CLOUD_MULTICAST_GROUP; public static int CLOUD_MULTICAST_PORT ; /** Myself, as a Node in the Cloud */ public static H2ONode SELF = null; /** IP address of this node used for communication * with other nodes. */ public static InetAddress SELF_ADDRESS; /* Global flag to mark this specific cloud instance IPv6 only. * Right now, users have to force IPv6 stack by specifying the following * JVM options: * -Djava.net.preferIPv6Addresses=true * -Djava.net.preferIPv6Addresses=false */ static final boolean IS_IPV6 = NetworkUtils.isIPv6Preferred() && !NetworkUtils.isIPv4Preferred(); // Place to store temp/swap files public static URI ICE_ROOT; public static String DEFAULT_ICE_ROOT() { String username = System.getProperty("user.name"); if (username == null) username = ""; String u2 = username.replaceAll(" ", "_"); if (u2.length() == 0) u2 = "unknown"; return "/tmp/h2o-" + u2; } // Place to store flows public static String DEFAULT_FLOW_DIR() { String flow_dir = null; try { if (ARGS.ga_hadoop_ver != null) { PersistManager pm = getPM(); if (pm != null) { String s = pm.getHdfsHomeDirectory(); if (pm.exists(s)) { flow_dir = s; } } if (flow_dir != null) { flow_dir = flow_dir + "/h2oflows"; } } else { flow_dir = System.getProperty("user.home") + File.separator + "h2oflows"; } } catch (Exception ignore) { // Never want this to fail, as it will kill program startup. // Returning null is fine if it fails for whatever reason. } return flow_dir; } /* A static list of acceptable Cloud members passed via -flatfile option. * It is updated also when a new client appears. */ private static Set<H2ONode> STATIC_H2OS = null; // Reverse cloud index to a cloud; limit of 256 old clouds. static private final H2O[] CLOUDS = new H2O[256]; // Enables debug features like more logging and multiple instances per JVM static final String DEBUG_ARG = "h2o.debug"; static final boolean DEBUG = System.getProperty(DEBUG_ARG) != null; // Returned in REST API responses as X-h2o-cluster-id. // // Currently this is unique per node. Might make sense to distribute this // as part of joining the cluster so all nodes have the same value. public static final long CLUSTER_ID = System.currentTimeMillis(); private static WebServer webServer; public static void setWebServer(WebServer value) { webServer = value; } public static WebServer getWebServer() { return webServer; } /** If logging has not been setup yet, then Log.info will only print to * stdout. This allows for early processing of the '-version' option * without unpacking the jar file and other startup stuff. 
*/ private static void printAndLogVersion(String[] arguments) { Log.init(ARGS.log_level, ARGS.quiet, ARGS.max_log_file_size); Log.info("----- H2O started " + (ARGS.client?"(client)":"") + " -----"); Log.info("Build git branch: " + ABV.branchName()); Log.info("Build git hash: " + ABV.lastCommitHash()); Log.info("Build git describe: " + ABV.describe()); Log.info("Build project version: " + ABV.projectVersion()); Log.info("Build age: " + PrettyPrint.toAge(ABV.compiledOnDate(), new Date())); Log.info("Built by: '" + ABV.compiledBy() + "'"); Log.info("Built on: '" + ABV.compiledOn() + "'"); if (ABV.isTooOld()) { Log.warn("\n*** Your H2O version is over 100 days old. Please download the latest version from: https://h2o-release.s3.amazonaws.com/h2o/latest_stable.html ***"); Log.warn(""); } Log.info("Found H2O Core extensions: " + extManager.getCoreExtensions()); Log.info("Processed H2O arguments: ", Arrays.toString(arguments)); Runtime runtime = Runtime.getRuntime(); Log.info("Java availableProcessors: " + H2ORuntime.availableProcessors()); Log.info("Java heap totalMemory: " + PrettyPrint.bytes(runtime.totalMemory())); Log.info("Java heap maxMemory: " + PrettyPrint.bytes(runtime.maxMemory())); Log.info("Java version: Java "+System.getProperty("java.version")+" (from "+System.getProperty("java.vendor")+")"); List<String> launchStrings = ManagementFactory.getRuntimeMXBean().getInputArguments(); Log.info("JVM launch parameters: "+launchStrings); Log.info("JVM process id: " + ManagementFactory.getRuntimeMXBean().getName()); Log.info("OS version: "+System.getProperty("os.name")+" "+System.getProperty("os.version")+" ("+System.getProperty("os.arch")+")"); long totalMemory = OSUtils.getTotalPhysicalMemory(); Log.info ("Machine physical memory: " + (totalMemory==-1 ? "NA" : PrettyPrint.bytes(totalMemory))); Log.info("Machine locale: " + Locale.getDefault()); } /** Initializes the local node and the local cloud with itself as the only member. */ private static void startLocalNode() { // Figure self out; this is surprisingly hard NetworkInit.initializeNetworkSockets(); // Do not forget to put SELF into the static configuration (to simulate proper multicast behavior) if ( !ARGS.client && H2O.isFlatfileEnabled() && !H2O.isNodeInFlatfile(SELF)) { Log.warn("Flatfile configuration does not include self: " + SELF + ", but contains " + H2O.getFlatfile()); H2O.addNodeToFlatfile(SELF); } if (!H2O.ARGS.disable_net) { Log.info("H2O cloud name: '" + ARGS.name + "' on " + SELF + (H2O.isFlatfileEnabled() ? ", static configuration based on -flatfile " + ARGS.flatfile : (", discovery address " + CLOUD_MULTICAST_GROUP + ":" + CLOUD_MULTICAST_PORT))); } if (!H2O.ARGS.disable_web) { Log.info("If you have trouble connecting, try SSH tunneling from your local machine (e.g., via port 55555):\n" + " 1. Open a terminal and run 'ssh -L 55555:localhost:" + API_PORT + " " + System.getProperty("user.name") + "@" + SELF_ADDRESS.getHostAddress() + "'\n" + " 2. Point your browser to " + NetworkInit.h2oHttpView.getScheme() + "://localhost:55555"); } if (H2O.ARGS.rest_api_ping_timeout > 0) { Log.info(String.format("Registering REST API Check Thread. 
If 3/Ping endpoint is not" + " accessed during %d ms, the cluster will be terminated.", H2O.ARGS.rest_api_ping_timeout)); new RestApiPingCheckThread().start(); } // Create the starter Cloud with 1 member SELF._heartbeat._jar_md5 = JarHash.JARHASH; SELF._heartbeat._client = ARGS.client; SELF._heartbeat._cloud_name_hash = ARGS.name.hashCode(); } /** Starts the worker threads, receiver threads, heartbeats and all other * network related services. */ private static void startNetworkServices() { // Start the Persistent meta-data cleaner thread, which updates the K/V // mappings periodically to disk. There should be only 1 of these, and it // never shuts down. Needs to start BEFORE the HeartBeatThread to build // an initial histogram state. Cleaner.THE_CLEANER.start(); if (H2O.ARGS.disable_net) return; // We've rebooted the JVM recently. Tell other Nodes they can ignore task // prior tasks by us. Do this before we receive any packets UDPRebooted.T.reboot.broadcast(); // Start the MultiReceiverThread, to listen for multi-cast requests from // other Cloud Nodes. There should be only 1 of these, and it never shuts // down. Started soon, so we can start parsing multi-cast UDP packets new MultiReceiverThread().start(); // Start the TCPReceiverThread, to listen for TCP requests from other Cloud // Nodes. There should be only 1 of these, and it never shuts down. NetworkInit.makeReceiverThread().start(); } @Deprecated static public void register( String method_url, Class<? extends water.api.Handler> hclass, String method, String apiName, String summary ) { Log.warn("The H2O.register method is deprecated and will be removed in the next major release." + "Please register REST API endpoints as part of their corresponding REST API extensions!"); RequestServer.registerEndpoint(apiName, method_url, hclass, method, summary); } public static void registerResourceRoot(File f) { JarHash.registerResourceRoot(f); } /** Start the web service; disallow future URL registration. * Blocks until the server is up. * * @deprecated use starServingRestApi */ @Deprecated static public void finalizeRegistration() { startServingRestApi(); } /** * This switch Jetty into accepting mode. */ public static void startServingRestApi() { if (!H2O.ARGS.disable_web) { NetworkInit.h2oHttpView.acceptRequests(); } } // -------------------------------------------------------------------------- // The Current Cloud. A list of all the Nodes in the Cloud. Changes if we // decide to change Clouds via atomic Cloud update. public static volatile H2O CLOUD = new H2O(new H2ONode[0],0,0); // --- // A dense array indexing all Cloud members. Fast reversal from "member#" to // Node. No holes. Cloud size is _members.length. public final H2ONode[] _memary; // mapping from a node ip to node index private HashMap<String, Integer> _node_ip_to_index; final int _hash; public H2ONode getNodeByIpPort(String ipPort) { if(_node_ip_to_index != null) { Integer index = _node_ip_to_index.get(ipPort); if (index != null) { if(index == -1){ return H2O.SELF; } else if(index <= -1 || index >= _memary.length){ // index -1 should not happen anymore as well throw new RuntimeException("Mapping from node id to node index contains: " + index + ", however this node" + "does not exist!"); } return _memary[index]; } else { // no node with such ip:port return null; } } else { // mapping is null, no cloud ready yet return null; } } // A dense integer identifier that rolls over rarely. Rollover limits the // number of simultaneous nested Clouds we are operating on in-parallel. 
// Really capped to 1 byte, under the assumption we won't have 256 nested // Clouds. Capped at 1 byte so it can be part of an atomically-assigned // 'long' holding info specific to this Cloud. final char _idx; // no unsigned byte, so unsigned char instead // Construct a new H2O Cloud from the member list H2O( H2ONode[] h2os, int hash, int idx ) { this(h2os, false, hash, idx); } H2O( H2ONode[] h2os, boolean presorted, int hash, int idx ) { _memary = h2os; if (!presorted) java.util.Arrays.sort(_memary); // ... sorted! _hash = hash; // And record hash for cloud rollover _idx = (char)(idx&0x0ff); // Roll-over at 256 } // One-shot atomic setting of the next Cloud, with an empty K/V store. // Called single-threaded from Paxos. Constructs the new H2O Cloud from a // member list. void set_next_Cloud( H2ONode[] h2os, int hash ) { synchronized(this) { int idx = _idx+1; // Unique 1-byte Cloud index if( idx == 256 ) idx=1; // wrap, avoiding zero CLOUDS[idx] = CLOUD = new H2O(h2os,hash,idx); } SELF._heartbeat._cloud_size=(char)CLOUD.size(); H2O.CLOUD._node_ip_to_index = new HashMap<>(); for(H2ONode node: H2O.CLOUD._memary){ H2O.CLOUD._node_ip_to_index.put(node.getIpPortString(), node.index()); } } // Is nnn larger than old (counting for wrap around)? Gets confused if we // start seeing a mix of more than 128 unique clouds at the same time. Used // to tell the order of Clouds appearing. static boolean larger( int nnn, int old ) { assert (0 <= nnn && nnn <= 255); assert (0 <= old && old <= 255); return ((nnn-old)&0xFF) < 64; } public final int size() { return _memary.length; } public boolean isSingleNode() { return size() == 1; } public final H2ONode leader() { return _memary[0]; } public final H2ONode leaderOrNull() { return _memary.length > 0 ? _memary[0] : null; } // Find the node index for this H2ONode, or a negative number on a miss int nidx( H2ONode h2o ) { return java.util.Arrays.binarySearch(_memary,h2o); } boolean contains( H2ONode h2o ) { return nidx(h2o) >= 0; } @Override public String toString() { return java.util.Arrays.toString(_memary); } public H2ONode[] members() { return _memary; } // Cluster free memory public long free_mem() { long memsz = 0; for( H2ONode h2o : CLOUD._memary ) memsz += h2o._heartbeat.get_free_mem(); return memsz; } // Quick health check; no reason given for bad health public boolean healthy() { long now = System.currentTimeMillis(); for (H2ONode node : H2O.CLOUD.members()) if (!node.isHealthy(now)) return false; return true; } public static void waitForCloudSize(int x, long ms) { long start = System.currentTimeMillis(); if(!cloudIsReady(x)) Log.info("Waiting for clouding to finish. Current number of nodes " + CLOUD.size() + ". Target number of nodes: " + x); while (System.currentTimeMillis() - start < ms) { if (cloudIsReady(x)) break; try { Thread.sleep(100); } catch (InterruptedException ignore) {} } if (CLOUD.size() < x) throw new RuntimeException("Cloud size " + CLOUD.size() + " under " + x + ". Consider to increase `DEFAULT_TIME_FOR_CLOUDING`."); } private static boolean cloudIsReady(int x) { return CLOUD.size() >= x && Paxos._commonKnowledge; } public static int getCloudSize() { if (! Paxos._commonKnowledge) return -1; return CLOUD.size(); } // - Wait for at least HeartBeatThread.SLEEP msecs and // try to join others, if any. Try 2x just in case. // - Assume that we get introduced to everybody else // in one Paxos update, if at all (i.e, rest of // the cloud was already formed and stable by now) // - If nobody else is found, not an error. 
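  // A rough startup sketch (illustrative; assumes the usual standalone entry point):
  //   H2O.main(args);                          // bring up the local node
  //   H2O.joinOthers();                        // give peers a moment to form a cloud with us
  //   H2O.waitForCloudSize(nodes, timeoutMs);  // then require the expected cluster size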
public static void joinOthers() { long start = System.currentTimeMillis(); while( System.currentTimeMillis() - start < 2000 ) { if( CLOUD.size() > 1 && Paxos._commonKnowledge ) break; try { Thread.sleep(100); } catch( InterruptedException ignore ) { } } } // -------------------------------------------------------------------------- static void initializePersistence() { _PM = new PersistManager(ICE_ROOT); } // -------------------------------------------------------------------------- // The (local) set of Key/Value mappings. public static final NonBlockingHashMap<Key,Value> STORE = new NonBlockingHashMap<>(); // PutIfMatch // - Atomically update the STORE, returning the old Value on success // - Kick the persistence engine as needed // - Return existing Value on fail, no change. // // Keys are interned here: I always keep the existing Key, if any. The // existing Key is blind jammed into the Value prior to atomically inserting // it into the STORE and interning. // // Because of the blind jam, there is a narrow unusual race where the Key // might exist but be stale (deleted, mapped to a TOMBSTONE), a fresh put() // can find it and jam it into the Value, then the Key can be deleted // completely (e.g. via an invalidate), the table can resize flushing the // stale Key, an unrelated weak-put can re-insert a matching Key (but as a // new Java object), and delete it, and then the original thread can do a // successful put_if_later over the missing Key and blow the invariant that a // stored Value always points to the physically equal Key that maps to it // from the STORE. If this happens, some of replication management bits in // the Key will be set in the wrong Key copy... leading to extra rounds of // replication. public static Value putIfMatch( Key key, Value val, Value old ) { if( old != null ) // Have an old value? key = old._key; // Use prior key if( val != null ) { assert val._key.equals(key); if( val._key != key ) val._key = key; // Attempt to uniquify keys } // Insert into the K/V store Value res = STORE.putIfMatchUnlocked(key,val,old); if( res != old ) return res; // Return the failure cause // Persistence-tickle. // If the K/V mapping is going away, remove the old guy. // If the K/V mapping is changing, let the store cleaner just overwrite. 
// If the K/V mapping is new, let the store cleaner just create if( old != null && val == null ) old.removePersist(); // Remove the old guy if( val != null ) { Cleaner.dirty_store(); // Start storing the new guy if( old==null ) Scope.track_internal(key); // New Key - start tracking } return old; // Return success } // Get the value from the store public static void raw_remove(Key key) { Value v = STORE.remove(key); if( v != null ) v.removePersist(); } public static void raw_clear() { STORE.clear(); } public static boolean containsKey( Key key ) { return STORE.get(key) != null; } static Key getk( Key key ) { return STORE.getk(key); } public static Set<Key> localKeySet( ) { return STORE.keySet(); } static Collection<Value> values( ) { return STORE.values(); } static public int store_size() { return STORE.size(); } // Nice local-STORE only debugging summary public static String STOREtoString() { int[] cnts = new int[1]; Object[] kvs = H2O.STORE.raw_array(); // Start the walk at slot 2, because slots 0,1 hold meta-data for( int i=2; i<kvs.length; i += 2 ) { // In the raw backing array, Keys and Values alternate in slots Object ov = kvs[i+1]; if( !(ov instanceof Value) ) continue; // Ignore tombstones and Primes and null's Value val = (Value)ov; if( val.isNull() ) { Value.STORE_get(val._key); continue; } // Another variant of NULL int t = val.type(); while( t >= cnts.length ) cnts = Arrays.copyOf(cnts,cnts.length<<1); cnts[t]++; } StringBuilder sb = new StringBuilder(); for( int t=0; t<cnts.length; t++ ) if( cnts[t] != 0 ) sb.append(String.format("-%30s %5d\n",TypeMap.CLAZZES[t],cnts[t])); return sb.toString(); } // Persistence manager private static PersistManager _PM; public static PersistManager getPM() { return _PM; } // Node persistent storage private static NodePersistentStorage NPS; public static NodePersistentStorage getNPS() { return NPS; } /** * Run System.gc() on every node in the H2O cluster. * * Having to call this manually from user code is a sign that something is wrong and a better * heuristic is needed internally. */ public static void gc() { class GCTask extends DTask<GCTask> { public GCTask() {super(GUI_PRIORITY);} @Override public void compute2() { Log.info("Calling System.gc() now..."); System.gc(); Log.info("System.gc() finished"); tryComplete(); } } for (H2ONode node : H2O.CLOUD._memary) { GCTask t = new GCTask(); new RPC<>(node, t).call().get(); } } private static boolean JAVA_CHECK_PASSED = false; /** * Check if the Java version is not supported * * @return true if not supported */ public static boolean checkUnsupportedJava(String[] args) { if (JAVA_CHECK_PASSED) return false; if (Boolean.getBoolean(H2O.OptArgs.SYSTEM_PROP_PREFIX + "debug.noJavaVersionCheck")) { return false; } boolean unsupported = runCheckUnsupportedJava(args); if (!unsupported) { JAVA_CHECK_PASSED = true; } return unsupported; } static boolean runCheckUnsupportedJava(String[] args) { if (!JavaVersionSupport.runningOnSupportedVersion()) { Throwable error = null; boolean allowUnsupported = ArrayUtils.contains(args, "-allow_unsupported_java"); if (allowUnsupported) { boolean checkPassed = false; try { checkPassed = dynamicallyInvokeJavaSelfCheck(); } catch (Throwable t) { error = t; } if (checkPassed) { Log.warn("H2O is running on a version of Java (" + System.getProperty("java.version") + ") that was not certified at the time of the H2O release. 
" + "For production use please use a certified Java version (versions " + JavaVersionSupport.describeSupportedVersions() + " are officially supported)."); return false; } } System.err.printf("Only Java versions %s are supported, system version is %s%n", JavaVersionSupport.describeSupportedVersions(), System.getProperty("java.version")); if (ARGS.allow_unsupported_java) { System.err.println("H2O was invoked with flag -allow_unsupported_java, however, " + "we found out that your Java version doesn't meet the requirements to run H2O. Please use a supported Java version."); } if (error != null) error.printStackTrace(System.err); return true; } String vmName = System.getProperty("java.vm.name"); if (vmName != null && vmName.equals("GNU libgcj")) { System.err.println("GNU gcj is not supported"); return true; } return false; } /** * Dynamically invoke water.JavaSelfCheck#checkCompatibility. The call is dynamic in order to prevent * classloading issues to even load this class. * * @return true if Java-compatibility self-check passes successfully * @throws ClassNotFoundException * @throws NoSuchMethodException * @throws InvocationTargetException * @throws IllegalAccessException */ static boolean dynamicallyInvokeJavaSelfCheck() throws ClassNotFoundException, NoSuchMethodException, InvocationTargetException, IllegalAccessException { Class<?> cls = Class.forName("water.JavaSelfCheck"); Method m = cls.getDeclaredMethod("checkCompatibility"); return (Boolean) m.invoke(null); } /** * Any system property starting with `ai.h2o.` and containing any more `.` does not match * this pattern and is therefore ignored. This is mostly to prevent system properties * serving as configuration for H2O's dependencies (e.g. `ai.h2o.org.eclipse.jetty.LEVEL` ). */ static boolean isArgProperty(String name) { final String prefix = "ai.h2o."; if (!name.startsWith(prefix)) return false; return name.lastIndexOf('.') < prefix.length(); } // -------------------------------------------------------------------------- public static void main( String[] args ) { H2O.configureLogging(); extManager.registerCoreExtensions(); extManager.registerListenerExtensions(); extManager.registerAuthExtensions(); long time0 = System.currentTimeMillis(); if (checkUnsupportedJava(args)) throw new RuntimeException("Unsupported Java version"); // Record system start-time. if( !START_TIME_MILLIS.compareAndSet(0L, System.currentTimeMillis()) ) return; // Already started // Copy all ai.h2o.* system properties to the tail of the command line, // effectively overwriting the earlier args. ArrayList<String> args2 = new ArrayList<>(Arrays.asList(args)); for( Object p : System.getProperties().keySet() ) { String s = (String) p; if(isArgProperty(s)) { args2.add("-" + s.substring(7)); // hack: Junits expect properties, throw out dummy prop for ga_opt_out if (!s.substring(7).equals("ga_opt_out") && !System.getProperty(s).isEmpty()) args2.add(System.getProperty(s)); } } // Parse args String[] arguments = args2.toArray(args); parseArguments(arguments); // Get ice path before loading Log or Persist class long time1 = System.currentTimeMillis(); String ice = DEFAULT_ICE_ROOT(); if( ARGS.ice_root != null ) ice = ARGS.ice_root.replace("\\", "/"); try { ICE_ROOT = new URI(ice); } catch(URISyntaxException ex) { throw new RuntimeException("Invalid ice_root: " + ice + ", " + ex.getMessage()); } // Always print version, whether asked-for or not! 
long time2 = System.currentTimeMillis(); printAndLogVersion(arguments); if( ARGS.version ) { Log.flushBufferedMessagesToStdout(); exit(0); } // Print help & exit if (ARGS.help) { printHelp(); exit(0); } // Validate arguments validateArguments(); // Raise user warnings if (H2O.ARGS.web_ip == null) { Log.warn("SECURITY_WARNING: web_ip is not specified. H2O Rest API is listening on all available interfaces."); } Log.info("X-h2o-cluster-id: " + H2O.CLUSTER_ID); Log.info("User name: '" + H2O.ARGS.user_name + "'"); // Epic Hunt for the correct self InetAddress long time4 = System.currentTimeMillis(); Log.info("IPv6 stack selected: " + IS_IPV6); SELF_ADDRESS = NetworkInit.findInetAddressForSelf(); // Right now the global preference is to use IPv4 stack // To select IPv6 stack user has to explicitly pass JVM flags // to enable IPv6 preference. if (!IS_IPV6 && SELF_ADDRESS instanceof Inet6Address) { Log.err("IPv4 network stack specified but IPv6 address found: " + SELF_ADDRESS + "\n" + "Please specify JVM flags -Djava.net.preferIPv6Addresses=true and -Djava.net.preferIPv4Addresses=false to select IPv6 stack"); H2O.exit(-1); } if (IS_IPV6 && SELF_ADDRESS instanceof Inet4Address) { Log.err("IPv6 network stack specified but IPv4 address found: " + SELF_ADDRESS); H2O.exit(-1); } // Start the local node. Needed before starting logging. long time5 = System.currentTimeMillis(); startLocalNode(); // Allow core extensions to perform initialization that requires the network. long time6 = System.currentTimeMillis(); for (AbstractH2OExtension ext: extManager.getCoreExtensions()) { ext.onLocalNodeStarted(); } try { String logDir = Log.getLogDir(); Log.info("Log dir: '" + logDir + "'"); } catch (Exception e) { Log.info("Log dir: (Log4j configuration inherited)"); } Log.info("Cur dir: '" + System.getProperty("user.dir") + "'"); //Print extra debug info now that logs are setup long time7 = System.currentTimeMillis(); RuntimeMXBean rtBean = ManagementFactory.getRuntimeMXBean(); Log.debug("H2O launch parameters: "+ARGS.toString()); if (rtBean.isBootClassPathSupported()) { Log.debug("Boot class path: " + rtBean.getBootClassPath()); } Log.debug("Java class path: "+ rtBean.getClassPath()); Log.debug("Java library path: "+ rtBean.getLibraryPath()); // Load up from disk and initialize the persistence layer long time8 = System.currentTimeMillis(); initializePersistence(); // Initialize NPS { String flow_dir; if (ARGS.flow_dir != null) { flow_dir = ARGS.flow_dir; } else { flow_dir = DEFAULT_FLOW_DIR(); } if (flow_dir != null) { flow_dir = flow_dir.replace("\\", "/"); Log.info("Flow dir: '" + flow_dir + "'"); } else { Log.info("Flow dir is undefined; saving flows not available"); } NPS = new NodePersistentStorage(flow_dir); } // Start network services, including heartbeats long time9 = System.currentTimeMillis(); startNetworkServices(); // start server services Log.trace("Network services started"); // The "Cloud of size N formed" message printed out by doHeartbeat is the trigger // for users of H2O to know that it's OK to start sending REST API requests. long time10 = System.currentTimeMillis(); Paxos.doHeartbeat(SELF); assert SELF._heartbeat._cloud_hash != 0 || ARGS.client; // Start the heartbeat thread, to publish the Clouds' existence to other // Clouds. This will typically trigger a round of Paxos voting so we can // join an existing Cloud. 
new HeartBeatThread().start(); long time11 = System.currentTimeMillis(); // Log registered parsers Log.info("Registered parsers: " + Arrays.toString(ParserService.INSTANCE.getAllProviderNames(true))); // Start thread checking client disconnections if (Boolean.getBoolean(H2O.OptArgs.SYSTEM_PROP_PREFIX + "debug.clientDisconnectAttack")) { // for development only! Log.warn("Client Random Disconnect attack is enabled - use only for debugging! More warnings to follow ;)"); new ClientRandomDisconnectThread().start(); } else { // regular mode of operation new ClientDisconnectCheckThread().start(); } if (isGCLoggingEnabled()) { Log.info(H2O.technote(16, "GC logging is enabled, you might see messages containing \"GC (Allocation Failure)\". " + "Please note that this is a normal part of GC operations and occurrence of such messages doesn't directly indicate an issue.")); } long time12 = System.currentTimeMillis(); Log.debug("Timing within H2O.main():"); Log.debug(" Args parsing & validation: " + (time1 - time0) + "ms"); Log.debug(" Get ICE root: " + (time2 - time1) + "ms"); Log.debug(" Print log version: " + (time4 - time2) + "ms"); Log.debug(" Detect network address: " + (time5 - time4) + "ms"); Log.debug(" Start local node: " + (time6 - time5) + "ms"); Log.debug(" Extensions onLocalNodeStarted(): " + (time7 - time6) + "ms"); Log.debug(" RuntimeMxBean: " + (time8 - time7) + "ms"); Log.debug(" Initialize persistence layer: " + (time9 - time8) + "ms"); Log.debug(" Start network services: " + (time10 - time9) + "ms"); Log.debug(" Cloud up: " + (time11 - time10) + "ms"); Log.debug(" Start GA: " + (time12 - time11) + "ms"); } private static boolean isGCLoggingEnabled() { RuntimeMXBean runtimeMXBean = ManagementFactory.getRuntimeMXBean(); List<String> jvmArgs = runtimeMXBean.getInputArguments(); for (String arg : jvmArgs) { if (arg.startsWith("-XX:+PrintGC") || arg.equals("-verbose:gc") || (arg.startsWith("-Xlog:") && arg.contains("gc"))) return true; } return false; } /** Find PID of the current process, use -1 if we can't find the value. */ private static long getCurrentPID() { try { String n = ManagementFactory.getRuntimeMXBean().getName(); int i = n.indexOf('@'); if(i != -1) { return Long.parseLong(n.substring(0, i)); } else { return -1L; } } catch(Throwable ignore) { return -1L; } } // Die horribly public static void die(String s) { Log.fatal(s); H2O.exit(-1); } /** * Add node to a manual multicast list. * Note: the method is valid only if -flatfile option was specified on commandline * * @param node H2O node * @return true if the node was already in the multicast list. */ public static boolean addNodeToFlatfile(H2ONode node) { assert isFlatfileEnabled() : "Trying to use flatfile, but flatfile is not enabled!"; return STATIC_H2OS.add(node); } /** * Remove node from a manual multicast list. * Note: the method is valid only if -flatfile option was specified on commandline * * @param node H2O node * @return true if the node was in the multicast list. */ public static boolean removeNodeFromFlatfile(H2ONode node) { assert isFlatfileEnabled() : "Trying to use flatfile, but flatfile is not enabled!"; return STATIC_H2OS.remove(node); } /** * Check if a node is included in a manual multicast list. * Note: the method is valid only if -flatfile option was specified on commandline * * @param node H2O node * @return true if the node is in the multicast list. 
*/ public static boolean isNodeInFlatfile(H2ONode node) { assert isFlatfileEnabled() : "Trying to use flatfile, but flatfile is not enabled!"; return STATIC_H2OS.contains(node); } /** * Check if manual multicast is enabled. * * @return true if -flatfile option was specified on commandline */ public static boolean isFlatfileEnabled() { return STATIC_H2OS != null; } /** * Setup a set of nodes which should be contacted during manual multicast * * @param nodes set of H2O nodes */ public static void setFlatfile(Set<H2ONode> nodes) { if (nodes == null) { STATIC_H2OS = null; } else { STATIC_H2OS = Collections.newSetFromMap(new ConcurrentHashMap<H2ONode, Boolean>()); STATIC_H2OS.addAll(nodes); } } /** * Returns a set of nodes which are contacted during manual multi-cast. * * @return set of H2O nodes */ public static Set<H2ONode> getFlatfile() { return new HashSet<>(STATIC_H2OS); } /** * Forgets H2O client */ static boolean removeClient(H2ONode client){ return client.removeClient(); } public static H2ONode[] getClients(){ return H2ONode.getClients(); } public static H2ONode getClientByIPPort(String ipPort){ return H2ONode.getClientByIPPort(ipPort); } public static Key<DecryptionTool> defaultDecryptionTool() { return H2O.ARGS.decrypt_tool != null ? Key.<DecryptionTool>make(H2O.ARGS.decrypt_tool) : null; } public static URI downloadLogs(URI destinationDir, LogArchiveContainer logContainer) { return LogsHandler.downloadLogs(destinationDir.toString(), logContainer); } public static URI downloadLogs(URI destinationDir, String logContainer) { return LogsHandler.downloadLogs(destinationDir.toString(), LogArchiveContainer.valueOf(logContainer)); } public static URI downloadLogs(String destinationDir, LogArchiveContainer logContainer) { return LogsHandler.downloadLogs(destinationDir, logContainer); } public static URI downloadLogs(String destinationDir, String logContainer) { return LogsHandler.downloadLogs(destinationDir, LogArchiveContainer.valueOf(logContainer)); } /** * Is this H2O cluster running in our continuous integration environment? * * This information can be used to enable extended error output or force shutdown in case * an error is encountered. Use responsibly. * * @return true, if running in CI */ static boolean isCI() { return AbstractBuildVersion.getBuildVersion().isDevVersion() && "jenkins".equals(System.getProperty("user.name")); } }
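The cluster-status accessors above (size(), healthy(), free_mem(), getCloudSize(), isFlatfileEnabled()) are easiest to see used together. The following is a minimal sketch, not part of H2O itself; the class name ClusterStatusProbe and the log wording are made up for illustration.

import water.H2O;
import water.util.Log;

public class ClusterStatusProbe {
  public static void logStatus() {
    // getCloudSize() returns -1 until Paxos reaches common knowledge
    int size = H2O.getCloudSize();
    if (size < 0) {
      Log.info("Cloud not formed yet; current member count: " + H2O.CLOUD.size());
      return;
    }
    Log.info("Cloud size: " + size
        + ", healthy: " + H2O.CLOUD.healthy()
        + ", free memory (bytes): " + H2O.CLOUD.free_mem()
        + ", flatfile mode: " + H2O.isFlatfileEnabled());
  }
}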
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/H2OConstants.java
package water; /** * Global constants used throughout the H2O-3 project. */ public class H2OConstants { /** * Maximum number of elements allocable for a single array */ public static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; /** * Maximum size of an array, minus one byte reserved for a trailing zero. * Non-final for testing purposes. */ public static final int MAX_STR_LEN = MAX_ARRAY_SIZE - 1; }
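As a quick illustration of how MAX_ARRAY_SIZE is meant to be consumed, here is a minimal sketch; the helper class BufferSizing is hypothetical and not part of H2O.

import water.H2OConstants;

final class BufferSizing {
  // Caps a requested element count at the largest array the JVM can realistically allocate
  static int clamp(long requested) {
    return (int) Math.min(requested, (long) H2OConstants.MAX_ARRAY_SIZE);
  }
}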
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/H2OError.java
package water; import water.util.HttpResponseStatus; import water.util.IcedHashMap; import water.util.IcedHashMapGeneric; import water.util.Log; import java.util.ArrayList; import java.util.Arrays; /** * Class which represents a back-end error which will be returned to the client. Such * errors may be caused by the user (specifying an object which has been removed) or due * to a failure which is out of the user's control. */ public class H2OError extends Iced { /** Milliseconds since the epoch for the time that this H2OError instance was created. Generally this is a short time since the underlying error ocurred. */ public long _timestamp; public String _error_url = null; /** Message intended for the end user (a data scientist). */ public String _msg; /** Potentially more detailed message intended for a developer (e.g. a front end engineer or someone designing a language binding). */ public String _dev_msg; /** HTTP status code for this error. */ public int _http_status; /** Unique ID for this error instance, so that later we can build a dictionary of errors for docs and I18N. public int _error_id;*/ /** Any values that are relevant to reporting or handling this error. Examples are a key name if the error is on a key, or a field name and object name if it's on a specific field. */ public IcedHashMapGeneric.IcedHashMapStringObject _values; /** Exception type, if any. */ public String _exception_type; /** Raw exception message, if any. */ public String _exception_msg; /** Stacktrace, if any. */ public String[] _stacktrace; public H2OError(String error_url, String msg, String dev_msg, int http_status, IcedHashMapGeneric.IcedHashMapStringObject values, Exception e) { this(System.currentTimeMillis(), error_url, msg, dev_msg, http_status, values, e); } public H2OError(long timestamp, String error_url, String msg, String dev_msg, int http_status, IcedHashMapGeneric.IcedHashMapStringObject values, Throwable e) { Log.err(e); this._timestamp = timestamp; this._error_url = error_url; this._msg = msg; this._dev_msg = dev_msg; this._http_status = http_status; this._values = values; if (null == this._msg) { // It's crazy, but some Java exceptions like NullPointerException do not have a message! if (null != e) { this._msg = "Caught exception: " + e.getClass().getCanonicalName(); this._dev_msg = this._msg + " from: " + e.getStackTrace()[0]; } else { this._msg = "Unknown error"; this._dev_msg = this._msg; } } if (null != e) { this._exception_type = e.getClass().getCanonicalName(); this._exception_msg = e.getMessage(); ArrayList<String> arr = new ArrayList<>(); arr.add(e.toString()); StackTraceElement[] trace = e.getStackTrace(); for (StackTraceElement ste : trace) { String s = " " + ste.toString(); arr.add(s); if (s.startsWith("org.eclipse.jetty")) { // Don't need humongous jetty stack traces. break; } } // All distributed exceptions have the real cause while((e = e.getCause()) != null) { arr.add("Caused by:" + e.toString()); trace = e.getStackTrace(); for (StackTraceElement ste : trace) { String s = " " + ste.toString(); arr.add(s); if (s.startsWith("org.eclipse.jetty")) { // Don't need humongous jetty stack traces. break; } } } this._stacktrace = arr.toArray(new String[0]); } // Add a little header to make it error message stand out. Note: don't do this in toString() as we want clients to get this change too. 
String prefix = "\n\nERROR MESSAGE:\n\n"; String postfix = "\n\n"; _msg = prefix + _msg + postfix; _dev_msg = prefix + _dev_msg + postfix; _exception_msg = prefix + _exception_msg + postfix; } public H2OError(Throwable e, String error_url) { this(System.currentTimeMillis(), error_url, e.getMessage(), e.getMessage(), HttpResponseStatus.INTERNAL_SERVER_ERROR.getCode(), new IcedHashMapGeneric.IcedHashMapStringObject(), e); } static public String httpStatusHeader(int status_code) { switch (status_code) { case 200: return "200 OK"; case 201: return "201 Created"; case 400: return "400 Bad Request"; case 401: return "401 Unauthorized"; case 403: return "403 Forbidden"; case 404: return "404 Not Found"; case 409: return "409 Conflict"; case 410: return "410 Gone"; case 412: return "412 Precondition Failed"; case 500: return "500 Internal Server Error"; case 501: return "501 Not Implemented"; case 503: return "503 Service Unavailable"; default: return status_code + " Unimplemented http status code"; } } @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append(this._dev_msg != null ? this._dev_msg : this._msg); sb.append("; "); sb.append("Stacktrace: ").append(Arrays.toString(this._stacktrace)); if (!this._values.isEmpty()) { sb.append("; Values: "); sb.append(this._values.toJsonString()); } return sb.toString(); } }
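A small usage sketch for the error class above; the endpoint URL and the helper class are illustrative only. It shows the Throwable-based constructor (which defaults to HTTP 500) together with the httpStatusHeader helper.

import water.H2OError;

final class ErrorReportingExample {
  static H2OError report(Throwable t) {
    H2OError err = new H2OError(t, "/3/SomeEndpoint");               // defaults to HTTP 500
    System.out.println(H2OError.httpStatusHeader(err._http_status)); // prints "500 Internal Server Error"
    return err;
  }
}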
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/H2OListenerExtension.java
package water; public interface H2OListenerExtension { /** Name of listener extension */ String getName(); /** Initialize the extension */ void init(); void report(String ctrl, Object... data); }
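A trivial, hypothetical implementation (class name and output format are made up) to show the expected shape of a listener extension:

import water.H2OListenerExtension;
import java.util.Arrays;

public class StdoutListener implements H2OListenerExtension {
  @Override public String getName() { return "StdoutListener"; }
  @Override public void init() { /* nothing to set up in this sketch */ }
  @Override public void report(String ctrl, Object... data) {
    System.out.println("event=" + ctrl + " payload=" + Arrays.toString(data));
  }
}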
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/H2OModelBuilderError.java
package water; import hex.Model; import hex.ModelBuilder; import water.exceptions.H2OModelBuilderIllegalArgumentException; import water.util.IcedHashMap; import water.util.IcedHashMapGeneric; /** * Class which represents a ModelBuilder back-end error which will be returned to the client. * Such errors may be caused by the user (specifying bad parameters) or due * to a failure which is out of the user's control. */ public class H2OModelBuilderError extends H2OError { // Expose parameters, messages and error_count in the same way as ModelBuilder so they come out // in the H2OModelBuilderError JSON exactly the same way as in the ModelBuilderSchema. public Model.Parameters _parameters; public ModelBuilder.ValidationMessage[] _messages; public int _error_count; public H2OModelBuilderError(long timestamp, String error_url, String msg, String dev_msg, int http_status, IcedHashMapGeneric.IcedHashMapStringObject values, H2OModelBuilderIllegalArgumentException e) { super(timestamp, error_url, msg, dev_msg, http_status, values, e); this._parameters = (Model.Parameters) values.get("parameters"); this._messages = (ModelBuilder.ValidationMessage[]) values.get("messages"); this._error_count = (int) values.get("error_count"); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/H2ONode.java
package water; import water.nbhm.NonBlockingHashMap; import water.nbhm.NonBlockingHashMapLong; import water.network.SocketChannelFactory; import water.util.*; import java.io.IOException; import java.net.*; import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.nio.channels.ByteChannel; import java.nio.channels.SocketChannel; import java.util.*; import java.util.concurrent.PriorityBlockingQueue; import java.util.concurrent.atomic.AtomicInteger; /** * A <code>Node</code> in an <code>H2O</code> Cloud. * Basically a worker-bee with CPUs, Memory and Disk. * One of this is the self-Node, but the rest are remote Nodes. * * @author <a href="mailto:cliffc@h2o.ai"></a> * @version 1.0 */ public final class H2ONode extends Iced<H2ONode> implements Comparable { transient private SocketChannelFactory _socketFactory; transient private H2OSecurityManager _security; transient private PriorityBlockingQueue<ByteBuffer> _outgoingMsgQ; transient short _unique_idx; // Dense integer index, skipping 0. NOT cloud-wide unique. transient boolean _announcedLostContact; // True if heartbeat published a no-contact msg transient public long _last_heard_from; // Time in msec since we last heard from this Node transient public volatile HeartBeat _heartbeat; // My health info. Changes 1/sec. transient public int _tcp_readers; // Count of started TCP reader threads transient private short _timestamp; transient private boolean _removed_from_cloud; transient private volatile boolean _accessed_local_dkv; // Did this remote node ever accessed the local portion of DKV? public final boolean isClient() { return _heartbeat._client; } final void setTimestamp(short newTimestamp) { if (!H2ONodeTimestamp.isDefined(_timestamp) && H2ONodeTimestamp.isDefined(newTimestamp)) { // Note: time stamp is only known when the H2ONode actually opens a connection to this node // if H2ONode is discovered another way the timestamp is Undefined boolean isClient = H2ONodeTimestamp.decodeIsClient(newTimestamp); if (isClient) { Log.info("Client " + this + " started communicating with this H2O node."); } else { Log.debug("H2O Node " + this + " started communicating with this H2O node."); } } _timestamp = newTimestamp; } public final short getTimestamp() { return _timestamp; } public final boolean isRemovedFromCloud() { return _removed_from_cloud; } // Does this node technically correspond to a possible client? It doesn't say anything if the node can be part of the cluster // We intern all nodes that are trying to communicate with us regardless if they belong to the cluster or not. private boolean isPossibleClient() { return H2ONodeTimestamp.decodeIsClient(_timestamp) || isClient(); } void setHeartBeat(HeartBeat hb) { _heartbeat = hb; } void removeFromCloud() { _removed_from_cloud = true; stopSendThread(); } private void stopSendThread() { _sendThread = null; } private SmallMessagesSendThread startSendThread() { if (isClient() && !H2O.ARGS.allow_clients) { throw new IllegalStateException("Attempt to communicate with client " + getIpPortString() + " blocked. " + "Client connections are not allowed in this cloud."); } SmallMessagesSendThread newSendThread = new SmallMessagesSendThread(); // Launch the send thread for small messages _sendThread = newSendThread; newSendThread.start(); return newSendThread; } // A JVM is uniquely named by machine IP address and port# public final H2Okey _key; /** Identification of the node via IP and PORT. 
* */ static final class H2Okey extends InetSocketAddress implements Comparable { // Numeric representation of IP // For IPv6 the both fields are valid and describes full IPv6 address, for IPv4 only low 32 bits of _ipLow are valid // But still need a flag to distinguish between IPv4 and IPv6 final long _ipHigh, _ipLow; // IPv4: A.B.C.D ~ DCBA H2Okey(InetAddress inet, int port) { super(inet, port); byte[] b = inet.getAddress(); // 4bytes or 16bytes if (b.length == 4) { assert !H2O.IS_IPV6 : "IPv4 stack specified but IPv6 address passed! " + inet; _ipHigh = 0; _ipLow = ArrayUtils.encodeAsInt(b) & 0XFFFFFFFFL; } else { assert H2O.IS_IPV6 : "IPv6 stack specified but IPv4 address passed! " + inet; _ipHigh = ArrayUtils.encodeAsLong(b, 8, 8); _ipLow = ArrayUtils.encodeAsLong(b, 0, 8); } } /** * Retrieves the public communication port on which the REST API is exposed * @return port number */ int getApiPort() { return getPort()-H2O.ARGS.port_offset; } /** * Retrieves the internal node-to-node communication port * @return port number */ int getInternalPort() { return getPort() ; } @Override public String toString() { return getAddress()+":"+ getApiPort(); } public String getIpPortString() { return getAddress().getHostAddress() + ":" + getApiPort(); } AutoBuffer write( AutoBuffer ab ) { return (!H2O.IS_IPV6 ? ab.put4((int) _ipLow) : ab.put8(_ipLow).put8(_ipHigh)).put2((char) getInternalPort()); } static H2Okey read( AutoBuffer ab ) { try { InetAddress inet = InetAddress.getByAddress(ab.getA1(SIZE_OF_IP)); int port = ab.get2(); return new H2Okey(inet, port); } catch( UnknownHostException e ) { throw Log.throwErr(e); } } // Canonical ordering based on inet & port @Override public int compareTo(Object x) { if( x == null ) return -1; // Always before null if( x == this ) return 0; H2Okey key = (H2Okey)x; // Must be unsigned long-arithmetic, or overflow will make a broken sort int res = MathUtils.compareUnsigned(_ipHigh, _ipLow, key._ipHigh, key._ipLow); return res != 0 ? res : getInternalPort() - key.getInternalPort(); } static int SIZE_OF_IP = H2O.IS_IPV6 ? 16 : 4; /** Size of serialized H2OKey */ static int SIZE = SIZE_OF_IP /* ip */ + 2 /* port */; } public String getIp() { return _key.getHostString(); } public String getIpPortString() { return _key.getIpPortString(); } public final int ip4() { return (int) _key._ipLow; } public boolean isSelf() { return this == H2O.SELF; } // These are INTERN'd upon construction, and are uniquely numbered within the // same run of a JVM. If a remote Node goes down, then back up... it will // come back with the SAME IP address, and the same unique_idx and history // relative to *this* Node. They can be compared with pointer-equality. The // unique idx is used to know which remote Nodes have cached which Keys, even // if the Home#/Replica# change for a Key due to an unrelated change in Cloud // membership. The unique_idx is *per Node*; not all Nodes agree on the same // indexes. 
private H2ONode( H2Okey key, short unique_idx, short timestamp) { _key = key; _unique_idx = unique_idx; _last_heard_from = System.currentTimeMillis(); _heartbeat = new HeartBeat(); _timestamp = timestamp; _security = H2OSecurityManager.instance(); _socketFactory = SocketChannelFactory.instance(_security); _outgoingMsgQ = makeOutgoingMessageQueue(); _sendThread = null; // initialized lazily } public boolean isHealthy() { return isHealthy(System.currentTimeMillis()); } public boolean isHealthy(long now) { return (now - _last_heard_from) < HeartBeatThread.TIMEOUT; } public void markLocalDKVAccess() { _accessed_local_dkv = true; } public boolean accessedLocalDKV() { return _accessed_local_dkv; } // --------------- // A dense integer index for every unique IP ever seen, since the JVM booted. // Used to track "known replicas" per-key across Cloud change-ups. Just use // an array-of-H2ONodes static private final NonBlockingHashMap<H2Okey,H2ONode> INTERN = new NonBlockingHashMap<>(); static private final AtomicInteger UNIQUE = new AtomicInteger(1); static H2ONode IDX[] = new H2ONode[1]; static H2ONode[] getClients(){ ArrayList<H2ONode> clients = new ArrayList<>(INTERN.size()); for( Map.Entry<H2Okey, H2ONode> entry : INTERN.entrySet()){ if (entry.getValue().isClient() && !entry.getValue().isRemovedFromCloud()) { clients.add(entry.getValue()); } } return clients.toArray(new H2ONode[0]); } static H2ONode getClientByIPPort(String ipPort){ for( Map.Entry<H2Okey, H2ONode> entry : INTERN.entrySet()){ if (entry.getValue().isClient() && !entry.getValue().isRemovedFromCloud() && entry.getValue().getIpPortString().equals(ipPort)) { return entry.getValue(); } } return null; } private void refreshClient(short newTimestamp) { boolean respawned = H2ONodeTimestamp.hasNodeRespawned(_timestamp, newTimestamp); if (respawned) { Log.info("Client reconnected with a new timestamp=" + newTimestamp + ", old client: " + toDebugString()); if (_sendThread != null) { // We generally assume a lost client will eventually re-connect and we will want to deliver all the messages - // see the isActive() method in the SmallMessagesSendThread. However, when we detect a client was re-spawned // (and thus lost its previous state) we don't need to keep sending the old messages. That is why we kill the old // thread right away by injecting a new fresh instance. The old thread will detect it is not active and will // give the new one chance to start delivering messages. startSendThread(); } } //Transition the timestamp to defined state for this client if (!H2ONodeTimestamp.isDefined(newTimestamp)) { setTimestamp(newTimestamp); } _removed_from_cloud = false; _last_heard_from = System.currentTimeMillis(); } boolean removeClient() { Log.info("Removing client: " + toDebugString()); boolean removed = !_removed_from_cloud; removeFromCloud(); return removed; } // Create and/or re-use an H2ONode. Each gets a unique dense index, and is // *interned*: there is only one per InetAddress. 
static private H2ONode intern(H2Okey key, short timestamp) { final boolean foundPossibleClient = H2ONodeTimestamp.decodeIsClient(timestamp); if (!H2O.ARGS.client && foundPossibleClient && !H2O.ARGS.allow_clients) { throw new IllegalStateException("Client connections are not allowed, source " + key.getIpPortString()); } H2ONode h2o = INTERN.get(key); if (h2o != null) { if (foundPossibleClient || h2o.isPossibleClient()) { h2o.refreshClient(timestamp); } // Transition the timestamp to defined state for both workers & client if (!H2ONodeTimestamp.isDefined(h2o.getTimestamp())) { h2o.setTimestamp(timestamp); } return h2o; } else { if (foundPossibleClient) { // We don't know if this client belongs to this cloud yet, at this point it is just a candidate Log.info("New (possible) client found, timestamp=" + timestamp); } } final int idx = UNIQUE.getAndIncrement(); assert idx < Short.MAX_VALUE; h2o = new H2ONode(key, (short) idx, timestamp); H2ONode old = INTERN.putIfAbsent(key, h2o); if (old != null) { if (foundPossibleClient && old.isPossibleClient()) { old.refreshClient(timestamp); } return old; } synchronized (H2O.class) { while (idx >= IDX.length) { IDX = Arrays.copyOf(IDX, IDX.length << 1); } IDX[idx] = h2o; } return h2o; } static H2ONode intern(InetAddress ip, int port, short timestamp) { return intern(new H2Okey(ip, port), timestamp); } public static H2ONode intern(InetAddress ip, int port) { return intern(ip, port, H2ONodeTimestamp.UNDEFINED); } static H2ONode intern(byte[] bs, int off) { byte[] b = new byte[H2Okey.SIZE_OF_IP]; // the size depends on version of selected IP stack int port; // The static constant should be optimized if (!H2O.IS_IPV6) { // IPv4 UnsafeUtils.set4(b, 0, UnsafeUtils.get4(bs, off)); } else { // IPv6 UnsafeUtils.set8(b, 0, UnsafeUtils.get8(bs, off)); UnsafeUtils.set8(b, 8, UnsafeUtils.get8(bs, off + 8)); } port = UnsafeUtils.get2(bs,off + H2Okey.SIZE_OF_IP) & 0xFFFF; try { return intern(InetAddress.getByAddress(b),port); } catch( UnknownHostException e ) { throw Log.throwErr(e); } } // Get a nice Node Name for this Node in the Cloud. Basically it's the // InetAddress we use to communicate to this Node. public static H2ONode self(InetAddress local) { assert H2O.H2O_PORT != 0; try { // Figure out which interface matches our IP address List<NetworkInterface> matchingIfs = new ArrayList<>(); Enumeration<NetworkInterface> netIfs = NetworkInterface.getNetworkInterfaces(); while( netIfs.hasMoreElements() ) { NetworkInterface netIf = netIfs.nextElement(); Enumeration<InetAddress> addrs = netIf.getInetAddresses(); while( addrs.hasMoreElements() ) { InetAddress addr = addrs.nextElement(); if( addr.equals(local) ) { matchingIfs.add(netIf); break; } } } switch( matchingIfs.size() ) { case 0: H2O.CLOUD_MULTICAST_IF = null; break; case 1: H2O.CLOUD_MULTICAST_IF = matchingIfs.get(0); break; default: String msg = "Found multiple network interfaces for ip address " + local; for( NetworkInterface ni : matchingIfs ) { msg +="\n\t" + ni; } msg +="\nUsing " + matchingIfs.get(0) + " for UDP broadcast"; Log.warn(msg); H2O.CLOUD_MULTICAST_IF = matchingIfs.get(0); } } catch( SocketException e ) { throw Log.throwErr(e); } // Selected multicast interface must support multicast, and be up and running! 
try { if( H2O.CLOUD_MULTICAST_IF != null && !H2O.CLOUD_MULTICAST_IF.supportsMulticast() ) { Log.info("Selected H2O.CLOUD_MULTICAST_IF: "+H2O.CLOUD_MULTICAST_IF+ " doesn't support multicast"); // H2O.CLOUD_MULTICAST_IF = null; } if( H2O.CLOUD_MULTICAST_IF != null && !H2O.CLOUD_MULTICAST_IF.isUp() ) { throw new RuntimeException("Selected H2O.CLOUD_MULTICAST_IF: "+H2O.CLOUD_MULTICAST_IF+ " is not up and running"); } } catch( SocketException e ) { throw Log.throwErr(e); } return intern(new H2Okey(local, H2O.H2O_PORT), H2ONodeTimestamp.calculateNodeTimestamp()); } // Happy printable string @Override public String toString() { return _key.toString (); } public String toDebugString() { String base = _key.toString(); if (! isClient()) { return base; } StringBuilder sb = new StringBuilder(base); sb.append("("); sb.append("timestamp=").append(_timestamp); if (_heartbeat != null) { sb.append(", ").append("cloud_name_hash=").append(_heartbeat._cloud_name_hash); } sb.append(")"); return sb.toString(); } @Override public int hashCode() { return _key.hashCode(); } @Override public boolean equals(Object o) { return _key.equals (((H2ONode)o)._key); } @Override public int compareTo( Object o) { return _key.compareTo(((H2ONode)o)._key); } // index of this node in the current cloud... can change at the next cloud. public int index() { return H2O.CLOUD.nidx(this); } // --------------- // A queue of available TCP sockets // re-usable TCP socket opened to this node, or null. // This is essentially a BlockingQueue/Stack that allows null. private transient ByteChannel _socks[] = new ByteChannel[2]; private transient int _socksAvail=_socks.length; // Count of concurrent TCP requests both incoming and outgoing static final AtomicInteger TCPS = new AtomicInteger(0); ByteChannel getTCPSocket() throws IOException { // Under lock, claim an existing open socket if possible synchronized(this) { // Limit myself to the number of open sockets from node-to-node while( _socksAvail == 0 ) try { wait(1000); } catch( InterruptedException ignored ) { } // Claim an open socket ByteChannel sock = _socks[--_socksAvail]; if( sock != null ) { if( sock.isOpen() ) return sock; // Return existing socket! // Else it's an already-closed socket, lower open TCP count assert TCPS.get() > 0; TCPS.decrementAndGet(); } } // Must make a fresh socket SocketChannel sock2 = SocketChannel.open(); sock2.socket().setReuseAddress(true); sock2.socket().setSendBufferSize(AutoBuffer.BBP_BIG._size); boolean res = sock2.connect( _key ); assert res && !sock2.isConnectionPending() && sock2.isBlocking() && sock2.isConnected() && sock2.isOpen(); ByteBuffer bb = ByteBuffer.allocate(6).order(ByteOrder.nativeOrder()); bb.put((byte)2); bb.putShort(H2O.SELF._timestamp); bb.putChar((char)H2O.H2O_PORT); bb.put((byte)0xef); bb.flip(); ByteChannel wrappedSocket = _socketFactory.clientChannel(sock2, _key.getHostName(), _key.getPort()); while(bb.hasRemaining()) { wrappedSocket.write(bb); } TCPS.incrementAndGet(); // Cluster-wide counting return wrappedSocket; } synchronized void freeTCPSocket( ByteChannel sock ) { assert 0 <= _socksAvail && _socksAvail < _socks.length; assert TCPS.get() > 0; if( sock != null && !sock.isOpen() ) sock = null; _socks[_socksAvail++] = sock; if( sock == null ) TCPS.decrementAndGet(); notify(); } // --------------- // Send a message via batched TCP. Note: has to happen out-of-band with the // standard AutoBuffer writing, which can hit the case of needing a TypeId // mapping mid-serialization. 
Thus this path uses another TCP channel that // is specifically not any of the above channels. This channel is limited to // messages which are presented in their entirety (not streamed) thus never // need another (nested) TCP channel. private transient SmallMessagesSendThread _sendThread = null; // null if Node was removed from cloud or we didn't need to communicate with it yet public final void sendMessage(ByteBuffer bb, byte msg_priority) { SmallMessagesSendThread sendThread = _sendThread; if (sendThread == null) { // Sending threads are created lazily. // This is because we will intern all client nodes including the ones that have nothing to do with the cluster. // By delaying the initialization to the point when we actually want to send a message, we initialize the sending // thread just for the nodes that are really part of the cluster. // The other reason is client disconnect - when removing client we kill the reference to the sending thread, if we // still do need to communicate with the client later - we just recreate the thread. if (_removed_from_cloud) { Log.warn("Node " + this + " is not active in the cloud anymore but we want to communicate with it." + "Re-opening the communication channel."); } sendThread = startSendThread(); } assert sendThread != null; sendThread.sendMessage(bb, msg_priority); } /** * Returns a new connection of type {@code tcpType}, the type can be either * TCPReceiverThread.TCP_SMALL or TCPReceiverThread.TCP_BIG. * * If socket channel factory is set, the communication will considered to be secured - this depends on the * configuration of the {@link SocketChannelFactory}. In case of the factory is null, the communication won't be secured. * @return new socket channel */ public static ByteChannel openChan(byte tcpType, SocketChannelFactory socketFactory, InetAddress originAddr, int originPort, short nodeTimeStamp) throws IOException { // Must make a fresh socket SocketChannel sock = SocketChannel.open(); sock.socket().setReuseAddress(true); sock.socket().setSendBufferSize(AutoBuffer.BBP_BIG._size); InetSocketAddress isa = new InetSocketAddress(originAddr, originPort); boolean res = sock.connect(isa); // Can toss IOEx, esp if other node is still booting up assert res : "Should be already connected, but connection is in non-blocking mode and the connection operation is in progress!"; sock.configureBlocking(true); assert !sock.isConnectionPending() && sock.isBlocking() && sock.isConnected() && sock.isOpen(); sock.socket().setTcpNoDelay(true); ByteBuffer bb = ByteBuffer.allocate(6).order(ByteOrder.nativeOrder()); bb.put(tcpType).putShort(nodeTimeStamp).putChar((char)H2O.H2O_PORT).put((byte) 0xef).flip(); ByteChannel wrappedSocket = socketFactory.clientChannel(sock, isa.getHostName(), isa.getPort()); while (bb.hasRemaining()) { // Write out magic startup sequence wrappedSocket.write(bb); } return wrappedSocket; } public static ByteChannel openChan(byte tcpType, SocketChannelFactory socketFactory, String originAddr, int originPort, short nodeTimeStamp) throws IOException { return openChan(tcpType, socketFactory, InetAddress.getByName(originAddr), originPort, nodeTimeStamp); } private static PriorityBlockingQueue<ByteBuffer> makeOutgoingMessageQueue() { return new PriorityBlockingQueue<>(11,new Comparator<ByteBuffer>() { // Secret back-channel priority: the position field (capped at bb.limit) @Override public int compare( ByteBuffer bb1, ByteBuffer bb2 ) { return bb1.position() - bb2.position(); } }); } // Private thread serving (actually ships the bytes over) 
small msg Q. // Buffers the small messages together and sends the bytes over via TCP channel. private static String SEND_THREAD_NAME_PREFIX = "TCP-SMALL-SEND-"; class SmallMessagesSendThread extends Thread { private ByteChannel _chan; // Lazily made on demand; closed & reopened on error private final ByteBuffer _bb; // Reusable output large buffer SmallMessagesSendThread(){ super(SEND_THREAD_NAME_PREFIX + H2ONode.this); ThreadHelper.initCommonThreadProperties(this); _bb = AutoBuffer.BBP_BIG.make(); } /** Send small message to this node. Passes the message on to a private msg * q, prioritized by the message priority. MSG queue is served by sender * thread, message are continuously extracted, buffered together and sent * over TCP channel. * @param bb Message to send * @param msg_priority priority (e.g. NACK and ACKACK beat most other priorities */ private void sendMessage(ByteBuffer bb, byte msg_priority) { assert bb.position()==0 && bb.limit() > 0; // Secret back-channel priority: the position field (capped at bb.limit); // this is to avoid making Yet Another Object per send. // Priority can exceed position. "interesting" priorities are everything // above H2O.MIN_HI_PRIORITY and things just above 0; priorities in the // middl'n range from 10 to MIN_HI are really rare. Need to compress // priorities a little for this hack to work. if( msg_priority >= H2O.MIN_HI_PRIORITY ) msg_priority = (byte)((msg_priority-H2O.MIN_HI_PRIORITY)+10); else if( msg_priority >= 10 ) msg_priority = 10; if( msg_priority > bb.limit() ) msg_priority = (byte)bb.limit(); bb.position(msg_priority); _outgoingMsgQ.put(bb); } private boolean isActive() { return _sendThread == this || (_sendThread == null && isPossibleClient()); } // We deliver messages to regular nodes only if the are part of the cloud // and to always to clients private boolean keepSending() { return !isRemovedFromCloud() || isPossibleClient(); } @Override public void run(){ try { while (isActive()) { // Forever loop try { ByteBuffer bb = _outgoingMsgQ.take(); // take never returns null but blocks instead if (! isActive()) { _outgoingMsgQ.put(bb); // put back and give someone else a chance to deliver break; // terminate } while( bb != null ) { // while have an BB to process assert !bb.isDirect() : "Direct BBs already got recycled"; assert bb.limit()+1+2 <= _bb.capacity() : "Small message larger than the output buffer"; if( _bb.remaining() < bb.limit()+1+2 ) sendBuffer(); // Send full batch; reset _bb so taken bb fits _bb.putChar((char)bb.limit()); _bb.put(bb.array(),0,bb.limit()); // Jam this BB into the existing batch BB, all in one go (it all fits) _bb.put((byte)0xef);// Sentinel byte bb = _outgoingMsgQ.poll(); // Go get more, same batch } sendBuffer(); // Send final trailing BBs } catch (IllegalMonitorStateException imse) { /* ignore */ } catch (InterruptedException e) { /*ignore*/ } } } catch(Throwable t) { throw Log.throwErr(t); } if(_chan != null) { try {_chan.close();} catch (IOException e) {} _chan = null; } } void sendBuffer(){ int retries = 0; _bb.flip(); // limit set to old position; position set to 0 while (keepSending() && _bb.hasRemaining()) { try { ByteChannel chan = _chan == null ? 
(_chan=openChan()) : _chan; chan.write(_bb); } catch(IOException ioe) { _bb.rewind(); // Position to zero; limit unchanged; retry the operation // Log if not shutting down, and not middle-of-cloud-formation where // other node is still booting up (expected common failure), or *never* // comes up - such as when not all nodes mentioned in a flatfile will be // booted. Basically the ERRR log will show endless repeat attempts to // connect to the missing node if( keepSending() && !H2O.getShutdownRequested() && (Paxos._cloudLocked || retries++ > 300) ) { Log.err("Got IO error when sending a batch of bytes: ",ioe); retries = 150; // Throttle the pace of error msgs } if( _chan != null ) try { _chan.close(); } catch (Throwable t) {/*ignored*/} _chan = null; retries++; final int sleep = Math.max(5000,retries << 1); try {Thread.sleep(sleep);} catch (InterruptedException e) {/*ignored*/} } } _bb.clear(); // Position set to 0; limit to capacity } // Open channel on first write attempt private ByteChannel openChan() throws IOException { return H2ONode.openChan(TCPReceiverThread.TCP_SMALL, _socketFactory, _key.getAddress(), _key.getPort(), H2O.SELF._timestamp); } } // --------------- // The *outgoing* client-side calls; pending tasks this Node wants answered. private final NonBlockingHashMapLong<RPC> _tasks = new NonBlockingHashMapLong<>(); void taskPut(int tnum, RPC rpc ) { _tasks.put(tnum,rpc); if( rpc._dt instanceof TaskPutKey ) _tasksPutKey.put(tnum,(TaskPutKey)rpc._dt); } RPC taskGet(int tnum) { return _tasks.get(tnum); } void taskRemove(int tnum) { _tasks.remove(tnum); _tasksPutKey.remove(tnum); } Collection<RPC> tasks() { return _tasks.values(); } int taskSize() { return _tasks.size(); } // True if there is a pending PutKey against this Key. Totally a speed // optimization in the case of a large number of pending Gets are flooding // the tasks() queue, each needing to scan the tasks queue for pending // PutKeys to the same Key. Legal to always private final NonBlockingHashMapLong<TaskPutKey> _tasksPutKey = new NonBlockingHashMapLong<>(); TaskPutKey pendingPutKey( Key k ) { for( TaskPutKey tpk : _tasksPutKey.values() ) if( k.equals(tpk._key) ) return tpk; return null; } // The next unique task# sent *TO* the 'this' Node. private final AtomicInteger _created_task_ids = new AtomicInteger(1); int nextTaskNum() { return _created_task_ids.getAndIncrement(); } // --------------- // The Work-In-Progress list. Each item is a UDP packet's worth of work. // When the RPCCall to _computed, then it's Completed work instead // work-in-progress. Completed work can be short-circuit replied-to by // resending the RPC._dt back. Work that we're sure the this Node has seen // the reply to can be removed - but we must remember task-completion for all // time (because UDP packets can be dup'd and arrive very very late and // should not be confused with new work). private final NonBlockingHashMapLong<RPC.RPCCall> _work = new NonBlockingHashMapLong<>(); // We must track even dead/completed tasks for All Time (lest a very very // delayed UDP packet look like New Work). The easy way to do this is leave // all work packets/RPCs in the _work HashMap for All Time - but this amounts // to a leak. Instead we "roll up" the eldest completed work items, just // remembering their completion status. Task id's older (smaller) than the // _removed_task_ids are both completed, and rolled-up to a single integer. 
private final AtomicInteger _removed_task_ids = new AtomicInteger(0); // A Golden Completed Task: it's a shared completed task used to represent // all instances of tasks that have been completed and are no longer being // tracked separately. private final RPC.RPCCall _removed_task = new RPC.RPCCall(this); RPC.RPCCall has_task( int tnum ) { if( tnum <= _removed_task_ids.get() ) return _removed_task; return _work.get(tnum); } // Record a task-in-progress, or return the prior RPC if one already exists. // The RPC will flip to "_completed" once the work is done. The RPC._dtask // can be repeatedly ACKd back to the caller, and the _dtask is removed once // an ACKACK appears - and the RPC itself is removed once all prior RPCs are // also ACKACK'd. RPC.RPCCall record_task( RPC.RPCCall rpc ) { // Task removal (and roll-up) suffers from classic race-condition, which we // fix by a classic Dekker's algo; a task# is always in either the _work // HashMap, or rolled-up in the _removed_task_ids counter, or both (for // short intervals during the handoff). We can never has a cycle where // it's in neither or else a late UDP may attempt to "resurrect" the // already completed task. Hence we must always check the "removed ids" // AFTER we insert in the HashMap (we can check before also, but that's a // simple optimization and not sufficient for correctness). final RPC.RPCCall x = _work.putIfAbsent(rpc._tsknum,rpc); if( x != null ) return x; // Return pre-existing work // If this RPC task# is very old, we just return a Golden Completed task. // The task is not just completed, but also we have already received // verification that the client got the answer. So this is just a really // old attempt to restart a long-completed task. if( rpc._tsknum > _removed_task_ids.get() ) return null; // Task is new _work.remove(rpc._tsknum); // Bogus insert, need to remove it return _removed_task; // And return a generic Golden Completed object } // Record the final return value for a DTask. Should happen only once. // Recorded here, so if the client misses our ACK response we can resend the // same answer back. void record_task_answer( RPC.RPCCall rpcall ) { // assert rpcall._started == 0 || rpcall._dt.hasException(); rpcall._started = System.currentTimeMillis(); rpcall._retry = RPC.RETRY_MS; // Start the timer on when to resend // AckAckTimeOutThread.PENDING.add(rpcall); } // Stop tracking a remote task, because we got an ACKACK. void remove_task_tracking( int task ) { RPC.RPCCall rpc = _work.get(task); if( rpc == null ) return; // Already stopped tracking // Atomically attempt to remove the 'dt'. If we win, we are the sole // thread running the dt.onAckAck. Also helps GC: the 'dt' is done (sent // to client and we received the ACKACK), but the rpc might need to stick // around a long time - and the dt might be big. 
DTask dt = rpc._dt; // The existing DTask, if any if( dt != null && rpc.CAS_DT(dt,null) ) { assert rpc._computed : "Still not done #"+task+" "+dt.getClass()+" from "+rpc._client; dt.onAckAck(); // One-time call on stop-tracking } // Roll-up as many done RPCs as we can, into the _removed_task_ids list while( true ) { int t = _removed_task_ids.get(); // Last already-removed ID RPC.RPCCall rpc2 = _work.get(t+1); // RPC of 1st not-removed ID if( rpc2 == null || rpc2._dt != null || !_removed_task_ids.compareAndSet(t,t+1) ) break; // Stop when we hit in-progress tasks _work.remove(t+1); // Else we can remove the tracking now } } // This Node rebooted recently; we can quit tracking prior work history void rebooted() { _work.clear(); _removed_task_ids.set(0); } // Custom Serialization Class: H2OKey need to be built. public final AutoBuffer write_impl(AutoBuffer ab) { return _key.write(ab); } public final H2ONode read_impl( AutoBuffer ab ) { return intern(H2Okey.read(ab), H2ONodeTimestamp.UNDEFINED); } public final AutoBuffer writeJSON_impl(AutoBuffer ab) { return ab.putJSONStr("node",_key.toString()); } public final H2ONode readJSON_impl( AutoBuffer ab ) { throw H2O.fail(); } public SocketChannelFactory getSocketFactory() { return _socketFactory; } public H2OSecurityManager getSecurityManager() { return _security; } /** * * @return True if and only if this node is leader of the cloud. Otherwise false. */ public boolean isLeaderNode() { if(H2O.CLOUD.size() == 0) return false; return H2O.CLOUD.leader() != null && H2O.CLOUD.leader().equals(this); } }
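The leader/identity helpers at the end of H2ONode are commonly combined; the following is a minimal sketch (LeaderOnlyStep is hypothetical, and it assumes the cloud is already formed) that runs work only on the cloud leader.

import water.H2O;
import water.H2ONode;
import water.util.Log;

final class LeaderOnlyStep {
  static void maybeRun(Runnable step) {
    H2ONode self = H2O.SELF;
    if (self.isLeaderNode()) {
      step.run();                 // only the leader executes the step
    } else {
      Log.info("Skipping leader-only step on " + self.getIpPortString());
    }
  }
}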
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/H2ONodeTimestamp.java
package water; public class H2ONodeTimestamp { /** In case we are using a flatfile, we intern the nodes in the flatfile, but we do not know the timestamp of the remote node at that time. Therefore we set the timestamp to undefined and set it to the correct value once we hear from the remote node */ static final short UNDEFINED = 0; /** * Check whether this timestamp is a valid timestamp of a running H2O node */ public static boolean isDefined(short timestamp) { return timestamp != UNDEFINED; } /** * Select the last 15 bits of the JVM boot start time and return them as a short. If the truncated timestamp is 0, we return * 1 instead to be able to distinguish between client and node, as -0 is the same as 0. */ private static short truncateTimestamp(long jvmStartTime){ int bitMask = (1 << 15) - 1; // select the lower 15 bits short timestamp = (short) (jvmStartTime & bitMask); // if the timestamp is 0 (=UNDEFINED) return 1 to be able to distinguish between positive and negative values return timestamp == 0 ? 1 : timestamp; } /** * Calculate the node timestamp from the current node's information. We use the JVM boot time and the information whether * we are a client or not, and combine both pieces of information into a single 2-byte value. */ static short calculateNodeTimestamp() { return calculateNodeTimestamp(TimeLine.JVM_BOOT_MSEC, H2O.ARGS.client); } /** * Calculate the node timestamp from the provided information. We use the JVM boot time and the information whether * we are a client or not. * * A negative timestamp represents a client node, a positive one a regular H2O node. * * @param bootTimestamp H2O node boot timestamp * @param amIClient true if this node is a client, otherwise false */ static short calculateNodeTimestamp(long bootTimestamp, boolean amIClient) { short timestamp = truncateTimestamp(bootTimestamp); // if we are a client, return a negative timestamp, otherwise positive return amIClient ? (short) -timestamp : timestamp; } /** * Decodes whether the node is a client or a regular node from the timestamp * @param timestamp timestamp * @return true if the timestamp is from a client node, false otherwise */ static boolean decodeIsClient(short timestamp) { return timestamp < 0; } /** * This method checks whether the H2O node respawned based on its previous and current timestamps */ static boolean hasNodeRespawned(short oldTimestamp, short newTimestamp) { return isDefined(oldTimestamp) && isDefined(newTimestamp) && oldTimestamp != newTimestamp; } }
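Because calculateNodeTimestamp and decodeIsClient are package-private, the following round-trip sketch would have to live in package water; it is illustrative only and not part of H2O.

package water;

final class TimestampRoundTrip {
  static void demo() {
    long bootMillis = System.currentTimeMillis();
    short nodeTs   = H2ONodeTimestamp.calculateNodeTimestamp(bootMillis, false); // regular node -> positive
    short clientTs = H2ONodeTimestamp.calculateNodeTimestamp(bootMillis, true);  // client -> negative
    assert !H2ONodeTimestamp.decodeIsClient(nodeTs);
    assert  H2ONodeTimestamp.decodeIsClient(clientTs);
    assert  H2ONodeTimestamp.isDefined(nodeTs); // truncateTimestamp never yields 0
  }
}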
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/H2ORuntime.java
package water; /** * H2O wrapper around Runtime class exposing modified versions of some functions */ public class H2ORuntime { static final int ACTIVE_PROCESSOR_COUNT = getActiveProcessorCount(); /** * Returns the number of processors available to H2O. * * @return number of available processors */ public static int availableProcessors() { return availableProcessors(ACTIVE_PROCESSOR_COUNT); } static int availableProcessors(int activeProcessorCount) { int availableProcessors = Runtime.getRuntime().availableProcessors(); if (activeProcessorCount > 0 && activeProcessorCount < availableProcessors) { availableProcessors = activeProcessorCount; } return availableProcessors; } public static int getActiveProcessorCount() { return Integer.parseInt(System.getProperty("sys.ai.h2o.activeProcessorCount", "0")); } }
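The processor cap is normally supplied as a JVM flag so that it is already visible when ACTIVE_PROCESSOR_COUNT is initialized at class load; the sketch below sets it programmatically, which only takes effect if it runs before the H2ORuntime class is first loaded. The value 4 is arbitrary.

// Equivalent to launching with: java -Dsys.ai.h2o.activeProcessorCount=4 ...
System.setProperty("sys.ai.h2o.activeProcessorCount", "4");
int cpus = H2ORuntime.availableProcessors(); // min(Runtime CPUs, 4) once the cap is in effect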
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/H2OSecurityManager.java
package water; import water.network.SSLContextException; import water.network.SSLSocketChannelFactory; import water.network.WrappingSecurityManager; import water.util.Log; import java.io.IOException; import java.nio.channels.ByteChannel; import java.nio.channels.SocketChannel; /** * Takes care of security. * * In the long run this class should manage all security aspects of H2O but currently some parts are handled * in other parts of the codebase. * * An instance of this class should be instantiated for each H2O object * and should follow its lifecycle. * * At this stage we support a simple shared secret, handshake based, authentication, which can be turned * on with the h2o_ssl_enabled parameter. Should the communicating nodes not share a common shared secret * communication between them will not be possible. * * Current state of data encryption: * * - HTTP for FlowUI - currently we rely on Jetty's SSL capabilities, authentication can be performed with * hash login, ldap login or kerberos. The location of secret keys used byt Jetty's SSL server should be * passed to the jks parameter. * * - inter node communication - all TCP based communication is being authenticated and encrypted using SSL * using JSSE (Java Secure Socket Extension) when then h2o_ssl_enabled parameter is passed. Keystore related * parameter should also be used as per the documentation. * * - in-memory data encryption - currently not supported, using an encrypted drive is recommended * at least for the swap partition. * * - data saved to disk - currently not supported, using an encrypted drive is recommended * */ public class H2OSecurityManager implements WrappingSecurityManager { private volatile static H2OSecurityManager INSTANCE = null; public final boolean securityEnabled; private SSLSocketChannelFactory sslSocketChannelFactory; private H2OSecurityManager() { this.securityEnabled = H2O.ARGS.internal_security_conf != null; try { if (null != H2O.ARGS.internal_security_conf) { this.sslSocketChannelFactory = new SSLSocketChannelFactory(); Log.info("H2O node running in encrypted mode using config file [" + H2O.ARGS.internal_security_conf + "]"); } else { Log.info("H2O node running in unencrypted mode."); } } catch (SSLContextException e) { Log.err("Node initialized with SSL enabled but failed to create SSLContext. " + "Node initialization aborted.", e); H2O.exit(1); } } @Override public boolean isSecurityEnabled() { return securityEnabled; } @Override public ByteChannel wrapServerChannel(SocketChannel channel) throws IOException { return sslSocketChannelFactory.wrapServerChannel(channel); } @Override public ByteChannel wrapClientChannel(SocketChannel channel, String host, int port) throws IOException { return sslSocketChannelFactory.wrapClientChannel(channel, host, port); } public static H2OSecurityManager instance() { if(null == INSTANCE) { synchronized (H2OSecurityManager.class) { if (null == INSTANCE) { INSTANCE = new H2OSecurityManager(); } } } return INSTANCE; } }
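A minimal sketch of the intended call pattern for outbound connections; the peer address is hypothetical, and whether wrapping actually happens depends entirely on internal_security_conf having been supplied at node startup.

// Needs: java.net.InetSocketAddress, java.nio.channels.SocketChannel, java.nio.channels.ByteChannel
H2OSecurityManager sec = H2OSecurityManager.instance();
SocketChannel raw = SocketChannel.open(new InetSocketAddress("10.0.0.2", 54321)); // hypothetical peer
ByteChannel channel = sec.isSecurityEnabled()
    ? sec.wrapClientChannel(raw, "10.0.0.2", 54321)   // SSL-wrapped when security is on
    : raw;                                            // plain channel otherwise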
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/H2OStarter.java
package water; import water.init.NetworkInit; import water.util.Log; import java.io.File; /** * H2O starter which manages start and registration of application extensions. */ public class H2OStarter { /** * Start H2O node. * * @param args H2O parameters * @param relativeResourcePath FIXME remove it * @param finalizeRestRegistration close registration of REST API */ public static void start(String[] args, String relativeResourcePath, boolean finalizeRestRegistration) { long time0 = System.currentTimeMillis(); // Fire up the H2O Cluster H2O.main(args); if (H2O.ARGS.disable_flow) { Log.info("Access to H2O Flow is disabled"); } else { H2O.registerResourceRoot(new File(relativeResourcePath + File.separator + "h2o-web/src/main/resources/www")); H2O.registerResourceRoot(new File(relativeResourcePath + File.separator + "h2o-core/src/main/resources/www")); } ExtensionManager.getInstance().registerRestApiExtensions(); if (!H2O.ARGS.disable_web) { if (finalizeRestRegistration) { H2O.startServingRestApi(); } } long timeF = System.currentTimeMillis(); Log.info("H2O started in " + (timeF - time0) + "ms"); if (!H2O.ARGS.disable_web) { Log.info(""); String message = H2O.ARGS.disable_flow ? "Connect to H2O from your R/Python client: " : "Open H2O Flow in your web browser: "; message += H2O.ARGS.web_ip == null ? H2O.getURL(NetworkInit.h2oHttpView.getScheme()) : H2O.getURL(NetworkInit.h2oHttpView.getScheme(), H2O.ARGS.web_ip, H2O.API_PORT, H2O.ARGS.context_path); Log.info(message); Log.info(""); } } public static void start(String[] args, String relativeResourcePath) { start(args, relativeResourcePath, true); } public static void start(String[] args, boolean finalizeRestRegistration) { start(args, System.getProperty("user.dir"), finalizeRestRegistration); } }
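A hypothetical embedding that boots a single node from a plain main(); the cloud name and port are placeholders, and passing true finalizes REST registration immediately as described above.

public class EmbeddedH2O {
  public static void main(String[] args) {
    // Start an H2O node named "my-cloud"; resources are looked up relative to user.dir.
    H2OStarter.start(new String[]{"-name", "my-cloud", "-baseport", "54321"}, true);
  }
}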
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/HeartBeat.java
package water; import java.util.Arrays; import water.init.JarHash; /** * Struct holding H2ONode health info. * @author <a href="mailto:cliffc@h2o.ai"></a> */ public class HeartBeat extends Iced<HeartBeat> implements BootstrapFreezable<HeartBeat> { char _hb_version; // Incrementing counter for sorting timelines better. int _cloud_hash; // Cloud-membership hash int _cloud_name_hash; // Hash of this cloud's name boolean _common_knowledge; // Cloud shares common knowledge char _cloud_size; // Cloud-size this guy is reporting long _jvm_boot_msec; // Boot time of JVM public long jvmBootTimeMsec(){return _jvm_boot_msec;} byte[] _jar_md5; // JAR file digest public boolean _client; // This is a client node: no keys homed here public int _pid; // Process ID // Static cpus & threads public short _num_cpus; // Number of CPUs on this Node public short _cpus_allowed; // Number of CPUs allowed by process public short _nthreads; // Number of threads allowed by cmd line // Dynamic resource usage: ticks, files public float _system_load_average; public long _system_idle_ticks; public long _system_total_ticks; public long _process_total_ticks; public int _process_num_open_fds; // Memory & Disk scaled by K or by M setters & getters. // Sum of KV + POJO + FREE == MEM_MAX (heap set at JVM launch) private long _kv_mem; // Memory used by K/V as of last FullGC private long _pojo_mem; // POJO used as of last FullGC private long _free_mem; // Free memory as of last FullGC private long _swap_mem; // Swapped K/V as of last FullGC void set_kv_mem (long n) { _kv_mem = n; } void set_pojo_mem (long n) { _pojo_mem = n; } void set_free_mem (long n) { _free_mem = n; } void set_swap_mem (long n) { _swap_mem = n; } public long get_kv_mem () { return _kv_mem; } public long get_pojo_mem() { return _pojo_mem; } public long get_free_mem() { return _free_mem; } public long get_swap_mem() { return _swap_mem; } public int _keys; // Number of LOCAL keys in this node, cached or homed int _free_disk; // Free disk (internally stored in megabyte precision) void set_free_disk(long n) { _free_disk = (int)(n>>20); } public long get_free_disk() { return ((long)_free_disk)<<20 ; } int _max_disk; // Disk size (internally stored in megabyte precision) void set_max_disk (long n) { _max_disk = (int)(n>>20); } public long get_max_disk () { return ((long)_max_disk)<<20 ; } boolean check_jar_md5() { return H2O.ARGS.md5skip || Arrays.equals(JarHash.JARHASH, _jar_md5); } // Internal profiling public float _gflops = Float.NaN; // Number of GFlops for this node public float _membw; // Memory bandwidth in GB/s // Number of elements & threads in high FJ work queues public char _rpcs; // Outstanding RemoteProcedureCalls public short _fjthrds[]; // Number of threads (not all are runnable) public short _fjqueue[]; // Number of elements in FJ work queue public char _tcps_active; // Threads trying do a TCP send }
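Note the precision trade-off in the disk setters: values are stored internally in megabytes, so anything below 1 MiB is dropped on the round trip. A tiny sketch (the setters are package-private, so this assumes code in the water package):

HeartBeat hb = new HeartBeat();
hb.set_free_disk(5L * 1024 * 1024 * 1024 + 123_456); // ~5 GiB plus some bytes
long freeDisk = hb.get_free_disk();                   // exactly 5 GiB: the sub-MiB remainder is lost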
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/HeartBeatThread.java
package water; import java.lang.management.ManagementFactory; import javax.management.*; import water.util.LinuxProcFileReader; import water.util.Log; import water.init.*; /** * Starts a thread publishing multicast HeartBeats to the local subnet: the * Leader of this Cloud. * * @author <a href="mailto:cliffc@h2o.ai"></a> * @version 1.0 */ public class HeartBeatThread extends Thread { public HeartBeatThread() { super("Heartbeat"); setDaemon(true); } // Time between heartbeats. Strictly several iterations less than the // timeout. static final int SLEEP = 1000; // Timeout in msec before we decide to not include a Node in the next round // of Paxos Cloud Membership voting. static public final int TIMEOUT = 60000; // Timeout in msec before we decide a Node is suspect, and call for a vote // to remove him. This must be strictly greater than the TIMEOUT. static final int SUSPECT = TIMEOUT+500; // uniquely number heartbeats for better timelines static private char HB_VERSION; // Timeout in msec for all kinds of I/O operations on unresponsive clients. // Endlessly retry until this timeout, and then declare the client "dead", // and toss out all in-flight client ops static public final int CLIENT_TIMEOUT=1000; // The Run Method. // Started by main() on a single thread, this code publishes Cloud membership // to the Cloud once a second (across all members). If anybody disagrees // with the membership Heartbeat, they will start a round of Paxos group // discovery. public void run() { boolean benchmarkEnabled = Boolean.valueOf( System.getProperty(H2O.OptArgs.SYSTEM_PROP_PREFIX + "heartbeat.benchmark.enabled", "true")); MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); ObjectName os; try { os = new ObjectName("java.lang:type=OperatingSystem"); } catch( MalformedObjectNameException e ) { throw Log.throwErr(e); } Thread.currentThread().setPriority(Thread.MAX_PRIORITY); int counter = 0; //noinspection InfiniteLoopStatement while( true ) { // Update the interesting health self-info for publication also H2O cloud = H2O.CLOUD; HeartBeat hb = H2O.SELF._heartbeat; hb._hb_version = HB_VERSION++; hb._jvm_boot_msec= TimeLine.JVM_BOOT_MSEC; // Memory utilization as of last FullGC long kv_gc = Cleaner.KV_USED_AT_LAST_GC; long heap_gc = Cleaner.HEAP_USED_AT_LAST_GC; long pojo_gc = Math.max(heap_gc - kv_gc,0); long kv_mem = Cleaner.Histo.cached(); // More current than last FullGC numbers; can skyrocket // Since last FullGC, assuming POJO remains constant and KV changed: new free memory long free_mem = Math.max(MemoryManager.MEM_MAX-kv_mem-pojo_gc,0); long pojo_mem = MemoryManager.MEM_MAX-kv_mem-free_mem; hb.set_kv_mem(kv_mem); hb.set_pojo_mem(pojo_mem); hb.set_free_mem(free_mem); hb.set_swap_mem(Cleaner.Histo.swapped()); hb._keys = H2O.STORE.size(); try { hb._system_load_average = ((Double)mbs.getAttribute(os, "SystemLoadAverage")).floatValue(); if( hb._system_load_average == -1 ) // SystemLoadAverage not available on windows hb._system_load_average = ((Double)mbs.getAttribute(os, "SystemCpuLoad")).floatValue(); } catch( Exception e ) {/*Ignore, data probably not available on this VM*/ } int rpcs = 0; for( H2ONode h2o : cloud._memary ) rpcs += h2o.taskSize(); hb._rpcs = (char)rpcs; // Scrape F/J pool counts hb._fjthrds = new short[H2O.MAX_PRIORITY+1]; hb._fjqueue = new short[H2O.MAX_PRIORITY+1]; for( int i=0; i<hb._fjthrds.length; i++ ) { hb._fjthrds[i] = (short)H2O.getWrkThrPoolSize(i); hb._fjqueue[i] = (short)H2O.getWrkQueueSize(i); } hb._tcps_active= (char)H2ONode.TCPS.get(); // get the usable and 
total disk storage for the partition where the // persistent KV pairs are stored hb.set_free_disk(H2O.getPM().getIce().getUsableSpace()); hb.set_max_disk (H2O.getPM().getIce().getTotalSpace() ); // get cpu utilization for the system and for this process. (linux only.) LinuxProcFileReader lpfr = new LinuxProcFileReader(); lpfr.read(); if (lpfr.valid()) { hb._system_idle_ticks = lpfr.getSystemIdleTicks(); hb._system_total_ticks = lpfr.getSystemTotalTicks(); hb._process_total_ticks = lpfr.getProcessTotalTicks(); hb._process_num_open_fds = lpfr.getProcessNumOpenFds(); } else { hb._system_idle_ticks = -1; hb._system_total_ticks = -1; hb._process_total_ticks = -1; hb._process_num_open_fds = -1; } hb._num_cpus = (short) H2ORuntime.availableProcessors(); hb._cpus_allowed = (short) lpfr.getProcessCpusAllowed(); if (H2O.ARGS.nthreads < hb._cpus_allowed) { hb._cpus_allowed = H2O.ARGS.nthreads; } hb._nthreads = H2O.ARGS.nthreads; try { hb._pid = Integer.parseInt(lpfr.getProcessID()); } catch (Exception ignore) {} // Announce what Cloud we think we are in. // Publish our health as well. UDPHeartbeat.build_and_multicast(cloud, hb); // If we have no internet connection, then the multicast goes // nowhere and we never receive a heartbeat from ourselves! // Fake it now. long now = System.currentTimeMillis(); H2O.SELF._last_heard_from = now; // Look for napping Nodes & propose removing from Cloud for( H2ONode h2o : cloud._memary ) { long delta = now - h2o._last_heard_from; if( delta > SUSPECT ) {// We suspect this Node has taken a dirt nap if( !h2o._announcedLostContact ) { Paxos.print("hart: announce suspect node",cloud._memary,h2o.toString()); h2o._announcedLostContact = true; } } else if( h2o._announcedLostContact ) { Paxos.print("hart: regained contact with node",cloud._memary,h2o.toString()); h2o._announcedLostContact = false; } } // Run mini-benchmark every 5 mins. However, on startup - do not have // all JVMs immediately launch a all-core benchmark - they will fight // with each other. Stagger them using the hashcode. // Run this benchmark *before* testing the heap or GC, so the GC numbers // are current as of the send time. if(benchmarkEnabled && (counter+Math.abs(H2O.SELF.hashCode()*0xDECAF /*spread wider than 1 apart*/)) % (300/(Float.isNaN(hb._gflops)?10:1)) == 0) { hb._gflops = (float)Linpack.run(hb._cpus_allowed); hb._membw = (float)MemoryBandwidth.run(hb._cpus_allowed); } counter++; // Once per second, for the entire cloud a Node will multi-cast publish // itself, so other unrelated Clouds discover each other and form up. try { Thread.sleep(SLEEP); } // Only once-sec per entire Cloud catch( IllegalMonitorStateException ignore ) { } catch( InterruptedException ignore ) { } } } }
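The mini-benchmark can be switched off for noise-sensitive deployments via the system property read at the top of run(). The sketch below assumes H2O.OptArgs.SYSTEM_PROP_PREFIX resolves to "sys.ai.h2o." and must run before the heartbeat thread starts.

// Disable the periodic Linpack / memory-bandwidth micro-benchmark.
System.setProperty(H2O.OptArgs.SYSTEM_PROP_PREFIX + "heartbeat.benchmark.enabled", "false");
// ... equivalent to launching with -Dsys.ai.h2o.heartbeat.benchmark.enabled=false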
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/Iced.java
package water; import java.io.*; /** * H2O uses Iced classes as the primary means of moving Java Objects around * the cluster. * <p> * Auto-serializer base-class using a delegator pattern (the faster option is * to byte-code gen directly in all Iced classes, but this requires all Iced * classes go through a ClassLoader). * <p> * Iced is a marker class, and {@link Freezable} is the companion marker * interface. Marked classes have 2-byte integer type associated with them, * and an auto-genned delegate class created to actually do byte-stream and * JSON serialization and deserialization. Byte-stream serialization is * extremely dense (includes various compressions), and typically memory-bandwidth * bound to generate. * <p> * During startup time the Weaver creates a parallel set of classes called * (classname)$Icer. These provide bytestream and JSON serializers * and deserializers which get called by AutoBuffer.write* and AutoBuffer.read*. * <p> * To debug the automagic serialization code create a transient field in your Iced * class called DEBUG_WEAVER. The generated source code will get written to STDOUT: * <p> * {@code * transient int DEBUG_WEAVER = 1; * } * @see Freezable * @see water.Weaver * @see water.AutoBuffer */ abstract public class Iced<D extends Iced> implements Freezable<D>, Externalizable { // The serialization flavor / delegate. Lazily set on first use. transient private volatile short _ice_id = 0; @Override public byte [] asBytes(){ return write(new AutoBuffer()).buf(); } @Override public D reloadFromBytes(byte [] ary){ return read(new AutoBuffer(ary)); } // Return the icer for this instance+class. Will set on 1st use. private Icer<D> icer() { int id = _ice_id; int tyid; if(id != 0) assert id == (tyid =TypeMap.onIce(this)):"incorrectly cashed id " + id + ", typemap has " + tyid + ", type = " + getClass().getName(); return TypeMap.getIcer(id!=0 ? id : (_ice_id=(short)TypeMap.onIce(this)),this); } /** Standard "write thyself into the AutoBuffer" call, using the fast Iced * protocol. Real work is in the delegate {@link Icer} classes. * @return Returns the original {@link AutoBuffer} for flow-coding. */ @Override final public AutoBuffer write (AutoBuffer ab) { return icer().write (ab,(D)this); } /** Standard "write thyself into the AutoBuffer" call, using JSON. Real work * is in the delegate {@link Icer} classes. * @return Returns the original {@link AutoBuffer} for flow-coding. */ @Override public AutoBuffer writeJSON(AutoBuffer ab) { return icer().writeJSON(ab,(D)this); } /** Standard "read thyself from the AutoBuffer" call, using the fast Iced protocol. Real work * is in the delegate {@link Icer} classes. * @return Returns the original {@link AutoBuffer} for flow-coding. */ @Override final public D read (AutoBuffer ab) { return icer().read (ab,(D)this); } /** Standard "read thyself from the AutoBuffer" call, using JSON. Real work * is in the delegate {@link Icer} classes. * @return Returns the original {@link AutoBuffer} for flow-coding. */ @Override final public D readJSON(AutoBuffer ab) { return icer().readJSON(ab,(D)this); } /** Helper for folks that want a JSON String for this object. */ final public String toJsonString() { return new String(this.writeJSON(new AutoBuffer()).buf()); } final public byte[] toJsonBytes() { return this.writeJSON(new AutoBuffer()).buf(); } /** Returns a small dense integer, which is cluster-wide unique per-class. * Useful as an array index. 
* @return Small integer, unique per-type */ @Override final public int frozenType() { return icer().frozenType(); } /** Clone, without the annoying exception */ @Override public final D clone() { try { return (D)super.clone(); } catch( CloneNotSupportedException e ) { throw water.util.Log.throwErr(e); } } /** Copy over cloned instance 'src' over 'this', field by field. */ protected void copyOver( D src ) { icer().copyOver((D)this,src); } /////////////////////////////////// // TODO: make all of these protected! /////////////////////////////////// /** Implementation of the {@link Iced} serialization protocol, only called by * auto-genned code. Not intended to be called by user code. Override only * for custom Iced serializers. */ //noninspection UnusedDeclaration // @Override public AutoBuffer write_impl( AutoBuffer ab ) { return ab; } /** Implementation of the {@link Iced} serialization protocol, only called by * auto-genned code. Not intended to be called by user code. Override only * for custom Iced serializers. */ //noninspection UnusedDeclaration // @Override public D read_impl( AutoBuffer ab ) { return (D)this; } /** Implementation of the {@link Iced} serialization protocol, only called by * auto-genned code. Not intended to be called by user code. Override only * for custom Iced serializers. */ //noninspection UnusedDeclaration // public AutoBuffer writeJSON_impl( AutoBuffer ab ) { return ab; } /** Implementation of the {@link Iced} serialization protocol, only called by * auto-genned code. Not intended to be called by user code. Override only * for custom Iced serializers. */ //noninspection UnusedDeclaration // @Override public D readJSON_impl( AutoBuffer ab ) { return (D)this; } // Java serializers use H2Os Icing @Override public void readExternal( ObjectInput ois ) throws IOException, ClassNotFoundException { int x = ois.readInt(); byte[] buf = MemoryManager.malloc1(x); ois.readFully(buf); read(new AutoBuffer(buf)); } @Override public void writeExternal( ObjectOutput oos ) throws IOException { byte[] buf = write(new AutoBuffer()).buf(); oos.writeInt(buf.length); oos.write(buf); } }
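A minimal sketch of a user-defined Iced class and a byte-level round trip using the helpers above; the Point class is hypothetical, and in a real deployment it must be on the classpath at startup so the Weaver can generate its Point$Icer delegate.

public class Point extends Iced<Point> {
  double x, y;
  public Point() {}                          // no-arg constructor used on deserialization
  public Point(double x, double y) { this.x = x; this.y = y; }
}

// Round trip through the fast Iced byte-stream form:
Point p = new Point(1.5, -2.0);
byte[] bytes = p.asBytes();
Point q = new Point().reloadFromBytes(bytes); // q.x == 1.5, q.y == -2.0
String json = p.toJsonString();               // JSON rendering of the same fields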
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/IcedUtils.java
package water; /** * Utility class to support Iced objects. */ public class IcedUtils { /** Deep-copy clone given iced object. */ static public <T extends Iced> T deepCopy(T iced) { if (iced == null) return null; AutoBuffer ab = new AutoBuffer(); iced.write(ab); ab.flipForReading(); // Create a new instance return (T) TypeMap.newInstance(iced.frozenType()).read(ab); } }
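Usage is a one-liner; the copy is a fully independent instance rebuilt from the serialized bytes. HeartBeat is used here only because it is a simple Iced class defined nearby.

HeartBeat original = new HeartBeat();
original._keys = 42;                              // public field, set for illustration
HeartBeat copy = IcedUtils.deepCopy(original);
assert copy != original && copy._keys == 42;      // distinct object, same serialized state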
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/IcedWrapper.java
package water; import org.apache.commons.lang.ArrayUtils; import water.api.schemas3.KeyV3; import water.exceptions.H2OIllegalArgumentException; import java.lang.reflect.Array; import java.util.Arrays; /** * Iced wrapper object for primitive types and arrays, to allow fields in other Iced * classes to have a generic type equivalent to Object, which can contain primitives, * arrays, and Iced objects. */ public class IcedWrapper extends Iced { /** * Is the wrapped value an array? */ boolean is_array; /** * Class of the wrapped type (could be a class of a primitive type like Integer.TYPE). */ String t = null; /** * Fields containing the wrapped value: */ int i; // also holds byte long l; float f; double d; boolean b; String s; String e; // TODO: JavaAssist is blowing up on enum fields KeyV3 k; int[] i_ar; // also holds byte long[] l_ar; float[] f_ar; double[] d_ar; // boolean[] b_ar; String[] s_ar; String[] e_ar; // TODO: JavaAssist is blowing up on enum fields KeyV3[] k_ar; Iced[] iced_ar; String[][] s_ar_ar; public IcedWrapper(Object o) { if (null == o) { this.t = null; return; } this.is_array = o.getClass().isArray(); if (is_array) { // array (1D only, for now) Class clz = o.getClass().getComponentType(); if (clz == Byte.class) { t = "Byte"; // TODO: i_ar = ArrayUtils.toPrimitive((Byte[])o); } else if (clz == Byte.TYPE) { t = "B"; // TODO: i_ar = ArrayUtils.toPrimitive((Byte[])o); } else if (clz == Integer.class) { t = "I"; i_ar = ArrayUtils.toPrimitive((Integer[])o); } else if (clz == Integer.TYPE) { t = "I"; i_ar = (int[])o; } else if (clz == Long.class) { t = "L"; l_ar = ArrayUtils.toPrimitive((Long[])o); } else if (clz == Long.TYPE) { t = "L"; l_ar = (long[]) o; } else if (clz == Float.class) { t = "F"; f_ar = ArrayUtils.toPrimitive((Float[])o); } else if (clz == Float.TYPE) { t = "F"; f_ar = (float[]) o; } else if (clz == Double.class) { t = "D"; d_ar = ArrayUtils.toPrimitive((Double[])o); } else if (clz == Double.TYPE) { t = "D"; d_ar = (double[]) o; } else if (clz == Boolean.class) { t = "Bo"; // TODO: AutoBuffer can't serialize arrays of booleans: b_ar = (boolean[])o; } else if (clz == Boolean.TYPE) { t = "Bo"; // TODO: AutoBuffer etc etc. 
} else if (clz == String.class) { t = "S"; s_ar = (String[])o; } else if (clz.isEnum()) { t = "E"; e_ar = new String[Array.getLength(o)]; for (int i = 0; i < e_ar.length; i++) e_ar[i] = Array.get(o, i).toString(); } else if (o instanceof KeyV3[]) { t = "K"; k_ar = (KeyV3[])o; } else if (o instanceof Iced[]) { t = "Iced"; iced_ar = (Iced[])o; } else if(clz.isArray()){ // 2D array if(clz.getComponentType() == String.class) { t = "SS"; s_ar_ar = (String[][]) o; } } } else { // scalar if (o instanceof Byte) { i = (byte)o; t = "B"; } else if (o instanceof Integer) { i = (int)o; t = "I"; } else if (o instanceof Long) { l = (long)o; t = "L"; } else if (o instanceof Float) { f = (float)o; t = "F"; } else if (o instanceof Double) { d = (double)o; t = "D"; } else if (o instanceof Boolean) { b = (boolean)o; t = "Bo"; } else if (o instanceof String) { s = (String)o; t = "S"; } else if (o instanceof Enum) { e = ((Enum)o).toString(); t = "E"; } else if (o instanceof KeyV3) { k = (KeyV3)o; t = "K"; } } if (null == t) throw new H2OIllegalArgumentException("o", "IcedWrapper", o); } public Object get() { if (t == null) { return null; } if (is_array) { if (t == "byte") { throw H2O.fail(); // TODO: i_ar = ArrayUtils.toPrimitive((Byte[])o); } else if (t.equals("I")) { return i_ar; } else if (t.equals("L")) { return l_ar; } else if (t.equals("F")) { return f_ar; } else if (t.equals("D")) { return d_ar; } else if (t.equals("Bo")) { throw H2O.fail(); // TODO: AutoBuffer can't serialize arrays of booleans: b_ar = (boolean[])o; } else if (t.equals("S")) { return s_ar; } else if (t.equals("E")) { return e_ar; } else if (t.equals("K")) { return k_ar; } else if (t.equals("Iced")) { return iced_ar; } else if(t.equals("SS")){ return s_ar_ar; } } else { if (t.equals("B")) { return i; } else if (t.equals("I")) { return i; } else if (t.equals("L")) { return l; } else if (t.equals("F")) { return f; } else if (t.equals("D")) { return d; } else if (t.equals("Bo")) { return b; } else if (t.equals("S")) { return s; } else if (t.equals("E")) { return e; } else if (t.equals("K")) { return k; } } throw new H2OIllegalArgumentException(this.toString()); } @Override public String toString() { if (null == t) { return "(null)"; } else if (is_array) { if (t.equals("I")) return Arrays.toString(i_ar); else if (t.equals("L")) return Arrays.toString(l_ar); else if (t.equals("F")) return Arrays.toString(f_ar); else if (t.equals("D")) return Arrays.toString(d_ar); else if (t.equals("S")) return Arrays.toString(s_ar); else if (t.equals("E")) return Arrays.toString(e_ar); else if (t.equals("K")) return Arrays.toString(k_ar); else if (t.equals("SS")){ StringBuilder sb = new StringBuilder("["); for(String[] s_ar : s_ar_ar) { sb.append(Arrays.toString(s_ar)).append(","); } return sb.replace(sb.length()-1, sb.length(), "]").toString(); } } else if (t.equals("B")) { return "" + i; } else if (t.equals("I")) { return "" + i; } else if (t.equals("L")) { return "" + l; } else if (t.equals("F")) { return "" + f; } else if (t.equals("D")) { return "" + d; } else if (t.equals("Bo")) { return "" + b; } else if (t.equals("S")) { return s; } else if (t.equals("E")) { return "" + e; } else if (t.equals("K")) { return "" + k; } return "unhandled type"; } /** Write JSON for the wrapped value without putting it inside a JSON object. 
*/ public AutoBuffer writeUnwrappedJSON( AutoBuffer ab ) { if (is_array) { if (t.equals("B")) return ab.putJSONA4(i_ar); // NOTE: upcast else if (t.equals("I")) return ab.putJSONA4(i_ar); else if (t.equals("L")) return ab.putJSONA8(l_ar); else if (t.equals("F")) return ab.putJSONA4f(f_ar); else if (t.equals("D")) return ab.putJSONA8d(d_ar); else if (t.equals("Bo")) return ab.putJSONAStr(null); // TODO: BROKEN else if (t.equals("S")) return ab.putJSONAStr(s_ar); else if (t.equals("E")) return ab.putJSONAStr(e_ar); else if (t.equals("K")) return ab.putJSONA(k_ar); else if (t.equals("Iced")) return ab.putJSONA(iced_ar); else if (t.equals("SS")) return ab.putJSONAAStr(s_ar_ar); } else { if (t.equals("B")) return ab.putJSON1((byte)i); else if (t.equals("I")) return ab.putJSON4(i); else if (t.equals("L")) return ab.putJSON8(l); else if (t.equals("F")) return ab.putJSON4f(f); else if (t.equals("D")) return ab.putJSON8d(d); else if (t.equals("Bo")) return ab.putJSONStrUnquoted(b ? "true" : "false"); else if (t.equals("S")) return ab.putJSONName(s); else if (t.equals("E")) return ab.putJSONName(e); else if (t.equals("K")) return ab.putJSON(k); } throw H2O.fail("Unhandled type: " + t); // TODO: arrays } }
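A short sketch of wrapping and unwrapping values; the casts mirror the type tags assigned in the constructor, and Job.JobStatus is reused here only as a convenient enum.

IcedWrapper wi = new IcedWrapper(42);                     // scalar int, tag "I"
IcedWrapper wd = new IcedWrapper(new double[]{1.0, 2.5}); // primitive double[], tag "D"
IcedWrapper we = new IcedWrapper(Job.JobStatus.RUNNING);  // enums are stored as their String name

int      i = (int) wi.get();
double[] d = (double[]) wd.get();
String   s = (String) we.get();                           // "RUNNING"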
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/Icer.java
package water; import sun.misc.Unsafe; import water.nbhm.UtilUnsafe; /** Base Class for the {@link Iced} implementation hierarchy; subclasses are * all auto-gen'd and no user code should call or extend this class. Since * this is the base, it has no fields to read or write. */ public abstract class Icer<T extends Freezable> { protected static final Unsafe _unsafe = UtilUnsafe.getUnsafe(); private final T _new; public Icer(T iced) { assert iced != null:"null freezable"; _new=iced; } final T theFreezable() { return _new; } protected AutoBuffer write (AutoBuffer ab, T ice) { /*base of the write call chain; no fields to write*/return ab; } protected AutoBuffer writeJSON(AutoBuffer ab, T ice) { return ab.put1('{').put1('}'); } protected T read (AutoBuffer ab, T ice) { /*base of the read call chain; no fields to read*/return ice; } protected T readJSON(AutoBuffer ab, T ice) { /*base of the read call chain; no fields to read*/return ice; } protected void copyOver( T dst, T src ) { /*base of the call chain; no fields to copy*/ } public int frozenType() { throw fail(); } protected String className() { throw fail(); } private RuntimeException fail() { return new RuntimeException(getClass().toString()+" should be automatically overridden by the auto-serialization code"); } // That end in the TypeMap ID for "H2OCountedCompleter" class - which is "3". protected void compute1(water.H2O.H2OCountedCompleter h2cc ) { h2cc.compute1(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/JavaSelfCheck.java
package water; import water.util.UnsafeUtils; import java.util.Random; public class JavaSelfCheck { /** * Runs basic checks to ensure that H2O is compatible with the current JVM * * @return true if all compatibility checks passed, false otherwise */ public static boolean checkCompatibility() { if (!checkUnsafe()) return false; if (!checkWeaver()) return false; return checkSerialization(); } // Are we able to serialize and deserialize data? private static boolean checkSerialization() { byte[] bytes = AutoBuffer.serializeBootstrapFreezable(new HeartBeat()); return AutoBuffer.deserializeBootstrapFreezable(bytes) instanceof HeartBeat; } // Are we able to generate Icers? Does javassist work as expected? private static boolean checkWeaver() { // The HeartBeat class is guaranteed to be part of the bootstrap classes, otherwise clouding couldn't work Freezable<?> f = TypeMap.newFreezable(HeartBeat.class.getName()); if (! (f instanceof HeartBeat)) { return false; } // check that we can generate an Icer return TypeMap.getIcer(f) != null; } // Is Unsafe available and working as expected? private static boolean checkUnsafe() { final int N = 1024; final Random r = new Random(); final double[] doubleData = new double[N]; final byte[] doubleBytes = new byte[N * 8]; final long[] longData = new long[N]; final byte[] longBytes = new byte[N * 8]; for (int i = 0; i < N; i++) { doubleData[i] = r.nextDouble(); UnsafeUtils.set8d(doubleBytes, i * 8, doubleData[i]); longData[i] = r.nextLong(); UnsafeUtils.set8(longBytes, i * 8, longData[i]); } for (int i = 0; i < N; i++) { double d = UnsafeUtils.get8d(doubleBytes, i * 8); if (d != doubleData[i]) return false; long l = UnsafeUtils.get8(longBytes, i * 8); if (l != longData[i]) return false; } return true; } }
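A typical early-startup guard, sketched with hypothetical error handling: if any of the three checks fails, it is safer to abort before attempting to form a cloud.

if (!JavaSelfCheck.checkCompatibility()) {
  System.err.println("H2O is not compatible with this JVM (Unsafe/Weaver/serialization check failed)");
  System.exit(1);
}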
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/JavaVersionSupport.java
package water; import static water.util.JavaVersionUtils.JAVA_VERSION; public class JavaVersionSupport { // Notes: // Inclusive interval of supported Java versions. // Make sure that the following is logically consistent with the whitelist in the R code - see function .h2o.check_java_version in connection.R. // Upgrade of the javassist library should be considered when adding support for a new Java version. public static final int MIN_SUPPORTED_JAVA_VERSION = 8; public static final int MAX_SUPPORTED_JAVA_VERSION = 17; /** * Checks the version of Java this instance of H2O was run with and compares it with the supported versions. * * @return True if the instance of H2O is running on a supported JVM, otherwise false. */ public static boolean runningOnSupportedVersion() { return JAVA_VERSION.isKnown() && (isUserEnabledJavaVersion() || isSupportedVersion()); } private static boolean isSupportedVersion() { return JAVA_VERSION.getMajor() >= MIN_SUPPORTED_JAVA_VERSION && JAVA_VERSION.getMajor() <= MAX_SUPPORTED_JAVA_VERSION; } /** * @return True if the Java version this instance of H2O is currently running on has been explicitly whitelisted by * the user. */ private static boolean isUserEnabledJavaVersion() { final String extraJavaVersionsStr = System.getProperty(H2O.OptArgs.SYSTEM_PROP_PREFIX + "debug.allowJavaVersions"); if (extraJavaVersionsStr == null || extraJavaVersionsStr.isEmpty()) { return false; } final String[] splitVersions = extraJavaVersionsStr.split(","); for (final String version : splitVersions) { final int majorVersion = Integer.valueOf(version); if (JAVA_VERSION.getMajor() == majorVersion) { return true; } } return false; } public static String describeSupportedVersions() { return MIN_SUPPORTED_JAVA_VERSION + "-" + MAX_SUPPORTED_JAVA_VERSION; } }
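The whitelist property can be used to run on a newer, not-yet-supported JVM for debugging. The sketch assumes H2O.OptArgs.SYSTEM_PROP_PREFIX resolves to "sys.ai.h2o." and that the property is set before the check is consulted; the versions listed are arbitrary.

// Equivalent to -Dsys.ai.h2o.debug.allowJavaVersions=21,22 on the command line.
System.setProperty("sys.ai.h2o.debug.allowJavaVersions", "21,22");
boolean ok = JavaVersionSupport.runningOnSupportedVersion();
String supported = JavaVersionSupport.describeSupportedVersions();   // "8-17"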
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/Job.java
package water; import jsr166y.CountedCompleter; import water.H2O.H2OCountedCompleter; import water.api.schemas3.KeyV3; import water.util.ArrayUtils; import water.util.Log; import java.util.Arrays; import java.util.concurrent.TimeUnit; /** Jobs are used to do minimal tracking of long-lifetime user actions, * including progress-bar updates and the ability to review in progress or * completed Jobs, and cancel currently running Jobs. * <p> * Jobs are {@link Keyed}, because they need to Key to control e.g. atomic updates. * <p> * Jobs are generic on Keyed, because their primary result is a Keyed result - * which is Not a Job. Obvious examples are Frames (from running Parse or * CreateFrame jobs), or Models (from running ModelBuilder jobs). * <p> * Long running tasks will has-a Job, not is-a Job. */ public final class Job<T extends Keyed> extends Keyed<Job> { public enum JobStatus { PENDING, RUNNING, SUCCEEDED, STOPPED, FAILED; public static String[] domain() { return Arrays.stream(values()).map(Object::toString).toArray(String[]::new); } } /** Result Key */ public final Key<T> _result; public final int _typeid; /** User description */ public final String _description; // whether this job is recoverable private boolean _recoverable = false; // whether the _result key is ready for view private boolean _ready_for_view = true; private String [] _warns; public void warn(String warn) { Log.warn(warn); setWarnings(ArrayUtils.append(warns(),warn)); } public void setWarnings(final String [] warns){ new JAtomic() { @Override boolean abort(Job job) { return job._stop_requested; } @Override void update(Job job) { job._warns = warns; } }.apply(this); } /** Create a Job * @param jobKey Key for this job * @param resultKey Key of the final result * @param clz_of_T String class of the Keyed result * @param desc String description * @param recoverable Boolean indicating that this job is recoverable */ public Job(Key<Job> jobKey, Key<T> resultKey, String clz_of_T, String desc, boolean recoverable) { super(jobKey == null ? defaultJobKey() : jobKey); assert resultKey==null || clz_of_T!=null; _result = resultKey; // Result (destination?) key _typeid = clz_of_T==null ? 
0 : TypeMap.getIcedId(clz_of_T); _description = desc; _recoverable = recoverable; } /** Create a Job * @param key Key of the final result * @param clz_of_T String class of the Keyed result * @param desc String description */ public Job(Key<T> key, String clz_of_T, String desc) { this(defaultJobKey(), key, clz_of_T, desc, false); // Passing in a brand new Job key } /** Create a Job when a warning already exists due to bad model_id * @param key Key of the final result * @param clz_of_T String class of the Keyed result * @param desc String description * @param warningStr String contains a warning on model_id */ public Job(Key<T> key, String clz_of_T, String desc, String warningStr) { this(key, clz_of_T, desc); if (warningStr != null) { _warns = new String[] {warningStr}; } } // Job Keys are pinned to this node (i.e., the node that invoked the // computation), because it should be almost always updated locally private static Key<Job> defaultJobKey() { return Key.make(Key.JOB, false, H2O.SELF); } /** Job start_time and end_time using Sys.CTM */ private long _start_time; // Job started, or 0 if not running private long _end_time; // Job end time, or 0 if not ended // Simple internal state accessors private boolean created() { return _start_time == 0; } private boolean running() { return _start_time != 0 && _end_time == 0; } private boolean stopped() { return _end_time != 0; } // Simple state accessors; public ones do a DKV update check public long start_time() { update_from_remote(); assert !created(); return _start_time; } public long end_time() { update_from_remote(); assert stopped(); return _end_time; } public boolean isRunning() { update_from_remote(); return running(); } public boolean isStopped() { update_from_remote(); return stopped(); } // Slightly more involved state accessors public boolean isStopping(){ return isRunning() && _stop_requested; } public boolean isDone() { return isStopped() && _ex == null; } public boolean isCrashing(){ return isRunning() && _ex != null; } public boolean isCrashed (){ return isStopped() && _ex != null; } public JobStatus getStatus() { if (isCrashed()) return JobStatus.FAILED; else if (isStopped()) if (stop_requested()) return JobStatus.STOPPED; else return JobStatus.SUCCEEDED; else if (isRunning()) return JobStatus.RUNNING; else return JobStatus.PENDING; } /** Current runtime; zero if not started. */ public long msec() { update_from_remote(); if( created() ) return 0; // Created, not running if( running() ) return System.currentTimeMillis() - _start_time; return _end_time - _start_time; // Stopped } public boolean isRecoverable() { return _recoverable; }; public boolean readyForView() { return _ready_for_view; } public void setReadyForView(boolean ready) { _ready_for_view = ready; } /** Jobs may be requested to Stop. Each individual job will respond to this * on a best-effort basis, and make some time to stop. Stop really means * "the Job stops", but is not an indication of any kind of error or fail. * Perhaps the user simply got bored. Because it takes time to stop, a Job * may be both in state isRunning and stop_requested, and may later switch * to isStopped and stop_requested. Also, an exception may be posted. 
*/ private volatile boolean _stop_requested; // monotonic change from false to true public boolean stop_requested() { update_from_remote(); return _stop_requested; } public void stop() { if( !_stop_requested ) // fast path cutout new JAtomic() { @Override boolean abort(Job job) { return job._stop_requested; } @Override void update(Job job) { job._stop_requested = true; Log.debug("Job "+job._description+" requested to stop"); } }.apply(this); } public void fail(Throwable ex) { new JAtomic() { @Override boolean abort(Job job) { return job._stop_requested; } @Override void update(Job job) { job._stop_requested = true; job._ex = AutoBuffer.javaSerializeWritePojo(ex); job._msg = ex.getMessage(); Log.debug("Job " + job._description + " failed because of " + ex.getMessage()); } }.apply(this); } /** Any exception thrown by this Job, or null if none. Note that while * setting an exception generally triggers stopping a Job, stopping * takes time, so the Job might still be running with an exception * posted. */ private byte [] _ex; public Throwable ex() { if(_ex == null) return null; return (Throwable)AutoBuffer.javaSerializeReadPojo(_ex); } /** Total expected work. */ public long _work; // Total work to-do public long _max_runtime_msecs; private long _worked; // Work accomplished; between 0 and _work private String _msg; // Progress string /** Returns a float from 0 to 1 representing progress. Polled periodically. * Can default to returning e.g. 0 always. */ public float progress() { update_from_remote(); float regularProgress = _work==0 ? 0f : Math.min(1,(float)_worked/_work); if (_max_runtime_msecs>0) return Math.min(1,Math.max(regularProgress, (float)msec()/_max_runtime_msecs)); return regularProgress; } /** Returns last progress message. */ public String progress_msg() { update_from_remote(); return _msg; } /** Report new work done for this job */ public final void update( final long newworked, final String msg) { if( newworked > 0 || (msg != null && !msg.equals(_msg)) ) { new JAtomic() { @Override boolean abort(Job job) { return newworked==0 && ((msg==null && _msg==null) || (msg != null && msg.equals(job._msg))); } @Override void update(Job old) { old._worked += newworked; old._msg = msg; } }.apply(this); } } public final void update(final long newworked) { update(newworked,(String)null); } public static void update(final long newworked, Key<Job> jobkey) { update(newworked, null, jobkey); } public static void update(final long newworked, String msg, Key<Job> jobkey) { jobkey.get().update(newworked, msg); } // -------------- /** A system key for global list of Job keys. */ public static final Key<Job> LIST = Key.make(" JobList", Key.BUILT_IN_KEY); public String[] warns() { update_from_remote(); return _warns; } private static class JobList extends Keyed { Key<Job>[] _jobs; JobList() { super(LIST); _jobs = new Key[0]; } private JobList(Key<Job>[]jobs) { super(LIST); _jobs = jobs; } } /** The list of all Jobs, past and present. 
* @return The list of all Jobs, past and present */ public static Job[] jobs() { final Value val = DKV.get(LIST); if( val==null ) return new Job[0]; JobList jl = val.get(); Job[] jobs = new Job[jl._jobs.length]; int j=0; for( int i=0; i<jl._jobs.length; i++ ) { final Value job = DKV.get(jl._jobs[i]); if( job != null ) jobs[j++] = job.get(); } if( j==jobs.length ) return jobs; // All jobs still exist jobs = Arrays.copyOf(jobs,j); // Shrink out removed Key keys[] = new Key[j]; for( int i=0; i<j; i++ ) keys[i] = jobs[i]._key; // One-shot throw-away attempt at remove dead jobs from the jobs list DKV.DputIfMatch(LIST,new Value(LIST,new JobList(keys)),val,new Futures()); return jobs; } public static final long WORK_UNKNOWN = 0L; public final long getWork() { update_from_remote(); return _work; } /** Set the amount of work for this job - can only be called if job was started without work specification */ public final void setWork(final long work) { if (getWork() != WORK_UNKNOWN) { throw new IllegalStateException("Cannot set work amount if it was already previously specified"); } new JAtomic() { @Override boolean abort(Job job) { return false; } @Override void update(Job old) { old._work = work; } }.apply(this); } public Job<T> start(final H2OCountedCompleter fjtask, long work, double max_runtime_secs) { _max_runtime_msecs = (long)(max_runtime_secs*1e3); return start(fjtask, work); } /** Start this task based on given top-level fork-join task representing job computation. * @param fjtask top-level job computation task. * @param work Amount of work to-do, for updating progress bar * @return this job in {@code isRunning()} state * * @see H2OCountedCompleter */ public Job<T> start(final H2OCountedCompleter fjtask, long work) { // Job does not exist in any DKV, and so does not have any global // visibility (yet). assert !new AssertNoKey(_key).doAllNodes()._found; assert created() && !running() && !stopped(); assert fjtask != null : "Starting a job with null working task is not permitted!"; assert fjtask.getCompleter() == null : "Cannot have a completer; this must be a top-level task"; // F/J rules: upon receiving an exception (the task's compute/compute2 // throws an exception caugt by F/J), the task is marked as "completing // exceptionally" - it is marked "completed" before the onExComplete logic // runs. It is then notified, and wait'ers wake up - before the // onExComplete runs; onExComplete runs on in another thread, so wait'ers // are racing with the onExComplete. // We want wait'ers to *wait* until the task's onExComplete runs, AND Job's // onExComplete runs (marking the Job as stopped, with an error). So we // add a few wrappers: // Make a wrapper class that only *starts* when the task completes - // especially it only starts even when task completes exceptionally... thus // the task onExceptionalCompletion code runs completely before Barrer1 // starts - providing a simple barrier. The Barrier1 onExComplete runs in // parallel with wait'ers on Barrier1. When Barrier1 onExComplete itself // completes, Barrier2 is notified. // Barrier2 is an empty class, and vacuously runs in parallel with wait'ers // of Barrier2 - all callers of Job.get(). 
_barrier = new Barrier2(); fjtask.setCompleter(new Barrier1(_barrier)); // These next steps must happen in-order: // 4 - cannot submitTask without being on job-list, lest all cores get // slammed but no user-visible record of why, so 4 after 3 // 3 - cannot be on job-list without job in DKV, lest user (briefly) see it // on list but cannot click the link & find job, so 3 after 2 // 2 - cannot be findable in DKV without job also being in running state // lest the finder be confused about the job state, so 2 after 1 // 1 - set state to running // 1 - Change state from created to running _start_time = System.currentTimeMillis(); assert !created() && running() && !stopped(); _work = work; // 2 - Save the full state of the job, first time ever making it public DKV.put(this); // Announce in DKV // 3 - Update job list final Key jobkey = _key; new TAtomic<JobList>() { @Override public JobList atomic(JobList old) { if( old == null ) old = new JobList(); Key[] jobs = old._jobs; old._jobs = Arrays.copyOf(jobs, jobs.length + 1); old._jobs[jobs.length] = jobkey; return old; } }.invoke(LIST); // 4 - Fire off the FJTASK H2O.submitTask(fjtask); return this; } transient private Barrier2 _barrier; // Top-level task to block on // Handy for assertion private static class AssertNoKey extends MRTask<AssertNoKey> { private final Key<Job> _key; boolean _found; AssertNoKey( Key<Job> key ) { _key = key; } @Override public void setupLocal() { _found = H2O.containsKey(_key); } @Override public void reduce( AssertNoKey ank ) { _found |= ank._found; } } public static class JobCancelledException extends RuntimeException { public JobCancelledException() { this(null); } public JobCancelledException(Job job) { super("job "+ (job == null ? "" : job._description+" ["+job._key+"] ") +"was cancelled"); } } // A simple barrier. Threads blocking on the job will block on this // "barrier" task, which will block until the fjtask runs the onCompletion or // onExceptionCompletion code. private class Barrier1 extends CountedCompleter { Barrier1(CountedCompleter cc) { super(cc,0); } @Override public void compute() { } @Override public void onCompletion(CountedCompleter caller) { new Barrier1OnCom().apply(Job.this); _barrier = null; // Free for GC } @Override public boolean onExceptionalCompletion(Throwable ex, CountedCompleter caller) { if(Job.isCancelledException(ex)) { new Barrier1OnCom().apply(Job.this); } else { try { Log.err(ex); } catch (Throwable t) {/* do nothing */} new Barrier1OnExCom(ex).apply(Job.this); } _barrier = null; // Free for GC return true; } } static public boolean isCancelledException(Throwable ex) { return (ex != null) && (ex instanceof JobCancelledException || ex.getCause() != null && ex.getCause() instanceof JobCancelledException); } private static class Barrier1OnCom extends JAtomic { @Override boolean abort(Job job) { return false; } @Override public void update(Job old) { assert old._end_time==0 : "onComp should be called once at most, and never if onExComp is called"; old._end_time = System.currentTimeMillis(); if( old._worked < old._work ) old._worked = old._work; old._msg = old._stop_requested ? "Cancelled." 
: "Done."; } } private static class Barrier1OnExCom extends JAtomic { final byte[] _dex; Barrier1OnExCom(Throwable ex) { _dex = AutoBuffer.javaSerializeWritePojo(ex); } @Override boolean abort(Job job) { return job._ex != null && job._end_time!=0; } // Already stopped & exception'd @Override void update(Job job) { if( job._ex == null ) job._ex = _dex; // Keep first exception ever job._stop_requested = true; // Since exception set, also set stop if( job._end_time == 0 ) // Keep first end-time job._end_time = System.currentTimeMillis(); job._msg = "Failed."; } } private class Barrier2 extends CountedCompleter { @Override public void compute() { } } /** Blocks until the Job completes */ public T get() { Barrier2 bar = _barrier; if( bar != null ) // Barrier may be null if task already completed bar.join(); // Block on the *barrier* task, which blocks until the fjtask on*Completion code runs completely assert isStopped(); if (_ex!=null) throw new RuntimeException((Throwable)AutoBuffer.javaSerializeReadPojo(_ex)); // Maybe null return, if the started fjtask does not actually produce a result at this Key return _result==null ? null : _result.get(); } // -------------- // Atomic State Updaters. Atomically change state on the home node. They // also update the *this* object from the freshest remote state, meaning the // *this* object changes after these calls. // NO OTHER CHANGES HAPPEN TO JOB FIELDS. private abstract static class JAtomic extends TAtomic<Job> { void apply(Job job) { invoke(job._key); job.update_from_remote(); } abstract boolean abort(Job job); abstract void update(Job job); @Override public Job atomic(Job job) { assert job != null : "Race on creation (key=" + _key + ")"; if( abort(job) ) return null; update(job); return job; } } // Update the *this* object from a remote object. private void update_from_remote( ) { Job remote = DKV.getGet(_key); // Watch for changes in the DKV if( this==remote ) return; // Trivial! if( null==remote ) return; // Stay with local version boolean differ = false; if( _stop_requested != remote._stop_requested ) differ = true; if(_start_time!= remote._start_time) differ = true; if(_end_time != remote._end_time ) differ = true; if(_ex != remote._ex ) differ = true; if(_work != remote._work ) differ = true; if(_worked != remote._worked ) differ = true; if(_msg != remote._msg ) differ = true; if(_max_runtime_msecs != remote._max_runtime_msecs) differ = true; if(! Arrays.equals(_warns, remote._warns)) differ = true; if( differ ) synchronized(this) { _stop_requested = remote._stop_requested; _start_time= remote._start_time; _end_time = remote._end_time ; _ex = remote._ex ; _work = remote._work ; _worked = remote._worked ; _msg = remote._msg ; _max_runtime_msecs = remote._max_runtime_msecs; _warns = remote._warns; } } @Override public Class<KeyV3.JobKeyV3> makeSchema() { return KeyV3.JobKeyV3.class; } /** * Tries to retrieve a completed Job from DKV. It will wait * up to given timeout period if the requested job is still running - giving it * a chance to complete before this method has to return. 
* * @param key job key * @param timeMillis timeout period in milliseconds, if job is still running it will wait * up to this amount of time for the job to finish * @return null, if job doesn't exist, a Job instance otherwise */ public static Job<?> tryGetDoneJob(Key<Job> key, long timeMillis) { final Value val = DKV.get(key); if (val == null) { throw new IllegalArgumentException("Job is missing"); } final Iced<?> ice = val.get(); if (!(ice instanceof Job)) { throw new IllegalArgumentException("Must be a Job not a " + ice.getClass()); } final Job<?> j = (Job<?>) ice; if (timeMillis > 0 && !j.isStopped()) { j.blockingWaitForDone(timeMillis, false); } return j; } void blockingWaitForDone(long timeMillis, boolean checkConsistency) { final Barrier2 bar = _barrier; if (bar == null) { if (checkConsistency) { if (isRunning()) { // barrier is only removed after job is stopped throw new IllegalStateException("Running job is in an inconsistent state (barrier is missing)"); } } return; } try { bar.get(timeMillis, TimeUnit.MILLISECONDS, true); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } catch (Exception e) { Log.trace(e); } } private static boolean FORCE_SLEEP_WAITING = H2O.getSysBoolProperty("job.sleep_wait_for_done", false); /** * Waits if necessary for at most the given time for the Job * to complete. The wait is always blocking regardless if called from * an F/J thread or a regular thread. * * @param timeoutMillis the maximum time in milliseconds to wait */ public void blockingWaitForDone(long timeoutMillis) { if (H2O.ARGS.client || FORCE_SLEEP_WAITING) { // in client-mode the job is actually running on a different node sleep(timeoutMillis); // for debugging to be able to prove that blockingWaitForDone saves time compared to just sleep } else { blockingWaitForDone(timeoutMillis, true); } } private static void sleep(long timeoutMillis) { try { Thread.sleep(timeoutMillis); } catch (InterruptedException ignored) { Thread.currentThread().interrupt(); } } }
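A minimal sketch of the lifecycle described above: construct a Job, start it with a top-level H2OCountedCompleter, report progress, and block for the result. The Frame result type, key name and loop body are hypothetical; a real task would also put its result into the DKV under the result key before completing.

final long totalWork = 100;
final Job<Frame> job = new Job<>(Key.<Frame>make("demo_result"), Frame.class.getName(), "demo job");
job.start(new H2O.H2OCountedCompleter() {
  @Override public void compute2() {
    for (int i = 0; i < totalWork; i++) {
      if (job.stop_requested()) throw new Job.JobCancelledException(job); // honor stop() on a best-effort basis
      // ... one unit of hypothetical work, eventually producing the Frame under "demo_result" ...
      job.update(1, "step " + i);
    }
    tryComplete();
  }
}, totalWork);
float p = job.progress();   // 0.0 .. 1.0 while running
Frame result = job.get();   // blocks on the barrier until the task completes (or rethrows its failure)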
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/JobUpdatePostMap.java
package water; import water.fvec.Chunk; public class JobUpdatePostMap extends MRTask.PostMapAction<JobUpdatePostMap> { private final Job<?> _job; private JobUpdatePostMap(Job<?> _job) { this._job = _job; } @Override void call(Key mapInput) { _job.update(1); } @Override void call(Chunk[] mapInput) { _job.update(mapInput[0].len()); } public static JobUpdatePostMap forJob(Job<?> job) { return job != null ? new JobUpdatePostMap(job) : null; } }
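The factory is null-safe, which lets callers pass an optional Job straight through. How the action is attached to an MRTask is defined in MRTask itself and is not shown here; conceptually, the framework invokes call(Chunk[]) after every map, advancing the job by the rows of that chunk.

// Assuming 'job' is a running Job<Frame> (see the Job sketch above):
JobUpdatePostMap postMap = JobUpdatePostMap.forJob(job);   // null-safe factory: yields null for a null job
// After each map(Chunk[] cs), the MRTask machinery calls postMap.call(cs),
// which advances the job's progress by cs[0].len() rows.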
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/Key.java
package water; import water.util.ReflectionUtils; import water.util.StringUtils; import water.util.UnsafeUtils; import water.fvec.*; import java.util.Arrays; import java.util.UUID; import java.util.concurrent.atomic.AtomicLongFieldUpdater; /** * Keys! H2O supports a distributed Key/Value store, with exact Java Memory * Model consistency. Keys are a means to find a {@link Value} somewhere in * the Cloud, to cache it locally, to allow globally consistent updates to a * {@link Value}. Keys have a *home*, a specific Node in the Cloud, which is * computable from the Key itself. The Key's home node breaks ties on racing * updates, and tracks caching copies (via a hardware-like MESI protocol), but * otherwise is not involved in the DKV. All operations on the DKV, including * Gets and Puts, are found in {@link DKV}. * <p> * Keys are defined as a simple byte-array, plus a hashCode and a small cache * of Cloud-specific information. The first byte of the byte-array determines * if this is a user-visible Key or an internal system Key; an initial byte of * &lt;32 is a system Key. User keys are generally externally visible, system * keys are generally limited to things kept internal to the H2O Cloud. Keys * might be a high-count item, hence we care about the size. * <p> * System keys for {@link Job}, {@link Vec}, {@link Chunk} and {@link * water.fvec.Vec.VectorGroup} have special initial bytes; Keys for these classes can be * determined without loading the underlying Value. Layout for {@link Vec} and * {@link Chunk} is further restricted, so there is an efficient mapping * between a numbered Chunk and it's associated Vec. * <p> * System keys (other than the restricted Vec and Chunk keys) can have their * home node forced, by setting the desired home node in the first few Key * bytes. Otherwise home nodes are selected by pseudo-random hash. Selecting * a home node is sometimes useful for Keys with very high update rates coming * from a specific Node. * <p> * @author <a href="mailto:cliffc@h2o.ai"></a> * @version 1.0 */ final public class Key<T extends Keyed> extends Iced<Key<T>> implements Comparable { // The Key!!! public final byte[] _kb; // Key bytes, wire-line protocol transient final int _hash; // Hash on key alone (and not value) // The user keys must be ASCII, so the values 0..31 are reserved for system // keys. When you create a system key, please do add its number to this list static final byte BUILT_IN_KEY = 2; public static final byte JOB = 3; public static final byte VEC = 4; // Vec public static final byte CHK = 5; // Chunk public static final byte GRP = 6; // Vec.VectorGroup public static final byte HIDDEN_USER_KEY = 31; public static final byte USER_KEY = 32; // Indices into key header structure (key bytes) private static final int KEY_HEADER_TYPE = 0; private static final int KEY_HEADER_CUSTOM_HOMED = 1; // For Fluid Vectors, we have a special Key layout. // 0 - key type byte, one of VEC, CHK or GRP // 1 - homing byte, always -1/0xFF as these keys use the hash to figure their home out // 4 - Vector Group // 4 - Chunk # for CHK, or 0xFFFFFFFF for VEC static final int VEC_PREFIX_LEN = 1+1+4+4; /** True is this is a {@link Vec} Key. * @return True is this is a {@link Vec} Key */ public final boolean isVec() { return _kb.length > 0 && _kb[KEY_HEADER_TYPE] == VEC; } /** True is this is a {@link Chunk} Key. 
* @return True is this is a {@link Chunk} Key */ public final boolean isChunkKey() { return _kb.length > 0 && _kb[KEY_HEADER_TYPE] == CHK; } /** Returns the {@link Vec} Key from a {@link Chunk} Key. * @return Returns the {@link Vec} Key from a {@link Chunk} Key. */ public final Key getVecKey() { assert isChunkKey(); return water.fvec.Vec.getVecKey(this); } /** Convenience function to fetch key contents from the DKV. * @return null if the Key is not mapped, or an instance of {@link Keyed} */ public final T get() { Value val = DKV.get(this); return val == null ? null : (T)val.get(); } // *Desired* distribution function on keys int D() { int hsz = H2O.CLOUD.size(); if (0 == hsz) return -1; // Clients starting up find no cloud, be unable to home keys // See if this is a specifically homed Key if (!user_allowed() && custom_homed()) { assert _kb[KEY_HEADER_TYPE] != Key.CHK; // Chunks cannot be custom-homed H2ONode h2o = H2ONode.intern(_kb,2); // Reverse the home to the index int idx = h2o.index(); if( idx >= 0 ) return idx; // Else homed to a node which is no longer in the cloud! // Fall back to the normal home mode } // Distribution of Fluid Vectors is a special case. // Fluid Vectors are grouped into vector groups, each of which must have // the same distribution of chunks so that MRTask run over group of // vectors will keep data-locality. The fluid vecs from the same group // share the same key pattern + each has 4 bytes identifying particular // vector in the group. Since we need the same chunks end up on the same // node in the group, we need to skip the 4 bytes containing vec# from the // hash. Apart from that, we keep the previous mode of operation, so that // ByteVec would have first 64MB distributed around cloud randomly and then // go round-robin in 64MB chunks. if( _kb[KEY_HEADER_TYPE] == CHK ) { // Homed Chunk? if( _kb[KEY_HEADER_CUSTOM_HOMED] != -1 ) throw H2O.fail(); // For round-robin on Chunks in the following pattern: // 1 Chunk-per-node, until all nodes have 1 chunk (max parallelism). // Then 2 chunks-per-node, once around, then 4, then 8, then 16. // Getting several chunks-in-a-row on a single Node means that stencil // calculations that step off the end of one chunk into the next won't // force a chunk local - replicating the data. If all chunks round robin // exactly, then any stencil calc will double the cached volume of data // (every node will have it's own chunk, plus a cached next-chunk). // Above 16-chunks-in-a-row we hit diminishing returns. int cidx = UnsafeUtils.get4(_kb, 1 + 1 + 4); // Chunk index int x = cidx/hsz; // Multiples of cluster size // 0 -> 1st trip around the cluster; nidx= (cidx- 0*hsz)>>0 // 1,2 -> 2nd & 3rd trip; allocate in pairs: nidx= (cidx- 1*hsz)>>1 // 3,4,5,6 -> next 4 rounds; allocate in quads: nidx= (cidx- 3*hsz)>>2 // 7-14 -> next 8 rounds in octets: nidx= (cidx- 7*hsz)>>3 // 15+ -> remaining rounds in groups of 16: nidx= (cidx-15*hsz)>>4 int z = x==0 ? 0 : (x<=2 ? 1 : (x<=6 ? 2 : (x<=14 ? 3 : 4))); int nidx = (cidx-((1<<z)-1)*hsz)>>z; return (nidx&0x7FFFFFFF) % hsz; } // Easy Cheesy Stupid: return (_hash&0x7FFFFFFF) % hsz; } /** List of illegal characters which are not allowed in user keys. */ static final CharSequence ILLEGAL_USER_KEY_CHARS = " !@#$%^&*()+={}[]|\\;:\"'<>,/?"; // 64 bits of Cloud-specific cached stuff. It is changed atomically by any // thread that visits it and has the wrong Cloud. It has to be read *in the // context of a specific Cloud*, since a re-read may be for another Cloud. 
private transient volatile long _cache; private static final AtomicLongFieldUpdater<Key> _cacheUpdater = AtomicLongFieldUpdater.newUpdater(Key.class, "_cache"); // Accessors and updaters for the Cloud-specific cached stuff. // The Cloud index, a byte uniquely identifying the last 256 Clouds. It // changes atomically with the _cache word, so we can tell which Cloud this // data is a cache of. static int cloud( long cache ) { return (int)(cache>>> 0)&0x00FF; } // Shortcut node index for Home. // 'char' because I want an unsigned 16bit thing, limit of 65534 Cloud members. // -1 is reserved for a bare-key static int home ( long cache ) { return (int)(cache>>> 8)&0xFFFF; } static long build_cache(int cidx, int home) { return // Build the new cache word ((long) (cidx & 0xFF)) | ((long) (home & 0xFFFF) << 8); } int home ( H2O cloud ) { return home (cloud_info(cloud)); } /** True if the {@link #home_node} is the current node. * @return True if the {@link #home_node} is the current node */ public boolean home() { return home_node()==H2O.SELF; } /** The home node for this Key. * @return The home node for this Key. */ public H2ONode home_node( ) { H2O cloud = H2O.CLOUD; return cloud._memary[home(cloud)]; } // Update the cache, but only to strictly newer Clouds private boolean set_cache( long cache ) { while( true ) { // Spin till get it long old = _cache; // Read once at the start if( !H2O.larger(cloud(cache),cloud(old)) ) // Rolling backwards? // Attempt to set for an older Cloud. Blow out with a failure; caller // should retry on a new Cloud. return false; assert cloud(cache) != cloud(old) || cache == old; if( old == cache ) return true; // Fast-path cutout if( _cacheUpdater.compareAndSet(this,old,cache) ) return true; // Can fail if the cache is really old, and just got updated to a version // which is still not the latest, and we are trying to update it again. } } // Return the info word for this Cloud. Use the cache if possible long cloud_info( H2O cloud ) { long x = _cache; // See if cached for this Cloud. This should be the 99% fast case. if( cloud(x) == cloud._idx ) return x; // Cache missed! Probably it just needs (atomic) updating. // But we might be holding the stale cloud... // Figure out home Node in this Cloud char home = (char)D(); long cache = build_cache(cloud._idx,home); set_cache(cache); // Attempt to upgrade cache, but ignore failure return cache; // Return the magic word for this Cloud } // Construct a new Key. private Key(byte[] kb) { _kb = kb; // Quicky hash: http://en.wikipedia.org/wiki/Jenkins_hash_function int hash = 0; for( byte b : kb ) { hash += b; hash += (hash << 10); hash ^= (hash >> 6); } hash += (hash << 3); hash ^= (hash >> 11); hash += (hash << 15); _hash = hash; } // Make new Keys. Optimistically attempt interning, but no guarantee. public static <P extends Keyed> Key<P> make(byte[] kb) { Key key = new Key(kb); Key key2 = H2O.getk(key); // Get the interned version, if any if( key2 != null ) // There is one! Return it instead return key2; H2O cloud = H2O.CLOUD; // Read once key._cache = build_cache(cloud._idx-1,0); // Build a dummy cache with a fake cloud index key.cloud_info(cloud); // Now force compute & cache the real data return key; } /** A random string, useful as a Key name or partial Key suffix. * @return A random short string */ public static String rand() { UUID uid = UUID.randomUUID(); long l1 = uid.getLeastSignificantBits(); long l2 = uid. 
getMostSignificantBits(); return "_"+Long.toHexString(l1)+Long.toHexString(l2); } /** Factory making a Key from a String * @return Desired Key */ public static <P extends Keyed> Key<P> make(String s) { return make(decodeKeyName(s != null? s : rand())); } public static <P extends Keyed> Key<P> makeSystem(String s) { return make(s,BUILT_IN_KEY); } public static <P extends Keyed> Key<P> makeUserHidden(String s) { return make(s,HIDDEN_USER_KEY); } /** * Make a random key, homed to a given node. * @param node a node at which the new key is homed. * @return the new key */ public static <P extends Keyed> Key<P> make(H2ONode node) { return make(decodeKeyName(rand()),BUILT_IN_KEY,false,node); } public static <P extends Keyed> Key<P> make() { return make(rand()); } /** Factory making a homed system Key. Requires the initial system byte but * then allows a String for the remaining bytes. * * Requires specifying the home node of the key. The required specifies * if it is an error to name an H2ONode that is NOT in the Cloud, or if * some other H2ONode can be substituted. * @return the desired Key */ public static <P extends Keyed> Key<P> make(String s, byte systemType, boolean required, H2ONode home) { return make(decodeKeyName(s),systemType,required,home); } /** Factory making a system Key. Requires the initial system byte but * then allows a String for the remaining bytes. * @return the desired Key */ public static <P extends Keyed> Key<P> make(String s, byte systemType) { return make(decodeKeyName(s),systemType,false,null); } /** Factory making a homed system Key. Requires the initial system byte and * uses {@link #rand} for the remaining bytes. * * Requires specifying the home node of the key. The required specifies * if it is an error to name an H2ONode that is NOT in the Cloud, or if * some other H2ONode can be substituted. * @return the desired Key */ public static <P extends Keyed> Key<P> make(byte systemType, boolean required, H2ONode home) { return make(rand(),systemType,required,home); } // Make a Key which is homed to specific nodes. private static <P extends Keyed> Key<P> make(byte[] kb, byte systemType, boolean required, H2ONode home) { assert systemType < 32; // only system keys allowed home = home != null && H2O.CLOUD.contains(home) ? home : null; assert !required || home != null; // If homing is not required and home is not in cloud (or null), then ignore // Key byte layout is: // 0 - systemType, from 0-31 // 1 - is the key homed to a specific node? (0 or 1) // 2-n - if homed then IP4 (4+2 bytes) or IP6 (16+2 bytes) address // 2-5- 4 bytes of chunk#, or -1 for masters // n+ - repeat of the original kb AutoBuffer ab = new AutoBuffer(); ab.put1(systemType); ab.putZ(home != null); if (home != null) { home.write(ab); } ab.put4(-1); ab.putA1(kb, kb.length); return make(Arrays.copyOf(ab.buf(),ab.position())); } /** * Remove a Key from the DKV, including any embedded Keys. * @deprecated use {@link Keyed#remove(Key)} instead. Will be removed from version 3.30. */ public void remove() { Keyed.remove(this); } /** * @deprecated use {@link Keyed#remove(Futures)} instead. Will be removed from version 3.30. */ public Futures remove(Futures fs) { return Keyed.remove(this, fs, true); } /** True if a {@link #USER_KEY} and not a system key. 
* @return True if a {@link #USER_KEY} and not a system key */ public boolean user_allowed() { return type()==USER_KEY; } boolean custom_homed() { return _kb[KEY_HEADER_CUSTOM_HOMED] == 1; } /** System type/byte of a Key, or the constant {@link #USER_KEY} * @return Key type */ // Returns the type of the key. public int type() { return ((_kb[KEY_HEADER_TYPE]&0xff)>=32) ? USER_KEY : (_kb[KEY_HEADER_TYPE]&0xff); } /** Return the classname for the Value that this Key points to, if any (e.g., "water.fvec.Frame"). */ public String valueClass() { // Because Key<Keyed> doesn't have concrete parameterized subclasses (e.g. // class FrameKey extends Key<Frame>) we can't get the type parameter at // runtime. See: // http://www.javacodegeeks.com/2013/12/advanced-java-generics-retreiving-generic-type-arguments.html // // Therefore, we have to fetch the type of the item the Key is pointing to at runtime. Value v = DKV.get(this); if (null == v) return null; else return v.className(); } /** Return the base classname (not including the package) for the Value that this Key points to, if any (e.g., "Frame"). */ public String valueClassSimple() { String vc = this.valueClass(); if (null == vc) return null; String[] elements = vc.split("\\."); return elements[elements.length - 1]; } static final char MAGIC_CHAR = '$'; // Used to hexalate displayed keys private static final char[] HEX = "0123456789abcdef".toCharArray(); /** Converts the key to HTML displayable string. * * For user keys returns the key itself, for system keys returns their * hexadecimal values. * * @return key as a printable string */ @Override public String toString() { int len = _kb.length; while( --len >= 0 ) { char a = (char) _kb[len]; if (' ' <= a && a <= '#') continue; // then we have $ which is not allowed if ('%' <= a && a <= '~') continue; // already in the one above //if( 'a' <= a && a <= 'z' ) continue; //if( 'A' <= a && a <= 'Z' ) continue; //if( '0' <= a && a <= '9' ) continue; break; } if (len>=0) { StringBuilder sb = new StringBuilder(); sb.append(MAGIC_CHAR); for( int i = 0; i <= len; ++i ) { byte a = _kb[i]; sb.append(HEX[(a >> 4) & 0x0F]); sb.append(HEX[(a >> 0) & 0x0F]); } sb.append(MAGIC_CHAR); for( int i = len + 1; i < _kb.length; ++i ) sb.append((char)_kb[i]); return sb.toString(); } else { return new String(_kb); } } private static byte[] decodeKeyName(String what) { if( what==null ) return null; if( what.length()==0 ) return null; if (what.charAt(0) == MAGIC_CHAR) { int len = what.indexOf(MAGIC_CHAR,1); if( len < 0 ) throw new IllegalArgumentException("No matching magic '"+MAGIC_CHAR+"', key name is not legal"); String tail = what.substring(len+1); byte[] res = new byte[(len-1)/2 + tail.length()]; int r = 0; for( int i = 1; i < len; i+=2 ) { char h = what.charAt(i); char l = what.charAt(i+1); h -= Character.isDigit(h) ? '0' : ('a' - 10); l -= Character.isDigit(l) ? '0' : ('a' - 10); res[r++] = (byte)(h << 4 | l); } System.arraycopy(StringUtils.bytesOf(tail), 0, res, r, tail.length()); return res; } else { byte[] res = new byte[what.length()]; for( int i=0; i<res.length; i++ ) res[i] = (byte)what.charAt(i); return res; } } @Override public int hashCode() { return _hash; } @Override public boolean equals( Object o ) { if( this == o ) return true; if( o == null ) return false; Key k = (Key)o; if( _hash != k._hash ) return false; return Arrays.equals(k._kb,_kb); } /** Lexically ordered Key comparison, so Keys can be sorted. Modestly expensive. 
*/ @Override public int compareTo(Object o) { assert (o instanceof Key); return this.toString().compareTo(o.toString()); } public static final AutoBuffer write_impl(Key k, AutoBuffer ab) {return ab.putA1(k._kb);} public static final Key read_impl(Key k, AutoBuffer ab) {return make(ab.getA1());} public static final AutoBuffer writeJSON_impl( Key k, AutoBuffer ab ) { ab.putJSONStr("name",k.toString()); ab.put1(','); ab.putJSONStr("type", ReflectionUtils.findActualClassParameter(k.getClass(), 0).getSimpleName()); return ab; } }
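A short usage sketch of the Key API documented above, assuming an H2O node is already running (home_node() and DKV lookups need a cloud); the key name "my_frame" is only illustrative:

import water.Key;
import water.fvec.Frame;

public class KeyUsageSketch {
  public static void main(String[] args) {
    Key<Frame> named = Key.make("my_frame");   // user key built from a plain ASCII name
    Key<Frame> random = Key.make();            // random user key derived from a UUID
    System.out.println(named + " user_allowed=" + named.user_allowed() + " type=" + named.type());
    System.out.println(random + " homed to " + random.home_node());
    Frame fr = named.get();                    // DKV fetch; null until a Frame is DKV.put under this key
    System.out.println(fr == null ? "nothing stored under " + named : fr.toString());
  }
}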
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/KeySnapshot.java
package water; import java.util.ArrayList; import java.util.Arrays; import java.util.Map; import java.util.TreeMap; /** * Convenience class for easy access to user-visible keys in the cloud with enabled caching. * * This class represents snapshot of user keys currently stored in the cloud and contains methods to retrieve it. * It contains all user keys stored in the cloud at one particular point in time (marked by timestamp member variable). * Snapshot does not contain the actual values and no values are fetched from remote by requesting new snapshot. * * KeySnapshot itself is a set of user keys with some additional info (e.g. type and size) and some convenience functions * supporting filtering and instantiating of classes pointed to by the keys * * @author tomas */ public class KeySnapshot { /** Class to filter keys from the snapshot. */ public abstract static class KVFilter { /** @param k KeyInfo to be filtered * @return true if the key should be included in the new (filtered) set. */ public abstract boolean filter(KeyInfo k); } /** Class containing information about user keys. * Contains the actual key and all interesting information except the data itself. */ public static final class KeyInfo extends Iced implements Comparable<KeyInfo>{ public final Key _key; public final int _type; public final int _sz; public final byte _backEnd; public KeyInfo(Key k, Value v){ _key = k; _type = v.type(); _sz = v._max; _backEnd = v.backend(); } @Override public int compareTo(KeyInfo ki) { return _key.compareTo(ki._key);} public boolean isFrame() { return _type == TypeMap.FRAME; } public boolean isLockable(){ return TypeMap.theFreezable(_type) instanceof Lockable; } } private static final long _updateInterval = 1000; private static volatile KeySnapshot _cache; public final KeyInfo [] _keyInfos; /** (local) Time of creation. */ public final long timestamp; /** @return cached version of KeySnapshot */ public static KeySnapshot cache(){return _cache;} /** Filter the snapshot providing custom filter. * Only the keys for which filter returns true will be present in the new snapshot. * @param kvf The filter * @return filtered snapshot */ public KeySnapshot filter(KVFilter kvf){ ArrayList<KeyInfo> res = new ArrayList<>(); for(KeyInfo kinfo: _keyInfos) if(kvf.filter(kinfo))res.add(kinfo); return new KeySnapshot(res.toArray(new KeyInfo[res.size()])); } KeySnapshot(KeyInfo[] snapshot){ _keyInfos = snapshot; timestamp = System.currentTimeMillis(); } /** @return array of all keys in this snapshot. */ public Key[] keys(){ Key [] res = new Key[_keyInfos.length]; for(int i = 0; i < _keyInfos.length; ++i) res[i] = _keyInfos[i]._key; return res; } /** Return all the keys of the given class. * @param clz Class * @return array of keys in this snapshot with the given class */ public static Key[] globalKeysOfClass(final Class clz) { return KeySnapshot.globalSnapshot().filter(new KeySnapshot.KVFilter() { @Override public boolean filter(KeySnapshot.KeyInfo k) { return Value.isSubclassOf(k._type, clz); } }).keys(); } /** @param c Class objects of which should be instantiated * @param <T> Generic class being fetched * @return all objects (of the proper class) pointed to by this key snapshot (and still present in the K/V at the time of invocation). */ public <T extends Iced> Map<String, T> fetchAll(Class<T> c) { return fetchAll(c,false,0,Integer.MAX_VALUE);} /** @param c Class objects of which should be instantiated * @param <T> Generic class being fetched * @param exact - subclasses will not be included if set. 
* @return all objects (of the proper class) pointed to by this key snapshot (and still present in the K/V at the time of invocation). */ public <T extends Iced> Map<String, T> fetchAll(Class<T> c, boolean exact) { return fetchAll(c,exact,0,Integer.MAX_VALUE);} /** @param c Class objects of which should be instantiated * @param <T> Generic class being fetched * @param exact - subclasses will not be included if set. * @param offset - skip first offset values matching the given type * @param limit - produce only up to the limit objects. * @return all objects (of the proper class) pointed to by this key snapshot (and still present in the K/V at the time of invocation). */ public <T extends Iced> Map<String, T> fetchAll(Class<T> c, boolean exact, int offset, int limit) { TreeMap<String, T> res = new TreeMap<>(); final int typeId = TypeMap.onIce(c.getName()); for (KeyInfo kinfo : _keyInfos) { if (kinfo._type == typeId || (!exact && Value.isSubclassOf(kinfo._type, c))) { if (offset > 0) { --offset; continue; } Value v = DKV.get(kinfo._key); if (v != null) { T t = v.get(); res.put(kinfo._key.toString(), t); if (res.size() == limit) break; } } } return res; } /** * Get the user keys from this node only. * Includes non-local keys which are cached locally. * @return KeySnapshot containing keys from the local K/V. */ public static KeySnapshot localSnapshot(){return localSnapshot(false);} /** * Get the user keys from this node only. * @param homeOnly - exclude the non-local (cached) keys if set * @return KeySnapshot containing keys from the local K/V. */ public static KeySnapshot localSnapshot(boolean homeOnly){ Object [] kvs = H2O.STORE.raw_array(); ArrayList<KeyInfo> res = new ArrayList<>(); for(int i = 2; i < kvs.length; i+= 2){ Object ok = kvs[i]; if( !(ok instanceof Key ) ) continue; // Ignore tombstones and Primes and null's Key key = (Key )ok; if(!key.user_allowed())continue; if(homeOnly && !key.home())continue; // Raw array can contain regular and also wrapped values into Prime marker class: // - if we see Value object, create instance of KeyInfo // - if we do not see Value object directly (it can be wrapped in Prime marker class), // try to unwrap it via calling STORE.get (~H2O.get) and then // look at wrapped value again. Value val = Value.STORE_get(key); if( val == null ) continue; res.add(new KeyInfo(key,val)); } final KeyInfo [] arr = res.toArray(new KeyInfo[res.size()]); Arrays.sort(arr); return new KeySnapshot(arr); } /** * @return KeySnapshot containing user keys from all the nodes. */ public static KeySnapshot globalSnapshot(){ return globalSnapshot(-1);} /** * Cache-enabled call to get global key snapshot. * User can provide time tolerance to indicate a how old the snapshot can be. * @param timeTolerance - tolerated age of the cache in millis. * If the last snapshot is bellow this value, cached version will be returned immediately. * Otherwise new snapshot must be obtained by from all nodes. * @return KeySnapshot containing user keys from all the nodes. 
*/ public static KeySnapshot globalSnapshot(long timeTolerance){ KeySnapshot res = _cache; final long t = System.currentTimeMillis(); if(res == null || (t - res.timestamp) > timeTolerance) res = new KeySnapshot((new GlobalUKeySetTask().doAllNodes()._res)); else if(t - res.timestamp > _updateInterval) H2O.submitTask(new H2O.H2OCountedCompleter() { @Override public void compute2() { new GlobalUKeySetTask().doAllNodes(); } }); return res; } // task to grab all user keys (+ info) form all around the cloud // updates the cache when done private static class GlobalUKeySetTask extends MRTask<GlobalUKeySetTask> { KeyInfo [] _res; GlobalUKeySetTask() { super(H2O.MIN_HI_PRIORITY); } @Override public void setupLocal(){ _res = localSnapshot(true)._keyInfos;} @Override public void reduce(GlobalUKeySetTask gbt){ if(_res == null)_res = gbt._res; else if(gbt._res != null){ // merge sort keys together KeyInfo [] res = new KeyInfo[_res.length + gbt._res.length]; int j = 0, k = 0; for(int i = 0; i < res.length; ++i) res[i] = j < gbt._res.length && (k == _res.length || gbt._res[j].compareTo(_res[k]) < 0)?gbt._res[j++]:_res[k++]; _res = res; } } @Override public void postGlobal(){ _cache = new KeySnapshot(_res); } } }
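A sketch of the two common ways to consume a KeySnapshot, assuming a cloud with some user-visible Frames already in the DKV:

import java.util.Map;
import water.Key;
import water.KeySnapshot;
import water.fvec.Frame;

public class SnapshotSketch {
  static void listFrames() {
    // Keys only: cheap, cache-backed view of user keys across the cloud.
    for (Key k : KeySnapshot.globalKeysOfClass(Frame.class))
      System.out.println("frame key: " + k);

    // Keys plus values: fetches each Frame still present in the DKV at call time.
    Map<String, Frame> frames = KeySnapshot.globalSnapshot().fetchAll(Frame.class);
    System.out.println("fetched " + frames.size() + " frames");
  }
}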
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/Keyed.java
package water; import water.api.schemas3.KeyV3; import water.fvec.*; import water.util.Log; /** Iced, with a Key. Support for DKV removal. */ public abstract class Keyed<T extends Keyed> extends Iced<T> { /** Key mapping a Value which holds this object; may be null */ public Key<T> _key; public Keyed() { _key = null; } // NOTE: every Keyed that can come out of the REST API has to have a no-arg constructor. public Keyed( Key<T> key ) { _key = key; } public Key<T> getKey() { return _key; } // --- /** Remove this Keyed object, and all subparts; blocking. */ public final void remove() { remove(true); } /** Remove this Keyed object, including all subparts if cascade = true; blocking. */ public final void remove(boolean cascade) { remove(new Futures(), cascade).blockForPending(); } /** Remove this Keyed object, and all subparts. */ public final Futures remove( Futures fs ) { return remove(fs, true); } /** Remove this Keyed object, including all subparts if cascade = true. */ public final Futures remove( Futures fs, boolean cascade ) { fs = remove_self_key_impl(fs); return remove_impl(fs, cascade); } /** * @deprecated Better override {@link #remove_impl(Futures, boolean)} instead */ @Deprecated protected Futures remove_impl(Futures fs) { return fs; } /** Override to remove subparts, but not self, of composite Keyed objects. * Examples include {@link Vec} (removing associated {@link Chunk} keys) * and {@link Frame} (removing associated {@link Vec} keys.) */ protected Futures remove_impl(Futures fs, boolean cascade) { return remove_impl(fs); } /** Remove my own key from DKV. */ protected Futures remove_self_key_impl(Futures fs) { if (_key != null) DKV.remove(_key,fs); return fs; } /** * Removes the Keyed object associated to the key, and all subparts; blocking. * @return true if there was anything to be removed. **/ public static boolean remove( Key k ) { if (k==null) return false; Value val = DKV.get(k); if (val==null) return false; ((Keyed)val.get()).remove(); return true; } public static void removeQuietly(Key k) { try { remove(k); } catch (Exception e) { String reason = e.getMessage() != null ? " Reason: " + e.getMessage() : ""; Log.warn("Failed to correctly release memory associated with key=" + k + "." + reason); Log.debug("Failed to remove key " + k, e); } } /** Remove the Keyed object associated to the key, and all subparts. */ public static Futures remove( Key k, Futures fs, boolean cascade) { if (k==null) return fs; Value val = DKV.get(k); if (val==null) return fs; return ((Keyed)val.get()).remove(fs, cascade); } // --- /** Write this Keyed object, and all nested Keys. */ public AutoBuffer writeAll(AutoBuffer ab) { return writeAll_impl(ab.put(this)); } // Override this to write out subparts protected AutoBuffer writeAll_impl(AutoBuffer ab) { return ab; } /** Read a Keyed object, and all nested Keys. Nested Keys are injected into the K/V store * overwriting what was there before. */ public static Keyed readAll(AutoBuffer ab) { Futures fs = new Futures(); Keyed k = ab.getKey(fs); fs.blockForPending(); // Settle out all internal Key puts return k; } // Override this to read in subparts protected Keyed readAll_impl(AutoBuffer ab, Futures fs) { return this; } /** High-quality 64-bit checksum of the <i>content</i> of the object. Similar * to hashcode(), but a long to reduce the chance of hash clashes. For * composite objects this should be defined using the subcomponents' checksums * (or hashcodes if not available). 
If two Keyed objects have the same * checksum() there should be a 1 - 1/2^64 chance that they are the same * object by value. */ protected long checksum_impl() { throw H2O.fail("Checksum not implemented by class "+this.getClass()); } protected long checksum_impl(boolean noCache) { return checksum_impl(); } private long _checksum; // Efficiently fetch the checksum, setting on first access public final long checksum() { if( _checksum!=0 ) return _checksum; long x = checksum_impl(false); if( x==0 ) x=1; return (_checksum=x); } public final long checksum(boolean noCache) { if (noCache) return checksum_impl(noCache); return checksum(); } // TODO: REMOVE THIS! It's not necessary; we can do it with reflection. public Class<? extends KeyV3> makeSchema() { throw H2O.fail("Override in subclasses which can be the result of a Job"); } }
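A sketch contrasting the cascading and non-cascading removal paths described above; dropFrameOnly assumes the Frame shares its Vecs with other objects, which is why cascade is set to false:

import water.Futures;
import water.Key;
import water.Keyed;
import water.fvec.Frame;

public class RemovalSketch {
  // Blocking removal of the value and all of its subparts (for a Frame: its Vecs and Chunks).
  static void dropEverything(Key<Frame> key) {
    boolean removed = Keyed.remove(key);
    System.out.println(removed ? "removed" : "nothing stored under " + key);
  }

  // Remove only the Frame wrapper and leave its (possibly shared) Vecs in the DKV.
  static void dropFrameOnly(Frame fr) {
    fr.remove(new Futures(), /*cascade*/ false).blockForPending();
  }
}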
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/ListenerService.java
package water;

/** Service used to broadcast messages to registered H2O listener extensions. */
public class ListenerService {
  private static final ListenerService service = new ListenerService();

  private ListenerService() { }

  public static ListenerService getInstance() { return service; }

  /** Forward a message and optional payload to every registered {@link H2OListenerExtension}. */
  public void report(String msg, Object... data) {
    for (H2OListenerExtension ext : ExtensionManager.getInstance().getListenerExtensions()) {
      ext.report(msg, data);
    }
  }
}
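For illustration, a hedged sketch of reporting through the service; the "model_built" event name and its payload are made up here, since listener extensions define their own message vocabulary:

import water.ListenerService;

public class ListenerSketch {
  static void notifyModelBuilt(String modelKey, long trainingTimeMs) {
    // No-op when no H2OListenerExtension is registered with the ExtensionManager.
    ListenerService.getInstance().report("model_built", modelKey, trainingTimeMs);
  }
}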
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/LocalMR.java
package water; import jsr166y.CountedCompleter; import java.util.concurrent.CancellationException; /** * Created by tomas on 11/5/16. * * Generic lightewight Local MRTask utility. Will launch requested number of tasks (on local node!), organized in a binary tree fashion, similar to MRTask. * Will attempt to share local results (MrFun instances) if the previous task has completed before launching current task. * * User expected to pass in MrFun implementing map(id), reduce(MrFun) and makeCopy() functions. * At the end of the task, MrFun holds the result. */ public class LocalMR<T extends MrFun<T>> extends H2O.H2OCountedCompleter<LocalMR> { private int _lo; private int _hi; MrFun _mrFun; volatile Throwable _t; private volatile boolean _cancelled; private LocalMR<T> _root; public LocalMR(MrFun mrt, int nthreads){this(mrt,nthreads,null);} public LocalMR(MrFun mrt, H2O.H2OCountedCompleter cc){this(mrt,H2O.NUMCPUS,cc);} public LocalMR(MrFun mrt, int nthreads, H2O.H2OCountedCompleter cc){ super(cc); if(nthreads <= 0) throw new IllegalArgumentException("nthreads must be positive"); _root = this; _mrFun = mrt; // used as golden copy and also will hold the result after task has finished. _lo = 0; _hi = nthreads; _prevTsk = null; } public LocalMR<T> withNoPrevTaskReuse() { _reusePrevTsk = false; return this; } public boolean isReproducible() { return !_reusePrevTsk; } private LocalMR(LocalMR src, LocalMR prevTsk,int lo, int hi) { super(src); _root = src._root; _reusePrevTsk = src._reusePrevTsk; _prevTsk = _reusePrevTsk ? prevTsk : null; _lo = lo; _hi = hi; _cancelled = src._cancelled; } private LocalMR<T> _left; private LocalMR<T> _rite; private boolean _reusePrevTsk = true; private final LocalMR<T> _prevTsk; //will attempt to share MrFun with "previous task" if it's done by the time we start volatile boolean completed; // this task and all it's children completed volatile boolean started; // this task and all it's children completed public boolean isCancelRequested(){return _root._cancelled;} private int mid(){ return _lo + ((_hi - _lo) >> 1);} @Override public final void compute2() { started = true; if(_root._cancelled){ tryComplete(); return; } int mid = mid(); assert _hi > _lo; if (_hi - _lo >= 2) { _left = new LocalMR(this, _prevTsk, _lo, mid); if (mid < _hi) { addToPendingCount(1); (_rite = new LocalMR(this, _left, mid, _hi)).fork(); } _left.compute2(); } else { if(_prevTsk != null && _prevTsk.completed){ _mrFun = _prevTsk._mrFun; _prevTsk._mrFun = null; } else if(this != _root) _mrFun = _root._mrFun.makeCopy(); try { _mrFun.map(mid); } catch (Throwable t) { if (_root._t == null) { _root._t = t; _root._cancelled = true; } } tryComplete(); } } @Override public final void onCompletion(CountedCompleter cc) { try { if (_cancelled) { assert this == _root; completeExceptionally(_t == null ? new CancellationException() : _t); // instead of throw return; } if (_root._cancelled) return; if (_left != null && _left._mrFun != null && _mrFun != _left._mrFun) { assert _left.completed; if (_mrFun == null) _mrFun = _left._mrFun; else _mrFun.reduce(_left._mrFun); } if (_rite != null && _mrFun != _rite._mrFun) { assert _rite.completed; if (_mrFun == null) _mrFun = _rite._mrFun; else _mrFun.reduce(_rite._mrFun); } _left = null; _rite = null; completed = true; } catch(Throwable t){ if(this == _root){ completeExceptionally(t); // instead of throw } else if (_root._t == null) { _root._t = t; _root._cancelled = true; } } } }
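A sketch of the MrFun contract LocalMR expects; map(id), reduce and makeCopy are taken from the javadoc above, but MrFun itself is not shown in this dump, so the exact signatures are assumptions. PartialSum and its striping scheme are hypothetical; once the task completes, the result is read back from the original MrFun instance:

import water.H2O;
import water.LocalMR;
import water.MrFun;

class PartialSum extends MrFun<PartialSum> {
  final long[] _data;   // shared read-only input
  long _sum;            // per-copy partial sum, reduced back into the original instance

  PartialSum(long[] data) { _data = data; }

  @Override public void map(int id) {
    // Each worker id sums a disjoint stripe of the input.
    for (int i = id; i < _data.length; i += H2O.NUMCPUS) _sum += _data[i];
  }

  @Override public void reduce(PartialSum other) { _sum += other._sum; }

  @Override public PartialSum makeCopy() { return new PartialSum(_data); }  // copies share _data, start at 0
}

// Usage (blocking on the local node's ForkJoin pool):
//   PartialSum fun = new PartialSum(values);
//   H2O.submitTask(new LocalMR<PartialSum>(fun, H2O.NUMCPUS)).join();
//   long total = fun._sum;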
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/Lockable.java
package water; import water.util.Log; import java.util.Arrays; /** Lockable Keys - Keys locked during long running {@link Job}s, to prevent * overwriting in-use keys. E.g. model-building: expected to read-lock input * {@link water.fvec.Frame}s, and write-lock the output {@link hex.Model}. * Parser should write-lock the output Frame, to guard against double-parsing. * This is a simple cooperative distributed locking scheme. Because we need * <em>distributed</em> locking, the normal Java locks do not work. Because * we cannot enforce good behavior, this is a <em>cooperative</em> scheme * only. * * Supports: <ul> * <li>lock-and-delete-old-and-update (for new Keys)</li> * <li>lock-and-delete (for removing old Keys)</li> * <li>unlock</li> * </ul> * * @author <a href="mailto:cliffc@h2o.ai"></a> * @version 1.0 */ public abstract class Lockable<T extends Lockable<T>> extends Keyed<T> { /** List of Job Keys locking this Key. * <ul> * <li>Write-locker job is in {@code _lockers[0 ]}. Can be null locker.</li> * <li>Read -locker jobs are in {@code _lockers[1+]}.</li> * <li>Unlocked has _lockers equal to null.</li> * <li>Only 1 situation will be true at a time; atomically updated.</li> * <li>Transient, because this data is only valid on the master node.</li> * </ul> */ public transient Key<Job> _lockers[]; /** Create a Lockable object, if it has a {@link Key}. */ public Lockable( Key<T> key ) { super(key); } // ----------- // Atomic create+overwrite of prior key. // If prior key exists, block until acquire a write-lock. // Then call remove, removing all of a prior key. // The replace this object as the new Lockable, still write-locked. // "locker" can be null, meaning the special no-Job locker; for use by expected-fast operations // // Example: write-lock & remove an old Frame, and replace with a new locked Frame // Local-Node Master-Node // (1) new,old -->write_lock(job)--> old // (2) new,old.waiting... new,old+job-locked atomic xtn loop // (3) old.remove onSuccess // (4) new <--update success <-- new+job-locked /** Write-lock {@code this._key} by {@code job_key}. * Throws IAE if the Key is already locked. * @return the old POJO mapped to this Key, generally for deletion. */ public Lockable write_lock() { return write_lock((Key<Job>)null); } public Lockable write_lock( Job job ) { return write_lock(job._key); } public Lockable write_lock( Key<Job> job_key ) { Log.debug("write-lock "+_key+" by job "+job_key); return ((PriorWriteLock)new PriorWriteLock(job_key).invoke(_key))._old; } /** Write-lock {@code this._key} by {@code job_key}, and delete any prior mapping. * Throws IAE if the Key is already locked. * @return self, locked by job_key */ public T delete_and_lock( ) { return delete_and_lock((Key<Job>)null); } public T delete_and_lock( Job job ) { return (T)delete_and_lock(job._key); } public T delete_and_lock( Key<Job> job_key) { return delete_and_lock(job_key, false); // internal delete, by default don't remove dependencies as they're often still needed. } public T delete_and_lock( Key<Job> job_key, boolean cascade) { Lockable old = write_lock(job_key); if( old != null ) { Log.debug("lock-then-clear "+_key+" by job "+job_key); old.remove_impl(new Futures(), cascade).blockForPending(); } return (T)this; } /** Write-lock key and delete; blocking. * Throws IAE if the key is already locked. */ public static void delete( Key key ) { Value val = DKV.get(key); if( val==null ) return; ((Lockable)val.get()).delete(); } /** Write-lock 'this' and delete; blocking. * Throws IAE if the _key is already locked. 
* * Subclasses that need custom deletion logic should override {@link Keyed#remove_impl(Futures, boolean)} * as by contract, the only difference between {@link #delete()} and {@link #remove()} * is that `delete` first write-locks `this`. */ public final void delete( ) { delete(true); } public final void delete(boolean cascade) { delete(null, new Futures(), cascade).blockForPending(); } /** Write-lock 'this' and delete. * Throws IAE if the _key is already locked. * * Subclasses that need custom deletion logic should override {@link Keyed#remove_impl(Futures, boolean)}. */ public final Futures delete(Key<Job> job_key, Futures fs, boolean cascade) { if( _key != null ) { Log.debug("lock-then-delete "+_key+" by job "+job_key); new PriorWriteLock(job_key).invoke(_key); } return remove(fs, cascade); } // Obtain the write-lock on _key, which may already exist, using the current 'this'. private final class PriorWriteLock extends TAtomic<Lockable> { private final Key<Job> _job_key; // Job doing the locking private Lockable _old; // Return the old thing, for deleting later private PriorWriteLock( Key<Job> job_key ) { _job_key = job_key; } @Override public Lockable atomic(Lockable old) { _old = old; if( old != null ) { // Prior Lockable exists? assert !old.is_wlocked(_job_key) : "Key "+_key+" already locked (or deleted); lks="+Arrays.toString(old._lockers); // No double locking by same job if( old.is_locked(_job_key) ) // read-locked by self? (double-write-lock checked above) old.set_unlocked(old._lockers,_job_key); // Remove read-lock; will atomically upgrade to write-lock if( !old.is_unlocked() ) // Blocking for some other Job to finish??? throw new IllegalArgumentException(old.getClass()+" "+_key+" is already in use. Unable to use it now. Consider using a different destination name."); } // Update & set the new value set_write_lock(_job_key); return Lockable.this; } } // ----------- /** Atomically get a read-lock on Key k, preventing future deletes or updates */ public static void read_lock( Key k, Job job ) { read_lock(k,job._key); } public static void read_lock( Key k, Key<Job> job_key ) { Value val = DKV.get(k); if( val.isLockable() ) ((Lockable)val.get()).read_lock(job_key); // Lockable being locked } /** Atomically get a read-lock on this, preventing future deletes or updates */ public void read_lock( Key<Job> job_key ) { if( _key != null ) { Log.debug("shared-read-lock "+_key+" by job "+job_key); new ReadLock(job_key).invoke(_key); } } // Obtain read-lock static private class ReadLock extends TAtomic<Lockable> { final Key<Job> _job_key; // Job doing the unlocking ReadLock( Key<Job> job_key ) { _job_key = job_key; } @Override public Lockable atomic(Lockable old) { if( old == null ) throw new IllegalArgumentException("Nothing to lock!"); if( old.is_wlocked() ) throw new IllegalArgumentException( old.getClass()+" "+_key+" is being created; Unable to read it now."); old.set_read_lock(_job_key); return old; } } /** Atomically convert an existing write-lock on this to a read-lock, preventing future deletes or updates */ public void write_lock_to_read_lock(Key<Job> job_key) { if( _key != null ) { Log.debug("convert write-lock to read-lock " + _key + " by job " + job_key); new WriteLockToReadLock(job_key).invoke(_key); } } // Convert an existing write-lock to a read-lock static private class WriteLockToReadLock extends TAtomic<Lockable> { final Key<Job> _job_key; // Job doing the unlocking WriteLockToReadLock( Key<Job> job_key ) { _job_key = job_key; } @Override public Lockable atomic(Lockable old) { 
if (old == null) throw new IllegalArgumentException("Nothing to lock!"); if (!old.is_wlocked()) throw new IllegalArgumentException(old.getClass() + " " + _key + " is not write-locked; Unable to convert it to read-locked."); old.convert_write_to_read_lock(_job_key); return old; } } // ----------- /** Atomically set a new version of self, without changing the locking. Typically used * to upgrade a write-locked Model to a newer version with more training iterations. */ public T update( ) { return update((Key<Job>)null); } public T update( Job job ) { return (T)update(job._key); } public T update( Key<Job> job_key ) { Log.debug("update write-locked "+_key+" by job "+job_key); new Update(job_key).invoke(_key); return (T)this; // Flow-coding } // Freshen 'this' and leave locked private class Update extends TAtomic<Lockable> { final Key<Job> _job_key; // Job doing the unlocking Update( Key<Job> job_key ) { _job_key = job_key; } @Override public Lockable atomic(Lockable old) { assert old != null : "Cannot update - Lockable is null!"; assert old.is_wlocked() : "Cannot update - Lockable is not write-locked!"; _lockers = old._lockers; // Keep lock state return Lockable.this; // Freshen this } } // ----------- /** Atomically set a new version of self and unlock. */ public T unlock( ) { return unlock(null,true); } public T unlock( Job job ) { return (T)unlock(job._key,true); } public T unlock( Key<Job> job_key ) { return unlock(job_key,true); } public T unlock( Key<Job> job_key, boolean exact ) { if( _key != null ) { Log.debug("unlock "+_key+" by job "+job_key); new Unlock(job_key,exact).invoke(_key); } return (T)this; } // Unlock and if write-locked also freshen 'this' private class Unlock extends TAtomic<Lockable<T>> { final Key<Job> _job_key; // Job doing the unlocking // Most uses want exact semantics: assert if not locked when unlocking. // Crash-cleanup code sometimes has a hard time knowing if the crash was // before locking or after, so allow a looser version which quietly unlocks // in all situations. final boolean _exact; // Complain if not locked when unlocking Unlock( Key<Job> job_key, boolean exact ) { _job_key = job_key; _exact = exact;} @Override public Lockable<T> atomic(Lockable<T> old) { assert !_exact || old != null : "Trying to unlock null! (key = " + _key + ")"; assert !_exact || old.is_locked(_job_key) : "Can't unlock: Object " + _key + " is not locked!"; final Lockable<T> l = old == null || old.is_wlocked() ? Lockable.this : old; if (_exact || l.is_locked(_job_key)) { // Update & set the new value l.set_unlocked(old._lockers, _job_key); } return l; } } // ----------- // Accessors for locking state. Minimal self-checking; primitive results private boolean is_locked(Key<Job> job_key) { if( _lockers==null ) return false; for( int i=(_lockers.length==1?0:1); i<_lockers.length; i++ ) { Key k = _lockers[i]; if( job_key==k || (job_key != null && k != null && job_key.equals(k)) ) return true; } return false; } private boolean is_wlocked() { return _lockers!=null && _lockers.length==1; } private boolean is_wlocked(Key<Job> job_key) { return is_wlocked() && (_lockers[0] == job_key || (_lockers[0] != null && _lockers[0].equals(job_key))); } private boolean is_unlocked() { return _lockers== null; } private void set_write_lock( Key<Job> job_key ) { _lockers=new Key[]{job_key}; assert is_locked(job_key); } private void set_read_lock(Key<Job> job_key) { assert !is_wlocked(); // not write locked _lockers = _lockers == null ? 
new Key[2] : Arrays.copyOf(_lockers,_lockers.length+1); _lockers[_lockers.length-1] = job_key; assert is_locked(job_key); } private void convert_write_to_read_lock(Key<Job> job_key) { assert is_wlocked(); _lockers = new Key[]{null, job_key}; assert is_locked(job_key); } private void set_unlocked(Key lks[], Key<Job> job_key) { if( lks.length==1 ) { // Is write-locked? assert job_key==lks[0] || job_key.equals(lks[0]); _lockers = null; // Then unlocked } else if( lks.length==2 ) { // One reader assert lks[0]==null; // Not write-locked assert lks[1]==job_key || (job_key != null && job_key.equals(lks[1])); _lockers = null; // So unlocked } else { // Else one of many readers assert lks.length>2; _lockers = Arrays.copyOf(lks,lks.length-1); for( int i=1; i<lks.length; i++ ) if( (job_key != null && job_key.equals(lks[i])) || (job_key == null && lks[i] == null) ) { if( i < _lockers.length ) _lockers[i] = lks[lks.length-1]; break; } } } /** Force-unlock (break a lock) all lockers; useful in some debug situations. */ public void unlock_all() { if( _key != null ) for (Key k : _lockers) new UnlockSafe(k).invoke(_key); } private class UnlockSafe extends TAtomic<Lockable> { final Key<Job> _job_key; // potential job doing the unlocking UnlockSafe( Key job_key ) { _job_key = job_key; } @Override public Lockable atomic(Lockable old) { if (old.is_locked(_job_key)) set_unlocked(old._lockers,_job_key); return Lockable.this; } } }
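A sketch of the cooperative lock lifecycle described above, mirroring the model-building pattern: read-lock the input, write-lock the output destination, then unlock both. The job, input and destKey arguments are assumed to exist in the DKV already; buildOutput is a hypothetical helper, not an API of this class:

import water.Job;
import water.Key;
import water.Lockable;
import water.fvec.Frame;

public class LockingSketch {
  static Frame buildOutput(Job<Frame> job, Frame input, Key<Frame> destKey) {
    Lockable.read_lock(input._key, job);  // prevent deletes/updates of the input while we work
    Frame out = new Frame(destKey);
    out.delete_and_lock(job);             // write-lock destKey, deleting any prior mapping
    try {
      // ... fill `out` with columns derived from `input` ...
      out.update(job);                    // optionally freshen the locked value mid-build
      return out.unlock(job);             // publish the result and release the write lock
    } finally {
      input.unlock(job);                  // always release the shared read lock on the input
    }
  }
}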
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/MRTask.java
package water; import jsr166y.CountedCompleter; import jsr166y.ForkJoinPool; import water.fvec.*; import water.util.DistributedException; import water.util.FrameUtils; import water.util.PrettyPrint; import water.fvec.Vec.VectorGroup; import water.fvec.Vec; import java.util.Arrays; /** * Map/Reduce style distributed computation. * <p> * MRTask provides several <code>map</code> and <code>reduce</code> methods * that can be overridden to specify a computation. Several instances of this * class will be created to distribute the computation over F/J threads and * machines. Non-transient fields are copied and serialized to instances * created for map invocations. Reduce methods can store their results in * fields. Results are serialized and reduced all the way back to the invoking * node. When the last reduce method has been called, fields of the initial * MRTask instance contains the computation results.</p> * <p> * Apart from small reduced POJO returned to the calling node, MRTask can * produce output vector(s) as a result. These will have chunks co-located * with the input dataset, however, their number of lines will generally differ * so they won't be strictly compatible with the original. To produce output * vectors, call doAll.dfork version with required number of outputs and * override appropriate <code>map</code> call taking required number of * NewChunks. MRTask will automatically close the new Appendable vecs and a * call to <code>outputFrame</code> will make a frame with newly created Vecs. * </p> * * <p><b>Overview</b></p> * <p> * Distributed computation starts by calling <code>doAll</code>, * <code>dfork</code>, or <code>dfork</code>. <code>doAll</code> simply * calls <code>dfork</code> and <code>dfork</code> before blocking; * <code>dfork</code> and <code>dfork</code> are non-blocking. The main * pardigm is divide-conquer-combine using ForkJoin. </p> * <p> * If <code>doAll</code> is called with Keys, then one <code>map</code> call is * made per Key, on the Key's home node. If MRTask is invoked on a Frame (or * equivalently a Vec[]), then one <code>map</code> call is made per Chunk for * all Vecs at once, on the Chunk's home node. In both modes, * <code>reduce</code> is called between each pair of calls to * <code>map</code>. </p> * <p> * MRTask can also be called with <code>doAllNodes</code>, in which case only * the setupLocal call is made once per node; neither map nor reduce are * called.</p> * <p> * Computation is tailored primarily by overriding. The main method is the * <code>map</code> call, coupled sometimes with a <code>reduce</code> call. * <code>setupLocal</code> is called once per node before any map calls are * made on that node (but perhaps other nodes have already started); in reverse * <code>closeLocal</code> is called after the last map call completes on a * node (but perhaps other nodes are still computing maps). * <code>postGlobal</code> is called once only after all maps, reduces and * closeLocals, and only on the home node.</p> */ public abstract class MRTask<T extends MRTask<T>> extends DTask<T> implements ForkJoinPool.ManagedBlocker { /* * Technical note to developers: * * There are several internal flags and counters used throughout. They are gathered in * this note to help you reason about the execution of an MRTask. 
* * internal "top-level" fields * --------------------------- * - RPC<T> _nleft, _nrite: "child" node/JVMs that are doing work * - boolean _topLocal : "root" MRTask on a local machine * - boolean _topGlobal : "root" MRTask on the "root" node * - T _left, _rite : "child" MRTasks on a local machine * - T _res : "result" MRTask (everything reduced into here) * - int _nlo,_nhi : range of nodes to do remote work on (divide-conquer; see Diagram 2) * - Futures _fs : _topLocal task blocks on _fs for _left and _rite to complete * * Diagram 1: N is for Node; T is for Task * ------------------------------------- * 3 node cloud Inside one of the 'N' nodes: * N1 T _topLocal** * / \ / \ * N2 (_nleft) N3 (_nrite) T (_left) T (_rite) * * **: T is also _topGlobal if N==N1 * * These fields get set in the <code>SetupLocal0<code> call. Let's see what it does: * * Diagram 2: * ---------- * dfork on N1 * - _topGlobal=true * - _nlo=0 * - _nhi=CLOUD_SIZE * || * || * || * ==> setupLocal0 on N1 * - topLocal=true * - _fs = new Futures() * - nmid = (_nlo + _nhi) >> 1 => split the range of nodes (divide-conquer) * - _nleft = remote_compute(_nlo,nmid) => chooses a node in range and does new RPC().call() * - _nrite = remote_compute(nmid,_nhi) serializing MRTask and call dinvoke on remote. * / \ * / \ * / \ * dinvoke on N2 dinvoke on N3 * setupLocal0 on N2 setupLocal0 on N3 * - topLocal=true - topLocal=true * - _fs = new Futures() - _fs = new Futures() * - (continue splitting) - (continue splitting) * H2O.submitTask(this) => compute2 H2O.submitTask(this) => compute2 * */ public MRTask() { super(); } protected MRTask(H2O.H2OCountedCompleter cmp) {super(cmp); } protected MRTask(byte prior) { super(prior); } /** * This Frame instance is the handle for computation over a set of Vec instances. Recall * that a Frame is a collection Vec instances, so this includes any invocation of * <code>doAll</code> with Frame and Vec[] instances. Top-level calls to * <code>doAll</code> wrap Vec instances into a new Frame instance and set this into * <code>_fr</code> during a call to <code>dfork</code>. */ public Frame _fr; /** This <code>Key[]</code> instance is the handle used for computation when * an MRTask is invoked over an array of <code>Key</code>instances. */ public Key[] _keys; /** The number and type of output Vec instances produced by an MRTask. If * null then there are no outputs, _appendables will be null, and calls to * <code>outputFrame</code> will return null. */ private byte _output_types[]; /** First reserved VectorGroup key index for all output Vecs */ private int _vid; /** New Output vectors; may be null. * @return the set of AppendableVec instances or null if _output_types is null */ public AppendableVec[] appendables() { return _appendables; } /** Appendables are treated separately (roll-ups computed in map/reduce * style, can not be passed via K/V store).*/ protected AppendableVec[] _appendables; /** Internal field to track the left &amp; right remote nodes/JVMs to work on */ transient protected RPC<T> _nleft, _nrite; /** Internal field to track if this is a top-level local call */ transient protected boolean _topLocal; // Top-level local call, returning results over the wire /** Internal field to track if this is a top-level call. */ transient boolean _topGlobal = false; /** Internal field to track the left &amp; right sub-range of chunks to work on */ transient protected T _left, _rite; // In-progress execution tree /** Internal field upon which all reduces occur. 
*/ transient private T _res; // Result /** The range of Nodes to work on remotely */ protected short _nlo, _nhi; /** Internal field to track a range of local Chunks to work on */ transient protected int _lo, _hi; /** We can add more things to block on - in case we want a bunch of lazy * tasks produced by children to all end before this top-level task ends. * Semantically, these will all complete before we return from the top-level * task. Pragmatically, we block on a finer grained basis. */ transient protected Futures _fs; // More things to block on /** If true, run entirely local - which will pull all the data locally. */ protected boolean _run_local; private PostMapAction<?> _postMap; public final MRTask<T> withPostMapAction(PostMapAction<?> postMap) { _postMap = postMap; return this; } public String profString() { return _profile != null ? _profile.toString() : "Profiling turned off"; } MRProfile _profile; /** Used to invoke profiling. Call as: <code>new MRTask().profile().doAll();*/ public T profile() { _profile = new MRProfile(this); return (T)this; } /** Get the resulting Frame from this invoked MRTask. <b>This Frame is not * in the DKV.</b> AppendableVec instances are closed into Vec instances, * which then appear in the DKV. * * @return null if no outputs, otherwise returns the resulting Frame from * the MRTask. The Frame has no column names nor domains. */ public Frame outputFrame() { return outputFrame(null,null,null); } /** Get the resulting Frame from this invoked MRTask. <b>This Frame is not in * the DKV.</b> AppendableVec instances are closed into Vec instances, which * then appear in the DKV. * * @param names The names of the columns in the resulting Frame. * @param domains The domains of the columns in the resulting Frame. * @return The result Frame, or null if no outputs */ public Frame outputFrame(String [] names, String [][] domains){ return outputFrame(null,names,domains); } /** * Get the resulting Frame from this invoked MRTask. If the passed in <code>key</code> * is not null, then the resulting Frame will appear in the DKV. AppendableVec instances * are closed into Vec instances, which then appear in the DKV. * * @param key If null, then the Frame will not appear in the DKV. Otherwise, this result * will appear in the DKV under this key. * @param names The names of the columns in the resulting Frame. * @param domains The domains of the columns in the resulting Frame. * @return null if _noutputs is 0, otherwise returns a Frame. */ public Frame outputFrame(Key<Frame> key, String [] names, String [][] domains){ Frame res = closeFrame(key, names, domains); if (key != null) DKV.put(res); return res; } // the work-horse for the outputFrame calls private Frame closeFrame(Key key, String[] names, String[][] domains) { if( _output_types == null ) return null; final int noutputs = _output_types.length; Vec[] vecs = new Vec[noutputs]; if( _appendables==null || _appendables.length == 0) // Zero rows? for( int i = 0; i < noutputs; i++ ) vecs[i] = _fr.anyVec().makeZero(); else { Futures fs = new Futures(); int rowLayout = _appendables[0].compute_rowLayout(); for( int i = 0; i < noutputs; i++ ) { _appendables[i].setDomain(domains==null ? null : domains[i]); vecs[i] = _appendables[i].close(rowLayout,fs); } fs.blockForPending(); // Vecs need to be installed in DKV _before_ we create a Frame } return new Frame(key,names,vecs); } /** Override with your map implementation. This overload is given a single * <strong>local</strong> input Chunk. 
It is meant for map/reduce jobs that use a * single column in a input Frame. All map variants are called, but only one is * expected to be overridden. */ public void map( Chunk c ) { } public void map( Chunk c, NewChunk nc ) { } /** Override with your map implementation. This overload is given two * <strong>local</strong> Chunks. All map variants are called, but only one * is expected to be overridden. */ public void map( Chunk c0, Chunk c1 ) { } public void map( Chunk c0, Chunk c1, NewChunk nc) { } public void map( Chunk c0, NewChunk nc0, NewChunk nc1) { } //public void map( Chunk c0, Chunk c1, NewChunk nc1, NewChunk nc2 ) { } /** Override with your map implementation. This overload is given three * <strong>local</strong> input Chunks. All map variants are called, but only one * is expected to be overridden. */ public void map( Chunk c0, Chunk c1, Chunk c2 ) { } //public void map( Chunk c0, Chunk c1, Chunk c2, NewChunk nc ) { } //public void map( Chunk c0, Chunk c1, Chunk c2, NewChunk nc1, NewChunk nc2 ) { } /** Override with your map implementation. This overload is given four * <strong>local</strong> input Chunks. All map variants are called, but only one * is expected to be overridden. */ public void map( Chunk c0, Chunk c1, Chunk c2, Chunk c3) { } /** Override with your map implementation. This overload is given an array * of <strong>local</strong> input Chunks, for Frames with arbitrary column * numbers. All map variants are called, but only one is expected to be * overridden. */ public void map( Chunk cs[] ) { } /** The handy method to generate a new vector based on existing vectors. * * Note: This method is used by Sparkling Water examples. * * @param cs input vectors * @param nc output vector */ public void map( Chunk cs[], NewChunk nc ) { } public void map( Chunk cs[], NewChunk nc1, NewChunk nc2 ) { } public void map( Chunk cs[], NewChunk [] ncs ) { } /** Override with your map implementation. Used when doAll is called with * an array of Keys, and called once-per-Key on the Key's Home node */ public void map( Key key ) { } /** Override to combine results from 'mrt' into 'this' MRTask. Both 'this' * and 'mrt' are guaranteed to either have map() run on them, or be the * results of a prior reduce(). Reduce is optional if, e.g., the result is * some output vector. */ public void reduce( T mrt ) { } /** Override to do any remote initialization on the 1st remote instance of * this object, for initializing node-local shared data structures. */ protected void setupLocal() {} /** Override to do any remote cleaning on the last remote instance of * this object, for disposing of node-local shared data structures. */ protected void closeLocal() { } /** Compute a permissible node index on which to launch remote work. */ private int addShift( int x ) { x += _nlo; int sz = H2O.CLOUD.size(); return x < sz ? x : x-sz; } private int subShift( int x ) { x -= _nlo; int sz = H2O.CLOUD.size(); return x < 0 ? x+sz : x; } private short selfidx() { int idx = H2O.SELF.index(); if( idx>= 0 ) return (short)idx; assert H2O.SELF.isClient(); return 0; } // Profiling support. Time for each subpart of a single M/R task, plus any // nested MRTasks. All numbers are CTM stamps or millisecond times. private static class MRProfile extends Iced { String _clz; public MRProfile(MRTask mrt) { _clz = mrt.getClass().toString(); _localdone = System.currentTimeMillis(); } // See where these are set to understand their meaning. If we split the // job, then _lstart & _rstart are the start of left & right jobs. 
If we // do NOT split, then _rstart is 0 and _lstart is for the user map job(s). long _localstart, _rpcLstart, _rpcRstart, _rpcRdone, _localdone; // Local setup, RPC network i/o times long _mapstart, _userstart, _closestart, _mapdone; // MAP phase long _onCstart, _reducedone, _closeLocalDone, _remoteBlkDone, _localBlkDone, _onCdone; // REDUCE phase // If we split the job left/right, then we get a total recording of the // last job, and the exec time & completion time of 1st job done. long _time1st, _done1st; int _size_rez0, _size_rez1; // i/o size in bytes during reduce MRProfile _last; long sumTime() { return _onCdone - (_localstart==0 ? _mapstart : _localstart); } void gather( MRProfile p, int size_rez ) { p._clz=null; if( _last == null ) { _last=p; _time1st = p.sumTime(); _done1st = p._onCdone; } else { MRProfile first = _last._onCdone <= p._onCdone ? _last : p; _last = _last._onCdone > p._onCdone ? _last : p; if( first._onCdone > _done1st ) { _time1st = first.sumTime(); _done1st = first._onCdone; } } if( size_rez !=0 ) // Record i/o result size if( _size_rez0 == 0 ) _size_rez0=size_rez; else _size_rez1=size_rez; assert _userstart !=0 || _last != null; assert _last._onCdone >= _done1st; } @Override public String toString() { return print(new StringBuilder(),0).toString(); } private StringBuilder print(StringBuilder sb, int d) { if( d==0 ) sb.append(_clz).append("\n"); for( int i=0; i<d; i++ ) sb.append(" "); if( _localstart != 0 ) sb.append("Node local ").append(_localdone - _localstart).append("ms, "); if( _last != null ) { // Forked job? sb.append("Slow wait ").append(_mapstart-_localdone).append("ms + work ").append(_last.sumTime()).append("ms, "); sb.append("Fast work ").append(_time1st).append("ms + wait ").append(_onCstart-_done1st).append("ms\n"); _last.print(sb,d+1); // Nested slow-path print for( int i=0; i<d; i++ ) sb.append(" "); sb.append("join-i/o ").append(_onCstart-_last._onCdone).append("ms, "); } if( _userstart != 0 ) { // Leaf map call? sb.append("Map ").append(_mapdone - _mapstart).append("ms (prep ").append(_userstart - _mapstart); sb.append("ms, user ").append(_closestart-_userstart); sb.append("ms, closeChk ").append(_mapdone-_closestart).append("ms), "); } sb.append("Red ").append(_onCdone - _onCstart); sb.append("ms (locRed ").append(_reducedone-_onCstart).append("ms"); if( _remoteBlkDone!=0 ) { sb.append( ", close " ).append(_closeLocalDone- _reducedone); sb.append("ms, remBlk ").append( _remoteBlkDone-_closeLocalDone); sb.append("ms, locBlk ").append( _localBlkDone- _remoteBlkDone); sb.append("ms, close " ).append( _onCdone- _localBlkDone); sb.append("ms, size " ).append(PrettyPrint.bytes(_size_rez0)).append("+").append(PrettyPrint.bytes(_size_rez1)); } sb.append(")\n"); return sb; } } // Support for fluid-programming with strong types protected T self() { return (T)this; } /** * Invokes the map/reduce computation over the given {@link Vec}s. This call is * blocking. * * @param vecs Perform the computation over this vectors. They can be possibly mutated as side-effect. * * @return this */ public final T doAll( Vec... vecs) { return doAll(null,vecs); } /** * Invokes the map/reduce computation over the given {@link Vec}s. This call is * blocking. * * @param outputTypes The type of output Vec instances to create. See {@link Vec#T_STR}, {@link Vec#T_NUM}, * {@link Vec#T_CAT} and other byte constants in {@link Vec} to see possible values. * * @param vecs Perform the computation over this vectors. They can be possibly mutated as side-effect. 
* * @return this */ public final T doAll(byte[] outputTypes, Vec... vecs) { return doAll(outputTypes, new Frame(vecs), false); } /** * Invokes the map/reduce computation over the given {@link Vec}s. This call is * blocking. * * @param outputType The type of output Vec instance to create. See {@link Vec#T_STR}, {@link Vec#T_NUM}, * {@link Vec#T_CAT} and other byte constants in {@link Vec} to see possible values. * * @param vecs Perform the computation over this vectors. They can be possibly mutated as side-effect. * * @return this */ public final T doAll(byte outputType, Vec... vecs) { return doAll(new byte[]{outputType}, new Frame(vecs), false); } /** * Invokes the map/reduce computation over the given {@link Vec}. This call is blocking. * * @param vec Perform the computation over this vector. It can be possibly mutated as side-effect. * * @param runLocal Run locally by copying data, or run across the cluster? * * @return this */ public final T doAll(Vec vec, boolean runLocal) { return doAll(null, vec, runLocal); } /** * Invokes the map/reduce computation over the given {@link Vec}. This call is blocking. * * @param outputTypes The type of output Vec instances to create. See {@link Vec#T_STR}, {@link Vec#T_NUM}, * {@link Vec#T_CAT} and other byte constants in {@link Vec} to see possible values. * * @param vec Perform the computation over this vector. It can be possibly mutated as side-effect. * * @param runLocal Run locally by copying data, or run across the cluster? * * @return this */ public final T doAll(byte[] outputTypes, Vec vec, boolean runLocal) { return doAll(outputTypes, new Frame(vec), runLocal); } /** * Invokes the map/reduce computation over the given Frame. This call is * blocking. * * @param fr Perform the computation on this Frame instance. This frame can be possibly mutated as side-effect. * * @param runLocal Run locally by copying data, or run across the cluster? * * @return this */ public final T doAll( Frame fr, boolean runLocal) { return doAll(null, fr, runLocal); } /** * Invokes the map/reduce computation over the given Frame. This call is * blocking. The run is performed across the cluster. * * @param fr Perform the computation on this Frame instance. This frame can be possibly mutated as side-effect. * * @return this */ public final T doAll(Frame fr ) { return doAll(null, fr, false); } /** * Invokes the map/reduce computation over the given Frame. This call is * blocking. The run is performed across the cluster. * * @param outputTypes The type of output Vec instances to create. See {@link Vec#T_STR}, {@link Vec#T_NUM}, * {@link Vec#T_CAT} and other byte constants in {@link Vec} to see possible values. * * @param fr Perform the computation on this Frame instance. This frame can be possibly mutated as side-effect. * * @return this */ public final T doAll(byte[] outputTypes, Frame fr) { return doAll(outputTypes, fr, false); } /** * Invokes the map/reduce computation over the given Frame. This call is * blocking. The run is performed across the cluster. * * @param outputType The type of one output Vec instance to create. See {@link Vec#T_STR}, {@link Vec#T_NUM}, * {@link Vec#T_CAT} and other byte constants in {@link Vec} to see possible values. * * @param fr Perform the computation on this Frame instance. This frame can be possibly mutated as side-effect. * * @return this */ public final T doAll(byte outputType, Frame fr) { return doAll(new byte[]{outputType}, fr, false); } /** * Invokes the map/reduce computation over the given Frame. This call is * blocking. 
* * @param outputTypes The type of output Vec instances to create. See {@link Vec#T_STR}, {@link Vec#T_NUM}, * {@link Vec#T_CAT} and other byte constants in {@link Vec} to see possible values. * * @param fr Perform the computation on this Frame instance. This frame can be possibly mutated as side-effect. * * @param runLocal Run locally by copying data, or run across the cluster? * * @return this */ public final T doAll(byte[] outputTypes, Frame fr, boolean runLocal) { dfork(outputTypes, fr, runLocal); return getResult(); } /** * Invokes the map/reduce computation over the given Frame. This call is * blocking. * * @param numberOfOutputs Number of output vectors for the computation. All of them will have the same type. * * @param outputType The type of all the output Vec instances to create. See {@link Vec#T_STR}, {@link Vec#T_NUM}, * {@link Vec#T_CAT} and other byte constants in {@link Vec} to see possible values. * * @param fr Perform the computation on this Frame instance. This frame can be possibly mutated as side-effect. * * @return this */ public final T doAll(int numberOfOutputs, byte outputType, Frame fr) { byte[] types = new byte[numberOfOutputs]; Arrays.fill(types, outputType); return doAll(types, fr, false); } // Special mode doing 1 map per key. No frame public T doAll( Key... keys ) { dfork(keys); return getResult(); // Block For All } // Special mode doing 1 map per key. No frame public void dfork(Key... keys ) { _topGlobal = true; _keys = keys; _nlo = selfidx(); _nhi = (short)H2O.CLOUD.size(); // Do Whole Cloud setupLocal0(); // Local setup H2O.submitTask(this); // Begin normal execution on a FJ thread } // Special mode to run once-per-node public T doAllNodes() { return doAll((Key[])null); } public void asyncExecOnAllNodes() { dfork((Key[]) null); } /** * Invokes the map/reduce computation over the given Vec instances and produces * <code>outputs</code> Vec instances. This call is asynchronous. It returns 'this', on * which <code>getResult</code> may be invoked by the caller to block for pending * computation to complete. * * @param outputTypes The type of output Vec instances to create. See {@link Vec#T_STR}, {@link Vec#T_NUM}, * {@link Vec#T_CAT} and other byte constants in {@link Vec} to see possible values. * * @param vecs The set of Vec instances upon which computation is performed. * * @return this */ public final T dfork( byte[] outputTypes, Vec... vecs) { return dfork(outputTypes, new Frame(vecs), false); } public final T dfork(Vec... vecs){ return dfork(null,new Frame(vecs),false); } /** * Invokes the map/reduce computation over the given Frame instance. This call is * asynchronous. It returns 'this', on which <code>getResult</code> may be invoked * by the caller to block for pending computation to complete. This call produces no * output Vec instances or Frame instances. * * @param fr Perform the computation on this Frame instance. This frame can be possibly mutated as side-effect. * * @return this */ public final T dfork(Frame fr) { return dfork(null, fr, false); } /** Fork the task in strictly non-blocking fashion. * Same functionality as dfork, but does not raise priority, so user is should * *never* block on it. * Because it does not raise priority, these can be tail-call chained together * for any length. * * @param outputTypes The type of output Vec instances to create. See {@link Vec#T_STR}, {@link Vec#T_NUM}, * {@link Vec#T_CAT} and other byte constants in {@link Vec} to see possible values. * * @param fr Perform the computation on this Frame instance. 
This frame can be possibly mutated as side-effect. * * @param runLocal Run locally by copying data, or run across the cluster? * * @return this */ public final T dfork(byte[] outputTypes, Frame fr, boolean runLocal) { _topGlobal = true; _output_types = outputTypes; if( outputTypes != null && outputTypes.length > 0 ) _vid = fr.anyVec().group().reserveKeys(outputTypes.length); _fr = fr; // Record vectors to work on _nlo = selfidx(); _nhi = (short)H2O.CLOUD.size(); // Do Whole Cloud _run_local = runLocal; // Run locally by copying data, or run globally? assert checkRunLocal() : "MRTask is expected to be running in a local-mode but _run_local = false"; setupLocal0(); // Local setup H2O.submitTask(this); // Begin normal execution on a FJ thread return self(); } private boolean checkRunLocal() { if (!Boolean.getBoolean(H2O.OptArgs.SYSTEM_PROP_PREFIX + "debug.checkRunLocal")) return true; if ("water.fvec.RollupStats$Roll".equals(getClass().getName())) return true; return _run_local; } /** * Block for and get any final results from a dfork'd MRTask. * Note: the desired name 'get' is final in ForkJoinTask. */ public final T getResult(boolean fjManagedBlock) { assert getCompleter()==null; // No completer allowed here; FJ never awakens threads with completers do { try { if(fjManagedBlock) ForkJoinPool.managedBlock(this); else // For the cases when we really want to block this thread without FJ framework scheduling a new worker thread. // Model use is in MultifileParseTask - we want to be parsing at most cluster ncores files in parallel. block(); join(); // Throw any exception the map call threw } catch (InterruptedException ignore) { // do nothing } catch (Throwable re) { onExceptionalCompletion(re,null); // block for left and rite throw (re instanceof DistributedException)?new DistributedException(re.getMessage(),re.getCause()):new DistributedException(re); } } while( !isReleasable()); assert _topGlobal:"lost top global flag"; return self(); } /** * Block for and get any final results from a dfork'd MRTask. * Note: the desired name 'get' is final in ForkJoinTask. */ public final T getResult() {return getResult(true);} // Return true if blocking is unnecessary, which is true if the Task isDone. public boolean isReleasable() { return isDone(); } // Possibly blocks the current thread. Returns true if isReleasable would // return true. Used by the FJ Pool management to spawn threads to prevent // deadlock is otherwise all threads would block on waits. public boolean block() throws InterruptedException { while( !isDone() ) join(); return true; } /** Called once on remote at top level, probably with a subset of the cloud. * Called internal by D/F/J. Not expected to be user-called. */ @Override public final void dinvoke(H2ONode sender) { setupLocal0(); // Local setup H2O.submitTask(this); } protected boolean modifiesVolatileVecs(){return true;} /* * Set top-level fields and fire off remote work (if there is any to do) to 2 selected * child JVM/nodes. Setup for local work: fire off any global work to cloud neighbors; do all * chunks; call user's init. */ private void setupLocal0() { if(_profile != null) (_profile = new MRProfile(this))._localstart = System.currentTimeMillis(); // Make a blockable Futures for both internal and user work to block on. 
_fs = new Futures(); if(modifiesVolatileVecs() && _fr != null){ for(Vec v:_fr.vecs()) if(v.isVolatile())v.preWriting(); } _topLocal = true; // Check for global vs local work int selfidx = selfidx(); int nlo = subShift(selfidx); assert nlo < _nhi; final int nmid = (nlo+_nhi)>>>1; // Mid-point // Run remote IF: // - Not forced to run local (no remote jobs allowed) AND // - - There's remote work, or Client mode (always remote work) if( (!_run_local) && ((nlo+1 < _nhi) || H2O.ARGS.client) ) { if(_profile!=null) _profile._rpcLstart = System.currentTimeMillis(); _nleft = remote_compute(H2O.ARGS.client ? nlo : nlo+1,nmid); if(_profile!=null) _profile._rpcRstart = System.currentTimeMillis(); _nrite = remote_compute( nmid,_nhi); if(_profile!=null) _profile._rpcRdone = System.currentTimeMillis(); } else { if(_profile!=null) _profile._rpcLstart = _profile._rpcRstart = _profile._rpcRdone = System.currentTimeMillis(); } if( _fr != null ) { // Doing a Frame _lo = 0; _hi = _fr.numCols()==0 ? 0 : _fr.anyVec().nChunks(); // Do All Chunks // get the Vecs from the K/V store, to avoid racing fetches from the map calls _fr.vecs(); } else if( _keys != null ) { // Else doing a set of Keys _lo = 0; _hi = _keys.length; // Do All Keys } // Setup any user's shared local structures for both normal cluster nodes // and any client; want this for possible reduction ONTO client setupLocal(); if(_profile!=null) _profile._localdone = System.currentTimeMillis(); } // Make an RPC call to some node in the middle of the given range. Add a // pending completion to self, so that we complete when the RPC completes. private RPC<T> remote_compute( int nlo, int nhi ) { if( nlo < nhi ) { // have remote work int node = addShift(nlo); assert node != H2O.SELF.index(); // Not the same as selfidx() if this is a client T mrt = copyAndInit(); mrt._nhi = (short) nhi; addToPendingCount(1); // Not complete until the RPC returns // Set self up as needing completion by this RPC: when the ACK comes back // we'll get a wakeup. // Note the subtle inter-play of onCompletion madness here: // - when run on the remote, the RPCCall (NOT RPC!) is completed by the // last map/compute2 call, signals end of the remote work, and ACK's // back the result. i.e., last-map calls RPCCall.onCompletion. // - when launched on the local (right here, in this next line of code) // the completed RPC calls our self completion. i.e. the completed RPC // calls MRTask.onCompletion return new RPC<>(H2O.CLOUD._memary[node], mrt).addCompleter(this).call(); } return null; // nlo >= nhi => no remote work } /** Called from FJ threads to do local work. The first called Task (which is * also the last one to Complete) also reduces any global work. Called * internal by F/J. Not expected to be user-called. 
*/ @Override public final void compute2() { assert _left == null && _rite == null && _res == null; if(_profile!=null) _profile._mapstart = System.currentTimeMillis(); if( (_hi-_lo) >= 2 ) { // Multi-chunk case: just divide-and-conquer to 1 chunk final int mid = (_lo+_hi)>>>1; // Mid-point _left = copyAndInit(); _rite = copyAndInit(); _left._hi = mid; // Reset mid-point _rite._lo = mid; // Also set self mid-point addToPendingCount(1); // One fork awaiting completion if( !isCompletedAbnormally() ) _left.fork(); // Runs in another thread/FJ instance if( !isCompletedAbnormally() ) _rite.compute2(); // Runs in THIS F/J thread if(_profile!=null) _profile._mapdone = System.currentTimeMillis(); return; // Not complete until the fork completes } // Zero or 1 chunks, and further chunk might not be homed here if( _fr==null ) { // No Frame, so doing Keys? if( _keys == null || // Once-per-node mode _hi > _lo && _keys[_lo].home() ) { assert(_keys == null || !H2O.ARGS.client) : "Client node should not process any keys in MRTask!"; if(_profile!=null) _profile._userstart = System.currentTimeMillis(); if( _keys != null ) map(_keys[_lo]); _res = self(); // Save results since called map() at least once! if (_postMap != null) _postMap.call(_keys[_lo]); if(_profile!=null) _profile._closestart = System.currentTimeMillis(); } } else if( _hi > _lo ) { // Frame, Single chunk? Vec v0 = _fr.anyVec(); if( _run_local || v0.chunkKey(_lo).home() ) { // And chunk is homed here? assert(_run_local || !H2O.ARGS.client) : "Client node should not process any keys in MRTask!"; // Make decompression chunk headers for these chunks NewChunk[] appendableChunks = null; Chunk[] bvs = FrameUtils.extractChunks(_fr, _lo, _run_local); if(_output_types != null) { final VectorGroup vg = v0.group(); _appendables = new AppendableVec[_output_types.length]; appendableChunks = new NewChunk[_output_types.length]; for(int i = 0; i < _appendables.length; ++i) { _appendables[i] = new AppendableVec(vg.vecKey(_vid+i),_output_types[i]); appendableChunks[i] = _appendables[i].chunkForChunkIdx(_lo); } } // Call all the various map() calls that apply if(_profile!=null) _profile._userstart = System.currentTimeMillis(); int num_fr_vecs = _fr.vecs().length; int num_outputs = _output_types == null? 0 : _output_types.length; if (num_outputs == 0) { if (num_fr_vecs == 1) map(bvs[0]); else if (num_fr_vecs == 2) map(bvs[0], bvs[1]); else if (num_fr_vecs == 3) map(bvs[0], bvs[1], bvs[2]); map(bvs); } else if (num_outputs == 1) { // convenience versions for cases with single output. assert appendableChunks != null; if (num_fr_vecs == 1) map(bvs[0], appendableChunks[0]); else if (num_fr_vecs == 2) map(bvs[0], bvs[1], appendableChunks[0]); // else if (fr_vecs_length == 3) map(bvs[0], bvs[1], bvs[2], appendableChunks[0]); map(bvs, appendableChunks[0]); } else if (num_outputs == 2) { // convenience versions for cases with 2 outputs (e.g split). assert appendableChunks != null; if (num_fr_vecs == 1) map(bvs[0], appendableChunks[0], appendableChunks[1]); // else if (fr_vecs_length == 2) map(bvs[0], bvs[1], appendableChunks[0], appendableChunks[1]); // else if (fr_vecs_length == 3) map(bvs[0], bvs[1], bvs[2], appendableChunks[0], appendableChunks[1]); map(bvs, appendableChunks[0], appendableChunks[1]); } if (num_outputs >= 0) map(bvs, appendableChunks); _res = self(); // Save results since called map() at least once! // Further D/K/V put any new vec results. 
if(_profile!=null) _profile._closestart = System.currentTimeMillis(); if (_postMap != null) _postMap.call(bvs); for( Chunk bv : bvs ) bv.close(_lo,_fs); if( _output_types != null) for(NewChunk nch:appendableChunks)nch.close(_lo, _fs); } } if(_profile!=null) _profile._mapdone = System.currentTimeMillis(); tryComplete(); } /** OnCompletion - reduce the left and right into self. Called internal by * F/J. Not expected to be user-called. */ @Override public final void onCompletion( CountedCompleter caller ) { if(_profile!=null) _profile._onCstart = System.currentTimeMillis(); // Reduce results into 'this' so they collapse going up the execution tree. // NULL out child-references so we don't accidentally keep large subtrees // alive since each one may be holding large partial results. reduce2(_left); _left = null; reduce2(_rite); _rite = null; if(_profile!=null) _profile._reducedone = System.currentTimeMillis(); // Only on the top local call, have more completion work if( _topLocal ) postLocal0(); if(_profile!=null) _profile._onCdone = System.currentTimeMillis(); } // Call 'reduce' on pairs of mapped MRTask's. // Collect all pending Futures from both parties as well. private void reduce2( MRTask<T> mrt ) { if( mrt == null ) return; if(_profile!=null) _profile.gather(mrt._profile,0); if( _res == null ) _res = mrt._res; else if( mrt._res != null ) _res.reduce4(mrt._res); // Futures are shared on local node and transient (so no remote updates) assert _fs == mrt._fs; } protected void postGlobal(){} // Work done after all the main local work is done. // Gather/reduce remote work. // User cleanup. // Block for other queued pending tasks. // Copy any final results into 'this', such that a return of 'this' has the results. private void postLocal0() { closeLocal(); // User's node-local cleanup if(_profile!=null) _profile._closeLocalDone = System.currentTimeMillis(); reduce3(_nleft); // Reduce global results from neighbors. reduce3(_nrite); if(_profile!=null) _profile._remoteBlkDone = System.currentTimeMillis(); _fs.blockForPending(); // Block any pending user tasks if(_profile!=null) _profile._localBlkDone = System.currentTimeMillis(); // Finally, must return all results in 'this' because that is the API - // what the user expects if( _res == null ) _nhi=-1; // Flag for no local results *at all* else if( _res != this ) { // There is a local result, and its not self _res._profile = _profile; // Use my profile (not child's) copyOver(_res); // So copy into self } if( _topGlobal ) { if (_fr != null) // Do any post-writing work (zap rollup fields, etc) _fr.postWrite(_fs).blockForPending(); postGlobal(); // User's continuation work } } // Block for RPCs to complete, then reduce global results into self results private void reduce3( RPC<T> rpc ) { if( rpc == null ) return; T mrt = rpc.get(); // This is a blocking remote call // Note: because _fs is transient it is not set or cleared by the RPC. // Because the MRT object is a clone of 'self' it's likely to contain a ptr // to the self _fs which will be not-null and still have local pending // blocks. Not much can be asserted there. if(_profile!=null) _profile.gather(mrt._profile, rpc.size_rez()); // Unlike reduce2, results are in mrt directly not mrt._res. if( mrt._nhi != -1L ) { // Any results at all? if( _res == null ) _res = mrt; else _res.reduce4(mrt); } } /** Call user's reduction. Also reduce any new AppendableVecs. Called * internal by F/J. Not expected to be user-called. 
*/ void reduce4( T mrt ) { // Reduce any AppendableVecs if( _output_types != null ) for( int i=0; i<_appendables.length; i++ ) _appendables[i].reduce(mrt._appendables[i]); if( _ex == null ) _ex = mrt._ex; // User's reduction reduce(mrt); } // Full local work-tree cancellation void self_cancel2() { if( !isDone() ) { cancel(true); self_cancel1(); } } private void self_cancel1() { T l = _left; if( l != null ) { l.self_cancel2(); } T r = _rite; if( r != null ) { r.self_cancel2(); } } /** Cancel/kill all work as we can, then rethrow... do not invisibly swallow * exceptions (which is the F/J default). Called internal by F/J. Not * expected to be user-called. */ @Override public final boolean onExceptionalCompletion( Throwable ex, CountedCompleter caller ) { self_cancel1(); // Block for completion - we don't want the work, but we want all the // workers stopped before we complete this task. Otherwise this task quits // early and begins post-task processing (generally cleanup from the // exception) but the work is still on-going - often trying to use the same // Keys as are being cleaned-up! // Since blocking can throw (generally the same exception, again and again) // catch & ignore, keeping only the first one we already got. RPC<T> nl = _nleft; if( nl != null ) try { nl.get(); } catch( Throwable ignore ) { } _nleft = null; RPC<T> nr = _nrite; if( nr != null ) try { nr.get(); } catch( Throwable ignore ) { } _nrite = null; return true; } // Make copy, setting final-field completer and clearing out a bunch of fields private T copyAndInit() { T x = clone(); x._topGlobal = false; x.setCompleter(this); // Set completer, what used to be a final field x._topLocal = false; // Not a top job x._nleft = x._nrite = null; x. _left = x. _rite = null; x._fs = _fs; if( _profile!=null ) x._profile = new MRProfile(this); else x._profile = null; // Clone needs its own profile x.setPendingCount(0); // Volatile write for completer field; reset pending count also return x; } public static abstract class PostMapAction<T extends PostMapAction<T>> extends Iced<T> { void call(Key mapInput) { // do nothing by default } void call(Chunk[] mapInput) { // do nothing by default } } }
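A minimal usage sketch of the map/reduce API documented above: an MRTask subclass that sums one numeric Vec via doAll(). SumTask and _sum are illustrative names, not part of h2o-core; a running H2O instance and a parsed numeric Vec are assumed.

import water.MRTask;
import water.fvec.Chunk;

// Sums the non-NA values of a single numeric column. map() is called once per
// local Chunk; reduce() folds the partial sums of sibling tasks together.
class SumTask extends MRTask<SumTask> {
  double _sum; // partial sum, combined across chunks and nodes in reduce()

  @Override public void map(Chunk c) {
    for (int row = 0; row < c._len; row++)
      if (!c.isNA(row)) _sum += c.atd(row);
  }

  @Override public void reduce(SumTask other) { _sum += other._sum; }
}

// Typical call site (blocking): double total = new SumTask().doAll(vec)._sum;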
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/MemoryManager.java
package water; import jsr166y.ForkJoinPool; import jsr166y.ForkJoinPool.ManagedBlocker; import water.util.Log; import water.util.PrettyPrint; import javax.management.Notification; import javax.management.NotificationEmitter; import java.lang.management.*; import java.util.Arrays; import java.util.concurrent.atomic.AtomicLong; /** * Manages memory assigned to key/value pairs. All byte arrays used in * keys/values should be allocated through this class - otherwise we risk * running out of Java memory and throwing unexpected OutOfMemory errors. The * theory here is that *most* allocated bytes are allocated in large chunks by * allocating new Values - with large backing arrays. If we intercept these * allocation points, we cover most Java allocations. If such an allocation * might trigger an OOM error we first free up some other memory. * * MemoryManager monitors memory used by the K/V store (by walking through the * store, see Cleaner) and overall heap usage (by hooking into GC). * * Memory is freed if either the cached memory is above the limit or if the * overall heap usage is too high (in which case we want to use less mem for * cache). There is also a lower limit on the amount of cache so that we never * delete all the cache and therefore some computation should always be able to * progress. * * The amount of memory to be freed is determined as the max of cached mem above * the limit and heap usage above the limit. * * One of the primary control inputs is FullGC cycles: we check heap usage and * set guidance for cache levels. We assume after a FullGC that the heap only * has POJOs (Plain Old Java Objects, unknown size) and K/V Cached stuff * (counted by us). We compute the free heap as MEM_MAX-heapUsage (after GC), * and we compute POJO size as (heapUsage - K/V cache usage). * * @author tomas * @author cliffc */ abstract public class MemoryManager { // Track timestamp of last oom log to avoid spamming the logs with junk. private static volatile long oomLastLogTimestamp = 0; private static final long SIXTY_SECONDS_IN_MILLIS = 60 * 1000; // max heap memory public static final long MEM_MAX = Runtime.getRuntime().maxMemory(); // Callbacks from GC static final HeapUsageMonitor HEAP_USAGE_MONITOR = new HeapUsageMonitor(); // Keep the K/V store below this threshold AND this is the FullGC call-back // threshold - which is limited in size to the old-gen pool size. static long MEM_CRITICAL; // Block allocations? static volatile boolean CAN_ALLOC = true; private static volatile boolean MEM_LOW_CRITICAL = false; // Lock for blocking on allocations private static final Object _lock = new Object(); // A monotonically increasing total count of memory allocated via MemoryManager. // Useful in tracking total memory consumed by algorithms - just ask for the // before & after amounts and diff them. static void setMemGood() { if( CAN_ALLOC ) return; synchronized(_lock) { CAN_ALLOC = true; _lock.notifyAll(); } // NO LOGGING UNDER LOCK! Log.warn("Continuing after swapping"); } static void setMemLow() { if( !H2O.ARGS.cleaner ) return; // Cleaner turned off if( !CAN_ALLOC ) return; synchronized(_lock) { CAN_ALLOC = false; } // NO LOGGING UNDER LOCK! Log.warn("Pausing to swap to disk; more memory may help"); } static boolean canAlloc() { return CAN_ALLOC; } static void set_goals( String msg, boolean oom){ set_goals(msg, oom, 0); } // Set K/V cache goals. // Allow (or disallow) allocations. // Called from the Cleaner, when "cacheUsed" has changed significantly.
// Called from any FullGC notification, and HEAP/POJO_USED changed. // Called on any OOM allocation static void set_goals( String msg, boolean oom , long bytes) { // Our best guess of free memory, as of the last GC cycle final long heapUsedGC = Cleaner.HEAP_USED_AT_LAST_GC; final long timeGC = Cleaner.TIME_AT_LAST_GC; final long freeHeap = MEM_MAX - heapUsedGC; assert freeHeap >= 0 : "I am really confused about the heap usage; MEM_MAX="+MEM_MAX+" heapUsedGC="+heapUsedGC; // Current memory held in the K/V store. final long cacheUsageGC = Cleaner.KV_USED_AT_LAST_GC; // Our best guess of POJO object usage: Heap_used minus cache used final long pojoUsedGC = Math.max(heapUsedGC - cacheUsageGC,0); // Block allocations if: // the cache is > 7/8 MEM_MAX, OR // we cannot allocate an equal amount of POJOs, pojoUsedGC > freeHeap. // Decay POJOS_USED by 1/8th every 5 sec: assume we got hit with a single // large allocation which is not repeating - so we do not need to have // double the POJO amount. // Keep at least 1/8th heap for caching. // Emergency-clean the cache down to the blocking level. long d = MEM_CRITICAL; // Block-allocation level; cache can grow till this // Decay POJO amount long p = pojoUsedGC; long age = (System.currentTimeMillis() - timeGC); // Age since last FullGC age = Math.min(age,10*60*1000 ); // Clip at 10mins while( (age-=5000) > 0 ) p = p-(p>>3); // Decay effective POJO by 1/8th every 5sec d -= 2*p - bytes; // Allow for the effective POJO, and again to throttle GC rate (and allow for this allocation) d = Math.max(d,MEM_MAX>>3); // Keep at least 1/8th heap if( Cleaner.DESIRED != -1 ) // Set to -1 only for OOM/Cleaner testing. Never negative normally Cleaner.DESIRED = d; // Desired caching level final long cacheUsageNow = Cleaner.Histo.cached(); boolean skipThisLogMessageToAvoidSpammingTheLogs = false; String m=""; if( cacheUsageNow > Cleaner.DESIRED ) { m = (CAN_ALLOC?"Swapping! ":"blocked: "); if( oom ) setMemLow(); // Stop allocations; trigger emergency clean Cleaner.kick_store_cleaner(); } else { // Else we are not *emergency* cleaning, but may be lazily cleaning. setMemGood(); // Cache is below desired level; unblock allocations if( oom ) { // But still have an OOM? m = "Unblock allocations; cache below desired, but also OOM: "; // Means the heap is full of uncached POJO's - which cannot be spilled. // Here we enter the zone of possibly dieing for OOM. There's no point // in blocking allocations, as no more memory can be freed by more // cache-flushing. Might as well proceed on a "best effort" basis. long now = System.currentTimeMillis(); if ((now - oomLastLogTimestamp) >= SIXTY_SECONDS_IN_MILLIS) { oomLastLogTimestamp = now; } else { skipThisLogMessageToAvoidSpammingTheLogs = true; } } else { m = "MemGood: "; // Cache is low enough, room for POJO allocation - full steam ahead! } } if (skipThisLogMessageToAvoidSpammingTheLogs) { return; } // No logging if under memory pressure: can deadlock the cleaner thread String s = m+msg+", (K/V:"+PrettyPrint.bytes(cacheUsageGC)+" + POJO:"+PrettyPrint.bytes(pojoUsedGC)+" + FREE:"+PrettyPrint.bytes(freeHeap)+" == MEM_MAX:"+PrettyPrint.bytes(MEM_MAX)+"), desiredKV="+PrettyPrint.bytes(Cleaner.DESIRED)+(oom?" OOM!":" NO-OOM"); if( CAN_ALLOC ) { if( oom ) Log.warn(s); else Log.debug(s); } else System.err.println(s); } /** Monitors the heap usage after full gc run and tells Cleaner to free memory * if mem usage is too high. Stops new allocation if mem usage is critical. 
* @author tomas */ private static class HeapUsageMonitor implements javax.management.NotificationListener { MemoryMXBean _allMemBean = ManagementFactory.getMemoryMXBean(); // general // Determine the OldGen GC pool size - which is saved in MEM_CRITICAL as // the max desirable K/V store size. HeapUsageMonitor() { int c = 0; for( MemoryPoolMXBean m : ManagementFactory.getMemoryPoolMXBeans() ) { if( m.getType() != MemoryType.HEAP ) // only interested in HEAP continue; if( m.isCollectionUsageThresholdSupported() && m.isUsageThresholdSupported()) { // Really idiotic API: no idea what the usageThreshold is, so I have // to guess. Start high, catch IAE & lower by 1/8th and try again. long gc_callback = MEM_MAX; while( true ) { try { m.setCollectionUsageThreshold(gc_callback); break; } catch( IllegalArgumentException iae ) { // Expected IAE: means we used too high a callback level gc_callback -= (gc_callback>>3); } } m.setCollectionUsageThreshold(1); // Call back for every fullgc NotificationEmitter emitter = (NotificationEmitter) _allMemBean; emitter.addNotificationListener(this, null, m); ++c; MEM_CRITICAL = gc_callback; // Set old-gen heap level } } assert c == 1; } /** Callback routine called by JVM after full gc run. Has two functions: * 1) sets the amount of memory to be cleaned from the cache by the Cleaner * 2) sets the CAN_ALLOC flag to false if memory level is critical */ @Override public void handleNotification(Notification notification, Object handback) { String notifType = notification.getType(); if( !notifType.equals(MemoryNotificationInfo.MEMORY_COLLECTION_THRESHOLD_EXCEEDED)) return; // Memory used after this FullGC Cleaner.TIME_AT_LAST_GC = System.currentTimeMillis(); Cleaner.HEAP_USED_AT_LAST_GC = _allMemBean.getHeapMemoryUsage().getUsed(); Cleaner.KV_USED_AT_LAST_GC = Cleaner.Histo.cached(); MEM_LOW_CRITICAL = Cleaner.HEAP_USED_AT_LAST_GC > 0.75*MEM_MAX; Log.debug("GC CALLBACK: "+Cleaner.TIME_AT_LAST_GC+", USED:"+PrettyPrint.bytes(Cleaner.HEAP_USED_AT_LAST_GC)+", CRIT: "+MEM_LOW_CRITICAL); set_goals("GC CALLBACK",MEM_LOW_CRITICAL); //if( MEM_LOW_CRITICAL ) { // emergency measure - really low on memory, stop allocations right now! // setMemLow(); // In-use memory is > 3/4 heap; block allocations //} else if( Cleaner.HEAP_USED_AT_LAST_GC < (MEM_MAX - (MEM_MAX >> 1)) ) // setMemGood(); // In use memory is < 1/2 heap; allow allocations even if Cleaner is still running } } // Allocates memory with cache management // Will block until there is enough available memory. // Catches OutOfMemory, clears cache & retries. static Object malloc(int elems, long bytes, int type, Object orig, int from ) { return malloc(elems,bytes,type,orig,from,false); } static Object malloc(int elems, long bytes, int type, Object orig, int from , boolean force) { assert elems >= 0 : "Bad size " + elems; // is 0 okay?! // Do not assert on large-size here. RF's temp internal datastructures are // single very large arrays. //assert bytes < Value.MAX : "malloc size=0x"+Long.toHexString(bytes); while( true ) { if( (!MEM_LOW_CRITICAL && !force) && !CAN_ALLOC && // Not allowing allocations? bytes > 256 && // Allow tiny ones in any case // To prevent deadlock, we cannot block the cleaner thread in any // case. This is probably an allocation for logging (ouch! shades of // logging-induced deadlock!) which will probably be recycled quickly. 
!(Thread.currentThread() instanceof Cleaner) ) { synchronized(_lock) { try { _lock.wait(300*1000); } catch (InterruptedException ex) { } } } try { switch( type ) { case 1: return new byte [elems]; case 2: return new short [elems]; case 4: return new int [elems]; case 8: return new long [elems]; case 5: return new float [elems]; case 9: return new double [elems]; case 0: return new boolean[elems]; case 10: return new Object [elems]; case -1: return Arrays.copyOfRange((byte [])orig,from,elems); case -4: return Arrays.copyOfRange((int [])orig,from,elems); case -5: return Arrays.copyOfRange((float [])orig,from,elems); case -8: return Arrays.copyOfRange((long [])orig,from,elems); case -9: return Arrays.copyOfRange((double[])orig,from,elems); default: throw H2O.fail(); } } catch( OutOfMemoryError e ) { // Do NOT log OutOfMemory, it is expected and unavoidable and handled // in most cases by spilling to disk. if( Cleaner.isDiskFull() ) { Log.err("Disk full, space left = " + Cleaner.availableDiskSpace()); UDPRebooted.suicide(UDPRebooted.T.oom, H2O.SELF); } } set_goals("OOM",true, bytes); // Low memory; block for swapping } } // Allocates memory with cache management public static byte [] malloc1 (int size) { return malloc1(size,false); } public static byte [] malloc1 (int size, boolean force) { return (byte [])malloc(size,size*1, 1,null,0,force); } public static short [] malloc2 (int size) { return (short [])malloc(size,size*2L, 2,null,0); } public static int [] malloc4 (int size) { return (int [])malloc(size,size*4L, 4,null,0); } public static int [][] malloc4 (int m, int n) { int [][] res = new int[m][]; for(int i = 0; i < m; ++i) res[i] = malloc4(n); return res; } public static long [] malloc8 (int size) { return (long [])malloc(size,size*8L, 8,null,0); } public static float [] malloc4f(int size) { return (float [])malloc(size,size*4L, 5,null,0); } public static double [] malloc8d(int size) { if(size < 32) try { // fast path for small arrays (e.g. 
histograms in gbm) return new double [size]; } catch (OutOfMemoryError oom){/* fall through */} return (double [])malloc(size,size*8L, 9,null,0); } public static double [][] malloc8d(int m, int n) { double [][] res = new double[m][]; for(int i = 0; i < m; ++i) res[i] = malloc8d(n); return res; } public static double[][][] malloc8d(final int d1, final int d2, final int d3) { final double[][][] array = new double[d1][d2][]; for (int j = 0; j < d1; ++j) for (int k = 0; k < d2; ++k) array[j][k] = MemoryManager.malloc8d(d3); return array; } public static boolean[] mallocZ (int size) { return (boolean[])malloc(size,size , 0,null,0); } public static Object [] mallocObj(int size){ return (Object [])malloc(size,size*8L,10,null,0,false); } public static byte [] arrayCopyOfRange(byte [] orig, int from, int sz) { return (byte []) malloc(sz,(sz-from) ,-1,orig,from); } public static int [] arrayCopyOfRange(int [] orig, int from, int sz) { return (int []) malloc(sz,(sz-from)*4,-4,orig,from); } public static long [] arrayCopyOfRange(long [] orig, int from, int sz) { return (long []) malloc(sz,(sz-from)*8,-8,orig,from); } public static float [] arrayCopyOfRange(float [] orig, int from, int sz){ return (float []) malloc(sz,(sz-from)*8,-5,orig,from); } public static double [] arrayCopyOfRange(double[] orig, int from, int sz) { return (double[]) malloc(sz,(sz-from)*8,-9,orig,from); } public static byte [] arrayCopyOf( byte [] orig, int sz) { return arrayCopyOfRange(orig,0,sz); } public static int [] arrayCopyOf( int [] orig, int sz) { return arrayCopyOfRange(orig,0,sz); } public static long [] arrayCopyOf( long [] orig, int sz) { return arrayCopyOfRange(orig,0,sz); } public static float [] arrayCopyOf( float [] orig, int sz) { return arrayCopyOfRange(orig,0,sz); } public static double [] arrayCopyOf( double[] orig, int sz) { return arrayCopyOfRange(orig,0,sz); } // Memory available for tasks (we assume 3/4 of the heap is available for tasks) static final AtomicLong _taskMem = new AtomicLong(MEM_MAX-(MEM_MAX>>2)); /** * Try to reserve memory needed for task execution and return true if * succeeded. Tasks have a shared pool of memory which they should ask for * in advance before they even try to allocate it. * * This method is another backpressure mechanism to make sure we do not * exhaust system's resources by running too many tasks at the same time. * Tasks are expected to reserve memory before proceeding with their * execution and making sure they release it when done. * * @param m - requested number of bytes * @return true if there is enough free memory */ static boolean tryReserveTaskMem(long m){ if(!CAN_ALLOC)return false; if( m == 0 ) return true; assert m >= 0:"m < 0: " + m; long current = _taskMem.addAndGet(-m); if(current < 0){ _taskMem.addAndGet(m); return false; } return true; } private static Object _taskMemLock = new Object(); static void reserveTaskMem(long m){ final long bytes = m; while(!tryReserveTaskMem(bytes)){ try { ForkJoinPool.managedBlock(new ManagedBlocker() { @Override public boolean isReleasable() {return _taskMem.get() >= bytes;} @Override public boolean block() throws InterruptedException { synchronized(_taskMemLock){ try {_taskMemLock.wait();} catch( InterruptedException e ) {} } return isReleasable(); } }); } catch (InterruptedException e){ Log.throwErr(e); } } } /** * Free the memory successfully reserved by task. * @param m */ static void freeTaskMem(long m){ if(m == 0)return; _taskMem.addAndGet(m); synchronized(_taskMemLock){ _taskMemLock.notifyAll(); } } }
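A short sketch of the allocation discipline the class comment above asks for: large arrays are requested through the malloc*/arrayCopyOf helpers instead of plain new, so they take part in cache management and may block under memory pressure. The wrapper class and sizes are illustrative only; the MemoryManager calls are the ones defined above.

import water.MemoryManager;

class ManagedAllocSketch {
  // An nCols x nBins double matrix, each row allocated through the manager.
  static double[][] histograms(int nCols, int nBins) {
    return MemoryManager.malloc8d(nCols, nBins);
  }

  // Grows a byte buffer; like Arrays.copyOf, but the new array is allocated
  // (and, if needed, waited for) via the manager.
  static byte[] grow(byte[] old, int newSize) {
    return MemoryManager.arrayCopyOf(old, newSize);
  }
}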
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/MrFun.java
package water; /** * Created by tomas on 11/5/16. * Base class for functions to be run by LocalMR. * */ public abstract class MrFun<T extends MrFun<T>> extends Iced<T> { protected abstract void map(int id); protected void reduce(T t) {} protected MrFun<T> makeCopy() { return clone(); } }
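A minimal MrFun sketch: each map(id) fills one disjoint slot of a shared output array, so reduce() can stay a no-op. SquareFun is a hypothetical name, and the driver in the trailing comment assumes water.LocalMR (referenced above but not shown in this file) as the entry point.

import water.MrFun;

// Squares in[i] into out[i]. Copies made by makeCopy()/clone() share the same
// array references, and every id writes a different index, so no reduction is needed.
class SquareFun extends MrFun<SquareFun> {
  double[] _in, _out;

  SquareFun() { }                                  // no-arg constructor for cloning/serialization
  SquareFun(double[] in, double[] out) { _in = in; _out = out; }

  @Override protected void map(int id) { _out[id] = _in[id] * _in[id]; }
}

// Assumed driver API: H2O.submitTask(new LocalMR(new SquareFun(in, out), in.length)).join();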
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/MultiReceiverThread.java
package water; import java.net.*; import water.util.Log; /** * The Thread that looks for Multicast UDP Cloud requests. * * This thread just spins on reading multicast UDP packets from the kernel and * either dispatching on them directly itself (if the request is known short) * or queuing them up for worker threads. Multicast *Channels* are available * in Java 7, but we are writing to Java 6 JDKs. So back to the old-school * MulticastSocket. * @author <a href="mailto:cliffc@h2o.ai"></a> * @version 1.0 */ class MultiReceiverThread extends Thread { MultiReceiverThread() { super("Multi-UDP-R"); ThreadHelper.initCommonThreadProperties(this); } // The Run Method. // --- // Started by main() on a single thread, this code manages reading UDP packets @SuppressWarnings("resource") @Override public void run() { // No multicast? Then do not bother with listening for them if (H2O.isFlatfileEnabled()) return; Thread.currentThread().setPriority(Thread.MAX_PRIORITY); MulticastSocket sock = null, errsock = null; InetAddress group = null, errgroup = null; boolean saw_error = false; // Loop forever accepting Cloud Management requests while( true ) { try { // --- // Cleanup from any prior socket failures. Rare unless we're really sick. if( errsock != null && errgroup != null ) { // socket error AND group present final InetAddress tmp = errgroup; errgroup = null; errsock.leaveGroup(tmp); // Could throw, but errgroup cleared for next pass } if( errsock != null ) { // One time attempt a socket close final MulticastSocket tmp2 = errsock; errsock = null; tmp2.close(); // Could throw, but errsock cleared for next pass } if( saw_error ) Thread.sleep(1000); // prevent denial-of-service endless socket-creates saw_error = false; // --- // Actually do the common-case setup of Inet multicast group if( group == null ) group = H2O.CLOUD_MULTICAST_GROUP; // More common-case setup of a MultiCast socket if( sock == null ) { sock = new MulticastSocket(H2O.CLOUD_MULTICAST_PORT); if( H2O.CLOUD_MULTICAST_IF != null ) { try { sock.setNetworkInterface(H2O.CLOUD_MULTICAST_IF); } catch( SocketException e ) { Log.err("Exception calling setNetworkInterface, Multicast Interface, Group, Port - "+ H2O.CLOUD_MULTICAST_IF+" "+H2O.CLOUD_MULTICAST_GROUP+":"+H2O.CLOUD_MULTICAST_PORT, e); throw e; } } sock.joinGroup(group); } // Receive a packet & handle it byte[] buf = new byte[AutoBuffer.MTU]; DatagramPacket pack = new DatagramPacket(buf,buf.length); sock.receive(pack); TCPReceiverThread.basic_packet_handling(new AutoBuffer(pack)); } catch( SocketException e ) { // This rethrow will not be caught and thus kills the multi-cast thread. Log.err("Turning off multicast, which will disable further cloud building"); throw new RuntimeException(e); } catch( Exception e ) { Log.err("Exception on Multicast Interface, Group, Port - "+ H2O.CLOUD_MULTICAST_IF+" "+H2O.CLOUD_MULTICAST_GROUP+":"+H2O.CLOUD_MULTICAST_PORT, e); // On any error from anybody, close all sockets & re-open saw_error = true; errsock = sock ; sock = null; // Signal error recovery on the next loop errgroup = group; group = null; } } } }
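A stripped-down, JDK-only sketch of the receive loop above: join a multicast group on a MulticastSocket and block for a single datagram. The group address, port, and buffer size are placeholders, not H2O's actual multicast configuration.

import java.net.DatagramPacket;
import java.net.InetAddress;
import java.net.MulticastSocket;
import java.util.Arrays;

class MulticastReceiveSketch {
  static byte[] receiveOne() throws Exception {
    InetAddress group = InetAddress.getByName("239.255.0.1");  // placeholder multicast group
    try (MulticastSocket sock = new MulticastSocket(54321)) {   // placeholder port
      sock.joinGroup(group);                                    // start listening on the group
      byte[] buf = new byte[1500];                              // roughly one Ethernet MTU
      DatagramPacket pack = new DatagramPacket(buf, buf.length);
      sock.receive(pack);                                       // blocks until a packet arrives
      sock.leaveGroup(group);
      return Arrays.copyOf(buf, pack.getLength());              // return just the received bytes
    }
  }
}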
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/ParallelizationTask.java
package water; import water.util.Log; import java.util.concurrent.atomic.AtomicInteger; public class ParallelizationTask<T extends H2O.H2OCountedCompleter<T>> extends H2O.H2OCountedCompleter { private final transient AtomicInteger _ctr; // Concurrency control private final T[] _tasks; // Task holder private final Job<?> _j; //Keep track of job progress private final int _maxParallelTasks; public ParallelizationTask(T[] tasks, int maxParallelTasks, Job<?> j) { if (maxParallelTasks <= 0) { throw new IllegalArgumentException("Argument maxParallelTasks should be a positive integer, got: " + maxParallelTasks); } _maxParallelTasks = maxParallelTasks; _ctr = new AtomicInteger(_maxParallelTasks - 1); _tasks = tasks; _j = j; } @Override public void compute2() { final int nTasks = _tasks.length; addToPendingCount(nTasks-1); for (int i=0; i < Math.min(_maxParallelTasks, nTasks); ++i) asyncVecTask(i); } private void asyncVecTask(final int task) { _tasks[task].setCompleter(new Callback(task)); _tasks[task].fork(); } private class Callback extends H2O.H2OCallback { private final int _taskId; Callback(int taskId) { super(ParallelizationTask.this); _taskId = taskId; } @Override public void callback(H2O.H2OCountedCompleter cc) { _tasks[_taskId] = null; // mark completed if (_j != null) { if (_j.stop_requested()) { final int current = _ctr.get(); Log.info("Skipping execution of last " + (_tasks.length - current) + " out of " + _tasks.length + " tasks."); stopAll(); throw new Job.JobCancelledException(); } } int i = _ctr.incrementAndGet(); if (i < _tasks.length) asyncVecTask(i); } } private void stopAll() { for (final T task : _tasks) { if (task != null) { task.cancel(true); } } } }
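A sketch of how ParallelizationTask is meant to be driven: hand it an array of H2OCountedCompleter subtasks and let at most maxParallelTasks run at once. WorkItem is a hypothetical subtask, the null Job argument skips progress and cancellation tracking, and a running H2O instance is assumed.

import water.H2O;
import water.ParallelizationTask;

class ThrottledTasksSketch {
  // Hypothetical unit of work; compute2() must end in tryComplete() so the
  // throttle's callback fires and the next pending task can be launched.
  static class WorkItem extends H2O.H2OCountedCompleter<WorkItem> {
    final int _id;
    WorkItem(int id) { _id = id; }
    @Override public void compute2() {
      // ... do the work for task _id ...
      tryComplete();
    }
  }

  static void runAll(int nTasks, int maxParallel) {
    WorkItem[] tasks = new WorkItem[nTasks];
    for (int i = 0; i < nTasks; i++) tasks[i] = new WorkItem(i);
    // Blocks until all subtasks have run, at most maxParallel at a time.
    H2O.submitTask(new ParallelizationTask<>(tasks, maxParallel, null)).join();
  }
}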
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/Paxos.java
package water; import java.util.Arrays; import water.H2ONode.H2Okey; import water.init.JarHash; import water.nbhm.NonBlockingHashMap; import water.util.Log; /** * (Not The) Paxos * * Used to define Cloud membership. See: * http://en.wikipedia.org/wiki/Paxos_%28computer_science%29 * * Detects and builds a "cloud" - a cooperating group of nodes, with mutual * knowledge of each other. Basically tracks all the nodes that *this* node * has ever heard of, and when *all* of the other nodes have all heard of each * other, declares the situation as "commonKnowledge", and a Cloud. This * algorithm differs from Paxos in a number of obvious ways: * - it is not robust against failing nodes * - it requires true global consensus (a Quorum of All) * - it is vastly simpler than Paxos * * @author <a href="mailto:cliffc@h2o.ai"></a> * @version 1.0 */ public abstract class Paxos { // Whether or not we have common knowledge public static volatile boolean _commonKnowledge = false; // Whether or not we're allowing distributed-writes. The cloud is not // allowed to change shape once we begin writing. public static volatile boolean _cloudLocked = false; public static final NonBlockingHashMap<H2Okey,H2ONode> PROPOSED = new NonBlockingHashMap<>(); // --- // This is a packet announcing what Cloud this Node thinks is the current // Cloud, plus other status bits static synchronized int doHeartbeat( H2ONode h2o ) { // Kill somebody if the jar files mismatch. Do not attempt to deal with // mismatched jars. if(!H2O.ARGS.client && !h2o._heartbeat._client) { // don't check md5 for client nodes if (!h2o._heartbeat.check_jar_md5()) { System.out.println("Jar check fails; my hash=" + Arrays.toString(JarHash.JARHASH)); System.out.println("Jar check fails; received hash=" + Arrays.toString(h2o._heartbeat._jar_md5)); if (H2O.CLOUD.size() > 1) { Log.warn("Killing " + h2o + " because of H2O version mismatch (md5 differs)."); UDPRebooted.T.mismatch.send(h2o); } else { H2O.die("Attempting to join " + h2o + " with an H2O version mismatch (md5 differs). (Is H2O already running?) Exiting."); } return 0; } }else{ if (!h2o._heartbeat.check_jar_md5()) { // we do not want to disturb the user in this case // Just report that client with different md5 tried to connect ListenerService.getInstance().report("client_wrong_md5", new Object[]{h2o._heartbeat._jar_md5}); } } if(!H2O.ARGS.allow_clients && h2o.isClient() && !h2o.isSelf()) { // ignore requests from clients if cloud is not started with client connections enabled ListenerService.getInstance().report("clients_disabled", h2o); h2o.removeClient(); return 0; } if(h2o._heartbeat._cloud_name_hash != H2O.SELF._heartbeat._cloud_name_hash){ // ignore requests from this node as they are coming from different cluster ListenerService.getInstance().report("different_cloud", h2o); return 0; } if (H2O.isFlatfileEnabled() && !H2O.ARGS.client && h2o._heartbeat._client && !H2O.isNodeInFlatfile(h2o)) { H2O.addNodeToFlatfile(h2o); } // Never heard of this dude? See if we want to kill him off for being cloud-locked if( !PROPOSED.contains(h2o) && !h2o._heartbeat._client ) { if( _cloudLocked ) { Log.warn("Killing "+h2o+" because the cloud is no longer accepting new H2O nodes."); UDPRebooted.T.locked.send(h2o); return 0; } if( _commonKnowledge ) { _commonKnowledge = false; // No longer sure about things H2O.SELF._heartbeat._common_knowledge = false; Log.debug("Cloud voting in progress"); } // Add to proposed set, update cloud hash. 
Do not add clients H2ONode res = PROPOSED.putIfAbsent(h2o._key,h2o); assert res==null; H2O.SELF._heartbeat._cloud_hash += h2o.hashCode(); } else if( _commonKnowledge ) { return 0; // Already know about you, nothing more to do } int chash = H2O.SELF._heartbeat._cloud_hash; assert chash == doHash() : "mismatched hash4, HB="+chash+" full="+doHash(); assert !_commonKnowledge; // Do we have consensus now? H2ONode h2os[] = PROPOSED.values().toArray(new H2ONode[PROPOSED.size()]); if( H2O.ARGS.client && h2os.length == 0 ) return 0; // Client stalls until it finds *some* cloud for( H2ONode h2o2 : h2os ) if( chash != h2o2._heartbeat._cloud_hash ) return print("Heartbeat hashes differ, self=0x"+Integer.toHexString(chash)+" "+h2o2+"=0x"+Integer.toHexString(h2o2._heartbeat._cloud_hash)+" ",PROPOSED); // Hashes are same, so accept the new larger cloud-size H2O.CLOUD.set_next_Cloud(h2os,chash); // Demand everybody has rolled forward to same size before consensus boolean same_size=true; for( H2ONode h2o2 : h2os ) same_size &= (h2o2._heartbeat._cloud_size == H2O.CLOUD.size()); if( !same_size ) return 0; H2O.SELF._heartbeat._common_knowledge = true; for( H2ONode h2o2 : h2os ) if( !h2o2._heartbeat._common_knowledge ) return print("Missing common knowledge from all nodes!" ,PROPOSED); _commonKnowledge = true; // Yup! Have global consensus Paxos.class.notifyAll(); // Also, wake up a worker thread stuck in DKV.put Paxos.print("Announcing new Cloud Membership: ", H2O.CLOUD._memary); Log.info("Cloud of size ", H2O.CLOUD.size(), " formed ", H2O.CLOUD.toString()); H2Okey leader = H2O.CLOUD.leader()._key; H2O.notifyAboutCloudSize(H2O.SELF_ADDRESS, H2O.API_PORT, leader.getAddress(), leader.getApiPort(), H2O.CLOUD.size()); return 0; } static private int doHash() { int hash = 0; for( H2ONode h2o : PROPOSED.values() ) hash += h2o.hashCode(); assert hash != 0 || H2O.ARGS.client; return hash; } // Before we start doing distributed writes... block until the cloud // stabilizes. After we start doing distributed writes, it is an error to // change cloud shape - the distributed writes will be in the wrong place. public static void lockCloud(Object reason) { if( _cloudLocked ) return; // Fast-path cutout lockCloud_impl(reason); } static private void lockCloud_impl(Object reason) { // Any fast-path cutouts must happen en route to here. Log.info("Locking cloud to new members, because "+reason.toString()); synchronized(Paxos.class) { while( !_commonKnowledge ) try { Paxos.class.wait(); } catch( InterruptedException ignore ) { } _cloudLocked = true; // remove nodes which are not in the cluster (e.g. nodes from flat-file which are not actually used) if(H2O.isFlatfileEnabled()){ for(H2ONode n: H2O.getFlatfile()){ if(!n._heartbeat._client && !PROPOSED.containsKey(n._key)){ Log.warn("Flatfile entry ignored: Node " + n._key.getIpPortString() + " not active in this cloud. Removing it from the list."); n.removeFromCloud(); } } } } } static int print( String msg, NonBlockingHashMap<H2Okey,H2ONode> p ) { return print(msg,p.values().toArray(new H2ONode[p.size()])); } static int print( String msg, H2ONode h2os[] ) { return print(msg,h2os,""); } static int print( String msg, H2ONode h2os[], String msg2 ) { Log.debug(msg,Arrays.toString(h2os),msg2); return 0; // handy flow-coding return } }
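A toy, self-contained illustration of the consensus test described in the class comment: every node sums the hash codes of all nodes it has proposed, and the cloud can only form once each heartbeat reports that same sum, meaning everyone has heard of exactly the same node set. The types here are stand-ins, not H2ONode or HeartBeat.

import java.util.List;

class CloudConsensusSketch {
  // Same rule as Paxos.doHash(): the cloud hash is the sum of member hash codes.
  static int cloudHash(List<?> proposedNodes) {
    int hash = 0;
    for (Object n : proposedNodes) hash += n.hashCode();
    return hash;
  }

  // Consensus is only reachable when every heartbeat carries our own hash.
  static boolean sameViewEverywhere(int myHash, int[] heartbeatHashes) {
    for (int h : heartbeatHashes)
      if (h != myHash) return false;
    return true;
  }
}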
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/RPC.java
package water; import jsr166y.CountedCompleter; import jsr166y.ForkJoinPool; import water.H2O.FJWThr; import water.H2O.H2OCountedCompleter; import water.UDP.udp; import water.util.DistributedException; import water.util.Log; import java.io.IOException; import java.util.ArrayList; import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; /** * A remotely executed FutureTask. Flow is: * * 1- Build a DTask (or subclass). This object will be replicated remotely. * 2- Make a RPC object, naming the target Node. Call (re)call(). Call get() * to block for result, or cancel() or isDone(), etc. Caller can also arrange * for caller.tryComplete() to be called in a F/J thread, to support completion * style execution (i.e. Continuation Passing Style). * 3- DTask will be serialized and sent to the target; small objects via UDP * and large via TCP (using AutoBuffer and auto-gen serializers). * 4- An RPC UDP control packet will be sent to target; this will also contain * the DTask if its small enough. * 4.5- The network may replicate (or drop) the UDP packet. Dups may arrive. * 4.5- Sender may timeout, and send dup control UDP packets. * 5- Target will capture a UDP packet, and begin filtering dups (via task#). * 6- Target will deserialize the DTask, and call DTask.invoke() in a F/J thread. * 6.5- Target continues to filter (and drop) dup UDP sends (and timeout resends) * 7- Target finishes call, and puts result in DTask. * 8- Target serializes result and sends to back to sender. * 9- Target sends an ACK back (may be combined with the result if small enough) * 10- Target puts the ACK in H2ONode.TASKS for later filtering. * 10.5- Target receives dup UDP request, then replies with ACK back. * 11- Sender receives ACK result; deserializes; notifies waiters * 12- Sender sends ACKACK back * 12.5- Sender receives dup ACK's, sends dup ACKACK's back * 13- Target receives ACKACK, removes TASKS tracking * * @author <a href="mailto:cliffc@h2o.ai"></a> * @version 1.0 */ public class RPC<V extends DTask> implements Future<V>, Delayed, ForkJoinPool.ManagedBlocker { // The target remote node to pester for a response. NULL'd out if the target // disappears or we cancel things (hence not final). H2ONode _target; // The distributed Task to execute. Think: code-object+args while this RPC // is a call-in-progress (i.e. has an 'execution stack') final V _dt; // True if _dt contains the final answer volatile boolean _done; // True if the remote sent us a NACK (he received this RPC, but perhaps is // not done processing it, no need for more retries). volatile boolean _nack; // A locally-unique task number; a "cookie" handed to the remote process that // they hand back with the response packet. These *never* repeat, so that we // can tell when a reply-packet points to e.g. a dead&gone task. int _tasknum; // Time we started this sucker up. Controls re-send behavior. final long _started; long _retry; // When we should attempt a retry int _resendsCnt; // A list of CountedCompleters we will call tryComplete on when the RPC // finally completes. Frequently null/zero. ArrayList<H2OCountedCompleter> _fjtasks; // We only send non-failing TCP info once; also if we used TCP it was large // so duplications are expensive. However, we DO need to keep resending some // kind of "are you done yet?" UDP packet, incase the reply packet got dropped // (but also in case the main call was a single UDP packet and it got dropped). // Not volatile because read & written under lock. 
boolean _sentTcp; // To help with asserts, record the size of the sent DTask - if we resend // if should remain the same size. int _size; int _size_rez; // Size of received results // Magic Cookies static final byte SERVER_UDP_SEND = 10; static final byte SERVER_TCP_SEND = 11; static final byte CLIENT_UDP_SEND = 12; static final byte CLIENT_TCP_SEND = 13; static final private String[] COOKIES = new String[] { "SERVER_UDP","SERVER_TCP","CLIENT_UDP","CLIENT_TCP" }; final static int MAX_TIMEOUT = 60000; // 5 sec max timeout cap on exponential decay of retries public static <DT extends DTask> RPC<DT> call(H2ONode target, DT dtask) { return new RPC(target,dtask).call(); } // Make a remotely executed FutureTask. Must name the remote target as well // as the remote function. This function is expected to be subclassed. public RPC( H2ONode target, V dtask ) { this(target,dtask,1.0f); setTaskNum(); } // Only used for people who optimistically make RPCs that get thrown away and // never sent over the wire. Split out task# generation from RPC <init> - // every task# MUST be sent over the wires, because the far end tracks the // task#'s in a dense list (no holes). RPC( H2ONode target, V dtask, float ignore ) { _target = target; _dt = dtask; _started = System.currentTimeMillis(); _retry = RETRY_MS; } RPC<V> setTaskNum() { assert _tasknum == 0; _tasknum = _target.nextTaskNum(); return this; } // Any Completer will not be carried over to remote; add it to the RPC call // so completion is signaled after the remote comes back. private void handleCompleter( CountedCompleter cc ) { assert cc instanceof H2OCountedCompleter; if( _fjtasks == null || !_fjtasks.contains(cc) ) addCompleter((H2OCountedCompleter)cc); _dt.setCompleter(null); } // If running on self, just submit to queues & do locally private RPC<V> handleLocal() { assert _dt.getCompleter()==null; _dt.setCompleter(new H2O.H2OCallback<DTask>() { @Override public void callback(DTask dt) { synchronized(RPC.this) { _done = true; RPC.this.notifyAll(); } doAllCompletions(); } @Override public boolean onExceptionalCompletion(Throwable ex, CountedCompleter dt) { synchronized(RPC.this) { // Might be called several times if( _done ) return true; // Filter down to 1st exceptional completion _dt.setException(ex); // must be the last set before notify call cause the waiting thread // can wake up at any moment independently on notify _done = true; RPC.this.notifyAll(); } doAllCompletions(); return true; } }); H2O.submitTask(_dt); return this; } // Make an initial RPC, or re-send a packet. Always called on 1st send; also // called on a timeout. public synchronized RPC<V> call() { // Any Completer will not be carried over to remote; add it to the RPC call // so completion is signaled after the remote comes back. CountedCompleter cc = _dt.getCompleter(); if( cc != null ) handleCompleter(cc); // If running on self, just submit to queues & do locally if( _target==H2O.SELF ) return handleLocal(); // Keep a global record, for awhile if( _target != null ) _target.taskPut(_tasknum,this); try { if( _nack ) return this; // Racing Nack rechecked under lock; no need to send retry // We could be racing timeouts-vs-replies. Blow off timeout if we have an answer. if( isDone() ) { if( _target != null ) _target.taskRemove(_tasknum); return this; } // Default strategy: (re)fire the packet and (re)start the timeout. We // "count" exactly 1 failure: just whether or not we shipped via TCP ever // once. After that we fearlessly (re)send UDP-sized packets until the // server replies. 
// Pack classloader/class & the instance data into the outgoing // AutoBuffer. If it fits in a single UDP packet, ship it. If not, // finish off the current AutoBuffer (which is now going TCP style), and // make a new UDP-sized packet. On a re-send of a TCP-sized hunk, just // send the basic UDP control packet. if( !_sentTcp ) { while( true ) { // Retry loop for broken TCP sends AutoBuffer ab = new AutoBuffer(_target,_dt.priority()); try { final boolean t; ab.putTask(UDP.udp.exec, _tasknum).put1(CLIENT_UDP_SEND); ab.put(_dt); t = ab.hasTCP(); assert sz_check(ab) : "Resend of " + _dt.getClass() + " changes size from " + _size + " to " + ab.size() + " for task#" + _tasknum; ab.close(); // Then close; send final byte _sentTcp = t; // Set after close (and any other possible fail) break; // Break out of retry loop } catch( AutoBuffer.AutoBufferException e ) { Log.info("IOException during RPC call: " + e._ioe.getMessage() + ", AB=" + ab + ", for task#" + _tasknum + ", waiting and retrying..."); ab.drainClose(); try { Thread.sleep(500); } catch (InterruptedException ignore) {} } } // end of while(true) } else { // Else it was sent via TCP in a prior attempt, and we've timed out. // This means the caller's ACK/answer probably got dropped and we need // him to resend it (or else the caller is still processing our // request). Send a UDP reminder - but with the CLIENT_TCP_SEND flag // instead of the UDP send, and no DTask (since it previously went via // TCP, no need to resend it). AutoBuffer ab = new AutoBuffer(_target,_dt.priority()).putTask(UDP.udp.exec,_tasknum); ab.put1(CLIENT_TCP_SEND).close(); } // Double retry until we exceed existing age. This is the time to delay // until we try again. Note that we come here immediately on creation, // so the first doubling happens before anybody does any waiting. Also // note the generous 60 sec cap: ping at least every 60 sec. _retry += (_retry < MAX_TIMEOUT ) ? _retry : MAX_TIMEOUT; // Put self on the "TBD" list of tasks awaiting Timeout. // So: don't really 'forget' but remember me in a little bit. // UDPTimeOutThread.PENDING.put(_tasknum, this); return this; } catch( Throwable t ) { throw Log.throwErr(t); } } private V result() { Throwable t = _dt.getDException(); if( t != null ) throw (t instanceof DistributedException)?new DistributedException(t.getMessage(),t.getCause()):new DistributedException(t); return _dt; } private static int getThreadPriority() { Thread cThr = Thread.currentThread(); return (cThr instanceof FJWThr) ? ((FJWThr)cThr)._priority : -1; } private boolean canDoManagedBlock() { final int priority = getThreadPriority(); return _dt.priority() > priority || (_dt.priority() == priority && _dt instanceof MRTask); } // Similar to FutureTask.get() but does not throw any checked exceptions. // Returns null for canceled tasks, including those where the target dies. // Throws a DException if the remote throws, wrapping the original exception. @Override public V get() { return get(false); } public V get(boolean allowDirectBlock) { if( _done ) return result(); // Fast-path shortcut, or throw if exception // check priorities - FJ task can only block on a task with higher priority! final boolean canDoManagedBlock = canDoManagedBlock(); final boolean doDirectBlock = !canDoManagedBlock && allowDirectBlock; try { if (doDirectBlock) { // Only block directly if the caller allows it (knows the blocking // will be short and we will recover quickly), direct blocking has // performance implications (worker threads are not replaced in the // FJ thread-pool).
block(); } else { // Use FJP ManagedBlock for this blocking-wait - so the FJP can spawn // another thread if needed. final int priority = getThreadPriority(); // The assert should reveal the issue in our CI tests, however, // in production with asserts disabled we would rather continue as the // risk of actual deadlock is low (and failing here would definitely // not give the user the result they want). assert canDoManagedBlock : "*** Attempting to block on task (" + _dt.getClass() + ") with equal or lower priority. Can lead to deadlock! " + _dt.priority() + " <= " + priority; ForkJoinPool.managedBlock(this); } } catch( InterruptedException ignore ) { } if( _done ) return result(); // Fast-path shortcut or throw if exception assert isCancelled(); return null; } // Return true if blocking is unnecessary, which is true if the Task isDone. @Override public boolean isReleasable() { return isDone(); } // Possibly blocks the current thread. Returns true if isReleasable would // return true. Used by the FJ Pool management to spawn threads to prevent // deadlock if otherwise all threads would block on waits. @Override public synchronized boolean block() throws InterruptedException { while( !isDone() ) { wait(1000); } return true; } @Override public final V get(long timeout, TimeUnit unit) { if( _done ) return _dt; // Fast-path shortcut throw H2O.fail(); } // Done if target is dead or canceled, or we have a result. @Override public final boolean isDone() { return _target==null || _done; } // Done if target is dead or canceled @Override public final boolean isCancelled() { return _target==null; } // Attempt to cancel job @Override public final boolean cancel( boolean mayInterruptIfRunning ) { boolean did = false; synchronized(this) { // Install the answer under lock if( !isCancelled() ) { did = true; // Did cancel (was not cancelled already) _target.taskRemove(_tasknum); _target = null; // Flag as canceled // UDPTimeOutThread.PENDING.remove(this); } notifyAll(); // notify in any case } return did; } // --- // Handle the remote-side incoming UDP packet. This is called on the REMOTE // Node, not local. Wrong thread, wrong JVM. static class RemoteHandler extends UDP { @Override AutoBuffer call(AutoBuffer ab) { throw H2O.fail(); } // Pretty-print bytes 1-15; byte 0 is the udp_type enum @Override String print16( AutoBuffer ab ) { int flag = ab.getFlag(); String clazz = (flag == CLIENT_UDP_SEND) ? TypeMap.className(ab.getInt()) : ""; return "task# "+ab.getTask()+" "+ clazz+" "+COOKIES[flag-SERVER_UDP_SEND]; } } static class RPCCall extends H2OCountedCompleter implements Delayed { volatile DTask _dt; // Set on construction, atomically set to null onAckAck final H2ONode _client; final int _tsknum; long _started; // Retry fields for the ackack long _retry; int _ackResendCnt; int _nackResendCnt; volatile boolean _computedAndReplied; // One time transition from false to true volatile boolean _computed; // One time transition from false to true // To help with asserts, record the size of the sent DTask - if we resend // it should remain the same size. Also used for profiling.
int _size; RPCCall(DTask dt, H2ONode client, int tsknum) { super(dt.priority()); _dt = dt; _client = client; _tsknum = tsknum; if( _dt == null ) _computedAndReplied = true; // Only for Golden Completed Tasks (see H2ONode.java) _started = System.currentTimeMillis(); // for nack timeout _retry = RETRY_MS >> 1; // half retry for sending nack } RPCCall(H2ONode client) { _client = client; _tsknum = 0; } @Override public void compute2() { // First set self to be completed when this subtask completer assert _dt.getCompleter() == null; _dt.setCompleter(this); // Run the remote task on this server... _dt.dinvoke(_client); } // When the task completes, ship results back to client. F/J guarantees // that this is called only once with no onExceptionalCompletion calls - or // 1-or-more onExceptionalCompletion calls. @Override public void onCompletion( CountedCompleter caller ) { synchronized(this) { assert !_computed; _computed = true; } sendAck(); } // Exception occurred when processing this task locally, set exception and // send it back to the caller. Can be called lots of times (e.g., once per // MRTask.map call that throws). @Override public boolean onExceptionalCompletion( Throwable ex, CountedCompleter caller ) { if( _computed ) return false; synchronized(this) { // Filter dup calls to onExCompletion if( _computed ) return false; _computed = true; } _dt.setException(ex); sendAck(); // PUBDEV-2630 return false; } private void sendAck() { // Send results back DTask dt, origDt = _dt; // _dt can go null the instant it is send over wire assert origDt!=null; // Freed after completion while((dt = _dt) != null) { // Retry loop for broken TCP sends AutoBuffer ab = null; try { // Start the ACK with results back to client. If the client is // asking for a class/id mapping (or any job running at FETCH_ACK // priority) then return a udp.fetchack byte instead of a udp.ack. // The receiver thread then knows to handle the mapping at the higher // priority. UDP.udp udp = dt.priority()==H2O.FETCH_ACK_PRIORITY ? UDP.udp.fetchack : UDP.udp.ack; ab = new AutoBuffer(_client,udp._prior).putTask(udp,_tsknum).put1(SERVER_UDP_SEND); assert ab.position() == 1+2+2+4+1; dt.write(ab); // Write the DTask - could be very large write dt._repliedTcp = ab.hasTCP(); // Resends do not need to repeat TCP result ab.close(); // Then close; send final byte _computedAndReplied = true; // After the final handshake, set computed+replied bit break; // Break out of retry loop } catch( AutoBuffer.AutoBufferException e ) { if( !_client.isClient() ) // Report on servers only; clients allowed to be flaky Log.info("IOException during ACK, "+e._ioe.getMessage()+", t#"+_tsknum+" AB="+ab+", waiting and retrying..."); ab.drainClose(); if( _client.isClient() ) // Dead client will not accept a TCP ACK response? this.CAS_DT(dt,null); // cancel the ACK try { Thread.sleep(100); } catch (InterruptedException ignore) {} } catch( Throwable e ) { // Custom serializer just barfed? Log.err(e); // Log custom serializer exception ab.drainClose(); } } // end of while(true) if( dt == null ) Log.info("Cancelled remote task#"+_tsknum+" "+origDt.getClass()+" to "+_client + " has been cancelled by remote"); else { if( dt instanceof MRTask && dt.logVerbose() ) Log.debug("Done remote task#"+_tsknum+" "+dt.getClass()+" to "+_client); _client.record_task_answer(this); // Setup for retrying Ack & AckAck, if not canceled } } final void send_nack() { new AutoBuffer(_client,udp.nack._prior).putTask(udp.nack,_tsknum).close(); _retry += (_retry < MAX_TIMEOUT ) ? 
_retry : MAX_TIMEOUT; } // Re-send strictly the ack, because we're missing an AckAck final void resend_ack() { assert _computedAndReplied : "Found RPCCall not computed "+_tsknum; DTask dt = _dt; if( dt == null ) return; // Received ACKACK already UDP.udp udp = dt.priority()==H2O.FETCH_ACK_PRIORITY ? UDP.udp.fetchack : UDP.udp.ack; AutoBuffer rab = new AutoBuffer(_client,dt.priority()).putTask(udp,_tsknum); boolean wasTCP = dt._repliedTcp; if( wasTCP ) rab.put1(RPC.SERVER_TCP_SEND) ; // Original reply sent via TCP else { rab.put1(RPC.SERVER_UDP_SEND); // Original reply sent via UDP assert rab.position() == 1+2+2+4+1; dt.write(rab); } assert sz_check(rab) : "Resend of " + _dt.getClass() + " changes size from "+_size+" to "+rab.size(); assert dt._repliedTcp==wasTCP; rab.close(); dt._repliedTcp = wasTCP; // Double retry until we exceed existing age. This is the time to delay // until we try again. Note that we come here immediately on creation, // so the first doubling happens before anybody does any waiting. Also // note the generous 60 sec cap: ping at least every 60 sec. _retry += (_retry < MAX_TIMEOUT ) ? _retry : MAX_TIMEOUT; } // How long until we should do the "timeout" action? @Override public final long getDelay( TimeUnit unit ) { long delay = (_started+_retry)-System.currentTimeMillis(); return unit.convert( delay, TimeUnit.MILLISECONDS ); } // Needed for the DelayQueue API @Override public final int compareTo( Delayed t ) { RPCCall r = (RPCCall)t; long nextTime = _started+_retry, rNextTime = r._started+r._retry; return nextTime == rNextTime ? 0 : (nextTime > rNextTime ? 1 : -1); } static private AtomicReferenceFieldUpdater<RPCCall,DTask> CAS_DT = AtomicReferenceFieldUpdater.newUpdater(RPCCall.class, DTask.class,"_dt"); boolean CAS_DT(DTask old, DTask nnn) { return CAS_DT.compareAndSet(this,old,nnn); } // Assertion check that size is not changing between resends, // i.e., resends sent identical data. private boolean sz_check(AutoBuffer ab) { final int absize = ab.size(); if( _size == 0 ) { _size = absize; return true; } return _size==absize; } } // Handle traffic, from a client to this server asking for work to be done. // Called from either a F/J thread (generally with a UDP packet) or from the // TCPReceiver thread. static void remote_exec( AutoBuffer ab ) { long lo = ab.get8(0), hi = ab._size >= 16 ? ab.get8(8) : 0; final int task = ab.getTask(); final int flag = ab.getFlag(); assert flag==CLIENT_UDP_SEND || flag==CLIENT_TCP_SEND; // Client-side send // Atomically record an instance of this task, one-time-only replacing a // null with an RPCCall, a placeholder while we work on a proper response - // and it serves to let us discard dup UDP requests. RPCCall old = ab._h2o.has_task(task); // This is a UDP packet requesting an answer back for a request sent via // TCP but the UDP packet has arrived ahead of the TCP. Just drop the UDP // and wait for the TCP to appear. if( old == null && flag == CLIENT_TCP_SEND ) { Log.warn("got tcp with existing task #, FROM " + ab._h2o.toString() + " AB: " /* + UDP.printx16(lo,hi)*/); assert !ab.hasTCP():"ERROR: got tcp with existing task #, FROM " + ab._h2o.toString() + " AB: " /* + UDP.printx16(lo,hi)*/; // All the resends should be UDP only // DROP PACKET } else if( old == null ) { // New task? RPCCall rpc; try { // Read the DTask Right Now. If we are the TCPReceiver thread, then we // are reading in that thread... and thus TCP reads are single-threaded.
rpc = new RPCCall(ab.get(water.DTask.class),ab._h2o,task); } catch( AutoBuffer.AutoBufferException e ) { // Here we assume it's a TCP fail on read - and ignore the remote_exec // request. The caller will send it again. NOTE: this case is // indistinguishable from a broken short-writer/long-reader bug, except // that we'll re-send endlessly and fail endlessly. Log.info("Network congestion OR short-writer/long-reader: TCP "+e._ioe.getMessage()+", AB="+ab+", ignoring partial send"); ab.drainClose(); return; } RPCCall rpc2 = ab._h2o.record_task(rpc); if( rpc2==null ) { // Atomically insert (to avoid double-work) if( rpc._dt instanceof MRTask && rpc._dt.logVerbose() ) Log.debug("Start remote task#"+task+" "+rpc._dt.getClass()+" from "+ab._h2o); H2O.submitTask(rpc); // And execute! } else { // Else lost the task-insertion race if(ab.hasTCP()) ab.drainClose(); // DROP PACKET } } else if( !old._computedAndReplied) { // This packet has not been fully computed. Hence it's still a work-in- // progress locally. We have no answer to reply but we do not want to // re-offer the packet for repeated work. Send back a NACK, letting the // client know we're Working On It assert !ab.hasTCP():"got tcp with existing task #, FROM " + ab._h2o.toString() + " AB: " + UDP.printx16(lo,hi) + ", position = " + ab._bb.position(); ab.clearForWriting(udp.nack._prior).putTask(UDP.udp.nack.ordinal(), task); // DROP PACKET } else { // This is an old re-send of the same thing we've answered to before. // Send back the same old answer ACK. If we sent via TCP before, then // we know the answer got there so just send a control-ACK back. If we // sent via UDP, resend the whole answer. if(ab.hasTCP()) { Log.warn("got tcp with existing task #, FROM " + ab._h2o.toString() + " AB: " + UDP.printx16(lo,hi)); // All the resends should be UDP only ab.drainClose(); } if(old._dt != null) { // already ackacked ++old._ackResendCnt; if (old._ackResendCnt % 10 == 0) Log.err("Possibly broken network, can not send ack through, got " + old._ackResendCnt + " for task # " + old._tsknum + ", dt == null?" + (old._dt == null)); old.resend_ack(); } } ab.close(); } // TCP large RECEIVE of results. Note that 'this' is NOT the RPC object // that is hoping to get the received object, nor is the current thread the // RPC thread blocking for the object. The current thread is the TCP // reader thread. static void tcp_ack( final AutoBuffer ab ) throws IOException { // Get the RPC we're waiting on int task = ab.getTask(); RPC rpc = ab._h2o.taskGet(task); // Race with canceling a large RPC fetch: Task is already dead. Do not // bother reading from the TCP socket, just bail out & close socket. if( rpc == null || rpc._done) { ab.drainClose(); } else { assert rpc._tasknum == task; assert !rpc._done; // Here we have the result, and we're on the correct Node but wrong // Thread. If we just return, the TCP reader thread will close the // remote, the remote will UDP ACK the RPC back, and back on the current // Node but in the correct Thread, we'd wake up and realize we received a // large result. try { rpc.response(ab); } catch( AutoBuffer.AutoBufferException e ) { // If TCP fails, we will have done a short-read crushing the original // _dt object, and be unable to resend. This is fatal right now. 
// Really: an unimplemented feature; fix is to notice that a partial // TCP read means that the server (1) got our remote_exec request, (2) // has computed an answer and was trying to send it to us, (3) failed // sending via TCP hence the server knows it failed and will send again // without any further work from us. We need to disable all the resend // & retry logic, and wait for the server to re-send our result. // Meanwhile the _dt object is crushed with half-read crap, and cannot // be trusted except in the base fields. throw Log.throwErr(e._ioe); } } // ACKACK the remote, telling him "we got the answer" new AutoBuffer(ab._h2o, H2O.ACK_ACK_PRIORITY).putTask(UDP.udp.ackack.ordinal(),task).close(); } // Got a response UDP packet, or completed a large TCP answer-receive. // Install it as The Answer packet and wake up anybody waiting on an answer. // On all paths, send an ACKACK back static AutoBuffer ackack( AutoBuffer ab, int tnum ) { return ab.clearForWriting(H2O.ACK_ACK_PRIORITY).putTask(UDP.udp.ackack.ordinal(),tnum); } protected AutoBuffer response( AutoBuffer ab ) { assert _tasknum==ab.getTask(); if( _done ) { if(!ab.hasTCP()) return ackack(ab, _tasknum); // Ignore duplicate response packet ab.drainClose(); } else { int flag = ab.getFlag(); // Must read flag also, to advance ab if (flag == SERVER_TCP_SEND) return ackack(ab, _tasknum); // Ignore UDP packet for a TCP reply assert flag == SERVER_UDP_SEND:"flag = " + flag; synchronized (this) { // Install the answer under lock if (_done) { if(!ab.hasTCP()) return ackack(ab, _tasknum); // Ignore duplicate response packet ab.drainClose(); } else { // UDPTimeOutThread.PENDING.remove(_tasknum); _dt.read(ab); // Read the answer (under lock?) _size_rez = ab.size(); // Record received size ab.close(); // Also finish the read (under lock? even if canceled, since need to drain TCP) if (!isCancelled()) // Can be canceled already (locally by MRTask while recieving remote answer) _dt.onAck(); // One time only execute (before sending ACKACK) _done = true; // Only read one (of many) response packets ab._h2o.taskRemove(_tasknum); // Flag as task-completed, even if the result is null notifyAll(); // And notify in any case } if (!isCancelled()) // Can be canceled already doAllCompletions(); // Send all tasks needing completion to the work queues } } // AckAck back on a fresh AutoBuffer, since actually closed() the incoming one return new AutoBuffer(ab._h2o, H2O.ACK_ACK_PRIORITY).putTask(UDP.udp.ackack.ordinal(),_tasknum); } private void doAllCompletions() { final Throwable e = _dt.getDException(); // Also notify any and all pending completion-style tasks if( _fjtasks != null ) for( final H2OCountedCompleter task : _fjtasks ) { H2O.submitTask(new H2OCountedCompleter(task.priority()) { @Override public void compute2() { if (e != null) // re-throw exception on this side as if it happened locally task.completeExceptionally(e); else try { task.__tryComplete(_dt); } catch (Throwable e) { task.completeExceptionally(e); } } }); } } // --- public synchronized RPC<V> addCompleter( H2OCountedCompleter task ) { if( _fjtasks == null ) _fjtasks = new ArrayList(2); _fjtasks.add(task); return this; } // Assertion check that size is not changing between resends, // i.e., resends sent identical data. 
private boolean sz_check(AutoBuffer ab) { final int absize = ab.size(); if( _size == 0 ) { _size = absize; return true; } return _size==absize; } // Size of received results int size_rez() { return _size_rez; } // --- static final long RETRY_MS = 10000; // Initial UDP packet retry in msec // How long until we should do the "timeout" action? @Override public final long getDelay( TimeUnit unit ) { long delay = (_started+_retry)-System.currentTimeMillis(); return unit.convert( delay, TimeUnit.MILLISECONDS ); } // Needed for the DelayQueue API @Override public final int compareTo( Delayed t ) { RPC<?> dt = (RPC<?>)t; long nextTime = _started+_retry, dtNextTime = dt._started+dt._retry; return nextTime == dtNextTime ? 0 : (nextTime > dtNextTime ? 1 : -1); } }
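The calling pattern described in the class comment above is easiest to see end-to-end in a small sketch. The snippet below is illustrative only (EchoTask is hypothetical, not part of RPC.java) and mirrors the pattern used by TaskGetKey later in this package: the remote side fills the reply fields in dinvoke() and calls tryComplete(), which ships the result back as the ACK.

// Hypothetical DTask: request fields go out, the remote overwrites them with the
// answer, and the caller blocks on RPC.get() for the round trip (steps 1-11 above).
class EchoTask extends DTask<EchoTask> {
  String _msg;                                      // request on the way out, reply on the way back
  EchoTask(String msg) { _msg = msg; }
  @Override public void dinvoke(H2ONode sender) {   // runs on the target node
    _msg = "echo from " + H2O.SELF + ": " + _msg;   // the "remote work"
    tryComplete();                                  // completes the task; result is ACK'd back
  }
  @Override public void compute2() { throw H2O.fail(); }
}
// Caller side: pick a target node, fire the RPC, block for the answer.
// H2ONode target = H2O.CLOUD.leader();
// String reply = RPC.call(target, new EchoTask("hi")).get()._msg;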
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/RestApiPingCheckThread.java
package water; import water.api.PingHandler; import water.util.Log; public class RestApiPingCheckThread extends Thread { public RestApiPingCheckThread() { super("RestApiPingCheckThread"); this.setDaemon(true); } @Override public void run() { while (!Thread.currentThread().isInterrupted()) { if (Paxos._cloudLocked) { if (H2O.SELF == H2O.CLOUD.leader()) { if (isTimeoutExceeded(PingHandler.lastAccessed, H2O.ARGS.rest_api_ping_timeout)) { Log.fatal("Stopping H2O cluster since we haven't received any REST api request on 3/Ping!"); H2O.shutdown(-1); } } else { // Cloud is locked, but we are not leader, we can stop the thread Thread.currentThread().interrupt(); continue; } } try { Thread.sleep(H2O.ARGS.rest_api_ping_timeout); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } } } private static boolean isTimeoutExceeded(long lastHeardFrom, long timeout) { return (System.currentTimeMillis() - lastHeardFrom) >= timeout; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/Scope.java
package water; import water.fvec.FileVec; import water.fvec.Frame; import water.fvec.Vec; import water.logging.Logger; import water.logging.LoggerFactory; import java.util.*; import java.util.stream.Collectors; /** A "scope" for tracking Key lifetimes; an experimental API. * * <p>A Scope defines a <em>SINGLE THREADED</em> local lifetime management context, * stored in Thread Local Storage. Scopes can be explicitly entered or exited. * User keys created by this thread are tracked, and deleted when the scope is * exited. Since enter &amp; exit are explicit, failure to exit means the Keys * leak (there is no reliable thread-on-exit cleanup action). You must call * <code>Scope.exit()</code> at some point. Only user keys &amp; Vec keys are tracked.</p> * * <p>Scopes support nesting. Scopes support partial cleanup: you can list Keys * you'd like to keep in the exit() call. These will be "bumped up" to the * higher nested scope - or escaped and become untracked at the top-level.</p> */ public class Scope { private static final Logger log = LoggerFactory.getLogger(Scope.class); // Thread-based Key lifetime tracking private static final ThreadLocal<Scope> _scope = new ThreadLocal<Scope>() { @Override protected Scope initialValue() { return new Scope(); } }; private final Stack<Level> _levels = new Stack<>(); /** debugging purpose */ public static Scope current() { return _scope.get(); } /** for testing purpose */ public static int nLevel() { Scope scope = current(); return scope._levels.size(); } /** for testing purpose */ public static void reset() { Scope scope = _scope.get(); scope._levels.clear(); } /** Enter a new Scope */ public static void enter() { Scope scope = _scope.get(); Level level = new Level(); Level outer = scope._levels.empty() ? null : scope._levels.peek(); if (outer != null) level._protectedKeys.addAll(outer._protectedKeys); //inherit protected keys from outer scope level scope._levels.push(level); } /** Exit the innermost Scope, remove all Keys created since the matching * enter call except for the listed Keys. * @return Returns the list of kept keys. */ public static Key[] exit(Key... keep) { Scope scope = _scope.get(); assert !(scope._levels.empty()): "Scope in inconsistent state: Scope.exit() called without a matching Scope.enter()"; final Set<Key> keepKeys = new HashSet<>(); if (keep != null) { for (Key k : keep) { if (k != null) keepKeys.add(k); } } final Level exitingLevel = scope._levels.pop(); keepKeys.addAll(exitingLevel._protectedKeys); Key[] arrkeep = keepKeys.toArray(new Key[0]); Arrays.sort(arrkeep); Futures fs = new Futures(); final Map<Integer, List<Key<Vec>>> bulkRemovals = new HashMap<>(); for (Key key : exitingLevel._keys) { boolean remove = arrkeep.length == 0 || Arrays.binarySearch(arrkeep, key) < 0; if (remove) { Value v = DKV.get(key); boolean cascade = !(v == null || v.isFrame()); //Frames are handled differently as we're explicitly also tracking their Vec keys... if (v != null && v.isVec() && exitingLevel._trackingInfo.containsKey(key)) { int nchunks = exitingLevel._trackingInfo.get(key)._nchunks; if (nchunks < 0) { Keyed.remove(key, fs, cascade); // don't bulk remove Vecs with unfilled _nchunks info. 
} else { if (!bulkRemovals.containsKey(nchunks)) bulkRemovals.put(nchunks, new ArrayList<>()); bulkRemovals.get(nchunks).add(key); } } else { Keyed.remove(key, fs, cascade); } } } for (Map.Entry<Integer, List<Key<Vec>>> bulkRemoval : bulkRemovals.entrySet()) { Vec.bulk_remove(bulkRemoval.getValue().toArray(new Key[0]), bulkRemoval.getKey()); } fs.blockForPending(); exitingLevel.clear(); return keep; } /** * @return true iff we are inside a scope */ public static boolean isActive() { return !_scope.get()._levels.empty(); } /** * get the current scope level in a context of modifying it, therefore requiring `Scope.enter()` to have been called first. * @return the current Scope.Level. */ private static Level lget() { Scope scope = _scope.get(); // assert !scope._levels.empty() : "Need to enter a Scope before modifying it."; // would be nice to be able to enable this assertion, unfortunately too much code (tests?) don't fulfill this requirement currently. return scope._levels.empty() ? null : scope._levels.peek(); } static void track_internal(Key k) { if (k.user_allowed() || !k.isVec()) return; // Not tracked Scope scope = _scope.get(); // Pay the price of T.L.S. lookup if (scope._levels.empty()) return; // track internal may currently be implicitly called when we're not inside a scope. track_impl(scope._levels.peek(), k); } public static <T extends Keyed<T>> T track_generic(T keyed) { if (keyed == null) return null; Level level = lget(); // Pay the price of T.L.S. lookup track_impl(level, keyed._key); return keyed; } /** * Track a single Vec. * @param vec * @return */ public static Vec track(Vec vec) { if (vec == null) return vec; Level level = lget(); // Pay the price of T.L.S. lookup if (level == null) return vec; track_impl(level, vec._key); if (!(vec instanceof FileVec)) { // don't provide nchunks for individually tracked FileVecs as it is mutable for those (alternative is to fully disable this for all individually tracked Vecs) final TrackingInfo vecInfo = new TrackingInfo(); vecInfo._nchunks = vec.nChunks(); level._trackingInfo.put(vec._key, vecInfo); } return vec; } /** * Track one or more {@link Frame}s, as well as all their Vecs independently. * The tracked frames and vecs will be removed from DKV when {@link Scope#exit(Key[])} is called, * but for {@link Frame}s, they will be removed without their Vecs as those are tracked independently, * and we want to be able to {@link #untrack(Key[])} them (or spare them at {@link #exit(Key[])} * without them being removed together with the {@link Frame} to which they're attached. * @param frames * @return the first Frame passed as param */ public static Frame track(Frame... frames) { if (frames.length == 0) return null; Level level = lget(); if (level == null) return frames[0]; for (Frame fr : frames) { if (fr == null) continue; track_impl(level, fr._key); final TrackingInfo vecInfo = new TrackingInfo(); vecInfo._source = Objects.toString(fr._key); for (Key<Vec> vkey : fr.keys()) { track_impl(level, vkey); if (vecInfo._nchunks < 0) { Vec vec = vkey.get(); if (vec != null) vecInfo._nchunks = vec.nChunks(); } if (vecInfo._nchunks > 0) level._trackingInfo.put(vkey, vecInfo); } } return frames[0]; } private static void track_impl(Level level, Key key) { if (key == null) return; if (level == null) return; level._keys.add(key); // Track key } /** * Untrack the specified keys. * Note that if a key corresponds to a {@Frame}, then only the frame key is untracked, not its vecs. 
* Use {@link #untrack(Frame...)} if you need a behaviour symmetrical to {@link #track(Frame...)}. * @param keys */ public static <K extends Key> void untrack(K... keys) { if (keys.length == 0) return; Level level = lget(); // Pay the price of T.L.S. lookup if (level == null) return; // should we allow calling `untrack` if we're not entered in a scope? (symmetry with `track` currently forces us to do so). Set<Key> xkeys = level._keys; for (Key key : keys) xkeys.remove(key); // Untrack key } /** * Untrack the specified keys. * Note that if a key corresponds to a {@link Frame}, then only the frame key is untracked, not its vecs. * Use {@link #untrack(Frame...)} if you need a behaviour symmetrical to {@link #track(Frame...)}. * @param keys */ public static <K extends Key> void untrack(Iterable<K> keys) { Level level = lget(); // Pay the price of T.L.S. lookup if (level == null) return; Set<Key> xkeys = level._keys; for (Key key : keys) xkeys.remove(key); // Untrack key } /** * * @param frames * @return the first Frame passed as a param. */ public static Frame untrack(Frame... frames) { if (frames.length == 0) return null; Level level = lget(); // Pay the price of T.L.S. lookup if (level == null) return frames[0]; Set<Key> xkeys = level._keys; for (Frame fr : frames) { xkeys.remove(fr._key); xkeys.removeAll(Arrays.asList(fr.keys())); } return frames[0]; } /** * Protects the listed frames and their vecs inside this scope and inner scopes so that they can't be removed, * for example if an unprotected frame shares some Vecs. * @param frames * @return the first protected frame. */ public static Frame protect(Frame... frames) { if (frames.length == 0) return null; Level level = lget(); // Pay the price of T.L.S. lookup for (Frame fr : frames) { if (fr == null) continue; protect_impl(level, fr._key); for (Vec vec : fr.vecs()) protect_impl(level, vec._key); } return frames[0]; } private static void protect_impl(Level level, Key key) { if (key == null) return; if (level == null) return; level._protectedKeys.add(key); // track-protect key } /** * Enters a new scope and protects the passed frames in that scope. * To be used as a resource in a try block: the new "safe" scope will then be auto-exited. */ public static Safe safe(Frame... protectedFrames) { Safe scope = new Safe(); Scope.protect(protectedFrames); return scope; } public static class Safe implements AutoCloseable { private Safe() { Scope.enter(); } @Override public void close() { Scope.exit(); } } static class Level { final Set<Key> _keys; final Set<Key> _protectedKeys; final Map<Key, TrackingInfo> _trackingInfo; Level() { _keys = new HashSet<>(); _protectedKeys = new HashSet<>(); _trackingInfo = new HashMap<>(); } Level(Set<Key> keys, Set<Key> protectedKeys, Map<Key, TrackingInfo> trackingInfo) { _keys = keys; _protectedKeys = protectedKeys; _trackingInfo = trackingInfo; } void clear() { _keys.clear(); _protectedKeys.clear(); _trackingInfo.clear(); } } /** * for debugging or test purpose */ private static class ROLevel extends Level { public ROLevel(Level level) { super( Collections.unmodifiableSet(level._keys), Collections.unmodifiableSet(level._protectedKeys), Collections.unmodifiableMap(level._trackingInfo) ); } } static class TrackingInfo { int _nchunks = -1; String _source; } /** * @return a read-only view of scope levels */ List<Level> levels() { return Collections.unmodifiableList(_levels.stream().map(ROLevel::new).collect(Collectors.toList())); } }
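A minimal usage sketch of the Scope API above (illustrative; the key and column names are made up): enter a scope, track temporaries so exit() can delete them, and use the AutoCloseable Safe variant when an input frame must be protected from cleanup.

Scope.enter();
try {
  Vec v = Scope.track(Vec.makeZero(100));           // tracked: removed at Scope.exit()
  Frame tmp = new Frame(Key.make("tmp"), new String[]{"c0"}, new Vec[]{v});
  DKV.put(tmp);
  Scope.track(tmp);                                  // frame and its vecs are now tracked
  // ... work with tmp ...
} finally {
  Scope.exit();                                      // deletes everything tracked and not listed in exit(...)
}
// Equivalent with the AutoCloseable variant, additionally protecting an input frame:
// try (Scope.Safe s = Scope.safe(inputFrame)) { ... }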
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/ScopeInspect.java
package water; import water.fvec.Frame; import water.fvec.Vec; import java.util.*; import java.util.function.Predicate; /** * Some utility functions useful for debugging when trying to find origin of leaked keys. */ public class ScopeInspect { public static String toString(Scope scope) { return toString(scope, false, false, false, (k) -> true); } public static String toString(Scope scope, boolean hierarchy, boolean outOfScope, boolean subKeys, Predicate<Key> keyFilter) { StringBuilder sb = new StringBuilder(); Set<Key> scoped = new HashSet<>(); sb.append("Scope").append(hierarchy ? " hierarchy" : "").append("\n"); List<Scope.Level> levels = scope.levels(); for (int i=levels.size()-1; i >= 0; i--) { Set<Key> ks = new TreeSet<>(levels.get(i)._keys); Set<Key> pks = new TreeSet<>(levels.get(i)._protectedKeys); Map<Key, Scope.TrackingInfo> tracks = levels.get(i)._trackingInfo; scoped.addAll(ks); scoped.addAll(pks); if (hierarchy) indent(sb, 1).append("level ").append(i).append(": \n"); indent(sb, 2).append("tracking ").append(ks.size()).append(" keys:\n"); for (Key k : ks) { String desc = tracks.containsKey(k) ? tracks.get(k)._source : null; appendKey(sb, k, 3, desc, subKeys, keyFilter); } indent(sb, 2).append("protecting ").append(pks.size()).append(" keys:\n"); for (Key k : pks) { String desc = tracks.containsKey(k) ? tracks.get(k)._source : null; appendKey(sb, k, 3, desc, subKeys, keyFilter); } if (!hierarchy) break; } if (outOfScope) { Set<Key> unscoped = new TreeSet<>(new KeysCollector().doAllNodes().keys()); unscoped.removeAll(scoped); sb.append("Keys out of scope:\n"); for (Key k : unscoped) { appendKey(sb, k, 1, null, subKeys, keyFilter); } } return sb.toString(); } public static String keysToString(String header, Key... keys) { StringBuilder sb = new StringBuilder(header).append(":\n"); for (Key key : keys) { appendKey(sb, key, 1, null, true, (k) -> true); } return sb.toString(); } private static class KeysCollector extends MRTask<KeysCollector> { Key[] _collectedKeys; @Override protected void setupLocal() { _collectedKeys = H2O.localKeySet().toArray(new Key[0]); } @Override public void reduce(KeysCollector mrt) { Set<Key> ks = keys(); ks.addAll(mrt.keys()); _collectedKeys = ks.toArray(new Key[0]); } public Set<Key> keys() { return new HashSet<>(Arrays.asList(_collectedKeys)); } } private static StringBuilder indent(StringBuilder sb, int numIndent) { final int indent = 2; for (int i=0; i<numIndent*indent; i++) sb.append(" "); return sb; } private static StringBuilder appendKey(StringBuilder sb, Key key, int numIndent, String desc, boolean subKeys, Predicate<Key> keyFilter) { if (!keyFilter.test(key)) return sb; indent(sb, numIndent).append(key).append(" [").append(key.valueClass()).append(desc == null ? 
"" : ", "+desc).append("]").append("\n"); if (!subKeys) return sb; if (key.isVec()) { Vec v = DKV.getGet(key); if (v != null) { appendKey(sb, v.rollupStatsKey(), numIndent+1, "rollupstats", false, keyFilter); for (int i=0; i<v.nChunks(); i++) { appendKey(sb, v.chunkKey(i), numIndent+1, "chunk", false, keyFilter); } } } else if (key.isChunkKey()) { appendKey(sb, key.getVecKey(), numIndent+1, "from vec", false, keyFilter); } else { Value v = DKV.get(key); if (v != null && v.isFrame()) { Frame fr = v.get(); if (fr != null) { for (int i=0; i<fr.keys().length; i++) { Key<Vec> vk = fr.keys()[i]; appendKey(sb, vk, numIndent+1, "vec_"+i, true, keyFilter); } } } } return sb; } public static String dataKeysToString() { return toString(Scope.current(), true, true, true, (k) -> { boolean ok = k.isVec() || k.isChunkKey(); if (ok) return true; Value v = DKV.get(k); if (v != null) return v.isFrame(); return false; }); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/SplitToChunksApplyCombine.java
package water; import water.fvec.Frame; import water.fvec.OneChunkVec; import water.fvec.Vec; import water.util.fp.Function; import java.util.Arrays; import java.util.LinkedList; import java.util.List; public class SplitToChunksApplyCombine { public static Frame concatFrames(List<Frame> frms, Key<Frame> destinationKey) { Frame result = new Frame(destinationKey); long nRows = frms.stream().mapToLong(frame -> frame.numRows()).sum(); for (int i = 0; i < frms.get(0).numCols(); i++) { Vec v = Vec.makeZero(nRows); try (Vec.Writer vw = v.open()) { long cnt = 0; for (Frame fr : frms) { Vec.Reader vr = fr.vec(i).new Reader(); for (int k = 0; k < fr.numRows(); k++) { vw.set(cnt++, vr.at(k)); } } } result.add(frms.get(0)._names[i], v); } DKV.put(result); return result; } public static Frame createSubFrame(Frame fr, int cidx, String destinationKeyPrefix) { assert cidx >= 0 && fr.anyVec().nChunks() > cidx; Futures fs = new Futures(); Vec[] vecs = Arrays.stream(fr.vecs()).map(v -> OneChunkVec.make(v, cidx, fs)).toArray(Vec[]::new); fs.blockForPending(); return new Frame(Key.make(destinationKeyPrefix + "_oneChunkFrame_" + cidx), fr.names(), vecs); } public static Frame splitApplyCombine(Frame frameToSplit, Function<Frame, Frame> fun, Key<Frame> destinationKey) { try (Scope.Safe safe = Scope.safe(frameToSplit)) { List<Frame> resultSubFrames = new LinkedList<>(); int nChunks = frameToSplit.anyVec().nChunks(); for (int cidx = 0; cidx < nChunks; cidx++) { Frame subFrame = createSubFrame(frameToSplit, cidx, destinationKey.toString()); if (subFrame.numRows() == 0) continue; DKV.put(subFrame); resultSubFrames.add(Scope.track(fun.apply(subFrame))); } return Scope.untrack(concatFrames(resultSubFrames, destinationKey)); } } }
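An illustrative call of the helper above; the input frame and the per-chunk transform are hypothetical, and it is assumed that water.util.fp.Function is a single-method interface with an apply(...) method, hence the anonymous class.

// Apply a transform chunk-by-chunk, then stitch the per-chunk results back together.
Key<Frame> dest = Key.make("combined");
Frame combined = SplitToChunksApplyCombine.splitApplyCombine(
    inputFrame,                                     // hypothetical source Frame
    new Function<Frame, Frame>() {
      @Override public Frame apply(Frame oneChunkFrame) {
        return expensiveTransform(oneChunkFrame);   // hypothetical per-chunk transform
      }
    },
    dest);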
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/TAtomic.java
package water; /** * A typed atomic update. */ public abstract class TAtomic<T extends Freezable> extends Atomic<TAtomic<T>> { public TAtomic(){} public TAtomic(H2O.H2OCountedCompleter completer){super(completer);} /** Atomically update an old value to a new one. * @param old The old value, it may be null. It is a defensive copy. * @return The new value, or null if this atomic update no longer needs to be run */ protected abstract T atomic(T old); @Override protected Value atomic(Value val) { @SuppressWarnings("unchecked") T old = val == null || val.isNull() ? null : (T)(val.getFreezable().clone()); T nnn = atomic(old); // Atomic operation changes the data, so it cannot be performed over // values persisted on a read-only data source as we would not be able to // write those changes back. assert val == null || val.onICE() || !val.isPersisted(); return nnn == null ? null : new Value(_key,nnn,val==null?Value.ICE:val.backend()); } @Override protected void onSuccess( Value old ) { onSuccess(old==null?null:(T)old.getFreezable()); } // Upcast the old value to T public void onSuccess( T old ) { } }
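A sketch of a typical subclass (illustrative; CounterPojo is hypothetical, and the commented-out call assumes the invoke(Key) entry point inherited from Atomic).

// Hypothetical Iced value plus an atomic increment over it.
class CounterPojo extends Iced<CounterPojo> { long _n; }

class IncrementCounter extends TAtomic<CounterPojo> {
  @Override protected CounterPojo atomic(CounterPojo old) {
    if (old == null) old = new CounterPojo();   // first writer creates the value
    old._n++;                                   // safe to mutate: 'old' is a defensive copy
    return old;                                 // becomes the new stored value
  }
}
// new IncrementCounter().invoke(counterKey);   // assumed Atomic entry point; blocks until applied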
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/TCPReceiverThread.java
package water; import java.io.IOException; import java.net.InetAddress; import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.nio.channels.ByteChannel; import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; import java.util.Date; import java.util.Random; import water.network.SocketChannelFactory; import water.util.Log; import water.util.SB; /** * The Thread that looks for TCP Cloud requests. * * This thread just spins on reading TCP requests from other Nodes. * @author <a href="mailto:cliffc@h2o.ai"></a> * @version 1.0 */ public class TCPReceiverThread extends Thread { private ServerSocketChannel SOCK; private SocketChannelFactory socketChannelFactory; /** * Byte representing TCP communication for small data */ static final byte TCP_SMALL = 1; /** * Byte representing TCP communication for big data */ static final byte TCP_BIG = 2; public TCPReceiverThread( ServerSocketChannel sock) { super("TCP-Accept"); ThreadHelper.initCommonThreadProperties(this); SOCK = sock; this.socketChannelFactory = H2O.SELF.getSocketFactory(); } // The Run Method. // Started by main() on a single thread, this code manages reading TCP requests @SuppressWarnings("resource") public void run() { Thread.currentThread().setPriority(Thread.MAX_PRIORITY); ServerSocketChannel errsock = null; boolean saw_error = false; while( true ) { ByteChannel wrappedSocket = null; try { // Cleanup from any prior socket failures. Rare unless we're really sick. if( errsock != null ) { // One time attempt a socket close final ServerSocketChannel tmp2 = errsock; errsock = null; tmp2.close(); // Could throw, but errsock cleared for next pass } if( saw_error ) Thread.sleep(100); // prevent denial-of-service endless socket-creates saw_error = false; // --- // More common-case setup of a ServerSocket if( SOCK == null ) { SOCK = ServerSocketChannel.open(); SOCK.socket().setReceiveBufferSize(AutoBuffer.BBP_BIG._size); SOCK.socket().bind(H2O.SELF._key); } // Block for TCP connection and setup to read from it. SocketChannel sock = SOCK.accept(); InetAddress inetAddress = sock.socket().getInetAddress(); ByteBuffer bb = ByteBuffer.allocate(6).order(ByteOrder.nativeOrder()); wrappedSocket = socketChannelFactory.serverChannel(sock); bb.limit(bb.capacity()); bb.position(0); while(bb.hasRemaining()) { // read the first 6 handshake bytes wrappedSocket.read(bb); } bb.flip(); int chanType = bb.get(); // 1 - small, 2 - big, 3 - external short timestamp = bb.getShort(); // read timestamp // Note: timestamp was not part of the original protocol, was added in 3.22.0.1 (#a33de44) if (H2ONodeTimestamp.decodeIsClient(timestamp) && !H2O.ARGS.allow_clients) { ListenerService.getInstance().report("connection-failure", "clients-disabled"); throw new IllegalStateException("Client connection from " + inetAddress + " blocked. " + "This cloud has client connections disabled."); } int port = bb.getChar(); // read port int sentinel = (0xFF) & bb.get(); if(sentinel != 0xef) { ListenerService.getInstance().report("protocol-failure", "handshake"); String channelType = H2O.SELF.getSecurityManager().securityEnabled ?
"SSL TCP" : "TCP"; throw new IOException("Communication protocol failure (source: '" + inetAddress + "'): " + "Missing EOM sentinel when opening new " + channelType + " channel."); } // todo compare against current cloud, refuse the con if no match switch( chanType ) { case TCP_SMALL: new SmallMessagesReaderThread(H2ONode.intern(inetAddress, port, timestamp), wrappedSocket).start(); break; case TCP_BIG: new TCPReaderThread(wrappedSocket, new AutoBuffer(wrappedSocket, inetAddress, timestamp), inetAddress, timestamp).start(); break; default: ListenerService.getInstance().report("protocol-failure", "channel-type", chanType); throw new IOException("Communication protocol failure: Unexpected channel type " + chanType + ", only know 1 - Small, 2 - Big"); } } catch( java.nio.channels.AsynchronousCloseException ex ) { break; // Socket closed for shutdown } catch( Exception e ) { if (wrappedSocket != null) { try { wrappedSocket.close(); } catch (Exception e2) { Log.trace(e2); } } // On any error from anybody, close all sockets & re-open Log.err("IO error on TCP port " + H2O.H2O_PORT + ": ", e); saw_error = true; errsock = SOCK ; SOCK = null; // Signal error recovery on the next loop } } } // A private thread for reading from this open socket. static class TCPReaderThread extends Thread { public ByteChannel _sock; public AutoBuffer _ab; private final InetAddress _address; private final short _timestamp; public TCPReaderThread(ByteChannel sock, AutoBuffer ab, InetAddress address, short timestamp) { super("TCP-"+ab._h2o+"-"+(ab._h2o._tcp_readers++)); ThreadHelper.initCommonThreadProperties(this); _sock = sock; _ab = ab; _address = address; _timestamp = timestamp; setPriority(MAX_PRIORITY-1); } public void run() { while( true ) { // Loop, reading fresh TCP requests until the sender closes try { // Record the last time we heard from any given Node _ab._h2o._last_heard_from = System.currentTimeMillis(); TimeLine.record_recv(_ab, true, 0); // Hand off the TCP connection to the proper handler int ctrl = _ab.getCtrl(); int x = ctrl; if( ctrl < 0 || ctrl >= UDP.udp.UDPS.length ) x = 0; switch( UDP.udp.UDPS[x] ) { case exec: RPC.remote_exec (_ab); break; case ack: RPC.tcp_ack (_ab); break; case timeline: TimeLine.tcp_call(_ab); break; default: throw new RuntimeException("Unknown TCP Type: " + ctrl+" "+_ab._h2o); } } catch( java.nio.channels.AsynchronousCloseException ex ) { break; // Socket closed for shutdown } catch( Throwable e ) { // On any error from anybody, close everything System.err.println("IO error"); Log.err("IO error on TCP port " + H2O.H2O_PORT + ": ", e); if (e instanceof Error && H2O.isCI()) { // could be AssertionError, OOM... throw H2O.fail("Encountered an error while running in CI - will terminate to prevent deadlocks", e); } break; } // Reuse open sockets for the next task try { if( !_sock.isOpen() ) break; _ab = new AutoBuffer(_sock, _address, _timestamp); } catch( Exception e ) { // Exceptions here are *normal*, this is an idle TCP connection and // either the OS can time it out, or the cloud might shutdown. We // don't care what happens to this socket. break; // Ignore all errors; silently die if socket is closed } } } } /** A private thread reading small messages from a tcp channel. The thread * reads the raw bytes of a message from the channel, copies them into a * byte array which is than passed on to FJQ. Each message is expected to * be MSG_SZ(2B) MSG BODY(MSG_SZ*B) EOM MARKER (1B - 0xef). 
*/ static class SmallMessagesReaderThread extends Thread { private final ByteChannel _chan; private final ByteBuffer _bb; private final H2ONode _h2o; public SmallMessagesReaderThread(H2ONode h2o, ByteChannel chan) { super("TCP-SMALL-READ-" + h2o); ThreadHelper.initCommonThreadProperties(this); _h2o = h2o; _chan = chan; _bb = ByteBuffer.allocateDirect(AutoBuffer.BBP_BIG._size).order(ByteOrder.nativeOrder()); _bb.flip(); // Prep for reading; zero bytes available } public String printBytes(ByteBuffer bb, int start, int sz) { SB sb = new SB(); int idx = start + sz; try { for (int i = 5; i > 0; --i) sb.p("-").p(i).p(":").p(0xFF & bb.get(idx - i)).p(" "); sb.p("0: ").p(0xFF & bb.get(idx)).p(" "); for (int i = 1; i <= 5; ++i) sb.p("+").p(i).p(":").p(0xFF & bb.get(idx + i)).p(" "); } catch(Throwable t) {/*ignore, just a debug print*/} return sb.toString(); } // Read until there are at least N bytes in the ByteBuffer private ByteBuffer read(int n) throws IOException { if( _bb.remaining() < n ) { // Not enough bytes between position and limit _bb.compact(); // move data down to 0, set position to remaining bytes while(_bb.position() < n) { int res = _chan.read(_bb); // Slide position forward (up to limit) if (res <= 0) throw new IOException("Didn't read any data: res=" + res); // no eof & progress made _h2o._last_heard_from = System.currentTimeMillis(); } _bb.flip(); // Limit to amount of data, position to 0 } return _bb; } @Override public void run() { assert !_bb.hasArray(); // Direct ByteBuffer only boolean idle = false; try { //noinspection InfiniteLoopStatement while (true) { idle = true; // OK to have remote suicide while idle; happens during normal shutdown int sz = read(2).getChar(); // 2 bytes of next-message-size idle = false; assert sz < AutoBuffer.BBP_SML._size : "Incoming message is too big, should've been sent by TCP-BIG, got " + sz + " bytes"; byte[] ary = MemoryManager.malloc1(Math.max(16,sz)); int sentinel = read(sz+1).get(ary,0,sz).get(); // extract the message bytes, then the sentinel byte assert (0xFF & sentinel) == 0xef : "Missing expected sentinel (0xef) at the end of the message from " + _h2o + ", likely out of sync, size = " + sz + ", position = " + _bb.position() +", bytes = " + printBytes(_bb, _bb.position(), sz); // package the raw bytes into an array and pass it on to FJQ for further processing basic_packet_handling(new AutoBuffer(_h2o, ary, 0, sz)); } } catch(Throwable t) { if( !idle || !(t instanceof IOException) ) { Log.err(t); } } finally { AutoBuffer.BBP_BIG.free(_bb); if(_chan != null && _chan.isOpen()) try { _chan.close();} catch (IOException e) {/*ignore error on close*/} } } } static private int _unknown_packets_per_sec = 0; static private long _unknown_packet_time = 0; static final Random RANDOM_UDP_DROP = new Random(); // Basic packet handling: // - Timeline record it static public void basic_packet_handling( AutoBuffer ab ) throws java.io.IOException { // Randomly drop 1/5th of the packets, as if on a broken network. Dropped // packets are timeline recorded before dropping - and we still will // respond to timelines and suicide packets. int drop = H2O.ARGS.random_udp_drop && RANDOM_UDP_DROP.nextInt(5) == 0 ? 2 : 0; // Record the last time we heard from any given Node TimeLine.record_recv(ab, false, drop); final long now = ab._h2o._last_heard_from = System.currentTimeMillis(); // Snapshots are handled *IN THIS THREAD*, to prevent more UDP packets from // being handled during the dump. Also works for packets from outside the // Cloud...
because we use Timelines to diagnose Paxos failures. int ctrl = ab.getCtrl(); ab.getPort(); // skip the port bytes if( ctrl == UDP.udp.timeline.ordinal() ) { UDP.udp.timeline._udp.call(ab); return; } // Suicide packet? Short-n-sweet... if( ctrl == UDP.udp.rebooted.ordinal()) UDPRebooted.checkForSuicide(ctrl, ab); // Drop the packet. if( drop != 0 ) return; // Get the Cloud we are operating under for this packet H2O cloud = H2O.CLOUD; // Check cloud membership; stale ex-members are "fail-stop" - we mostly // ignore packets from them (except paxos packets). boolean is_member = cloud.contains(ab._h2o); boolean is_client = ab._h2o.isClient(); // Some non-Paxos packet from a non-member. Probably should record & complain. // Filter unknown-packet-reports. In bad situations of poisoned Paxos // voting we can get a LOT of these packets/sec, flooding the logs. if( !(UDP.udp.UDPS[ctrl]._paxos || is_member || is_client) ) { _unknown_packets_per_sec++; long timediff = ab._h2o._last_heard_from - _unknown_packet_time; if( timediff > 1000 ) { // If this is a recently booted client node... coming up right after a // prior client was shutdown, it might see leftover trash UDP packets // from the servers intended for the prior client. if( !(H2O.ARGS.client && now-H2O.START_TIME_MILLIS.get() < HeartBeatThread.CLIENT_TIMEOUT) ) Log.warn("Received packets from outside the cloud: "+_unknown_packets_per_sec+"/sec, last one from "+ab._h2o+ " @ "+new Date()); _unknown_packets_per_sec = 0; _unknown_packet_time = ab._h2o._last_heard_from; } ab.close(); return; } // Paxos stateless packets & ACKs just fire immediately in a worker // thread. Dups are handled by these packet handlers directly. No // current membership check required for Paxos packets. // // Handle the case of packet flooding draining all the available // ByteBuffers and running the JVM out of *native* memory, triggering // either a large RSS (and having YARN kill us for being over-budget) or // simply tossing a OOM - but a out-of-native-memory nothing to do with // heap memory. // // All UDP packets at this stage have fairly short lifetimes - Exec packets // (which you might think to be unboundedly slow) are actually just going // through the deserialization call in RPC.remote_exec - and the deser'd // DTask gets tossed on a low priority queue to do "the real work". Since // this is coming from a UDP packet the deser work is actually small. H2O.submitTask(new FJPacket(ab,ctrl)); } }
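For reference, a sketch of the small-message frame that SmallMessagesReaderThread expects on the wire: 2 bytes of length (read back via getChar()), the payload, then the 0xEF end-of-message sentinel. This helper is illustrative only; real senders build these frames through AutoBuffer.

// Frame a payload exactly the way the small-message reader above consumes it.
static ByteBuffer frameSmallMessage(byte[] payload) {
  ByteBuffer bb = ByteBuffer.allocate(2 + payload.length + 1).order(ByteOrder.nativeOrder());
  bb.putChar((char) payload.length);   // MSG_SZ (2 bytes); must stay below AutoBuffer.BBP_SML._size
  bb.put(payload);                     // MSG BODY
  bb.put((byte) 0xEF);                 // EOM sentinel checked by the reader
  bb.flip();                           // ready to write to the channel
  return bb;
}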
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/TaskGetKey.java
package water; import water.nbhm.NonBlockingHashMap; /** * Get the given key from the remote node * * @author <a href="mailto:cliffc@h2o.ai"></a> * @version 1.0 */ public class TaskGetKey extends DTask<TaskGetKey> { Key _key; // Set by client/sender JVM, cleared by server JVM Value _val; // Set by server JVM, read by client JVM transient Key _xkey; // Set by client, read by client transient H2ONode _h2o; // Set by server JVM, read by server JVM on ACKACK // Unify multiple Key/Value fetches for the same Key from the same Node at // the "same time". Large key fetches are slow, and we'll get multiple // requests close in time. Batch them up. private static final NonBlockingHashMap<Key,RPC<TaskGetKey>> TGKS = new NonBlockingHashMap(); // Get a value from a named remote node static Value get( H2ONode target, Key key ) { return get(start(target,key)); } static Value get(RPC<TaskGetKey> rpc) { return rpc.get()._val; // Block for it } // Start an RPC to fetch a Value, handling short-cutting dup-fetches static RPC<TaskGetKey> start( H2ONode target, Key key ) { // Do we have an old TaskGetKey in-progress? RPC<TaskGetKey> old = TGKS.get(key); if( old != null ) return old; // Make a new TGK. RPC<TaskGetKey> rpc = new RPC(target,new TaskGetKey(key),1.0f); if( (old=TGKS.putIfMatchUnlocked(key,rpc,null)) != null ) return old; // Failed because an old exists rpc.setTaskNum().call(); // Start the op return rpc; // Successful install of a fresh RPC } private TaskGetKey( Key key ) { super(H2O.GET_KEY_PRIORITY); _key = _xkey = key; } // Top-level non-recursive invoke @Override public void dinvoke( H2ONode sender ) { _h2o = sender; Key k = _key; _key = null; // Not part of the return result assert k.home(); // Gets are always from home (unless we do replication) // Shipping a result? Track replicas so we can invalidate. There's a // narrow race on a moving K/V mapping tracking this Value just as it gets // deleted - in which case, simply retry for another Value. do _val = Value.STORE_get(k); // The return result while( _val != null && !_val.setReplica(sender) ); tryComplete(); } @Override public void compute2() { throw H2O.fail(); } // Received an ACK; executes on the node asking&receiving the Value @Override public void onAck() { if( _val != null ) { // Set transient fields after deserializing assert !_xkey.home() && _val._key == null; _val._key = _xkey; } // Now update the local store, caching the result. // We only started down the TGK path because we missed locally, so we only // expect to find a NULL in the local store. If somebody else installed // another value (e.g. a racing TGK, or racing local Put) this value must // be more recent than our NULL - but is UNORDERED relative to the Value // returned from the Home. We'll take the local Value to preserve ordering // and rely on invalidates from Home to force refreshes as needed. // Hence we can do a blind putIfMatch here over a null or empty Value. // If it fails, what is there is also the TGK result. Value old = H2O.STORE.get(_xkey); if( old != null && !old.isEmpty() ) old=null; Value res = H2O.putIfMatch(_xkey,_val,old); if( res != old ) _val = res; TGKS.remove(_xkey); // Clear from dup cache } // Received an ACKACK; executes on the node sending the Value @Override public void onAckAck() { if( _val != null ) _val.lowerActiveGetCount(_h2o); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/TaskInvalidateKey.java
package water; /** Invalidate cached value on remote. */ class TaskInvalidateKey extends TaskPutKey { private final transient Value _newval; private TaskInvalidateKey(Key key, Value newval){super(key); _newval=newval;} static void invalidate( H2ONode h2o, Key key, Value newval, Futures fs ) { assert newval._key != null && key.home(); // Prevent the new Value from being overwritten by Yet Another PUT by // read-locking it. It's safe to read, but not to over-write, until this // invalidate completes on the *prior* value. newval.read_lock();// block further writes until all invalidates complete fs.add(RPC.call(h2o,new TaskInvalidateKey(key,newval))); } // Lower read-lock, possibly enabling pending writes to start @Override public void onAck() { _newval.lowerActiveGetCount(null); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/TaskPutKey.java
package water; /** Push the given key to the remote node * @author <a href="mailto:cliffc@h2o.ai"></a> * @version 1.0 */ public class TaskPutKey extends DTask<TaskPutKey> { Key _key; Value _val; boolean _dontCache; // delete cached value on the sender's side? transient Value _xval; transient Key _xkey; static void put( H2ONode h2o, Key key, Value val, Futures fs, boolean dontCache) { fs.add(RPC.call(h2o,new TaskPutKey(key,val,dontCache))); } protected TaskPutKey( Key key, Value val ) { this(key,val,false);} protected TaskPutKey( Key key, Value val, boolean removeCache ) { super(H2O.PUT_KEY_PRIORITY); _xkey = _key = key; _xval = _val = val; _dontCache = removeCache;} protected TaskPutKey( Key key ) { super(H2O.INVALIDATE_PRIORITY); _xkey = _key = key; _xval = _val = null; _dontCache = false;} @Override public void dinvoke( H2ONode sender ) { assert _key.home() || _val==null; // Only PUT to home for keys, or remote invalidation from home Paxos.lockCloud(_key); // Initialize Value for having a single known replica (the sender) if( _val != null ) _val.initReplicaHome(sender,_key); else if( _key.home() ) _val = Value.makeNull(_key); // Spin, until we update something. Value old = H2O.STORE.get(_key); // Raw-get: do not lazy-manifest if overwriting while( H2O.putIfMatch(_key,_val,old) != old ) old = H2O.STORE.get(_key); // Repeat until we update something. // Invalidate remote caches. Block, so that all invalidates are done // before we return to the remote caller. This is conservative, but // otherwise we have to send the invalidate-completion message to the // remote caller; i.e. the caller would have to handle a 2-step Put // completion ("I started your Put request" and "I completed your Put // request"). if( _key.home() ) { if( old != null ) old.lockAndInvalidate(sender,_val,new Futures()).blockForPending(); else _val.lowerActiveGetCount(null); // Remove initial read-lock, accounting for pending inv counts } // No return result _key = null; _val = null; tryComplete(); } @Override public void compute2() { throw H2O.fail(); } // Received an ACK @Override public void onAck() { // remove local cache but NOT in case it is already on disk // (ie memory can be reclaimed and we assume we have plenty of disk space) if( _dontCache && !_xval.isPersisted() ) H2O.putIfMatch(_xkey, null, _xval); if( _xval != null ) _xval.completeRemotePut(); } }
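User code normally reaches TaskPutKey / TaskGetKey / TaskInvalidateKey indirectly through the DKV facade; an illustrative sketch (the key and frame are assumed to exist already):

// Typical call sites that end up issuing these tasks under the hood.
Value v = DKV.get(key);        // a cache miss may trigger a TaskGetKey to the key's home node
Frame fr = DKV.getGet(key);    // convenience: get + unwrap the stored POJO
DKV.put(fr);                   // a put may trigger TaskPutKey to home plus invalidates to replicas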