| index (int64) | repo_id (string) | file_path (string) | content (string) |
|---|---|---|---|
0
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies/retrofit/Metadata.java
|
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
import java.util.Map;
public interface Metadata {
/** Return the REST API endpoint metadata, including documentation, for the endpoint specified by number. */
@GET("/3/Metadata/endpoints/(?<num>[0-9]+)")
Call<MetadataV3> fetchRoute(@Path("num") int num);
/** Return the REST API endpoint metadata, including documentation, for the endpoint specified by path. */
@GET("/3/Metadata/endpoints/{path}")
Call<MetadataV3> fetchRoute(@Path("path") String path);
/** Return a list of all the REST API endpoints. */
@GET("/3/Metadata/endpoints")
Call<MetadataV3> listRoutes();
/** Return the REST API schema metadata for specified schema class. */
@GET("/3/Metadata/schemaclasses/{classname}")
Call<MetadataV3> fetchSchemaMetadataByClass(@Path("classname") String classname);
/** Return the REST API schema metadata for specified schema. */
@GET("/3/Metadata/schemas/{schemaname}")
Call<MetadataV3> fetchSchemaMetadata(@Path("schemaname") String schemaname);
/** Return list of all REST API schemas. */
@GET("/3/Metadata/schemas")
Call<MetadataV3> listSchemas();
}
|
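The interfaces above are plain Retrofit 2 proxies, so using them only requires binding them to a configured Retrofit instance. A minimal sketch, assuming an H2O node listening on the default port 54321 and a Gson converter for the JSON payloads (neither is dictated by the bindings themselves):

import java.io.IOException;
import retrofit2.Retrofit;
import retrofit2.converter.gson.GsonConverterFactory;
import water.bindings.pojos.MetadataV3;
import water.bindings.proxies.retrofit.Metadata;

public class MetadataExample {
    public static void main(String[] args) throws IOException {
        // Assumed endpoint and converter; adjust to the actual cluster configuration.
        Retrofit retrofit = new Retrofit.Builder()
                .baseUrl("http://localhost:54321/")
                .addConverterFactory(GsonConverterFactory.create())
                .build();
        Metadata metadata = retrofit.create(Metadata.class);
        // Synchronous call; listRoutes() maps to GET /3/Metadata/endpoints.
        MetadataV3 routes = metadata.listRoutes().execute().body();
        System.out.println(routes);
    }
}

The returned Call objects can also be enqueued for asynchronous execution instead of the blocking execute() shown here.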
0
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies/retrofit/MissingInserter.java
|
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
import java.util.Map;
public interface MissingInserter {
/** Insert missing values. */
@FormUrlEncoded
@POST("/3/MissingInserter")
Call<JobV3> run(@Field("dataset") String dataset,
@Field("fraction") double fraction,
@Field("seed") long seed,
@Field("_exclude_fields") String _exclude_fields);
}
|
0
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies/retrofit/ModelBuilders.java
|
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
import java.util.Map;
public interface ModelBuilders {
/** Return a new unique model_id for the specified algorithm. */
@FormUrlEncoded
@POST("/3/ModelBuilders/{algo}/model_id")
Call<ModelIdV3> calcModelId(@Path("algo") String algo,
@Field("_exclude_fields") String _exclude_fields);
/** Return the Model Builder metadata for the specified algorithm. */
@GET("/3/ModelBuilders/{algo}")
Call<ModelBuildersV3> fetch(@Path("algo") String algo);
/** Return the Model Builder metadata for all available algorithms. */
@GET("/3/ModelBuilders")
Call<ModelBuildersV3> list();
/** Train a DeepLearning model. */
@FormUrlEncoded
@POST("/3/ModelBuilders/deeplearning")
Call<ModelBuilderSchema> train_deeplearning(@Field("parameters") ModelParametersSchema parameters,
@Field("__http_status") int __http_status,
@Field("_exclude_fields") String _exclude_fields);
/** Validate a set of DeepLearning model builder parameters. */
@FormUrlEncoded
@POST("/3/ModelBuilders/deeplearning/parameters")
Call<ModelBuilderSchema> validate_parameters_deeplearning(@Field("parameters") ModelParametersSchema parameters,
@Field("__http_status") int __http_status,
@Field("_exclude_fields") String _exclude_fields);
/** Train a GLM model. */
@FormUrlEncoded
@POST("/3/ModelBuilders/glm")
Call<ModelBuilderSchema> train_glm(@Field("parameters") ModelParametersSchema parameters,
@Field("__http_status") int __http_status,
@Field("_exclude_fields") String _exclude_fields);
/** Validate a set of GLM model builder parameters. */
@FormUrlEncoded
@POST("/3/ModelBuilders/glm/parameters")
Call<ModelBuilderSchema> validate_parameters_glm(@Field("parameters") ModelParametersSchema parameters,
@Field("__http_status") int __http_status,
@Field("_exclude_fields") String _exclude_fields);
/** Train a GLRM model. */
@FormUrlEncoded
@POST("/3/ModelBuilders/glrm")
Call<ModelBuilderSchema> train_glrm(@Field("parameters") ModelParametersSchema parameters,
@Field("__http_status") int __http_status,
@Field("_exclude_fields") String _exclude_fields);
/** Validate a set of GLRM model builder parameters. */
@FormUrlEncoded
@POST("/3/ModelBuilders/glrm/parameters")
Call<ModelBuilderSchema> validate_parameters_glrm(@Field("parameters") ModelParametersSchema parameters,
@Field("__http_status") int __http_status,
@Field("_exclude_fields") String _exclude_fields);
/** Train a KMeans model. */
@FormUrlEncoded
@POST("/3/ModelBuilders/kmeans")
Call<ModelBuilderSchema> train_kmeans(@Field("parameters") ModelParametersSchema parameters,
@Field("__http_status") int __http_status,
@Field("_exclude_fields") String _exclude_fields);
/** Validate a set of KMeans model builder parameters. */
@FormUrlEncoded
@POST("/3/ModelBuilders/kmeans/parameters")
Call<ModelBuilderSchema> validate_parameters_kmeans(@Field("parameters") ModelParametersSchema parameters,
@Field("__http_status") int __http_status,
@Field("_exclude_fields") String _exclude_fields);
/** Train a NaiveBayes model. */
@FormUrlEncoded
@POST("/3/ModelBuilders/naivebayes")
Call<ModelBuilderSchema> train_naivebayes(@Field("parameters") ModelParametersSchema parameters,
@Field("__http_status") int __http_status,
@Field("_exclude_fields") String _exclude_fields);
/** Validate a set of NaiveBayes model builder parameters. */
@FormUrlEncoded
@POST("/3/ModelBuilders/naivebayes/parameters")
Call<ModelBuilderSchema> validate_parameters_naivebayes(@Field("parameters") ModelParametersSchema parameters,
@Field("__http_status") int __http_status,
@Field("_exclude_fields") String _exclude_fields);
/** Train a PCA model. */
@FormUrlEncoded
@POST("/3/ModelBuilders/pca")
Call<ModelBuilderSchema> train_pca(@Field("parameters") ModelParametersSchema parameters,
@Field("__http_status") int __http_status,
@Field("_exclude_fields") String _exclude_fields);
/** Validate a set of PCA model builder parameters. */
@FormUrlEncoded
@POST("/3/ModelBuilders/pca/parameters")
Call<ModelBuilderSchema> validate_parameters_pca(@Field("parameters") ModelParametersSchema parameters,
@Field("__http_status") int __http_status,
@Field("_exclude_fields") String _exclude_fields);
/** Train a SVD model. */
@FormUrlEncoded
@POST("/99/ModelBuilders/svd")
Call<ModelBuilderSchema> train_svd(@Field("parameters") ModelParametersSchema parameters,
@Field("__http_status") int __http_status,
@Field("_exclude_fields") String _exclude_fields);
/** Validate a set of SVD model builder parameters. */
@FormUrlEncoded
@POST("/99/ModelBuilders/svd/parameters")
Call<ModelBuilderSchema> validate_parameters_svd(@Field("parameters") ModelParametersSchema parameters,
@Field("__http_status") int __http_status,
@Field("_exclude_fields") String _exclude_fields);
/** Train a DRF model. */
@FormUrlEncoded
@POST("/3/ModelBuilders/drf")
Call<ModelBuilderSchema> train_drf(@Field("parameters") ModelParametersSchema parameters,
@Field("__http_status") int __http_status,
@Field("_exclude_fields") String _exclude_fields);
/** Validate a set of DRF model builder parameters. */
@FormUrlEncoded
@POST("/3/ModelBuilders/drf/parameters")
Call<ModelBuilderSchema> validate_parameters_drf(@Field("parameters") ModelParametersSchema parameters,
@Field("__http_status") int __http_status,
@Field("_exclude_fields") String _exclude_fields);
/** Train a GBM model. */
@FormUrlEncoded
@POST("/3/ModelBuilders/gbm")
Call<ModelBuilderSchema> train_gbm(@Field("parameters") ModelParametersSchema parameters,
@Field("__http_status") int __http_status,
@Field("_exclude_fields") String _exclude_fields);
/** Validate a set of GBM model builder parameters. */
@FormUrlEncoded
@POST("/3/ModelBuilders/gbm/parameters")
Call<ModelBuilderSchema> validate_parameters_gbm(@Field("parameters") ModelParametersSchema parameters,
@Field("__http_status") int __http_status,
@Field("_exclude_fields") String _exclude_fields);
}
|
0
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies/retrofit/ModelMetrics.java
|
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
import java.util.Map;
public interface ModelMetrics {
/** Return the saved scoring metrics for the specified Model and Frame. */
@GET("/3/ModelMetrics/models/{model}/frames/{frame}")
Call<ModelMetricsListSchemaV3> fetch(@Path("model") String model,
@Path("frame") String frame);
/** Delete the saved scoring metrics for the specified Model and Frame. */
@DELETE("/3/ModelMetrics/models/{model}/frames/{frame}")
Call<ModelMetricsListSchemaV3> delete(@Path("model") String model,
@Path("frame") String frame);
/** Return the saved scoring metrics for the specified Model. */
@GET("/3/ModelMetrics/models/{model}")
Call<ModelMetricsListSchemaV3> fetch(@Path("model") String model);
/** Return all the saved scoring metrics. */
@GET("/3/ModelMetrics")
Call<ModelMetricsListSchemaV3> fetch();
/** Return the scoring metrics for the specified Frame with the specified Model. If the Frame has already been scored with the Model then cached results will be returned; otherwise predictions for all rows in the Frame will be generated and the metrics will be returned. */
@FormUrlEncoded
@POST("/3/ModelMetrics/models/{model}/frames/{frame}")
Call<ModelMetricsListSchemaV3> score(@Path("model") String model,
@Path("frame") String frame,
@Field("predictions_frame") String predictions_frame,
@Field("reconstruction_error") boolean reconstruction_error,
@Field("reconstruction_error_per_feature") boolean reconstruction_error_per_feature,
@Field("deep_features_hidden_layer") int deep_features_hidden_layer,
@Field("reconstruct_train") boolean reconstruct_train,
@Field("project_archetypes") boolean project_archetypes,
@Field("reverse_transform") boolean reverse_transform,
@Field("leaf_node_assignment") boolean leaf_node_assignment,
@Field("_exclude_fields") String _exclude_fields);
}
|
0
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies/retrofit/Models.java
|
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
import java.util.Map;
public interface Models {
/** Return the specified Model from the H2O distributed K/V store, optionally with the list of compatible Frames. */
@GET("/3/Models/{model_id}")
Call<ModelsV3> fetch(@Path("model_id") String model_id);
/** Return all Models from the H2O distributed K/V store. */
@GET("/3/Models")
Call<ModelsV3> list();
/** Delete the specified Model from the H2O distributed K/V store. */
@DELETE("/3/Models/{model_id}")
Call<ModelsV3> delete(@Path("model_id") String model_id);
/** Delete all Models from the H2O distributed K/V store. */
@DELETE("/3/Models")
Call<ModelsV3> deleteAll();
/** Return potentially abridged model suitable for viewing in a browser (currently only used for java model code). */
@GET("/3/Models.java/{model_id}/preview")
Call<StreamingSchema> fetchPreview(@Path("model_id") String model_id);
/** Return the stream containing model implementation in Java code. */
@GET("/3/Models.java/{model_id}")
Call<StreamingSchema> fetchJavaCode(@Path("model_id") String model_id);
/** Import given binary model into H2O. */
@FormUrlEncoded
@POST("/99/Models.bin/{model_id}")
Call<ModelsV3> importModel(@Path("model_id") String model_id,
@Field("dir") String dir,
@Field("force") boolean force,
@Field("_exclude_fields") String _exclude_fields);
/** Export given model. */
@GET("/99/Models.bin/{model_id}")
Call<ModelExportV3> exportModel(@Path("model_id") String model_id);
}
|
0
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies/retrofit/NetworkTest.java
|
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
import java.util.Map;
public interface NetworkTest {
/** Run a network test to measure the performance of the cluster interconnect. */
@GET("/3/NetworkTest")
Call<NetworkTestV3> fetch();
}
|
0
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies/retrofit/NodePersistentStorage.java
|
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
import java.util.Map;
public interface NodePersistentStorage {
/** Return true if a value exists for the given category and name. */
@GET("/3/NodePersistentStorage/categories/{category}/names/{name}/exists")
Call<NodePersistentStorageV3> exists(@Path("category") String category,
@Path("name") String name);
/** Return true if the given category exists. */
@GET("/3/NodePersistentStorage/categories/{category}/exists")
Call<NodePersistentStorageV3> exists(@Path("category") String category);
/** Return true if node-persistent storage has been configured. */
@GET("/3/NodePersistentStorage/configured")
Call<NodePersistentStorageV3> configured();
/** Store a named value. */
@FormUrlEncoded
@POST("/3/NodePersistentStorage/{category}/{name}")
Call<NodePersistentStorageV3> put_with_name(@Path("category") String category,
@Path("name") String name,
@Field("value") String value,
@Field("_exclude_fields") String _exclude_fields);
/** Return value for a given name. */
@GET("/3/NodePersistentStorage/{category}/{name}")
Call<NodePersistentStorageV3> get_as_string(@Path("category") String category,
@Path("name") String name);
/** Delete a key. */
@DELETE("/3/NodePersistentStorage/{category}/{name}")
Call<NodePersistentStorageV3> delete(@Path("category") String category,
@Path("name") String name);
/** Store a value. */
@FormUrlEncoded
@POST("/3/NodePersistentStorage/{category}")
Call<NodePersistentStorageV3> put(@Path("category") String category,
@Field("name") String name,
@Field("value") String value,
@Field("_exclude_fields") String _exclude_fields);
/** Return all keys stored for a given category. */
@GET("/3/NodePersistentStorage/{category}")
Call<NodePersistentStorageV3> list(@Path("category") String category);
}
|
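As with the other proxies, the NodePersistentStorage interface is exercised through a Retrofit instance. A hedged sketch of a store-and-read-back round trip, where the base URL, the converter, and the category/name/value strings are all assumptions made for illustration:

import java.io.IOException;
import retrofit2.Retrofit;
import retrofit2.converter.gson.GsonConverterFactory;
import water.bindings.pojos.NodePersistentStorageV3;
import water.bindings.proxies.retrofit.NodePersistentStorage;

public class NodePersistentStorageExample {
    public static void main(String[] args) throws IOException {
        Retrofit retrofit = new Retrofit.Builder()
                .baseUrl("http://localhost:54321/")                  // assumed H2O endpoint
                .addConverterFactory(GsonConverterFactory.create())
                .build();
        NodePersistentStorage nps = retrofit.create(NodePersistentStorage.class);
        // Store a value under a category/name pair, then read it back;
        // the category, name and value are made up for illustration.
        nps.put_with_name("notebooks", "example", "{\"cells\":[]}", null).execute();
        NodePersistentStorageV3 stored = nps.get_as_string("notebooks", "example").execute().body();
        System.out.println(stored);
    }
}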
0
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies/retrofit/Parse.java
|
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
import java.util.Map;
public interface Parse {
/** Parse a raw byte-oriented Frame into a useful columnar data Frame. */
@FormUrlEncoded
@POST("/3/Parse")
Call<ParseV3> parse(@Field("destination_frame") String destination_frame,
@Field("source_frames") FrameKeyV3[] source_frames,
@Field("parse_type") ParserType parse_type,
@Field("separator") byte separator,
@Field("single_quotes") boolean single_quotes,
@Field("check_header") int check_header,
@Field("number_columns") int number_columns,
@Field("column_names") String[] column_names,
@Field("column_types") String[] column_types,
@Field("domains") String[][] domains,
@Field("na_strings") String[][] na_strings,
@Field("chunk_size") int chunk_size,
@Field("delete_on_done") boolean delete_on_done,
@Field("blocking") boolean blocking,
@Field("_exclude_fields") String _exclude_fields);
}
|
0
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies/retrofit/ParseSetup.java
|
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
import java.util.Map;
public interface ParseSetup {
/** Guess the parameters for parsing raw byte-oriented data into an H2O Frame. */
@FormUrlEncoded
@POST("/3/ParseSetup")
Call<ParseSetupV3> guessSetup(@Field("source_frames") FrameKeyV3[] source_frames,
@Field("parse_type") ParserType parse_type,
@Field("separator") byte separator,
@Field("single_quotes") boolean single_quotes,
@Field("check_header") int check_header,
@Field("column_names") String[] column_names,
@Field("column_types") String[] column_types,
@Field("na_strings") String[][] na_strings,
@Field("column_name_filter") String column_name_filter,
@Field("column_offset") int column_offset,
@Field("column_count") int column_count,
@Field("total_filtered_column_count") int total_filtered_column_count,
@Field("_exclude_fields") String _exclude_fields);
}
|
0
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies/retrofit/Predictions.java
|
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
import java.util.Map;
public interface Predictions {
/** Score (generate predictions) for the specified Frame with the specified Model. Both the Frame of predictions and the metrics will be returned. */
@FormUrlEncoded
@POST("/3/Predictions/models/{model}/frames/{frame}")
Call<ModelMetricsListSchemaV3> predict(@Path("model") String model,
@Path("frame") String frame,
@Field("predictions_frame") String predictions_frame,
@Field("reconstruction_error") boolean reconstruction_error,
@Field("reconstruction_error_per_feature") boolean reconstruction_error_per_feature,
@Field("deep_features_hidden_layer") int deep_features_hidden_layer,
@Field("reconstruct_train") boolean reconstruct_train,
@Field("project_archetypes") boolean project_archetypes,
@Field("reverse_transform") boolean reverse_transform,
@Field("leaf_node_assignment") boolean leaf_node_assignment,
@Field("_exclude_fields") String _exclude_fields);
/** Score (generate predictions) for the specified Frame with the specified Model. Both the Frame of predictions and the metrics will be returned. */
@FormUrlEncoded
@POST("/4/Predictions/models/{model}/frames/{frame}")
Call<JobV3> predict2(@Path("model") String model,
@Path("frame") String frame,
@Field("predictions_frame") String predictions_frame,
@Field("reconstruction_error") boolean reconstruction_error,
@Field("reconstruction_error_per_feature") boolean reconstruction_error_per_feature,
@Field("deep_features_hidden_layer") int deep_features_hidden_layer,
@Field("reconstruct_train") boolean reconstruct_train,
@Field("project_archetypes") boolean project_archetypes,
@Field("reverse_transform") boolean reverse_transform,
@Field("leaf_node_assignment") boolean leaf_node_assignment,
@Field("_exclude_fields") String _exclude_fields);
}
|
0
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies/retrofit/Profiler.java
|
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
import java.util.Map;
public interface Profiler {
/** Report real-time profiling information for all nodes (sorted, aggregated stack traces). */
@GET("/3/Profiler")
Call<ProfilerV3> fetch();
}
|
0
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies/retrofit/Rapids.java
|
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
import java.util.Map;
public interface Rapids {
/** Execute a Rapids AST. */
@FormUrlEncoded
@POST("/99/Rapids")
Call<RapidsSchema> exec(@Field("ast") String ast,
@Field("id") String id,
@Field("session_id") String session_id);
}
|
0
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies/retrofit/Sample.java
|
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
import java.util.Map;
public interface Sample {
/** Example of an experimental endpoint. Call via /EXPERIMENTAL/Sample. Experimental endpoints can change at any moment. */
@GET("/99/Sample")
Call<CloudV3> status();
}
|
0
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies/retrofit/Shutdown.java
|
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
import java.util.Map;
public interface Shutdown {
/** Shut down the cluster */
@FormUrlEncoded
@POST("/3/Shutdown")
Call<ShutdownV3> shutdown(@Field("_exclude_fields") String _exclude_fields);
}
|
0
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies/retrofit/SplitFrame.java
|
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
import java.util.Map;
public interface SplitFrame {
/** Split an H2O Frame. */
@FormUrlEncoded
@POST("/3/SplitFrame")
Call<SplitFrameV3> run(@Field("key") String key,
@Field("dataset") String dataset,
@Field("ratios") double[] ratios,
@Field("destination_frames") FrameKeyV3[] destination_frames,
@Field("_exclude_fields") String _exclude_fields);
}
|
0
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies/retrofit/Tabulate.java
|
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
import java.util.Map;
public interface Tabulate {
/** Tabulate one column vs another. */
@FormUrlEncoded
@POST("/99/Tabulate")
Call<TabulateV3> run(@Field("dataset") String dataset,
@Field("predictor") ColSpecifierV3 predictor,
@Field("response") ColSpecifierV3 response,
@Field("weight") ColSpecifierV3 weight,
@Field("nbins_predictor") int nbins_predictor,
@Field("nbins_response") int nbins_response,
@Field("_exclude_fields") String _exclude_fields);
}
|
0
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies/retrofit/Timeline.java
|
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
import java.util.Map;
public interface Timeline {
/** Debugging tool that provides information on current communication between nodes. */
@GET("/3/Timeline")
Call<TimelineV3> fetch();
}
|
0
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies/retrofit/Typeahead.java
|
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
import java.util.Map;
public interface Typeahead {
/** Typeahead handler for filename completion. */
@GET("/3/Typeahead/files")
Call<TypeaheadV3> files();
}
|
0
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies/retrofit/UnlockKeys.java
|
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
import java.util.Map;
public interface UnlockKeys {
/** Unlock all keys in the H2O distributed K/V store, to attempt to recover from a crash. */
@FormUrlEncoded
@POST("/3/UnlockKeys")
Call<UnlockKeysV3> unlock(@Field("_exclude_fields") String _exclude_fields);
}
|
0
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies/retrofit/WaterMeterCpuTicks.java
|
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
import java.util.Map;
public interface WaterMeterCpuTicks {
/** Return a CPU usage snapshot of all cores of all nodes in the H2O cluster. */
@GET("/3/WaterMeterCpuTicks/{nodeidx}")
Call<WaterMeterCpuTicksV3> fetch(@Path("nodeidx") int nodeidx);
}
|
0
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies
|
java-sources/ai/h2o/h2o-java-rest-bindings/3.8.2.11/water/bindings/proxies/retrofit/WaterMeterIo.java
|
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
import java.util.Map;
public interface WaterMeterIo {
/** Return IO usage snapshot of all nodes in the H2O cluster. */
@GET("/3/WaterMeterIo/{nodeidx}")
Call<WaterMeterIoV3> fetch(@Path("nodeidx") int nodeidx);
/** Return IO usage snapshot of all nodes in the H2O cluster. */
@GET("/3/WaterMeterIo")
Call<WaterMeterIoV3> fetch_all();
}
|
0
|
java-sources/ai/h2o/h2o-jetty-8/3.46.0.7/ai/h2o/org/eclipse/jetty/jaas
|
java-sources/ai/h2o/h2o-jetty-8/3.46.0.7/ai/h2o/org/eclipse/jetty/jaas/api/LdapLoginModule.java
|
package ai.h2o.org.eclipse.jetty.jaas.api;
/**
* LdapLoginModule is relocated in Sparkling Water to the package ai.h2o.org.eclipse.jetty.jaas.spi
* of Jetty 9. External backend workers on Hadoop 2 use Jetty 8 and therefore the package
* org.eclipse.jetty.plus.jaas.spi. This class makes it possible to use a single package name in both cases.
*/
public class LdapLoginModule extends org.eclipse.jetty.plus.jaas.spi.LdapLoginModule { /* empty */ }
|
0
|
java-sources/ai/h2o/h2o-jetty-8/3.46.0.7/ai/h2o/org/eclipse/jetty/plus/jaas
|
java-sources/ai/h2o/h2o-jetty-8/3.46.0.7/ai/h2o/org/eclipse/jetty/plus/jaas/spi/LdapLoginModule.java
|
package ai.h2o.org.eclipse.jetty.plus.jaas.spi;
/**
* LdapLoginModule is relocated in Sparkling Water to the package ai.h2o.org.eclipse.jetty.jaas.spi
* of Jetty 9. External backend workers on Hadoop 2 use Jetty 8 and therefore the package
* org.eclipse.jetty.plus.jaas.spi. This class makes it possible to use a single package name in both cases.
*/
public class LdapLoginModule extends org.eclipse.jetty.plus.jaas.spi.LdapLoginModule { /* empty */ }
|
0
|
java-sources/ai/h2o/h2o-jetty-8/3.46.0.7/ai/h2o/org/eclipse/jetty/security
|
java-sources/ai/h2o/h2o-jetty-8/3.46.0.7/ai/h2o/org/eclipse/jetty/security/authentication/SpnegoAuthenticator.java
|
//
// ========================================================================
// Copyright (c) 1995-2019 Mort Bay Consulting Pty. Ltd.
// ------------------------------------------------------------------------
// All rights reserved. This program and the accompanying materials
// are made available under the terms of the Eclipse Public License v1.0
// and Apache License v2.0 which accompanies this distribution.
//
// The Eclipse Public License is available at
// http://www.eclipse.org/legal/epl-v10.html
//
// The Apache License v2.0 is available at
// http://www.opensource.org/licenses/apache2.0.php
//
// You may elect to redistribute this code under either of these licenses.
// ========================================================================
//
package ai.h2o.org.eclipse.jetty.security.authentication;
import java.io.IOException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.eclipse.jetty.http.HttpHeaders;
import org.eclipse.jetty.security.ServerAuthException;
import org.eclipse.jetty.security.UserAuthentication;
import org.eclipse.jetty.security.authentication.DeferredAuthentication;
import org.eclipse.jetty.security.authentication.LoginAuthenticator;
import org.eclipse.jetty.server.Authentication;
import org.eclipse.jetty.server.Authentication.User;
import org.eclipse.jetty.server.UserIdentity;
import org.eclipse.jetty.util.log.Log;
import org.eclipse.jetty.util.log.Logger;
import org.eclipse.jetty.util.security.Constraint;
public class SpnegoAuthenticator extends LoginAuthenticator
{
private static final Logger LOG = Log.getLogger(SpnegoAuthenticator.class);
private String _authMethod = Constraint.__SPNEGO_AUTH;
public SpnegoAuthenticator()
{
}
/**
* Allow for a custom authMethod value to be set for instances where SPNEGO may not be appropriate
*
* @param authMethod the auth method
*/
public SpnegoAuthenticator(String authMethod)
{
_authMethod = authMethod;
}
@Override
public String getAuthMethod()
{
return _authMethod;
}
@Override
public Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory) throws ServerAuthException
{
HttpServletRequest req = (HttpServletRequest)request;
HttpServletResponse res = (HttpServletResponse)response;
String header = req.getHeader(HttpHeaders.AUTHORIZATION);
String authScheme = getAuthSchemeFromHeader(header);
if (!mandatory)
{
return new DeferredAuthentication(this);
}
// The client has responded to the challenge we sent previously
if (header != null && isAuthSchemeNegotiate(authScheme))
{
String spnegoToken = header.substring(10);
UserIdentity user = login(null, spnegoToken, request);
if (user != null)
{
return new UserAuthentication(getAuthMethod(), user);
}
}
// A challenge should be sent if any of the following cases are true:
// 1. There was no Authorization header provided
// 2. There was an Authorization header for a type other than Negotiate
try
{
if (DeferredAuthentication.isDeferred(res))
{
return Authentication.UNAUTHENTICATED;
}
LOG.debug("Sending challenge");
res.setHeader(HttpHeaders.WWW_AUTHENTICATE, HttpHeaders.NEGOTIATE);
res.sendError(HttpServletResponse.SC_UNAUTHORIZED);
return Authentication.SEND_CONTINUE;
}
catch (IOException ioe)
{
throw new ServerAuthException(ioe);
}
}
/**
* Extracts the auth_scheme from the HTTP Authorization header, {@code Authorization: <auth_scheme> <token>}.
*
* @param header The HTTP Authorization header or null.
* @return The parsed auth scheme from the header, or the empty string.
*/
String getAuthSchemeFromHeader(String header)
{
// No header provided, return the empty string
if (header == null || header.isEmpty())
{
return "";
}
// Trim any leading whitespace
String trimmedHeader = header.trim();
// Find the first space, all characters prior should be the auth_scheme
int index = trimmedHeader.indexOf(' ');
if (index > 0)
{
return trimmedHeader.substring(0, index);
}
// If we don't find a space, this is likely malformed, just return the entire value
return trimmedHeader;
}
/**
* Determines if provided auth scheme text from the Authorization header is case-insensitively
* equal to {@code negotiate}.
*
* @param authScheme The auth scheme component of the Authorization header
* @return True if the auth scheme component is case-insensitively equal to {@code negotiate}, False otherwise.
*/
boolean isAuthSchemeNegotiate(String authScheme)
{
if (authScheme == null || authScheme.length() != HttpHeaders.NEGOTIATE.length())
{
return false;
}
// Headers should be treated case-insensitively, so we have to jump through some extra hoops.
return authScheme.equalsIgnoreCase(HttpHeaders.NEGOTIATE);
}
@Override
public boolean secureResponse(ServletRequest request, ServletResponse response, boolean mandatory, User validatedUser) throws ServerAuthException
{
return true;
}
}
|
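The relocated SpnegoAuthenticator is an ordinary Jetty Authenticator (it extends LoginAuthenticator), so it would be registered on a security handler like any other Jetty authenticator. A minimal sketch, assuming Jetty's ConstraintSecurityHandler and leaving the SPNEGO-capable LoginService and the constraint mappings out:

// Hypothetical wiring; the surrounding server setup, the login service and the
// constraint mappings are assumptions and are not shown here.
import ai.h2o.org.eclipse.jetty.security.authentication.SpnegoAuthenticator;
import org.eclipse.jetty.security.ConstraintSecurityHandler;

public class SpnegoWiringSketch {
    public static ConstraintSecurityHandler securityHandler() {
        ConstraintSecurityHandler handler = new ConstraintSecurityHandler();
        // Register the relocated authenticator; a matching SPNEGO-capable
        // LoginService still has to be set on the handler before use.
        handler.setAuthenticator(new SpnegoAuthenticator());
        return handler;
    }
}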
0
|
java-sources/ai/h2o/h2o-jetty-8/3.46.0.7/org/eclipse/jetty
|
java-sources/ai/h2o/h2o-jetty-8/3.46.0.7/org/eclipse/jetty/server/Response.java
|
//
// ========================================================================
// Copyright (c) 1995-2016 Mort Bay Consulting Pty. Ltd.
// ------------------------------------------------------------------------
// All rights reserved. This program and the accompanying materials
// are made available under the terms of the Eclipse Public License v1.0
// and Apache License v2.0 which accompanies this distribution.
//
// The Eclipse Public License is available at
// http://www.eclipse.org/legal/epl-v10.html
//
// The Apache License v2.0 is available at
// http://www.opensource.org/licenses/apache2.0.php
//
// You may elect to redistribute this code under either of these licenses.
// ========================================================================
//
package org.eclipse.jetty.server;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.Collection;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Locale;
import javax.servlet.RequestDispatcher;
import javax.servlet.ServletOutputStream;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpSession;
import org.eclipse.jetty.http.HttpCookie;
import org.eclipse.jetty.http.HttpFields;
import org.eclipse.jetty.http.HttpGenerator;
import org.eclipse.jetty.http.HttpHeaderValues;
import org.eclipse.jetty.http.HttpHeaders;
import org.eclipse.jetty.http.HttpSchemes;
import org.eclipse.jetty.http.HttpStatus;
import org.eclipse.jetty.http.HttpURI;
import org.eclipse.jetty.http.HttpVersions;
import org.eclipse.jetty.http.MimeTypes;
import org.eclipse.jetty.io.BufferCache.CachedBuffer;
import org.eclipse.jetty.server.handler.ContextHandler;
import org.eclipse.jetty.server.handler.ErrorHandler;
import org.eclipse.jetty.util.ByteArrayISO8859Writer;
import org.eclipse.jetty.util.QuotedStringTokenizer;
import org.eclipse.jetty.util.StringUtil;
import org.eclipse.jetty.util.URIUtil;
import org.eclipse.jetty.util.log.Log;
import org.eclipse.jetty.util.log.Logger;
/** Response.
* <p>
* Implements {@link javax.servlet.http.HttpServletResponse} from the <code>javax.servlet.http</code> package.
* </p>
*/
public class Response implements HttpServletResponse
{
private static final Logger LOG = Log.getLogger(Response.class);
private final static int __MIN_BUFFER_SIZE = 1;
public static final int
NONE=0,
STREAM=1,
WRITER=2;
/**
* If a header name starts with this string, the header (stripped of the prefix)
* can be set during include using only {@link #setHeader(String, String)} or
* {@link #addHeader(String, String)}.
*/
public final static String SET_INCLUDE_HEADER_PREFIX = "org.eclipse.jetty.server.include.";
/**
* If this string is found within the comment of a cookie added with {@link #addCookie(Cookie)}, then the cookie
* will be set as HTTP ONLY.
*/
public final static String HTTP_ONLY_COMMENT="__HTTP_ONLY__";
/* ------------------------------------------------------------ */
public static Response getResponse(HttpServletResponse response)
{
if (response instanceof Response)
return (Response)response;
return AbstractHttpConnection.getCurrentConnection().getResponse();
}
private final AbstractHttpConnection _connection;
private int _status=SC_OK;
private String _reason;
private Locale _locale;
private String _mimeType;
private CachedBuffer _cachedMimeType;
private String _characterEncoding;
private boolean _explicitEncoding;
private String _contentType;
private volatile int _outputState;
private PrintWriter _writer;
/* ------------------------------------------------------------ */
/**
*
*/
public Response(AbstractHttpConnection connection)
{
_connection=connection;
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.ServletResponse#reset()
*/
protected void recycle()
{
_status=SC_OK;
_reason=null;
_locale=null;
_mimeType=null;
_cachedMimeType=null;
_characterEncoding=null;
_explicitEncoding=false;
_contentType=null;
_writer=null;
_outputState=NONE;
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.http.HttpServletResponse#addCookie(javax.servlet.http.Cookie)
*/
public void addCookie(HttpCookie cookie)
{
_connection.getResponseFields().addSetCookie(cookie);
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.http.HttpServletResponse#addCookie(javax.servlet.http.Cookie)
*/
public void addCookie(Cookie cookie)
{
String comment=cookie.getComment();
boolean http_only=false;
if (comment!=null)
{
int i=comment.indexOf(HTTP_ONLY_COMMENT);
if (i>=0)
{
http_only=true;
comment=comment.replace(HTTP_ONLY_COMMENT,"").trim();
if (comment.length()==0)
comment=null;
}
}
_connection.getResponseFields().addSetCookie(cookie.getName(),
cookie.getValue(),
cookie.getDomain(),
cookie.getPath(),
cookie.getMaxAge(),
comment,
cookie.getSecure(),
http_only || cookie.isHttpOnly(),
cookie.getVersion());
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.http.HttpServletResponse#containsHeader(java.lang.String)
*/
public boolean containsHeader(String name)
{
return _connection.getResponseFields().containsKey(name);
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.http.HttpServletResponse#encodeURL(java.lang.String)
*/
public String encodeURL(String url)
{
final Request request=_connection.getRequest();
SessionManager sessionManager = request.getSessionManager();
if (sessionManager==null)
return url;
HttpURI uri = null;
if (sessionManager.isCheckingRemoteSessionIdEncoding() && URIUtil.hasScheme(url))
{
uri = new HttpURI(url);
String path = uri.getPath();
path = (path == null?"":path);
int port=uri.getPort();
if (port<0)
port = HttpSchemes.HTTPS.equalsIgnoreCase(uri.getScheme())?443:80;
if (!request.getServerName().equalsIgnoreCase(uri.getHost()) ||
request.getServerPort()!=port ||
!path.startsWith(request.getContextPath())) //TODO the root context path is "", with which every non null string starts
return url;
}
String sessionURLPrefix = sessionManager.getSessionIdPathParameterNamePrefix();
if (sessionURLPrefix==null)
return url;
if (url==null)
return null;
// should not encode if cookies in evidence
if ((sessionManager.isUsingCookies() && request.isRequestedSessionIdFromCookie()) || !sessionManager.isUsingURLs())
{
int prefix=url.indexOf(sessionURLPrefix);
if (prefix!=-1)
{
int suffix=url.indexOf("?",prefix);
if (suffix<0)
suffix=url.indexOf("#",prefix);
if (suffix<=prefix)
return url.substring(0,prefix);
return url.substring(0,prefix)+url.substring(suffix);
}
return url;
}
// get session;
HttpSession session=request.getSession(false);
// no session
if (session == null)
return url;
// invalid session
if (!sessionManager.isValid(session))
return url;
String id=sessionManager.getNodeId(session);
if (uri == null)
uri = new HttpURI(url);
// Already encoded
int prefix=url.indexOf(sessionURLPrefix);
if (prefix!=-1)
{
int suffix=url.indexOf("?",prefix);
if (suffix<0)
suffix=url.indexOf("#",prefix);
if (suffix<=prefix)
return url.substring(0,prefix+sessionURLPrefix.length())+id;
return url.substring(0,prefix+sessionURLPrefix.length())+id+
url.substring(suffix);
}
// edit the session
int suffix=url.indexOf('?');
if (suffix<0)
suffix=url.indexOf('#');
if (suffix<0)
{
return url+
((HttpSchemes.HTTPS.equalsIgnoreCase(uri.getScheme()) || HttpSchemes.HTTP.equalsIgnoreCase(uri.getScheme())) && uri.getPath()==null?"/":"") + //if no path, insert the root path
sessionURLPrefix+id;
}
return url.substring(0,suffix)+
((HttpSchemes.HTTPS.equalsIgnoreCase(uri.getScheme()) || HttpSchemes.HTTP.equalsIgnoreCase(uri.getScheme())) && uri.getPath()==null?"/":"")+ //if no path so insert the root path
sessionURLPrefix+id+url.substring(suffix);
}
/* ------------------------------------------------------------ */
/**
* @see javax.servlet.http.HttpServletResponse#encodeRedirectURL(java.lang.String)
*/
public String encodeRedirectURL(String url)
{
return encodeURL(url);
}
/* ------------------------------------------------------------ */
@Deprecated
public String encodeUrl(String url)
{
return encodeURL(url);
}
/* ------------------------------------------------------------ */
@Deprecated
public String encodeRedirectUrl(String url)
{
return encodeRedirectURL(url);
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.http.HttpServletResponse#sendError(int, java.lang.String)
*/
public void sendError(int code, String message) throws IOException
{
if (_connection.isIncluding())
return;
if (isCommitted())
LOG.warn("Committed before "+code+" "+message);
resetBuffer();
_characterEncoding=null;
setHeader(HttpHeaders.EXPIRES,null);
setHeader(HttpHeaders.LAST_MODIFIED,null);
setHeader(HttpHeaders.CACHE_CONTROL,null);
setHeader(HttpHeaders.CONTENT_TYPE,null);
setHeader(HttpHeaders.CONTENT_LENGTH,null);
_outputState=NONE;
setStatus(code,message);
if (message==null)
message=HttpStatus.getMessage(code);
// If we are allowed to have a body
if (code!=SC_NO_CONTENT &&
code!=SC_NOT_MODIFIED &&
code!=SC_PARTIAL_CONTENT &&
code>=SC_OK)
{
Request request = _connection.getRequest();
ErrorHandler error_handler = null;
ContextHandler.Context context = request.getContext();
if (context!=null)
error_handler=context.getContextHandler().getErrorHandler();
if (error_handler==null)
error_handler = _connection.getConnector().getServer().getBean(ErrorHandler.class);
if (error_handler!=null)
{
request.setAttribute(RequestDispatcher.ERROR_STATUS_CODE, code);
request.setAttribute(RequestDispatcher.ERROR_MESSAGE, message);
request.setAttribute(RequestDispatcher.ERROR_REQUEST_URI, request.getRequestURI());
request.setAttribute(RequestDispatcher.ERROR_SERVLET_NAME,request.getServletName());
error_handler.handle(null,_connection.getRequest(),_connection.getRequest(),this );
}
else
{
setHeader(HttpHeaders.CACHE_CONTROL, "must-revalidate,no-cache,no-store");
setContentType(MimeTypes.TEXT_HTML_8859_1);
ByteArrayISO8859Writer writer= new ByteArrayISO8859Writer(2048);
if (message != null)
{
message= StringUtil.replace(message, "&", "&amp;");
message= StringUtil.replace(message, "<", "&lt;");
message= StringUtil.replace(message, ">", "&gt;");
}
String uri= request.getRequestURI();
if (uri!=null)
{
uri= StringUtil.replace(uri, "&", "&amp;");
uri= StringUtil.replace(uri, "<", "&lt;");
uri= StringUtil.replace(uri, ">", "&gt;");
}
writer.write("<html>\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/html;charset=ISO-8859-1\"/>\n");
writer.write("<title>Error ");
writer.write(Integer.toString(code));
writer.write(' ');
if (message==null)
message=HttpStatus.getMessage(code);
writer.write(message);
writer.write("</title>\n</head>\n<body>\n<h2>HTTP ERROR: ");
writer.write(Integer.toString(code));
writer.write("</h2>\n<p>Problem accessing ");
writer.write(uri);
writer.write(". Reason:\n<pre> ");
writer.write(message);
writer.write("</pre>");
writer.write("</p>\n");
if(_connection.getServer().getSendServerVersion())
{
writer.write("<hr /><i><small>Powered by Jetty:// ");
writer.write(Server.getVersion());
writer.write("</small></i>");
}
for (int i= 0; i < 20; i++)
writer.write("\n ");
writer.write("\n</body>\n</html>\n");
writer.flush();
setContentLength(writer.size());
writer.writeTo(getOutputStream());
writer.destroy();
}
}
else if (code!=SC_PARTIAL_CONTENT)
{
_connection.getRequestFields().remove(HttpHeaders.CONTENT_TYPE_BUFFER);
_connection.getRequestFields().remove(HttpHeaders.CONTENT_LENGTH_BUFFER);
_characterEncoding=null;
_mimeType=null;
_cachedMimeType=null;
}
complete();
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.http.HttpServletResponse#sendError(int)
*/
public void sendError(int sc) throws IOException
{
switch (sc)
{
case -1:
_connection.getEndPoint().close();
break;
case 102:
sendProcessing();
break;
default:
sendError(sc,null);
break;
}
}
/* ------------------------------------------------------------ */
/* Send a 102-Processing response.
* If the connection is an HTTP connection, the version is 1.1 and the
* request has an Expect header starting with 102, then a 102 response is
* sent. This indicates that the request is still being processed and a real
* response can still be sent. This method is called by sendError if it is passed 102.
* @see javax.servlet.http.HttpServletResponse#sendError(int)
*/
public void sendProcessing() throws IOException
{
if (_connection.isExpecting102Processing() && !isCommitted())
((HttpGenerator)_connection.getGenerator()).send1xx(HttpStatus.PROCESSING_102);
}
public static boolean RELATIVE_REDIRECT_ALLOWED = true;
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.http.HttpServletResponse#sendRedirect(java.lang.String)
*/
public void sendRedirect(String location) throws IOException
{
if (_connection.isIncluding())
return;
if (location==null)
throw new IllegalArgumentException();
if (!URIUtil.hasScheme(location))
{
StringBuilder buf = RELATIVE_REDIRECT_ALLOWED ? new StringBuilder() : _connection.getRequest().getRootURL();
if (location.startsWith("/"))
{
// absolute in context
location=URIUtil.canonicalPath(location);
}
else
{
// relative to request
String path=_connection.getRequest().getRequestURI();
String parent=(path.endsWith("/"))?path:URIUtil.parentPath(path);
location=URIUtil.canonicalPath(URIUtil.addPaths(parent,location));
if (!location.startsWith("/"))
buf.append('/');
}
if(location==null)
throw new IllegalStateException("path cannot be above root");
buf.append(location);
location=buf.toString();
}
resetBuffer();
setHeader(HttpHeaders.LOCATION,location);
setStatus(HttpServletResponse.SC_MOVED_TEMPORARILY);
complete();
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.http.HttpServletResponse#setDateHeader(java.lang.String, long)
*/
public void setDateHeader(String name, long date)
{
if (!_connection.isIncluding())
_connection.getResponseFields().putDateField(name, date);
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.http.HttpServletResponse#addDateHeader(java.lang.String, long)
*/
public void addDateHeader(String name, long date)
{
if (!_connection.isIncluding())
_connection.getResponseFields().addDateField(name, date);
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.http.HttpServletResponse#setHeader(java.lang.String, java.lang.String)
*/
public void setHeader(String name, String value)
{
if (HttpHeaders.CONTENT_TYPE.equalsIgnoreCase(name))
setContentType(value);
else
{
if (_connection.isIncluding())
{
if (name.startsWith(SET_INCLUDE_HEADER_PREFIX))
name=name.substring(SET_INCLUDE_HEADER_PREFIX.length());
else
return;
}
_connection.getResponseFields().put(name, value);
if (HttpHeaders.CONTENT_LENGTH.equalsIgnoreCase(name))
{
if (value==null)
_connection._generator.setContentLength(-1);
else
_connection._generator.setContentLength(Long.parseLong(value));
}
}
}
/* ------------------------------------------------------------ */
public Collection<String> getHeaderNames()
{
final HttpFields fields=_connection.getResponseFields();
return fields.getFieldNamesCollection();
}
/* ------------------------------------------------------------ */
/*
*/
public String getHeader(String name)
{
return _connection.getResponseFields().getStringField(name);
}
/* ------------------------------------------------------------ */
/*
*/
public Collection<String> getHeaders(String name)
{
final HttpFields fields=_connection.getResponseFields();
Collection<String> i = fields.getValuesCollection(name);
if (i==null)
return Collections.EMPTY_LIST;
return i;
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.http.HttpServletResponse#addHeader(java.lang.String, java.lang.String)
*/
public void addHeader(String name, String value)
{
if (_connection.isIncluding())
{
if (name.startsWith(SET_INCLUDE_HEADER_PREFIX))
name=name.substring(SET_INCLUDE_HEADER_PREFIX.length());
else
return;
}
if (HttpHeaders.CONTENT_TYPE.equalsIgnoreCase(name))
{
setContentType(value);
return;
}
_connection.getResponseFields().add(name, value);
if (HttpHeaders.CONTENT_LENGTH.equalsIgnoreCase(name))
_connection._generator.setContentLength(Long.parseLong(value));
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.http.HttpServletResponse#setIntHeader(java.lang.String, int)
*/
public void setIntHeader(String name, int value)
{
if (!_connection.isIncluding())
{
_connection.getResponseFields().putLongField(name, value);
if (HttpHeaders.CONTENT_LENGTH.equalsIgnoreCase(name))
_connection._generator.setContentLength(value);
}
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.http.HttpServletResponse#addIntHeader(java.lang.String, int)
*/
public void addIntHeader(String name, int value)
{
if (!_connection.isIncluding())
{
_connection.getResponseFields().addLongField(name, value);
if (HttpHeaders.CONTENT_LENGTH.equalsIgnoreCase(name))
_connection._generator.setContentLength(value);
}
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.http.HttpServletResponse#setStatus(int)
*/
public void setStatus(int sc)
{
setStatus(sc,null);
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.http.HttpServletResponse#setStatus(int, java.lang.String)
*/
public void setStatus(int sc, String sm)
{
if (sc<=0)
throw new IllegalArgumentException();
if (!_connection.isIncluding())
{
_status=sc;
_reason=sm;
}
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.ServletResponse#getCharacterEncoding()
*/
public String getCharacterEncoding()
{
if (_characterEncoding==null)
_characterEncoding=StringUtil.__ISO_8859_1;
return _characterEncoding;
}
/* ------------------------------------------------------------ */
String getSetCharacterEncoding()
{
return _characterEncoding;
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.ServletResponse#getContentType()
*/
public String getContentType()
{
return _contentType;
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.ServletResponse#getOutputStream()
*/
public ServletOutputStream getOutputStream() throws IOException
{
if (_outputState!=NONE && _outputState!=STREAM)
throw new IllegalStateException("WRITER");
ServletOutputStream out = _connection.getOutputStream();
_outputState=STREAM;
return out;
}
/* ------------------------------------------------------------ */
public boolean isWriting()
{
return _outputState==WRITER;
}
/* ------------------------------------------------------------ */
public boolean isOutputing()
{
return _outputState!=NONE;
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.ServletResponse#getWriter()
*/
public PrintWriter getWriter() throws IOException
{
if (_outputState!=NONE && _outputState!=WRITER)
throw new IllegalStateException("STREAM");
/* if there is no writer yet */
if (_writer==null)
{
/* get encoding from Content-Type header */
String encoding = _characterEncoding;
if (encoding==null)
{
/* implementation of educated defaults */
if(_cachedMimeType != null)
encoding = MimeTypes.getCharsetFromContentType(_cachedMimeType);
if (encoding==null)
encoding = StringUtil.__ISO_8859_1;
setCharacterEncoding(encoding);
}
/* construct Writer using correct encoding */
_writer = _connection.getPrintWriter(encoding);
}
_outputState=WRITER;
return _writer;
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.ServletResponse#setCharacterEncoding(java.lang.String)
*/
public void setCharacterEncoding(String encoding)
{
if (_connection.isIncluding())
return;
if (this._outputState==0 && !isCommitted())
{
_explicitEncoding=true;
if (encoding==null)
{
// Clear any encoding.
if (_characterEncoding!=null)
{
_characterEncoding=null;
if (_cachedMimeType!=null)
_contentType=_cachedMimeType.toString();
else if (_mimeType!=null)
_contentType=_mimeType;
else
_contentType=null;
if (_contentType==null)
_connection.getResponseFields().remove(HttpHeaders.CONTENT_TYPE_BUFFER);
else
_connection.getResponseFields().put(HttpHeaders.CONTENT_TYPE_BUFFER,_contentType);
}
}
else
{
// No, so just add this one to the mimetype
_characterEncoding=encoding;
if (_contentType!=null)
{
int i0=_contentType.indexOf(';');
if (i0<0)
{
_contentType=null;
if(_cachedMimeType!=null)
{
CachedBuffer content_type = _cachedMimeType.getAssociate(_characterEncoding);
if (content_type!=null)
{
_contentType=content_type.toString();
_connection.getResponseFields().put(HttpHeaders.CONTENT_TYPE_BUFFER,content_type);
}
}
if (_contentType==null)
{
_contentType = _mimeType+";charset="+QuotedStringTokenizer.quoteIfNeeded(_characterEncoding,";= ");
_connection.getResponseFields().put(HttpHeaders.CONTENT_TYPE_BUFFER,_contentType);
}
}
else
{
int i1=_contentType.indexOf("charset=",i0);
if (i1<0)
{
_contentType = _contentType+";charset="+QuotedStringTokenizer.quoteIfNeeded(_characterEncoding,";= ");
}
else
{
int i8=i1+8;
int i2=_contentType.indexOf(" ",i8);
if (i2<0)
_contentType=_contentType.substring(0,i8)+QuotedStringTokenizer.quoteIfNeeded(_characterEncoding,";= ");
else
_contentType=_contentType.substring(0,i8)+QuotedStringTokenizer.quoteIfNeeded(_characterEncoding,";= ")+_contentType.substring(i2);
}
_connection.getResponseFields().put(HttpHeaders.CONTENT_TYPE_BUFFER,_contentType);
}
}
}
}
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.ServletResponse#setContentLength(int)
*/
public void setContentLength(int len)
{
// Protect from setting after committed as default handling
// of a servlet HEAD request ALWAYS sets _content length, even
// if the getHandling committed the response!
if (isCommitted() || _connection.isIncluding())
return;
_connection._generator.setContentLength(len);
if (len>0)
{
_connection.getResponseFields().putLongField(HttpHeaders.CONTENT_LENGTH, len);
if (_connection._generator.isAllContentWritten())
{
if (_outputState==WRITER)
_writer.close();
else if (_outputState==STREAM)
{
try
{
getOutputStream().close();
}
catch(IOException e)
{
throw new RuntimeException(e);
}
}
}
}
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.ServletResponse#setContentLength(int)
*/
public void setLongContentLength(long len)
{
// Protect from setting after committed as default handling
// of a servlet HEAD request ALWAYS sets _content length, even
// if the getHandling committed the response!
if (isCommitted() || _connection.isIncluding())
return;
_connection._generator.setContentLength(len);
_connection.getResponseFields().putLongField(HttpHeaders.CONTENT_LENGTH, len);
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.ServletResponse#setContentType(java.lang.String)
*/
public void setContentType(String contentType)
{
if (isCommitted() || _connection.isIncluding())
return;
// Yes this method is horribly complex.... but there are lots of special cases and
// as this method is called on every request, it is worth trying to save string creation.
//
if (contentType==null)
{
if (_locale==null)
_characterEncoding=null;
_mimeType=null;
_cachedMimeType=null;
_contentType=null;
_connection.getResponseFields().remove(HttpHeaders.CONTENT_TYPE_BUFFER);
}
else
{
// Look for encoding in contentType
int i0=contentType.indexOf(';');
if (i0>0)
{
// we have content type parameters
// Extract params off mimetype
_mimeType=contentType.substring(0,i0).trim();
_cachedMimeType=MimeTypes.CACHE.get(_mimeType);
// Look for charset
int i1=contentType.indexOf("charset=",i0+1);
if (i1>=0)
{
_explicitEncoding=true;
int i8=i1+8;
int i2 = contentType.indexOf(' ',i8);
if (_outputState==WRITER)
{
// strip the charset and ignore;
if ((i1==i0+1 && i2<0) || (i1==i0+2 && i2<0 && contentType.charAt(i0+1)==' '))
{
if (_cachedMimeType!=null)
{
CachedBuffer content_type = _cachedMimeType.getAssociate(_characterEncoding);
if (content_type!=null)
{
_contentType=content_type.toString();
_connection.getResponseFields().put(HttpHeaders.CONTENT_TYPE_BUFFER,content_type);
}
else
{
_contentType=_mimeType+";charset="+_characterEncoding;
_connection.getResponseFields().put(HttpHeaders.CONTENT_TYPE_BUFFER,_contentType);
}
}
else
{
_contentType=_mimeType+";charset="+_characterEncoding;
_connection.getResponseFields().put(HttpHeaders.CONTENT_TYPE_BUFFER,_contentType);
}
}
else if (i2<0)
{
_contentType=contentType.substring(0,i1)+";charset="+QuotedStringTokenizer.quoteIfNeeded(_characterEncoding,";= ");
_connection.getResponseFields().put(HttpHeaders.CONTENT_TYPE_BUFFER,_contentType);
}
else
{
_contentType=contentType.substring(0,i1)+contentType.substring(i2)+";charset="+QuotedStringTokenizer.quoteIfNeeded(_characterEncoding,";= ");
_connection.getResponseFields().put(HttpHeaders.CONTENT_TYPE_BUFFER,_contentType);
}
}
else if ((i1==i0+1 && i2<0) || (i1==i0+2 && i2<0 && contentType.charAt(i0+1)==' '))
{
// The params are just the char encoding
_cachedMimeType=MimeTypes.CACHE.get(_mimeType);
_characterEncoding = QuotedStringTokenizer.unquote(contentType.substring(i8));
if (_cachedMimeType!=null)
{
CachedBuffer content_type = _cachedMimeType.getAssociate(_characterEncoding);
if (content_type!=null)
{
_contentType=content_type.toString();
_connection.getResponseFields().put(HttpHeaders.CONTENT_TYPE_BUFFER,content_type);
}
else
{
_contentType=contentType;
_connection.getResponseFields().put(HttpHeaders.CONTENT_TYPE_BUFFER,_contentType);
}
}
else
{
_contentType=contentType;
_connection.getResponseFields().put(HttpHeaders.CONTENT_TYPE_BUFFER,_contentType);
}
}
else if (i2>0)
{
_characterEncoding = QuotedStringTokenizer.unquote(contentType.substring(i8,i2));
_contentType=contentType;
_connection.getResponseFields().put(HttpHeaders.CONTENT_TYPE_BUFFER,_contentType);
}
else
{
_characterEncoding = QuotedStringTokenizer.unquote(contentType.substring(i8));
_contentType=contentType;
_connection.getResponseFields().put(HttpHeaders.CONTENT_TYPE_BUFFER,_contentType);
}
}
else // No encoding in the params.
{
_cachedMimeType=null;
_contentType=_characterEncoding==null?contentType:contentType+";charset="+QuotedStringTokenizer.quoteIfNeeded(_characterEncoding,";= ");
_connection.getResponseFields().put(HttpHeaders.CONTENT_TYPE_BUFFER,_contentType);
}
}
else // No params at all
{
_mimeType=contentType;
_cachedMimeType=MimeTypes.CACHE.get(_mimeType);
if (_characterEncoding!=null)
{
if (_cachedMimeType!=null)
{
CachedBuffer content_type = _cachedMimeType.getAssociate(_characterEncoding);
if (content_type!=null)
{
_contentType=content_type.toString();
_connection.getResponseFields().put(HttpHeaders.CONTENT_TYPE_BUFFER,content_type);
}
else
{
_contentType=_mimeType+";charset="+QuotedStringTokenizer.quoteIfNeeded(_characterEncoding,";= ");
_connection.getResponseFields().put(HttpHeaders.CONTENT_TYPE_BUFFER,_contentType);
}
}
else
{
_contentType=contentType+";charset="+QuotedStringTokenizer.quoteIfNeeded(_characterEncoding,";= ");
_connection.getResponseFields().put(HttpHeaders.CONTENT_TYPE_BUFFER,_contentType);
}
}
else if (_cachedMimeType!=null)
{
_contentType=_cachedMimeType.toString();
_connection.getResponseFields().put(HttpHeaders.CONTENT_TYPE_BUFFER,_cachedMimeType);
}
else
{
_contentType=contentType;
_connection.getResponseFields().put(HttpHeaders.CONTENT_TYPE_BUFFER,_contentType);
}
}
}
}
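    /* ------------------------------------------------------------ */
    /*
     * Illustrative behaviour of setContentType above (a sketch, not part of the original source):
     *   setContentType("text/html")               -> mime type "text/html"; any previously set charset is appended
     *   setContentType("text/html;charset=UTF-8") -> character encoding becomes "UTF-8" (ignored once a Writer is in use)
     *   setContentType(null)                      -> the Content-Type header is removed
     */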
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.ServletResponse#setBufferSize(int)
*/
public void setBufferSize(int size)
{
if (isCommitted() || getContentCount()>0)
throw new IllegalStateException("Committed or content written");
if (size <= 0)
size = __MIN_BUFFER_SIZE;
_connection.getGenerator().increaseContentBufferSize(size);
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.ServletResponse#getBufferSize()
*/
public int getBufferSize()
{
return _connection.getGenerator().getContentBufferSize();
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.ServletResponse#flushBuffer()
*/
public void flushBuffer() throws IOException
{
_connection.flushResponse();
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.ServletResponse#reset()
*/
public void reset()
{
resetBuffer();
fwdReset();
_status=200;
_reason=null;
HttpFields response_fields=_connection.getResponseFields();
response_fields.clear();
String connection=_connection.getRequestFields().getStringField(HttpHeaders.CONNECTION_BUFFER);
if (connection!=null)
{
String[] values = connection.split(",");
for (int i=0;values!=null && i<values.length;i++)
{
                CachedBuffer cb = HttpHeaderValues.CACHE.get(values[i].trim());
if (cb!=null)
{
switch(cb.getOrdinal())
{
case HttpHeaderValues.CLOSE_ORDINAL:
response_fields.put(HttpHeaders.CONNECTION_BUFFER,HttpHeaderValues.CLOSE_BUFFER);
break;
case HttpHeaderValues.KEEP_ALIVE_ORDINAL:
if (HttpVersions.HTTP_1_0.equalsIgnoreCase(_connection.getRequest().getProtocol()))
response_fields.put(HttpHeaders.CONNECTION_BUFFER,HttpHeaderValues.KEEP_ALIVE);
break;
case HttpHeaderValues.TE_ORDINAL:
response_fields.put(HttpHeaders.CONNECTION_BUFFER,HttpHeaderValues.TE);
break;
}
}
}
}
}
public void reset(boolean preserveCookies)
{
if (!preserveCookies)
reset();
else
{
HttpFields response_fields=_connection.getResponseFields();
ArrayList<String> cookieValues = new ArrayList<String>(5);
Enumeration<String> vals = response_fields.getValues(HttpHeaders.SET_COOKIE);
while (vals.hasMoreElements())
cookieValues.add((String)vals.nextElement());
reset();
for (String v:cookieValues)
response_fields.add(HttpHeaders.SET_COOKIE, v);
}
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.ServletResponse#reset()
*/
public void fwdReset()
{
resetBuffer();
_writer=null;
_outputState=NONE;
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.ServletResponse#resetBuffer()
*/
public void resetBuffer()
{
if (isCommitted())
throw new IllegalStateException("Committed");
_connection.getGenerator().resetBuffer();
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.ServletResponse#isCommitted()
*/
public boolean isCommitted()
{
return _connection.isResponseCommitted();
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.ServletResponse#setLocale(java.util.Locale)
*/
public void setLocale(Locale locale)
{
if (locale == null || isCommitted() ||_connection.isIncluding())
return;
_locale = locale;
_connection.getResponseFields().put(HttpHeaders.CONTENT_LANGUAGE_BUFFER,locale.toString().replace('_','-'));
if (_explicitEncoding || _outputState!=0 )
return;
if (_connection.getRequest().getContext()==null)
return;
String charset = _connection.getRequest().getContext().getContextHandler().getLocaleEncoding(locale);
if (charset!=null && charset.length()>0)
{
_characterEncoding=charset;
/* get current MIME type from Content-Type header */
String type=getContentType();
if (type!=null)
{
_characterEncoding=charset;
int semi=type.indexOf(';');
if (semi<0)
{
_mimeType=type;
_contentType= type += ";charset="+charset;
}
else
{
_mimeType=type.substring(0,semi);
_contentType= _mimeType += ";charset="+charset;
}
_cachedMimeType=MimeTypes.CACHE.get(_mimeType);
_connection.getResponseFields().put(HttpHeaders.CONTENT_TYPE_BUFFER,_contentType);
}
}
}
/* ------------------------------------------------------------ */
/*
* @see javax.servlet.ServletResponse#getLocale()
*/
public Locale getLocale()
{
if (_locale==null)
return Locale.getDefault();
return _locale;
}
/* ------------------------------------------------------------ */
/**
     * @return The HTTP status code that has been set for this response. This will be <code>200</code>
* ({@link HttpServletResponse#SC_OK}), unless explicitly set through one of the <code>setStatus</code> methods.
*/
public int getStatus()
{
return _status;
}
/* ------------------------------------------------------------ */
/**
* @return The reason associated with the current {@link #getStatus() status}. This will be <code>null</code>,
     * unless one of the <code>setStatus</code> methods has been called.
*/
public String getReason()
{
return _reason;
}
/* ------------------------------------------------------------ */
    /**
     * Complete this response: signal to the connection that the response is finished and flush it.
     */
public void complete()
throws IOException
{
_connection.completeResponse();
}
/* ------------------------------------------------------------- */
/**
* @return the number of bytes actually written in response body
*/
public long getContentCount()
{
if (_connection==null || _connection.getGenerator()==null)
return -1;
return _connection.getGenerator().getContentWritten();
}
/* ------------------------------------------------------------ */
public HttpFields getHttpFields()
{
return _connection.getResponseFields();
}
/* ------------------------------------------------------------ */
@Override
public String toString()
{
return "HTTP/1.1 "+_status+" "+ (_reason==null?"":_reason) +System.getProperty("line.separator")+
_connection.getResponseFields().toString();
}
/* ------------------------------------------------------------ */
/* ------------------------------------------------------------ */
/* ------------------------------------------------------------ */
private static class NullOutput extends ServletOutputStream
{
@Override
public void write(int b) throws IOException
{
}
@Override
public void print(String s) throws IOException
{
}
@Override
public void println(String s) throws IOException
{
}
@Override
public void write(byte[] b, int off, int len) throws IOException
{
}
}
}
|
0
|
java-sources/ai/h2o/h2o-jetty-8/3.46.0.7/water/webserver
|
java-sources/ai/h2o/h2o-jetty-8/3.46.0.7/water/webserver/jetty8/Jetty8DelegatingAuthenticator.java
|
package water.webserver.jetty8;
import org.eclipse.jetty.security.Authenticator;
import org.eclipse.jetty.security.ServerAuthException;
import org.eclipse.jetty.server.Authentication;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
/**
* Dynamically switches between Form-based authentication
* and Basic Access authentication.
 * The decision is based on the request's "User-Agent" header: browser clients use form-based
 * authentication, while all other clients use basic auth.
*/
class Jetty8DelegatingAuthenticator implements Authenticator {
private final Authenticator _primaryAuth;
private final Authenticator _formAuth;
Jetty8DelegatingAuthenticator(Authenticator primaryAuth, Authenticator formAuth) {
_primaryAuth = primaryAuth;
_formAuth = formAuth;
}
@Override
public void setConfiguration(AuthConfiguration configuration) {
_primaryAuth.setConfiguration(configuration);
_formAuth.setConfiguration(configuration);
}
@Override
public String getAuthMethod() {
return "FORM_PREFERRED";
}
@Override
public Authentication validateRequest(ServletRequest request, ServletResponse response,
boolean mandatory) throws ServerAuthException {
if (isBrowserAgent((HttpServletRequest) request))
return _formAuth.validateRequest(request, response, mandatory);
else
return _primaryAuth.validateRequest(request, response, mandatory);
}
private static boolean isBrowserAgent(HttpServletRequest request) {
String userAgent = request.getHeader("User-Agent");
// Covers all modern browsers (Firefox, Chrome, IE, Edge & Opera)
return (userAgent != null) &&
(userAgent.startsWith("Mozilla/") || userAgent.startsWith("Opera/"));
}
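// Examples (illustrative, not part of the original source): a User-Agent of
// "Mozilla/5.0 (X11; Linux x86_64) ..." is routed to the form authenticator, while
// "curl/7.68.0" or "Java/1.8.0_292" falls back to the primary (e.g. Basic) authenticator.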
@Override
public boolean secureResponse(ServletRequest request, ServletResponse response,
boolean mandatory, Authentication.User validatedUser) {
return true; // both BASIC and FORM return true
}
}
|
0
|
java-sources/ai/h2o/h2o-jetty-8/3.46.0.7/water/webserver
|
java-sources/ai/h2o/h2o-jetty-8/3.46.0.7/water/webserver/jetty8/Jetty8Facade.java
|
package water.webserver.jetty8;
import water.webserver.iface.Credentials;
import water.webserver.iface.H2OHttpView;
import water.webserver.iface.HttpServerFacade;
import water.webserver.iface.ProxyServer;
import water.webserver.iface.WebServer;
public class Jetty8Facade implements HttpServerFacade {
@Override
public WebServer createWebServer(H2OHttpView h2oHttpView) {
return Jetty8ServerAdapter.create(h2oHttpView);
}
@Override
public ProxyServer createProxyServer(H2OHttpView h2oHttpView, Credentials credentials, String proxyTo) {
return Jetty8ProxyServerAdapter.create(h2oHttpView, credentials, proxyTo);
}
}
|
0
|
java-sources/ai/h2o/h2o-jetty-8/3.46.0.7/water/webserver
|
java-sources/ai/h2o/h2o-jetty-8/3.46.0.7/water/webserver/jetty8/Jetty8Helper.java
|
package water.webserver.jetty8;
import ai.h2o.org.eclipse.jetty.security.authentication.SpnegoAuthenticator;
import org.eclipse.jetty.plus.jaas.JAASLoginService;
import org.eclipse.jetty.security.*;
import org.eclipse.jetty.security.authentication.BasicAuthenticator;
import org.eclipse.jetty.server.*;
import org.eclipse.jetty.server.handler.AbstractHandler;
import org.eclipse.jetty.server.handler.HandlerWrapper;
import org.eclipse.jetty.server.nio.SelectChannelConnector;
import org.eclipse.jetty.server.session.HashSessionIdManager;
import org.eclipse.jetty.server.session.HashSessionManager;
import org.eclipse.jetty.server.session.SessionHandler;
import org.eclipse.jetty.server.ssl.SslSelectChannelConnector;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.util.security.Constraint;
import org.eclipse.jetty.util.ssl.SslContextFactory;
import org.eclipse.jetty.util.thread.QueuedThreadPool;
import water.webserver.config.ConnectionConfiguration;
import water.webserver.iface.H2OHttpConfig;
import water.webserver.iface.H2OHttpView;
import water.webserver.iface.LoginType;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.util.Collections;
class Jetty8Helper {
private final H2OHttpConfig config;
private final H2OHttpView h2oHttpView;
Jetty8Helper(H2OHttpView h2oHttpView) {
this.h2oHttpView = h2oHttpView;
this.config = h2oHttpView.getConfig();
}
Server createJettyServer(String ip, int port) {
System.setProperty("org.eclipse.jetty.server.Request.maxFormContentSize", Integer.toString(Integer.MAX_VALUE));
final Server jettyServer = new Server();
if (config.ensure_daemon_threads) {
QueuedThreadPool pool = new QueuedThreadPool();
pool.setDaemon(true);
jettyServer.setThreadPool(pool);
}
jettyServer.setSendServerVersion(false);
final Connector connector;
final ConnectionConfiguration connConfig = getConnectionConfiguration();
if (connConfig.isSecure()) {
final SslContextFactory sslContextFactory = new SslContextFactory(config.jks);
sslContextFactory.setKeyStorePassword(config.jks_pass);
if (config.jks_alias != null) {
sslContextFactory.setCertAlias(config.jks_alias);
}
connector = new SslSelectChannelConnector(sslContextFactory);
} else {
connector = new SelectChannelConnector();
}
if (ip != null) {
connector.setHost(ip);
}
connector.setPort(port);
configureConnector(connector, connConfig);
jettyServer.setConnectors(new Connector[]{connector});
return jettyServer;
}
ConnectionConfiguration getConnectionConfiguration() {
return new ConnectionConfiguration(config.jks != null);
}
// Configure connector via properties which we can modify.
// Also increase request header size and buffer size from default values
// located in org.eclipse.jetty.http.HttpBuffersImpl
// see PUBDEV-5939 for details
static void configureConnector(Connector connector, ConnectionConfiguration cfg) {
connector.setRequestHeaderSize(cfg.getRequestHeaderSize());
connector.setRequestBufferSize(cfg.getRequestBufferSize());
connector.setResponseHeaderSize(cfg.getResponseHeaderSize());
connector.setResponseBufferSize(cfg.getOutputBufferSize(connector.getResponseBufferSize()));
if (!cfg.isRelativeRedirectAllowed()) {
// trick: the default value is enabled -> we need to touch the field only if the relative-redirects are disabled
// this means we don't have to worry about deployments where someone substituted our implementation for
// something else at assembly time
Response.RELATIVE_REDIRECT_ALLOWED = false;
}
connector.setMaxIdleTime(cfg.getIdleTimeout());
}
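// Minimal usage sketch (an illustration, not part of the original source; assumes the plain,
// non-TLS branch of createJettyServer above): build a connector and apply the H2O
// header/buffer/idle-timeout settings to it.
static Connector exampleConfiguredConnector(int port) {
Connector connector = new SelectChannelConnector();
connector.setPort(port);
configureConnector(connector, new ConnectionConfiguration(false));
return connector;
}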
HandlerWrapper authWrapper(Server jettyServer) {
if (config.loginType == LoginType.NONE) {
return jettyServer;
}
// REFER TO http://www.eclipse.org/jetty/documentation/9.1.4.v20140401/embedded-examples.html#embedded-secured-hello-handler
final LoginService loginService;
final Authenticator primaryAuthenticator;
switch (config.loginType) {
case HASH:
loginService = new HashLoginService("H2O", config.login_conf);
primaryAuthenticator = new BasicAuthenticator();
break;
case LDAP:
case KERBEROS:
case PAM:
loginService = new JAASLoginService(config.loginType.jaasRealm);
primaryAuthenticator = new BasicAuthenticator();
break;
case SPNEGO:
System.setProperty("javax.security.auth.useSubjectCredsOnly", "false");
loginService = new SpnegoLoginService(config.loginType.jaasRealm, config.spnego_properties);
primaryAuthenticator = new SpnegoAuthenticator();
break;
default:
throw new UnsupportedOperationException(config.loginType + ""); // this can never happen
}
final IdentityService identityService = new DefaultIdentityService();
loginService.setIdentityService(identityService);
jettyServer.addBean(loginService);
// Set a security handler as the first handler in the chain.
final ConstraintSecurityHandler security = new ConstraintSecurityHandler();
// Set up a constraint to authenticate all calls, and allow certain roles in.
final Constraint constraint = new Constraint();
constraint.setName("auth");
constraint.setAuthenticate(true);
// Configure role stuff (to be disregarded). We are ignoring roles, and only going off the user name.
//
// Jetty 8 and prior.
//
// Jetty 8 requires the security.setStrict(false) and ANY_ROLE.
security.setStrict(false);
constraint.setRoles(new String[]{Constraint.ANY_ROLE});
// Jetty 9 and later.
//
// Jetty 9 and later uses a different servlet spec, and ANY_AUTH gives the same behavior
// for that API version as ANY_ROLE did previously. This required some low-level debugging
// to figure out, so I'm documenting it here.
// Jetty 9 did not require security.setStrict(false).
//
// constraint.setRoles(new String[]{Constraint.ANY_AUTH});
final ConstraintMapping mapping = new ConstraintMapping();
mapping.setPathSpec("/*"); // Lock down all API calls
mapping.setConstraint(constraint);
security.setConstraintMappings(Collections.singletonList(mapping));
// Authentication / Authorization
final Authenticator authenticator;
if (config.form_auth) {
final ConnectionConfiguration connConfig = getConnectionConfiguration();
final Authenticator formAuthenticator = makeFormAuthenticator(connConfig.isRelativeRedirectAllowed());
authenticator = new Jetty8DelegatingAuthenticator(primaryAuthenticator, formAuthenticator);
} else {
authenticator = primaryAuthenticator;
}
security.setLoginService(loginService);
security.setAuthenticator(authenticator);
final HashSessionIdManager idManager = new HashSessionIdManager();
jettyServer.setSessionIdManager(idManager);
final HashSessionManager manager = new HashSessionManager();
if (config.session_timeout > 0) {
manager.setMaxInactiveInterval(config.session_timeout * 60);
}
final SessionHandler sessionHandler = new SessionHandler(manager);
sessionHandler.setHandler(security);
// Pass-through to H2O if authenticated.
jettyServer.setHandler(sessionHandler);
return security;
}
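// Resulting wiring when authentication is enabled (sketch): Server -> SessionHandler -> ConstraintSecurityHandler;
// the caller later attaches the actual application handlers underneath the returned security handler.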
static Authenticator makeFormAuthenticator(boolean useRelativeRedirects) {
final Authenticator formAuthenticator;
if (useRelativeRedirects) {
// If relative redirects are enabled - use our custom modified Authenticator
formAuthenticator = new water.webserver.jetty8.security.FormAuthenticator(
"/login", "/loginError", false, true
);
} else {
// Otherwise - prefer the default jetty authenticator
formAuthenticator = new org.eclipse.jetty.security.authentication.FormAuthenticator(
"/login", "/loginError", false
);
}
return formAuthenticator;
}
/**
* Hook up Jetty handlers. Do this before start() is called.
*/
ServletContextHandler createServletContextHandler() {
// Both security and session handlers are already created (Note: we don't want to create a new separate session
// handler just for ServletContextHandler - we want to have just one SessionHandler & SessionManager)
final ServletContextHandler context = new ServletContextHandler(ServletContextHandler.NO_SECURITY | ServletContextHandler.NO_SESSIONS);
if(null != config.context_path && ! config.context_path.isEmpty()) {
context.setContextPath(config.context_path);
} else {
context.setContextPath("/");
}
return context;
}
Handler authenticationHandler() {
return new AuthenticationHandler();
}
private class AuthenticationHandler extends AbstractHandler {
@Override
public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response)
throws IOException {
boolean handled = h2oHttpView.authenticationHandler(request, response);
if (handled) {
baseRequest.setHandled(true);
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-jetty-8/3.46.0.7/water/webserver
|
java-sources/ai/h2o/h2o-jetty-8/3.46.0.7/water/webserver/jetty8/Jetty8ProxyServerAdapter.java
|
package water.webserver.jetty8;
import org.eclipse.jetty.server.Handler;
import org.eclipse.jetty.server.Request;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.handler.HandlerCollection;
import org.eclipse.jetty.server.handler.HandlerWrapper;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlet.ServletHolder;
import water.webserver.iface.Credentials;
import water.webserver.iface.H2OHttpView;
import water.webserver.iface.ProxyServer;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
class Jetty8ProxyServerAdapter implements ProxyServer {
private final Jetty8Helper helper;
private final H2OHttpView h2oHttpView;
private final Credentials credentials;
private final String proxyTo;
private Jetty8ProxyServerAdapter(Jetty8Helper helper, H2OHttpView h2oHttpView, Credentials credentials, String proxyTo) {
this.helper = helper;
this.h2oHttpView = h2oHttpView;
this.credentials = credentials;
this.proxyTo = proxyTo;
}
static ProxyServer create(final H2OHttpView h2oHttpView, final Credentials credentials, final String proxyTo) {
final Jetty8Helper helper = new Jetty8Helper(h2oHttpView);
return new Jetty8ProxyServerAdapter(helper, h2oHttpView, credentials, proxyTo);
}
@Override
public void start(final String ip, final int port) throws IOException {
final Server jettyServer = helper.createJettyServer(ip, port);
final HandlerWrapper handlerWrapper = helper.authWrapper(jettyServer);
final ServletContextHandler context = helper.createServletContextHandler();
registerHandlers(handlerWrapper, context, credentials, proxyTo);
try {
jettyServer.start();
} catch (IOException e) {
throw e;
} catch (Exception e) {
throw new IOException(e);
}
}
private void registerHandlers(HandlerWrapper handlerWrapper, ServletContextHandler context, Credentials credentials, String proxyTo) {
// setup authenticating proxy servlet (each request is forwarded with BASIC AUTH)
final ServletHolder proxyServlet = new ServletHolder(TransparentProxyServlet.class);
proxyServlet.setInitParameter("ProxyTo", proxyTo);
proxyServlet.setInitParameter("Prefix", "/");
proxyServlet.setInitParameter("BasicAuth", credentials.toBasicAuth());
context.addServlet(proxyServlet, "/*");
// authHandlers assume the user is already authenticated
final HandlerCollection authHandlers = new HandlerCollection();
authHandlers.setHandlers(new Handler[]{
helper.authenticationHandler(),
context,
});
// handles requests of login form and delegates the rest to the authHandlers
final ProxyLoginHandler loginHandler = new ProxyLoginHandler();
loginHandler.setHandler(authHandlers);
// login handler is the root handler
handlerWrapper.setHandler(loginHandler);
}
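// Resulting chain (sketch): authWrapper -> ProxyLoginHandler -> [authenticationHandler,
// ServletContextHandler("/*" -> TransparentProxyServlet forwarding to proxyTo with Basic auth)].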
private class ProxyLoginHandler extends HandlerWrapper {
@Override
public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response)
throws IOException, ServletException {
final boolean handled = h2oHttpView.proxyLoginHandler(target, request, response);
if (handled) {
baseRequest.setHandled(true);
} else {
super.handle(target, baseRequest, request, response);
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-jetty-8/3.46.0.7/water/webserver
|
java-sources/ai/h2o/h2o-jetty-8/3.46.0.7/water/webserver/jetty8/Jetty8ServerAdapter.java
|
package water.webserver.jetty8;
import org.eclipse.jetty.server.Handler;
import org.eclipse.jetty.server.Request;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.handler.AbstractHandler;
import org.eclipse.jetty.server.handler.HandlerCollection;
import org.eclipse.jetty.server.handler.HandlerWrapper;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlet.ServletHolder;
import water.webserver.iface.H2OHttpView;
import water.webserver.iface.RequestAuthExtension;
import water.webserver.iface.WebServer;
import water.webserver.iface.H2OWebsocketServlet;
import javax.servlet.Servlet;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
class Jetty8ServerAdapter implements WebServer {
private final Jetty8Helper helper;
private final H2OHttpView h2oHttpView;
private Server jettyServer;
private Jetty8ServerAdapter(Jetty8Helper helper, H2OHttpView h2oHttpView) {
this.helper = helper;
this.h2oHttpView = h2oHttpView;
}
static WebServer create(final H2OHttpView h2oHttpView) {
final Jetty8Helper helper = new Jetty8Helper(h2oHttpView);
return new Jetty8ServerAdapter(helper, h2oHttpView);
}
@Override
public void start(final String ip, final int port) throws IOException {
jettyServer = helper.createJettyServer(ip, port);
final HandlerWrapper handlerWrapper = helper.authWrapper(jettyServer);
final ServletContextHandler context = helper.createServletContextHandler();
registerHandlers(handlerWrapper, context);
try {
jettyServer.start();
} catch (IOException e) {
throw e;
} catch (Exception e) {
throw new IOException(e);
}
}
/**
* Stop Jetty server after it has been started.
* This is unlikely to ever be called by H2O until H2O supports graceful shutdown.
*
   * @throws IOException if the underlying Jetty server fails to stop
*/
@Override
public void stop() throws IOException {
if (jettyServer != null) {
try {
jettyServer.stop();
} catch (IOException e) {
throw e;
} catch (Exception e) {
throw new IOException(e);
}
}
}
private void registerHandlers(final HandlerWrapper handlerWrapper, final ServletContextHandler context) {
for (Map.Entry<String, Class<? extends HttpServlet>> entry : h2oHttpView.getServlets().entrySet()) {
context.addServlet(entry.getValue(), entry.getKey());
}
for (Map.Entry<String, Class<? extends H2OWebsocketServlet>> entry : h2oHttpView.getWebsockets().entrySet()) {
try {
Servlet servlet = new Jetty8WebsocketServlet(entry.getValue().newInstance());
context.addServlet(new ServletHolder(entry.getValue().getName(), servlet), entry.getKey());
} catch (InstantiationException | IllegalAccessException e) {
throw new RuntimeException("Failed to instantiate websocket servlet object", e);
}
}
final List<Handler> extHandlers = new ArrayList<>();
extHandlers.add(helper.authenticationHandler());
// wrap the generic request-auth extensions into Jetty-aware handlers
final Collection<RequestAuthExtension> authExtensions = h2oHttpView.getAuthExtensions();
for (final RequestAuthExtension requestAuthExtension : authExtensions) {
extHandlers.add(new AbstractHandler() {
@Override
public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response) throws IOException, ServletException {
if (requestAuthExtension.handle(target, request, response)) {
baseRequest.setHandled(true);
}
}
});
}
//
extHandlers.add(context);
// Handlers that can only be invoked for an authenticated user (if auth is enabled)
final HandlerCollection authHandlers = new HandlerCollection();
authHandlers.setHandlers(extHandlers.toArray(new Handler[0]));
// LoginHandler handles directly login requests and delegates the rest to the authHandlers
final LoginHandler loginHandler = new LoginHandler();
loginHandler.setHandler(authHandlers);
final HandlerCollection hc = new HandlerCollection();
hc.setHandlers(new Handler[]{
new GateHandler(),
loginHandler
});
handlerWrapper.setHandler(hc);
}
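// Resulting chain (sketch): authWrapper -> [GateHandler, LoginHandler -> [authenticationHandler,
// per-extension handlers, ServletContextHandler with the registered servlets and websockets]].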
private class LoginHandler extends HandlerWrapper {
@Override
public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response)
throws IOException, ServletException {
final boolean handled = h2oHttpView.loginHandler(target, request, response);
if (handled) {
baseRequest.setHandled(true);
} else {
super.handle(target, baseRequest, request, response);
}
}
}
private class GateHandler extends AbstractHandler {
@Override
public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response) {
final boolean handled = h2oHttpView.gateHandler(request, response);
if (handled) {
baseRequest.setHandled(true);
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-jetty-8/3.46.0.7/water/webserver
|
java-sources/ai/h2o/h2o-jetty-8/3.46.0.7/water/webserver/jetty8/Jetty8WebsocketServlet.java
|
package water.webserver.jetty8;
import org.eclipse.jetty.websocket.WebSocket;
import org.eclipse.jetty.websocket.WebSocketServlet;
import water.webserver.iface.WebsocketConnection;
import water.webserver.iface.WebsocketHandler;
import water.webserver.iface.H2OWebsocketServlet;
import javax.servlet.http.HttpServletRequest;
import java.io.IOException;
public class Jetty8WebsocketServlet extends WebSocketServlet {
private final H2OWebsocketServlet impl;
public Jetty8WebsocketServlet(H2OWebsocketServlet impl) {
this.impl = impl;
}
static class Jetty8WebsocketConnection implements WebsocketConnection {
private final WebSocket.Connection connection;
Jetty8WebsocketConnection(WebSocket.Connection connection) {
this.connection = connection;
}
@Override
public void sendMessage(String message) throws IOException {
connection.sendMessage(message);
}
}
class Jetty8WebsocketHandler implements WebSocket, WebSocket.OnTextMessage {
private Jetty8WebsocketConnection conn;
private WebsocketHandler handler;
@Override
public void onMessage(String data) {
handler.onMessage(data);
}
@Override
public void onOpen(Connection connection) {
conn = new Jetty8WebsocketConnection(connection);
handler = impl.onConnect(conn);
}
@Override
public void onClose(int closeCode, String message) {
handler.onClose(conn);
handler = null;
conn = null;
}
}
@Override
public WebSocket doWebSocketConnect(
HttpServletRequest request, String protocol
) {
return new Jetty8WebsocketHandler();
}
}
|
0
|
java-sources/ai/h2o/h2o-jetty-8/3.46.0.7/water/webserver
|
java-sources/ai/h2o/h2o-jetty-8/3.46.0.7/water/webserver/jetty8/TransparentProxyServlet.java
|
package water.webserver.jetty8;
import org.eclipse.jetty.client.HttpExchange;
import org.eclipse.jetty.servlets.ProxyServlet;
import javax.servlet.ServletConfig;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
/**
* Transparent proxy that automatically adds authentication to each request
*/
public class TransparentProxyServlet extends ProxyServlet.Transparent {
private String _basicAuth;
@Override
public void init(ServletConfig config) throws ServletException {
super.init(config);
_basicAuth = config.getInitParameter("BasicAuth");
}
@Override
protected void customizeExchange(HttpExchange exchange, HttpServletRequest request) {
exchange.setRequestHeader("Authorization", _basicAuth);
}
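// Illustrative only (an assumption, not part of the original source): the shape of the value the
// "BasicAuth" init parameter is expected to carry, i.e. a ready-made Authorization header value.
static String exampleBasicAuthValue(String user, String password) {
byte[] raw = (user + ":" + password).getBytes(java.nio.charset.StandardCharsets.UTF_8);
return "Basic " + java.util.Base64.getEncoder().encodeToString(raw);
}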
}
|
0
|
java-sources/ai/h2o/h2o-jetty-8/3.46.0.7/water/webserver/jetty8
|
java-sources/ai/h2o/h2o-jetty-8/3.46.0.7/water/webserver/jetty8/security/FormAuthenticator.java
|
//
// ========================================================================
// Copyright (c) 1995-2016 Mort Bay Consulting Pty. Ltd.
// ------------------------------------------------------------------------
// All rights reserved. This program and the accompanying materials
// are made available under the terms of the Eclipse Public License v1.0
// and Apache License v2.0 which accompanies this distribution.
//
// The Eclipse Public License is available at
// http://www.eclipse.org/legal/epl-v10.html
//
// The Apache License v2.0 is available at
// http://www.opensource.org/licenses/apache2.0.php
//
// You may elect to redistribute this code under either of these licenses.
// ========================================================================
//
package water.webserver.jetty8.security;
import java.io.IOException;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Locale;
import javax.servlet.RequestDispatcher;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpServletResponseWrapper;
import javax.servlet.http.HttpSession;
import org.eclipse.jetty.http.HttpHeaders;
import org.eclipse.jetty.http.HttpMethods;
import org.eclipse.jetty.http.MimeTypes;
import org.eclipse.jetty.security.ServerAuthException;
import org.eclipse.jetty.security.UserAuthentication;
import org.eclipse.jetty.security.authentication.DeferredAuthentication;
import org.eclipse.jetty.security.authentication.LoginAuthenticator;
import org.eclipse.jetty.security.authentication.SessionAuthentication;
import org.eclipse.jetty.server.AbstractHttpConnection;
import org.eclipse.jetty.server.Authentication;
import org.eclipse.jetty.server.Authentication.User;
import org.eclipse.jetty.server.Request;
import org.eclipse.jetty.server.UserIdentity;
import org.eclipse.jetty.util.MultiMap;
import org.eclipse.jetty.util.StringUtil;
import org.eclipse.jetty.util.URIUtil;
import org.eclipse.jetty.util.log.Log;
import org.eclipse.jetty.util.log.Logger;
import org.eclipse.jetty.util.security.Constraint;
/**
* FORM Authenticator.
*
 * <p>This authenticator implements form authentication and will use dispatchers to
* the login page if the {@link #__FORM_DISPATCH} init parameter is set to true.
* Otherwise it will redirect.</p>
*
 * <p>The form authenticator redirects unauthenticated requests to a login page
* which should use a form to gather username/password from the user and send them
* to the /j_security_check URI within the context. FormAuthentication uses
* {@link SessionAuthentication} to wrap Authentication results so that they
* are associated with the session.</p>
*
*
*/
public class FormAuthenticator extends LoginAuthenticator
{
private static final Logger LOG = Log.getLogger(FormAuthenticator.class);
public final static String __FORM_LOGIN_PAGE="org.eclipse.jetty.security.form_login_page";
public final static String __FORM_ERROR_PAGE="org.eclipse.jetty.security.form_error_page";
public final static String __FORM_DISPATCH="org.eclipse.jetty.security.dispatch";
public final static String __J_URI = "org.eclipse.jetty.security.form_URI";
public final static String __J_POST = "org.eclipse.jetty.security.form_POST";
public final static String __J_SECURITY_CHECK = "/j_security_check";
public final static String __J_USERNAME = "j_username";
public final static String __J_PASSWORD = "j_password";
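// Illustrative login form that the configured login page is expected to serve (the standard
// servlet form-auth contract; the exact markup below is an assumption, not part of this source):
//   <form method="POST" action="j_security_check">
//     <input type="text"     name="j_username"/>
//     <input type="password" name="j_password"/>
//     <input type="submit"   value="Login"/>
//   </form>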
private String _formErrorPage;
private String _formErrorPath;
private String _formLoginPage;
private String _formLoginPath;
private boolean _dispatch;
private boolean _alwaysSaveUri;
private boolean _useRelativeRedirects;
public FormAuthenticator()
{
}
/* ------------------------------------------------------------ */
public FormAuthenticator(String login,String error,boolean dispatch,boolean useRelativeRedirects)
{
this();
if (login!=null)
setLoginPage(login);
if (error!=null)
setErrorPage(error);
_dispatch=dispatch;
_useRelativeRedirects=useRelativeRedirects;
}
/* ------------------------------------------------------------ */
/**
* If true, uris that cause a redirect to a login page will always
* be remembered. If false, only the first uri that leads to a login
* page redirect is remembered.
* See https://bugs.eclipse.org/bugs/show_bug.cgi?id=379909
 * @param alwaysSave true to remember every URI that triggers a redirect to the login page
*/
public void setAlwaysSaveUri (boolean alwaysSave)
{
_alwaysSaveUri = alwaysSave;
}
/* ------------------------------------------------------------ */
public boolean getAlwaysSaveUri ()
{
return _alwaysSaveUri;
}
public boolean getUseRelativeRedirects ()
{
return _useRelativeRedirects;
}
/* ------------------------------------------------------------ */
/**
* @see org.eclipse.jetty.security.authentication.LoginAuthenticator#setConfiguration(org.eclipse.jetty.security.Authenticator.AuthConfiguration)
*/
@Override
public void setConfiguration(AuthConfiguration configuration)
{
super.setConfiguration(configuration);
String login=configuration.getInitParameter(FormAuthenticator.__FORM_LOGIN_PAGE);
if (login!=null)
setLoginPage(login);
String error=configuration.getInitParameter(FormAuthenticator.__FORM_ERROR_PAGE);
if (error!=null)
setErrorPage(error);
String dispatch=configuration.getInitParameter(FormAuthenticator.__FORM_DISPATCH);
_dispatch = dispatch==null?_dispatch:Boolean.valueOf(dispatch);
}
/* ------------------------------------------------------------ */
public String getAuthMethod()
{
return Constraint.__FORM_AUTH;
}
/* ------------------------------------------------------------ */
private void setLoginPage(String path)
{
if (!path.startsWith("/"))
{
LOG.warn("form-login-page must start with /");
path = "/" + path;
}
_formLoginPage = path;
_formLoginPath = path;
if (_formLoginPath.indexOf('?') > 0)
_formLoginPath = _formLoginPath.substring(0, _formLoginPath.indexOf('?'));
}
/* ------------------------------------------------------------ */
private void setErrorPage(String path)
{
if (path == null || path.trim().length() == 0)
{
_formErrorPath = null;
_formErrorPage = null;
}
else
{
if (!path.startsWith("/"))
{
LOG.warn("form-error-page must start with /");
path = "/" + path;
}
_formErrorPage = path;
_formErrorPath = path;
if (_formErrorPath.indexOf('?') > 0)
_formErrorPath = _formErrorPath.substring(0, _formErrorPath.indexOf('?'));
}
}
/* ------------------------------------------------------------ */
@Override
public UserIdentity login(String username, Object password, ServletRequest request)
{
UserIdentity user = super.login(username,password,request);
if (user!=null)
{
HttpSession session = ((HttpServletRequest)request).getSession(true);
Authentication cached=new SessionAuthentication(getAuthMethod(),user,password);
session.setAttribute(SessionAuthentication.__J_AUTHENTICATED, cached);
}
return user;
}
/* ------------------------------------------------------------ */
public Authentication validateRequest(ServletRequest req, ServletResponse res, boolean mandatory) throws ServerAuthException
{
HttpServletRequest request = (HttpServletRequest)req;
HttpServletResponse response = (HttpServletResponse)res;
String uri = request.getRequestURI();
if (uri==null)
uri=URIUtil.SLASH;
mandatory|=isJSecurityCheck(uri);
if (!mandatory)
return new DeferredAuthentication(this);
if (isLoginOrErrorPage(URIUtil.addPaths(request.getServletPath(),request.getPathInfo())) && !DeferredAuthentication.isDeferred(response))
return new DeferredAuthentication(this);
HttpSession session = request.getSession(true);
try
{
// Handle a request for authentication.
if (isJSecurityCheck(uri))
{
final String username = request.getParameter(__J_USERNAME);
final String password = request.getParameter(__J_PASSWORD);
UserIdentity user = login(username, password, request);
session = request.getSession(true);
if (user!=null)
{
// Redirect to original request
String nuri;
synchronized(session)
{
nuri = (String) session.getAttribute(__J_URI);
if (nuri == null || nuri.length() == 0)
{
nuri = request.getContextPath();
if (nuri.length() == 0)
nuri = URIUtil.SLASH;
}
}
response.setContentLength(0);
response.sendRedirect(response.encodeRedirectURL(nuri));
return new FormAuthentication(getAuthMethod(),user);
}
// not authenticated
if (LOG.isDebugEnabled())
LOG.debug("Form authentication FAILED for " + StringUtil.printable(username));
if (_formErrorPage == null)
{
if (response != null)
response.sendError(HttpServletResponse.SC_FORBIDDEN);
}
else if (_dispatch)
{
RequestDispatcher dispatcher = request.getRequestDispatcher(_formErrorPage);
response.setHeader(HttpHeaders.CACHE_CONTROL,"No-cache");
response.setDateHeader(HttpHeaders.EXPIRES,1);
dispatcher.forward(new FormRequest(request), new FormResponse(response));
}
else
{
response.sendRedirect(response.encodeRedirectURL(URIUtil.addPaths(request.getContextPath(),_formErrorPage)));
}
return Authentication.SEND_FAILURE;
}
// Look for cached authentication
Authentication authentication = (Authentication) session.getAttribute(SessionAuthentication.__J_AUTHENTICATED);
if (authentication != null)
{
// Has authentication been revoked?
if (authentication instanceof Authentication.User &&
_loginService!=null &&
!_loginService.validate(((Authentication.User)authentication).getUserIdentity()))
{
session.removeAttribute(SessionAuthentication.__J_AUTHENTICATED);
}
else
{
String j_uri=(String)session.getAttribute(__J_URI);
if (j_uri!=null)
{
MultiMap<String> j_post = (MultiMap<String>)session.getAttribute(__J_POST);
if (j_post!=null)
{
StringBuffer buf = extractJUri(request);
if (j_uri.equals(buf.toString()))
{
// This is a retry of an original POST request
// so restore method and parameters
session.removeAttribute(__J_POST);
Request base_request = (req instanceof Request)?(Request)req:AbstractHttpConnection.getCurrentConnection().getRequest();
base_request.setMethod(HttpMethods.POST);
base_request.setParameters(j_post);
}
}
else
session.removeAttribute(__J_URI);
}
return authentication;
}
}
// if we can't send challenge
if (DeferredAuthentication.isDeferred(response))
{
LOG.debug("auth deferred {}",session.getId());
return Authentication.UNAUTHENTICATED;
}
// remember the current URI
synchronized (session)
{
// But only if it is not set already, or we save every uri that leads to a login form redirect
if (session.getAttribute(__J_URI)==null || _alwaysSaveUri)
{
StringBuffer buf = extractJUri(request);
session.setAttribute(__J_URI, buf.toString());
if (MimeTypes.FORM_ENCODED.equalsIgnoreCase(req.getContentType()) && HttpMethods.POST.equals(request.getMethod()))
{
Request base_request = (req instanceof Request)?(Request)req:AbstractHttpConnection.getCurrentConnection().getRequest();
base_request.extractParameters();
session.setAttribute(__J_POST, new MultiMap<String>(base_request.getParameters()));
}
}
}
// send the challenge
if (_dispatch)
{
RequestDispatcher dispatcher = request.getRequestDispatcher(_formLoginPage);
response.setHeader(HttpHeaders.CACHE_CONTROL,"No-cache");
response.setDateHeader(HttpHeaders.EXPIRES,1);
dispatcher.forward(new FormRequest(request), new FormResponse(response));
}
else
{
response.sendRedirect(response.encodeRedirectURL(URIUtil.addPaths(request.getContextPath(),_formLoginPage)));
}
return Authentication.SEND_CONTINUE;
}
catch (IOException e)
{
throw new ServerAuthException(e);
}
catch (ServletException e)
{
throw new ServerAuthException(e);
}
}
StringBuffer extractJUri(HttpServletRequest request) {
final StringBuffer buf;
if (_useRelativeRedirects) {
buf = new StringBuffer(request.getContextPath());
} else
buf = request.getRequestURL();
if (request.getQueryString() != null)
buf.append("?").append(request.getQueryString());
return buf;
}
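// With relative redirects enabled only the context path plus the query string is remembered,
// keeping the post-login redirect host-relative; otherwise the absolute request URL is stored.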
/* ------------------------------------------------------------ */
public boolean isJSecurityCheck(String uri)
{
int jsc = uri.indexOf(__J_SECURITY_CHECK);
if (jsc<0)
return false;
int e=jsc+__J_SECURITY_CHECK.length();
if (e==uri.length())
return true;
char c = uri.charAt(e);
return c==';'||c=='#'||c=='/'||c=='?';
}
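// Examples (illustrative): "/j_security_check", "/h2o/j_security_check?from=/flow" and
// "/j_security_check;jsessionid=abc" all match; "/j_security_checker" does not.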
/* ------------------------------------------------------------ */
public boolean isLoginOrErrorPage(String pathInContext)
{
return pathInContext != null && (pathInContext.equals(_formErrorPath) || pathInContext.equals(_formLoginPath));
}
/* ------------------------------------------------------------ */
public boolean secureResponse(ServletRequest req, ServletResponse res, boolean mandatory, User validatedUser) throws ServerAuthException
{
return true;
}
/* ------------------------------------------------------------ */
/* ------------------------------------------------------------ */
protected static class FormRequest extends HttpServletRequestWrapper
{
public FormRequest(HttpServletRequest request)
{
super(request);
}
@Override
public long getDateHeader(String name)
{
if (name.toLowerCase(Locale.ENGLISH).startsWith("if-"))
return -1;
return super.getDateHeader(name);
}
@Override
public String getHeader(String name)
{
if (name.toLowerCase(Locale.ENGLISH).startsWith("if-"))
return null;
return super.getHeader(name);
}
@Override
public Enumeration getHeaderNames()
{
return Collections.enumeration(Collections.list(super.getHeaderNames()));
}
@Override
public Enumeration getHeaders(String name)
{
if (name.toLowerCase(Locale.ENGLISH).startsWith("if-"))
return Collections.enumeration(Collections.EMPTY_LIST);
return super.getHeaders(name);
}
}
/* ------------------------------------------------------------ */
/* ------------------------------------------------------------ */
protected static class FormResponse extends HttpServletResponseWrapper
{
public FormResponse(HttpServletResponse response)
{
super(response);
}
@Override
public void addDateHeader(String name, long date)
{
if (notIgnored(name))
super.addDateHeader(name,date);
}
@Override
public void addHeader(String name, String value)
{
if (notIgnored(name))
super.addHeader(name,value);
}
@Override
public void setDateHeader(String name, long date)
{
if (notIgnored(name))
super.setDateHeader(name,date);
}
@Override
public void setHeader(String name, String value)
{
if (notIgnored(name))
super.setHeader(name,value);
}
private boolean notIgnored(String name)
{
if (HttpHeaders.CACHE_CONTROL.equalsIgnoreCase(name) ||
HttpHeaders.PRAGMA.equalsIgnoreCase(name) ||
HttpHeaders.ETAG.equalsIgnoreCase(name) ||
HttpHeaders.EXPIRES.equalsIgnoreCase(name) ||
HttpHeaders.LAST_MODIFIED.equalsIgnoreCase(name) ||
HttpHeaders.AGE.equalsIgnoreCase(name))
return false;
return true;
}
}
/* ------------------------------------------------------------ */
/** This Authentication represents a just completed Form authentication.
 * Subsequent requests from the same user are authenticated by the presence
* of a {@link SessionAuthentication} instance in their session.
*/
public static class FormAuthentication extends UserAuthentication implements Authentication.ResponseSent
{
public FormAuthentication(String method, UserIdentity userIdentity)
{
super(method,userIdentity);
}
@Override
public String toString()
{
return "Form"+super.toString();
}
}
}
|
0
|
java-sources/ai/h2o/h2o-jetty-9/3.46.0.7/ai/h2o/org/eclipse/jetty/jaas
|
java-sources/ai/h2o/h2o-jetty-9/3.46.0.7/ai/h2o/org/eclipse/jetty/jaas/spi/LdapLoginModule.java
|
package ai.h2o.org.eclipse.jetty.jaas.spi;
/**
* LdapLoginModule is relocated in Sparkling Water to the package ai.h2o.org.eclipse.jetty.jaas.spi
 * of Jetty 9. This class lets the user define a login module that works for both H2O and SW
 * (the user needs to put "org.eclipse.jetty.jaas.spi.LdapLoginModule required" in the login conf).
*/
public class LdapLoginModule extends org.eclipse.jetty.jaas.spi.LdapLoginModule { /* empty */ }
|
0
|
java-sources/ai/h2o/h2o-jetty-9/3.46.0.7/ai/h2o/org/eclipse/jetty/plus/jaas
|
java-sources/ai/h2o/h2o-jetty-9/3.46.0.7/ai/h2o/org/eclipse/jetty/plus/jaas/spi/LdapLoginModule.java
|
package ai.h2o.org.eclipse.jetty.plus.jaas.spi;
/**
* LdapLoginModule is relocated in Sparkling Water to the package ai.h2o.org.eclipse.jetty.jaas.spi
* of Jetty 9. External backend workers on Hadoop 2 utilize Jetty 8 and thus the module
 * org.eclipse.jetty.plus.jaas.spi. This class makes it possible to use a single package name in both cases.
*/
public class LdapLoginModule extends org.eclipse.jetty.jaas.spi.LdapLoginModule { /* empty */ }
|
0
|
java-sources/ai/h2o/h2o-jetty-9/3.46.0.7/ai/h2o/org/eclipse/jetty/security
|
java-sources/ai/h2o/h2o-jetty-9/3.46.0.7/ai/h2o/org/eclipse/jetty/security/authentication/SpnegoAuthenticator.java
|
//
// ========================================================================
// Copyright (c) 1995-2019 Mort Bay Consulting Pty. Ltd.
// ------------------------------------------------------------------------
// All rights reserved. This program and the accompanying materials
// are made available under the terms of the Eclipse Public License v1.0
// and Apache License v2.0 which accompanies this distribution.
//
// The Eclipse Public License is available at
// http://www.eclipse.org/legal/epl-v10.html
//
// The Apache License v2.0 is available at
// http://www.opensource.org/licenses/apache2.0.php
//
// You may elect to redistribute this code under either of these licenses.
// ========================================================================
//
package ai.h2o.org.eclipse.jetty.security.authentication;
import java.io.IOException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.eclipse.jetty.http.HttpHeader;
import org.eclipse.jetty.security.ServerAuthException;
import org.eclipse.jetty.security.UserAuthentication;
import org.eclipse.jetty.security.authentication.DeferredAuthentication;
import org.eclipse.jetty.security.authentication.LoginAuthenticator;
import org.eclipse.jetty.server.Authentication;
import org.eclipse.jetty.server.Authentication.User;
import org.eclipse.jetty.server.UserIdentity;
import org.eclipse.jetty.util.log.Log;
import org.eclipse.jetty.util.log.Logger;
import org.eclipse.jetty.util.security.Constraint;
public class SpnegoAuthenticator extends LoginAuthenticator
{
private static final Logger LOG = Log.getLogger(SpnegoAuthenticator.class);
private String _authMethod = Constraint.__SPNEGO_AUTH;
public SpnegoAuthenticator()
{
}
/**
* Allow for a custom authMethod value to be set for instances where SPNEGO may not be appropriate
*
* @param authMethod the auth method
*/
public SpnegoAuthenticator(String authMethod)
{
_authMethod = authMethod;
}
@Override
public String getAuthMethod()
{
return _authMethod;
}
@Override
public Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory) throws ServerAuthException
{
HttpServletRequest req = (HttpServletRequest)request;
HttpServletResponse res = (HttpServletResponse)response;
String header = req.getHeader(HttpHeader.AUTHORIZATION.asString());
String authScheme = getAuthSchemeFromHeader(header);
if (!mandatory)
{
return new DeferredAuthentication(this);
}
// The client has responded to the challenge we sent previously
if (header != null && isAuthSchemeNegotiate(authScheme))
{
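// "Negotiate " is 10 characters; everything after it is the base64-encoded SPNEGO token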
String spnegoToken = header.substring(10);
UserIdentity user = login(null, spnegoToken, request);
if (user != null)
{
return new UserAuthentication(getAuthMethod(), user);
}
}
// A challenge should be sent if any of the following cases are true:
// 1. There was no Authorization header provided
// 2. There was an Authorization header for a type other than Negotiate
try
{
if (DeferredAuthentication.isDeferred(res))
{
return Authentication.UNAUTHENTICATED;
}
LOG.debug("Sending challenge");
res.setHeader(HttpHeader.WWW_AUTHENTICATE.asString(), HttpHeader.NEGOTIATE.asString());
res.sendError(HttpServletResponse.SC_UNAUTHORIZED);
return Authentication.SEND_CONTINUE;
}
catch (IOException ioe)
{
throw new ServerAuthException(ioe);
}
}
/**
* Extracts the auth_scheme from the HTTP Authorization header, {@code Authorization: <auth_scheme> <token>}.
*
* @param header The HTTP Authorization header or null.
* @return The parsed auth scheme from the header, or the empty string.
*/
String getAuthSchemeFromHeader(String header)
{
// No header provided, return the empty string
if (header == null || header.isEmpty())
{
return "";
}
// Trim any leading whitespace
String trimmedHeader = header.trim();
// Find the first space, all characters prior should be the auth_scheme
int index = trimmedHeader.indexOf(' ');
if (index > 0)
{
return trimmedHeader.substring(0, index);
}
// If we don't find a space, this is likely malformed, just return the entire value
return trimmedHeader;
}
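// Examples (illustrative): getAuthSchemeFromHeader("Negotiate YIIB...") -> "Negotiate",
// getAuthSchemeFromHeader(" Basic abc") -> "Basic", getAuthSchemeFromHeader(null) -> "".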
/**
* Determines if provided auth scheme text from the Authorization header is case-insensitively
* equal to {@code negotiate}.
*
* @param authScheme The auth scheme component of the Authorization header
* @return True if the auth scheme component is case-insensitively equal to {@code negotiate}, False otherwise.
*/
boolean isAuthSchemeNegotiate(String authScheme)
{
if (authScheme == null || authScheme.length() != HttpHeader.NEGOTIATE.asString().length())
{
return false;
}
// Headers should be treated case-insensitively, so we have to jump through some extra hoops.
return authScheme.equalsIgnoreCase(HttpHeader.NEGOTIATE.asString());
}
@Override
public boolean secureResponse(ServletRequest request, ServletResponse response, boolean mandatory, User validatedUser) throws ServerAuthException
{
return true;
}
}
|
0
|
java-sources/ai/h2o/h2o-jetty-9/3.46.0.7/water/webserver
|
java-sources/ai/h2o/h2o-jetty-9/3.46.0.7/water/webserver/jetty9/Jetty9DelegatingAuthenticator.java
|
package water.webserver.jetty9;
import org.eclipse.jetty.security.Authenticator;
import org.eclipse.jetty.security.ServerAuthException;
import org.eclipse.jetty.security.authentication.FormAuthenticator;
import org.eclipse.jetty.server.Authentication;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
/**
* Dynamically switches between Form-based authentication
* and Basic Access authentication.
 * The decision is based on the request's "User-Agent" header: browser clients use form-based
 * authentication, while all other clients use basic auth.
*/
class Jetty9DelegatingAuthenticator implements Authenticator {
private Authenticator _primaryAuth;
private FormAuthenticator _formAuth;
Jetty9DelegatingAuthenticator(Authenticator primaryAuth, FormAuthenticator formAuth) {
_primaryAuth = primaryAuth;
_formAuth = formAuth;
}
@Override
public void setConfiguration(AuthConfiguration configuration) {
_primaryAuth.setConfiguration(configuration);
_formAuth.setConfiguration(configuration);
}
@Override
public String getAuthMethod() {
return "FORM_PREFERRED";
}
@Override
public void prepareRequest(ServletRequest request) {
// Do nothing
}
@Override
public Authentication validateRequest(ServletRequest request, ServletResponse response,
boolean mandatory) throws ServerAuthException {
if (isBrowserAgent((HttpServletRequest) request))
return _formAuth.validateRequest(request, response, mandatory);
else
return _primaryAuth.validateRequest(request, response, mandatory);
}
private static boolean isBrowserAgent(HttpServletRequest request) {
String userAgent = request.getHeader("User-Agent");
// Covers all modern browsers (Firefox, Chrome, IE, Edge & Opera)
return (userAgent != null) &&
(userAgent.startsWith("Mozilla/") || userAgent.startsWith("Opera/"));
}
@Override
public boolean secureResponse(ServletRequest request, ServletResponse response,
boolean mandatory, Authentication.User validatedUser) {
return true; // both BASIC and FORM return true
}
}
|
0
|
java-sources/ai/h2o/h2o-jetty-9/3.46.0.7/water/webserver
|
java-sources/ai/h2o/h2o-jetty-9/3.46.0.7/water/webserver/jetty9/Jetty9Facade.java
|
package water.webserver.jetty9;
import water.webserver.iface.Credentials;
import water.webserver.iface.H2OHttpView;
import water.webserver.iface.HttpServerFacade;
import water.webserver.iface.ProxyServer;
import water.webserver.iface.WebServer;
public class Jetty9Facade implements HttpServerFacade {
@Override
public WebServer createWebServer(H2OHttpView h2oHttpView) {
return Jetty9ServerAdapter.create(h2oHttpView);
}
@Override
public ProxyServer createProxyServer(H2OHttpView h2oHttpView, Credentials credentials, String proxyTo) {
return Jetty9ProxyServerAdapter.create(h2oHttpView, credentials, proxyTo);
}
}
|
0
|
java-sources/ai/h2o/h2o-jetty-9/3.46.0.7/water/webserver
|
java-sources/ai/h2o/h2o-jetty-9/3.46.0.7/water/webserver/jetty9/Jetty9Helper.java
|
package water.webserver.jetty9;
import ai.h2o.org.eclipse.jetty.security.authentication.SpnegoAuthenticator;
import org.eclipse.jetty.jaas.JAASLoginService;
import org.eclipse.jetty.security.Authenticator;
import org.eclipse.jetty.security.ConstraintMapping;
import org.eclipse.jetty.security.ConstraintSecurityHandler;
import org.eclipse.jetty.security.DefaultIdentityService;
import org.eclipse.jetty.security.HashLoginService;
import org.eclipse.jetty.security.IdentityService;
import org.eclipse.jetty.security.LoginService;
import org.eclipse.jetty.security.SpnegoLoginService;
import org.eclipse.jetty.security.authentication.BasicAuthenticator;
import org.eclipse.jetty.security.authentication.FormAuthenticator;
import org.eclipse.jetty.server.*;
import org.eclipse.jetty.server.handler.AbstractHandler;
import org.eclipse.jetty.server.handler.HandlerWrapper;
import org.eclipse.jetty.server.session.SessionHandler;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.util.security.Constraint;
import org.eclipse.jetty.util.ssl.SslContextFactory;
import org.eclipse.jetty.util.thread.QueuedThreadPool;
import org.eclipse.jetty.util.thread.ScheduledExecutorScheduler;
import org.eclipse.jetty.util.thread.Scheduler;
import water.webserver.config.ConnectionConfiguration;
import water.webserver.iface.H2OHttpConfig;
import water.webserver.iface.H2OHttpView;
import water.webserver.iface.LoginType;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.security.KeyStore;
import java.security.KeyStoreException;
import java.util.Collections;
class Jetty9Helper {
private final H2OHttpConfig config;
private final H2OHttpView h2oHttpView;
Jetty9Helper(H2OHttpView h2oHttpView) {
this.h2oHttpView = h2oHttpView;
this.config = h2oHttpView.getConfig();
}
Server createJettyServer(String ip, int port) {
System.setProperty("org.eclipse.jetty.server.Request.maxFormContentSize", Integer.toString(Integer.MAX_VALUE));
final Server jettyServer;
if (config.ensure_daemon_threads) {
QueuedThreadPool pool = new QueuedThreadPool();
pool.setDaemon(true);
jettyServer = new Server(pool);
// Ensure the threads started by jetty are daemon threads, so they don't prevent stopping of H2O
Scheduler s = jettyServer.getBean(Scheduler.class);
jettyServer.updateBean(s, new ScheduledExecutorScheduler(null, true));
} else
jettyServer = new Server();
final boolean isSecured = config.jks != null;
final HttpConfiguration httpConfiguration = makeHttpConfiguration(new ConnectionConfiguration(isSecured));
final HttpConnectionFactory httpConnectionFactory = new HttpConnectionFactory(httpConfiguration);
final ServerConnector connector;
if (isSecured) {
final SslContextFactory sslContextFactory = getSslContextFactory();
sslContextFactory.setKeyStorePath(config.jks);
sslContextFactory.setKeyStorePassword(config.jks_pass);
if (config.jks_alias != null) {
sslContextFactory.setCertAlias(config.jks_alias);
}
connector = new ServerConnector(jettyServer, AbstractConnectionFactory.getFactories(sslContextFactory, httpConnectionFactory));
} else {
connector = new ServerConnector(jettyServer, httpConnectionFactory);
}
connector.setIdleTimeout(httpConfiguration.getIdleTimeout()); // for websockets,...
if (ip != null) {
connector.setHost(ip);
}
connector.setPort(port);
jettyServer.setConnectors(new Connector[]{connector});
return jettyServer;
}
/**
 * Obtains the proper SslContextFactory: SslContextFactory$Server for newer Jetty 9 versions
 * (where the plain SslContextFactory no longer works), falling back to SslContextFactory for older ones.
 */
protected static SslContextFactory getSslContextFactory() {
try {
return (SslContextFactory) Class.forName("org.eclipse.jetty.util.ssl.SslContextFactory$Server").newInstance();
} catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
try {
return (SslContextFactory) Class.forName("org.eclipse.jetty.util.ssl.SslContextFactory").newInstance();
} catch (ClassNotFoundException | InstantiationException | IllegalAccessException ex) {
throw new RuntimeException(ex);
}
}
}
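// Builds the shared HttpConfiguration (header/buffer sizes, idle timeout, relative redirects)
// from the H2O connection configuration.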
static HttpConfiguration makeHttpConfiguration(ConnectionConfiguration cfg) {
final HttpConfiguration httpConfiguration = new HttpConfiguration();
httpConfiguration.setSendServerVersion(false);
httpConfiguration.setRequestHeaderSize(cfg.getRequestHeaderSize());
httpConfiguration.setResponseHeaderSize(cfg.getResponseHeaderSize());
httpConfiguration.setOutputBufferSize(cfg.getOutputBufferSize(httpConfiguration.getOutputBufferSize()));
setRelativeRedirectAllowed(httpConfiguration, cfg.isRelativeRedirectAllowed());
httpConfiguration.setIdleTimeout(cfg.getIdleTimeout());
return httpConfiguration;
}
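// setRelativeRedirectAllowed is only available in newer Jetty 9 versions, so it is
// invoked via reflection and silently skipped when the method does not exist.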
static void setRelativeRedirectAllowed(HttpConfiguration httpConfiguration, boolean isRelativeRedirectAllowed) {
Method[] methods = httpConfiguration.getClass().getMethods();
Method method = null;
for (Method m : methods) {
if (m.getName().equals("setRelativeRedirectAllowed")) {
method = m;
break;
}
}
if (method != null) {
try {
method.invoke(httpConfiguration, isRelativeRedirectAllowed);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
} catch (InvocationTargetException e) {
throw new RuntimeException(e);
}
}
}
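/**
 * Wraps the server with authentication when a login type is configured: builds the
 * LoginService and primary Authenticator for the configured login type, locks down all
 * paths with a ConstraintSecurityHandler, and chains a SessionHandler in front of it.
 * Returns the wrapper into which further handlers should be installed.
 */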
HandlerWrapper authWrapper(Server jettyServer) {
if (config.loginType == LoginType.NONE) {
return jettyServer;
}
// REFER TO http://www.eclipse.org/jetty/documentation/9.1.4.v20140401/embedded-examples.html#embedded-secured-hello-handler
final LoginService loginService;
final Authenticator primaryAuthenticator;
switch (config.loginType) {
case HASH:
loginService = new HashLoginService("H2O", config.login_conf);
primaryAuthenticator = new BasicAuthenticator();
break;
case LDAP:
case KERBEROS:
case PAM:
loginService = new JAASLoginService(config.loginType.jaasRealm);
primaryAuthenticator = new BasicAuthenticator();
break;
case SPNEGO:
System.setProperty("javax.security.auth.useSubjectCredsOnly", "false");
loginService = new SpnegoLoginService(config.loginType.jaasRealm, config.spnego_properties);
primaryAuthenticator = new SpnegoAuthenticator();
break;
default:
throw new UnsupportedOperationException(config.loginType + ""); // this can never happen
}
final IdentityService identityService = new DefaultIdentityService();
loginService.setIdentityService(identityService);
jettyServer.addBean(loginService);
// Set a security handler as the first handler in the chain.
final ConstraintSecurityHandler security = new ConstraintSecurityHandler();
// Set up a constraint to authenticate all calls, and allow certain roles in.
final Constraint constraint = new Constraint();
constraint.setName("auth");
constraint.setAuthenticate(true);
constraint.setRoles(new String[]{Constraint.ANY_AUTH});
final ConstraintMapping mapping = new ConstraintMapping();
mapping.setPathSpec("/*"); // Lock down all API calls
mapping.setConstraint(constraint);
security.setConstraintMappings(Collections.singletonList(mapping));
// Authentication / Authorization
final Authenticator authenticator;
if (config.form_auth) {
FormAuthenticator formAuthenticator = new FormAuthenticator("/login", "/loginError", false);
authenticator = new Jetty9DelegatingAuthenticator(primaryAuthenticator, formAuthenticator);
} else {
authenticator = primaryAuthenticator;
}
security.setLoginService(loginService);
security.setAuthenticator(authenticator);
final SessionHandler sessionHandler = new SessionHandler();
if (config.session_timeout > 0) {
sessionHandler.setMaxInactiveInterval(config.session_timeout * 60);
}
sessionHandler.setHandler(security);
jettyServer.setSessionIdManager(sessionHandler.getSessionIdManager());
// Pass-through to H2O if authenticated.
jettyServer.setHandler(sessionHandler);
return security;
}
/**
* Hook up Jetty handlers. Do this before start() is called.
*/
ServletContextHandler createServletContextHandler() {
// Both security and session handlers are already created (Note: we don't want to create a new separate session
// handler just for ServletContextHandler - we want to have just one SessionHandler & SessionManager)
final ServletContextHandler context = new ServletContextHandler(ServletContextHandler.NO_SECURITY | ServletContextHandler.NO_SESSIONS);
if (null != config.context_path && !config.context_path.isEmpty()) {
context.setContextPath(config.context_path);
} else {
context.setContextPath("/");
}
return context;
}
Handler authenticationHandler() {
return new AuthenticationHandler();
}
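// Delegates authentication of each request to H2O; marks the request handled when
// H2O's authenticationHandler takes care of it.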
private class AuthenticationHandler extends AbstractHandler {
@Override
public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response)
throws IOException {
boolean handled = h2oHttpView.authenticationHandler(request, response);
if (handled) {
baseRequest.setHandled(true);
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-jetty-9/3.46.0.7/water/webserver
|
java-sources/ai/h2o/h2o-jetty-9/3.46.0.7/water/webserver/jetty9/Jetty9ProxyServerAdapter.java
|
package water.webserver.jetty9;
import org.eclipse.jetty.server.Handler;
import org.eclipse.jetty.server.Request;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.handler.HandlerCollection;
import org.eclipse.jetty.server.handler.HandlerWrapper;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlet.ServletHolder;
import water.webserver.iface.Credentials;
import water.webserver.iface.H2OHttpView;
import water.webserver.iface.ProxyServer;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
class Jetty9ProxyServerAdapter implements ProxyServer {
private final Jetty9Helper helper;
private final H2OHttpView h2oHttpView;
private final Credentials credentials;
private final String proxyTo;
private Jetty9ProxyServerAdapter(Jetty9Helper helper, H2OHttpView h2oHttpView, Credentials credentials, String proxyTo) {
this.helper = helper;
this.h2oHttpView = h2oHttpView;
this.credentials = credentials;
this.proxyTo = proxyTo;
}
static ProxyServer create(final H2OHttpView h2oHttpView, final Credentials credentials, final String proxyTo) {
final Jetty9Helper helper = new Jetty9Helper(h2oHttpView);
return new Jetty9ProxyServerAdapter(helper, h2oHttpView, credentials, proxyTo);
}
@Override
public void start(final String ip, final int port) throws IOException {
final Server jettyServer = helper.createJettyServer(ip, port);
final HandlerWrapper handlerWrapper = helper.authWrapper(jettyServer);
final ServletContextHandler context = helper.createServletContextHandler();
registerHandlers(handlerWrapper, context, credentials, proxyTo);
try {
jettyServer.start();
} catch (IOException e) {
throw e;
} catch (Exception e) {
throw new IOException(e);
}
}
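// Builds the proxy handler chain: a ProxyLoginHandler in front of the authentication
// handler and the transparent proxy servlet context.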
private void registerHandlers(HandlerWrapper handlerWrapper, ServletContextHandler context, Credentials credentials, String proxyTo) {
// setup authenticating proxy servlet (each request is forwarded with BASIC AUTH)
final ServletHolder proxyServlet = new ServletHolder(TransparentProxyServlet.class);
proxyServlet.setInitParameter("proxyTo", proxyTo); // Jetty 9 requires starting with small case letter
proxyServlet.setInitParameter("Prefix", "/");
proxyServlet.setInitParameter("BasicAuth", credentials.toBasicAuth());
context.addServlet(proxyServlet, "/*");
// authHandlers assume the user is already authenticated
final HandlerCollection authHandlers = new HandlerCollection();
authHandlers.setHandlers(new Handler[]{
helper.authenticationHandler(),
context,
});
// handles requests of login form and delegates the rest to the authHandlers
final ProxyLoginHandler loginHandler = new ProxyLoginHandler();
loginHandler.setHandler(authHandlers);
// login handler is the root handler
handlerWrapper.setHandler(loginHandler);
}
private class ProxyLoginHandler extends HandlerWrapper {
@Override
public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response)
throws IOException, ServletException {
final boolean handled = h2oHttpView.proxyLoginHandler(target, request, response);
if (handled) {
baseRequest.setHandled(true);
} else {
super.handle(target, baseRequest, request, response);
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-jetty-9/3.46.0.7/water/webserver
|
java-sources/ai/h2o/h2o-jetty-9/3.46.0.7/water/webserver/jetty9/Jetty9ServerAdapter.java
|
package water.webserver.jetty9;
import org.eclipse.jetty.server.Handler;
import org.eclipse.jetty.server.Request;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.handler.AbstractHandler;
import org.eclipse.jetty.server.handler.HandlerCollection;
import org.eclipse.jetty.server.handler.HandlerWrapper;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlet.ServletHolder;
import water.webserver.iface.H2OHttpView;
import water.webserver.iface.RequestAuthExtension;
import water.webserver.iface.WebServer;
import water.webserver.iface.H2OWebsocketServlet;
import javax.servlet.Servlet;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
class Jetty9ServerAdapter implements WebServer {
private final Jetty9Helper helper;
private final H2OHttpView h2oHttpView;
private Server jettyServer;
private Jetty9ServerAdapter(Jetty9Helper helper, H2OHttpView h2oHttpView) {
this.helper = helper;
this.h2oHttpView = h2oHttpView;
}
static WebServer create(final H2OHttpView h2oHttpView) {
final Jetty9Helper helper = new Jetty9Helper(h2oHttpView);
return new Jetty9ServerAdapter(helper, h2oHttpView);
}
@Override
public void start(final String ip, final int port) throws IOException {
jettyServer = helper.createJettyServer(ip, port);
final HandlerWrapper handlerWrapper = helper.authWrapper(jettyServer);
final ServletContextHandler context = helper.createServletContextHandler();
registerHandlers(handlerWrapper, context);
try {
jettyServer.start();
} catch (IOException e) {
throw e;
} catch (Exception e) {
throw new IOException(e);
}
}
/**
 * Stop the Jetty server after it has been started.
 * This is unlikely to ever be called by H2O until H2O supports graceful shutdown.
 *
 * @throws IOException if the server fails to stop
 */
@Override
public void stop() throws IOException {
if (jettyServer != null) {
try {
jettyServer.stop();
} catch (IOException e) {
throw e;
} catch (Exception e) {
throw new IOException(e);
}
}
}
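// Builds the handler chain: GateHandler first, then LoginHandler, which delegates to
// the authentication handler, any RequestAuthExtension handlers, and the servlet context.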
private void registerHandlers(final HandlerWrapper handlerWrapper, final ServletContextHandler context) {
for (Map.Entry<String, Class<? extends HttpServlet>> entry : h2oHttpView.getServlets().entrySet()) {
context.addServlet(entry.getValue(), entry.getKey());
}
for (Map.Entry<String, Class<? extends H2OWebsocketServlet>> entry : h2oHttpView.getWebsockets().entrySet()) {
try {
Servlet servlet = new Jetty9WebsocketServlet(entry.getValue().newInstance());
context.addServlet(new ServletHolder(entry.getValue().getName(), servlet), entry.getKey());
} catch (InstantiationException | IllegalAccessException e) {
throw new RuntimeException("Failed to instantiate websocket servlet object", e);
}
}
final List<Handler> extHandlers = new ArrayList<>();
extHandlers.add(helper.authenticationHandler());
// here we wrap generic authentication handlers into jetty-aware wrappers
final Collection<RequestAuthExtension> authExtensions = h2oHttpView.getAuthExtensions();
for (final RequestAuthExtension requestAuthExtension : authExtensions) {
extHandlers.add(new AbstractHandler() {
@Override
public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response) throws IOException, ServletException {
if (requestAuthExtension.handle(target, request, response)) {
baseRequest.setHandled(true);
}
}
});
}
//
extHandlers.add(context);
// Handlers that can only be invoked for an authenticated user (if auth is enabled)
final HandlerCollection authHandlers = new HandlerCollection();
authHandlers.setHandlers(extHandlers.toArray(new Handler[0]));
// LoginHandler handles directly login requests and delegates the rest to the authHandlers
final LoginHandler loginHandler = new LoginHandler();
loginHandler.setHandler(authHandlers);
final HandlerCollection hc = new HandlerCollection();
hc.setHandlers(new Handler[]{
new GateHandler(),
loginHandler
});
handlerWrapper.setHandler(hc);
}
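// Handles login-related requests directly via H2O and delegates everything else
// to the wrapped handlers.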
private class LoginHandler extends HandlerWrapper {
@Override
public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response)
throws IOException, ServletException {
final boolean handled = h2oHttpView.loginHandler(target, request, response);
if (handled) {
baseRequest.setHandled(true);
} else {
super.handle(target, baseRequest, request, response);
}
}
}
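// Gives H2O's gateHandler the first chance to handle (and thereby short-circuit) a
// request; otherwise the request falls through to the next handler.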
private class GateHandler extends AbstractHandler {
@Override
public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response) {
if (h2oHttpView.gateHandler(request, response)) {
baseRequest.setHandled(true);
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-jetty-9/3.46.0.7/water/webserver
|
java-sources/ai/h2o/h2o-jetty-9/3.46.0.7/water/webserver/jetty9/Jetty9WebsocketServlet.java
|
package water.webserver.jetty9;
import org.eclipse.jetty.websocket.api.Session;
import org.eclipse.jetty.websocket.api.WebSocketListener;
import org.eclipse.jetty.websocket.servlet.*;
import water.webserver.iface.WebsocketConnection;
import water.webserver.iface.WebsocketHandler;
import water.webserver.iface.H2OWebsocketServlet;
import java.io.IOException;
public class Jetty9WebsocketServlet extends WebSocketServlet {
private final H2OWebsocketServlet impl;
public Jetty9WebsocketServlet(H2OWebsocketServlet impl) {
this.impl = impl;
}
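// Thin wrapper exposing a Jetty websocket Session as H2O's WebsocketConnection,
// supporting outgoing text messages.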
static class Jetty9WebsocketConnection implements WebsocketConnection {
private final Session sess;
Jetty9WebsocketConnection(Session sess) {
this.sess = sess;
}
@Override
public void sendMessage(String message) throws IOException {
sess.getRemote().sendString(message);
}
}
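// Bridges Jetty's WebSocketListener callbacks to the H2O websocket servlet: the
// connection is handed to the servlet on connect and incoming text messages are
// forwarded to the returned handler. Binary messages are ignored.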
class Jetty9WebsocketHandler implements WebSocketListener {
private WebsocketHandler handler;
private Jetty9WebsocketConnection conn;
@Override
public void onWebSocketConnect(Session sess) {
conn = new Jetty9WebsocketConnection(sess);
handler = impl.onConnect(conn);
}
@Override
public void onWebSocketBinary(byte[] payload, int offset, int len) {
// ignore
}
@Override
public void onWebSocketText(String message) {
handler.onMessage(message);
}
@Override
public void onWebSocketClose(int statusCode, String reason)
{
handler.onClose(conn);
conn = null;
handler = null;
}
@Override
public void onWebSocketError(Throwable cause) {
cause.printStackTrace();
}
}
/**
 * Please note, each servlet has its own instance of WebSocketServletFactory.
 *
 * @param factory factory object to register the socket creator with.
 */
@Override
public void configure(WebSocketServletFactory factory) {
factory.setCreator(new H2OWebSocketCreator());
}
/**
 * Custom in-place socket creator, returning a new instance of {@link Jetty9WebsocketHandler},
 * which already contains the proper {@link H2OWebsocketServlet} implementation the request is being delegated to.
 * <p>
 * This is required because the default {@link WebSocketServletFactory} uses {@link org.eclipse.jetty.util.DecoratedObjectFactory}
 * to instantiate {@link WebSocketListener} classes. That class can only instantiate static classes with a 0-arg constructor,
 * which the inner non-static class {@link Jetty9WebsocketHandler} is NOT.
 */
public class H2OWebSocketCreator implements WebSocketCreator {
@Override
public Object createWebSocket(ServletUpgradeRequest req, ServletUpgradeResponse resp) {
return new Jetty9WebsocketHandler();
}
}
}
|
0
|
java-sources/ai/h2o/h2o-jetty-9/3.46.0.7/water/webserver
|
java-sources/ai/h2o/h2o-jetty-9/3.46.0.7/water/webserver/jetty9/TransparentProxyServlet.java
|
package water.webserver.jetty9;
import org.eclipse.jetty.client.HttpClient;
import org.eclipse.jetty.client.api.Request;
import org.eclipse.jetty.proxy.ProxyServlet;
import org.eclipse.jetty.util.ssl.SslContextFactory;
import javax.servlet.ServletConfig;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
/**
* Transparent proxy that automatically adds authentication to each request
*/
public class TransparentProxyServlet extends ProxyServlet.Transparent {
private String _basicAuth;
@Override
public void init(ServletConfig config) throws ServletException {
super.init(config);
_basicAuth = config.getInitParameter("BasicAuth");
}
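// The proxy's HttpClient uses an SSL context factory created with trustAll=true,
// presumably to allow proxying to H2O nodes that use self-signed certificates.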
@Override
protected HttpClient newHttpClient() {
final SslContextFactory sslContextFactory = new SslContextFactory(true);
return new HttpClient(sslContextFactory);
}
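// Replaces any incoming Authorization header with the configured Basic credentials,
// so every forwarded request is authenticated against the backend.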
@Override
protected void addProxyHeaders(HttpServletRequest clientRequest,
Request proxyRequest) {
proxyRequest.getHeaders().remove("Authorization");
proxyRequest.header("Authorization", _basicAuth);
}
}
|
0
|
java-sources/ai/h2o/h2o-jetty-9-ext/3.42.0.4/org/eclipse/jetty
|
java-sources/ai/h2o/h2o-jetty-9-ext/3.42.0.4/org/eclipse/jetty/server/Response.java
|
//
// ========================================================================
// Copyright (c) 1995-2018 Mort Bay Consulting Pty. Ltd.
// ------------------------------------------------------------------------
// All rights reserved. This program and the accompanying materials
// are made available under the terms of the Eclipse Public License v1.0
// and Apache License v2.0 which accompanies this distribution.
//
// The Eclipse Public License is available at
// http://www.eclipse.org/legal/epl-v10.html
//
// The Apache License v2.0 is available at
// http://www.opensource.org/licenses/apache2.0.php
//
// You may elect to redistribute this code under either of these licenses.
// ========================================================================
//
package org.eclipse.jetty.server;
import java.io.IOException;
import java.io.PrintWriter;
import java.nio.channels.IllegalSelectorException;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Locale;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import javax.servlet.RequestDispatcher;
import javax.servlet.ServletOutputStream;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpSession;
import org.eclipse.jetty.http.CookieCompliance;
import org.eclipse.jetty.http.DateGenerator;
import org.eclipse.jetty.http.HttpContent;
import org.eclipse.jetty.http.HttpCookie;
import org.eclipse.jetty.http.HttpField;
import org.eclipse.jetty.http.HttpFields;
import org.eclipse.jetty.http.HttpGenerator;
import org.eclipse.jetty.http.HttpHeader;
import org.eclipse.jetty.http.HttpHeaderValue;
import org.eclipse.jetty.http.HttpScheme;
import org.eclipse.jetty.http.HttpStatus;
import org.eclipse.jetty.http.HttpURI;
import org.eclipse.jetty.http.HttpVersion;
import org.eclipse.jetty.http.MetaData;
import org.eclipse.jetty.http.MimeTypes;
import org.eclipse.jetty.http.PreEncodedHttpField;
import org.eclipse.jetty.http.Syntax;
import org.eclipse.jetty.io.RuntimeIOException;
import org.eclipse.jetty.server.handler.ContextHandler;
import org.eclipse.jetty.server.handler.ErrorHandler;
import org.eclipse.jetty.server.session.SessionHandler;
import org.eclipse.jetty.util.QuotedStringTokenizer;
import org.eclipse.jetty.util.StringUtil;
import org.eclipse.jetty.util.URIUtil;
import org.eclipse.jetty.util.log.Log;
import org.eclipse.jetty.util.log.Logger;
import water.webserver.jetty9.H2OHttpConfiguration;
/**
* <p>{@link Response} provides the implementation for {@link HttpServletResponse}.</p>
*/
public class Response implements HttpServletResponse
{
private static final Logger LOG = Log.getLogger(Response.class);
private static final String __COOKIE_DELIM="\",;\\ \t";
private static final String __01Jan1970_COOKIE = DateGenerator.formatCookieDate(0).trim();
private static final int __MIN_BUFFER_SIZE = 1;
private static final HttpField __EXPIRES_01JAN1970 = new PreEncodedHttpField(HttpHeader.EXPIRES,DateGenerator.__01Jan1970);
// Cookie building buffer. Reduce garbage for cookie using applications
private static final ThreadLocal<StringBuilder> __cookieBuilder = ThreadLocal.withInitial(() -> new StringBuilder(128));
public enum OutputType
{
NONE, STREAM, WRITER
}
/**
* If a header name starts with this string, the header (stripped of the prefix)
* can be set during include using only {@link #setHeader(String, String)} or
* {@link #addHeader(String, String)}.
*/
public final static String SET_INCLUDE_HEADER_PREFIX = "org.eclipse.jetty.server.include.";
/**
* If this string is found within the comment of a cookie added with {@link #addCookie(Cookie)}, then the cookie
* will be set as HTTP ONLY.
*/
public final static String HTTP_ONLY_COMMENT = "__HTTP_ONLY__";
private final HttpChannel _channel;
private final HttpFields _fields = new HttpFields();
private final AtomicInteger _include = new AtomicInteger();
private final HttpOutput _out;
private int _status = HttpStatus.OK_200;
private String _reason;
private Locale _locale;
private MimeTypes.Type _mimeType;
private String _characterEncoding;
private EncodingFrom _encodingFrom=EncodingFrom.NOT_SET;
private String _contentType;
private OutputType _outputType = OutputType.NONE;
private ResponseWriter _writer;
private long _contentLength = -1;
private Supplier<HttpFields> trailers;
private enum EncodingFrom { NOT_SET, INFERRED, SET_LOCALE, SET_CONTENT_TYPE, SET_CHARACTER_ENCODING }
private static final EnumSet<EncodingFrom> __localeOverride = EnumSet.of(EncodingFrom.NOT_SET,EncodingFrom.INFERRED);
private static final EnumSet<EncodingFrom> __explicitCharset = EnumSet.of(EncodingFrom.SET_LOCALE,EncodingFrom.SET_CHARACTER_ENCODING);
public Response(HttpChannel channel, HttpOutput out)
{
_channel = channel;
_out = out;
}
public HttpChannel getHttpChannel()
{
return _channel;
}
protected void recycle()
{
_status = HttpStatus.OK_200;
_reason = null;
_locale = null;
_mimeType = null;
_characterEncoding = null;
_contentType = null;
_outputType = OutputType.NONE;
_contentLength = -1;
_out.recycle();
_fields.clear();
_encodingFrom=EncodingFrom.NOT_SET;
}
public HttpOutput getHttpOutput()
{
return _out;
}
public boolean isIncluding()
{
return _include.get() > 0;
}
public void include()
{
_include.incrementAndGet();
}
public void included()
{
_include.decrementAndGet();
if (_outputType == OutputType.WRITER)
{
_writer.reopen();
}
_out.reopen();
}
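// Adds a cookie using RFC2965 formatting when the configured cookie compliance
// requires it, otherwise RFC6265 formatting.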
public void addCookie(HttpCookie cookie)
{
if (StringUtil.isBlank(cookie.getName()))
{
throw new IllegalArgumentException("Cookie.name cannot be blank/null");
}
if (getHttpChannel().getHttpConfiguration().isCookieCompliance(CookieCompliance.RFC2965))
addSetRFC2965Cookie(
cookie.getName(),
cookie.getValue(),
cookie.getDomain(),
cookie.getPath(),
cookie.getMaxAge(),
cookie.getComment(),
cookie.isSecure(),
cookie.isHttpOnly(),
cookie.getVersion());
else
addSetRFC6265Cookie(
cookie.getName(),
cookie.getValue(),
cookie.getDomain(),
cookie.getPath(),
cookie.getMaxAge(),
cookie.isSecure(),
cookie.isHttpOnly());
}
@Override
public void addCookie(Cookie cookie)
{
String comment = cookie.getComment();
boolean httpOnly = false;
if (comment != null)
{
int i = comment.indexOf(HTTP_ONLY_COMMENT);
if (i >= 0)
{
httpOnly = true;
comment = comment.replace(HTTP_ONLY_COMMENT, "").trim();
if (comment.length() == 0)
comment = null;
}
}
if (StringUtil.isBlank(cookie.getName()))
{
throw new IllegalArgumentException("Cookie.name cannot be blank/null");
}
if (getHttpChannel().getHttpConfiguration().isCookieCompliance(CookieCompliance.RFC2965))
addSetRFC2965Cookie(cookie.getName(),
cookie.getValue(),
cookie.getDomain(),
cookie.getPath(),
cookie.getMaxAge(),
comment,
cookie.getSecure(),
httpOnly || cookie.isHttpOnly(),
cookie.getVersion());
else
addSetRFC6265Cookie(cookie.getName(),
cookie.getValue(),
cookie.getDomain(),
cookie.getPath(),
cookie.getMaxAge(),
cookie.getSecure(),
httpOnly || cookie.isHttpOnly());
}
/**
* Format a set cookie value by RFC6265
*
* @param name the name
* @param value the value
* @param domain the domain
* @param path the path
* @param maxAge the maximum age
* @param isSecure true if secure cookie
* @param isHttpOnly true if for http only
*/
public void addSetRFC6265Cookie(
final String name,
final String value,
final String domain,
final String path,
final long maxAge,
final boolean isSecure,
final boolean isHttpOnly)
{
// Check arguments
if (name == null || name.length() == 0)
throw new IllegalArgumentException("Bad cookie name");
// Name is checked for legality by servlet spec, but can also be passed directly so check again for quoting
// Per RFC6265, Cookie.name follows RFC2616 Section 2.2 token rules
Syntax.requireValidRFC2616Token(name, "RFC6265 Cookie name");
// Ensure that, per RFC6265, Cookie.value follows syntax rules
Syntax.requireValidRFC6265CookieValue(value);
// Format value and params
StringBuilder buf = __cookieBuilder.get();
buf.setLength(0);
buf.append(name).append('=').append(value==null?"":value);
// Append path
if (path!=null && path.length()>0)
buf.append(";Path=").append(path);
// Append domain
if (domain!=null && domain.length()>0)
buf.append(";Domain=").append(domain);
// Handle max-age and/or expires
if (maxAge >= 0)
{
// Always use expires
// This is required as some browsers (M$ this means you!) don't handle max-age even with v1 cookies
buf.append(";Expires=");
if (maxAge == 0)
buf.append(__01Jan1970_COOKIE);
else
DateGenerator.formatCookieDate(buf, System.currentTimeMillis() + 1000L * maxAge);
buf.append(";Max-Age=");
buf.append(maxAge);
}
// add the other fields
if (isSecure)
buf.append(";Secure");
if (isHttpOnly)
buf.append(";HttpOnly");
// add the set cookie
_fields.add(HttpHeader.SET_COOKIE, buf.toString());
// Expire responses with set-cookie headers so they do not get cached.
_fields.put(__EXPIRES_01JAN1970);
}
/**
* Format a set cookie value
*
* @param name the name
* @param value the value
* @param domain the domain
* @param path the path
* @param maxAge the maximum age
* @param comment the comment (only present on versions > 0)
* @param isSecure true if secure cookie
* @param isHttpOnly true if for http only
* @param version version of cookie logic to use (0 == default behavior)
*/
public void addSetRFC2965Cookie(
final String name,
final String value,
final String domain,
final String path,
final long maxAge,
final String comment,
final boolean isSecure,
final boolean isHttpOnly,
int version)
{
// Check arguments
if (name == null || name.length() == 0)
throw new IllegalArgumentException("Bad cookie name");
// Format value and params
StringBuilder buf = __cookieBuilder.get();
buf.setLength(0);
// Name is checked for legality by servlet spec, but can also be passed directly so check again for quoting
boolean quote_name=isQuoteNeededForCookie(name);
quoteOnlyOrAppend(buf,name,quote_name);
buf.append('=');
// Append the value
boolean quote_value=isQuoteNeededForCookie(value);
quoteOnlyOrAppend(buf,value,quote_value);
// Look for domain and path fields and check if they need to be quoted
boolean has_domain = domain!=null && domain.length()>0;
boolean quote_domain = has_domain && isQuoteNeededForCookie(domain);
boolean has_path = path!=null && path.length()>0;
boolean quote_path = has_path && isQuoteNeededForCookie(path);
// Upgrade the version if we have a comment or we need to quote value/path/domain or if they were already quoted
if (version==0 && ( comment!=null || quote_name || quote_value || quote_domain || quote_path ||
QuotedStringTokenizer.isQuoted(name) || QuotedStringTokenizer.isQuoted(value) ||
QuotedStringTokenizer.isQuoted(path) || QuotedStringTokenizer.isQuoted(domain)))
version=1;
// Append version
if (version==1)
buf.append (";Version=1");
else if (version>1)
buf.append (";Version=").append(version);
// Append path
if (has_path)
{
buf.append(";Path=");
quoteOnlyOrAppend(buf,path,quote_path);
}
// Append domain
if (has_domain)
{
buf.append(";Domain=");
quoteOnlyOrAppend(buf,domain,quote_domain);
}
// Handle max-age and/or expires
if (maxAge >= 0)
{
// Always use expires
// This is required as some browsers (M$ this means you!) don't handle max-age even with v1 cookies
buf.append(";Expires=");
if (maxAge == 0)
buf.append(__01Jan1970_COOKIE);
else
DateGenerator.formatCookieDate(buf, System.currentTimeMillis() + 1000L * maxAge);
// for v1 cookies, also send max-age
if (version>=1)
{
buf.append(";Max-Age=");
buf.append(maxAge);
}
}
// add the other fields
if (isSecure)
buf.append(";Secure");
if (isHttpOnly)
buf.append(";HttpOnly");
if (comment != null)
{
buf.append(";Comment=");
quoteOnlyOrAppend(buf,comment,isQuoteNeededForCookie(comment));
}
// add the set cookie
_fields.add(HttpHeader.SET_COOKIE, buf.toString());
// Expire responses with set-cookie headers so they do not get cached.
_fields.put(__EXPIRES_01JAN1970);
}
/* ------------------------------------------------------------ */
/** Does a cookie value need to be quoted?
* @param s value string
* @return true if the value needs to be quoted
* @throws IllegalArgumentException if there are control characters in the string
*/
private static boolean isQuoteNeededForCookie(String s)
{
if (s==null || s.length()==0)
return true;
if (QuotedStringTokenizer.isQuoted(s))
return false;
for (int i=0;i<s.length();i++)
{
char c = s.charAt(i);
if (__COOKIE_DELIM.indexOf(c)>=0)
return true;
if (c<0x20 || c>=0x7f)
throw new IllegalArgumentException("Illegal character in cookie value");
}
return false;
}
private static void quoteOnlyOrAppend(StringBuilder buf, String s, boolean quote)
{
if (quote)
QuotedStringTokenizer.quoteOnly(buf,s);
else
buf.append(s);
}
@Override
public boolean containsHeader(String name)
{
return _fields.containsKey(name);
}
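// Rewrites the URL to carry the session ID as a path parameter when URL-based
// session tracking is in effect; strips it when session cookies are in evidence.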
@Override
public String encodeURL(String url)
{
final Request request = _channel.getRequest();
SessionHandler sessionManager = request.getSessionHandler();
if (sessionManager == null)
return url;
HttpURI uri = null;
if (sessionManager.isCheckingRemoteSessionIdEncoding() && URIUtil.hasScheme(url))
{
uri = new HttpURI(url);
String path = uri.getPath();
path = (path == null ? "" : path);
int port = uri.getPort();
if (port < 0)
port = HttpScheme.HTTPS.asString().equalsIgnoreCase(uri.getScheme()) ? 443 : 80;
// Is it the same server?
if (!request.getServerName().equalsIgnoreCase(uri.getHost()))
return url;
if (request.getServerPort() != port)
return url;
if (!path.startsWith(request.getContextPath())) //TODO the root context path is "", with which every non null string starts
return url;
}
String sessionURLPrefix = sessionManager.getSessionIdPathParameterNamePrefix();
if (sessionURLPrefix == null)
return url;
if (url == null)
return null;
// should not encode if cookies in evidence
if ((sessionManager.isUsingCookies() && request.isRequestedSessionIdFromCookie()) || !sessionManager.isUsingURLs())
{
int prefix = url.indexOf(sessionURLPrefix);
if (prefix != -1)
{
int suffix = url.indexOf("?", prefix);
if (suffix < 0)
suffix = url.indexOf("#", prefix);
if (suffix <= prefix)
return url.substring(0, prefix);
return url.substring(0, prefix) + url.substring(suffix);
}
return url;
}
// get session;
HttpSession session = request.getSession(false);
// no session
if (session == null)
return url;
// invalid session
if (!sessionManager.isValid(session))
return url;
String id = sessionManager.getExtendedId(session);
if (uri == null)
uri = new HttpURI(url);
// Already encoded
int prefix = url.indexOf(sessionURLPrefix);
if (prefix != -1)
{
int suffix = url.indexOf("?", prefix);
if (suffix < 0)
suffix = url.indexOf("#", prefix);
if (suffix <= prefix)
return url.substring(0, prefix + sessionURLPrefix.length()) + id;
return url.substring(0, prefix + sessionURLPrefix.length()) + id +
url.substring(suffix);
}
// edit the session
int suffix = url.indexOf('?');
if (suffix < 0)
suffix = url.indexOf('#');
if (suffix < 0)
{
return url +
((HttpScheme.HTTPS.is(uri.getScheme()) || HttpScheme.HTTP.is(uri.getScheme())) && uri.getPath() == null ? "/" : "") + //if no path, insert the root path
sessionURLPrefix + id;
}
return url.substring(0, suffix) +
((HttpScheme.HTTPS.is(uri.getScheme()) || HttpScheme.HTTP.is(uri.getScheme())) && uri.getPath() == null ? "/" : "") + //if no path, insert the root path
sessionURLPrefix + id + url.substring(suffix);
}
@Override
public String encodeRedirectURL(String url)
{
return encodeURL(url);
}
@Override
@Deprecated
public String encodeUrl(String url)
{
return encodeURL(url);
}
@Override
@Deprecated
public String encodeRedirectUrl(String url)
{
return encodeRedirectURL(url);
}
@Override
public void sendError(int sc) throws IOException
{
sendError(sc, null);
}
@Override
public void sendError(int code, String message) throws IOException
{
if (isIncluding())
return;
if (isCommitted())
{
if (LOG.isDebugEnabled())
LOG.debug("Aborting on sendError on committed response {} {}",code,message);
code=-1;
}
else
resetBuffer();
switch(code)
{
case -1:
_channel.abort(new IOException());
return;
case 102:
sendProcessing();
return;
default:
break;
}
_outputType = OutputType.NONE;
setContentType(null);
setCharacterEncoding(null);
setHeader(HttpHeader.EXPIRES,null);
setHeader(HttpHeader.LAST_MODIFIED,null);
setHeader(HttpHeader.CACHE_CONTROL,null);
setHeader(HttpHeader.CONTENT_TYPE,null);
setHeader(HttpHeader.CONTENT_LENGTH, null);
setStatus(code);
Request request = _channel.getRequest();
Throwable cause = (Throwable)request.getAttribute(Dispatcher.ERROR_EXCEPTION);
if (message==null)
{
_reason=HttpStatus.getMessage(code);
message=cause==null?_reason:cause.toString();
}
else
_reason=message;
// If we are allowed to have a body, then produce the error page.
if (code != SC_NO_CONTENT && code != SC_NOT_MODIFIED &&
code != SC_PARTIAL_CONTENT && code >= SC_OK)
{
ContextHandler.Context context = request.getContext();
ContextHandler contextHandler = context == null ? _channel.getState().getContextHandler() : context.getContextHandler();
request.setAttribute(RequestDispatcher.ERROR_STATUS_CODE, code);
request.setAttribute(RequestDispatcher.ERROR_MESSAGE, message);
request.setAttribute(RequestDispatcher.ERROR_REQUEST_URI, request.getRequestURI());
request.setAttribute(RequestDispatcher.ERROR_SERVLET_NAME, request.getServletName());
ErrorHandler error_handler = ErrorHandler.getErrorHandler(_channel.getServer(), contextHandler);
if (error_handler!=null)
error_handler.handle(null, request, request, this);
}
if (!request.isAsyncStarted())
closeOutput();
}
/**
* Sends a 102-Processing response.
* If the connection is an HTTP connection, the version is 1.1, and the
* request has an Expect header starting with 102, then a 102 response is
* sent. This indicates that the request is still being processed and a real
* response can still be sent. This method is called by sendError if it is passed 102.
* @throws IOException if unable to send the 102 response
* @see javax.servlet.http.HttpServletResponse#sendError(int)
*/
public void sendProcessing() throws IOException
{
if (_channel.isExpecting102Processing() && !isCommitted())
{
_channel.sendResponse(HttpGenerator.PROGRESS_102_INFO, null, true);
}
}
/**
* Sends a response with one of the 300 series redirection codes.
* @param code the redirect status code
* @param location the location to send in {@code Location} headers
* @throws IOException if unable to send the redirect
*/
public void sendRedirect(int code, String location) throws IOException
{
if ((code < HttpServletResponse.SC_MULTIPLE_CHOICES) || (code >= HttpServletResponse.SC_BAD_REQUEST))
throw new IllegalArgumentException("Not a 3xx redirect code");
if (isIncluding())
return;
if (location == null)
throw new IllegalArgumentException();
if (!URIUtil.hasScheme(location))
{
StringBuilder buf = ((H2OHttpConfiguration) _channel.getHttpConfiguration()).isRelativeRedirectAllowed()
? new StringBuilder()
: _channel.getRequest().getRootURL();
if (location.startsWith("/"))
{
// absolute in context
location=URIUtil.canonicalEncodedPath(location);
}
else
{
// relative to request
String path=_channel.getRequest().getRequestURI();
String parent=(path.endsWith("/"))?path:URIUtil.parentPath(path);
location=URIUtil.canonicalEncodedPath(URIUtil.addEncodedPaths(parent,location));
if (location!=null && !location.startsWith("/"))
buf.append('/');
}
if(location==null)
throw new IllegalStateException("path cannot be above root");
buf.append(location);
location=buf.toString();
}
resetBuffer();
setHeader(HttpHeader.LOCATION, location);
setStatus(code);
closeOutput();
}
@Override
public void sendRedirect(String location) throws IOException
{
sendRedirect(HttpServletResponse.SC_MOVED_TEMPORARILY, location);
}
@Override
public void setDateHeader(String name, long date)
{
if (!isIncluding())
_fields.putDateField(name, date);
}
@Override
public void addDateHeader(String name, long date)
{
if (!isIncluding())
_fields.addDateField(name, date);
}
public void setHeader(HttpHeader name, String value)
{
if (HttpHeader.CONTENT_TYPE == name)
setContentType(value);
else
{
if (isIncluding())
return;
_fields.put(name, value);
if (HttpHeader.CONTENT_LENGTH == name)
{
if (value == null)
_contentLength = -1L;
else
_contentLength = Long.parseLong(value);
}
}
}
@Override
public void setHeader(String name, String value)
{
if (HttpHeader.CONTENT_TYPE.is(name))
setContentType(value);
else
{
if (isIncluding())
{
if (name.startsWith(SET_INCLUDE_HEADER_PREFIX))
name = name.substring(SET_INCLUDE_HEADER_PREFIX.length());
else
return;
}
_fields.put(name, value);
if (HttpHeader.CONTENT_LENGTH.is(name))
{
if (value == null)
_contentLength = -1L;
else
_contentLength = Long.parseLong(value);
}
}
}
@Override
public Collection<String> getHeaderNames()
{
return _fields.getFieldNamesCollection();
}
@Override
public String getHeader(String name)
{
return _fields.get(name);
}
@Override
public Collection<String> getHeaders(String name)
{
Collection<String> i = _fields.getValuesList(name);
if (i == null)
return Collections.emptyList();
return i;
}
@Override
public void addHeader(String name, String value)
{
if (isIncluding())
{
if (name.startsWith(SET_INCLUDE_HEADER_PREFIX))
name = name.substring(SET_INCLUDE_HEADER_PREFIX.length());
else
return;
}
if (HttpHeader.CONTENT_TYPE.is(name))
{
setContentType(value);
return;
}
if (HttpHeader.CONTENT_LENGTH.is(name))
{
setHeader(name,value);
return;
}
_fields.add(name, value);
}
@Override
public void setIntHeader(String name, int value)
{
if (!isIncluding())
{
_fields.putLongField(name, value);
if (HttpHeader.CONTENT_LENGTH.is(name))
_contentLength = value;
}
}
@Override
public void addIntHeader(String name, int value)
{
if (!isIncluding())
{
_fields.add(name, Integer.toString(value));
if (HttpHeader.CONTENT_LENGTH.is(name))
_contentLength = value;
}
}
@Override
public void setStatus(int sc)
{
if (sc <= 0)
throw new IllegalArgumentException();
if (!isIncluding())
{
_status = sc;
_reason = null;
}
}
@Override
@Deprecated
public void setStatus(int sc, String sm)
{
setStatusWithReason(sc,sm);
}
public void setStatusWithReason(int sc, String sm)
{
if (sc <= 0)
throw new IllegalArgumentException();
if (!isIncluding())
{
_status = sc;
_reason = sm;
}
}
@Override
public String getCharacterEncoding()
{
if (_characterEncoding == null)
{
String encoding = MimeTypes.getCharsetAssumedFromContentType(_contentType);
if (encoding!=null)
return encoding;
encoding = MimeTypes.getCharsetInferredFromContentType(_contentType);
if (encoding!=null)
return encoding;
return StringUtil.__ISO_8859_1;
}
return _characterEncoding;
}
@Override
public String getContentType()
{
return _contentType;
}
@Override
public ServletOutputStream getOutputStream() throws IOException
{
if (_outputType == OutputType.WRITER)
throw new IllegalStateException("WRITER");
_outputType = OutputType.STREAM;
return _out;
}
public boolean isWriting()
{
return _outputType == OutputType.WRITER;
}
@Override
public PrintWriter getWriter() throws IOException
{
if (_outputType == OutputType.STREAM)
throw new IllegalStateException("STREAM");
if (_outputType == OutputType.NONE)
{
/* get encoding from Content-Type header */
String encoding = _characterEncoding;
if (encoding == null)
{
if (_mimeType!=null && _mimeType.isCharsetAssumed())
encoding=_mimeType.getCharsetString();
else
{
encoding = MimeTypes.getCharsetAssumedFromContentType(_contentType);
if (encoding == null)
{
encoding = MimeTypes.getCharsetInferredFromContentType(_contentType);
if (encoding == null)
encoding = StringUtil.__ISO_8859_1;
setCharacterEncoding(encoding,EncodingFrom.INFERRED);
}
}
}
Locale locale = getLocale();
if (_writer != null && _writer.isFor(locale,encoding))
_writer.reopen();
else
{
if (StringUtil.__ISO_8859_1.equalsIgnoreCase(encoding))
_writer = new ResponseWriter(new Iso88591HttpWriter(_out),locale,encoding);
else if (StringUtil.__UTF8.equalsIgnoreCase(encoding))
_writer = new ResponseWriter(new Utf8HttpWriter(_out),locale,encoding);
else
_writer = new ResponseWriter(new EncodingHttpWriter(_out, encoding),locale,encoding);
}
// Set the output type at the end, because setCharacterEncoding() checks for it
_outputType = OutputType.WRITER;
}
return _writer;
}
@Override
public void setContentLength(int len)
{
// Protect from setting after commit: the default handling of a servlet HEAD
// request ALWAYS sets the content length, even if the handling already
// committed the response!
if (isCommitted() || isIncluding())
return;
if (len>0)
{
long written = _out.getWritten();
if (written > len)
throw new IllegalArgumentException("setContentLength(" + len + ") when already written " + written);
_contentLength = len;
_fields.putLongField(HttpHeader.CONTENT_LENGTH, len);
if (isAllContentWritten(written))
{
try
{
closeOutput();
}
catch(IOException e)
{
throw new RuntimeIOException(e);
}
}
}
else if (len==0)
{
long written = _out.getWritten();
if (written > 0)
throw new IllegalArgumentException("setContentLength(0) when already written " + written);
_contentLength = len;
_fields.put(HttpHeader.CONTENT_LENGTH, "0");
}
else
{
_contentLength = len;
_fields.remove(HttpHeader.CONTENT_LENGTH);
}
}
public long getContentLength()
{
return _contentLength;
}
public boolean isAllContentWritten(long written)
{
return (_contentLength >= 0 && written >= _contentLength);
}
public boolean isContentComplete(long written)
{
return (_contentLength < 0 || written >= _contentLength);
}
public void closeOutput() throws IOException
{
switch (_outputType)
{
case WRITER:
_writer.close();
if (!_out.isClosed())
_out.close();
break;
case STREAM:
if (!_out.isClosed())
getOutputStream().close();
break;
default:
if (!_out.isClosed())
_out.close();
}
}
public long getLongContentLength()
{
return _contentLength;
}
public void setLongContentLength(long len)
{
// Protect from setting after commit: the default handling of a servlet HEAD
// request ALWAYS sets the content length, even if the handling already
// committed the response!
if (isCommitted() || isIncluding())
return;
_contentLength = len;
_fields.putLongField(HttpHeader.CONTENT_LENGTH.toString(), len);
}
@Override
public void setContentLengthLong(long length)
{
setLongContentLength(length);
}
@Override
public void setCharacterEncoding(String encoding)
{
setCharacterEncoding(encoding,EncodingFrom.SET_CHARACTER_ENCODING);
}
private void setCharacterEncoding(String encoding, EncodingFrom from)
{
if (isIncluding() || isWriting())
return;
if (_outputType != OutputType.WRITER && !isCommitted())
{
if (encoding == null)
{
_encodingFrom=EncodingFrom.NOT_SET;
// Clear any encoding.
if (_characterEncoding != null)
{
_characterEncoding = null;
if (_mimeType!=null)
{
_mimeType=_mimeType.getBaseType();
_contentType=_mimeType.asString();
_fields.put(_mimeType.getContentTypeField());
}
else if (_contentType != null)
{
_contentType = MimeTypes.getContentTypeWithoutCharset(_contentType);
_fields.put(HttpHeader.CONTENT_TYPE, _contentType);
}
}
}
else
{
// No, so just add this one to the mimetype
_encodingFrom = from;
_characterEncoding = HttpGenerator.__STRICT?encoding:StringUtil.normalizeCharset(encoding);
if (_mimeType!=null)
{
_contentType=_mimeType.getBaseType().asString()+ ";charset=" + _characterEncoding;
_mimeType = MimeTypes.CACHE.get(_contentType);
if (_mimeType==null || HttpGenerator.__STRICT)
_fields.put(HttpHeader.CONTENT_TYPE, _contentType);
else
_fields.put(_mimeType.getContentTypeField());
}
else if (_contentType != null)
{
_contentType = MimeTypes.getContentTypeWithoutCharset(_contentType) + ";charset=" + _characterEncoding;
_fields.put(HttpHeader.CONTENT_TYPE, _contentType);
}
}
}
}
@Override
public void setContentType(String contentType)
{
if (isCommitted() || isIncluding())
return;
if (contentType == null)
{
if (isWriting() && _characterEncoding != null)
throw new IllegalSelectorException();
if (_locale == null)
_characterEncoding = null;
_mimeType = null;
_contentType = null;
_fields.remove(HttpHeader.CONTENT_TYPE);
}
else
{
_contentType = contentType;
_mimeType = MimeTypes.CACHE.get(contentType);
String charset;
if (_mimeType!=null && _mimeType.getCharset()!=null && !_mimeType.isCharsetAssumed())
charset=_mimeType.getCharsetString();
else
charset = MimeTypes.getCharsetFromContentType(contentType);
if (charset == null)
{
switch (_encodingFrom)
{
case NOT_SET:
break;
case INFERRED:
case SET_CONTENT_TYPE:
if (isWriting())
{
_mimeType=null;
_contentType = _contentType + ";charset=" + _characterEncoding;
}
else
{
_encodingFrom=EncodingFrom.NOT_SET;
_characterEncoding=null;
}
break;
case SET_LOCALE:
case SET_CHARACTER_ENCODING:
{
_contentType = contentType + ";charset=" + _characterEncoding;
_mimeType = null;
}
}
}
else if (isWriting() && !charset.equalsIgnoreCase(_characterEncoding))
{
// too late to change the character encoding;
_mimeType = null;
_contentType = MimeTypes.getContentTypeWithoutCharset(_contentType);
if (_characterEncoding != null)
_contentType = _contentType + ";charset=" + _characterEncoding;
}
else
{
_characterEncoding = charset;
_encodingFrom = EncodingFrom.SET_CONTENT_TYPE;
}
if (HttpGenerator.__STRICT || _mimeType==null)
_fields.put(HttpHeader.CONTENT_TYPE, _contentType);
else
{
_contentType=_mimeType.asString();
_fields.put(_mimeType.getContentTypeField());
}
}
}
@Override
public void setBufferSize(int size)
{
if (isCommitted())
throw new IllegalStateException("cannot set buffer size after response is in committed state");
if (getContentCount() > 0)
throw new IllegalStateException("cannot set buffer size after response has " + getContentCount() + " bytes already written");
if (size < __MIN_BUFFER_SIZE)
size = __MIN_BUFFER_SIZE;
_out.setBufferSize(size);
}
@Override
public int getBufferSize()
{
return _out.getBufferSize();
}
@Override
public void flushBuffer() throws IOException
{
if (!_out.isClosed())
_out.flush();
}
@Override
public void reset()
{
reset(false);
}
public void reset(boolean preserveCookies)
{
resetForForward();
_status = 200;
_reason = null;
_contentLength = -1;
List<HttpField> cookies = preserveCookies
?_fields.stream()
.filter(f->f.getHeader()==HttpHeader.SET_COOKIE)
.collect(Collectors.toList()):null;
_fields.clear();
String connection = _channel.getRequest().getHeader(HttpHeader.CONNECTION.asString());
if (connection != null)
{
for (String value: StringUtil.csvSplit(null,connection,0,connection.length()))
{
HttpHeaderValue cb = HttpHeaderValue.CACHE.get(value);
if (cb != null)
{
switch (cb)
{
case CLOSE:
_fields.put(HttpHeader.CONNECTION, HttpHeaderValue.CLOSE.toString());
break;
case KEEP_ALIVE:
if (HttpVersion.HTTP_1_0.is(_channel.getRequest().getProtocol()))
_fields.put(HttpHeader.CONNECTION, HttpHeaderValue.KEEP_ALIVE.toString());
break;
case TE:
_fields.put(HttpHeader.CONNECTION, HttpHeaderValue.TE.toString());
break;
default:
}
}
}
}
if (preserveCookies)
cookies.forEach(_fields::add);
else
{
Request request = getHttpChannel().getRequest();
HttpSession session = request.getSession(false);
if (session!=null && session.isNew())
{
SessionHandler sh = request.getSessionHandler();
if (sh!=null)
{
HttpCookie c=sh.getSessionCookie(session,request.getContextPath(),request.isSecure());
if (c!=null)
addCookie(c);
}
}
}
}
public void resetForForward()
{
resetBuffer();
_outputType = OutputType.NONE;
}
@Override
public void resetBuffer()
{
_out.resetBuffer();
}
public void setTrailers(Supplier<HttpFields> trailers)
{
this.trailers = trailers;
}
public Supplier<HttpFields> getTrailers()
{
return trailers;
}
protected MetaData.Response newResponseMetaData()
{
MetaData.Response info = new MetaData.Response(_channel.getRequest().getHttpVersion(), getStatus(), getReason(), _fields, getLongContentLength());
info.setTrailerSupplier(getTrailers());
return info;
}
/** Get the MetaData.Response committed for this response.
* This may differ from the metadata in this response for
* exceptional responses (e.g. 4xx and 5xx responses generated
* by the container); the committed metadata should be used
* for logging purposes.
* @return The committed MetaData or a {@link #newResponseMetaData()}
* if not yet committed.
*/
public MetaData.Response getCommittedMetaData()
{
MetaData.Response meta = _channel.getCommittedMetaData();
if (meta==null)
return newResponseMetaData();
return meta;
}
@Override
public boolean isCommitted()
{
return _channel.isCommitted();
}
@Override
public void setLocale(Locale locale)
{
if (locale == null || isCommitted() || isIncluding())
return;
_locale = locale;
_fields.put(HttpHeader.CONTENT_LANGUAGE, locale.toString().replace('_', '-'));
if (_outputType != OutputType.NONE)
return;
if (_channel.getRequest().getContext() == null)
return;
String charset = _channel.getRequest().getContext().getContextHandler().getLocaleEncoding(locale);
if (charset != null && charset.length() > 0 && __localeOverride.contains(_encodingFrom))
setCharacterEncoding(charset,EncodingFrom.SET_LOCALE);
}
@Override
public Locale getLocale()
{
if (_locale == null)
return Locale.getDefault();
return _locale;
}
@Override
public int getStatus()
{
return _status;
}
public String getReason()
{
return _reason;
}
public HttpFields getHttpFields()
{
return _fields;
}
public long getContentCount()
{
return _out.getWritten();
}
@Override
public String toString()
{
return String.format("%s %d %s%n%s", _channel.getRequest().getHttpVersion(), _status, _reason == null ? "" : _reason, _fields);
}
public void putHeaders(HttpContent content,long contentLength, boolean etag)
{
HttpField lm = content.getLastModified();
if (lm!=null)
_fields.put(lm);
if (contentLength==0)
{
_fields.put(content.getContentLength());
_contentLength=content.getContentLengthValue();
}
else if (contentLength>0)
{
_fields.putLongField(HttpHeader.CONTENT_LENGTH,contentLength);
_contentLength=contentLength;
}
HttpField ct=content.getContentType();
if (ct!=null)
{
if (_characterEncoding!=null &&
content.getCharacterEncoding()==null &&
content.getContentTypeValue()!=null &&
__explicitCharset.contains(_encodingFrom))
{
setContentType(MimeTypes.getContentTypeWithoutCharset(content.getContentTypeValue()));
}
else
{
_fields.put(ct);
_contentType=ct.getValue();
_characterEncoding=content.getCharacterEncoding();
_mimeType=content.getMimeType();
}
}
HttpField ce=content.getContentEncoding();
if (ce!=null)
_fields.put(ce);
if (etag)
{
HttpField et = content.getETag();
if (et!=null)
_fields.put(et);
}
}
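// Static variant that sets the same content headers through the plain
// HttpServletResponse API, for use with any HttpServletResponse implementation.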
public static void putHeaders(HttpServletResponse response, HttpContent content, long contentLength, boolean etag)
{
long lml=content.getResource().lastModified();
if (lml>=0)
response.setDateHeader(HttpHeader.LAST_MODIFIED.asString(),lml);
if (contentLength==0)
contentLength=content.getContentLengthValue();
if (contentLength >=0)
{
if (contentLength<Integer.MAX_VALUE)
response.setContentLength((int)contentLength);
else
response.setHeader(HttpHeader.CONTENT_LENGTH.asString(),Long.toString(contentLength));
}
String ct=content.getContentTypeValue();
if (ct!=null && response.getContentType()==null)
response.setContentType(ct);
String ce=content.getContentEncodingValue();
if (ce!=null)
response.setHeader(HttpHeader.CONTENT_ENCODING.asString(),ce);
if (etag)
{
String et=content.getETagValue();
if (et!=null)
response.setHeader(HttpHeader.ETAG.asString(),et);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-jetty-9-minimal/3.46.0.7/water/webserver
|
java-sources/ai/h2o/h2o-jetty-9-minimal/3.46.0.7/water/webserver/jetty9/Jetty9Facade.java
|
package water.webserver.jetty9;
import water.webserver.iface.Credentials;
import water.webserver.iface.H2OHttpView;
import water.webserver.iface.HttpServerFacade;
import water.webserver.iface.ProxyServer;
import water.webserver.iface.WebServer;
public class Jetty9Facade implements HttpServerFacade {
@Override
public WebServer createWebServer(H2OHttpView h2oHttpView) {
return Jetty9ServerAdapter.create(h2oHttpView);
}
@Override
public ProxyServer createProxyServer(H2OHttpView h2oHttpView, Credentials credentials, String proxyTo) {
throw new UnsupportedOperationException("This H2O version doesn't support deployment in 'proxy' mode.");
}
}
|
0
|
java-sources/ai/h2o/h2o-jetty-9-minimal/3.46.0.7/water/webserver
|
java-sources/ai/h2o/h2o-jetty-9-minimal/3.46.0.7/water/webserver/jetty9/Jetty9Helper.java
|
package water.webserver.jetty9;
import org.eclipse.jetty.security.*;
import org.eclipse.jetty.security.authentication.BasicAuthenticator;
import org.eclipse.jetty.security.authentication.FormAuthenticator;
import org.eclipse.jetty.server.*;
import org.eclipse.jetty.server.handler.AbstractHandler;
import org.eclipse.jetty.server.handler.HandlerWrapper;
import org.eclipse.jetty.server.session.SessionHandler;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.util.security.Constraint;
import org.eclipse.jetty.util.ssl.SslContextFactory;
import org.eclipse.jetty.util.thread.QueuedThreadPool;
import org.eclipse.jetty.util.thread.ScheduledExecutorScheduler;
import org.eclipse.jetty.util.thread.Scheduler;
import water.webserver.config.ConnectionConfiguration;
import water.webserver.iface.H2OHttpConfig;
import water.webserver.iface.H2OHttpView;
import water.webserver.iface.LoginType;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.util.Collections;
class Jetty9Helper {
private final H2OHttpConfig config;
private final H2OHttpView h2oHttpView;
Jetty9Helper(H2OHttpView h2oHttpView) {
this.h2oHttpView = h2oHttpView;
this.config = h2oHttpView.getConfig();
}
Server createJettyServer(String ip, int port) {
System.setProperty("org.eclipse.jetty.server.Request.maxFormContentSize", Integer.toString(Integer.MAX_VALUE));
final Server jettyServer;
if (config.ensure_daemon_threads) {
QueuedThreadPool pool = new QueuedThreadPool();
pool.setDaemon(true);
jettyServer = new Server(pool);
// Ensure the threads started by jetty are daemon threads so they don't prevent stopping of H2O
Scheduler s = jettyServer.getBean(Scheduler.class);
jettyServer.updateBean(s, new ScheduledExecutorScheduler(null, true));
} else
jettyServer = new Server();
final boolean isSecured = config.jks != null;
final HttpConfiguration httpConfiguration = makeHttpConfiguration(new ConnectionConfiguration(isSecured));
final HttpConnectionFactory httpConnectionFactory = new HttpConnectionFactory(httpConfiguration);
final ServerConnector connector;
if (isSecured) {
final SslContextFactory sslContextFactory = new SslContextFactory.Server();
sslContextFactory.setKeyStorePath(config.jks);
sslContextFactory.setKeyStorePassword(config.jks_pass);
if (config.jks_alias != null) {
sslContextFactory.setCertAlias(config.jks_alias);
}
connector = new ServerConnector(jettyServer, AbstractConnectionFactory.getFactories(sslContextFactory, httpConnectionFactory));
} else {
connector = new ServerConnector(jettyServer, httpConnectionFactory);
}
connector.setIdleTimeout(httpConfiguration.getIdleTimeout()); // for websockets,...
if (ip != null) {
connector.setHost(ip);
}
connector.setPort(port);
jettyServer.setConnectors(new Connector[]{connector});
return jettyServer;
}
static HttpConfiguration makeHttpConfiguration(ConnectionConfiguration cfg) {
final HttpConfiguration httpConfiguration = new HttpConfiguration();
httpConfiguration.setSendServerVersion(false);
httpConfiguration.setRequestHeaderSize(cfg.getRequestHeaderSize());
httpConfiguration.setResponseHeaderSize(cfg.getResponseHeaderSize());
httpConfiguration.setOutputBufferSize(cfg.getOutputBufferSize(httpConfiguration.getOutputBufferSize()));
httpConfiguration.setRelativeRedirectAllowed(cfg.isRelativeRedirectAllowed());
httpConfiguration.setIdleTimeout(cfg.getIdleTimeout());
return httpConfiguration;
}
HandlerWrapper authWrapper(Server jettyServer) {
if (config.loginType == LoginType.NONE) {
return jettyServer;
}
final LoginService loginService;
final Authenticator authenticator;
switch (config.loginType) {
case HASH:
loginService = new HashLoginService("H2O", config.login_conf);
authenticator = new BasicAuthenticator();
break;
case LDAP:
case KERBEROS:
case PAM:
case SPNEGO:
default:
throw new UnsupportedOperationException(
"Authentication type '" + config.loginType + "' is not supported by this version of H2O."
);
}
final IdentityService identityService = new DefaultIdentityService();
loginService.setIdentityService(identityService);
jettyServer.addBean(loginService);
// Set a security handler as the first handler in the chain.
final ConstraintSecurityHandler security = new ConstraintSecurityHandler();
// Set up a constraint to authenticate all calls, and allow certain roles in.
final Constraint constraint = new Constraint();
constraint.setName("auth");
constraint.setAuthenticate(true);
constraint.setRoles(new String[]{Constraint.ANY_AUTH});
final ConstraintMapping mapping = new ConstraintMapping();
mapping.setPathSpec("/*"); // Lock down all API calls
mapping.setConstraint(constraint);
security.setConstraintMappings(Collections.singletonList(mapping));
// Authentication / Authorization
security.setLoginService(loginService);
security.setAuthenticator(authenticator);
final SessionHandler sessionHandler = new SessionHandler();
if (config.session_timeout > 0) {
sessionHandler.setMaxInactiveInterval(config.session_timeout * 60);
}
sessionHandler.setHandler(security);
jettyServer.setSessionIdManager(sessionHandler.getSessionIdManager());
// Pass-through to H2O if authenticated.
jettyServer.setHandler(sessionHandler);
return security;
}
/**
* Hook up Jetty handlers. Do this before start() is called.
*/
ServletContextHandler createServletContextHandler() {
// Both security and session handlers are already created (Note: we don't want to create a new separate session
// handler just for ServletContextHandler - we want to have just one SessionHandler & SessionManager)
final ServletContextHandler context = new ServletContextHandler(ServletContextHandler.NO_SECURITY | ServletContextHandler.NO_SESSIONS);
if (null != config.context_path && !config.context_path.isEmpty()) {
context.setContextPath(config.context_path);
} else {
context.setContextPath("/");
}
return context;
}
Handler authenticationHandler() {
return new AuthenticationHandler();
}
private class AuthenticationHandler extends AbstractHandler {
@Override
public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response)
throws IOException {
boolean handled = h2oHttpView.authenticationHandler(request, response);
if (handled) {
baseRequest.setHandled(true);
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-jetty-9-minimal/3.46.0.7/water/webserver
|
java-sources/ai/h2o/h2o-jetty-9-minimal/3.46.0.7/water/webserver/jetty9/Jetty9ServerAdapter.java
|
package water.webserver.jetty9;
import org.eclipse.jetty.server.Handler;
import org.eclipse.jetty.server.Request;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.handler.AbstractHandler;
import org.eclipse.jetty.server.handler.HandlerCollection;
import org.eclipse.jetty.server.handler.HandlerWrapper;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlet.ServletHolder;
import water.webserver.iface.H2OHttpView;
import water.webserver.iface.H2OWebsocketServlet;
import water.webserver.iface.RequestAuthExtension;
import water.webserver.iface.WebServer;
import javax.servlet.Servlet;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
class Jetty9ServerAdapter implements WebServer {
private final Jetty9Helper helper;
private final H2OHttpView h2oHttpView;
private Server jettyServer;
private Jetty9ServerAdapter(Jetty9Helper helper, H2OHttpView h2oHttpView) {
this.helper = helper;
this.h2oHttpView = h2oHttpView;
}
static WebServer create(final H2OHttpView h2oHttpView) {
final Jetty9Helper helper = new Jetty9Helper(h2oHttpView);
return new Jetty9ServerAdapter(helper, h2oHttpView);
}
@Override
public void start(final String ip, final int port) throws IOException {
jettyServer = helper.createJettyServer(ip, port);
final HandlerWrapper handlerWrapper = helper.authWrapper(jettyServer);
final ServletContextHandler context = helper.createServletContextHandler();
registerHandlers(handlerWrapper, context);
try {
jettyServer.start();
} catch (IOException e) {
throw e;
} catch (Exception e) {
throw new IOException(e);
}
}
/**
* Stops the Jetty server after it has been started.
* This is unlikely to ever be called by H2O until H2O supports graceful shutdown.
*
* @throws IOException if stopping the underlying Jetty server fails
*/
@Override
public void stop() throws IOException {
if (jettyServer != null) {
try {
jettyServer.stop();
} catch (IOException e) {
throw e;
} catch (Exception e) {
throw new IOException(e);
}
}
}
private void registerHandlers(final HandlerWrapper handlerWrapper, final ServletContextHandler context) {
for (Map.Entry<String, Class<? extends HttpServlet>> entry : h2oHttpView.getServlets().entrySet()) {
context.addServlet(entry.getValue(), entry.getKey());
}
for (Map.Entry<String, Class<? extends H2OWebsocketServlet>> entry : h2oHttpView.getWebsockets().entrySet()) {
try {
Servlet servlet = new Jetty9WebsocketServlet(entry.getValue().newInstance());
context.addServlet(new ServletHolder(entry.getValue().getName(), servlet), entry.getKey());
} catch (InstantiationException | IllegalAccessException e) {
throw new RuntimeException("Failed to instantiate websocket servlet object", e);
}
}
final List<Handler> extHandlers = new ArrayList<>();
extHandlers.add(helper.authenticationHandler());
// here we wrap generic authentication handlers into jetty-aware wrappers
final Collection<RequestAuthExtension> authExtensions = h2oHttpView.getAuthExtensions();
for (final RequestAuthExtension requestAuthExtension : authExtensions) {
extHandlers.add(new AbstractHandler() {
@Override
public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response) throws IOException, ServletException {
if (requestAuthExtension.handle(target, request, response)) {
baseRequest.setHandled(true);
}
}
});
}
//
extHandlers.add(context);
// Handlers that can only be invoked for an authenticated user (if auth is enabled)
final HandlerCollection authHandlers = new HandlerCollection();
authHandlers.setHandlers(extHandlers.toArray(new Handler[0]));
// GateHandler handles directly invalid requests and delegates the rest to the authHandlers
final GateHandler gateHandler = new GateHandler();
gateHandler.setHandler(authHandlers);
handlerWrapper.setHandler(gateHandler);
}
private class GateHandler extends HandlerWrapper {
@Override
public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response)
throws IOException, ServletException {
final boolean handled = h2oHttpView.gateHandler(request, response);
if (handled) {
baseRequest.setHandled(true);
} else {
super.handle(target, baseRequest, request, response);
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-jetty-9-minimal/3.46.0.7/water/webserver
|
java-sources/ai/h2o/h2o-jetty-9-minimal/3.46.0.7/water/webserver/jetty9/Jetty9WebsocketServlet.java
|
package water.webserver.jetty9;
import org.eclipse.jetty.websocket.api.Session;
import org.eclipse.jetty.websocket.api.WebSocketListener;
import org.eclipse.jetty.websocket.servlet.*;
import water.webserver.iface.H2OWebsocketServlet;
import water.webserver.iface.WebsocketConnection;
import water.webserver.iface.WebsocketHandler;
import java.io.IOException;
public class Jetty9WebsocketServlet extends WebSocketServlet {
private final H2OWebsocketServlet impl;
public Jetty9WebsocketServlet(H2OWebsocketServlet impl) {
this.impl = impl;
}
static class Jetty9WebsocketConnection implements WebsocketConnection {
private final Session sess;
Jetty9WebsocketConnection(Session sess) {
this.sess = sess;
}
@Override
public void sendMessage(String message) throws IOException {
sess.getRemote().sendString(message);
}
}
class Jetty9WebsocketHandler implements WebSocketListener {
private WebsocketHandler handler;
private Jetty9WebsocketConnection conn;
@Override
public void onWebSocketConnect(Session sess) {
conn = new Jetty9WebsocketConnection(sess);
handler = impl.onConnect(conn);
}
@Override
public void onWebSocketBinary(byte[] payload, int offset, int len) {
// ignore
}
@Override
public void onWebSocketText(String message) {
handler.onMessage(message);
}
@Override
public void onWebSocketClose(int statusCode, String reason)
{
handler.onClose(conn);
conn = null;
handler = null;
}
@Override
public void onWebSocketError(Throwable cause) {
cause.printStackTrace();
}
}
/**
* Note that each Servlet has its own instance of WebSocketServletFactory.
*
* @param factory Factory object to register socket creator with.
*/
@Override
public void configure(WebSocketServletFactory factory) {
factory.setCreator(new H2OWebSocketCreator());
}
/**
* Custom in-place socket creator, returning a new instance of {@link Jetty9WebsocketHandler},
* which already holds the {@link H2OWebsocketServlet} implementation the request is delegated to.
* <p>
* This is required because the default {@link WebSocketServletFactory} uses {@link org.eclipse.jetty.util.DecoratedObjectFactory}
* to instantiate {@link WebSocketListener} classes. That factory can only instantiate static classes with a 0-arg constructor,
* which the inner non-static class {@link Jetty9WebsocketHandler} is NOT.
*/
public class H2OWebSocketCreator implements WebSocketCreator {
@Override
public Object createWebSocket(ServletUpgradeRequest req, ServletUpgradeResponse resp) {
return new Jetty9WebsocketHandler();
}
}
}
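// ---------------------------------------------------------------------------------------------
// Illustrative sketch (not part of the original sources): a minimal H2OWebsocketServlet that
// simply echoes text frames back to the client. It assumes the interface shape implied by the
// calls above (onConnect(WebsocketConnection) returning a WebsocketHandler with onMessage/onClose);
// the real water.webserver.iface contracts may declare additional methods or throws clauses.
class EchoWebsocketServletSketch implements H2OWebsocketServlet {
    @Override
    public WebsocketHandler onConnect(final WebsocketConnection connection) {
        return new WebsocketHandler() {
            @Override
            public void onMessage(String message) {
                try {
                    connection.sendMessage(message); // echo the received payload back
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
            @Override
            public void onClose(WebsocketConnection conn) {
                // nothing to clean up in this sketch
            }
        };
    }
}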
|
0
|
java-sources/ai/h2o/h2o-k8s/3.46.0.7/water
|
java-sources/ai/h2o/h2o-k8s/3.46.0.7/water/k8s/H2OCluster.java
|
package water.k8s;
import org.apache.log4j.Logger;
import water.k8s.api.KubernetesRestApi;
import water.k8s.lookup.KubernetesDnsLookup;
import water.k8s.lookup.KubernetesLookup;
import water.k8s.lookup.LookupConstraintsBuilder;
import java.io.IOException;
import java.util.Collection;
import java.util.Optional;
import java.util.Set;
public class H2OCluster {
private static final Logger LOG = Logger.getLogger(H2OCluster.class);
private volatile static boolean clustered = false;
private volatile static H2ONodeInfo nodeInfo;
public static final String K8S_NODE_LOOKUP_TIMEOUT_KEY = "H2O_NODE_LOOKUP_TIMEOUT";
public static final String K8S_DESIRED_CLUSTER_SIZE_KEY = "H2O_NODE_EXPECTED_COUNT";
public static boolean isClustered() {
return clustered;
}
public static H2ONodeInfo getCurrentNodeInfo() {
return nodeInfo;
}
public static void setCurrentNodeInfo(H2ONodeInfo ni) {
nodeInfo = ni;
}
/**
* @return True if there are environment variables indicating H2O is running inside a container managed by
* Kubernetes. Otherwise false.
*/
public static boolean isRunningOnKubernetes() {
return KubernetesDnsLookup.isLookupPossible();
}
public static Collection<String> resolveNodeIPs() {
startKubernetesRestApi();
LOG.info("Initializing H2O Kubernetes cluster");
final Collection<String> nodeIPs = resolveInternalNodeIPs()
.orElseThrow(() -> new IllegalStateException("Unable to resolve Node IPs from DNS service."));
LOG.info(String.format("Using the following pods to form H2O cluster: [%s]",
String.join(",", nodeIPs)));
clustered = true;
return nodeIPs;
}
/**
* @return A Set of node addresses. The addresses are internal addresses/IPs within the Kubernetes cluster.
*/
private static Optional<Set<String>> resolveInternalNodeIPs() {
final LookupConstraintsBuilder lookupConstraintsBuilder = new LookupConstraintsBuilder();
try {
final int timeoutSeconds = Integer.parseInt(System.getenv(K8S_NODE_LOOKUP_TIMEOUT_KEY));
LOG.info(String.format("Timeout contraint: %d seconds.", timeoutSeconds));
lookupConstraintsBuilder.withTimeoutSeconds(timeoutSeconds);
} catch (NumberFormatException e) {
LOG.info(String.format("'%s' environment variable not set.", K8S_NODE_LOOKUP_TIMEOUT_KEY));
}
try {
final int desiredClusterSize = Integer.parseInt(System.getenv(K8S_DESIRED_CLUSTER_SIZE_KEY));
LOG.info(String.format("Cluster size constraint: %d nodes.", desiredClusterSize));
lookupConstraintsBuilder.withDesiredClusterSize(desiredClusterSize);
} catch (NumberFormatException e) {
LOG.info(String.format("'%s' environment variable not set.", K8S_DESIRED_CLUSTER_SIZE_KEY));
}
final KubernetesLookup kubernetesDnsDiscovery = KubernetesDnsLookup.fromH2ODefaults();
return kubernetesDnsDiscovery.lookupNodes(lookupConstraintsBuilder.build());
}
/**
* Start Kubernetes-only REST API services
*/
private static void startKubernetesRestApi() {
LOG.info("Starting Kubernetes-related REST API services");
try {
final KubernetesRestApi kubernetesRestApi = new KubernetesRestApi();
kubernetesRestApi.start();
LOG.info("Kubernetes REST API services successfully started.");
} catch (IOException e) {
LOG.error("Unable to start H2O Kubernetes REST API", e);
System.exit(1);
}
}
public interface H2ONodeInfo {
boolean isLeader();
}
}
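// ---------------------------------------------------------------------------------------------
// Illustrative sketch (not part of the original sources): the calling side of the clustering
// entry points above. The H2O_NODE_LOOKUP_TIMEOUT and H2O_NODE_EXPECTED_COUNT environment
// variables are read inside resolveInternalNodeIPs(); this sketch only drives the public API.
class H2OClusterUsageSketch {
    public static void main(String[] args) {
        if (H2OCluster.isRunningOnKubernetes()) {
            // Blocks until the configured lookup constraints (timeout and/or expected node count) are met.
            Collection<String> nodeIPs = H2OCluster.resolveNodeIPs();
            System.out.println("Discovered " + nodeIPs.size() + " H2O pod IPs: " + nodeIPs);
        }
    }
}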
|
0
|
java-sources/ai/h2o/h2o-k8s/3.46.0.7/water/k8s
|
java-sources/ai/h2o/h2o-k8s/3.46.0.7/water/k8s/api/KubernetesRestApi.java
|
package water.k8s.api;
import com.sun.net.httpserver.HttpServer;
import water.k8s.probe.KubernetesLeaderNodeProbeHandler;
import java.io.IOException;
import java.net.InetSocketAddress;
/**
* This class represents a tiny (in memory, CPU, and dependencies) self-contained REST API used only on Kubernetes,
* running separately on localhost on a specified port.
* When the Kubernetes extension is starting and attempting to form an H2O cluster,
* H2O's main REST API is not yet fully initialized, as its startup relies on configuration obtained
* during the clustering phase. However, REST API services (readiness probe, liveness probe, startup probe)
* must be available from the very start of the H2O K8S extension. Therefore, a separate REST API running
* on a distinct port is spawned.
* <p>
*/
public class KubernetesRestApi implements AutoCloseable {
/**
* Default port to bind to / listen on.
*/
private static final int DEFAULT_PORT = 8080;
public static final String KUBERNETES_REST_API_PORT_KEY = "H2O_KUBERNETES_API_PORT";
private final HttpServer server;
/**
* Creates, but does not start, the Kubernetes REST API. To start the REST API, use
* the {@link #start()} method.
* <p>
* The REST API is bound to the default port of 8080, unless specified otherwise by the H2O_KUBERNETES_API_PORT environment
* variable.
*/
public KubernetesRestApi() throws IOException {
int port = getPort();
server = HttpServer.create(new InetSocketAddress(port), 0);
addMappings();
}
private static int getPort() {
final String customKubernetesPort = System.getenv(KUBERNETES_REST_API_PORT_KEY);
if (customKubernetesPort == null) {
return DEFAULT_PORT;
}
try {
return Integer.parseInt(customKubernetesPort);
} catch (NumberFormatException e) {
final String errorMessage = String.format("Non-usable port for K8S REST API to bind to: '%s'", customKubernetesPort);
throw new IllegalArgumentException(errorMessage, e);
}
}
public void addMappings() {
server.createContext("/kubernetes/isLeaderNode", new KubernetesLeaderNodeProbeHandler());
}
@Override
public void close() {
server.stop(0);
}
public void start() throws IOException {
server.start();
}
}
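// ---------------------------------------------------------------------------------------------
// Illustrative sketch (not part of the original sources): creating and starting the probe API
// described above. The /kubernetes/isLeaderNode path comes from addMappings(); the port defaults
// to 8080 unless H2O_KUBERNETES_API_PORT is set before the constructor runs.
class KubernetesRestApiUsageSketch {
    public static void main(String[] args) throws IOException {
        KubernetesRestApi api = new KubernetesRestApi();
        api.start();
        // A Kubernetes readiness/liveness probe would now GET http://localhost:8080/kubernetes/isLeaderNode.
        // api.close() would stop the embedded HttpServer when the process shuts down.
    }
}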
|
0
|
java-sources/ai/h2o/h2o-k8s/3.46.0.7/water/k8s
|
java-sources/ai/h2o/h2o-k8s/3.46.0.7/water/k8s/lookup/ClusterSizeConstraint.java
|
package water.k8s.lookup;
import java.util.Set;
/**
* Constraint triggered when a pre-defined number of pods has been discovered.
*/
public class ClusterSizeConstraint implements LookupConstraint {
private final int desiredClusterSize;
public ClusterSizeConstraint(final int desiredClusterSize) {
this.desiredClusterSize = desiredClusterSize;
}
@Override
public boolean isLookupEnded(final Set<String> discoveredNodes) {
return discoveredNodes.size() == desiredClusterSize;
}
}
|
0
|
java-sources/ai/h2o/h2o-k8s/3.46.0.7/water/k8s
|
java-sources/ai/h2o/h2o-k8s/3.46.0.7/water/k8s/lookup/KubernetesDnsLookup.java
|
package water.k8s.lookup;
import org.apache.log4j.Logger;
import javax.naming.Context;
import javax.naming.NamingEnumeration;
import javax.naming.NamingException;
import javax.naming.directory.Attribute;
import javax.naming.directory.Attributes;
import javax.naming.directory.DirContext;
import javax.naming.directory.InitialDirContext;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.*;
import java.util.regex.Pattern;
/**
* Pod discovery strategy leveraging the DNS record of a Kubernetes headless service present in the cluster.
* Kubernetes headless services, instead of load-balancing the requests onto one of the underlying pods, return the
* addresses of all the pods covered by the headless service.
* <p>
* Such pods can then be discovered via the above-mentioned DNS record. In order for H2O to know which service to query,
* it is mandatory for the K8S user to pass the name of the headless service to the H2O container, as follows:
*
* <pre>
* apiVersion: apps/v1
* kind: StatefulSet
* metadata:
* name: h2o-stateful-set
* namespace: h2o-statefulset
* spec:
* serviceName: h2o-service
* replicas: 3
* selector:
* matchLabels:
* app: h2o-k8s
* template:
* metadata:
* labels:
* app: h2o-k8s
* spec:
* terminationGracePeriodSeconds: 10
* containers:
* - name: h2o-k8s
* image: '<someDockerImageWithH2OInside>'
* resources:
* requests:
* memory: "4Gi"
* ports:
* - containerPort: 54321
* protocol: TCP
* env:
* - name: H2O_KUBERNETES_SERVICE_DNS
* value: h2o-service.h2o-statefulset.svc.cluster.local
* - name: H2O_NODE_LOOKUP_TIMEOUT
* value: '180'
* - name: H2O_NODE_EXPECTED_COUNT
* value: '3'
* </pre>
*/
public class KubernetesDnsLookup implements KubernetesLookup {
private static final Logger LOG = Logger.getLogger(KubernetesDnsLookup.class);
private static final String K8S_SERVICE_DNS_ENV_VAR_KEY = "H2O_KUBERNETES_SERVICE_DNS";
private static final String DNS_TIMEOUT_DEFAULT = "30000"; // 30 seconds
private static final int ONE_SECOND = 1000;
private final String serviceDns;
private final DirContext dirContext;
public KubernetesDnsLookup(final String serviceDns) {
this.serviceDns = serviceDns;
this.dirContext = initDirContext();
}
/**
* @return A {@link KubernetesDnsLookup} configured from the H2O_KUBERNETES_SERVICE_DNS environment variable
* @throws IllegalStateException When the H2O-related Kubernetes DNS service name is not set or is blank
*/
public static KubernetesDnsLookup fromH2ODefaults() throws IllegalStateException {
final String dnsServiceName = System.getenv(K8S_SERVICE_DNS_ENV_VAR_KEY);
if (dnsServiceName == null) {
throw new IllegalStateException(String.format("DNS of H2O service not set. Please set the '%s' variable.",
K8S_SERVICE_DNS_ENV_VAR_KEY));
} else if (dnsServiceName.trim().isEmpty()) {
throw new IllegalStateException(String.format("DNS Service '%s' name is invalid.", dnsServiceName));
}
return new KubernetesDnsLookup(dnsServiceName);
}
private static String extractHost(final String server, final Pattern extractHostPattern) {
String host = server.split(" ")[3];
return extractHostPattern.matcher(host).replaceAll("");
}
/**
* Looks up H2O pods via the configured K8S headless service DNS. The environment variable whose key is defined by the
* K8S_SERVICE_DNS_ENV_VAR_KEY constant is used to obtain the address of the DNS record. The DNS is then queried for pods
* in the underlying service. It is the responsibility of the K8S cluster owner to set up the service correctly to
* only provide correct addresses of pods with H2O active. If pods with no H2O running are supplied, the resulting
* flatfile may contain pod IPs with no H2O running as well.
*
* @param lookupConstraints Constraints to obey during lookup
* @return A {@link Set} of addresses of looked-up nodes represented as Strings. The resulting set is never empty.
*/
public Optional<Set<String>> lookupNodes(final Collection<LookupConstraint> lookupConstraints) {
final Set<String> lookedUpNodes = new HashSet<>();
while (lookupConstraints.stream().allMatch(lookupStrategy -> !lookupStrategy.isLookupEnded(lookedUpNodes))) {
try {
dnsLookup(lookedUpNodes);
} catch (NamingException e) {
LOG.warn(e.getMessage());
continue;
} finally {
try {
Thread.sleep(ONE_SECOND);
} catch (InterruptedException e) {
LOG.error(e);
return Optional.empty();
}
}
}
return Optional.of(lookedUpNodes);
}
public static boolean isLookupPossible() {
return System.getenv()
.containsKey(KubernetesDnsLookup.K8S_SERVICE_DNS_ENV_VAR_KEY);
}
/**
* Performs a single DNS lookup. Discovered nodes (their IPs, respectively) are added to the existing
* set of nodeIPs.
*
* @param nodeIPs A {@link Set} of nodes already discovered during previous lookups.
* @throws NamingException If the DNS under given name is unreachable / does not exist.
*/
private void dnsLookup(final Set<String> nodeIPs) throws NamingException {
final Attributes attributes = dirContext.getAttributes(serviceDns, new String[]{"SRV"});
final Attribute srvAttribute = attributes.get("srv");
final Pattern extractHostPattern = Pattern.compile("\\\\.$");
if (srvAttribute != null) {
final NamingEnumeration<?> servers = srvAttribute.getAll();
while (servers.hasMore()) {
final String server = (String) servers.next();
final String serverHost = extractHost(server, extractHostPattern);
final InetAddress nodeIP;
try {
nodeIP = InetAddress.getByName(serverHost);
} catch (UnknownHostException e) {
LOG.error("Unknown host for IP Address: " + serverHost);
continue;
}
if (nodeIPs.add(nodeIP.getHostAddress())) {
LOG.info(String.format("New H2O pod with DNS record '%s' discovered.", nodeIP));
}
}
servers.close();
}
}
private DirContext initDirContext() {
final Hashtable<String, String> environment = new Hashtable<>();
environment.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.dns.DnsContextFactory");
environment.put(Context.PROVIDER_URL, "dns:");
environment.put("com.sun.jndi.dns.timeout.initial", DNS_TIMEOUT_DEFAULT);
try {
return new InitialDirContext(environment);
} catch (NamingException e) {
throw new IllegalStateException("Error while initializing DirContext", e);
}
}
}
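// ---------------------------------------------------------------------------------------------
// Illustrative sketch (not part of the original sources): a single lookup driven through the
// constraint API defined in this package. H2O_KUBERNETES_SERVICE_DNS must point at the headless
// service record shown in the YAML above; the 60-second timeout is an arbitrary example value.
class KubernetesDnsLookupUsageSketch {
    public static void main(String[] args) {
        final KubernetesLookup lookup = KubernetesDnsLookup.fromH2ODefaults();
        final Collection<LookupConstraint> constraints = new LookupConstraintsBuilder()
                .withTimeoutSeconds(60)
                .build();
        final Optional<Set<String>> nodeIPs = lookup.lookupNodes(constraints);
        nodeIPs.ifPresent(ips -> ips.forEach(System.out::println));
    }
}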
|
0
|
java-sources/ai/h2o/h2o-k8s/3.46.0.7/water/k8s
|
java-sources/ai/h2o/h2o-k8s/3.46.0.7/water/k8s/lookup/KubernetesLookup.java
|
package water.k8s.lookup;
import java.util.Collection;
import java.util.Optional;
import java.util.Set;
public interface KubernetesLookup {
/**
* Looks up H2O pods in K8S cluster.
*
* @param lookupConstraints Constraints to obey during lookup
* @return A {@link Set} of addresses of looked-up nodes represented as Strings. If there are difficulties
* during node lookup, Optional.empty() is returned.
*/
Optional<Set<String>> lookupNodes(final Collection<LookupConstraint> lookupConstraints);
}
|
0
|
java-sources/ai/h2o/h2o-k8s/3.46.0.7/water/k8s
|
java-sources/ai/h2o/h2o-k8s/3.46.0.7/water/k8s/lookup/LookupConstraint.java
|
package water.k8s.lookup;
import java.util.Set;
/**
* A constraint during Pod lookup in a Kubernetes cluster. Each implementation represents a single rule to constrain
* the lookup with.
*/
public interface LookupConstraint {
/**
* @param lookedUpNodes A set of unique string representations of the nodes discovered
* @return True if, after the most recent node discovery, the lookup should be ended.
*/
boolean isLookupEnded(final Set<String> lookedUpNodes);
}
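// ---------------------------------------------------------------------------------------------
// Illustrative sketch (not part of the original sources): a custom constraint showing how the
// interface composes with the built-in TimeoutConstraint/ClusterSizeConstraint. It ends the
// lookup as soon as one specific pod IP (an arbitrary example value) has been discovered.
class SpecificNodeConstraintSketch implements LookupConstraint {
    private final String requiredNodeIP;
    SpecificNodeConstraintSketch(final String requiredNodeIP) {
        this.requiredNodeIP = requiredNodeIP;
    }
    @Override
    public boolean isLookupEnded(final Set<String> lookedUpNodes) {
        return lookedUpNodes.contains(requiredNodeIP);
    }
}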
|
0
|
java-sources/ai/h2o/h2o-k8s/3.46.0.7/water/k8s
|
java-sources/ai/h2o/h2o-k8s/3.46.0.7/water/k8s/lookup/LookupConstraintsBuilder.java
|
package water.k8s.lookup;
import org.apache.log4j.Logger;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
/**
* Builder for lookup constraints. For a given input/configuration, this builder outputs the exact set of instances
* of {@link LookupConstraint} needed to meet the user's requirements.
*/
public class LookupConstraintsBuilder {
private static final Logger LOG = Logger.getLogger(LookupConstraintsBuilder.class);
private static final int K8S_DEFAULT_CLUSTERING_TIMEOUT_SECONDS = 180;
private Integer timeoutSeconds;
private Integer desiredClusterSize;
public LookupConstraintsBuilder() {
this.timeoutSeconds = null;
this.desiredClusterSize = null;
}
/**
* @param timeoutSeconds Timeout in seconds. Inserting a null value resets the timeout settings.
* @return The very instance of {@link LookupConstraintsBuilder} called (builder pattern).
*/
public LookupConstraintsBuilder withTimeoutSeconds(Integer timeoutSeconds) {
this.timeoutSeconds = timeoutSeconds;
return this;
}
/**
* @param desiredClusterSize Desired number of pods to be discovered.
* @return The very instance of {@link LookupConstraintsBuilder} called (builder pattern).
*/
public LookupConstraintsBuilder withDesiredClusterSize(final int desiredClusterSize) {
this.desiredClusterSize = desiredClusterSize;
return this;
}
/**
* Constructs a never-empty collection of {@link LookupConstraint} instances. By guaranteeing the resulting collection
* is never empty, it is ensured that the H2O node lookup on available pods always ends in a reasonably finite time.
*
* @return A {@link Collection} of {@link LookupConstraint}. The collection is never empty.
*/
public Collection<LookupConstraint> build() {
final List<LookupConstraint> lookupConstraintList = new ArrayList<>();
// If there are no constraints set by the user via environment variables, use a sensible timeout.
if (timeoutSeconds == null && desiredClusterSize == null) {
LOG.info(String.format("No H2O Node discovery constraints set. Using default timeout of %d seconds.",
K8S_DEFAULT_CLUSTERING_TIMEOUT_SECONDS));
lookupConstraintList.add(new TimeoutConstraint(K8S_DEFAULT_CLUSTERING_TIMEOUT_SECONDS));
}
if (timeoutSeconds != null) {
LOG.info(String.format("Timeout for node discovery is set to %d seconds.", timeoutSeconds));
lookupConstraintList.add(new TimeoutConstraint(timeoutSeconds));
}
if (desiredClusterSize != null) {
LOG.info(String.format("Desired cluster size is set to %d nodes.", desiredClusterSize));
lookupConstraintList.add(new ClusterSizeConstraint(desiredClusterSize));
}
return lookupConstraintList;
}
}
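// ---------------------------------------------------------------------------------------------
// Illustrative sketch (not part of the original sources): combining both constraints, mirroring
// what happens when H2O_NODE_LOOKUP_TIMEOUT and H2O_NODE_EXPECTED_COUNT are both set. The lookup
// loop in KubernetesDnsLookup ends as soon as either constraint reports it is satisfied.
class LookupConstraintsBuilderUsageSketch {
    static Collection<LookupConstraint> timeoutAndSizeConstraints() {
        return new LookupConstraintsBuilder()
                .withTimeoutSeconds(180)   // example value; mirrors the default clustering timeout
                .withDesiredClusterSize(3) // example value; matches the StatefulSet replicas shown earlier
                .build();
    }
}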
|
0
|
java-sources/ai/h2o/h2o-k8s/3.46.0.7/water/k8s
|
java-sources/ai/h2o/h2o-k8s/3.46.0.7/water/k8s/lookup/TimeoutConstraint.java
|
package water.k8s.lookup;
import java.time.Duration;
import java.time.Instant;
import java.util.Set;
/**
* Constraint triggered once the lookup has taken a certain amount of time.
*/
public class TimeoutConstraint implements LookupConstraint {
private final int timeoutSeconds;
private final Instant beginning;
public TimeoutConstraint(final int timeoutSeconds) {
this.timeoutSeconds = timeoutSeconds;
beginning = Instant.now();
}
@Override
public boolean isLookupEnded(final Set<String> discoveredNodes) {
return Duration.between(beginning, Instant.now()).getSeconds() >= timeoutSeconds;
}
}
|
0
|
java-sources/ai/h2o/h2o-k8s/3.46.0.7/water/k8s
|
java-sources/ai/h2o/h2o-k8s/3.46.0.7/water/k8s/probe/KubernetesLeaderNodeProbeHandler.java
|
package water.k8s.probe;
import com.sun.net.httpserver.HttpExchange;
import com.sun.net.httpserver.HttpHandler;
import water.k8s.H2OCluster;
import java.io.IOException;
import static java.net.HttpURLConnection.*;
public class KubernetesLeaderNodeProbeHandler implements HttpHandler {
public static final String MIME_TYPE_TEXT_PLAIN = "text/plain";
public static final String GET_METHOD = "GET";
@Override
public void handle(HttpExchange httpExchange) throws IOException {
if (!GET_METHOD.equals(httpExchange.getRequestMethod())) {
httpResponseWithoutBody(httpExchange, HTTP_BAD_METHOD);
return; // do not fall through and attempt to send a second response
}
// All nodes report ready state until the clustering process is finished. From then on, only the leader node is ready.
final H2OCluster.H2ONodeInfo self = H2OCluster.getCurrentNodeInfo();
if (self == null || self.isLeader() || !H2OCluster.isClustered()) {
httpResponseWithoutBody(httpExchange, HTTP_OK);
} else {
httpResponseWithoutBody(httpExchange, HTTP_NOT_FOUND);
}
}
private static void httpResponseWithoutBody(HttpExchange httpExchange, int httpResponseCode) throws IOException {
httpExchange.getResponseHeaders().set("Content-Type", MIME_TYPE_TEXT_PLAIN);
httpExchange.sendResponseHeaders(httpResponseCode, -1);
httpExchange.close();
}
}
|
0
|
java-sources/ai/h2o/h2o-k8s-int/3.46.0.7/water
|
java-sources/ai/h2o/h2o-k8s-int/3.46.0.7/water/k8s/KubernetesEmbeddedConfig.java
|
package water.k8s;
import water.H2O;
import water.init.AbstractEmbeddedH2OConfig;
import water.util.Log;
import java.net.InetAddress;
import java.util.Collection;
public class KubernetesEmbeddedConfig extends AbstractEmbeddedH2OConfig {
private final String flatfile;
private final int cloudSize;
public KubernetesEmbeddedConfig(final Collection<String> nodeIPs) {
this.flatfile = writeFlatFile(nodeIPs);
this.cloudSize = nodeIPs.size();
}
private String writeFlatFile(final Collection<String> nodeIPs) {
final StringBuilder flatFileBuilder = new StringBuilder();
nodeIPs.forEach(nodeIP -> {
flatFileBuilder.append(nodeIP);
flatFileBuilder.append(":");
flatFileBuilder.append(H2O.H2O_DEFAULT_PORT); // All pods are expected to utilize the default H2O port
flatFileBuilder.append("\n");
});
return flatFileBuilder.toString();
}
@Override
public void notifyAboutEmbeddedWebServerIpPort(InetAddress ip, int port) {
if (H2O.SELF == null) {
throw new IllegalStateException("H2O.SELF is expected to be defined at this point!");
}
H2OCluster.setCurrentNodeInfo(new NodeInfo());
}
@Override
public void notifyAboutCloudSize(InetAddress ip, int port, InetAddress leaderIp, int leaderPort, int size) {
Log.info(String.format("Created cluster of size %d, leader node IP is '%s'", size, leaderIp.toString()));
if (size == cloudSize) {
cloudingFinished();
}
}
@Override
public boolean providesFlatfile() {
return true;
}
@Override
public String fetchFlatfile() {
return flatfile;
}
@Override
public void exit(int status) {
System.exit(status);
}
@Override
public void print() {
}
@Override
public boolean disableNonLeaderNodeAccess() {
return H2OCluster.isRunningOnKubernetes();
}
private static class NodeInfo implements H2OCluster.H2ONodeInfo {
@Override
public boolean isLeader() {
return H2O.SELF.isLeaderNode();
}
}
}
|
0
|
java-sources/ai/h2o/h2o-k8s-int/3.46.0.7/water
|
java-sources/ai/h2o/h2o-k8s-int/3.46.0.7/water/k8s/KubernetesEmbeddedConfigProvider.java
|
package water.k8s;
import water.init.AbstractEmbeddedH2OConfig;
import water.init.EmbeddedConfigProvider;
import java.util.Collection;
/**
* A configuration provider for H2O running in a Kubernetes cluster. It detects whether H2O is being run in a K8S
* environment; otherwise it remains inactive.
* <p>
* Uses potentially multiple strategies to discover H2O Pods on a Kubernetes cluster.
*/
public class KubernetesEmbeddedConfigProvider implements EmbeddedConfigProvider {
private boolean runningOnKubernetes = false;
private KubernetesEmbeddedConfig kubernetesEmbeddedConfig;
@Override
public void init() {
runningOnKubernetes = H2OCluster.isRunningOnKubernetes();
if (!runningOnKubernetes) {
return; // Do not initialize any configuration if H2O is not running in K8S-spawned container.
}
Collection<String> nodeIPs = H2OCluster.resolveNodeIPs();
kubernetesEmbeddedConfig = new KubernetesEmbeddedConfig(nodeIPs);
}
@Override
public boolean isActive() {
return runningOnKubernetes;
}
@Override
public AbstractEmbeddedH2OConfig getConfig() {
return kubernetesEmbeddedConfig;
}
}
|
0
|
java-sources/ai/h2o/h2o-logger/3.46.0.7/water
|
java-sources/ai/h2o/h2o-logger/3.46.0.7/water/logging/ConsoleLogger.java
|
package water.logging;
import java.io.PrintStream;
public class ConsoleLogger implements Logger {
@Override
public void trace(String message) {
log(0, message);
}
@Override
public void debug(String message) {
log(1, message);
}
@Override
public void info(String message) {
log(2, message);
}
@Override
public void warn(String message) {
log(3, message);
}
@Override
public void error(String message) {
log(4, message);
}
@Override
public void fatal(String message) {
log(5, message);
}
@Override
public boolean isTraceEnabled() {
return true;
}
@Override
public boolean isDebugEnabled() {
return true;
}
@Override
public boolean isInfoEnabled() {
return true;
}
@Override
public boolean isWarnEnabled() {
return true;
}
@Override
public boolean isErrorEnabled() {
return true;
}
@Override
public boolean isFatalEnabled() {
return true;
}
private void log(int level, String message) {
PrintStream ps;
if (level < 4) {
ps = System.out;
} else {
ps = System.err;
}
ps.println(message);
}
}
|
0
|
java-sources/ai/h2o/h2o-logger/3.46.0.7/water
|
java-sources/ai/h2o/h2o-logger/3.46.0.7/water/logging/Logger.java
|
package water.logging;
public interface Logger {
void trace(String message);
void debug(String message);
void info(String message);
void warn(String message);
void error(String message);
void fatal(String message);
boolean isTraceEnabled();
boolean isDebugEnabled();
boolean isInfoEnabled();
boolean isWarnEnabled();
boolean isErrorEnabled();
boolean isFatalEnabled();
}
|
0
|
java-sources/ai/h2o/h2o-logger/3.46.0.7/water
|
java-sources/ai/h2o/h2o-logger/3.46.0.7/water/logging/LoggerFactory.java
|
package water.logging;
public class LoggerFactory {
private static final String DEFAULT_SLF4J_CLASS_TO_CHECK = "org.slf4j.LoggerFactory";
private static final String DEFAULT_WATERLOG_CLASS_TO_CHECK = "water.util.Log";
private static final LoggerFactory INSTANCE = new LoggerFactory(DEFAULT_SLF4J_CLASS_TO_CHECK, DEFAULT_WATERLOG_CLASS_TO_CHECK);
private final String slf4jClassName;
private final boolean isSlf4JAvailable;
private final String waterLogClassName;
private final boolean isWaterLogAvailable;
private final Logger waterLogger;
LoggerFactory(String slf4jClass, String waterLogClass) {
slf4jClassName = slf4jClass;
waterLogClassName = waterLogClass;
isSlf4JAvailable = isSlf4JAvailable();
isWaterLogAvailable = isWaterLogAvailable();
waterLogger = (isWaterLogAvailable) ? tryToGetWaterLogger() : null;
}
/**
* Returns new logger for each invocation. Logger logs to water.util.Log / SLF4J / console depending on whether water.util.Log / SLF4J is available.
*
* @param clazz class from which getLogger() is called
* @return WaterLogger (water.util.Log adapter) if water.util.Log is on the classpath, else Slf4JLogger (SLF4J adapter) if SLF4J is on the classpath, else ConsoleLogger
*/
public static Logger getLogger(Class<?> clazz) {
return INSTANCE.getCustomLogger(clazz);
}
/**
* Returns new logger for each invocation. Logger logs to water.util.Log / SLF4J / console depending on whether water.util.Log / SLF4J is available.
*
* @param clazz class from which getLogger() is called
* @return WaterLogger (water.util.Log adapter) if water.util.Log is on the classpath, else Slf4JLogger (SLF4J adapter) if SLF4J is on the classpath, else ConsoleLogger
*/
public Logger getCustomLogger(Class<?> clazz) {
if (isWaterLogAvailable && waterLogger != null) {
return waterLogger;
} else if (isSlf4JAvailable) {
return new Slf4JLogger(clazz);
} else {
return new ConsoleLogger();
}
}
private Logger tryToGetWaterLogger() {
try {
return (Logger) Class.forName("water.util.WaterLogger").newInstance();
} catch (ClassNotFoundException | IllegalAccessException | InstantiationException e) {
return null;
}
}
/**
* Checks whether SLF4J is on the classpath.
*
* @return true if SLF4J is on the classpath, false if not.
*/
private boolean isSlf4JAvailable() {
try {
Class.forName(slf4jClassName);
return true;
} catch (ClassNotFoundException e) {
return false;
}
}
/**
* Checks whether water.util.Log is on the classpath.
*
* @return true if water.util.Log is on the classpath, false if not.
*/
private boolean isWaterLogAvailable() {
try {
Class.forName(waterLogClassName);
return true;
} catch (ClassNotFoundException | NoClassDefFoundError e) {
return false;
}
}
}
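// ---------------------------------------------------------------------------------------------
// Illustrative sketch (not part of the original sources): typical use of the factory above. The
// returned Logger is backed by water.util.Log, SLF4J, or the console depending on what is on the
// classpath, as described in the getLogger() javadoc.
class LoggerFactoryUsageSketch {
    private static final Logger LOG = LoggerFactory.getLogger(LoggerFactoryUsageSketch.class);
    static void logStartup() {
        if (LOG.isInfoEnabled()) {
            LOG.info("Component started");
        }
    }
}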
|
0
|
java-sources/ai/h2o/h2o-logger/3.46.0.7/water
|
java-sources/ai/h2o/h2o-logger/3.46.0.7/water/logging/LoggingLevel.java
|
package water.logging;
public enum LoggingLevel {
TRACE, DEBUG, INFO, WARN, ERROR;
}
|
0
|
java-sources/ai/h2o/h2o-logger/3.46.0.7/water
|
java-sources/ai/h2o/h2o-logger/3.46.0.7/water/logging/Slf4JLogger.java
|
package water.logging;
import org.slf4j.LoggerFactory;
public class Slf4JLogger implements Logger {
private org.slf4j.Logger logger;
public Slf4JLogger(Class<?> clazz) {
this.logger = LoggerFactory.getLogger(clazz);
}
@Override
public void trace(String message) {
logger.trace(message);
}
@Override
public void debug(String message) {
logger.debug(message);
}
@Override
public void info(String message) {
logger.info(message);
}
@Override
public void warn(String message) {
logger.warn(message);
}
@Override
public void error(String message) {
logger.error(message);
}
@Override
public void fatal(String message) {
logger.error(message);
}
@Override
public boolean isTraceEnabled() {
return logger.isTraceEnabled();
}
@Override
public boolean isDebugEnabled() {
return logger.isDebugEnabled();
}
@Override
public boolean isInfoEnabled() {
return logger.isInfoEnabled();
}
@Override
public boolean isWarnEnabled() {
return logger.isWarnEnabled();
}
@Override
public boolean isErrorEnabled() {
return logger.isErrorEnabled();
}
@Override
public boolean isFatalEnabled() {
return logger.isErrorEnabled();
}
}
|
0
|
java-sources/ai/h2o/h2o-logging-impl-classic/3.46.0.7/org/apache
|
java-sources/ai/h2o/h2o-logging-impl-classic/3.46.0.7/org/apache/log4j/H2OPropertyConfigurator.java
|
package org.apache.log4j;
import org.apache.log4j.spi.LoggerRepository;
import java.util.Properties;
/**
* Appends to an existing live log4j configuration rather than creating a new one
* from a complete properties file.
*
* This is used by embedded environments like Sparkling Water that don't want to
* blindly clobber the parent logger configuration.
*/
public class H2OPropertyConfigurator extends PropertyConfigurator {
@Override
public void doConfigure(Properties properties, LoggerRepository hierarchy) {
parseCatsAndRenderers(properties, hierarchy);
// We don't want to hold references to appenders preventing their
// garbage collection.
registry.clear();
}
}
|
0
|
java-sources/ai/h2o/h2o-logging-impl-classic/3.46.0.7/water
|
java-sources/ai/h2o/h2o-logging-impl-classic/3.46.0.7/water/util/LoggerBackend.java
|
package water.util;
import org.apache.log4j.H2OPropertyConfigurator;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.PropertyConfigurator;
import java.io.File;
import java.net.URL;
import java.util.Properties;
import java.util.function.Function;
public class LoggerBackend {
public static final Level[] L4J_LVLS = { Level.FATAL, Level.ERROR, Level.WARN, Level.INFO, Level.DEBUG, Level.TRACE };
public int _level;
public String _prefix;
public String _maxLogFileSize;
public boolean _launchedWithHadoopJar;
public boolean _haveInheritedLog4jConfiguration;
public Function<String, String> _getLogFilePath;
public Logger createLog4j() {
String h2oLog4jConfiguration = System.getProperty("h2o.log4j.configuration");
if (h2oLog4jConfiguration != null) {
// Try to configure via a file on local filesystem
if (new File(h2oLog4jConfiguration).exists()) {
PropertyConfigurator.configure(h2oLog4jConfiguration);
} else {
// Try to load file via classloader resource (e.g., from classpath)
URL confUrl = LoggerBackend.class.getClassLoader().getResource(h2oLog4jConfiguration);
if (confUrl != null) {
PropertyConfigurator.configure(confUrl);
}
}
} else {
// Create some default properties on the fly if we aren't using a provided configuration.
// H2O creates the log setup itself on the fly in code.
Properties p = new Properties();
try {
setLog4jProperties(p);
}
catch (Exception e) {
System.err.println("ERROR: failed in createLog4j, exiting now.");
e.printStackTrace();
return null;
}
// For the Hadoop case, force H2O to specify the logging setup since we don't care
// about any hadoop log setup, anyway.
//
// For the Sparkling Water case, we will have inherited the log4j configuration,
// so append to it rather than whack it.
if (!_launchedWithHadoopJar && _haveInheritedLog4jConfiguration) {
// Use a modified log4j property configurator to append rather than create a new log4j configuration.
H2OPropertyConfigurator.configure(p);
} else {
PropertyConfigurator.configure(p);
}
}
return Logger.getLogger("water.default");
}
private void setLog4jProperties(Properties p) {
String patternTail = _prefix + " %10.10t %5.5p %c: %m%n";
String pattern = "%d{MM-dd HH:mm:ss.SSS} " + patternTail;
p.setProperty("log4j.rootLogger", L4J_LVLS[_level] + ", console");
// H2O-wide logging
String appendersReferences = ", R1, R2, R3, R4, R5, R6";
String appenders = L4J_LVLS[_level] + appendersReferences;
for (String packageName : new String[] {"water", "ai.h2o", "hex"}) {
p.setProperty("log4j.logger." + packageName, appenders);
p.setProperty("log4j.logger.additivity." + packageName, "false");
}
p.setProperty("log4j.appender.console", "org.apache.log4j.ConsoleAppender");
p.setProperty("log4j.appender.console.Threshold", L4J_LVLS[_level].toString());
p.setProperty("log4j.appender.console.layout", "org.apache.log4j.PatternLayout");
p.setProperty("log4j.appender.console.layout.ConversionPattern", pattern);
p.setProperty("log4j.appender.R1", "org.apache.log4j.RollingFileAppender");
p.setProperty("log4j.appender.R1.Threshold", "TRACE");
p.setProperty("log4j.appender.R1.File", _getLogFilePath.apply("trace"));
p.setProperty("log4j.appender.R1.MaxFileSize", "1MB");
p.setProperty("log4j.appender.R1.MaxBackupIndex", "3");
p.setProperty("log4j.appender.R1.layout", "org.apache.log4j.PatternLayout");
p.setProperty("log4j.appender.R1.layout.ConversionPattern", pattern);
p.setProperty("log4j.appender.R2", "org.apache.log4j.RollingFileAppender");
p.setProperty("log4j.appender.R2.Threshold", "DEBUG");
p.setProperty("log4j.appender.R2.File", _getLogFilePath.apply("debug"));
p.setProperty("log4j.appender.R2.MaxFileSize", _maxLogFileSize);
p.setProperty("log4j.appender.R2.MaxBackupIndex", "3");
p.setProperty("log4j.appender.R2.layout", "org.apache.log4j.PatternLayout");
p.setProperty("log4j.appender.R2.layout.ConversionPattern", pattern);
p.setProperty("log4j.appender.R3", "org.apache.log4j.RollingFileAppender");
p.setProperty("log4j.appender.R3.Threshold", "INFO");
p.setProperty("log4j.appender.R3.File", _getLogFilePath.apply("info"));
p.setProperty("log4j.appender.R3.MaxFileSize", _maxLogFileSize);
p.setProperty("log4j.appender.R3.MaxBackupIndex", "3");
p.setProperty("log4j.appender.R3.layout", "org.apache.log4j.PatternLayout");
p.setProperty("log4j.appender.R3.layout.ConversionPattern", pattern);
p.setProperty("log4j.appender.R4", "org.apache.log4j.RollingFileAppender");
p.setProperty("log4j.appender.R4.Threshold", "WARN");
p.setProperty("log4j.appender.R4.File", _getLogFilePath.apply("warn"));
p.setProperty("log4j.appender.R4.MaxFileSize", "256KB");
p.setProperty("log4j.appender.R4.MaxBackupIndex", "3");
p.setProperty("log4j.appender.R4.layout", "org.apache.log4j.PatternLayout");
p.setProperty("log4j.appender.R4.layout.ConversionPattern", pattern);
p.setProperty("log4j.appender.R5", "org.apache.log4j.RollingFileAppender");
p.setProperty("log4j.appender.R5.Threshold", "ERROR");
p.setProperty("log4j.appender.R5.File", _getLogFilePath.apply("error"));
p.setProperty("log4j.appender.R5.MaxFileSize", "256KB");
p.setProperty("log4j.appender.R5.MaxBackupIndex", "3");
p.setProperty("log4j.appender.R5.layout", "org.apache.log4j.PatternLayout");
p.setProperty("log4j.appender.R5.layout.ConversionPattern", pattern);
p.setProperty("log4j.appender.R6", "org.apache.log4j.RollingFileAppender");
p.setProperty("log4j.appender.R6.Threshold", "FATAL");
p.setProperty("log4j.appender.R6.File", _getLogFilePath.apply("fatal"));
p.setProperty("log4j.appender.R6.MaxFileSize", "256KB");
p.setProperty("log4j.appender.R6.MaxBackupIndex", "3");
p.setProperty("log4j.appender.R6.layout", "org.apache.log4j.PatternLayout");
p.setProperty("log4j.appender.R6.layout.ConversionPattern", pattern);
// HTTPD logging
p.setProperty("log4j.logger.water.api.RequestServer", "TRACE, HTTPD");
p.setProperty("log4j.additivity.water.api.RequestServer", "false");
p.setProperty("log4j.appender.HTTPD", "org.apache.log4j.RollingFileAppender");
p.setProperty("log4j.appender.HTTPD.Threshold", "TRACE");
p.setProperty("log4j.appender.HTTPD.File", _getLogFilePath.apply("httpd"));
p.setProperty("log4j.appender.HTTPD.MaxFileSize", "1MB");
p.setProperty("log4j.appender.HTTPD.MaxBackupIndex", "3");
p.setProperty("log4j.appender.HTTPD.layout", "org.apache.log4j.PatternLayout");
p.setProperty("log4j.appender.HTTPD.layout.ConversionPattern", "%d{ISO8601} " + patternTail);
// Turn down the logging for some class hierarchies.
p.setProperty("log4j.logger.org.apache.http", "WARN" + appendersReferences);
p.setProperty("log4j.logger.com.amazonaws", "WARN" + appendersReferences);
p.setProperty("log4j.logger.org.apache.hadoop", "WARN" + appendersReferences);
p.setProperty("log4j.logger.org.jets3t.service", "WARN" + appendersReferences);
p.setProperty("log4j.logger.org.reflections.Reflections", "ERROR" + appendersReferences);
p.setProperty("log4j.logger.com.brsanthu.googleanalytics", "ERROR" + appendersReferences);
// Turn down the logging for external libraries that Orc parser depends on
p.setProperty("log4j.logger.org.apache.hadoop.util.NativeCodeLoader", "ERROR");
}
}
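// ---------------------------------------------------------------------------------------------
// Illustrative sketch (not part of the original sources): wiring the backend above by hand. The
// field values are made up; in H2O they come from the launch arguments and the node's log
// directory. _level indexes into L4J_LVLS, so 3 selects INFO.
class LoggerBackendUsageSketch {
    static Logger configureExampleLogging() {
        LoggerBackend backend = new LoggerBackend();
        backend._level = 3;                       // L4J_LVLS[3] == INFO
        backend._prefix = "10.0.0.1:54321";       // example node address used in the log pattern
        backend._maxLogFileSize = "3MB";
        backend._launchedWithHadoopJar = false;
        backend._haveInheritedLog4jConfiguration = false;
        backend._getLogFilePath = level -> "/tmp/h2o-logs/h2o_" + level + ".log";
        return backend.createLog4j();             // null signals that configuration failed
    }
}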
|
0
|
java-sources/ai/h2o/h2o-logging-impl-log4j2/3.46.0.7/water
|
java-sources/ai/h2o/h2o-logging-impl-log4j2/3.46.0.7/water/util/LoggerBackend.java
|
package water.util;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.logging.log4j.core.Filter;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.logging.log4j.core.config.builder.api.*;
import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;
import java.io.File;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;
public class LoggerBackend {
public static final Level[] L4J_LVLS = { Level.FATAL, Level.ERROR, Level.WARN, Level.INFO, Level.DEBUG, Level.TRACE };
public static final org.apache.logging.log4j.Level[] L4J_LOGGING_LVLS = {
org.apache.logging.log4j.Level.FATAL,
org.apache.logging.log4j.Level.ERROR,
org.apache.logging.log4j.Level.WARN,
org.apache.logging.log4j.Level.INFO,
org.apache.logging.log4j.Level.DEBUG,
org.apache.logging.log4j.Level.TRACE
};
public int _level;
public String _prefix;
public String _maxLogFileSize;
public boolean _launchedWithHadoopJar;
public boolean _haveInheritedLog4jConfiguration;
public Function<String, String> _getLogFilePath;
public Logger createLog4j() {
String h2oLog4jConfiguration = System.getProperty("h2o.log4j.configuration");
if (h2oLog4jConfiguration != null) {
// Try to configure via a file on local filesystem
File file = new File(h2oLog4jConfiguration);
if (file.exists()) {
Configurator.reconfigure(file.toURI());
} else {
// Try to load file via classloader resource (e.g., from classpath)
URL confUrl = LoggerBackend.class.getClassLoader().getResource(h2oLog4jConfiguration);
if (confUrl != null) {
try {
Configurator.reconfigure(confUrl.toURI());
} catch (URISyntaxException e) {
System.err.println("ERROR: failed in createLog4j, exiting now.");
e.printStackTrace();
return null;
}
}
}
} else {
try {
reconfigureLog4J();
} catch (Exception e) {
System.err.println("ERROR: failed in createLog4j, exiting now.");
e.printStackTrace();
return null;
}
// TODO: hadoop and sparkling water cases
}
return Logger.getLogger("water.default");
}
public void reconfigureLog4J() {
ConfigurationBuilder<BuiltConfiguration> builder = ConfigurationBuilderFactory.newConfigurationBuilder();
builder.setStatusLevel(L4J_LOGGING_LVLS[_level]);
builder.setConfigurationName("H2OLogConfiguration");
// configure appenders:
String patternTail = _prefix + " %10.10t %5.5p %c: %m%n";
String pattern = "%d{MM-dd HH:mm:ss.SSS} " + patternTail;
LayoutComponentBuilder layoutComponentBuilder = builder.newLayout("PatternLayout").addAttribute("pattern", pattern);
builder.add(builder.newAppender("Console", "Console")
.addAttribute("target", "SYSTEM_OUT")
.add(layoutComponentBuilder));
builder.add(builder.newAppender("stderr", "Console")
.addAttribute("target", "SYSTEM_ERR")
.add(builder.newFilter("ThresholdFilter", Filter.Result.ACCEPT, Filter.Result.DENY).addAttribute("level", Level.ERROR))
.add(layoutComponentBuilder));
builder.add(newRollingFileAppenderComponent(builder, "R1", "1MB", _getLogFilePath.apply("trace"), pattern, Level.TRACE));
builder.add(newRollingFileAppenderComponent(builder, "R2", _maxLogFileSize, _getLogFilePath.apply("debug"), pattern, Level.DEBUG));
builder.add(newRollingFileAppenderComponent(builder, "R3", _maxLogFileSize, _getLogFilePath.apply("info"), pattern, Level.INFO));
builder.add(newRollingFileAppenderComponent(builder, "R4", "256KB", _getLogFilePath.apply("warn"), pattern, Level.WARN));
builder.add(newRollingFileAppenderComponent(builder, "R5", "256KB", _getLogFilePath.apply("error"), pattern, Level.ERROR));
builder.add(newRollingFileAppenderComponent(builder, "R6", "256KB", _getLogFilePath.apply("fatal"), pattern, Level.FATAL));
builder.add(newRollingFileAppenderComponent(builder, "HTTPD", "1MB", _getLogFilePath.apply("httpd"), "%d{ISO8601} " + patternTail, Level.TRACE));
AppenderRefComponentBuilder consoleAppenderRef = builder.newAppenderRef("Console");
AppenderRefComponentBuilder stderrAppenderRef = builder.newAppenderRef("stderr");
// configure loggers:
List<AppenderRefComponentBuilder> appenderReferences = new ArrayList<>();
appenderReferences.add(builder.newAppenderRef("R1"));
appenderReferences.add(builder.newAppenderRef("R2"));
appenderReferences.add(builder.newAppenderRef("R3"));
appenderReferences.add(builder.newAppenderRef("R4"));
appenderReferences.add(builder.newAppenderRef("R5"));
appenderReferences.add(builder.newAppenderRef("R6"));
appenderReferences.add(consoleAppenderRef);
appenderReferences.add(stderrAppenderRef);
builder.add(newLoggerComponent(builder, "hex", appenderReferences));
builder.add(newLoggerComponent(builder, "water", appenderReferences));
builder.add(newLoggerComponent(builder, "ai.h2o", appenderReferences));
builder.add(builder.newRootLogger(String.valueOf(L4J_LVLS[_level])).add(consoleAppenderRef).add(stderrAppenderRef));
// Turn down the logging for some class hierarchies.
builder.add(newLoggerComponent(builder, "org.apache.http", appenderReferences, "WARN"));
builder.add(newLoggerComponent(builder, "com.amazonaws", appenderReferences, "WARN"));
builder.add(newLoggerComponent(builder, "org.apache.hadoop", appenderReferences, "WARN"));
builder.add(newLoggerComponent(builder, "org.jets3t.service", appenderReferences, "WARN"));
builder.add(newLoggerComponent(builder, "org.reflections.Reflections", appenderReferences, "ERROR"));
builder.add(newLoggerComponent(builder, "com.brsanthu.googleanalytics", appenderReferences, "ERROR"));
// Turn down the logging for external libraries that Orc parser depends on-->
builder.add(newLoggerComponent(builder, "org.apache.hadoop.util.NativeCodeLoader", appenderReferences, "ERROR"));
// HTTPD logging
appenderReferences = new ArrayList<>();
appenderReferences.add(builder.newAppenderRef("HTTPD"));
builder.add(newLoggerComponent(builder, "water.api.RequestServer", appenderReferences));
Configurator.reconfigure(builder.build());
}
AppenderComponentBuilder newRollingFileAppenderComponent(ConfigurationBuilder builder, String name, String sizeBasedTriggeringPolicyValue, String fileNameValue, String filePatternValue, Level thresholdFilterLevel) {
ComponentBuilder triggeringPolicy = builder.newComponent("Policies")
.addComponent(builder.newComponent("SizeBasedTriggeringPolicy").addAttribute("size", sizeBasedTriggeringPolicyValue));
LayoutComponentBuilder layoutBuilder = builder.newLayout("PatternLayout")
.addAttribute("pattern", filePatternValue);
FilterComponentBuilder thresholdFilter = builder.newFilter("ThresholdFilter", Filter.Result.ACCEPT, Filter.Result.DENY)
.addAttribute("level", thresholdFilterLevel.toString());
ComponentBuilder rolloverStrategy = builder.newComponent("DefaultRolloverStrategy").addAttribute("max", 3);
AppenderComponentBuilder appenderBuilder = builder.newAppender(name, "RollingFile")
.addAttribute("fileName", fileNameValue)
.addAttribute("filePattern", fileNameValue.concat(".%i"))
.add(thresholdFilter)
.addComponent(triggeringPolicy)
.addComponent(layoutBuilder)
.addComponent(rolloverStrategy);
return appenderBuilder;
}
LoggerComponentBuilder newLoggerComponent(ConfigurationBuilder builder, String name, List<AppenderRefComponentBuilder> appenderReferences) {
LoggerComponentBuilder loggerComponentBuilder = builder.newLogger(name);
for (AppenderRefComponentBuilder reference : appenderReferences) {
loggerComponentBuilder.add(reference);
}
loggerComponentBuilder.addAttribute("additivity", false);
return loggerComponentBuilder;
}
LoggerComponentBuilder newLoggerComponent(ConfigurationBuilder builder, String name, List<AppenderRefComponentBuilder> appenderReferences, String level) {
LoggerComponentBuilder loggerComponentBuilder = builder.newLogger(name);
for (AppenderRefComponentBuilder reference : appenderReferences) {
loggerComponentBuilder.add(reference);
}
loggerComponentBuilder.addAttribute("additivity", false);
loggerComponentBuilder.addAttribute("level", level);
return loggerComponentBuilder;
}
}
|
0
|
java-sources/ai/h2o/h2o-logging-safe4j/3.46.0.7/org/apache/logging/log4j/core
|
java-sources/ai/h2o/h2o-logging-safe4j/3.46.0.7/org/apache/logging/log4j/core/lookup/JndiLookup.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache license, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the license for the specific language governing permissions and
* limitations under the license.
*/
package org.apache.logging.log4j.core.lookup;
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.config.plugins.Plugin;
/**
* Dummy replacement for Log4j's original JndiLookup class.
* It is intended to replace the class affected by CVE-2021-44228 and CVE-2021-45046.
*/
@Plugin(name = "jndi", category = StrLookup.CATEGORY)
public class JndiLookup extends AbstractLookup {
static {
if (isSafe4j()) {
System.out.println("This build is patched against vulnerabilities CVE-2021-44228 and CVE-2021-45046");
}
}
@Override
public String lookup(final LogEvent event, final String key) {
return null;
}
/**
* Marker method - this allows us to detect that the JndiLookup class is H2O's placeholder
* and not the actual implementation.
*
* We can use it to develop an extension that verifies that H2O is not vulnerable.
*
* @return always true
*/
public static boolean isSafe4j() {
return true;
}
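// Illustrative sketch (not part of this class): an external check can use this marker to verify
// at runtime that the patched placeholder is on the classpath. The class and method names below
// exist in this build; the surrounding snippet itself is hypothetical.
//
//   boolean patched;
//   try {
//     Class<?> c = Class.forName("org.apache.logging.log4j.core.lookup.JndiLookup");
//     patched = (Boolean) c.getMethod("isSafe4j").invoke(null);
//   } catch (ReflectiveOperationException e) {
//     patched = false; // the original Log4j JndiLookup has no isSafe4j() marker
//   }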
}
|
0
|
java-sources/ai/h2o/h2o-orc-parser/3.46.0.7/water/parser
|
java-sources/ai/h2o/h2o-orc-parser/3.46.0.7/water/parser/orc/OrcParser.java
|
package water.parser.orc;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.ql.exec.vector.*;
import org.apache.hadoop.hive.ql.io.orc.Reader;
import org.apache.hadoop.hive.ql.io.orc.RecordReader;
import org.apache.hadoop.hive.ql.io.orc.StripeInformation;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.hive.serde2.objectinspector.*;
import org.joda.time.DateTime;
import org.joda.time.MutableDateTime;
import water.Futures;
import water.H2O;
import water.Job;
import water.Key;
import water.fvec.Vec;
import water.parser.*;
import water.util.ArrayUtils;
import water.util.StringUtils;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import static water.parser.orc.OrcUtil.isSupportedSchema;
import static water.parser.orc.OrcUtil.schemaToColumnType;
// Orc support
/**
* ORC parser for H2O distributed parsing subsystem.
*
* The overall plan for parsing an Orc file is:
* 1. Get a Reader rdr.
* 2. From the reader rdr, obtain:
* a. the number of columns, column types and column names (only primitive types are supported);
* b. the list of StripeInformation describing the stripes of data that need to be read;
* c. per-stripe information such as the number of rows and the data size in bytes.
* 3. Read the file in parallel, a whole number of stripes at a time.
* 4. Inside each stripe, read data out in batches of VectorizedRowBatch (1024 rows or less).
*
*/
public class OrcParser extends Parser {
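// A condensed sketch of the read plan described in the class comment (simplified from parseChunk
// below; variable names here are illustrative only):
//
//   Reader rdr = ...;                                      // one Reader per ORC file
//   for (StripeInformation stripe : rdr.getStripes()) {    // one chunk per stripe
//     RecordReader rows = rdr.rows(stripe.getOffset(), stripe.getDataLength(), ...);
//     VectorizedRowBatch batch = null;
//     long done = 0;
//     while (done != stripe.getNumberOfRows()) {
//       batch = rows.nextBatch(batch);                     // at most 1024 rows per batch
//       done += batch.count();                             // columns in batch.cols go to the ParseWriter
//     }
//     rows.close();
//   }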
/** Orc Info */
private final Reader orcFileReader; // can generate all the other fields from this reader
public static final int DAY_TO_MS = 24*3600*1000;
public static final int ADD_OFFSET = 8*3600*1000;
public static final int HOUR_OFFSET = 3600000; // in ms to offset for leap seconds, years
private MutableDateTime epoch = new MutableDateTime(); // used to help us account for leap seconds/years
private ArrayList<String> storeWarnings = new ArrayList<String>(); // store a list of warnings
OrcParser(ParseSetup setup, Key<Job> jobKey) {
super(setup, jobKey);
epoch.setDate(0); // used to figure out leap seconds, years
this.orcFileReader = ((OrcParser.OrcParseSetup) setup).orcFileReader;
}
private transient int _cidx;
private transient HashMap<Integer,HashMap<Number,byte[]>> _toStringMaps = new HashMap<>();
@Override protected ParseWriter streamParse(final InputStream is, final StreamParseWriter dout) throws IOException {
List<StripeInformation> stripesInfo = ((OrcParseSetup) this._setup).getStripes();
StreamParseWriter nextChunk = dout;
Futures fs = new Futures();
for(int i = 0; i < stripesInfo.size(); i++) {
parseChunk(i, null, nextChunk);
nextChunk.close(fs);
if(dout != nextChunk)
dout.reduce(nextChunk);
if(i < stripesInfo.size()-1) nextChunk = nextChunk.nextChunk();
}
return dout;
}
@Override protected ParseWriter streamParseZip( final InputStream is, final StreamParseWriter dout, InputStream bvs ) throws IOException {
throw new UnsupportedOperationException("H2O Orc Parser does not support parsing of zipped orc files");
}
/**
* This method calculates the number of stripes that will be read for each chunk. Since
* only single threading is supported in reading each stripe, we will never split one stripe
* over different chunks.
*
* @param chunkId chunk index, calculated as file size / chunk size. Because the file size includes
* headers and other overhead on top of the data, the number of chunks calculated this way can be
* higher than the number actually needed; if the chunk number is too high, the method returns
* without writing to dout.
* @param din ParseReader, not used for parsing Orc files
* @param dout ParseWriter, used to add data to the H2O frame
* @return the ParseWriter dout
*/
@Override
protected final ParseWriter parseChunk(int chunkId, ParseReader din, ParseWriter dout) {
_cidx = chunkId;
// only do something if within file size and the orc file is not empty
List<StripeInformation> stripesInfo = ((OrcParseSetup) this._setup).getStripes();
if(stripesInfo.size() == 0) {
dout.addError(new ParseWriter.ParseErr("Orc Parser: Empty file.", chunkId, 0L, -2L));
return dout; // empty file
}
OrcParseSetup setup = (OrcParseSetup) this._setup;
StripeInformation thisStripe = stripesInfo.get(chunkId); // get one stripe
// write one stripe of data to H2O frame
String [] orcTypes = setup.getColumnTypesString();
boolean[] toInclude = setup.getToInclude();
try {
RecordReader perStripe = orcFileReader.rows(thisStripe.getOffset(), thisStripe.getDataLength(),
setup.getToInclude(), null, setup.getColumnNames());
VectorizedRowBatch batch = null;
long rows = 0;
long rowCount = thisStripe.getNumberOfRows();
while (rows != rowCount) {
batch = perStripe.nextBatch(batch); // read orc file stripes in vectorizedRowBatch
long currentBatchRow = batch.count();
int nrows = (int)currentBatchRow;
if(currentBatchRow != nrows)
throw new IllegalArgumentException("got batch with too many records, does not fit in int");
ColumnVector[] dataVectors = batch.cols;
int colIndex = 0;
for (int col = 0; col < batch.numCols; ++col) { // read one column at a time;
if (toInclude[col + 1]) { // only write a column if we actually want it
if(_setup.getColumnTypes()[colIndex] != Vec.T_BAD)
write1column(dataVectors[col], orcTypes[colIndex], colIndex, nrows, dout);
else dout.addNAs(col,nrows);
colIndex++;
}
}
rows += currentBatchRow; // record number of rows of data actually read
}
byte [] col_types = _setup.getColumnTypes();
for(int i = 0; i < col_types.length; ++i){
if(col_types[i] == Vec.T_BAD)
dout.addNAs(i,(int)rowCount);
}
perStripe.close();
} catch(IOException ioe) {
throw new RuntimeException(ioe);
}
return dout;
}
/**
* This method writes one column of H2O data frame at a time.
*
* @param oneColumn
* @param columnType
* @param cIdx
* @param rowNumber
* @param dout
*/
private void write1column(ColumnVector oneColumn, String columnType, int cIdx, int rowNumber,ParseWriter dout) {
if(oneColumn.isRepeating && !oneColumn.noNulls) { // ALL NAs
for(int i = 0; i < rowNumber; ++i)
dout.addInvalidCol(cIdx);
} else switch (columnType.toLowerCase()) {
case "bigint":
case "boolean":
case "int":
case "smallint":
case "tinyint":
writeLongcolumn((LongColumnVector)oneColumn, cIdx, rowNumber, dout);
break;
case "float":
case "double":
writeDoublecolumn((DoubleColumnVector)oneColumn, cIdx, rowNumber, dout);
break;
case "numeric":
case "real":
if (oneColumn instanceof LongColumnVector)
writeLongcolumn((LongColumnVector)oneColumn, cIdx, rowNumber, dout);
else
writeDoublecolumn((DoubleColumnVector)oneColumn, cIdx, rowNumber, dout);
break;
case "string":
case "varchar":
case "char":
// case "binary": //FIXME: only reading it as string right now.
writeStringcolumn((BytesColumnVector)oneColumn, cIdx, rowNumber, dout);
break;
case "date":
case "timestamp":
writeTimecolumn((LongColumnVector)oneColumn, columnType, cIdx, rowNumber, dout);
break;
case "decimal":
writeDecimalcolumn((DecimalColumnVector)oneColumn, cIdx, rowNumber, dout);
break;
default:
throw new IllegalArgumentException("Unsupported Orc schema type: " + columnType);
}
}
/**
* This method takes care of leap-second and leap-year effects. A straight conversion from the
* number of days since the epoch does not come out quite right because of the leap seconds and
* leap years accumulated over the decades. However, when no correction is applied and a DateTime
* object is built from the raw value, the hour-of-day comes out non-zero; in that case we simply
* compute that offset and subtract it from the straightforward timestamp calculation.
*
* @param daysSinceEpoch number of days since the epoch (1970-01-01)
* @return the corrected timestamp corresponding to daysSinceEpoch
*/
private long correctTimeStamp(long daysSinceEpoch) {
long timestamp = (daysSinceEpoch*DAY_TO_MS+ADD_OFFSET);
DateTime date = new DateTime(timestamp);
int hour = date.hourOfDay().get();
if (hour == 0)
return timestamp;
else
return (timestamp-hour*HOUR_OFFSET);
}
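// Worked example (arithmetic only): daysSinceEpoch = 1 gives a raw timestamp of
// 1 * DAY_TO_MS + ADD_OFFSET = 86,400,000 + 28,800,000 = 115,200,000 ms. If the DateTime built
// from that value reports a non-zero hour-of-day (this depends on the JVM default time zone),
// that many hours are subtracted to push the result back to midnight of the intended day.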
/**
* This method writes one column of an H2O frame for column type timestamp. This is just a long that
* records the number of seconds since Jan 1, 2015.
*
* @param col
* @param cIdx
* @param rowNumber
* @param dout
*/
private void writeTimecolumn(LongColumnVector col, String columnType,int cIdx,
int rowNumber, ParseWriter dout) {
boolean timestamp = columnType.equals("timestamp");
long [] oneColumn = col.vector;
if(col.isRepeating) {
long val = timestamp ? oneColumn[0] / 1000000 : correctTimeStamp(oneColumn[0]);
for (int rowIndex = 0; rowIndex < rowNumber; rowIndex++)
dout.addNumCol(cIdx, val, 0);
} else if(col.noNulls) {
for (int rowIndex = 0; rowIndex < rowNumber; rowIndex++)
dout.addNumCol(cIdx, timestamp ? oneColumn[rowIndex] / 1000000 : correctTimeStamp(oneColumn[rowIndex]), 0);
} else {
boolean[] isNull = col.isNull;
for (int rowIndex = 0; rowIndex < rowNumber; rowIndex++) {
if (isNull[rowIndex])
dout.addInvalidCol(cIdx);
else
dout.addNumCol(cIdx, timestamp ? oneColumn[rowIndex] / 1000000 : correctTimeStamp(oneColumn[rowIndex]), 0);
}
}
}
/**
* This method writes a column of an H2O frame for column type Decimal. The value is written as
* the unscaled long together with a decimal exponent of minus the scale.
*
* @param col
* @param cIdx
* @param rowNumber
* @param dout
*/
private void writeDecimalcolumn(DecimalColumnVector col, int cIdx,
int rowNumber, ParseWriter dout) {
HiveDecimalWritable[] oneColumn = col.vector;
if(col.isRepeating) {
HiveDecimal hd = oneColumn[0].getHiveDecimal();
for (int rowIndex = 0; rowIndex < rowNumber; rowIndex++)
dout.addNumCol(cIdx, hd.unscaledValue().longValue(),-hd.scale());
} else if(col.noNulls) {
for (int rowIndex = 0; rowIndex < rowNumber; rowIndex++) {
HiveDecimal hd = oneColumn[rowIndex].getHiveDecimal();
dout.addNumCol(cIdx, hd.unscaledValue().longValue(),-hd.scale());
}
} else {
boolean [] isNull = col.isNull;
for (int rowIndex = 0; rowIndex < rowNumber; rowIndex++) {
if (isNull[rowIndex])
dout.addInvalidCol(cIdx);
else {
HiveDecimal hd = oneColumn[rowIndex].getHiveDecimal();
dout.addNumCol(cIdx, hd.unscaledValue().longValue(), -hd.scale());
}
}
}
}
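// Worked example (assuming ParseWriter.addNumCol(col, mantissa, exp) encodes mantissa * 10^exp):
// the decimal value 12.34 with scale 2 has unscaled value 1234, so it is written as
// addNumCol(cIdx, 1234, -2), i.e. 1234 * 10^-2 = 12.34.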
/**
* This method writes a column of an H2O frame for the Orc file column types string, varchar and
* char (and, at some point, binary).
*
* @param col
* @param cIdx
* @param rowNumber
* @param dout
*/
private void writeStringcolumn(BytesColumnVector col, int cIdx, int rowNumber, ParseWriter dout) {
BufferedString bs = new BufferedString();
if(col.isRepeating) {
assert col.length[0] >= 0 : getClass().getSimpleName() + ".writeStringcolumn/1: col.length[0]=" + col.length[0] + ", col.start[0]=" + col.start[0];
dout.addStrCol(cIdx, bs.set(col.vector[0], col.start[0], col.length[0]));
for (int rowIndex = 1; rowIndex < rowNumber; ++rowIndex)
dout.addStrCol(cIdx, bs);
} else if (col.noNulls) {
for (int rowIndex = 0; rowIndex < rowNumber; rowIndex++) {
int l = col.length[rowIndex];
assert l >= 0 : getClass().getSimpleName() + ".writeStringcolumn/2: col.length[rowIndex]=" + l + ", rowIndex=" + rowIndex;
dout.addStrCol(cIdx, bs.set(col.vector[rowIndex], col.start[rowIndex], l));
}
} else {
boolean [] isNull = col.isNull;
for (int rowIndex = 0; rowIndex < rowNumber; rowIndex++) {
if (isNull[rowIndex])
dout.addInvalidCol(cIdx);
else {
int l = col.length[rowIndex];
assert l >= 0 : getClass().getSimpleName() + ".writeStringcolumn/3: col.length[rowIndex]=" + l + ", rowIndex=" + rowIndex;
dout.addStrCol(cIdx, bs.set(col.vector[rowIndex], col.start[rowIndex], col.length[rowIndex]));
}
}
}
}
/**
* This method writes a column of an H2O frame for the Orc file column types float and double.
*
* @param vec
* @param colId
* @param rowNumber
* @param dout
*/
private void writeDoublecolumn(DoubleColumnVector vec, int colId, int rowNumber, ParseWriter dout) {
double[] oneColumn = vec.vector;
byte t = _setup.getColumnTypes()[colId];
switch(t) {
case Vec.T_CAT:
if(_toStringMaps.get(colId) == null)
_toStringMaps.put(colId,new HashMap<Number, byte[]>());
HashMap<Number,byte[]> map = _toStringMaps.get(colId);
BufferedString bs = new BufferedString();
if(vec.isRepeating) {
bs.set(StringUtils.toBytes(oneColumn[0]));
for (int i = 0; i < rowNumber; ++i)
dout.addStrCol(colId, bs);
} else if (vec.noNulls) {
for (int i = 0; i < rowNumber; i++) {
double d = oneColumn[i];
if(map.get(d) == null) // TODO probably more efficient if moved to the data output
map.put(d, StringUtils.toBytes(d));
dout.addStrCol(colId, bs.set(map.get(d)));
}
} else {
for (int i = 0; i < rowNumber; i++) {
boolean [] isNull = vec.isNull;
if (isNull[i])
dout.addInvalidCol(colId);
else {
double d = oneColumn[i];
if(map.get(d) == null)
map.put(d,StringUtils.toBytes(d));
dout.addStrCol(colId, bs.set(map.get(d)));
}
}
}
break;
default:
if(vec.isRepeating) {
for (int i = 0; i < rowNumber; ++i)
dout.addNumCol(colId, oneColumn[0]);
} else if (vec.noNulls) {
for (int rowIndex = 0; rowIndex < rowNumber; rowIndex++)
dout.addNumCol(colId, oneColumn[rowIndex]);
} else {
boolean [] isNull = vec.isNull;
for (int rowIndex = 0; rowIndex < rowNumber; rowIndex++) {
if (isNull[rowIndex]) dout.addInvalidCol(colId);
else dout.addNumCol(colId, oneColumn[rowIndex]);
}
}
break;
}
}
/**
* This method writes a column of an H2O frame for the Orc file column types boolean, bigint, int,
* smallint, tinyint and date.
*
* @param vec
* @param colId
* @param rowNumber
* @param dout
*/
private void writeLongcolumn(LongColumnVector vec, int colId, int rowNumber, ParseWriter dout) {
long[] oneColumn = vec.vector;
byte t = _setup.getColumnTypes()[colId];
switch(t) {
case Vec.T_CAT:
if(_toStringMaps.get(colId) == null)
_toStringMaps.put(colId,new HashMap<Number, byte[]>());
HashMap<Number,byte[]> map = _toStringMaps.get(colId);
BufferedString bs = new BufferedString();
if(vec.isRepeating) {
bs.set(StringUtils.toBytes(oneColumn[0]));
for (int i = 0; i < rowNumber; ++i)
dout.addStrCol(colId, bs);
} else if (vec.noNulls) {
for (int i = 0; i < rowNumber; i++) {
long l = oneColumn[i];
if(map.get(l) == null)
map.put(l,StringUtils.toBytes(l));
dout.addStrCol(colId, bs.set(map.get(l)));
}
} else {
for (int i = 0; i < rowNumber; i++) {
boolean [] isNull = vec.isNull;
if (isNull[i])
dout.addInvalidCol(colId);
else {
long l = oneColumn[i];
if(map.get(l) == null)
map.put(l,StringUtils.toBytes(l));
dout.addStrCol(colId, bs.set(map.get(l)));
}
}
}
break;
default:
if(vec.isRepeating) {
for (int i = 0; i < rowNumber; ++i)
dout.addNumCol(colId, oneColumn[0], 0);
} else if (vec.noNulls) {
for (int rowIndex = 0; rowIndex < rowNumber; rowIndex++) {
check_Min_Value(oneColumn[rowIndex], colId, rowNumber, dout);
dout.addNumCol(colId, oneColumn[rowIndex], 0);
}
} else {
for (int rowIndex = 0; rowIndex < rowNumber; rowIndex++) {
boolean [] isNull = vec.isNull;
if (isNull[rowIndex])
dout.addInvalidCol(colId);
else {
check_Min_Value(oneColumn[rowIndex], colId, rowNumber, dout);
dout.addNumCol(colId, oneColumn[rowIndex], 0);
}
}
}
break;
}
}
/**
* This method checks that any value written to a column of type long is greater than
* Long.MIN_VALUE; if it is not, a warning is passed to the user.
*
* @param l
* @param cIdx
* @param rowNumber
* @param dout
*/
private void check_Min_Value(long l, int cIdx, int rowNumber, ParseWriter dout) {
if (l <= Long.MIN_VALUE) {
String warning = "Orc Parser: Long.MIN_VALUE: " + l + " is found in column "+cIdx+" row "+rowNumber +
" of stripe "+_cidx +". This value is used for sentinel and will not be parsed correctly.";
dout.addError(new ParseWriter.ParseErr(warning, _cidx, rowNumber, -2L));
}
}
public static class OrcParseSetup extends ParseSetup {
// expand to include Orc specific fields
transient Reader orcFileReader;
String[] columnTypesString;
boolean[] toInclude;
String[] allColumnNames;
public OrcParseSetup(int ncols,
String[] columnNames,
byte[] ctypes,
String[][] domains,
String[][] naStrings,
String[][] data,
Reader orcReader,
String[] columntypes,
boolean[] toInclude,
String[] allColNames, ParseWriter.ParseErr[] errs) {
super(OrcParserProvider.ORC_INFO, (byte) '|', true, HAS_HEADER ,
ncols, columnNames, ctypes, domains, naStrings, data, errs);
this.orcFileReader = orcReader;
this.columnTypesString = columntypes;
this.toInclude = toInclude;
int[] skippedColumns = this.getSkippedColumns();
if (skippedColumns != null) {
for (int cindex:skippedColumns)
this.toInclude[cindex]=false; // set skipped columns to be false in order not to read it in.
}
this.allColumnNames = allColNames;
}
@Override
protected boolean isCompatible(ParseSetup setupB) {
return super.isCompatible(setupB) && Arrays.equals(getColumnTypes(),setupB.getColumnTypes());
}
@Override
protected Parser parser(Key jobKey) {
return new OrcParser(this, jobKey);
}
public Reader getOrcFileReader() {
return this.orcFileReader;
}
public String[] getColumnTypesString() {
return this.columnTypesString;
}
public void setColumnTypeStrings(String[] columnTypeStrings) {
this.columnTypesString = columnTypeStrings;
}
public boolean[] getToInclude() { return this.toInclude; }
public String[] getAllColNames() { return this.allColumnNames; }
public void setAllColNames(String[] columnNames) {
this.allColumnNames = columnNames;
}
public void setOrcFileReader(Reader orcFileReader) {
this.orcFileReader = orcFileReader;
this.stripesInfo = orcFileReader.getStripes();
}
private transient List<StripeInformation> stripesInfo;
public List<StripeInformation> getStripes() {return stripesInfo;}
}
// Types are flattened in a pre-order tree walk; here we just count the number of fields for
// non-primitive types, which are ignored for now.
static private int countStructFields(ObjectInspector x, ArrayList<String> allColumnNames) {
int res = 1;
switch(x.getCategory()) {
case STRUCT:
StructObjectInspector structObjectInspector = (StructObjectInspector) x;
List<StructField> allColumns = (List<StructField>) structObjectInspector.getAllStructFieldRefs(); // column info
for (StructField oneField : allColumns) {
allColumnNames.add(oneField.getFieldName());
res += countStructFields(oneField.getFieldObjectInspector(),allColumnNames);
}
break;
case LIST:
ListObjectInspector listObjectInspector = (ListObjectInspector) x;
allColumnNames.add("list");
res += countStructFields(listObjectInspector.getListElementObjectInspector(),allColumnNames);
break;
case MAP:
MapObjectInspector mapObjectInspector = (MapObjectInspector) x;
allColumnNames.add("mapKey");
res += countStructFields(mapObjectInspector.getMapKeyObjectInspector(),allColumnNames);
allColumnNames.add("mapValue");
res += countStructFields(mapObjectInspector.getMapValueObjectInspector(),allColumnNames);
break;
case UNION:
UnionObjectInspector unionObjectInspector = (UnionObjectInspector)x;
allColumnNames.add("union");
for( ObjectInspector xx:unionObjectInspector.getObjectInspectors())
res += countStructFields(xx,allColumnNames);
break;
case PRIMITIVE:break;
default: throw H2O.unimpl();
}
return res;
}
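// Example (hypothetical schema): a top-level field s of type struct<a:int, b:struct<c:string>>
// counts as 4 flattened slots (s, a, b, c); the nested names a, b, c are appended to
// allColumnNames in pre-order, and since struct is not a supported schema type the container
// slots stay false in the toInclude mask built by deriveParseSetup below.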
/*
* This function will derive information like column names, types and number from
* the inspector.
*/
static OrcParseSetup deriveParseSetup(Reader orcFileReader, StructObjectInspector insp) {
List<StructField> allColumns = (List<StructField>) insp.getAllStructFieldRefs(); // grab column info
List<StripeInformation> allStripes = orcFileReader.getStripes(); // grab stripe information
ArrayList<String> allColNames = new ArrayList<>();
boolean[] toInclude = new boolean[allColumns.size()+1];
int supportedFieldCnt = 0 ;
int colIdx = 0;
for (StructField oneField:allColumns) {
allColNames.add(oneField.getFieldName());
String columnType = oneField.getFieldObjectInspector().getTypeName();
if (columnType.toLowerCase().contains("decimal")) {
columnType = "decimal";
}
if (isSupportedSchema(columnType)) {
toInclude[colIdx+1] = true;
supportedFieldCnt++;
}
int cnt = countStructFields(oneField.getFieldObjectInspector(),allColNames);
if(cnt > 1)
toInclude = Arrays.copyOf(toInclude,toInclude.length + cnt-1);
colIdx+=cnt;
}
String [] allNames = allColNames.toArray(new String[allColNames.size()]);
String[] names = new String[supportedFieldCnt];
byte[] types = new byte[supportedFieldCnt];
String[][] domains = new String[supportedFieldCnt][];
String[] dataPreview = new String[supportedFieldCnt];
String[] dataTypes = new String[supportedFieldCnt];
ParseWriter.ParseErr[] errs = new ParseWriter.ParseErr[0];
// go through all column information
int columnIndex = 0;
for (StructField oneField : allColumns) {
String columnType = oneField.getFieldObjectInspector().getTypeName();
if (columnType.toLowerCase().contains("decimal"))
columnType = "decimal"; // get rid of strange attachment
if (isSupportedSchema(columnType)) {
names[columnIndex] = oneField.getFieldName();
types[columnIndex] = schemaToColumnType(columnType);
dataTypes[columnIndex] = columnType;
columnIndex++;
} else {
errs = ArrayUtils.append(errs, new ParseWriter.ParseErr("Orc Parser: Skipping field: "
+ oneField.getFieldName() + " because of unsupported type: " + columnType, -1, -1L, -2L));
}
}
// get size of each stripe
long[] stripeSizes = new long[allStripes.size()];
long fileSize = 0L;
long maxStripeSize = 0L;
for (int index = 0; index < allStripes.size(); index++) {
long stripeSize = allStripes.get(index).getDataLength();
if (stripeSize > maxStripeSize)
maxStripeSize = stripeSize;
fileSize = fileSize + stripeSize;
stripeSizes[index] = fileSize;
}
OrcParseSetup ps = new OrcParseSetup(
supportedFieldCnt,
names,
types,
domains,
null,
new String[][] { dataPreview },
orcFileReader,
dataTypes,
toInclude,
allNames,
errs
);
return ps;
}
}
|
0
|
java-sources/ai/h2o/h2o-orc-parser/3.46.0.7/water/parser
|
java-sources/ai/h2o/h2o-orc-parser/3.46.0.7/water/parser/orc/OrcParserProvider.java
|
package water.parser.orc;
import org.apache.hadoop.hive.ql.io.orc.OrcFile;
import org.apache.hadoop.hive.ql.io.orc.Reader;
import org.apache.hadoop.hive.ql.io.orc.StripeInformation;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import water.DKV;
import water.H2O;
import water.Job;
import water.Key;
import water.fvec.*;
import water.parser.*;
import water.persist.VecFileSystem;
import java.io.IOException;
import java.util.List;
/**
* Orc parser provider.
*/
public class OrcParserProvider extends ParserProvider {
public static class OrcParserInfo extends ParserInfo {
public OrcParserInfo() {
super("ORC", DefaultParserProviders.MAX_CORE_PRIO + 30, true, true, false);
}
public ParseMethod parseMethod(int nfiles, int nchunks){
int ncores_tot = H2O.NUMCPUS*H2O.CLOUD.size();
// prefer StreamParse if we have enough files to keep cluster busy
// ORC stream parse is more efficient
return
nfiles >= (ncores_tot >> 1) // got enough files to keep cluster busy
?ParseMethod.StreamParse:ParseMethod.StreamParse;//ParseMethod.DistributedParse;
}
}
/* Setup for this parser */
static ParserInfo ORC_INFO = new OrcParserInfo();
@Override
public ParserInfo info() {
return ORC_INFO;
}
@Override
public Parser createParser(ParseSetup setup, Key<Job> jobKey) {
return new OrcParser(setup, jobKey);
}
@Override
public ParseSetup guessSetup(ByteVec bv, byte [] bits, byte sep, int ncols, boolean singleQuotes,
int checkHeader, String[] columnNames, byte[] columnTypes,
String[][] domains, String[][] naStrings) {
if(bv instanceof FileVec)
return readSetup((FileVec)bv, columnNames, columnTypes);
throw new UnsupportedOperationException("ORC only works on Files");
}
/**
* Use only the first file to setup everything.
*
* @param inputs input keys
* @param requiredSetup user given parser setup
* @return
*/
@Override
public ParseSetup createParserSetup(Key[] inputs, ParseSetup requiredSetup) {
FileVec f;
Object frameOrVec = DKV.getGet(inputs[0]);
if (frameOrVec instanceof water.fvec.Frame)
f = (FileVec) ((Frame) frameOrVec).vec(0);
else
f = (FileVec) frameOrVec;
return readSetup(f, requiredSetup.getColumnNames(), requiredSetup.getColumnTypes());
}
private Reader getReader(FileVec f) throws IOException {
return OrcFile.createReader(VecFileSystem.VEC_PATH.getFileSystem(VecFileSystem.makeConfiguration(f)), VecFileSystem.VEC_PATH);
}
/*
public static final byte T_BAD = 0; // No none-NA rows (triple negative! all NAs or zero rows)
public static final byte T_UUID = 1; // UUID
public static final byte T_STR = 2; // String
public static final byte T_NUM = 3; // Numeric, but not categorical or time
public static final byte T_CAT = 4; // Integer, with a categorical/factor String mapping
public static final byte T_TIME = 5; // Long msec since the Unix Epoch - with a variety of display/parse options
*/
public static byte [][] supported_type_conversions = new byte[][]{
{0,0,0,0,0,0}, // T_BAD
{1,0,0,0,0,0}, // UUID
{1,0,0,0,1,1}, // T_STR
{1,0,0,0,0,0}, // T_NUM
{1,0,1,0,0,0}, // T_CAT
{1,0,0,0,0,0}, // T_TIME
};
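// How to read the matrix above: rows are indexed by the type guessed from the ORC schema, columns
// by the type requested by the user, and 1 marks an allowed override (see readSetup below). For
// example, supported_type_conversions[Vec.T_STR][Vec.T_CAT] == 1, so a string column may be forced
// to categorical, while supported_type_conversions[Vec.T_NUM][Vec.T_STR] == 0, so a numeric column
// may not be re-read as string.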
/**
* This method will create the reader and the other info needed to parse an orc file.
* In addition, it will not override the columnNames and columnTypes that the user
* may want to force upon it. However, at this point we only allow users to set column types to
* enum and ignore all other requests.
*
* @param f
* @param columnNames
* @param columnTypes
* @return
*/
public ParseSetup readSetup(FileVec f, String[] columnNames, byte[] columnTypes) {
try {
Reader orcFileReader = getReader(f);
StructObjectInspector insp = (StructObjectInspector) orcFileReader.getObjectInspector();
OrcParser.OrcParseSetup stp = OrcParser.deriveParseSetup(orcFileReader, insp);
// change back the columnNames and columnTypes if they are specified already
if (columnNames != null && (stp.getAllColNames().length == columnNames.length)) { // copy column name
stp.setColumnNames(columnNames);
stp.setAllColNames(columnNames);
}
if (columnTypes != null) { // copy enum type only
byte[] old_columnTypes = stp.getColumnTypes();
String[] old_columnTypeNames = stp.getColumnTypesString();
for (int index = 0; index < columnTypes.length; index++) {
if(columnTypes[index] != old_columnTypes[index]){
if(supported_type_conversions[old_columnTypes[index]][columnTypes[index]] == 1){
old_columnTypes[index] = columnTypes[index];
} else {
stp.addErrs(new ParseWriter.UnsupportedTypeOverride(f._key.toString(),Vec.TYPE_STR[old_columnTypes[index]], Vec.TYPE_STR[columnTypes[index]],columnNames[index]));
}
}
if (columnTypes[index] == Vec.T_CAT || columnTypes[index] == Vec.T_BAD || columnTypes[index] == Vec.T_TIME) // only copy the enum types
old_columnTypes[index] = columnTypes[index];
}
stp.setColumnTypes(old_columnTypes);
stp.setColumnTypeStrings(old_columnTypeNames);
}
List<StripeInformation> stripesInfo = orcFileReader.getStripes();
if(stripesInfo.size() == 0) { // empty file
f.setChunkSize(stp._chunk_size = (int)f.length());
return stp;
}
f.setNChunks(stripesInfo.size());
stp._chunk_size = f._chunkSize;
assert f.nChunks() == stripesInfo.size(); // ORC parser needs a one-to-one mapping between chunks and stripes (just ids, offsets do not matter)
return stp;
} catch(IOException ioe) {
throw new RuntimeException(ioe);
}
}
@Override
public ParseSetup setupLocal(Vec v, ParseSetup setup){
if(!(v instanceof FileVec)) throw H2O.unimpl("ORC only implemented for HDFS / NFS files");
try {
((OrcParser.OrcParseSetup)setup).setOrcFileReader(getReader((FileVec)v));
return setup;
} catch (IOException e) {throw new RuntimeException(e);}
}
}
|
0
|
java-sources/ai/h2o/h2o-orc-parser/3.46.0.7/water/parser
|
java-sources/ai/h2o/h2o-orc-parser/3.46.0.7/water/parser/orc/OrcUtil.java
|
package water.parser.orc;
import water.fvec.Vec;
/**
* Utilities to work with Orc schema.
*/
public final class OrcUtil {
/** Return true if the given schema type can be transformed
* into an H2O type.
*
* @param s Orc column type name as a string
* @return true if the schema type can be transformed into an H2O type
*/
public static boolean isSupportedSchema(String s) {
switch (s.toLowerCase()) {
case "boolean":
case "bigint": // long
// case "binary": // removed binary column type support for now
case "char":
case "date":
case "decimal":
case "double":
case "float":
case "int":
case "smallint":
case "string":
case "timestamp":
case "tinyint":
case "varchar":
case "enum":
return true;
default:
return false;
}
}
/**
* Transform Orc column types into H2O type.
*
* @param s Orc data type
* @return a byte representing H2O column type
* @throws IllegalArgumentException if schema is not supported
*/
public static byte schemaToColumnType(String s) {
switch (s.toLowerCase()) {
case "boolean":
case "smallint":
case "tinyint":
case "bigint": // FIXME: make sure this is fixed by Tomas.
case "int":
case "float":
case "double":
case "decimal":
return Vec.T_NUM;
case "timestamp":
case "date":
return Vec.T_TIME;
case "enum":
return Vec.T_CAT;
case "string":
case "varchar":
// case "binary": // Removed binary column type support for now
case "char":
return Vec.T_STR;
default:
throw new IllegalArgumentException("Unsupported Orc schema type: " + s);
}
}
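// Illustrative usage: isSupportedSchema("varchar") returns true and schemaToColumnType("varchar")
// yields Vec.T_STR, while isSupportedSchema("struct<a:int>") returns false, so such columns are
// skipped by the parser instead of being mapped to an H2O type.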
}
|
0
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser/parquet/ChunkConverter.java
|
package water.parser.parquet;
import org.apache.parquet.column.Dictionary;
import org.apache.parquet.io.api.Binary;
import org.apache.parquet.io.api.Converter;
import org.apache.parquet.io.api.GroupConverter;
import org.apache.parquet.io.api.PrimitiveConverter;
import org.apache.parquet.schema.*;
import org.joda.time.DateTime;
import org.joda.time.DateTimeUtils;
import org.joda.time.DateTimeZone;
import water.fvec.Vec;
import water.logging.Logger;
import water.parser.BufferedString;
import water.parser.ParseTime;
import water.parser.parquet.ext.DecimalUtils;
import water.util.StringUtils;
import java.time.Instant;
import static water.parser.parquet.TypeUtils.getTimestampAdjustmentFromUtcToLocalInMillis;
/**
* Implementation of Parquet's GroupConverter for H2O's chunks.
*
* ChunkConverter is responsible for converting Parquet data into Chunks. As opposed to regular
* Parquet converters, this converter doesn't actually produce any records; instead it writes the
* data to chunks using a provided ParseWriter. The (artificial) output of the converter is the
* number of the record that was written to the chunk.
*
* Note: It is meant to be used as a root converter.
*/
class ChunkConverter extends GroupConverter {
private final WriterDelegate _writer; // this guy actually performs the writing.
private final Converter[] _converters;
private long _currentRecordIdx = -1;
private boolean _adjustTimezone;
ChunkConverter(MessageType parquetSchema, byte[] chunkSchema, WriterDelegate writer, boolean[] keepColumns, boolean adjustTimezone) {
_writer = writer;
_adjustTimezone = adjustTimezone;
int colIdx = 0; // index to columns actually parsed
_converters = new Converter[chunkSchema.length];
int trueColumnIndex = 0; // count all columns including the skipped ones
for (Type parquetField : parquetSchema.getFields()) {
assert parquetField.isPrimitive();
if (keepColumns[trueColumnIndex]) {
_converters[trueColumnIndex] = newConverter(colIdx, chunkSchema[trueColumnIndex], parquetField.asPrimitiveType());
colIdx++;
} else {
_converters[trueColumnIndex] = nullConverter(chunkSchema[trueColumnIndex], parquetField.asPrimitiveType());
}
trueColumnIndex++;
}
}
@Override
public Converter getConverter(int fieldIndex) {
return _converters[fieldIndex];
}
@Override
public void start() {
_currentRecordIdx++;
_writer.startLine();
}
@Override
public void end() {
_writer.endLine();
}
long getCurrentRecordIdx() {
return _currentRecordIdx;
}
private PrimitiveConverter nullConverter(byte vecType, PrimitiveType parquetType) {
switch (vecType) {
case Vec.T_BAD:
case Vec.T_CAT:
case Vec.T_STR:
case Vec.T_UUID:
case Vec.T_TIME:
case Vec.T_NUM:
boolean dictSupport = parquetType.getOriginalType() == OriginalType.UTF8 || parquetType.getOriginalType() == OriginalType.ENUM;
return new NullStringConverter(dictSupport);
default:
throw new UnsupportedOperationException("Unsupported type " + vecType);
}
}
private static class NullStringConverter extends PrimitiveConverter {
private final boolean _dictionarySupport;
NullStringConverter(boolean dictionarySupport) {
_dictionarySupport = dictionarySupport;
}
@Override
public void addBinary(Binary value) { ; }
@Override
public boolean hasDictionarySupport() {
return _dictionarySupport;
}
@Override
public void setDictionary(Dictionary dictionary) {
}
@Override
public void addValueFromDictionary(int dictionaryId) {
}
@Override
public void addBoolean(boolean value) { }
@Override
public void addDouble(double value) { }
@Override
public void addFloat(float value) { }
@Override
public void addInt(int value) { }
@Override
public void addLong(long value) { }
}
private PrimitiveConverter newConverter(int colIdx, byte vecType, PrimitiveType parquetType) {
switch (vecType) {
case Vec.T_BAD:
case Vec.T_CAT:
case Vec.T_STR:
if (parquetType.getPrimitiveTypeName().equals(PrimitiveType.PrimitiveTypeName.BOOLEAN)) {
return new BooleanConverter(_writer, colIdx);
}
case Vec.T_UUID:
case Vec.T_TIME:
if (OriginalType.TIMESTAMP_MILLIS.equals(parquetType.getOriginalType()) || parquetType.getPrimitiveTypeName().equals(PrimitiveType.PrimitiveTypeName.INT96)) {
if (_adjustTimezone) {
long timestampAdjustmentMillis = getTimestampAdjustmentFromUtcToLocalInMillis();
return new TimestampConverter(colIdx, _writer, timestampAdjustmentMillis);
} else {
return new TimestampConverter(colIdx, _writer, 0L);
}
} else if (OriginalType.DATE.equals(parquetType.getOriginalType()) || parquetType.getPrimitiveTypeName().equals(PrimitiveType.PrimitiveTypeName.INT32)){
return new DateConverter(colIdx, _writer);
} else {
boolean dictSupport = parquetType.getOriginalType() == OriginalType.UTF8 || parquetType.getOriginalType() == OriginalType.ENUM;
return new StringConverter(_writer, colIdx, dictSupport);
}
case Vec.T_NUM:
if (OriginalType.DECIMAL.equals(parquetType.getOriginalType()))
return new DecimalConverter(colIdx, parquetType.getDecimalMetadata(), _writer);
else
return new NumberConverter(colIdx, _writer);
default:
throw new UnsupportedOperationException("Unsupported type " + vecType);
}
}
private static class BooleanConverter extends PrimitiveConverter {
private BufferedString TRUE = new BufferedString("True"); // note: this cannot be static - some BufferedString ops are not thread safe!
private BufferedString FALSE = new BufferedString("False");
private final int _colIdx;
private final WriterDelegate _writer;
BooleanConverter(WriterDelegate writer, int colIdx) {
_colIdx = colIdx;
_writer = writer;
}
@Override
public void addBoolean(boolean value) {
BufferedString bsValue = value ? TRUE : FALSE;
_writer.addStrCol(_colIdx, bsValue);
}
}
private static class StringConverter extends PrimitiveConverter {
private final BufferedString _bs = new BufferedString();
private final int _colIdx;
private final WriterDelegate _writer;
private final boolean _dictionarySupport;
private String[] _dict;
StringConverter(WriterDelegate writer, int colIdx, boolean dictionarySupport) {
_colIdx = colIdx;
_writer = writer;
_dictionarySupport = dictionarySupport;
}
@Override
public void addBinary(Binary value) {
writeStrCol(StringUtils.bytesOf(value.toStringUsingUTF8()));
}
@Override
public boolean hasDictionarySupport() {
return _dictionarySupport;
}
@Override
public void setDictionary(Dictionary dictionary) {
_dict = new String[dictionary.getMaxId() + 1];
for (int i = 0; i <= dictionary.getMaxId(); i++) {
_dict[i] = dictionary.decodeToBinary(i).toStringUsingUTF8();
}
}
@Override
public void addValueFromDictionary(int dictionaryId) {
writeStrCol(StringUtils.bytesOf(_dict[dictionaryId]));
}
private void writeStrCol(byte[] data) {
_bs.set(data);
_writer.addStrCol(_colIdx, _bs);
}
}
private static class NumberConverter extends PrimitiveConverter {
private final int _colIdx;
private final WriterDelegate _writer;
private final BufferedString _bs = new BufferedString();
NumberConverter(int _colIdx, WriterDelegate _writer) {
this._colIdx = _colIdx;
this._writer = _writer;
}
@Override
public void addBoolean(boolean value) {
_writer.addNumCol(_colIdx, value ? 1 : 0);
}
@Override
public void addDouble(double value) {
_writer.addNumCol(_colIdx, value);
}
@Override
public void addFloat(float value) {
_writer.addNumCol(_colIdx, value);
}
@Override
public void addInt(int value) {
_writer.addNumCol(_colIdx, value, 0);
}
@Override
public void addLong(long value) {
_writer.addNumCol(_colIdx, value, 0);
}
@Override
public void addBinary(Binary value) {
_bs.set(StringUtils.bytesOf(value.toStringUsingUTF8()));
_writer.addStrCol(_colIdx, _bs);
}
}
private static class DecimalConverter extends PrimitiveConverter {
private final int _colIdx;
private final WriterDelegate _writer;
private final int _precision;
private final int _scale;
DecimalConverter(int colIdx, DecimalMetadata dm, WriterDelegate writer) {
_colIdx = colIdx;
_precision = dm.getPrecision();
_scale = dm.getScale();
_writer = writer;
}
@Override
public void addBoolean(boolean value) {
throw new UnsupportedOperationException("Boolean type is not supported by DecimalConverter");
}
@Override
public void addDouble(double value) {
throw new UnsupportedOperationException("Double type is not supported by DecimalConverter");
}
@Override
public void addFloat(float value) {
throw new UnsupportedOperationException("Float type is not supported by DecimalConverter");
}
@Override
public void addInt(int value) {
_writer.addNumCol(_colIdx, value, -_scale);
}
@Override
public void addLong(long value) {
_writer.addNumCol(_colIdx, value, -_scale);
}
@Override
public void addBinary(Binary value) {
_writer.addNumCol(_colIdx, DecimalUtils.binaryToDecimal(value, _precision, _scale).doubleValue());
}
}
private static class TimestampConverter extends PrimitiveConverter {
private final int _colIdx;
private final WriterDelegate _writer;
private final long timestampAdjustmentMillis;
TimestampConverter(int colIdx, WriterDelegate writer, long timestampAdjustmentMillis) {
this._colIdx = colIdx;
this._writer = writer;
this.timestampAdjustmentMillis = timestampAdjustmentMillis;
}
@Override
public void addLong(long value) {
_writer.addNumCol(_colIdx, adjustTimeStamp(value), 0);
}
@Override
public void addBinary(Binary value) {
final long timestampMillis = ParquetInt96TimestampConverter.getTimestampMillis(value);
_writer.addNumCol(_colIdx, adjustTimeStamp(timestampMillis));
}
private long adjustTimeStamp(long ts) {
return ts + timestampAdjustmentMillis;
}
}
private static class DateConverter extends PrimitiveConverter {
private final static long EPOCH_MILLIS = Instant.EPOCH.toEpochMilli();
private final static long MILLIS_IN_A_DAY = 24 * 60 * 60 * 1000;
private final int _colIdx;
private final WriterDelegate _writer;
DateConverter(int _colIdx, WriterDelegate _writer) {
this._colIdx = _colIdx;
this._writer = _writer;
}
@Override
public void addInt(int numberOfDaysFromUnixEpoch) {
final long parquetDateEpochMillis = EPOCH_MILLIS + numberOfDaysFromUnixEpoch * MILLIS_IN_A_DAY;
_writer.addNumCol(_colIdx, parquetDateEpochMillis);
}
}
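// Worked example for DateConverter.addInt (values follow directly from the constants above):
// addInt(1), i.e. one day after the Unix epoch in Parquet DATE encoding, yields
// EPOCH_MILLIS + 1 * MILLIS_IN_A_DAY = 0 + 86,400,000 ms, which is written through addNumCol.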
}
|
0
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser/parquet/ChunkReadSupport.java
|
package water.parser.parquet;
import org.apache.hadoop.conf.Configuration;
import org.apache.parquet.hadoop.api.InitContext;
import org.apache.parquet.hadoop.api.ReadSupport;
import org.apache.parquet.io.api.RecordMaterializer;
import org.apache.parquet.schema.MessageType;
import java.util.Map;
public class ChunkReadSupport extends ReadSupport<Long> {
private WriterDelegate _writer;
private byte[] _chunkSchema;
private boolean[] _keepColumns;
private boolean _adjustTimezone;
public ChunkReadSupport(WriterDelegate writer, byte[] chunkSchema, boolean[] keepcolumns, boolean adjustTimezone) {
_writer = writer;
_chunkSchema = chunkSchema;
_keepColumns = keepcolumns;
_adjustTimezone = adjustTimezone;
}
@Override
public ReadContext init(InitContext context) {
return new ReadContext(context.getFileSchema());
}
@Override
public RecordMaterializer<Long> prepareForRead(Configuration configuration, Map<String, String> keyValueMetaData,
MessageType fileSchema, ReadContext readContext) {
return new ChunkRecordMaterializer(fileSchema, _chunkSchema, _writer, _keepColumns, _adjustTimezone);
}
}
|
0
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser/parquet/ChunkRecordMaterializer.java
|
package water.parser.parquet;
import org.apache.parquet.io.api.GroupConverter;
import org.apache.parquet.io.api.RecordMaterializer;
import org.apache.parquet.schema.MessageType;
/**
* Implementation of Parquet's RecordMaterializer for Chunks
*
* This implementation doesn't directly return any records. The rows are written to Chunks
* indirectly using a ParseWriter and function getCurrentRecord returns the index of the record
* in the current chunk.
*/
class ChunkRecordMaterializer extends RecordMaterializer<Long> {
private ChunkConverter _converter;
ChunkRecordMaterializer(MessageType parquetSchema, byte[] chunkSchema, WriterDelegate writer, boolean[] keepColumns, boolean adjustTimezone) {
_converter = new ChunkConverter(parquetSchema, chunkSchema, writer, keepColumns, adjustTimezone);
}
@Override
public Long getCurrentRecord() {
return _converter.getCurrentRecordIdx();
}
@Override
public GroupConverter getRootConverter() {
return _converter;
}
}
|
0
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser/parquet/FrameParquetExporter.java
|
package water.parser.parquet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.parquet.column.ParquetProperties;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.example.data.simple.SimpleGroupFactory;
import org.apache.parquet.hadoop.ParquetFileWriter;
import org.apache.parquet.hadoop.api.WriteSupport;
import org.apache.parquet.hadoop.example.GroupWriteSupport;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;
import org.apache.parquet.schema.MessageType;
import water.H2O;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import static org.apache.parquet.hadoop.metadata.CompressionCodecName.GZIP;
import static org.apache.parquet.hadoop.metadata.CompressionCodecName.UNCOMPRESSED;
import static org.apache.parquet.schema.MessageTypeParser.parseMessageType;
import static water.fvec.Vec.*;
import static water.parser.parquet.TypeUtils.getTimestampAdjustmentFromUtcToLocalInMillis;
import org.apache.parquet.hadoop.ParquetWriter;
import org.apache.hadoop.fs.Path;
import water.parser.BufferedString;
import water.persist.PersistHdfs;
import java.io.File;
import java.io.IOException;
public class FrameParquetExporter {
public void export(H2O.H2OCountedCompleter<?> completer, String path, Frame frame, boolean force, String compression, boolean writeChecksum, boolean tzAdjustFromLocal) {
File f = new File(path);
new FrameParquetExporter.PartExportParquetTask(
completer,
f.getPath(),
generateMessageTypeString(frame),
frame.names(),
frame.types(),
frame.domains(),
force,
compression,
writeChecksum,
tzAdjustFromLocal
).dfork(frame);
}
private static class PartExportParquetTask extends MRTask<PartExportParquetTask> {
final String _path;
final CompressionCodecName _compressionCodecName;
final String _messageTypeString;
final String[] _colNames;
final byte[] _colTypes;
final String[][] _domains;
final boolean _force;
final boolean _writeChecksum;
final boolean _tzAdjustFromLocal;
PartExportParquetTask(H2O.H2OCountedCompleter<?> completer, String path, String messageTypeString,
String[] colNames, byte[] colTypes, String[][] domains,
boolean force, String compression, boolean writeChecksum, boolean tzAdjustFromLocal) {
super(completer);
_path = path;
_compressionCodecName = getCompressionCodecName(compression);
_messageTypeString = messageTypeString;
_colNames = colNames;
_colTypes = colTypes;
_domains = domains;
_force = force;
_writeChecksum = writeChecksum;
_tzAdjustFromLocal = tzAdjustFromLocal;
}
CompressionCodecName getCompressionCodecName(String compression) {
if (compression == null)
return UNCOMPRESSED;
switch (compression.toLowerCase()) {
case "gzip":
return GZIP;
case "lzo":
return CompressionCodecName.LZO;
case "snappy":
return CompressionCodecName.SNAPPY;
default:
throw new RuntimeException("Compression " + compression + "is not supported for parquet export.");
}
}
ParquetFileWriter.Mode getMode(boolean force) {
return force ? ParquetFileWriter.Mode.OVERWRITE : ParquetFileWriter.Mode.CREATE;
}
@Override
public void map(Chunk[] cs) {
Chunk anyChunk = cs[0];
int partIdx = anyChunk.cidx();
String partPath = _path + "/part-m-" + String.valueOf(100000 + partIdx).substring(1);
SimpleGroupFactory fact = new SimpleGroupFactory(parseMessageType(_messageTypeString));
try (ParquetWriter<Group> writer = buildWriter(new Path(partPath), _compressionCodecName, PersistHdfs.CONF, parseMessageType(_messageTypeString), getMode(_force), _writeChecksum)) {
String currColName;
byte currColType;
long timeStampAdjustment = _tzAdjustFromLocal ? getTimestampAdjustmentFromUtcToLocalInMillis() : 0L;
for (int i = 0; i < anyChunk._len; i++) {
Group group = fact.newGroup();
for (int j = 0; j < cs.length; j++) {
currColName = _colNames[j];
currColType = _colTypes[j];
switch (currColType) {
case (T_UUID):
case (T_TIME):
long timestamp = cs[j].at8(i);
long adjustedTimestamp = timestamp - timeStampAdjustment;
group = group.append(currColName, adjustedTimestamp);
break;
case (T_STR):
if (!cs[j].isNA(i)) {
group = group.append(currColName, cs[j].atStr(new BufferedString(), i).toString());
}
break;
case (T_CAT):
if (cs[j].isNA(i)) {
group = group.append(currColName, "");
} else {
group = group.append(currColName, _domains[j][(int) cs[j].at8(i)]);
}
break;
case (T_NUM):
case (T_BAD):
default:
group = group.append(currColName, cs[j].atd(i));
break;
}
}
writer.write(group);
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
private static String generateMessageTypeString(Frame frame) {
StringBuilder mb = new StringBuilder("message export_type { ");
String currName;
for (int i = 0; i < frame.numCols(); i++) {
currName = frame._names[i];
switch (frame.types()[i]) {
case (T_TIME):
mb.append("optional int64 ").append(currName).append(" (TIMESTAMP_MILLIS);");
break;
case (T_NUM):
case (T_BAD):
mb.append("optional double ").append(currName).append("; ");
break;
case (T_STR):
case (T_CAT):
mb.append("optional BINARY ").append(currName).append(" (UTF8); ");
break;
case (T_UUID):
mb.append("optional fixed_len_byte_array(16) ").append(currName).append(" (UUID); ");
break;
}
}
mb.append("} ");
return mb.toString();
}
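// Illustrative output (hypothetical frame with a numeric column "x", a categorical column "y"
// and a time column "t"); spacing is approximate:
//   message export_type { optional double x; optional BINARY y (UTF8); optional int64 t (TIMESTAMP_MILLIS); }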
private static ParquetWriter<Group> buildWriter(Path path, CompressionCodecName compressionCodecName, Configuration configuration, MessageType schema, ParquetFileWriter.Mode mode, boolean writeChecksum) throws IOException {
GroupWriteSupport.setSchema(schema, configuration);
// The filesystem is cached for a given path and configuration,
// therefore the following modification on the fs is a bit hacky as another process could use the same instance.
// However, given the current use case and the fact that the change impacts only the way files are written, it should be on the safe side.
FileSystem fs = path.getFileSystem(configuration);
fs.setWriteChecksum(writeChecksum);
return new ParquetWriter.Builder(path) {
@Override
protected ParquetWriter.Builder self() {
return this;
}
@Override
protected WriteSupport<Group> getWriteSupport(Configuration conf) {
return new GroupWriteSupport();
}
}
.self()
.withCompressionCodec(compressionCodecName)
.withConf(configuration)
.withWriteMode(mode)
.build();
}
}
|
0
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser/parquet/ParquetExporter.java
|
package water.parser.parquet;
import water.H2O;
import water.fvec.Frame;
import water.parser.BinaryFormatExporter;
import water.util.ExportFileFormat;
public class ParquetExporter implements BinaryFormatExporter {
@Override
public H2O.H2OCountedCompleter export(Frame frame, String path, boolean force, String compression, boolean writeChecksum, boolean tzAdjustFromLocal) {
return new ExportParquetDriver(frame, path, force, compression, writeChecksum, tzAdjustFromLocal);
}
@Override
public boolean supports(ExportFileFormat format) {
return ExportFileFormat.parquet.equals(format);
}
private class ExportParquetDriver extends H2O.H2OCountedCompleter<ExportParquetDriver> {
Frame _frame;
String _path;
boolean _force;
String _compression;
boolean _writeChecksum;
boolean _tzAdjustFromLocal;
public ExportParquetDriver(Frame frame, String path, boolean force, String compression, boolean writeChecksum, boolean tzAdjustFromLocal) {
_frame = frame;
_path = path;
_force = force;
_compression = compression;
_writeChecksum = writeChecksum;
_tzAdjustFromLocal = tzAdjustFromLocal;
}
@Override
public void compute2() {
// multipart export
FrameParquetExporter parquetExporter = new FrameParquetExporter();
parquetExporter.export(this, _path, _frame, _force, _compression, _writeChecksum, _tzAdjustFromLocal);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser/parquet/ParquetInt96TimestampConverter.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* Copyright H2O.ai Limited
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package water.parser.parquet;
import org.apache.parquet.io.api.Binary;
import java.util.concurrent.TimeUnit;
/**
* Class for decoding INT96 encoded parquet timestamp to timestamp millis in GMT.
* <p>
* This class is an equivalent of org.apache.hadoop.hive.ql.io.parquet.timestamp.NanoTime
* that produces fewer intermediate objects during decoding.
*
* This class is a modified version of ParquetTimestampUtils from Presto project.
*/
final class ParquetInt96TimestampConverter {
private static final int JULIAN_EPOCH_OFFSET_DAYS = 2_440_588;
private static final long MILLIS_IN_DAY = TimeUnit.DAYS.toMillis(1);
private static final long NANOS_PER_MILLISECOND = TimeUnit.MILLISECONDS.toNanos(1);
private static final byte BYTES_IN_INT96_TIMESTAMP = 12;
private ParquetInt96TimestampConverter() {
}
/**
* Returns GMT timestamp from binary encoded parquet timestamp (12 bytes - julian date + time of day nanos).
*
* @param timestampBinary INT96 parquet timestamp
* @return timestamp in millis, GMT timezone
*/
public static long getTimestampMillis(Binary timestampBinary) {
if (timestampBinary.length() != BYTES_IN_INT96_TIMESTAMP) {
throw new IllegalArgumentException("Parquet timestamp must be 12 bytes long, actual " + timestampBinary.length());
}
byte[] bytes = timestampBinary.getBytes();
// little endian encoding - bytes are read in inverted order
long timeOfDayNanos = TypeUtils.longFromBytes(bytes[7], bytes[6], bytes[5], bytes[4], bytes[3], bytes[2], bytes[1], bytes[0]);
int julianDay = TypeUtils.intFromBytes(bytes[11], bytes[10], bytes[9], bytes[8]);
return julianDayToMillis(julianDay) + (timeOfDayNanos / NANOS_PER_MILLISECOND);
}
/**
* @param julianDay days since the beginning of the Julian calendar
* @return millis since epoch
*/
private static long julianDayToMillis(int julianDay) {
return (julianDay - JULIAN_EPOCH_OFFSET_DAYS) * MILLIS_IN_DAY;
}
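// Worked example: julianDay = 2_440_589 is one day past JULIAN_EPOCH_OFFSET_DAYS, so
// julianDayToMillis returns 1 * MILLIS_IN_DAY = 86,400,000 ms (1970-01-02T00:00 GMT); the
// time-of-day nanos decoded from the first 8 bytes are then added after conversion to millis.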
}
|
0
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser/parquet/ParquetParser.java
|
package water.parser.parquet;
import org.apache.parquet.format.converter.ParquetMetadataConverter;
import org.apache.parquet.hadoop.metadata.BlockMetaData;
import org.apache.parquet.hadoop.metadata.ParquetMetadata;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.OriginalType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Type;
import water.Job;
import water.Key;
import water.exceptions.H2OUnsupportedDataFileException;
import water.fvec.ByteVec;
import water.fvec.Chunk;
import water.fvec.Vec;
import water.parser.*;
import water.util.IcedHashMapGeneric;
import water.util.Log;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import static org.apache.parquet.hadoop.ParquetFileWriter.MAGIC;
/**
* Parquet parser for H2O distributed parsing subsystem.
*/
public class ParquetParser extends Parser {
private static final int MAX_PREVIEW_RECORDS = 1000;
private final byte[] _metadata;
ParquetParser(ParseSetup setup, Key<Job> jobKey) {
super(setup, jobKey);
_metadata = ((ParquetParseSetup) setup).parquetMetadata;
}
@Override
protected final StreamParseWriter sequentialParse(Vec vec, final StreamParseWriter dout) {
final ParquetMetadata metadata = VecParquetReader.readFooter(_metadata);
final int nChunks = vec.nChunks();
final long totalRecs = totalRecords(metadata);
final long nChunkRecs = ((totalRecs / nChunks) + (totalRecs % nChunks > 0 ? 1 : 0));
if (nChunkRecs != (int) nChunkRecs) {
throw new IllegalStateException("Unsupported Parquet file. Too many records (#" + totalRecs + ", nChunks=" + nChunks + ").");
}
final WriterDelegate w = new WriterDelegate(dout, _setup.getColumnTypes().length);
final VecParquetReader reader = new VecParquetReader(vec, metadata, w, _setup.getColumnTypes(), _keepColumns, _setup.gettzAdjustToLocal());
StreamParseWriter nextChunk = dout;
try {
long parsedRecs = 0;
for (int i = 0; i < nChunks; i++) {
Long recordNumber;
do {
recordNumber = reader.read();
if (recordNumber != null)
parsedRecs++;
} while ((recordNumber != null) && (w.lineNum() < nChunkRecs));
if (_jobKey != null)
Job.update(vec.length() / nChunks, _jobKey);
nextChunk.close();
dout.reduce(nextChunk);
nextChunk = nextChunk.nextChunk();
w.setWriter(nextChunk);
}
assert parsedRecs == totalRecs;
} catch (IOException e) {
throw new RuntimeException("Failed to parse records", e);
}
return dout;
}
private long totalRecords(ParquetMetadata metadata) {
long nr = 0;
for (BlockMetaData meta : metadata.getBlocks()) {
nr += meta.getRowCount();
}
return nr;
}
@Override
protected final ParseWriter parseChunk(int cidx, ParseReader din, ParseWriter dout) {
if (! (din instanceof FVecParseReader)) {
// TODO: Should we modify the interface to expose the underlying chunk for non-streaming parsers?
throw new IllegalStateException("We only accept parser readers backed by a Vec (no streaming support!).");
}
Chunk chunk = ((FVecParseReader) din).getChunk();
Vec vec = chunk.vec();
// extract metadata; we only want to read the row groups whose centers fall in this chunk
ParquetMetadataConverter.MetadataFilter chunkFilter = ParquetMetadataConverter.range(
chunk.start(), chunk.start() + chunk.len());
ParquetMetadata metadata = VecParquetReader.readFooter(_metadata, chunkFilter);
if (metadata.getBlocks().isEmpty()) {
Log.trace("Chunk #", cidx, " doesn't contain any Parquet block center.");
return dout;
}
Log.info("Processing ", metadata.getBlocks().size(), " blocks of chunk #", cidx);
VecParquetReader reader = new VecParquetReader(vec, metadata, dout, _setup.getColumnTypes(), _keepColumns, _setup.get_parse_columns_indices().length, _setup.gettzAdjustToLocal());
try {
Long recordNumber;
do {
recordNumber = reader.read();
} while (recordNumber != null);
} catch (IOException e) {
throw new RuntimeException("Failed to parse records", e);
}
return dout;
}
public static ParquetParseSetup guessFormatSetup(ByteVec vec, byte[] bits, boolean tzAdjustToLocal) {
if (bits.length < MAGIC.length) {
return null;
}
for (int i = 0; i < MAGIC.length; i++) {
if (bits[i] != MAGIC[i]) return null;
}
// seems like we have a Parquet file
byte[] metadataBytes = VecParquetReader.readFooterAsBytes(vec);
ParquetMetadata metadata = VecParquetReader.readFooter(metadataBytes);
checkCompatibility(metadata);
return toInitialSetup(metadata.getFileMetaData().getSchema(), metadataBytes, tzAdjustToLocal);
}
public static String[] extractColumnTypes(ParquetMetadata metadata) {
MessageType messageType = metadata.getFileMetaData().getSchema();
int colNum = messageType.getFieldCount();
String[] parquetColNames = new String[colNum];
for (int index=0; index<colNum; index++) {
parquetColNames[index] = messageType.getType(index).asPrimitiveType().getPrimitiveTypeName().name();
}
return parquetColNames;
}
private static ParquetParseSetup toInitialSetup(MessageType parquetSchema, byte[] metadataBytes, boolean tzAdjustToLocal) {
byte[] roughTypes = roughGuessTypes(parquetSchema);
String[] columnNames = columnNames(parquetSchema);
return new ParquetParseSetup(columnNames, roughTypes, null, metadataBytes, tzAdjustToLocal);
}
public static ParquetParseSetup guessDataSetup(ByteVec vec, ParquetParseSetup ps, boolean[] keepcolumns) {
ParquetPreviewParseWriter ppWriter = readFirstRecords(ps, vec, MAX_PREVIEW_RECORDS, keepcolumns);
return ppWriter.toParseSetup(ps.parquetMetadata, ps.gettzAdjustToLocal());
}
/**
* Overrides unsupported type conversions/mappings specified by the user.
* @param vec byte vec holding binary parquet data
* @param requestedTypes user-specified target types
* @return corrected types
*/
public static byte[] correctTypeConversions(ByteVec vec, byte[] requestedTypes) {
byte[] metadataBytes = VecParquetReader.readFooterAsBytes(vec);
ParquetMetadata metadata = VecParquetReader.readFooter(metadataBytes, ParquetMetadataConverter.NO_FILTER);
byte[] roughTypes = roughGuessTypes(metadata.getFileMetaData().getSchema());
return correctTypeConversions(roughTypes, requestedTypes);
}
private static byte[] correctTypeConversions(byte[] roughTypes, byte[] requestedTypes) {
if (requestedTypes.length != roughTypes.length)
throw new IllegalArgumentException("Invalid column type specification: number of columns and number of types differ!");
byte[] resultTypes = new byte[requestedTypes.length];
for (int i = 0; i < requestedTypes.length; i++) {
if ((roughTypes[i] == Vec.T_NUM) || (roughTypes[i] == Vec.T_TIME)) {
// don't convert Parquet numeric/time type to non-numeric type in H2O
resultTypes[i] = roughTypes[i];
} else if ((roughTypes[i] == Vec.T_BAD) && (requestedTypes[i] == Vec.T_NUM)) {
// don't convert Parquet non-numeric type to a numeric type in H2O
resultTypes[i] = Vec.T_STR;
} else
// satisfy the request
resultTypes[i] = requestedTypes[i];
}
return resultTypes; // return types for all columns present.
}
private static class ParquetPreviewParseWriter extends PreviewParseWriter {
private String[] _colNames;
private byte[] _roughTypes;
public ParquetPreviewParseWriter() {
// an externalizable class should have a public no-arg constructor
super();
}
ParquetPreviewParseWriter(ParquetParseSetup setup) {
super(setup.getColumnNames().length);
_colNames = setup.getColumnNames();
_roughTypes = setup.getColumnTypes();
setColumnNames(_colNames);
_nlines = 0;
_data[0] = new String[_colNames.length];
}
@Override
public byte[] guessTypes() {
return correctTypeConversions(_roughTypes, super.guessTypes());
}
ParquetParseSetup toParseSetup(byte[] parquetMetadata, boolean tzAdjustToLocal) {
byte[] types = guessTypes();
return new ParquetParseSetup(_colNames, types, _data, parquetMetadata, tzAdjustToLocal);
}
}
public static class ParquetParseSetup extends ParseSetup {
transient byte[] parquetMetadata;
public ParquetParseSetup() { super(); }
public ParquetParseSetup(String[] columnNames, byte[] ctypes, String[][] data, byte[] parquetMetadata, boolean tzAdjustToLocal) {
super(ParquetParserProvider.PARQUET_INFO, (byte) '|', true, ParseSetup.HAS_HEADER,
columnNames.length, columnNames, ctypes,
new String[columnNames.length][] /* domains */, null /* NA strings */, data, tzAdjustToLocal);
this.parquetMetadata = parquetMetadata;
if (getForceColTypes() && parquetMetadata != null) {
this.parquetColumnTypes = extractColumnTypes(VecParquetReader.readFooter(parquetMetadata));
}
}
}
private static void checkCompatibility(ParquetMetadata metadata) {
// make sure we can map Parquet blocks to Chunks
for (BlockMetaData block : metadata.getBlocks()) {
if (block.getRowCount() > Integer.MAX_VALUE) {
IcedHashMapGeneric.IcedHashMapStringObject dbg = new IcedHashMapGeneric.IcedHashMapStringObject();
dbg.put("startingPos", block.getStartingPos());
dbg.put("rowCount", block.getRowCount());
throw new H2OUnsupportedDataFileException("Unsupported Parquet file (technical limitation).",
"Current implementation doesn't support Parquet files with blocks larger than " +
Integer.MAX_VALUE + " rows.", dbg); // because we map each block to a single H2O Chunk
}
}
// check that file doesn't have nested structures
MessageType schema = metadata.getFileMetaData().getSchema();
for (String[] path : schema.getPaths())
if (path.length != 1) {
throw new H2OUnsupportedDataFileException("Parquet files with nested structures are not supported.",
"Detected a column with a nested structure " + Arrays.asList(path));
}
}
private static ParquetPreviewParseWriter readFirstRecords(ParquetParseSetup initSetup, ByteVec vec, int cnt,
boolean[] keepcolumns) {
ParquetMetadata metadata = VecParquetReader.readFooter(initSetup.parquetMetadata);
List<BlockMetaData> blockMetaData;
if (metadata.getBlocks().isEmpty()) {
blockMetaData = Collections.<BlockMetaData>emptyList();
} else {
final BlockMetaData firstBlock = findFirstBlock(metadata);
blockMetaData = Collections.singletonList(firstBlock);
}
ParquetMetadata startMetadata = new ParquetMetadata(metadata.getFileMetaData(), blockMetaData);
ParquetPreviewParseWriter ppWriter = new ParquetPreviewParseWriter(initSetup);
VecParquetReader reader = new VecParquetReader(vec, startMetadata, ppWriter, ppWriter._roughTypes, keepcolumns,initSetup.get_parse_columns_indices().length, initSetup.gettzAdjustToLocal());
try {
int recordCnt = 0;
Long recordNum;
do {
recordNum = reader.read();
} while ((recordNum != null) && (++recordCnt < cnt));
return ppWriter;
} catch (IOException e) {
throw new RuntimeException("Failed to read the first few records", e);
}
}
private static byte[] roughGuessTypes(MessageType messageType) {
byte[] types = new byte[messageType.getPaths().size()];
for (int i = 0; i < types.length; i++) {
Type parquetType = messageType.getType(i);
assert parquetType.isPrimitive();
OriginalType ot = parquetType.getOriginalType();
PrimitiveType pt = parquetType.asPrimitiveType();
types[i] = convertType(ot, pt);
}
return types;
}
private static byte convertType(OriginalType ot, PrimitiveType pt) {
// handle special cases (where we cannot guess based on the physical primitive type)
if (OriginalType.TIMESTAMP_MILLIS.equals(ot) || OriginalType.DATE.equals(ot)) {
return Vec.T_TIME;
} else if (OriginalType.DECIMAL.equals(ot)){
return Vec.T_NUM;
}
// convert based on primitive type
switch (pt.getPrimitiveTypeName()) {
case BOOLEAN:
return Vec.T_CAT;
case INT32:
case FLOAT:
case DOUBLE:
case INT64:
return Vec.T_NUM;
case INT96:
return Vec.T_TIME;
default:
return Vec.T_BAD;
}
}
private static String[] columnNames(MessageType messageType) {
String[] colNames = new String[messageType.getPaths().size()];
int i = 0;
for (String[] path : messageType.getPaths()) {
assert path.length == 1;
colNames[i++] = path[0];
}
return colNames;
}
private static BlockMetaData findFirstBlock(ParquetMetadata metadata) {
BlockMetaData firstBlockMeta = metadata.getBlocks().get(0);
for (BlockMetaData meta : metadata.getBlocks()) {
if (meta.getStartingPos() < firstBlockMeta.getStartingPos()) {
firstBlockMeta = meta;
}
}
return firstBlockMeta;
}
}
|
0
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser/parquet/ParquetParserProvider.java
|
package water.parser.parquet;
import org.apache.parquet.format.converter.ParquetMetadataConverter;
import water.DKV;
import water.Job;
import water.Key;
import water.fvec.ByteVec;
import water.fvec.Frame;
import water.fvec.Vec;
import water.parser.*;
import static water.parser.parquet.ParquetParser.extractColumnTypes;
/**
* Parquet parser provider.
*/
public class ParquetParserProvider extends BinaryParserProvider {
/* Setup for this parser */
static ParserInfo PARQUET_INFO = new ParserInfo("PARQUET", DefaultParserProviders.MAX_CORE_PRIO + 20, true, false, true, false);
@Override
public ParserInfo info() {
return PARQUET_INFO;
}
@Override
public Parser createParser(ParseSetup setup, Key<Job> jobKey) {
return new ParquetParser(setup, jobKey);
}
@Override
public ParseSetup guessInitSetup(ByteVec v, byte[] bits, ParseSetup userSetup) {
return ParquetParser.guessFormatSetup(v, bits, userSetup.gettzAdjustToLocal());
}
@Override
public ParseSetup guessFinalSetup(ByteVec v, byte[] bits, ParseSetup ps) {
boolean[] keepColumns=null;
int[] parseColumnIndices = ps.get_parse_columns_indices();
if (parseColumnIndices!= null) {
int numCols = ps.getNumberColumns();
keepColumns = new boolean[numCols];
for (int cindex:parseColumnIndices) {
keepColumns[cindex]=true;
}
}
return ParquetParser.guessDataSetup(v, (ParquetParser.ParquetParseSetup) ps, keepColumns);
}
@Override
public ParseSetup createParserSetup(Key[] inputs, ParseSetup requestedSetup) {
// convert to an instance of ParquetParseSetup if needed
ParseSetup setup = requestedSetup instanceof ParquetParser.ParquetParseSetup ?
requestedSetup : requestedSetup.copyTo(new ParquetParser.ParquetParseSetup());
// override incorrect type mappings (using the MessageFormat of the first file)
Object frameOrVec = DKV.getGet(inputs[0]);
ByteVec vec = (ByteVec) (frameOrVec instanceof Frame ? ((Frame) frameOrVec).vec(0) : frameOrVec);
if (setup.getForceColTypes() && vec != null)
setup.setParquetColumnTypes(extractColumnTypes(VecParquetReader.readFooter(VecParquetReader.readFooterAsBytes(vec), ParquetMetadataConverter.NO_FILTER)));
byte[] requestedTypes = setup.getColumnTypes();
byte[] types = ParquetParser.correctTypeConversions(vec, requestedTypes);
setup.setColumnTypes(types);
for (int i = 0; i < types.length; i++)
if (types[i] != requestedTypes[i])
setup.addErrs(new ParseWriter.UnsupportedTypeOverride(inputs[0].toString(),Vec.TYPE_STR[types[i]], Vec.TYPE_STR[requestedTypes[i]], setup.getColumnNames()[i]));
return setup;
}
@Override
public ParseSetup setupLocal(Vec v, ParseSetup setup) {
((ParquetParser.ParquetParseSetup) setup).parquetMetadata = VecParquetReader.readFooterAsBytes(v);
return setup;
}
}
|
0
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser/parquet/TypeUtils.java
|
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package water.parser.parquet;
import org.joda.time.DateTimeZone;
/**
* Several helper methods inspired by the Guava library - https://github.com/google/guava/. We want to avoid bringing in the Guava dependency when possible.
*
* Duplicating some code from that library is a small sacrifice for not pulling in the whole dependency.
*/
public class TypeUtils {
/**
* Returns the {@code long} value whose byte representation is the given 8 bytes, in big-endian
* order; equivalent to {@code Longs.fromByteArray(new byte[] {b1, b2, b3, b4, b5, b6, b7, b8})}.
*
*/
public static long longFromBytes(
byte b1, byte b2, byte b3, byte b4, byte b5, byte b6, byte b7, byte b8) {
return (b1 & 0xFFL) << 56
| (b2 & 0xFFL) << 48
| (b3 & 0xFFL) << 40
| (b4 & 0xFFL) << 32
| (b5 & 0xFFL) << 24
| (b6 & 0xFFL) << 16
| (b7 & 0xFFL) << 8
| (b8 & 0xFFL);
}
/**
* Returns the {@code int} value whose byte representation is the given 4 bytes, in big-endian
* order; equivalent to {@code Ints.fromByteArray(new byte[] {b1, b2, b3, b4})}.
*/
public static int intFromBytes(byte b1, byte b2, byte b3, byte b4) {
return b1 << 24 | (b2 & 0xFF) << 16 | (b3 & 0xFF) << 8 | (b4 & 0xFF);
}
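// Illustrative example (added, not part of the original source):
// intFromBytes((byte) 0x12, (byte) 0x34, (byte) 0x56, (byte) 0x78) == 0x12345678,
// because the lower three bytes are masked to 8 bits and all four are shifted into their big-endian positions.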
public static int getTimestampAdjustmentFromUtcToLocalInMillis() {
DateTimeZone clusterLocalTimezone = DateTimeZone.getDefault();
return clusterLocalTimezone.getOffset(null);
}
}
|
0
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser/parquet/VecParquetReader.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package water.parser.parquet;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.parquet.filter2.compat.FilterCompat;
import org.apache.parquet.filter2.compat.RowGroupFilter;
import org.apache.parquet.format.converter.ParquetMetadataConverter;
import org.apache.parquet.hadoop.ParquetReader;
import org.apache.parquet.hadoop.metadata.ParquetMetadata;
import water.H2O;
import water.fvec.Vec;
import water.parser.ParseWriter;
import water.util.Log;
import java.io.ByteArrayInputStream;
import java.io.Closeable;
import java.io.IOException;
import java.util.Arrays;
import static org.apache.parquet.bytes.BytesUtils.readIntLittleEndian;
import static org.apache.parquet.format.converter.ParquetMetadataConverter.MetadataFilter;
import static org.apache.parquet.format.converter.ParquetMetadataConverter.NO_FILTER;
import static org.apache.parquet.hadoop.ParquetFileWriter.MAGIC;
/**
* Implementation of Parquet Reader working on H2O's Vecs.
*
* Note: This class was derived from Parquet's ParquetReader implementation. We cannot directly
* use the original implementation because it uses the Hadoop FileSystem to access source data (and also Parquet summary files),
* and because it uses its own parallel implementation for reading metadata, which doesn't fit into H2O's architecture.
*/
public class VecParquetReader implements Closeable {
private static ParquetMetadataConverter converter = new ParquetMetadataConverter();
private final Vec vec;
private final ParquetMetadata metadata;
private final WriterDelegate writer;
private final byte[] chunkSchema; // column types of all columns, including the skipped ones
private ParquetReader<Long> reader;
private boolean[] keepColumns;
private boolean adjustTimezone;
public VecParquetReader(Vec vec, ParquetMetadata metadata, ParseWriter writer, byte[] chunkSchema, boolean[] keepcolumns, int parseColumnNumber, boolean adjustTimezone) {
this(vec, metadata, new WriterDelegate(writer, parseColumnNumber), chunkSchema, keepcolumns, adjustTimezone);
}
VecParquetReader(Vec vec, ParquetMetadata metadata, WriterDelegate writer, byte[] chunkSchema, boolean[] keepcolumns, boolean adjustTimezone) {
this.vec = vec;
this.metadata = metadata;
this.writer = writer;
this.chunkSchema = chunkSchema;
this.keepColumns = keepcolumns;
this.adjustTimezone = adjustTimezone;
}
/**
* @return the index of added Chunk record or null if finished
* @throws IOException
*/
public Long read() throws IOException {
if (reader == null) {
initReader();
}
assert reader != null;
return reader.read();
}
private void initReader() throws IOException {
assert reader == null;
final VecReaderEnv env = VecReaderEnv.make(vec);
ChunkReadSupport crSupport = new ChunkReadSupport(writer, chunkSchema, keepColumns, adjustTimezone);
ParquetReader.Builder<Long> prBuilder = ParquetReader.builder(crSupport, env.getPath())
.withConf(env.getConf())
.withFilter(new FilterCompat.Filter() {
@Override
@SuppressWarnings("unchecked")
public <R> R accept(FilterCompat.Visitor<R> visitor) {
if (visitor instanceof RowGroupFilter) // inject already filtered metadata on RowGroup level
return (R) metadata.getBlocks();
else // no other filtering otherwise
return visitor.visit((FilterCompat.NoOpFilter) FilterCompat.NOOP);
}
});
reader = prBuilder.build();
}
@Override
public void close() throws IOException {
if (reader != null) {
reader.close();
}
}
public static byte[] readFooterAsBytes(Vec vec) {
FSDataInputStream f = null;
try {
f = (FSDataInputStream) H2O.getPM().openSeekable(vec);
return readFooterAsBytes(vec.length(), f);
} catch (IOException e) {
throw new RuntimeException("Failed to read Parquet metadata", e);
} finally {
try {
if (f != null) f.close();
} catch (Exception e) {
Log.warn("Failed to close Vec data input stream", e);
}
}
}
static byte[] readFooterAsBytes(final long length, FSDataInputStream f) throws IOException {
final int FOOTER_LENGTH_SIZE = 4;
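// Descriptive note (added): a Parquet file ends with [footer metadata][4-byte little-endian footer length][4-byte MAGIC],
// so the footer length field starts FOOTER_LENGTH_SIZE + MAGIC.length bytes before the end of the file.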
if (length < MAGIC.length + FOOTER_LENGTH_SIZE + MAGIC.length) { // MAGIC + data + footer + footerIndex + MAGIC
throw new RuntimeException("Vec doesn't represent a Parquet data (too short)");
}
long footerLengthIndex = length - FOOTER_LENGTH_SIZE - MAGIC.length;
f.seek(footerLengthIndex);
int footerLength = readIntLittleEndian(f);
byte[] magic = new byte[MAGIC.length];
f.readFully(magic);
if (!Arrays.equals(MAGIC, magic)) {
throw new RuntimeException("Vec is not a Parquet file. expected magic number at tail " +
Arrays.toString(MAGIC) + " but found " + Arrays.toString(magic));
}
long footerIndex = footerLengthIndex - footerLength;
if (footerIndex < MAGIC.length || footerIndex >= footerLengthIndex) {
throw new RuntimeException("corrupted file: the footer index is not within the Vec");
}
f.seek(footerIndex);
byte[] metadataBytes = new byte[footerLength];
f.readFully(metadataBytes);
return metadataBytes;
}
public static ParquetMetadata readFooter(byte[] metadataBytes) {
return readFooter(metadataBytes, NO_FILTER);
}
public static ParquetMetadata readFooter(byte[] metadataBytes, MetadataFilter filter) {
try {
ByteArrayInputStream bis = new ByteArrayInputStream(metadataBytes);
return converter.readParquetMetadata(bis, filter);
} catch (IOException e) {
throw new RuntimeException("Failed to read Parquet metadata", e);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser/parquet/VecReaderEnv.java
|
package water.parser.parquet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import water.fvec.HDFSFileVec;
import water.fvec.Vec;
import water.persist.PersistHdfs;
import water.persist.VecFileSystem;
import static org.apache.parquet.hadoop.ParquetFileReader.PARQUET_READ_PARALLELISM;
class VecReaderEnv {
private final Configuration _conf;
private final Path _path;
private VecReaderEnv(Configuration conf, Path path) {
_conf = conf;
_path = path;
_conf.setInt(PARQUET_READ_PARALLELISM, 1); // disable parallelism (just one virtual file!)
}
Configuration getConf() {
return _conf;
}
Path getPath() {
return _path;
}
static VecReaderEnv make(Vec vec) {
if (vec instanceof HDFSFileVec) {
// We prefer direct read from HDFS over H2O in-memory caching; it saves resources and prevents overloading a single node with data
Path path = new Path(((HDFSFileVec) vec).getPath());
return new VecReaderEnv(PersistHdfs.CONF, path);
} else {
return new VecReaderEnv(VecFileSystem.makeConfiguration(vec), VecFileSystem.VEC_PATH);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser/parquet/WriterDelegate.java
|
package water.parser.parquet;
import water.DKV;
import water.Iced;
import water.Key;
import water.parser.BufferedString;
import water.parser.ParseWriter;
import water.util.IcedInt;
import water.util.Log;
import java.util.Arrays;
import static water.H2OConstants.MAX_STR_LEN;
final class WriterDelegate {
private final int _maxStringSize;
private final int[] _colRawSize; // currently only used for String columns
private final int _numCols;
private ParseWriter _writer;
private int _col;
WriterDelegate(ParseWriter writer, int numCols) {
_maxStringSize = getMaxStringSize();
_numCols = numCols;
_colRawSize = new int[numCols];
setWriter(writer);
}
// For unit tests only: allows a test to set the maximum string size for all nodes
private int getMaxStringSize() {
Iced<?> maxSize = DKV.getGet(Key.make(WriterDelegate.class.getCanonicalName() + "_maxStringSize"));
return (maxSize instanceof IcedInt) ? ((IcedInt) maxSize)._val : MAX_STR_LEN;
}
void startLine() {
_col = -1;
}
void endLine() {
moveToCol(_numCols);
_writer.newLine();
}
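// Descriptive note (added): moveToCol fills every column skipped since the last write with an
// invalid (NA) value, so records with missing fields still produce complete rows.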
private int moveToCol(int colIdx) {
for (int c = _col + 1; c < colIdx; c++) _writer.addInvalidCol(c);
_col = colIdx;
return _col;
}
void addNumCol(int colIdx, long number, int exp) {
_writer.addNumCol(moveToCol(colIdx), number, exp);
}
void addNumCol(int colIdx, double d) {
_writer.addNumCol(moveToCol(colIdx), d);
}
void addStrCol(int colIdx, BufferedString str) {
if (_colRawSize[colIdx] == -1)
return; // already exceeded max length
long totalSize = (long) str.length() + _colRawSize[colIdx];
if (totalSize > _maxStringSize) {
_colRawSize[colIdx] = -1;
Log.err("Total String size limit reached: skipping remaining values in column: " + colIdx + "!");
return;
}
_colRawSize[colIdx] += str.length();
_writer.addStrCol(moveToCol(colIdx), str);
}
long lineNum() {
return _writer.lineNum();
}
final void setWriter(ParseWriter writer) {
_writer = writer;
_col = Integer.MIN_VALUE;
Arrays.fill(_colRawSize, 0);
}
}
|
0
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser/parquet
|
java-sources/ai/h2o/h2o-parquet-parser/3.46.0.7/water/parser/parquet/ext/DecimalUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package water.parser.parquet.ext;
import java.nio.ByteBuffer;
import java.math.BigInteger;
import java.math.BigDecimal;
import static java.lang.Math.pow;
import org.apache.parquet.io.api.Binary;
/*
*
* Note: this code is a 1-1 copy of https://github.com/apache/parquet-mr/blob/master/parquet-pig/src/main/java/org/apache/parquet/pig/convert/DecimalUtils.java
* All credit goes to original Parquet contributors
*
* Conversion between Parquet Decimal Type to Java BigDecimal in Pig
* Code Based on the Apache Spark ParquetRowConverter.scala
*/
public class DecimalUtils {
public static BigDecimal binaryToDecimal(Binary value, int precision, int scale) {
/*
* Precision <= 18 fits within the maximum number of digits of an unscaled long;
* otherwise fall back to BigInteger conversion
*/
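// Worked example (added for illustration, not in the original source): with precision=5, scale=2
// and the big-endian bytes {0x30, 0x39}, the loop below accumulates unscaled = 0x3039 = 12345
// and the method returns 12345 / 10^2 = 123.45.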
if (precision <= 18) {
ByteBuffer buffer = value.toByteBuffer();
byte[] bytes = buffer.array();
int start = buffer.arrayOffset() + buffer.position();
int end = buffer.arrayOffset() + buffer.limit();
long unscaled = 0L;
int i = start;
while ( i < end ) {
unscaled = ( unscaled << 8 | bytes[i] & 0xff );
i++;
}
int bits = 8*(end - start);
long unscaledNew = (unscaled << (64 - bits)) >> (64 - bits);
if (unscaledNew <= -pow(10,18) || unscaledNew >= pow(10,18)) {
return new BigDecimal(unscaledNew);
} else {
return BigDecimal.valueOf(unscaledNew / pow(10,scale));
}
} else {
return new BigDecimal(new BigInteger(value.getBytes()), scale);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-persist-gcs/3.46.0.7/water
|
java-sources/ai/h2o/h2o-persist-gcs/3.46.0.7/water/persist/GcsBlob.java
|
package water.persist;
import com.google.cloud.storage.Blob;
import com.google.cloud.storage.BlobId;
import com.google.cloud.storage.BlobInfo;
import water.Key;
import water.api.FSIOException;
import water.fvec.Vec;
import java.net.URI;
import java.util.Arrays;
class GcsBlob {
static final String KEY_PREFIX = "gs://";
private static final int KEY_PREFIX_LENGTH = KEY_PREFIX.length();
private final String canonical;
private final BlobId blobId;
private final Key key;
private GcsBlob(String bucket, String key) {
canonical = toCanonical(bucket, key);
blobId = BlobId.of(bucket, key);
this.key = Key.make(canonical);
}
static GcsBlob of(String bucket, String key) {
return new GcsBlob(bucket, key);
}
static GcsBlob of(String s) {
final String canonical = toCanonical(s);
final String[] bk = canonical.substring(KEY_PREFIX_LENGTH).split("/", 2);
if (bk.length == 2) {
return GcsBlob.of(bk[0], bk[1]);
} else {
throw new FSIOException(s, "Cannot parse blob name");
}
}
static GcsBlob of(URI uri) {
return GcsBlob.of(uri.toString());
}
static GcsBlob of(Key k) {
final String s = new String((k._kb[0] == Key.CHK) ? Arrays.copyOfRange(k._kb, Vec.KEY_PREFIX_LEN, k._kb.length) : k._kb);
return GcsBlob.of(s);
}
static GcsBlob of(BlobId blobId) {
return GcsBlob.of(blobId.getBucket(), blobId.getName());
}
static GcsBlob of(Blob blob) {
return GcsBlob.of(blob.getBlobId());
}
String getCanonical() {
return canonical;
}
BlobId getBlobId() {
return blobId;
}
BlobInfo getBlobInfo() {
return BlobInfo.newBuilder(blobId).build();
}
Key getKey() {
return key;
}
static String toCanonical(String s) {
if (s.startsWith(KEY_PREFIX)) {
return s;
} else if (s.startsWith("/")) {
return KEY_PREFIX + s.substring(1);
} else {
return KEY_PREFIX + s;
}
}
private static String toCanonical(String bucket, String key) {
return KEY_PREFIX + bucket + '/' + key;
}
static String removePrefix(String s) {
if (s.startsWith(KEY_PREFIX)) {
return s.substring(KEY_PREFIX_LENGTH);
} else if (s.startsWith("/")) {
return s.substring(1);
} else {
return s;
}
}
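// Illustrative examples (added, not part of the original source):
// toCanonical("bucket/dir/file") == "gs://bucket/dir/file"
// toCanonical("/bucket/dir/file") == "gs://bucket/dir/file"
// removePrefix("gs://bucket/dir/file") == "bucket/dir/file"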
}
|
0
|
java-sources/ai/h2o/h2o-persist-gcs/3.46.0.7/water
|
java-sources/ai/h2o/h2o-persist-gcs/3.46.0.7/water/persist/GcsStorageProvider.java
|
package water.persist;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.StorageOptions;
/**
* A class wrapping {@link Storage} that enables safe lazy initialization by exposing only the getStorage method,
* so developers cannot accidentally access the storage field directly.
*/
final class GcsStorageProvider {
private Storage storage;
/**
* Returns an existing instance of {@link Storage}, or creates a new one if not yet initialized.
* Lazy initialization of storage does not slow down H2O startup (no attempt is made to connect to GCS at startup).
* The connection status and {@link com.google.auth.Credentials} are checked at actual request time.
*
* @return An instance of {@link Storage}, if initialized
*/
protected Storage getStorage() {
if (storage == null) {
storage = StorageOptions.getDefaultInstance().getService();
}
return storage;
}
}
|
0
|
java-sources/ai/h2o/h2o-persist-gcs/3.46.0.7/water
|
java-sources/ai/h2o/h2o-persist-gcs/3.46.0.7/water/persist/PersistGcs.java
|
package water.persist;
import com.google.cloud.ReadChannel;
import com.google.cloud.WriteChannel;
import com.google.cloud.storage.*;
import com.google.cloud.storage.Storage.BucketField;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import water.H2O;
import water.Key;
import water.MemoryManager;
import water.Value;
import water.api.FSIOException;
import water.fvec.FileVec;
import water.fvec.GcsFileVec;
import water.util.Log;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
/**
* Persistence backend for GCS
*/
@SuppressWarnings("unused")
public final class PersistGcs extends Persist {
private GcsStorageProvider storageProvider = new GcsStorageProvider();
@Override
public byte[] load(final Value v) throws IOException {
final BlobId blobId = GcsBlob.of(v._key).getBlobId();
final byte[] contentBytes = MemoryManager.malloc1(v._max);
final ByteBuffer wrappingBuffer = ByteBuffer.wrap(contentBytes);
final Key k = v._key;
long offset = 0;
// Skip offset based on chunk number
if (k._kb[0] == Key.CHK) {
offset = FileVec.chunkOffset(k); // The offset
}
final ReadChannel reader = storageProvider.getStorage().reader(blobId);
reader.seek(offset);
reader.read(wrappingBuffer);
return contentBytes;
}
@Override
public Key uriToKey(URI uri) throws IOException {
final GcsBlob blob = GcsBlob.of(uri);
final Long contentSize = storageProvider.getStorage().get(blob.getBlobId()).getSize();
return GcsFileVec.make(blob.getCanonical(), contentSize);
}
@Override
public void store(Value v) throws IOException {
if (!v._key.home()) return;
final byte payload[] = v.memOrLoad();
final GcsBlob blob = GcsBlob.of(v._key);
Log.debug("Storing: " + blob.toString());
final ByteBuffer buffer = ByteBuffer.wrap(payload);
storageProvider.getStorage().create(blob.getBlobInfo()).writer().write(buffer);
}
@Override
public void delete(Value v) {
final BlobId blobId = GcsBlob.of(v._key).getBlobId();
Log.debug("Deleting: " + blobId.toString());
storageProvider.getStorage().get(blobId).delete();
}
@Override
public void cleanUp() {
throw H2O.unimpl();
}
private final LoadingCache<String, List<String>> keyCache = CacheBuilder.newBuilder()
.maximumSize(1000)
.expireAfterWrite(10, TimeUnit.MINUTES)
.build(new CacheLoader<String, List<String>>() {
@Override
public List<String> load(String key) {
final List<String> blobs = new ArrayList<>();
for (Blob b : storageProvider.getStorage().get(key).list().iterateAll()) {
blobs.add(b.getName());
}
return blobs;
}
});
private final LoadingCache<Object, List<String>> bucketCache = CacheBuilder.newBuilder()
.maximumSize(1000)
.expireAfterWrite(1, TimeUnit.MINUTES)
.build(new CacheLoader<Object, List<String>>() {
@Override
public List<String> load(Object key) {
final List<String> fileNames = new ArrayList<>();
for (Bucket b : storageProvider.getStorage().list().iterateAll()) {
fileNames.add(b.getName());
}
return fileNames;
}
});
@Override
public List<String> calcTypeaheadMatches(String filter, int limit) {
final String input = GcsBlob.removePrefix(filter);
final String[] bk = input.split("/", 2);
List<String> results = limit > 0 ? new ArrayList<String>(limit) : new ArrayList<String>();
try {
if (bk.length == 1) {
List<String> buckets = bucketCache.get("all");
for (String s : buckets) {
results.add(GcsBlob.KEY_PREFIX + s);
if (--limit == 0) {
break;
}
}
} else if (bk.length == 2) {
List<String> objects = keyCache.get(bk[0]);
for (String s : objects) {
if (s.startsWith(bk[1])) {
results.add(GcsBlob.KEY_PREFIX + bk[0] + "/" + s);
}
if (--limit == 0) {
break;
}
}
}
} catch (ExecutionException e) {
Log.err(e);
}
return results;
}
@Override
public void importFiles(String path,
String pattern,
ArrayList<String> files,
ArrayList<String> keys,
ArrayList<String> fails,
ArrayList<String> dels) {
// bk[0] is bucket name, bk[1] is file name - file name is optional.
final String bk[] = GcsBlob.removePrefix(path).split("/", 2);
if (bk.length < 2) {
parseBucket(bk[0], files, keys, fails);
} else {
try {
Iterable<Blob> values = storageProvider.getStorage().list(bk[0], Storage.BlobListOption.prefix(bk[1])).getValues();
values.forEach(blob -> {
final String blobPath = "gs://" + blob.getBucket() + "/" + blob.getName();
final Key k = GcsFileVec.make(blobPath, blob.getSize());
keys.add(k.toString());
files.add(blobPath);
}
);
} catch (Throwable t) {
Log.err(t);
fails.add(path);
}
}
}
private void parseBucket(String bucketId,
ArrayList<String> files,
ArrayList<String> keys,
ArrayList<String> fails) {
final Bucket bucket = storageProvider.getStorage().get(bucketId);
for (Blob blob : bucket.list().iterateAll()) {
final GcsBlob gcsBlob = GcsBlob.of(blob.getBlobId());
Log.debug("Importing: " + gcsBlob.toString());
try {
final Key k = GcsFileVec.make(gcsBlob.getCanonical(), blob.getSize());
keys.add(k.toString());
files.add(gcsBlob.getCanonical());
} catch (Throwable t) {
Log.err(t);
fails.add(gcsBlob.getCanonical());
}
}
}
@Override
public InputStream open(final String path) {
final GcsBlob gcsBlob = GcsBlob.of(path);
Log.debug("Opening: " + gcsBlob.toString());
final Blob blob = storageProvider.getStorage().get(gcsBlob.getBlobId());
return new InputStream() {
final ReadChannel reader = blob.reader();
@Override
public int read() throws IOException {
// very naive version: reads a single byte at a time
try {
ByteBuffer bytes = ByteBuffer.wrap(MemoryManager.malloc1(1));
int numRead = reader.read(bytes);
if (numRead <= 0) return -1; // treat both "no bytes read" and end-of-channel (-1) as EOF
return bytes.get(0);
} catch (IOException e) {
throw new FSIOException(path, e);
}
}
@Override
public int read(byte bytes[], int off, int len) throws IOException {
Objects.requireNonNull(bytes);
if (off < 0 || len < 0 || len > bytes.length - off) {
throw new IndexOutOfBoundsException("Length of byte array is " + bytes.length + ". Offset is " + off
+ " and length is " + len);
} else if (len == 0) {
return 0;
}
final ByteBuffer buffer = ByteBuffer.wrap(bytes, off, len);
return reader.read(buffer);
}
@Override
public int available() throws IOException {
return 1;
}
@Override
public void close() throws IOException {
reader.close();
}
};
}
@Override
public OutputStream create(String path, boolean overwrite) {
final GcsBlob gcsBlob = GcsBlob.of(path);
Log.debug("Creating: " + gcsBlob.getCanonical());
final WriteChannel writer = storageProvider.getStorage().create(gcsBlob.getBlobInfo()).writer();
return new OutputStream() {
@Override
public void write(int b) throws IOException {
ByteBuffer buffer = ByteBuffer.wrap(new byte[]{(byte) b});
writer.write(buffer);
}
@Override
public void write(byte[] b) throws IOException {
ByteBuffer buffer = ByteBuffer.wrap(b);
writer.write(buffer);
}
@Override
public void write(byte[] b, int off, int len) throws IOException {
ByteBuffer buffer = ByteBuffer.wrap(b, off, len);
writer.write(buffer);
}
@Override
public void close() throws IOException {
writer.close();
}
};
}
@Override
public boolean rename(String fromPath, String toPath) {
final BlobId fromBlob = GcsBlob.of(fromPath).getBlobId();
final BlobId toBlob = GcsBlob.of(toPath).getBlobId();
storageProvider.getStorage().get(fromBlob).copyTo(toBlob);
keyCache.invalidate(fromBlob.getBucket());
keyCache.invalidate(toBlob.getBucket());
return storageProvider.getStorage().delete(fromBlob);
}
private String[] split(String path) {
return GcsBlob.removePrefix(path).split("/", 2);
}
@Override
public boolean exists(String path) {
final String bk[] = split(path);
if (bk.length == 1) {
return storageProvider.getStorage().get(bk[0]).exists();
} else if (bk.length == 2) {
Blob blob = storageProvider.getStorage().get(bk[0], bk[1]);
return blob != null && blob.exists();
} else {
return false;
}
}
@Override
public boolean isDirectory(String path) {
final String bk[] = split(path);
return bk.length == 1;
}
@Override
public String getParent(String path) {
final String bk[] = split(path);
if (bk.length > 0) {
return bk[0];
} else {
return null;
}
}
@Override
public boolean delete(String path) {
final BlobId blob = GcsBlob.of(path).getBlobId();
keyCache.invalidate(blob.getBucket());
return storageProvider.getStorage().get(blob).delete();
}
@Override
public long length(String path) {
final BlobId blob = GcsBlob.of(path).getBlobId();
return storageProvider.getStorage().get(blob).getSize();
}
/**
* Lists Blobs prefixed with `path`.
* Prefix `path` is removed from the name of returned entries.
* e.g.
* If `path` equals gs://bucket/infix and 2 Blobs exist ("gs://bucket/infix/blob1" and "gs://bucket/infix/blob2"),
* the returned array consists of PersistEntry objects with names set to blob1 and blob2, respectively.
*/
@Override
public PersistEntry[] list(String path) {
final String bk[] = split(path);
int substrLen = bk.length == 2 ? bk[1].length() : 0;
List<PersistEntry> results = new ArrayList<>();
try {
for (Blob b : storageProvider.getStorage().list(bk[0]).iterateAll()) {
if (bk.length == 1 || (bk.length == 2 && b.getName().startsWith(bk[1]))) {
String relativeName = b.getName().substring(substrLen);
if (relativeName.startsWith("/")) {
relativeName = relativeName.substring(1);
}
results.add(new PersistEntry(relativeName, b.getSize(), b.getUpdateTime()));
}
}
} catch (StorageException e) {
Log.err(e);
}
return results.toArray(new PersistEntry[results.size()]);
}
@Override
public boolean mkdirs(String path) {
try {
final String bk[] = split(path);
if (bk.length > 0) {
Bucket b = storageProvider.getStorage().get(bk[0]);
if (b == null || !b.exists()) {
storageProvider.getStorage().create(BucketInfo.of(bk[0]));
}
return true;
} else {
return false;
}
} catch (StorageException e) {
Log.err(e);
return false;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-persist-hdfs/3.46.0.7/water
|
java-sources/ai/h2o/h2o-persist-hdfs/3.46.0.7/water/persist/PersistHdfs.java
|
package water.persist;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import java.io.*;
import java.net.SocketTimeoutException;
import java.net.URI;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import water.*;
import water.api.HDFSIOException;
import water.fvec.HDFSFileVec;
import water.fvec.Vec;
import water.util.FileUtils;
import water.util.Log;
import static water.fvec.FileVec.getPathForKey;
/**
* HDFS persistence layer.
*/
public final class PersistHdfs extends Persist {
private static final String[] _customS3ASecurityProviders = {
"hex://water.persist.H2OCredentialProviderFactory"
};
/** Globally shared HDFS configuration. */
public static final Configuration CONF;
/** Root path of HDFS */
private final Path _iceRoot;
private static final List<S3ATokenRefresherFactory> _refreshers = new LinkedList<>();
/**
* Filter out hidden files/directories (dot files, eg.: .crc).
* Note: This implementation differs from the filter used in Hadoop MR: we do not skip underscore-prefixed files.
* We already have another filter that takes care of zero-length files (underscore files are typically empty anyway,
* eg.: _SUCCESS).
*/
private static final PathFilter HIDDEN_FILE_FILTER = p -> ! p.getName().startsWith(".");
// Global HDFS initialization
static {
final Configuration conf = new Configuration();
if (H2O.ARGS.hdfs_config != null && H2O.ARGS.hdfs_config.length > 0) {
for (String config : H2O.ARGS.hdfs_config) {
File p = new File(config);
if (!p.exists())
H2O.die("Unable to open hdfs configuration file " + p.getAbsolutePath());
conf.addResource(new Path(p.getAbsolutePath()));
Log.debug("resource ", p.getAbsolutePath(), " added to the hadoop configuration");
}
} else {
Path confDir = null;
// Try to guess location of default Hadoop configuration
// http://www.slideshare.net/martyhall/hadoop-tutorial-hdfs-part-3-java-api
// WARNING: loading of default properties should be disabled if the job
// is executed via yarn command which prepends core-site.xml properties on classpath
if (System.getenv().containsKey("HADOOP_CONF_DIR")) {
confDir = new Path(System.getenv("HADOOP_CONF_DIR"));
} else if (System.getenv().containsKey("YARN_CONF_DIR")) {
confDir = new Path(System.getenv("YARN_CONF_DIR"));
} else if (System.getenv().containsKey("HADOOP_HOME")) {
confDir = new Path(System.getenv("HADOOP_HOME"), "conf");
}
// Load default HDFS configuration
if (confDir != null) {
Log.info("Using HDFS configuration from " + confDir);
conf.addResource(new Path(confDir, "core-site.xml"));
} else {
Log.debug("Cannot find HADOOP_CONF_DIR or YARN_CONF_DIR - default HDFS properties are NOT loaded!");
}
}
// inject our custom S3 credentials providers for use with S3A
injectS3ASecurityCredentialProviders(conf);
// add manually passed configuration
configureFromProperties(conf, H2O.ARGS.hadoop_properties);
CONF = conf;
}
static void configureFromProperties(Configuration conf, Properties props) {
for (Object propertyKey : Collections.list(props.keys())) {
String propertyValue = props.getProperty((String) propertyKey);
conf.set((String) propertyKey, propertyValue);
}
}
static void injectS3ASecurityCredentialProviders(Configuration conf) {
final Collection<String> providers = conf.getStringCollection("fs.s3a.security.credential.provider.path");
String[] merged = new String[providers.size() + _customS3ASecurityProviders.length];
int i = 0;
for (String provider : providers) {
merged[i++] = provider;
}
for (String provider : _customS3ASecurityProviders) {
merged[i++] = provider;
}
assert i == merged.length;
conf.setStrings("fs.s3a.security.credential.provider.path", merged);
}
// Loading HDFS files
@SuppressWarnings("unused") // called via reflection
public PersistHdfs() { _iceRoot = null; }
public void cleanUp() { throw H2O.unimpl(); /* user-mode swapping not implemented */}
@Override public byte[] load(final Value v) {
//
// !!! WARNING !!!
//
// tomk: Sun Apr 19 13:11:51 PDT 2015
//
//
// This load implementation behaved *HORRIBLY* with S3 when the libraries were updated.
// Behaves well (and is the same set of libraries as H2O-1):
// org.apache.hadoop:hadoop-client:2.0.0-cdh4.3.0
// net.java.dev.jets3t:jets3t:0.6.1
//
// Behaves abysmally:
// org.apache.hadoop:hadoop-client:2.5.0-cdh5.2.0
// net.java.dev.jets3t:jets3t:0.9.2
//
//
// I did some debugging.
//
// What happens in the new libraries is the connection type is a streaming connection, and
// the entire file gets read on close() even if you only wanted to read a chunk. The result
// is the same data gets read over and over again by the underlying transport layer even
// though H2O only thinks it's asking for (and receiving) each piece of data once.
//
// I suspect this has something to do with the 'Range' HTTP header on the GET, but I'm not
// entirely sure. Many layers of library need to be fought through to really figure it out.
//
// Anyway, this will need to be rewritten from the perspective of how to properly use the
// new library version. Might make sense to go to straight to 's3a' which is a replacement
// for 's3n'.
//
assert v.isPersisted();
final Key k = v._key;
final long skip = k.isChunkKey() ? water.fvec.NFSFileVec.chunkOffset(k) : 0;
return load(k, skip, v._max);
}
@Override
public byte[] load(final Key k, final long skip, final int max) {
final Path p = _iceRoot == null?new Path(getPathForKey(k)):new Path(_iceRoot, getIceName(k));
return load(p, skip, max);
}
private byte[] load(final Path p, final long skip, final int max) {
//
// !!! WARNING !!!
//
// tomk: Sun Apr 19 13:11:51 PDT 2015
//
//
// This load implementation behaved *HORRIBLY* with S3 when the libraries were updated.
// Behaves well (and is the same set of libraries as H2O-1):
// org.apache.hadoop:hadoop-client:2.0.0-cdh4.3.0
// net.java.dev.jets3t:jets3t:0.6.1
//
// Behaves abysmally:
// org.apache.hadoop:hadoop-client:2.5.0-cdh5.2.0
// net.java.dev.jets3t:jets3t:0.9.2
//
//
// I did some debugging.
//
// What happens in the new libraries is the connection type is a streaming connection, and
// the entire file gets read on close() even if you only wanted to read a chunk. The result
// is the same data gets read over and over again by the underlying transport layer even
// though H2O only thinks it's asking for (and receiving) each piece of data once.
//
// I suspect this has something to do with the 'Range' HTTP header on the GET, but I'm not
// entirely sure. Many layers of library need to be fought through to really figure it out.
//
// Anyway, this will need to be rewritten from the perspective of how to properly use the
// new library version. Might make sense to go to straight to 's3a' which is a replacement
// for 's3n'.
//
long start = System.currentTimeMillis();
final byte[] b = MemoryManager.malloc1(max);
run(() -> {
FileSystem fs = FileSystem.get(p.toUri(), CONF);
FSDataInputStream s = null;
try {
s = fs.open(p);
s.seek(skip);
s.readFully(b);
} finally {
if (s != null) {
FileUtils.close(s.getWrappedStream());
FileUtils.closeSilently(s);
}
}
return null;
});
long end = System.currentTimeMillis();
if (end-start > 1000) // Only log read that took over 1 second to complete
Log.debug("Slow Read: "+(end-start)+" millis to get bytes "+skip +"-"+(skip+b.length)+" in HDFS read.");
return b;
}
@Override public void store(Value v) {
// Should be used only if ice goes to HDFS
assert this == H2O.getPM().getIce();
assert !v.isPersisted();
byte[] m = v.memOrLoad();
assert (m == null || m.length == v._max); // Assert not saving partial files
store(new Path(_iceRoot, getIceName(v)), m);
}
private static void store(final Path path, final byte[] data) {
run(() -> {
FileSystem fs = getFileSystem(path, true);
fs.mkdirs(path.getParent());
try (FSDataOutputStream s = fs.create(path)) {
s.write(data);
}
return null;
});
}
@Override public void delete(final Value v) {
assert this == H2O.getPM().getIce();
assert !v.isPersisted(); // Upper layers already cleared out
run(() -> {
Path p = new Path(_iceRoot, getIceName(v));
FileSystem fs = getFileSystem(p, true);
fs.delete(p, true);
return null;
});
}
private static void run(Callable<?> c) {
while( true ) {
try {
c.call();
break;
// Explicitly ignore the following exceptions but
// fail on the rest IOExceptions
} catch( EOFException e ) {
ignoreAndWait(e, true);
} catch( SocketTimeoutException e ) {
ignoreAndWait(e, false);
} catch( IOException e ) {
// Newer versions of Hadoop derive S3Exception from IOException
ignoreAndWait(e, e.getClass().getName().contains("S3Exception"));
} catch( RuntimeException e ) {
// Older versions of Hadoop derive S3Exception from RuntimeException
if (e.getClass().getName().contains("S3Exception")) {
ignoreAndWait(e, false);
} else {
throw Log.throwErr(e);
}
} catch( Exception e ) {
throw Log.throwErr(e);
}
}
}
private static void ignoreAndWait(final Exception e, boolean printException) {
Log.ignore(e, "Hit HDFS reset problem, retrying...", printException);
try {
Thread.sleep(500);
} catch( InterruptedException ie ) {}
}
public static void addFolder(Path p, ArrayList<String> keys,ArrayList<String> failed) throws IOException, RuntimeException {
FileSystem fs = getFileSystem(p, false);
if(!fs.exists(p)){
failed.add("Path does not exist: '" + p.toString() + "'");
return;
}
addFolder(fs, p, keys, failed);
}
private static void addFolder(FileSystem fs, Path p, ArrayList<String> keys, ArrayList<String> failed) {
if (fs == null) return;
Futures futures = new Futures();
try {
for( FileStatus file : fs.listStatus(p, HIDDEN_FILE_FILTER) ) {
Path pfs = file.getPath();
if(file.isDirectory()) {
addFolder(fs, pfs, keys, failed);
} else if (file.getLen() > 0){
Key<?> k = HDFSFileVec.make(pfs.toString(), file.getLen(), futures);
keys.add(k.toString());
Log.debug("PersistHdfs: DKV.put(" + k + ")");
}
}
} catch( Exception e ) {
Log.err(e);
failed.add(p.toString());
} finally {
futures.blockForPending();
}
}
@Override
public Key uriToKey(URI uri) throws IOException {
assert "hdfs".equals(uri.getScheme()) || "s3".equals(uri.getScheme())
|| "s3n".equals(uri.getScheme()) || "s3a".equals(uri.getScheme()) : "Expected hdfs, s3 s3n, or s3a scheme, but uri is " + uri;
Path path = new Path(uri);
FileSystem fs = getFileSystem(path, false);
FileStatus[] fstatus = fs.listStatus(path);
assert fstatus.length == 1 : "Expected uri to point to a single file, but uri is " + uri;
return HDFSFileVec.make(fstatus[0].getPath().toString(), fstatus[0].getLen());
}
// Is there a bucket name without a trailing "/" ?
private boolean isBareS3NBucketWithoutTrailingSlash(String s) {
String s2 = s.toLowerCase();
Matcher m = Pattern.compile("s3n://[^/]*").matcher(s2);
return m.matches();
}
// // We don't handle HDFS style S3 storage, just native storage. But all users
// // don't know about HDFS style S3 so treat S3 as a request for a native file
// private static final String convertS3toS3N(String s) {
// if (Pattern.compile("^s3[a]?://.*").matcher(s).matches())
// return s.replaceFirst("^s3[a]?://", "s3n://");
// else return s;
// }
@Override
public ArrayList<String> calcTypeaheadMatches(String filter, int limit) {
// Handle S3N bare buckets - s3n://bucketname should be suffixed by '/'
// or the underlying JetS3t library will throw an NPE. The filter name should be s3n://bucketname/
if (isBareS3NBucketWithoutTrailingSlash(filter)) {
filter += "/";
}
// Output matches
ArrayList<String> array = new ArrayList<>();
{
// Filter out partials which are known to print out useless stack traces.
String s = filter.toLowerCase();
if ("hdfs:".equals(s)) return array;
if ("maprfs:".equals(s)) return array;
}
try {
Path p = new Path(filter);
Path expand = p;
if( !filter.endsWith("/") ) expand = p.getParent();
FileSystem fs = getFileSystem(p, false);
for( FileStatus file : fs.listStatus(expand) ) {
Path fp = file.getPath();
if( fp.toString().startsWith(p.toString()) ) {
array.add(fp.toString());
}
if( array.size() == limit) break;
}
} catch (Exception e) {
Log.trace(e);
} catch (Throwable t) {
Log.warn(t);
}
return array;
}
@Override
public void importFiles(String path, String pattern, ArrayList<String> files, ArrayList<String> keys, ArrayList<String> fails, ArrayList<String> dels) {
// path = convertS3toS3N(path);
// Fix for S3 kind of URL
if (isBareS3NBucketWithoutTrailingSlash(path)) {
path += "/";
}
Log.info("ImportHDFS processing (" + path + ")");
// List of processed files
try {
// Recursively import given file/folder
addFolder(new Path(path), keys, fails);
files.addAll(keys);
// write barrier was here : DKV.write_barrier();
} catch (IOException e) {
throw new HDFSIOException(path, PersistHdfs.CONF.toString(), e);
}
}
// -------------------------------
// Node Persistent Storage helpers
// -------------------------------
@Override
public String getHomeDirectory() {
try {
FileSystem fs = FileSystem.get(CONF);
return fs.getHomeDirectory().toString();
}
catch (Exception e) {
return null;
}
}
@Override
public PersistEntry[] list(String path) {
try {
Path p = new Path(path);
FileSystem fs = getFileSystem(p, false);
FileStatus[] arr1 = fs.listStatus(p);
PersistEntry[] arr2 = new PersistEntry[arr1.length];
for (int i = 0; i < arr1.length; i++) {
arr2[i] = new PersistEntry(arr1[i].getPath().getName(), arr1[i].getLen(), arr1[i].getModificationTime());
}
return arr2;
}
catch (IOException e) {
throw new HDFSIOException(path, CONF.toString(), e);
}
}
@Override
public boolean exists(String path) {
Path p = new Path(path);
try {
FileSystem fs = getFileSystem(p, false);
return fs.exists(p);
}
catch (IOException e) {
throw new HDFSIOException(path, CONF.toString(), e);
}
}
@Override
public String getParent(String path) {
Path p = new Path(path);
return p.getParent().toUri().toString();
}
@Override
public boolean isDirectory(String path) {
Path p = new Path(path);
try {
FileSystem fs = getFileSystem(p, false);
return fs.isDirectory(p);
}
catch (IOException e) {
throw new HDFSIOException(path, CONF.toString(), e);
}
}
@Override
public long length(String path) {
Path p = new Path(path);
try {
FileSystem fs = getFileSystem(p, false);
return fs.getFileStatus(p).getLen();
}
catch (IOException e) {
throw new HDFSIOException(path, CONF.toString(), e);
}
}
@Override
public InputStream open(String path) {
return openSeekable(path);
}
@Override
public InputStream openSeekable(String path) {
Path p = new Path(path);
try {
FileSystem fs = getFileSystem(p, false);
return fs.open(p);
}
catch (IOException e) {
throw new HDFSIOException(path, CONF.toString(), e);
}
}
public InputStream wrapSeekable(Vec vec) {
return new FSDataInputStream(new VecDataInputStream(vec, true));
}
public boolean isSeekableOpenSupported() {
return true;
}
@Override
public boolean mkdirs(String path) {
Path p = new Path(path);
try {
FileSystem fs = getFileSystem(p, false);
// Be consistent with Java API and File#mkdirs
if (fs.exists(p)) {
return false;
} else {
return fs.mkdirs(p);
}
}
catch (IOException e) {
throw new HDFSIOException(path, CONF.toString(), e);
}
}
@Override
public boolean rename(String fromPath, String toPath) {
Path f = new Path(fromPath);
Path t = new Path(toPath);
try {
FileSystem fs = getFileSystem(f, false);
return fs.rename(f, t);
}
catch (IOException e) {
throw new HDFSIOException(toPath, CONF.toString(), e);
}
}
@Override
public OutputStream create(String path, boolean overwrite) {
Path p = new Path(path);
try {
FileSystem fs = getFileSystem(p, false);
return fs.create(p, overwrite);
}
catch (IOException e) {
throw new HDFSIOException(path, CONF.toString(), e);
}
}
@Override
public boolean delete(String path) {
Path p = new Path(path);
try {
FileSystem fs = getFileSystem(p, false);
return fs.delete(p, true);
}
catch (IOException e) {
throw new HDFSIOException(path, CONF.toString(), e);
}
}
@Override
public boolean canHandle(String path) {
URI uri = new Path(path).toUri();
try {
// Skip undefined scheme
return uri.getScheme() != null && FileSystem.getFileSystemClass(uri.getScheme(), CONF) != null;
} catch (IOException e) {
return false;
}
}
/**
* Retrieves FileSystem instance - the sole purpose of this method is to make sure delegation tokens
* are acquired before we attempt to get FileSystem instance.
*
* @param path hdfs path
* @param assumeTokensAcquired set to true if this function is called for retrieving data from DKV;
* in this case we assume that a higher-level method already acquired the tokens
* @return FileSystem instance
* @throws IOException ouch...
*/
private static FileSystem getFileSystem(Path path, boolean assumeTokensAcquired) throws IOException {
if (! assumeTokensAcquired && _refreshers.size() > 0) {
for (S3ATokenRefresherFactory refresherFactory : _refreshers) {
boolean handled = refresherFactory.startDelegationTokenRefresher(path);
if (handled)
break;
}
}
return FileSystem.get(path.toUri(), CONF);
}
public static void registerRefresherFactory(S3ATokenRefresherFactory refresherFactory) {
_refreshers.add(refresherFactory);
}
}
|
0
|
java-sources/ai/h2o/h2o-persist-hdfs/3.46.0.7/water
|
java-sources/ai/h2o/h2o-persist-hdfs/3.46.0.7/water/persist/S3AClientFactory.java
|
package water.persist;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.s3a.S3AFileSystem;
import water.H2O;
import water.util.ReflectionUtils;
import java.io.IOException;
import java.net.URI;
public class S3AClientFactory implements S3ClientFactory {
private static final String PROP_S3A_FACTORY_PROTOTYPE_URI = "persist.s3a.factoryPrototypeUri";
@Override
public <T> T getOrMakeClient(String bucket, Object configuration) {
if (configuration != null && !(configuration instanceof Configuration)) {
throw new IllegalArgumentException("Configuration not instance of org.apache.hadoop.conf.Configuration");
}
Configuration hadoopConf = configuration != null ? (Configuration) configuration : PersistHdfs.CONF;
try {
String path = bucket != null ? "s3a://" + bucket + "/" : getDefaultPrototypeUri();
FileSystem fs = getFileSystem(URI.create(path), hadoopConf);
if (fs instanceof S3AFileSystem) {
return ReflectionUtils.getFieldValue(fs, "s3");
} else {
throw new IllegalStateException("File system corresponding to schema s3a is not an instance of S3AFileSystem, " +
"it is " + (fs != null ? fs.getClass().getName() : "undefined") + ".");
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
private static String getDefaultPrototypeUri() {
return H2O.getSysProperty(PROP_S3A_FACTORY_PROTOTYPE_URI, "s3a://www.h2o.ai/");
}
protected FileSystem getFileSystem(URI uri, Configuration conf) throws IOException {
return FileSystem.get(uri, conf);
}
}
|
0
|
java-sources/ai/h2o/h2o-persist-hdfs/3.46.0.7/water
|
java-sources/ai/h2o/h2o-persist-hdfs/3.46.0.7/water/persist/S3ATokenRefresherFactory.java
|
package water.persist;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import water.H2O;
import water.Paxos;
import water.persist.security.HdfsDelegationTokenRefresher;
import water.util.Log;
import java.io.IOException;
import java.net.URI;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
public class S3ATokenRefresherFactory {
private static final String H2O_DYNAMIC_AUTH_S3A_TOKEN_REFRESHER_ENABLED = "h2o.auth.dynamicS3ATokenRefresher.enabled";
private final Configuration conf;
private final String tmpDir;
private final Set<String> bucketsWithDelegationToken = Collections.synchronizedSet(new HashSet<>());
private final Object GENERATION_LOCK = new Object();
S3ATokenRefresherFactory(Configuration conf, String tmpDir) {
this.conf = conf;
this.tmpDir = tmpDir;
}
/**
* Starts a delegation token refresher if given path is compatible with this refresher.
*
* @param p path
   * @return flag indicating whether the path was handled, or whether another refresher needs to try handling this path
   * @throws IOException if setting up the delegation token refresher fails
*/
boolean startDelegationTokenRefresher(Path p) throws IOException {
if (Paxos._cloudLocked && H2O.CLOUD.leader() != H2O.SELF) {
// fast path - cloud already locked, and I am not the leader, give up - only the cloud leader is allowed to refresh the tokens
return false; // not handled (we didn't touch the path even)
}
final URI uri = p.toUri();
if (!"s3a".equalsIgnoreCase(uri.getScheme())) {
// only S3A needs to generate delegation token
if (Log.isLoggingFor(Log.DEBUG)) {
Log.debug("Delegation token refresh is only needed for s3a, requested URI: " + uri);
}
return false; // not handled, different from s3a
}
    // Important: make sure the cloud is locked to guarantee that the leader distributes credentials
    // to all nodes instead of refreshing only for itself (which can happen if the cloud is not yet locked)
Paxos.lockCloud("S3A Token Refresh");
if (H2O.CLOUD.leader() != H2O.SELF) {
// we are not a leader node in a locked cloud, give up
return true; // handled (by the leader - assumed, not checked)
}
synchronized (GENERATION_LOCK) {
if (isInBucketWithAlreadyExistingToken(uri)) {
return true;
}
final String bucketIdentifier = p.toUri().getHost();
HdfsDelegationTokenRefresher.setup(conf, tmpDir, p.toString());
Log.debug("Bucket added to bucketsWithDelegationToken: '" + bucketIdentifier + "'");
bucketsWithDelegationToken.add(bucketIdentifier);
}
return true; // handled by us
}
private boolean isInBucketWithAlreadyExistingToken(URI uri) {
return bucketsWithDelegationToken.contains(uri.getHost());
}
public static S3ATokenRefresherFactory make(Configuration conf, String tmpDir) {
if (conf == null ||
!conf.getBoolean(H2O_DYNAMIC_AUTH_S3A_TOKEN_REFRESHER_ENABLED, false)) {
return null;
}
return new S3ATokenRefresherFactory(conf, tmpDir);
}
}
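// --- Illustrative sketch (not part of the original source) ---
// Hedged example of the gating flag checked in make(): the factory is only created
// when dynamic S3A token refreshing is explicitly enabled. Paths are placeholders.
class S3ATokenRefresherFactoryExample {
  public static void main(String[] args) {
    org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration(false);
    // without the flag, make() returns null and no refresher factory is registered
    assert S3ATokenRefresherFactory.make(conf, "/tmp/h2o") == null;
    conf.setBoolean("h2o.auth.dynamicS3ATokenRefresher.enabled", true);
    S3ATokenRefresherFactory factory = S3ATokenRefresherFactory.make(conf, "/tmp/h2o");
    System.out.println("Factory created: " + (factory != null));
  }
}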
|
0
|
java-sources/ai/h2o/h2o-persist-hdfs/3.46.0.7/water
|
java-sources/ai/h2o/h2o-persist-hdfs/3.46.0.7/water/persist/VecDataInputStream.java
|
package water.persist;
import org.apache.hadoop.fs.PositionedReadable;
import org.apache.hadoop.fs.Seekable;
import water.DKV;
import water.Key;
import water.Value;
import water.fvec.Chunk;
import water.fvec.Vec;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
/**
* Seekable and PositionedReadable implementation of InputStream backed by a Vec data source.
*/
public class VecDataInputStream extends InputStream implements Seekable, PositionedReadable {
private static final byte[] EMPTY_BUFFER = new byte[0];
private final Vec _v;
private final boolean _transient;
private byte[] _buffer;
private long _offset;
private int _pos;
public VecDataInputStream(Vec v) {
this(v, false);
}
public VecDataInputStream(Vec v, boolean trans) {
this._v = v;
this._transient = trans;
flushBuffer(0);
}
private int buffAvailable() {
return _buffer.length - _pos;
}
private long globAvailable() {
return _v.length() - (_offset + _pos);
}
private void fetchData(long position) {
Chunk chk = _v.chunkForRow(position);
if (_transient) {
Key<?> k = _v.chunkKey(chk.cidx());
if (!k.home()) { // free the cache on a non-home node (this is used to prevent leader-node overload when reading preview data)
Value v = DKV.get(k);
if (v != null)
v.freeMem();
}
}
_buffer = chk.asBytes();
_offset = chk.start();
_pos = (int) (position - _offset);
assert _buffer.length > 0;
}
private void flushBuffer(long position) {
_buffer = EMPTY_BUFFER;
_pos = 0;
_offset = position;
}
@Override
public int read() throws IOException {
if (buffAvailable() <= 0) {
if (globAvailable() <= 0L) {
return -1;
}
fetchData(_offset + _pos);
}
return _buffer[_pos++] & 0xff;
}
@Override
public int read(byte[] buffer, int offset, int length) throws IOException {
int read = read(_offset + _pos, buffer, offset, length);
if (read == -1) {
flushBuffer(_offset + _pos);
} else {
int skipped = (int) skip(read);
assert skipped == read;
}
return read;
}
@Override
public long skip(long n) throws IOException {
if (n == 0L) {
return 0L;
}
long target = _offset + _pos + n;
if (inBuffer(target)) {
seekInBuffer(target);
} else {
if (target > _v.length()) {
n -= target - _v.length();
target = _v.length();
}
flushBuffer(target);
}
return n;
}
@Override
public int read(final long position, byte[] buffer, int offset, int length) throws IOException {
int loaded = 0;
long currentPosition = position;
while ((loaded < length) && (currentPosition < _v.length())) {
byte[] buff;
int pos;
if (inBuffer(currentPosition)) {
buff = _buffer;
pos = (int) (currentPosition - _offset);
} else {
Chunk chunk = _v.chunkForRow(currentPosition);
buff = chunk.asBytes();
pos = (int) (currentPosition - chunk.start());
}
int avail = Math.min(buff.length - pos, length - loaded);
System.arraycopy(buff, pos, buffer, offset + loaded, avail);
loaded += avail;
currentPosition += avail;
}
if ((loaded == 0) && (currentPosition == _v.length()))
return -1;
else
return loaded;
}
@Override
public void readFully(long position, byte[] buffer, int offset, int length) throws IOException {
int loaded = read(position, buffer, offset, length);
if (loaded != length) {
throw new EOFException("Reached the end of the Vec while reading into buffer.");
}
}
@Override
public void readFully(long position, byte[] buffer) throws IOException {
readFully(position, buffer, 0, buffer.length);
}
@Override
public void seek(long position) throws IOException {
if (inBuffer(position)) {
seekInBuffer(position);
} else {
flushBuffer(position);
}
}
private void seekInBuffer(long position) {
_pos = (int) (position - _offset);
}
private boolean inBuffer(long position) {
return (position >= _offset) && (position < _offset + _buffer.length);
}
@Override
public long getPos() throws IOException {
return _offset + _pos;
}
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
throw new UnsupportedOperationException("Intentionally not implemented");
}
}
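// --- Illustrative sketch (not part of the original source) ---
// Hedged helper showing sequential reading of a Vec's raw bytes through the stream;
// it assumes a running H2O node and an already existing Vec supplied by the caller.
class VecDataInputStreamExample {
  static byte[] readAll(water.fvec.Vec vec) throws java.io.IOException {
    java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
    try (java.io.InputStream in = new VecDataInputStream(vec)) {
      byte[] buf = new byte[8192];
      int n;
      while ((n = in.read(buf, 0, buf.length)) != -1) {
        out.write(buf, 0, n); // copy each buffered chunk of the Vec's backing bytes
      }
    }
    return out.toByteArray();
  }
}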
|
0
|
java-sources/ai/h2o/h2o-persist-hdfs/3.46.0.7/water
|
java-sources/ai/h2o/h2o-persist-hdfs/3.46.0.7/water/persist/VecFileSystem.java
|
package water.persist;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Progressable;
import water.DKV;
import water.fvec.Vec;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
/**
* Virtual implementation of a Hadoop FileSystem backed by a Vec.
* Instances of this class provide read-only access to a single provided Vec.
* The Vec instance is injected using a ContextAwareConfiguration - the Vec object constitutes the context in this case.
*/
public class VecFileSystem extends FileSystem {
private static final String KEY_PROP = "fs.hex.vec.key";
public static Path VEC_PATH = new Path("hex:/vec");
private Vec _v;
@Override
@SuppressWarnings("unchecked")
public void initialize(URI name, Configuration conf) throws IOException {
String keyStr = conf.get(KEY_PROP);
if (keyStr == null) {
throw new IllegalArgumentException("Configuration needs to a reference to a Vec (set property 'fs.hex.vec.key').");
}
_v = DKV.getGet(keyStr);
super.initialize(name, conf);
}
@Override public FileStatus getFileStatus(Path p) throws IOException {
if (VEC_PATH.equals(p)) {
      return new FileStatus(_v.length(), false, 1, _v.length()/_v.nChunks(), 0L, VecFileSystem.VEC_PATH);
} else
throw new FileNotFoundException("File does not exist: " + p);
}
@Override
public URI getUri() {
return URI.create("hex:/");
}
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
if (! f.equals(VEC_PATH)) {
throw new IllegalArgumentException("Invalid path specified, expected " + VEC_PATH);
}
return new FSDataInputStream(new VecDataInputStream(_v));
}
@Override
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException {
throw new UnsupportedOperationException("This is a virtual file system backed by a single Vec, 'create' not supported!");
}
@Override
public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException {
throw new UnsupportedOperationException("This is a virtual file system backed by a single Vec, 'append' not supported!");
}
@Override
public boolean rename(Path src, Path dst) throws IOException {
throw new UnsupportedOperationException("This is a virtual file system backed by a single Vec, 'rename' not supported!");
}
@Override
public boolean delete(Path f, boolean recursive) throws IOException {
throw new UnsupportedOperationException("This is a virtual file system backed by a single Vec, 'delete' not supported!");
}
@Override
public FileStatus[] listStatus(Path f) throws IOException {
if (VEC_PATH.equals(f)) {
return new FileStatus[]{getFileStatus(f)};
} else
return new FileStatus[0];
}
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
throw new UnsupportedOperationException("This is a virtual file system backed by a single Vec, 'mkdirs' not supported!");
}
@Override
public void setWorkingDirectory(Path newDir) {
}
@Override
public Path getWorkingDirectory() {
return null;
}
public static Configuration makeConfiguration(Vec v) {
Configuration conf = new Configuration(false);
conf.setBoolean("fs.hex.impl.disable.cache", true);
conf.setClass("fs.hex.impl", VecFileSystem.class, FileSystem.class);
conf.set("fs.hex.vec.key", v._key.toString());
return conf;
}
}
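// --- Illustrative sketch (not part of the original source) ---
// Hedged example of exposing a Vec through this virtual file system: build the
// context-carrying Configuration, resolve the "hex" FileSystem, and open VEC_PATH.
// Assumes a running H2O node and an existing Vec provided by the caller.
class VecFileSystemExample {
  static org.apache.hadoop.fs.FSDataInputStream openVec(water.fvec.Vec vec) throws java.io.IOException {
    org.apache.hadoop.conf.Configuration conf = VecFileSystem.makeConfiguration(vec);
    org.apache.hadoop.fs.FileSystem fs = VecFileSystem.VEC_PATH.getFileSystem(conf);
    return fs.open(VecFileSystem.VEC_PATH, 4096);
  }
}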
|
0
|
java-sources/ai/h2o/h2o-persist-hdfs/3.46.0.7/water/persist
|
java-sources/ai/h2o/h2o-persist-hdfs/3.46.0.7/water/persist/security/HdfsDelegationTokenRefresher.java
|
package water.persist.security;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import water.H2O;
import water.MRTask;
import water.Paxos;
import water.persist.PersistHdfs;
import water.util.BinaryFileTransfer;
import water.util.FileUtils;
import java.io.*;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
public class HdfsDelegationTokenRefresher implements Runnable {
public static final String H2O_AUTH_USER = "h2o.auth.user";
public static final String H2O_AUTH_PRINCIPAL = "h2o.auth.principal";
public static final String H2O_AUTH_KEYTAB = "h2o.auth.keytab";
public static final String H2O_AUTH_TOKEN_REFRESHER_ENABLED = "h2o.auth.tokenRefresher.enabled";
public static final String H2O_AUTH_TOKEN_REFRESHER_INTERVAL_RATIO = "h2o.auth.tokenRefresher.intervalRatio";
public static final String H2O_AUTH_TOKEN_REFRESHER_MAX_ATTEMPTS = "h2o.auth.tokenRefresher.maxAttempts";
public static final String H2O_AUTH_TOKEN_REFRESHER_RETRY_DELAY_SECS = "h2o.auth.tokenRefresher.retryDelaySecs";
public static final String H2O_AUTH_TOKEN_REFRESHER_FALLBACK_INTERVAL_SECS = "h2o.auth.tokenRefresher.fallbackIntervalSecs";
public static final String H2O_DYNAMIC_AUTH_S3A_TOKEN_REFRESHER_ENABLED = "h2o.auth.dynamicS3ATokenRefresher.enabled";
public static void setup(Configuration conf, String tmpDir, String uri) throws IOException {
boolean enabled = conf.getBoolean(H2O_AUTH_TOKEN_REFRESHER_ENABLED, false) || conf.getBoolean(H2O_DYNAMIC_AUTH_S3A_TOKEN_REFRESHER_ENABLED, false);
if (!enabled) {
log("HDFS Token renewal is not enabled in configuration", null);
return;
}
String authUser = conf.get(H2O_AUTH_USER);
String authPrincipal = conf.get(H2O_AUTH_PRINCIPAL);
if (authPrincipal == null) {
log("Principal not provided, HDFS tokens will not be refreshed by H2O and their lifespan will be limited", null);
return;
}
String authKeytab = conf.get(H2O_AUTH_KEYTAB);
if (authKeytab == null) {
log("Keytab not provided, HDFS tokens will not be refreshed by H2O and their lifespan will be limited", null);
return;
}
String authKeytabPath = writeKeytabToFile(authKeytab, tmpDir);
startRefresher(conf, authPrincipal, authKeytabPath, authUser, uri);
}
static void startRefresher(Configuration conf, String authPrincipal, String authKeytabPath, String authUser, String uri) {
new HdfsDelegationTokenRefresher(conf, authPrincipal, authKeytabPath, authUser, uri).start();
}
public static void startRefresher(Configuration conf,
String authPrincipal, String authKeytabPath, long renewalIntervalSecs) {
new HdfsDelegationTokenRefresher(conf, authPrincipal, authKeytabPath, null).start(renewalIntervalSecs);
}
private static String writeKeytabToFile(String authKeytab, String tmpDir) throws IOException {
FileUtils.makeSureDirExists(tmpDir);
File keytabFile = new File(tmpDir, "hdfs_auth_keytab");
byte[] byteArr = BinaryFileTransfer.convertStringToByteArr(authKeytab);
BinaryFileTransfer.writeBinaryFile(keytabFile.getAbsolutePath(), byteArr);
return keytabFile.getAbsolutePath();
}
// executor is only non-NULL when this node still believes it can refresh credentials (eg: it is a leader node or cloud was not locked yet)
private volatile ScheduledExecutorService _executor;
private final String _authPrincipal;
private final String _authKeytabPath;
private final String _authUser;
private final double _intervalRatio;
private final int _maxAttempts;
private final int _retryDelaySecs;
private final long _fallbackIntervalSecs;
private final String _uri;
public HdfsDelegationTokenRefresher(
Configuration conf,
String authPrincipal,
String authKeytabPath,
String authUser
) {
this(conf, authPrincipal, authKeytabPath, authUser, null);
}
public HdfsDelegationTokenRefresher(
Configuration conf,
String authPrincipal,
String authKeytabPath,
String authUser,
String uri
) {
_authPrincipal = authPrincipal;
_authKeytabPath = authKeytabPath;
_authUser = authUser;
_intervalRatio = Double.parseDouble(conf.get(H2O_AUTH_TOKEN_REFRESHER_INTERVAL_RATIO, "0.4"));
_maxAttempts = conf.getInt(H2O_AUTH_TOKEN_REFRESHER_MAX_ATTEMPTS, 12);
_retryDelaySecs = conf.getInt(H2O_AUTH_TOKEN_REFRESHER_RETRY_DELAY_SECS, 10);
_fallbackIntervalSecs = conf.getInt(H2O_AUTH_TOKEN_REFRESHER_FALLBACK_INTERVAL_SECS, 12 * 3600); // 12h
_uri = uri;
_executor = Executors.newSingleThreadScheduledExecutor(
new ThreadFactoryBuilder().setDaemon(true).setNameFormat(getThreadNameFormatForRefresher(
conf.getBoolean(H2O_DYNAMIC_AUTH_S3A_TOKEN_REFRESHER_ENABLED, false),
uri)).build());
}
private String getThreadNameFormatForRefresher(boolean isDynamicS3ATokenRefresherEnabled, String uri) {
if (isDynamicS3ATokenRefresherEnabled && uri != null) {
String bucketIdentifier = new Path(uri).toUri().getHost();
return "s3a-hdfs-token-refresher-" + bucketIdentifier + "-%d";
} else {
return "hdfs-token-refresher-%d";
}
}
void start() {
long renewalIntervalSecs = autodetectRenewalInterval();
start(renewalIntervalSecs);
}
void start(long renewalIntervalSecs) {
if (renewalIntervalSecs <= 0) {
throw new IllegalArgumentException("Renewal interval needs to be a positive number, got " + renewalIntervalSecs);
}
boolean keepRefreshing = doRefresh();
if (keepRefreshing) {
// note: _executor cannot be modified concurrently - at this stage everything is strictly sequential
assert _executor != null : "Executor is undefined even though we were asked to keep refreshing credentials";
_executor.scheduleAtFixedRate(this, renewalIntervalSecs, renewalIntervalSecs, TimeUnit.SECONDS);
} else {
log("Node " + H2O.SELF + " will not be participating in delegation token refresh.", null);
}
}
private long autodetectRenewalInterval() {
final long actualIntervalSecs;
long intervalSecs = 0L;
try {
intervalSecs = getTokenRenewalIntervalSecs(loginAuthUser());
} catch (IOException | InterruptedException e) {
log("Encountered error while trying to determine token renewal interval.", e);
}
if (intervalSecs == 0L) {
actualIntervalSecs = _fallbackIntervalSecs;
log("Token renewal interval was not determined, will use " + _fallbackIntervalSecs + "s.", null);
} else {
actualIntervalSecs = (long) (intervalSecs * _intervalRatio);
log("Determined token renewal interval = " + intervalSecs + "s. " +
"Using actual interval = " + actualIntervalSecs + "s (ratio=" + _intervalRatio + ").", null);
}
return actualIntervalSecs;
}
private static void log(String s, Exception e) {
System.out.println("HDFS TOKEN REFRESH: " + s);
if (e != null) {
e.printStackTrace(System.out);
}
}
@Override
public void run() {
boolean keepRefreshing = doRefresh();
if (!keepRefreshing) {
log("Cloud is already locked, non-leader node " + H2O.SELF + " will no longer refresh delegation tokens.", null);
}
}
private boolean doRefresh() {
if (Paxos._cloudLocked && !(H2O.CLOUD.leader() == H2O.SELF)) {
      // cloud is formed, the leader will take care of subsequent refreshes
if (_executor != null) {
// only shutdown once
final ScheduledExecutorService executor;
synchronized(this) {
executor = _executor;
_executor = null;
}
if (executor != null) {
executor.shutdown();
}
}
return false;
}
for (int i = 0; i < _maxAttempts; i++) {
try {
Credentials creds = refreshTokens(loginAuthUser());
distribute(creds);
return true;
} catch (IOException | InterruptedException e) {
log("Failed to refresh token (attempt " + i + " out of " + _maxAttempts + "). Will retry in " + _retryDelaySecs + "s.", e);
}
try {
Thread.sleep(_retryDelaySecs * 1000L);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
return true;
}
private Credentials refreshTokens(UserGroupInformation tokenUser) throws IOException, InterruptedException {
return tokenUser.doAs((PrivilegedExceptionAction<Credentials>) () -> {
Credentials creds = new Credentials();
Token<?>[] tokens = fetchDelegationTokens(getRenewer(), creds, _uri);
log("Fetched delegation tokens: " + Arrays.toString(tokens), null);
return creds;
});
}
private String getRenewer() {
return _authUser != null ? _authUser : _authPrincipal;
}
private UserGroupInformation loginAuthUser() throws IOException {
log("Log in from keytab as " + _authPrincipal, null);
UserGroupInformation realUser = UserGroupInformation.loginUserFromKeytabAndReturnUGI(_authPrincipal, _authKeytabPath);
UserGroupInformation tokenUser = realUser;
if (_authUser != null) {
log("Impersonate " + _authUser, null);
// attempt to impersonate token user, this verifies that the real-user is able to impersonate tokenUser
tokenUser = UserGroupInformation.createProxyUser(_authUser, tokenUser);
}
return tokenUser;
}
private long getTokenRenewalIntervalSecs(UserGroupInformation tokenUser) throws IOException, InterruptedException {
Credentials creds = refreshTokens(tokenUser);
long intervalMillis = tokenUser.doAs((PrivilegedExceptionAction<Long>) () ->
creds.getAllTokens()
.stream()
.map(token -> {
try {
long expiresAt = token.renew(PersistHdfs.CONF);
long issuedAt = 0;
TokenIdentifier ident = token.decodeIdentifier();
if (ident instanceof AbstractDelegationTokenIdentifier) {
issuedAt = ((AbstractDelegationTokenIdentifier) ident).getIssueDate();
}
return expiresAt - (issuedAt > 0 ? issuedAt : System.currentTimeMillis());
} catch (InterruptedException | IOException e) {
log("Failed to determine token expiration for token " + token, e);
return Long.MAX_VALUE;
}
}).min(Long::compareTo).orElse(Long.MAX_VALUE)
);
return intervalMillis > 0 && intervalMillis < Long.MAX_VALUE ?
intervalMillis / 1000 : 0L;
}
private static Token<?>[] fetchDelegationTokens(String renewer, Credentials credentials, String uri) throws IOException {
if (uri != null) {
log("Fetching a delegation token for not-null uri: '" + uri, null);
return FileSystem.get(URI.create(uri), PersistHdfs.CONF).addDelegationTokens(renewer, credentials);
} else {
return FileSystem.get(PersistHdfs.CONF).addDelegationTokens(renewer, credentials);
}
}
private void distribute(Credentials creds) throws IOException {
DistributeCreds distributeTask = new DistributeCreds(creds);
if (!Paxos._cloudLocked) {
// skip token distribution in pre-cloud forming phase, only use credentials locally
distributeTask.setupLocal();
} else {
distributeTask.doAllNodes();
}
}
private static class DistributeCreds extends MRTask<DistributeCreds> {
private final byte[] _credsSerialized;
private DistributeCreds(Credentials creds) throws IOException {
_credsSerialized = serializeCreds(creds);
}
@Override
protected void setupLocal() {
try {
Credentials creds = deserialize();
log("Updating credentials", null);
UserGroupInformation.getCurrentUser().addCredentials(creds);
} catch (IOException e) {
log("Failed to update credentials", e);
}
}
private Credentials deserialize() throws IOException {
ByteArrayInputStream tokensBuf = new ByteArrayInputStream(_credsSerialized);
Credentials creds = new Credentials();
creds.readTokenStorageStream(new DataInputStream(tokensBuf));
return creds;
}
private static byte[] serializeCreds(Credentials creds) throws IOException {
ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
DataOutputStream dataStream = new DataOutputStream(byteStream);
creds.writeTokenStorageToStream(dataStream);
return byteStream.toByteArray();
}
}
}
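// --- Illustrative sketch (not part of the original source) ---
// Hedged configuration example for setup() above: the refresher only starts when it
// is enabled and both principal and keytab are provided. The principal, proxy user,
// temp dir and keytab payload (encoded as expected by BinaryFileTransfer) are placeholders.
class HdfsDelegationTokenRefresherExample {
  static void enableRefresher(String encodedKeytab) throws java.io.IOException {
    org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
    conf.setBoolean(HdfsDelegationTokenRefresher.H2O_AUTH_TOKEN_REFRESHER_ENABLED, true);
    conf.set(HdfsDelegationTokenRefresher.H2O_AUTH_PRINCIPAL, "h2o/host@EXAMPLE.COM");
    conf.set(HdfsDelegationTokenRefresher.H2O_AUTH_KEYTAB, encodedKeytab);
    conf.set(HdfsDelegationTokenRefresher.H2O_AUTH_USER, "proxied-user"); // optional impersonation
    HdfsDelegationTokenRefresher.setup(conf, "/tmp/h2o-keytabs", null);   // null uri = default FileSystem
  }
}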
|
0
|
java-sources/ai/h2o/h2o-persist-http/3.46.0.7/water
|
java-sources/ai/h2o/h2o-persist-http/3.46.0.7/water/persist/PersistHTTP.java
|
package water.persist;
import org.apache.http.Header;
import org.apache.http.HttpHeaders;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpHead;
import org.apache.http.client.methods.HttpRequestBase;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.log4j.Logger;
import water.Key;
import water.MemoryManager;
import water.Value;
import water.fvec.FileVec;
import water.fvec.HTTPFileVec;
import water.fvec.Vec;
import water.util.ByteStreams;
import water.util.HttpResponseStatus;
import water.util.Log;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import static water.H2O.OptArgs.SYSTEM_PROP_PREFIX;
/**
* Implementation of the Persist interface for HTTP/HTTPS data sources
* Only subset of the API is supported.
*/
public class PersistHTTP extends PersistEagerHTTP {
private static final Logger LOG = Logger.getLogger(PersistHTTP.class);
private static final String ENABLE_LAZY_LOAD_KEY = SYSTEM_PROP_PREFIX + "persist.http.enableLazyLoad";
private static final Set<String> COMPRESSED_CONTENT_TYPES = new HashSet<>(Arrays.asList(
"application/zip",
"application/gzip"
)); // only need to list the ones H2O actually supports
@Override
public final byte[] load(Value v) throws IOException {
final byte[] b = MemoryManager.malloc1(v._max);
final Key k = v._key;
final long offset = (k._kb[0] == Key.CHK) ? FileVec.chunkOffset(k) : 0L;
URI source = decodeKey(k);
HttpRequestBase req = createReq(source, false);
String rangeHeader = "bytes=" + offset + "-" + (offset+v._max-1);
req.setHeader(HttpHeaders.RANGE, rangeHeader);
LOG.debug("Loading " + rangeHeader + " from " + source);
try (CloseableHttpClient client = HttpClientBuilder.create().build();
CloseableHttpResponse response = client.execute(req)) {
if (response.getStatusLine().getStatusCode() != HttpResponseStatus.PARTIAL_CONTENT.getCode()) {
throw new IllegalStateException("Expected to retrieve a partial content response (status: " + response.getStatusLine() + ").");
}
if (readContentLength(response) != v._max) {
throw new IllegalStateException("Received incorrect amount of data (expected: " + v._max + "B," +
" received: " + response.getEntity().getContentLength() + "B).");
}
try (InputStream s = response.getEntity().getContent()) {
ByteStreams.readFully(s, b);
}
}
return b;
}
static long readContentLength(HttpResponse response) {
long len = response.getEntity().getContentLength();
if (len >= 0)
return len;
final Header contentRange = response.getFirstHeader(HttpHeaders.CONTENT_RANGE);
try {
return parseContentRangeLength(contentRange);
} catch (Exception e) {
throw new IllegalStateException("Unable to determine response length: " + contentRange, e);
}
}
private static long parseContentRangeLength(Header contentRange) {
if (contentRange == null || contentRange.getValue() == null)
throw new IllegalStateException("Range not available");
if (!contentRange.getValue().startsWith("bytes"))
throw new IllegalStateException("Only 'bytes' range is supported: " + contentRange);
String value = contentRange.getValue().substring("bytes".length()).trim();
String[] crParts = value.split("/");
if (crParts.length != 2)
throw new IllegalStateException("Invalid HTTP response. Cannot parse header " + HttpHeaders.CONTENT_RANGE + ": " + contentRange.getValue());
String[] range = crParts[0].split("-");
if (range.length != 2)
throw new IllegalStateException("Invalid HTTP response. Cannot interpret range value in response header " + HttpHeaders.CONTENT_RANGE + ": " + contentRange.getValue());
return 1 + Long.parseLong(range[1]) - Long.parseLong(range[0]);
}
private static URI decodeKey(Key k) {
return URI.create(new String((k._kb[0] == Key.CHK) ? Arrays.copyOfRange(k._kb, Vec.KEY_PREFIX_LEN, k._kb.length) : k._kb));
}
long useLazyLoad(URI uri) throws IOException {
HttpRequestBase req = createReq(uri, true);
try (CloseableHttpClient client = HttpClientBuilder.create().build();
CloseableHttpResponse response = client.execute(req)) {
if (isCompressed(response))
return -1L; // avoid lazy-loading of compressed resource that cannot be parsed in parallel
return checkRangeSupport(uri, response);
}
}
static boolean isCompressed(HttpResponse response) {
Header contentTypeHeader = response.getFirstHeader(HttpHeaders.CONTENT_TYPE);
if (contentTypeHeader == null)
return false; // assume not compressed
String contentType = contentTypeHeader.getValue();
if (contentType == null)
return false;
return COMPRESSED_CONTENT_TYPES.contains(contentType.toLowerCase());
}
/**
* Tests whether a given URI can be accessed using range-requests.
*
* @param uri resource identifier
* @param response HttpResponse retrieved by accessing the given uri
* @return -1 if range-requests are not supported, otherwise content length of the requested resource
*/
long checkRangeSupport(URI uri, HttpResponse response) {
Header acceptRangesHeader = response.getFirstHeader(HttpHeaders.ACCEPT_RANGES);
Header contentLengthHeader = response.getFirstHeader(HttpHeaders.CONTENT_LENGTH);
boolean acceptByteRange = (acceptRangesHeader != null) && "bytes".equalsIgnoreCase(acceptRangesHeader.getValue());
if (!acceptByteRange || contentLengthHeader == null) {
LOG.debug(uri + " does not support range header");
return -1L;
}
LOG.debug("Range support confirmed for " + uri + " with length " + contentLengthHeader.getValue());
return Long.parseLong(contentLengthHeader.getValue());
}
private HttpRequestBase createReq(URI uri, boolean isHead) {
HttpRequestBase req = isHead ? new HttpHead(uri) : new HttpGet(uri);
req.setHeader(HttpHeaders.ACCEPT_ENCODING, "identity");
return req;
}
@Override
public void importFiles(String path, String pattern,
/*OUT*/ ArrayList<String> files, ArrayList<String> keys, ArrayList<String> fails, ArrayList<String> dels) {
boolean lazyLoadEnabled = Boolean.parseBoolean(System.getProperty(ENABLE_LAZY_LOAD_KEY, "true"));
if (lazyLoadEnabled) {
long length = -1L;
try {
URI source = URI.create(path);
length = useLazyLoad(source);
} catch (Exception e) {
Log.debug("Failed to detect range support for " + path, e);
}
if (length >= 0) {
final Key<?> destination_key = HTTPFileVec.make(path, length);
files.add(path);
keys.add(destination_key.toString());
return;
}
} else
Log.debug("HTTP lazy load disabled by user.");
// Fallback - load the key eagerly if range-requests are not supported
super.importFiles(path, pattern, files, keys, fails, dels);
}
}
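// --- Illustrative sketch (not part of the original source) ---
// Hedged, worked example of the Content-Range arithmetic in parseContentRangeLength():
// for a response header "Content-Range: bytes 100-1123/5000" the returned chunk
// length is 1 + 1123 - 100 = 1024 bytes. The header value below is a made-up sample.
class ContentRangeLengthExample {
  public static void main(String[] args) {
    String value = "100-1123/5000";                    // value after the "bytes" prefix is stripped
    String[] crParts = value.split("/");               // {"100-1123", "5000"}
    String[] range = crParts[0].split("-");            // {"100", "1123"}
    long length = 1 + Long.parseLong(range[1]) - Long.parseLong(range[0]);
    System.out.println(length);                        // prints 1024
  }
}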
|
0
|
java-sources/ai/h2o/h2o-persist-s3/3.46.0.7/water
|
java-sources/ai/h2o/h2o-persist-s3/3.46.0.7/water/persist/AstS3GeneratePresignedURL.java
|
package water.persist;
import water.H2O;
import water.rapids.Val;
import water.rapids.ast.AstBuiltin;
import water.rapids.ast.prims.misc.AstSetProperty;
import water.rapids.vals.ValStr;
import java.net.URI;
import java.net.URL;
import java.time.Instant;
import java.util.Date;
/**
* Exposes S3 resources as a pre-signed URL in a Rapids expression.
*
 * Note: this currently has no practical use other than debugging.
 * It could be a useful workaround in cases where PersistS3 fails, providing a viable
 * alternative for users to get data into their clusters.
*/
public class AstS3GeneratePresignedURL extends AstBuiltin<AstSetProperty> {
@Override
public String[] args() {
return new String[]{"path", "duration_millis"};
}
@Override
public int nargs() {
return 1 + 2;
} // (s3.generate.presigned.URL path duration_millis)
@Override
public String str() {
return "s3.generate.presigned.URL";
}
@Override
protected ValStr exec(Val[] args) {
final String path = args[1].getStr();
final long durationMillis = (long) args[2].getNum();
Persist persist = H2O.getPM().getPersistForURI(URI.create(path));
if (!(persist instanceof PersistS3)) {
throw new IllegalArgumentException("Path '" + path + "' cannot be handled by PersistS3.");
}
Date expiration = new Date(Instant.now().toEpochMilli() + durationMillis);
URL presignedURL = ((PersistS3) persist).generatePresignedUrl(path, expiration);
return new ValStr(presignedURL.toString());
}
}
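// --- Illustrative sketch (not part of the original source) ---
// Hedged example of invoking the Rapids primitive above from Java: pre-sign an S3
// object for one hour (3600000 ms). The S3 path is a placeholder and a running H2O
// node with working S3 credentials is assumed.
class AstS3GeneratePresignedURLExample {
  static String presignForOneHour() {
    water.rapids.Val val = water.rapids.Rapids.exec(
        "(s3.generate.presigned.URL \"s3://example-bucket/data.csv\" 3600000)");
    return val.getStr();
  }
}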
|
0
|
java-sources/ai/h2o/h2o-persist-s3/3.46.0.7/water
|
java-sources/ai/h2o/h2o-persist-s3/3.46.0.7/water/persist/H2OCredentialProviderFactory.java
|
package water.persist;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.alias.CredentialProvider;
import org.apache.hadoop.security.alias.CredentialProviderFactory;
import water.util.Log;
import java.net.URI;
import java.util.Collections;
import java.util.List;
public class H2OCredentialProviderFactory extends CredentialProviderFactory {
@Override
public CredentialProvider createProvider(URI uri, Configuration configuration) {
if ("hex".equals(uri.getScheme()) && getClass().getName().equals(uri.getHost()))
return new H2OCredentialProvider(new PersistS3.H2OAWSCredentialsProviderChain());
else
return null;
}
static class H2OCredentialProvider extends CredentialProvider {
private final AWSCredentialsProvider _awsCredentialsProvider;
public H2OCredentialProvider(AWSCredentialsProvider awsCredentialsProvider) {
_awsCredentialsProvider = awsCredentialsProvider;
}
@Override
public void flush() {
// nothing to do
}
@Override
public CredentialEntry getCredentialEntry(String s) {
try {
if ("fs.s3a.access.key".equals(s)) {
AWSCredentials credentials = _awsCredentialsProvider.getCredentials();
return new H2OCredentialEntry("fs.s3a.access.key", credentials.getAWSAccessKeyId().toCharArray());
} else if ("fs.s3a.secret.key".equals(s)) {
AWSCredentials credentials = _awsCredentialsProvider.getCredentials();
return new H2OCredentialEntry("fs.s3a.secret.key", credentials.getAWSSecretKey().toCharArray());
}
} catch (Exception e) {
Log.warn("Failed to retrieve '" + s + "' using the H2O built-in credentials chain.");
}
return null;
}
@Override
public List<String> getAliases() {
return Collections.emptyList();
}
@Override
public CredentialEntry createCredentialEntry(String s, char[] chars) {
throw new UnsupportedOperationException("AWS Credentials are read-only: unable to create new entry");
}
@Override
public void deleteCredentialEntry(String s) {
throw new UnsupportedOperationException("AWS Credentials are read-only: unable to delete an entry");
}
}
static class H2OCredentialEntry extends CredentialProvider.CredentialEntry {
protected H2OCredentialEntry(String alias, char[] credential) {
super(alias, credential);
}
}
}
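// --- Illustrative sketch (not part of the original source) ---
// Hedged guess at how this factory might be wired into Hadoop/s3a: point the credential
// provider path at a "hex" URI whose host equals this factory's class name, matching
// the check in createProvider() above. Treat the exact wiring as an assumption.
class H2OCredentialProviderFactoryExample {
  static org.apache.hadoop.conf.Configuration wire() {
    org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
    conf.set(org.apache.hadoop.security.alias.CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
        "hex://water.persist.H2OCredentialProviderFactory/");
    return conf;
  }
}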
|
0
|
java-sources/ai/h2o/h2o-persist-s3/3.46.0.7/water
|
java-sources/ai/h2o/h2o-persist-s3/3.46.0.7/water/persist/IcedS3Credentials.java
|
package water.persist;
import com.amazonaws.auth.AWSCredentials;
import water.Iced;
/**
* Amazon S3 Credentials wrapper
*/
public class IcedS3Credentials extends Iced<IcedS3Credentials> {
public static final String S3_CREDENTIALS_DKV_KEY = "S3_CREDENTIALS_KEY";
final String _accessKeyId;
final String _secretAccessKey;
final String _sessionToken;
/**
* @param accessKeyId AWS Credentials access key id.
* @param secretAccessKey AWS Credentials secret access key.
* @param sessionToken AWS Session token - only for authorization with session tokens - might be null.
*/
public IcedS3Credentials(final String accessKeyId, final String secretAccessKey,
final String sessionToken) {
_accessKeyId = accessKeyId;
_secretAccessKey = secretAccessKey;
_sessionToken = sessionToken;
}
public IcedS3Credentials(final AWSCredentials credentials) {
this(credentials.getAWSAccessKeyId(), credentials.getAWSSecretKey(), null);
}
public boolean isAWSSessionTokenAuth() {
return _sessionToken != null && _secretAccessKey != null && _accessKeyId != null;
}
public boolean isAWSCredentialsAuth() {
return _sessionToken == null // Session token must be set to null in order to use AWS Credentials auth
&& _secretAccessKey != null && _accessKeyId != null;
}
}
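// --- Illustrative sketch (not part of the original source) ---
// Hedged example of the two auth modes distinguished above; key values are placeholders.
class IcedS3CredentialsExample {
  public static void main(String[] args) {
    IcedS3Credentials permanent = new IcedS3Credentials("AKIA_EXAMPLE", "secret", null);
    IcedS3Credentials temporary = new IcedS3Credentials("ASIA_EXAMPLE", "secret", "session-token");
    System.out.println(permanent.isAWSCredentialsAuth());   // true  - no session token
    System.out.println(permanent.isAWSSessionTokenAuth());  // false
    System.out.println(temporary.isAWSSessionTokenAuth());  // true  - session token present
    System.out.println(temporary.isAWSCredentialsAuth());   // false
  }
}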
|
0
|
java-sources/ai/h2o/h2o-persist-s3/3.46.0.7/water
|
java-sources/ai/h2o/h2o-persist-s3/3.46.0.7/water/persist/PersistS3.java
|
package water.persist;
import com.amazonaws.*;
import com.amazonaws.auth.*;
import com.amazonaws.regions.RegionUtils;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.S3ClientOptions;
import com.amazonaws.services.s3.model.*;
import org.apache.log4j.Logger;
import water.*;
import water.fvec.FileVec;
import water.fvec.S3FileVec;
import water.fvec.Vec;
import water.util.ByteStreams;
import water.util.StringUtils;
import water.util.ReflectionUtils;
import water.util.ArrayUtils;
import water.util.Log;
import java.io.*;
import java.net.URI;
import java.net.URL;
import java.util.*;
import static water.H2O.OptArgs.SYSTEM_PROP_PREFIX;
/** Persistence backend for S3 */
public final class PersistS3 extends Persist {
private static final Logger LOG = Logger.getLogger(PersistS3.class);
private static final String KEY_PREFIX = "s3://";
private static final int KEY_PREFIX_LEN = KEY_PREFIX.length();
private static final Object _lock = new Object();
private static volatile S3ClientFactory _s3Factory;
// for unit testing
static void setClientFactory(S3ClientFactory factory) {
_s3Factory = factory;
}
static S3ClientFactory getS3ClientFactory() {
return _s3Factory;
}
static AmazonS3 getClient(String bucket, H2O.OptArgs args, Object configuration) {
if (_s3Factory == null) {
String factoryClassName = System.getProperty(S3_CLIENT_FACTORY_CLASS);
if (args.configure_s3_using_s3a) {
if (factoryClassName == null) {
factoryClassName = S3_CLIENT_FACTORY_CLASS_DEFAULT;
} else
Log.warn("Option configure_s3_using_s3a was given alongside System property S3_CLIENT_FACTORY_CLASS=" +
factoryClassName + ". The system property will take precedence.");
}
synchronized (_lock) {
if (_s3Factory == null) {
if (StringUtils.isNullOrEmpty(factoryClassName)) {
_s3Factory = new DefaultS3ClientFactory(args);
} else {
try {
_s3Factory = ReflectionUtils.newInstance(factoryClassName, S3ClientFactory.class);
} catch (Exception e) {
throw new RuntimeException("Unable to instantiate S3 client factory for class " + factoryClassName + ".", e);
}
}
assert _s3Factory != null;
}
}
}
return _s3Factory.getOrMakeClient(bucket, configuration);
}
private static AmazonS3 getClient(String bucket) {
return getClient(bucket, H2O.ARGS, null);
}
private static AmazonS3 getClient(String[] decodedParts) {
return getClient(decodedParts[0]);
}
private static final class DefaultS3ClientFactory implements S3ClientFactory {
private final AmazonS3 _s3;
public DefaultS3ClientFactory(H2O.OptArgs args) {
_s3 = makeDefaultClient(args);
}
@Override
@SuppressWarnings("unchecked")
    public AmazonS3 getOrMakeClient(String bucket, Object configuration) {
return _s3;
}
}
static AmazonS3 makeDefaultClient(H2O.OptArgs args) {
try {
H2OAWSCredentialsProviderChain c = new H2OAWSCredentialsProviderChain(args);
c.setReuseLastProvider(false);
ClientConfiguration cc = s3ClientCfg();
return configureClient(new AmazonS3Client(c, cc));
} catch( Throwable e ) {
e.printStackTrace();
String msg = e.getMessage() + "\n" + "Unable to load S3 credentials.";
throw new RuntimeException(msg, e);
}
}
/** Modified version of default credentials provider which includes H2O-specific
* credentials provider.
*/
public static class H2OAWSCredentialsProviderChain extends AWSCredentialsProviderChain {
@SuppressWarnings("unused")
public H2OAWSCredentialsProviderChain() {
this(H2O.ARGS);
}
private H2OAWSCredentialsProviderChain(H2O.OptArgs args) {
super(constructProviderChain(args));
}
static AWSCredentialsProvider[] constructProviderChain(H2O.OptArgs args) {
return constructProviderChain(args, System.getProperty(S3_CUSTOM_CREDENTIALS_PROVIDER_CLASS));
}
static AWSCredentialsProvider[] constructProviderChain(H2O.OptArgs args, String customProviderClassName) {
AWSCredentialsProvider[] defaultProviders = new AWSCredentialsProvider[]{
new H2ODynamicCredentialsProvider(),
new H2OArgCredentialsProvider(args),
        new DefaultAWSCredentialsProviderChain() // use constructor instead of getInstance to be compatible with older versions of Hadoop
};
if (customProviderClassName == null) {
return defaultProviders;
}
try {
AWSCredentialsProvider customProvider = ReflectionUtils.newInstance(
customProviderClassName, AWSCredentialsProvider.class);
Log.info("Added custom credentials provider (" + customProviderClassName + ") " +
"to credentials provider chain.");
return ArrayUtils.append(new AWSCredentialsProvider[]{customProvider}, defaultProviders);
} catch (Exception e) {
Log.warn("Skipping invalid credentials provider (" + customProviderClassName + ").", e);
return defaultProviders;
}
}
}
/**
* Holds basic credentials (Secret key ID + Secret access key) pair.
*/
private static final class H2ODynamicCredentialsProvider implements AWSCredentialsProvider {
@Override
public AWSCredentials getCredentials() {
final IcedS3Credentials s3Credentials = DKV.getGet(IcedS3Credentials.S3_CREDENTIALS_DKV_KEY);
if (s3Credentials != null && s3Credentials.isAWSCredentialsAuth()) {
return new BasicAWSCredentials(s3Credentials._accessKeyId, s3Credentials._secretAccessKey);
} else if (s3Credentials != null && s3Credentials.isAWSSessionTokenAuth()) {
return new BasicSessionCredentials(s3Credentials._accessKeyId, s3Credentials._secretAccessKey,
s3Credentials._sessionToken);
} else {
throw new AmazonClientException("No Amazon S3 credentials set directly.");
}
}
@Override
public void refresh() {
// No actions taken on refresh
}
}
  /** A simple credentials provider reading file-based credentials from the given
   * command line argument <code>--aws_credentials</code>.
*/
static class H2OArgCredentialsProvider implements AWSCredentialsProvider {
// Default location of the AWS credentials file
public static final String DEFAULT_CREDENTIALS_LOCATION = "AwsCredentials.properties";
private final String _credentialsPath;
public H2OArgCredentialsProvider() {
this(H2O.ARGS);
}
public H2OArgCredentialsProvider(H2O.OptArgs args) {
_credentialsPath = args.aws_credentials != null ? args.aws_credentials : DEFAULT_CREDENTIALS_LOCATION;
}
@Override public AWSCredentials getCredentials() {
File credentials = new File(_credentialsPath);
try {
return new PropertiesCredentials(credentials);
} catch (IOException e) {
LOG.debug(
"Unable to load AWS credentials from file " + credentials +
"; exists? " + credentials.exists() + ", canRead? " + credentials.canRead() +
", size=" + credentials.length() + "; problem: " + e.getMessage());
throw new AmazonClientException(
"PersistS3. Unable to load AWS credentials from file " + credentials + ": " + e.getMessage());
}
}
@Override public void refresh() {}
@Override
public String toString() {
return getClass().getSimpleName();
}
}
/**
* Returns a pre-signed URL for accessing S3 resource.
*
* @param path S3 path
* @param expiration when should the pre-signed URL expire?
* @return pre-signed URL
*/
URL generatePresignedUrl(String path, Date expiration) {
final String[] bk = decodePath(path);
return getClient(bk).generatePresignedUrl(bk[0], bk[1], expiration, HttpMethod.GET);
}
@Override
public boolean exists(String path) {
return list(path).length >= 1;
}
@Override
public PersistEntry[] list(String path) {
final String[] bk = decodePath(path);
ObjectListing objects = getClient(bk).listObjects(bk[0], bk[1]);
final String key = bk[1].endsWith("/") ? bk[1].substring(0, bk[1].length() - 1) : bk[1];
PersistEntry[] entries = objects.getObjectSummaries().stream()
.filter(s -> s.getKey().equals(key) || s.getKey().startsWith(key + "/"))
.map(s -> new PersistEntry(s.getKey(), s.getSize(), s.getLastModified().getTime()))
.toArray(PersistEntry[]::new);
Arrays.sort(entries);
return entries;
}
@Override
public InputStream open(String path) {
String[] bk = decodePath(path);
GetObjectRequest r = new GetObjectRequest(bk[0], bk[1]);
S3Object s3obj = getClient(bk).getObject(r);
return s3obj.getObjectContent();
}
@Override
public OutputStream create(String path, boolean overwrite) {
String[] bk = decodePath(path);
final File tmpFile;
try {
tmpFile = File.createTempFile("h2o-export", ".bin");
tmpFile.deleteOnExit();
} catch (IOException e) {
throw new RuntimeException("Failed to create temporary file for S3 object upload", e);
}
Runnable callback = new PutObjectCallback(getClient(bk), tmpFile, true, bk[0], bk[1]);
try {
return new CallbackFileOutputStream(tmpFile, callback);
} catch (FileNotFoundException e) {
throw new RuntimeException(e); // should never happen
}
}
static class PutObjectCallback implements Runnable {
private final AmazonS3 _client;
private final File _file;
private final boolean _deleteOnDone;
private final String _bucketName;
private final String _key;
public PutObjectCallback(AmazonS3 client, File file, boolean deleteOnDone, String bucketName, String key) {
_client = client;
_file = file;
_deleteOnDone = deleteOnDone;
_bucketName = bucketName;
_key = key;
}
@Override
public void run() {
try {
PutObjectRequest request = new PutObjectRequest(_bucketName, _key, _file);
PutObjectResult result = _client.putObject(request);
Log.info("Object `" + _key + "` uploaded to bucket `" + _bucketName + "`, ETag=`" + result.getETag() + "`.");
} finally {
if (_deleteOnDone) {
boolean deleted = _file.delete();
if (!deleted) {
LOG.warn("Temporary file `" + _file.getAbsolutePath() + "` was not deleted. Please delete manually.");
}
}
}
}
}
static class CallbackFileOutputStream extends FileOutputStream {
private final Object closeLock = new Object();
private volatile boolean closed = false;
private final Runnable callback;
public CallbackFileOutputStream(File file, Runnable callback) throws FileNotFoundException {
super(file);
this.callback = callback;
}
@Override
public void close() throws IOException {
synchronized (closeLock) {
if (closed) {
super.close();
return; // run callback only once
}
closed = true;
}
callback.run();
}
}
@Override
public boolean mkdirs(String path) {
return true; // S3 doesn't really have concept of directories - for our use case we can just ignore it
}
public static Key loadKey(ObjectListing listing, S3ObjectSummary obj) throws IOException {
    // Note: Some S3 implementations do not fill the bucketName of the returned object (for example, Minio).
// So guess it based on returned ObjectListing
String bucketName = obj.getBucketName() == null ? listing.getBucketName() : obj.getBucketName();
return S3FileVec.make(encodePath(bucketName, obj.getKey()),obj.getSize());
}
private static void processListing(ObjectListing listing, String pattern, ArrayList<String> succ, ArrayList<String> fail, boolean doImport) {
if( pattern != null && pattern.isEmpty()) pattern = null;
for( S3ObjectSummary obj : listing.getObjectSummaries() ) {
if (obj.getKey().endsWith("/")) continue;
if (pattern != null && !obj.getKey().matches(pattern)) continue;
try {
if (doImport) {
Key k = loadKey(listing, obj);
succ.add(k.toString());
} else {
succ.add(obj.getKey());
}
} catch( IOException e ) {
fail.add(obj.getKey());
}
}
}
public void importFiles(String path, String pattern, ArrayList<String> files, ArrayList<String> keys, ArrayList<String> fails, ArrayList<String> dels) {
LOG.info("ImportS3 processing (" + path + ")");
// List of processed files
String[] bk = decodePath(path);
AmazonS3 s3 = getClient(bk);
ObjectListing currentList = s3.listObjects(bk[0], bk[1]);
processListing(currentList, pattern, files, fails, true);
while(currentList.isTruncated()){
currentList = s3.listNextBatchOfObjects(currentList);
processListing(currentList, pattern, files, fails, true);
}
keys.addAll(files);
// write barrier was here : DKV.write_barrier();
}
// file implementation -------------------------------------------------------
// Read up to 'len' bytes of Value. Value should already be persisted to
// disk. A racing delete can trigger a failure where we get a null return,
// but no crash (although one could argue that a racing load&delete is a bug
// no matter what).
@Override public byte[] load(Value v) {
long start_io_ms = System.currentTimeMillis();
byte[] b = MemoryManager.malloc1(v._max);
Key k = v._key;
long skip = 0;
// Skip offset based on chunk number
if(k._kb[0] == Key.CHK)
skip = FileVec.chunkOffset(k); // The offset
    // To complicate matters, S3 likes to reset connections when H2O hits it
// too hard. We "fix" this by just trying again, assuming we're getting
// hit with a bogus resource limit (H2O doing a parse looks like a DDOS to
// Amazon S3).
S3ObjectInputStream s = null;
while( true ) { // Loop, in case we get premature EOF's
try {
long start_ns = System.nanoTime(); // Blocking i/o call timing - without counting repeats
s = getObjectForKey(k, skip, v._max).getObjectContent();
ByteStreams.readFully(s, b); // delegate work to Google (it reads the byte buffer in a cycle as we did)
assert v.isPersisted();
// TimeLine.record_IOclose(start_ns, start_io_ms, 1/* read */, v._max, Value.S3);
return b;
// Explicitly ignore the following exceptions but
// fail on the rest IOExceptions
} catch( IOException e ) {
ignoreAndWait(e);
} finally {
try {
if( s != null ) s.close();
} catch( IOException e ) {}
}
}
}
private static void ignoreAndWait(final Exception e) {
LOG.debug("Hit the S3 reset problem, waiting and retrying...", e);
try {
Thread.sleep(500);
} catch( InterruptedException ie ) {}
}
// Store Value v to disk.
@Override public void store(Value v) {
if( !v._key.home() ) return;
throw H2O.unimpl(); // VA only
}
/**
* Creates the key for given S3 bucket and key. Returns the H2O key, or null if the key cannot be
* created.
*
* @param bucket
* Bucket name
* @param key
* Key name (S3)
* @return H2O key pointing to the given bucket and key.
*/
public static Key encodeKey(String bucket, String key) {
Key res = encodeKeyImpl(bucket, key);
// assert checkBijection(res, bucket, key);
return res;
}
/**
* Decodes the given H2O key to the S3 bucket and key name. Returns the array of two strings,
* first one is the bucket name and second one is the key name.
*
* @param k
* Key to be decoded.
* @return Pair (array) of bucket name and key name.
*/
public static String[] decodeKey(Key k) {
return decodeKeyImpl(k);
// assert checkBijection(k, res[0], res[1]);
// return res;
}
// private static boolean checkBijection(Key k, String bucket, String key) {
// Key en = encodeKeyImpl(bucket, key);
// String[] de = decodeKeyImpl(k);
// boolean res = Arrays.equals(k._kb, en._kb) && bucket.equals(de[0]) && key.equals(de[1]);
// assert res : "Bijection failure:" + "\n\tKey 1:" + k + "\n\tKey 2:" + en + "\n\tBkt 1:" + bucket + "\n\tBkt 2:"
// + de[0] + "\n\tStr 1:" + key + "\n\tStr 2:" + de[1] + "";
// return res;
// }
private static String encodePath(String bucket, String key){
return KEY_PREFIX + bucket + '/' + key;
}
private static Key encodeKeyImpl(String bucket, String key) {
return Key.make(KEY_PREFIX + bucket + '/' + key);
}
/**
* Decompose S3 name into bucket name and key name
*
* @param s generic s3 path (e.g., "s3://bucketname/my/directory/file.ext")
* @return array of { bucket name, key }
*/
private static String [] decodePath(String s) {
assert s.startsWith(KEY_PREFIX) && s.indexOf('/') >= 0 : "Attempting to decode non s3 key: " + s;
s = s.substring(KEY_PREFIX_LEN);
int dlm = s.indexOf('/');
if(dlm < 0) return new String[]{s,null};
String bucket = s.substring(0, dlm);
String key = s.substring(dlm + 1);
return new String[] { bucket, key };
}
private static String[] decodeKeyImpl(Key k) {
String s = new String((k._kb[0] == Key.CHK)?Arrays.copyOfRange(k._kb, Vec.KEY_PREFIX_LEN, k._kb.length):k._kb);
return decodePath(s);
}
// Gets the S3 object associated with the key that can read length bytes from offset
private static S3Object getObjectForKey(Key k, long offset, long length) throws IOException {
String[] bk = decodeKey(k);
GetObjectRequest r = new GetObjectRequest(bk[0], bk[1]);
r.setRange(offset, offset + length - 1); // Range is *inclusive* according to docs???
return getClient(bk).getObject(r);
}
/** S3 socket timeout property name */
public final static String S3_SOCKET_TIMEOUT_PROP = SYSTEM_PROP_PREFIX + "persist.s3.socketTimeout";
/** S3 connection timeout property name */
public final static String S3_CONNECTION_TIMEOUT_PROP = SYSTEM_PROP_PREFIX + "persist.s3.connectionTimeout";
/** S3 maximal error retry number */
public final static String S3_MAX_ERROR_RETRY_PROP = SYSTEM_PROP_PREFIX + "persist.s3.maxErrorRetry";
/** S3 maximal http connections */
public final static String S3_MAX_HTTP_CONNECTIONS_PROP = SYSTEM_PROP_PREFIX + "persist.s3.maxHttpConnections";
/** S3 force HTTP traffic */
public final static String S3_FORCE_HTTP = SYSTEM_PROP_PREFIX + "persist.s3.force.http";
  /** S3 end-point, for example: "https://localhost:9000" */
public final static String S3_END_POINT = SYSTEM_PROP_PREFIX + "persist.s3.endPoint";
/** S3 region, for example "us-east-1",
* see {@link com.amazonaws.regions.Region#getRegion(com.amazonaws.regions.Regions)} for region list */
public final static String S3_REGION = SYSTEM_PROP_PREFIX + "persist.s3.region";
/** Enable S3 path style access via setting the property to true.
* See: {@link com.amazonaws.services.s3.S3ClientOptions#setPathStyleAccess(boolean)} */
public final static String S3_ENABLE_PATH_STYLE = SYSTEM_PROP_PREFIX + "persist.s3.enable.path.style";
/** Specify custom credentials provider implementation */
public final static String S3_CUSTOM_CREDENTIALS_PROVIDER_CLASS = SYSTEM_PROP_PREFIX + "persist.s3.customCredentialsProviderClass";
/** Specify class name of S3ClientFactory implementation */
public final static String S3_CLIENT_FACTORY_CLASS = SYSTEM_PROP_PREFIX + "persist.s3.clientFactoryClass";
/** Specify class name of S3ClientFactory implementation */
public final static String S3_CLIENT_FACTORY_CLASS_DEFAULT = "water.persist.S3AClientFactory";
static ClientConfiguration s3ClientCfg() {
ClientConfiguration cfg = new ClientConfiguration();
Properties prop = System.getProperties();
if (prop.containsKey(S3_SOCKET_TIMEOUT_PROP)) cfg.setSocketTimeout(Integer.getInteger(S3_SOCKET_TIMEOUT_PROP));
if (prop.containsKey(S3_CONNECTION_TIMEOUT_PROP)) cfg.setConnectionTimeout(Integer.getInteger(S3_CONNECTION_TIMEOUT_PROP));
if (prop.containsKey(S3_MAX_ERROR_RETRY_PROP)) cfg.setMaxErrorRetry(Integer.getInteger(S3_MAX_ERROR_RETRY_PROP));
if (prop.containsKey(S3_MAX_HTTP_CONNECTIONS_PROP)) cfg.setMaxConnections(Integer.getInteger(S3_MAX_HTTP_CONNECTIONS_PROP));
if (prop.containsKey(S3_FORCE_HTTP)) cfg.setProtocol(Protocol.HTTP);
return cfg;
}
static AmazonS3Client configureClient(AmazonS3Client s3Client) {
if (System.getProperty(S3_REGION) != null) {
String region = System.getProperty(S3_REGION);
LOG.debug(String.format("S3 region specified: %s", region) );
s3Client.setRegion(RegionUtils.getRegion(region));
}
// Region overrides end-point settings
if (System.getProperty(S3_END_POINT) != null) {
String endPoint = System.getProperty(S3_END_POINT);
LOG.debug(String.format("S3 endpoint specified: %s", endPoint));
s3Client.setEndpoint(endPoint);
}
if (System.getProperty(S3_ENABLE_PATH_STYLE) != null && Boolean.parseBoolean(System.getProperty(S3_ENABLE_PATH_STYLE))) {
LOG.debug("S3 path style access enabled");
S3ClientOptions sco = S3ClientOptions.builder()
.setPathStyleAccess(true)
.build();
s3Client.setS3ClientOptions(sco);
}
return s3Client;
}
@Override public void delete(Value v) {
throw new UnsupportedOperationException();
}
@Override
public Key uriToKey(URI uri) throws IOException {
String[] bk = decodePath(uri.toString());
AmazonS3 s3 = getClient(bk);
try {
ObjectMetadata om = s3.getObjectMetadata(bk[0], bk[1]);
// Voila: create S3 specific key pointing to the file
return S3FileVec.make(encodePath(bk[0], bk[1]), om.getContentLength());
} catch (AmazonServiceException e) {
if (e.getErrorCode().contains("404")) {
throw new IOException(e);
} else {
LOG.error("AWS failed for " + Arrays.toString(bk) + ": " + e.getMessage());
throw e;
}
}
}
@Override
public void cleanUp() { throw H2O.unimpl(); /** user-mode swapping not implemented */}
static class Cache {
long _lastUpdated = 0;
long _timeoutMillis = 5*60*1000;
String [] _cache = new String[0];
public boolean containsKey(String k) { return Arrays.binarySearch(_cache,k) >= 0;}
protected String [] update(){
LOG.debug("Renewing S3 bucket cache.");
List<Bucket> l = getClient((String) null).listBuckets();
String [] cache = new String[l.size()];
int i = 0;
for (Bucket b : l) cache[i++] = b.getName();
Arrays.sort(cache);
return _cache = cache;
}
protected String wrapKey(String s) {return "s3://" + s;}
public ArrayList<String> fetch(String filter, int limit) {
String [] cache = _cache;
if(System.currentTimeMillis() > _lastUpdated + _timeoutMillis) {
cache = update();
_lastUpdated = System.currentTimeMillis();
}
ArrayList<String> res = new ArrayList<>();
int i = Arrays.binarySearch(cache, filter);
if (i < 0) i = -i - 1;
while (i < cache.length && cache[i].startsWith(filter) && (limit < 0 || res.size() < limit))
res.add(wrapKey(cache[i++]));
return res;
}
}
private static class KeyCache extends Cache {
private final String _keyPrefix;
private final String _bucket;
public KeyCache(String bucket){
_bucket = bucket;
_keyPrefix = super.wrapKey(bucket) + "/";
}
@Override
protected String [] update(){
LOG.debug("Renewing S3 cache.");
AmazonS3 s3 = getClient(_bucket);
ObjectListing currentList = s3.listObjects(_bucket,"");
ArrayList<String> res = new ArrayList<>();
processListing(currentList, null, res, null, false);
while(currentList.isTruncated()){
currentList = s3.listNextBatchOfObjects(currentList);
processListing(currentList, null, res, null, false);
}
Collections.sort(res);
return _cache = res.toArray(new String[res.size()]);
}
@Override
protected String wrapKey(String s) {
return _keyPrefix + s;
}
}
static volatile Cache _bucketCache = new Cache();
static volatile HashMap<String, KeyCache> _keyCaches = new HashMap<>();
@Override
public List<String> calcTypeaheadMatches(String filter, int limit) {
String [] parts = decodePath(filter);
if(parts[1] != null) { // bucket and key prefix
if(_keyCaches.get(parts[0]) == null) {
if(!getClient(parts[0]).doesBucketExist(parts[0]))
return new ArrayList<>();
_keyCaches.put(parts[0], new KeyCache(parts[0]));
}
return _keyCaches.get(parts[0]).fetch(parts[1],limit);
} else { // no key, only bucket prefix
return _bucketCache.fetch(parts[0],limit);
}
}
}
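// --- Illustrative sketch (not part of the original source) ---
// Hedged example of pointing PersistS3 at an S3-compatible endpoint (e.g. MinIO)
// using the public property names defined above; the endpoint value is a placeholder
// and the properties must be set before the first S3 client is created.
class PersistS3EndpointExample {
  static void configureForMinio() {
    System.setProperty(PersistS3.S3_END_POINT, "http://localhost:9000");
    System.setProperty(PersistS3.S3_ENABLE_PATH_STYLE, "true");
    System.setProperty(PersistS3.S3_FORCE_HTTP, "true");
  }
}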
|
0
|
java-sources/ai/h2o/h2o-persist-s3/3.46.0.7/water
|
java-sources/ai/h2o/h2o-persist-s3/3.46.0.7/water/persist/PersistS3CredentialsV3.java
|
package water.persist;
import water.Iced;
import water.api.API;
import water.api.schemas3.SchemaV3;
public class PersistS3CredentialsV3 extends SchemaV3<Iced, PersistS3CredentialsV3> {
@API(required = true, direction = API.Direction.INPUT, level = API.Level.secondary, help = "S3 Secret Key ID")
public String secret_key_id;
@API(required = true, direction = API.Direction.INPUT, level = API.Level.secondary, help = "S3 Secret Key")
public String secret_access_key;
@API(required = false, direction = API.Direction.INPUT, level = API.Level.secondary, help = "S3 Session token")
public String session_token;
}
|
0
|
java-sources/ai/h2o/h2o-persist-s3/3.46.0.7/water
|
java-sources/ai/h2o/h2o-persist-s3/3.46.0.7/water/persist/PersistS3Handler.java
|
package water.persist;
import water.DKV;
import water.Key;
import water.api.Handler;
import java.util.Objects;
public class PersistS3Handler extends Handler {
public PersistS3CredentialsV3 setS3Credentials(final int version, final PersistS3CredentialsV3 s3Credentials){
validateS3Credentials(s3Credentials);
final IcedS3Credentials icedS3Credentials = new IcedS3Credentials(s3Credentials.secret_key_id, s3Credentials.secret_access_key,
s3Credentials.session_token);
DKV.put(Key.make(IcedS3Credentials.S3_CREDENTIALS_DKV_KEY), icedS3Credentials);
return s3Credentials;
}
public PersistS3CredentialsV3 removeS3Credentials(final int version, final PersistS3CredentialsV3 s3Credentials){
DKV.remove(Key.make(IcedS3Credentials.S3_CREDENTIALS_DKV_KEY));
return s3Credentials;
}
/**
* Checks for basic mistakes users might make when providing S3 credentials.
* @param s3Credentials S3 credentials provided by the user
*/
private void validateS3Credentials(final PersistS3CredentialsV3 s3Credentials){
Objects.requireNonNull(s3Credentials);
if(s3Credentials.secret_key_id == null) throw new IllegalArgumentException("The field 'S3_SECRET_KEY_ID' may not be null.");
if(s3Credentials.secret_access_key == null) throw new IllegalArgumentException("The field 'S3_SECRET_ACCESS_KEY' may not be null.");
s3Credentials.secret_key_id = s3Credentials.secret_key_id.trim();
s3Credentials.secret_access_key = s3Credentials.secret_access_key.trim();
if(s3Credentials.session_token != null) {
s3Credentials.session_token = s3Credentials.session_token.trim();
}
if(s3Credentials.secret_key_id.isEmpty()) throw new IllegalArgumentException("The field 'S3_SECRET_KEY_ID' may not be empty.");
if (s3Credentials.secret_access_key.isEmpty())
throw new IllegalArgumentException("The field 'S3_SECRET_ACCESS_KEY' may not be empty.");
if (s3Credentials.session_token != null && s3Credentials.session_token.isEmpty())
throw new IllegalArgumentException("The field 'S3_SESSION_TOKEN' may not be empty");
}
}
|