| index (int64) | repo_id (string) | file_path (string) | content (string) |
|---|---|---|---|
0
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu/googleanalytics/DefaultRequestParameterDiscoverer.java
|
package com.brsanthu.googleanalytics;
import static com.brsanthu.googleanalytics.GaUtils.appendSystemProperty;
import static com.brsanthu.googleanalytics.GaUtils.isEmpty;
//import org.slf4j.Logger;
//import org.slf4j.LoggerFactory;
/**
* Default request parameter discoverer. Discovers the following parameters:
* <ul>
* <li>Creates User Agent as java/1.6.0_45-b06/Sun Microsystems Inc./Java HotSpot(TM) 64-Bit Server VM/Windows 7/6.1/amd64</li>
* <li>User Language, and Country</li>
* <li>File Encoding</li>
* </ul>
*
* @author Santhosh Kumar
*
* This copy of google-analytics-java is a back port of version 1.1.2 of the library.
* This backport removes the slf4j dependency, and modifies the code to work with the
* 4.1 version of the Apache http client library.
*
* Original sources can be found at https://github.com/brsanthu/google-analytics-java.
* All copyrights retained by original authors.
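*
* <p>A minimal usage sketch (the discoverer is normally invoked by {@link GoogleAnalytics} when
* request parameter discovery is enabled in the config; the derived values shown are illustrative):</p>
* <pre>{@code
* GoogleAnalyticsConfig config = new GoogleAnalyticsConfig();
* DefaultRequest request = new DefaultRequest();
* new DefaultRequestParameterDiscoverer().discoverParameters(config, request);
* // e.g. request.userLanguage() -> "en-US", request.documentEncoding() -> "UTF-8"
* }</pre>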
*/
public class DefaultRequestParameterDiscoverer implements RequestParameterDiscoverer {
@Override
public DefaultRequest discoverParameters(GoogleAnalyticsConfig config, DefaultRequest request) {
try {
if (isEmpty(config.getUserAgent())) {
config.setUserAgent(getUserAgentString());
}
if (isEmpty(request.userLanguage())) {
String region = System.getProperty("user.region");
if (isEmpty(region)) {
region = System.getProperty("user.country");
}
request.userLanguage(System.getProperty("user.language") + "-" + region);
}
if (isEmpty(request.documentEncoding())) {
request.documentEncoding(System.getProperty("file.encoding"));
}
} catch (Exception e) {
// logger.warn("Exception while deriving the System properties for request " + request, e);
}
return request;
}
protected String getUserAgentString() {
StringBuilder sb = new StringBuilder("java");
appendSystemProperty(sb, "java.runtime.version");
appendSystemProperty(sb, "java.specification.vendor");
appendSystemProperty(sb, "java.vm.name");
appendSystemProperty(sb, "os.name");
appendSystemProperty(sb, "os.version");
appendSystemProperty(sb, "os.arch");
return sb.toString();
}
}
|
0
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu/googleanalytics/EventHit.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.brsanthu.googleanalytics;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.EVENT_ACTION;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.EVENT_CATEGORY;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.EVENT_LABEL;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.EVENT_VALUE;
/**
* GA request to track events.
* <p/>
* <p>For more information, see <a href="https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters#events">GA Parameters Reference</a></p>
*
* @author Santhosh Kumar
*
* This copy of google-analytics-java is a back port of version 1.1.2 of the library.
* This backport removes the slf4j dependency, and modifies the code to work with the
* 4.1 version of the Apache http client library.
*
* Original sources can be found at https://github.com/brsanthu/google-analytics-java.
* All copyrights retained by original authors.
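*
* <p>A minimal usage sketch (the tracking id is a placeholder):</p>
* <pre>{@code
* GoogleAnalytics ga = new GoogleAnalytics("UA-XXXX-Y");
* ga.post(new EventHit("video", "play", "intro", 1));
* }</pre>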
*/
public class EventHit extends GoogleAnalyticsRequest<EventHit> {
public EventHit() {
this(null, null, null, null);
}
public EventHit(String eventCategory, String eventAction) {
this(eventCategory, eventAction, null, null);
}
public EventHit (String eventCategory, String eventAction, String eventLabel) {
this(eventCategory, eventAction, eventLabel, null);
}
public EventHit (String eventCategory, String eventAction, String eventLabel, Integer eventValue) {
super("event");
eventCategory(eventCategory);
eventAction(eventAction);
eventLabel(eventLabel);
eventValue(eventValue);
}
/**
* <h2 id="events">Event Tracking</h2>
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the event category. Must not be empty.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>ec</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>150 Bytes
* </td>
* <td>event</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>Category</code><br>
* Example usage: <code>ec=Category</code>
* </div>
* </div>
*/
public EventHit eventCategory(String value) {
setString(EVENT_CATEGORY, value);
return this;
}
public String eventCategory() {
return getString(EVENT_CATEGORY);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the event action. Must not be empty.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>ea</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>500 Bytes
* </td>
* <td>event</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>Action</code><br>
* Example usage: <code>ea=Action</code>
* </div>
* </div>
*/
public EventHit eventAction(String value) {
setString(EVENT_ACTION, value);
return this;
}
public String eventAction() {
return getString(EVENT_ACTION);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the event label.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>el</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>500 Bytes
* </td>
* <td>event</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>Label</code><br>
* Example usage: <code>el=Label</code>
* </div>
* </div>
*/
public EventHit eventLabel(String value) {
setString(EVENT_LABEL, value);
return this;
}
public String eventLabel() {
return getString(EVENT_LABEL);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the event value. Values must be non-negative.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>ev</code></td>
* <td>integer</td>
* <td><span class="none">None</span>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>event</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>55</code><br>
* Example usage: <code>ev=55</code>
* </div>
* </div>
*/
public EventHit eventValue(Integer value) {
setInteger(EVENT_VALUE, value);
return this;
}
public Integer eventValue() {
return getInteger(EVENT_VALUE);
}
}
|
0
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu/googleanalytics/ExceptionHit.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.brsanthu.googleanalytics;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.EXCEPTION_DESCRIPTION;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.EXCEPTION_FATAL;
/**
* GA request to track exceptions.
* <p/>
* <p>For more information, see <a href="https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters#exception">GA Parameters Reference</a></p>
*
* @author Santhosh Kumar
*
* This copy of google-analytics-java is a back port of version 1.1.2 of the library.
* This backport removes the slf4j dependency, and modifies the code to work with the
* 4.1 version of the Apache http client library.
*
* Original sources can be found at https://github.com/brsanthu/google-analytics-java.
* All copyrights retained by original authors.
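*
* <p>A minimal usage sketch (the tracking id and {@code riskyOperation()} are placeholders):</p>
* <pre>{@code
* GoogleAnalytics ga = new GoogleAnalytics("UA-XXXX-Y");
* try {
*     riskyOperation();
* } catch (Exception e) {
*     // report the exception class name and whether it was fatal
*     ga.post(new ExceptionHit(e.getClass().getSimpleName(), false));
* }
* }</pre>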
*/
public class ExceptionHit extends GoogleAnalyticsRequest<ExceptionHit> {
public ExceptionHit() {
this(null);
}
public ExceptionHit(String exceptionDescription) {
this(exceptionDescription, false);
}
public ExceptionHit(String exceptionDescription, Boolean fatal) {
super("exception");
exceptionDescription(exceptionDescription);
exceptionFatal(fatal);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the description of an exception.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>exd</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>150 Bytes
* </td>
* <td>exception</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>DatabaseError</code><br>
* Example usage: <code>exd=DatabaseError</code>
* </div>
* </div>
*/
public ExceptionHit exceptionDescription(String value) {
setString(EXCEPTION_DESCRIPTION, value);
return this;
}
public String exceptionDescription() {
return getString(EXCEPTION_DESCRIPTION);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies whether the exception was fatal.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>exf</code></td>
* <td>boolean</td>
* <td><code>1</code>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>exception</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>0</code><br>
* Example usage: <code>exf=0</code>
* </div>
* </div>
*/
public ExceptionHit exceptionFatal(Boolean value) {
setBoolean(EXCEPTION_FATAL, value);
return this;
}
public Boolean exceptionFatal() {
return getBoolean(EXCEPTION_FATAL);
}
}
|
0
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu/googleanalytics/GaUtils.java
|
package com.brsanthu.googleanalytics;
/**
* A small library for interacting with Google Analytics Measurement Protocol. This
* copy is a back port of version 1.1.2 of the library. This backport removes
* the slf4j dependency, and modifies the code to work with the 4.1 version of the
* Apache http client library.
*
* Original sources can be found at https://github.com/brsanthu/google-analytics-java.
* All copyrights retained by original authors.
*
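* A short sketch of {@link #appendSystemProperty(StringBuilder, String)}, which appends a "/"
* separator before each non-empty value (the resolved values depend on the running JVM):
* <pre>{@code
* StringBuilder sb = new StringBuilder("java");
* GaUtils.appendSystemProperty(sb, "os.name"); // e.g. "java/Linux"
* GaUtils.appendSystemProperty(sb, "os.arch"); // e.g. "java/Linux/amd64"
* }</pre>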
*/
public class GaUtils {
public static boolean isNotEmpty(String value) {
return !isEmpty(value);
}
public static boolean isEmpty(String value) {
return value == null || value.trim().length() == 0;
}
public static StringBuilder appendSystemProperty(StringBuilder sb, String property) {
String value = System.getProperty(property);
if (isNotEmpty(value)) {
if (isNotEmpty(sb.toString())) {
sb.append("/");
}
sb.append(value);
}
return sb;
}
}
|
0
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu/googleanalytics/GoogleAnalytics.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.brsanthu.googleanalytics;
import static com.brsanthu.googleanalytics.GaUtils.isEmpty;
import static com.brsanthu.googleanalytics.GaUtils.isNotEmpty;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.UnknownHostException;
import java.nio.charset.Charset;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.http.HttpHost;
import org.apache.http.NameValuePair;
import org.apache.http.message.BasicNameValuePair;
import org.apache.http.params.CoreProtocolPNames;
import org.apache.http.params.BasicHttpParams;
import org.apache.http.client.HttpClient;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.util.EntityUtils;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.HttpResponse;
import org.apache.http.conn.params.ConnRoutePNames;
import org.apache.http.impl.conn.tsccm.ThreadSafeClientConnManager;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.impl.client.BasicCredentialsProvider;
/**
* This is the main class of this library that accepts the requests from clients and
* sends the events to Google Analytics (GA).
* <p/>
* Clients need to instantiate this object with {@link GoogleAnalyticsConfig} and {@link DefaultRequest}.
* The configuration contains sensible defaults, so one could just initialize using one of the convenience constructors.
* <p/>
* This object is thread-safe and it is intended that clients create one instance per GA Tracker Id
* and reuse it each time an event needs to be posted.
* <p/>
* This object holds resources which need to be shut down/disposed, so the {@link #close()} method should be called
* to release them. Once close is called, this instance cannot be reused, so create a new instance
* if required.
*
* This copy of google-analytics-java is a back port of version 1.1.2 of the library.
* This backport removes the slf4j dependency, and modifies the code to work with the
* 4.1 version of the Apache http client library.
*
* Original sources can be found at https://github.com/brsanthu/google-analytics-java.
* All copyrights retained by original authors.
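*
* <p>A minimal usage sketch (the tracking id, app name and version are placeholders):</p>
* <pre>{@code
* GoogleAnalytics ga = new GoogleAnalytics("UA-XXXX-Y", "myApp", "1.0");
* ga.post(new EventHit("category", "action"));      // synchronous post
* ga.postAsync(new EventHit("category", "action")); // asynchronous post via the internal executor
* ga.close();                                       // release the executor when done
* }</pre>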
*/
public class GoogleAnalytics {
private static final Charset UTF8 = Charset.forName("UTF-8");
private GoogleAnalyticsConfig config = null;
private DefaultRequest defaultRequest = null;
private HttpClient httpClient = null;
private ThreadPoolExecutor executor = null;
private GoogleAnalyticsStats stats = new GoogleAnalyticsStats();
public GoogleAnalytics(String trackingId) {
this(new GoogleAnalyticsConfig(), new DefaultRequest().trackingId(trackingId));
}
public GoogleAnalytics(GoogleAnalyticsConfig config, String trackingId) {
this(config, new DefaultRequest().trackingId(trackingId));
}
public GoogleAnalytics(String trackingId, String appName, String appVersion) {
this(new GoogleAnalyticsConfig(), trackingId, appName, appVersion);
}
public GoogleAnalytics(GoogleAnalyticsConfig config, String trackingId, String appName, String appVersion) {
this(config, new DefaultRequest().trackingId(trackingId).applicationName(appName).applicationVersion(appVersion));
}
public GoogleAnalytics(GoogleAnalyticsConfig config, DefaultRequest defaultRequest) {
if (config.isDiscoverRequestParameters() && config.getRequestParameterDiscoverer() != null) {
config.getRequestParameterDiscoverer().discoverParameters(config, defaultRequest);
}
this.config = config;
this.defaultRequest = defaultRequest;
this.defaultRequest.userAgent(config.getUserAgent());
this.httpClient = createHttpClient(config);
}
public GoogleAnalyticsConfig getConfig() {
return config;
}
public HttpClient getHttpClient() {
return httpClient;
}
public DefaultRequest getDefaultRequest() {
return defaultRequest;
}
public void setDefaultRequest(DefaultRequest request) {
this.defaultRequest = request;
}
public void setHttpClient(HttpClient httpClient) {
this.httpClient = httpClient;
}
@SuppressWarnings({"rawtypes"})
public GoogleAnalyticsResponse post(GoogleAnalyticsRequest request) {
GoogleAnalyticsResponse response = new GoogleAnalyticsResponse();
if (!config.isEnabled()) {
return response;
}
HttpResponse httpResponse = null;
try {
List<NameValuePair> postParms = new ArrayList<NameValuePair>();
//Process the parameters
processParameters(request, postParms);
//Process custom dimensions
processCustomDimensionParameters(request, postParms);
//Process custom metrics
processCustomMetricParameters(request, postParms);
//logger.debug("GA Processed all parameters and sending the request " + postParms);
HttpPost httpPost = new HttpPost(config.getUrl());
try {
httpPost.setEntity(new UrlEncodedFormEntity(postParms, "UTF-8"));
} catch (UnsupportedEncodingException e) { /*Log.warn("This system doesn't support UTF-8!");*/ }
try {
httpResponse = httpClient.execute(httpPost);
} catch (ClientProtocolException e) {
//logger.trace("GA connectivity had a problem or the connectivity was aborted. "+e.toString());
} catch (IOException e) {
//logger.trace("GA connectivity suffered a protocol error. "+e.toString());
}
//logger.debug("GA response: " +httpResponse.toString());
response.setPostedParms(postParms);
// httpResponse can be null if execute() above failed and the exception was swallowed
if (httpResponse != null) {
response.setStatusCode(httpResponse.getStatusLine().getStatusCode());
try {
EntityUtils.consume(httpResponse.getEntity());
} catch (IOException e) {/*consume quietly*/}
}
if (config.isGatherStats()) {
gatherStats(request);
}
} catch (Exception e) {
if (e instanceof UnknownHostException) {
//logger.trace("Coudln't connect to GA. Internet may not be available. " + e.toString());
} else {
//logger.trace("Exception while sending the GA tracker request: " + request +". "+ e.toString());
}
}
return response;
}
@SuppressWarnings({"rawtypes", "unchecked"})
private void processParameters(GoogleAnalyticsRequest request, List<NameValuePair> postParms) {
Map<GoogleAnalyticsParameter, String> requestParms = request.getParameters();
Map<GoogleAnalyticsParameter, String> defaultParms = defaultRequest.getParameters();
for (GoogleAnalyticsParameter parm : defaultParms.keySet()) {
String value = requestParms.get(parm);
String defaultValue = defaultParms.get(parm);
if (isEmpty(value) && !isEmpty(defaultValue)) {
requestParms.put(parm, defaultValue);
}
}
for (GoogleAnalyticsParameter key : requestParms.keySet()) {
postParms.add(new BasicNameValuePair(key.getParameterName(), requestParms.get(key)));
}
}
/**
* Processes the custom dimensions and adds the values to the list of parameters that will be posted to GA.
*
* @param request
* @param postParms
*/
private void processCustomDimensionParameters(@SuppressWarnings("rawtypes") GoogleAnalyticsRequest request, List<NameValuePair> postParms) {
Map<String, String> customDimParms = new HashMap<String, String>();
for (String defaultCustomDimKey : defaultRequest.customDimensions().keySet()) {
customDimParms.put(defaultCustomDimKey, defaultRequest.customDimensions().get(defaultCustomDimKey));
}
@SuppressWarnings("unchecked")
Map<String, String> requestCustomDims = request.customDimensions();
for (String requestCustomDimKey : requestCustomDims.keySet()) {
customDimParms.put(requestCustomDimKey, requestCustomDims.get(requestCustomDimKey));
}
for (String key : customDimParms.keySet()) {
postParms.add(new BasicNameValuePair(key, customDimParms.get(key)));
}
}
/**
* Processes the custom metrics and adds the values to the list of parameters that will be posted to GA.
*
* @param request
* @param postParms
*/
private void processCustomMetricParameters(@SuppressWarnings("rawtypes") GoogleAnalyticsRequest request, List<NameValuePair> postParms) {
Map<String, String> customMetricParms = new HashMap<String, String>();
for (String defaultCustomMetricKey : defaultRequest.custommMetrics().keySet()) {
customMetricParms.put(defaultCustomMetricKey, defaultRequest.custommMetrics().get(defaultCustomMetricKey));
}
@SuppressWarnings("unchecked")
Map<String, String> requestCustomMetrics = request.custommMetrics();
for (String requestCustomDimKey : requestCustomMetrics.keySet()) {
customMetricParms.put(requestCustomDimKey, requestCustomMetrics.get(requestCustomDimKey));
}
for (String key : customMetricParms.keySet()) {
postParms.add(new BasicNameValuePair(key, customMetricParms.get(key)));
}
}
private void gatherStats(@SuppressWarnings("rawtypes") GoogleAnalyticsRequest request) {
String hitType = request.hitType();
if ("pageview".equalsIgnoreCase(hitType)) {
stats.pageViewHit();
} else if ("screenview".equalsIgnoreCase(hitType)) {
stats.screenViewHit();
} else if ("event".equalsIgnoreCase(hitType)) {
stats.eventHit();
} else if ("item".equalsIgnoreCase(hitType)) {
stats.itemHit();
} else if ("transaction".equalsIgnoreCase(hitType)) {
stats.transactionHit();
} else if ("social".equalsIgnoreCase(hitType)) {
stats.socialHit();
} else if ("timing".equalsIgnoreCase(hitType)) {
stats.timingHit();
}
}
public Future<GoogleAnalyticsResponse> postAsync(final RequestProvider requestProvider) {
if (!config.isEnabled()) {
return null;
}
Future<GoogleAnalyticsResponse> future = getExecutor().submit(new Callable<GoogleAnalyticsResponse>() {
public GoogleAnalyticsResponse call() throws Exception {
try {
@SuppressWarnings("rawtypes")
GoogleAnalyticsRequest request = requestProvider.getRequest();
if (request != null) {
return post(request);
}
} catch (Exception e) {
// logger.warn("Request Provider (" + requestProvider + ") thrown exception " + e.toString() + " and hence nothing is posted to GA.");
}
return null;
}
});
return future;
}
@SuppressWarnings("rawtypes")
public Future<GoogleAnalyticsResponse> postAsync(final GoogleAnalyticsRequest request) {
if (!config.isEnabled()) {
return null;
}
Future<GoogleAnalyticsResponse> future = getExecutor().submit(new Callable<GoogleAnalyticsResponse>() {
public GoogleAnalyticsResponse call() throws Exception {
return post(request);
}
});
return future;
}
public void close() {
try {
executor.shutdown();
} catch (Exception e) {
//ignore
}
}
protected HttpClient createHttpClient(GoogleAnalyticsConfig config) {
ThreadSafeClientConnManager connManager = new ThreadSafeClientConnManager();
connManager.setDefaultMaxPerRoute(getDefaultMaxPerRoute(config));
BasicHttpParams params = new BasicHttpParams();
if (isNotEmpty(config.getUserAgent())) {
params.setParameter(CoreProtocolPNames.USER_AGENT, config.getUserAgent());
}
if (isNotEmpty(config.getProxyHost())) {
params.setParameter(ConnRoutePNames.DEFAULT_PROXY, new HttpHost(config.getProxyHost(), config.getProxyPort()));
}
DefaultHttpClient client = new DefaultHttpClient(connManager, params);
if (isNotEmpty(config.getProxyUserName())) {
BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider();
credentialsProvider.setCredentials(new AuthScope(config.getProxyHost(), config.getProxyPort()),
new UsernamePasswordCredentials(config.getProxyUserName(), config.getProxyPassword()));
client.setCredentialsProvider(credentialsProvider);
}
return client;
}
protected int getDefaultMaxPerRoute(GoogleAnalyticsConfig config) {
return Math.max(config.getMaxThreads(), 1);
}
protected ThreadPoolExecutor getExecutor() {
if (executor == null) {
executor = createExecutor(config);
}
return executor;
}
protected synchronized ThreadPoolExecutor createExecutor(GoogleAnalyticsConfig config) {
return new ThreadPoolExecutor(0, config.getMaxThreads(), 5, TimeUnit.MINUTES, new LinkedBlockingDeque<Runnable>(), createThreadFactory());
}
protected ThreadFactory createThreadFactory() {
return new GoogleAnalyticsThreadFactory(config.getThreadNameFormat());
}
public GoogleAnalyticsStats getStats() {
return stats;
}
public void resetStats() {
stats = new GoogleAnalyticsStats();
}
public void setEnabled(boolean b) { config.setEnabled(b);}
public boolean getEnabled() { return config.isEnabled();}
}
class GoogleAnalyticsThreadFactory implements ThreadFactory {
private final AtomicInteger threadNumber = new AtomicInteger(1);
private String threadNameFormat = null;
public GoogleAnalyticsThreadFactory(String threadNameFormat) {
this.threadNameFormat = threadNameFormat;
}
public Thread newThread(Runnable r) {
Thread thread = new Thread(Thread.currentThread().getThreadGroup(), r, MessageFormat.format(threadNameFormat, threadNumber.getAndIncrement()), 0);
thread.setDaemon(true);
thread.setPriority(Thread.MIN_PRIORITY);
return thread;
}
}
|
0
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu/googleanalytics/GoogleAnalyticsConfig.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.brsanthu.googleanalytics;
/**
* Properties that can be configured in this library. These include any properties that are required to process the
* tracking request or to enhance it (but are not specified in the Measurement Protocol, such as the User Agent).
* <p/>
* Properties are either initialization level or request level. An initialization level property must be set when the
* GoogleAnalytics object is created. A request level property can be set at any time and takes effect immediately.
* <p/>
* Most setters of this config object support method chaining. So, for example, you could do
* <code>new GoogleAnalyticsConfig().setMaxThreads(2).setThreadNameFormat("name");</code>
*
* @author Santhosh Kumar
*
* This copy of google-analytics-java is a back port of version 1.1.2 of the library.
* This backport removes the slf4j dependency, and modifies the code to work with the
* 4.1 version of the Apache http client library.
*
* Original sources can be found at https://github.com/brsanthu/google-analytics-java.
* All copyrights retained by original authors.
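*
* <p>A configuration sketch (the proxy host, port and tracking id are placeholders; note that a few
* setters such as {@link #setGatherStats(boolean)} return void and cannot be chained):</p>
* <pre>{@code
* GoogleAnalyticsConfig config = new GoogleAnalyticsConfig()
*     .setMaxThreads(2)
*     .setProxyHost("proxy.example.com")
*     .setProxyPort(8080);
* config.setGatherStats(true);
* GoogleAnalytics ga = new GoogleAnalytics(config, "UA-XXXX-Y");
* }</pre>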
*/
public class GoogleAnalyticsConfig {
private String threadNameFormat = "googleanalytics-thread-{0}";
private boolean enabled = true;
private int maxThreads = 1;
private boolean useHttps = true;
private boolean validate = true;
private String httpUrl = "http://www.google-analytics.com/collect";
private String httpsUrl = "https://ssl.google-analytics.com/collect";
private String userAgent = null;
private String proxyHost = null;
private int proxyPort = 80;
private String proxyUserName = null;
private String proxyPassword = null;
private boolean discoverRequestParameters = true;
private boolean gatherStats = false;
private RequestParameterDiscoverer requestParameterDiscoverer = new DefaultRequestParameterDiscoverer();
public RequestParameterDiscoverer getRequestParameterDiscoverer() {
return requestParameterDiscoverer;
}
/**
* Sets the appropriate request parameter discoverer. Default is {@link DefaultRequestParameterDiscoverer} but
* can be changed to {@link AwtRequestParameterDiscoverer} if you want to use Toolkit to derive the screen resolution etc.
* <p/>
* Please make sure you also enable the discovery using {@link #setDiscoverRequestParameters(boolean)}
*
* @param requestParameterDiscoverer can be null; if so, parameters will not be discovered.
*/
public void setRequestParameterDiscoverer(RequestParameterDiscoverer requestParameterDiscoverer) {
this.requestParameterDiscoverer = requestParameterDiscoverer;
}
public boolean isGatherStats() {
return gatherStats;
}
/**
* If set to true, {@link GoogleAnalytics} collects basic stats about successful event postings
* for various hit types and keeps a copy of {@link GoogleAnalyticsStats}, which can be retrieved
* using {@link GoogleAnalytics#getStats()}.
*
* @param gatherStats
*/
public void setGatherStats(boolean gatherStats) {
this.gatherStats = gatherStats;
}
/**
* Sets the thread name format that should be used while creating the threads.
* <p/>
* Default is "googleanalytics-thread-{0}" where {0} is the thread counter. If you specify
* a custom format, make sure {0} is there somewhere, otherwise all threads will be named
* the same, which can be an issue for troubleshooting.
*
* @param threadNameFormat non-null string for thread name.
*/
public GoogleAnalyticsConfig setThreadNameFormat(String threadNameFormat) {
this.threadNameFormat = threadNameFormat;
return this;
}
public String getThreadNameFormat() {
return threadNameFormat;
}
/**
* Deprecated since 1.0.6
*
* @deprecated Use {@link #setDiscoverRequestParameters(boolean)} instead
*/
@Deprecated
public GoogleAnalyticsConfig setDeriveSystemParameters(boolean deriveSystemProperties) {
return setDiscoverRequestParameters(deriveSystemProperties);
}
/**
* If true, derives the system properties (User Language, Region, Country, Screen Size, Color Depth, and File Encoding) and adds them to
* the default request.
* <p/>
* <p>This is <strong>initialization</strong> level configuration (must be set while creating GoogleAnalytics object).</p>
*/
public GoogleAnalyticsConfig setDiscoverRequestParameters(boolean discoverSystemParameters) {
this.discoverRequestParameters = discoverSystemParameters;
return this;
}
/**
* Deprecated since 1.0.6
*
* @deprecated Use {@link #isDiscoverRequestParameters()} instead
*/
@Deprecated
public boolean isDeriveSystemParameters() {
return isDiscoverRequestParameters();
}
public boolean isDiscoverRequestParameters() {
return discoverRequestParameters;
}
/**
* Sets the user name which should be used to authenticate to the proxy server. This is applicable only if {@link #setProxyHost(String)} is not empty.
* <p/>
* <p>This is <strong>initialization</strong> level configuration (must be set while creating GoogleAnalytics object).</p>
*/
public GoogleAnalyticsConfig setProxyUserName(String proxyUserName) {
this.proxyUserName = proxyUserName;
return this;
}
public String getProxyUserName() {
return proxyUserName;
}
public String getProxyPassword() {
return proxyPassword;
}
/**
* Sets the password which should be used to authenticate to the proxy server. This is applicable only if {@link #setProxyHost(String)} and {@link #setProxyUserName(String)} are not empty.
* <p/>
* <p>This is <strong>initialization</strong> level configuration (must be set while creating GoogleAnalytics object).</p>
*/
public GoogleAnalyticsConfig setProxyPassword(String proxyPassword) {
this.proxyPassword = proxyPassword;
return this;
}
public String getProxyHost() {
return proxyHost;
}
/**
* Sets the host name of the proxy server used to connect to Google Analytics.
* <p/>
* <p>This is <strong>initialization</strong> level configuration (must be set while creating GoogleAnalytics object).</p>
*/
public GoogleAnalyticsConfig setProxyHost(String proxyHost) {
this.proxyHost = proxyHost;
return this;
}
public int getProxyPort() {
return proxyPort;
}
/**
* Sets the port of the proxy server used to connect to Google Analytics.
* <p/>
* <p>This is <strong>initialization</strong> level configuration (must be set while creating GoogleAnalytics object).</p>
*/
public GoogleAnalyticsConfig setProxyPort(int proxyPort) {
this.proxyPort = proxyPort;
return this;
}
public String getUserAgent() {
return userAgent;
}
/**
* Sets the user agent string that should be sent while making the http request. Default is the Apache Http Client's user agent,
* which looks something like <code>Apache-HttpClient/release (java 1.5)</code>.
* <p/>
* <p>This is <strong>initialization</strong> level configuration (must be set while creating GoogleAnalytics object).</p>
*/
public GoogleAnalyticsConfig setUserAgent(String userAgent) {
this.userAgent = userAgent;
return this;
}
public boolean isEnabled() {
return enabled;
}
/**
* Enables or disables the GoogleAnalytics posting. If disabled, the library will continue to accept the send/post requests but silently skips
* sending the event and returns a successful response. Default is <code>true</code>.
* <p/>
* <p>This is <strong>request</strong> level configuration (can be changed any time).</p>
*/
public GoogleAnalyticsConfig setEnabled(boolean enabled) {
this.enabled = enabled;
return this;
}
/**
* Maximum threads to use to process the asynchronous event posting and Http client connection pooling. Default is 1.
* <p/>
* <p>This is <strong>initialization</strong> level configuration (must be set while creating GoogleAnalytics object).</p>
*/
public int getMaxThreads() {
return maxThreads;
}
public GoogleAnalyticsConfig setMaxThreads(int maxThreads) {
this.maxThreads = maxThreads;
return this;
}
public boolean isUseHttps() {
return useHttps;
}
/**
* Instructs to use https url to send the events. Default is true.
* <p/>
* <p>This is <strong>request</strong> level configuration (can be changed any time).</p>
*/
public GoogleAnalyticsConfig setUseHttps(boolean useHttps) {
this.useHttps = useHttps;
return this;
}
public boolean isValidate() {
return validate;
}
/**
* If set, validates the request before sending it to Google Analytics. If any errors are found, a GoogleAnalyticsException will be thrown with details.
* Default is <code>true</code>. Note that if you are sending the event in async mode, the request is always validated and logged to the log file as warnings irrespective
* of this flag.
* <p/>
* <p>This is <strong>request</strong> level configuration (can be changed any time).</p>
*/
public GoogleAnalyticsConfig setValidate(boolean validate) {
this.validate = validate;
return this;
}
public String getHttpUrl() {
return httpUrl;
}
/**
* URL to use when posting the event in http mode. This is the Google Analytics service url and is usually not changed by clients.
* <p/>
* <p>Default value is <code>http://www.google-analytics.com/collect</code></p>
* <p/>
* <p>This is <strong>request</strong> level configuration (can be changed any time).</p>
*/
public GoogleAnalyticsConfig setHttpUrl(String httpUrl) {
this.httpUrl = httpUrl;
return this;
}
public String getHttpsUrl() {
return httpsUrl;
}
/**
* URL to use when posting the event in https mode. This is the Google Analytics service url and is usually not changed by clients.
* <p>Default value is <code>https://ssl.google-analytics.com/collect</code>
* <p/>
* <p>This is <strong>request</strong> level configuration (can be changed any time).</p>
*/
public GoogleAnalyticsConfig setHttpsUrl(String httpsUrl) {
this.httpsUrl = httpsUrl;
return this;
}
String getUrl() {
return useHttps ? httpsUrl : httpUrl;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("GoogleAnalyticsConfig [");
if (threadNameFormat != null) {
builder.append("threadNameFormat=");
builder.append(threadNameFormat);
builder.append(", ");
}
builder.append("enabled=");
builder.append(enabled);
builder.append(", maxThreads=");
builder.append(maxThreads);
builder.append(", useHttps=");
builder.append(useHttps);
builder.append(", validate=");
builder.append(validate);
builder.append(", ");
if (httpUrl != null) {
builder.append("httpUrl=");
builder.append(httpUrl);
builder.append(", ");
}
if (httpsUrl != null) {
builder.append("httpsUrl=");
builder.append(httpsUrl);
builder.append(", ");
}
if (userAgent != null) {
builder.append("userAgent=");
builder.append(userAgent);
builder.append(", ");
}
if (proxyHost != null) {
builder.append("proxyHost=");
builder.append(proxyHost);
builder.append(", ");
}
builder.append("proxyPort=");
builder.append(proxyPort);
builder.append(", ");
if (proxyUserName != null) {
builder.append("proxyUserName=");
builder.append(proxyUserName);
builder.append(", ");
}
if (proxyPassword != null) {
builder.append("proxyPassword=");
builder.append(mask(proxyPassword));
builder.append(", ");
}
builder.append("deriveSystemParameters=");
builder.append(discoverRequestParameters);
builder.append(", gatherStats=");
builder.append(gatherStats);
builder.append("]");
return builder.toString();
}
public static String mask(String value) {
return value == null ? null : "********";
}
}
|
0
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu/googleanalytics/GoogleAnalyticsException.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.brsanthu.googleanalytics;
/**
* Any exception thrown by this library (usually due to validation) is of this type.
*
* @author Santhosh Kumar
*
* This copy of google-analytics-java is a back port of version 1.1.2 of the library.
* This backport removes the slf4j dependency, and modifies the code to work with the
* 4.1 version of the Apache http client library.
*
* Original sources can be found at https://github.com/brsanthu/google-analytics-java.
* All copyrights retained by original authors.
*
*/
public class GoogleAnalyticsException extends RuntimeException {
private static final long serialVersionUID = 1L;
public GoogleAnalyticsException() {
super();
}
public GoogleAnalyticsException(String message, Throwable cause) {
super(message, cause);
}
public GoogleAnalyticsException(String message) {
super(message);
}
public GoogleAnalyticsException(Throwable cause) {
super(cause);
}
}
|
0
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu/googleanalytics/GoogleAnalyticsParameter.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License")),
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.brsanthu.googleanalytics;
/**
* Google Analytics Measurement Protocol Parameters.
* <p/>
* <p>For more information, see <a href="https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters">GA Parameters Reference</a></p>
*
* @author Santhosh Kumar
*
* This copy of google-analytics-java is a back port of version 1.1.2 of the library.
* This backport removes the slf4j dependency, and modifies the code to work with the
* 4.1 version of the Apache http client library.
*
* Original sources can be found at https://github.com/brsanthu/google-analytics-java.
* All copyrights retained by original authors.
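*
* <p>Each constant carries the wire parameter name and basic metadata, for example:</p>
* <pre>{@code
* GoogleAnalyticsParameter p = GoogleAnalyticsParameter.EVENT_CATEGORY;
* p.getParameterName(); // "ec"
* p.getMaxLength();     // 150
* p.isRequired();       // false
* }</pre>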
*/
public enum GoogleAnalyticsParameter {
//General
PROTOCOL_VERSION("v", true),
TRACKING_ID("tid", true),
ANONYMIZE_IP("aip", "boolean"),
QUEUE_TIME("qt", "integer"),
CACHE_BUSTER("z"),
DATA_SOURCE("ds"),
//Visitor
CLIENT_ID("cid", true),
USER_ID("uid"),
//Session
SESSION_CONTROL("sc"),
USER_IP("uip"),
USER_AGENT("ua"),
//Traffic Sources
DOCUMENT_REFERRER("dr", 2048),
CAMPAIGN_NAME("cn", 100),
CAMPAIGN_SOURCE("cs", 100),
CAMPAIGN_MEDIUM("cm", 50),
CAMPAIGN_KEYWORD("ck", 500),
CAMPAIGN_CONTENT("cc", 500),
CAMPAIGN_ID("ci", 100),
ADWORDS_ID("gclid"),
DISPLAY_ADS_ID("dclid"),
//System Info
SCREEN_RESOLUTION("sr", 20),
VIEWPORT_SIZE("vp", 20),
DOCUMENT_ENCODING("de", 20),
SCREEN_COLORS("sd", 20),
USER_LANGUAGE("ul", 20),
JAVA_ENABLED("je", "boolean"),
FLASH_VERSION("fl", 20),
//Hit
HIT_TYPE("t", true),
NON_INTERACTION_HIT("ni"),
//Content Information
DOCUMENT_URL("dl", 2048),
DOCUMENT_HOST_NAME("dh", 100),
DOCUMENT_PATH("dp", 2048),
DOCUMENT_TITLE("dt", 1500),
CONTENT_DESCRIPTION("cd"),
LINK_ID("linkid"),
//App Tracking
APPLICATION_NAME("an", 100),
APPLICATION_ID("aid", 150),
APPLICATION_VERSION("av", 100),
APPLICATION_INSTALLER_ID("aiid", 150),
//Event Tracking
EVENT_CATEGORY("ec", new String[]{"event"}, 150),
EVENT_ACTION("ea", new String[]{"event"}, 500),
EVENT_LABEL("el", new String[]{"event"}, 500),
EVENT_VALUE("ev", false, "integer", new String[]{"event"}),
//E-Commerce
TRANSACTION_ID("ti", new String[]{"transaction", "item"}, 500),
TRANSACTION_AFFILIATION("ta", new String[]{"transaction"}, 500),
TRANSACTION_REVENUE("tr", false, "currency", new String[]{"transaction"}),
TRANSACTION_SHIPPING("ts", false, "currency", new String[]{"transaction"}),
TRANSACTION_TAX("tt", false, "currency", new String[]{"transaction"}),
ITEM_NAME("in", new String[]{"item"}, 500),
ITEM_PRICE("ip", false, "currency", new String[]{"item"}),
ITEM_QUANTITY("iq", false, "integer", new String[]{"item"}),
ITEM_CODE("ic", new String[]{"item"}, 500),
ITEM_CATEGORY("iv", new String[]{"item"}, 500),
CURRENCY_CODE("cu", new String[]{"transaction", "item"}, 10),
//Social Interactions
SOCIAL_NETWORK("sn", new String[]{"social"}, 50),
SOCIAL_ACTION("sa", new String[]{"social"}, 50),
SOCIAL_ACTION_TARGET("st", new String[]{"social"}, 2048),
//Timing
USER_TIMING_CATEGORY("utc", new String[]{"timing"}, 150),
USER_TIMING_VARIABLE_NAME("utv", new String[]{"timing"}, 500),
USER_TIMING_TIME("utt", false, "integer", new String[]{"timing"}),
USER_TIMING_LABEL("utl", new String[]{"timing"}, 500),
PAGE_LOAD_TIME("plt", false, "integer", new String[]{"timing"}),
DNS_TIME("dns", false, "integer", new String[]{"timing"}),
PAGE_DOWNLOAD_TIME("pdt", false, "integer", new String[]{"timing"}),
REDIRECT_RESPONSE_TIME("rrt", false, "integer", new String[]{"timing"}),
TCP_CONNECT_TIME("tcp", false, "integer", new String[]{"timing"}),
SERVER_RESPONSE_TIME("srt", false, "integer", new String[]{"timing"}),
//Exceptions
EXCEPTION_DESCRIPTION("exd", new String[]{"exception"}, 150),
EXCEPTION_FATAL("exf", false, "boolean", new String[]{"exception"}),
//Experiment Variations
EXPERIMENT_ID("xid", 40),
EXPERIMENT_VARIANT("xvar");
private String parameterName = null;
private boolean required = false;
private String type = "text";
private String[] supportedHitTypes = null;
private int maxLength = 0;
private GoogleAnalyticsParameter(String name) {
this(name, false);
}
private GoogleAnalyticsParameter(String name, int maxLength) {
this(name, false, null, null, maxLength);
}
private GoogleAnalyticsParameter(String name, boolean required) {
this(name, required, "text", null, 0);
}
private GoogleAnalyticsParameter(String name, String type) {
this(name, false, type, null, 0);
}
private GoogleAnalyticsParameter(String name, String[] supportedHitTypes) {
this(name, false, "text", supportedHitTypes, 0);
}
private GoogleAnalyticsParameter(String name, String[] supportedHitTypes, int maxLength) {
this(name, false, "text", supportedHitTypes, maxLength);
}
private GoogleAnalyticsParameter(String name, boolean required, String type, String[] supportedHitTypes) {
this(name, required, type, supportedHitTypes, 0);
}
private GoogleAnalyticsParameter(String name, boolean required, String type, String[] supportedHitTypes, int maxLength) {
this.parameterName = name;
this.required = required;
if (type == null) {
type = "text";
}
this.type = type;
this.supportedHitTypes = supportedHitTypes;
this.maxLength = maxLength;
}
public String getParameterName() {
return parameterName;
}
public String[] getSupportedHitTypes() {
return supportedHitTypes;
}
public String getType() {
return type;
}
public boolean isRequired() {
return required;
}
public int getMaxLength() {
return maxLength;
}
}
|
0
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu/googleanalytics/GoogleAnalyticsRequest.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.brsanthu.googleanalytics;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.ADWORDS_ID;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.ANONYMIZE_IP;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.APPLICATION_ID;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.APPLICATION_NAME;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.APPLICATION_VERSION;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.CACHE_BUSTER;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.CAMPAIGN_CONTENT;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.CAMPAIGN_ID;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.CAMPAIGN_KEYWORD;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.CAMPAIGN_MEDIUM;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.CAMPAIGN_NAME;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.CAMPAIGN_SOURCE;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.CLIENT_ID;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.CONTENT_DESCRIPTION;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.DATA_SOURCE;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.DISPLAY_ADS_ID;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.DOCUMENT_ENCODING;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.DOCUMENT_HOST_NAME;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.DOCUMENT_PATH;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.DOCUMENT_REFERRER;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.DOCUMENT_TITLE;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.DOCUMENT_URL;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.EXPERIMENT_ID;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.EXPERIMENT_VARIANT;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.FLASH_VERSION;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.HIT_TYPE;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.JAVA_ENABLED;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.NON_INTERACTION_HIT;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.PROTOCOL_VERSION;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.QUEUE_TIME;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.SCREEN_COLORS;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.SCREEN_RESOLUTION;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.SESSION_CONTROL;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.TRACKING_ID;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.USER_ID;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.USER_LANGUAGE;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.VIEWPORT_SIZE;
import java.util.HashMap;
import java.util.Map;
/**
* Base GA Tracking Request containing the standard and custom parameter values.
* <p/>
* <p>It also provides type-safe getters/setters for all parameters that are applicable
* to all hit types. Hit-specific setters/getters are available in the corresponding
* hit-specific request objects (like {@link EventHit} or {@link PageViewHit}).</p>
*
* @author Santhosh Kumar
*
* This copy of google-analytics-java is a back port of version 1.1.2 of the library.
* This backport removes the slf4j dependency, and modifies the code to work with the
* 4.1 version of the Apache http client library.
*
* Original sources can be found at https://github.com/brsanthu/google-analytics-java.
* All copyrights retained by original authors.
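*
* <p>A short sketch of the fluent setters together with custom dimensions and metrics
* (the index values and strings are illustrative):</p>
* <pre>{@code
* EventHit hit = new EventHit("category", "action")
*     .customDimension(1, "beta-channel")
*     .customMetric(2, "42");
* hit.anonymizeIp(true);
* }</pre>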
*/
@SuppressWarnings("unchecked")
public class GoogleAnalyticsRequest<T> {
protected Map<GoogleAnalyticsParameter, String> parms = new HashMap<GoogleAnalyticsParameter, String>();
protected Map<String, String> customDimensions = new HashMap<String, String>();
protected Map<String, String> customMetrics = new HashMap<String, String>();
public GoogleAnalyticsRequest() {
this(null, null, null, null);
}
public GoogleAnalyticsRequest(String hitType) {
this(hitType, null, null, null);
}
public GoogleAnalyticsRequest(String hitType, String trackingId, String appName, String appVersion) {
hitType(isEmpty(hitType) ? "pageview" : hitType);
trackingId(trackingId);
applicationName(appName);
applicationVersion(appVersion);
protocolVersion("1");
}
/**
* Sets the String value for the specified parameter. If the value is null, the parameter
* is removed from the parameters list.
*
* @param parameter
* @param value
* @return
*/
protected T setString(GoogleAnalyticsParameter parameter, String value) {
if (value == null) {
parms.remove(parameter);
} else {
String stringValue = value;
parms.put(parameter, stringValue);
}
return (T) this;
}
protected String getString(GoogleAnalyticsParameter parameter) {
return parms.get(parameter);
}
protected T setInteger(GoogleAnalyticsParameter parameter, Integer value) {
if (value == null) {
parms.remove(parameter);
} else {
String stringValue = fromInteger(value);
parms.put(parameter, stringValue);
}
return (T) this;
}
protected Double getDouble(GoogleAnalyticsParameter parameter) {
return toDouble(parms.get(parameter));
}
protected T setDouble(GoogleAnalyticsParameter parameter, Double value) {
if (value == null) {
parms.remove(parameter);
} else {
String stringValue = fromDouble(value);
parms.put(parameter, stringValue);
}
return (T) this;
}
protected Boolean getBoolean(GoogleAnalyticsParameter parameter) {
return toBoolean(parms.get(parameter));
}
protected T setBoolean(GoogleAnalyticsParameter parameter, Boolean value) {
if (value == null) {
parms.remove(parameter);
} else {
String stringValue = fromBoolean(value);
parms.put(parameter, stringValue);
}
return (T) this;
}
protected Integer getInteger(GoogleAnalyticsParameter parameter) {
return toInteger(parms.get(parameter));
}
protected String fromBoolean(Boolean booleanString) {
if (booleanString == null) {
return null;
}
return "" + booleanString;
}
protected Boolean toBoolean(String booleanString) {
if (isEmpty(booleanString)) {
return null;
}
return Boolean.parseBoolean(booleanString);
}
protected String fromInteger(Integer intValue) {
if (intValue == null) {
return null;
}
return "" + intValue;
}
protected Integer toInteger(String intString) {
if (isEmpty(intString)) {
return null;
}
return Integer.parseInt(intString);
}
protected String fromDouble(Double doubleValue) {
if (doubleValue == null) {
return null;
}
return "" + doubleValue;
}
protected Double toDouble(String doubleString) {
if (isEmpty(doubleString)) {
return null;
}
return Double.parseDouble(doubleString);
}
protected T parameter(GoogleAnalyticsParameter parameter, String value) {
if (value == null) {
parms.remove(parameter);
} else {
parms.put(parameter, value);
}
return (T) this;
}
protected String parameter(GoogleAnalyticsParameter parameter) {
return parms.get(parameter);
}
public Map<GoogleAnalyticsParameter, String> getParameters() {
return parms;
}
/**
* @deprecated Use {@link #customDimension(int)} instead
*/
public String customDimention(int index) {
return customDimension(index);
}
public String customDimension(int index) {
return customDimensions.get("cd" + index);
}
/**
* @deprecated Use {@link #customDimension(int, String)} instead
*/
public T customDimention(int index, String value) {
return customDimension(index, value);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Each custom dimension has an associated index. There is a maximum of 20 custom dimensions (200 for Premium accounts). The name suffix must be a positive integer between 1 and 200, inclusive.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>cd[1-9][0-9]*</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>150 Bytes
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>Sports</code><br>
* Example usage: <code>cd[1-9][0-9]*=Sports</code>
* </div>
* </div>
*/
public T customDimension(int index, String value) {
customDimensions.put("cd" + index, value);
return (T) this;
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Each custom metric has an associated index. There is a maximum of 20 custom metrics (200 for Premium accounts). The name suffix must be a positive integer between 1 and 200, inclusive.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>cm[1-9][0-9]*</code></td>
* <td>integer</td>
* <td><span class="none">None</span>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>47</code><br>
* Example usage: <code>cm[1-9][0-9]*=47</code>
* </div>
* </div>
*/
public T customMetric(int index, String value) {
customMetrics.put("cm" + index, value);
return (T) this;
}
public String customMetric(int index) {
return customMetrics.get("cm" + index);
}
/**
* @deprecated Use {@link #customDimensions()} instead
*/
public Map<String, String> customDimentions() {
return customDimensions();
}
public Map<String, String> customDimensions() {
return customDimensions;
}
public Map<String, String> custommMetrics() {
return customMetrics;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("Request [");
if (parms != null) {
builder.append("parms=");
builder.append(parms);
builder.append(", ");
}
if (customDimensions != null) {
builder.append("customDimensions=");
builder.append(customDimensions);
builder.append(", ");
}
if (customMetrics != null) {
builder.append("customMetrics=");
builder.append(customMetrics);
}
builder.append("]");
return builder.toString();
}
/**
* <div class="ind">
* <p>
* <strong>Required for all hit types.</strong>
* </p>
* <p>The Protocol version. The current value is '1'. This will only change when there are changes made that are not backwards compatible.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>v</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>1</code><br>
* Example usage: <code>v=1</code>
* </div>
* </div>
*/
public T protocolVersion(String value) {
setString(PROTOCOL_VERSION, value);
return (T) this;
}
public String protocolVersion() {
return getString(PROTOCOL_VERSION);
}
/**
* <div class="ind">
* <p>
* <strong>Required for all hit types.</strong>
* </p>
* <p>The tracking ID / web property ID. The format is UA-XXXX-Y. All collected data is associated by this ID.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>tid</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>UA-XXXX-Y</code><br>
* Example usage: <code>tid=UA-XXXX-Y</code>
* </div>
* </div>
*/
public T trackingId(String value) {
setString(TRACKING_ID, value);
return (T) this;
}
public String trackingId() {
return getString(TRACKING_ID);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>When present, the IP address of the sender will be anonymized. For example, the IP will be anonymized if any of the following parameters are present in the payload: &aip=, &aip=0, or &aip=1</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>aip</code></td>
* <td>boolean</td>
* <td><span class="none">None</span>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>1</code><br>
* Example usage: <code>aip=1</code>
* </div>
* </div>
*/
public T anonymizeIp(Boolean value) {
setBoolean(ANONYMIZE_IP, value);
return (T) this;
}
public Boolean anonymizeIp() {
return getBoolean(ANONYMIZE_IP);
}
/**
* <div class="ds">
* <p>
* Optional.
* </p>
* <p>Indicates the data source of the hit. Hits sent from analytics.js will have data source set to 'web'; hits sent from one of the mobile SDKs will have data source set to 'app'.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>ds</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>web</code><br>
* Example usage: <code>ds=web</code>
* <p/>
* Example value: <code>app</code><br>
* Example usage: <code>ds=app</code>
* <p/>
* Example value: <code>call center</code><br>
* Example usage: <code>ds=call%20center</code>
* <p/>
* Example value: <code>crm</code><br>
* Example usage: <code>ds=crm</code>
* </div>
* </div>
*/
public T dataSource(String value) {
setString(DATA_SOURCE, value);
return (T) this;
}
public String dataSource() {
return getString(DATA_SOURCE);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Used to collect offline / latent hits. The value represents the time delta (in milliseconds) between when the hit being reported occurred and the time the hit was sent. The value must be greater than or equal to 0. Values greater than four hours may lead to hits not being processed.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>qt</code></td>
* <td>integer</td>
* <td><span class="none">None</span>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>560</code><br>
* Example usage: <code>qt=560</code>
* </div>
* </div>
*/
public T queueTime(Integer value) {
setInteger(QUEUE_TIME, value);
return (T) this;
}
public Integer queueTime() {
return getInteger(QUEUE_TIME);
}
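// Illustrative sketch for offline / latent hits: queue time is the age of the hit in
// milliseconds at the moment it is sent and must be >= 0 (hits older than about four hours
// may be dropped). The occurredAtMillis value and the "ga" client are assumptions.
//
//     long occurredAtMillis = ...;   // when the tracked action actually happened
//     PageViewHit hit = new PageViewHit("http://example.com/offline", "Offline page")
//             .queueTime((int) (System.currentTimeMillis() - occurredAtMillis));
//     ga.post(hit);                  // assumed GoogleAnalytics client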
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Used to send a random number in GET requests to ensure browsers and proxies don't cache hits. It should be sent as the final parameter of the request since we've seen some 3rd party internet filtering software add additional parameters to HTTP requests incorrectly. This value is not used in reporting.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>z</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>289372387623</code><br>
* Example usage: <code>z=289372387623</code>
* </div>
* </div>
*/
public T cacheBuster(String value) {
setString(CACHE_BUSTER, value);
return (T) this;
}
public String cacheBuster() {
return getString(CACHE_BUSTER);
}
/**
* <div class="ind">
* <p>
* <strong>Required for all hit types.</strong>
* </p>
* <p>This anonymously identifies a particular user, device, or browser instance. For the web, this is generally stored as a first-party cookie with a two-year expiration. For mobile apps, this is randomly generated for each particular instance of an application install. The value of this field should be a random UUID (version 4) as described in http://www.ietf.org/rfc/rfc4122.txt</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>cid</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>35009a79-1a05-49d7-b876-2b884d0f825b</code><br>
* Example usage: <code>cid=35009a79-1a05-49d7-b876-2b884d0f825b</code>
* </div>
* </div>
*/
public T clientId(String value) {
setString(CLIENT_ID, value);
return (T) this;
}
public String clientId() {
return getString(CLIENT_ID);
}
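// Minimal sketch of assigning a client ID: the protocol expects a random version-4 UUID per
// device or browser instance, which java.util.UUID produces directly. Persisting the value
// and reusing it across hits is left to the caller; the "ga" client is an assumption.
//
//     String cid = java.util.UUID.randomUUID().toString();   // e.g. 35009a79-1a05-49d7-b876-2b884d0f825b
//     ga.post(new PageViewHit("http://example.com/", "Home").clientId(cid));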
/**
* <div class="ind">
* <strong>
* Optional.
* </strong>
* <p>This is intended to be a known identifier for a user provided by the site owner/tracking library user. It must not itself be PII (personally identifiable information). The value should never be persisted in GA cookies or other Analytics provided storage.</p>
* <table>
* <tbody><tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>uid</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>all</td>
* </tr>
* </tbody></table>
* <p/>
* <p/>
* <div>
* Example value: <code>as8eknlll</code><br>
* Example usage: <code>uid=as8eknlll</code>
* </div>
* <p/>
* <p/>
* </div>
*
* @param value the user ID to associate with this hit
* @return this hit, for method chaining
*/
public T userId(String value) {
setString(USER_ID, value);
return (T) this;
}
public String userId() {
return getString(USER_ID);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Used to control the session duration. A value of 'start' forces a new session to start with this hit and 'end' forces the current session to end with this hit. All other values are ignored.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>sc</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>start</code><br>
* Example usage: <code>sc=start</code>
* </div>
* <br>
* <div>
* Example value: <code>end</code><br>
* Example usage: <code>sc=end</code>
* </div>
* </div>
*/
public T sessionControl(String value) {
setString(SESSION_CONTROL, value);
return (T) this;
}
public String sessionControl() {
return getString(SESSION_CONTROL);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies which referral source brought traffic to a website. This value is also used to compute the traffic source. The format of this value is a URL.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>dr</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>2048 Bytes
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>http://example.com</code><br>
* Example usage: <code>dr=http%3A%2F%2Fexample.com</code>
* </div>
* </div>
*/
public T documentReferrer(String value) {
setString(DOCUMENT_REFERRER, value);
return (T) this;
}
public String documentReferrer() {
return getString(DOCUMENT_REFERRER);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the campaign name.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>cn</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>100 Bytes
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>(direct)</code><br>
* Example usage: <code>cn=%28direct%29</code>
* </div>
* </div>
*/
public T campaignName(String value) {
setString(CAMPAIGN_NAME, value);
return (T) this;
}
public String campaignName() {
return getString(CAMPAIGN_NAME);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the campaign source.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>cs</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>100 Bytes
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>(direct)</code><br>
* Example usage: <code>cs=%28direct%29</code>
* </div>
* </div>
*/
public T campaignSource(String value) {
setString(CAMPAIGN_SOURCE, value);
return (T) this;
}
public String campaignSource() {
return getString(CAMPAIGN_SOURCE);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the campaign medium.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>cm</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>50 Bytes
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>organic</code><br>
* Example usage: <code>cm=organic</code>
* </div>
* </div>
*/
public T campaignMedium(String value) {
setString(CAMPAIGN_MEDIUM, value);
return (T) this;
}
public String campaignMedium() {
return getString(CAMPAIGN_MEDIUM);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the campaign keyword.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>ck</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>500 Bytes
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>Blue Shoes</code><br>
* Example usage: <code>ck=Blue%20Shoes</code>
* </div>
* </div>
*/
public T campaignKeyword(String value) {
setString(CAMPAIGN_KEYWORD, value);
return (T) this;
}
public String campaignKeyword() {
return getString(CAMPAIGN_KEYWORD);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the campaign content.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>cc</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>500 Bytes
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>content</code><br>
* Example usage: <code>cc=content</code>
* </div>
* </div>
*/
public T campaignContent(String value) {
setString(CAMPAIGN_CONTENT, value);
return (T) this;
}
public String campaignContent() {
return getString(CAMPAIGN_CONTENT);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the campaign ID.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>ci</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>100 Bytes
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>ID</code><br>
* Example usage: <code>ci=ID</code>
* </div>
* </div>
*/
public T campaignId(String value) {
setString(CAMPAIGN_ID, value);
return (T) this;
}
public String campaignId() {
return getString(CAMPAIGN_ID);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the Google AdWords Id.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>gclid</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>CL6Q-OXyqKUCFcgK2goddQuoHg</code><br>
* Example usage: <code>gclid=CL6Q-OXyqKUCFcgK2goddQuoHg</code>
* </div>
* </div>
*/
public T adwordsId(String value) {
setString(ADWORDS_ID, value);
return (T) this;
}
public String adwordsId() {
return getString(ADWORDS_ID);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the Google Display Ads Id.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>dclid</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>d_click_id</code><br>
* Example usage: <code>dclid=d_click_id</code>
* </div>
* </div>
*/
public T displayadId(String value) {
setString(DISPLAY_ADS_ID, value);
return (T) this;
}
public String displayadId() {
return getString(DISPLAY_ADS_ID);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the screen resolution.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>sr</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>20 Bytes
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>800x600</code><br>
* Example usage: <code>sr=800x600</code>
* </div>
* </div>
*/
public T screenResolution(String value) {
setString(SCREEN_RESOLUTION, value);
return (T) this;
}
public String screenResolution() {
return getString(SCREEN_RESOLUTION);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the viewable area of the browser / device.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>vp</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>20 Bytes
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>123x456</code><br>
* Example usage: <code>vp=123x456</code>
* </div>
* </div>
*/
public T viewportSize(String value) {
setString(VIEWPORT_SIZE, value);
return (T) this;
}
public String viewportSize() {
return getString(VIEWPORT_SIZE);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the character set used to encode the page / document.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>de</code></td>
* <td>text</td>
* <td><code>UTF-8</code>
* </td>
* <td>20 Bytes
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>UTF-8</code><br>
* Example usage: <code>de=UTF-8</code>
* </div>
* </div>
*/
public T documentEncoding(String value) {
setString(DOCUMENT_ENCODING, value);
return (T) this;
}
public String documentEncoding() {
return getString(DOCUMENT_ENCODING);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the screen color depth.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>sd</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>20 Bytes
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>24-bits</code><br>
* Example usage: <code>sd=24-bits</code>
* </div>
* </div>
*/
public T screenColors(String value) {
setString(SCREEN_COLORS, value);
return (T) this;
}
public String screenColors() {
return getString(SCREEN_COLORS);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the language.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>ul</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>20 Bytes
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>en-us</code><br>
* Example usage: <code>ul=en-us</code>
* </div>
* </div>
*/
public T userLanguage(String value) {
setString(USER_LANGUAGE, value);
return (T) this;
}
public String userLanguage() {
return getString(USER_LANGUAGE);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies whether Java was enabled.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>je</code></td>
* <td>boolean</td>
* <td><span class="none">None</span>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>1</code><br>
* Example usage: <code>je=1</code>
* </div>
* </div>
*/
public T javaEnabled(Boolean value) {
setBoolean(JAVA_ENABLED, value);
return (T) this;
}
public Boolean javaEnabled() {
return getBoolean(JAVA_ENABLED);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the flash version.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>fl</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>20 Bytes
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>10 1 r103</code><br>
* Example usage: <code>fl=10%201%20r103</code>
* </div>
* </div>
*/
public T flashVersion(String value) {
setString(FLASH_VERSION, value);
return (T) this;
}
public String flashVersion() {
return getString(FLASH_VERSION);
}
/**
* <div class="ind">
* <p>
* <strong>Required for all hit types.</strong>
* </p>
* <p>The type of hit. Must be one of 'pageview', 'screenview', 'event', 'transaction', 'item', 'social', 'exception', 'timing'.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>t</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>pageview</code><br>
* Example usage: <code>t=pageview</code>
* </div>
* </div>
*/
public T hitType(String value) {
setString(HIT_TYPE, value);
return (T) this;
}
public String hitType() {
return getString(HIT_TYPE);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies that a hit be considered non-interactive.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>ni</code></td>
* <td>boolean</td>
* <td><span class="none">None</span>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>1</code><br>
* Example usage: <code>ni=1</code>
* </div>
* </div>
*/
public T nonInteractionHit(String value) {
setString(NON_INTERACTION_HIT, value);
return (T) this;
}
public String nonInteractionHit() {
return getString(NON_INTERACTION_HIT);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Use this parameter to send the full URL (document location) of the page on which content resides. You can use the &dh and &dp parameters to override the hostname and path + query portions of the document location, accordingly. The JavaScript clients determine this parameter using the concatenation of the document.location.origin + document.location.pathname + document.location.search browser parameters. Be sure to remove any user authentication or other private information from the URL if present.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>dl</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>2048 Bytes
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>http://foo.com/home?a=b</code><br>
* Example usage: <code>dl=http%3A%2F%2Ffoo.com%2Fhome%3Fa%3Db</code>
* </div>
* </div>
*/
public T documentUrl(String value) {
setString(DOCUMENT_URL, value);
return (T) this;
}
public String documentUrl() {
return getString(DOCUMENT_URL);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the hostname from which content was hosted.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>dh</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>100 Bytes
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>foo.com</code><br>
* Example usage: <code>dh=foo.com</code>
* </div>
* </div>
*/
public T documentHostName(String value) {
setString(DOCUMENT_HOST_NAME, value);
return (T) this;
}
public String documentHostName() {
return getString(DOCUMENT_HOST_NAME);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>The path portion of the page URL. Should begin with '/'.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>dp</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>2048 Bytes
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>/foo</code><br>
* Example usage: <code>dp=%2Ffoo</code>
* </div>
* </div>
*/
public T documentPath(String value) {
setString(DOCUMENT_PATH, value);
return (T) this;
}
public String documentPath() {
return getString(DOCUMENT_PATH);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>The title of the page / document.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>dt</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>1500 Bytes
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>Settings</code><br>
* Example usage: <code>dt=Settings</code>
* </div>
* </div>
*/
public T documentTitle(String value) {
setString(DOCUMENT_TITLE, value);
return (T) this;
}
public String documentTitle() {
return getString(DOCUMENT_TITLE);
}
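// Illustrative page-tracking sketch: either send the full URL via documentUrl (dl), or supply
// documentHostName (dh) and documentPath (dp) to override the host and path portions;
// documentTitle (dt) carries the page title. The "ga" client is an assumption.
//
//     PageViewHit byUrl = new PageViewHit()
//             .documentUrl("http://foo.com/home?a=b")   // dl
//             .documentTitle("Home");                   // dt
//
//     PageViewHit byParts = new PageViewHit()
//             .documentHostName("foo.com")              // dh
//             .documentPath("/home")                    // dp
//             .documentTitle("Home");                   // dt
//     ga.post(byParts);                                 // assumed GoogleAnalytics client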
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>If not specified, this will default to the unique URL of the page by either using the &dl parameter as-is or assembling it from &dh and &dp. App tracking makes use of this for the 'Screen Name' of the screenview hit.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>cd</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>2048 Bytes
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>High Scores</code><br>
* Example usage: <code>cd=High%20Scores</code>
* </div>
* </div>
*/
public T contentDescription(String value) {
setString(CONTENT_DESCRIPTION, value);
return (T) this;
}
public String contentDescription() {
return getString(CONTENT_DESCRIPTION);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the application name. Only visible in app views (profiles).</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>an</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>100 Bytes
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>My App</code><br>
* Example usage: <code>an=My%20App</code>
* </div>
* </div>
*/
public T applicationName(String value) {
setString(APPLICATION_NAME, value);
return (T) this;
}
public String applicationName() {
return getString(APPLICATION_NAME);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the application version. Only visible in app views (profiles).</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>av</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>100 Bytes
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>1.2</code><br>
* Example usage: <code>av=1.2</code>
* </div>
* </div>
*/
public T applicationVersion(String value) {
setString(APPLICATION_VERSION, value);
return (T) this;
}
public String applicationVersion() {
return getString(APPLICATION_VERSION);
}
/**
* <div class="aid">
* <p>
* Optional.
* </p>
* <p>Specifies the application identifier. Only visible in app views (profiles).</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>aid</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>150 Bytes
* </td>
* <td>all</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>com.company.app</code><br>
* Example usage: <code>aid=com.company.app</code>
* </div>
* </div>
*/
public T applicationId(String value) {
setString(APPLICATION_ID, value);
return (T) this;
}
public String applicationId() {
return getString(APPLICATION_ID);
}
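// App-tracking sketch: for screenview hits the application fields (an, av, aid) identify the
// app and contentDescription (cd) carries the screen name. ScreenViewHit is defined elsewhere
// in this library; the "ga" client is an assumption.
//
//     ScreenViewHit screen = new ScreenViewHit("High Scores")   // cd=High%20Scores
//             .applicationName("My App")                        // an
//             .applicationVersion("1.2")                        // av
//             .applicationId("com.company.app");                // aid
//     ga.post(screen);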
/**
* <div class="ind">
* <p>
* Optional.
* <p/>
* </p>
* <p>This parameter specifies that this visitor has been exposed to an experiment with the given ID. It should be sent in conjunction with the Experiment Variant parameter.</p>
* <table>
* <tbody><tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>xid</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>40 Bytes
* </td>
* <td>all</td>
* </tr>
* </tbody></table>
* <p/>
* <p/>
* <div>
* Example value: <code>Qp0gahJ3RAO3DJ18b0XoUQ</code><br>
* Example usage: <code>xid=Qp0gahJ3RAO3DJ18b0XoUQ</code>
* </div>
* </div>
*/
public T expirementId(String value) {
setString(EXPERIMENT_ID, value);
return (T) this;
}
public String expirementId() {
return getString(EXPERIMENT_ID);
}
/**
* <div class="ind">
* <p>
* Optional.
* <p/>
* </p>
* <p>This parameter specifies that this visitor has been exposed to a particular variation of an experiment. It should be sent in conjunction with the Experiment ID parameter.</p>
* <table>
* <tbody><tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>xvar</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>all</td>
* </tr>
* </tbody></table>
* <p/>
* <div>
* Example value: <code>1</code><br>
* Example usage: <code>xvar=1</code>
* </div>
* </div>
*/
public T expirementVariant(String value) {
setString(EXPERIMENT_VARIANT, value);
return (T) this;
}
public String expirementVariant() {
return getString(EXPERIMENT_VARIANT);
}
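// Content-experiment sketch: the experiment ID (xid) and variant (xvar) should be sent together
// on the same hit. Note that this library spells the builder methods "expirement..."; the calls
// below use those names as-is. The "ga" client is an assumption.
//
//     PageViewHit hit = new PageViewHit("http://example.com/landing", "Landing")
//             .expirementId("Qp0gahJ3RAO3DJ18b0XoUQ")   // xid
//             .expirementVariant("1");                  // xvar
//     ga.post(hit);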
/**
* IP Override
* parameter: uip
* Should be a valid IP address. This will always be anonymized just as though &aip (anonymize IP) had been used.
* example: &uip=1.2.3.4
*/
public T userIp(String value) {
setString(GoogleAnalyticsParameter.USER_IP, value);
return (T) this;
}
public String userIp() {
return getString(GoogleAnalyticsParameter.USER_IP);
}
/**
* User Agent Override
* parameter: &ua
* Should be a User Agent reported by the browser. Note: We have libraries to identify real user agents. Hand crafting your own agent could break at any time.
* example: &ua=Opera%2F9.80%20(Windows%20NT%206.0)%20Presto%2F2.12.388%20Version%2F12.14
*/
public T userAgent(String value) {
setString(GoogleAnalyticsParameter.USER_AGENT, value);
return (T) this;
}
public String userAgent() {
return getString(GoogleAnalyticsParameter.USER_AGENT);
}
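// Server-side relay sketch: when hits are posted on behalf of a browser (for example from a
// backend that proxies client activity), the original client's IP and user agent can be
// forwarded via the uip and ua overrides so geo and browser reports stay accurate. The
// servletRequest variable and the "ga" client are assumptions; only userIp(...) and
// userAgent(...) come from this class.
//
//     PageViewHit hit = new PageViewHit("http://example.com/checkout", "Checkout")
//             .userIp(servletRequest.getRemoteAddr())              // uip (always anonymized)
//             .userAgent(servletRequest.getHeader("User-Agent"));  // ua
//     ga.post(hit);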
protected boolean isEmpty(String string) {
return string == null || string.trim().length() == 0;
}
}
|
0
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu/googleanalytics/GoogleAnalyticsResponse.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.brsanthu.googleanalytics;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.http.NameValuePair;
/**
* Response for GA tracking request.
*
* @author Santhosh Kumar
*
* This copy of google-analytics-java is a back port of version 1.1.2 of the library.
* This backport removes the slf4j dependency, and modifies the code to work with the
* 4.1 version of the Apache http client library.
*
* Original sources can be found at https://github.com/brsanthu/google-analytics-java.
* All copyrights retained by original authors.
*/
public class GoogleAnalyticsResponse {
private int statusCode = 200;
private List<NameValuePair> postedParms = null;
public Map<String, String> getPostedParmsAsMap() {
if (postedParms == null) {
return null;
}
Map<String, String> paramsMap = new HashMap<String, String>();
for (NameValuePair pair : postedParms) {
paramsMap.put(pair.getName(), pair.getValue());
}
return paramsMap;
}
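// Illustrative check of a tracking call's outcome, assuming the surrounding library's client
// returns a GoogleAnalyticsResponse from its post(...) method (that client is not part of this
// file). The Measurement Protocol generally answers with HTTP 200 even for malformed payloads,
// so the posted parameters are mostly useful for debugging.
//
//     GoogleAnalyticsResponse response = ga.post(new PageViewHit("http://foo.com/home", "Home"));
//     if (response.getStatusCode() != 200) {
//         System.err.println("GA post failed: " + response);
//     }
//     Map<String, String> sent = response.getPostedParmsAsMap();   // may be null
//     System.out.println("tracking id sent: " + (sent == null ? "n/a" : sent.get("tid")));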
public List<NameValuePair> getPostedParms() {
return postedParms;
}
public void setPostedParms(List<NameValuePair> postedParms) {
this.postedParms = postedParms;
}
public void setStatusCode(int statusCode) {
this.statusCode = statusCode;
}
public int getStatusCode() {
return statusCode;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("Response [statusCode=");
builder.append(statusCode);
builder.append("]");
return builder.toString();
}
}
|
0
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu/googleanalytics/GoogleAnalyticsStats.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.brsanthu.googleanalytics;
import java.util.concurrent.atomic.AtomicLong;
/**
* Collects the basic stats about successful events that have been posted to GA.
*
* @author Santhosh Kumar
*
* This copy of google-analytics-java is a back port of version 1.1.2 of the library.
* This backport removes the slf4j dependency, and modifies the code to work with the
* 4.1 version of the Apache http client library.
*
* Original sources can be found at https://github.com/brsanthu/google-analytics-java.
* All copyrights retained by original authors.
*/
public class GoogleAnalyticsStats {
private AtomicLong pageViewHits = new AtomicLong();
private AtomicLong eventHits = new AtomicLong();
private AtomicLong screenViewHits = new AtomicLong();
private AtomicLong itemHits = new AtomicLong();
private AtomicLong transactionHits = new AtomicLong();
private AtomicLong timingHits = new AtomicLong();
private AtomicLong socialHits = new AtomicLong();
void pageViewHit() {
pageViewHits.incrementAndGet();
}
void eventHit() {
eventHits.incrementAndGet();
}
void screenViewHit() {
screenViewHits.incrementAndGet();
}
void itemHit() {
itemHits.incrementAndGet();
}
void transactionHit() {
transactionHits.incrementAndGet();
}
void socialHit() {
socialHits.incrementAndGet();
}
void timingHit() {
timingHits.incrementAndGet();
}
public long getPageViewHits() {
return pageViewHits.get();
}
public long getEventHits() {
return eventHits.get();
}
public long getScreenViewHits() {
return screenViewHits.get();
}
public long getItemHits() {
return itemHits.get();
}
public long getTransactionHits() {
return transactionHits.get();
}
public long getTimingHits() {
return timingHits.get();
}
public long getSocialHits() {
return socialHits.get();
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("GoogleAnalyticsStats [");
if (pageViewHits != null) {
builder.append("pageViewHits=");
builder.append(pageViewHits);
builder.append(", ");
}
if (eventHits != null) {
builder.append("eventHits=");
builder.append(eventHits);
builder.append(", ");
}
if (screenViewHits != null) {
builder.append("screenViewHits=");
builder.append(screenViewHits);
builder.append(", ");
}
if (itemHits != null) {
builder.append("itemHits=");
builder.append(itemHits);
builder.append(", ");
}
if (transactionHits != null) {
builder.append("transactionHits=");
builder.append(transactionHits);
builder.append(", ");
}
if (timingHits != null) {
builder.append("timingHits=");
builder.append(timingHits);
builder.append(", ");
}
if (socialHits != null) {
builder.append("socialHits=");
builder.append(socialHits);
}
builder.append("]");
return builder.toString();
}
}
|
0
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu/googleanalytics/ItemHit.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.brsanthu.googleanalytics;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.CURRENCY_CODE;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.ITEM_CATEGORY;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.ITEM_CODE;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.ITEM_NAME;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.ITEM_PRICE;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.ITEM_QUANTITY;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.TRANSACTION_ID;
/**
* GA request to track items as part of ecommerce transaction.
* <p/>
* <p>For more information, see <a href="https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters#ecomm">GA Parameters Reference</a></p>
*
* @author Santhosh Kumar
*
* This copy of google-analytics-java is a back port of version 1.1.2 of the library.
* This backport removes the slf4j dependency, and modifies the code to work with the
* 4.1 version of the Apache http client library.
*
* Original sources can be found at https://github.com/brsanthu/google-analytics-java.
* All copyrights retained by original authors.
*/
public class ItemHit extends GoogleAnalyticsRequest<ItemHit> {
public ItemHit() {
super("item");
}
/**
* <div class="ind">
* <p>
* <strong>Required for transaction hit type.</strong>
* <br>
* <strong>Required for item hit type.</strong>
* </p>
* <p>A unique identifier for the transaction. This value should be the same for both the Transaction hit and Items hits associated to the particular transaction.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>ti</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>500 Bytes
* </td>
* <td>transaction, item</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>OD564</code><br>
* Example usage: <code>ti=OD564</code>
* </div>
* </div>
*/
public ItemHit txId(String value) {
setString(TRANSACTION_ID, value);
return this;
}
public String txId() {
return getString(TRANSACTION_ID);
}
/**
* <div class="ind">
* <p>
* <strong>Required for item hit type.</strong>
* </p>
* <p>Specifies the item name.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>in</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>500 Bytes
* </td>
* <td>item</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>Shoe</code><br>
* Example usage: <code>in=Shoe</code>
* </div>
* </div>
*/
public ItemHit itemName(String value) {
setString(ITEM_NAME, value);
return this;
}
public String itemName() {
return getString(ITEM_NAME);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the price for a single item / unit.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>ip</code></td>
* <td>currency</td>
* <td><code>0</code>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>item</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>3.50</code><br>
* Example usage: <code>ip=3.50</code>
* </div>
* </div>
*/
public ItemHit itemPrice(Double value) {
setDouble(ITEM_PRICE, value);
return this;
}
public Double itemPrice() {
return getDouble(ITEM_PRICE);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the number of items purchased.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>iq</code></td>
* <td>integer</td>
* <td><code>0</code>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>item</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>4</code><br>
* Example usage: <code>iq=4</code>
* </div>
* </div>
*/
public ItemHit itemQuantity(Integer value) {
setInteger(ITEM_QUANTITY, value);
return this;
}
public Integer itemQuantity() {
return getInteger(ITEM_QUANTITY);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the SKU or item code.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>ic</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>500 Bytes
* </td>
* <td>item</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>SKU47</code><br>
* Example usage: <code>ic=SKU47</code>
* </div>
* </div>
*/
public ItemHit itemCode(String value) {
setString(ITEM_CODE, value);
return this;
}
public String itemCode() {
return getString(ITEM_CODE);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the category that the item belongs to.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>iv</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>500 Bytes
* </td>
* <td>item</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>Blue</code><br>
* Example usage: <code>iv=Blue</code>
* </div>
* </div>
*/
public ItemHit itemCategory(String value) {
setString(ITEM_CATEGORY, value);
return this;
}
public String itemCategory() {
return getString(ITEM_CATEGORY);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>When present indicates the local currency for all transaction currency values. Value should be a valid ISO 4217 currency code.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>cu</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>10 Bytes
* </td>
* <td>transaction, item</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>EUR</code><br>
* Example usage: <code>cu=EUR</code>
* </div>
* </div>
*/
public ItemHit currencyCode(String value) {
setString(CURRENCY_CODE, value);
return this;
}
public String currencyCode() {
return getString(CURRENCY_CODE);
}
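// E-commerce sketch: an item hit shares its transaction ID (ti) with the transaction hit it
// belongs to, so both should use the same txId value. The matching transaction hit and the
// "ga" client are assumptions; the builder calls below are all defined in this class.
//
//     ItemHit item = new ItemHit()
//             .txId("OD564")          // same ti as the owning transaction
//             .itemName("Shoe")       // in (required for item hits)
//             .itemPrice(3.50)        // ip
//             .itemQuantity(4)        // iq
//             .itemCode("SKU47")      // ic
//             .itemCategory("Blue")   // iv
//             .currencyCode("EUR");   // cu
//     ga.post(item);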
}
|
0
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu/googleanalytics/PageViewHit.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.brsanthu.googleanalytics;
/**
* GA request to track a typical web page view.
* <p/>
* <p>For more information, see <a href="https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters#content">GA Parameters Reference</a></p>
*
* @author Santhosh Kumar
*
* This copy of google-analytics-java is a back port of version 1.1.2 of the library.
* This backport removes the slf4j dependency, and modifies the code to work with the
* 4.1 version of the Apache http client library.
*
* Original sources can be found at https://github.com/brsanthu/google-analytics-java.
* All copyrights retained by original authors.
*/
public class PageViewHit extends GoogleAnalyticsRequest<PageViewHit> {
public PageViewHit() {
this(null, null, null);
}
public PageViewHit(String url, String title) {
this(url, title, null);
}
public PageViewHit(String url, String title, String description) {
super("pageview");
documentUrl(url);
documentTitle(title);
contentDescription(description);
}
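// Minimal usage sketch: the convenience constructors map onto documentUrl (dl), documentTitle
// (dt) and contentDescription (cd). The "ga" client used to send the hits is an assumption.
//
//     ga.post(new PageViewHit("http://foo.com/home?a=b", "Home"));
//     ga.post(new PageViewHit("http://foo.com/help", "Help", "Help landing screen"));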
}
|
0
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu/googleanalytics/RequestParameterDiscoverer.java
|
package com.brsanthu.googleanalytics;
/**
* Mechanism to discover some default request parameters.
*
* A small library for interacting with Google Analytics Measurement Protocol. This
* copy is a back port of version 1.1.2 of the library. This backport removes
* the slf4j dependency, and modifies the code to work with the 4.1 version of the
* Apache http client library.
*
* Original sources can be found at https://github.com/brsanthu/google-analytics-java.
* All copyrights retained by original authors.
*
*/
public interface RequestParameterDiscoverer {
public DefaultRequest discoverParameters(GoogleAnalyticsConfig config, DefaultRequest request);
}
|
0
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu/googleanalytics/RequestProvider.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.brsanthu.googleanalytics;
/**
* Interface which returns the GA request that needs to be sent to GA.
* <p/>
* This interface helps create the GA request lazily, inside the async thread, so that the cost of
* constructing the request is not borne by the caller's thread, and is avoided entirely if GA is
* disabled (via {@link GoogleAnalyticsConfig#setEnabled})
*
* @author Santhosh Kumar
*
* This copy of google-analytics-java is a back port of version 1.1.2 of the library.
* This backport removes the slf4j dependency, and modifies the code to work with the
* 4.1 version of the Apache http client library.
*
* Original sources can be found at https://github.com/brsanthu/google-analytics-java.
* All copyrights retained by original authors.
*/
public interface RequestProvider {
/**
* Constructs and returns the request that should be sent to GA. If this method throws an exception,
* nothing will be sent to GA.
*
* @return the request to be sent to GA. Can return <code>null</code>, in which case
* nothing will be sent to GA.
*/
@SuppressWarnings("rawtypes")
GoogleAnalyticsRequest getRequest();
}
|
0
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu/googleanalytics/ScreenViewHit.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.brsanthu.googleanalytics;
/**
* GA request to track application page view (for mobile or desktop apps).
* <p/>
* <p>For more information, see <a href="https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters#apptracking">GA Parameters Reference</a></p>
*
* @author Santhosh Kumar
*
* This copy of google-analytics-java is a back port of version 1.1.2 of the library.
* This backport removes the slf4j dependency, and modifies the code to work with the
* 4.1 version of the Apache http client library.
*
* Original sources can be found at https://github.com/brsanthu/google-analytics-java.
* All copyrights retained by original authors.
*
*/
public class ScreenViewHit extends GoogleAnalyticsRequest<ScreenViewHit> {
public ScreenViewHit() {
this(null);
}
public ScreenViewHit(String contentDescription) {
this(null, null, contentDescription);
}
public ScreenViewHit(String applicationName, String applicationVersion, String contentDescription) {
super("screenview");
applicationName(applicationName);
applicationVersion(applicationVersion);
contentDescription(contentDescription);
}
}
|
0
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu/googleanalytics/SocialHit.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.brsanthu.googleanalytics;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.SOCIAL_ACTION;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.SOCIAL_ACTION_TARGET;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.SOCIAL_NETWORK;
/**
* GA request to track social interactions.
* <p/>
* <p>For more information, see <a href="https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters#social">GA Parameters Reference</a></p>
*
* @author Santhosh Kumar
*
* This copy of google-analytics-java is a back port of version 1.1.2 of the library.
* This backport removes the slf4j dependency, and modifies the code to work with the
* 4.1 version of the Apache http client library.
*
* Original sources can be found at https://github.com/brsanthu/google-analytics-java.
* All copyrights retained by original authors.
*/
public class SocialHit extends GoogleAnalyticsRequest<SocialHit> {
public SocialHit() {
this(null, null, null);
}
public SocialHit(String socialNetwork, String socialAction, String socialTarget) {
super("social");
socialAction(socialAction);
socialNetwork(socialNetwork);
socialActionTarget(socialTarget);
}
/**
* <div class="ind">
* <p>
* <strong>Required for social hit type.</strong>
* </p>
* <p>Specifies the social network, for example Facebook or Google Plus.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>sn</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>50 Bytes
* </td>
* <td>social</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>facebook</code><br>
* Example usage: <code>sn=facebook</code>
* </div>
* </div>
*/
public SocialHit socialNetwork(String value) {
setString(SOCIAL_NETWORK, value);
return this;
}
public String socialNetwork() {
return getString(SOCIAL_NETWORK);
}
/**
* <div class="ind">
* <p>
* <strong>Required for social hit type.</strong>
* </p>
* <p>Specifies the social interaction action. For example on Google Plus when a user clicks the +1 button, the social action is 'plus'.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>sa</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>50 Bytes
* </td>
* <td>social</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>like</code><br>
* Example usage: <code>sa=like</code>
* </div>
* </div>
*/
public SocialHit socialAction(String value) {
setString(SOCIAL_ACTION, value);
return this;
}
public String socialAction() {
return getString(SOCIAL_ACTION);
}
/**
* <div class="ind">
* <p>
* <strong>Required for social hit type.</strong>
* </p>
* <p>Specifies the target of a social interaction. This value is typically a URL but can be any text.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>st</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>2048 Bytes
* </td>
* <td>social</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>http://foo.com</code><br>
* Example usage: <code>st=http%3A%2F%2Ffoo.com</code>
* </div>
* </div>
*/
public SocialHit socialActionTarget(String value) {
setString(SOCIAL_ACTION_TARGET, value);
return this;
}
public String socialActionTarget() {
return getString(SOCIAL_ACTION_TARGET);
}
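// Illustrative sketch: a social hit carries the network (sn), the action (sa) and the action
// target (st), which the three-argument constructor sets in one go. The "ga" client is an
// assumption.
//
//     ga.post(new SocialHit("facebook", "like", "http://foo.com"));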
}
|
0
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu/googleanalytics/TimingHit.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.brsanthu.googleanalytics;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.DNS_TIME;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.PAGE_DOWNLOAD_TIME;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.PAGE_LOAD_TIME;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.REDIRECT_RESPONSE_TIME;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.SERVER_RESPONSE_TIME;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.TCP_CONNECT_TIME;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.USER_TIMING_CATEGORY;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.USER_TIMING_LABEL;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.USER_TIMING_TIME;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.USER_TIMING_VARIABLE_NAME;
/**
* GA request to track performance timings like page load time, server response time etc.
* <p/>
* <p>For more information, see <a href="https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters#timing">GA Parameters Reference</a></p>
*
* @author Santhosh Kumar
*
* This copy of google-analytics-java is a back port of version 1.1.2 of the library.
* This backport removes the slf4j dependency, and modifies the code to work with the
* 4.1 version of the Apache http client library.
*
* Original sources can be found at https://github.com/brsanthu/google-analytics-java.
* All copyrights retained by original authors.
*/
public class TimingHit extends GoogleAnalyticsRequest<TimingHit> {
public TimingHit() {
super("timing");
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the user timing category.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>utc</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>150 Bytes
* </td>
* <td>timing</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>category</code><br>
* Example usage: <code>utc=category</code>
* </div>
* </div>
*/
public TimingHit userTimingCategory(String value) {
setString(USER_TIMING_CATEGORY, value);
return this;
}
public String userTimingCategory() {
return getString(USER_TIMING_CATEGORY);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the user timing variable.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>utv</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>500 Bytes
* </td>
* <td>timing</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>lookup</code><br>
* Example usage: <code>utv=lookup</code>
* </div>
* </div>
*/
public TimingHit userTimingVariableName(String value) {
setString(USER_TIMING_VARIABLE_NAME, value);
return this;
}
public String userTimingVariableName() {
return getString(USER_TIMING_VARIABLE_NAME);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the user timing value. The value is in milliseconds.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>utt</code></td>
* <td>integer</td>
* <td><span class="none">None</span>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>timing</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>123</code><br>
* Example usage: <code>utt=123</code>
* </div>
* </div>
*/
public TimingHit userTimingTime(Integer value) {
setInteger(USER_TIMING_TIME, value);
return this;
}
public Integer userTimingTime() {
return getInteger(USER_TIMING_TIME);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the user timing label.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>utl</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>500 Bytes
* </td>
* <td>timing</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>label</code><br>
* Example usage: <code>utl=label</code>
* </div>
* </div>
*/
public TimingHit userTimingLabel(String value) {
setString(USER_TIMING_LABEL, value);
return this;
}
public String userTimingLabel() {
return getString(USER_TIMING_LABEL);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the time it took for a page to load. The value is in milliseconds.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>plt</code></td>
* <td>integer</td>
* <td><span class="none">None</span>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>timing</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>3554</code><br>
* Example usage: <code>plt=3554</code>
* </div>
* </div>
*/
public TimingHit pageLoadTime(Integer value) {
setInteger(PAGE_LOAD_TIME, value);
return this;
}
public Integer pageLoadTime() {
return getInteger(PAGE_LOAD_TIME);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the time it took to do a DNS lookup.The value is in milliseconds.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>dns</code></td>
* <td>integer</td>
* <td><span class="none">None</span>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>timing</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>43</code><br>
* Example usage: <code>dns=43</code>
* </div>
* </div>
*/
public TimingHit dnsTime(Integer value) {
setInteger(DNS_TIME, value);
return this;
}
public Integer dnsTime() {
return getInteger(DNS_TIME);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the time it took for the page to be downloaded. The value is in milliseconds.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>pdt</code></td>
* <td>integer</td>
* <td><span class="none">None</span>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>timing</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>500</code><br>
* Example usage: <code>pdt=500</code>
* </div>
* </div>
*/
public TimingHit pageDownloadTime(Integer value) {
setInteger(PAGE_DOWNLOAD_TIME, value);
return this;
}
public Integer pageDownloadTime() {
return getInteger(PAGE_DOWNLOAD_TIME);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the time it took for any redirects to happen. The value is in milliseconds.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>rrt</code></td>
* <td>integer</td>
* <td><span class="none">None</span>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>timing</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>500</code><br>
* Example usage: <code>rrt=500</code>
* </div>
* </div>
*/
public TimingHit redirectResponseTime(Integer value) {
setInteger(REDIRECT_RESPONSE_TIME, value);
return this;
}
public Integer redirectResponseTime() {
return getInteger(REDIRECT_RESPONSE_TIME);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the time it took for a TCP connection to be made. The value is in milliseconds.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>tcp</code></td>
* <td>integer</td>
* <td><span class="none">None</span>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>timing</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>500</code><br>
* Example usage: <code>tcp=500</code>
* </div>
* </div>
*/
public TimingHit tcpConnectTime(Integer value) {
setInteger(TCP_CONNECT_TIME, value);
return this;
}
public Integer tcpConnectTime() {
return getInteger(TCP_CONNECT_TIME);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the time it took for the server to respond after the connect time. The value is in milliseconds.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>srt</code></td>
* <td>integer</td>
* <td><span class="none">None</span>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>timing</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>500</code><br>
* Example usage: <code>srt=500</code>
* </div>
* </div>
*/
public TimingHit serverResponseTime(Integer value) {
setInteger(SERVER_RESPONSE_TIME, value);
return this;
}
public Integer serverResponseTime() {
return getInteger(SERVER_RESPONSE_TIME);
}
}
|
0
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu/googleanalytics/TransactionHit.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.brsanthu.googleanalytics;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.CURRENCY_CODE;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.TRANSACTION_AFFILIATION;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.TRANSACTION_ID;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.TRANSACTION_REVENUE;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.TRANSACTION_SHIPPING;
import static com.brsanthu.googleanalytics.GoogleAnalyticsParameter.TRANSACTION_TAX;
/**
* GA request to track ecommerce transaction.
* <p/>
* <p>For more information, see <a href="https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters#ecomm">GA Parameters Reference</a></p>
*
* @author Santhosh Kumar
*
* This copy of google-analytics-java is a back port of version 1.1.2 of the library.
* This backport removes the slf4j dependency, and modifies the code to work with the
* 4.1 version of the Apache http client library.
*
* Original sources can be found at https://github.com/brsanthu/google-analytics-java.
* All copyrights retained by original authors.
*/
public class TransactionHit extends GoogleAnalyticsRequest<TransactionHit> {
public TransactionHit() {
this(null);
}
public TransactionHit(String txId) {
this(txId, null);
}
public TransactionHit(String txId, Double txRevenue) {
this(txId, null, txRevenue);
}
public TransactionHit(String txId, String txAffiliation, Double txRevenue) {
this(txId, txAffiliation, txRevenue, null, null, "USD");
}
public TransactionHit(String txId, String txAffiliation, Double txRevenue, String currencyCode) {
this(txId, txAffiliation, txRevenue, null, null, currencyCode);
}
public TransactionHit(String txId, String txAffiliation, Double txRevenue, Double txShipping, Double txTax, String currencyCode) {
super("transaction");
txId(txId);
txAffiliation(txAffiliation);
txRevenue(txRevenue);
txShipping(txShipping);
txTax(txTax);
currencyCode(currencyCode);
}
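// Illustrative usage sketch (not part of the original upstream source): the convenience
// constructors above all delegate to the six-argument constructor, so the call below is
// equivalent to chaining the individual setters. The values are hypothetical; note that the
// one- to three-argument constructors default the currency code to "USD".
static TransactionHit exampleTransactionHit() {
return new TransactionHit("OD564", "Member", 15.47, 3.50, 11.20, "EUR");
}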
/**
* <div class="ind">
* <p>
* <strong>Required for transaction hit type.</strong>
* <br>
* <strong>Required for item hit type.</strong>
* </p>
* <p>A unique identifier for the transaction. This value should be the same for both the Transaction hit and Items hits associated to the particular transaction.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>ti</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>500 Bytes
* </td>
* <td>transaction, item</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>OD564</code><br>
* Example usage: <code>ti=OD564</code>
* </div>
* </div>
*/
public TransactionHit txId(String value) {
setString(TRANSACTION_ID, value);
return this;
}
public String txId() {
return getString(TRANSACTION_ID);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the affiliation or store name.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>ta</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>500 Bytes
* </td>
* <td>transaction</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>Member</code><br>
* Example usage: <code>ta=Member</code>
* </div>
* </div>
*/
public TransactionHit txAffiliation(String value) {
setString(TRANSACTION_AFFILIATION, value);
return this;
}
public String txAffiliation() {
return getString(TRANSACTION_AFFILIATION);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the total revenue associated with the transaction. This value should include any shipping or tax costs.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>tr</code></td>
* <td>currency</td>
* <td><code>0</code>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>transaction</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>15.47</code><br>
* Example usage: <code>tr=15.47</code>
* </div>
* </div>
*/
public TransactionHit txRevenue(Double value) {
setDouble(TRANSACTION_REVENUE, value);
return this;
}
public Double txRevenue() {
return getDouble(TRANSACTION_REVENUE);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the total shipping cost of the transaction.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>ts</code></td>
* <td>currency</td>
* <td><code>0</code>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>transaction</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>3.50</code><br>
* Example usage: <code>ts=3.50</code>
* </div>
* </div>
*/
public TransactionHit txShipping(Double value) {
setDouble(TRANSACTION_SHIPPING, value);
return this;
}
public Double txShipping() {
return getDouble(TRANSACTION_SHIPPING);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>Specifies the total tax of the transaction.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>tt</code></td>
* <td>currency</td>
* <td><code>0</code>
* </td>
* <td><span class="none">None</span>
* </td>
* <td>transaction</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>11.20</code><br>
* Example usage: <code>tt=11.20</code>
* </div>
* </div>
*/
public TransactionHit txTax(Double value) {
setDouble(TRANSACTION_TAX, value);
return this;
}
public Double txTax() {
return getDouble(TRANSACTION_TAX);
}
/**
* <div class="ind">
* <p>
* Optional.
* </p>
* <p>When present indicates the local currency for all transaction currency values. Value should be a valid ISO 4217 currency code.</p>
* <table border="1">
* <tbody>
* <tr>
* <th>Parameter</th>
* <th>Value Type</th>
* <th>Default Value</th>
* <th>Max Length</th>
* <th>Supported Hit Types</th>
* </tr>
* <tr>
* <td><code>cu</code></td>
* <td>text</td>
* <td><span class="none">None</span>
* </td>
* <td>10 Bytes
* </td>
* <td>transaction, item</td>
* </tr>
* </tbody>
* </table>
* <div>
* Example value: <code>EUR</code><br>
* Example usage: <code>cu=EUR</code>
* </div>
* </div>
*/
public TransactionHit currencyCode(String value) {
setString(CURRENCY_CODE, value);
return this;
}
public String currencyCode() {
return getString(CURRENCY_CODE);
}
}
|
0
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu/googleanalytics
|
java-sources/ai/h2o/google-analytics-java/1.1.2-H2O-CUSTOM/com/brsanthu/googleanalytics/internal/ParameterGetterSetterGenerator.java
|
package com.brsanthu.googleanalytics.internal;
import com.brsanthu.googleanalytics.GoogleAnalyticsParameter;
/**
* A small library for interacting with Google Analytics Measurement Protocol. This
* copy is a back port of version 1.1.2 of the library. This backport removes
* the slf4j dependency, and modifies the code to work with the 4.1 version of the
* Apache http client library.
*
* Original sources can be found at https://github.com/brsanthu/google-analytics-java.
* All copyrights retained by original authors.
*
*/
public class ParameterGetterSetterGenerator {
public static void main(String[] args) {
GoogleAnalyticsParameter[] enumConstants = GoogleAnalyticsParameter.class.getEnumConstants();
for (GoogleAnalyticsParameter parameter : enumConstants) {
// The original used Guava's CaseFormat.UPPER_UNDERSCORE.to(CaseFormat.LOWER_CAMEL, ...);
// that dependency was removed in this backport, so convert the constant name inline instead.
StringBuilder camel = new StringBuilder();
boolean upperNext = false;
for (char c : parameter.toString().toLowerCase().toCharArray()) {
if (c == '_') {
upperNext = true;
} else {
camel.append(upperNext ? Character.toUpperCase(c) : c);
upperNext = false;
}
}
String methodName = camel.toString();
String constName = parameter.toString();
String type = "String";
if (parameter.getType().equalsIgnoreCase("integer")) {
type = "Integer";
} else if (parameter.getType().equalsIgnoreCase("boolean")) {
type = "Boolean";
} else if (parameter.getType().equalsIgnoreCase("currency")) {
type = "Double";
}
System.out.println("public T " + methodName + "(" + type + " value) {");
System.out.println(" set" + type + "(" + constName + ", value);");
System.out.println(" return (T) this;");
System.out.println("}");
System.out.println("public " + type + " " + methodName + "() {");
System.out.println(" return get" + type + "(" + constName + ");");
System.out.println("}");
}
}
}
|
0
|
java-sources/ai/h2o/h2o-admissibleml/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-admissibleml/3.46.0.7/hex/Infogram/EstimateCMI.java
|
package hex.Infogram;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
public class EstimateCMI extends MRTask<EstimateCMI> {
public int _nonZeroRows;
public double _accumulatedCMI;
public double _meanCMI;
public static final double _scale = 1.0 / Math.log(2);
public final int _responseColumn;
public final int _nclass;
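// Sketch of what this MRTask computes (hypothetical values): _accumulatedCMI collects the sum of
// ln(p_i) over rows with positive weight, where p_i is the predicted probability of the observed
// class, and postGlobal converts the sum into a mean log2-probability via _scale = 1/ln(2).
// For example, two rows with probabilities 0.5 and 0.25 give (ln 0.5 + ln 0.25) / ln 2 / 2 = -1.5.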
public EstimateCMI(Frame fr, int nclasses, String response) {
_meanCMI = 0.0;
_responseColumn = fr.find(response);
_nclass = nclasses;
}
@Override
public void map(Chunk[] ck) {
_nonZeroRows = 0;
_accumulatedCMI = 0.0;
int nchunks = ck.length - 1;
boolean weightIncluded = nchunks - _nclass == 2;
int numRow = ck[0].len();
for (int rowIndex = 0; rowIndex < numRow; rowIndex++) {
if (!weightIncluded || (weightIncluded && ck[nchunks].atd(rowIndex) > 0)) {
int prediction = (int) ck[_responseColumn].atd(rowIndex);
double predictionProb = ck[prediction + 1].atd(rowIndex);
if (!Double.isNaN(predictionProb) && predictionProb > 0) {
_nonZeroRows++;
_accumulatedCMI += Math.log(predictionProb);
}
}
}
}
@Override
public void reduce(EstimateCMI other) {
_nonZeroRows += other._nonZeroRows;
_accumulatedCMI += other._accumulatedCMI;
}
@Override
public void postGlobal() {
_meanCMI = _scale * _accumulatedCMI / _nonZeroRows;
}
}
|
0
|
java-sources/ai/h2o/h2o-admissibleml/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-admissibleml/3.46.0.7/hex/Infogram/Infogram.java
|
package hex.Infogram;
import hex.*;
import water.*;
import water.exceptions.H2OModelBuilderIllegalArgumentException;
import water.fvec.Frame;
import water.util.ArrayUtils;
import water.util.TwoDimTable;
import java.util.*;
import java.util.stream.IntStream;
import hex.genmodel.utils.DistributionFamily;
import static hex.Infogram.InfogramModel.InfogramModelOutput.sortCMIRel;
import static hex.Infogram.InfogramModel.InfogramParameters.Algorithm.AUTO;
import static hex.Infogram.InfogramModel.InfogramParameters.Algorithm.gbm;
import static hex.Infogram.InfogramUtils.*;
import static hex.gam.MatrixFrameUtils.GamUtils.keepFrameKeys;
import static water.util.ArrayUtils.sort;
import static water.util.ArrayUtils.sum;
public class Infogram extends ModelBuilder<hex.Infogram.InfogramModel, hex.Infogram.InfogramModel.InfogramParameters,
hex.Infogram.InfogramModel.InfogramModelOutput> {
static final double NORMALIZE_ADMISSIBLE_INDEX = 1.0/Math.sqrt(2.0);
boolean _buildCore; // true for core infogram (no protected columns), false for fair infogram
String[] _topKPredictors; // contain the names of top predictors to consider for infogram
Frame _baseOrSensitiveFrame = null;
String[] _modelDescription; // describe each model in terms of predictors used
int _numModels; // number of models to build
double[] _cmi; // store conditional mutual information
double[] _cmiValid;
double[] _cmiCV;
double[] _cmiRaw; // raw conditional mutual information
double[] _cmiRawValid; // raw conditional mutual information from validation frame
double[] _cmiRawCV;
String[] _columnsCV;
TwoDimTable _varImp;
int _numPredictors; // number of predictors in training dataset
Key<Frame> _cmiRelKey;
Key<Frame> _cmiRelKeyValid;
Key<Frame> _cmiRelKeyCV;
boolean _cvDone = false; // set to true once the cross-validation models are built and the main model is next
private transient InfogramModel _model;
long _validNonZeroNumRows;
int _nFoldOrig = 0;
Model.Parameters.FoldAssignmentScheme _foldAssignmentOrig = null;
String _foldColumnOrig = null;
public Infogram(boolean startup_once) { super(new hex.Infogram.InfogramModel.InfogramParameters(), startup_once);}
public Infogram(hex.Infogram.InfogramModel.InfogramParameters parms) {
super(parms);
init(false);
}
public Infogram(hex.Infogram.InfogramModel.InfogramParameters parms, Key<hex.Infogram.InfogramModel> key) {
super(parms, key);
init(false);
}
@Override
protected Driver trainModelImpl() {
return new InfogramDriver();
}
@Override
protected int nModelsInParallel(int folds) {
return nModelsInParallel(folds,2);
}
/***
* This is called before cross-validation is carried out
*/
@Override
public void computeCrossValidation() {
info("cross-validation", "cross-validation infogram information is stored in frame with key" +
" labeled as admissible_score_key_cv and the admissible features in admissible_features_cv.");
if (error_count() > 0) {
throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(Infogram.this);
}
super.computeCrossValidation();
}
// aggregate the CMI information from the cross-validation models before building the main model
@Override
public void cv_computeAndSetOptimalParameters(ModelBuilder[] cvModelBuilders) {
int nBuilders = cvModelBuilders.length;
double[][] cmiRaw = new double[nBuilders][];
List<List<String>> columns = new ArrayList<>();
long[] nObs = new long[nBuilders];
for (int i = 0; i < cvModelBuilders.length; i++) { // collect raw CMI info from each fold model
InfogramModel g = (InfogramModel) cvModelBuilders[i].dest().get();
Scope.track_generic(g);
extractInfogramInfo(g, cmiRaw, columns, i);
nObs[i] = g._output._validNonZeroNumRows;
}
calculateMeanInfogramInfo(cmiRaw, columns, nObs);
for (int i = 0; i < cvModelBuilders.length; i++) {
Infogram g = (Infogram) cvModelBuilders[i];
InfogramModel gm = g._model;
gm.write_lock(_job);
gm.update(_job);
gm.unlock(_job);
}
_cvDone = true; // cv is done and we are going to build main model next
}
public void calculateMeanInfogramInfo(double[][] cmiRaw, List<List<String>> columns,
long[] nObs) {
int nFolds = cmiRaw.length;
Set<String> allNames = new HashSet<>(); // store all names
for (List<String> oneFold : columns)
allNames.addAll(oneFold);
List<String> allNamesList = new ArrayList<>(allNames);
int nPreds = allNames.size();
_cmiCV = new double[nPreds];
_cmiRawCV = new double[nPreds];
double oneOverNObsSum = 1.0/sum(nObs);
int foldPredSize = cmiRaw[0].length;
for (int fIndex = 0; fIndex < nFolds; fIndex++) { // get sum of each fold
List<String> oneFoldC = columns.get(fIndex);
double scale = nObs[fIndex] * oneOverNObsSum;
for (int pIndex = 0; pIndex < foldPredSize; pIndex++) { // go through each predictor
String colName = oneFoldC.get(pIndex); // use same predictor order as zero fold
int allNameIndex = allNamesList.indexOf(colName); // current fold colName order index change
_cmiRawCV[allNameIndex] += cmiRaw[fIndex][pIndex] * scale;
}
}
// normalize CMI and relevance again
double maxCMI = _parms._top_n_features == nPreds
? ArrayUtils.maxValue(_cmiRawCV)
: Arrays.stream(_cmiRawCV).sorted().toArray()[Math.min(_parms._top_n_features, nPreds)-1];
double oneOverMaxCMI = maxCMI == 0 ? 0 : 1.0/maxCMI;
for (int pIndex = 0; pIndex < nPreds; pIndex++) {
_cmiCV[pIndex] = _cmiRawCV[pIndex]*oneOverMaxCMI;
}
_columnsCV = allNamesList.stream().toArray(String[]::new);
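// Worked sketch of the weighted pooling above (hypothetical numbers): with two folds where fold 0
// has nObs = 60 and cmiRaw = {0.30, 0.10} for columns {x1, x2}, and fold 1 has nObs = 40 and
// cmiRaw = {0.20, 0.40} for columns {x2, x1}, the pooled raw CMI is
// x1: 0.30*0.6 + 0.40*0.4 = 0.34 and x2: 0.10*0.6 + 0.20*0.4 = 0.14. When _top_n_features covers
// all predictors, the normalization divides by the maximum (0.34), giving roughly {1.0, 0.41}.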
}
@Override
public ModelCategory[] can_build() {
return new ModelCategory[] { ModelCategory.Binomial, ModelCategory.Multinomial};
}
@Override
public boolean isSupervised() {
return true;
}
@Override
public boolean havePojo() {
return false;
}
@Override
public boolean haveMojo() {
return false;
}
@Override
public void init(boolean expensive) {
super.init(expensive);
if (expensive)
validateInfoGramParameters();
}
private void validateInfoGramParameters() {
Frame dataset = _parms.train();
if (!_parms.train().vec(_parms._response_column).isCategorical()) // only classification is supported now
error("response_column", "Regression is not supported for Infogram. If you meant to do" +
" classification, convert your response column to categorical/factor type before calling Infogram.");
// make sure protected attributes are true predictor columns
if (_parms._protected_columns != null) {
Set<String> colNames = new HashSet<>(Arrays.asList(dataset.names()));
for (String senAttribute : _parms._protected_columns)
if (!colNames.contains(senAttribute))
error("protected_columns", "protected_columns: "+senAttribute+" is not a valid " +
"column in the training dataset.");
}
_buildCore = _parms._protected_columns == null;
if (_buildCore) {
if (_parms._net_information_threshold == -1) { // not set
_parms._cmi_threshold = 0.1;
_parms._net_information_threshold = 0.1;
} else if (_parms._net_information_threshold > 1 || _parms._net_information_threshold < 0) {
error("net_information_threshold", " should be set to be between 0 and 1.");
} else {
_parms._cmi_threshold = _parms._net_information_threshold;
}
if (_parms._total_information_threshold == -1) { // not set
_parms._relevance_threshold = 0.1;
_parms._total_information_threshold = 0.1;
} else if (_parms._total_information_threshold < 0 || _parms._total_information_threshold > 1) {
error("total_information_threshold", " should be set to be between 0 and 1.");
} else {
_parms._relevance_threshold = _parms._total_information_threshold;
}
if (_parms._safety_index_threshold != -1) {
warn("safety_index_threshold", "Should not set safety_index_threshold for core infogram " +
"runs. Set net_information_threshold instead. Using default of 0.1 if not set");
}
if (_parms._relevance_index_threshold != -1) {
warn("relevance_index_threshold", "Should not set relevance_index_threshold for core " +
"infogram runs. Set total_information_threshold instead. Using default of 0.1 if not set");
}
} else { // fair infogram
if (_parms._safety_index_threshold == -1) {
_parms._cmi_threshold = 0.1;
_parms._safety_index_threshold = 0.1;
} else if (_parms._safety_index_threshold < 0 || _parms._safety_index_threshold > 1) {
error("safety_index_threshold", " should be set to be between 0 and 1.");
} else {
_parms._cmi_threshold = _parms._safety_index_threshold;
}
if (_parms._relevance_index_threshold == -1) {
_parms._relevance_threshold = 0.1;
_parms._relevance_index_threshold = 0.1;
} else if (_parms._relevance_index_threshold < 0 || _parms._relevance_index_threshold > 1) {
error("relevance_index_threshold", " should be set to be between 0 and 1.");
} else {
_parms._relevance_threshold = _parms._relevance_index_threshold;
}
if (_parms._net_information_threshold != -1) {
warn("net_information_threshold", "Should not set net_information_threshold for fair " +
"infogram runs, set safety_index_threshold instead. Using default of 0.1 if not set");
}
if (_parms._total_information_threshold != -1) {
warn("total_information_threshold", "Should not set total_information_threshold for fair" +
" infogram runs, set relevance_index_threshold instead. Using default of 0.1 if not set");
}
if (AUTO.equals(_parms._algorithm))
_parms._algorithm = gbm;
}
// check top k to be between 0 and training dataset column number
if (_parms._top_n_features < 0)
error("_topk", "topk must be between 0 and the number of predictor columns in your training dataset.");
_numPredictors = _parms.train().numCols()-1;
if (_parms._weights_column != null)
_numPredictors--;
if (_parms._offset_column != null)
_numPredictors--;
if ( _parms._top_n_features > _numPredictors) {
warn("top_n_features", "The top_n_features exceed the actual number of predictor columns in your training dataset." +
" It will be set to the number of predictors in your training dataset.");
_parms._top_n_features = _numPredictors;
}
if (_parms._nparallelism < 0)
error("nparallelism", "must be >= 0. If 0, it is adaptive");
if (_parms._nparallelism == 0) // adaptively set nparallelism
_parms._nparallelism = H2O.NUMCPUS;
if (_parms._compute_p_values)
error("compute_p_values", " compute_p_values calculation is not yet implemented.");
if (nclasses() < 2)
error("distribution", " infogram currently only supports classification models");
if (DistributionFamily.AUTO.equals(_parms._distribution)) {
_parms._distribution = (nclasses() == 2) ? DistributionFamily.bernoulli : DistributionFamily.multinomial;
}
if (_cvDone) { // disable cv now that we are in main model
_nFoldOrig = _parms._nfolds;
_foldColumnOrig = _parms._fold_column;
_foldAssignmentOrig = _parms._fold_assignment;
_parms._fold_column = null;
_parms._nfolds = 0;
_parms._fold_assignment = null;
}
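// Recap of the threshold mapping performed above (a default of 0.1 applies when a threshold is unset):
// core infogram: _total_information_threshold -> _relevance_threshold, _net_information_threshold -> _cmi_threshold
// fair infogram: _relevance_index_threshold -> _relevance_threshold, _safety_index_threshold -> _cmi_threshold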
}
private class InfogramDriver extends Driver {
void prepareModelTrainingFrame() {
String[] eligiblePredictors = extractPredictors(_parms, _train, _foldColumnOrig); // exclude sensitive attributes if applicable
_baseOrSensitiveFrame = extractTrainingFrame(_parms, _parms._protected_columns, 1, _parms.train().clone());
_parms.extraModelSpecificParams(); // copy over model specific parameters to build infogram
_topKPredictors = extractTopKPredictors(_parms, _parms.train(), eligiblePredictors); // extract topK predictors
_numModels = 1 + _topKPredictors.length;
_modelDescription = generateModelDescription(_topKPredictors, _parms._protected_columns);
}
@Override
public void computeImpl() {
init(true);
if (error_count() > 0)
throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(Infogram.this);
_job.update(0, "Initializing model training");
buildModel();
}
// todo: add max_runtime_secs restrictions
public final void buildModel() {
try {
boolean validPresent = _parms.valid() != null;
prepareModelTrainingFrame(); // generate training frame with predictors and sensitive features (if specified)
InfogramModel model = new hex.Infogram.InfogramModel(dest(), _parms, new hex.Infogram.InfogramModel.InfogramModelOutput(Infogram.this));
_model = model.delete_and_lock(_job);
_model._output._start_time = System.currentTimeMillis();
_cmiRaw = new double[_numModels];
if (_parms.valid() != null)
_cmiRawValid = new double[_numModels];
buildInfoGramsNRelevance(validPresent); // calculate mean CMI
_job.update(1, "finished building models for Infogram ...");
_model._output.setDistribution(_parms._distribution);
copyCMIRelevance(_model._output); // copy over cmi, relevance of all predictors to model._output
_cmi = _model._output._cmi;
if (validPresent)
copyCMIRelevanceValid(_model._output); // copy over cmi, relevance of all predictors to model._output
_cmiRelKey = setCMIRelFrame(validPresent);
_model._output.extractAdmissibleFeatures(DKV.getGet(_cmiRelKey), false, false);
if (validPresent) {
_cmiRelKeyValid = _model._output._admissible_score_key_valid;
_model._output.extractAdmissibleFeatures(DKV.getGet(_cmiRelKeyValid), true, false);
_model._output._validNonZeroNumRows = _validNonZeroNumRows;
}
if (_cvDone) { // CV is enabled and now we are in main model
_cmiRelKeyCV = setCMIRelFrameCV(); // generate relevance and CMI frame from cv runs
_model._output._admissible_score_key_xval = _cmiRelKeyCV;
_model._output.extractAdmissibleFeatures(DKV.getGet(_cmiRelKeyCV), false, true);
_parms._nfolds = _nFoldOrig;
_parms._fold_assignment = _foldAssignmentOrig;
_parms._fold_column = _foldColumnOrig;
}
_job.update(1, "Infogram building completed...");
_model.update(_job._key);
} finally {
Scope.track(_baseOrSensitiveFrame);
final List<Key> keep = new ArrayList<>();
if (_model != null) {
keepFrameKeys(keep, _cmiRelKey);
if (_cmiRelKeyValid != null)
keepFrameKeys(keep, _cmiRelKeyValid);
if (_cmiRelKeyCV != null)
keepFrameKeys(keep, _cmiRelKeyCV);
// final model update
_model.update(_job._key);
_model.unlock(_job);
}
Scope.untrack(keep.toArray(new Key[keep.size()]));
}
}
/**
* Copy over info to model._output for _cmi_raw, _cmi, _topKFeatures,
* _all_predictor_names. Derive _admissible for predictors if cmi >= cmi_threshold and
* relevance >= relevance_threshold. Derive _admissible_index as distance from point with cmi = 1 and
* relevance = 1. In addition, all arrays are sorted on _admissible_index.
*/
private void copyCMIRelevance(InfogramModel.InfogramModelOutput modelOutput) {
modelOutput._cmi_raw = new double[_cmi.length];
System.arraycopy(_cmiRaw, 0, modelOutput._cmi_raw, 0, modelOutput._cmi_raw.length);
modelOutput._admissible_index = new double[_cmi.length];
modelOutput._admissible = new double[_cmi.length];
modelOutput._cmi = _cmi.clone();
modelOutput._topKFeatures = _topKPredictors.clone();
modelOutput._all_predictor_names = _topKPredictors.clone();
int numRows = _varImp.getRowDim();
String[] varRowHeaders = _varImp.getRowHeaders();
List<String> relNames = new ArrayList<>(Arrays.asList(varRowHeaders));
modelOutput._relevance = new double[numRows];
copyGenerateAdmissibleIndex(numRows, relNames, modelOutput._cmi, modelOutput._cmi_raw, modelOutput._relevance,
modelOutput._admissible_index, modelOutput._admissible, modelOutput._all_predictor_names);
}
public void copyCMIRelevanceValid(InfogramModel.InfogramModelOutput modelOutput) {
modelOutput._cmi_raw_valid = new double[_cmiValid.length];
System.arraycopy(_cmiRawValid, 0, modelOutput._cmi_raw_valid, 0, modelOutput._cmi_raw_valid.length);
modelOutput._admissible_index_valid = new double[_cmiValid.length];
modelOutput._admissible_valid = new double[_cmiValid.length];
modelOutput._cmi_valid = _cmiValid.clone();
int numRows = _varImp.getRowDim();
String[] varRowHeaders = _varImp.getRowHeaders();
List<String> relNames = new ArrayList<>(Arrays.asList(varRowHeaders));
modelOutput._all_predictor_names_valid = modelOutput._topKFeatures.clone();
modelOutput._relevance_valid = new double[numRows];
copyGenerateAdmissibleIndex(numRows, relNames, modelOutput._cmi_valid, modelOutput._cmi_raw_valid,
modelOutput._relevance_valid, modelOutput._admissible_index_valid, modelOutput._admissible_valid,
modelOutput._all_predictor_names_valid);
}
public void copyGenerateAdmissibleIndex(int numRows, List<String> relNames, double[] cmi,
double[] cmi_raw, double[] relevance, double[] admissible_index,
double[] admissible, String[] all_predictor_names) {
for (int index = 0; index < numRows; index++) { // extract predictor with varimp >= threshold
int newIndex = relNames.indexOf(all_predictor_names[index]);
relevance[index] = (double) _varImp.get(newIndex, 1);
double temp1 = relevance[index];
double temp2 = cmi[index];
admissible_index[index] = NORMALIZE_ADMISSIBLE_INDEX*Math.sqrt(temp1*temp1+temp2*temp2);
admissible[index] = (relevance[index] >= _parms._relevance_threshold && cmi[index] >= _parms._cmi_threshold) ? 1 : 0;
}
int[] indices = IntStream.range(0, cmi.length).toArray();
sort(indices, admissible_index, -1, -1);
sortCMIRel(indices, relevance, cmi_raw, cmi, all_predictor_names, admissible_index, admissible);
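// Sketch of the admissible_index computed above (hypothetical numbers): a predictor with
// relevance 0.8 and cmi 0.6 gets admissible_index = (1/sqrt(2)) * sqrt(0.8*0.8 + 0.6*0.6) = 0.707...,
// and is flagged admissible (1) only if both values also clear _relevance_threshold and _cmi_threshold.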
}
private Key<Frame> setCMIRelFrame(boolean validPresent) {
Frame cmiRelFrame = generateCMIRelevance(_model._output._all_predictor_names, _model._output._admissible,
_model._output._admissible_index, _model._output._relevance, _model._output._cmi,
_model._output._cmi_raw, _buildCore);
_model._output._admissible_score_key = cmiRelFrame._key;
if (validPresent) { // generate relevanceCMI frame for validation dataset
Frame cmiRelFrameValid = generateCMIRelevance(_model._output._all_predictor_names_valid,
_model._output._admissible_valid, _model._output._admissible_index_valid,
_model._output._relevance_valid, _model._output._cmi_valid, _model._output._cmi_raw_valid, _buildCore);
_model._output._admissible_score_key_valid = cmiRelFrameValid._key;
}
return cmiRelFrame._key;
}
private void cleanUpCV() {
String[] mainModelPredNames = _model._output._all_predictor_names;
List<String> cvNames = new ArrayList<>(Arrays.asList(_columnsCV));
int nPred = mainModelPredNames.length;
String[] newCVNames = new String[nPred];
double[] cmiCV = new double[nPred];
double[] cmiRawCV = new double[nPred];
for (int index=0; index < nPred; index++) {
String mainPredNames = mainModelPredNames[index];
int cvIndex = cvNames.indexOf(mainPredNames);
if (cvIndex >= 0) {
newCVNames[index] = mainPredNames;
cmiCV[index] = _cmiCV[cvIndex];
cmiRawCV[index] = _cmiRawCV[cvIndex];
}
}
_columnsCV = newCVNames.clone();
_cmiCV = cmiCV.clone();
_cmiRawCV = cmiRawCV.clone();
}
private Key<Frame> setCMIRelFrameCV() {
String[] mainModelPredNames = _model._output._all_predictor_names;
double[] mainModelRelevance = _model._output._relevance;
double[] relevanceCV = new double[mainModelRelevance.length];
int nPred = mainModelPredNames.length;
double[] admissibleIndex = new double[nPred];
double[] admissible = new double[nPred];
cleanUpCV();
// generate admissible, admissibleIndex referring to cvNames
for (int index=0; index<nPred; index++) {
relevanceCV[index] = mainModelRelevance[index];
double temp1 = 1 - relevanceCV[index];
double temp2 = 1 - _cmiCV[index];
admissibleIndex[index] = Math.sqrt(temp1 * temp1 + temp2 * temp2)*NORMALIZE_ADMISSIBLE_INDEX;
admissible[index] = _cmiCV[index] >= _parms._cmi_threshold && relevanceCV[index] >= _parms._relevance_threshold
? 1 : 0;
}
int[] indices = IntStream.range(0, relevanceCV.length).toArray();
_columnsCV = mainModelPredNames.clone();
sort(indices, admissibleIndex, -1, -1);
sortCMIRel(indices, relevanceCV, _cmiRawCV, _cmiCV, _columnsCV, admissibleIndex, admissible);
Frame cmiRelFrame = generateCMIRelevance(_columnsCV, admissible, admissibleIndex, relevanceCV, _cmiCV,
_cmiRawCV, _buildCore);
return cmiRelFrame._key;
}
/***
* Top level method to break down the infogram process into parts.
*
* I have a question here for all of you: Instead of generating the training frame and model builders for all
* the predictors, I break this down into several parts with each part generating _parms._nparallelism training
* frames and model builders. For each part, after _parms._nparallelism models are built, I extract the entropy
* for each predictor. Then, I move to the next part. My question here is: is this necessary? I am afraid of
* the memory consumption of spinning up so many training frames and model builders. If this is not an issue,
* please let me know.
*
* @param validPresent true if there is a validation dataset
*/
private void buildInfoGramsNRelevance(boolean validPresent) {
int outerLoop = (int) Math.floor(_numModels/_parms._nparallelism); // the last model is handled specially
int modelCount = 0;
int lastModelInd = _numModels - 1;
if (outerLoop > 0) { // build parallel models but limit it to parms._nparallelism at a time
for (int outerInd = 0; outerInd < outerLoop; outerInd++) {
buildModelCMINRelevance(modelCount, _parms._nparallelism, lastModelInd);
modelCount += _parms._nparallelism;
_job.update(_parms._nparallelism, "in the middle of building infogram models.");
}
}
int leftOver = _numModels - modelCount;
if (leftOver > 0) { // finish building the leftover models
buildModelCMINRelevance(modelCount, leftOver, lastModelInd);
_job.update(leftOver, " building the final set of infogram models.");
}
_cmi = calculateFinalCMI(_cmiRaw, _buildCore); // scale cmi to be from 0 to 1, ignore last one
if (validPresent)
_cmiValid = calculateFinalCMI(_cmiRawValid, _buildCore);
}
/***
* This method goes through all the predictors and calculates the CMI associated with each predictor. For
* core infogram, refer to https://github.com/h2oai/h2o-3/issues/7830 section I. For fair infogram, refer to
* https://github.com/h2oai/h2o-3/issues/7830 section II.
*
* @param modelCount : index of the first model to build in this batch
* @param numModel : number of models to build in this batch
* @param lastModelInd : index of the last model to build
*/
private void buildModelCMINRelevance(int modelCount, int numModel, int lastModelInd) {
boolean lastModelIncluded = (modelCount+numModel >= lastModelInd);
Frame[] trainingFrames = buildTrainingFrames(modelCount, numModel, lastModelInd); // generate training frame
Model.Parameters[] modelParams = buildModelParameters(trainingFrames, _parms._infogram_algorithm_parameters,
numModel, _parms._algorithm); // generate parameters
ModelBuilder[] builders = ModelBuilderHelper.trainModelsParallel(buildModelBuilders(modelParams),
numModel); // build models in parallel
if (lastModelIncluded) // extract relevance here for core infogram
extractRelevance(builders[numModel-1].get(), modelParams[numModel-1]);
_validNonZeroNumRows = generateInfoGrams(builders, trainingFrames, modelCount, numModel); // generate infogram
}
/**
* For core infogram, training frames are built by omitting the predictor of interest. For fair infogram,
* training frames are built with protected columns plus the predictor of interest. The very last training frame
* for core infogram will contain all predictors. For fair infogram, the very last training frame contains only the
* protected columns
*
* @param startInd : starting index of Frame[] to build
* @param numFrames : number of frames to build
* @param lastModelInd : index of last frame to build
* @return
*/
private Frame[] buildTrainingFrames(int startInd, int numFrames,
int lastModelInd) {
Frame[] trainingFrames = new Frame[numFrames];
Frame trainingFrame = _parms.train();
int finalFrameInd = startInd + numFrames;
int frameCount = 0;
for (int frameInd = startInd; frameInd < finalFrameInd; frameInd++) {
trainingFrames[frameCount] = new Frame(_baseOrSensitiveFrame);
if (_buildCore) {
for (int vecInd = 0; vecInd < _topKPredictors.length; vecInd++) {
if ((frameInd < lastModelInd) && (vecInd != frameInd)) // skip ith vector except last model
trainingFrames[frameCount].add(_topKPredictors[vecInd], trainingFrame.vec(_topKPredictors[vecInd]));
else if (frameInd == lastModelInd)// add all predictors
trainingFrames[frameCount].add(_topKPredictors[vecInd], trainingFrame.vec(_topKPredictors[vecInd]));
}
} else {
if (frameInd < lastModelInd) // add ith predictor
trainingFrames[frameCount].prepend(_topKPredictors[frameInd], trainingFrame.vec(_topKPredictors[frameInd]));
}
Scope.track(trainingFrames[frameCount]);
// frameKeys.add(trainingFrames[frameCount]._key);
//_generatedFrameKeys.add(trainingFrames[frameCount]._key);
DKV.put(trainingFrames[frameCount++]);
}
return trainingFrames;
}
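// Concrete sketch of the frames built above (predictor names hypothetical): with
// _topKPredictors = {x1, x2, x3}, a core infogram run builds frames over {x2, x3}, {x1, x3},
// {x1, x2} and finally {x1, x2, x3}; a fair run with protected columns {p} builds {p, x1},
// {p, x2}, {p, x3} and finally {p} alone. Every frame starts from a copy of _baseOrSensitiveFrame,
// which for fair runs carries the protected columns.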
/***
* Calculate the CMI for each predictor. Refer to https://github.com/h2oai/h2o-3/issues/7830 section I step 2
* for core infogram, or section II step 3 for fair infogram
*
*/
private long generateInfoGrams(ModelBuilder[] builders, Frame[] trainingFrames, int startIndex, int numModels) {
long nonZeroRows = Long.MAX_VALUE;
for (int index = 0; index < numModels; index++) {
Model oneModel = builders[index].get(); // extract model
int nclasses = oneModel._output.nclasses();
Frame prediction = oneModel.score(trainingFrames[index]); // generate prediction, cmi on training frame
prediction.add(_parms._response_column, trainingFrames[index].vec(_parms._response_column));
Scope.track_generic(oneModel);
if (oneModel._parms._weights_column != null && Arrays.asList(trainingFrames[index].names()).contains(oneModel._parms._weights_column))
prediction.add(oneModel._parms._weights_column, trainingFrames[index].vec(oneModel._parms._weights_column));
Scope.track(prediction);
_cmiRaw[index+startIndex] = new hex.Infogram.EstimateCMI(prediction, nclasses, oneModel._parms._response_column).doAll(prediction)._meanCMI; // calculate raw CMI
if (_parms.valid() != null) { // generate prediction, cmi on validation frame
Frame validFrame = _parms.valid();
Frame predictionValid = oneModel.score(validFrame); // already contains the response
predictionValid.add(_parms._response_column, validFrame.vec(_parms._response_column));
if (oneModel._parms._weights_column != null) { // weight column names are changed if cross-validation is on
if (Arrays.asList(validFrame.names()).contains("__internal_cv_weights__"))
predictionValid.add(oneModel._parms._weights_column, validFrame.vec("__internal_cv_weights__"));
else
predictionValid.add(oneModel._parms._weights_column, Arrays.asList(validFrame.names()).contains(oneModel._parms._weights_column)?
validFrame.vec(oneModel._parms._weights_column):
validFrame.anyVec().makeCon(1));
}
Scope.track(predictionValid);
EstimateCMI calCMI = new hex.Infogram.EstimateCMI(predictionValid, nclasses, oneModel._parms._response_column).doAll(predictionValid);
_cmiRawValid[index + startIndex] = calCMI._meanCMI;
nonZeroRows = Math.min(nonZeroRows, calCMI._nonZeroRows);
}
}
return nonZeroRows;
}
/**
* For core infogram, the last model is the one with all predictors. In this case, the relevance is basically the
* variable importance. For fair infogram, the last model is the one with all the predictors minus the protected
* columns. Again, the relevance is the variable importance.
*/
private void extractRelevance(Model model, Model.Parameters parms) {
if (_buildCore) { // full model is last one, just extract varImp
_varImp = model._output.getVariableImportances();
} else { // need to build a model for the fair infogram
Frame fullFrame = subtractAdd2Frame(_baseOrSensitiveFrame, _parms.train(), _parms._protected_columns,
_topKPredictors); // training frame is topKPredictors minus protected_columns
parms._train = fullFrame._key;
Scope.track(fullFrame);
ModelBuilder builder = ModelBuilder.make(parms);
Model fairModel = (Model) builder.trainModel().get();
_varImp = fairModel._output.getVariableImportances();
Scope.track_generic(fairModel);
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-admissibleml/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-admissibleml/3.46.0.7/hex/Infogram/InfogramExtension.java
|
package hex.Infogram;
import org.apache.log4j.Logger;
import water.AbstractH2OExtension;
public class InfogramExtension extends AbstractH2OExtension {
private static final Logger LOG = Logger.getLogger(InfogramExtension.class);
public static String NAME = "Infogram";
@Override
public String getExtensionName() {
return NAME;
}
public void logNativeLibInfo() {
LOG.info("InfogramExtension is called.");
}
}
|
0
|
java-sources/ai/h2o/h2o-admissibleml/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-admissibleml/3.46.0.7/hex/Infogram/InfogramModel.java
|
package hex.Infogram;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import hex.*;
import hex.genmodel.utils.DistributionFamily;
import hex.glm.GLMModel;
import hex.schemas.*;
import water.*;
import water.fvec.Frame;
import water.udf.CFuncRef;
import water.util.TwoDimTable;
import java.lang.reflect.Field;
import java.util.*;
import static hex.Infogram.InfogramModel.InfogramParameters.Algorithm.glm;
import static hex.genmodel.utils.DistributionFamily.*;
import static hex.util.DistributionUtils.familyToDistribution;
public class InfogramModel extends Model<InfogramModel, InfogramModel.InfogramParameters, InfogramModel.InfogramModelOutput> {
public InfogramModel(Key<InfogramModel> selfKey, InfogramParameters parms, InfogramModelOutput output) {
super(selfKey, parms, output);
}
@Override
public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) {
assert domain == null;
switch(_output.getModelCategory()) {
case Binomial:
return new ModelMetricsBinomial.MetricBuilderBinomial(domain);
case Multinomial:
return new ModelMetricsMultinomial.MetricBuilderMultinomial(_output.nclasses(), domain, _parms._auc_type);
default:
throw H2O.unimpl("Invalid ModelCategory "+_output.getModelCategory());
}
}
@Override
protected double[] score0(double[] data, double[] preds) {
throw new UnsupportedOperationException("Infogram does not support scoring on data. It only provides information" +
" on predictors and choose admissible features for users. Users can take the admissible features, build" +
"their own model and score with that model.");
}
@Override
public Frame score(Frame fr, String destinationKey, Job j, boolean computeMetrics, CFuncRef customMetricFunc) {
score0(null, null);
return null;
}
public static class InfogramParameters extends Model.Parameters {
public Algorithm _algorithm = Algorithm.AUTO; // default to GBM
public String _algorithm_params = new String(); // store user specific parameters for chosen algorithm
public String[] _protected_columns = null; // store features to be excluded from final model
public double _cmi_threshold = 0.1; // default set by Deep
public double _relevance_threshold = 0.1; // default set by Deep
public double _total_information_threshold = -1; // relevance threshold for core infogram
public double _net_information_threshold = -1; // cmi threshold for core infogram
public double _safety_index_threshold = -1; // cmi threshold for safe infogram
public double _relevance_index_threshold = -1; // relevance threshold for safe infogram
public double _data_fraction = 1.0; // fraction of data to use to calculate infogram
public Model.Parameters _infogram_algorithm_parameters; // store parameters of chosen algorithm
public int _top_n_features = 50; // if 0 consider all predictors, otherwise, consider topk predictors
public boolean _compute_p_values = false; // if true, will calculate p-value
public int _nparallelism = 0;
public enum Algorithm {
AUTO,
deeplearning,
drf,
gbm,
glm,
xgboost
}
@Override
public String algoName() {
return "Infogram";
}
@Override
public String fullName() {
return "Information Diagram";
}
@Override
public String javaName() {
return InfogramModel.class.getName();
}
@Override
public long progressUnits() {
return train() != null ? train().numCols() : 1;
}
/**
* This method performs the following functions:
* 1. it extracts the algorithm specific parameters from _algorithm_params into
* _infogram_algorithm_parameters, which will be one of GBMParameters, DRFParameters, DeepLearningParameters or
* GLMParameters. This is used to build the models from which the infogram is extracted.
* 2. Next, it copies the parameters that are common to all algorithms from InfogramParameters to
* _infogram_algorithm_parameters.
*/
public void extraModelSpecificParams() {
Properties p = new Properties();
boolean fillParams;
ArrayList<String> excludeList = new ArrayList<>(); // prevent overriding of parameters set by user
fillParams = _algorithm_params != null && !_algorithm_params.isEmpty();
if (fillParams) { // only execute when algorithm specific parameters are filled in by user
HashMap<String, String[]> map =
new Gson().fromJson(_algorithm_params, new TypeToken<HashMap<String, String[]>>() {
}.getType());
for (Map.Entry<String, String[]> param : map.entrySet()) {
String[] paramVal = param.getValue();
String paramName = param.getKey();
excludeList.add("_" + paramName);
if (paramVal.length == 1) {
p.setProperty(paramName, paramVal[0]);
} else {
p.setProperty(paramName, Arrays.toString(paramVal));
}
}
}
InfogramV3.InfogramParametersV3.generateModelParams(this, p, excludeList);
copyInfoGramParams(excludeList); // copy over InfogramParameters that are applicable to model specific algos
}
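// Hypothetical example of the _algorithm_params JSON consumed above: a string such as
// {"ntrees": ["50"], "learn_rate": ["0.1"]} is parsed into a HashMap<String, String[]>, each entry
// is copied into the Properties object (multi-valued entries via Arrays.toString), and
// "_ntrees"/"_learn_rate" are added to excludeList so the user-supplied values are not
// overwritten when the common Infogram parameters are copied over.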
public void copyInfoGramParams(List<String> excludeList) {
Field[] algoParams = Model.Parameters.class.getDeclaredFields();
Field algoField;
for (Field oneField : algoParams) {
try {
String fieldName = oneField.getName();
algoField = this.getClass().getField(fieldName);
if (excludeList.size() == 0 || !excludeList.contains(fieldName)) {
algoField.set(_infogram_algorithm_parameters, oneField.get(this));
}
} catch (IllegalAccessException | NoSuchFieldException e) { // suppress error printing. Only care about fields that are accessible
;
}
}
}
}
public static class InfogramModelOutput extends Model.Output {
final public static int _COLUMN_INDEX = 0;
final public static int _ADMISSIBLE_PREDICTOR_INDEX = 1;
final public static int _RELEVANCE_INDEX = 3;
final public static int _CMI_INDEX = 4;
final public static int _CMI_RAW_INDEX = 5;
public double[] _admissible_cmi; // conditional info for admissible features in _admissible_features
public double[] _admissible_cmi_raw; // conditional info for admissible features in _admissible_features raw
public double[] _admissible_relevance; // varimp values for admissible features in _admissible_features
public String[] _admissible_features; // predictors chosen that exceeds both conditional_info and varimp thresholds
public String[] _admissible_features_valid;
public String[] _admissible_features_xval;
public double[] _admissible_index; // store normalized distance from 0,0 corner of infogram plot from 0 to 1
public double[] _admissible_index_valid; // needed to build validation frame
public double[] _admissible; // 1 if predictor is admissible and 0 otherwise
public double[] _admissible_valid;
public DistributionFamily _distribution;
public double[] _cmi_raw; // cmi before normalization and for all predictors
public double[] _cmi_raw_valid;
public double[] _cmi; // normalized cmi
public double[] _cmi_valid;
public String[] _all_predictor_names;
public String[] _all_predictor_names_valid;
public double[] _relevance; // variable importance for all predictors
public double[] _relevance_valid; // equals to _relevance but may change in order
public Key<Frame> _admissible_score_key;
public Key<Frame> _admissible_score_key_valid;
public Key<Frame> _admissible_score_key_xval;
public String[] _topKFeatures;
public long _validNonZeroNumRows;
@Override
public ModelCategory getModelCategory() {
if (bernoulli.equals(_distribution)) {
return ModelCategory.Binomial;
} else if (multinomial.equals(_distribution)) {
return ModelCategory.Multinomial;
} else if (ordinal.equals(_distribution)) {
return ModelCategory.Ordinal;
}
throw new IllegalArgumentException("Infogram currently only supports binomial and multinomial classification");
}
public void setDistribution(DistributionFamily distribution) {
_distribution = distribution;
}
public InfogramModelOutput(Infogram b) {
super(b);
if (glm.equals(b._parms._algorithm))
_distribution = familyToDistribution(((GLMModel.GLMParameters) b._parms._infogram_algorithm_parameters)._family);
}
/***
* Generate arrays containing only admissible features which are predictors with both cmi >= cmi_threshold and
* relevance >= relevance_threshold
*
* @param relCMIFrame H2O Frame containing relevance, cmi, ... info
* @param validFrame true if validation dataset exists
* @param cvFrame true if cross-validation is enabled
*/
public void extractAdmissibleFeatures(Frame relCMIFrame, boolean validFrame, boolean cvFrame) {
long numRow = relCMIFrame.numRows();
// relCMIFrame contains c1:column, c2:admissible, c3:admissible_index, c4:relevance, c5:cmi, c6 cmi_raw
List<Double> varimps = new ArrayList<>();
List<Double> predictorCMI = new ArrayList<>();
List<Double> predictorCMIRaw = new ArrayList<>();
List<String> admissiblePred = new ArrayList<>();
for (long rowIndex=0; rowIndex<numRow; rowIndex++) {
if (relCMIFrame.vec(_ADMISSIBLE_PREDICTOR_INDEX).at(rowIndex) > 0) {
varimps.add(relCMIFrame.vec(_RELEVANCE_INDEX).at(rowIndex));
predictorCMI.add(relCMIFrame.vec(_CMI_INDEX).at(rowIndex));
predictorCMIRaw.add(relCMIFrame.vec(_CMI_RAW_INDEX).at(rowIndex));
admissiblePred.add(relCMIFrame.vec(_COLUMN_INDEX).stringAt(rowIndex));
}
}
if (validFrame) {
_admissible_features_valid = admissiblePred.toArray(new String[admissiblePred.size()]);
} else if (cvFrame) {
_admissible_features_xval = admissiblePred.toArray(new String[admissiblePred.size()]);
} else {
_admissible_features = admissiblePred.toArray(new String[admissiblePred.size()]);
_admissible_cmi = predictorCMI.stream().mapToDouble(i -> i).toArray();
_admissible_cmi_raw = predictorCMIRaw.stream().mapToDouble(i -> i).toArray();
_admissible_relevance = varimps.stream().mapToDouble(i -> i).toArray();
}
}
/***
* This method sorts _relevance, _cmi_raw, _cmi and _all_predictor_names such that the features
* closest to the upper right corner of the infogram come first, following the order given in indices
*/
public static void sortCMIRel(int[] indices, double[] relevance, double[] cmiRawA, double[] cmi,
String[] allPredictorNames, double[] admissibleIndex, double[] admissibleA) {
int indexLength = indices.length;
double[] rel = new double[indexLength];
double[] cmiRaw = new double[indexLength];
double[] cmiNorm = new double[indexLength];
double[] distanceCorner = new double[indexLength];
String[] predNames = new String[indexLength];
double[] admissible = new double[indexLength];
double[] admissibleI = new double[indexLength];
for (int index = 0; index < indexLength; index++) {
rel[index] = relevance[indices[index]];
cmiRaw[index] = cmiRawA[indices[index]];
cmiNorm[index] = cmi[indices[index]];
predNames[index] = allPredictorNames[indices[index]];
distanceCorner[index] = admissibleIndex[indices[index]];
admissible[index] = admissibleA[indices[index]];
admissibleI[index] = admissibleIndex[indices[index]];
}
System.arraycopy(rel, 0, relevance, 0, indexLength);
System.arraycopy(cmiNorm, 0, cmi, 0, indexLength);
System.arraycopy(cmiRaw, 0, cmiRawA, 0, indexLength);
System.arraycopy(predNames, 0, allPredictorNames, 0, indexLength);
System.arraycopy(distanceCorner, 0, admissibleIndex, 0, indexLength);
System.arraycopy(admissible, 0, admissibleA, 0, indexLength);
System.arraycopy(admissibleI, 0, admissibleIndex, 0, indexLength);
}
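// Small sketch of the reordering above (hypothetical values): with indices = {1, 2, 0} and
// relevance = {0.2, 0.9, 0.5}, each output slot i receives input[indices[i]], so relevance
// becomes {0.9, 0.5, 0.2}; the same permutation is applied to every other array passed in.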
}
@Override
public boolean haveMojo() {
return false;
}
@Override
public boolean havePojo() {
return false;
}
@Override
protected Futures remove_impl(Futures fs, boolean cascade) {
super.remove_impl(fs, cascade);
Keyed.remove(_output._admissible_score_key, fs, true);
Keyed.remove(_output._admissible_score_key_valid, fs, true);
Keyed.remove(_output._admissible_score_key_xval, fs, true);
return fs;
}
@Override
protected AutoBuffer writeAll_impl(AutoBuffer ab) {
if (_output._admissible_score_key != null)
ab.putKey(_output._admissible_score_key);
if (_output._admissible_score_key_valid != null)
ab.putKey(_output._admissible_score_key_valid);
if (_output._admissible_score_key_xval != null)
ab.putKey(_output._admissible_score_key_xval);
return super.writeAll_impl(ab);
}
@Override
protected Keyed readAll_impl(AutoBuffer ab, Futures fs) {
if (_output._admissible_score_key != null)
ab.getKey(_output._admissible_score_key, fs);
if (_output._admissible_score_key_valid != null)
ab.getKey(_output._admissible_score_key_valid, fs);
if (_output._admissible_score_key_xval != null)
ab.getKey(_output._admissible_score_key_xval, fs);
return super.readAll_impl(ab, fs);
}
}
|
0
|
java-sources/ai/h2o/h2o-admissibleml/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-admissibleml/3.46.0.7/hex/Infogram/InfogramUtils.java
|
package hex.Infogram;
import hex.Model;
import hex.ModelBuilder;
import hex.ModelBuilderHelper;
import hex.SplitFrame;
import hex.schemas.*;
import water.DKV;
import water.Key;
import water.Scope;
import water.api.SchemaServer;
import water.api.schemas3.ModelParametersSchemaV3;
import water.fvec.Frame;
import water.fvec.Vec;
import water.parser.BufferedString;
import water.util.TwoDimTable;
import static hex.Infogram.InfogramModel.InfogramModelOutput._CMI_RAW_INDEX;
import static hex.Infogram.InfogramModel.InfogramModelOutput._COLUMN_INDEX;
import static hex.Infogram.InfogramModel.InfogramParameters;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.DoubleStream;
public class InfogramUtils {
/**
 * This method takes the columns of _parms.train() and removes the non-predictor columns (response,
 * weights_column, offset_column), any columns listed in _parms._protected_columns, and the fold column if
 * present. The columns that remain are the predictors eligible for infogram calculation.
*/
public static String[] extractPredictors(InfogramParameters parms, Frame train, String foldColumnName) {
List<String> colNames = new ArrayList<>(Arrays.asList(train.names()));
String[] nonPredictors = parms.getNonPredictors();
for (String nonPred : nonPredictors)
colNames.remove(nonPred);
if (parms._protected_columns != null)
for (String protectPred : parms._protected_columns)
colNames.remove(protectPred);
if (foldColumnName != null)
colNames.remove(foldColumnName);
return colNames.toArray(new String[colNames.size()]);
}
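  // Illustrative sketch (editor note, not part of the original source): given a training frame with columns
  // {"x1", "x2", "race", "fold_col", "weight", "response"}, where "response" is the response,
  // "weight" is the weights column, _protected_columns = {"race"} and foldColumnName = "fold_col",
  //   extractPredictors(parms, train, "fold_col")
  // would return {"x1", "x2"} -- only the columns eligible to receive an infogram entry (assuming
  // getNonPredictors() covers the response and weight columns). The column names above are hypothetical.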
/**
 * Runs the infogram algorithm once in order to rank predictors by variable importance and keep the top-K predictors.
*/
public static String[] extractTopKPredictors(InfogramParameters parms, Frame trainFrame,
String[] eligiblePredictors) {
if (parms._top_n_features >= eligiblePredictors.length) return eligiblePredictors;
Frame topTrain = extractTrainingFrame(parms, eligiblePredictors, 1, trainFrame);
Scope.track(topTrain);
parms._infogram_algorithm_parameters._train = topTrain._key;
Model.Parameters[] modelParams = buildModelParameters(new Frame[]{topTrain}, parms._infogram_algorithm_parameters,
1, parms._algorithm); // generate parameters
ModelBuilder[] builders = ModelBuilderHelper.trainModelsParallel(buildModelBuilders(modelParams),
1);
Model builtModel = builders[0].get();
Scope.track_generic(builtModel);
TwoDimTable varImp = builtModel._output.getVariableImportances();
String[] ntopPredictors = new String[parms._top_n_features];
String[] rowHeaders = varImp.getRowHeaders();
System.arraycopy(rowHeaders, 0, ntopPredictors, 0, parms._top_n_features);
return ntopPredictors;
}
public static int findstart(Key<Frame>[] generatedFrameKeys) {
int arrLen = generatedFrameKeys.length;
for (int index=0; index < arrLen; index++)
if (generatedFrameKeys[index] == null)
return index;
return -1; // all keys are taken
}
/**
 * This method performs two functions:
 * - if the user only wants a fraction of the training dataset to be used for the infogram calculation, the
 *   training frame is split and only that fraction is kept for infogram training purposes;
 * - next, a new training frame is generated containing only the columns in the sensitivePredictors array plus
 *   the non-predictor columns (response, weights/fold) that are still needed.
*/
public static Frame extractTrainingFrame(InfogramParameters parms, String[] sensitivePredictors, double dataFraction,
Frame trainFrame) {
if (dataFraction < 1) { // only use a fraction training data for speedup
SplitFrame sf = new SplitFrame(trainFrame, new double[]{parms._data_fraction, 1-parms._data_fraction},
new Key[]{Key.make("ig_train_"+trainFrame._key), Key.make("ig_discard"+trainFrame._key)});
sf.exec().get();
Key[] ksplits = sf._destination_frames;
trainFrame = DKV.get(ksplits[0]).get();
DKV.remove(ksplits[1]); // discard unwanted portion
}
final Frame extractedFrame = new Frame(Key.make());
if (sensitivePredictors != null)
for (String colName : sensitivePredictors) // add sensitive features to Frame
extractedFrame.add(colName, trainFrame.vec(colName));
String[] nonPredictors = parms.getNonPredictors();
List<String> colNames = Arrays.asList(trainFrame.names());
boolean cvWeightsPresent = parms._weights_column != null && colNames.contains("__internal_cv_weights__")
&& (parms._weights_column.equals("__internal_cv_weights__") ||
parms._weights_column.equals("infogram_internal_cv_weights_"));
for (String nonPredName : nonPredictors) {
if (("__internal_cv_weights__".equals(nonPredName) || "infogram_internal_cv_weights_".equals(nonPredName))
&& colNames.contains("__internal_cv_weights__")) {
String cvWeightName = "infogram_internal_cv_weights_"; // switch weights column to turn off cv in algo used to build infogram
extractedFrame.add(cvWeightName, trainFrame.vec("__internal_cv_weights__"));
parms._weights_column = cvWeightName;
} else if (nonPredName.equals(parms._fold_column) && colNames.contains(parms._fold_column) && !cvWeightsPresent) {
extractedFrame.add(nonPredName, trainFrame.vec(nonPredName));
} else if (!nonPredName.equals(parms._fold_column) && colNames.contains(nonPredName)) {
extractedFrame.add(nonPredName, trainFrame.vec(nonPredName));
}
}
if (!(parms._fold_column != null && colNames.contains(parms._fold_column) && !cvWeightsPresent))
parms._fold_column = null;
DKV.put(extractedFrame);
return extractedFrame;
}
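  // Illustrative sketch (editor note, not part of the original source): with _data_fraction = 0.5 the training
  // frame is first split in half via SplitFrame and only the first split is kept; the returned frame then
  // contains the requested columns plus the surviving non-predictor columns, e.g.
  //   parms._data_fraction = 0.5;
  //   Frame igTrain = InfogramUtils.extractTrainingFrame(parms, new String[]{"x1", "x2"}, 0.5, train);
  // The column names above are hypothetical.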
public static String[] generateModelDescription(String[] topKPredictors, String[] sensitive_attributes) {
int numModel = topKPredictors.length+1;
String[] modelNames = new String[numModel];
int numPredInd = topKPredictors.length-1;
if (sensitive_attributes == null) { // contains only predictors
for (int index = 0; index < numPredInd; index++)
modelNames[index] = "Model built missing predictor "+topKPredictors[index];
modelNames[numPredInd] = "Full model built with all predictors";
} else { // contains one predictor and all sensitive_attributes
for (int index = 0; index < numPredInd; index++)
modelNames[index] = "Model built with sensitive_features and predictor "+topKPredictors[index];
modelNames[numPredInd] = "Model built with sensitive_features only";
}
return modelNames;
}
/***
* Build model parameters for model specified in infogram_algorithm. Any model specific parameters can be specified
* in infogram_algorithm_params.
*/
public static Model.Parameters[] buildModelParameters(Frame[] trainingFrames, Model.Parameters infoParams,
int numModels, InfogramParameters.Algorithm algoName) {
ModelParametersSchemaV3 paramsSchema;
switch (algoName) {
case glm:
paramsSchema = new GLMV3.GLMParametersV3();
break;
case AUTO:
case gbm:
paramsSchema = new GBMV3.GBMParametersV3();
break;
case drf:
paramsSchema = new DRFV3.DRFParametersV3();
break;
case deeplearning:
paramsSchema = new DeepLearningV3.DeepLearningParametersV3();
break;
case xgboost:
Model.Parameters params = ModelBuilder.makeParameters("XGBoost");
paramsSchema = (ModelParametersSchemaV3<?, ?>) SchemaServer.schema(params);
break;
default:
throw new UnsupportedOperationException("Unknown algo: " + algoName);
}
Model.Parameters[] modelParams = new Model.Parameters[numModels];
for (int index = 0; index < numModels; index++) {
modelParams[index] = (Model.Parameters) paramsSchema.fillFromImpl(infoParams).createAndFillImpl();
modelParams[index]._ignored_columns = null; // training frame contains only needed columns
modelParams[index]._train = trainingFrames[index]._key;
}
return modelParams;
}
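  // Illustrative sketch (editor note, not part of the original source): for the gbm case each of the numModels
  // parameter objects is a copy of infoParams filled through GBMParametersV3 and pointed at its own training
  // frame, e.g.
  //   Model.Parameters[] ps = buildModelParameters(new Frame[]{f0, f1, f2},
  //       parms._infogram_algorithm_parameters, 3, InfogramParameters.Algorithm.gbm);
  //   // afterwards ps[i]._train is fi._key and ps[i]._ignored_columns is null, for i in 0..2
  // The frame variables f0..f2 are hypothetical.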
public static ModelBuilder[] buildModelBuilders(Model.Parameters[] modelParams) {
int numModel = modelParams.length;
ModelBuilder[] modelBuilders = new ModelBuilder[numModel];
for (int index = 0; index < numModel; index++)
modelBuilders[index] = ModelBuilder.make(modelParams[index]);
return modelBuilders;
}
public static Frame generateCMIRelevance(String[] allPredictorNames, double[] admissible, double[] admissibleIndex,
double[] relevance, double[] cmi, double[] cmiRaw, boolean buildCore) {
Vec.VectorGroup vg = Vec.VectorGroup.VG_LEN1;
Vec vName = Vec.makeVec(allPredictorNames, vg.addVec());
Vec vAdm = Vec.makeVec(admissible, vg.addVec());
Vec vAdmIndex = Vec.makeVec(admissibleIndex, vg.addVec());
Vec vRel = Vec.makeVec(relevance, vg.addVec());
Vec vCMI = Vec.makeVec(cmi, vg.addVec());
Vec vCMIRaw = Vec.makeVec(cmiRaw, vg.addVec());
String[] columnNames = buildCore ? new String[]{"column", "admissible", "admissible_index", "total_information",
"net_information", "cmi_raw"} : new String[]{"column", "admissible", "admissible_index",
"relevance_index", "safety_index", "cmi_raw"};
Frame cmiRelFrame = new Frame(Key.<Frame>make(), columnNames, new Vec[]{vName, vAdm, vAdmIndex, vRel, vCMI, vCMIRaw});
DKV.put(cmiRelFrame);
return cmiRelFrame;
}
public static void removeFromDKV(Key<Frame>[] generatedFrameKeys) {
for (Key<Frame> oneFrameKey : generatedFrameKeys)
if (null != oneFrameKey)
DKV.remove(oneFrameKey);
else
break;
}
/***
 * To calculate the CMI, refer to https://github.com/h2oai/h2o-3/issues/7830, section I step 2 for the core
 * infogram and section II step 2 for the fair infogram. Note that the last model is built with all predictors
 * for the core infogram, or with only the protected columns for the fair infogram.
*/
public static double[] calculateFinalCMI(double[] cmiRaw, boolean buildCore) {
int lastInd = cmiRaw.length-1; // index of full model or model with sensitive features only
double maxCMI = 0;
for (int index = 0; index < lastInd; index++) {
if (buildCore)
cmiRaw[index] = Math.max(0, cmiRaw[lastInd] - cmiRaw[index]);
else
cmiRaw[index] = Math.max(0, cmiRaw[index] - cmiRaw[lastInd]);
if (cmiRaw[index] > maxCMI)
maxCMI = cmiRaw[index];
}
double scale = maxCMI == 0 ? 0 : 1.0/maxCMI;
double[] cmi = new double[lastInd];
double[] cmiLong = DoubleStream.of(cmiRaw).map(d->d*scale).toArray();
System.arraycopy(cmiLong, 0, cmi, 0, lastInd);
return cmi;
}
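  // Worked example (editor note, not part of the original source), core infogram with
  // cmiRaw = {0.2, 0.5, 0.1, 0.6} where the last entry belongs to the full model:
  //   deltas = {max(0, 0.6-0.2), max(0, 0.6-0.5), max(0, 0.6-0.1)} = {0.4, 0.1, 0.5}
  //   maxCMI = 0.5, scale = 1/0.5 = 2
  //   cmi    = {0.8, 0.2, 1.0}
  // i.e. the returned CMI values are normalized so that the largest equals 1.0.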
public static Frame subtractAdd2Frame(Frame base, Frame featureFrame, String[] removeFeatures, String[] addFeatures) {
Frame newFrame = new Frame(base);
if (removeFeatures != null) {
for (String removeEle : removeFeatures)
newFrame.remove(removeEle);
}
for (String addEle : addFeatures)
newFrame.add(addEle, featureFrame.vec(addEle));
DKV.put(newFrame);
return newFrame;
}
public static void extractInfogramInfo(InfogramModel infoModel, double[][] cmiRaw,
List<List<String>> columns, int foldIndex) {
Frame validFrame = DKV.getGet(infoModel._output._admissible_score_key_valid);
// validFrame contains c1:column, c2:admissible, c3:admissible_index, c4:relevance, c5:cmi, c6:cmi_raw
cmiRaw[foldIndex] = vec2array(validFrame.vec(_CMI_RAW_INDEX));
String[] oneColumn = strVec2array(validFrame.vec(_COLUMN_INDEX));
ArrayList<String> oneFrameColumn = new ArrayList(Arrays.asList(oneColumn));
columns.add(oneFrameColumn);
validFrame.remove();
}
static double[] vec2array(Vec v) {
assert v.length() < Integer.MAX_VALUE;
final int len = (int) v.length();
double[] array = new double[len];
for (int i = 0; i < len; i++) array[i] = v.at(i);
return array;
}
static String[] strVec2array(Vec v) {
assert v.length() < Integer.MAX_VALUE;
final int len = (int) v.length();
BufferedString bs = new BufferedString();
String[] array = new String[len];
for (int i = 0; i < len; i++) {
BufferedString s = v.atStr(bs, i);
if (s != null) array[i] = s.toString();
}
return array;
}
}
|
0
|
java-sources/ai/h2o/h2o-admissibleml/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-admissibleml/3.46.0.7/hex/schemas/InfogramModelV3.java
|
package hex.schemas;
import hex.Infogram.InfogramModel;
import water.api.API;
import water.api.schemas3.KeyV3;
import water.api.schemas3.ModelOutputSchemaV3;
import water.api.schemas3.ModelSchemaV3;
public class InfogramModelV3 extends ModelSchemaV3<InfogramModel, InfogramModelV3, InfogramModel.InfogramParameters,
InfogramV3.InfogramParametersV3, InfogramModel.InfogramModelOutput, InfogramModelV3.InfogramModelOutputV3> {
public static final class InfogramModelOutputV3 extends ModelOutputSchemaV3<InfogramModel.InfogramModelOutput, InfogramModelOutputV3> {
@API(help="Array of conditional mutual information for admissible features normalized to 0.0 and 1.0",
direction = API.Direction.OUTPUT)
public double[] admissible_cmi; // conditional mutual info for admissible features in _admissible_features
@API(help="Array of conditional mutual information for admissible features raw and not normalized to 0.0 and 1.0",
direction = API.Direction.OUTPUT)
public double[] admissible_cmi_raw; // raw conditional mutual info for admissible features in _admissible_features
@API(help="Array of variable importance for admissible features", direction = API.Direction.OUTPUT)
public double[] admissible_relevance; // varimp values for admissible features in _admissible_features
@API(help="Array containing names of admissible features for the user", direction = API.Direction.OUTPUT)
public String[] admissible_features; // predictors chosen that exceed both the conditional_info and varimp thresholds
@API(help="Array containing names of admissible features for the user from the validation dataset.",
direction = API.Direction.OUTPUT)
public String[] admissible_features_valid; // predictors chosen that exceed both the conditional_info and varimp thresholds
@API(help="Array containing names of admissible features for the user from cross-validation.",
direction = API.Direction.OUTPUT)
public String[] admissible_features_xval; // predictors chosen that exceed both the conditional_info and varimp thresholds
@API(help="Array of raw conditional mutual information for all features excluding sensitive attributes if " +
"applicable", direction = API.Direction.OUTPUT)
public double[] cmi_raw; // cmi before normalization and for all predictors
@API(help="Array of conditional mutual information for all features excluding sensitive attributes if applicable " +
"normalized to 0.0 and 1.0", direction = API.Direction.OUTPUT)
public double[] cmi;
@API(help="Array containing names of all features excluding sensitive attributes if applicable corresponding to CMI" +
" and relevance", direction = API.Direction.OUTPUT)
public String[] all_predictor_names;
@API(help="Array of variable importance for all features excluding sensitive attributes if applicable",
direction = API.Direction.OUTPUT)
public double[] relevance; // variable importance for all predictors
@API(help="Frame key that stores the predictor names, net CMI and relevance.", direction = API.Direction.OUTPUT)
KeyV3.FrameKeyV3 admissible_score_key;
@API(help="Frame key that stores the predictor names, net CMI and relevance calculated from validation dataset.",
direction = API.Direction.OUTPUT)
KeyV3.FrameKeyV3 admissible_score_key_valid;
@API(help="Frame key that stores the predictor names, net CMI and relevance from cross-validation.",
direction = API.Direction.OUTPUT)
KeyV3.FrameKeyV3 admissible_score_key_xval;
}
public InfogramV3.InfogramParametersV3 createParametersSchema() { return new InfogramV3.InfogramParametersV3(); }
public InfogramModelOutputV3 createOutputSchema() { return new InfogramModelOutputV3(); }
@Override
public InfogramModel createImpl() {
InfogramModel.InfogramParameters parms = parameters.createImpl();
return new InfogramModel(model_id.key(), parms, null);
}
}
|
0
|
java-sources/ai/h2o/h2o-admissibleml/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-admissibleml/3.46.0.7/hex/schemas/InfogramV3.java
|
package hex.schemas;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import hex.Infogram.Infogram;
import hex.Infogram.InfogramModel;
import hex.Model;
import hex.ModelBuilder;
import hex.deeplearning.DeepLearningModel;
import hex.glm.GLMModel;
import hex.tree.drf.DRFModel;
import hex.tree.gbm.GBMModel;
import water.api.API;
import water.api.EnumValuesProvider;
import water.api.SchemaServer;
import water.api.schemas3.KeyV3;
import water.api.schemas3.ModelParametersSchemaV3;
import static hex.util.DistributionUtils.distributionToFamily;
import java.util.*;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
public class InfogramV3 extends ModelBuilderSchema<Infogram, InfogramV3, InfogramV3.InfogramParametersV3> {
public static final class InfogramParametersV3 extends ModelParametersSchemaV3<InfogramModel.InfogramParameters, InfogramParametersV3> {
public static final String[] fields = new String[] {
"model_id",
"training_frame",
"validation_frame",
"seed",
"keep_cross_validation_models",
"keep_cross_validation_predictions",
"keep_cross_validation_fold_assignment",
"nfolds",
"fold_assignment",
"fold_column",
"response_column",
"ignored_columns",
"ignore_const_cols",
"score_each_iteration",
"offset_column",
"weights_column",
"standardize",
"distribution",
"plug_values",
"max_iterations",
"stopping_rounds",
"stopping_metric",
"stopping_tolerance",
"balance_classes",
"class_sampling_factors",
"max_after_balance_size",
"max_runtime_secs",
"custom_metric_func",
"auc_type",
// new parameters for INFOGRAMs only
"algorithm", // choose algo and parameter to generate infogram
"algorithm_params",
"protected_columns",
"total_information_threshold",
"net_information_threshold",
"relevance_index_threshold",
"safety_index_threshold",
"data_fraction",
"top_n_features"
};
@API(help = "Seed for pseudo random number generator (if applicable).", gridable = true)
public long seed;
// Input fields
@API(help = "Standardize numeric columns to have zero mean and unit variance.", level = API.Level.critical)
public boolean standardize;
@API(help = "Plug Values (a single row frame containing values that will be used to impute missing values of the" +
" training/validation frame, use with conjunction missing_values_handling = PlugValues).",
direction = API.Direction.INPUT)
public KeyV3.FrameKeyV3 plug_values;
@API(help = "Maximum number of iterations.", level = API.Level.secondary)
public int max_iterations;
@API(help = "Prior probability for y==1. To be used only for logistic regression iff the data has been sampled " +
"and the mean of response does not reflect reality.", level = API.Level.expert)
public double prior;
/**
* For imbalanced data, balance training data class counts via
* over/under-sampling. This can result in improved predictive accuracy.
*/
@API(help = "Balance training data class counts via over/under-sampling (for imbalanced data).",
level = API.Level.secondary, direction = API.Direction.INOUT)
public boolean balance_classes;
/**
* Desired over/under-sampling ratios per class (lexicographic order).
* Only when balance_classes is enabled.
* If not specified, they will be automatically computed to obtain class balance during training.
*/
@API(help = "Desired over/under-sampling ratios per class (in lexicographic order). If not specified, sampling" +
" factors will be automatically computed to obtain class balance during training. Requires " +
"balance_classes.", level = API.Level.expert, direction = API.Direction.INOUT)
public float[] class_sampling_factors;
/**
* When classes are balanced, limit the resulting dataset size to the
* specified multiple of the original dataset size.
*/
@API(help = "Maximum relative size of the training data after balancing class counts (can be less than 1.0). " +
"Requires balance_classes.", /* dmin=1e-3, */ level = API.Level.expert, direction = API.Direction.INOUT)
public float max_after_balance_size;
//Infogram fields
@API(level = API.Level.critical, direction = API.Direction.INOUT,
valuesProvider = InfogramAlrogithmProvider.class,
help = "Type of machine learning algorithm used to build the infogram. Options include "
+ "'AUTO' (gbm), "
+ "'deeplearning' (Deep Learning with default parameters), "
+ "'drf' (Random Forest with default parameters), "
+ "'gbm' (GBM with default parameters), "
+ "'glm' (GLM with default parameters), "
+ "or 'xgboost' (if available, XGBoost with default parameters)."
)
public InfogramModel.InfogramParameters.Algorithm algorithm;
@API(help = "Customized parameters for the machine learning algorithm specified in the algorithm parameter.",
level = API.Level.expert, gridable=true)
public String algorithm_params;
@API(help = "Columns that contain features that are sensitive and need to be protected (legally, or otherwise), " +
"if applicable. These features (e.g. race, gender, etc) should not drive the prediction of the response.",
level = API.Level.secondary, gridable=true)
public String[] protected_columns;
@API(help = "A number between 0 and 1 representing a threshold for total information, defaulting to 0.1. " +
"For a specific feature, if the total information is higher than this threshold, and the corresponding " +
"net information is also higher than the threshold ``net_information_threshold``, that feature will be " +
"considered admissible. The total information is the x-axis of the Core Infogram. " +
"Default is -1 which gets set to 0.1.",
level = API.Level.secondary, gridable = true)
public double total_information_threshold;
@API(help = "A number between 0 and 1 representing a threshold for net information, defaulting to 0.1. For a " +
"specific feature, if the net information is higher than this threshold, and the corresponding total " +
"information is also higher than the total_information_threshold, that feature will be considered admissible. " +
"The net information is the y-axis of the Core Infogram. Default is -1 which gets set to 0.1.",
level = API.Level.secondary, gridable = true)
public double net_information_threshold;
@API(help = "A number between 0 and 1 representing a threshold for the relevance index, defaulting to 0.1. This is " +
"only used when ``protected_columns`` is set by the user. For a specific feature, if the relevance index " +
"value is higher than this threshold, and the corresponding safety index is also higher than the " +
"safety_index_threshold``, that feature will be considered admissible. The relevance index is the x-axis " +
"of the Fair Infogram. Default is -1 which gets set to 0.1.",
level = API.Level.secondary, gridable = true)
public double relevance_index_threshold;
@API(help = "A number between 0 and 1 representing a threshold for the safety index, defaulting to 0.1. This is " +
"only used when protected_columns is set by the user. For a specific feature, if the safety index value " +
"is higher than this threshold, and the corresponding relevance index is also higher than the " +
"relevance_index_threshold, that feature will be considered admissible. The safety index is the y-axis of " +
"the Fair Infogram. Default is -1 which gets set to 0.1.",
level = API.Level.secondary, gridable = true)
public double safety_index_threshold;
@API(help = "The fraction of training frame to use to build the infogram model. Defaults to 1.0, and any value greater " +
"than 0 and less than or equal to 1.0 is acceptable.",
level = API.Level.secondary, gridable = true)
public double data_fraction;
@API(help = "An integer specifying the number of columns to evaluate in the infogram. The columns are ranked by " +
"variable importance, and the top N are evaluated. Defaults to 50.",
level = API.Level.secondary, gridable = true)
public int top_n_features;
public InfogramModel.InfogramParameters fillImpl(InfogramModel.InfogramParameters impl) {
super.fillImpl(impl);
if (algorithm_params != null && !algorithm_params.isEmpty()) {
Properties p = generateProperties(algorithm_params);
ParamNParamSchema schemaParams = generateParamsSchema(algorithm);
schemaParams._paramsSchema.init_meta();
impl._infogram_algorithm_parameters = (Model.Parameters) schemaParams._paramsSchema
.fillFromImpl(schemaParams._params)
.fillFromParms(p, true)
.createAndFillImpl();
super.fillImpl(impl);
}
return impl;
}
public static void generateModelParams(InfogramModel.InfogramParameters parms, Properties p,
ArrayList<String> excludeList) {
ModelParametersSchemaV3 paramsSchema;
Model.Parameters params;
switch (parms._algorithm) {
case glm:
paramsSchema = new GLMV3.GLMParametersV3();
params = new GLMModel.GLMParameters();
excludeList.add("_distribution");
((GLMModel.GLMParameters) params)._family = distributionToFamily(parms._distribution);
break;
case AUTO: // auto defaults to GBM
case gbm:
paramsSchema = new GBMV3.GBMParametersV3();
params = new GBMModel.GBMParameters();
if (!excludeList.contains("_stopping_tolerance")) {
params._stopping_tolerance = 0.01; // set default to 0.01
excludeList.add("_stopping_tolerance");
}
break;
case drf:
paramsSchema = new DRFV3.DRFParametersV3();
params = new DRFModel.DRFParameters();
if (!excludeList.contains("_stopping_tolerance")) {
params._stopping_tolerance = 0.01; // set default to 0.01
excludeList.add("_stopping_tolerance");
}
break;
case deeplearning:
paramsSchema = new DeepLearningV3.DeepLearningParametersV3();
params = new DeepLearningModel.DeepLearningParameters();
break;
case xgboost:
params = ModelBuilder.makeParameters("XGBoost");
paramsSchema = (ModelParametersSchemaV3<?, ?>) SchemaServer.schema(params);
break;
default:
throw new UnsupportedOperationException("Unknown algo: " + parms._algorithm);
}
paramsSchema.init_meta();
parms._infogram_algorithm_parameters = (Model.Parameters) paramsSchema
.fillFromImpl(params)
.fillFromParms(p, true)
.createAndFillImpl();
}
Properties generateProperties(String algoParms) {
Properties p = new Properties();
HashMap<String, String[]> map = new Gson().fromJson(algoParms, new TypeToken<HashMap<String, String[]>>() {
}.getType());
for (Map.Entry<String, String[]> param : map.entrySet()) {
String[] paramVal = param.getValue();
if (paramVal.length == 1) {
p.setProperty(param.getKey(), paramVal[0]);
} else {
p.setProperty(param.getKey(), Arrays.toString(paramVal));
}
}
return p;
}
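    // Illustrative sketch (editor note, not part of the original source): an algorithm_params JSON string such as
    //   {"ntrees": ["100"], "max_depth": ["5"], "monotone_constraints": ["c1", "c2"]}
    // is parsed into Properties with single-element arrays unwrapped and multi-element arrays rendered via
    // Arrays.toString, i.e. ntrees=100, max_depth=5, monotone_constraints=[c1, c2].
    // The parameter names above are hypothetical examples.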
private class ParamNParamSchema {
private ModelParametersSchemaV3 _paramsSchema;
private Model.Parameters _params;
public ParamNParamSchema(ModelParametersSchemaV3 schema, Model.Parameters params) {
_paramsSchema = schema;
_params = params;
}
}
ParamNParamSchema generateParamsSchema(InfogramModel.InfogramParameters.Algorithm chosenAlgo) {
ModelParametersSchemaV3<?, ?> paramsSchema;
Model.Parameters params;
switch (chosenAlgo) {
case AUTO:
case glm:
paramsSchema = new GLMV3.GLMParametersV3();
params = new GLMModel.GLMParameters();
((GLMModel.GLMParameters) params)._family = GLMModel.GLMParameters.Family.AUTO;
break;
case gbm:
paramsSchema = new GBMV3.GBMParametersV3();
params = new GBMModel.GBMParameters();
break;
case drf:
paramsSchema = new DRFV3.DRFParametersV3();
params = new DRFModel.DRFParameters();
break;
case deeplearning:
paramsSchema = new DeepLearningV3.DeepLearningParametersV3();
params = new DeepLearningModel.DeepLearningParameters();
break;
case xgboost:
params = ModelBuilder.makeParameters("XGBoost");
paramsSchema = (ModelParametersSchemaV3<?, ?>) SchemaServer.schema(params);
break;
default:
throw new UnsupportedOperationException("Unknown given algo: " + chosenAlgo);
}
return new ParamNParamSchema(paramsSchema, params);
}
}
public static final class InfogramAlrogithmProvider extends EnumValuesProvider<InfogramModel.InfogramParameters.Algorithm> {
public InfogramAlrogithmProvider() { super(InfogramModel.InfogramParameters.Algorithm.class); }
}
}
|
0
|
java-sources/ai/h2o/h2o-admissibleml/3.46.0.7/water
|
java-sources/ai/h2o/h2o-admissibleml/3.46.0.7/water/infogram/RegisterRestApi.java
|
package water.infogram;
import hex.Infogram.Infogram;
import water.api.AlgoAbstractRegister;
import water.api.RestApiContext;
import water.api.SchemaServer;
public class RegisterRestApi extends AlgoAbstractRegister {
@Override
public void registerEndPoints(RestApiContext context) {
Infogram infogramMB = new Infogram(true);
// Register InfoGram model builder REST API
registerModelBuilder(context, infogramMB, SchemaServer.getStableVersion());
}
@Override
public String getName() {
return "Infogram";
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/ContributionsMeanAggregator.java
|
package hex;
import water.Job;
import water.MRTask;
import water.MemoryManager;
import water.fvec.Chunk;
import water.fvec.NewChunk;
import java.util.stream.Stream;
public class ContributionsMeanAggregator extends MRTask<ContributionsMeanAggregator> {
final int _nBgRows;
double[][] _partialSums;
final int _rowIdxIdx;
final int _nRows;
final int _nCols;
int _startIndex;
final Job _j;
public ContributionsMeanAggregator(Job j, int nRows, int nCols, int nBgRows) {
_j = j;
_nRows = nRows;
_nCols = nCols;
_rowIdxIdx = nCols;
_nBgRows = nBgRows;
_startIndex = 0;
}
public ContributionsMeanAggregator setStartIndex(int startIndex) {
_startIndex = startIndex;
return this;
}
@Override
public void map(Chunk[] cs, NewChunk[] ncs) {
if (isCancelled() || null != _j && _j.stop_requested()) return;
_partialSums = MemoryManager.malloc8d(_nRows, _nCols);
for (int i = 0; i < cs[0]._len; i++) {
final int rowIdx = (int) cs[_rowIdxIdx].at8(i);
for (int j = 0; j < _nCols; j++) {
_partialSums[rowIdx - _startIndex][j] += cs[j].atd(i);
}
}
}
@Override
public void reduce(ContributionsMeanAggregator mrt) {
for (int i = 0; i < _partialSums.length; i++) {
for (int j = 0; j < _partialSums[0].length; j++) {
_partialSums[i][j] += mrt._partialSums[i][j];
}
}
mrt._partialSums = null;
}
@Override
protected void postGlobal() {
NewChunk[] ncs = Stream.of(appendables()).map(vec -> vec.chunkForChunkIdx(0)).toArray(NewChunk[]::new);
for (int i = 0; i < _partialSums.length; i++) {
for (int j = 0; j < _partialSums[0].length; j++) {
ncs[j].addNum(_partialSums[i][j] / _nBgRows);
}
}
_partialSums = null;
for (NewChunk nc : ncs)
nc.close(0, _fs);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/ContributionsWithBackgroundFrameTask.java
|
package hex;
import water.*;
import water.fvec.*;
import water.util.Log;
import water.util.fp.Function;
import java.util.*;
import java.util.stream.IntStream;
import static water.SplitToChunksApplyCombine.concatFrames;
/***
* Calls map(Chunk[] frame, Chunk[] background, NewChunk[] ncs) by copying the smaller frame across the nodes.
* @param <T>
*/
public abstract class ContributionsWithBackgroundFrameTask<T extends ContributionsWithBackgroundFrameTask<T>> extends MRTask<T> {
transient Frame _frame;
transient Frame _backgroundFrame;
Key<Frame> _frameKey;
Key<Frame> _backgroundFrameKey;
final boolean _aggregate;
boolean _isFrameBigger;
long _startRow;
long _endRow;
Job _job;
public ContributionsWithBackgroundFrameTask(Key<Frame> frKey, Key<Frame> backgroundFrameKey, boolean perReference) {
assert null != frKey.get();
assert null != backgroundFrameKey.get();
_frameKey = frKey;
_backgroundFrameKey = backgroundFrameKey;
_frame = frKey.get();
_backgroundFrame = backgroundFrameKey.get();
assert _frame.numRows() > 0 : "Frame has to contain at least one row.";
assert _backgroundFrame.numRows() > 0 : "Background frame has to contain at least one row.";
_isFrameBigger = _frame.numRows() > _backgroundFrame.numRows();
_aggregate = !perReference;
_startRow = -1;
_endRow = -1;
}
protected void loadFrames() {
if (null == _frame)
_frame = _frameKey.get();
if (null == _backgroundFrame)
_backgroundFrame = _backgroundFrameKey.get();
assert _frame != null && _backgroundFrame != null;
}
@Override
public void map(Chunk[] cs, NewChunk[] ncs) {
loadFrames();
Frame smallerFrame = _isFrameBigger ? _backgroundFrame : _frame;
long sfIdx = 0;
long maxSfIdx = smallerFrame.numRows();
if (!_isFrameBigger && _startRow != -1 && _endRow != -1) {
sfIdx = _startRow;
maxSfIdx = _endRow;
}
while (sfIdx < maxSfIdx) {
if (isCancelled() || null != _job && _job.stop_requested()) return;
long finalSfIdx = sfIdx;
Chunk[] sfCs = IntStream
.range(0, smallerFrame.numCols())
.mapToObj(col -> smallerFrame.vec(col).chunkForRow(finalSfIdx))
.toArray(Chunk[]::new);
NewChunk[] ncsSlice = Arrays.copyOf(ncs, ncs.length - 2);
if (_isFrameBigger) {
map(cs, sfCs, ncsSlice);
for (int i = 0; i < cs[0]._len; i++) {
for (int j = 0; j < sfCs[0]._len; j++) {
ncs[ncs.length - 2].addNum(cs[0].start() + i); // row idx
ncs[ncs.length - 1].addNum(sfCs[0].start() + j); // background idx
}
}
} else {
map(sfCs, cs, ncsSlice);
for (int i = 0; i < sfCs[0]._len; i++) {
for (int j = 0; j < cs[0]._len; j++) {
ncs[ncs.length - 2].addNum(sfCs[0].start() + i); // row idx
ncs[ncs.length - 1].addNum(cs[0].start() + j); // background idx
}
}
}
sfIdx += sfCs[0]._len;
}
}
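  // Illustrative sketch (editor note, not part of the original source): if _frame is the bigger frame and its
  // chunk holds rows {0, 1} while the background frame has rows {0, 1, 2}, the two trailing NewChunks receive
  // the (RowIdx, BackgroundRowIdx) pairs (0,0), (0,1), (0,2), (1,0), (1,1), (1,2). When the background frame is
  // the bigger one the iteration order differs, but the emitted columns keep the same meaning: the next-to-last
  // column is always the _frame row index and the last column the background row index, so every row is paired
  // with every background row.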
public static double estimateRequiredMemory(int nCols, Frame frame, Frame backgroundFrame) {
return 8 * nCols * frame.numRows() * backgroundFrame.numRows();
}
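  // Worked example (editor note, not part of the original source): with nCols = 10 output columns, a 1,000-row
  // frame and a 100-row background frame the estimate is
  //   8 * 10 * 1000 * 100 = 8,000,000 bytes (~8 MB),
  // i.e. one double per output column for every (row, background row) pair.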
public static double estimatePerNodeMinimalMemory(int nCols, Frame frame, Frame backgroundFrame){
boolean isFrameBigger = frame.numRows() > backgroundFrame.numRows();
double reqMem = estimateRequiredMemory(nCols, frame, backgroundFrame);
Frame biggerFrame = isFrameBigger ? frame : backgroundFrame;
long[] frESPC = biggerFrame.anyVec().espc();
// Guess the max size of the chunk from the bigger frame as 2 * average chunk
double maxMinChunkSizeInVectorGroup = 2 * 8 * nCols * biggerFrame.numRows() / (double) biggerFrame.anyVec().nChunks();
// Try to compute it exactly
if (null != frESPC) {
long maxFr = 0;
for (int i = 0; i < frESPC.length-1; i++) {
maxFr = Math.max(maxFr, frESPC[i+1]-frESPC[i]);
}
maxMinChunkSizeInVectorGroup = Math.max(maxMinChunkSizeInVectorGroup, 8*nCols*maxFr);
}
long nRowsOfSmallerFrame = isFrameBigger ? backgroundFrame.numRows() : frame.numRows();
// We need the whole smaller frame on each node and one chunk per col of the bigger frame (at minimum)
return Math.max(reqMem / H2O.CLOUD._memary.length, maxMinChunkSizeInVectorGroup + nRowsOfSmallerFrame * nCols * 8);
}
double estimatePerNodeMinimalMemory(int nCols) {
return estimatePerNodeMinimalMemory(nCols, _frame, _backgroundFrame);
}
public static long minMemoryPerNode() {
long minMem = Long.MAX_VALUE;
for (H2ONode h2o : H2O.CLOUD._memary) {
long mem = h2o._heartbeat.get_free_mem(); // in bytes
if (mem < minMem)
minMem = mem;
}
return minMem;
}
public static long totalFreeMemory() {
long mem = 0;
for (H2ONode h2o : H2O.CLOUD._memary) {
mem += h2o._heartbeat.get_free_mem(); // in bytes
}
return mem;
}
public static boolean enoughMinMemory(double estimatedMemory) {
return minMemoryPerNode() > estimatedMemory;
}
abstract protected void map(Chunk[] cs, Chunk[] bgCs, NewChunk[] ncs);
void setChunkRange(int startCIdx, int endCIdx) {
assert !_isFrameBigger;
_startRow = _frame.anyVec().chunkForChunkIdx(startCIdx).start();
_endRow = _frame.anyVec().chunkForChunkIdx(endCIdx).start() + _frame.anyVec().chunkForChunkIdx(endCIdx)._len;
}
// takes care of mapping over the bigger frame
public Frame runAndGetOutput(Job j, Key<Frame> destinationKey, String[] names) {
_job = j;
loadFrames();
double reqMem = estimateRequiredMemory(names.length + 2, _frame, _backgroundFrame);
double reqPerNodeMem = estimatePerNodeMinimalMemory(names.length + 2);
String[] namesWithRowIdx = new String[names.length + 2];
System.arraycopy(names, 0, namesWithRowIdx, 0, names.length);
namesWithRowIdx[names.length] = "RowIdx";
namesWithRowIdx[names.length + 1] = "BackgroundRowIdx";
Key<Frame> individualContributionsKey = _aggregate ? Key.make(destinationKey + "_individual_contribs") : destinationKey;
if (!_aggregate) {
if (!enoughMinMemory(reqPerNodeMem)) {
throw new RuntimeException("Not enough memory. Estimated minimal total memory is " + reqMem + "B. " +
"Estimated minimal per node memory (assuming perfectly balanced datasets) is " + reqPerNodeMem + "B. " +
"Node with minimum memory has " + minMemoryPerNode() + "B. Total available memory is " + totalFreeMemory() + "B."
);
}
Frame indivContribs = withPostMapAction(JobUpdatePostMap.forJob(j))
.doAll(namesWithRowIdx.length, Vec.T_NUM, _isFrameBigger ? _frame : _backgroundFrame)
.outputFrame(individualContributionsKey, namesWithRowIdx, null);
return indivContribs;
} else {
if (!enoughMinMemory(reqPerNodeMem)) {
if (minMemoryPerNode() < 5 * (names.length + 2) * _frame.numRows() * 8) {
throw new RuntimeException("Not enough memory. Estimated minimal total memory is " + reqMem + "B. " +
"Estimated minimal per node memory (assuming perfectly balanced datasets) is " + reqPerNodeMem + "B. " +
"Node with minimum memory has " + minMemoryPerNode() + "B. Total available memory is " + totalFreeMemory() + "B."
);
}
// Split the _frame into subsections, calculate baselines (expanding the frame) and then average (reducing the frame size)
int nChunks = _frame.anyVec().nChunks();
// last iteration we need memory for ~whole aggregated frame + expanded subframe
int nSubFrames = (int) Math.ceil(2*reqMem / (minMemoryPerNode() - 8 * _frame.numRows() * (names.length)));
nSubFrames = nChunks;
int chunksPerIter = (int) Math.max(1, Math.floor(nChunks / nSubFrames));
Log.warn("Not enough memory to calculate SHAP at once. Calculating in " + (nSubFrames) + " iterations.");
_isFrameBigger = false; // ensure we map over the BG frame so we can average over the results properly;
try (Scope.Safe safe = Scope.safe()) {
List<Frame> subFrames = new LinkedList<Frame>();
for (int i = 0; i < nSubFrames; i++) {
setChunkRange(i * chunksPerIter, Math.min(nChunks - 1, (i + 1) * chunksPerIter - 1));
Frame indivContribs = clone().withPostMapAction(JobUpdatePostMap.forJob(j))
.doAll(namesWithRowIdx.length, Vec.T_NUM, _backgroundFrame)
.outputFrame(Key.make(destinationKey + "_individual_contribs_" + i), namesWithRowIdx, null);
Frame subFrame = new ContributionsMeanAggregator(_job,(int) (_endRow - _startRow), names.length, (int) _backgroundFrame.numRows())
.setStartIndex((int) _startRow)
.withPostMapAction(JobUpdatePostMap.forJob(j))
.doAll(names.length, Vec.T_NUM, indivContribs)
.outputFrame(Key.make(destinationKey + "_part_" + i), names, null);
subFrames.add(Scope.track(subFrame));
indivContribs.delete();
}
Frame result = concatFrames(subFrames, destinationKey);
return Scope.untrack(result);
}
} else {
Frame indivContribs = withPostMapAction(JobUpdatePostMap.forJob(j))
.doAll(namesWithRowIdx.length, Vec.T_NUM, _isFrameBigger ? _frame : _backgroundFrame)
.outputFrame(individualContributionsKey, namesWithRowIdx, null);
try {
return new ContributionsMeanAggregator(_job, (int) _frame.numRows(), names.length, (int) _backgroundFrame.numRows())
.withPostMapAction(JobUpdatePostMap.forJob(j))
.doAll(names.length, Vec.T_NUM, indivContribs)
.outputFrame(destinationKey, names, null);
} finally {
indivContribs.delete(true);
}
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/DataInfo.java
|
package hex;
import water.*;
import water.fvec.*;
import water.util.ArrayUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import static water.util.ArrayUtils.findLongestCommonPrefix;
/**
* Created by tomasnykodym on 1/29/15.
*
 * Provides a higher-level interface for accessing data row-wise.
 *
 * Performs on-the-fly auto-expansion of categorical variables (to one-hot encoding) and standardization
 * (or normalize/demean/descale/none) of predictors and response.
 * Supports sparse data; sparse columns can be transformed to sparse rows on the fly with some (significant)
 * memory overhead, as the data of the whole chunk(s) will be copied.
*
*/
public class DataInfo extends Keyed<DataInfo> {
public int [] _activeCols;
public Frame _adaptedFrame; // the modified DataInfo frame (categorical columns sorted from largest to smallest cardinality, followed by all numerical columns)
public int _responses; // number of responses
public Vec setWeights(String name, Vec vec) {
if(_weights)
return _adaptedFrame.replace(weightChunkId(),vec);
_adaptedFrame.insertVec(weightChunkId(),name,vec);
_weights = true;
return null;
}
public void dropWeights() {
if(!_weights)return;
_adaptedFrame.remove(weightChunkId());
_weights = false;
}
public void dropInteractions() { // only called to cleanup the InteractionWrappedVecs!
if(_interactions!=null) {
Vec[] vecs = _adaptedFrame.remove(_interactionVecs);
for(Vec v:vecs)v.remove();
_interactions = null;
}
}
public int[] activeCols() {
if(_activeCols != null) return _activeCols;
int [] res = new int[fullN()+1];
for(int i = 0; i < res.length; ++i)
res[i] = i;
return res;
}
public void addResponse(String [] names, Vec[] vecs) {
_adaptedFrame.add(names,vecs);
_responses += vecs.length;
}
public int[] catNAFill() {return _catNAFill;}
public int catNAFill(int cid) {return _catNAFill[cid];}
public double[] numNAFill() {return _numNAFill; }
public double numNAFill(int nid) {return _numNAFill[nid];}
public void setCatNAFill(int[] catNAFill) {
_catNAFill = catNAFill;
}
public double normSub(int i) {
return _normSub == null?0:_normSub[i];
}
public double normMul(int i) {
return _normMul == null?1:_normMul[i];
}
public enum TransformType {
NONE, STANDARDIZE, NORMALIZE, DEMEAN, DESCALE;
public boolean isMeanAdjusted(){
switch(this){
case NONE:
case DESCALE:
case NORMALIZE:
return false;
case STANDARDIZE:
case DEMEAN:
return true;
default:
throw H2O.unimpl();
}
}
public boolean isSigmaScaled(){
switch(this){
case NONE:
case DEMEAN:
case NORMALIZE:
return false;
case STANDARDIZE:
case DESCALE:
return true;
default:
throw H2O.unimpl();
}
}
}
public TransformType _predictor_transform;
public TransformType _response_transform;
public boolean _useAllFactorLevels;
public int _nums; // "raw" number of numerical columns as they exist in the frame
public int _cats; // "raw" number of categorical columns as they exist in the frame
public int [] _catOffsets; // offset column indices for the 1-hot expanded values (includes enum-enum interaction)
public boolean [] _catMissing; // bucket for missing levels
private int [] _catNAFill; // majority class of each categorical col (or last bucket if _catMissing[i] is true)
public double[] _numNAFill;
public int [] _permutation; // permutation matrix mapping input col indices to adaptedFrame
public double [] _normMul; // scale the predictor column by this value
public double [] _normSub; // subtract from the predictor this value
public double [] _normSigmaStandardizationOff;
public double [] _normSubStandardizationOff;
public double [] _normRespMul; // scale the response column by this value
public double [] _normRespSub; // subtract from the response column this value
public double [] _numMeans;
public boolean _intercept = true;
public boolean _offset;
public boolean _weights;
public boolean _fold;
public boolean _treatment;
public Model.InteractionPair[] _interactions; // raw set of interactions
public Model.InteractionSpec _interactionSpec; // formal specification of interactions
public int _interactionVecs[]; // the interaction columns appearing in _adaptedFrame
public int[] _numOffsets; // offset column indices used by numerical interactions: total number of numerical columns is given by _numOffsets[_nums] - _numOffsets[0]
public int responseChunkId(int n){return n + _cats + _nums + (_weights?1:0) + (_offset?1:0) + (_fold?1:0) + (_treatment?1:0);}
public int treatmentChunkId(){return _cats + _nums + (_weights?1:0) + (_offset?1:0) + (_fold?1:0);}
public int foldChunkId(){return _cats + _nums + (_weights?1:0) + (_offset?1:0);}
public int offsetChunkId(){return _cats + _nums + (_weights ?1:0);}
public int weightChunkId(){return _cats + _nums;}
public int outputChunkId() { return outputChunkId(0);}
public int outputChunkId(int n) { return n + _cats + _nums + (_weights?1:0) + (_offset?1:0) + (_fold?1:0) + (_treatment?1:0) + _responses;}
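  // Illustrative layout (editor note, not part of the original source): the column order implied by the id
  // helpers above is
  //   [ categorical cols | numeric cols | weights? | offset? | fold? | treatment? | responses... | outputs... ]
  // e.g. with _cats = 3, _nums = 2, _weights = true and no offset/fold/treatment, weightChunkId() == 5 and
  // responseChunkId(0) == 6.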
public void addOutput(String name, Vec v) {_adaptedFrame.add(name,v);}
public Vec getOutputVec(int i) {return _adaptedFrame.vec(outputChunkId(i));}
public void setResponse(String name, Vec v){ setResponse(name,v,0);}
public void setResponse(String name, Vec v, int n){ _adaptedFrame.insertVec(responseChunkId(n),name,v);}
public final boolean _skipMissing;
public final boolean _imputeMissing;
public boolean _valid; // DataInfo over validation data set, can have unseen (unmapped) categorical levels
public final int [][] _catLvls; // cat lvls post filter (e.g. by strong rules)
public final int [][] _intLvls; // interaction lvls post filter (e.g. by strong rules)
private DataInfo() { _intLvls=null; _catLvls = null; _skipMissing = true; _imputeMissing = false; _valid = false; _offset = false; _weights = false; _fold = false; _treatment=false;}
public String[] _coefNames;
public int[] _coefOriginalIndices; //
@Override protected long checksum_impl() {throw H2O.unimpl();} // don't really need checksum
// Modify the train & valid frames directly; sort the categorical columns
// up front according to size; compute the mean/sigma for each column for
// later normalization.
public DataInfo(Frame train, Frame valid, boolean useAllFactorLevels, TransformType predictor_transform, boolean skipMissing, boolean imputeMissing, boolean missingBucket) {
this(train, valid, 0, useAllFactorLevels, predictor_transform, TransformType.NONE, skipMissing, imputeMissing, missingBucket, /* weight */ false, /* offset */ false, /* fold */ false, /* intercept */ false);
}
public DataInfo(Frame train, Frame valid, int nResponses, boolean useAllFactorLevels, TransformType predictor_transform, TransformType response_transform, boolean skipMissing, boolean imputeMissing, boolean missingBucket, boolean weight, boolean offset, boolean fold) {
this(train,valid,nResponses,useAllFactorLevels,predictor_transform,response_transform,skipMissing,imputeMissing,missingBucket,weight,offset,fold,null);
}
public DataInfo(Frame train, Frame valid, int nResponses, boolean useAllFactorLevels, TransformType predictor_transform, TransformType response_transform, boolean skipMissing, boolean imputeMissing, boolean missingBucket, boolean weight, boolean offset, boolean fold, Model.InteractionSpec interactions) {
this(train, valid, nResponses, useAllFactorLevels, predictor_transform, response_transform, skipMissing, imputeMissing, new MeanImputer(), missingBucket, weight, offset, fold, false, interactions);
}
public DataInfo(Frame train, Frame valid, int nResponses, boolean useAllFactorLevels, TransformType predictor_transform, TransformType response_transform, boolean skipMissing, boolean imputeMissing, boolean missingBucket, boolean weight, boolean offset, boolean fold, boolean treatment, Model.InteractionSpec interactions) {
this(train, valid, nResponses, useAllFactorLevels, predictor_transform, response_transform, skipMissing, imputeMissing, new MeanImputer(), missingBucket, weight, offset, fold, treatment, interactions);
}
public DataInfo(Frame train, Frame valid, int nResponses, boolean useAllFactorLevels, TransformType predictor_transform, TransformType response_transform, boolean skipMissing, boolean imputeMissing, Imputer imputer, boolean missingBucket, boolean weight, boolean offset, boolean fold, Model.InteractionSpec interactions) {
this(train, valid, nResponses, useAllFactorLevels, predictor_transform, response_transform, skipMissing, imputeMissing, imputer, missingBucket, weight, offset, fold, false, interactions);
}
/**
*
 * The train/valid Frame instances are reordered so that categorical columns come first (themselves sorted by
 * cardinality, greatest to least), with all numerical columns following. The response
* column(s) are placed at the end.
*
*
* Interactions:
* 1. Num-Num (Note: N(0,1) * N(0,1) ~ N(0,1) )
* 2. Num-Enum
* 3. Enum-Enum
*
* Interactions are produced on the fly and are dense (in all 3 cases). Consumers of
* DataInfo should not have to care how these interactions are generated. Any heuristic
* using the fullN value should continue functioning the same.
*
* Interactions are specified in two ways:
* A. As a list of pairs of column indices.
* B. As a list of pairs of column indices with limited enums.
*/
public DataInfo(Frame train, Frame valid, int nResponses, boolean useAllFactorLevels, TransformType predictor_transform, TransformType response_transform, boolean skipMissing, boolean imputeMissing, Imputer imputer, boolean missingBucket, boolean weight, boolean offset, boolean fold, boolean treatment, Model.InteractionSpec interactions) {
super(Key.<DataInfo>make());
assert predictor_transform != null;
assert response_transform != null;
_valid = valid != null;
_offset = offset;
_weights = weight;
_fold = fold;
_treatment = treatment;
assert !(skipMissing && imputeMissing) : "skipMissing and imputeMissing cannot both be true";
_skipMissing = skipMissing;
_imputeMissing = imputeMissing;
_predictor_transform = predictor_transform;
_response_transform = response_transform;
_responses = nResponses;
_useAllFactorLevels = useAllFactorLevels;
_interactionSpec = interactions;
if (interactions != null) {
train = interactions.reorderColumns(train);
valid = interactions.reorderColumns(valid);
_interactions = interactions.makeInteractionPairs(train);
}
// create dummy InteractionWrappedVecs and shove them onto the front
if( _interactions!=null ) {
_interactionVecs=new int[_interactions.length];
Frame inter = Model.makeInteractions(train, false, _interactions, _useAllFactorLevels, _skipMissing,predictor_transform==TransformType.STANDARDIZE);
train = inter.add(_interactionSpec.removeInteractionOnlyColumns(train));
if( valid!=null ) {
inter = Model.makeInteractions(valid, true, _interactions, _useAllFactorLevels, _skipMissing, predictor_transform == TransformType.STANDARDIZE); // FIXME: should be using the training subs/muls!
valid = inter.add(_interactionSpec.removeInteractionOnlyColumns(valid));
}
}
_permutation = new int[train.numCols()];
final Vec[] tvecs = train.vecs();
// Count categorical-vs-numerical
final int n = tvecs.length-_responses - (offset?1:0) - (weight?1:0) - (fold?1:0) - (treatment?1:0);
int [] nums = MemoryManager.malloc4(n);
int [] cats = MemoryManager.malloc4(n);
int nnums = 0, ncats = 0;
for(int i = 0; i < n; ++i)
if (tvecs[i].isCategorical())
cats[ncats++] = i;
else
nums[nnums++] = i;
_nums = nnums;
_cats = ncats;
_catLvls = new int[ncats][];
// sort the cats in the decreasing order according to their size
for(int i = 0; i < ncats; ++i)
for(int j = i+1; j < ncats; ++j)
if( tvecs[cats[i]].domain().length < tvecs[cats[j]].domain().length ) {
int x = cats[i];
cats[i] = cats[j];
cats[j] = x;
}
String[] names = new String[train.numCols()];
Vec[] tvecs2 = new Vec[train.numCols()];
// Compute the cardinality of each cat
_catNAFill = new int[ncats];
_catOffsets = MemoryManager.malloc4(ncats+1);
_catMissing = new boolean[ncats];
int len = _catOffsets[0] = 0;
int interactionIdx=0; // simple index into the _interactionVecs array
ArrayList<Integer> interactionIds;
if( _interactions==null ) {
interactionIds = new ArrayList<>();
for(int i=0;i<tvecs.length;++i)
if( tvecs[i] instanceof InteractionWrappedVec ) interactionIds.add(i);
if( interactionIds.size() > 0 ) {
_interactionVecs = new int[interactionIds.size()];
for (int i = 0; i < _interactionVecs.length; ++i)
_interactionVecs[i] = interactionIds.get(i);
}
}
for(int i = 0; i < ncats; ++i) {
names[i] = train._names[cats[i]];
Vec v = (tvecs2[i] = tvecs[cats[i]]);
_catMissing[i] = missingBucket; //needed for test time
if( v instanceof InteractionWrappedVec ) {
_interactionVecs[interactionIdx++]=i; // i (and not cats[i]) because this is the index in _adaptedFrame
_catOffsets[i + 1] = (len += v.domain().length + (missingBucket ? 1 : 0));
}
else
_catOffsets[i+1] = (len += v.domain().length - (useAllFactorLevels?0:1) + (missingBucket? 1 : 0)); //missing values turn into a new factor level
_catNAFill[i] = imputeMissing ?
imputer.imputeCat(names[i], train.vec(cats[i]), _useAllFactorLevels)
:
_catMissing[i] ? v.domain().length - (_useAllFactorLevels || isInteractionVec(i)?0:1) : -100;
_permutation[i] = cats[i];
}
_numOffsets = MemoryManager.malloc4(nnums+1);
_numOffsets[0]=len;
boolean isIWV; // is InteractionWrappedVec?
for(int i = 0; i < nnums; ++i) {
names[i+ncats] = train._names[nums[i]];
Vec v = train.vec(nums[i]);
tvecs2[i+ncats] = v;
isIWV = v instanceof InteractionWrappedVec;
if( isIWV ) {
_interactionVecs[interactionIdx++]=i+ncats;
}
_numOffsets[i+1] = (len+= (isIWV ? ((InteractionWrappedVec) v).expandedLength() : 1));
_permutation[i+ncats] = nums[i];
}
_numMeans = new double[numNums()];
_numNAFill = new double[numNums()];
int numIdx=0; // index into the _numMeans
for(int i=0;i<nnums;++i) {
String name = train.name(nums[i]);
Vec v = train.vec(nums[i]);
if( v instanceof InteractionWrappedVec ) {
InteractionWrappedVec iwv = (InteractionWrappedVec)v;
int start = iwv._useAllFactorLevels?0:1;
int length = iwv.expandedLength();
double[] means = iwv.getMeans();
System.arraycopy(means,start,_numMeans,numIdx,length);
double[] naFill = imputer.imputeInteraction(name, iwv, means);
System.arraycopy(naFill,start,_numNAFill,numIdx,length);
numIdx+=length;
}
else {
_numMeans[numIdx] = v.mean();
_numNAFill[numIdx] = imputer.imputeNum(name, v);
numIdx++;
}
}
for(int i = names.length-nResponses - (weight?1:0) - (offset?1:0) - (fold?1:0) - (treatment?1:0); i < names.length; ++i) {
names[i] = train._names[i];
tvecs2[i] = train.vec(i);
}
_adaptedFrame = new Frame(names,tvecs2);
train.restructure(names,tvecs2);
if (valid != null)
valid.restructure(names,valid.vecs(names));
// _adaptedFrame = train;
setPredictorTransform(predictor_transform);
if(_responses > 0)
setResponseTransform(response_transform);
_intLvls = new int[_interactionVecs==null?0:_interactionVecs.length][];
}
public DataInfo disableIntercept() {
_intercept = false;
return this;
}
public DataInfo(Frame train, Frame valid, int nResponses, boolean useAllFactorLevels, TransformType predictor_transform, TransformType response_transform, boolean skipMissing, boolean imputeMissing, boolean missingBucket, boolean weight, boolean offset, boolean fold, boolean intercept) {
this(train, valid, nResponses, useAllFactorLevels, predictor_transform, response_transform, skipMissing, imputeMissing, missingBucket, weight, offset, fold);
_intercept = intercept;
}
public DataInfo validDinfo(Frame valid) {
DataInfo res = new DataInfo(_adaptedFrame,null,1,_useAllFactorLevels,TransformType.NONE,TransformType.NONE,_skipMissing,_imputeMissing,!(_skipMissing || _imputeMissing),_weights,_offset,_fold);
res._interactions = _interactions;
res._interactionSpec = _interactionSpec;
if (_interactionSpec != null) {
valid = Model.makeInteractions(valid, true, _interactions, _useAllFactorLevels, _skipMissing, false).add(valid);
}
res._adaptedFrame = new Frame(_adaptedFrame.names(),valid.vecs(_adaptedFrame.names()));
res._valid = true;
return res;
}
public double[] denormalizeBeta(double [] beta) {
int N = fullN()+1;
assert (beta.length % N) == 0:"beta len = " + beta.length + " expected multiple of " + N;
int nclasses = beta.length/N;
beta = MemoryManager.arrayCopyOf(beta,beta.length);
if (_predictor_transform == DataInfo.TransformType.STANDARDIZE) {
for(int c = 0; c < nclasses; ++c) {
int off = N*c;
double norm = 0.0; // Reverse any normalization on the intercept
// denormalize only the numeric coefs (categoricals are not normalized)
final int numoff = numStart();
for (int i = numoff; i < N-1; i++) {
double b = beta[off + i] * _normMul[i - numoff];
norm += b * _normSub[i - numoff]; // Also accumulate the intercept adjustment
beta[off + i] = b;
}
beta[off + N - 1] -= norm;
}
}
return beta;
}
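  // Worked example (editor note, not part of the original source): with a single standardized numeric predictor
  // having mean 10 and standard deviation 2 (_normSub = {10}, _normMul = {0.5}), a standardized coefficient of
  // 1.0 and intercept 3.0 denormalize to
  //   beta = 1.0 * 0.5 = 0.5,   intercept = 3.0 - 0.5 * 10 = -2.0,
  // so that predictions on the original (unstandardized) scale are unchanged.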
public double[] normalizeBeta(double [] beta, boolean standardize){
int N = fullN()+1;
assert (beta.length % N) == 0:"beta len = " + beta.length + " expected multiple of" + N;
int nclasses = beta.length/N;
beta = MemoryManager.arrayCopyOf(beta,beta.length);
if (standardize == false && _predictor_transform == DataInfo.TransformType.NONE && _normSubStandardizationOff != null && _normSigmaStandardizationOff != null) {
for(int c = 0; c < nclasses; ++c) {
int off = N*c;
double norm = 0.0; // Reverse any normalization on the intercept
// denormalize only the numeric coefs (categoricals are not normalized)
final int numoff = numStart();
for (int i = numoff; i < N-1; i++) {
double b = beta[off + i] * _normSigmaStandardizationOff[i - numoff];
norm += beta[off + i] * _normSubStandardizationOff[i - numoff]; // Also accumulate the intercept adjustment
beta[off + i] = b;
}
beta[off + N - 1] += norm;
}
}
return beta;
}
private int [] _fullCatOffsets;
private int [][] _catMap;
protected int [] fullCatOffsets(){ return _fullCatOffsets == null?_catOffsets:_fullCatOffsets;}
// private constructor called by filterExpandedColumns
private DataInfo(DataInfo dinfo,Frame fr, double [] normMul, double [] normSub, int[][] catLevels, int[][] intLvls, int [] catModes, int[] activeCols) {
_activeCols=activeCols;
_fullCatOffsets = dinfo._catOffsets;
if(!dinfo._useAllFactorLevels) {
_fullCatOffsets = dinfo._catOffsets.clone();
for (int i = 0; i < _fullCatOffsets.length; ++i)
_fullCatOffsets[i] += i; // add for the skipped zeros.
}
_cats = catLevels.length;
_catMap = new int[_cats][];
_offset = dinfo._offset;
_weights = dinfo._weights;
_fold = dinfo._fold;
_treatment = dinfo._treatment;
_valid = false;
_interactions = null;
ArrayList<Integer> interactionVecs = new ArrayList<>();
for(int i=0;i<fr.numCols();++i)
if( fr.vec(i) instanceof InteractionWrappedVec ) interactionVecs.add(i);
if( interactionVecs.size() > 0 ) {
_interactionVecs = new int[interactionVecs.size()];
for (int i = 0; i < _interactionVecs.length; ++i)
_interactionVecs[i] = interactionVecs.get(i);
}
assert dinfo._predictor_transform != null;
assert dinfo._response_transform != null;
_predictor_transform = dinfo._predictor_transform;
_response_transform = dinfo._response_transform;
_skipMissing = dinfo._skipMissing;
_imputeMissing = dinfo._imputeMissing;
_adaptedFrame = fr;
_catOffsets = MemoryManager.malloc4(catLevels.length + 1);
_catMissing = new boolean[catLevels.length];
Arrays.fill(_catMissing,!(dinfo._imputeMissing || dinfo._skipMissing));
int s = 0;
for(int i = 0; i < catLevels.length; ++i){
if(catLevels[i] != null) {
_catMap[i] = new int[_adaptedFrame.vec(i).cardinality()];
Arrays.fill(_catMap[i],-1);
for (int j = 0; j < catLevels[i].length; j++) {
_catMap[i][catLevels[i][j]] = j;
}
}
_catOffsets[i] = s;
s += catLevels[i].length;
}
_catOffsets[_catOffsets.length-1] = s;
_catLvls = catLevels;
_intLvls = intLvls;
_responses = dinfo._responses;
_useAllFactorLevels = true;//dinfo._useAllFactorLevels;
_normMul = normMul;
_normSub = normSub;
_catNAFill = catModes;
}
public static int imputeCat(Vec v) {return imputeCat(v,true);}
public static int imputeCat(Vec v, boolean useAllFactorLevels) {
if(v.isCategorical()) {
if (useAllFactorLevels) return v.mode();
long[] bins = v.bins();
return ArrayUtils.maxIndex(bins,0);
}
return (int)Math.round(v.mean());
}
/**
* Filter the _adaptedFrame so that it contains only the Vecs referenced by the cols
* parameter. This is done by recording the ignored columns in the ignoredCols array. Enum (categorical)
* columns are not expanded and are treated as single columns; however, individual levels inside an enum
* column can be ignored, in which case the enum levels are adjusted accordingly.
*
* @param cols Array of the expanded column indices to keep.
* @return A DataInfo with _activeCols specifying the active columns
*/
public DataInfo filterExpandedColumns(int [] cols){
assert _activeCols==null;
assert _predictor_transform != null;
assert _response_transform != null;
if(cols == null)return IcedUtils.deepCopy(this); // keep all columns
int hasIcpt = (cols.length > 0 && cols[cols.length-1] == fullN())?1:0;
int i = 0, j = 0, ignoredCnt = 0;
//public DataInfo(Frame fr, int hasResponses, boolean useAllFactorLvls, double [] normSub, double [] normMul, double [] normRespSub, double [] normRespMul){
int [][] catLvls = new int[_cats][]; // categorical levels to keep (used in getCategoricalOffsetId binary search)
int [][] intLvls = new int[_interactionVecs==null?0:_interactionVecs.length][]; // interactions levels to keep (used in getInteractionOffsetId binary search)
int [] ignoredCols = MemoryManager.malloc4(_nums + _cats); // capital 'v' Vec indices to be frame.remove'd, one per column not expanded
// first do categoricals...
if(_catOffsets != null) {
int coff = _useAllFactorLevels?0:1;
while (i < cols.length && cols[i] < numStart()) { // iterate over categorical cols
int[] levels = MemoryManager.malloc4(_catOffsets[j + 1] - _catOffsets[j]);
int k = 0; // keep track of how many levels we have (so we can "trim" the levels array when inserting into catLvls)
while (i < cols.length && cols[i] < _catOffsets[j + 1])
levels[k++] = (cols[i++] - _catOffsets[j]) + coff;
if (k > 0)
catLvls[j] = Arrays.copyOf(levels, k);
++j;
}
}
int [] catModes = _catNAFill;
for(int k =0; k < catLvls.length; ++k)
if(catLvls[k] == null)ignoredCols[ignoredCnt++] = k;
if(ignoredCnt > 0){
int [][] cs = new int[_cats-ignoredCnt][];
catModes = new int[_cats-ignoredCnt];
int y = 0;
for (int c = 0; c < catLvls.length; ++c)
if (catLvls[c] != null) {
catModes[y] = _catNAFill[c];
cs[y++] = catLvls[c];
}
assert y == cs.length;
catLvls = cs;
}
// now do the interaction vecs -- these happen to always sit first in the "nums" section of _adaptedFrame
// also, these have the exact same filtering logic as the categoricals above
int prev=j=0; // reset j for _numOffsets
if( _interactionVecs!=null && (_numOffsets.length > intLvls.length) ) { // second condition happens when there are no num columns
while( i < cols.length && cols[i] < _numOffsets[intLvls.length]) {
int[] lvls = MemoryManager.malloc4(_numOffsets[j+1] - _numOffsets[j]);
int k=0; // same as above
while(i<cols.length && cols[i] < _numOffsets[j+1])
lvls[k++] = (cols[i++] - _numOffsets[j]); // no useAllFactorLevels offset since it's tucked away in the count already
if( k>0 )
intLvls[j] = Arrays.copyOf(lvls,k);
++j;
}
int preIgnoredCnt=ignoredCnt;
for(int k=0;k<intLvls.length;++k)
if( null==intLvls[k] ) { ignoredCols[ignoredCnt++] = k+_cats; }
if( ignoredCnt > preIgnoredCnt ) { // got more ignored, trim out the nulls
int[][] is = new int[_interactionVecs.length - (ignoredCnt-preIgnoredCnt)][];
int y=0;
for (int[] intLvl : intLvls)
if (intLvl != null)
is[y++] = intLvl;
intLvls=is;
}
}
// now numerics
prev=j=_interactionVecs==null?0:_interactionVecs.length;
for(;i<cols.length;++i){
if (j>=_numOffsets.length) // happens when there are no num, enumxnum, numxnum columns
break;
int numsToIgnore = (cols[i]-_numOffsets[j]);
for(int k=0;k<numsToIgnore;++k){
ignoredCols[ignoredCnt++] = _cats+prev++;
++j;
}
prev = ++j;
}
for(int k = prev; k < _nums; ++k)
ignoredCols[ignoredCnt++] = k+_cats;
Frame f = new Frame(_adaptedFrame.names().clone(),_adaptedFrame.vecs().clone());
if(ignoredCnt > 0) f.remove(Arrays.copyOf(ignoredCols,ignoredCnt));
assert catLvls.length <= f.numCols():"cats = " + catLvls.length + " numcols = " + f.numCols();
double [] normSub = null;
double [] normMul = null;
int id = Arrays.binarySearch(cols,numStart());
if(id < 0) id = -id-1;
int nnums = cols.length - id - hasIcpt;
int off = numStart();
if(_normSub != null) {
normSub = new double[nnums];
for(int k = id; k < (id + nnums); ++k)
normSub[k-id] = _normSub[cols[k]-off];
}
if(_normMul != null) {
normMul = new double[nnums];
for(int k = id; k < (id + nnums); ++k)
normMul[k-id] = _normMul[cols[k]-off];
}
DataInfo dinfo = new DataInfo(this,f,normMul,normSub,catLvls,intLvls,catModes,cols);
dinfo._nums=f.numCols()-dinfo._cats - dinfo._responses - (dinfo._offset?1:0) - (dinfo._weights?1:0) - (dinfo._fold?1:0) - (dinfo._treatment?1:0);
dinfo._numMeans=new double[nnums];
dinfo._numNAFill=new double[nnums];
int colsSize = id+nnums; // small optimization
for(int k=id; k < colsSize;++k ) {
int index1 = k - id;
int index2 = cols[k] - off;
dinfo._numMeans[index1] = _numMeans[index2];
dinfo._numNAFill[index1] = _numNAFill[index2];
}
return dinfo;
}
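/**
 * Overwrites the stored standardization constants with statistics computed on weighted data.
 * sigmas/mean are expected to carry one entry per non-expanded numeric column (_nums of them),
 * so the leading numNums() - _nums slots (extra columns introduced by expanded interactions)
 * keep their existing values; a sigma of 0 falls back to a multiplier of 1 so that constant
 * columns are not scaled up.
 */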
public void updateWeightedSigmaAndMean(double [] sigmas, double [] mean) {
int sub = numNums() - _nums;
if(_predictor_transform.isSigmaScaled()) {
if(sigmas.length+(sub) != _normMul.length) // numNums() - _nums checks for interactions (numNums() > _nums in the case of numerical interactions)
throw new IllegalArgumentException("Length of sigmas does not match number of scaled columns.");
for(int i = 0; i < _normMul.length; ++i)
_normMul[i] = i<sub?_normMul[i]:(sigmas[i-sub] != 0?1.0/sigmas[i-sub]:1);
}
if(_predictor_transform.isMeanAdjusted()) {
if(mean.length+(sub) != _normSub.length) // numNums() - _nums checks for interactions (numNums() > _nums in the case of numerical interactions)
throw new IllegalArgumentException("Length of means does not match number of scaled columns.");
for(int i=0;i<_normSub.length;++i)
_normSub[i] = i<sub?_normSub[i]:mean[i-sub];
}
}
public void updateWeightedSigmaAndMeanForResponse(double [] sigmas, double [] mean) {
if(_response_transform.isSigmaScaled()) {
if(sigmas.length != _normRespMul.length)
throw new IllegalArgumentException("Length of sigmas does not match number of scaled columns.");
for(int i = 0; i < sigmas.length; ++i)
_normRespMul[i] = sigmas[i] != 0?1.0/sigmas[i]:1;
}
if(_response_transform.isMeanAdjusted()) {
if(mean.length != _normRespSub.length)
throw new IllegalArgumentException("Length of means does not match number of scaled columns.");
System.arraycopy(mean,0,_normRespSub,0,mean.length);
}
}
private void setTransform(TransformType t, double [] normMul, double [] normSub, int vecStart, int n) {
int idx=0; // idx!=i when interactions are in play, otherwise, it's just 'i'
for (int i = 0; i < n; ++i) {
Vec v = _adaptedFrame.vec(vecStart + i);
boolean isIWV = v instanceof InteractionWrappedVec;
switch (t) {
case STANDARDIZE:
if( isIWV ) {
InteractionWrappedVec iwv = (InteractionWrappedVec)v;
for(int offset=0;offset<iwv.expandedLength();++offset) {
normMul[idx+offset] = iwv.getMul(offset+(iwv._useAllFactorLevels?0:1));
normSub[idx+offset] = iwv.getSub(offset+(iwv._useAllFactorLevels?0:1));
}
} else {
normMul[idx] = (v.sigma() != 0) ? 1.0 / v.sigma() : 1.0;
normSub[idx] = v.mean();
}
break;
case NONE:
if( isIWV ) {
InteractionWrappedVec iwv = (InteractionWrappedVec)v;
for(int offset=0;offset<iwv.expandedLength();++offset) {
normMul[idx+offset] = iwv.getSigma(offset+(iwv._useAllFactorLevels?0:1));
normSub[idx+offset] = iwv.getSub(offset+(iwv._useAllFactorLevels?0:1));
}
} else {
normMul[idx] = v.sigma();
normSub[idx] = v.mean();
}
break;
case NORMALIZE:
if( isIWV ) throw H2O.unimpl();
normMul[idx] = (v.max() - v.min() > 0)?1.0/(v.max() - v.min()):1.0;
normSub[idx] = v.mean();
break;
case DEMEAN:
if (isIWV) {
InteractionWrappedVec iwv = (InteractionWrappedVec)v;
for(int offset=0;offset<iwv.expandedLength();++offset) {
normSub[idx+offset] = iwv.getMeans()[offset];
normMul[idx+offset] = 1;
}
} else {
normSub[idx] = v.mean();
normMul[idx] = 1;
}
break;
case DESCALE:
if( isIWV ) throw H2O.unimpl();
normMul[idx] = (v.sigma() != 0)?1.0/v.sigma():1.0;
normSub[idx] = 0;
break;
default:
throw H2O.unimpl();
}
assert !Double.isNaN(normMul[idx]);
assert !Double.isNaN(normSub[idx]);
idx = isIWV?(idx+nextNumericIdx(i)):(idx+1);
}
}
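// Summary of what setTransform stores per numeric column as (normSub, normMul):
//   STANDARDIZE -> (mean, 1/sigma)        x' = (x - mean) / sigma
//   NORMALIZE   -> (mean, 1/(max - min))
//   DEMEAN      -> (mean, 1)
//   DESCALE     -> (0,    1/sigma)
//   NONE        -> (mean, sigma)          kept only for bookkeeping (see _normSubStandardizationOff,
//                                         used by normalizeBeta when standardization is off)
// Interaction-wrapped vecs supply their own per-offset means/sigmas where supported.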
public void setPredictorTransform(TransformType t){
_predictor_transform = t;
if(t == TransformType.NONE) {
_normMul = null;
_normSub = null;
if (_adaptedFrame != null) {
_normSigmaStandardizationOff = MemoryManager.malloc8d(numNums());
_normSubStandardizationOff = MemoryManager.malloc8d(numNums());
setTransform(t, _normSigmaStandardizationOff, _normSubStandardizationOff, _cats, _nums);
}
} else {
_normMul = MemoryManager.malloc8d(numNums());
_normSub = MemoryManager.malloc8d(numNums());
setTransform(t,_normMul,_normSub,_cats,_nums);
}
}
public void setResponseTransform(TransformType t){
_response_transform = t;
if(t == TransformType.NONE) {
_normRespMul = null;
_normRespSub = null;
} else {
_normRespMul = MemoryManager.malloc8d(_responses);
_normRespSub = MemoryManager.malloc8d(_responses);
setTransform(t,_normRespMul,_normRespSub,_adaptedFrame.numCols()-_responses,_responses);
}
}
public boolean isInteractionVec(int colid) {
if( null==_interactions && null==_interactionVecs ) return false;
if( _adaptedFrame!=null )
return _adaptedFrame.vec(colid) instanceof InteractionWrappedVec;
else
return Arrays.binarySearch(_interactionVecs,colid) >= 0;
}
/**
*
* Get the fully expanded number of predictor columns.
* Note that this value does not include:
* response column(s)
* weight column
* offset column
* fold column
* treatment column
*
* @return expanded number of columns in the underlying frame
*/
public final int fullN() { return numNums() + numCats(); }
public final int largestCat(){ return _cats > 0?_catOffsets[1]:0; }
public final int numStart() { return _catOffsets[_cats]; }
public final int numCats() { return _catOffsets[_cats]; }
public final int numNums() {
int nnums=0;
if( _numOffsets==null && _intLvls.length>0 ) { // filtered columns?
for (int[] _intLvl : _intLvls) nnums += _intLvl==null?0:_intLvl.length-1; // minus 1 for the fact that we get a +1 from the dummy interaction vec sitting in the frame!
return nnums+_nums;
}
return _interactionVecs!=null&&_numOffsets!=null?(_numOffsets[_numOffsets.length-1]-numStart()):_nums;
}
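// Expanded row layout: [expanded categoricals | expanded interactions | plain numerics].
// Illustration (ignoring NA buckets): two categorical columns with 3 and 4 levels and
// useAllFactorLevels == false expand to 2 + 3 = 5 columns, so with 3 plain numeric columns
// fullN() == 8, numStart() == numCats() == 5 and numNums() == 3.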
/**
* Get the next expanded number-column index.
*/
public final int nextNumericIdx(int currentColIdx) {
if( _numOffsets==null ) {
if( currentColIdx < _interactionVecs.length ) { // currently sitting on an interaction vec, return the number of levels
return _intLvls[currentColIdx].length;
} else
return 1;
}
if( currentColIdx+1 >= _numOffsets.length ) return fullN() - _numOffsets[currentColIdx];
return _numOffsets[currentColIdx+1] - _numOffsets[currentColIdx];
}
public final String[] coefNames() {
if (_coefNames != null) return _coefNames; // already computed
int k = 0;
final int n = fullN(); // total number of columns to compute
String [] res = new String[n];
final Vec [] vecs = _adaptedFrame.vecs();
// first do all of the expanded categorical names
for(int i = 0; i < _cats; ++i) {
for (int j = (_useAllFactorLevels || vecs[i] instanceof InteractionWrappedVec) ? 0 : 1; j < vecs[i].domain().length; ++j) {
int jj = getCategoricalId(i, j);
if(jj < 0)
continue;
res[k++] = _adaptedFrame._names[i] + "." + vecs[i].domain()[j];
}
if (_catMissing[i] && getCategoricalId(i, -1) >=0)
res[k++] = _adaptedFrame._names[i] + ".missing(NA)";
if( vecs[i] instanceof InteractionWrappedVec ) {
InteractionWrappedVec iwv = (InteractionWrappedVec)vecs[i];
if( null!=iwv.missingDomains() ) {
for(String s: iwv.missingDomains() )
res[k++] = s+".missing(NA)";
}
}
}
// now loop over the numerical columns, collecting up any expanded InteractionVec names
if( _interactions==null ) {
final int nums = n-k;
System.arraycopy(_adaptedFrame._names, _cats, res, k, nums);
} else {
for (int i = 0; i <= _nums; i++) {
InteractionWrappedVec v;
if( i+_cats >= n || k >=n ) break;
if (vecs[i+_cats] instanceof InteractionWrappedVec && ((v = (InteractionWrappedVec) vecs[i+_cats]).domain() != null)) { // in this case, get the categoricalOffset
for (int j = v._useAllFactorLevels?0:1; j < v.domain().length; ++j) {
if (getCategoricalIdFromInteraction(_cats+i, j) < 0)
continue;
res[k++] = _adaptedFrame._names[i+_cats] + "." + v.domain()[j];
}
} else
res[k++] = _adaptedFrame._names[i+_cats];
}
}
_coefNames = res;
return res;
}
public final int[] coefOriginalColumnIndices(Frame adaptedFrame) {
int k = 0;
final int n = fullN(); // total number of columns to compute
int[] res = new int[n];
final Vec [] vecs = adaptedFrame.vecs();
// first do all of the expanded categorical names
for(int i = 0; i < _cats; ++i) {
for (int j = (_useAllFactorLevels || vecs[i] instanceof InteractionWrappedVec) ? 0 : 1; j < vecs[i].domain().length; ++j) {
int jj = getCategoricalId(i, j);
if(jj < 0)
continue;
res[k++] = i;
}
if (_catMissing[i] && getCategoricalId(i, -1) >=0)
res[k++] = i;
if( vecs[i] instanceof InteractionWrappedVec ) {
InteractionWrappedVec iwv = (InteractionWrappedVec)vecs[i];
if( null != iwv.missingDomains() ) {
for(String s: iwv.missingDomains() )
res[k++] = i;
}
}
}
// now loop over the numerical columns, collecting up any expanded InteractionVec names
if( _interactions == null ) {
int index = _cats;
for(int i = k; i < n; i++) {
res[i] = index++;
}
} else {
for (int i = 0; i <= _nums; i++) {
InteractionWrappedVec v;
if( i+_cats >= n || k >=n ) break;
if (vecs[i+_cats] instanceof InteractionWrappedVec && ((v = (InteractionWrappedVec) vecs[i+_cats]).domain() != null)) { // in this case, get the categoricalOffset
for (int j = v._useAllFactorLevels?0:1; j < v.domain().length; ++j) {
if (getCategoricalIdFromInteraction(_cats+i, j) < 0)
continue;
res[k++] = i+_cats;
}
} else
res[k++] = i+_cats;
}
}
if (null != _adaptedFrame && Objects.equals(_adaptedFrame._key, adaptedFrame._key))
_coefOriginalIndices = res;
return res;
}
public final int[] coefOriginalColumnIndices() {
if (_coefOriginalIndices != null) return _coefOriginalIndices; // already computed
return coefOriginalColumnIndices(_adaptedFrame);
}
public final String[] coefOriginalNames(Frame adaptedFrame) {
int[] coefOriginalIndices = coefOriginalColumnIndices(adaptedFrame);
String[] originalNames = new String[coefOriginalIndices[coefOriginalIndices.length - 1] + 1]; //needs +1 since we have 0 based indexing so if we have index N we need to have N+1 elements
int i = 0, j = 0;
while (i < coefOriginalIndices.length && j < originalNames.length) {
List<Integer> coefOriginalIndicesList = new ArrayList<>(coefOriginalIndices.length);
for (int value : coefOriginalIndices) coefOriginalIndicesList.add(value);
int end = coefOriginalIndicesList.lastIndexOf(coefOriginalIndices[i]);
String prefix = findLongestCommonPrefix(Arrays.copyOfRange(coefNames(), i, end + 1));
if (end > i) { // categorical variable
// Let's hope levels in this categorical variable don't have common prefix with '.'
// We know that we encode cat. vars as "variable_name.level" so we know that the prefix should end
// with ".". So make sure it's the case otherwise this can break on categorical variables like "pclass" in titanic
// dataset where every level starts with "Class " which leads to "pclass.Class " as the original name
prefix = prefix.substring(0, prefix.lastIndexOf("."));
}
if (".".equals(prefix.substring(prefix.length() - 1))) {
prefix = prefix.substring(0, prefix.length() - 1);
}
originalNames[j] = prefix;
i = end + 1;
j++;
}
return originalNames;
}
public final String[] coefOriginalNames() {
return coefOriginalNames(_adaptedFrame);
}
// Return permutation matrix mapping input names to adaptedFrame colnames
public int[] mapNames(String[] names) {
assert names.length == _adaptedFrame._names.length : "Names must be the same length!";
int[] idx = new int[names.length];
Arrays.fill(idx, -1);
for(int i = 0; i < _adaptedFrame._names.length; i++) {
for(int j = 0; j < names.length; j++) {
if( names[j].equals(_adaptedFrame.name(i)) ) {
idx[i] = j; break;
}
}
}
return idx;
}
/**
* Undo the standardization/normalization of numerical columns
* @param in input values
* @param out output values (can be the same as input)
*/
public final void unScaleNumericals(double[] in, double[] out) {
if (_nums == 0) return;
assert (in.length == out.length);
assert (in.length == fullN());
for (int k=numStart(); k < fullN(); ++k) {
double m = _normMul == null ? 1f : _normMul[k-numStart()];
double s = _normSub == null ? 0f : _normSub[k-numStart()];
out[k] = in[k] / m + s;
}
}
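// Illustration for unScaleNumericals above: with _normSub[k] = 10 (the mean) and _normMul[k] = 0.5
// (i.e. sigma = 2), a standardized value of 1.5 maps back to 1.5 / 0.5 + 10 = 13.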
public final class Row extends Iced {
public boolean predictors_bad; // should the row be skipped (GLM skip NA for example)
public boolean response_bad;
public boolean isBad(){return predictors_bad || response_bad;}
public double [] numVals; // the backing data of the row
public double [] response;
public int [] numIds; // location of next sparse value
public int [] binIds; // location of categorical
public long rid; // row number (sometimes within chunk, or absolute)
public int cid; // categorical id
public int nBins; // number of enum columns (not expanded)
public int nNums; // number of numeric columns (not expanded)
public double offset = 0;
public double weight = 1;
public final boolean isSparse(){return numIds != null;}
public double[] mtrxMul(double [][] m, double [] res){
for(int i = 0; i < m.length; ++i)
res[i] = innerProduct(m[i],false);
return res;
}
public Row(boolean sparse, int nNums, int nBins, int nresponses, int i, long start) {
binIds = MemoryManager.malloc4(nBins);
numVals = MemoryManager.malloc8d(nNums);
response = MemoryManager.malloc8d(nresponses);
if(sparse)
numIds = MemoryManager.malloc4(nNums);
this.nNums = sparse?0:nNums;
cid = i;
rid = start + i;
}
public Row(boolean sparse, double[] numVals, int[] binIds, double[] response, int i, long start) {
int nNums = numVals == null ? 0:numVals.length;
this.numVals = numVals;
if(sparse)
numIds = MemoryManager.malloc4(nNums);
this.nNums = sparse ? 0:nNums;
this.nBins = binIds == null ? 0:binIds.length;
this.binIds = binIds;
this.response = response;
cid = i;
rid = start + i;
}
public Row(double [] nums) {
numVals = nums;
nNums = nums.length;
}
public double response(int i) {return response[i];}
public double get(int i) {
int off = numStart();
if(i >= off) { // numbers
if(numIds == null)
return numVals[i-off];
int j = Arrays.binarySearch(numIds,0,nNums,i);
return j >= 0?numVals[j]:0;
} else { // categoricals
int j = Arrays.binarySearch(binIds,0,nBins,i);
return j >= 0?1:0;
}
}
public void addNum(int id, double val) {
if(numIds.length == nNums) {
int newSz = Math.max(4,numIds.length + (numIds.length >> 1));
numIds = Arrays.copyOf(numIds, newSz);
numVals = Arrays.copyOf(numVals, newSz);
}
int i = nNums++;
numIds[i] = id;
numVals[i] = val;
}
/*
This method will perform an inner product of rows. It will be able to handle categorical data
as well as numerical data. However, the two rows must have exactly the same column types. This
is used in a situation where the rows are coming from the same dataset.
*/
public final double dotSame(Row rowj) {
// nums
double elementij = 0.0;
for(int i = 0; i < this.nNums; ++i) {
elementij += this.numVals[i]*rowj.numVals[i]; // multiply numerical parts of columns
}
// cat X cat
if (this.binIds.length > 0) { // categorical columns exists
for (int j = 0; j < this.nBins; ++j) {
if (this.binIds[j] == rowj.binIds[j]) {
elementij += 1;
}
}
}
return elementij*this.weight*rowj.weight;
}
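/**
 * Dot product of this (possibly sparse) row with a coefficient vector laid out as
 * [expanded categoricals | numerics | intercept]. Each active categorical level contributes
 * vec[binId] (its value is implicitly 1), numerics contribute numVals[i] times the matching
 * coefficient, and when _intercept is set the last element of vec is added (unless icptFirst
 * places the intercept at index 0 instead).
 */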
public final double innerProduct(double [] vec) { return innerProduct(vec,false);}
public final double innerProduct(double [] vec, boolean icptFirst) {
double res = 0;
int off = 0;
if(icptFirst) {
off = 1;
res = vec[0];
}
int numStart = off + numStart();
for(int i = 0; i < nBins; ++i)
res += vec[off+binIds[i]];
if(numIds == null) {
for (int i = 0; i < numVals.length; ++i)
res += numVals[i] * vec[numStart + i];
} else {
for (int i = 0; i < nNums; ++i)
res += numVals[i] * vec[off+numIds[i]];
}
if(_intercept && !icptFirst)
res += vec[vec.length-1];
return res;
}
public final double innerProduct(DataInfo.Row row) {
assert !_intercept;
assert numIds == null;
double res = 0;
for (int i = 0; i < nBins; ++i)
if (binIds[i] == row.binIds[i])
res += 1;
for (int i = 0; i < numVals.length; ++i)
res += numVals[i] * row.numVals[i];
return res;
}
public final double twoNormSq() {
assert !_intercept;
assert numIds == null;
double res = nBins;
for (double v : numVals)
res += v * v;
return res;
}
public double[] expandCats() {
if(isSparse() || _responses > 0) throw H2O.unimpl();
return expandCatsPredsOnly(null);
}
public double[] expandCatsPredsOnly(double[] res) {
if(isSparse()) throw H2O.unimpl();
int N = fullN();
int numStart = numStart();
if (res == null)
res = new double[N + (_intercept ? 1:0)];
else
Arrays.fill(res, 0);
for(int i = 0; i < nBins; ++i)
res[binIds[i]] = 1;
if(numIds == null) {
System.arraycopy(numVals,0,res,numStart,numVals.length);
} else {
for(int i = 0; i < nNums; ++i)
res[numIds[i]] = numVals[i];
}
if(_intercept)
res[res.length-1] = 1;
return res;
}
public String toString() {
return this.rid + Arrays.toString(Arrays.copyOf(binIds,nBins)) + ", " + Arrays.toString(numVals);
}
public void setResponse(int i, double z) {response[i] = z;}
public void standardize(double[] normSub, double[] normMul) {
if(numIds == null){
for(int i = 0; i < numVals.length; ++i)
numVals[i] = (numVals[i] - normSub[i])*normMul[i];
} else
for(int i = 0; i < nNums; ++i) {
int j = numIds[i];
numVals[i] = (numVals[i] - normSub[j])*normMul[j];
}
}
public Row deepClone() {
Row cloned = (Row) clone();
cloned.numVals = numVals.clone();
if (numIds != null)
cloned.numIds = numIds.clone();
cloned.response = response.clone();
cloned.binIds = binIds.clone();
return cloned;
}
public void addToArray(double scale, double []res) {
for (int i = 0; i < nBins; i++)
res[binIds[i]] += scale;
int numstart = numStart();
if (numIds != null) {
for (int i = 0; i < nNums; ++i)
res[numIds[i]] += scale * numVals[i];
} else for (int i = 0; i < numVals.length; ++i)
if (numVals[i] != 0)
res[numstart + i] += scale * numVals[i];
if (_intercept)
res[res.length - 1] += scale;
}
}
public final int getCategoricalId(int cid, double val) {
if(Double.isNaN(val)) return getCategoricalId(cid, -1);
int ival = (int)val;
if(ival != val) throw new IllegalArgumentException("Categorical id must be an integer or NA (missing).");
return getCategoricalId(cid,ival);
}
/**
* Get the offset into the expanded categorical
* @param cid the column id
* @param val the integer representation of the categorical level
* @return offset into the fullN set of columns
*/
public final int getCategoricalId(int cid, int val) {
boolean isIWV = isInteractionVec(cid);
if(val == -1) { // NA
if (isIWV && !_useAllFactorLevels)
val = _catNAFill[cid]-1; // need to -1 here because no -1 in 6 lines later for isIWV vector
else
val = _catNAFill[cid];
}
if (!_useAllFactorLevels && !isIWV) { // categorical interaction vecs drop reference level in a special way
val = val - 1;
}
if(val < 0) return -1; // column is to be skipped
int [] offs = fullCatOffsets();
int expandedVal = val + offs[cid];
if(expandedVal >= offs[cid+1]) { // previously unseen level
assert (isIWV && !_useAllFactorLevels) || _valid : "Categorical value out of bounds, got " + val + ", next cat starts at " + fullCatOffsets()[cid + 1];
if(_skipMissing)
return -1;
val = _catNAFill[cid];
if (!_useAllFactorLevels && !isIWV) { // categorical interaction vecs drop reference level in a special way
val = val - 1;
}
}
if (_catMap != null && _catMap[cid] != null) { // some levels are ignored?
val = _catMap[cid][val];
assert _useAllFactorLevels;
}
return val < 0?-1:val + _catOffsets[cid];
}
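// Illustration (hypothetical offsets): with _catOffsets = {0, 3, 7} and _useAllFactorLevels == true,
// level 2 of categorical column 1 maps to expanded index _catOffsets[1] + 2 = 5. With
// _useAllFactorLevels == false the reference level returns -1 (skipped) and higher levels shift down by one.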
public final int getCategoricalIdFromInteraction(int cid, int val) {
InteractionWrappedVec v = (InteractionWrappedVec) _adaptedFrame.vec(cid);
if (v.isCategorical())
return getCategoricalId(cid, val);
assert v.domain() != null : "No domain levels found for interactions! cid: " + cid + " val: " + val;
cid -= _cats;
if (! v._useAllFactorLevels)
val--;
assert val >= 0;
if (val >= _numOffsets[cid+1]) { // previously unseen interaction (aka new domain level)
assert _valid : "interaction value out of bounds, got " + val + ", next cat starts at " + _numOffsets[cid+1];
val = v.mode();
}
if( cid < _intLvls.length && _intLvls[cid]!=null ) {
assert _useAllFactorLevels; // useAllFactorLevels has to be defined on a global level (not just for the interaction)
val = Arrays.binarySearch(_intLvls[cid],val);
}
return val < 0?-1:val+_numOffsets[cid];
}
public final Row extractDenseRow(Chunk[] chunks, int rid, Row row) {
row.predictors_bad = false;
row.response_bad = false;
row.rid = rid + chunks[0].start();
row.cid = rid;
if(_weights)
row.weight = chunks[weightChunkId()].atd(rid);
if(row.weight == 0) return row;
if (_skipMissing) {
int N = _cats + _nums;
for (int i = 0; i < N; ++i)
if (chunks[i].isNA(rid)) {
row.predictors_bad = true;
return row;
}
}
int nbins = 0;
for (int i = 0; i < _cats; ++i) {
int cid = getCategoricalId(i,chunks[i].isNA(rid)? _catNAFill[i]:(int)chunks[i].at8(rid));
if(cid >= 0)
row.binIds[nbins++] = cid;
}
row.nBins = nbins;
final int n = _nums;
int numValsIdx=0; // since we're dense, need a second index to track interaction nums
for( int i=0;i<n;i++) {
if( isInteractionVec(_cats + i) ) { // categorical-categorical interaction is handled as plain categorical (above)... so if we have interactions either v1 is categorical, v2 is categorical, or neither are categorical
InteractionWrappedVec iwv = (InteractionWrappedVec)_adaptedFrame.vec(_cats+i);
int interactionOffset = getInteractionOffset(chunks,_cats+i,rid);
for(int offset=0;offset<iwv.expandedLength();++offset) {
if( i < _intLvls.length && _intLvls[i]!=null && Arrays.binarySearch(_intLvls[i],offset) < 0 ) continue; // skip the filtered out interactions
double d=0;
if( offset==interactionOffset ) d=chunks[_cats + i].atd(rid);
if( Double.isNaN(d) )
d = _numNAFill[numValsIdx];
if( _normMul != null && _normSub != null )
d = (d - _normSub[numValsIdx]) * _normMul[numValsIdx];
row.numVals[numValsIdx++]=d;
}
} else {
double d = chunks[_cats + i].atd(rid); // can be NA if skipMissing() == false
if (Double.isNaN(d))
d = _numNAFill[numValsIdx];
if (_normMul != null && _normSub != null)
d = (d - _normSub[numValsIdx]) * _normMul[numValsIdx];
row.numVals[numValsIdx++] = d;
}
}
for (int i = 0; i < _responses; ++i) {
row.response[i] = chunks[responseChunkId(i)].atd(rid);
if(Double.isNaN(row.response[i])) {
row.response_bad = true;
break;
}
if (_normRespMul != null)
row.response[i] = (row.response[i] - _normRespSub[i]) * _normRespMul[i];
}
if(_offset)
row.offset = chunks[offsetChunkId()].atd(rid);
return row;
}
public int getInteractionOffset(Chunk[] chunks, int cid, int rid) {
boolean useAllFactors = ((InteractionWrappedVec)chunks[cid].vec())._useAllFactorLevels;
InteractionWrappedVec.InteractionWrappedChunk c = (InteractionWrappedVec.InteractionWrappedChunk)chunks[cid];
if (c._c1IsCat) { // looking at enum by num or num by enum interaction here
if (!c._c[0].isNA(rid)) { // todo: add in other NA fill for enum columns
return (int)c._c[0].at8(rid)-(useAllFactors?0:1);
} else { // NA at c._c[0].at8(rid)
return (int)c._c[0].vec().mode()-(useAllFactors?0:1);
}
} else if (c._c2IsCat) {
if (!c._c[1].isNA(rid)) {
return (int)c._c[1].at8(rid)-(useAllFactors?0:1);
} else {
return (int)c._c[1].vec().mode()-(useAllFactors?0:1);
}
}
return 0; // no offset for num by num interaction column
}
public Vec getWeightsVec(){return _adaptedFrame.vec(weightChunkId());}
public Vec getOffsetVec(){return _adaptedFrame.vec(offsetChunkId());}
public Row newDenseRow(){return new Row(false,numNums(),_cats,_responses,0,0);} // TODO: _nums => numNums since currently extracting out interactions into dense
public Row newDenseRow(double[] numVals, long start) {
return new Row(false, numVals, null, null, 0, start);
}
public final class Rows {
public final int _nrows;
private final Row _denseRow;
private final Row [] _sparseRows;
public final boolean _sparse;
private final Chunk [] _chks;
private Rows(Chunk [] chks, boolean sparse) {
_nrows = chks[0]._len;
_sparse = sparse;
long start = chks[0].start();
if(sparse) {
_denseRow = null;
_chks = null;
_sparseRows = extractSparseRows(chks);
} else {
_denseRow = DataInfo.this.newDenseRow();
_chks = chks;
_sparseRows = null;
}
}
public Row row(int i) {return _sparse?_sparseRows[i]:extractDenseRow(_chks,i,_denseRow);}
}
public Rows rows(Chunk [] chks) {
int cnt = 0;
for(Chunk c:chks)
if(c.isSparseZero())
++cnt;
return rows(chks,cnt > (chks.length >> 1));
}
public Rows rows(Chunk [] chks, boolean sparse) {return new Rows(chks,sparse);}
/**
* Extract (sparse) rows from given chunks.
* Note: 0 remains 0 - _normSub of DataInfo isn't used (the mean shift applied during standardization is not reverted) - UNLESS an offset is specified (for GLM only)
* Essentially turns the dataset 90 degrees.
* @param chunks - chunk of dataset
* @return array of sparse rows
*/
public final Row[] extractSparseRows(Chunk [] chunks) {
Row[] rows = new Row[chunks[0]._len];
long startOff = chunks[0].start();
for (int i = 0; i < rows.length; ++i) {
rows[i] = new Row(true, Math.min(_nums, 16), _cats, _responses, i, startOff); // if sparse, _nums is the correct number of nonzero values! i.e., do not use numNums()
rows[i].rid = chunks[0].start() + i;
if(_offset) {
rows[i].offset = chunks[offsetChunkId()].atd(i);
if(Double.isNaN(rows[i].offset)) {
rows[i].predictors_bad = true;
continue;
}
}
if(_weights) {
rows[i].weight = chunks[weightChunkId()].atd(i);
if(Double.isNaN(rows[i].weight))
rows[i].predictors_bad = true;
}
}
// categoricals
for (int i = 0; i < _cats; ++i) {
for (int r = 0; r < chunks[0]._len; ++r) {
Row row = rows[r];
boolean isMissing = chunks[i].isNA(r);
if(_skipMissing && isMissing){
row.predictors_bad = true;
continue;
}
int cid = getCategoricalId(i,isMissing? -1:(int)chunks[i].at8(r));
if(cid >=0)
row.binIds[row.nBins++] = cid;
}
}
// generic numbers + interactions
int interactionOffset=0;
for (int cid = 0; cid < _nums; ++cid) {
Chunk c = chunks[_cats + cid];
int oldRow = -1;
if (c instanceof InteractionWrappedVec.InteractionWrappedChunk) { // for each row, only 1 value in an interaction is 'hot' all other values are off (i.e., are 0)
InteractionWrappedVec iwv = (InteractionWrappedVec)c.vec();
for(int r=0;r<c._len;++r) { // the vec is "vertically" dense and "horizontally" sparse (i.e., every row has one, and only one, value)
Row row = rows[r];
if( c.isNA(r) && _skipMissing)
row.predictors_bad = true;
if(row.predictors_bad) continue;
int cidVirtualOffset = getInteractionOffset(chunks,_cats+cid,r); // the "virtual" offset into the hot-expanded interaction
if( cidVirtualOffset>=0 ) {
if( cid < _intLvls.length && _intLvls[cid]!=null && Arrays.binarySearch(_intLvls[cid],cidVirtualOffset) < 0 ) continue; // skip the filtered out interactions
if( c.atd(r)==0 ) continue;
double d = c.atd(r);
if( Double.isNaN(d) )
d = _numNAFill[interactionOffset+cidVirtualOffset]; // FIXME: if this produces a "true" NA then should sub with mean? with?
if (_normMul != null)
d *= _normMul[interactionOffset+cidVirtualOffset];
row.addNum(numStart()+interactionOffset+cidVirtualOffset, d);
}
}
interactionOffset+=nextNumericIdx(cid);
} else {
for (int r = c.nextNZ(-1, _imputeMissing); r < c._len; r = c.nextNZ(r, _imputeMissing)) {
if (c.atd(r) == 0) continue;
assert r > oldRow;
oldRow = r; Row row = rows[r];
if (c.isNA(r) && _skipMissing)
row.predictors_bad = true;
if (row.predictors_bad) continue;
double d = c.atd(r);
if (Double.isNaN(d))
d = _numNAFill[cid];
if (_normMul != null)
d *= _normMul[interactionOffset];
row.addNum(numStart()+interactionOffset,d);
}
interactionOffset++;
}
}
// response(s)
for (int i = 1; i <= _responses; ++i) {
int rid = responseChunkId(i-1);
Chunk rChunk = chunks[rid];
for (int r = 0; r < chunks[0]._len; ++r) {
Row row = rows[r];
row.response[i-1] = rChunk.atd(r);
if(Double.isNaN(row.response[i-1])) {
row.response_bad = true;
}
if (_normRespMul != null) {
row.response[i-1] = (row.response[i-1] - _normRespSub[i-1]) * _normRespMul[i-1];
}
}
}
return rows;
}
public DataInfo scoringInfo(String[] names, Frame adaptFrame) {
return scoringInfo(names, adaptFrame, -1, true);
}
/**
* Creates a scoringInfo from a DataInfo instance created during model training
* @param names column names
* @param adaptFrame adapted frame
* @param nResponses number of responses (-1 indicates autodetect: 0/1 based on presence of a single response)
* @param fixIVW whether to force the global useAllFactorLevels flag onto InteractionWrappedVecs (GLM behavior)
* @return a DataInfo suitable for scoring on the adapted frame
*/
public DataInfo scoringInfo(String[] names, Frame adaptFrame, int nResponses, boolean fixIVW) {
DataInfo res = IcedUtils.deepCopy(this);
res._normMul = null;
res._normRespSub = null;
res._normRespMul = null;
res._normSub = null;
res._predictor_transform = TransformType.NONE;
res._response_transform = TransformType.NONE;
res._adaptedFrame = adaptFrame;
res._weights = _weights && adaptFrame.find(names[weightChunkId()]) != -1;
res._offset = _offset && adaptFrame.find(names[offsetChunkId()]) != -1;
res._fold = _fold && adaptFrame.find(names[foldChunkId()]) != -1;
res._treatment = _treatment && adaptFrame.find(names[treatmentChunkId()]) != -1;
if (nResponses != -1) {
res._responses = nResponses;
} else {
int resId = adaptFrame.find(names[responseChunkId(0)]);
if (resId == -1 || adaptFrame.vec(resId).isBad())
res._responses = 0;
else // NOTE: DataInfo can have extra columns encoded as response, e.g. helper columns when doing Multinomial IRLSM; those are not needed for scoring.
res._responses = 1;
}
res._valid = true;
res._interactions=_interactions;
res._interactionSpec=_interactionSpec;
if (fixIVW) {
// ensure that vecs are in the DKV, may have been swept up in the Scope.exit call
for (Vec v : res._adaptedFrame.vecs())
if (v instanceof InteractionWrappedVec) {
((InteractionWrappedVec) v)._useAllFactorLevels = _useAllFactorLevels;
((InteractionWrappedVec) v)._skipMissing = _skipMissing;
DKV.put(v);
}
}
return res;
}
/**
* Creates a DataInfo for scoring on a test Frame from a DataInfo instance created during model training
* This is a lightweight version of the method, only usable for models that don't use advanced features of DataInfo (e.g. interaction terms).
* @return DataInfo for scoring
*/
public DataInfo scoringInfo() {
DataInfo res = IcedUtils.deepCopy(this);
res._valid = true;
return res;
}
public interface Imputer {
int imputeCat(String name, Vec v, boolean useAllFactorLevels);
double imputeNum(String name, Vec v);
double[] imputeInteraction(String name, InteractionWrappedVec iv, double[] means);
}
public static class MeanImputer implements Imputer {
@Override
public int imputeCat(String name, Vec v, boolean useAllFactorLevels) {
return DataInfo.imputeCat(v, useAllFactorLevels);
}
@Override
public double imputeNum(String name, Vec v) {
return v.mean();
}
@Override
public double[] imputeInteraction(String name, InteractionWrappedVec iv, double[] means) {
return means;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/FrameTask.java
|
package hex;
import water.*;
import water.fvec.Chunk;
import water.fvec.NewChunk;
import water.util.ArrayUtils;
import water.util.RandomUtils;
import java.util.Arrays;
import java.util.Random;
public abstract class FrameTask<T extends FrameTask<T>> extends MRTask<T>{
protected boolean _sparse;
protected transient DataInfo _dinfo;
public DataInfo dinfo() { return _dinfo; }
final Key _dinfoKey;
final int [] _activeCols;
final protected Key<Job> _jobKey;
protected float _useFraction = 1.0f;
private final long _seed;
protected boolean _shuffle = false;
private final int _iteration;
public FrameTask(Key<Job> jobKey, DataInfo dinfo) {
this(jobKey, dinfo, 0xDECAFBEE, -1, false);
}
public FrameTask(Key<Job> jobKey, DataInfo dinfo, long seed, int iteration, boolean sparse) {
this(jobKey,dinfo==null?null:dinfo._key,dinfo==null?null:dinfo._activeCols,seed,iteration, sparse,null);
}
public FrameTask(Key<Job> jobKey, DataInfo dinfo, long seed, int iteration, boolean sparse, H2O.H2OCountedCompleter cmp) {
this(jobKey,dinfo==null?null:dinfo._key,dinfo==null?null:dinfo._activeCols,seed,iteration, sparse,cmp);
}
private FrameTask(Key<Job> jobKey, Key dinfoKey, int [] activeCols,long seed, int iteration, boolean sparse, H2O.H2OCountedCompleter cmp) {
super(cmp);
_jobKey = jobKey;
_dinfoKey = dinfoKey;
_activeCols = activeCols;
_seed = seed;
_iteration = iteration;
_sparse = sparse;
}
@Override protected void setupLocal(){
DataInfo dinfo = DKV.get(_dinfoKey).get();
_dinfo = _activeCols == null?dinfo:dinfo.filterExpandedColumns(_activeCols);
}
@Override protected void closeLocal(){ _dinfo = null;}
/**
* Method to process one row of the data. See below for the separate mini-batch logic.
* Numeric and categorical values are passed separately, as is response.
* Categoricals are passed as absolute indexes into the expanded beta vector, 0-levels are skipped
* (so the number of passed categoricals will not be the same for every row).
*
* Categorical expansion/indexing:
* Categoricals are placed in the beginning of the beta vector.
* Each cat variable with n levels is expanded into n-1 independent binary variables.
* Indexes in cats[] will point to the appropriate coefficient in the beta vector, so e.g.
* assume we have 2 categorical columns both with values A,B,C, then the following rows will have following indexes:
* A,A - ncats = 0, we do not pass any categorical here
* A,B - ncats = 1, indexes = [2]
* B,B - ncats = 2, indexes = [0,2]
* and so on
*
* @param gid - global id of this row, in [0,_adaptedFrame.numRows())
*/
protected void processRow(long gid, DataInfo.Row r){throw new RuntimeException("should've been overridden!");}
protected void processRow(long gid, DataInfo.Row r, NewChunk [] outputs){throw new RuntimeException("should've been overridden!");}
// mini-batch version - for DL only for now
protected void processRow(long gid, DataInfo.Row r, int mb){throw new RuntimeException("should've been overridden!");}
protected boolean skipRow(long gid) { return false; }
/**
* Mini-Batch update of model parameters
* @param seed
* @param responses
* @param offsets
* @param n actual number of rows in this minibatch
*/
protected void processMiniBatch(long seed, double[] responses, double[] offsets, int n){}
/**
* Note: If this is overridden, then processMiniBatch must be overridden as well to perform the model/weight mini-batch update
* @return Return the mini-batch size
*/
protected int getMiniBatchSize(){ return 0; }
/**
* Override this to initialize at the beginning of chunk processing.
* @return whether or not to process this chunk
*/
protected boolean chunkInit(){ return true; }
/**
* Override this to do post-chunk processing work.
* @param n Number of processed rows
*/
protected void chunkDone(long n){}
/**
* Extracts the values, applies standardization/normalization to numerics, adds appropriate offsets to categoricals,
* and adapts response according to the CaseMode/CaseValue if set.
*/
@Override public void map(Chunk [] chunks, NewChunk [] outputs) {
if(_jobKey != null && _jobKey.get() != null && _jobKey.get().stop_requested()) throw new Job.JobCancelledException();
final int nrows = chunks[0]._len;
final long offset = chunks[0].start();
boolean doWork = chunkInit();
if (!doWork) return;
final boolean obs_weights = _dinfo._weights
&& !_fr.vecs()[_dinfo.weightChunkId()].isConst() //if all constant weights (such as 1) -> doesn't count as obs weights
&& !(_fr.vecs()[_dinfo.weightChunkId()].isBinary()); //special case for cross-val -> doesn't count as obs weights
final double global_weight_sum = obs_weights ? Math.round(_fr.vecs()[_dinfo.weightChunkId()].mean() * _fr.numRows()) : 0;
DataInfo.Row row = null;
DataInfo.Row[] rows = null;
if (_sparse)
rows = _dinfo.extractSparseRows(chunks);
else
row = _dinfo.newDenseRow();
double[] weight_map = null;
double relative_chunk_weight = 1;
//TODO: store node-local helper arrays in _dinfo -> avoid re-allocation and construction
if (obs_weights) {
weight_map = new double[nrows];
double weight_sum = 0;
for (int i = 0; i < nrows; ++i) {
row = _sparse ? rows[i] : _dinfo.extractDenseRow(chunks, i, row);
weight_sum += row.weight;
weight_map[i] = weight_sum;
assert (i == 0 || row.weight == 0 || weight_map[i] > weight_map[i - 1]);
}
if (weight_sum > 0) {
ArrayUtils.div(weight_map, weight_sum); //normalize to 0...1
relative_chunk_weight = global_weight_sum * nrows / _fr.numRows() / weight_sum;
} else return; //nothing to do here - all rows have 0 weight
}
//Example:
// _useFraction = 0.8 -> 1 repeat with fraction = 0.8
// _useFraction = 1.0 -> 1 repeat with fraction = 1.0
// _useFraction = 1.1 -> 2 repeats with fraction = 0.55
// _useFraction = 2.1 -> 3 repeats with fraction = 0.7
// _useFraction = 3.0 -> 3 repeats with fraction = 1.0
final int repeats = (int) Math.ceil(_useFraction * relative_chunk_weight);
final float fraction = (float) (_useFraction * relative_chunk_weight) / repeats;
assert (fraction <= 1.0);
final boolean sample = (fraction < 0.999 || obs_weights || _shuffle);
final long chunkSeed = (0x8734093502429734L + _seed + offset) * (_iteration + 0x9823423497823423L);
final Random skip_rng = sample ? RandomUtils.getRNG(chunkSeed) : null;
int[] shufIdx = skip_rng == null ? null : new int[nrows];
if (skip_rng != null) {
for (int i = 0; i < nrows; ++i) shufIdx[i] = i;
ArrayUtils.shuffleArray(shufIdx, skip_rng);
}
double[] responses = new double[getMiniBatchSize()];
double[] offsets = new double[getMiniBatchSize()];
long seed = 0;
final int miniBatchSize = getMiniBatchSize();
long num_processed_rows = 0;
long num_skipped_rows = 0;
int miniBatchCounter = 0;
for(int rep = 0; rep < repeats; ++rep) {
for(int row_idx = 0; row_idx < nrows; ++row_idx){
int r = sample ? -1 : 0;
// only train with a given number of training samples (fraction*nrows)
if (sample && !obs_weights && skip_rng.nextDouble() > fraction) continue;
if (obs_weights && num_processed_rows % 2 == 0) { //every second row is randomly sampled -> that way we won't "forget" rare rows
// importance sampling based on inverse of cumulative distribution
double key = skip_rng.nextDouble();
r = Arrays.binarySearch(weight_map, 0, nrows, key);
// Log.info(Arrays.toString(weight_map));
// Log.info("key: " + key + " idx: " + (r >= 0 ? r : (-r-1)));
if (r<0) r=-r-1;
assert(r == 0 || weight_map[r] > weight_map[r-1]);
} else if (r == -1){
r = shufIdx[row_idx];
// if we have weights, and we did the %2 skipping above, then we need to find an alternate row with non-zero weight
while (obs_weights && ((r == 0 && weight_map[r] == 0) || (r > 0 && weight_map[r] == weight_map[r-1]))) {
r = skip_rng.nextInt(nrows); //random sampling with replacement
}
} else {
assert(!obs_weights);
r = row_idx; //linear scan - slightly faster
}
assert(r >= 0 && r<=nrows);
seed = offset + rep * nrows + r;
if (skipRow(seed)) {
num_skipped_rows++;
continue;
}
row = _sparse ? rows[r] : _dinfo.extractDenseRow(chunks, r, row);
if(row.isBad() || row.weight == 0) {
num_skipped_rows++;
continue;
} else {
assert(row.weight > 0); //check that we never process a row that was held out via row.weight = 0
if (outputs != null && outputs.length > 0) {
assert(miniBatchSize==0);
processRow(seed, row, outputs);
}
else {
if (miniBatchSize > 0) { //DL
processRow(seed, row, miniBatchCounter);
responses[miniBatchCounter] = row.response != null && row.response.length > 0 ? row.response(0) : 0 /*autoencoder dummy*/;
offsets[miniBatchCounter] = row.offset;
miniBatchCounter++;
}
else //all other algos
processRow(seed, row);
}
}
num_processed_rows++;
if (miniBatchCounter > 0 && miniBatchCounter % miniBatchSize == 0) {
processMiniBatch(seed, responses, offsets, miniBatchCounter);
miniBatchCounter = 0;
}
}
}
if (miniBatchCounter>0) {
processMiniBatch(seed, responses, offsets, miniBatchCounter); //last bit
}
assert(fraction != 1 || num_processed_rows + num_skipped_rows == repeats * nrows);
chunkDone(num_processed_rows);
}
public static class ExtractDenseRow extends MRTask<ExtractDenseRow> {
final private DataInfo _di; //INPUT
final private long _gid; //INPUT
public DataInfo.Row _row; //OUTPUT
public ExtractDenseRow(DataInfo di, long globalRowId) { _di = di; _gid = globalRowId; }
@Override
public void map(Chunk[] cs) {
// fill up _row with the data of row with global id _gid
long start = cs[0].start();
if (start <= _gid && cs[0].start()+cs[0].len() > _gid) {
_row = _di.newDenseRow();
_di.extractDenseRow(cs, (int)(_gid-cs[0].start()), _row);
}
}
@Override
public void reduce(ExtractDenseRow mrt) {
if (mrt._row != null) {
assert(this._row == null); //only one thread actually filled the output _row
_row = mrt._row;
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/FrameTask2.java
|
package hex;
import hex.DataInfo.Row;
import water.H2O.H2OCountedCompleter;
import water.Job;
import water.Key;
import water.MRTask;
import water.fvec.Chunk;
import water.util.FrameUtils;
/**
* Created by tomasnykodym on 6/1/15.
*
* Frame task updated with sparse data support. Separate class for now,
* should be merged with FrameTask(1) at some point.
*/
public abstract class FrameTask2<T extends FrameTask2<T>> extends MRTask<T> {
protected boolean _sparse;
final Key<Job> _jobKey;
protected final DataInfo _dinfo;
public FrameTask2(H2OCountedCompleter cmp, DataInfo dinfo, Key<Job> jobKey){
super(cmp);
_dinfo = dinfo;
_jobKey = jobKey;
_sparse = handlesSparseData() && FrameUtils.sparseRatio(dinfo._adaptedFrame) < .5;
}
public T setSparse(boolean b) { _sparse = b; return self();}
/**
* Initialization method, called once per "chunk".
* Typically creates the result object used by processRow to store results.
*
*/
public void chunkInit(){}
/**
* Perform an action after processing one "chunk" of data.
*/
public void chunkDone(){}
private transient Job _job;
@Override
public void setupLocal(){if(_jobKey != null)_job = _jobKey.get();}
public boolean handlesSparseData(){return false;}
protected abstract void processRow(Row r);
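// map() drives the per-chunk loop below: rows are extracted either sparsely (one Row per chunk row,
// nonzeros only) or densely (a single reusable Row), and rows flagged bad or carrying zero weight
// are skipped before being handed to processRow.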
@Override public void map(Chunk[] chks) {
if(_job != null && _job.stop_requested()) throw new Job.JobCancelledException(_job);
chunkInit();
// compute
if(_sparse) {
for(Row r:_dinfo.extractSparseRows(chks)) {
if(!r.isBad() && r.weight != 0)
processRow(r);
}
} else {
Row row = _dinfo.newDenseRow();
for(int r = 0 ; r < chks[0]._len; ++r) {
_dinfo.extractDenseRow(chks, r, row);
if(!row.isBad() && row.weight != 0)
processRow(row);
}
}
chunkDone();
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/adaboost/AdaBoost.java
|
package hex.adaboost;
import com.google.gson.*;
import hex.Model;
import hex.ModelBuilder;
import hex.ModelCategory;
import hex.deeplearning.DeepLearning;
import hex.deeplearning.DeepLearningModel;
import hex.glm.GLM;
import hex.glm.GLMModel;
import hex.tree.drf.DRF;
import hex.tree.drf.DRFModel;
import hex.tree.gbm.GBM;
import hex.tree.gbm.GBMModel;
import org.apache.log4j.Logger;
import water.*;
import water.exceptions.H2OModelBuilderIllegalArgumentException;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.Timer;
import water.util.TwoDimTable;
import java.lang.reflect.Field;
import java.util.*;
/**
* Implementation of AdaBoost algorithm based on
*
* Raul Rojas, "Adaboost and the Super Bowl of Classifiers A Tutorial Introduction to Adaptive Boosting"
* Alexandru Niculescu-Mizil and Richard A. Caruana, "Obtaining Calibrated Probabilities from Boosting"
* Y. Freund, R. Schapire, “A Decision-Theoretic Generalization of on-Line Learning and an Application to Boosting”, 1995.
*
* @author Adam Valenta
*/
public class AdaBoost extends ModelBuilder<AdaBoostModel, AdaBoostModel.AdaBoostParameters, AdaBoostModel.AdaBoostOutput> {
private static final Logger LOG = Logger.getLogger(AdaBoost.class);
private static final int MAX_LEARNERS = 100_000;
private AdaBoostModel _model;
private String _weightsName = "weights";
private Gson _gsonParser;
// Called from an http request
public AdaBoost(AdaBoostModel.AdaBoostParameters parms) {
super(parms);
init(false);
}
public AdaBoost(boolean startup_once) {
super(new AdaBoostModel.AdaBoostParameters(), startup_once);
}
@Override
public boolean havePojo() {
return false;
}
@Override
public boolean haveMojo() {
return false;
}
@Override
public void init(boolean expensive) {
super.init(expensive);
if(_parms._nlearners < 1 || _parms._nlearners > MAX_LEARNERS)
error("n_estimators", "Parameter n_estimators must be in interval [1, "
+ MAX_LEARNERS + "] but it is " + _parms._nlearners);
if (_parms._weak_learner == AdaBoostModel.Algorithm.AUTO) {
_parms._weak_learner = AdaBoostModel.Algorithm.DRF;
}
if (_parms._weights_column != null) {
// _parms._weights_column cannot be used all the time since it breaks scoring
_weightsName = _parms._weights_column;
}
if( !(0. < _parms._learn_rate && _parms._learn_rate <= 1.0) ) {
error("learn_rate", "learn_rate must be between 0 and 1");
}
if (useCustomWeakLearnerParameters()) {
try {
_gsonParser = new GsonBuilder()
.setFieldNamingStrategy(new PrecedingUnderscoreNamingStrategy())
.create();
_gsonParser.fromJson(_parms._weak_learner_params, JsonObject.class);
} catch (JsonSyntaxException syntaxException) {
error("weak_learner_params", "Provided parameters are not in the valid json format. Got error: " + syntaxException.getMessage());
}
}
}
private boolean useCustomWeakLearnerParameters() {
return _parms._weak_learner_params != null && !_parms._weak_learner_params.isEmpty();
}
private class PrecedingUnderscoreNamingStrategy implements FieldNamingStrategy
{
public String translateName(Field field)
{
String fieldName =
FieldNamingPolicy.LOWER_CASE_WITH_UNDERSCORES.translateName(field);
if (fieldName.startsWith("_"))
{
fieldName = fieldName.substring(1);
}
return fieldName;
}
}
private class AdaBoostDriver extends Driver {
@Override
public void computeImpl() {
_model = null;
try {
init(true);
if (error_count() > 0) {
throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(AdaBoost.this);
}
_model = new AdaBoostModel(dest(), _parms,
new AdaBoostModel.AdaBoostOutput(AdaBoost.this));
_model.delete_and_lock(_job);
buildAdaboost();
LOG.info(_model.toString());
} finally {
if (_model != null)
_model.unlock(_job);
}
}
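/**
 * One boosting round, as implemented below: train a weak learner on the weighted frame, compute its
 * weighted error e_m = We / W (CountWeTask), set its vote alpha_m = learn_rate * ln((1 - e_m) / e_m),
 * and reweight the training rows via UpdateWeightsTask (by AdaBoost convention, upweighting the rows
 * the learner misclassified) so the next learner focuses on them.
 */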
private void buildAdaboost() {
_model._output.alphas = new double[(int)_parms._nlearners];
_model._output.models = new Key[(int)_parms._nlearners];
Frame _trainWithWeights;
if (_parms._weights_column == null) {
_trainWithWeights = new Frame(train());
Vec weights = _trainWithWeights.anyVec().makeCons(1,1,null,null)[0];
_weightsName = _trainWithWeights.uniquify(_weightsName); // be sure that we are not accidentally using some column in the train
_trainWithWeights.add(_weightsName, weights);
DKV.put(_trainWithWeights);
Scope.track(weights);
} else {
_trainWithWeights = _parms.train();
}
for (int n = 0; n < _parms._nlearners; n++) {
Timer timer = new Timer();
ModelBuilder job = chooseWeakLearner(_trainWithWeights);
job._parms._seed += n;
Model model = (Model) job.trainModel().get();
DKV.put(model);
Scope.untrack(model._key);
_model._output.models[n] = model._key;
Frame predictions = model.score(_trainWithWeights);
Scope.track(predictions);
CountWeTask countWe = new CountWeTask().doAll(_trainWithWeights.vec(_weightsName), _trainWithWeights.vec(_parms._response_column), predictions.vec("predict"));
double eM = countWe.We / countWe.W;
double alphaM = _parms._learn_rate * Math.log((1 - eM) / eM);
_model._output.alphas[n] = alphaM;
UpdateWeightsTask updateWeightsTask = new UpdateWeightsTask(alphaM);
updateWeightsTask.doAll(_trainWithWeights.vec(_weightsName), _trainWithWeights.vec(_parms._response_column), predictions.vec("predict"));
_job.update(1);
_model.update(_job);
LOG.info((n + 1) + ". estimator was built in " + timer.toString());
LOG.info("*********************************************************************");
}
if (_trainWithWeights != _parms.train()) {
DKV.remove(_trainWithWeights._key);
}
_model._output._model_summary = createModelSummaryTable();
}
}
@Override
protected Driver trainModelImpl() {
return new AdaBoostDriver();
}
@Override
public BuilderVisibility builderVisibility() {
return BuilderVisibility.Experimental;
}
@Override
public ModelCategory[] can_build() {
return new ModelCategory[]{
ModelCategory.Binomial,
};
}
@Override
public boolean isSupervised() {
return true;
}
private ModelBuilder chooseWeakLearner(Frame frame) {
switch (_parms._weak_learner) {
case GLM:
return getGLMWeakLearner(frame);
case GBM:
return getGBMWeakLearner(frame);
case DEEP_LEARNING:
return getDeepLearningWeakLearner(frame);
default:
case DRF:
return getDRFWeakLearner(frame);
}
}
private DRF getDRFWeakLearner(Frame frame) {
DRFModel.DRFParameters parms = useCustomWeakLearnerParameters() ? _gsonParser.fromJson(_parms._weak_learner_params, DRFModel.DRFParameters.class) : new DRFModel.DRFParameters();
parms._train = frame._key;
parms._response_column = _parms._response_column;
parms._weights_column = _weightsName;
parms._seed = _parms._seed;
if (!useCustomWeakLearnerParameters()) {
parms._mtries = 1;
parms._min_rows = 1;
parms._ntrees = 1;
parms._sample_rate = 1;
parms._max_depth = 1;
}
return new DRF(parms);
}
private GLM getGLMWeakLearner(Frame frame) {
GLMModel.GLMParameters parms = useCustomWeakLearnerParameters() ? _gsonParser.fromJson(_parms._weak_learner_params, GLMModel.GLMParameters.class) : new GLMModel.GLMParameters();
parms._train = frame._key;
parms._response_column = _parms._response_column;
parms._weights_column = _weightsName;
parms._seed = _parms._seed;
return new GLM(parms);
}
private GBM getGBMWeakLearner(Frame frame) {
GBMModel.GBMParameters parms = useCustomWeakLearnerParameters() ? _gsonParser.fromJson(_parms._weak_learner_params, GBMModel.GBMParameters.class) : new GBMModel.GBMParameters();
parms._train = frame._key;
parms._response_column = _parms._response_column;
parms._weights_column = _weightsName;
if (!useCustomWeakLearnerParameters()) {
parms._min_rows = 1;
parms._ntrees = 1;
parms._sample_rate = 1;
parms._max_depth = 1;
parms._seed = _parms._seed;
}
return new GBM(parms);
}
private DeepLearning getDeepLearningWeakLearner(Frame frame) {
DeepLearningModel.DeepLearningParameters parms = useCustomWeakLearnerParameters() ? _gsonParser.fromJson(_parms._weak_learner_params, DeepLearningModel.DeepLearningParameters.class) :new DeepLearningModel.DeepLearningParameters();
parms._train = frame._key;
parms._response_column = _parms._response_column;
parms._weights_column = _weightsName;
parms._seed = _parms._seed;
if (!useCustomWeakLearnerParameters()) {
parms._epochs = 10;
parms._hidden = new int[]{2};
}
return new DeepLearning(parms);
}
public TwoDimTable createModelSummaryTable() {
List<String> colHeaders = new ArrayList<>();
List<String> colTypes = new ArrayList<>();
List<String> colFormat = new ArrayList<>();
colHeaders.add("Number of weak learners"); colTypes.add("int"); colFormat.add("%d");
colHeaders.add("Learn rate"); colTypes.add("int"); colFormat.add("%d");
colHeaders.add("Weak learner"); colTypes.add("int"); colFormat.add("%d");
colHeaders.add("Seed"); colTypes.add("long"); colFormat.add("%d");
final int rows = 1;
TwoDimTable table = new TwoDimTable(
"Model Summary", null,
new String[rows],
colHeaders.toArray(new String[0]),
colTypes.toArray(new String[0]),
colFormat.toArray(new String[0]),
"");
int row = 0;
int col = 0;
table.set(row, col++, _parms._nlearners);
table.set(row, col++, _parms._learn_rate);
table.set(row, col++, _parms._weak_learner.toString());
table.set(row, col, _parms._seed);
return table;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/adaboost/AdaBoostModel.java
|
package hex.adaboost;
import hex.Model;
import hex.ModelCategory;
import hex.ModelMetrics;
import hex.ModelMetricsBinomial;
import org.apache.log4j.Logger;
import water.*;
public class AdaBoostModel extends Model<AdaBoostModel, AdaBoostModel.AdaBoostParameters, AdaBoostModel.AdaBoostOutput> {
private static final Logger LOG = Logger.getLogger(AdaBoostModel.class);
public enum Algorithm {DRF, GLM, GBM, DEEP_LEARNING, AUTO}
public AdaBoostModel(Key<AdaBoostModel> selfKey, AdaBoostParameters parms,
AdaBoostOutput output) {
super(selfKey, parms, output);
}
@Override
public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) {
if (_output.getModelCategory() == ModelCategory.Binomial) {
return new ModelMetricsBinomial.MetricBuilderBinomial(domain);
}
throw H2O.unimpl("AdaBoost currently support only binary classification");
}
@Override
protected String[] makeScoringNames(){
return new String[]{"predict", "p0", "p1"};
}
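// Descriptive note (added comment, not part of the original source): each weak learner below votes
// -1 or +1 (class 0 or 1), weighted by its alpha. The sign of the weighted vote
// F(x) = sum_i alpha_i * h_i(x) decides preds[0], while preds[2] = 1 / (1 + exp(-2 * F(x))) maps F(x)
// to a probability-like score for class 1 and preds[1] = 1 - preds[2].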
@Override
protected double[] score0(double[] data, double[] preds) {
double alphas0 = 0;
double alphas1 = 0;
double linearCombination = 0;
for (int i = 0; i < _output.alphas.length; i++) {
Model model = DKV.getGet(_output.models[i]);
if (model.score(data) == 0) {
linearCombination += _output.alphas[i]*-1;
alphas0 += _output.alphas[i];
} else {
linearCombination += _output.alphas[i];
alphas1 += _output.alphas[i];
}
}
preds[0] = alphas0 > alphas1 ? 0 : 1;
preds[2] = 1/(1 + Math.exp(-2*linearCombination));
preds[1] = 1 - preds[2];
return preds;
}
@Override protected boolean needsPostProcess() { return false; /* pred[0] is already set by score0 */ }
public static class AdaBoostOutput extends Model.Output {
public double[] alphas;
public Key<Model>[] models;
public AdaBoostOutput(AdaBoost adaBoostModel) {
super(adaBoostModel);
}
}
@Override
protected Futures remove_impl(Futures fs, boolean cascade) {
for (Key<Model> iTreeKey : _output.models) {
Keyed.remove(iTreeKey, fs, true);
}
return super.remove_impl(fs, cascade);
}
@Override
protected AutoBuffer writeAll_impl(AutoBuffer ab) {
for (Key<Model> iTreeKey : _output.models) {
ab.putKey(iTreeKey);
}
return super.writeAll_impl(ab);
}
@Override
protected Keyed readAll_impl(AutoBuffer ab, Futures fs) {
for (Key<Model> iTreeKey : _output.models) {
ab.getKey(iTreeKey, fs);
}
return super.readAll_impl(ab,fs);
}
public static class AdaBoostParameters extends Model.Parameters {
/**
* Number of weak learners to train. Defaults to 50.
*/
public int _nlearners;
/**
* Choose a weak learner type. Defaults to DRF.
*/
public Algorithm _weak_learner;
/**
* Specify how quickly the training converges; a number in (0, 1]. Defaults to 0.5.
*/
public double _learn_rate;
/**
* Custom _weak_learner parameters.
*/
public String _weak_learner_params;
@Override
public String algoName() {
return "AdaBoost";
}
@Override
public String fullName() {
return "AdaBoost";
}
@Override
public String javaName() {
return AdaBoostModel.class.getName();
}
@Override
public long progressUnits() {
return _nlearners;
}
public AdaBoostParameters() {
super();
_nlearners = 50;
_weak_learner = Algorithm.AUTO;
_learn_rate = 0.5;
_weak_learner_params = "";
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/adaboost/CountWeTask.java
|
package hex.adaboost;
import water.MRTask;
import water.fvec.Chunk;
/**
* Count the total sum of weights (W) and the sum of weights of misclassified rows (We) for the AdaBoost update
*/
class CountWeTask extends MRTask<CountWeTask> {
double W = 0;
double We = 0;
@Override
public void map(Chunk weights, Chunk response, Chunk predict) {
for (int row = 0; row < weights._len; row++) {
double weight = weights.atd(row);
W += weight;
if (response.at8(row) != predict.at8(row)) {
We += weight;
}
}
}
@Override
public void reduce(CountWeTask mrt) {
W += mrt.W;
We += mrt.We;
}
}
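// Descriptive note (added comment, not part of the original source): W and We feed the classical
// AdaBoost error estimate. A hedged sketch of how a driver would typically use them, assuming the
// textbook formulation (the exact learn-rate handling in H2O's driver may differ):
//
//   CountWeTask t = new CountWeTask().doAll(weightsVec, responseVec, predictVec);
//   double err = t.We / t.W;                                  // weighted error of the weak learner
//   double alphaM = learnRate * Math.log((1 - err) / err);    // weight ("alpha") of the weak learner
//   new UpdateWeightsTask(alphaM).doAll(weightsVec, responseVec, predictVec);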
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/adaboost/UpdateWeightsTask.java
|
package hex.adaboost;
import water.MRTask;
import water.fvec.Chunk;
/**
* Update weights according to AdaBoost algorithm
*/
class UpdateWeightsTask extends MRTask<UpdateWeightsTask> {
double expAm;
double expAmInverse;
public UpdateWeightsTask(double alphaM) {
expAm = Math.exp(alphaM);
expAmInverse = Math.exp(-alphaM);
}
@Override
public void map(Chunk weights, Chunk response, Chunk predict) {
for (int row = 0; row < weights._len; row++) {
double weight = weights.atd(row);
if (response.at8(row) != predict.at8(row)) {
weights.set(row, weight * expAm);
} else {
weights.set(row, weight * expAmInverse);
}
}
}
}
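// Illustrative sketch (added, not part of the H2O sources): the same exponential re-weighting applied
// to a plain array, with the renormalization step that the classical AdaBoost description uses. The
// class and method names here are invented for the example only.
class UpdateWeightsSketch {
  static double[] reweight(double[] w, boolean[] misclassified, double alphaM) {
    double[] out = new double[w.length];
    double sum = 0;
    for (int i = 0; i < w.length; i++) {
      // misclassified rows are up-weighted, correctly classified rows are down-weighted
      out[i] = w[i] * Math.exp(misclassified[i] ? alphaM : -alphaM);
      sum += out[i];
    }
    // classical AdaBoost renormalizes so the weights sum to 1; the MRTask above does not,
    // presumably because CountWeTask re-computes the total weight W on the next iteration anyway
    for (int i = 0; i < w.length; i++) out[i] /= sum;
    return out;
  }
}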
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/aggregator/Aggregator.java
|
package hex.aggregator;
import hex.*;
import hex.util.LinearAlgebraUtils;
import water.*;
import water.exceptions.H2OModelBuilderIllegalArgumentException;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.ArrayUtils;
import water.util.IcedInt;
import water.util.Log;
import java.util.Arrays;
public class Aggregator extends ModelBuilder<AggregatorModel,AggregatorModel.AggregatorParameters,AggregatorModel.AggregatorOutput> {
@Override
public ToEigenVec getToEigenVec() {
return LinearAlgebraUtils.toEigen;
}
@Override public BuilderVisibility builderVisibility() { return BuilderVisibility.Stable; }
@Override public boolean isSupervised() { return false; }
public static class Exemplar extends Iced<Exemplar> {
Exemplar(double[] d, int[] c, long id) { data=d; cats=c; gid=id; _cnt=1; }
final double[] data; //numerical
final int[] cats; //categorical
final long gid;
long _cnt; // exemplar count
/**
* Add a new exemplar to the input array (doubling it if necessary)
* @param es Array of exemplars
* @param e Adding this exemplar to the array of exemplars
* @return Array of exemplars containing the new exemplar
*/
public static Exemplar[] addExemplar(Exemplar[] es, Exemplar e) {
if (es.length == 0) {
return new Exemplar[]{e};
} else {
Exemplar[] res=es;
int idx = es.length - 1;
while (idx >= 0 && es[idx] == null) idx--;
if (idx == es.length - 1) {
res = Arrays.copyOf(es, es.length << 1);
res[es.length] = e;
return res;
}
res[idx + 1] = e;
return res;
}
}
/**
* Trim any trailing nulls
* @param es the array to trim
* @return a new Exemplar[] without trailing nulls
*/
public static Exemplar[] trim(Exemplar[] es) {
int idx=es.length-1;
while(idx>=0 && null==es[idx]) idx--;
return Arrays.copyOf(es,idx+1);
}
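/**
 * Descriptive note (added comment): partial squared Euclidean distance between this exemplar and e2.
 * Dimensions where either value is missing (NaN) are skipped and the sum is scaled up by ncols / n to
 * compensate; once a missing value has been seen, the early cutout (sum > thresh) is no longer applied.
 */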
private double squaredEuclideanDistance(double[] e2, double thresh) {
double sum = 0;
int n = 0;
boolean missing = false;
double e1[] = data;
double ncols = e1.length;
for (int j = 0; j < ncols; j++) {
final double d1 = e1[j];
final double d2 = e2[j];
if (!isMissing(d1) && !isMissing(d2)) {
final double dist = (d1 - d2);
sum += dist*dist;
n++;
} else {
missing=true;
}
if (!missing && sum > thresh) break; //early cutout
}
sum *= ncols / n;
return sum;
}
private static boolean isMissing(double x) {
return Double.isNaN(x);
}
}
// Number of columns in training set (p)
@Override protected AggregatorDriver trainModelImpl() { return new AggregatorDriver(); }
@Override public ModelCategory[] can_build() { return new ModelCategory[]{ ModelCategory.Clustering }; }
// Called from an http request
public Aggregator(AggregatorModel.AggregatorParameters parms) { super(parms); init(false); }
public Aggregator(boolean startup_once) { super(new AggregatorModel.AggregatorParameters(),startup_once); }
@Override
public void init(boolean expensive) {
if (expensive && _parms._categorical_encoding == Model.Parameters.CategoricalEncodingScheme.AUTO){
_parms._categorical_encoding=Model.Parameters.CategoricalEncodingScheme.Eigen;
}
if (_parms._target_num_exemplars <= 0) {
error("_target_num_exemplars", "target_num_exemplars must be > 0.");
}
if (_parms._rel_tol_num_exemplars <= 0 || _parms._rel_tol_num_exemplars>=1) {
error("_rel_tol_num_exemplars", "rel_tol_num_exemplars must be inside 0...1.");
}
super.init(expensive);
if (error_count() > 0)
throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(Aggregator.this);
}
class AggregatorDriver extends Driver {
// Main worker thread
@Override
public void computeImpl() {
AggregatorModel model = null;
DataInfo di = null;
try {
init(true); // Initialize parameters
if (error_count() > 0) throw new IllegalArgumentException("Found validation errors: " + validationErrors());
// The model to be built
model = new AggregatorModel(dest(), _parms, new AggregatorModel.AggregatorOutput(Aggregator.this));
model.delete_and_lock(_job);
Frame orig = train(); //this has ignored columns removed etc.
_job.update(1,"Preprocessing data.");
di = new DataInfo(orig, null, true, _parms._transform, false, false, false);
DKV.put(di);
Vec assignment;
AggregateTask aggTask;
final double radiusBase = .1 / Math.pow(Math.log(orig.numRows()), 1.0 / orig.numCols()); // Lee's magic formula
final int targetNumExemplars = (int)Math.min((long)_parms._target_num_exemplars, orig.numRows());
// Increase radius until we have low enough number of exemplars
_job.update(0, "Aggregating.");
int numExemplars;
double lo = 0;
double hi = 256;
double mid = 8; //starting point of radius_scale
int noNewExamplarsIterCount = 0;
int previousNumExemplars = 0;
double tol = _parms._rel_tol_num_exemplars;
int upperLimit = (int)((1.+tol)*targetNumExemplars);
int lowerLimit = (int)((1.-tol)*targetNumExemplars);
Key terminateKey = Key.make();
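// Descriptive note (added comment): the loop below is a bisection on the radius scale. lo/hi bracket
// the scale in [0, 256], mid is the current guess, and radius = mid * radiusBase. Too many exemplars
// (or an early termination of AggregateTask) raises lo, too few lowers hi, and the search stops when
// the exemplar count lands within rel_tol_num_exemplars of the target, when the bracket collapses,
// or when the too-few branch sees the same exemplar count _num_iteration_without_new_exemplar times.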
while(true) {
Log.info("radius_scale lo/mid/hi: " + lo + "/" + mid + "/" + hi);
double radius = mid * radiusBase;
if (targetNumExemplars==orig.numRows()) radius = 0;
// Add workspace vector for exemplar assignment
Vec[] vecs = Arrays.copyOf(orig.vecs(), orig.vecs().length + 1);
assignment = vecs[vecs.length - 1] = orig.anyVec().makeZero();
Log.info("Aggregating with radius " + String.format("%5f", radius) + ":");
aggTask = new AggregateTask(di._key, radius, _job._key, upperLimit, radius == 0 ? null : terminateKey).doAll(vecs);
if (radius == 0) {
Log.info(" Returning original dataset.");
numExemplars = aggTask._exemplars.length;
assert(numExemplars == orig.numRows());
break;
}
// stuck in range [0,256] with too many exemplars? - just do it
if (aggTask.isTerminated() && Math.abs(hi-lo) < 1e-3 * Math.abs(lo+hi)) {
aggTask = new AggregateTask(di._key, radius, _job._key, (int)orig.numRows(), terminateKey).doAll(vecs);
Log.info(" Running again without early cutout.");
numExemplars = aggTask._exemplars.length;
break;
}
if (aggTask.isTerminated() || aggTask._exemplars.length > upperLimit) {
Log.info(" Too many exemplars.");
lo = mid;
} else {
numExemplars = aggTask._exemplars.length;
Log.info(" " + numExemplars + " exemplars.");
if (numExemplars >= lowerLimit && numExemplars <= upperLimit) { // close enough
Log.info(" Within " + (100*tol) +"% of target number of exemplars. Done.");
break;
} else {
Log.info(" Too few exemplars.");
hi = mid;
if(previousNumExemplars == numExemplars) noNewExamplarsIterCount++;
if (noNewExamplarsIterCount > _parms._num_iteration_without_new_exemplar) {
Log.info("Exiting with " + numExemplars + " exemplars as last " + _parms._num_iteration_without_new_exemplar + " iterations did not accure any more exemplars");
break;
}
previousNumExemplars = numExemplars;
}
}
mid = lo + (hi-lo)/2.;
}
_job.update(1, "Aggregation finished. Got " + numExemplars + " examplars");
assert (!aggTask.isTerminated());
DKV.remove(terminateKey);
String msg = "Creating exemplar assignments.";
Log.info(msg);
_job.update(1, msg);
new RenumberTask(aggTask._mapping).doAll(assignment);
// Populate model output state
model._exemplars = aggTask._exemplars;
model._counts = new long[aggTask._exemplars.length];
for(int i=0;i<aggTask._exemplars.length;++i)
model._counts[i] = aggTask._exemplars[i]._cnt;
model._exemplar_assignment_vec_key = assignment._key;
model._output._output_frame = Key.make("aggregated_" + _parms._train.toString() + "_by_" + model._key);
msg = "Creating output frame.";
Log.info(msg);
_job.update(1, msg);
model.createFrameOfExemplars(_parms._train.get(), model._output._output_frame);
if(model._parms._save_mapping_frame){
model._output._mapping_frame = Key.make("aggregated_mapping_" + _parms._train.toString() + "_by_" + model._key);
model.createMappingOfExemplars(model._output._mapping_frame);
}
_job.update(1, "Done.");
model.update(_job);
} finally {
if (model != null) {
model.unlock(_job);
Scope.untrack(model._exemplar_assignment_vec_key);
Frame outFrame = model._output._output_frame != null ? model._output._output_frame.get() : null;
if (outFrame != null) Scope.untrack(outFrame.keys());
Frame mappingFrame = model._output._mapping_frame != null ? model._output._mapping_frame.get() : null;
if (mappingFrame != null) Scope.untrack(mappingFrame.keys());
}
if (di!=null) di.remove();
}
}
}
private static class AggregateTask extends MRTask<AggregateTask> {
//INPUT
final double _delta;
final Key _dataInfoKey;
final Key _jobKey;
final int _maxExemplars;
// OUTPUT
Exemplar[] _exemplars;
Key _terminateKey;
// long[] _counts;
static class MyPair extends Iced<MyPair> implements Comparable<MyPair> {
long first;
long second;
public MyPair(long f, long s) { first=f; second=s; }
public MyPair(){}
@Override
public int compareTo(MyPair o) {
if (first < o.first) return -1;
if (first == o.first) return 0;
return 1;
}
}
// WORKSPACE
static private class GIDMapping extends Iced<GIDMapping> {
MyPair[] pairSet;
int len;
int capacity;
public GIDMapping() {
capacity=32;
len=0;
pairSet = new MyPair[capacity];
}
void set(long from, long to) {
for (int i=0;i<len;++i) {
MyPair p = pairSet[i];
// assert (p.first != from);
if (p.second == from) {
p.second = to;
}
}
MyPair p = new MyPair(from, to);
if (len==capacity) {
capacity*=2;
pairSet = Arrays.copyOf(pairSet, capacity);
}
pairSet[len++]=p;
}
long[][] unsortedList() {
long[][] li = new long[2][len];
MyPair[] pl = pairSet;
for (int i=0;i<len;++i) {
li[0][i] = pl[i].first;
li[1][i] = pl[i].second;
}
return li;
}
}
GIDMapping _mapping;
public AggregateTask(Key<DataInfo> dataInfoKey, double radius, Key<Job> jobKey, int maxExemplars, Key terminateKey) {
_delta = radius*radius;
_dataInfoKey = dataInfoKey;
_jobKey = jobKey;
_maxExemplars = maxExemplars;
_terminateKey = terminateKey;
if (_terminateKey!=null)
DKV.put(_terminateKey, new IcedInt(0));
}
private boolean isTerminated() {
return _terminateKey != null && ((IcedInt)(DKV.getGet(_terminateKey)))._val==1;
}
private void terminate() {
if (_terminateKey != null)
DKV.put(_terminateKey, new IcedInt(1));
}
@Override
public void map(Chunk[] chks) {
_mapping = new GIDMapping();
Exemplar[] es = new Exemplar[4];
Chunk[] dataChks = Arrays.copyOf(chks, chks.length-1);
Chunk assignmentChk = chks[chks.length-1];
// loop over rows
DataInfo di = ((DataInfo)_dataInfoKey.get());
assert(di!=null);
DataInfo.Row row = di.newDenseRow(); //shared _dataInfo - faster, no writes
final int nCols = row.nNums;
for (int r=0; r<chks[0]._len; ++r) {
if (r%100 == 0 && isTerminated())
return;
long rowIndex = chks[0].start()+r;
row = di.extractDenseRow(dataChks, r, row);
double[] data = Arrays.copyOf(row.numVals, nCols);
int[] cats = Arrays.copyOf(row.binIds, row.binIds.length);
if (r==0) {
Exemplar ex = new Exemplar(data, cats, rowIndex);
es = Exemplar.addExemplar(es,ex);
assignmentChk.set(r, ex.gid);
} else {
/* find closest exemplar to this case */
double distanceToNearestExemplar = Double.MAX_VALUE;
int closestExemplarIndex = 0;
int index = 0;
long gid=-1;
for(Exemplar e: es) {
if( null==e ) break;
// all categoricals must match: only non-trivial (empty) for categorical_handling == Enum
if (!Arrays.equals(cats, e.cats)) {
index++;
continue;
}
double distToExemplar = e.squaredEuclideanDistance(data,distanceToNearestExemplar);
if( distToExemplar < distanceToNearestExemplar ) {
distanceToNearestExemplar = distToExemplar;
closestExemplarIndex = index;
gid=e.gid;
}
/* do not need to look further even if some other exemplar is closer */
if (distanceToNearestExemplar < _delta)
break;
index++;
}
/* found a close exemplar, so add to list */
if (distanceToNearestExemplar < _delta) {
es[closestExemplarIndex]._cnt++;
assignmentChk.set(r, gid);
} else {
/* otherwise, assign a new exemplar */
Exemplar ex = new Exemplar(data, cats, rowIndex);
assert(Arrays.equals(cats, ex.cats));
es = Exemplar.addExemplar(es,ex);
if (es.length > 2*_maxExemplars) { //es array grows by 2x - have to be conservative here
terminate();
}
assignmentChk.set(r, rowIndex); //assign to self
}
}
}
// populate output primitive arrays
_exemplars = Exemplar.trim(es);
if (_exemplars.length > _maxExemplars) {
terminate();
}
if (isTerminated())
return;
assert(_exemplars.length <= chks[0].len());
long sum=0;
for (Exemplar e: _exemplars) sum+=e._cnt;
assert(sum <= chks[0].len());
((Job)_jobKey.get()).update(1, "Aggregating.");
}
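// Descriptive note (added comment): reduce merges the exemplar sets of two map results. Each remote
// exemplar is assigned to the nearest local exemplar if it lies within the distance threshold _delta
// (its count is folded in and the GID mapping is updated); otherwise it is appended as a new exemplar.
// If either side has already been terminated or exceeds _maxExemplars, the merge is abandoned.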
@Override
public void reduce(AggregateTask mrt) {
if (isTerminated() || _exemplars == null || mrt._exemplars == null || _exemplars.length > _maxExemplars || mrt._exemplars.length > _maxExemplars) {
terminate();
_mapping = null;
_exemplars = null;
mrt._exemplars = null;
}
if (isTerminated())
return;
for (int i=0; i<mrt._mapping.len; ++i)
_mapping.set(mrt._mapping.pairSet[i].first, mrt._mapping.pairSet[i].second);
// reduce mrt into this
Exemplar[] exemplars = mrt._exemplars;
// long[] counts = mrt._counts;
long localCounts = 0;
for (Exemplar e : _exemplars) localCounts += e._cnt;
long remoteCounts = 0;
for (Exemplar e : mrt._exemplars) remoteCounts += e._cnt;
// remote tasks exemplars
for(int r=0;r<mrt._exemplars.length;++r) {
double distanceToNearestExemplar = Double.MAX_VALUE;
int closestExemplarIndex = 0;
int index=0;
for(Exemplar le: _exemplars) {
if( null==le ) break; // tapped out
double distToExemplar = le.squaredEuclideanDistance(mrt._exemplars[r].data,distanceToNearestExemplar);
if( distToExemplar < distanceToNearestExemplar ) {
distanceToNearestExemplar = distToExemplar;
closestExemplarIndex=index;
}
/* do not need to look further even if some other exemplar is closer */
if (distanceToNearestExemplar < _delta)
break;
index++;
}
if (distanceToNearestExemplar < _delta) {
// add remote exemplar counts/indices to one of my exemplars that are close enough
_exemplars[closestExemplarIndex]._cnt += mrt._exemplars[r]._cnt;
// Log.info("Reduce: Reassigning " + counts[r] + " rows from " + exemplars[r].gid + " to " + _exemplars[closestExemplarIndex].gid);
_mapping.set(exemplars[r].gid, _exemplars[closestExemplarIndex].gid);
} else {
_exemplars = Exemplar.addExemplar(_exemplars, IcedUtils.deepCopy(mrt._exemplars[r]));
}
}
mrt._exemplars = null;
_exemplars = Exemplar.trim(_exemplars);
assert(_exemplars.length <= localCounts + remoteCounts);
long sum=0;
for(Exemplar e: _exemplars) sum+=e._cnt;
assert(sum == localCounts + remoteCounts);
((Job)_jobKey.get()).update(1, "Aggregating.");
}
}
private static class RenumberTask extends MRTask<RenumberTask> {
final long[][] _map;
public RenumberTask(AggregateTask.GIDMapping mapping) { _map = mapping.unsortedList(); }
@Override
public void map(Chunk c) {
for (int i=0;i<c._len;++i) {
long old = c.at8(i);
//int pos=Arrays.binarySearch(_map[0], old);
int pos = ArrayUtils.find(_map[0], old);
if (pos>=0) {
long newVal =_map[1][pos];
c.set(i, newVal);
}
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/aggregator/AggregatorModel.java
|
package hex.aggregator;
import hex.*;
import hex.pca.PCAModel;
import hex.util.LinearAlgebraUtils;
import water.*;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.udf.CFuncRef;
import water.util.ArrayUtils;
import water.util.FrameUtils;
import water.util.VecUtils;
import java.util.Arrays;
public class AggregatorModel extends Model<AggregatorModel,AggregatorModel.AggregatorParameters,AggregatorModel.AggregatorOutput> implements Model.ExemplarMembers {
@Override
public ToEigenVec getToEigenVec() {
return LinearAlgebraUtils.toEigen;
}
public static class AggregatorParameters extends Model.Parameters {
public String algoName() { return "Aggregator"; }
public String fullName() { return "Aggregator"; }
public String javaName() { return AggregatorModel.class.getName(); }
@Override public long progressUnits() { return 5 + 2*train().anyVec().nChunks() - 1; } // nChunks maps and nChunks-1 reduces, multiply by two for main job overhead
//public double _radius_scale=1.0;
// public int _max_iterations = 1000; // Max iterations for SVD
public DataInfo.TransformType _transform = DataInfo.TransformType.NORMALIZE; // Data transformation
public PCAModel.PCAParameters.Method _pca_method = PCAModel.PCAParameters.Method.Power; // Method for dimensionality reduction
public int _k = 1; // Number of principal components
public int _target_num_exemplars = 5000;
public double _rel_tol_num_exemplars = 0.5;
public boolean _use_all_factor_levels = false; // When expanding categoricals, should first level be kept or dropped?
public boolean _save_mapping_frame = false;
public int _num_iteration_without_new_exemplar = 500;
}
public static class AggregatorOutput extends Model.Output {
public AggregatorOutput(Aggregator b) { super(b); }
@Override public int nfeatures() { return _output_frame.get().numCols()-1/*counts*/; }
@Override public ModelCategory getModelCategory() { return ModelCategory.Clustering; }
public Key<Frame> _output_frame;
public Key<Frame> _mapping_frame;
}
public Aggregator.Exemplar[] _exemplars;
public long[] _counts;
public Key<Vec> _exemplar_assignment_vec_key;
public AggregatorModel(Key selfKey, AggregatorParameters parms, AggregatorOutput output) {
super(selfKey,parms,output);
}
@Override
protected PredictScoreResult predictScoreImpl(Frame orig, Frame adaptedFr, String destination_key, final Job j, boolean computeMetrics, CFuncRef customMetricFunc) {
return new PredictScoreResult(null, null, null);
}
@Override
protected Futures remove_impl(Futures fs, boolean cascade) {
Keyed.remove(_exemplar_assignment_vec_key);
return super.remove_impl(fs, cascade);
}
@Override
public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) {
return null;
}
@Override
protected double[] score0(double[] data, double[] preds) {
return preds;
}
public Frame createFrameOfExemplars(Frame orig, Key destination_key) {
final long[] keep = new long[_exemplars.length];
for (int i=0;i<keep.length;++i)
keep[i]=_exemplars[i].gid;
Vec exAssignment = _exemplar_assignment_vec_key.get();
// preserve the original row order
Vec booleanCol = new MRTask() {
@Override
public void map(Chunk c2) {
for (int i=0;i<keep.length;++i) {
if (keep[i] < c2.start()) continue;
if (keep[i] >= c2.start()+c2._len) continue;
c2.set((int)(keep[i]-c2.start()), 1);
}
}
}.doAll(new Frame(new Vec[]{exAssignment.makeZero()}))._fr.vec(0);
Vec[] vecs = Arrays.copyOf(orig.vecs(), orig.vecs().length+1);
vecs[vecs.length-1] = booleanCol;
Frame ff = new Frame(orig.names(), orig.vecs());
ff.add("predicate", booleanCol);
Frame res = new Frame.DeepSelect().doAll(orig.types(),ff).outputFrame(destination_key, orig.names(), orig.domains());
FrameUtils.shrinkDomainsToObservedSubset(res);
booleanCol.remove();
assert(res.numRows()==_exemplars.length);
Vec cnts = res.anyVec().makeZero();
Vec.Writer vw = cnts.open();
for (int i=0;i<_counts.length;++i)
vw.set(i, _counts[i]);
vw.close();
res.add("counts", cnts);
DKV.put(destination_key, res);
return res;
}
public Frame createMappingOfExemplars(Key destinationKey){
final long[] keep = MemoryManager.malloc8(_exemplars.length);
for (int i=0;i<keep.length;++i)
keep[i]=_exemplars[i].gid;
Vec exAssignment = _exemplar_assignment_vec_key.get();
Arrays.sort(keep);
Vec exemplarAssignment = new MRTask() {
@Override
public void map(Chunk c1, NewChunk nc) {
for (int i = 0; i < c1._len; i++) {
long gid = c1.at8(i);
nc.addNum(ArrayUtils.find(keep, gid));
}
}
}.doAll(Vec.T_NUM,exAssignment).outputFrame().vec(0);
Frame mapping = new Frame(destinationKey,new String[]{"exemplar_assignment"}, new Vec[]{exemplarAssignment});
final long[] uniqueExemplars = new VecUtils.CollectIntegerDomain().doAll(mapping.vecs()).domain();
assert(uniqueExemplars.length==_exemplars.length);
assert(mapping.numRows()==exAssignment.length());
for(long exmp: uniqueExemplars){
assert(exmp <= _exemplars.length);
}
DKV.put(mapping);
return mapping;
}
@Override
public Frame scoreExemplarMembers(Key<Frame> destination_key, final int exemplarIdx) {
Vec booleanCol = new MRTask() {
@Override
public void map(Chunk c, NewChunk nc) {
for (int i=0;i<c._len;++i)
nc.addNum(c.at8(i)==_exemplars[exemplarIdx].gid ? 1 : 0,0);
}
}.doAll(Vec.T_NUM, new Frame(new Vec[]{_exemplar_assignment_vec_key.get()})).outputFrame().anyVec();
Frame orig = _parms.train();
Vec[] vecs = Arrays.copyOf(orig.vecs(), orig.vecs().length+1);
vecs[vecs.length-1] = booleanCol;
Frame ff = new Frame(orig.names(), orig.vecs());
ff.add("predicate", booleanCol);
Frame res = new Frame.DeepSelect().doAll(orig.types(),ff).outputFrame(destination_key, orig.names(), orig.domains());
FrameUtils.shrinkDomainsToObservedSubset(res);
DKV.put(res);
assert(res.numRows()==_counts[exemplarIdx]);
booleanCol.remove();
return res;
}
public void checkConsistency() {
long sum = 0;
for (long l : this._counts) sum += l;
assert (sum == _parms.train().numRows());
final long[] exemplarGIDs = new long[this._counts.length];
for (int i = 0; i < this._exemplars.length; ++i)
exemplarGIDs[i] = this._exemplars[i].gid;
long[] counts = new long[this._exemplars.length];
for (int i = 0; i < _parms.train().numRows(); ++i) {
long ass = (_exemplar_assignment_vec_key.get()).at8(i);
for (int j = 0; j < exemplarGIDs.length; ++j) {
if (exemplarGIDs[j] == ass) {
counts[j]++;
break;
}
}
}
sum = 0;
for (long l : counts) sum += l;
assert (sum == _parms.train().numRows());
for (int i = 0; i < counts.length; ++i) {
assert (counts[i] == this._counts[i]);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/aggregator/ModelMetricsAggregator.java
|
package hex.aggregator;
import hex.CustomMetric;
import hex.Model;
import hex.ModelMetrics;
import hex.ModelMetricsUnsupervised;
import water.fvec.Frame;
public class ModelMetricsAggregator extends ModelMetricsUnsupervised {
public ModelMetricsAggregator(Model model, Frame frame, CustomMetric customMetric) {
super(model, frame, 0, Double.NaN, customMetric);
}
// Aggregator currently does not have any model metrics to compute during scoring
public static class AggregatorModelMetrics extends MetricBuilderUnsupervised {
public AggregatorModelMetrics(int dims) {
_work = new double[dims];
}
@Override
public double[] perRow(double[] preds, float[] dataRow, Model m) { return preds; }
@Override
public ModelMetrics makeModelMetrics(Model m, Frame f) {
return m.addModelMetrics(new hex.aggregator.ModelMetricsAggregator(m, f, _customMetric));
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/anovaglm/ANOVAGLM.java
|
package hex.anovaglm;
import hex.DataInfo;
import hex.ModelBuilder;
import hex.ModelBuilderHelper;
import hex.ModelCategory;
import hex.glm.GLM;
import hex.glm.GLMModel;
import water.DKV;
import water.Key;
import water.Scope;
import water.exceptions.H2OModelBuilderIllegalArgumentException;
import water.fvec.Frame;
import water.fvec.Vec;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import static hex.anovaglm.ANOVAGLMUtils.*;
import static hex.gam.MatrixFrameUtils.GamUtils.keepFrameKeys;
import static hex.glm.GLMModel.GLMParameters;
import static hex.glm.GLMModel.GLMParameters.Family.*;
import static water.util.ArrayUtils.flat;
public class ANOVAGLM extends ModelBuilder<ANOVAGLMModel, ANOVAGLMModel.ANOVAGLMParameters, ANOVAGLMModel.ANOVAGLMModelOutput> {
public int _numberOfModels = 4;// (A, A*B), (B, A*B), (A, B), (A, B, A*B)
public int _numberOfPredCombo = 3;
public int _numberOfPredictors = 3; // A, B, interaction of A and B
DataInfo _dinfo;
String[][] _predictComboNames; // store single predictors, predictor interaction columns
int[] _degreeOfFreedom;
String[] _modelNames; // store model description
String[] _predNamesIndividual; // store individual column names
public String[][] _transformedColNames; // store expanded names for single predictors, predictor interactions.
public int[] _predictorColumnStart;
public ANOVAGLM(boolean startup_once) {
super(new ANOVAGLMModel.ANOVAGLMParameters(), startup_once);
}
public ANOVAGLM(ANOVAGLMModel.ANOVAGLMParameters parms) {
super(parms);
init(false);
}
public ANOVAGLM(ANOVAGLMModel.ANOVAGLMParameters parms, Key<ANOVAGLMModel> key) {
super(parms, key);
init(false);
}
@Override
protected int nModelsInParallel(int folds) { // disallow nfold cross-validation
return nModelsInParallel(1, 2);
}
@Override
protected ANOVAGLMDriver trainModelImpl() {
return new ANOVAGLMDriver();
}
@Override
public ModelCategory[] can_build() {
return new ModelCategory[]{ModelCategory.Regression, ModelCategory.Binomial, ModelCategory.Multinomial,
ModelCategory.Ordinal};
}
@Override
public boolean isSupervised() {
return true;
}
@Override
public boolean haveMojo() {
return false;
}
@Override
public boolean havePojo() {
return false;
}
public void init(boolean expensive) {
super.init(expensive);
if (expensive) {
initValidateAnovaGLMParameters();
}
}
/***
* Init and validate ANOVAGLMParameters.
*/
private void initValidateAnovaGLMParameters() {
if (_parms._link == null)
_parms._link = GLMModel.GLMParameters.Link.family_default;
_dinfo = new DataInfo(_train.clone(), _valid, 1, true, DataInfo.TransformType.NONE,
DataInfo.TransformType.NONE,
_parms.missingValuesHandling() == GLMModel.GLMParameters.MissingValuesHandling.Skip,
_parms.imputeMissing(), _parms.makeImputer(), false, hasWeightCol(), hasOffsetCol(),
hasFoldCol(), null);
_numberOfPredictors = _dinfo._nums + _dinfo._cats;
if (_numberOfPredictors < 2)
error("predictors", " there must be at least two predictors.");
if (_parms._highest_interaction_term == 0)
_parms._highest_interaction_term = _numberOfPredictors;
if (_parms._highest_interaction_term < 1 || _parms._highest_interaction_term > _numberOfPredictors)
error("highest_interaction_term", " must be >= 1 or <= number of predictors.");
if (!(gaussian.equals(_parms._family) || tweedie.equals(_parms._family) || poisson.equals(_parms._family))) {
_parms._compute_p_values = false;
_parms._remove_collinear_columns = false;
}
if (nclasses() > 2)
error("family", " multinomial and ordinal are not supported at this point.");
_numberOfPredCombo = calculatePredComboNumber(_numberOfPredictors, _parms._highest_interaction_term);
_numberOfModels = _numberOfPredCombo + 1;
_predNamesIndividual = extractPredNames(_dinfo, _numberOfPredictors);
_predictComboNames = generatePredictorCombos(_predNamesIndividual, _parms._highest_interaction_term);
_transformedColNames = new String[_numberOfPredCombo][];
_predictorColumnStart = new int[_numberOfPredCombo];
_degreeOfFreedom = new int[_numberOfPredCombo];
generatePredictorNames(_predictComboNames, _transformedColNames, _predictorColumnStart, _degreeOfFreedom, _dinfo);
_modelNames = generateModelNames(_predictComboNames);
if (error_count() > 0)
throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(ANOVAGLM.this);
}
private class ANOVAGLMDriver extends Driver {
String[] _allTransformedColNames; // flatten new column names
Key<Frame> _transformedColsKey; // store transformed column frame key
Frame[] _trainingFrames; // store generated frames
GLMParameters[] _glmParams; // store GLMParameters needed to generate all the data
GLM[] _glmBuilder; // store GLM Builders to be build in parallel
GLM[] _glmResults;
Frame _completeTransformedFrame; // store transformed frame
public final void buildModel() {
ANOVAGLMModel model = null;
try {
_dinfo = new DataInfo(_completeTransformedFrame, _valid, 1, false,
DataInfo.TransformType.NONE, DataInfo.TransformType.NONE,
_parms.missingValuesHandling() == GLMModel.GLMParameters.MissingValuesHandling.Skip,
_parms.imputeMissing(), _parms.makeImputer(), false, hasWeightCol(), hasOffsetCol(),
hasFoldCol(), null);
model = new ANOVAGLMModel(dest(), _parms, new ANOVAGLMModel.ANOVAGLMModelOutput(ANOVAGLM.this, _dinfo));
model.write_lock(_job);
if (_parms._save_transformed_framekeys) {
model._output._transformed_columns_key = _transformedColsKey;
}
_trainingFrames = buildTrainingFrames(_transformedColsKey, _numberOfModels, _transformedColNames, _parms); // build up training frames
_glmParams = buildGLMParameters(_trainingFrames, _parms);
_job.update(1, "calling GLM to build GLM models ...");
_glmBuilder = buildGLMBuilders(_glmParams);
_glmResults = ModelBuilderHelper.trainModelsParallel(_glmBuilder, _parms._nparallelism); // set to 4 according to Michalk
model._output._glmModels = extractGLMModels(_glmResults);
model._output.copyGLMCoeffs(_modelNames);
fillModelMetrics(model, model._output._glmModels[_numberOfPredCombo], _trainingFrames[_numberOfPredCombo]); // take full model metrics as our model metrics
model.fillOutput(combineAndFlat(_predictComboNames), _degreeOfFreedom);
_job.update(0, "Completed GLM model building. Extracting metrics from GLM models and building" +
" ANOVAGLM outputs");
model.update(_job);
} finally {
final List<Key> keep = new ArrayList<>();
int numFrame2Delete = _parms._save_transformed_framekeys ? (_trainingFrames.length - 1) : _trainingFrames.length;
removeFromDKV(_trainingFrames, numFrame2Delete);
if (model != null) {
if (_parms._save_transformed_framekeys)
keepFrameKeys(keep, _transformedColsKey);
else
DKV.remove(_transformedColsKey);
Scope.untrack(keep.toArray(new Key[keep.size()]));
model.update(_job);
model.unlock(_job);
}
}
}
/***
* This method will transform the training frame such that the constraints on the GLM parameters will be satisfied.
* Refer to ANOVAGLMTutorial https://github.com/h2oai/h2o-3/issues/7561 section III.II.
*/
void generateTransformedColumns() {
_allTransformedColNames = flat(_transformedColNames);
List<String> expandedColNames = new ArrayList<>(Arrays.asList(_allTransformedColNames));
if (hasWeightCol())
expandedColNames.add(_parms._weights_column);
if (hasOffsetCol())
expandedColNames.add(_parms._offset_column);
expandedColNames.add(_parms._response_column);
GenerateTransformColumns gtc = new GenerateTransformColumns(_transformedColNames, _parms, _dinfo,
_predNamesIndividual.length, _predictComboNames);
gtc.doAll(expandedColNames.size(), Vec.T_NUM, _dinfo._adaptedFrame);
_completeTransformedFrame = gtc.outputFrame(Key.make(), expandedColNames.toArray(new String[0]), null);
if (_train.vec(_parms._response_column).isCategorical() &&
!_completeTransformedFrame.vec(_parms._response_column).isCategorical())
_completeTransformedFrame.replace(_completeTransformedFrame.numCols()-1,
_completeTransformedFrame.vec(_parms._response_column).toCategoricalVec()).remove();
_transformedColsKey = _completeTransformedFrame._key; // contains transformed predicts, weight/offset and response columns
DKV.put(_completeTransformedFrame);
}
@Override
public void computeImpl() {
init(true);
if (error_count() > 0)
throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(ANOVAGLM.this);
generateTransformedColumns();
_job.update(0, "Finished transforming training frame");
buildModel();
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/anovaglm/ANOVAGLMModel.java
|
package hex.anovaglm;
import hex.*;
import hex.deeplearning.DeepLearningModel;
import hex.genmodel.utils.DistributionFamily;
import hex.glm.GLM;
import hex.glm.GLMModel;
import org.apache.commons.math3.distribution.FDistribution;
import water.*;
import water.fvec.Frame;
import water.fvec.Vec;
import water.udf.CFuncRef;
import water.util.TwoDimTable;
import java.io.Serializable;
import java.util.Arrays;
import static hex.anovaglm.ANOVAGLMUtils.generateGLMSS;
import static hex.gam.MatrixFrameUtils.GAMModelUtils.genCoefficientTable;
import static hex.glm.GLMModel.GLMParameters.*;
import static hex.glm.GLMModel.GLMParameters.Family.AUTO;
import static hex.util.DistributionUtils.distributionToFamily;
import static hex.util.DistributionUtils.familyToDistribution;
public class ANOVAGLMModel extends Model<ANOVAGLMModel, ANOVAGLMModel.ANOVAGLMParameters, ANOVAGLMModel.ANOVAGLMModelOutput>{
public ANOVAGLMModel(Key<ANOVAGLMModel> selfKey, ANOVAGLMParameters parms, ANOVAGLMModelOutput output) {
super(selfKey, parms, output);
}
@Override
public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) {
assert domain == null;
switch (_output.getModelCategory()) {
case Binomial:
return new ModelMetricsBinomial.MetricBuilderBinomial(domain);
case Multinomial:
return new ModelMetricsMultinomial.MetricBuilderMultinomial(_output.nclasses(), domain, _parms._auc_type);
case Regression:
return new ModelMetricsRegression.MetricBuilderRegression();
default:
throw H2O.unimpl("Invalid ModelCategory " + _output.getModelCategory());
}
}
@Override
protected double[] score0(double[] data, double[] preds) {
throw new UnsupportedOperationException("ANOVAGLM does not support scoring on data. It only provide information" +
" on predictor relevance");
}
@Override
public Frame score(Frame fr, String destination_key, Job j, boolean computeMetrics, CFuncRef customMetricFunc) {
throw new UnsupportedOperationException("ANOVAGLM does not support scoring on data. It only provide information" +
" on predictor relevance");
}
/***
* Return the ANOVA table as an H2OFrame.
* @return H2O Frame containing the ANOVA table as in the model summary
*/
@Override
public Frame result() {
return _output.generateResultFrame();
}
public static class ANOVAGLMParameters extends Model.Parameters {
public int _highest_interaction_term;
public double[] _alpha;
public double[] _lambda = new double[]{0};
public boolean _standardize = true;
public Family _family = AUTO;
public boolean _lambda_search;
public Link _link = Link.family_default;
public Solver _solver = Solver.IRLSM;
public String[] _interactions=null;
public double _tweedie_variance_power;
public double _tweedie_link_power=1.0;
public double _theta;
public double _invTheta;
public Serializable _missing_values_handling = MissingValuesHandling.MeanImputation;
public boolean _compute_p_values = true;
public boolean _remove_collinear_columns = true;
public int _nfolds = 0; // disable cross-validation
public Key<Frame> _plug_values = null;
public boolean _save_transformed_framekeys = false; // for debugging, save the transformed predictors/interaction
public int _nparallelism = 4;
@Override
public String algoName() {
return "ANOVAGLM";
}
@Override
public String fullName() {
return "ANOVA for Generalized Linear Model";
}
@Override
public String javaName() { return ANOVAGLMModel.class.getName(); }
@Override
public long progressUnits() {
return 1;
}
public MissingValuesHandling missingValuesHandling() {
if (_missing_values_handling instanceof MissingValuesHandling)
return (MissingValuesHandling) _missing_values_handling;
assert _missing_values_handling instanceof DeepLearningModel.DeepLearningParameters.MissingValuesHandling;
switch ((DeepLearningModel.DeepLearningParameters.MissingValuesHandling) _missing_values_handling) {
case MeanImputation:
return MissingValuesHandling.MeanImputation;
case Skip:
return MissingValuesHandling.Skip;
default:
throw new IllegalStateException("Unsupported missing values handling value: " + _missing_values_handling);
}
}
public boolean imputeMissing() {
return missingValuesHandling() == MissingValuesHandling.MeanImputation ||
missingValuesHandling() == MissingValuesHandling.PlugValues;
}
public DataInfo.Imputer makeImputer() {
if (missingValuesHandling() == MissingValuesHandling.PlugValues) {
if (_plug_values == null || _plug_values.get() == null) {
throw new IllegalStateException("Plug values frame needs to be specified when Missing Value Handling = PlugValues.");
}
return new GLM.PlugValuesImputer(_plug_values.get());
} else { // mean/mode imputation and skip (even skip needs an imputer right now! PUBDEV-6809)
return new DataInfo.MeanImputer();
}
}
@Override
public void setDistributionFamily(DistributionFamily distributionFamily) {
_family = distributionToFamily(distributionFamily);
_link = Link.family_default;
}
@Override
public DistributionFamily getDistributionFamily() {
return familyToDistribution(_family);
}
}
public static class ANOVAGLMModelOutput extends Model.Output {
DataInfo _dinfo;
public long _training_time_ms;
public String[][] _coefficient_names; // coefficient names of all models
Family _family;
Link _link;
public Key<Frame> _transformed_columns_key;
public TwoDimTable[] _coefficients_table;
GLMModel[] _glmModels;
String[] _modelNames;
int[] _degreeOfFreedom;
@Override
public ModelCategory getModelCategory() {
switch (_family) {
case quasibinomial:
case fractionalbinomial:
case binomial: return ModelCategory.Binomial;
case multinomial: return ModelCategory.Multinomial;
case ordinal: return ModelCategory.Ordinal;
default: return ModelCategory.Regression;
}
}
public String[][] coefficientNames() { return _coefficient_names; }
public ANOVAGLMModelOutput(ANOVAGLM b, DataInfo dinfo) {
super(b, dinfo._adaptedFrame);
_dinfo = dinfo;
_domains = dinfo._adaptedFrame.domains();
_family = b._parms._family;
_link = b._parms._link;
}
/***
* This method copies the GLM coefficients from all GLM models into a TwoDimTable array for
* the ANOVAGLMModel._output.
*
* @param modelNames: string describing each GLM model built in terms of which predictor combo is left out.
*/
void copyGLMCoeffs(String[] modelNames) {
int numModels = _glmModels.length;
_coefficients_table = new TwoDimTable[numModels];
_coefficient_names = new String[numModels][];
for (int index = 0; index < numModels; index++) {
_coefficients_table[index] = genCoefficientTable(new String[]{"coefficients",
"standardized coefficients"}, _glmModels[index]._output.beta(),
_glmModels[index]._output.getNormBeta(), _glmModels[index]._output._coefficient_names,
"Coefficients for " + modelNames[index]);
_coefficient_names[index] = _glmModels[index]._output._coefficient_names.clone();
}
}
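/**
 * Descriptive note (added comment): builds the ANOVA table. For every predictor combo the Type III SS
 * comes from generateGLMSS (RSS of the model without that combo minus RSS of the full model),
 * MS = SS / df, F = MS / MSE where MSE = SS_full / residual df of the full model, and the p-value is
 * 1 - CDF of an F distribution with (df_combo, residual df of the full model) degrees of freedom.
 */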
private Frame generateResultFrame() {
int lastModelIndex = _glmModels.length - 1;
String[] colNames = new String[]{"predictors_interactions", "family", "link", "ss", "df", "ms", "f", "p_value"};
String[] rowNames = new String[lastModelIndex];
String[] familyNames = new String[lastModelIndex];
String[] linkNames = new String[lastModelIndex];
double[] ss = generateGLMSS(_glmModels, _family);
double[] dof = Arrays.stream(_degreeOfFreedom).asDoubleStream().toArray();
double[] msA = new double[lastModelIndex];
double[] fA = new double[lastModelIndex];
double[] pValues = new double[lastModelIndex];
System.arraycopy(_modelNames, 0, rowNames, 0, lastModelIndex);
long dofFullModel = _glmModels[lastModelIndex]._output._training_metrics.residual_degrees_of_freedom();
double mse = ss[lastModelIndex]/dofFullModel;
double oneOverMse = 1.0/mse;
for (int rIndex = 0; rIndex < lastModelIndex; rIndex++) {
familyNames[rIndex] = _family.toString();
linkNames[rIndex] = _link.toString();
double ms = ss[rIndex]/_degreeOfFreedom[rIndex];
msA[rIndex] = ms;
double f = oneOverMse*ss[rIndex]/_degreeOfFreedom[rIndex];
fA[rIndex] = f;
FDistribution fdist = new FDistribution(_degreeOfFreedom[rIndex], dofFullModel);
double p_value = 1.0 - fdist.cumulativeProbability(f);
pValues[rIndex] = p_value;
}
Vec.VectorGroup vg = Vec.VectorGroup.VG_LEN1;
Vec rNames = Vec.makeVec(rowNames, vg.addVec());
Vec fNames = Vec.makeVec(familyNames, vg.addVec());
Vec lNames = Vec.makeVec(linkNames, vg.addVec());
Vec sumSquares = Vec.makeVec(ss, vg.addVec());
Vec degOfFreedom = Vec.makeVec(dof, vg.addVec());
Vec msV = Vec.makeVec(msA, vg.addVec());
Vec fV = Vec.makeVec(fA, vg.addVec());
Vec pValuesV = Vec.makeVec(pValues, vg.addVec());
return new Frame(Key.<Frame>make(), colNames, new Vec[]{rNames, fNames, lNames, sumSquares,
degOfFreedom, msV, fV, pValuesV});
}
}
public void fillOutput(String[] modelNames, int[] degreeOfFreedom) {
_output._modelNames = modelNames;
_output._degreeOfFreedom = degreeOfFreedom;
_output._model_summary = generateSummary();
}
/**
* The Type III SS calculation, degree of freedom, F-statistics and p-values will be included in the model
* summary. For details on how those are calculated, refer to ANOVAGLMTutorial
* https://github.com/h2oai/h2o-3/issues/7561 section V.
*
* @return a {@link TwoDimTable} representation of the result frame
*/
public TwoDimTable generateSummary(){
Frame result = result();
int ncols = result.numCols();
int nrows = (int) result.numRows();
String[] names = result.names();
String[] types = new String[]{"string", "string", "string", "double", "int", "double", "double", "double"};
String[] formats = new String[]{"%s", "%s", "%s", "%f", "%d", "%f", "%f", "%f"};
String[] rowHeaders = new String[nrows];
TwoDimTable table = new TwoDimTable("GLM ANOVA Type III SS", "summary",
rowHeaders, names, types, formats, "");
for (int rIdx = 0; rIdx < nrows; rIdx++) {
for (int cIdx = 0; cIdx < ncols; cIdx++) {
Vec v = result.vec(cIdx);
table.set(rIdx, cIdx, v.isNumeric() ? v.at(rIdx) : v.stringAt(rIdx));
if (cIdx == 0) rowHeaders[rIdx] = v.stringAt(rIdx);
}
}
result.delete();
return table;
}
@Override
protected Futures remove_impl(Futures fs, boolean cascade) {
super.remove_impl(fs, cascade);
Keyed.remove(_output._transformed_columns_key, fs, cascade);
return fs;
}
@Override
protected AutoBuffer writeAll_impl(AutoBuffer ab) {
if (_output._transformed_columns_key != null) ab.putKey(_output._transformed_columns_key);
return super.writeAll_impl(ab);
}
@Override
protected Keyed readAll_impl(AutoBuffer ab, Futures fs) {
if (_output._transformed_columns_key != null)
ab.getKey(_output._transformed_columns_key, fs);
return super.readAll_impl(ab, fs);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/anovaglm/ANOVAGLMUtils.java
|
package hex.anovaglm;
import hex.*;
import hex.glm.GLM;
import hex.glm.GLMModel;
import water.DKV;
import water.Key;
import water.Scope;
import water.fvec.Frame;
import water.util.TwoDimTable;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import static hex.anovaglm.ANOVAGLMModel.ANOVAGLMParameters;
import static hex.gam.MatrixFrameUtils.GAMModelUtils.copyTwoDimTable;
import static hex.gam.MatrixFrameUtils.GAMModelUtils.genCoefficientTable;
import static hex.gam.MatrixFrameUtils.GamUtils.setParamField;
import static hex.glm.GLMModel.GLMParameters;
import static hex.glm.GLMModel.GLMParameters.Family.*;
public class ANOVAGLMUtils {
/***
* This method will extract the individual predictor names that will be used to build the GLM models.
*
* @param dinfo: DataInfo generated from dataset with all predictors
* @param numOfPredictors: number of individual predictors
* @return: copy of individual predictor names in a String array.
*/
public static String[] extractPredNames(DataInfo dinfo, int numOfPredictors) {
String[] predNames = new String[numOfPredictors];
String[] frameNames = dinfo._adaptedFrame.names();
System.arraycopy(frameNames, 0, predNames, 0, numOfPredictors);
return predNames;
}
/**
* In order to calculate Type III SS, we need the individual predictors and their interactions. For details, refer
* to ANOVAGLMTutorial https://github.com/h2oai/h2o-3/issues/7561 section IV
*
* @param predNamesIndividual: string containing individual predictor names
* @param maxPredInt: maximum number of predictors allowed in interaction term generation
* @return a two-dimensional String array. The first index refers to the predictor combo, the second index refers to
* the names of predictors involved in generating the interaction terms. For terms involving only
* individual predictors, there is only one predictor name.
*/
public static String[][] generatePredictorCombos(String[] predNamesIndividual, int maxPredInt) {
List<String[]> predCombo = new ArrayList<>();
addIndividualPred(predNamesIndividual, predCombo); // add individual predictors
for (int index = 2; index <= maxPredInt; index++) {
generateOneCombo(predNamesIndividual, index, predCombo);
}
return predCombo.toArray(new String[0][0]);
}
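// Example (added comment): with individual predictors {A, B, C} and maxPredInt = 2, the returned combos
// are {A}, {B}, {C}, {A,B}, {A,C}, {B,C}, i.e. 3 singles plus C(3,2) = 3 pairwise interactions.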
public static void addIndividualPred(String[] predNames, List<String[]> predCombo) {
int numPred = predNames.length;
for (int index=0; index < numPred; index++) {
predCombo.add(new String[]{predNames[index]});
}
}
public static void generateOneCombo(String[] predNames, int numInteract, List<String[]> predCombo) {
int predNum = predNames.length;
int[] predInd = IntStream.range(0, numInteract).toArray();
int zeroBound = predNum-numInteract;
int[] bounds = IntStream.range(zeroBound, predNum).toArray();
int numCombo = hex.genmodel.utils.MathUtils.combinatorial(predNum, numInteract);
for (int index = 0; index < numCombo; index++) {
predCombo.add(predCombo(predNames, predInd));
if (!updatePredCombo(predInd, bounds))
break; // done
}
}
public static boolean updatePredCombo(int[] predInd, int[] bounds) {
int predNum = predInd.length-1;
for (int index = predNum; index >= 0; index--) {
if (predInd[index] < bounds[index]) { //
predInd[index]++;
updateLaterBits(predInd, bounds, index, predNum);
return true;
}
}
return false;
}
public static void updateLaterBits(int[] predInd, int[] bounds, int index, int predNum) {
if (index < predNum) {
for (int ind = index+1; ind <= predNum; ind++) {
predInd[ind] = predInd[ind-1]+1;
}
}
}
public static String[] predCombo(String[] predNames, int[] predInd) {
int predNum = predInd.length;
String[] predCombos = new String[predNum];
for (int index = 0; index < predNum; index++)
predCombos[index] = predNames[predInd[index]];
return predCombos;
}
/***
* Given the number of individual predictors, the highest order of interaction terms allowed, this method will
* calculate the total number of predictors that will be used to build the full model.
*
* @param numPred: number of individual predictors
* @param highestInteractionTerms: highest number of predictors allowed in generating interactions
* @return the total number of predictor combos (individual predictors plus interaction terms)
*/
public static int calculatePredComboNumber(int numPred, int highestInteractionTerms) {
int numCombo = numPred;
for (int index = 2; index <= highestInteractionTerms; index++)
numCombo += hex.genmodel.utils.MathUtils.combinatorial(numPred, index);
return numCombo;
}
/***
* This method will take the frame that contains transformed columns of predictor A, predictor B, interaction
* of predictor A and B and generate new training frames that contains the following columns:
* - transformed columns of predictor B, interaction of predictor A and B, response
* - transformed columns of predictor A, interaction of predictor A and B, response
* - transformed columns of predictor A, predictor B, response
* - transformed columns of predictor A, predictor B, interaction of predictor A and B, response
*
* The same logic applies if there are more than two individual predictors. You basically generate all the
* predictor combos. In building the model, you leave one predictor combo out.
*
* @param transformedCols: contains frame key of frame containing transformed columns of predictor A, predictor B,
* interaction of predictor A and B
* @param numberOfModels: number of models to build. For 2 factors, this should be 4.
* @return Array of training frames to build all the GLM models.
*/
public static Frame[] buildTrainingFrames(Key<Frame> transformedCols, int numberOfModels,
String[][] transformedColNames, ANOVAGLMParameters parms) {
Frame[] trainingFrames = new Frame[numberOfModels];
int numFrames2Build = numberOfModels-1;
Frame allCols = DKV.getGet(transformedCols); // contains all the transformed columns except response, weight/offset
trainingFrames[numFrames2Build] = allCols;
int[][] predNums = new int[numFrames2Build][];
for (int index = 0; index < numFrames2Build; index++) {
predNums[index] = oneIndexOut(index, numFrames2Build);
}
for (int index = 0; index < numFrames2Build; index++) {
trainingFrames[index] = buildSpecificFrame(predNums[index], allCols, transformedColNames, parms);
DKV.put(trainingFrames[index]);
}
return trainingFrames;
}
public static int[] oneIndexOut(int currIndex, int indexRange) {
int[] indexArray = new int[indexRange-1];
int count = 0;
for (int index = 0; index < indexRange; index++) {
if (index != currIndex) {
indexArray[count++] = index;
}
}
return indexArray;
}
/**
* Adds the model metrics of the full GLM model as the ANOVAGLMModel model metrics (adapted from code by
* Zuzana Olajcova).
* @param aModel ANOVAGLM model that receives the metrics
* @param glmModel full GLM model whose metrics are copied
* @param trainingFrame training frame of the full GLM model
*/
public static void fillModelMetrics(ANOVAGLMModel aModel, GLMModel glmModel, Frame trainingFrame) {
aModel._output._training_metrics = glmModel._output._training_metrics;
for (Key<ModelMetrics> modelMetricsKey : glmModel._output.getModelMetrics()) {
aModel.addModelMetrics(modelMetricsKey.get().deepCloneWithDifferentModelAndFrame(glmModel, trainingFrame));
}
aModel._output._scoring_history = copyTwoDimTable(glmModel._output._scoring_history, "glm scoring history");
}
/***
* Simple method to extract GLM Models from GLM ModelBuilders.
* @param glmResults: array of GLM ModelBuilders
* @return: array of GLMModels
*/
public static GLMModel[] extractGLMModels(GLM[] glmResults) {
int numberModel = glmResults.length;
GLMModel[] models = new GLMModel[numberModel];
for (int index = 0; index < numberModel; index++) {
models[index] = glmResults[index].get();
Scope.track_generic(models[index]);
}
return models;
}
public static void removeFromDKV(Frame[] trainingFrames, int numFrame2Delete) {
for (int index=0; index < numFrame2Delete; index++)
DKV.remove(trainingFrames[index]._key);
}
/***
* This method is used to attach the weight/offset columns if they exist and the response columns, specific
* transformed columns to a training frames.
*
* @param predNums: indices of the predictor combos to include in this training frame
* @param allCols: Frame containing all transformed columns
* @param transformedColNames: transformed column names for every predictor combo; only the combos selected by
*                           predNums are added, since each training frame is built with one predictor combo
*                           left out.
* @param parms: AnovaGLMParameters
* @return training frame excluding a specific set of predictor combos.
*/
public static Frame buildSpecificFrame(int[] predNums, Frame allCols, String[][] transformedColNames,
ANOVAGLMParameters parms) {
final Frame predVecs = new Frame(Key.make());
int numVecs = predNums.length;
for (int index = 0; index < numVecs; index++) {
int predVecNum = predNums[index];
predVecs.add(allCols.subframe(transformedColNames[predVecNum]));
}
if (parms._weights_column != null)
predVecs.add(parms._weights_column, allCols.vec(parms._weights_column));
if (parms._offset_column != null)
predVecs.add(parms._offset_column, allCols.vec(parms._offset_column));
predVecs.add(parms._response_column, allCols.vec(parms._response_column));
return predVecs;
}
public static GLMParameters[] buildGLMParameters(Frame[] trainingFrames, ANOVAGLMParameters parms) {
final int numberOfModels = trainingFrames.length;
GLMParameters[] glmParams = new GLMParameters[numberOfModels];
final List<String> anovaglmOnlyList = Arrays.asList("save_transformed_framekeys", "type");
final Field[] field1 = ANOVAGLMParameters.class.getDeclaredFields();
final Field[] field2 = Model.Parameters.class.getDeclaredFields();
for (int index = 0; index < numberOfModels; index++) {
glmParams[index] = new GLMParameters();
setParamField(parms, glmParams[index], false, field1, anovaglmOnlyList);
setParamField(parms, glmParams[index], true, field2, Collections.emptyList());
glmParams[index]._train = trainingFrames[index]._key;
glmParams[index]._family = parms._family;
}
return glmParams;
}
/***
* This method is used to generate Model SS for all models built except the full model. Refer to AnovaGLMTutorial
* https://github.com/h2oai/h2o-3/issues/7561 section V.
*
* @param glmModels array of GLM models, with the full model in the last position
* @param family GLM family used to build the models
* @return model SS for each predictor combo; the last entry holds the residual SS of the full model
*/
public static double[] generateGLMSS(GLMModel[] glmModels, GLMParameters.Family family) {
int numModels = glmModels.length;
int lastModelIndex = numModels-1;
double[] modelSS = new double[numModels];
double[] rss = new double[numModels];
for (int index = 0; index < numModels; index++) {
if (binomial.equals(family) || quasibinomial.equals(family) || fractionalbinomial.equals(family))
rss[index] = ((ModelMetricsBinomialGLM) glmModels[index]._output._training_metrics).residual_deviance();
else // for numerical response column
rss[index] = ((ModelMetricsRegressionGLM) glmModels[index]._output._training_metrics).residual_deviance();
}
// calculate model ss as rss - rss with full model
for (int index = 0; index < lastModelIndex; index++)
modelSS[index] = rss[index]-rss[lastModelIndex];
modelSS[lastModelIndex] = rss[lastModelIndex];
return modelSS;
}
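// Illustrative example (not part of the original source), with made-up numbers: suppose three GLM models are
// passed in and the last one is the full model, with residual deviances rss = {110, 95, 80}. generateGLMSS then
// returns modelSS = {110 - 80, 95 - 80, 80} = {30, 15, 80}, i.e. the SS attributable to each left-out predictor
// combo, with the full model's residual deviance kept in the last slot.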
public static GLM[] buildGLMBuilders(GLMParameters[] glmParams) {
int numModel = glmParams.length; // copied from Zuzana
GLM[] builder = new GLM[numModel];
for (int index = 0; index < numModel; index++)
builder[index] = new GLM(glmParams[index]);
return builder;
}
/***
* This method aims to generate the column names of the final transformed frames. This means that for single
* enum predictor "ABC" with domains "0" and "1", "2", the new column names will be ABC_0, ABC_1. For single
* numerical column, the same column name will be used.
*
* To generate the names of interaction columns, let's assume there are three predictors, R (2 levels), C (3 levels),
* S (3 levels). If the highest interaction terms allowed is 3, we will generate the following transformed names
* for the interaction columns: R0:C0, R0:C1, C0:S0, C0:S1, C1:S0, C1:S1, R0:C0:S0, R0:C0:S1, R0:C1:S0, R0:C1:S1
* @param predComboNames: string array containing, for each predictor combo, the names of the predictors involved
*                        in generating that combo (single predictors as well as interactions).
* @param predictorNames: output array; for each predictor combo it is filled with the names of the transformed
*                        columns generated for that combo.
* @param predColumnStart: starting column index of each predictor combo after the frame transformation.
* @param degreeOfFreedom: degree of freedom for each predictor combo.
* @param dinfo: DataInfo of the adapted training frame, used to look up the column domains.
*/
public static void generatePredictorNames(String[][] predComboNames, String[][] predictorNames, int[] predColumnStart,
int[] degreeOfFreedom, DataInfo dinfo) {
int predNums = predComboNames.length;
int colStart = 0;
for (int predInd = 0; predInd < predNums; predInd++) {
if (predComboNames[predInd].length == 1) {
if (dinfo._adaptedFrame.vec(predComboNames[predInd][0]).domain() == null) // one numeric column
predictorNames[predInd] = new String[]{predComboNames[predInd][0]};
else
predictorNames[predInd] = transformOneCol(dinfo._adaptedFrame, predComboNames[predInd][0]);
} else { // working with interaction columns
predictorNames[predInd] = transformMultipleCols(dinfo._adaptedFrame, predComboNames, predInd, predictorNames);
}
colStart = updateDOFColInfo(predInd, predictorNames[predInd], degreeOfFreedom, predColumnStart, colStart);
}
}
public static int updateDOFColInfo(int predInd, String[] predComboNames, int[] dof, int[] predCS, int offset) {
dof[predInd] = predComboNames.length;
predCS[predInd] = offset;
return dof[predInd]+offset;
}
public static int findComboMatch(String[][] predComboNames, int currIndex) {
String[] currCombo = predComboNames[currIndex];
int startPos = 1;
for (int comboSize = currCombo.length-1; comboSize >= 0; comboSize--) {
String[] smallerCurrCombo = Arrays.copyOfRange(currCombo, startPos++, currCombo.length);
for (int sInd = currIndex - 1; sInd >= 0; sInd--) {
if (Arrays.equals(smallerCurrCombo, predComboNames[sInd]))
return sInd;
}
}
return -1;
}
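// Illustrative example (not part of the original source), reusing the R/C/S predictors from the docs above:
// with predComboNames = {{"R"}, {"C"}, {"S"}, {"R","C"}, {"C","S"}, {"R","C","S"}},
// findComboMatch(predComboNames, 5) drops the leading predictor of {"R","C","S"} to obtain {"C","S"} and finds it
// at index 4, while findComboMatch(predComboNames, 3) reduces {"R","C"} to {"C"} and returns index 1.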
public static String[] combineAndFlat(String[][] predictComboNames) {
int numCombos = predictComboNames.length;
String[] finalPredNames = new String[numCombos];
for (int index = 0; index < numCombos; index++) {
String start = predictComboNames[index][0];
if (predictComboNames[index].length > 1)
for (int subIndex = 1; subIndex < predictComboNames[index].length; subIndex++)
start = start +":"+predictComboNames[index][subIndex];
finalPredNames[index] = start;
}
return finalPredNames;
}
public static String[] transformMultipleCols(Frame vec2Transform, String[][] predComboNames, int currIndex,
String[][] predNames) {
String[] currPredCombo = predComboNames[currIndex];
int matchPreviousCombo = findComboMatch(predComboNames, currIndex);
String[] matchPredNames = predNames[matchPreviousCombo];
String[] searchPair = new String[]{currPredCombo[0], currPredCombo[1]};
return transformTwoCols(vec2Transform, searchPair, matchPredNames);
}
/**
* Generate frame transformation on two interacting columns. Refer to AnovaGLMTutorial
* https://github.com/h2oai/h2o-3/issues/7561 sections III.II and IV.
*
* @param vec2Transform: frame containing the two predictors to transform
* @param vecNames: name of the predictors
* @param lastComboNames: predictor combo names of the second vector if applicable. This is used to transform
* more than two predictors
* @return String array containing the transformed column names.
*/
public static String[] transformTwoCols(Frame vec2Transform, String[] vecNames, String[] lastComboNames) {
String[] domains1 = vec2Transform.vec(vecNames[0]).domain();
String[] domains2 = lastComboNames == null ? vec2Transform.vec(vecNames[1]).domain() : lastComboNames;
String colName1 = vecNames[0];
String colName2 = vecNames[1];
int degOfFreedomC1 = domains1 == null ? 1 : (domains1.length-1);
int degOfFreedomC2 = lastComboNames == null ? (domains2.length-1) : domains2.length;
String[] newColNames = new String[degOfFreedomC1*degOfFreedomC2];
int colIndex = 0;
for (int col1 = 0; col1 < degOfFreedomC1; col1++) {
String part1 = colName1;
if (domains1 != null)
part1 = colName1 + "_" + domains1[col1];
for (int col2 = 0; col2 < degOfFreedomC2; col2++) {
if (lastComboNames == null) {
if (domains2 == null)
newColNames[colIndex++] = part1 + ":" + colName2;
else
newColNames[colIndex++] = part1 + ":" + colName2 + "_" + domains2[col2];
} else {
newColNames[colIndex++] = part1 + ":"+domains2[col2];
}
}
}
return newColNames;
}
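// Illustrative example (not part of the original source), with assumed domain values: for a predictor R with
// domain {"r0", "r1"} and a predictor C with domain {"c0", "c1", "c2"}, calling transformTwoCols with
// lastComboNames == null uses 1 and 2 degrees of freedom and returns {"R_r0:C_c0", "R_r0:C_c1"}. If instead
// lastComboNames = {"C_c0:S_s0", "C_c0:S_s1"} is supplied, each name is built as R_r0 + ":" + lastComboName,
// giving {"R_r0:C_c0:S_s0", "R_r0:C_c0:S_s1"}.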
/**
* perform data transformation described in AnovaGLMTutorial https://github.com/h2oai/h2o-3/issues/7561
* section III.II on one predictor.
*
* @param vec2Transform: frame containing that one predictor to transform.
* @param vecName: name of predictor
* @return: string array containing the transformed predictor column names.
*/
public static String[] transformOneCol(Frame vec2Transform, String vecName) {
String[] domains = vec2Transform.vec(vecName).domain();
int degOfFreedom = domains.length-1;
String[] newColNames = new String[degOfFreedom];
for (int domainInd = 0; domainInd < degOfFreedom; domainInd++)
newColNames[domainInd] = vecName+"_"+domains[domainInd];
return newColNames;
}
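// Illustrative example (not part of the original source): for an enum column "ABC" with domain {"0", "1", "2"}
// this returns {"ABC_0", "ABC_1"}; the last level is dropped because it carries no extra degree of freedom.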
public static String[] generateModelNames(String[][] predictComboNames) {
int numPredCombo = predictComboNames.length;
String[] modelNames = new String[numPredCombo+1];
for (int index=0; index < numPredCombo; index++) {
if (predictComboNames[index].length == 1)
modelNames[index] = "GLM model built without predictor " + predictComboNames[index][0];
else
modelNames[index] = "GLM model built without predictors interactions " +
Stream.of(predictComboNames[index]).collect(Collectors.joining(":"));
}
modelNames[numPredCombo] = "GLM model built with all predictors";
return modelNames;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/anovaglm/GenerateTransformColumns.java
|
package hex.anovaglm;
import hex.DataInfo;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.NewChunk;
import static hex.anovaglm.ANOVAGLMModel.ANOVAGLMParameters;
import static hex.anovaglm.ANOVAGLMUtils.findComboMatch;
/***
* This MRTask transforms the predictor columns and their interactions according to the rules described in the
* AnovaGLM tutorial (https://github.com/h2oai/h2o-3/issues/7561).
*/
public class GenerateTransformColumns extends MRTask<GenerateTransformColumns> {
final public int[] _newColNumber;
final public boolean _imputeMissing;
final public int[] _catNAFills;
final public double[] _numNAFills;
final int _numNewCols;
final boolean _hasWeight;
final boolean _hasOffset;
final int _weightID;
final int _offsetID;
final int _responseID;
final int _numPredIndividual;
final int _nCats;
final int _nNums;
final String[][] _transformedColNames;
final String[][] _predColsCombo;
public GenerateTransformColumns(String[][] newColNames, ANOVAGLMParameters parms, DataInfo dinfo, int numPreds,
String[][] predColsCombo) {
_predColsCombo = predColsCombo;
_transformedColNames = newColNames;
_newColNumber = countColNumber(newColNames);
_imputeMissing = parms.imputeMissing();
_catNAFills = dinfo.catNAFill();
_nCats = dinfo._cats;
_nNums = dinfo._nums;
_numNAFills = dinfo.numNAFill();
_numNewCols = _newColNumber.length;
_hasWeight = parms._weights_column != null;
_hasOffset = parms._offset_column != null;
_weightID = _hasWeight ? dinfo.weightChunkId() : -1;
_offsetID = _hasOffset ? dinfo.offsetChunkId() : -1;
_responseID = dinfo.responseChunkId(0);
_numPredIndividual = numPreds;
}
public static int[] countColNumber(String[][] transformedColNames) {
int[] colNumber = new int[transformedColNames.length];
for (int colInd = 0; colInd < transformedColNames.length; colInd++) {
colNumber[colInd] = transformedColNames[colInd].length;
}
return colNumber;
}
@Override
public void map(Chunk[] chk, NewChunk[] newChk) {
int numChkRows = chk[0].len();
double[][] changedRow = allocateRow(_newColNumber); // pre-allocate array for reuse
double[] oneRow = new double[_numPredIndividual]; // read in chunk row
for (int rowInd = 0; rowInd < numChkRows; rowInd++) {
if (!readCatVal(chk, rowInd, oneRow)) // read in one row of data
continue; // row contains NAs and missing values are not being imputed (skip)
transformOneRow(changedRow, oneRow, _numPredIndividual, _newColNumber);
int colIndex = 0;
for (int predInd = 0; predInd < _numNewCols; predInd++) {
for (int eleInd = 0; eleInd < _newColNumber[predInd]; eleInd++)
newChk[colIndex++].addNum(changedRow[predInd][eleInd]);
}
if (_hasWeight)
newChk[colIndex++].addNum(chk[_weightID].atd(rowInd));
if (_hasOffset)
newChk[colIndex++].addNum(chk[_offsetID].atd(rowInd));
newChk[colIndex].addNum(chk[_responseID].atd(rowInd));
}
}
public double imputeNA(int colIndex) {
if (colIndex < _nCats)
return _catNAFills[colIndex];
else
return _numNAFills[colIndex-_nCats];
}
public static double[][] allocateRow(int[] newColNumber) {
int numPreds = newColNumber.length;
double[][] oneRow = new double[numPreds][];
for (int index = 0; index < numPreds; index++)
oneRow[index] = new double[newColNumber[index]];
return oneRow;
}
public void transformOneRow(double[][] newRow, double[] val, int numPreds, int[] newColNumber) {
// transform individual enum predictors
for (int colInd = 0; colInd < _nCats; colInd++) {
for (int valInd = 0; valInd < newColNumber[colInd]; valInd++) {
if (val[colInd] == valInd)
newRow[colInd][valInd] = 1;
else if (val[colInd] == newColNumber[colInd])
newRow[colInd][valInd] = -1;
else
newRow[colInd][valInd] = 0;
}
}
// transform individual num predictors
for (int colInd = _nCats; colInd < _numPredIndividual; colInd++)
newRow[colInd][0] = val[colInd];
// transform interacting columns
transformInteractingPred(newRow);
}
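// Illustrative example (not part of the original source): a categorical predictor with 3 levels expands into
// newColNumber[colInd] == 2 transformed columns using sum-to-zero (effect) coding:
//   level 0 -> {  1,  0 }
//   level 1 -> {  0,  1 }
//   level 2 -> { -1, -1 }   (the level whose index equals newColNumber[colInd] acts as the reference level)
// Numerical predictors are copied through unchanged and interaction columns are products of these coded values.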
public void transformInteractingPred(double[][] newRow) {
for (int newColInd = _numPredIndividual; newColInd < _numNewCols; newColInd++) {
String[] currPredNames = _predColsCombo[newColInd];
int matchPCols = findComboMatch(_predColsCombo, newColInd);
double[] transformedInteraction = newRow[matchPCols]; // grab the transformed interaction of later columns
int cols2TranformInd = find(_predColsCombo, currPredNames[0]);
double[] currTransform = newRow[cols2TranformInd];
int countInd = 0;
for (int currInd = 0; currInd < currTransform.length; currInd++) {
for (int matchInd = 0; matchInd < transformedInteraction.length; matchInd++)
newRow[newColInd][countInd++] = currTransform[currInd]*transformedInteraction[matchInd];
}
}
}
private static int find(String[][] totArray, String ele) {
int arrLen = totArray.length;
for (int locInd = 0; locInd < arrLen; locInd++)
if (totArray[locInd][0].equals(ele))
return locInd;
return -1;
}
boolean readCatVal(Chunk[] chk, int rowInd, double[] rowData) {
for (int index = 0; index < _numPredIndividual; index++) {
rowData[index] = chk[index].atd(rowInd);
if (Double.isNaN(rowData[index])) {
if (_imputeMissing)
rowData[index] = imputeNA(index);
else
return false;
}
}
return true;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/api/MakeGLMModelHandler.java
|
package hex.api;
import hex.DataInfo;
import hex.DataInfo.TransformType;
import hex.Model;
import hex.glm.GLMModel;
import hex.glm.GLMModel.GLMOutput;
import hex.gram.Gram;
import hex.schemas.*;
import water.DKV;
import water.Key;
import water.MRTask;
import water.api.Handler;
import water.api.schemas3.KeyV3;
import water.fvec.*;
import water.fvec.Vec.VectorGroup;
import java.util.Arrays;
import java.util.Map;
/**
* Created by tomasnykodym on 3/25/15.
*/
public class MakeGLMModelHandler extends Handler {
public GLMModelV3 make_model(int version, MakeGLMModelV3 args){
GLMModel model = DKV.getGet(args.model.key());
if(model == null)
throw new IllegalArgumentException("missing source model " + args.model);
boolean multiClass = model._output._multinomial || model._output._ordinal;
String [] names = multiClass?model._output.multiClassCoeffNames():model._output.coefficientNames(); // coefficient names in order and with Intercept
Map<String,Double> coefs = model.coefficients();
if (args.beta.length != names.length) {
throw new IllegalArgumentException("model coefficient length " + names.length + " is different from coefficient" +
" provided by user " + args.beta.length + ".\n model coefficients needed are:\n" + String.join("\n", names));
}
for(int i = 0; i < args.names.length; ++i)
coefs.put(args.names[i],args.beta[i]);
double [] beta = model.beta().clone();
for(int i = 0; i < beta.length; ++i)
beta[i] = coefs.get(names[i]);
GLMModel m = new GLMModel(args.dest != null?args.dest.key():Key.make(),model._parms,null, model._ymu,
Double.NaN, Double.NaN, -1);
m.setInputParms(model._input_parms);
DataInfo dinfo = model.dinfo();
dinfo.setPredictorTransform(TransformType.NONE);
m._output = new GLMOutput(model.dinfo(), model._output._names, model._output._column_types, model._output._domains,
model._output.coefficientNames(), beta, model._output._binomial, model._output._multinomial,
model._output._ordinal);
DKV.put(m._key, m);
GLMModelV3 res = new GLMModelV3();
res.fillFromImpl(m);
return res;
}
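// Illustrative example (not part of the original source), with made-up values: if the source model has
// coefficient names {"x1", "Intercept"} with values {0.5, 1.0} and the request supplies names = {"x1", "Intercept"}
// and beta = {2.0, 1.0}, the coefficient map becomes {x1=2.0, Intercept=1.0} and the beta array is rebuilt in the
// model's coefficient-name order before the modified copy is stored under the destination key.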
public GLMRegularizationPathV3 extractRegularizationPath(int v, GLMRegularizationPathV3 args) {
GLMModel model = DKV.getGet(args.model.key());
if(model == null)
throw new IllegalArgumentException("missing source model " + args.model);
return new GLMRegularizationPathV3().fillFromImpl(model.getRegularizationPath());
}
// test-only functionality; kept here instead of adding a dedicated endpoint
/** Get the expanded (interactions + offsets) dataset. Careful printing! Test only
*/
public DataInfoFrameV3 getDataInfoFrame(int version, DataInfoFrameV3 args) {
Frame fr = DKV.getGet(args.frame.key());
if( null==fr ) throw new IllegalArgumentException("no frame found");
args.result = new KeyV3.FrameKeyV3(oneHot(fr, Model.InteractionSpec.allPairwise(args.interactions), args.use_all, args.standardize, args.interactions_only, true)._key);
return args;
}
public static Frame oneHot(Frame fr, Model.InteractionSpec interactions, boolean useAll, boolean standardize, final boolean interactionsOnly, final boolean skipMissing) {
final DataInfo dinfo = new DataInfo(fr,null,1,useAll,standardize?TransformType.STANDARDIZE:TransformType.NONE,TransformType.NONE,skipMissing,false,false,false,false,false, interactions);
Frame res;
if( interactionsOnly ) {
if( null==dinfo._interactionVecs ) throw new IllegalArgumentException("no interactions");
int noutputs=0;
final int[] colIds = new int[dinfo._interactionVecs.length];
final int[] offsetIds = new int[dinfo._interactionVecs.length];
int idx=0;
String[] coefNames = dinfo.coefNames();
for(int i : dinfo._interactionVecs)
noutputs+= ( offsetIds[idx++] = ((InteractionWrappedVec)dinfo._adaptedFrame.vec(i)).expandedLength());
String[] names = new String[noutputs];
int offset=idx=0;
int namesIdx=0;
for(int i=0;i<dinfo._adaptedFrame.numCols();++i) {
Vec v = dinfo._adaptedFrame.vec(i);
if( v instanceof InteractionWrappedVec ) { // ding! start copying coefNames into names while offset < colIds[idx+1]
colIds[idx] = offset;
for(int nid=0;nid<offsetIds[idx];++nid)
names[namesIdx++] = coefNames[offset++];
idx++;
if( idx > dinfo._interactionVecs.length ) break; // no more interaction vecs left
} else {
if( v.isCategorical() ) offset+= v.domain().length - (useAll?0:1);
else offset++;
}
}
res = new MRTask() {
@Override public void map(Chunk[] cs, NewChunk ncs[]) {
DataInfo.Row r = dinfo.newDenseRow();
for(int i=0;i<cs[0]._len;++i) {
r=dinfo.extractDenseRow(cs,i,r);
if( skipMissing && r.isBad() ) continue;
int newChkIdx=0;
for(int idx=0;idx<colIds.length;++idx) {
int startOffset = colIds[idx];
for(int start=startOffset;start<(startOffset+offsetIds[idx]);++start )
ncs[newChkIdx++].addNum(r.get(start));
}
}
}
}.doAll(noutputs,Vec.T_NUM,dinfo._adaptedFrame).outputFrame(Key.make(),names,null);
} else {
byte[] types = new byte[dinfo.fullN()];
Arrays.fill(types, Vec.T_NUM);
res = new MRTask() {
@Override
public void map(Chunk[] cs, NewChunk ncs[]) {
DataInfo.Row r = dinfo.newDenseRow();
for (int i = 0; i < cs[0]._len; ++i) {
r = dinfo.extractDenseRow(cs, i, r);
if( skipMissing && r.isBad() ) continue;
for (int n = 0; n < ncs.length; ++n)
ncs[n].addNum(r.get(n));
}
}
}.doAll(types, dinfo._adaptedFrame.vecs()).outputFrame(Key.make("OneHot"+Key.make().toString()), dinfo.coefNames(), null);
}
dinfo.dropInteractions();
dinfo.remove();
return res;
}
public GramV3 computeGram(int v, GramV3 input){
if(DKV.get(input.X.key()) == null)
throw new IllegalArgumentException("Frame " + input.X.key() + " does not exist.");
Frame fr = input.X.key().get();
Frame frcpy = new Frame(fr._names.clone(),fr.vecs().clone());
String wname = null;
Vec weight = null;
if(input.W != null && !input.W.column_name.isEmpty()) {
wname = input.W.column_name;
if(fr.find(wname) == -1) throw new IllegalArgumentException("Did not find weight vector " + wname);
weight = frcpy.remove(wname);
}
DataInfo dinfo = new DataInfo(frcpy,null,0,input.use_all_factor_levels,input.standardize?TransformType.STANDARDIZE:TransformType.NONE,TransformType.NONE,input.skip_missing,false,!input.skip_missing,/* weight */ false, /* offset */ false, /* fold */ false, /* intercept */ true);
DKV.put(dinfo);
if(weight != null)dinfo.setWeights(wname,weight);
Gram.GramTask gt = new Gram.GramTask(null,dinfo,false,true).doAll(dinfo._adaptedFrame);
double [][] gram = gt._gram.getXX();
dinfo.remove();
String [] names = water.util.ArrayUtils.append(dinfo.coefNames(),"Intercept");
Vec [] vecs = new Vec[gram.length];
Key[] keys = new VectorGroup().addVecs(vecs.length);
for(int i = 0; i < vecs.length; ++i)
vecs[i] = Vec.makeVec(gram[i],keys[i]);
input.destination_frame = new KeyV3.FrameKeyV3();
String keyname = input.X.key().toString();
if(keyname.endsWith(".hex"))
keyname = keyname.substring(0,keyname.lastIndexOf("."));
keyname = keyname + "_gram";
if(weight != null)
keyname = keyname + "_" + wname;
Key k = Key.make(keyname);
if(DKV.get(k) != null){
int cnt = 0;
while(cnt < 1000 && DKV.get(k = Key.make(keyname + "_" + cnt)) != null)cnt++;
if(cnt == 1000) throw new IllegalArgumentException("unable to make unique key");
}
input.destination_frame.fillFromImpl(k);
DKV.put(new Frame(k, names,vecs));
return input;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/api/RegisterAlgos.java
|
package hex.api;
import hex.ModelBuilder;
import hex.anovaglm.ANOVAGLM;
import hex.psvm.PSVM;
import hex.tree.TreeHandler;
import water.api.AlgoAbstractRegister;
import water.api.RestApiContext;
import water.api.SchemaServer;
public class RegisterAlgos extends AlgoAbstractRegister {
@Override
public void registerEndPoints(RestApiContext context) {
// List of algorithms
ModelBuilder[] algos = new ModelBuilder[]{
new hex.deeplearning.DeepLearning(true),
new hex.glm .GLM (true),
new hex.glrm .GLRM (true),
new hex.kmeans .KMeans (true),
new hex.naivebayes .NaiveBayes (true),
new hex.pca .PCA (true),
new hex.svd .SVD (true),
new hex.tree.drf .DRF (true),
new hex.tree.gbm .GBM (true),
new hex.tree.isofor .IsolationForest(true),
new hex.tree.isoforextended.ExtendedIsolationForest(true),
new hex.aggregator .Aggregator (true),
new hex.word2vec .Word2Vec (true),
new hex.ensemble .StackedEnsemble(true),
new hex.coxph .CoxPH (true),
new hex.generic .Generic (true),
new hex.gam .GAM (true),
new ANOVAGLM(true),
new PSVM(true),
new hex.rulefit .RuleFit (true),
new hex.tree.uplift.UpliftDRF (true),
new hex.modelselection.ModelSelection (true),
new hex.isotonic .IsotonicRegression(true),
new hex.tree.dt .DT (true),
new hex.hglm .HGLM (true),
new hex.adaboost. AdaBoost (true)
};
// "Word2Vec", "Example", "Grep"
for (ModelBuilder algo : algos) {
String base = algo.getClass().getSimpleName();
int version = SchemaServer.getStableVersion();
if ( base.equals("SVD") ||
base.equals("Aggregator") ||
base.equals("StackedEnsemble")) {
version = SchemaServer.getExperimentalVersion();
}
registerModelBuilder(context, algo, version);
}
context.registerEndpoint("make_glm_model", "POST /3/MakeGLMModel",
MakeGLMModelHandler.class, "make_model",
"Make a new GLM model based on existing one");
context.registerEndpoint("glm_regularization_path","GET /3/GetGLMRegPath", MakeGLMModelHandler.class, "extractRegularizationPath",
"Get full regularization path");
context.registerEndpoint("weighted_gram_matrix", "GET /3/ComputeGram", MakeGLMModelHandler.class, "computeGram",
"Get weighted gram matrix");
context.registerEndpoint("word2vec_synonyms", "GET /3/Word2VecSynonyms", Word2VecHandler.class, "findSynonyms",
"Find synonyms using a word2vec model");
context.registerEndpoint("word2vec_transform", "GET /3/Word2VecTransform", Word2VecHandler.class, "transform",
"Transform words to vectors using a word2vec model");
context.registerEndpoint("glm_datainfo_frame", "POST /3/DataInfoFrame",MakeGLMModelHandler.class, "getDataInfoFrame",
"Test only" );
context.registerEndpoint("get_tree", "GET /3/Tree", TreeHandler.class, "getTree", "Obtain a traverseable representation of a specific tree");
}
@Override
public String getName() {
return "Algos";
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/api/Word2VecHandler.java
|
package hex.api;
import hex.schemas.Word2VecSynonymsV3;
import hex.schemas.Word2VecTransformV3;
import hex.word2vec.Word2VecModel;
import water.DKV;
import water.api.Handler;
import water.api.schemas3.KeyV3;
import water.fvec.Frame;
import water.util.Log;
import java.util.*;
public class Word2VecHandler extends Handler {
public Word2VecSynonymsV3 findSynonyms(int version, Word2VecSynonymsV3 args) {
Word2VecModel model = DKV.getGet(args.model.key());
if (model == null)
throw new IllegalArgumentException("missing source model " + args.model);
Map<String, Float> synonyms = model.findSynonyms(args.word, args.count);
List<Map.Entry<String, Float>> result = new ArrayList<>(synonyms.entrySet());
Collections.sort(result, new Comparator<Map.Entry<String, Float>>() {
@Override
public int compare(Map.Entry<String, Float> o1, Map.Entry<String, Float> o2) {
return o2.getValue().compareTo(o1.getValue()); // reverse sort
}
});
args.synonyms = new String[result.size()];
args.scores = new double[result.size()];
if(result.size() > 0) {
int i = 0;
for (Map.Entry<String, Float> entry : result) {
args.synonyms[i] = entry.getKey();
args.scores[i] = entry.getValue();
i++;
}
}
if (result.size() < args.count) {
Log.warn(String.format("The number of synonyms found (%d) is less than the requested 'count' parameter (%d).", args.synonyms.length, args.count));
}
return args;
}
public Word2VecTransformV3 transform(int version, Word2VecTransformV3 args) {
Word2VecModel model = DKV.getGet(args.model.key());
if (model == null)
throw new IllegalArgumentException("missing source model " + args.model);
Frame words = DKV.getGet(args.words_frame.key());
if (words == null)
throw new IllegalArgumentException("missing words frame " + args.words_frame);
if (words.numCols() != 1) {
throw new IllegalArgumentException("words frame is expected to have a single string column, got" + words.numCols());
}
if (args.aggregate_method == null)
args.aggregate_method = Word2VecModel.AggregateMethod.NONE;
Frame vectors = model.transform(words.vec(0), args.aggregate_method);
args.vectors_frame = new KeyV3.FrameKeyV3(vectors._key);
return args;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/coxph/CPHBaseTask.java
|
package hex.coxph;
import hex.DataInfo;
import water.DKV;
import water.Key;
import water.MRTask;
import water.fvec.Chunk;
abstract class CPHBaseTask<T extends CPHBaseTask<T>> extends MRTask<T> {
private Key<DataInfo> _dinfoKey;
protected transient DataInfo _dinfo;
CPHBaseTask(DataInfo dinfo) {
_dinfoKey = dinfo._key;
}
@Override
public void map(Chunk[] cs) {
chunkInit();
DataInfo.Row row = _dinfo.newDenseRow();
for (int r = 0; r < cs[0]._len; r++) {
row = _dinfo.extractDenseRow(cs, r, row);
if (row.isBad() || row.weight == 0)
continue;
processRow(row);
}
}
abstract protected void processRow(DataInfo.Row row);
@Override
protected void setupLocal(){
_dinfo = DKV.get(_dinfoKey).get();
}
protected void chunkInit() {}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/coxph/CoxPH.java
|
package hex.coxph;
import Jama.Matrix;
import hex.*;
import hex.DataInfo.Row;
import hex.DataInfo.TransformType;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import water.*;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.rapids.ast.prims.mungers.AstGroup;
import water.util.*;
import water.util.Timer;
import static java.util.stream.Collectors.toList;
import static water.util.ArrayUtils.constAry;
import java.util.*;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
/**
* Cox Proportional Hazards Model
*/
public class CoxPH extends ModelBuilder<CoxPHModel,CoxPHModel.CoxPHParameters,CoxPHModel.CoxPHOutput> {
private static final int MAX_TIME_BINS = 100000;
@Override public ModelCategory[] can_build() { return new ModelCategory[] { ModelCategory.CoxPH }; }
@Override public BuilderVisibility builderVisibility() { return BuilderVisibility.Stable; }
@Override public boolean isSupervised() { return true; }
public CoxPH(boolean startup_once) {
super(new CoxPHModel.CoxPHParameters(), startup_once);
}
public CoxPH( CoxPHModel.CoxPHParameters parms ) { super(parms); init(false); }
@Override protected CoxPHDriver trainModelImpl() { return new CoxPHDriver(); }
@Override
public boolean haveMojo() {
return true;
}
/** Initialize the ModelBuilder, validating all arguments and preparing the
* training frame. This call is expected to be overridden in the subclasses
* and each subclass will start with "super.init();". This call is made
* by the front-end whenever the GUI is clicked, and needs to be fast;
* heavy-weight prep needs to wait for the trainModel() call.
*/
@Override public void init(boolean expensive) {
super.init(expensive);
if (_parms._train != null && _parms.train() == null) {
error("train", "Invalid training frame (Frame key = " + _parms._train + " not found)");
}
if (_parms._train != null && _parms.train() != null) {
if (_parms._start_column != null) {
Vec startVec = _parms.startVec();
if (startVec == null) {
error("start_column", "start_column " + _parms._start_column + " not found in the training frame");
} else if (!startVec.isNumeric()) {
error("start_column", "start time must be undefined or of type numeric");
}
}
if (_parms._stop_column != null) {
Vec stopVec = _parms.stopVec();
if (stopVec == null) {
error("stop_column", "stop_column " + _parms._stop_column + " not found in the training frame");
} else if (!stopVec.isNumeric()) {
error("stop_column", "stop time must be of type numeric");
} else if (expensive) {
try {
CollectTimes.collect(_parms.stopVec(), _parms._single_node_mode);
} catch (CollectTimesException e) {
error("stop_column", e.getMessage());
}
}
}
if ((_parms._response_column != null) && ! _response.isInt() && (! _response.isCategorical()))
error("response_column", "response/event column must be of type integer or factor");
if (_parms.startVec() != null && _parms.stopVec() != null) {
if (_parms.startVec().min() >= _parms.stopVec().max())
error("start_column", "start times must be strictly less than stop times");
}
if (_parms._interactions != null) {
for (String col : _parms._interactions) {
if (col != null && !col.isEmpty() && _train.vec(col) == null) {
error("interactions", col + " not found in the training frame");
}
}
}
if (_parms._interactions_only != null) {
for (String col : _parms._interactions_only) {
if (col != null && !col.isEmpty() && _train.vec(col) == null) {
error("interactions_only", col + " not found in the training frame");
}
}
}
if (_parms._interaction_pairs != null) {
for (StringPair pair : _parms._interaction_pairs) {
if (pair._a != null && !pair._a.isEmpty() && _train.vec(pair._a) == null) {
error("interaction_pairs", pair._a + " not found in the training frame with columns"
+ Arrays.toString(_train.names()));
}
if (pair._b != null && !pair._b.isEmpty() && _train.vec(pair._b) == null) {
error("interaction_pairs", pair._b + " not found in the training frame with columns"
+ Arrays.toString(_train.names()));
}
}
}
if( _train != null ) {
int nonFeatureColCount = (_parms._start_column!=null?1:0) + (_parms._stop_column!=null?1:0);
if (_train.numCols() < (2 + nonFeatureColCount))
error("_train", "Training data must have at least 2 features (incl. response).");
if (null != _parms._stratify_by) {
int stratifyColCount = _parms._stratify_by.length;
if (_train.numCols() < (2 + nonFeatureColCount + stratifyColCount))
error("_train", "Training data must have at least 1 feature that is not a response and is not used for stratification.");
}
}
if (_parms.isStratified()) {
for (String col : _parms._stratify_by) {
Vec v = _parms.train().vec(col);
if (v == null) {
error("stratify_by", "column '" + col + "' not found");
} else if (v.get_type() != Vec.T_CAT) {
error("stratify_by", "non-categorical column '" + col + "' cannot be used for stratification");
}
if (_parms._interactions != null) {
for (String inter : _parms._interactions) {
if (col.equals(inter)) {
// Makes implementation simpler and should not have an actual impact anyway
error("stratify_by", "stratification column '" + col + "' cannot be used in an implicit interaction. " +
"Use explicit (pair-wise) interactions instead");
break;
}
}
}
}
}
}
if (Double.isNaN(_parms._lre_min) || _parms._lre_min <= 0)
error("lre_min", "lre_min must be a positive number");
if (_parms._max_iterations < 1)
error("max_iterations", "max_iterations must be a positive integer");
}
@Override
protected int init_getNClass() {
return 1;
}
static class DiscretizeTimeTask extends MRTask<DiscretizeTimeTask> {
final double[] _time;
final boolean _has_start_column;
private DiscretizeTimeTask(double[] time, boolean has_start_column) {
_time = time;
_has_start_column = has_start_column;
}
@Override
public void map(Chunk[] cs, NewChunk[] ncs) {
assert cs.length == (_has_start_column ? 2 : 1);
for (int i = 0; i < cs[0].len(); i++)
discretizeTime(i, cs, ncs, 0);
}
void discretizeTime(int i, Chunk[] cs, NewChunk[] ncs, int offset) {
final double stopTime = cs[cs.length - 1].atd(i);
final int t2 = Arrays.binarySearch(_time, stopTime);
if (t2 < 0)
throw new IllegalStateException("Encountered unexpected stop time");
ncs[ncs.length - 1].addNum(t2 + offset);
if (_has_start_column) {
final double startTime = cs[0].atd(i);
if (startTime >= stopTime)
throw new IllegalArgumentException("start times must be strictly less than stop times");
final int t1c = Arrays.binarySearch(_time, startTime);
final int t1 = t1c >= 0 ? t1c + 1 : -t1c - 1;
ncs[0].addNum(t1 + offset);
}
}
static Frame discretizeTime(double[] time, Vec startVec, Vec stopVec, boolean runLocal) {
final boolean hasStartColumn = startVec != null;
final Frame f = new Frame();
if (hasStartColumn)
f.add("__startCol", startVec);
f.add("__stopCol", stopVec);
byte[] outputTypes = hasStartColumn ? new byte[]{Vec.T_NUM, Vec.T_NUM} : new byte[]{Vec.T_NUM};
return new DiscretizeTimeTask(time, startVec != null)
.doAll(outputTypes, f, runLocal).outputFrame();
}
}
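// Illustrative example (not part of the original source): with distinct stop times _time = {1.0, 2.5, 4.0},
// a row with stop time 2.5 is mapped to index 1 (a binary-search hit). A start time of 1.0 maps to 0 + 1 = 1 and
// a start time of 1.7 maps to insertion point 1, so the (start, stop] interval is expressed on the discretized
// time scale consumed by CoxPHTask.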
static class StrataTask extends DiscretizeTimeTask {
private final IcedHashMap<AstGroup.G, IcedInt> _strataMap;
private StrataTask(IcedHashMap<AstGroup.G, IcedInt> strata) {
this(strata, new double[0], false);
}
private StrataTask(IcedHashMap<AstGroup.G, IcedInt> strata, double[] time, boolean has_start_column) {
super(time, has_start_column);
_strataMap = strata;
}
@Override
public void map(Chunk[] cs, NewChunk[] ncs) {
Chunk[] scs; // strata chunks
Chunk[] tcs; // time chunks
NewChunk[] tncs; // time new chunks
if (ncs.length > 1) {
// split chunks into 2 groups: strata chunks and time chunks
scs = new Chunk[cs.length - ncs.length + 1];
System.arraycopy(cs, 0, scs, 0, scs.length);
tcs = new Chunk[ncs.length - 1];
System.arraycopy(cs, scs.length, tcs, 0, tcs.length);
tncs = new NewChunk[ncs.length - 1];
System.arraycopy(ncs, 1, tncs, 0, tncs.length);
} else {
scs = cs;
tcs = null;
tncs = null;
}
AstGroup.G g = new AstGroup.G(scs.length, null);
for (int i = 0; i < cs[0].len(); i++) {
g.fill(i, scs);
IcedInt strataId = _strataMap.get(g);
if (strataId == null) {
for (NewChunk nc : ncs)
nc.addNA();
} else {
ncs[0].addNum(strataId._val);
if (tcs != null) {
final int strataOffset = _time.length * strataId._val;
discretizeTime(i, tcs, tncs, strataOffset);
}
}
}
}
static Vec makeStrataVec(Frame f, String[] stratifyBy, IcedHashMap<AstGroup.G, IcedInt> mapping, boolean runLocal) {
final Frame sf = f.subframe(stratifyBy);
return new StrataTask(mapping).doAll(new byte[]{Vec.T_NUM}, sf, runLocal).outputFrame().anyVec();
}
static Frame stratifyTime(Frame f, double[] time, String[] stratifyBy, IcedHashMap<AstGroup.G, IcedInt> mapping,
Vec startVec, Vec stopVec, boolean runLocal) {
final Frame sf = f.subframe(stratifyBy);
final boolean hasStartColumn = startVec != null;
if (hasStartColumn)
sf.add("__startVec", startVec);
sf.add("__stopVec", stopVec);
return new StrataTask(mapping, time, hasStartColumn)
.doAll(constAry(hasStartColumn ? 3 : 2, Vec.T_NUM), sf, runLocal).outputFrame();
}
static void setupStrataMapping(Frame f, String[] stratifyBy, IcedHashMap<AstGroup.G, IcedInt> outMapping) {
final Frame sf = f.subframe(stratifyBy);
int[] idxs = MemoryManager.malloc4(stratifyBy.length);
for (int i = 0; i < idxs.length; i++)
idxs[i] = i;
Collection<AstGroup.G> groups = AstGroup.doGroups(sf, idxs, AstGroup.aggNRows());
groups: for (AstGroup.G g : groups) {
for (double val : g._gs)
if (Double.isNaN(val))
continue groups;
outMapping.put(g, new IcedInt(outMapping.size()));
}
}
}
public boolean hasStartColumn() {
return _parms._start_column != null;
}
@Override
protected boolean validateBinaryResponse() {
return false; // CoxPH can handle numerical 0-1 response, no warnings needed
}
public class CoxPHDriver extends Driver {
private Frame reorderTrainFrameColumns(IcedHashMap<AstGroup.G, IcedInt> outStrataMap, double time[]) {
Frame f = new Frame();
Vec weightVec = null;
Vec startVec = null;
Vec stopVec = null;
Vec eventVec = null;
Vec[] vecs = train().vecs();
String[] names = train().names();
for (int i = 0; i < names.length; i++) {
if (names[i].equals(_parms._weights_column))
weightVec = vecs[i];
else if (names[i].equals(_parms._start_column))
startVec = vecs[i];
else if (names[i].equals(_parms._stop_column))
stopVec = vecs[i];
else if (names[i].equals(_parms._response_column))
eventVec = vecs[i];
else
f.add(names[i], vecs[i]);
}
Vec strataVec = null;
Frame discretizedFr;
if (_parms.isStratified()) {
StrataTask.setupStrataMapping(f, _parms._stratify_by, outStrataMap);
discretizedFr = Scope.track(
StrataTask.stratifyTime(f, time, _parms._stratify_by, outStrataMap, startVec, stopVec, _parms._single_node_mode)
);
strataVec = discretizedFr.remove(0);
if (_parms.interactionSpec() == null) {
// no interactions => we can drop the columns earlier
f.remove(_parms._stratify_by);
}
} else {
discretizedFr = Scope.track(DiscretizeTimeTask.discretizeTime(time, startVec, stopVec, _parms._single_node_mode));
}
// swap time columns for their discretized versions
if (startVec != null) {
startVec = discretizedFr.vec(0);
stopVec = discretizedFr.vec(1);
} else
stopVec = discretizedFr.vec(0);
if (weightVec != null)
f.add(_parms._weights_column, weightVec);
if (strataVec != null)
f.add(_parms._strata_column, strataVec);
if (startVec != null)
f.add(_parms._start_column, startVec);
if (stopVec != null)
f.add(_parms._stop_column, stopVec);
if (eventVec != null)
f.add(_parms._response_column, eventVec);
return f;
}
protected void initStats(final CoxPHModel model, final DataInfo dinfo, final double[] time) {
CoxPHModel.CoxPHParameters p = model._parms;
CoxPHModel.CoxPHOutput o = model._output;
o._n = p.stopVec().length();
o.data_info = dinfo;
final int n_offsets = _offset == null ? 0 : 1;
final int n_coef = o.data_info.fullN() - n_offsets;
final String[] coefNames = o.data_info.coefNames();
o._coef_names = new String[n_coef];
System.arraycopy(coefNames, 0, o._coef_names, 0, n_coef);
o._coef = MemoryManager.malloc8d(n_coef);
o._exp_coef = MemoryManager.malloc8d(n_coef);
o._exp_neg_coef = MemoryManager.malloc8d(n_coef);
o._se_coef = MemoryManager.malloc8d(n_coef);
o._z_coef = MemoryManager.malloc8d(n_coef);
o._var_coef = MemoryManager.malloc8d(n_coef, n_coef);
o._mean_offset = MemoryManager.malloc8d(n_offsets);
o._offset_names = new String[n_offsets];
System.arraycopy(coefNames, n_coef, o._offset_names, 0, n_offsets);
final int n_time = (int) dinfo._adaptedFrame.vec(p._stop_column).max() + 1;
o._time = time;
o._n_risk = MemoryManager.malloc8d(n_time);
o._n_event = MemoryManager.malloc8d(n_time);
o._n_censor = MemoryManager.malloc8d(n_time);
}
protected void calcCounts(CoxPHModel model, final CoxPHTask coxMR) {
CoxPHModel.CoxPHParameters p = model._parms;
CoxPHModel.CoxPHOutput o = model._output;
o._n_missing = o._n - coxMR.n;
o._n = coxMR.n;
o._x_mean_cat = MemoryManager.malloc8d(coxMR.sumWeights.length, o.data_info.numCats());
o._x_mean_num = MemoryManager.malloc8d(coxMR.sumWeights.length, o.data_info.numNums() - o._mean_offset.length);
for (int s = 0; s < coxMR.sumWeights.length; s++) {
System.arraycopy(coxMR.sumWeightedCatX[s], 0, o._x_mean_cat[s], 0, o._x_mean_cat[s].length);
for (int j = 0; j < o._x_mean_cat[s].length; j++)
o._x_mean_cat[s][j] /= coxMR.sumWeights[s];
System.arraycopy(coxMR.sumWeightedNumX[s], 0, o._x_mean_num[s], 0, o._x_mean_num[s].length);
for (int j = 0; j < o._x_mean_num[s].length; j++)
o._x_mean_num[s][j] = o.data_info._normSub[j] + o._x_mean_num[s][j] / coxMR.sumWeights[s];
}
System.arraycopy(o.data_info._normSub, o.data_info.numNums() - o._mean_offset.length, o._mean_offset, 0, o._mean_offset.length);
for (int t = 0; t < coxMR.countEvents.length; ++t) {
o._total_event += coxMR.countEvents[t];
if (coxMR.sizeEvents[t] > 0 || coxMR.sizeCensored[t] > 0) {
o._n_risk[t] = coxMR.sizeRiskSet[t];
o._n_event[t] = coxMR.sizeEvents[t];
o._n_censor[t] = coxMR.sizeCensored[t];
}
}
if (p._start_column == null)
for (int t = o._n_risk.length - 2; t >= 0; --t)
o._n_risk[t] += o._n_risk[t + 1];
}
protected ComputationState calcLoglik(DataInfo dinfo, ComputationState cs, CoxPHModel.CoxPHParameters p, CoxPHTask coxMR) {
cs.reset();
switch (p._ties) {
case efron:
return EfronMethod.calcLoglik(dinfo, coxMR, cs, _parms._single_node_mode);
case breslow:
final int n_coef = cs._n_coef;
final int n_time = coxMR.sizeEvents.length;
double newLoglik = 0;
for (int i = 0; i < n_coef; i++)
cs._gradient[i] = coxMR.sumXEvents[i];
for (int t = n_time - 1; t >= 0; --t) {
final double sizeEvents_t = coxMR.sizeEvents[t];
if (sizeEvents_t > 0) {
final double sumLogRiskEvents_t = coxMR.sumLogRiskEvents[t];
final double rcumsumRisk_t = coxMR.rcumsumRisk[t];
newLoglik += sumLogRiskEvents_t;
newLoglik -= sizeEvents_t * Math.log(rcumsumRisk_t);
for (int j = 0; j < n_coef; ++j) {
final double dlogTerm = coxMR.rcumsumXRisk[t][j] / rcumsumRisk_t;
cs._gradient[j] -= sizeEvents_t * dlogTerm;
for (int k = 0; k < n_coef; ++k)
cs._hessian[j][k] -= sizeEvents_t *
(((coxMR.rcumsumXXRisk[t][j][k] / rcumsumRisk_t) -
(dlogTerm * (coxMR.rcumsumXRisk[t][k] / rcumsumRisk_t))));
}
}
}
cs._logLik = newLoglik;
return cs;
default:
throw new IllegalArgumentException("_ties method must be either efron or breslow");
}
}
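// Note (not part of the original source): for the breslow branch above, the accumulated quantities correspond to
// the Breslow partial log-likelihood
//   l(beta) = sum_t [ sumLogRiskEvents[t] - sizeEvents[t] * log(rcumsumRisk[t]) ]
// where sizeEvents[t] is the total event weight at time t and rcumsumRisk[t] is the weighted sum of exp(x'beta)
// over the risk set at time t. The gradient subtracts sizeEvents[t] * rcumsumXRisk[t][j] / rcumsumRisk[t] from the
// weighted sum of event covariates, and the Hessian uses the matching second-moment terms rcumsumXXRisk.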
protected void calcModelStats(CoxPHModel model, final double[] newCoef, final ComputationState cs) {
CoxPHModel.CoxPHParameters p = model._parms;
CoxPHModel.CoxPHOutput o = model._output;
final int n_coef = o._coef.length;
final Matrix inv_hessian = new Matrix(cs._hessian).inverse();
for (int j = 0; j < n_coef; ++j) {
for (int k = 0; k <= j; ++k) {
final double elem = -inv_hessian.get(j, k);
o._var_coef[j][k] = elem;
o._var_coef[k][j] = elem;
}
}
for (int j = 0; j < n_coef; ++j) {
o._coef[j] = newCoef[j];
o._exp_coef[j] = Math.exp(o._coef[j]);
o._exp_neg_coef[j] = Math.exp(- o._coef[j]);
o._se_coef[j] = Math.sqrt(o._var_coef[j][j]);
o._z_coef[j] = o._coef[j] / o._se_coef[j];
}
if (o._iter == 0) {
o._null_loglik = cs._logLik;
o._maxrsq = 1 - Math.exp(2 * o._null_loglik / o._n);
o._score_test = 0;
for (int j = 0; j < n_coef; ++j) {
double sum = 0;
for (int k = 0; k < n_coef; ++k)
sum += o._var_coef[j][k] * cs._gradient[k];
o._score_test += cs._gradient[j] * sum;
}
}
o._loglik = cs._logLik;
o._loglik_test = - 2 * (o._null_loglik - o._loglik);
o._rsq = 1 - Math.exp(- o._loglik_test / o._n);
o._wald_test = 0;
for (int j = 0; j < n_coef; ++j) {
double sum = 0;
for (int k = 0; k < n_coef; ++k)
sum -= cs._hessian[j][k] * (o._coef[k] - p._init);
o._wald_test += (o._coef[j] - p._init) * sum;
}
}
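// Note (not part of the original source): the statistics above follow the usual Cox regression conventions:
// var(coef) = -inverse(Hessian), se_coef = sqrt(diag(var)), z_coef = coef / se_coef, the likelihood ratio
// statistic is -2 * (null_loglik - loglik), and the Wald statistic is (coef - init)' * (-Hessian) * (coef - init).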
protected void calcCumhaz_0(CoxPHModel model, final CoxPHTask coxMR) {
CoxPHModel.CoxPHParameters p = model._parms;
CoxPHModel.CoxPHOutput o = model._output;
final int n_time = coxMR.sizeEvents.length;
o._cumhaz_0 = MemoryManager.malloc8d(n_time);
o._var_cumhaz_1 = MemoryManager.malloc8d(n_time);
o._var_cumhaz_2 = Key.make(model._key + "_var_cumhaz_2");
o._var_cumhaz_2_matrix = new CoxPHModel.FrameMatrix(o._var_cumhaz_2, n_time, o._coef.length);
final int num_strata = coxMR._num_strata;
o._baseline_hazard = Key.make(model._key + "_baseline_hazard");
o._baseline_hazard_matrix = new CoxPHModel.FrameMatrix(o._baseline_hazard, n_time / num_strata, num_strata + 1);
o._baseline_survival = Key.make(model._key + "_baseline_survival");
o._baseline_survival_matrix = new CoxPHModel.FrameMatrix(o._baseline_survival, coxMR.sizeEvents.length / num_strata, num_strata + 1);
final int n_coef = o._coef.length;
int nz = 0;
switch (p._ties) {
case efron:
for (int t = 0; t < coxMR.sizeEvents.length; ++t) {
final double sizeEvents_t = coxMR.sizeEvents[t];
final double sizeCensored_t = coxMR.sizeCensored[t];
if (sizeEvents_t > 0 || sizeCensored_t > 0) {
final long countEvents_t = coxMR.countEvents[t];
final double sumRiskEvents_t = coxMR.sumRiskEvents[t];
final double rcumsumRisk_t = coxMR.rcumsumRisk[t];
final double avgSize = sizeEvents_t / countEvents_t;
o._cumhaz_0[nz] = 0;
o._var_cumhaz_1[nz] = 0;
for (int j = 0; j < n_coef; ++j)
o._var_cumhaz_2_matrix.set(nz, j, 0);
for (long e = 0; e < countEvents_t; ++e) {
final double frac = ((double) e) / ((double) countEvents_t);
final double haz = 1 / (rcumsumRisk_t - frac * sumRiskEvents_t);
final double haz_sq = haz * haz;
o._cumhaz_0[nz] += avgSize * haz;
o._var_cumhaz_1[nz] += avgSize * haz_sq;
for (int j = 0; j < n_coef; ++j)
o._var_cumhaz_2_matrix.add(nz, j, avgSize * ((coxMR.rcumsumXRisk[t][j] - frac * coxMR.sumXRiskEvents[t][j]) * haz_sq));
}
nz++;
}
}
break;
case breslow:
for (int t = 0; t < coxMR.sizeEvents.length; ++t) {
final double sizeEvents_t = coxMR.sizeEvents[t];
final double sizeCensored_t = coxMR.sizeCensored[t];
if (sizeEvents_t > 0 || sizeCensored_t > 0) {
final double rcumsumRisk_t = coxMR.rcumsumRisk[t];
final double cumhaz_0_nz = sizeEvents_t / rcumsumRisk_t;
o._cumhaz_0[nz] = cumhaz_0_nz;
o._var_cumhaz_1[nz] = sizeEvents_t / (rcumsumRisk_t * rcumsumRisk_t);
for (int j = 0; j < n_coef; ++j)
o._var_cumhaz_2_matrix.set(nz, j, (coxMR.rcumsumXRisk[t][j] / rcumsumRisk_t) * cumhaz_0_nz);
nz++;
}
}
break;
default:
throw new IllegalArgumentException("_ties method must be either efron or breslow");
}
double[] totalRisks = coxMR.totalRisk.clone();
double[] sumHaz = new double[totalRisks.length];
for (int i = sumHaz.length - 1; i >= 0; i--) {
sumHaz[i] = 0d;
}
for (int t = 0; t < coxMR._time.length; ++t) {
o._baseline_hazard_matrix.set(t,0, coxMR._time[t]);
o._baseline_survival_matrix.set(t,0, coxMR._time[t]);
for (int strata = 0; strata < num_strata; strata++) {
final double weightEvent = coxMR.sizeEvents[t + coxMR._time.length * strata];
final double sumRiskEvent = coxMR.sumRiskAllEvents[t + coxMR._time.length * strata];
final double eventRisk = weightEvent / totalRisks[strata];
totalRisks[strata] -= sumRiskEvent;
sumHaz[strata] += eventRisk;
o._baseline_hazard_matrix.set(t, strata + 1, eventRisk);
o._baseline_survival_matrix.set(t, strata + 1, Math.exp(-sumHaz[strata]));
}
}
for (int t = 1; t < o._cumhaz_0.length; ++t) {
o._cumhaz_0[t] = o._cumhaz_0[t - 1] + o._cumhaz_0[t];
o._var_cumhaz_1[t] = o._var_cumhaz_1[t - 1] + o._var_cumhaz_1[t];
for (int j = 0; j < n_coef; ++j)
o._var_cumhaz_2_matrix.set(t, j, o._var_cumhaz_2_matrix.get(t - 1, j) + o._var_cumhaz_2_matrix.get(t, j));
}
// install matrix Frames into DKV
o._var_cumhaz_2_matrix.toFrame(o._var_cumhaz_2);
final Frame baselineHazardAsFrame = o._baseline_hazard_matrix.toFrame(o._baseline_hazard);
final Frame baselineSurvivalAsFrame = o._baseline_survival_matrix.toFrame(o._baseline_survival);
if (null == o._strataMap || 0 == o._strataMap.size()) {
baselineHazardAsFrame.setNames(new String[]{"t", "baseline hazard"});
baselineSurvivalAsFrame.setNames(new String[]{"t", "baseline survival"});
} else {
final Vec[] strataCols = train().vecs(_input_parms._stratify_by);
List<String> names = o._strataMap.entrySet().stream()
.sorted(Comparator.comparingInt(e -> e.getValue()._val))
.map(Map.Entry::getKey)
.map(i -> i._gs)
.map(a -> IntStream.range(0, strataCols.length)
.mapToObj(i -> strataCols[i].factor((int) a[i]))
)
.map(s -> s.collect(Collectors.joining(", ", "(", ")")))
.collect(toList());
names.add(0, "t");
baselineHazardAsFrame.setNames(names.toArray(new String[0]));
baselineSurvivalAsFrame.setNames(names.toArray(new String[0]));
}
}
@Override
public void computeImpl() {
CoxPHModel model = null;
try {
init(true);
final double[] time = CollectTimes.collect(_parms.stopVec(), _parms._single_node_mode);
_job.update(0, "Initializing model training");
IcedHashMap<AstGroup.G, IcedInt> strataMap = new IcedHashMap<>();
Frame f = reorderTrainFrameColumns(strataMap, time);
int nResponses = (_parms.startVec() == null ? 2 : 3) + (_parms.isStratified() ? 1 : 0);
final DataInfo dinfo = new DataInfo(f, null, nResponses, _parms._use_all_factor_levels,
TransformType.DEMEAN, TransformType.NONE, true, false, false,
hasWeightCol(), false, false, _parms.interactionSpec()).disableIntercept();
Scope.track_generic(dinfo);
DKV.put(dinfo);
// The model to be built
CoxPHModel.CoxPHOutput output = new CoxPHModel.CoxPHOutput(CoxPH.this, dinfo._adaptedFrame, train(), strataMap);
model = new CoxPHModel(_result, _parms, output);
model.delete_and_lock(_job);
initStats(model, dinfo, time);
ScoringHistory sc = new ScoringHistory(_parms._max_iterations + 1);
final int n_offsets = (_offset == null) ? 0 : 1;
final int n_coef = dinfo.fullN() - n_offsets;
final double[] step = MemoryManager.malloc8d(n_coef);
final double[] oldCoef = MemoryManager.malloc8d(n_coef);
final double[] newCoef = MemoryManager.malloc8d(n_coef);
Arrays.fill(step, Double.NaN);
Arrays.fill(oldCoef, Double.NaN);
for (int j = 0; j < n_coef; ++j)
newCoef[j] = model._parms._init;
double logLik = -Double.MAX_VALUE;
final boolean has_start_column = (model._parms.startVec() != null);
final boolean has_weights_column = (_weights != null);
final ComputationState cs = new ComputationState(n_coef);
Timer iterTimer = null;
CoxPHTask coxMR = null;
_job.update(1, "Running iteration 0");
for (int i = 0; i <= model._parms._max_iterations; ++i) {
iterTimer = new Timer();
model._output._iter = i;
Timer aggregTimer = new Timer();
coxMR = new CoxPHTask(dinfo, newCoef, time, (long) response().min() /* min event */,
n_offsets, has_start_column, dinfo._adaptedFrame.vec(_parms._strata_column), has_weights_column,
_parms._ties).doAll(dinfo._adaptedFrame, _parms._single_node_mode);
Log.info("CoxPHTask: iter=" + i + ", time=" + aggregTimer.toString());
_job.update(1);
Timer loglikTimer = new Timer();
final double newLoglik = calcLoglik(dinfo, cs, _parms, coxMR)._logLik;
Log.info("LogLik: iter=" + i + ", time=" + loglikTimer.toString() + ", logLik=" + newLoglik);
model._output._scoring_history = sc.addIterationScore(i, newLoglik).to2dTable(i+1);
if (newLoglik > logLik) {
if (i == 0)
calcCounts(model, coxMR);
calcModelStats(model, newCoef, cs);
if (newLoglik == 0)
model._output._lre = -Math.log10(Math.abs(logLik - newLoglik));
else
model._output._lre = -Math.log10(Math.abs((logLik - newLoglik) / newLoglik));
if (model._output._lre >= model._parms._lre_min)
break;
Arrays.fill(step, 0);
for (int j = 0; j < n_coef; ++j)
for (int k = 0; k < n_coef; ++k)
step[j] -= model._output._var_coef[j][k] * cs._gradient[k];
for (int j = 0; j < n_coef; ++j)
if (Double.isNaN(step[j]) || Double.isInfinite(step[j]))
break;
logLik = newLoglik;
System.arraycopy(newCoef, 0, oldCoef, 0, oldCoef.length);
} else {
for (int j = 0; j < n_coef; ++j)
step[j] /= 2;
}
for (int j = 0; j < n_coef; ++j)
newCoef[j] = oldCoef[j] - step[j];
model.update(_job);
_job.update(1, "Iteration = " + i + "/" + model._parms._max_iterations + ", logLik = " + logLik);
if (i != model._parms._max_iterations)
Log.info("CoxPH Iteration: iter=" + i + ", " + iterTimer.toString());
}
if (_parms._calc_cumhaz && coxMR != null) {
calcCumhaz_0(model, coxMR);
}
if (iterTimer != null) {
Log.info("CoxPH Last Iteration: " + iterTimer.toString());
}
final boolean _skip_scoring = H2O.getSysBoolProperty("debug.skipScoring", false);
if (!_skip_scoring) {
model.update(_job);
model.score(_parms.train()).delete();
model._output._training_metrics = ModelMetrics.getFromDKV(model, _parms.train());
model._output._concordance = ((ModelMetricsRegressionCoxPH) model._output._training_metrics).concordance();
}
model._output._model_summary = generateSummary(model._output);
Log.info(model._output._model_summary);
model.update(_job);
} finally {
if (model != null) model.unlock(_job);
}
}
}
private TwoDimTable generateSummary(CoxPHModel.CoxPHOutput output) {
String[] names = new String[]{"Formula", "Likelihood ratio test", "Concordance", "Number of Observations", "Number of Events"};
String[] types = new String[]{"string", "double", "double", "long", "long"};
String[] formats = new String[]{"%s", "%.5f", "%.5f", "%d", "%d"};
TwoDimTable summary = new TwoDimTable("CoxPH Model", "summary", new String[]{""}, names, types, formats, "");
summary.set(0, 0, output._formula);
summary.set(0, 1, output._loglik_test);
summary.set(0, 2, output._concordance);
summary.set(0, 3, output._n);
summary.set(0, 4, output._total_event);
return summary;
}
protected static class CoxPHTask extends CPHBaseTask<CoxPHTask> {
final double[] _beta;
final double[] _time;
final int _n_offsets;
final boolean _has_start_column;
final boolean _has_strata_column;
final boolean _has_weights_column;
final long _min_event;
final int _num_strata; // = 1 if the model is not stratified
final boolean _isBreslow;
// OUT
long n;
double[] sumWeights;
double[][] sumWeightedCatX;
double[][] sumWeightedNumX;
double[] sizeRiskSet;
double[] sizeCensored;
double[] sizeEvents;
long[] countEvents;
double[] sumXEvents;
double[] sumRiskEvents;
double[] sumRiskAllEvents;
double[][] sumXRiskEvents;
double[] sumLogRiskEvents;
double[] rcumsumRisk;
double[][] rcumsumXRisk;
double[] totalRisk;
// Breslow only
double[][][] rcumsumXXRisk;
CoxPHTask(DataInfo dinfo, final double[] beta, final double[] time, final long min_event,
final int n_offsets, final boolean has_start_column, Vec strata_column, final boolean has_weights_column,
final CoxPHModel.CoxPHParameters.CoxPHTies ties) {
super(dinfo);
_beta = beta;
_time = time;
_min_event = min_event;
_n_offsets = n_offsets;
_has_start_column = has_start_column;
_has_strata_column = strata_column != null;
_has_weights_column = has_weights_column;
_num_strata = _has_strata_column ? 1 + (int) strata_column.max() : 1;
_isBreslow = CoxPHModel.CoxPHParameters.CoxPHTies.breslow.equals(ties);
}
@Override
protected void chunkInit(){
final int n_time = _time.length * _num_strata;
final int n_coef = _beta.length;
sumWeights = MemoryManager.malloc8d(_num_strata);
sumWeightedCatX = MemoryManager.malloc8d(_num_strata, _dinfo.numCats());
sumWeightedNumX = MemoryManager.malloc8d(_num_strata, _dinfo.numNums());
sizeRiskSet = MemoryManager.malloc8d(n_time);
sizeCensored = MemoryManager.malloc8d(n_time);
sizeEvents = MemoryManager.malloc8d(n_time);
countEvents = MemoryManager.malloc8(n_time);
sumRiskEvents = MemoryManager.malloc8d(n_time);
sumRiskAllEvents = MemoryManager.malloc8d(n_time);
sumLogRiskEvents = MemoryManager.malloc8d(n_time);
rcumsumRisk = MemoryManager.malloc8d(n_time);
sumXEvents = MemoryManager.malloc8d(n_coef);
sumXRiskEvents = MemoryManager.malloc8d(n_time, n_coef);
rcumsumXRisk = MemoryManager.malloc8d(n_time, n_coef);
totalRisk = MemoryManager.malloc8d(_num_strata);
if (_isBreslow) { // Breslow only
rcumsumXXRisk = MemoryManager.malloc8d(n_time, n_coef, n_coef);
}
}
@Override
protected void processRow(Row row) {
n++;
double [] response = row.response;
int ncats = row.nBins;
int [] cats = row.binIds;
double [] nums = row.numVals;
final double weight = _has_weights_column ? row.weight : 1.0;
if (weight <= 0) {
throw new IllegalArgumentException("weights must be positive values");
}
int respIdx = response.length - 1;
final long event = (long) (response[respIdx--] - _min_event);
final int t2 = (int) response[respIdx--];
final int t1 = _has_start_column ? (int) response[respIdx--] : -1;
final double strata = _has_strata_column ? response[respIdx--] : 0;
assert respIdx == -1 : "expected to use all response data";
if (Double.isNaN(strata))
return; // skip this row
final int strataId = (int) strata;
final int numStart = _dinfo.numStart();
sumWeights[strataId] += weight;
for (int j = 0; j < ncats; ++j) {
sumWeightedCatX[strataId][cats[j]] += weight;
}
for (int j = 0; j < nums.length; ++j) {
sumWeightedNumX[strataId][j] += weight * nums[j];
}
double logRisk = 0;
for (int j = 0; j < ncats; ++j)
logRisk += _beta[cats[j]];
for (int j = 0; j < nums.length - _n_offsets; ++j)
logRisk += nums[j] * _beta[numStart + j];
for (int j = nums.length - _n_offsets; j < nums.length; ++j)
logRisk += nums[j];
final double risk = weight * Math.exp(logRisk);
logRisk *= weight;
totalRisk[strataId] += risk;
sumRiskAllEvents[t2] += risk;
if (event > 0) {
countEvents[t2]++;
sizeEvents[t2] += weight;
sumLogRiskEvents[t2] += logRisk;
sumRiskEvents[t2] += risk;
} else
sizeCensored[t2] += weight;
if (_has_start_column) {
for (int t = t1; t <= t2; ++t)
sizeRiskSet[t] += weight;
for (int t = t1; t <= t2; ++t)
rcumsumRisk[t] += risk;
} else {
sizeRiskSet[t2] += weight;
rcumsumRisk[t2] += risk;
}
final int ntotal = ncats + (nums.length - _n_offsets);
final int numStartIter = numStart - ncats;
for (int jit = 0; jit < ntotal; ++jit) {
final boolean jIsCat = jit < ncats;
final int j = jIsCat ? cats[jit] : numStartIter + jit;
final double x1 = jIsCat ? 1.0 : nums[jit - ncats];
final double xRisk = x1 * risk;
if (event > 0) {
sumXEvents[j] += weight * x1;
sumXRiskEvents[t2][j] += xRisk;
}
rcumsumXRisk[t2][j] += xRisk;
if (_has_start_column && (t1 % _time.length > 0)) {
rcumsumXRisk[t1 - 1][j] -= xRisk;
}
if (_isBreslow) { // Breslow only
for (int kit = 0; kit < ntotal; ++kit) {
final boolean kIsCat = kit < ncats;
final int k = kIsCat ? cats[kit] : numStartIter + kit;
final double x2 = kIsCat ? 1.0 : nums[kit - ncats];
final double xxRisk = x2 * xRisk;
if (_has_start_column) {
for (int t = t1; t <= t2; ++t)
rcumsumXXRisk[t][j][k] += xxRisk;
} else {
rcumsumXXRisk[t2][j][k] += xxRisk;
}
}
}
}
}
@Override
public void reduce(CoxPHTask that) {
n += that.n;
ArrayUtils.add(sumWeights, that.sumWeights);
ArrayUtils.add(sumWeightedCatX, that.sumWeightedCatX);
ArrayUtils.add(sumWeightedNumX, that.sumWeightedNumX);
ArrayUtils.add(sizeRiskSet, that.sizeRiskSet);
ArrayUtils.add(sizeCensored, that.sizeCensored);
ArrayUtils.add(sizeEvents, that.sizeEvents);
ArrayUtils.add(countEvents, that.countEvents);
ArrayUtils.add(sumXEvents, that.sumXEvents);
ArrayUtils.add(sumRiskEvents, that.sumRiskEvents);
ArrayUtils.add(sumRiskAllEvents, that.sumRiskAllEvents);
ArrayUtils.add(sumXRiskEvents, that.sumXRiskEvents);
ArrayUtils.add(sumLogRiskEvents, that.sumLogRiskEvents);
ArrayUtils.add(rcumsumRisk, that.rcumsumRisk);
ArrayUtils.add(rcumsumXRisk, that.rcumsumXRisk);
ArrayUtils.add(totalRisk, that.totalRisk);
if (_isBreslow) { // Breslow only
ArrayUtils.add(rcumsumXXRisk, that.rcumsumXXRisk);
}
}
@Override
protected void postGlobal() {
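// Turn the per-time totals into right-cumulative ("reverse cumulative") sums by walking backwards in time.
// The ((t + 1) % _time.length) check stops the accumulation at stratum boundaries, because these arrays are
// laid out as one block of _time.length entries per stratum. With a start column, rcumsumRisk and rcumsumXXRisk
// were already spread over [t1, t2] in processRow, so only rcumsumXRisk needs this backward pass.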
for (int t = rcumsumXRisk.length - 2; t >= 0; --t)
for (int j = 0; j < rcumsumXRisk[t].length; ++j)
rcumsumXRisk[t][j] += ((t + 1) % _time.length) == 0 ? 0 : rcumsumXRisk[t + 1][j];
if (! _has_start_column) {
for (int t = rcumsumRisk.length - 2; t >= 0; --t)
rcumsumRisk[t] += ((t + 1) % _time.length) == 0 ? 0 : rcumsumRisk[t + 1];
if (_isBreslow) { // Breslow only
for (int t = rcumsumXXRisk.length - 2; t >= 0; --t)
for (int j = 0; j < rcumsumXXRisk[t].length; ++j)
for (int k = 0; k < rcumsumXXRisk[t][j].length; ++k)
rcumsumXXRisk[t][j][k] += ((t + 1) % _time.length) == 0 ? 0 : rcumsumXXRisk[t + 1][j][k];
}
}
}
}
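// Collects the distinct stop-time values that define the discrete event-time axis;
// fails if there are more than MAX_TIME_BINS of them.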
private static class CollectTimes extends VecUtils.CollectDoubleDomain {
private CollectTimes() {
super(new double[0], MAX_TIME_BINS);
}
static double[] collect(Vec timeVec, boolean runLocal) {
return new CollectTimes().doAll(timeVec, runLocal).domain();
}
@Override
protected void onMaxDomainExceeded(int maxDomainSize, int currentSize) {
throw new CollectTimesException("number of distinct stop times is at least " + currentSize + "; maximum number allowed is " + maxDomainSize);
}
}
private static class CollectTimesException extends RuntimeException {
private CollectTimesException(String message) {
super(message);
}
}
static class ComputationState {
final int _n_coef;
double _logLik;
double[] _gradient;
double[][] _hessian;
ComputationState(int n_coef) {
_n_coef = n_coef;
_logLik = 0;
_gradient = MemoryManager.malloc8d(n_coef);
_hessian = MemoryManager.malloc8d(n_coef, n_coef);
}
void reset() {
_logLik = 0;
for (int j = 0; j < _n_coef; ++j)
_gradient[j] = 0;
for (int j = 0; j < _n_coef; ++j)
for (int k = 0; k < _n_coef; ++k)
_hessian[j][k] = 0;
}
}
private static class ScoringHistory {
private long[] _scoringTimes;
private double[] _logLiks;
public ScoringHistory(int iterCnt) {
_scoringTimes = new long[iterCnt];
_logLiks = new double[iterCnt];
}
public ScoringHistory addIterationScore(int iter, double logLik) {
_scoringTimes[iter] = System.currentTimeMillis();
_logLiks[iter] = logLik;
return this;
}
public TwoDimTable to2dTable(int iterCnt) {
String[] cnames = new String[]{"timestamp", "duration", "iterations", "logLik"};
String[] ctypes = new String[]{"string", "string", "int", "double"};
String[] cformats = new String[]{"%s", "%s", "%d", "%.5f"};
TwoDimTable res = new TwoDimTable("Scoring History", "", new String[iterCnt], cnames, ctypes, cformats, "");
DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss");
for (int i = 0; i < iterCnt; i++) {
int col = 0;
res.set(i, col++, fmt.print(_scoringTimes[i]));
res.set(i, col++, PrettyPrint.msecs(_scoringTimes[i] - _scoringTimes[0], true));
res.set(i, col++, i);
res.set(i, col++, _logLiks[i]);
}
return res;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/coxph/CoxPHModel.java
|
package hex.coxph;
import hex.*;
import hex.coxph.CoxPHModel.CoxPHOutput;
import hex.coxph.CoxPHModel.CoxPHParameters;
import hex.genmodel.descriptor.ModelDescriptor;
import hex.schemas.CoxPHModelV3;
import water.*;
import water.api.schemas3.ModelSchemaV3;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.rapids.ast.prims.mungers.AstGroup;
import water.udf.CFuncRef;
import water.util.ArrayUtils;
import water.util.IcedHashMap;
import water.util.IcedInt;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.function.Predicate;
import java.util.stream.Stream;
public class CoxPHModel extends Model<CoxPHModel,CoxPHParameters,CoxPHOutput> {
public static class CoxPHParameters extends Model.Parameters {
public String algoName() { return "CoxPH"; }
public String fullName() { return "Cox Proportional Hazards"; }
public String javaName() { return CoxPHModel.class.getName(); }
@Override public long progressUnits() { return ((_max_iterations + 1) * 2) + 1; }
public String _start_column;
public String _stop_column;
final String _strata_column = "__strata";
public String[] _stratify_by;
public enum CoxPHTies { efron, breslow }
public CoxPHTies _ties = CoxPHTies.efron;
public double _init = 0;
public double _lre_min = 9;
public int _max_iterations = 20;
public boolean _use_all_factor_levels;
public String[] _interactions_only;
public String[] _interactions = null;
public StringPair[] _interaction_pairs = null;
public boolean _calc_cumhaz = true; // support survfit
/**
* If true, computation is performed with local jobs.
* {@link MRTask#doAll(Vec, boolean)} and other overloaded variants are called with runLocal set to true
* during the computation.
*
* This setting affects the main CoxPH computation only. Model metrics computation doesn't honour this setting -
* {@link ModelMetricsRegressionCoxPH#concordance()} computation ignores it.
*/
public boolean _single_node_mode = false;
String[] responseCols() {
String[] cols = _start_column != null ? new String[]{_start_column} : new String[0];
if (isStratified())
cols = ArrayUtils.append(cols, _strata_column);
return ArrayUtils.append(cols, _stop_column, _response_column);
}
Vec startVec() { return train().vec(_start_column); }
Vec stopVec() { return train().vec(_stop_column); }
InteractionSpec interactionSpec() {
// add "stratify by" columns to "interaction only"
final String[] interOnly;
if (getInteractionsOnly() != null && _stratify_by != null) {
String[] io = getInteractionsOnly().clone();
Arrays.sort(io);
String[] sb = _stratify_by.clone();
Arrays.sort(sb);
interOnly = ArrayUtils.union(io, sb, true);
} else {
interOnly = getInteractionsOnly() != null ? getInteractionsOnly() : _stratify_by;
}
return InteractionSpec.create(_interactions, _interaction_pairs, interOnly, _stratify_by);
}
private String[] getInteractionsOnly() {
// clients sometimes represent empty interactions as [""] - sanitize this
if (_interactions_only != null && _interactions_only.length == 1 && "".equals(_interactions_only[0])) {
return null;
} else {
return _interactions_only;
}
}
boolean isStratified() { return _stratify_by != null && _stratify_by.length > 0; }
String toFormula(Frame f) {
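// Builds an R-style survival formula string for reporting purposes,
// e.g. "Surv(start, stop, event) ~ x1 + x2 + strata(s1)" (illustrative column names).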
StringBuilder sb = new StringBuilder();
sb.append("Surv(");
if (_start_column != null) {
sb.append(_start_column).append(", ");
}
sb.append(_stop_column).append(", ").append(_response_column);
sb.append(") ~ ");
Set<String> stratifyBy = _stratify_by != null ? new HashSet<>(Arrays.asList(_stratify_by)) : Collections.<String>emptySet();
Set<String> interactionsOnly = _interactions_only != null ? new HashSet<>(Arrays.asList(_interactions_only)) : Collections.<String>emptySet();
Set<String> specialCols = new HashSet<String>() {{
add(_start_column);
if (_stop_column != null)
add(_stop_column);
add(_response_column);
add(_strata_column);
if (_weights_column != null)
add(_weights_column);
if (_ignored_columns != null)
addAll(Arrays.asList(_ignored_columns));
}};
String sep = "";
for (String col : f._names) {
if (_offset_column != null && _offset_column.equals(col))
continue;
if (stratifyBy.contains(col) || interactionsOnly.contains(col) || specialCols.contains(col))
continue;
sb.append(sep).append(col);
sep = " + ";
}
if (_offset_column != null)
sb.append(sep).append("offset(").append(_offset_column).append(")");
InteractionSpec interactionSpec = interactionSpec();
if (interactionSpec != null) {
InteractionPair[] interactionPairs = interactionSpec().makeInteractionPairs(f);
for (InteractionPair ip : interactionPairs) {
sb.append(sep);
String v1 = f._names[ip.getV1()];
String v2 = f._names[ip.getV2()];
if (stratifyBy.contains(v1))
sb.append("strata(").append(v1).append(")");
else
sb.append(v1);
sb.append(":");
if (stratifyBy.contains(v2))
sb.append("strata(").append(v2).append(")");
else
sb.append(v2);
sep = " + ";
}
}
if (_stratify_by != null) {
final String tmp = sb.toString();
for (String col : _stratify_by) {
String strataCol = "strata(" + col + ")";
if (! tmp.contains(strataCol)) {
sb.append(sep).append(strataCol);
sep = " + ";
}
}
}
return sb.toString();
}
}
public static class CoxPHOutput extends Model.Output {
public CoxPHOutput(CoxPH coxPH, Frame adaptFr, Frame train, IcedHashMap<AstGroup.G, IcedInt> strataMap) {
super(coxPH, fullFrame(coxPH, adaptFr, train));
_strataOnlyCols = new String[_names.length - adaptFr._names.length];
for (int i = 0; i < _strataOnlyCols.length; i++)
_strataOnlyCols[i] = _names[i];
_ties = coxPH._parms._ties;
_formula = coxPH._parms.toFormula(train);
_interactionSpec = coxPH._parms.interactionSpec();
_strataMap = strataMap;
_hasStartColumn = coxPH.hasStartColumn();
_hasStrataColumn = coxPH._parms.isStratified();
}
@Override
public int nclasses() {
return 1;
}
@Override
protected int lastSpecialColumnIdx() {
return super.lastSpecialColumnIdx() - 1 - (_hasStartColumn ? 1 : 0) - (_hasStrataColumn ? 1 : 0);
}
public int weightsIdx() {
if (!_hasWeights)
return -1;
return lastSpecialColumnIdx() - (hasFold() ? 1 : 0);
}
public int offsetIdx() {
if (!_hasOffset)
return -1;
return lastSpecialColumnIdx() - (hasWeights() ? 1 : 0) - (hasFold() ? 1 : 0);
}
private static Frame fullFrame(CoxPH coxPH, Frame adaptFr, Frame train) {
if (! coxPH._parms.isStratified())
return adaptFr;
Frame ff = new Frame();
for (String col : coxPH._parms._stratify_by)
if (adaptFr.vec(col) == null)
ff.add(col, train.vec(col));
ff.add(adaptFr);
return ff;
}
@Override
public ModelCategory getModelCategory() { return ModelCategory.CoxPH; }
@Override
public InteractionBuilder interactionBuilder() {
return _interactionSpec != null ? new CoxPHInteractionBuilder() : null;
}
private class CoxPHInteractionBuilder implements InteractionBuilder {
@Override
public Frame makeInteractions(Frame f) {
Model.InteractionPair[] interactions = _interactionSpec.makeInteractionPairs(f);
f.add(Model.makeInteractions(f, false, interactions, data_info._useAllFactorLevels, data_info._skipMissing, data_info._predictor_transform == DataInfo.TransformType.STANDARDIZE));
return f;
}
}
InteractionSpec _interactionSpec;
DataInfo data_info;
IcedHashMap<AstGroup.G, IcedInt> _strataMap;
String[] _strataOnlyCols;
private final boolean _hasStartColumn;
private final boolean _hasStrataColumn;
public String[] _coef_names;
public double[] _coef;
public double[] _exp_coef;
public double[] _exp_neg_coef;
public double[] _se_coef;
public double[] _z_coef;
double[][] _var_coef;
double _null_loglik;
double _loglik;
double _loglik_test;
double _wald_test;
double _score_test;
double _rsq;
double _maxrsq;
double _lre;
int _iter;
double[][] _x_mean_cat;
double[][] _x_mean_num;
double[] _mean_offset;
String[] _offset_names;
long _n;
long _n_missing;
long _total_event;
double[] _time;
double[] _n_risk;
double[] _n_event;
double[] _n_censor;
double[] _cumhaz_0;
double[] _var_cumhaz_1;
FrameMatrix _var_cumhaz_2_matrix;
Key<Frame> _var_cumhaz_2;
Key<Frame> _baseline_hazard;
FrameMatrix _baseline_hazard_matrix;
Key<Frame> _baseline_survival;
FrameMatrix _baseline_survival_matrix;
CoxPHParameters.CoxPHTies _ties;
String _formula;
double _concordance;
}
public static class FrameMatrix extends Storage.DenseRowMatrix {
Key<Frame> _frame_key;
FrameMatrix(Key<Frame> frame_key, int rows, int cols) {
super(rows, cols);
_frame_key = frame_key;
}
@SuppressWarnings("unused")
public final AutoBuffer write_impl(AutoBuffer ab) {
Key.write_impl(_frame_key, ab);
return ab;
}
@SuppressWarnings({"unused", "unchecked"})
public final FrameMatrix read_impl(AutoBuffer ab) {
_frame_key = (Key<Frame>) Key.read_impl(null, ab);
// install in DKV if not already there
if (DKV.getGet(_frame_key) == null)
toFrame(_frame_key);
return this;
}
}
@Override
public ModelMetricsRegressionCoxPH.MetricBuilderRegressionCoxPH makeMetricBuilder(String[] domain) {
return new ModelMetricsRegressionCoxPH.MetricBuilderRegressionCoxPH(_parms._start_column, _parms._stop_column, _parms.isStratified(), _parms._stratify_by);
}
public ModelSchemaV3 schema() { return new CoxPHModelV3(); }
public CoxPHModel(final Key destKey, final CoxPHParameters parms, final CoxPHOutput output) {
super(destKey, parms, output);
}
@Override
protected PredictScoreResult predictScoreImpl(Frame fr, Frame adaptFrm, String destination_key, Job job, boolean computeMetrics, CFuncRef customMetricFunc) {
int nResponses = 0;
for (String col : _parms.responseCols())
if (adaptFrm.find(col) != -1)
nResponses++;
DataInfo scoringInfo = _output.data_info.scoringInfo(_output._names, adaptFrm, nResponses, false);
CoxPHScore score = new CoxPHScore(scoringInfo, _output, _parms.isStratified(), null != _parms._offset_column);
final Frame scored = score
.doAll(Vec.T_NUM, scoringInfo._adaptedFrame)
.outputFrame(Key.make(destination_key), new String[]{"lp"}, null);
ModelMetrics.MetricBuilder<?> mb = null;
if (computeMetrics) {
mb = makeMetricBuilder(null);
}
return new PredictScoreResult(mb, scored, scored);
}
@Override
public String[] adaptTestForTrain(Frame test, boolean expensive, boolean computeMetrics) {
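// If the model is stratified but the test frame has no strata column, first add a NaN placeholder so the
// standard adaptation succeeds, then replace it with the real strata ids derived from the _stratify_by columns.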
boolean createStrataVec = _parms.isStratified() && (test.vec(_parms._strata_column) == null);
if (createStrataVec) {
Vec strataVec = test.anyVec().makeCon(Double.NaN);
_toDelete.put(strataVec._key, "adapted missing strata vector");
test.add(_parms._strata_column, strataVec);
}
String[] msgs = super.adaptTestForTrain(test, expensive, computeMetrics);
if (createStrataVec) {
Vec strataVec = CoxPH.StrataTask.makeStrataVec(test, _parms._stratify_by, _output._strataMap, _parms._single_node_mode);
_toDelete.put(strataVec._key, "adapted missing strata vector");
test.replace(test.find(_parms._strata_column), strataVec);
if (_output._strataOnlyCols != null)
test.remove(_output._strataOnlyCols);
}
return msgs;
}
@Override
protected String[] adaptTestForJavaScoring(Frame test, boolean computeMetrics) {
return super.adaptTestForTrain(test, true, computeMetrics);
}
private static class CoxPHScore extends MRTask<CoxPHScore> {
private DataInfo _dinfo;
private double[] _coef;
private double[] _lpBase;
private int _numStart;
private boolean _hasStrata;
private CoxPHScore(DataInfo dinfo, CoxPHOutput o, boolean hasStrata, boolean hasOffsets) {
final int strataCount = o._x_mean_cat.length;
_dinfo = dinfo;
_hasStrata = hasStrata;
_coef = hasOffsets ? ArrayUtils.append(o._coef, 1.0) : o._coef;
_numStart = o._x_mean_cat[0].length;
_lpBase = new double[strataCount];
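// Pre-compute the linear predictor at the per-stratum predictor means; map() subtracts it,
// so the reported lp is centered at those means.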
for (int s = 0; s < strataCount; s++) {
for (int i = 0; i < o._x_mean_cat[s].length; i++)
_lpBase[s] += o._x_mean_cat[s][i] * _coef[i];
for (int i = 0; i < o._x_mean_num[s].length; i++)
_lpBase[s] += o._x_mean_num[s][i] * _coef[i + _numStart];
}
}
@Override
public void map(Chunk[] chks, NewChunk nc) {
DataInfo.Row r = _dinfo.newDenseRow();
for (int rid = 0; rid < chks[0]._len; ++rid) {
_dinfo.extractDenseRow(chks, rid, r);
if (r.predictors_bad) {
nc.addNA();
continue;
} else if (r.weight == 0) {
nc.addNum(0);
continue;
}
final double s = _hasStrata ? chks[_dinfo.responseChunkId(0)].atd(rid) : 0;
final boolean unknownStrata = Double.isNaN(s);
if (unknownStrata) {
nc.addNA();
} else {
final double lp = r.innerProduct(_coef) - _lpBase[(int) s];
nc.addNum(lp);
}
}
}
}
@Override public double[] score0(double[] data, double[] preds) {
throw new UnsupportedOperationException("CoxPHModel.score0 should never be called");
}
protected Futures remove_impl(Futures fs, boolean cascade) {
remove(fs, _output._var_cumhaz_2);
remove(fs, _output._baseline_hazard);
remove(fs, _output._baseline_survival);
super.remove_impl(fs, cascade);
return fs;
}
private void remove(Futures fs, Key<Frame> key) {
Frame fr = key != null ? key.get() : null;
if (fr != null) {
fr.remove(fs);
}
}
@Override
public CoxPHMojoWriter getMojo() {
return new CoxPHMojoWriter(this);
}
@Override
public ModelDescriptor modelDescriptor() {
return new CoxPHModelDescriptor(extraMojoFeatures());
}
public String[] extraMojoFeatures() {
InteractionSpec interactionSpec = _parms.interactionSpec();
if (interactionSpec == null) {
return new String[0];
}
String[] interactionsOnly = interactionSpec.getInteractionsOnly();
if (interactionsOnly == null) {
return new String[0];
}
Set<String> alreadyExported = new HashSet<>(Arrays.asList(_output._names));
return Stream.of(interactionsOnly)
.filter(((Predicate<String>) alreadyExported::contains).negate())
.toArray(String[]::new);
}
class CoxPHModelDescriptor extends H2OModelDescriptor {
private final String[] _extraMojoFeatures;
private CoxPHModelDescriptor(String[] extraMojoFeatures) {
_extraMojoFeatures = extraMojoFeatures;
}
@Override
public int nfeatures() {
return super.nfeatures() + _extraMojoFeatures.length;
}
@Override
public String[] features() {
return ArrayUtils.append(super.features(), _extraMojoFeatures);
}
@Override
public String[] columnNames() {
return ArrayUtils.insert(super.columnNames(), _extraMojoFeatures, super.nfeatures());
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/coxph/CoxPHMojoWriter.java
|
package hex.coxph;
import hex.Model;
import hex.ModelMojoWriter;
import water.Scope;
import water.fvec.Frame;
import water.rapids.ast.prims.mungers.AstGroup;
import water.util.ArrayUtils;
import water.util.IcedHashMap;
import water.util.IcedInt;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
public class CoxPHMojoWriter extends ModelMojoWriter<CoxPHModel, CoxPHModel.CoxPHParameters, CoxPHModel.CoxPHOutput> {
@SuppressWarnings("unused") // Called through reflection in ModelBuildersHandler
public CoxPHMojoWriter() {}
public CoxPHMojoWriter(CoxPHModel model) {
super(model);
}
@Override
public String mojoVersion() {
return "1.00";
}
@Override
protected void writeModelData() throws IOException {
writeRectangularDoubleArray(model._output._x_mean_cat, "x_mean_cat");
writeRectangularDoubleArray(model._output._x_mean_num, "x_mean_num");
writekv("coef", model._output._coef);
writekv("cats", model._output.data_info._cats);
writekv("cat_offsets", model._output.data_info._catOffsets);
writekv("use_all_factor_levels", model._output.data_info._useAllFactorLevels);
writekv("num_numerical_columns", model._output.data_info._nums);
writekv("num_offsets", model._output.data_info._numOffsets);
writeStrata();
writeInteractions();
}
private void writeStrata() throws IOException {
final IcedHashMap<AstGroup.G, IcedInt> strataMap = model._output._strataMap;
writekv("strata_count", strataMap.size());
int strataNum = 0;
for (AstGroup.G g : strataMap.keySet()) {
writekv("strata_" + strataNum, g._gs);
strataNum++;
}
}
private void writeInteractions() throws IOException {
final Model.InteractionPair[] interactions = model._output.data_info._interactions;
if (interactions == null || interactions.length == 0) {
return;
}
final String[] columnNames = model.modelDescriptor().columnNames();
int[] interaction_1 = new int[interactions.length];
int[] interaction_2 = new int[interactions.length];
int[] targets = new int[model._output.data_info._interactionVecs.length];
String[] interaction_column_names = new String[interactions.length];
List<String> allColNames = Arrays.asList(model._parms.train().names());
Frame train = model._parms.train();
Scope.track(train);
for (int i = 0; i < interactions.length; i++) {
interaction_1[i] = ArrayUtils.find(columnNames, interactions[i]._name1);
interaction_2[i] = ArrayUtils.find(columnNames, interactions[i]._name2);
String combinedName = interactions[i]._name1+"_"+interactions[i]._name2;
targets[i] = ArrayUtils.find(columnNames, combinedName); // column index in adaptedFrame
interaction_column_names[i] = combinedName;
}
writekv("interactions_1", interaction_1);
writekv("interactions_2", interaction_2);
writeStringArrays(interaction_column_names, "interaction_column_names");
writekv("interaction_targets", targets); // specifies the position of the interaction column in input array
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/coxph/EfronMethod.java
|
package hex.coxph;
import hex.DataInfo;
import water.*;
import water.util.ArrayUtils;
import static hex.coxph.CoxPH.CoxPHTask;
import static hex.coxph.CoxPH.ComputationState;
class EfronMethod {
static ComputationState calcLoglik(DataInfo dinfo, CoxPHTask coxMR, ComputationState cs, boolean runLocal) {
EfronDJKSetupFun djkTermSetup = EfronDJKSetupFun.setupEfron(coxMR);
EfronDJKTermTask djkTermTask = new EfronDJKTermTask(dinfo, coxMR, djkTermSetup)
.doAll(dinfo._adaptedFrame, runLocal);
EfronUpdateFun f = new EfronUpdateFun(cs, coxMR);
LocalMR<EfronUpdateFun> efronMR = makeEfronMRTask(f, coxMR.sizeEvents.length);
H2O.submitTask(efronMR).join();
for (int i = 0; i < f._n_coef; i++)
for (int j = 0; j < f._n_coef; j++)
f._hessian[i][j] += djkTermTask._djkTerm[i][j];
for (int i = 0; i < f._n_coef; i++)
f._gradient[i] += coxMR.sumXEvents[i];
return f.toComputationState(cs);
}
// We are dealing with doubles - the order of summations in floating point math matters!
// In order to have a deterministic order we need to disable "previous task reuse" - that will give us
// a deterministic order of applying reduce operations.
static LocalMR<EfronUpdateFun> makeEfronMRTask(EfronUpdateFun f, int nEvents) {
return new LocalMR<EfronUpdateFun>(f, nEvents)
.withNoPrevTaskReuse();
}
}
class EfronDJKSetupFun extends MrFun<EfronDJKSetupFun> {
private final CoxPHTask _coxMR;
double[] _riskTermT2;
double[] _cumsumRiskTerm;
public EfronDJKSetupFun() { _coxMR = null; }
private EfronDJKSetupFun(CoxPHTask coxMR) {
_coxMR = coxMR;
_riskTermT2 = new double[coxMR.sizeEvents.length];
_cumsumRiskTerm = new double[coxMR.sizeEvents.length];
}
@Override
protected void map(int t) {
final double sizeEvents_t = _coxMR.sizeEvents[t];
final long countEvents_t = _coxMR.countEvents[t];
final double sumRiskEvents_t = _coxMR.sumRiskEvents[t];
final double rcumsumRisk_t = _coxMR.rcumsumRisk[t];
final double avgSize = sizeEvents_t / countEvents_t;
for (long e = 0; e < countEvents_t; ++e) {
final double frac = ((double) e) / ((double) countEvents_t);
final double term = rcumsumRisk_t - frac * sumRiskEvents_t;
_riskTermT2[t] += avgSize * frac / term;
_cumsumRiskTerm[t] += avgSize / term;
}
}
private EfronDJKSetupFun postProcess() {
final int timeLen = _coxMR._time.length;
for (int t = 1; t < _cumsumRiskTerm.length; t++) {
_cumsumRiskTerm[t] += (t % timeLen) == 0 ? 0 : _cumsumRiskTerm[t - 1];
}
return this;
}
static EfronDJKSetupFun setupEfron(CoxPHTask coxMR) {
EfronDJKSetupFun djkTermSetup = new EfronDJKSetupFun(coxMR);
H2O.submitTask(new LocalMR(djkTermSetup, coxMR.sizeEvents.length)).join();
return djkTermSetup.postProcess();
}
}
class EfronDJKTermTask extends CPHBaseTask<EfronDJKTermTask> {
private double[] _cumsumRiskTerm;
private double[] _riskTermT2;
private double[] _beta;
private final int _n_offsets;
private final int _n_time;
private final long _min_event;
private final boolean _has_weights_column;
private final boolean _has_start_column;
private final boolean _has_strata_column;
// OUT
double[][] _djkTerm;
EfronDJKTermTask(DataInfo dinfo, CoxPHTask coxMR, EfronDJKSetupFun setup) {
super(dinfo);
_cumsumRiskTerm = setup._cumsumRiskTerm;
_riskTermT2 = setup._riskTermT2;
_beta = coxMR._beta;
_n_offsets = coxMR._n_offsets;
_n_time = coxMR._time.length;
_min_event = coxMR._min_event;
_has_weights_column = coxMR._has_weights_column;
_has_start_column = coxMR._has_start_column;
_has_strata_column = coxMR._has_strata_column;
}
@Override
protected void chunkInit() {
final int n_coef = _beta.length;
_djkTerm = MemoryManager.malloc8d(n_coef, n_coef);
}
@Override
protected void processRow(DataInfo.Row row) {
double [] response = row.response;
int ncats = row.nBins;
int [] cats = row.binIds;
double [] nums = row.numVals;
final double weight = _has_weights_column ? row.weight : 1.0;
if (weight <= 0)
throw new IllegalArgumentException("weights must be positive values");
int respIdx = response.length - 1;
final long event = (long) (response[respIdx--] - _min_event);
final int t2 = (int) response[respIdx--];
int t1 = _has_start_column ? (int) response[respIdx--] : -1;
final double strata = _has_strata_column ? response[respIdx--] : 0;
assert respIdx == -1 : "expected to use all response data";
if (Double.isNaN(strata))
return; // skip this row
final int numStart = _dinfo.numStart();
// risk is cheaper to recalculate than trying to re-use risk calculated in CoxPHTask
double logRisk = 0;
for (int j = 0; j < ncats; ++j)
logRisk += _beta[cats[j]];
for (int j = 0; j < nums.length - _n_offsets; ++j)
logRisk += nums[j] * _beta[numStart + j];
for (int j = nums.length - _n_offsets; j < nums.length; ++j)
logRisk += nums[j];
final double risk = weight * Math.exp(logRisk);
final int ntotal = ncats + (nums.length - _n_offsets);
final int numStartIter = numStart - ncats;
final double cumsumRiskTerm;
if (_has_start_column && (t1 % _n_time > 0)) {
cumsumRiskTerm = _cumsumRiskTerm[t2] - _cumsumRiskTerm[t1 - 1];
} else {
cumsumRiskTerm = _cumsumRiskTerm[t2];
}
final double riskTermT2 = event > 0 ? _riskTermT2[t2] : 0;
final double mult = (riskTermT2 - cumsumRiskTerm) * risk;
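// Accumulate the x_j * x_k outer-product contribution, scaled by this row's risk and the precomputed Efron
// risk terms. Only the upper triangle (k >= j) is filled here; postGlobal() mirrors it into the lower triangle.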
for (int jit = 0; jit < ntotal; ++jit) {
final boolean jIsCat = jit < ncats;
final int j = jIsCat ? cats[jit] : numStartIter + jit;
final double x1 = jIsCat ? 1.0 : nums[jit - ncats];
final double x1mult = x1 * mult;
for (int kit = jit; kit < ntotal; ++kit) {
final boolean kIsCat = kit < ncats;
final int k = kIsCat ? cats[kit] : numStartIter + kit;
final double x2 = kIsCat ? 1.0 : nums[kit - ncats];
_djkTerm[j][k] += x1mult * x2;
}
}
}
@Override
protected void closeLocal() {
// to avoid sending them back over the wire
_cumsumRiskTerm = null;
_riskTermT2 = null;
_beta = null;
}
@Override
public void reduce(EfronDJKTermTask that) {
ArrayUtils.add(_djkTerm, that._djkTerm);
}
@Override
protected void postGlobal() {
for (int j = 1; j < _djkTerm.length; j++) {
for (int k = 0; k < j; k++)
_djkTerm[j][k] = _djkTerm[k][j];
}
}
}
class EfronUpdateFun extends MrFun<EfronUpdateFun> {
transient CoxPHTask _coxMR;
int _n_coef;
double _logLik;
double[] _gradient;
double[][] _hessian;
EfronUpdateFun(ComputationState cs, CoxPHTask coxMR) {
_coxMR = coxMR;
_n_coef = cs._n_coef;
_logLik = cs._logLik;
_gradient = cs._gradient;
_hessian = cs._hessian;
}
@Override
protected void map(int t) {
final double sizeEvents_t = _coxMR.sizeEvents[t];
if (sizeEvents_t > 0) {
final long countEvents_t = _coxMR.countEvents[t];
final double sumLogRiskEvents_t = _coxMR.sumLogRiskEvents[t];
final double sumRiskEvents_t = _coxMR.sumRiskEvents[t];
final double rcumsumRisk_t = _coxMR.rcumsumRisk[t];
final double avgSize = sizeEvents_t / countEvents_t;
_logLik += sumLogRiskEvents_t;
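// Efron's tie correction: for the e-th of the countEvents_t tied events at this time, the risk-set denominator
// is reduced by frac = e / countEvents_t of the tied subjects' total risk. Illustrative example: with 3 tied
// events, tied risk S and risk-set total R, the denominators are R, R - S/3 and R - 2S/3.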
for (long e = 0; e < countEvents_t; ++e) {
final double frac = ((double) e) / ((double) countEvents_t);
final double term = rcumsumRisk_t - frac * sumRiskEvents_t;
_logLik -= avgSize * Math.log(term);
for (int j = 0; j < _n_coef; ++j) {
final double djTerm = _coxMR.rcumsumXRisk[t][j] - frac * _coxMR.sumXRiskEvents[t][j];
final double djLogTerm = djTerm / term;
_gradient[j] -= avgSize * djLogTerm;
for (int k = 0; k < _n_coef; ++k) {
final double dkTerm = _coxMR.rcumsumXRisk[t][k] - frac * _coxMR.sumXRiskEvents[t][k];
_hessian[j][k] += avgSize * (djLogTerm * (dkTerm / term));
}
}
}
}
}
@Override
protected void reduce(EfronUpdateFun o) {
_logLik += o._logLik;
for (int i = 0; i < _n_coef; i++)
_gradient[i] += o._gradient[i];
for (int i = 0; i < _n_coef; i++)
for (int j = 0; j < _n_coef; j++)
_hessian[i][j] += o._hessian[i][j];
}
@Override
protected MrFun<EfronUpdateFun> makeCopy() {
return new EfronUpdateFun(new ComputationState(_n_coef), _coxMR);
}
ComputationState toComputationState(ComputationState cs) {
cs._logLik = _logLik;
cs._gradient = _gradient;
cs._hessian = _hessian;
return cs;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/coxph/Storage.java
|
package hex.coxph;
import jsr166y.ForkJoinTask;
import jsr166y.RecursiveAction;
import water.*;
import water.fvec.FileVec;
import water.fvec.Frame;
import water.fvec.Vec;
import static water.fvec.Vec.makeCon;
public class Storage {
/**
* Abstract matrix interface
*/
public interface Matrix {
double get(int row, int col);
void set(int row, int col, double val);
void add(int row, int col, double val);
int cols();
int rows();
long size();
double[] raw();
Frame toFrame(Key<Frame> key);
}
static abstract class AbstractMatrix<T extends AbstractMatrix> extends Iced<T> implements Matrix {
@Override public final Frame toFrame(Key<Frame> key) { return Storage.toFrame(this, key); }
}
/**
* Dense row matrix implementation
*/
public static class DenseRowMatrix extends AbstractMatrix<DenseRowMatrix> {
private double[] _data;
private int _cols;
private int _rows;
DenseRowMatrix(int rows, int cols) { this(MemoryManager.malloc8d(cols * rows), rows, cols); }
private DenseRowMatrix(double[] v, int rows, int cols) { _data = v; _rows = rows; _cols = cols; }
@Override public double get(int row, int col) {
assert(row<_rows && col<_cols) : "_data.length: " + _data.length + ", checking: " + row + " < " + _rows + " && " + col + " < " + _cols;
return _data[row*_cols + col];
}
@Override public void set(int row, int col, double val) { assert(row<_rows && col<_cols); _data[row*_cols + col] = val; }
@Override public void add(int row, int col, double val) { assert(row<_rows && col<_cols); _data[row*_cols + col] += val; }
@Override public int cols() { return _cols; }
@Override public int rows() { return _rows; }
@Override public long size() { return (long)_rows*(long)_cols; }
@Override public double[] raw() { return _data; }
}
/**
* Helper to convert a Matrix into a Frame
*
* @param m Matrix
* @param key Key for output Frame
* @return Reference to Frame (which is also in DKV)
*/
private static Frame toFrame(Matrix m, Key<Frame> key) {
H2O.submitTask(new ConvertMatrixToFrame(m, key)).join();
Frame f = DKV.getGet(key);
assert f != null;
return f;
}
private static class ConvertMatrixToFrame extends H2O.H2OCountedCompleter<ConvertMatrixToFrame> {
private final Matrix _m;
private final Key<Frame> _key;
private ConvertMatrixToFrame(Matrix m, Key<Frame> key) { _m = m; _key = key; }
@Override
public void compute2() {
final int log_rows_per_chunk = Math.max(1, FileVec.DFLT_LOG2_CHUNK_SIZE - (int) Math.floor(Math.log(_m.rows()) / Math.log(2.)));
Vec vs[] = new Vec[_m.cols()];
FillVec[] fv = new FillVec[_m.cols()];
for (int i = 0; i < _m.cols(); ++i) {
vs[i] = makeCon(0, _m.rows(), log_rows_per_chunk);
fv[i] = new FillVec(_m, vs[i], i);
}
ForkJoinTask.invokeAll(fv);
Frame f = new Frame(_key, vs);
DKV.put(_key, f);
tryComplete();
}
}
private static class FillVec extends RecursiveAction {
FillVec(Matrix m, Vec v, int col) {
_m = m; _v = v; _col = col;
}
final Matrix _m;
final Vec _v;
final int _col;
@Override public void compute() {
try (Vec.Writer vw = _v.open()) {
for (int r = 0; r < _m.rows(); r++)
vw.set(r, _m.get(r, _col));
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/deeplearning/DeepLearning.java
|
package hex.deeplearning;
import hex.*;
import hex.deeplearning.DeepLearningModel.DeepLearningParameters;
import hex.deeplearning.DeepLearningModel.DeepLearningParameters.MissingValuesHandling;
import hex.genmodel.utils.DistributionFamily;
import hex.glm.GLMTask;
import hex.util.EffectiveParametersUtils;
import hex.util.LinearAlgebraUtils;
import water.*;
import water.exceptions.H2OIllegalArgumentException;
import water.exceptions.H2OModelBuilderIllegalArgumentException;
import water.fvec.Frame;
import water.fvec.RebalanceDataSet;
import water.fvec.Vec;
import water.init.Linpack;
import water.init.NetworkTest;
import water.util.ArrayUtils;
import water.util.Log;
import water.util.MRUtils;
import water.util.PrettyPrint;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import static hex.util.LinearAlgebraUtils.toEigenArray;
import static water.util.MRUtils.sampleFrame;
import static water.util.MRUtils.sampleFrameStratified;
/**
* Deep Learning Neural Net implementation based on MRTask
*/
public class DeepLearning extends ModelBuilder<DeepLearningModel,DeepLearningModel.DeepLearningParameters,DeepLearningModel.DeepLearningModelOutput> {
/** Main constructor from Deep Learning parameters */
public DeepLearning( DeepLearningParameters parms ) { super(parms); init(false); }
public DeepLearning( DeepLearningParameters parms, Key<DeepLearningModel> key ) { super(parms,key); init(false); }
public DeepLearning( boolean startup_once ) { super(new DeepLearningParameters(),startup_once); }
/** Types of models we can build with DeepLearning */
@Override public ModelCategory[] can_build() {
return new ModelCategory[]{
ModelCategory.Regression,
ModelCategory.Binomial,
ModelCategory.Multinomial,
ModelCategory.AutoEncoder
};
}
@Override public boolean havePojo() { return true; }
@Override public boolean haveMojo() { return true; }
@Override
public ToEigenVec getToEigenVec() {
return LinearAlgebraUtils.toEigen;
}
@Override public boolean isSupervised() { return !_parms._autoencoder; }
@Override protected DeepLearningDriver trainModelImpl() { return new DeepLearningDriver(); }
/** Initialize the ModelBuilder, validating all arguments and preparing the
* training frame. This call is expected to be overridden in the subclasses
* and each subclass will start with "super.init();". This call is made
* by the front-end whenever the GUI is clicked, and needs to be fast;
* heavy-weight prep needs to wait for the trainModel() call.
*
* Validate the very large number of arguments in the DL Parameters directly. */
@Override public void init(boolean expensive) {
super.init(expensive);
_parms.validate(this, expensive);
_orig_projection_array = LinearAlgebraUtils.toEigenProjectionArray(_origTrain, _train, expensive);
DistributionFamily[] allowed_distributions = new DistributionFamily[] {
DistributionFamily.AUTO,
DistributionFamily.bernoulli,
DistributionFamily.multinomial,
DistributionFamily.gaussian,
DistributionFamily.poisson,
DistributionFamily.gamma,
DistributionFamily.laplace,
DistributionFamily.quantile,
DistributionFamily.huber,
DistributionFamily.tweedie,
};
if (!(ArrayUtils.contains(allowed_distributions, _parms._distribution)))
error("_distribution", _parms._distribution.name() + " is not supported for DeepLearning in current H2O.");
if (expensive && error_count() == 0) checkMemoryFootPrint();
}
/**
* Helper to create the DataInfo object from training/validation frames and the DL parameters
* @param train Training frame
* @param valid Validation frame
* @param parms Model parameters
* @param nClasses Number of response levels (1: regression, >=2: classification)
* @return DataInfo
*/
static DataInfo makeDataInfo(Frame train, Frame valid, DeepLearningParameters parms, int nClasses) {
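// Probe the link function at an arbitrary value to detect an identity link (link(x) == x);
// only in that case is the response standardized for regression below.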
double x = 0.782347234;
boolean identityLink = DistributionFactory.getDistribution(parms).link(x) == x;
DataInfo dinfo = new DataInfo(
train,
valid,
parms._autoencoder ? 0 : 1, //nResponses
parms._autoencoder || parms._use_all_factor_levels, //use all FactorLevels for auto-encoder
parms._standardize ? (parms._autoencoder ? DataInfo.TransformType.NORMALIZE : parms._sparse ? DataInfo.TransformType.DESCALE : DataInfo.TransformType.STANDARDIZE) : DataInfo.TransformType.NONE, //transform predictors
!parms._standardize || train.lastVec().isCategorical() ? DataInfo.TransformType.NONE : identityLink ? DataInfo.TransformType.STANDARDIZE : DataInfo.TransformType.NONE, //transform response for regression with identity link
parms._missing_values_handling == DeepLearningParameters.MissingValuesHandling.Skip, //whether to skip missing
false, // do not replace NAs in numeric cols with mean
true, // always add a bucket for missing values
parms._weights_column != null, // observation weights
parms._offset_column != null,
parms._fold_column != null
);
// Checks and adjustments:
// 1) observation weights (adjust mean/sigmas for predictors and response)
// 2) NAs (check that there's enough rows left)
GLMTask.YMUTask ymt = new GLMTask.YMUTask(dinfo, nClasses,!parms._autoencoder && nClasses == 1, parms._missing_values_handling == MissingValuesHandling.Skip, !parms._autoencoder,true).doAll(dinfo._adaptedFrame);
if (ymt.wsum() == 0 && parms._missing_values_handling == DeepLearningParameters.MissingValuesHandling.Skip)
throw new H2OIllegalArgumentException("No rows left in the dataset after filtering out rows with missing values. Ignore columns with many NAs or set missing_values_handling to 'MeanImputation'.");
if (parms._weights_column != null && parms._offset_column != null) {
Log.warn("Combination of offset and weights can lead to slight differences because Rollupstats aren't weighted - need to re-calculate weighted mean/sigma of the response including offset terms.");
}
if (parms._weights_column != null && parms._offset_column == null /*FIXME: offset not yet implemented*/) {
dinfo.updateWeightedSigmaAndMean(ymt.predictorSDs(), ymt.predictorMeans());
if (nClasses == 1)
dinfo.updateWeightedSigmaAndMeanForResponse(ymt.responseSDs(), ymt.responseMeans());
}
return dinfo;
}
@Override protected void checkMemoryFootPrint_impl() {
if (_parms._checkpoint != null) return;
long p = hex.util.LinearAlgebraUtils.numColsExp(_train,true) - (_parms._autoencoder ? 0 : _train.lastVec().cardinality());
String[][] dom = _train.domains();
// hack: add the factor levels for the NAs
for (int i=0; i<_train.numCols()-(_parms._autoencoder ? 0 : 1); ++i) {
if (dom[i] != null) {
p++;
}
}
// assert(makeDataInfo(_train, _valid, _parms).fullN() == p);
long output = _parms._autoencoder ? p : Math.abs(_train.lastVec().cardinality());
long model_size = 0;
if (_parms._hidden.length==0) {
model_size += p * output;
} else {
// weights
model_size += p * _parms._hidden[0];
int layer = 1;
for (; layer < _parms._hidden.length; ++layer)
model_size += _parms._hidden[layer - 1] * _parms._hidden[layer];
model_size += _parms._hidden[layer - 1] * output;
// biases
for (layer = 0; layer < _parms._hidden.length; ++layer)
model_size += _parms._hidden[layer];
model_size += output;
}
if (model_size > 1e8) {
String msg = "Model is too large: " + model_size + " parameters. Try reducing the number of neurons in the hidden layers (or reduce the number of categorical factors).";
error("_hidden", msg);
}
}
@Override public void cv_computeAndSetOptimalParameters(ModelBuilder[] cvModelBuilders) {
_parms._overwrite_with_best_model = false;
if( _parms._stopping_rounds == 0 && _parms._max_runtime_secs == 0) return; // No exciting changes to stopping conditions
// Extract stopping conditions from each CV model, and compute the best stopping answer
_parms._stopping_rounds = 0;
setMaxRuntimeSecsForMainModel();
double sum = 0;
for( ModelBuilder cvmb : cvModelBuilders )
sum += ((DeepLearningModel)DKV.getGet(cvmb.dest())).last_scored().epoch_counter;
_parms._epochs = sum/cvModelBuilders.length;
if( !_parms._quiet_mode ) {
warn("_epochs", "Setting optimal _epochs to " + _parms._epochs + " for cross-validation main model based on early stopping of cross-validation models.");
warn("_stopping_rounds", "Disabling convergence-based early stopping for cross-validation main model.");
if (_parms._main_model_time_budget_factor == 0)
warn("_max_runtime_secs", "Disabling maximum allowed runtime for cross-validation main model.");
}
}
@Override
protected Frame rebalance(final Frame original_fr, boolean local, final String name) {
if (original_fr == null) return null;
if (_parms._force_load_balance || _parms._reproducible) { // this is called before the parameters are sanitized, so force_load_balance might still be user-disabled -> must also check the reproducible flag
int original_chunks = original_fr.anyVec().nChunks();
_job.update(0,"Load balancing " + name.substring(name.length() - 5) + " data...");
int chunks = desiredChunks(original_fr, local);
if (!_parms._reproducible) {
if (original_chunks >= chunks){
if (!_parms._quiet_mode)
Log.info("Dataset already contains " + original_chunks + " chunks. No need to rebalance.");
return original_fr;
}
} else { //reproducible, set chunks to 1
assert chunks == 1;
if (!_parms._quiet_mode)
Log.warn("Reproducibility enforced - using only 1 thread - can be slow.");
if (original_chunks == 1)
return original_fr;
}
if (!_parms._quiet_mode)
Log.info("Rebalancing " + name.substring(name.length()-5) + " dataset into " + chunks + " chunks.");
Key newKey = Key.make(name + ".chks" + chunks);
RebalanceDataSet rb = new RebalanceDataSet(original_fr, newKey, chunks);
H2O.submitTask(rb).join();
Frame rebalanced_fr = DKV.get(newKey).get();
Scope.track(rebalanced_fr);
return rebalanced_fr;
}
return original_fr;
}
@Override
protected int desiredChunks(final Frame original_fr, boolean local) {
return _parms._reproducible ? 1 : (int) Math.min(4 * H2O.NUMCPUS * (local ? 1 : H2O.CLOUD.size()), original_fr.numRows());
}
public class DeepLearningDriver extends Driver {
@Override public void computeImpl() {
init(true); //this can change the seed if it was set to -1
if (Model.evaluateAutoModelParameters()) {
initActualParamValues();
}
Model.Parameters parmsToCheck = _parms.clone();
// Something went wrong
if (error_count() > 0)
throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(DeepLearning.this);
buildModel();
// check that all members of _parms, apart from those originally set to AUTO, haven't changed during DL model training
checkNonAutoParmsNotChanged(parmsToCheck, _parms);
}
public void checkNonAutoParmsNotChanged(Model.Parameters params1, Model.Parameters params2){
try {
for (Field field : params1.getClass().getFields()) {
Class type = field.getType();
Object value1 = field.get(params1);
if (value1 != null && !"AUTO".equalsIgnoreCase(value1.toString())){
Object value2 = field.get(params2);
assert(value1.toString().equalsIgnoreCase(value2.toString())) : "Found non-AUTO value in _parms which has changed during DL model training";
}
}
} catch (IllegalAccessException e) {
throw new RuntimeException("Error while checking param changes during DL model training", e);
}
}
/**
* Train a Deep Learning model, assumes that all members are populated
* If checkpoint == null, then start training a new model, otherwise continue from a checkpoint
*/
public final void buildModel() {
DeepLearningModel cp = null;
List<Key> removeMe = new ArrayList<>();
if (_parms._checkpoint == null) {
cp = new DeepLearningModel(dest(), _parms, new DeepLearningModel.DeepLearningModelOutput(DeepLearning.this), _train, _valid, nclasses());
if (_parms._pretrained_autoencoder != null) {
final DeepLearningModel pretrained = DKV.getGet(_parms._pretrained_autoencoder);
if (pretrained == null)
throw new H2OIllegalArgumentException("The pretrained model '" + _parms._pretrained_autoencoder + "' cannot be found.");
if (_parms._autoencoder || !pretrained._parms._autoencoder)
throw new H2OIllegalArgumentException("The pretrained model must be unsupervised (an autoencoder), and the model to be trained must be supervised.");
Log.info("Loading model parameters of input and hidden layers from the pretrained autoencoder model.");
cp.model_info().initializeFromPretrainedModel(pretrained.model_info());
} else {
cp.model_info().initializeMembers(_parms._initial_weights, _parms._initial_biases);
}
} else {
final DeepLearningModel previous = DKV.getGet(_parms._checkpoint);
if (previous == null) throw new IllegalArgumentException("Checkpoint not found.");
Log.info("Resuming from checkpoint.");
_job.update(0,"Resuming from checkpoint");
if( isClassifier() != previous._output.isClassifier() )
throw new H2OIllegalArgumentException("Response type must be the same as for the checkpointed model.");
if( isSupervised() != previous._output.isSupervised() )
throw new H2OIllegalArgumentException("Model type must be the same as for the checkpointed model.");
//READ ONLY
DeepLearningParameters.Sanity.checkIfParameterChangeAllowed(previous._input_parms, _parms);
DataInfo dinfo;
try {
// PUBDEV-2513: Adapt _train and _valid (in-place) to match the frames that were used for the previous model
// This can add or remove dummy columns (can happen if the dataset is sparse and datasets have different non-const columns)
for (String st : previous.adaptTestForTrain(_train,true,false)) Log.warn(st);
for (String st : previous.adaptTestForTrain(_valid,true,false)) Log.warn(st);
dinfo = makeDataInfo(_train, _valid, _parms, nclasses());
DKV.put(dinfo); // For FrameTask that needs DataInfo in the DKV as a standalone thing - the DeepLearningModel has its own copy inside itself
removeMe.add(dinfo._key);
cp = new DeepLearningModel(dest(), _parms, previous, false, dinfo);
cp.write_lock(_job);
if (!Arrays.equals(cp._output._names, previous._output._names)) {
throw new H2OIllegalArgumentException("The columns of the training data must be the same as for the checkpointed model. Check ignored columns (or disable ignore_const_cols).");
}
if (!Arrays.deepEquals(cp._output._domains, previous._output._domains)) {
throw new H2OIllegalArgumentException("Categorical factor levels of the training data must be the same as for the checkpointed model.");
}
if (dinfo.fullN() != previous.model_info().data_info().fullN()) {
throw new H2OIllegalArgumentException("Total number of predictors is different than for the checkpointed model.");
}
if (_parms._epochs <= previous.epoch_counter) {
throw new H2OIllegalArgumentException("Total number of epochs must be larger than the number of epochs already trained for the checkpointed model (" + previous.epoch_counter + ").");
}
// these are the mutable parameters that are to be used by the model (stored in model_info.parameters)
final DeepLearningParameters actualParms = cp.model_info().get_params(); //actually used parameters for model building (defaults filled in, etc.)
assert (actualParms != previous.model_info().get_params());
assert (actualParms != _parms);
assert (actualParms != previous._parms);
// Update the actual parameters (cp.model_info().get_params()) based on what the user wants (cp_modifiable parameters only); so far they were cloned from the previous model
//show the user only the changes in the user-facing parameters
DeepLearningParameters.Sanity.updateParametersDuringCheckpointRestart(_parms, previous._parms, false /*doIt*/, false /*quiet*/);
//actually change the parameters in the "insider" version of parameters
DeepLearningParameters.Sanity.updateParametersDuringCheckpointRestart(_parms /*user-given*/, cp.model_info().get_params() /*model_info.parameters that will be used*/, true /*doIt*/, true /*quiet*/);
// update/sanitize parameters (in place) to set defaults etc.
DeepLearningParameters.Sanity.modifyParms(_parms, cp.model_info().get_params(), nclasses());
Log.info("Continuing training after " + String.format("%.3f", previous.epoch_counter) + " epochs from the checkpointed model.");
cp.update(_job);
} catch (H2OIllegalArgumentException ex){
if (cp != null) {
cp.unlock(_job);
cp.delete();
cp = null;
}
throw ex;
} finally {
if (cp != null) cp.unlock(_job);
}
}
DistributionFamily actualDistribution = cp.model_info().get_params()._distribution;
if (Model.evaluateAutoModelParameters() && _parms._distribution == DistributionFamily.AUTO) {
_parms._distribution = actualDistribution;
cp._parms._distribution = actualDistribution;
}
trainModel(cp);
for (Key k : removeMe) DKV.remove(k);
// clean up, but don't delete weights and biases if user asked for export
List<Key> keep = new ArrayList<>();
try {
if ( _parms._export_weights_and_biases && cp._output.weights != null && cp._output.biases != null) {
for (Key k : Arrays.asList(cp._output.weights)) {
keep.add(k);
for (Vec vk : ((Frame) DKV.getGet(k)).vecs()) {
keep.add(vk._key);
}
}
for (Key k : Arrays.asList(cp._output.biases)) {
keep.add(k);
for (Vec vk : ((Frame) DKV.getGet(k)).vecs()) {
keep.add(vk._key);
}
}
}
} finally {
Scope.untrack(keep);
}
}
/**
* Train a Deep Learning neural net model
* @param model Input model (e.g., from initModel(), or from a previous training run)
* @return Trained model
*/
public final DeepLearningModel trainModel(DeepLearningModel model) {
Frame validScoreFrame = null;
Frame train, trainScoreFrame;
try {
// if (checkpoint == null && !quiet_mode) logStart(); //if checkpoint is given, some Job's params might be uninitialized (but the restarted model's parameters are correct)
if (model == null) {
model = DKV.get(dest()).get();
}
Log.info("Model category: " + (_parms._autoencoder ? "Auto-Encoder" : isClassifier() ? "Classification" : "Regression"));
final long model_size = model.model_info().size();
Log.info("Number of model parameters (weights/biases): " + String.format("%,d", model_size));
model.write_lock(_job);
_job.update(0,"Setting up training data...");
final DeepLearningParameters mp = model.model_info().get_params();
// temporary frames of the same "name" as the orig _train/_valid (asking the parameter's Key, not the actual frame)
// Note: don't put into DKV or they would overwrite the _train/_valid frames!
Frame tra_fr = new Frame(mp._train, _train.names(), _train.vecs());
Frame val_fr = _valid != null ? new Frame(mp._valid,_valid.names(), _valid.vecs()) : null;
train = tra_fr;
if (model._output.isClassifier() && mp._balance_classes) {
_job.update(0,"Balancing class distribution of training data...");
float[] trainSamplingFactors = new float[train.lastVec().domain().length]; //leave initialized to 0 -> will be filled up below
if (mp._class_sampling_factors != null) {
if (mp._class_sampling_factors.length != train.lastVec().domain().length)
throw new IllegalArgumentException("class_sampling_factors must have " + train.lastVec().domain().length + " elements");
trainSamplingFactors = mp._class_sampling_factors.clone(); //clone: don't modify the original
}
train = sampleFrameStratified(
train, train.lastVec(), train.vec(model._output.weightsName()), trainSamplingFactors, (long)(mp._max_after_balance_size*train.numRows()), mp._seed, true, false);
Vec l = train.lastVec();
Vec w = train.vec(model._output.weightsName());
MRUtils.ClassDist cd = new MRUtils.ClassDist(l);
model._output._modelClassDist = _weights != null ? cd.doAll(l, w).relDist() : cd.doAll(l).relDist();
}
model.training_rows = train.numRows();
if (_weights != null && _weights.min()==0 && _weights.max()==1 && _weights.isInt()) {
model.training_rows = Math.round(train.numRows()*_weights.mean());
Log.warn("Not counting " + (train.numRows() - model.training_rows) + " rows with weight=0 towards an epoch.");
}
Log.info("One epoch corresponds to " + model.training_rows + " training data rows.");
trainScoreFrame = sampleFrame(train, mp._score_training_samples, mp._seed); //training scoring dataset is always sampled uniformly from the training dataset
if( trainScoreFrame != train ) Scope.track(trainScoreFrame);
if (!_parms._quiet_mode) Log.info("Number of chunks of the training data: " + train.anyVec().nChunks());
if (val_fr != null) {
model.validation_rows = val_fr.numRows();
// validation scoring dataset can be sampled in multiple ways from the given validation dataset
if (model._output.isClassifier() && mp._balance_classes && mp._score_validation_sampling == DeepLearningParameters.ClassSamplingMethod.Stratified) {
_job.update(0,"Sampling validation data (stratified)...");
validScoreFrame = sampleFrameStratified(val_fr, val_fr.lastVec(), val_fr.vec(model._output.weightsName()), null,
mp._score_validation_samples > 0 ? mp._score_validation_samples : val_fr.numRows(), mp._seed +1, false /* no oversampling */, false);
} else {
_job.update(0,"Sampling validation data...");
validScoreFrame = sampleFrame(val_fr, mp._score_validation_samples, mp._seed +1);
if( validScoreFrame != val_fr ) Scope.track(validScoreFrame);
}
if (!_parms._quiet_mode) Log.info("Number of chunks of the validation data: " + validScoreFrame.anyVec().nChunks());
}
// Set train_samples_per_iteration size (cannot be done earlier since this depends on whether stratified sampling is done)
model.actual_train_samples_per_iteration = computeTrainSamplesPerIteration(mp, model.training_rows, model);
// Determine whether shuffling is enforced
if(mp._replicate_training_data && (model.actual_train_samples_per_iteration == model.training_rows*(mp._single_node_mode ?1:H2O.CLOUD.size())) && !mp._shuffle_training_data && H2O.CLOUD.size() > 1 && !mp._reproducible) {
if (!mp._quiet_mode)
Log.info("Enabling training data shuffling, because all nodes train on the full dataset (replicated training data).");
mp._shuffle_training_data = true;
}
if(!mp._shuffle_training_data && model.actual_train_samples_per_iteration == model.training_rows && train.anyVec().nChunks()==1) {
if (!mp._quiet_mode)
Log.info("Enabling training data shuffling to avoid training rows in the same order over and over (no Hogwild since there's only 1 chunk).");
mp._shuffle_training_data = true;
}
// if (!mp._quiet_mode) Log.info("Initial model:\n" + model.model_info());
long now = System.currentTimeMillis();
model._timeLastIterationEnter = now;
if (_parms._autoencoder) {
_job.update(0,"Scoring null model of autoencoder...");
if (!mp._quiet_mode)
Log.info("Scoring the null model of the autoencoder.");
model.doScoring(trainScoreFrame, validScoreFrame, _job._key, 0, false); //get the null model reconstruction error
}
// put the initial version of the model into DKV
model.update(_job);
model.total_setup_time_ms += now - _job.start_time();
Log.info("Total setup time: " + PrettyPrint.msecs(model.total_setup_time_ms, true));
Log.info("Starting to train the Deep Learning model.");
_job.update(0,"Training...");
//main loop
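// Main training loop: one map/reduce pass over the training data per iteration. With replicated training data
// every node trains on its full local copy (DeepLearningTask2); otherwise the rows are partitioned across nodes
// (DeepLearningTask) and the partial results are combined in the reduce phase.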
for(;;) {
model.iterations++;
model.set_model_info(mp._epochs == 0 ? model.model_info()
: H2O.CLOUD.size() > 1 && mp._replicate_training_data ? (mp._single_node_mode
? new DeepLearningTask2(_job._key, train, model.model_info(), rowFraction(train, mp, model), model.iterations).doAll(Key.make(H2O.SELF)).model_info() //replicated data + single node mode
: new DeepLearningTask2(_job._key, train, model.model_info(), rowFraction(train, mp, model), model.iterations).doAllNodes().model_info()) //replicated data + multi-node mode
: new DeepLearningTask(_job._key, model.model_info(), rowFraction(train, mp, model), model.iterations).doAll(train).model_info()); //distributed data (always in multi-node mode)
if (stop_requested() && !timeout()) throw new Job.JobCancelledException(_job);
if (!model.doScoring(trainScoreFrame, validScoreFrame, _job._key, model.iterations, false)) break; //finished training (or early stopping or convergence)
if (timeout()) { //stop after scoring
_job.update((long) (mp._epochs * train.numRows())); // mark progress as completed
break;
}
}
// replace the model with the best model so far (if it's better)
if (!stop_requested() && _parms._overwrite_with_best_model && model.actual_best_model_key != null && _parms._nfolds == 0) {
DeepLearningModel best_model = DKV.getGet(model.actual_best_model_key);
if (best_model != null && best_model.loss() < model.loss() && Arrays.equals(best_model.model_info().units, model.model_info().units)) {
if (!_parms._quiet_mode) {
Log.info("Setting the model to be the best model so far (based on scoring history).");
Log.info("Best model's loss: " + best_model.loss() + " vs this model's loss (before overwriting it with the best model): " + model.loss());
}
DeepLearningModelInfo mi = IcedUtils.deepCopy(best_model.model_info());
// Don't cheat - count the full number of training samples, since that's how much training it took to get here (without finding anything better)
mi.set_processed_global(model.model_info().get_processed_global());
mi.set_processed_local(model.model_info().get_processed_local());
DeepLearningParameters parms = model.model_info().get_params(); // backup the parameters for this model
model.set_model_info(mi); // this overwrites also the parameters from the previous best model, but we only want the state
model.model_info().parameters = parms; // restore the parameters
model.update(_job);
model.doScoring(trainScoreFrame, validScoreFrame, _job._key, model.iterations, true);
if (best_model.loss() != model.loss()) {
if (!_parms._quiet_mode) {
Log.info("Best model's loss: " + best_model.loss() + " vs this model's loss (after overwriting it with the best model) : " + model.loss());
}
Log.warn("Even though the model was reset to the previous best model, we observe different scoring results. " +
"Most likely, the data set has changed during a checkpoint restart. If so, please compare the metrics to observe your data shift.");
}
}
}
//store coefficient names for future use
//possibly change
model.model_info().data_info().coefNames();
}
finally {
if (!_parms._quiet_mode) {
Log.info("==============================================================================================================================================================================");
if (stop_requested()) {
if (timeout())
warn("_max_runtime_secs", "Deep Learning model training was interrupted due to " +
"timeout. Increase _max_runtime_secs or set it to 0 to disable it.");
Log.info("Deep Learning model training was interrupted.");
} else {
Log.info("Finished training the Deep Learning model.");
if (model!=null) Log.info(model);
}
Log.info("==============================================================================================================================================================================");
}
if (model != null) {
model.deleteElasticAverageModels();
model.unlock(_job);
if (model.actual_best_model_key != null) {
assert (model.actual_best_model_key != model._key);
DKV.remove(model.actual_best_model_key);
}
}
}
return model;
}
public void initActualParamValues() {
if (_parms._autoencoder) {
if (_parms._stopping_metric == ScoreKeeper.StoppingMetric.AUTO) {
_parms._stopping_metric = ScoreKeeper.StoppingMetric.MSE;
}
} else {
EffectiveParametersUtils.initStoppingMetric(_parms, isClassifier());
}
EffectiveParametersUtils.initCategoricalEncoding(_parms, Model.Parameters.CategoricalEncodingScheme.OneHotInternal);
}
/**
* Compute the fraction of rows that need to be used for training during one iteration
* @param numRows number of training rows
* @param train_samples_per_iteration number of training rows to be processed per iteration
* @param replicate_training_data whether or not the training data is replicated on each node
* @return fraction of rows to be used for training during one iteration
*/
private float computeRowUsageFraction(final long numRows, final long train_samples_per_iteration, final boolean replicate_training_data) {
float rowUsageFraction = (float)train_samples_per_iteration / numRows;
if (replicate_training_data) rowUsageFraction /= H2O.CLOUD.size();
assert(rowUsageFraction > 0);
return rowUsageFraction;
}
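// Illustrative sketch with made-up numbers (not from any actual run): with
// train_samples_per_iteration = 10,000, numRows = 100,000 and a 4-node cloud using
// replicate_training_data, the fraction is 10,000 / 100,000 / 4 = 0.025, i.e. each node
// samples 2.5% of its local copy of the training data per iteration.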
private float rowFraction(Frame train, DeepLearningParameters p, DeepLearningModel m) {
return computeRowUsageFraction(train.numRows(), m.actual_train_samples_per_iteration, p._replicate_training_data);
}
}
/**
* Compute the actual train_samples_per_iteration size from the user-given parameter
* @param mp Model parameters (DeepLearningParameters object)
* @param numRows number of training rows
* @param model DL model
* @return The total number of training rows to be processed per iteration (summed over all nodes)
*/
static long computeTrainSamplesPerIteration(final DeepLearningParameters mp, final long numRows, final DeepLearningModel model) {
long tspi = mp._train_samples_per_iteration;
assert(tspi == 0 || tspi == -1 || tspi == -2 || tspi >= 1);
if (tspi == 0 || (!mp._replicate_training_data && tspi == -1) ) {
tspi = numRows;
if (!mp._quiet_mode) Log.info("Setting train_samples_per_iteration (" + mp._train_samples_per_iteration + ") to one epoch: #rows (" + tspi + ").");
}
else if (tspi == -1) {
tspi = (mp._single_node_mode ? 1 : H2O.CLOUD.size()) * numRows;
if (!mp._quiet_mode) Log.info("Setting train_samples_per_iteration (" + mp._train_samples_per_iteration + ") to #nodes x #rows (" + tspi + ").");
} else if (tspi == -2) {
// automatic tuning based on CPU speed, network speed and model size
// measure cpu speed
double total_gflops = 0;
for (H2ONode h2o : H2O.CLOUD._memary) {
HeartBeat hb = h2o._heartbeat;
total_gflops += hb._gflops; //can be NaN if not yet run
}
if (mp._single_node_mode) total_gflops /= H2O.CLOUD.size();
if (Double.isNaN(total_gflops)) {
total_gflops = Linpack.run(H2O.SELF._heartbeat._cpus_allowed) * (mp._single_node_mode ? 1 : H2O.CLOUD.size());
}
assert(!Double.isNaN(total_gflops));
final long model_size = model.model_info().size();
int[] msg_sizes = new int[]{ 1, (int)(model_size*4) == (model_size*4) ? (int)(model_size*4) : Integer.MAX_VALUE };
double[] microseconds_collective = new double[msg_sizes.length];
NetworkTest.NetworkTester nt = new NetworkTest.NetworkTester(msg_sizes,null,microseconds_collective,model_size>1e6 ? 1 : 5 /*repeats*/,false,true /*only collectives*/);
nt.compute2();
//length of the network traffic queue based on log-tree rollup (2 log(nodes))
int network_queue_length = mp._single_node_mode || H2O.CLOUD.size() == 1? 1 : 2*(int)Math.floor(Math.log(H2O.CLOUD.size())/Math.log(2));
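// e.g. an 8-node cloud gives a queue length of 2 * floor(log2(8)) = 6 (hypothetical cloud size, for illustration only)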
// heuristics
double flops_overhead_per_row = 50;
if (mp._activation == DeepLearningParameters.Activation.Maxout || mp._activation == DeepLearningParameters.Activation.MaxoutWithDropout) {
flops_overhead_per_row *= 8;
} else if (mp._activation == DeepLearningParameters.Activation.Tanh || mp._activation == DeepLearningParameters.Activation.TanhWithDropout) {
flops_overhead_per_row *= 5;
}
// target fraction of comm vs cpu time: 5%
double fraction = mp._single_node_mode || H2O.CLOUD.size() == 1 ? 1e-3 : mp._target_ratio_comm_to_comp; //in single node mode, there's no model averaging effect, so there's less need to shorten the M/R iteration
// estimate the time for communication (network) and training (compute)
model.time_for_communication_us = (H2O.CLOUD.size() == 1 ? 1e4 /* add 10ms for single-node */ : 1e5 /* add 100ms for multi-node MR overhead */) + network_queue_length * microseconds_collective[1];
double time_per_row_us = (flops_overhead_per_row * model_size + 10000 * model.model_info().units[0]) / (total_gflops * 1e9) / H2O.SELF._heartbeat._cpus_allowed * 1e6;
assert(!Double.isNaN(time_per_row_us));
// compute the optimal number of training rows per iteration
// fraction := time_comm_us / (time_comm_us + tspi * time_per_row_us) ==> tspi = (time_comm_us/fraction - time_comm_us)/time_per_row_us
tspi = (long)((model.time_for_communication_us / fraction - model.time_for_communication_us)/ time_per_row_us);
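// Worked example with hypothetical measurements: time_for_communication_us = 2e5,
// fraction = 0.05 and time_per_row_us = 10 give tspi = (2e5/0.05 - 2e5) / 10 = 380,000
// rows per iteration, before the caps applied below.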
tspi = Math.min(tspi, (mp._single_node_mode ? 1 : H2O.CLOUD.size()) * numRows * 10); //not more than 10x of what train_samples_per_iteration=-1 would do
// If the number is close to a multiple of epochs, use that -> prettier scoring
if (tspi > numRows && Math.abs(tspi % numRows)/(double)numRows < 0.2) tspi -= tspi % numRows;
tspi = Math.min(tspi, (long)(mp._epochs * numRows / 10)); //limit to number of epochs desired, but at least 10 iterations total
if (H2O.CLOUD.size() == 1 || mp._single_node_mode) {
tspi = Math.min(tspi, 10*(int)(1e6/time_per_row_us)); //in single-node mode, only run for at most 10 seconds
}
tspi = Math.max(1, tspi); //at least 1 row
tspi = Math.min(100000*H2O.CLOUD.size(), tspi); //at most 100k rows per node for initial guess - can always relax later on
if (!mp._quiet_mode) {
Log.info("Auto-tuning parameter 'train_samples_per_iteration':");
Log.info("Estimated compute power : " + Math.round(total_gflops*100)/100 + " GFlops");
Log.info("Estimated time for comm : " + PrettyPrint.usecs((long) model.time_for_communication_us));
Log.info("Estimated time per row : " + ((long)time_per_row_us > 0 ? PrettyPrint.usecs((long) time_per_row_us) : time_per_row_us + " usecs"));
Log.info("Estimated training speed: " + (int)(1e6/time_per_row_us) + " rows/sec");
Log.info("Setting train_samples_per_iteration (" + mp._train_samples_per_iteration + ") to auto-tuned value: " + tspi);
}
} else {
// limit user-given value to number of epochs desired
tspi = Math.max(1, Math.min(tspi, (long) (mp._epochs * numRows)));
}
assert(tspi != 0 && tspi != -1 && tspi != -2 && tspi >= 1);
return tspi;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/deeplearning/DeepLearningModel.java
|
package hex.deeplearning;
import hex.*;
import hex.genmodel.CategoricalEncoding;
import hex.genmodel.utils.DistributionFamily;
import hex.util.EffectiveParametersUtils;
import hex.util.LinearAlgebraUtils;
import water.*;
import water.codegen.CodeGenerator;
import water.codegen.CodeGeneratorPipeline;
import water.exceptions.H2OIllegalArgumentException;
import water.exceptions.JCodeSB;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.udf.CFuncRef;
import water.util.*;
import java.lang.reflect.Field;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import static hex.ModelMetrics.calcVarImp;
import static hex.deeplearning.DeepLearning.makeDataInfo;
import static hex.deeplearning.DeepLearningModel.DeepLearningParameters.Loss.*;
import static hex.genmodel.utils.DistributionFamily.poisson;
import static water.H2O.technote;
/**
* The Deep Learning model
* It contains a DeepLearningModelInfo with the most up-to-date model,
* a scoring history, as well as some helpers to indicate the progress
*/
public class DeepLearningModel extends Model<DeepLearningModel, DeepLearningModel.DeepLearningParameters, DeepLearningModel.DeepLearningModelOutput>
implements Model.DeepFeatures, Model.Contributions {
@Override
public ToEigenVec getToEigenVec() {
return LinearAlgebraUtils.toEigen;
}
@Override
public Frame scoreContributions(Frame frame, Key<Frame> destination_key, Job<Frame> j, ContributionsOptions options, Frame backgroundFrame) {
if (null == backgroundFrame)
throw H2O.unimpl("DeepLearning supports contribution calculation only with a background frame.");
Log.info("Starting contributions calculation for "+this._key+"...");
try (Scope.Safe s = Scope.safe(frame, backgroundFrame)) {
Frame adaptedFrame = adaptFrameForScore(frame, false);
DKV.put(Scope.track_generic(adaptedFrame)); //use track_generic as a Scope lookup optimization as we know it contains only protected vecs
Frame adaptedBgFrame = adaptFrameForScore(backgroundFrame, false);
DKV.put(Scope.track_generic(adaptedBgFrame)); //same as above
DeepSHAPContributionsWithBackground contributions = new DeepSHAPContributionsWithBackground(this,
adaptedFrame._key,
adaptedBgFrame._key,
options._outputPerReference,
ContributionsOutputFormat.Compact.equals(options._outputFormat)
? model_info.data_info().coefOriginalColumnIndices(adaptedFrame)
: null,
options._outputSpace);
String[] cols = ContributionsOutputFormat.Compact.equals(options._outputFormat)
? model_info.data_info.coefOriginalNames(adaptedFrame)
: model_info.data_info.coefNames();
String[] colNames = new String[cols.length + 1]; // +1 for bias term
System.arraycopy(cols, 0, colNames, 0, colNames.length - 1);
colNames[colNames.length - 1] = "BiasTerm";
return Scope.untrack(contributions.runAndGetOutput(j, destination_key, colNames));
} finally {
Log.info("Finished contributions calculation for "+this._key+"...");
}
}
/**
* The Deep Learning model output contains a few extra fields in addition to the metrics in Model.Output
* 1) Scoring history (raw data)
* 2) weights/biases (raw data)
* 3) variable importances (TwoDimTable)
*/
public static class DeepLearningModelOutput extends Model.Output {
public DeepLearningModelOutput(DeepLearning b) {
super(b);
autoencoder = b._parms._autoencoder;
assert b.isSupervised() == !autoencoder;
}
final boolean autoencoder;
@Override
public boolean isAutoencoder() { return autoencoder; }
DeepLearningScoringInfo errors;
Key[] weights;
Key[] biases;
double[] normmul;
double[] normsub;
double[] normrespmul;
double[] normrespsub;
int[] catoffsets;
public TwoDimTable _variable_importances;
@Override
public TwoDimTable getVariableImportances() {
return _variable_importances;
}
@Override public ModelCategory getModelCategory() {
return autoencoder ? ModelCategory.AutoEncoder : super.getModelCategory();
}
@Override public boolean isSupervised() {
return !autoencoder;
}
} // DeepLearningModelOutput
@Override
public void initActualParamValues() {
super.initActualParamValues();
EffectiveParametersUtils.initFoldAssignment(_parms);
}
void set_model_info(DeepLearningModelInfo mi) {
assert(mi != null);
model_info = mi;
}
final public DeepLearningModelInfo model_info() { return model_info; }
final public VarImp varImp() { return _output.errors.variable_importances; }
private volatile DeepLearningModelInfo model_info;
// timing
public long total_checkpointed_run_time_ms; //time spent in previous models
public long total_training_time_ms; //total time spent running (training+scoring, including all previous models)
public long total_scoring_time_ms; //total time spent scoring (including all previous models)
public long total_setup_time_ms; //total time spent setting up (including all previous models)
private long time_of_start_ms; //start time for this model (this cp restart)
// auto-tuning
public long actual_train_samples_per_iteration;
public double time_for_communication_us; //helper for auto-tuning: time in microseconds for collective bcast/reduce of the model
// helpers for diagnostics
public double epoch_counter;
public int iterations;
public boolean stopped_early;
public long training_rows;
public long validation_rows;
// Keep the best model so far, based on a single criterion (overall class. error or MSE)
private float _bestLoss = Float.POSITIVE_INFINITY;
public Key actual_best_model_key;
public Key model_info_key;
public DeepLearningScoringInfo last_scored() { return (DeepLearningScoringInfo) super.last_scored(); }
/**
* Get the parameters actually used for model building, not the user-given ones (_parms)
* They might differ since some defaults are filled in, and some invalid combinations are auto-disabled in modifyParams
* @return actually used parameters
*/
public final DeepLearningParameters get_params() { return model_info.get_params(); }
@Override public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) {
switch(_output.getModelCategory()) {
case Binomial: return new ModelMetricsBinomial.MetricBuilderBinomial(domain);
case Multinomial: return new ModelMetricsMultinomial.MetricBuilderMultinomial(_output.nclasses(),domain, get_params()._auc_type);
case Regression: return new ModelMetricsRegression.MetricBuilderRegression();
case AutoEncoder: return new ModelMetricsAutoEncoder.MetricBuilderAutoEncoder(_output.nfeatures());
default: throw H2O.unimpl("Invalid ModelCategory " + _output.getModelCategory());
}
}
/**
* Helper to allocate keys for output frames for weights and biases
* @param destKey Base destination key for output frames
*/
private void makeWeightsBiases(Key destKey) {
if (!model_info.get_params()._export_weights_and_biases) {
_output.weights = null;
_output.biases = null;
_output.normmul = null;
_output.normsub = null;
_output.normrespmul = null;
_output.normrespsub = null;
_output.catoffsets = null;
} else {
_output.weights = new Key[get_params()._hidden.length + 1];
for (int i = 0; i < _output.weights.length; ++i) {
_output.weights[i] = Key.make(destKey + ".weights." + i);
}
_output.biases = new Key[get_params()._hidden.length + 1];
for (int i = 0; i < _output.biases.length; ++i) {
_output.biases[i] = Key.make(destKey + ".biases." + i);
}
_output.normmul = model_info.data_info._normMul;
_output.normsub = model_info.data_info._normSub;
_output.normrespmul = model_info.data_info._normRespMul;
_output.normrespsub = model_info.data_info._normRespSub;
_output.catoffsets = model_info.data_info._catOffsets;
}
}
/** Constructor to restart from a checkpointed model
* @param destKey New destination key for the model
* @param parms User-given parameters for checkpoint restart
* @param cp Checkpoint to restart from
* @param store_best_model Store only the best model instead of the latest one */
public DeepLearningModel(final Key destKey, final DeepLearningParameters parms, final DeepLearningModel cp, final boolean store_best_model, final DataInfo dataInfo) {
super(destKey, parms == null ? (DeepLearningParameters)cp._parms.clone() : parms, (DeepLearningModelOutput)cp._output.clone());
assert(_parms != cp._parms); //make sure we have a clone
model_info = IcedUtils.deepCopy(cp.model_info); //don't want to interfere with model being built, just make a deep copy and store that
if (store_best_model) {
model_info.data_info = IcedUtils.deepCopy(dataInfo); //replace previous data_info with updated version that's passed in (contains enum for classification)
} else {
model_info.data_info = dataInfo; //shallow clone is ok
if (parms != null) {
assert (_parms == parms);
assert (_parms._checkpoint == parms._checkpoint);
assert (_parms._checkpoint == cp._key);
}
}
assert(get_params() != cp.model_info().get_params()); //make sure we have a clone
_dist = DistributionFactory.getDistribution(get_params());
assert(_dist._family != DistributionFamily.AUTO); // Note: must use sanitized parameters via get_params(), as this._params can still have defaults (AUTO, etc.)
actual_best_model_key = cp.actual_best_model_key;
if (actual_best_model_key.get() == null) {
DeepLearningModel best = IcedUtils.deepCopy(cp);
//best.model_info.data_info = model_info.data_info; // Note: we currently DO NOT use the checkpoint's data info - as data may change during checkpoint restarts
actual_best_model_key = Key.<DeepLearningModel>make(H2O.SELF);
DKV.put(actual_best_model_key, best);
}
time_of_start_ms = cp.time_of_start_ms;
total_training_time_ms = cp.total_training_time_ms;
total_checkpointed_run_time_ms = cp.total_training_time_ms;
total_scoring_time_ms = cp.total_scoring_time_ms;
total_setup_time_ms = cp.total_setup_time_ms;
training_rows = cp.training_rows; //copy the value to display the right number on the model page before training has started
validation_rows = cp.validation_rows; //copy the value to display the right number on the model page before training has started
_bestLoss = cp._bestLoss;
epoch_counter = cp.epoch_counter;
iterations = cp.iterations;
// deep clone scoring history
scoringInfo = cp.scoringInfo.clone();
for (int i=0; i< scoringInfo.length;++i)
scoringInfo[i] = IcedUtils.deepCopy(cp.scoringInfo[i]);
_output.errors = last_scored();
makeWeightsBiases(destKey);
_output._scoring_history = DeepLearningScoringInfo.createScoringHistoryTable(scoringInfo, (null != get_params()._valid), false, _output.getModelCategory(), _output.isAutoencoder(), _parms.hasCustomMetricFunc());
_output._variable_importances = calcVarImp(last_scored().variable_importances);
_output.setNames(dataInfo._adaptedFrame.names(), dataInfo._adaptedFrame.typesStr());
_output._domains = dataInfo._adaptedFrame.domains();
assert(Arrays.equals(_key._kb, destKey._kb));
}
/**
* Regular constructor (from scratch)
* @param destKey destination key
* @param parms DL parameters
* @param output DL model output
* @param train Training frame
* @param valid Validation frame
* @param nClasses Number of classes (1 for regression or autoencoder)
*/
public DeepLearningModel(final Key destKey, final DeepLearningParameters parms, final DeepLearningModelOutput output, Frame train, Frame valid, int nClasses) {
super(destKey, parms, output);
final DataInfo dinfo = makeDataInfo(train, valid, _parms, nClasses);
DKV.put(dinfo);
_output.setNames(dinfo._adaptedFrame.names(), dinfo._adaptedFrame.typesStr());
_output._domains = dinfo._adaptedFrame.domains();
Log.info("Building the model on " + dinfo.numNums() + " numeric features and " + dinfo.numCats() + " (one-hot encoded) categorical features.");
model_info = new DeepLearningModelInfo(parms, destKey, dinfo, nClasses, train, valid);
model_info_key = Key.make(H2O.SELF);
_dist = DistributionFactory.getDistribution(get_params());
assert(_dist._family != DistributionFamily.AUTO); // Note: must use sanitized parameters via get_params(), as this._params can still have defaults (AUTO, etc.)
actual_best_model_key = Key.make(H2O.SELF);
if (parms._nfolds != 0) actual_best_model_key = null;
if (!parms._autoencoder) {
scoringInfo = new DeepLearningScoringInfo[1];
scoringInfo[0] = new DeepLearningScoringInfo();
scoringInfo[0].validation = (parms._valid != null);
scoringInfo[0].time_stamp_ms = System.currentTimeMillis();
_output.errors = last_scored();
_output._scoring_history = DeepLearningScoringInfo.createScoringHistoryTable(scoringInfo, (null != get_params()._valid), false, _output.getModelCategory(), _output.isAutoencoder(), _parms.hasCustomMetricFunc());
_output._variable_importances = calcVarImp(last_scored().variable_importances);
}
time_of_start_ms = System.currentTimeMillis();
makeWeightsBiases(destKey);
assert _key.equals(destKey);
boolean fail = false;
long byte_size = 0;
try {
byte_size = new AutoBuffer().put(this).buf().length;
} catch(Throwable t) {
fail = true;
}
if (byte_size > Value.MAX || fail)
throw new IllegalArgumentException(technote(5, "Model is too large"));
}
public long _timeLastIterationEnter;
public long _timeLastScoreStart; //start actual scoring
private long _timeLastScoreEnd; //finished actual scoring
private long _timeLastPrintStart;
private void checkTimingConsistency() {
assert(total_scoring_time_ms <= total_training_time_ms);
assert(total_setup_time_ms <= total_training_time_ms);
assert(total_setup_time_ms+total_scoring_time_ms <= total_training_time_ms);
assert(total_training_time_ms >= total_checkpointed_run_time_ms);
assert(total_checkpointed_run_time_ms >= 0);
assert(total_training_time_ms >= 0);
assert(total_scoring_time_ms >= 0);
}
void updateTiming(Key<Job> job_key) {
final long now = System.currentTimeMillis();
long start_time_current_model = job_key.get().start_time();
total_training_time_ms = total_checkpointed_run_time_ms + (now - start_time_current_model);
checkTimingConsistency();
}
/**
* Score this DeepLearning model
* @param fTrain potentially downsampled training data for scoring
* @param fValid potentially downsampled validation data for scoring
* @param jobKey key of the owning job
* @param iteration Map/Reduce iteration count
* @return true if model building is ongoing
*/
boolean doScoring(Frame fTrain, Frame fValid, Key<Job> jobKey, int iteration, boolean finalScoring) {
final long now = System.currentTimeMillis();
final double time_since_last_iter = now - _timeLastIterationEnter;
updateTiming(jobKey);
_timeLastIterationEnter = now;
epoch_counter = (double)model_info().get_processed_total()/training_rows;
boolean keep_running;
// Auto-tuning
// if multi-node and auto-tuning and at least 10 ms for communication (to avoid doing this on multi-JVM setups on the same node),
// then adjust the auto-tuning parameter 'actual_train_samples_per_iteration' such that the targeted ratio of comm to comp is achieved
// Note: actual communication time is estimated by the NetworkTest's collective test.
if (H2O.CLOUD.size() > 1 && get_params()._train_samples_per_iteration == -2 && iteration > 1) {
Log.debug("Auto-tuning train_samples_per_iteration.");
if (time_for_communication_us > 1e4) {
Log.debug(" Time taken for communication: " + PrettyPrint.usecs((long) time_for_communication_us));
Log.debug(" Time taken for Map/Reduce iteration: " + PrettyPrint.msecs((long) time_since_last_iter, true));
final double comm_to_work_ratio = (time_for_communication_us * 1e-3) / time_since_last_iter;
Log.debug(" Ratio of network communication to computation: " + String.format("%.5f", comm_to_work_ratio));
Log.debug(" target_comm_to_work: " + get_params()._target_ratio_comm_to_comp);
Log.debug("Old value of train_samples_per_iteration: " + actual_train_samples_per_iteration);
double correction = get_params()._target_ratio_comm_to_comp / comm_to_work_ratio;
correction = Math.max(0.5,Math.min(2, correction)); //it's ok to train up to 2x more training rows per iteration, but not fewer than half.
if (Math.abs(correction) < 0.8 || Math.abs(correction) > 1.2) { //don't correct unless it's significant (avoid slow drift)
actual_train_samples_per_iteration /= correction;
actual_train_samples_per_iteration = Math.max(1, actual_train_samples_per_iteration);
Log.debug("New value of train_samples_per_iteration: " + actual_train_samples_per_iteration);
} else {
Log.debug("Keeping value of train_samples_per_iteration the same (would deviate too little from previous value): " + actual_train_samples_per_iteration);
}
} else {
Log.debug("Communication is faster than 10 ms. Not modifying train_samples_per_iteration: " + actual_train_samples_per_iteration);
}
}
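// Example with hypothetical measurements: a target_ratio_comm_to_comp of 0.05 and an observed
// comm_to_work_ratio of 0.2 give correction = 0.05 / 0.2 = 0.25, clamped to 0.5; dividing
// actual_train_samples_per_iteration by 0.5 doubles the work per iteration and roughly halves
// the relative communication overhead.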
keep_running = (epoch_counter < get_params()._epochs) && !stopped_early;
final long sinceLastScore = now -_timeLastScoreStart;
// this is potentially slow - only do every so often
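// e.g. with _score_interval = 5 and _score_duty_cycle = 0.1 (hypothetical values), scoring only
// happens once at least 5 seconds have passed since the last scoring started and the previous
// scoring pass took less than 10% of the wall-clock time elapsed since then.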
if( !keep_running || get_params()._score_each_iteration ||
(sinceLastScore > get_params()._score_interval *1000 //don't score too often
&&(double)(_timeLastScoreEnd-_timeLastScoreStart)/sinceLastScore < get_params()._score_duty_cycle) ) { //duty cycle
jobKey.get().update(0,"Scoring on " + fTrain.numRows() + " training samples" +(fValid != null ? (", " + fValid.numRows() + " validation samples") : ""));
final boolean printme = !get_params()._quiet_mode;
_timeLastScoreStart = System.currentTimeMillis();
model_info().computeStats(); //might not be necessary, but is done to be certain that numbers are good
DeepLearningScoringInfo scoringInfo = new DeepLearningScoringInfo();
scoringInfo.time_stamp_ms = _timeLastScoreStart;
updateTiming(jobKey);
scoringInfo.total_training_time_ms = total_training_time_ms;
scoringInfo.total_scoring_time_ms = total_scoring_time_ms;
scoringInfo.total_setup_time_ms = total_setup_time_ms;
scoringInfo.epoch_counter = epoch_counter;
scoringInfo.iterations = iterations;
scoringInfo.training_samples = (double)model_info().get_processed_total();
scoringInfo.validation = fValid != null;
scoringInfo.score_training_samples = fTrain.numRows();
scoringInfo.is_classification = _output.isClassifier();
scoringInfo.is_autoencoder = _output.isAutoencoder();
if (get_params()._autoencoder) {
if (printme) Log.info("Scoring the auto-encoder.");
// training
{
final Frame mse_frame = scoreAutoEncoder(fTrain, Key.make(), false);
mse_frame.delete();
ModelMetrics mtrain = ModelMetrics.getFromDKV(this, fTrain); //updated by model.score
_output._training_metrics = mtrain;
scoringInfo.scored_train = new ScoreKeeper(mtrain);
}
if (fValid != null) {
final Frame mse_frame = scoreAutoEncoder(fValid, Key.make(), false);
mse_frame.delete();
ModelMetrics mtest = ModelMetrics.getFromDKV(this, fValid); //updated by model.score
_output._validation_metrics = mtest;
scoringInfo.scored_valid = new ScoreKeeper(mtest);
}
} else {
if (printme) Log.info("Scoring the model.");
// compute errors
final String m = model_info().toString();
if (m.length() > 0) Log.info(m);
// For GainsLift and Huber, we need the full predictions to compute the model metrics
boolean needPreds = _output.nclasses() == 2 /* gains/lift table requires predictions */ ||
get_params()._distribution == DistributionFamily.huber;
// Scoring on training data
hex.ModelMetrics mtrain;
Frame preds = null;
if (needPreds) {
// allocate predictions since they are needed
preds = score(fTrain, CFuncRef.from(_parms._custom_metric_func));
mtrain = ModelMetrics.getFromDKV(this, fTrain);
if (get_params()._distribution == DistributionFamily.huber) {
Vec absdiff = new MathUtils.ComputeAbsDiff().doAll(1, (byte)3,
new Frame(new String[]{"a","p"}, new Vec[]{fTrain.vec(get_params()._response_column), preds.anyVec()})
).outputFrame().anyVec();
double huberDelta = MathUtils.computeWeightedQuantile(fTrain.vec(get_params()._weights_column), absdiff, get_params()._huber_alpha);
if (model_info().gradientCheck == null) _dist.setHuberDelta(huberDelta);
}
} else {
// no need to allocate predictions
ModelMetrics.MetricBuilder mb = scoreMetrics(fTrain);
mtrain = mb.makeModelMetrics(this,fTrain,fTrain,null);
}
if (preds!=null) preds.remove();
_output._training_metrics = mtrain;
scoringInfo.scored_train = new ScoreKeeper(mtrain);
hex.ModelMetricsSupervised mm1 = (ModelMetricsSupervised)mtrain;
if (fTrain.numRows() != training_rows) {
_output._training_metrics._description = "Metrics reported on temporary training frame with " + fTrain.numRows() + " samples";
} else if (fTrain._key != null && fTrain._key.toString().contains("chunks")){
_output._training_metrics._description = "Metrics reported on temporary (load-balanced) training frame";
} else {
_output._training_metrics._description = "Metrics reported on full training frame";
}
// Scoring on validation data
hex.ModelMetrics mvalid;
if (fValid != null) {
preds = null;
if (needPreds) {
// allocate predictions since they are needed
preds = score(fValid, CFuncRef.from(_parms._custom_metric_func));
mvalid = ModelMetrics.getFromDKV(this, fValid);
} else {
// no need to allocate predictions
ModelMetrics.MetricBuilder mb = scoreMetrics(fValid);
mvalid = mb.makeModelMetrics(this, fValid, fValid,null);
}
if (preds!=null) preds.remove();
_output._validation_metrics = mvalid;
scoringInfo.scored_valid = new ScoreKeeper(mvalid);
if (mvalid != null) {
if (fValid.numRows() != validation_rows) {
_output._validation_metrics._description = "Metrics reported on temporary validation frame with " + fValid.numRows() + " samples";
if (get_params()._score_validation_sampling == DeepLearningParameters.ClassSamplingMethod.Stratified) {
_output._validation_metrics._description += " (stratified sampling)";
}
} else if (fValid._key != null && fValid._key.toString().contains("chunks")){
_output._validation_metrics._description = "Metrics reported on temporary (load-balanced) validation frame";
} else {
_output._validation_metrics._description = "Metrics reported on full validation frame";
}
}
}
}
if (get_params()._variable_importances) {
if (!get_params()._quiet_mode) Log.info("Computing variable importances.");
final float[] vi = model_info().computeVariableImportances();
scoringInfo.variable_importances = new VarImp(vi, Arrays.copyOfRange(model_info().data_info().coefNames(), 0, vi.length));
}
_timeLastScoreEnd = System.currentTimeMillis();
long scoringTime = _timeLastScoreEnd - _timeLastScoreStart;
total_scoring_time_ms += scoringTime;
updateTiming(jobKey);
// update the scoringInfo object to report proper speed
scoringInfo.total_training_time_ms = total_training_time_ms;
scoringInfo.total_scoring_time_ms = total_scoring_time_ms;
scoringInfo.this_scoring_time_ms = scoringTime;
// enlarge the error array by one, push latest score back
if (this.scoringInfo == null) {
this.scoringInfo = new DeepLearningScoringInfo[]{scoringInfo};
} else {
DeepLearningScoringInfo[] err2 = new DeepLearningScoringInfo[this.scoringInfo.length + 1];
System.arraycopy(this.scoringInfo, 0, err2, 0, this.scoringInfo.length);
err2[err2.length - 1] = scoringInfo;
this.scoringInfo = err2;
}
_output.errors = last_scored();
makeWeightsBiases(_key);
water.util.Timer t = new Timer();
// store weights and matrices to Frames
if (_output.weights != null && _output.biases != null) {
for (int i = 0; i < _output.weights.length; ++i) {
Frame f = model_info.get_weights(i).toFrame(_output.weights[i]);
if (i==0) {
f.setNames(model_info.data_info.coefNames());
DKV.put(f);
}
}
for (int i = 0; i < _output.biases.length; ++i) {
model_info.get_biases(i).toFrame(_output.biases[i]);
}
if (!get_params()._quiet_mode)
Log.info("Writing weights and biases to Frames took " + t.time()/1000. + " seconds.");
}
_output._scoring_history = DeepLearningScoringInfo.createScoringHistoryTable(this.scoringInfo, (null != get_params()._valid), false, _output.getModelCategory(), _output.isAutoencoder(), _parms.hasCustomMetricFunc());
_output._variable_importances = calcVarImp(last_scored().variable_importances);
_output._model_summary = model_info.createSummaryTable();
// always keep a copy of the best model so far (based on the following criterion)
if (!finalScoring) {
if (actual_best_model_key != null && get_params()._overwrite_with_best_model && (
// if we have a best_model in DKV, then compare against its error() (unless it's a different model as judged by the network size)
(DKV.get(actual_best_model_key) != null && (!(loss() >= DKV.get(actual_best_model_key).<DeepLearningModel>get().loss()) || !Arrays.equals(model_info().units, DKV.get(actual_best_model_key).<DeepLearningModel>get().model_info().units)))
||
// otherwise, compare against our own _bestError
(DKV.get(actual_best_model_key) == null && loss() < _bestLoss)
) ) {
_bestLoss = loss();
putMeAsBestModel(actual_best_model_key);
}
// print the freshly scored model to ASCII
if (keep_running && printme)
Log.info(toString());
if ((_output.isClassifier() && last_scored().scored_train._classError <= get_params()._classification_stop)
|| (!_output.isClassifier() && last_scored().scored_train._mse <= get_params()._regression_stop)) {
Log.info("Achieved requested predictive accuracy on the training data. Model building completed.");
stopped_early = true;
}
// note: stopping metric should be known at this point and setting problemType is redundant
ScoreKeeper.ProblemType problemType = get_params()._autoencoder ?
ScoreKeeper.ProblemType.autoencoder : ScoreKeeper.ProblemType.forSupervised(_output.isClassifier());
if (ScoreKeeper.stopEarly(ScoringInfo.scoreKeepers(scoring_history()),
get_params()._stopping_rounds, problemType, get_params()._stopping_metric, get_params()._stopping_tolerance, "model's last", true
)) {
Log.info("Convergence detected based on simple moving average of the loss function for the past " + get_params()._stopping_rounds + " scoring events. Model building completed.");
stopped_early = true;
}
if (printme) Log.info("Time taken for scoring and diagnostics: " + PrettyPrint.msecs(scoringInfo.this_scoring_time_ms, true));
}
}
if (stopped_early) {
// pretend as if we finished all epochs to get the progress bar pretty (especially for N-fold and grid-search)
((Job) DKV.getGet(jobKey)).update((long) (get_params()._epochs * training_rows));
update(jobKey);
return false;
}
progressUpdate(jobKey, keep_running);
update(jobKey);
return keep_running;
}
private void progressUpdate(Key<Job> job_key, boolean keep_running) {
updateTiming(job_key);
Job job = job_key.get();
double progress = job.progress();
// Log.info("2nd speed: (samples: " + model_info().get_processed_total() + ", total_run_time: " + total_training_time_ms + ", total_scoring_time: " + total_scoring_time_ms + ", total_setup_time: " + total_setup_time_ms + ")");
int speed = (int)(model_info().get_processed_total() * 1000. / (total_training_time_ms -total_scoring_time_ms-total_setup_time_ms));
assert(speed >= 0) : "negative speed computed! (total_run_time: " + total_training_time_ms + ", total_scoring_time: " + total_scoring_time_ms + ", total_setup_time: " + total_setup_time_ms + ")";
String msg =
"Iterations: " + String.format("%,d", iterations)
+ ". Epochs: " + String.format("%g", epoch_counter)
+ ". Speed: " + String.format("%,d", speed) + " samples/sec."
+ (progress == 0 ? "" : " Estimated time left: " + PrettyPrint.msecs((long) (total_training_time_ms * (1. - progress) / progress), true));
job.update(actual_train_samples_per_iteration,msg); //mark the amount of work done for the progress bar
long now = System.currentTimeMillis();
long sinceLastPrint = now -_timeLastPrintStart;
if (!keep_running || sinceLastPrint > get_params()._score_interval * 1000) { //print this after every score_interval, not considering duty cycle
_timeLastPrintStart = now;
if (!get_params()._quiet_mode) {
Log.info(
"Training time: " + PrettyPrint.msecs(total_training_time_ms, true) + " (scoring: " + PrettyPrint.msecs(total_scoring_time_ms, true) + "). "
+ "Processed " + String.format("%,d", model_info().get_processed_total()) + " samples" + " (" + String.format("%.3f", epoch_counter) + " epochs).\n");
Log.info(msg);
}
}
}
/** Make either a prediction or a reconstruction.
* @param orig Test dataset
* @param adaptedFr Test dataset, adapted to the model
* @param computeMetrics whether to compute model metrics while scoring
* @return A frame containing the prediction or reconstruction
*/
@Override protected PredictScoreResult predictScoreImpl(Frame orig, Frame adaptedFr, String destination_key, Job j, boolean computeMetrics, CFuncRef customMetricFunc) {
if (!get_params()._autoencoder) {
return super.predictScoreImpl(orig, adaptedFr, destination_key, j, computeMetrics, customMetricFunc);
} else {
// Reconstruction
final int len = model_info().data_info().fullN();
assert(model_info().data_info()._responses == 0);
String[] coefnames = model_info().data_info().coefNames();
assert(len == coefnames.length);
String[] names = new String[len];
for(int i = 0; i < names.length; ++i) {
names[i] = "reconstr_" + coefnames[i];
}
Frame f = new MRTask() {
@Override public void map( Chunk chks[], NewChunk recon[] ) {
double tmp [] = new double[_output._names.length];
double preds[] = new double [len];
final Neurons[] neurons = DeepLearningTask.makeNeuronsForTesting(model_info);
for( int row=0; row<chks[0]._len; row++ ) {
double p[] = score_autoencoder(chks, row, tmp, preds, neurons, true /*reconstruction*/, false /*reconstruction_error_per_feature*/);
for( int c=0; c<len; c++ )
recon[c].addNum(p[c]);
}
}
}.doAll(len, Vec.T_NUM, adaptedFr).outputFrame();
Frame of = new Frame(Key.<Frame>make(destination_key), names, f.vecs());
DKV.put(of);
ModelMetrics.MetricBuilder<?> mb = makeMetricBuilder(null);
return new PredictScoreResult(mb, of, of);
}
}
@Override
protected double[] score0(double[] data, double[] preds) {
return score0(data, preds, 0);
}
/**
* Compute the loss function
* @param myRows Mini-batch array of DataInfo.Row objects containing numerical/categorical predictor and response data (standardized)
* @return loss
*/
public double meanLoss(DataInfo.Row[] myRows) {
double loss = 0;
Neurons[] neurons = DeepLearningTask.makeNeuronsForTraining(model_info());
//for absolute error, gradient -1/1 matches the derivative of abs(x) without correction term
long seed = -1; //ignored
double[] responses = new double[myRows.length];
double[] offsets = new double[myRows.length];
int n=0;
for (int mb=0; mb<myRows.length; ++mb) {
DataInfo.Row myRow = myRows[mb];
if (myRow == null) continue;
n++;
((Neurons.Input) neurons[0]).setInput(seed, myRow.numIds, myRow.numVals, myRow.nBins, myRow.binIds, mb);
responses[mb] = myRow.response(0);
offsets[mb] = myRow.offset;
// check that all non-last layer errors/gradients are empty
for (int i = 0; i < neurons.length - 1; ++i) {
Storage.DenseVector e = neurons[i]._e == null ? null : neurons[i]._e[mb];
if (e == null) continue;
assert (ArrayUtils.sum(e.raw()) == 0);
}
}
DeepLearningTask.fpropMiniBatch(seed, neurons, model_info(), null, false, responses, offsets, myRows.length);
for (int mb=0; mb<myRows.length; ++mb) {
DataInfo.Row myRow = myRows[mb];
if (myRow==null) continue;
// check that all non-last layer errors/gradients are still empty
for (int i = 0; i<neurons.length-1;++i) {
Storage.DenseVector e = neurons[i]._e == null ? null : neurons[i]._e[mb];
if (e==null) continue;
assert (ArrayUtils.sum(e.raw()) == 0);
}
if (get_params()._loss == CrossEntropy) {
if (get_params()._balance_classes) throw H2O.unimpl();
int actual = (int) myRow.response[0];
double pred = neurons[neurons.length - 1]._a[mb].get(actual);
loss += -Math.log(Math.max(1e-15, pred)); //cross-entropy (same as log loss)
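// e.g. a predicted probability of 0.8 for the observed class adds -ln(0.8) ~= 0.223 to the
// running loss (numbers purely illustrative).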
} else {
if (model_info.get_params()._autoencoder) throw H2O.unimpl();
//prediction and actual response in standardized response space
double pred = neurons[neurons.length - 1]._a[mb].get(0);
double actual = myRow.response[0];
// FIXME: re-enable this such that the loss is computed from the de-standardized prediction/response
//bring standardized prediction and actual response to real space
// DataInfo di = model_info().data_info();
// if (di._normRespMul != null) { //either both are null or none
// pred = (pred / di._normRespMul[0] + di._normRespSub[0]);
// actual = (actual / di._normRespMul[0] + di._normRespSub[0]);
// }
pred = _dist.linkInv(pred);
if (poisson.equals(_parms._distribution)) {
double linkF = DistributionFactory.LogExpUtil.log(pred);
loss += -2*(actual * linkF - DistributionFactory.LogExpUtil.exp(pred));
} else {
loss += _dist.deviance(1 /*weight*/, actual, pred);
}
}
// add L1/L2 penalty of model coefficients (weights & biases)
for (int i = 0; i <= get_params()._hidden.length+1; ++i) {
if (neurons[i]._w != null) {
for (int row = 0; row < neurons[i]._w.rows(); ++row) {
for (int col = 0; col < neurons[i]._w.cols(); ++col) {
loss += get_params()._l1 * Math.abs(neurons[i]._w.get(row, col));
loss += 0.5 * get_params()._l2 * Math.pow(neurons[i]._w.get(row, col), 2);
}
}
}
if (neurons[i]._b != null) {
for (int row = 0; row < neurons[i]._b.size(); ++row) {
loss += get_params()._l1 * Math.abs(neurons[i]._b.get(row));
loss += 0.5 * get_params()._l2 * Math.pow(neurons[i]._b.get(row), 2);
}
}
}
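// e.g. a single weight of 0.3 with l1 = 1e-5 and l2 = 1e-5 (hypothetical values) contributes
// 1e-5 * 0.3 + 0.5 * 1e-5 * 0.3^2 = 3.0e-6 + 4.5e-7 to the loss.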
}
return n>0?loss/n:loss;
}
/**
* Predict from raw double values representing the data
* @param data raw array containing categorical values (horizontalized to 1,0,0,1,0,0 etc.) and numerical values (0.35,1.24,5.3234,etc), both can contain NaNs
* @param preds predicted label and per-class probabilities (for classification), predicted target (regression), can contain NaNs
* @return preds, can contain NaNs
*/
@Override
public double[] score0(double[] data, double[] preds, double offset) {
int mb=0;
int n=1;
if (model_info().isUnstable()) {
Log.err(unstable_msg);
throw new UnsupportedOperationException(unstable_msg);
}
Neurons[] neurons = DeepLearningTask.makeNeuronsForTesting(model_info);
((Neurons.Input)neurons[0]).setInput(-1, data, mb);
DeepLearningTask.fpropMiniBatch(-1, neurons, model_info, null, false, null, new double[]{offset}, n);
double[] out = neurons[neurons.length - 1]._a[mb].raw();
if (get_params()._distribution == DistributionFamily.modified_huber) {
preds[0] = -1;
preds[2] = _dist.linkInv(out[0]);
preds[1] = 1-preds[2];
return preds;
} else if (_output.isClassifier()) {
assert (preds.length == out.length + 1);
for (int i = 0; i < preds.length - 1; ++i) {
preds[i + 1] = out[i];
if (Double.isNaN(preds[i + 1])) throw new RuntimeException("Predicted class probability NaN!");
}
// label assignment happens later - explicitly mark it as invalid here
preds[0] = -1;
} else {
if (model_info().data_info()._normRespMul != null) //either both are null or none
preds[0] = (out[0] / model_info().data_info()._normRespMul[0] + model_info().data_info()._normRespSub[0]);
else
preds[0] = out[0];
// transform prediction to response space
preds[0] = _dist.linkInv(preds[0]);
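// e.g. for a Poisson model (log link), a network output of 1.5 in link space becomes
// exp(1.5) ~= 4.48 in response space (illustrative numbers only).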
if (Double.isNaN(preds[0]))
throw new RuntimeException("Predicted regression target NaN!");
}
return preds;
}
/**
* Score auto-encoded reconstruction (on-the-fly, without allocating the reconstruction as done in Frame score(Frame fr))
* @param frame Original data (can contain response, will be ignored)
* @param destination_key Frame Id for output
* @param reconstruction_error_per_feature whether to return the squared error per feature
* @return Frame containing one Vec with reconstruction error (MSE) of each reconstructed row, caller is responsible for deletion
*/
public Frame scoreAutoEncoder(Frame frame, Key destination_key, final boolean reconstruction_error_per_feature) {
if (!get_params()._autoencoder)
throw new H2OIllegalArgumentException("Only for AutoEncoder Deep Learning model.", "");
final int len = _output._names.length;
Frame adaptFrm = new Frame(frame);
adaptTestForTrain(adaptFrm, true, false);
final int outputcols = reconstruction_error_per_feature ? model_info.data_info.fullN() : 1;
Frame mse = new MRTask() {
@Override public void map( Chunk chks[], NewChunk[] mse ) {
double tmp [] = new double[len];
double out[] = new double[outputcols];
final Neurons[] neurons = DeepLearningTask.makeNeuronsForTesting(model_info);
for( int row=0; row<chks[0]._len; row++ ) {
for( int i=0; i<len; i++ )
tmp[i] = chks[i].atd(row);
score_autoencoder(tmp, out, neurons, false /*reconstruction*/, reconstruction_error_per_feature);
for (int i=0; i<outputcols; ++i)
mse[i].addNum(out[i]);
}
}
}.doAll(outputcols, Vec.T_NUM, adaptFrm).outputFrame();
String[] names;
if (reconstruction_error_per_feature) {
String[] coefnames = model_info().data_info().coefNames();
assert (outputcols == coefnames.length);
names = new String[outputcols];
for (int i = 0; i < names.length; ++i) {
names[i] = "reconstr_" + coefnames[i] + ".SE";
}
} else {
names = new String[]{"Reconstruction.MSE"};
}
Frame res = new Frame(destination_key, names, mse.vecs());
DKV.put(res);
addModelMetrics(new ModelMetricsAutoEncoder(this, frame, res.numRows(), res.vecs()[0].mean() /*mean MSE*/, CustomMetric.EMPTY));
return res;
}
/**
* Score auto-encoded reconstruction on-the-fly, and materialize the deep features of the given layer
* @param frame Original data (can contain response, will be ignored)
* @param layer index of the hidden layer for which to extract the features
* @return Frame containing the deep features (#cols = hidden[layer])
*/
public Frame scoreDeepFeatures(Frame frame, final int layer) {
return scoreDeepFeatures(frame, layer, null);
}
public Frame scoreDeepFeatures(Frame frame, final int layer, final Job job) {
if (layer < 0 || layer >= model_info().get_params()._hidden.length)
throw new H2OIllegalArgumentException("hidden layer (index) to extract must be between " + 0 + " and " + (model_info().get_params()._hidden.length-1),"");
final int len = _output.nfeatures();
if (isSupervised()) {
int ridx = frame.find(_output.responseName());
if (ridx != -1) { // drop the response for scoring!
frame = new Frame(frame);
frame.remove(ridx);
}
}
Frame adaptFrm = new Frame(frame);
//create new features, will be dense
final int features = model_info().get_params()._hidden[layer];
Vec v = adaptFrm.anyVec();
Vec[] vecs = v!=null ? v.makeZeros(features) : null;
if (vecs == null) throw new IllegalArgumentException("Cannot create deep features from a frame with no columns.");
Scope.enter();
adaptTestForTrain(adaptFrm, true, false);
for (int j=0; j<features; ++j) {
adaptFrm.add("DF.L"+(layer+1)+".C" + (j+1), vecs[j]);
}
final int mb=0;
final int n=1;
new MRTask() {
@Override public void map( Chunk chks[] ) {
if (isCancelled() || job !=null && job.stop_requested()) throw new Job.JobCancelledException(job);
double tmp [] = new double[len];
final Neurons[] neurons = DeepLearningTask.makeNeuronsForTesting(model_info);
for( int row=0; row<chks[0]._len; row++ ) {
for( int i=0; i<len; i++ )
tmp[i] = chks[i].atd(row);
((Neurons.Input)neurons[0]).setInput(-1, tmp, mb); //FIXME: No weights yet
DeepLearningTask.fpropMiniBatch(-1, neurons, model_info, null, false, null, null /*no offset*/, n);
double[] out = neurons[layer+1]._a[mb].raw(); //extract the layer-th hidden feature
for( int c=0; c<features; c++ )
chks[_output._names.length+c].set(row,out[c]);
}
if (job != null) job.update(1);
}
}.doAll(adaptFrm);
// Return just the output columns
int x=_output._names.length, y=adaptFrm.numCols();
Frame ret = adaptFrm.extractFrame(x, y);
Scope.exit();
return ret;
}
@Override
public Frame scoreDeepFeatures(Frame frame, String layer, Job j) {
throw H2O.unimpl("Cannot extract named hidden layer '" + layer + "' for H2O DeepLearning.");
}
// Make (potentially expanded) reconstruction
private double[] score_autoencoder(Chunk[] chks, int row_in_chunk, double[] tmp, double[] preds, Neurons[] neurons, boolean reconstruction, boolean reconstruction_error_per_feature) {
assert(get_params()._autoencoder);
assert(tmp.length == _output._names.length);
for (int i=0; i<tmp.length; i++ )
tmp[i] = chks[i].atd(row_in_chunk);
score_autoencoder(tmp, preds, neurons, reconstruction, reconstruction_error_per_feature); // this fills preds, returns MSE error (ignored here)
return preds;
}
/**
* Helper to reconstruct original data into preds array and compute the reconstruction error (MSE)
* @param data Original data (unexpanded)
* @param preds Reconstruction (potentially expanded)
* @param neurons Array of neurons to work with (will call fprop on them)
* @param reconstruction whether to fill preds with the reconstruction (scaled back to the original data space) instead of an error
* @param reconstruction_error_per_feature whether to return the squared reconstruction error per expanded feature instead of the overall MSE
*/
private void score_autoencoder(double[] data, double[] preds, Neurons[] neurons, boolean reconstruction, boolean reconstruction_error_per_feature) {
final int mb=0;
final int n=1;
assert(model_info().get_params()._autoencoder);
if (model_info().isUnstable()) {
Log.err(unstable_msg);
throw new UnsupportedOperationException(unstable_msg);
}
((Neurons.Input)neurons[0]).setInput(-1, data, mb);
DeepLearningTask.fpropMiniBatch(-1, neurons, model_info, null, false, null, null /*no offset*/, n); // reconstructs data in expanded space
double[] in = neurons[0]._a[mb].raw(); //input (expanded)
double[] out = neurons[neurons.length - 1]._a[mb].raw(); //output (expanded)
assert(in.length == out.length);
if (reconstruction) {
// Now scale back numerical columns to original data space (scale + shift)
model_info().data_info().unScaleNumericals(out, out); //only modifies the numericals
System.arraycopy(out, 0, preds, 0, out.length); //copy reconstruction into preds
} else if (reconstruction_error_per_feature){
// Compute SE of reconstruction in expanded space for each feature
for (int i = 0; i < in.length; ++i)
preds[i] = Math.pow((out[i] - in[i]), 2);
} else {
// Compute MSE of reconstruction in expanded space
assert(preds.length == 1);
double l2 = 0;
for (int i = 0; i < in.length; ++i)
l2 += Math.pow((out[i] - in[i]), 2);
l2 /= in.length;
preds[0] = l2;
}
}
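// Example of the MSE branch above (hypothetical values): per-feature squared errors of
// 0.01, 0.04 and 0.01 over three expanded features give preds[0] = (0.01+0.04+0.01)/3 = 0.02.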
// helper to push this model to another key (for keeping good models)
private void putMeAsBestModel(Key bestModelKey) {
DeepLearningModel bestModel = IcedUtils.deepCopy(this);
DKV.put(bestModelKey, bestModel);
if (model_info().get_params()._elastic_averaging) {
DeepLearningModelInfo eamodel = DKV.getGet(model_info.elasticAverageModelInfoKey());
if (eamodel != null)
DKV.put(bestModel.model_info().elasticAverageModelInfoKey(), eamodel);
}
assert (DKV.get(bestModelKey) != null);
assert (bestModel.compareTo(this) <= 0);
}
@Override protected Futures remove_impl(Futures fs, boolean cascade) {
if (_output.weights != null && _output.biases != null) {
for (Key k : _output.weights) Keyed.remove(k, fs, true);
for (Key k : _output.biases) Keyed.remove(k, fs, true);
}
if (actual_best_model_key!=null) DKV.remove(actual_best_model_key);
DKV.remove(model_info().data_info()._key, fs);
deleteElasticAverageModels();
return super.remove_impl(fs, cascade);
}
void deleteElasticAverageModels() {
if (model_info().get_params()._elastic_averaging) {
DKV.remove(model_info().elasticAverageModelInfoKey());
for (H2ONode node : H2O.CLOUD._memary) {
DKV.remove(model_info().localModelInfoKey(node));
}
}
}
private String getHeader() {
assert get_params()._autoencoder;
StringBuilder sb = new StringBuilder();
final int len = model_info().data_info().fullN();
String prefix = "reconstr_";
assert (model_info().data_info()._responses == 0);
String[] coefnames = model_info().data_info().coefNames();
assert (len == coefnames.length);
for (int c = 0; c < len; c++) {
if (c>0) sb.append(",");
sb.append(prefix).append(coefnames[c]);
}
return sb.toString();
}
@Override protected SBPrintStream toJavaInit(SBPrintStream sb, CodeGeneratorPipeline fileCtx) {
sb = super.toJavaInit(sb, fileCtx);
final String mname = JCodeGen.toJavaId(_key.toString());
final Neurons[] neurons = DeepLearningTask.makeNeuronsForTesting(model_info());
final DeepLearningParameters p = model_info.get_params();
CategoricalEncoding encoding = getGenModelEncoding();
if (encoding == null) {
throw new IllegalArgumentException("Only default, OneHotInternal, Binary, Eigen, LabelEncoder and SortByResponse categorical_encoding scheme is supported for POJO/MOJO");
}
sb.ip("public boolean isSupervised() { return " + isSupervised() + "; }").nl();
sb.ip("public int nfeatures() { return "+_output.nfeatures()+"; }").nl();
sb.ip("public int nclasses() { return "+ (p._autoencoder ? neurons[neurons.length-1].units : _output.nclasses()) + "; }").nl();
if (encoding != CategoricalEncoding.AUTO) {
sb.ip("public hex.genmodel.CategoricalEncoding getCategoricalEncoding() { return hex.genmodel.CategoricalEncoding." +
encoding.name() + "; }").nl();
}
if (encoding == CategoricalEncoding.Eigen) {
sb.ip("public double[] getOrigProjectionArray() { return " + PojoUtils.toJavaDoubleArray(_output._orig_projection_array) + "; }").nl();
}
if (model_info().data_info()._nums > 0) {
sb.i(0).p("// Thread-local storage for input neuron activation values.").nl();
sb.i(0).p("final double[] NUMS = new double[" + model_info().data_info()._nums +"];").nl();
JCodeGen.toClassWithArray(sb, "static", "NORMMUL", model_info().data_info()._normMul);//, "Standardization/Normalization scaling factor for numerical variables.");
JCodeGen.toClassWithArray(sb, "static", "NORMSUB", model_info().data_info()._normSub);//, "Standardization/Normalization offset for numerical variables.");
}
if (model_info().data_info()._cats > 0) {
sb.i(0).p("// Thread-local workspace for storing categorical input variables.").nl();
sb.i(0).p("final int[] CATS = new int[" + model_info().data_info()._cats +"];").nl();
}
JCodeGen.toStaticVar(sb, "CATOFFSETS", model_info().data_info()._catOffsets, "Offset into the workspace for categorical variables.");
if (model_info().data_info()._normRespMul != null) {
JCodeGen.toStaticVar(sb, "NORMRESPMUL", model_info().data_info()._normRespMul, "Standardization/Normalization scaling factor for response.");
JCodeGen.toStaticVar(sb, "NORMRESPSUB", model_info().data_info()._normRespSub, "Standardization/Normalization offset for response.");
}
if (p._hidden_dropout_ratios != null) {
JCodeGen.toStaticVar(sb, "HIDDEN_DROPOUT_RATIOS", p._hidden_dropout_ratios, "Hidden layer dropout ratios.");
}
final int[] layers = new int[neurons.length];
for (int i=0;i<neurons.length;++i)
layers[i] = neurons[i].units;
JCodeGen.toStaticVar(sb, "NEURONS", layers, "Number of neurons for each layer.");
if (get_params()._autoencoder) {
sb.i(1).p("public int getPredsSize() { return " + model_info.units[model_info.units.length-1] + "; }").nl();
sb.i(1).p("public boolean isAutoEncoder() { return true; }").nl();
sb.i(1).p("public String getHeader() { return \"" + getHeader() + "\"; }").nl();
}
// Generate activation storage
sb.i(1).p("// Thread-local storage for neuron activation values.").nl();
sb.i(1).p("final double[][] ACTIVATION = new double[][] {").nl();
for (int i=0; i<neurons.length; i++) {
sb.i(2).p("/* ").p(neurons[i].getClass().getSimpleName()).p(" */ ").p("new double[").p(layers[i]).p("]");
if (i!=neurons.length-1) sb.p(',');
sb.nl();
}
sb.i(1).p("};").nl();
// biases
sb.i(1).p("// Neuron bias values.").nl();
sb.i(1).p("public static final double[][] BIAS = new double[][] {").nl();
for (int i=0; i<neurons.length; i++) {
String colInfoClazz = mname + "_Bias_"+i;
sb.i(2).p("/* ").p(neurons[i].getClass().getSimpleName()).p(" */ ");
sb.p(colInfoClazz).p(".VALUES");
if (i!=neurons.length-1) sb.p(',');
sb.nl();
}
sb.i(1).p("};").nl();
// Generate additional classes
fileCtx.add(new CodeGenerator() {
@Override
public void generate(JCodeSB out) {
for (int i=0; i<neurons.length; i++) {
String colInfoClazz = mname + "_Bias_"+i;
out.i().p("// Neuron bias values for ").p(neurons[i].getClass().getSimpleName()).p(" layer").nl();
double[] bias = i == 0 ? null : new double[model_info().get_biases(i-1).size()];
if (i>0) {
for (int j=0; j<bias.length; ++j) bias[j] = model_info().get_biases(i-1).get(j);
}
JCodeGen.toClassWithArray(out, null, colInfoClazz, bias);
}
}
});
// Weights
sb.i(1).p("// Connecting weights between neurons.").nl();
sb.i(1).p("public static final float[][] WEIGHT = new float[][] {").nl();
for (int i=0; i<neurons.length; i++) {
String colInfoClazz = mname + "_Weight_"+i;
sb.i(2).p("/* ").p(neurons[i].getClass().getSimpleName()).p(" */ ");
sb.p(colInfoClazz).p(".VALUES");
if (i!=neurons.length-1) sb.p(',');
sb.nl();
}
sb.i(1).p("};").nl();
// Generate weight classes
fileCtx.add(new CodeGenerator() {
@Override
public void generate(JCodeSB out) {
for (int i = 0; i < neurons.length; i++) {
String colInfoClazz = mname + "_Weight_" + i;
if (i > 0) {
out.i().p("// Neuron weights connecting ").
p(neurons[i - 1].getClass().getSimpleName()).p(" and ").
p(neurons[i].getClass().getSimpleName()).
p(" layer").nl();
}
float[]
weights =
i == 0 ? null : new float[model_info().get_weights(i - 1).rows() * model_info()
.get_weights(i - 1).cols()];
if (i > 0) {
final int rows = model_info().get_weights(i - 1).rows();
final int cols = model_info().get_weights(i - 1).cols();
for (int j = 0; j < rows; ++j)
for (int k = 0; k < cols; ++k)
weights[j * cols + k] = model_info().get_weights(i - 1).get(j, k);
}
JCodeGen.toClassWithArray(out, null, colInfoClazz, weights);
}
}
});
return sb;
}
@Override protected boolean toJavaCheckTooBig() { return (model_info.size() > 1e6); }
private SBPrintStream pureMatVec(final SBPrintStream bodySb) {
bodySb.i(1).p("int cols = ACTIVATION[i-1].length;").nl();
bodySb.i(1).p("int rows = ACTIVATION[i].length;").nl();
bodySb.i(1).p("int extra=cols-cols%8;").nl();
bodySb.i(1).p("int multiple = (cols/8)*8-1;").nl();
bodySb.i(1).p("int idx = 0;").nl();
bodySb.i(1).p("float[] a = WEIGHT[i];").nl();
bodySb.i(1).p("double[] x = ACTIVATION[i-1];").nl();
bodySb.i(1).p("double[] y = BIAS[i];").nl();
bodySb.i(1).p("double[] res = ACTIVATION[i];").nl();
bodySb.i(1).p("for (int row=0; row<rows; ++row) {").nl();
bodySb.i(2).p("double psum0 = 0, psum1 = 0, psum2 = 0, psum3 = 0, psum4 = 0, psum5 = 0, psum6 = 0, psum7 = 0;").nl();
bodySb.i(2).p("for (int col = 0; col < multiple; col += 8) {").nl();
bodySb.i(3).p("int off = idx + col;").nl();
bodySb.i(3).p("psum0 += a[off ] * x[col ];").nl();
bodySb.i(3).p("psum1 += a[off + 1] * x[col + 1];").nl();
bodySb.i(3).p("psum2 += a[off + 2] * x[col + 2];").nl();
bodySb.i(3).p("psum3 += a[off + 3] * x[col + 3];").nl();
bodySb.i(3).p("psum4 += a[off + 4] * x[col + 4];").nl();
bodySb.i(3).p("psum5 += a[off + 5] * x[col + 5];").nl();
bodySb.i(3).p("psum6 += a[off + 6] * x[col + 6];").nl();
bodySb.i(3).p("psum7 += a[off + 7] * x[col + 7];").nl();
bodySb.i(2).p("}").nl();
bodySb.i(2).p("res[row] += psum0 + psum1 + psum2 + psum3;").nl();
bodySb.i(2).p("res[row] += psum4 + psum5 + psum6 + psum7;").nl();
bodySb.i(2).p("for (int col = extra; col < cols; col++)").nl();
bodySb.i(3).p("res[row] += a[idx + col] * x[col];").nl();
bodySb.i(2).p("res[row] += y[row];").nl();
bodySb.i(2).p("idx += cols;").nl();
bodySb.i(1).p("}").nl();
return bodySb;
}
@Override protected void toJavaPredictBody(SBPrintStream bodySb,
CodeGeneratorPipeline classCtx,
CodeGeneratorPipeline fileCtx,
final boolean verboseCode) {
final DeepLearningParameters p = model_info.get_params();
bodySb.i().p("java.util.Arrays.fill(preds,0);").nl();
final int cats = model_info().data_info()._cats;
final int nums = model_info().data_info()._nums;
// initialize input layer
if (nums > 0) bodySb.i().p("java.util.Arrays.fill(NUMS,0);").nl();
if (cats > 0) bodySb.i().p("java.util.Arrays.fill(CATS,0);").nl();
bodySb.i().p("int i = 0, ncats = 0;").nl();
if (cats > 0) {
bodySb.i().p("for(; i<"+cats+"; ++i) {").nl();
bodySb.i(1).p("if (!Double.isNaN(data[i])) {").nl();
bodySb.i(2).p("int c = (int) data[i];").nl();
if (model_info().data_info()._useAllFactorLevels)
bodySb.i(2).p("CATS[ncats] = c + CATOFFSETS[i];").nl();
else {
bodySb.i(2).p("if (c != 0) {").nl();
bodySb.i(3).p("CATS[ncats] = c + CATOFFSETS[i] - 1;").nl();
bodySb.i(2).p("} else {").nl();
bodySb.i(3).p("CATS[ncats] = -1;").nl();
bodySb.i(2).p("}").nl();
}
bodySb.i(1).p("} else {").nl(); // set CAT level when encountering NAN
bodySb.i(2).p("CATS[ncats] = CATOFFSETS[i+1]-1;").nl();
bodySb.i(1).p("}").nl();
bodySb.i(1).p("ncats++;").nl();
bodySb.i().p("}").nl();
}
if (nums > 0) {
bodySb.i().p("final int n = data.length;").nl();
bodySb.i().p("for(; i<n; ++i) {").nl();
bodySb.i(1).p("NUMS[i" + (cats > 0 ? "-" + cats : "") + "] = Double.isNaN(data[i]) ? 0 : ");
if (model_info().data_info()._normMul != null) {
bodySb.p("(data[i] - NORMSUB.VALUES[i" + (cats > 0 ? "-" + cats : "") + "])*NORMMUL.VALUES[i" + (cats > 0 ? "-" + cats : "") + "];").nl();
} else {
bodySb.p("data[i];").nl();
}
bodySb.i(0).p("}").nl();
}
bodySb.i().p("java.util.Arrays.fill(ACTIVATION[0],0);").nl();
if (cats > 0) {
bodySb.i().p("for (i=0; i<ncats; ++i) {").nl();
bodySb.i(1).p("if(CATS[i] >= 0) ACTIVATION[0][CATS[i]] = 1;").nl();
bodySb.i(0).p("}").nl();
}
if (nums > 0) {
bodySb.i().p("for (i=0; i<NUMS.length; ++i) {").nl();
bodySb.i(1).p("ACTIVATION[0][CATOFFSETS[CATOFFSETS.length-1] + i] = Double.isNaN(NUMS[i]) ? 0 : NUMS[i];").nl();
bodySb.i().p("}").nl();
}
boolean tanh=(p._activation == DeepLearningParameters.Activation.Tanh || p._activation == DeepLearningParameters.Activation.TanhWithDropout);
boolean relu=(p._activation == DeepLearningParameters.Activation.Rectifier || p._activation == DeepLearningParameters.Activation.RectifierWithDropout);
boolean maxout=(p._activation == DeepLearningParameters.Activation.Maxout || p._activation == DeepLearningParameters.Activation.MaxoutWithDropout);
final String stopping = p._autoencoder ? "(i<=ACTIVATION.length-1)" : "(i<ACTIVATION.length-1)";
// make prediction: forward propagation
bodySb.i().p("for (i=1; i<ACTIVATION.length; ++i) {").nl();
bodySb.i(1).p("java.util.Arrays.fill(ACTIVATION[i],0);").nl();
if (maxout) {
bodySb.i(1).p("int _k = 2; // channels").nl();
bodySb.i(1).p("if " + stopping + " {").nl();
bodySb.i(2).p("double[] channel = new double[_k];").nl();
bodySb.i(2).p("for (int r=0; r<ACTIVATION[i].length; ++r) {").nl();
bodySb.i(3).p("final int cols = ACTIVATION[i-1].length;").nl();
bodySb.i(3).p("short maxK = 0;").nl();
bodySb.i(3).p("for (short k = 0; k < _k; ++k) {").nl();
bodySb.i(4).p("channel[k] = 0;").nl();
bodySb.i(4).p("for (int c=0; c<cols; ++c) {").nl();
bodySb.i(5).p("channel[k] += WEIGHT[i][_k*(r * cols + c) + k] * ACTIVATION[i-1][c];").nl();
bodySb.i(4).p("}").nl();
bodySb.i(4).p("channel[k] += BIAS[i][_k*r+k];").nl();
bodySb.i(4).p("if (channel[k] > channel[maxK]) maxK=k;").nl();
bodySb.i(3).p("}").nl();
bodySb.i(3).p("ACTIVATION[i][r] = channel[maxK];").nl();
} else {
// optimized
pureMatVec(bodySb);
// Activation function
bodySb.i(1).p("if " + stopping + " {").nl();
bodySb.i(2).p("for (int r=0; r<ACTIVATION[i].length; ++r) {").nl();
if (tanh) {
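        // The generated code evaluates tanh(x) via the identity tanh(x) = 1 - 2 / (1 + exp(2*x)).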
bodySb.i(3).p("ACTIVATION[i][r] = 1 - 2 / (1 + Math.exp(2*ACTIVATION[i][r]));").nl();
} else if (relu) {
bodySb.i(3).p("ACTIVATION[i][r] = Math.max(0, ACTIVATION[i][r]);").nl();
}
}
if (p._hidden_dropout_ratios != null) {
bodySb.i(3).p("if (i<ACTIVATION.length-1) {").nl();
bodySb.i(4).p("ACTIVATION[i][r] *= 1 - HIDDEN_DROPOUT_RATIOS[i-1];").nl();
bodySb.i(3).p("}").nl();
}
bodySb.i(2).p("}").nl();
bodySb.i(1).p("}").nl();
if (maxout) {
bodySb.i(1).p("if (i == ACTIVATION.length-1) {").nl();
pureMatVec(bodySb);
bodySb.i(1).p("}").nl();
}
if (_output.isClassifier() && _parms._distribution != DistributionFamily.modified_huber) {
bodySb.i(1).p("if (i == ACTIVATION.length-1) {").nl();
// softmax
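      // Standard numerically stable softmax: subtracting the row maximum before exponentiation
      // leaves the result unchanged but avoids overflow in exp().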
bodySb.i(2).p("double max = ACTIVATION[i][0];").nl();
bodySb.i(2).p("for (int r=1; r<ACTIVATION[i].length; r++) {").nl();
bodySb.i(3).p("if (ACTIVATION[i][r]>max) max = ACTIVATION[i][r];").nl();
bodySb.i(2).p("}").nl();
bodySb.i(2).p("double scale = 0;").nl();
bodySb.i(2).p("for (int r=0; r<ACTIVATION[i].length; r++) {").nl();
bodySb.i(3).p("ACTIVATION[i][r] = Math.exp(ACTIVATION[i][r] - max);").nl();
bodySb.i(3).p("scale += ACTIVATION[i][r];").nl();
bodySb.i(2).p("}").nl();
bodySb.i(2).p("for (int r=0; r<ACTIVATION[i].length; r++) {").nl();
bodySb.i(3).p("if (Double.isNaN(ACTIVATION[i][r]))").nl();
bodySb.i(4).p("throw new RuntimeException(\"Numerical instability, predicted NaN.\");").nl();
bodySb.i(3).p("ACTIVATION[i][r] /= scale;").nl();
bodySb.i(3).p("preds[r+1] = ACTIVATION[i][r];").nl();
bodySb.i(2).p("}").nl();
bodySb.i(1).p("}").nl();
bodySb.i().p("}").nl();
} else if (!p._autoencoder) { //Regression and modified_huber
bodySb.i(1).p("if (i == ACTIVATION.length-1) {").nl();
// regression: set preds[1], FillPreds0 will put it into preds[0]
if (model_info().data_info()._normRespMul != null) {
bodySb.i(2).p("preds[1] = (ACTIVATION[i][0] / NORMRESPMUL[0] + NORMRESPSUB[0]);").nl();
}
else {
bodySb.i(2).p("preds[1] = ACTIVATION[i][0];").nl();
}
bodySb.i(2).p("preds[1] = " + _dist.linkInvString("preds[1]") + ";").nl();
if (_parms._distribution == DistributionFamily.modified_huber){
bodySb.i(2).p("preds[2] = preds[1];").nl();
bodySb.i(2).p("preds[1] = 1-preds[2];").nl();
}
bodySb.i(2).p("if (Double.isNaN(preds[1])) throw new RuntimeException(\"Predicted regression target NaN!\");").nl();
bodySb.i(1).p("}").nl();
bodySb.i().p("}").nl();
} else { //AutoEncoder
bodySb.i(1).p("if (i == ACTIVATION.length-1) {").nl();
bodySb.i(2).p("for (int r=0; r<ACTIVATION[i].length; r++) {").nl();
bodySb.i(3).p("if (Double.isNaN(ACTIVATION[i][r]))").nl();
bodySb.i(4).p("throw new RuntimeException(\"Numerical instability, reconstructed NaN.\");").nl();
bodySb.i(3).p("preds[r] = ACTIVATION[i][r];").nl();
bodySb.i(2).p("}").nl();
if (model_info().data_info()._nums > 0) {
int ns = model_info().data_info().numStart();
bodySb.i(2).p("for (int k=" + ns + "; k<" + model_info().data_info().fullN() + "; ++k) {").nl();
bodySb.i(3).p("preds[k] = preds[k] / NORMMUL.VALUES[k-" + ns + "] + NORMSUB.VALUES[k-" + ns + "];").nl();
bodySb.i(2).p("}").nl();
}
bodySb.i(1).p("}").nl();
bodySb.i().p("}").nl();
// DEBUGGING
// bodySb.i().p("System.out.println(java.util.Arrays.toString(data));").nl();
// bodySb.i().p("System.out.println(java.util.Arrays.toString(ACTIVATION[0]));").nl();
// bodySb.i().p("System.out.println(java.util.Arrays.toString(ACTIVATION[ACTIVATION.length-1]));").nl();
// bodySb.i().p("System.out.println(java.util.Arrays.toString(preds));").nl();
// bodySb.i().p("System.out.println(\"\");").nl();
}
if (_output.autoencoder) return;
if (_output.isClassifier()) {
if (get_params()._balance_classes)
bodySb.ip("hex.genmodel.GenModel.correctProbabilities(preds, PRIOR_CLASS_DISTRIB, MODEL_CLASS_DISTRIB);").nl();
bodySb.ip("preds[0] = hex.genmodel.GenModel.getPrediction(preds, PRIOR_CLASS_DISTRIB, data, " + defaultThreshold()+");").nl();
} else {
bodySb.ip("preds[0] = preds[1];").nl();
}
}
private final String unstable_msg = technote(4,
"\n\nTrying to predict with an unstable model." +
"\nJob was aborted due to observed numerical instability (exponential growth)."
+ "\nEither the weights or the bias values are unreasonably large or lead to large activation values."
+ "\nTry a different initial distribution, a bounded activation function (Tanh), adding regularization"
+ "\n(via max_w2, l1, l2, dropout) or learning rate (either enable adaptive_rate or use a smaller learning rate or faster annealing).");
@Override protected long checksum_impl() {
return super.checksum_impl() * model_info.checksum_impl();
}
/**
* Deep Learning Parameters
*/
public static class DeepLearningParameters extends Model.Parameters {
public String algoName() { return "DeepLearning"; }
public String fullName() { return "Deep Learning"; }
public String javaName() { return DeepLearningModel.class.getName(); }
@Override protected double defaultStoppingTolerance() { return 0; }
public DeepLearningParameters() {
super();
_stopping_rounds = 5;
}
@Override
public long progressUnits() {
if (train()==null) return 1;
return (long)Math.ceil(_epochs*train().numRows());
}
@Override
public double missingColumnsType() {
return _sparse ? 0 : Double.NaN;
}
/**
* If enabled, store the best model under the destination key of this model at the end of training.
* Only applicable if training is not cancelled.
*/
public boolean _overwrite_with_best_model = true;
public boolean _autoencoder = false;
public boolean _use_all_factor_levels = true;
/**
* If enabled, automatically standardize the data. If disabled, the user must provide properly scaled input data.
*/
public boolean _standardize = true;
/*Neural Net Topology*/
/**
     * The activation function (non-linearity) to be used by the neurons in the hidden layers.
* Tanh: Hyperbolic tangent function (same as scaled and shifted sigmoid).
* Rectifier: Chooses the maximum of (0, x) where x is the input value.
* Maxout: Choose the maximum coordinate of the input vector.
* With Dropout: Zero out a random user-given fraction of the
* incoming weights to each hidden layer during training, for each
* training row. This effectively trains exponentially many models at
* once, and can improve generalization.
*/
public Activation _activation = Activation.Rectifier;
/**
* The number and size of each hidden layer in the model.
* For example, if a user specifies "100,200,100" a model with 3 hidden
* layers will be produced, and the middle hidden layer will have 200
* neurons.
*/
public int[] _hidden = new int[]{200, 200};
/**
* The number of passes over the training dataset to be carried out.
* It is recommended to start with lower values for initial experiments.
* This value can be modified during checkpoint restarts and allows continuation
* of selected models.
*/
public double _epochs = 10;
/**
* The number of training data rows to be processed per iteration. Note that
* independent of this parameter, each row is used immediately to update the model
* with (online) stochastic gradient descent. This parameter controls the
* synchronization period between nodes in a distributed environment and the
* frequency at which scoring and model cancellation can happen. For example, if
* it is set to 10,000 on H2O running on 4 nodes, then each node will
* process 2,500 rows per iteration, sampling randomly from their local data.
* Then, model averaging between the nodes takes place, and scoring can happen
* (dependent on scoring interval and duty factor). Special values are 0 for
* one epoch per iteration, -1 for processing the maximum amount of data
* per iteration (if **replicate training data** is enabled, N epochs
* will be trained per iteration on N nodes, otherwise one epoch). Special value
* of -2 turns on automatic mode (auto-tuning).
*/
public long _train_samples_per_iteration = -2;
public double _target_ratio_comm_to_comp = 0.05;
/*Adaptive Learning Rate*/
/**
* The implemented adaptive learning rate algorithm (ADADELTA) automatically
* combines the benefits of learning rate annealing and momentum
* training to avoid slow convergence. Specification of only two
     * parameters (rho and epsilon) simplifies hyperparameter search.
     * In some cases, manually controlled (non-adaptive) learning rate and
     * momentum specifications can lead to better results, but require the
     * specification (and hyperparameter search) of up to 7 parameters.
* If the model is built on a topology with many local minima or
* long plateaus, it is possible for a constant learning rate to produce
* sub-optimal results. Learning rate annealing allows digging deeper into
* local minima, while rate decay allows specification of different
* learning rates per layer. When the gradient is being estimated in
* a long valley in the optimization landscape, a large learning rate
* can cause the gradient to oscillate and move in the wrong
* direction. When the gradient is computed on a relatively flat
* surface with small learning rates, the model can converge far
* slower than necessary.
*/
public boolean _adaptive_rate = true;
/**
* The first of two hyper parameters for adaptive learning rate (ADADELTA).
     * It is similar to momentum and relates to the memory of prior weight updates.
* Typical values are between 0.9 and 0.999.
* This parameter is only active if adaptive learning rate is enabled.
*/
public double _rho = 0.99;
/**
* The second of two hyper parameters for adaptive learning rate (ADADELTA).
* It is similar to learning rate annealing during initial training
* and momentum at later stages where it allows forward progress.
* Typical values are between 1e-10 and 1e-4.
* This parameter is only active if adaptive learning rate is enabled.
*/
public double _epsilon = 1e-8;
/*Learning Rate*/
/**
     * When adaptive learning rate is disabled, the magnitude of the weight
     * updates is determined by the user-specified learning rate
     * (potentially annealed), and is a function of the difference
* between the predicted value and the target value. That difference,
* generally called delta, is only available at the output layer. To
* correct the output at each hidden layer, back propagation is
* used. Momentum modifies back propagation by allowing prior
* iterations to influence the current update. Using the momentum
* parameter can aid in avoiding local minima and the associated
     * instability. Too much momentum can lead to instability, which is
     * why the momentum is best ramped up slowly.
* This parameter is only active if adaptive learning rate is disabled.
*/
public double _rate = .005;
/**
* Learning rate annealing reduces the learning rate to "freeze" into
* local minima in the optimization landscape. The annealing rate is the
* inverse of the number of training samples it takes to cut the learning rate in half
* (e.g., 1e-6 means that it takes 1e6 training samples to halve the learning rate).
* This parameter is only active if adaptive learning rate is disabled.
*/
public double _rate_annealing = 1e-6;
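    // Illustrative example (derived from the halving rule described above): with rate = 0.005 and
    // rate_annealing = 1e-6, the effective learning rate drops to about 0.0025 after 1e6 training
    // samples have been processed.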
/**
* The learning rate decay parameter controls the change of learning rate across layers.
* For example, assume the rate parameter is set to 0.01, and the rate_decay parameter is set to 0.5.
* Then the learning rate for the weights connecting the input and first hidden layer will be 0.01,
* the learning rate for the weights connecting the first and the second hidden layer will be 0.005,
* and the learning rate for the weights connecting the second and third hidden layer will be 0.0025, etc.
* This parameter is only active if adaptive learning rate is disabled.
*/
public double _rate_decay = 1.0;
/*Momentum*/
/**
* The momentum_start parameter controls the amount of momentum at the beginning of training.
* This parameter is only active if adaptive learning rate is disabled.
*/
public double _momentum_start = 0;
/**
* The momentum_ramp parameter controls the amount of learning for which momentum increases
* (assuming momentum_stable is larger than momentum_start). The ramp is measured in the number
* of training samples.
* This parameter is only active if adaptive learning rate is disabled.
*/
public double _momentum_ramp = 1e6;
/**
* The momentum_stable parameter controls the final momentum value reached after momentum_ramp training samples.
* The momentum used for training will remain the same for training beyond reaching that point.
* This parameter is only active if adaptive learning rate is disabled.
*/
public double _momentum_stable = 0;
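    // Illustrative example: momentum_start = 0.5, momentum_ramp = 1e6, momentum_stable = 0.99
    // increases the momentum from 0.5 at the start of training to 0.99 once 1e6 training samples
    // have been processed; it then stays at 0.99 for the remainder of training.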
/**
* The Nesterov accelerated gradient descent method is a modification to
* traditional gradient descent for convex functions. The method relies on
* gradient information at various points to build a polynomial approximation that
* minimizes the residuals in fewer iterations of the descent.
*/
public boolean _nesterov_accelerated_gradient = true;
/*Regularization*/
/**
* A fraction of the features for each training row to be omitted from training in order
* to improve generalization (dimension sampling).
*/
public double _input_dropout_ratio = 0.0;
/**
* A fraction of the inputs for each hidden layer to be omitted from training in order
* to improve generalization. Defaults to 0.5 for each hidden layer if omitted.
*/
public double[] _hidden_dropout_ratios;
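    // Illustrative example: with _hidden = {200, 200} and a *WithDropout activation, leaving this
    // null yields {0.5, 0.5} (see modifyParms below); an explicit value must supply one ratio in
    // [0, 1) per hidden layer (see validate below).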
/**
* A regularization method that constrains the absolute value of the weights and
* has the net effect of dropping some weights (setting them to zero) from a model
* to reduce complexity and avoid overfitting.
*/
public double _l1 = 0.0;
/**
* A regularization method that constrains the sum of the squared
* weights. This method introduces bias into parameter estimates, but
* frequently produces substantial gains in modeling as estimate variance is
* reduced.
*/
public double _l2 = 0.0;
/**
* A maximum on the sum of the squared incoming weights into
* any one neuron. This tuning parameter is especially useful for unbound
* activation functions such as Rectifier.
*/
public float _max_w2 = Float.MAX_VALUE;
/*Initialization*/
/**
* The distribution from which initial weights are to be drawn. The default
* option is an optimized initialization that considers the size of the network.
* The "uniform" option uses a uniform distribution with a mean of 0 and a given
* interval. The "normal" option draws weights from the standard normal
* distribution with a mean of 0 and given standard deviation.
*/
public InitialWeightDistribution _initial_weight_distribution = InitialWeightDistribution.UniformAdaptive;
/**
* The scale of the distribution function for Uniform or Normal distributions.
* For Uniform, the values are drawn uniformly from -initial_weight_scale...initial_weight_scale.
* For Normal, the values are drawn from a Normal distribution with a standard deviation of initial_weight_scale.
*/
public double _initial_weight_scale = 1.0;
/**
* Frame keys for initial weight matrices
*/
public Key[] _initial_weights;
/**
* Frame keys for initial bias vectors
*/
public Key[] _initial_biases;
/**
* The loss (error) function to be minimized by the model.
* Cross Entropy loss is used when the model output consists of independent
* hypotheses, and the outputs can be interpreted as the probability that each
* hypothesis is true. Cross entropy is the recommended loss function when the
* target values are class labels, and especially for imbalanced data.
* It strongly penalizes error in the prediction of the actual class label.
     * Mean Square loss is used when the model outputs are continuous real values, but can
* be used for classification as well (where it emphasizes the error on all
* output classes, not just for the actual class).
*/
public Loss _loss = Automatic;
/*Scoring*/
/**
* The minimum time (in seconds) to elapse between model scoring. The actual
* interval is determined by the number of training samples per iteration and the scoring duty cycle.
*/
public double _score_interval = 5;
/**
* The number of training dataset points to be used for scoring. Will be
* randomly sampled. Use 0 for selecting the entire training dataset.
*/
public long _score_training_samples = 10000l;
/**
* The number of validation dataset points to be used for scoring. Can be
* randomly sampled or stratified (if "balance classes" is set and "score
* validation sampling" is set to stratify). Use 0 for selecting the entire
* training dataset.
*/
public long _score_validation_samples = 0l;
/**
* Maximum fraction of wall clock time spent on model scoring on training and validation samples,
* and on diagnostics such as computation of feature importances (i.e., not on training).
*/
public double _score_duty_cycle = 0.1;
/**
     * The stopping criterion in terms of classification error (1-accuracy) on the
* training data scoring dataset. When the error is at or below this threshold,
* training stops.
*/
public double _classification_stop = 0;
/**
     * The stopping criterion in terms of regression error (MSE) on the training
* data scoring dataset. When the error is at or below this threshold, training
* stops.
*/
public double _regression_stop = 1e-6;
/**
* Enable quiet mode for less output to standard output.
*/
public boolean _quiet_mode = false;
/**
* Method used to sample the validation dataset for scoring, see Score Validation Samples above.
*/
public ClassSamplingMethod _score_validation_sampling = ClassSamplingMethod.Uniform;
/*Misc*/
/**
* Gather diagnostics for hidden layers, such as mean and RMS values of learning
* rate, momentum, weights and biases.
*/
public boolean _diagnostics = true;
/**
* Whether to compute variable importances for input features.
* The implemented method (by Gedeon) considers the weights connecting the
* input features to the first two hidden layers.
*/
public boolean _variable_importances = true;
/**
* Enable fast mode (minor approximation in back-propagation), should not affect results significantly.
*/
public boolean _fast_mode = true;
/**
     * Increase training speed on small datasets by splitting the data into many chunks
     * to allow utilization of all cores.
*/
public boolean _force_load_balance = true;
/**
* Replicate the entire training dataset onto every node for faster training on small datasets.
*/
public boolean _replicate_training_data = true;
/**
* Run on a single node for fine-tuning of model parameters. Can be useful for
* checkpoint resumes after training on multiple nodes for fast initial
* convergence.
*/
public boolean _single_node_mode = false;
/**
* Enable shuffling of training data (on each node). This option is
* recommended if training data is replicated on N nodes, and the number of training samples per iteration
* is close to N times the dataset size, where all nodes train with (almost) all
* the data. It is automatically enabled if the number of training samples per iteration is set to -1 (or to N
* times the dataset size or larger).
*/
public boolean _shuffle_training_data = false;
public MissingValuesHandling _missing_values_handling = MissingValuesHandling.MeanImputation;
public boolean _sparse = false;
public boolean _col_major = false;
public double _average_activation = 0;
public double _sparsity_beta = 0;
/**
* Max. number of categorical features, enforced via hashing (Experimental)
*/
public int _max_categorical_features = Integer.MAX_VALUE;
/**
* Force reproducibility on small data (will be slow - only uses 1 thread)
*/
public boolean _reproducible = false;
public boolean _export_weights_and_biases = false;
public boolean _elastic_averaging = false;
public double _elastic_averaging_moving_rate = 0.9;
public double _elastic_averaging_regularization = 1e-3;
// stochastic gradient descent: mini-batch size = 1
// batch gradient descent: mini-batch size = # training rows
public int _mini_batch_size = 1;
public enum MissingValuesHandling {
MeanImputation, Skip
}
public enum ClassSamplingMethod {
Uniform, Stratified
}
public enum InitialWeightDistribution {
UniformAdaptive, Uniform, Normal
}
/**
* Activation functions
*/
public enum Activation {
Tanh, TanhWithDropout, Rectifier, RectifierWithDropout, Maxout, MaxoutWithDropout, ExpRectifier, ExpRectifierWithDropout
}
/**
* Loss functions
* Absolute, Quadratic, Huber, Quantile for regression
* Quadratic, ModifiedHuber or CrossEntropy for classification
*/
public enum Loss {
Automatic, Quadratic, CrossEntropy, ModifiedHuber, Huber, Absolute, Quantile
}
/**
* Validate model parameters
* @param dl DL Model Builder (Driver)
* @param expensive (whether or not this is the "final" check)
*/
void validate(DeepLearning dl, boolean expensive) {
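      // If neither the final (expensive) check is running nor the response cardinality is known yet,
      // fall back to inferring classification from the chosen loss function.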
boolean classification = expensive || dl.nclasses() != 0 ? dl.isClassifier() : _loss == CrossEntropy || _loss == ModifiedHuber;
if (_loss == ModifiedHuber) dl.error("_loss", "ModifiedHuber loss function is not supported yet.");
// if (_hidden == null || _hidden.length == 0) dl.error("_hidden", "There must be at least one hidden layer.");
if (_hidden == null) _hidden = new int[0];
for (int h : _hidden) if (h <= 0) dl.error("_hidden", "Hidden layer size must be positive.");
if (_mini_batch_size < 1)
dl.error("_mini_batch_size", "Mini-batch size must be >= 1");
if (!_diagnostics)
dl.warn("_diagnostics", "Deprecated option: Diagnostics are always enabled.");
if (!_autoencoder) {
if (_valid == null)
dl.hide("_score_validation_samples", "score_validation_samples requires a validation frame.");
if (classification) {
dl.hide("_regression_stop", "regression_stop is used only with regression.");
} else {
dl.hide("_classification_stop", "classification_stop is used only with classification.");
}
if (!classification && _valid != null || _valid == null)
dl.hide("_score_validation_sampling", "score_validation_sampling requires classification and a validation frame.");
} else {
if (_nfolds > 1) {
dl.error("_nfolds", "N-fold cross-validation is not supported for Autoencoder.");
}
if(_custom_metric_func != null) {
dl.error("_custom_metric_func", "Custom metric is not supported for Autoencoder.");
}
}
if (_categorical_encoding==CategoricalEncodingScheme.Enum) {
dl.error("_categorical_encoding", "Cannot use Enum encoding for categoricals - need numbers!");
}
if (_categorical_encoding==CategoricalEncodingScheme.OneHotExplicit) {
dl.error("_categorical_encoding", "Won't use explicit Enum encoding for categoricals - it's much faster with OneHotInternal!");
}
if (_activation != Activation.TanhWithDropout && _activation != Activation.MaxoutWithDropout && _activation != Activation.RectifierWithDropout && _activation != Activation.ExpRectifierWithDropout) {
dl.hide("_hidden_dropout_ratios", "hidden_dropout_ratios requires a dropout activation function.");
}
if (_hidden_dropout_ratios != null) {
if (_hidden_dropout_ratios.length != _hidden.length) {
dl.error("_hidden_dropout_ratios", "Must have " + _hidden.length + " hidden layer dropout ratios.");
} else if (_activation != Activation.TanhWithDropout && _activation != Activation.MaxoutWithDropout && _activation != Activation.RectifierWithDropout && _activation != Activation.ExpRectifierWithDropout) {
dl.error("_hidden_dropout_ratios", "Cannot specify hidden_dropout_ratios with a non-dropout activation function. Use 'RectifierWithDropout', 'TanhWithDropout', etc.");
} else if (ArrayUtils.maxValue(_hidden_dropout_ratios) >= 1 || ArrayUtils.minValue(_hidden_dropout_ratios) < 0) {
dl.error("_hidden_dropout_ratios", "Hidden dropout ratios must be >= 0 and <1.");
}
}
if (_input_dropout_ratio < 0 || _input_dropout_ratio >= 1)
dl.error("_input_dropout_ratio", "Input dropout must be >= 0 and <1.");
if (_score_duty_cycle < 0 || _score_duty_cycle > 1)
dl.error("_score_duty_cycle", "Score duty cycle must be >= 0 and <=1.");
if (_l1 < 0)
dl.error("_l1", "L1 penalty must be >= 0.");
if (_l2 < 0)
dl.error("_l2", "L2 penalty must be >= 0.");
if (H2O.CLOUD.size() == 1 && _replicate_training_data)
dl.hide("_replicate_training_data", "replicate_training_data is only valid with cloud size greater than 1.");
if (_single_node_mode && (H2O.CLOUD.size() == 1 || !_replicate_training_data))
dl.hide("_single_node_mode", "single_node_mode is only used with multi-node operation with replicated training data.");
if (H2O.ARGS.client && _single_node_mode)
dl.error("_single_node_mode", "Cannot run on a single node in client mode");
if (_autoencoder)
dl.hide("_use_all_factor_levels", "use_all_factor_levels is mandatory in combination with autoencoder.");
if (_nfolds != 0)
dl.hide("_overwrite_with_best_model", "overwrite_with_best_model is unsupported in combination with n-fold cross-validation.");
if (_adaptive_rate) {
dl.hide("_rate", "rate is not used with adaptive_rate.");
dl.hide("_rate_annealing", "rate_annealing is not used with adaptive_rate.");
dl.hide("_rate_decay", "rate_decay is not used with adaptive_rate.");
dl.hide("_momentum_start", "momentum_start is not used with adaptive_rate.");
dl.hide("_momentum_ramp", "momentum_ramp is not used with adaptive_rate.");
dl.hide("_momentum_stable", "momentum_stable is not used with adaptive_rate.");
if (_rate!=0.005) dl.warn("_rate", "rate cannot be specified if adaptive_rate is enabled.");
if (_rate_annealing!=1e-6) dl.warn("_rate_annealing", "rate_annealing cannot be specified if adaptive_rate is enabled.");
if (_rate_decay!=1) dl.warn("_rate_decay", "rate_decay cannot be specified if adaptive_rate is enabled.");
if (_momentum_start!=0) dl.warn("_momentum_start", "momentum_start cannot be specified if adaptive_rate is enabled.");
      if (_momentum_ramp!=1e6) dl.warn("_momentum_ramp", "momentum_ramp cannot be specified if adaptive_rate is enabled.");
if (_momentum_stable!=0) dl.warn("_momentum_stable", "momentum_stable cannot be specified if adaptive_rate is enabled.");
} else {
// ! adaptive_rate
dl.hide("_rho", "rho is only used with adaptive_rate.");
dl.hide("_epsilon", "epsilon is only used with adaptive_rate.");
}
if (_initial_weight_distribution == InitialWeightDistribution.UniformAdaptive) {
dl.hide("_initial_weight_scale", "initial_weight_scale is not used if initial_weight_distribution == UniformAdaptive.");
}
if ((_initial_weights != null || _initial_biases != null) && _checkpoint != null) {
dl.error("_checkpoint", "Cannot specify initial weights or biases during checkpoint restart. Will use the checkpoint model's weights and biases.");
}
if (_initial_weights != null && _initial_weights.length!=_hidden.length+1) {
dl.error("_initial_weights", "The number of initial weights matrices must be " + (_hidden.length+1) + " (some weight matrices can be NULL/None/null).");
}
if (_initial_biases != null && _initial_biases.length!=_hidden.length+1) {
dl.error("_initial_biases", "The number of initial bias vectors must be " + (_hidden.length+1) + " (some bias vectors can be NULL/None/null).");
}
if (_loss == null) {
if (expensive || dl.nclasses() != 0) {
dl.error("_loss", "Loss function must be specified. Try CrossEntropy for categorical response (classification), ModifiedHuber for binomial response, Quadratic, Absolute or Huber for numerical response (regression).");
}
//otherwise, we might not know whether classification=true or false (from R, for example, the training data isn't known when init(false) is called).
} else {
if (_autoencoder && _loss == CrossEntropy)
dl.error("_loss", "Cannot use CrossEntropy loss for auto-encoder.");
if (!classification && _loss == CrossEntropy)
dl.error("_loss", technote(2, "For CrossEntropy loss, the response must be categorical."));
}
if (!classification && _loss == CrossEntropy)
dl.error("_loss", "For CrossEntropy loss, the response must be categorical.");
if (classification && (_loss != Automatic && _loss != CrossEntropy && _loss != Quadratic && _loss != ModifiedHuber))
dl.error("_loss", "For classification tasks, the loss must be one of: Automatic, Quadratic, CrossEntropy or ModifiedHuber.");
if (classification) {
switch(_distribution) {
case gaussian:
case huber:
case laplace:
case quantile:
case tweedie:
case gamma:
case poisson:
dl.error("_distribution", technote(2, _distribution + " distribution is not allowed for classification."));
break;
case AUTO:
case bernoulli:
case modified_huber:
case multinomial:
default:
//OK
break;
}
} else {
switch(_distribution) {
case multinomial:
case bernoulli:
case modified_huber:
dl.error("_distribution", technote(2, _distribution + " distribution is not allowed for regression."));
break;
case tweedie:
case gamma:
case poisson:
if (_loss != Automatic)
dl.error("_distribution", "Only Automatic loss (deviance) is allowed for " + _distribution + " distribution.");
break;
case laplace:
if (_loss != Loss.Absolute && _loss != Automatic)
dl.error("_distribution", "Only Automatic or Absolute loss is allowed for " + _distribution + " distribution.");
break;
case quantile:
if (_loss != Loss.Quantile && _loss != Automatic)
dl.error("_distribution", "Only Automatic or Quantile loss is allowed for " + _distribution + " distribution.");
break;
case huber:
if (_loss != Loss.Huber && _loss != Automatic)
dl.error("_distribution", "Only Automatic or Huber loss is allowed for " + _distribution + " distribution.");
break;
case AUTO:
case gaussian:
default:
//OK
break;
}
}
if (_distribution == DistributionFamily.quasibinomial)
dl.error("_distribution", "Quasibinomial is not supported for deeplearning in current H2O.");
if (expensive) dl.checkDistributions();
if (_score_training_samples < 0)
dl.error("_score_training_samples", "Number of training samples for scoring must be >= 0 (0 for all).");
if (_score_validation_samples < 0)
dl.error("_score_validation_samples", "Number of training samples for scoring must be >= 0 (0 for all).");
if (_autoencoder && _sparsity_beta > 0) {
if (_activation == Activation.Tanh || _activation == Activation.TanhWithDropout || _activation == Activation.ExpRectifier || _activation == Activation.ExpRectifierWithDropout) {
if (_average_activation >= 1 || _average_activation <= -1)
dl.error("_average_activation", "Tanh average activation must be in (-1,1).");
} else if (_activation == Activation.Rectifier || _activation == Activation.RectifierWithDropout) {
if (_average_activation <= 0)
dl.error("_average_activation", "Rectifier average activation must be positive.");
}
}
if (!_autoencoder && _sparsity_beta != 0)
dl.error("_sparsity_beta", "Sparsity beta can only be used for autoencoder.");
if (classification && dl.hasOffsetCol())
dl.error("_offset_column", "Offset is only supported for regression.");
// reason for the error message below is that validation might not have the same horizontalized features as the training data (or different order)
if (_autoencoder && _activation == Activation.Maxout)
dl.error("_activation", "Maxout activation is not supported for auto-encoder.");
if (_max_categorical_features < 1)
dl.error("_max_categorical_features", "max_categorical_features must be at least 1.");
if (_col_major)
dl.error("_col_major", "Deprecated: Column major data handling not supported anymore - not faster.");
if (!_sparse && _col_major) {
dl.error("_col_major", "Cannot use column major storage for non-sparse data handling.");
}
if (_sparse && _elastic_averaging) {
dl.error("_elastic_averaging", "Cannot use elastic averaging for sparse data handling.");
}
if (_max_w2 <= 0) {
dl.error("_max_w2", "Cannot use max_w2 <= 0.");
}
if (expensive) {
if (!classification && _balance_classes) {
dl.error("_balance_classes", "balance_classes requires classification.");
}
if (_class_sampling_factors != null && !_balance_classes) {
dl.error("_class_sampling_factors", "class_sampling_factors requires balance_classes to be enabled.");
}
if (_replicate_training_data && null != train() && train().byteSize() > 0.9*H2O.CLOUD.free_mem()/H2O.CLOUD.size() && H2O.CLOUD.size() > 1) {
dl.error("_replicate_training_data", "Compressed training dataset takes more than 90% of avg. free available memory per node (" + 0.9*H2O.CLOUD.free_mem()/H2O.CLOUD.size() + "), cannot run with replicate_training_data.");
}
}
if (!_elastic_averaging) {
dl.hide("_elastic_averaging_moving_rate", "Elastic averaging is required for this parameter.");
dl.hide("_elastic_averaging_regularization", "Elastic averaging is required for this parameter.");
} else {
if (_elastic_averaging_moving_rate > 1 || _elastic_averaging_moving_rate < 0)
dl.error("_elastic_averaging_moving_rate", "Elastic averaging moving rate must be between 0 and 1.");
if (_elastic_averaging_regularization < 0)
dl.error("_elastic_averaging_regularization", "Elastic averaging regularization strength must be >= 0.");
}
if (_autoencoder && _stopping_metric != ScoreKeeper.StoppingMetric.AUTO && _stopping_metric != ScoreKeeper.StoppingMetric.MSE) {
dl.error("_stopping_metric", "Stopping metric must either be AUTO or MSE for autoencoder.");
}
}
static class Sanity {
// the following parameters can be modified when restarting from a checkpoint
transient static private final String[] cp_modifiable = new String[]{
"_seed",
"_checkpoint",
"_epochs",
"_score_interval",
"_train_samples_per_iteration",
"_target_ratio_comm_to_comp",
"_score_duty_cycle",
"_score_training_samples",
"_score_validation_samples",
"_score_validation_sampling",
"_classification_stop",
"_regression_stop",
"_stopping_rounds",
"_stopping_metric",
"_stopping_tolerance",
"_quiet_mode",
"_max_confusion_matrix_size",
"_diagnostics",
"_variable_importances",
"_initial_weight_distribution", //will be ignored anyway
"_initial_weight_scale", //will be ignored anyway
"_initial_weights",
"_initial_biases",
"_force_load_balance",
"_replicate_training_data",
"_shuffle_training_data",
"_single_node_mode",
"_fast_mode",
// Allow modification of the regularization parameters after a checkpoint restart
"_l1",
"_l2",
"_max_w2",
"_input_dropout_ratio",
"_hidden_dropout_ratios",
"_loss",
"_overwrite_with_best_model",
"_missing_values_handling",
"_average_activation",
"_reproducible",
"_export_weights_and_biases",
"_elastic_averaging",
"_elastic_averaging_moving_rate",
"_elastic_averaging_regularization",
"_mini_batch_size",
"_pretrained_autoencoder"
};
// the following parameters must not be modified when restarting from a checkpoint
transient static private final String[] cp_not_modifiable = new String[]{
"_drop_na20_cols",
"_response_column",
"_activation",
"_use_all_factor_levels",
"_standardize",
"_adaptive_rate",
"_autoencoder",
"_rho",
"_epsilon",
"_sparse",
"_sparsity_beta",
"_col_major",
"_rate",
"_rate_annealing",
"_rate_decay",
"_momentum_start",
"_momentum_ramp",
"_momentum_stable",
"_nesterov_accelerated_gradient",
"_ignore_const_cols",
"_max_categorical_features",
"_nfolds",
"_distribution",
"_quantile_alpha",
"_huber_alpha",
"_tweedie_power"
};
static void checkCompleteness() {
for (Field f : DeepLearningParameters.class.getDeclaredFields())
if (!ArrayUtils.contains(cp_not_modifiable, f.getName())
&&
!ArrayUtils.contains(cp_modifiable, f.getName())
) {
if (f.getName().equals("_hidden")) continue;
if (f.getName().equals("_ignored_columns")) continue;
if (f.getName().equals("$jacocoData")) continue; // If code coverage is enabled
throw H2O.unimpl("Please add " + f.getName() + " to either cp_modifiable or cp_not_modifiable");
}
}
/**
* Check that checkpoint continuation is possible
*
* @param oldP old DL parameters (from checkpoint)
* @param newP new DL parameters (user-given, to restart from checkpoint)
*/
static void checkIfParameterChangeAllowed(final DeepLearningParameters oldP, final DeepLearningParameters newP) {
checkCompleteness();
if (newP._nfolds != 0)
throw new UnsupportedOperationException("nfolds must be 0: Cross-validation is not supported during checkpoint restarts.");
if ((newP._valid == null) != (oldP._valid == null)) {
throw new H2OIllegalArgumentException("Presence of validation dataset must agree with the checkpointed model.");
}
if (!newP._autoencoder && (newP._response_column == null || !newP._response_column.equals(oldP._response_column))) {
throw new H2OIllegalArgumentException("Response column (" + newP._response_column + ") is not the same as for the checkpointed model: " + oldP._response_column);
}
if (!Arrays.equals(newP._hidden, oldP._hidden)) {
throw new H2OIllegalArgumentException("Hidden layers (" + Arrays.toString(newP._hidden) + ") is not the same as for the checkpointed model: " + Arrays.toString(oldP._hidden));
}
if (!Arrays.equals(newP._ignored_columns, oldP._ignored_columns)) {
throw new H2OIllegalArgumentException("Ignored columns must be the same as for the checkpointed model.");
}
//compare the user-given parameters before and after and check that they are not changed
for (Field fBefore : oldP.getClass().getFields()) {
if (ArrayUtils.contains(cp_not_modifiable, fBefore.getName())) {
for (Field fAfter : newP.getClass().getFields()) {
if (fBefore.equals(fAfter)) {
try {
if (fAfter.get(newP) == null || fBefore.get(oldP) == null || !fBefore.get(oldP).toString().equals(fAfter.get(newP).toString())) { // if either of the two parameters is null, skip the toString()
if (fBefore.get(oldP) == null && fAfter.get(newP) == null)
continue; //if both parameters are null, we don't need to do anything
throw new H2OIllegalArgumentException("Cannot change parameter: '" + fBefore.getName() + "': " + fBefore.get(oldP) + " -> " + fAfter.get(newP));
}
} catch (IllegalAccessException e) {
e.printStackTrace();
}
}
}
}
}
}
/**
* Update the parameters from checkpoint to user-specified
*
* @param srcParms source: user-specified parameters
* @param tgtParms target: parameters to be modified
* @param doIt whether to overwrite target parameters (or just print the message)
* @param quiet whether to suppress the notifications about parameter changes
*/
static void updateParametersDuringCheckpointRestart(DeepLearningParameters srcParms, DeepLearningParameters tgtParms/*actually used during training*/, boolean doIt, boolean quiet) {
for (Field fTarget : tgtParms.getClass().getFields()) {
if (ArrayUtils.contains(cp_modifiable, fTarget.getName())) {
for (Field fSource : srcParms.getClass().getFields()) {
if (fTarget.equals(fSource)) {
try {
if (fSource.get(srcParms) == null || fTarget.get(tgtParms) == null || !fTarget.get(tgtParms).toString().equals(fSource.get(srcParms).toString())) { // if either of the two parameters is null, skip the toString()
if (fTarget.get(tgtParms) == null && fSource.get(srcParms) == null)
continue; //if both parameters are null, we don't need to do anything
if (!tgtParms._quiet_mode && !quiet)
Log.info("Applying user-requested modification of '" + fTarget.getName() + "': " + fTarget.get(tgtParms) + " -> " + fSource.get(srcParms));
if (doIt)
fTarget.set(tgtParms, fSource.get(srcParms));
}
} catch (IllegalAccessException e) {
e.printStackTrace();
}
}
}
}
}
}
/**
* Take user-given parameters and turn them into usable, fully populated parameters (e.g., to be used by Neurons during training)
*
* @param fromParms raw user-given parameters from the REST API (READ ONLY)
* @param toParms modified set of parameters, with defaults filled in (WILL BE MODIFIED)
* @param nClasses number of classes (1 for regression or autoencoder)
*/
static void modifyParms(DeepLearningParameters fromParms, DeepLearningParameters toParms, int nClasses) {
if (fromParms._hidden_dropout_ratios == null) {
if (fromParms._activation == Activation.TanhWithDropout
|| fromParms._activation == Activation.MaxoutWithDropout
|| fromParms._activation == Activation.RectifierWithDropout
|| fromParms._activation == Activation.ExpRectifierWithDropout
) {
toParms._hidden_dropout_ratios = new double[fromParms._hidden.length];
if (!fromParms._quiet_mode)
Log.info("_hidden_dropout_ratios: Automatically setting all hidden dropout ratios to 0.5.");
Arrays.fill(toParms._hidden_dropout_ratios, 0.5);
}
} else {
toParms._hidden_dropout_ratios = fromParms._hidden_dropout_ratios.clone();
}
if (H2O.CLOUD.size() == 1 && fromParms._replicate_training_data) {
if (!fromParms._quiet_mode)
Log.info("_replicate_training_data: Disabling replicate_training_data on 1 node.");
toParms._replicate_training_data = false;
}
if (fromParms._single_node_mode && (H2O.CLOUD.size() == 1 || !fromParms._replicate_training_data)) {
if (!fromParms._quiet_mode)
Log.info("_single_node_mode: Disabling single_node_mode (only for multi-node operation with replicated training data).");
toParms._single_node_mode = false;
}
if (!fromParms._use_all_factor_levels && fromParms._autoencoder) {
if (!fromParms._quiet_mode)
Log.info("_use_all_factor_levels: Automatically enabling all_factor_levels for auto-encoders.");
toParms._use_all_factor_levels = true;
}
if (fromParms._overwrite_with_best_model && fromParms._nfolds != 0) {
if (!fromParms._quiet_mode)
Log.info("_overwrite_with_best_model: Disabling overwrite_with_best_model in combination with n-fold cross-validation.");
toParms._overwrite_with_best_model = false;
}
if (fromParms._categorical_encoding==CategoricalEncodingScheme.AUTO) {
if (!fromParms._quiet_mode)
Log.info("_categorical_encoding: Automatically enabling OneHotInternal categorical encoding.");
toParms._categorical_encoding = CategoricalEncodingScheme.OneHotInternal;
}
if (fromParms._mini_batch_size > 1) {
Log.warn("_mini_batch_size", "Only mini-batch size = 1 is supported right now.");
toParms._mini_batch_size = 1;
}
if (fromParms._adaptive_rate) {
if (!fromParms._quiet_mode)
Log.info("_adaptive_rate: Using automatic learning rate. Ignoring the following input parameters: "
+ "rate, rate_decay, rate_annealing, momentum_start, momentum_ramp, momentum_stable.");
toParms._rate = 0;
toParms._rate_decay = 0;
toParms._rate_annealing = 0;
toParms._momentum_start = 0;
toParms._momentum_ramp = 0;
toParms._momentum_stable = 0;
} else {
if (!fromParms._quiet_mode)
Log.info("_adaptive_rate: Using manual learning rate. Ignoring the following input parameters: "
+ "rho, epsilon.");
toParms._rho = 0;
toParms._epsilon = 0;
}
if (fromParms._activation == Activation.Rectifier || fromParms._activation == Activation.RectifierWithDropout) {
if (fromParms._max_w2 == Float.POSITIVE_INFINITY) {
if (!fromParms._quiet_mode)
Log.info("_max_w2: Automatically setting max_w2 to 1000 to keep (unbounded) Rectifier activation in check.");
toParms._max_w2 = 1e3f;
}
}
if (fromParms._nfolds != 0) {
if (fromParms._overwrite_with_best_model) {
if (!fromParms._quiet_mode)
Log.info("_overwrite_with_best_model: Automatically disabling overwrite_with_best_model, since the final model is the only scored model with n-fold cross-validation.");
toParms._overwrite_with_best_model = false;
}
}
if (fromParms._autoencoder && fromParms._stopping_metric == ScoreKeeper.StoppingMetric.AUTO) {
if (!fromParms._quiet_mode)
Log.info("_stopping_metric: Automatically setting stopping_metric to MSE for autoencoder.");
toParms._stopping_metric = ScoreKeeper.StoppingMetric.MSE;
}
// Automatically set the distribution
if (fromParms._distribution == DistributionFamily.AUTO) {
// For classification, allow AUTO/bernoulli/multinomial with losses CrossEntropy/Quadratic/Huber/Absolute
if (nClasses > 1) {
toParms._distribution = nClasses == 2 ? DistributionFamily.bernoulli : DistributionFamily.multinomial;
}
else {
//regression/autoencoder
switch(fromParms._loss) {
case Automatic:
case Quadratic:
toParms._distribution = DistributionFamily.gaussian;
break;
case Absolute:
toParms._distribution = DistributionFamily.laplace;
break;
case Quantile:
toParms._distribution = DistributionFamily.quantile;
break;
case Huber:
toParms._distribution = DistributionFamily.huber;
break;
case ModifiedHuber:
toParms._distribution = DistributionFamily.modified_huber;
break;
default:
throw H2O.unimpl();
}
}
}
if (fromParms._loss == Automatic) {
switch (toParms._distribution) {
// regression
case gaussian:
toParms._loss = Quadratic;
break;
case quantile:
toParms._loss = Loss.Quantile;
break;
case laplace:
toParms._loss = Loss.Absolute;
break;
case huber:
toParms._loss = Loss.Huber;
break;
case tweedie:
case poisson:
case gamma:
toParms._loss = Automatic; //deviance
break;
// classification
case multinomial:
case bernoulli:
toParms._loss = CrossEntropy;
break;
case modified_huber:
toParms._loss = ModifiedHuber;
break;
default:
throw H2O.unimpl();
}
}
if (fromParms._reproducible) {
if (!fromParms._quiet_mode)
Log.info("_reproducibility: Automatically enabling force_load_balancing and score_each_iteration to enforce reproducibility. Turning off replicate_training_data.");
toParms._force_load_balance = true;
toParms._score_each_iteration = true;
toParms._replicate_training_data = false;
if (fromParms._train_samples_per_iteration == -2) {
toParms._train_samples_per_iteration = -1;
Log.info("_reproducibility: Also setting train_samples_per_iteration to -1 since auto-tuning (-2) was specified.");
}
}
}
}
}
@Override
public DeepLearningMojoWriter getMojo() {
return new DeepLearningMojoWriter(this);
}
@Override
public boolean isFeatureUsedInPredict(String featureName) {
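    // Without computed variable importances there is no signal to rule a feature out,
    // so conservatively report it as used.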
if (!_parms._variable_importances) return true;
int featureIdx = ArrayUtils.find(varImp()._names, featureName);
return featureIdx != -1 && (double) varImp()._varimp[featureIdx] != 0d;
}
@Override
public boolean isDistributionHuber() {
return super.isDistributionHuber() || get_params()._distribution == DistributionFamily.huber;
}
@Override protected CategoricalEncoding getGenModelEncoding() {
switch (_parms._categorical_encoding) {
case AUTO:
case SortByResponse:
case OneHotInternal:
return CategoricalEncoding.AUTO;
case Binary:
return CategoricalEncoding.Binary;
case Eigen:
return CategoricalEncoding.Eigen;
case LabelEncoder:
return CategoricalEncoding.LabelEncoder;
default:
return null;
}
}
@Override
public double score(double[] data) {
double[] pred = score0(data, new double[_output.nclasses() + 1], 0);
score0PostProcessSupervised(pred, data);
return pred[0];
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/deeplearning/DeepLearningModelInfo.java
|
package hex.deeplearning;
import hex.DataInfo;
import hex.genmodel.utils.DistributionFamily;
import static java.lang.Double.isNaN;
import hex.Model;
import hex.deeplearning.DeepLearningModel.DeepLearningParameters;
import water.*;
import water.fvec.Frame;
import water.util.*;
import java.util.Arrays;
import java.util.Random;
/**
* This class contains the state of the Deep Learning model
* This will be shared: one per node
*/
final public class DeepLearningModelInfo extends Iced<DeepLearningModelInfo> {
public TwoDimTable summaryTable;
public DataInfo data_info;
public DataInfo data_info() {
return data_info;
}
// model is described by parameters and the following arrays
private Storage.DenseRowMatrix[] dense_row_weights; //one 2D weight matrix per layer (stored as a 1D array each)
private Storage.DenseVector[] biases; //one 1D bias array per layer
private Storage.DenseVector[] avg_activations; //one 1D array per hidden layer
// helpers for storing previous step deltas
// Note: These two arrays *could* be made transient and then initialized freshly in makeNeurons() and in DeepLearningTask.initLocal()
// But then, after each reduction, the weights would be lost and would have to restart afresh -> not *exactly* right, but close...
private Storage.DenseRowMatrix[] dense_row_weights_momenta;
private Storage.DenseVector[] biases_momenta;
// helpers for AdaDelta
private Storage.DenseRowMatrix[] dense_row_ada_dx_g;
private Storage.DenseVector[] biases_ada_dx_g;
private boolean[] _saw_missing_cats; // whether missing value was encountered for each categorical predictor - needed for varimp
// compute model size (number of model parameters required for making predictions)
// momenta are not counted here, but they are needed for model building
public long size() {
long siz = 0;
for (Storage.DenseRowMatrix w : dense_row_weights) if (w != null) siz += w.size();
for (Storage.Vector b : biases) siz += b.size();
return siz;
}
/**
* Check whether a missing value was found for every categorical predictor
* @param cats activation of categorical buckets for a given row
*/
void checkMissingCats(int[] cats) {
if (cats == null) return;
if (_saw_missing_cats == null) return;
for (int i=0; i<cats.length; ++i) {
assert(data_info._catMissing[i]); //have a missing bucket for each categorical
if (_saw_missing_cats[i]) continue;
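      // The last bucket of each categorical column is the "missing" bucket (c.f. the generated
      // scoring code, which maps NaN to CATOFFSETS[i+1]-1).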
_saw_missing_cats[i] = (cats[i] == data_info._catOffsets[i+1]-1);
}
}
// accessors to (shared) weights and biases - those will be updated racily (c.f. Hogwild!)
boolean has_momenta() {
return get_params()._momentum_start != 0 || get_params()._momentum_stable != 0;
}
boolean adaDelta() {
return get_params()._adaptive_rate;
}
public final Storage.DenseRowMatrix get_weights(int i) {
return dense_row_weights[i];
}
public final Storage.DenseVector get_biases(int i) {
return biases[i];
}
public final Storage.DenseRowMatrix get_weights_momenta(int i) {
return dense_row_weights_momenta[i];
}
public final Storage.DenseVector get_biases_momenta(int i) {
return biases_momenta[i];
}
public final Storage.DenseRowMatrix get_ada_dx_g(int i) {
return dense_row_ada_dx_g[i];
}
public final Storage.DenseVector get_biases_ada_dx_g(int i) {
return biases_ada_dx_g[i];
}
//accessor to shared parameter defining avg activations
public final Storage.DenseVector get_avg_activations(int i) {
return avg_activations[i];
}
public DeepLearningParameters parameters;
Key<Model> _model_id;
public final DeepLearningParameters get_params() { return parameters; }
public final void set_params(DeepLearningParameters p, Key<Model> model_id ) {
parameters = (DeepLearningParameters) p.clone();
_model_id = model_id;
}
private double[] mean_rate;
private double[] rms_rate;
private double[] mean_bias;
private double[] rms_bias;
private double[] mean_weight;
public double[] rms_weight;
public double[] mean_a;
private volatile boolean unstable = false;
public boolean isUnstable() { return unstable; }
public void setUnstable() {
if (!unstable) computeStats();
unstable = true;
}
private long processed_global;
public synchronized long get_processed_global() { return processed_global; }
public synchronized void set_processed_global(long p) { processed_global = p; }
public synchronized void add_processed_global(long p) { processed_global += p; }
private long processed_local;
public synchronized long get_processed_local() { return processed_local; }
public synchronized void set_processed_local(long p) { processed_local = p; }
public synchronized void add_processed_local(long p) { processed_local += p; }
public synchronized long get_processed_total() { return processed_global + processed_local; }
// package local helpers
int[] units; //number of neurons per layer, extracted from parameters and from datainfo
final boolean _classification; // Classification cache (nclasses>1)
final Frame _train; // Prepared training frame
final Frame _valid; // Prepared validation frame
/**
* Dummy constructor, only to be used for deserialization from autobuffer
*/
private DeepLearningModelInfo() {
super(); // key is null
_classification = false;
_train = _valid = null;
}
/**
* Main constructor
* @param params Model parameters
* @param dinfo Data Info
* @param nClasses number of classes (1 for regression, 0 for autoencoder)
* @param train User-given training data frame, prepared by AdaptTestTrain
* @param valid User-specified validation data frame, prepared by AdaptTestTrain
*/
public DeepLearningModelInfo(final DeepLearningParameters params, Key model_id, final DataInfo dinfo, int nClasses, Frame train, Frame valid) {
_classification = nClasses > 1;
_train = train;
_valid = valid;
data_info = dinfo;
parameters = (DeepLearningParameters) params.clone(); //make a copy, don't change model's parameters
_model_id = model_id;
DeepLearningParameters.Sanity.modifyParms(parameters, parameters, nClasses); //sanitize the model_info's parameters
final int num_input = dinfo.fullN();
final int num_output = get_params()._autoencoder ? num_input :
(_classification && parameters._distribution != DistributionFamily.modified_huber ? train.vec(parameters._response_column).cardinality() : 1);
if (!get_params()._autoencoder) assert(num_output == nClasses || parameters._distribution == DistributionFamily.modified_huber );
_saw_missing_cats = dinfo._cats > 0 ? new boolean[data_info._cats] : null;
assert (num_input > 0);
assert (num_output > 0);
if (has_momenta() && adaDelta())
throw new IllegalArgumentException("Cannot have non-zero momentum and adaptive rate at the same time.");
final int layers = get_params()._hidden.length;
// units (# neurons for each layer)
units = new int[layers + 2];
if (get_params()._max_categorical_features <= Integer.MAX_VALUE - dinfo._nums)
units[0] = Math.min(dinfo._nums + get_params()._max_categorical_features, num_input);
else
units[0] = num_input;
System.arraycopy(get_params()._hidden, 0, units, 1, layers);
units[layers + 1] = num_output;
boolean printLevels = units[0] > 1000L;
boolean warn = units[0] > 100000L;
if (printLevels) {
final String[][] domains = dinfo._adaptedFrame.domains();
if (warn) {
Log.warn("===================================================================================================================================");
Log.warn(num_input + " input features" + (dinfo._cats > 0 ? " (after categorical one-hot encoding)" : "") + ". Can be slow and require a lot of memory.");
}
FrameUtils.printTopCategoricalLevels(dinfo._adaptedFrame, warn, 10);
if (warn) {
Log.warn("Suggestions:");
Log.warn(" *) Limit the size of the first hidden layer");
if (dinfo._cats > 0) {
Log.warn(" *) Limit the total number of one-hot encoded features by setting 'categorical_encoding=\"enum_limited\"'");
Log.warn(" *) Limit the total number of one-hot encoded features with the parameter 'max_categorical_features' (experimental)");
Log.warn(" *) Run h2o.interaction(...,pairwise=F) on high-cardinality categorical columns to limit the factor count, see http://docs.h2o.ai/h2o/latest-stable/h2o-docs/data-science/deep-learning.html#faq");
}
Log.warn("===================================================================================================================================");
}
}
int[] mult = new int[layers + 1];
for (int i=0;i<layers;++i) {
mult[i] = (get_params()._activation == DeepLearningParameters.Activation.Maxout || get_params()._activation == DeepLearningParameters.Activation.MaxoutWithDropout) ? 2 : 1;
}
mult[layers]=1; //Output is never Maxout
// weights (to connect layers)
dense_row_weights = new Storage.DenseRowMatrix[layers + 1];
dense_row_weights[0] = new Storage.DenseRowMatrix(mult[0]*units[1], units[0]);
for (int i = 1; i <= layers; ++i)
dense_row_weights[i] = new Storage.DenseRowMatrix(mult[i] * units[i + 1] /*rows*/, units[i] /*cols*/);
// biases (only for hidden layers and output layer)
biases = new Storage.DenseVector[layers + 1];
for (int i = 0; i <= layers; ++i)
biases[i] = new Storage.DenseVector(mult[i] * units[i+1]);
// average activation (only for hidden layers)
if (get_params()._autoencoder && get_params()._sparsity_beta > 0) {
avg_activations = new Storage.DenseVector[layers];
mean_a = new double[layers];
for (int i = 0; i < layers; ++i)
avg_activations[i] = new Storage.DenseVector(mult[i] * units[i + 1]);
}
allocateHelperArrays();
// for diagnostics
mean_rate = new double[units.length-1];
rms_rate = new double[units.length-1];
mean_bias = new double[units.length-1];
rms_bias = new double[units.length-1];
mean_weight = new double[units.length-1];
rms_weight = new double[units.length-1];
}
/**
* Allocate helper arrays for momentum/learning rate, etc.
*/
void allocateHelperArrays() {
int[] mult = new int[units.length-1];
for (int i=0;i<units.length-1;++i) {
mult[i] = (get_params()._activation == DeepLearningParameters.Activation.Maxout || get_params()._activation == DeepLearningParameters.Activation.MaxoutWithDropout) ? 2 : 1;
}
mult[units.length-2]=1; //Output is never Maxout
if (has_momenta()) {
dense_row_weights_momenta = new Storage.DenseRowMatrix[dense_row_weights.length];
if (dense_row_weights[0] != null)
dense_row_weights_momenta[0] = new Storage.DenseRowMatrix(mult[0]*units[1], units[0]);
for (int i = 1; i < dense_row_weights_momenta.length; ++i)
dense_row_weights_momenta[i] = new Storage.DenseRowMatrix(mult[i]*units[i + 1], units[i]);
biases_momenta = new Storage.DenseVector[biases.length];
for (int i = 0; i < biases_momenta.length; ++i)
biases_momenta[i] = new Storage.DenseVector(mult[i]*units[i + 1]);
} else if (adaDelta()) {
dense_row_ada_dx_g = new Storage.DenseRowMatrix[dense_row_weights.length];
//AdaGrad
dense_row_ada_dx_g[0] = new Storage.DenseRowMatrix(mult[0]*2*units[1], units[0]);
for (int i = 1; i < dense_row_ada_dx_g.length; ++i) {
dense_row_ada_dx_g[i] = new Storage.DenseRowMatrix(mult[i]*units[i + 1], 2 * units[i]);
}
biases_ada_dx_g = new Storage.DenseVector[biases.length];
for (int i = 0; i < biases_ada_dx_g.length; ++i) {
biases_ada_dx_g[i] = new Storage.DenseVector(mult[i]*2* units[i + 1]);
}
}
}
/**
* Create a summary table
* @return TwoDimTable with the summary of the model
*/
TwoDimTable createSummaryTable() {
computeStats();
Neurons[] neurons = DeepLearningTask.makeNeuronsForTesting(this);
long byte_size = new AutoBuffer().put(this).buf().length;
TwoDimTable table = new TwoDimTable(
"Status of Neuron Layers",
(!get_params()._autoencoder ? ("predicting " + data_info._adaptedFrame.lastVecName() + ", ") : "") +
(get_params()._autoencoder ? "auto-encoder" :
_classification ? (units[units.length - 1] + "-class classification") : "regression")
+ ", " + get_params()._distribution + " distribution, " + get_params()._loss + " loss, "
+ String.format("%,d", size()) + " weights/biases, " + PrettyPrint.bytes(byte_size) + ", "
+ String.format("%,d", get_processed_global()) + " training samples, "
+ "mini-batch size " + String.format("%,d", get_params()._mini_batch_size),
new String[neurons.length],
new String[]{"Layer", "Units", "Type", "Dropout", "L1", "L2",
"Mean Rate", "Rate RMS", "Momentum",
"Mean Weight", "Weight RMS",
"Mean Bias", "Bias RMS"
},
new String[]{"int", "int", "string", "double", "double", "double",
"double", "double", "double",
"double", "double",
"double", "double"
},
new String[]{"%d", "%d", "%s", "%2.2f %%", "%5f", "%5f", "%5f", "%5f", "%5f", "%5f", "%5f", "%5f", "%5f"},
"");
for (int i = 0; i < neurons.length; ++i) {
table.set(i, 0, i + 1);
table.set(i, 1, neurons[i].units);
table.set(i, 2, neurons[i].getClass().getSimpleName());
if (i == 0) {
table.set(i, 3, neurons[i].params._input_dropout_ratio * 100);
continue;
} else if (i < neurons.length - 1) {
if (neurons[i].params._hidden_dropout_ratios == null) {
table.set(i, 3, 0);
} else {
table.set(i, 3, neurons[i].params._hidden_dropout_ratios[i - 1] * 100);
}
}
table.set(i, 4, neurons[i].params._l1);
table.set(i, 5, neurons[i].params._l2);
table.set(i, 6, (get_params()._adaptive_rate ? mean_rate[i-1] : neurons[i].rate(get_processed_total())));
table.set(i, 7, (get_params()._adaptive_rate ? rms_rate[i-1] : 0));
table.set(i, 8, get_params()._adaptive_rate ? 0 : neurons[i].momentum(get_processed_total()));
table.set(i, 9, mean_weight[i-1]);
table.set(i, 10, rms_weight[i-1]);
table.set(i, 11, mean_bias[i-1]);
table.set(i, 12, rms_bias[i-1]);
}
summaryTable = table;
return summaryTable;
}
/**
* Print a summary table
* @return String containing ASCII version of summary table
*/
@Override public String toString() {
StringBuilder sb = new StringBuilder();
if (!get_params()._quiet_mode) {
if (get_params()._sparsity_beta > 0) {
for (int k = 0; k < get_params()._hidden.length; k++) {
sb.append("Average activation in hidden layer ").append(k).append(" is ").append(mean_a[k]).append(" \n");
}
}
createSummaryTable();
sb.append(summaryTable.toString(1));
}
return sb.toString();
}
/**
* Debugging printout
* @return String with useful info
*/
public String toStringAll() {
StringBuilder sb = new StringBuilder();
sb.append(toString());
for (int i = 0; i < units.length - 1; ++i)
sb.append("\nweights[").append(i).append("][]=").append(Arrays.toString(get_weights(i).raw()));
for (int i = 0; i < units.length - 1; ++i) {
sb.append("\nbiases[").append(i).append("][]=").append(Arrays.toString(get_biases(i).raw()));
}
if (has_momenta()) {
for (int i = 0; i < units.length - 1; ++i)
sb.append("\nweights_momenta[").append(i).append("][]=").append(Arrays.toString(get_weights_momenta(i).raw()));
}
if (biases_momenta != null) {
for (int i = 0; i < units.length - 1; ++i) {
sb.append("\nbiases_momenta[").append(i).append("][]=").append(Arrays.toString(biases_momenta[i].raw()));
}
}
sb.append("\nunits[]=").append(Arrays.toString(units));
sb.append("\nprocessed global: ").append(get_processed_global());
sb.append("\nprocessed local: ").append(get_processed_local());
sb.append("\nprocessed total: ").append(get_processed_total());
sb.append("\n");
return sb.toString();
}
/**
* Initialize weights/biases
*/
void initializeMembers(Key<Frame>[] initial_weights, Key<Frame>[] initial_biases) {
randomizeWeights();
//TODO: determine good/optimal/best initialization scheme for biases
// hidden layers
for (int i = 0; i < get_params()._hidden.length; ++i) {
if (get_params()._activation == DeepLearningParameters.Activation.Rectifier
|| get_params()._activation == DeepLearningParameters.Activation.RectifierWithDropout
|| get_params()._activation == DeepLearningParameters.Activation.Maxout
|| get_params()._activation == DeepLearningParameters.Activation.MaxoutWithDropout
) {
// Arrays.fill(biases[i], 1.); //old behavior
Arrays.fill(biases[i].raw(), i == 0 ? 0.5f : 1f); //new behavior, might be slightly better
} else if (get_params()._activation == DeepLearningParameters.Activation.Tanh || get_params()._activation == DeepLearningParameters.Activation.TanhWithDropout) {
Arrays.fill(biases[i].raw(), 0f);
}
}
Arrays.fill(biases[biases.length - 1].raw(), 0f); //output layer
if (initial_weights!=null || initial_biases!=null) {
Log.info("Initializing initial model state from user-given weights/biases.");
for (int i = 0; i < get_params()._hidden.length+1; ++i) {
if (initial_weights[i] == null) {
Log.info("No user-given weight matrix given for weights #" + (i+1) + ". Initializing those weights randomly.");
continue;
}
if (initial_biases[i] == null) {
Log.info("No user-given bias vector given for biases #" + (i+1) + ". Initializing those biases randomly.");
continue;
}
Frame w = initial_weights[i].get();
if (w==null) {
throw new IllegalArgumentException("User-given weight matrix for weights #" + (i+1) + " '" + initial_weights[i].toString() + "' not found. Initializing those weights randomly.");
}
if (w.numRows() != get_weights(i).rows() || w.numCols() != get_weights(i).cols()) {
throw new IllegalArgumentException("Dimensionality mismatch: initial_weights matrix #" + i +
" should have " + get_weights(i).rows() + " rows and " + get_weights(i).cols()
+ " columns, but has " + w.numRows() + " rows and " + w.numCols() + " columns.");
}
Frame b = initial_biases[i].get();
if (b==null) {
throw new IllegalArgumentException("User-given bias vector for biases #" + (i+1) + " '" + initial_biases[i].toString() + "' not found. Initializing those biases randomly.");
}
if (b.numRows() != get_biases(i).size() || b.numCols() != 1) {
throw new IllegalArgumentException("Dimensionality mismatch: initial_biases vector #" + i +
" should have " + get_biases(i).size() + " rows and 1"
+ " column, but has " + b.numRows() + " rows and " + b.numCols() + " column(s).");
}
for (int c=0; c<w.numCols(); ++c)
for (int r=0; r<w.numRows(); ++r)
get_weights(i).set(r,c,(float)w.vec(c).at(r));
for (int r=0; r<w.numRows(); ++r)
get_biases(i).set(r,(float)b.vec(0).at(r));
}
}
else {
Log.info("Created random initial model state.");
}
}
/**
* Fill weights and biases from a pretrained autoencoder model
* @param autoencoder Autoencoder DL model with matching inputs and hidden layers
*/
void initializeFromPretrainedModel(DeepLearningModelInfo autoencoder) {
assert(autoencoder.parameters._autoencoder);
randomizeWeights();
// now overwrite the weights with those from the pretrained model
for (int w = 0; w < dense_row_weights.length-1 /*skip output layer*/; ++w) {
if (get_weights(w).rows() != autoencoder.get_weights(w).rows())
throw new IllegalArgumentException("Mismatch between weights in pretrained model and this model: rows in layer " + w + ": " + autoencoder.get_weights(w).rows() + " vs " + get_weights(w).rows() +
". Enable ignored_const_cols for both models and/or check categorical levels for consistency.");
if (get_weights(w).cols() != autoencoder.get_weights(w).cols())
throw new IllegalArgumentException("Mismatch between weights in pretrained model and this model: cols in layer " + w + ": " + autoencoder.get_weights(w).cols() + " vs " + get_weights(w).cols() +
". Enable ignored_const_cols for both models and/or check categorical levels for consistency.");
for (int i = 0; i < get_weights(w).rows(); i++) {
for (int j = 0; j < get_weights(w).cols(); j++) {
get_weights(w).set(i, j, autoencoder.get_weights(w).get(i, j));
}
}
}
for (int i = 0; i < get_params()._hidden.length; ++i) {
for (int j = 0; j < biases[i].raw().length; ++j) {
biases[i].set(j, autoencoder.biases[i].get(j));
}
}
Arrays.fill(biases[biases.length - 1].raw(), 0f); //output layer
}
/**
* Add another model info into this
* This will add the weights/biases/learning rate helpers, and the number of processed training samples
* Note: It will NOT add the elastic averaging helpers, which are always kept constant (they already are the result of a reduction)
* @param other Other DeepLearningModelInfo to add into this one
*/
public void add(DeepLearningModelInfo other) {
for (int i = 0; i < dense_row_weights.length; ++i)
ArrayUtils.add(get_weights(i).raw(), other.get_weights(i).raw());
for (int i = 0; i < biases.length; ++i) ArrayUtils.add(biases[i].raw(), other.biases[i].raw());
if (avg_activations != null)
for (int i = 0; i < avg_activations.length; ++i)
ArrayUtils.add(avg_activations[i].raw(), other.biases[i].raw());
if (has_momenta()) {
assert (other.has_momenta());
for (int i = 0; i < dense_row_weights_momenta.length; ++i)
ArrayUtils.add(get_weights_momenta(i).raw(), other.get_weights_momenta(i).raw());
for (int i = 0; i < biases_momenta.length; ++i)
ArrayUtils.add(biases_momenta[i].raw(), other.biases_momenta[i].raw());
}
if (adaDelta()) {
assert (other.adaDelta());
for (int i = 0; i < dense_row_ada_dx_g.length; ++i) {
ArrayUtils.add(get_ada_dx_g(i).raw(), other.get_ada_dx_g(i).raw());
}
}
add_processed_local(other.get_processed_local());
}
/**
* Multiply all weights/biases by a real-valued number
* @param N multiplication factor
*/
protected void mult(double N) {
div(1 / N);
}
/**
* Divide all weights/biases by a real-valued number
* @param N divisor
*/
protected void div(double N) {
for (int i = 0; i < dense_row_weights.length; ++i)
ArrayUtils.div(get_weights(i).raw(), (float)N);
for (Storage.Vector bias : biases) ArrayUtils.div(bias.raw(), N);
if (avg_activations != null)
for (Storage.Vector avgac : avg_activations)
ArrayUtils.div(avgac.raw(), N);
if (has_momenta()) {
for (int i = 0; i < dense_row_weights_momenta.length; ++i)
ArrayUtils.div(get_weights_momenta(i).raw(), (float)N);
for (Storage.Vector bias_momenta : biases_momenta) ArrayUtils.div(bias_momenta.raw(), N);
}
if (adaDelta()) {
for (int i = 0; i < dense_row_ada_dx_g.length; ++i) {
ArrayUtils.div(get_ada_dx_g(i).raw(), (float)N);
}
}
}
double uniformDist(Random rand, double min, double max) {
return min + rand.nextFloat() * (max - min);
}
/**
* Initialization of neural net weights
* cf. http://machinelearning.wustl.edu/mlpapers/paper_files/AISTATS2010_GlorotB10.pdf
*/
private void randomizeWeights() {
for (int w = 0; w < dense_row_weights.length; ++w) {
final Random rng = water.util.RandomUtils.getRNG(get_params()._seed + 0xBAD5EED + w + 1); //to match NeuralNet behavior
final double range = Math.sqrt(6. / (units[w] + units[w + 1]));
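      // Glorot/Xavier uniform range: sqrt(6 / (fan_in + fan_out)), with fan_in = units[w] and fan_out = units[w + 1]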
for (int i = 0; i < get_weights(w).rows(); i++) {
for (int j = 0; j < get_weights(w).cols(); j++) {
if (get_params()._initial_weight_distribution == DeepLearningParameters.InitialWeightDistribution.UniformAdaptive) {
// cf. http://machinelearning.wustl.edu/mlpapers/paper_files/AISTATS2010_GlorotB10.pdf
if (w == dense_row_weights.length - 1 && _classification)
get_weights(w).set(i, j, (float) (4. * uniformDist(rng, -range, range))); //Softmax might need an extra factor 4, since it's like a sigmoid
else
get_weights(w).set(i, j, (float) uniformDist(rng, -range, range));
} else if (get_params()._initial_weight_distribution == DeepLearningParameters.InitialWeightDistribution.Uniform) {
get_weights(w).set(i, j, (float) uniformDist(rng, -get_params()._initial_weight_scale, get_params()._initial_weight_scale));
} else if (get_params()._initial_weight_distribution == DeepLearningParameters.InitialWeightDistribution.Normal) {
get_weights(w).set(i, j, (float) (rng.nextGaussian() * get_params()._initial_weight_scale));
}
}
}
}
}
// TODO: Add "subset randomize" function
// int count = Math.min(15, _previous.units);
// double min = -.1f, max = +.1f;
// //double min = -1f, max = +1f;
// for( int o = 0; o < units; o++ ) {
// for( int n = 0; n < count; n++ ) {
// int i = rand.nextInt(_previous.units);
// int w = o * _previous.units + i;
// _w[w] = uniformDist(rand, min, max);
// }
// }
/**
* Compute Variable Importance, based on
* GEDEON: DATA MINING OF INPUTS: ANALYSING MAGNITUDE AND FUNCTIONAL MEASURES
*
* @return variable importances for input features
*/
public float[] computeVariableImportances() {
float[] vi = new float[units[0]];
Arrays.fill(vi, 0f);
if (units.length==2) {
// no hidden layers
for (int i = 0; i < units[0]; i++) {
// sum up abs weights going out from each input neuron
for (int j = 0; j < units[1]; j++) {
float wij = get_weights(0).get(j, i);
vi[i] += Math.abs(wij);
}
}
} else {
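      // Gedeon '97: relative importance of input i on output k through hidden unit j:
      //   Q_ik = sum_j (|w_ij| / sum_i' |w_i'j|) * (|w_jk| / sum_j' |w_j'k|)
      // Q_ik is then normalized per output k, summed over k, and finally rescaled so that max(vi) == 1.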
float[][] Qik = new float[units[0]][units[2]]; //importance of input i on output k
float[] sum_wj = new float[units[1]]; //sum of incoming weights into first hidden layer
float[] sum_wk = new float[units[2]]; //sum of incoming weights into output layer (or second hidden layer)
for (float[] Qi : Qik) Arrays.fill(Qi, 0f);
Arrays.fill(sum_wj, 0f);
Arrays.fill(sum_wk, 0f);
// compute sum of absolute incoming weights
for (int j = 0; j < units[1]; j++) {
for (int i = 0; i < units[0]; i++) {
float wij = get_weights(0).get(j, i);
sum_wj[j] += Math.abs(wij);
}
}
for (int k = 0; k < units[2]; k++) {
for (int j = 0; j < units[1]; j++) {
float wjk = get_weights(1).get(k, j);
sum_wk[k] += Math.abs(wjk);
}
}
// compute importance of input i on output k as product of connecting weights going through j
for (int i = 0; i < units[0]; i++) {
for (int k = 0; k < units[2]; k++) {
for (int j = 0; j < units[1]; j++) {
float wij = get_weights(0).get(j, i);
float wjk = get_weights(1).get(k, j);
//Qik[i][k] += Math.abs(wij)/sum_wj[j] * wjk; //Wong,Gedeon,Taggart '95
Qik[i][k] += Math.abs(wij) / sum_wj[j] * Math.abs(wjk) / sum_wk[k]; //Gedeon '97
}
}
}
      // normalize Qik so that, for each output k, the importances over all inputs sum to 1
for (int k = 0; k < units[2]; k++) {
float sumQk = 0;
for (int i = 0; i < units[0]; i++) sumQk += Qik[i][k];
for (int i = 0; i < units[0]; i++) Qik[i][k] /= sumQk;
}
// importance for feature i is the sum over k of i->k importances
for (int i = 0; i < units[0]; i++) vi[i] = ArrayUtils.sum(Qik[i]);
}
//normalize importances such that max(vi) = 1
ArrayUtils.div(vi, ArrayUtils.maxValue(vi));
// zero out missing categorical variables if they were never seen
if (_saw_missing_cats != null) {
for (int i = 0; i < _saw_missing_cats.length; ++i) {
assert (data_info._catMissing[i]); //have a missing bucket for each categorical
if (!_saw_missing_cats[i]) vi[data_info._catOffsets[i + 1] - 1] = 0;
}
}
return vi;
}
/**
* Compute statistics about this model on all nodes
*/
public void computeStats() {
float[][] rate = get_params()._adaptive_rate ? new float[units.length - 1][] : null;
if (get_params()._autoencoder && get_params()._sparsity_beta > 0) {
for (int k = 0; k < get_params()._hidden.length; k++) {
mean_a[k] = 0;
for (int j = 0; j < avg_activations[k].size(); j++)
mean_a[k] += avg_activations[k].get(j);
mean_a[k] /= avg_activations[k].size();
}
}
for (int y = 0; y < units.length-1; y++) {
mean_rate[y] = rms_rate[y] = 0;
mean_bias[y] = rms_bias[y] = 0;
mean_weight[y] = rms_weight[y] = 0;
for (int u = 0; u < biases[y].size(); u++) {
mean_bias[y] += biases[y].get(u);
}
if (rate != null) rate[y] = new float[get_weights(y).raw().length];
for (int u = 0; u < get_weights(y).raw().length; u++) {
mean_weight[y] += get_weights(y).raw()[u];
if (rate != null) {
// final float RMS_dx = (float)Math.sqrt(ada[y][2*u]+(float)get_params().epsilon);
// final float invRMS_g = (float)(1/Math.sqrt(ada[y][2*u+1]+(float)get_params().epsilon));
final float RMS_dx = MathUtils.approxSqrt(get_ada_dx_g(y).raw()[2 * u] + (float) get_params()._epsilon);
final float invRMS_g = MathUtils.approxInvSqrt(get_ada_dx_g(y).raw()[2 * u + 1] + (float) get_params()._epsilon);
rate[y][u] = RMS_dx * invRMS_g; //not exactly right, RMS_dx should be from the previous time step -> but close enough for diagnostics.
mean_rate[y] += rate[y][u];
}
}
mean_bias[y] /= biases[y].size();
mean_weight[y] /= get_weights(y).size();
if (rate != null) mean_rate[y] /= rate[y].length;
for (int u = 0; u < biases[y].size(); u++) {
final double db = biases[y].get(u) - mean_bias[y];
rms_bias[y] += db * db;
}
for (int u = 0; u < get_weights(y).size(); u++) {
final double dw = get_weights(y).raw()[u] - mean_weight[y];
rms_weight[y] += dw * dw;
if (rate != null) {
final double drate = rate[y][u] - mean_rate[y];
rms_rate[y] += drate * drate;
}
}
rms_bias[y] = MathUtils.approxSqrt(rms_bias[y] / biases[y].size());
rms_weight[y] = MathUtils.approxSqrt(rms_weight[y] / get_weights(y).size());
if (rate != null) rms_rate[y] = MathUtils.approxSqrt(rms_rate[y]/ rate[y].length);
// rms_bias[y] = (float)Math.sqrt(rms_bias[y]/biases[y].length);
// rms_weight[y] = (float)Math.sqrt(rms_weight[y]/weights[y].length);
// if (rate != null) rms_rate[y] = (float)Math.sqrt(rms_rate[y]/rate[y].length);
// Abort the run if weights or biases are unreasonably large (Note that all input values are normalized upfront)
// This can happen with Rectifier units when L1/L2/max_w2 are all set to 0, especially when using more than 1 hidden layer.
final double thresh = 1e10;
final double bthresh = 1e5;
unstable |= isNaN(mean_bias[y]) || isNaN(rms_bias[y])
|| isNaN(mean_weight[y]) || isNaN(rms_weight[y])
// large weights
|| Math.abs(mean_weight[y]) > thresh
|| rms_weight[y] > thresh
// large biases
|| Math.abs(mean_bias[y]) > bthresh
|| rms_bias[y] > bthresh;
}
}
/**
* Unique identifier for this model's state, based on raw numbers
*/
protected long checksum_impl() {
computeStats();
Random rng = new Random(0xDECAFBBB);
double cs = Double.longBitsToDouble(get_params()._seed);
cs += size() * get_processed_total();
for (double d : mean_bias) cs += (rng.nextDouble() * (d+123.23));
for (double d : rms_bias) cs += (rng.nextDouble() * (d+123.23));
for (double d : mean_weight) cs += (rng.nextDouble() * (d+123.23));
for (double d : rms_weight) cs += (rng.nextDouble() * (d+123.23));
for (double d : mean_rate) cs += (rng.nextDouble() * (d+123.23));
for (double d : rms_rate) cs += (rng.nextDouble() * (d+123.23));
return Double.doubleToRawLongBits(cs);
}
/**
* TimeAveraging as part of Elastic Averaging Algorithm
* Cf. equation 6 of arXiv:1412.6651v5
* @param nodeAverageModel current average of per-node models
* @return Time-average of node-averages (consensus model, "the" model)
*/
public static DeepLearningModelInfo timeAverage(DeepLearningModelInfo nodeAverageModel) {
float pa = (float) nodeAverageModel.get_params()._elastic_averaging_moving_rate;
assert(pa > 0 && pa <= 1);
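    // Time-average update (cf. eq. 6 of arXiv:1412.6651): elasticAverage <- (1 - pa) * elasticAverage + pa * nodeAverageModel,
    // implemented below by scaling both models via mult() and summing them via add().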
DeepLearningModelInfo elasticAverage = DKV.getGet(nodeAverageModel.elasticAverageModelInfoKey()); //get latest version from DKV
if (elasticAverage == null || pa == 1) {
elasticAverage = IcedUtils.deepCopy(nodeAverageModel);
} else {
nodeAverageModel.mult(pa);
elasticAverage.mult(1 - pa);
elasticAverage.add(nodeAverageModel); //ignore processed local value set here
elasticAverage.set_processed_global(nodeAverageModel.get_processed_global());
}
elasticAverage.set_processed_local(0);
DKV.put(elasticAverage.elasticAverageModelInfoKey(), elasticAverage);
// nodeAverageModel.computeStats();
// elasticAverage.computeStats();
// Log.info("Local Model :\n" + nodeAverageModel.toString());
// Log.info("Elastic Average:\n" + elasticAverage.toString());
return elasticAverage;
}
public Key localModelInfoKey(H2ONode node) {
return Key.make(_model_id + ".node" + node.index(), Key.HIDDEN_USER_KEY, true, node);
}
public Key elasticAverageModelInfoKey() {
return Key.make(_model_id + ".elasticaverage", Key.HIDDEN_USER_KEY, true, H2O.CLOUD._memary[0]);
}
static public class GradientCheck {
GradientCheck(int l, int r, int c) { layer=l; row=r; col=c; gradient=0;}
int layer;
int row;
int col;
double gradient;
void apply(int l, int r, int c, double g) {
if (r==row && c==col && l==layer) {
gradient += g;
}
}
}
static public GradientCheck gradientCheck = null;
static public GradientCheck gradientCheckBias = null;
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/deeplearning/DeepLearningMojoWriter.java
|
package hex.deeplearning;
import hex.ModelMojoWriter;
import java.io.IOException;
import static water.H2O.technote;
public class DeepLearningMojoWriter extends ModelMojoWriter<DeepLearningModel,
DeepLearningModel.DeepLearningParameters, DeepLearningModel.DeepLearningModelOutput> {
@SuppressWarnings("unused")
public DeepLearningMojoWriter() {}
private DeepLearningModel.DeepLearningParameters _parms;
private DeepLearningModelInfo _model_info;
private DeepLearningModel.DeepLearningModelOutput _output;
public DeepLearningMojoWriter(DeepLearningModel model) {
super(model);
_parms = model.get_params();
_model_info = model.model_info();
_output = model._output;
if (_model_info.isUnstable()) { // do not generate mojo for unstable model
throw new UnsupportedOperationException(technote(4, "Refusing to create a MOJO for an unstable model."));
}
}
@Override
public String mojoVersion() {
return "1.10";
}
@Override
protected void writeModelData() throws IOException {
writekv("mini_batch_size", _parms._mini_batch_size);
writekv("nums", _model_info.data_info._nums);
writekv("cats", _model_info.data_info._cats);
writekv("cat_offsets", _model_info.data_info._catOffsets);
writekv("norm_mul", _model_info.data_info()._normMul);
writekv("norm_sub", _model_info.data_info()._normSub);
writekv("norm_resp_mul", _model_info.data_info._normRespMul);
writekv("norm_resp_sub", _model_info.data_info._normRespSub);
writekv("use_all_factor_levels", _parms._use_all_factor_levels);
writekv("activation", _parms._activation);
writekv("distribution", _parms._distribution);
boolean imputeMeans=_parms._missing_values_handling.equals(DeepLearningModel.DeepLearningParameters.MissingValuesHandling.MeanImputation);
writekv("mean_imputation", imputeMeans);
if (imputeMeans && _model_info.data_info._cats>0) { // only add this if there are categorical columns
writekv("cat_modes", _model_info.data_info.catNAFill());
}
writekv("neural_network_sizes", _model_info.units); // layer 0 is input, last layer is output
    // keep track of neural network sizes, weights and biases. Layer 0 is the input layer; the last layer is the output layer.
int numberOfWeights = 1+_parms._hidden.length;
double[] all_drop_out_ratios = new double[numberOfWeights];
for (int index = 0; index < numberOfWeights; index++) {
      if (index==_parms._hidden.length) { // output layer has no dropout
all_drop_out_ratios[index]=0.0;
} else {
if (_parms._hidden_dropout_ratios != null) {
all_drop_out_ratios[index]=_parms._hidden_dropout_ratios[index];
} else {
all_drop_out_ratios[index]=0.0;
}
}
//generate hash key to store weights/bias of all layers
writekv("weight_layer"+index, _model_info.get_weights(index).raw());
writekv("bias_layer"+index, _model_info.get_biases(index).raw());
}
writekv("hidden_dropout_ratios", all_drop_out_ratios);
writekv("_genmodel_encoding", model.getGenModelEncoding());
String[] origNames = model._output._origNames;
if (origNames != null) {
int nOrigNames = origNames.length;
writekv("_n_orig_names", nOrigNames);
writeStringArray(origNames, "_orig_names");
}
if (model._output._origDomains != null) {
int nOrigDomainValues = model._output._origDomains.length;
writekv("_n_orig_domain_values", nOrigDomainValues);
for (int i=0; i < nOrigDomainValues; i++) {
String[] currOrigDomain = model._output._origDomains[i];
writekv("_m_orig_domain_values_" + i, currOrigDomain == null ? 0 : currOrigDomain.length);
if (currOrigDomain != null) {
writeStringArray(currOrigDomain, "_orig_domain_values_" + i);
}
}
}
writekv("_orig_projection_array", model._output._orig_projection_array);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/deeplearning/DeepLearningScoringInfo.java
|
package hex.deeplearning;
import hex.ScoringInfo;
import water.AutoBuffer;
/**
* Lightweight DeepLearning scoring history.
*/
public class DeepLearningScoringInfo extends ScoringInfo implements ScoringInfo.HasEpochs, ScoringInfo.HasSamples, ScoringInfo.HasIterations
{
public int iterations;
public double epoch_counter;
public double training_samples;
public long score_training_samples;
public long score_validation_samples;
  public int iterations() { return iterations; }
public double epoch_counter() { return epoch_counter; }
public double training_samples() { return training_samples; }
public long score_training_samples() { return score_training_samples; }
public long score_validation_samples() { return score_validation_samples; }
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/deeplearning/DeepLearningTask.java
|
package hex.deeplearning;
import hex.genmodel.utils.DistributionFamily;
import hex.deeplearning.DeepLearningModel.DeepLearningParameters;
import hex.DataInfo;
import hex.FrameTask;
import water.DKV;
import water.H2O;
import water.IcedUtils;
import water.Key;
import water.util.Log;
import water.util.RandomUtils;
import java.util.Arrays;
import java.util.Random;
public class DeepLearningTask extends FrameTask<DeepLearningTask> {
final private boolean _training;
private DeepLearningModelInfo _localmodel; //per-node state (to be reduced)
private DeepLearningModelInfo _sharedmodel; //input/output
transient Neurons[] _neurons;
transient Random _dropout_rng;
int _chunk_node_count = 1;
/**
* Accessor to the object containing the (final) state of the Deep Learning model
* Should only be queried after calling this.doAll(Frame training)
* @return "The" final model after one Map/Reduce iteration
*/
final public DeepLearningModelInfo model_info() {
assert(_sharedmodel != null);
return _sharedmodel;
}
/**
* The only constructor
* @param jobKey
* @param inputModel Initial model state
* @param fraction Fraction of rows of the training to train with
* @param iteration
*/
public DeepLearningTask(Key jobKey, DeepLearningModelInfo inputModel, float fraction, int iteration){
this(jobKey,inputModel,fraction,iteration,null);
}
public DeepLearningTask(Key jobKey, DeepLearningModelInfo inputModel, float fraction, int iteration, H2O.H2OCountedCompleter cmp){
super(jobKey, inputModel.data_info(),inputModel.get_params()._seed + inputModel.get_processed_global(), iteration, inputModel.get_params()._sparse,cmp);
assert(inputModel.get_processed_local() == 0);
_training=true;
_sharedmodel = inputModel;
// if (model_info().get_params()._elastic_averaging)
// DKV.put(_sharedmodel.elasticAverageModelInfoKey(), _sharedmodel);
_useFraction=fraction;
_shuffle = model_info().get_params()._shuffle_training_data;
}
/**
* Transfer ownership from global (shared) model to local model which will be worked on
*/
@Override protected void setupLocal(){
assert(_localmodel == null);
super.setupLocal();
if (model_info().get_params()._elastic_averaging) {
//Load my local model from DKV, to continue training
_localmodel = DKV.getGet(_sharedmodel.localModelInfoKey(H2O.SELF));
if (_localmodel != null) {
if (!Arrays.equals(_localmodel.units, _sharedmodel.units)) {
_localmodel = IcedUtils.deepCopy(_sharedmodel);
} else {
//Make sure that the local model has the right global (shared) parameters after checkpoint restart!
_localmodel.set_params(_sharedmodel.get_params(), _sharedmodel._model_id);
_localmodel.set_processed_global(_sharedmodel.get_processed_global());
}
}
else {
// first time around - use the randomized initial weights and don't spread the shared (random) model
_localmodel = IcedUtils.deepCopy(_sharedmodel);
_sharedmodel = null;
}
} else {
_localmodel = _sharedmodel;
_sharedmodel = null;
}
_localmodel.set_processed_local(0);
}
// Create local workspace (neurons) and link them to shared weights
@Override protected boolean chunkInit(){
if (_localmodel.get_processed_local() >= _useFraction * _fr.numRows())
return false;
_neurons = makeNeuronsForTraining(_localmodel);
_dropout_rng = RandomUtils.getRNG(System.currentTimeMillis());
return true;
}
/**
* Process one training row at a time (online learning)
* @param seed Seed is only used if reproducible mode is enabled
* @param r Row (must be dense for now)
* @param mb mini-batch internal index
*/
@Override public final void processRow(long seed, DataInfo.Row r, int mb) {
if (_localmodel.get_params()._reproducible) {
seed += _localmodel.get_processed_global(); //avoid periodicity
} else {
seed = _dropout_rng.nextLong(); // non-reproducible case - make a fast & good random number
}
_localmodel.checkMissingCats(r.binIds);
((Neurons.Input) _neurons[0]).setInput(seed, r.isSparse() ? r.numIds : null, r.numVals, r.nBins, r.binIds, mb);
}
/**
* Apply the gradient to update the weights
* @param seed
* @param responses
* @param offsets
* @param n number of trained examples in this last mini batch (usually == mini_batch_size, but can be less)
*/
@Override public void processMiniBatch(long seed, double[] responses, double[] offsets, int n) {
assert(_training);
if (_localmodel.get_params()._reproducible) {
seed += _localmodel.get_processed_global(); //avoid periodicity
} else {
seed = _dropout_rng.nextLong(); // non-reproducible case - make a fast & good random number
}
fpropMiniBatch(seed, _neurons, _localmodel, _localmodel.get_params()._elastic_averaging ? _sharedmodel : null, _training, responses, offsets, n);
bpropMiniBatch(_neurons, n);
}
/**
* Helper to apply back-propagation without clearing out the gradients afterwards
* Used for gradient checking
* @param neurons
* @param n number of trained examples in this last mini batch (usually == mini_batch_size, but can be less)
*/
static public void bpropMiniBatch(Neurons[] neurons, int n) {
neurons[neurons.length - 1].bpropOutputLayer(n);
for (int i = neurons.length - 2; i > 0; --i)
neurons[i].bprop(n);
for (int mb=0;mb<n;++mb) {
// all errors are reset to 0
for (int i = 0; i<neurons.length ;++i) {
Storage.DenseVector e = neurons[i]._e == null ? null : neurons[i]._e[mb];
if (e==null) continue;
Arrays.fill(e.raw(), 0);
}
}
}
@Override
protected int getMiniBatchSize() {
return _localmodel.get_params()._mini_batch_size;
}
/**
* After each chunk, add the number of processed rows to the counter
* @param n Number of processed rows
*/
@Override protected void chunkDone(long n) {
if (_training) _localmodel.add_processed_local(n);
}
/**
* After all maps are done on a node, this is called to store the per-node model into DKV (for elastic averaging)
* Otherwise, do nothing.
*/
@Override protected void closeLocal() {
if (_localmodel.get_params()._elastic_averaging) {
// store local model, as it will be reduced in the following, and hence averaged with other models
DKV.put(_localmodel.localModelInfoKey(H2O.SELF), _localmodel, _fs);
}
_sharedmodel = null; //avoid serialization overhead
}
/**
* Average the per-node models (for elastic averaging, already wrote them to DKV in postLocal())
* This is a no-op between F/J worker threads (operate on the same weights/biases)
* @param other
*/
@Override public void reduce(DeepLearningTask other){
if (_localmodel != null && other._localmodel != null && other._localmodel.get_processed_local() > 0 //other DLTask was active (its model_info should be used for averaging)
&& other._localmodel != _localmodel) //other DLTask worked on a different model_info
{
// avoid adding remote model info to unprocessed local data, still random
// (this can happen if we have no chunks on the master node)
if (_localmodel.get_processed_local() == 0) {
_localmodel = other._localmodel;
_chunk_node_count = other._chunk_node_count;
} else {
_localmodel.add(other._localmodel);
_chunk_node_count += other._chunk_node_count;
}
if (other._localmodel.isUnstable()) _localmodel.setUnstable();
}
}
static long _lastWarn;
static long _warnCount;
/**
* After all reduces are done, the driver node calls this method to clean up
* This is only needed if we're not inside a DeepLearningTask2 (which will do the reduction between replicated data workers).
* So if replication is disabled, and every node works on partial data, then we have work to do here (model averaging).
*/
@Override protected void postGlobal(){
DeepLearningParameters dlp = _localmodel.get_params();
if (H2O.CLOUD.size() > 1 && !dlp._replicate_training_data) {
long now = System.currentTimeMillis();
if (_chunk_node_count < H2O.CLOUD.size() && (now - _lastWarn > 5000) && _warnCount < 3) {
// Log.info("Synchronizing across " + _chunk_node_count + " H2O node(s).");
Log.warn(H2O.CLOUD.size() - _chunk_node_count + " node(s) (out of " + H2O.CLOUD.size()
+ ") are not contributing to model updates. Consider setting replicate_training_data to true or using a larger training dataset (or fewer H2O nodes).");
_lastWarn = now;
_warnCount++;
}
}
// Check that we're not inside a DeepLearningTask2
assert ((!dlp._replicate_training_data || H2O.CLOUD.size() == 1) == !_run_local);
if (!_run_local) {
_localmodel.add_processed_global(_localmodel.get_processed_local()); //move local sample counts to global ones
_localmodel.set_processed_local(0l);
// model averaging
if (_chunk_node_count > 1)
_localmodel.div(_chunk_node_count);
if (_localmodel.get_params()._elastic_averaging)
_sharedmodel = DeepLearningModelInfo.timeAverage(_localmodel);
} else {
//Get ready for reduction in DeepLearningTask2
//Just swap the local and global models
_sharedmodel = _localmodel;
}
if (_sharedmodel == null)
_sharedmodel = _localmodel;
_localmodel = null;
}
public static Neurons[] makeNeuronsForTraining(final DeepLearningModelInfo minfo) {
return makeNeurons(minfo, true);
}
public static Neurons[] makeNeuronsForTesting(final DeepLearningModelInfo minfo) {
return makeNeurons(minfo, false);
}
// Helper
private static Neurons[] makeNeurons(final DeepLearningModelInfo minfo, boolean training) {
DataInfo dinfo = minfo.data_info();
final DeepLearningParameters params = minfo.get_params();
final int[] h = params._hidden;
Neurons[] neurons = new Neurons[h.length + 2]; // input + hidden + output
// input
neurons[0] = new Neurons.Input(params, minfo.units[0], dinfo);
// hidden
for( int i = 0; i < h.length + (params._autoencoder ? 1 : 0); i++ ) {
int n = params._autoencoder && i == h.length ? minfo.units[0] : h[i];
switch( params._activation ) {
case Tanh:
neurons[i+1] = new Neurons.Tanh(n);
break;
case TanhWithDropout:
neurons[i+1] = params._autoencoder && i == h.length ? new Neurons.Tanh(n) : new Neurons.TanhDropout(n);
break;
case Rectifier:
neurons[i+1] = new Neurons.Rectifier(n);
break;
case RectifierWithDropout:
neurons[i+1] = params._autoencoder && i == h.length ? new Neurons.Rectifier(n) : new Neurons.RectifierDropout(n);
break;
case Maxout:
neurons[i+1] = new Neurons.Maxout(params,(short)2,n);
break;
case MaxoutWithDropout:
neurons[i+1] = params._autoencoder && i == h.length ? new Neurons.Maxout(params,(short)2,n) : new Neurons.MaxoutDropout(params,(short)2,n);
break;
case ExpRectifier:
neurons[i+1] = new Neurons.ExpRectifier(n);
break;
case ExpRectifierWithDropout:
neurons[i+1] = params._autoencoder && i == h.length ? new Neurons.ExpRectifier(n) : new Neurons.ExpRectifierDropout(n);
break;
}
}
if(!params._autoencoder) {
if (minfo._classification && minfo.get_params()._distribution != DistributionFamily.modified_huber)
neurons[neurons.length - 1] = new Neurons.Softmax(minfo.units[minfo.units.length - 1]);
else
neurons[neurons.length - 1] = new Neurons.Linear();
}
//copy parameters from NN, and set previous/input layer links
for( int i = 0; i < neurons.length; i++ ) {
neurons[i].init(neurons, i, params, minfo, training);
neurons[i]._input = neurons[0];
}
// // debugging
// for (Neurons n : neurons) Log.info(n.toString());
return neurons;
}
/**
* Forward propagation
* assumption: layer 0 has _a filled with (horizontalized categoricals) double values
* @param seed
* @param neurons
* @param minfo
* @param consensus_minfo
* @param training
* @param n Number of actually trained samples in this mini-batch
*/
public static void fpropMiniBatch(long seed, Neurons[] neurons, DeepLearningModelInfo minfo,
DeepLearningModelInfo consensus_minfo, boolean training, double[] responses, double[] offset, int n) {
// Forward propagation
for (int i=1; i<neurons.length; ++i)
neurons[i].fprop(seed, training, n);
// Add offset (in link space) if applicable
for (int mb=0;mb<n;++mb) {
if (offset!=null && offset[mb] > 0) {
assert (!minfo._classification); // Regression
double[] m = minfo.data_info()._normRespMul;
double[] s = minfo.data_info()._normRespSub;
double mul = m == null ? 1 : m[0];
double sub = s == null ? 0 : s[0];
neurons[neurons.length - 1]._a[mb].add(0, ((offset[mb] - sub) * mul));
}
if (training) {
// Compute the gradient at the output layer
// auto-encoder: pass a dummy "response" (ignored)
// otherwise: class label or regression target
neurons[neurons.length - 1].setOutputLayerGradient(responses[mb], mb, n);
// Elastic Averaging - set up helpers needed during back-propagation
if (consensus_minfo != null) {
for (int i = 1; i < neurons.length; i++) {
neurons[i]._wEA = consensus_minfo.get_weights(i - 1);
neurons[i]._bEA = consensus_minfo.get_biases(i - 1);
}
}
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/deeplearning/DeepLearningTask2.java
|
package hex.deeplearning;
import water.Key;
import water.MRTask;
import water.fvec.Frame;
/**
* DRemoteTask-based Deep Learning.
* Every node has access to all the training data which leads to optimal CPU utilization and training accuracy IFF the data fits on every node.
*/
public class DeepLearningTask2 extends MRTask<DeepLearningTask2> {
/**
* Construct a DeepLearningTask2 where every node trains on the entire training dataset
* @param jobKey Job ID
* @param train Frame containing training data
* @param model_info Initial DeepLearningModelInfo (weights + biases)
* @param sync_fraction Fraction of the training data to use for one SGD iteration
*/
public DeepLearningTask2(Key jobKey, Frame train, DeepLearningModelInfo model_info, float sync_fraction, int iteration) {
assert(sync_fraction > 0);
_jobKey = jobKey;
_fr = train;
_sharedmodel = model_info;
_sync_fraction = sync_fraction;
_iteration = iteration;
}
/**
* Returns the aggregated DeepLearning model that was trained by all nodes (over all the training data)
* @return model_info object
*/
public DeepLearningModelInfo model_info() { return _sharedmodel; }
final private Key _jobKey;
final private Frame _fr;
private DeepLearningModelInfo _sharedmodel;
final private float _sync_fraction;
private DeepLearningTask _res;
private final int _iteration;
/**
* Do the local computation: Perform one DeepLearningTask (with run_local=true) iteration.
* Pass over all the data (will be replicated in dfork() here), and use _sync_fraction random rows.
* This calls DeepLearningTask's reduce() between worker threads that update the same local model_info via Hogwild!
* Once the computation is done, reduce() will be called
*/
@Override
public void setupLocal() {
super.setupLocal();
_res = new DeepLearningTask(_jobKey, _sharedmodel, _sync_fraction, _iteration, this);
addToPendingCount(1);
_res.dfork(null, _fr, true /*run_local*/);
}
/**
   * Reduce between worker nodes, with network traffic (when there is more than one node)
* After all reduce()'s are done, postGlobal() will be called
* @param drt task to reduce
*/
@Override
public void reduce(DeepLearningTask2 drt) {
if (_res == null) _res = drt._res;
else {
_res._chunk_node_count += drt._res._chunk_node_count;
_res.model_info().add(drt._res.model_info()); //add models, but don't average yet
}
assert(_res.model_info().get_params()._replicate_training_data);
}
/**
* Finish up the work after all nodes have reduced their models via the above reduce() method.
* All we do is average the models and add to the global training sample counter.
* After this returns, model_info() can be queried for the updated model.
*/
@Override
protected void postGlobal() {
assert(_res.model_info().get_params()._replicate_training_data);
super.postGlobal();
// model averaging (DeepLearningTask only computed the per-node models, each on all the data)
_res.model_info().div(_res._chunk_node_count);
_res.model_info().add_processed_global(_res.model_info().get_processed_local()); //switch from local counters to global counters
_res.model_info().set_processed_local(0l);
DeepLearningModelInfo nodeAverageModel = _res.model_info();
if (nodeAverageModel.get_params()._elastic_averaging)
_sharedmodel = DeepLearningModelInfo.timeAverage(nodeAverageModel);
else
_sharedmodel = nodeAverageModel;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/deeplearning/DeepSHAPContributionsWithBackground.java
|
package hex.deeplearning;
import hex.ContributionsWithBackgroundFrameTask;
import hex.DataInfo;
import water.H2O;
import water.Key;
import water.MemoryManager;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.util.ArrayUtils;
import water.util.fp.Function;
import java.util.Arrays;
class DeepSHAPContributionsWithBackground extends ContributionsWithBackgroundFrameTask<DeepSHAPContributionsWithBackground> {
private final DeepLearningModel deepLearningModel;
transient Function<Double, Double> _activation;
transient Function<Double, Double> _activationDiff;
final int[] _origIndices;
int _hiddenLayerMultiplier;
final boolean _outputSpace;
public DeepSHAPContributionsWithBackground(DeepLearningModel deepLearningModel, Key<Frame> frKey, Key<Frame> backgroundFrameKey, boolean perReference, int[] origIndices, boolean outputSpace) {
super(frKey, backgroundFrameKey, perReference);
this.deepLearningModel = deepLearningModel;
_origIndices = origIndices;
_outputSpace = outputSpace;
}
@Override
protected void setupLocal() {
super.setupLocal();
switch (deepLearningModel._parms._activation) {
case Tanh:
case TanhWithDropout:
_activation = this::tanhActivation;
// differentials are used only in cases when delta_y/delta_x could be numerically unstable (abs(delta_x)<1e-6)
_activationDiff = this::tanhActivationDiff;
_hiddenLayerMultiplier = 1;
break;
case Rectifier:
case RectifierWithDropout:
_activation = this::rectifierActivation;
_activationDiff = this::rectifierActivationDiff;
_hiddenLayerMultiplier = 1;
break;
case Maxout: // will use different logic
case MaxoutWithDropout:
_activation = this::identity;
_activationDiff = this::identity;
_hiddenLayerMultiplier = 2;
break;
default:
        // All activations currently supported for training are supported in DeepSHAP. This is here just in case
        // somebody adds a new activation function or finishes the ExpRectifier that is partially implemented.
H2O.unimpl("Activation " + deepLearningModel._parms._activation + " is not supported in DeepSHAP.");
}
}
protected double identity(double v) {
return v;
}
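  // Activation forms used below: tanh(v) = 1 - 2/(1 + e^(2v)) with derivative 1 - tanh(v)^2,
  // and Rectifier(v) = max(v, 0) = 0.5 * (v + |v|) with derivative 1 for v > 0, else 0.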
protected double tanhActivation(double v) {
return 1 - 2.0 / (1 + Math.exp(2. * v));
}
protected double tanhActivationDiff(double v) {
return 1 - Math.pow(1 - 2.0 / (1 + Math.exp(2. * v)), 2);
}
protected double rectifierActivation(double v) {
return 0.5 * (v + Math.abs(v));
}
protected double rectifierActivationDiff(double v) {
return v > 0 ? 1 : 0;
}
protected double div(double a, double b) {
if (Math.abs(b) < 1e-10) return 0;
return a / b;
}
protected double linearPred(Storage.DenseRowMatrix weights, Storage.DenseVector bias, double[] input, int index, boolean outputLayer) {
double tmp = bias.get(index);
if (outputLayer) {
for (int i = 0; i < input.length; i++) {
tmp += weights.get(index, i) * input[i];
}
} else {
for (int i = 0; i < input.length; i++) {
tmp += getWeight(weights, index, i) * input[i];
}
}
return tmp;
}
protected void softMax(double[] x) {
final double max = ArrayUtils.maxValue(x);
double scaling = 0;
for (int i = 0; i < x.length; i++) {
x[i] = Math.exp(x[i] - max);
scaling += x[i];
}
for (int i = 0; i < x.length; i++) {
x[i] /= scaling;
}
}
protected float getWeight(Storage.DenseRowMatrix w, int row, int col) {
if (_hiddenLayerMultiplier != 1) {
assert _hiddenLayerMultiplier == 2;
return w.raw()[2 * (row / 2 * w.cols() + col) + row % 2];
}
return w.get(row, col);
}
protected void forwardPass(DataInfo.Row row, double[][] forwardPassActivations) {
// Zero out the activations
for (int i = 0; i < forwardPassActivations.length; i++) {
Arrays.fill(forwardPassActivations[i], 0);
}
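    // Layout: forwardPassActivations[2*i] holds the linear pre-activations of layer i (2x wider for Maxout),
    // forwardPassActivations[2*i + 1] holds its outputs after activation (for the output layer: after the
    // inverse link / softmax transform).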
// Go through the network
// input layers
Storage.DenseRowMatrix w = deepLearningModel.model_info().get_weights(0);
Storage.DenseVector b = deepLearningModel.model_info().get_biases(0);
for (int l = 0; l < w.rows(); l++) {
for (int m = 0; m < w.cols(); m++) {
forwardPassActivations[0][l] += row.get(m) * getWeight(w, l, m);
}
forwardPassActivations[0][l] += b.get(l);
}
for (int l = 0; l < forwardPassActivations[1].length; l++) {
if (_hiddenLayerMultiplier == 1) { // not maxout
forwardPassActivations[1][l] = _activation.apply(forwardPassActivations[0][l]);
} else {
forwardPassActivations[1][l] = Math.max(forwardPassActivations[0][2 * l], forwardPassActivations[0][2 * l + 1]);
}
if (null != deepLearningModel.model_info().get_params()._hidden_dropout_ratios)
forwardPassActivations[1][l] *= 1 - deepLearningModel.model_info().get_params()._hidden_dropout_ratios[0];
}
// hidden layers
for (int i = 1; i < deepLearningModel._parms._hidden.length; i++) {
w = deepLearningModel.model_info().get_weights(i);
b = deepLearningModel.model_info().get_biases(i);
for (int l = 0; l < w.rows(); l++) {
forwardPassActivations[2 * i][l] = linearPred(w, b, forwardPassActivations[2 * i - 1], l, false);
}
for (int l = 0; l < forwardPassActivations[2 * i + 1].length; l++) {
if (_hiddenLayerMultiplier == 1) { // not maxout
forwardPassActivations[2 * i + 1][l] = _activation.apply(forwardPassActivations[2 * i][l]);
} else {
forwardPassActivations[2 * i + 1][l] = Math.max(forwardPassActivations[2 * i][2 * l], forwardPassActivations[2 * i][2 * l + 1]);
}
if (null != deepLearningModel.model_info().get_params()._hidden_dropout_ratios)
forwardPassActivations[2 * i + 1][l] *= 1 - deepLearningModel.model_info().get_params()._hidden_dropout_ratios[i];
}
}
// output layer
final int i = deepLearningModel._parms._hidden.length;
w = deepLearningModel.model_info().get_weights(i);
b = deepLearningModel.model_info().get_biases(i);
for (int l = 0; l < w.rows(); l++) {
forwardPassActivations[2 * i][l] = linearPred(w, b, forwardPassActivations[2 * i - 1], l, true);
forwardPassActivations[2 * i + 1][l] = forwardPassActivations[2 * i][l];
if (w.rows() == 1) {
if (deepLearningModel.model_info().data_info()._normRespMul != null)
forwardPassActivations[2 * i + 1][l] = (forwardPassActivations[2 * i + 1][l] / deepLearningModel.model_info().data_info()._normRespMul[0] + deepLearningModel.model_info().data_info()._normRespSub[0]);
// transform prediction to response space
forwardPassActivations[2 * i + 1][l] = deepLearningModel._dist.linkInv(forwardPassActivations[2 * i + 1][l]);
}
}
if (w.rows() == 2) // binomial classification
softMax(forwardPassActivations[2 * i + 1]);
}
protected void maxSHAP(double[] x, double[] bg, float[] contributions, int i, int j) {
    // for the higher-dimensional exact maxSHAP see the supplementary material [0] of "A Unified Approach to Interpreting Model Predictions"
    // by Scott M. Lundberg, Su-In Lee
// [0] currently at https://papers.nips.cc/paper_files/paper/2017/file/8a20a8621978632d76c43dfd28b67767-Supplemental.zip
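    // For two inputs the exact Shapley values have a closed form (implemented below):
    //   phi_i = 0.5 * ((max(x_i, x_j) - max(bg_i, bg_j)) + (max(x_i, bg_j) - max(bg_i, x_j)))
    //   phi_j = 0.5 * ((max(x_i, x_j) - max(bg_i, bg_j)) - (max(x_i, bg_j) - max(bg_i, x_j)))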
final double maxBB = Math.max(bg[i], bg[j]);
final double maxBX = Math.max(bg[i], x[j]);
final double maxXB = Math.max(x[i], bg[j]);
final double maxXX = Math.max(x[i], x[j]);
final double maxXXmBB = maxXX - maxBB;
final double maxXBmBX = maxXB - maxBX;
contributions[0] = (float) (0.5 * (maxXXmBB + maxXBmBX));
contributions[1] = (float) (0.5 * (maxXXmBB - maxXBmBX));
}
protected void linearSHAP(Storage.DenseRowMatrix weights,
double[] contributions, int index) {
for (int i = 0; i < contributions.length; i++) {
// LinearSHAP
// phi = w_i *(x -bg)
// rescale => phi / (x-bg) => w_i
contributions[i] = weights.get(index, i);// * (input[i] - inputBg[i]) / (input[i] - inputBg[i]);
}
}
protected void nonLinearActivationSHAP(Storage.DenseRowMatrix weights,
double[][] forwardPass, double[][] forwardBgPass,
int currLayer, Storage.DenseRowMatrix contributions) {
    // How does this work?
    // ---------------
    // Let act(x) denote a one-dimensional activation function (e.g., tanh, max(x, 0), ...) and lin(X) a linear function.
// For lin(X), we can solve interventional SHAP like this:
// lin(X) = wX + b
// phi_i = w_i * (X_i - BG_i), where BG is background sample
//
    // For a one-dimensional activation function the contribution is easy to compute - it all comes from the single
    // input dimension, so we just choose it to satisfy the "sum to the delta" property:
    // act(x) - act(bg) = phi
//
//
// To calculate the contributions of act(lin(X)) we need to rescale the intermediate values and use the chain rule:
// m_{i} = phi/(X_i - BG_i)
// => m_i(lin) = w_i
// => m_i(act) = (act(x)-act(bg))/(x - bg)
//
// Chain rule:
    // m_i(act(lin(X))) = m_i(lin) * m_i(act) = w_i * ((act(wX+b) - act(wBG+b))/(wX + b - wBG - b)) = w_i * delta_out/delta_in
//
    // MaxOut differs from the other activation functions in that it takes multiple inputs - H2O supports just 2
    // inputs. Exact MaxOut SHAP can be computed faster than the naive approach by sorting and then building a decision
    // tree (exploiting the fact that we know where the maximum lies in the sorted input). But since we support just
    // 2 inputs, the naive approach needs only 4 calls to Math.max and simple arithmetic, so it's probably faster
    // (not benchmarked).
//
// Then we use the chain rule:
// m_i(maxout(lin_a, lin_b)) = sum_j (m_i(maxout)_j * m_i(lin_j))
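    // Illustrative example (one hidden unit, tanh): if the linear pre-activation moves from wBG + b = 0 for the
    // background to wX + b = 1 for the sample, then delta_out/delta_in = (tanh(1) - tanh(0)) / (1 - 0) ~= 0.76,
    // so each input's multiplier for this unit is w_i * 0.76.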
if (_hiddenLayerMultiplier > 1 && forwardPass.length > 2 * currLayer + 2) { // Is MaxOut and not the last layer (last layer is SoftMax here (regression uses linear combination))
final double dropoutRatio = null == deepLearningModel.model_info().get_params()._hidden_dropout_ratios
? 1
: 1 - deepLearningModel.model_info().get_params()._hidden_dropout_ratios[currLayer];
for (int row = 0; row < contributions.rows(); row++) {
final float[] deltaIn = new float[]{
(float) (forwardPass[2 * currLayer][2 * row] - forwardBgPass[2 * currLayer][2 * row]),
(float) (forwardPass[2 * currLayer][2 * row + 1] - forwardBgPass[2 * currLayer][2 * row + 1]),
};
float[] maxOutContr = new float[2];
maxSHAP(forwardPass[2 * currLayer], forwardBgPass[2 * currLayer], maxOutContr, 2 * row, 2 * row + 1);
for (int col = 0; col < contributions.cols(); col++) {
contributions.set(row, col,
(float) (dropoutRatio * (div(getWeight(weights, 2 * row, col) * maxOutContr[0], deltaIn[0]) +
div(getWeight(weights, 2 * row + 1, col) * maxOutContr[1], deltaIn[1])))
);
}
}
} else {
for (int row = 0; row < contributions.rows(); row++) {
final double deltaOut = forwardPass[2 * currLayer + 1][row] - forwardBgPass[2 * currLayer + 1][row];
final double deltaIn = forwardPass[2 * currLayer][row] - forwardBgPass[2 * currLayer][row];
final float ratio = (float) (Math.abs(deltaIn) > 1e-6 ? div(deltaOut, deltaIn) : _activationDiff.apply(forwardPass[2 * currLayer][row]));
for (int col = 0; col < contributions.cols(); col++)
contributions.set(row, col, weights.get(row, col) * ratio);
}
}
}
protected void combineMultiplicators(Storage.DenseRowMatrix m, double[][] contributions, int currentLayer) {
final int prevLayer = currentLayer + 1; // Contains multiplicators with respect to the output
Arrays.fill(contributions[currentLayer], 0);
for (int i = 0; i < m.rows(); i++) {
for (int j = 0; j < m.cols(); j++)
contributions[currentLayer][j] += m.get(i, j) * contributions[prevLayer][i];
}
}
protected void backwardPass(double[][] forwardPass, double[][] forwardBgPass, double[][] backwardPass, DataInfo.Row row, DataInfo.Row bgRow) {
for (int i = 0; i < backwardPass.length; i++) {
Arrays.fill(backwardPass[i], 0);
}
int i = backwardPass.length - 1;
final int backwardPassOffset = _origIndices == null ? 0 : 1;
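    // When _origIndices is set, slot 1 holds the multipliers in the expanded (one-hot encoded) input space and
    // slot 0 accumulates the final contributions mapped back onto the original columns (see the loop at the end).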
final int outputNeuron = deepLearningModel.model_info().get_weights(backwardPass.length - 1 - backwardPassOffset).rows() - 1; // in regression we have one output and in binom. class we care only about P(y==1).
if (outputNeuron == 0) {
float[] outWeight = new float[backwardPass[i].length];
for (int j = 0; j < outWeight.length; j++) {
if (deepLearningModel.model_info().data_info._normRespMul != null) {
outWeight[j] = (float) (deepLearningModel.model_info().get_weights(i - backwardPassOffset).get(outputNeuron, j) / deepLearningModel.model_info().data_info._normRespMul[outputNeuron]);
} else {
outWeight[j] = deepLearningModel.model_info().get_weights(i - backwardPassOffset).get(outputNeuron, j);
}
}
linearSHAP(
new Storage.DenseRowMatrix(outWeight, 1, backwardPass[i].length),
backwardPass[i],
0
);
} else {
Storage.DenseRowMatrix m = new Storage.DenseRowMatrix(2, backwardPass[i].length);
nonLinearActivationSHAP(
deepLearningModel.model_info().get_weights(i - backwardPassOffset),
forwardPass,
forwardBgPass,
i - backwardPassOffset,
m
);
for (int j = 0; j < m.cols(); j++) {
backwardPass[i][j] = m.get(outputNeuron, j);
}
}
for (i = backwardPass.length - 2; i >= backwardPassOffset; i--) {
Storage.DenseRowMatrix m = new Storage.DenseRowMatrix(backwardPass[i + 1].length, backwardPass[i].length);
nonLinearActivationSHAP(
deepLearningModel.model_info().get_weights(i - backwardPassOffset),
forwardPass,
forwardBgPass,
i - backwardPassOffset,
m
);
combineMultiplicators(m, backwardPass, i);
}
if (null != _origIndices) {
Arrays.fill(backwardPass[0], 0.0);
for (i = 0; i < _origIndices.length; i++)
backwardPass[0][_origIndices[i]] += (backwardPass[1][i]) * (row.get(i) - bgRow.get(i));
} else {
for (i = 0; i < backwardPass[0].length; i++)
backwardPass[0][i] *= (row.get(i) - bgRow.get(i));
}
}
@Override
protected void map(Chunk[] cs, Chunk[] bgCs, NewChunk[] ncs) {
double[][] forwardPass = new double[2 * (deepLearningModel._parms._hidden.length + 1)][];
double[][] forwardBgPass = new double[2 * (deepLearningModel._parms._hidden.length + 1)][];
final int backwardPassOffset = _origIndices == null ? 1 : 2;
double[][] backwardPass = new double[deepLearningModel._parms._hidden.length + backwardPassOffset][];
backwardPass[0] = MemoryManager.malloc8d(ncs.length - 1);
if (backwardPassOffset > 1)
backwardPass[1] = MemoryManager.malloc8d(deepLearningModel.model_info().get_weights(0).cols());
for (int i = 0; i < deepLearningModel._parms._hidden.length; i++) {
forwardPass[2 * i] = MemoryManager.malloc8d(_hiddenLayerMultiplier * deepLearningModel._parms._hidden[i]);
forwardBgPass[2 * i] = MemoryManager.malloc8d(_hiddenLayerMultiplier * deepLearningModel._parms._hidden[i]);
forwardPass[2 * i + 1] = MemoryManager.malloc8d(deepLearningModel._parms._hidden[i]);
forwardBgPass[2 * i + 1] = MemoryManager.malloc8d(deepLearningModel._parms._hidden[i]);
backwardPass[i + backwardPassOffset] = MemoryManager.malloc8d(deepLearningModel._parms._hidden[i]);
}
forwardPass[2 * deepLearningModel._parms._hidden.length] = new double[deepLearningModel.model_info().get_weights(deepLearningModel._parms._hidden.length).rows()];
forwardBgPass[2 * deepLearningModel._parms._hidden.length] = new double[deepLearningModel.model_info().get_weights(deepLearningModel._parms._hidden.length).rows()];
forwardPass[2 * deepLearningModel._parms._hidden.length + 1] = new double[deepLearningModel.model_info().get_weights(deepLearningModel._parms._hidden.length).rows()];
forwardBgPass[2 * deepLearningModel._parms._hidden.length + 1] = new double[deepLearningModel.model_info().get_weights(deepLearningModel._parms._hidden.length).rows()];
DataInfo.Row row = deepLearningModel.model_info().data_info.newDenseRow();
DataInfo.Row bgRow = deepLearningModel.model_info().data_info.newDenseRow();
for (int j = 0; j < cs[0]._len; j++) {
deepLearningModel.model_info().data_info.extractDenseRow(cs, j, row);
forwardPass(row, forwardPass);
for (int k = 0; k < bgCs[0]._len; k++) {
deepLearningModel.model_info().data_info.extractDenseRow(bgCs, k, bgRow);
forwardPass(bgRow, forwardBgPass);
ncs[ncs.length - 1].addNum(forwardBgPass[forwardBgPass.length - 1][forwardBgPass[forwardBgPass.length - 1].length - 1]);
backwardPass(forwardPass, forwardBgPass, backwardPass, row, bgRow);
final double multiplier = _outputSpace && forwardPass[forwardPass.length - 1].length == 1
? div((forwardPass[forwardPass.length - 1][0] - forwardBgPass[forwardBgPass.length - 1][0]), Arrays.stream(backwardPass[0]).sum())
: 1;
for (int i = 0; i < backwardPass[0].length; i++) {
ncs[i].addNum(multiplier * backwardPass[0][i]);
}
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/deeplearning/Dropout.java
|
package hex.deeplearning;
import water.util.RandomUtils;
import java.util.Arrays;
import java.util.Random;
/**
* Helper class for dropout training of Neural Nets
*/
public class Dropout {
private transient Random _rand;
private transient byte[] _bits;
private transient double _rate;
public byte[] bits() { return _bits; }
// public Dropout() {
// _rate = 0.5;
// }
@Override
public String toString() {
String s = "Dropout: " + super.toString();
s += "\nRandom: " + _rand.toString();
s += "\nDropout rate: " + _rate;
s += "\nbits: ";
for (int i=0; i< _bits.length*8; ++i) s += unit_active(i) ? "1":"0";
s += "\n";
return s;
}
Dropout(int units) {
_bits = new byte[(units+7)/8];
_rand = RandomUtils.getRNG(0);
_rate = 0.5;
}
Dropout(int units, double rate) {
this(units);
_rate = rate;
}
public void randomlySparsifyActivation(Storage.Vector a, long seed) {
if (a instanceof Storage.DenseVector)
randomlySparsifyActivation((Storage.DenseVector) a, seed);
else throw new UnsupportedOperationException("randomlySparsifyActivation not implemented for this type: " + a.getClass().getSimpleName());
}
// for input layer
private void randomlySparsifyActivation(Storage.DenseVector a, long seed) {
if (_rate == 0) return;
setSeed(seed);
for( int i = 0; i < a.size(); i++ )
if (_rand.nextFloat() < _rate) a.set(i, 0);
}
// for hidden layers
public void fillBytes(long seed) {
setSeed(seed);
if (_rate == 0.5) _rand.nextBytes(_bits);
else {
Arrays.fill(_bits, (byte)0);
for (int i=0;i<_bits.length*8;++i)
if (_rand.nextFloat() > _rate) _bits[i / 8] |= 1 << (i % 8);
}
}
public boolean unit_active(int o) {
return (_bits[o / 8] & (1 << (o % 8))) != 0;
}
private void setSeed(long seed) {
if ((seed >>> 32) < 0x0000ffffL) seed |= 0x5b93000000000000L;
if (((seed << 32) >>> 32) < 0x0000ffffL) seed |= 0xdb910000L;
_rand.setSeed(seed);
}
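// Illustrative usage sketch, not part of the original H2O source: draw one dropout mask for a
// hypothetical layer of 16 hidden units at a 30% dropout rate and count how many units stay active.
static int exampleCountActiveUnits(long seed) {
  Dropout d = new Dropout(16, 0.3);   // package-private constructor defined above
  d.fillBytes(seed);                  // fill the per-unit bit mask for this forward pass
  int active = 0;
  for (int i = 0; i < 16; ++i)
    if (d.unit_active(i)) active++;
  return active;
}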
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/deeplearning/MurmurHash.java
|
package hex.deeplearning;
// Copied from:
// https://github.com/apache/hadoop-common/blob/HADOOP-3628/src/core/org/apache/hadoop/util/hash/MurmurHash.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This is a very fast, non-cryptographic hash suitable for general hash-based
* lookup. See http://murmurhash.googlepages.com/ for more details.
*
* <p>The C version of MurmurHash 2.0 found at that site was ported
* to Java by Andrzej Bialecki (ab at getopt org).</p>
*/
public class MurmurHash {
private static MurmurHash _instance = new MurmurHash();
public static MurmurHash getInstance() {
return _instance;
}
public int hash(byte[] data, int length, int seed) {
int m = 0x5bd1e995;
int r = 24;
int h = seed ^ length;
int len_4 = length >> 2;
for (int i = 0; i < len_4; i++) {
int i_4 = i << 2;
int k = data[i_4 + 3];
k = k << 8;
k = k | (data[i_4 + 2] & 0xff);
k = k << 8;
k = k | (data[i_4 + 1] & 0xff);
k = k << 8;
k = k | (data[i_4 + 0] & 0xff);
k *= m;
k ^= k >>> r;
k *= m;
h *= m;
h ^= k;
}
// avoid calculating modulo
int len_m = len_4 << 2;
int left = length - len_m;
if (left != 0) {
if (left >= 3) {
h ^= (int) data[length - 3] << 16;
}
if (left >= 2) {
h ^= (int) data[length - 2] << 8;
}
if (left >= 1) {
h ^= (int) data[length - 1];
}
h *= m;
}
h ^= h >>> 13;
h *= m;
h ^= h >>> 15;
return h;
}
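// Illustrative usage sketch, not part of the original Hadoop/H2O source: hash an int key into a
// limited number of buckets, the same way the hashed-categoricals trick in Neurons.Input.setInput
// serializes a categorical index into 4 bytes before hashing. 'buckets' is a hypothetical bound.
static int exampleBucket(int categoricalIndex, int seed, int buckets) {
  byte[] buf = java.nio.ByteBuffer.allocate(4).putInt(categoricalIndex).array();
  int hashval = getInstance().hash(buf, 4, seed);
  return Math.abs(hashval % buckets);
}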
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/deeplearning/Neurons.java
|
package hex.deeplearning;
import hex.DataInfo;
import hex.Distribution;
import hex.DistributionFactory;
import hex.deeplearning.DeepLearningModel.DeepLearningParameters;
import water.H2O;
import water.MemoryManager;
import water.util.ArrayUtils;
import water.util.MathUtils;
import java.nio.ByteBuffer;
import java.util.Arrays;
/**
* This class implements the concept of a Neuron layer in a Neural Network
* During training, every MRTask F/J thread is expected to create these neurons for every map call (Cheap to make).
* These Neurons are NOT sent over the wire.
* The weights connecting the neurons are in a separate class (DeepLearningModel.DeepLearningModelInfo), and will be shared per node.
*/
public abstract class Neurons {
short _k; //number of parallel channels
int[/*minibatch*/][/*k*/] _maxIncoming; //index of largest incoming signal (out of k channels)
Distribution _dist;
protected int units;
/**
* Constructor of a Neuron Layer
* @param units How many neurons are in this layer?
*/
Neurons(int units) {
this.units = units;
}
/**
* Print the status of this neuron layer
* @return populated String
*/
@Override
public String toString() {
String s = this.getClass().getSimpleName();
s += "\nNumber of Neurons: " + units;
s += "\nParameters:\n" + params.toString();
if (_dropout != null) s += "\nDropout:\n" + _dropout.toString();
return s;
}
/**
* Parameters (deep-cloned from the user input; can be modified here, e.g. for learning rate decay)
*/
protected transient DeepLearningParameters params;
protected transient int _index; //which hidden layer it is
/**
* Layer state (one per neuron): activity, error
*/
public transient Storage.DenseVector[] _origa;
public transient Storage.DenseVector[] _a;
public transient Storage.DenseVector[] _e;
/**
* References for feed-forward connectivity
*/
public Neurons _previous;
public Neurons _input;
DeepLearningModelInfo _minfo; //reference to shared model info
public Storage.DenseRowMatrix _w;
public Storage.DenseRowMatrix _wEA; //weights for elastic averaging
public Storage.DenseVector _b;
public Storage.DenseVector _bEA; //bias for elastic averaging
/**
* References for momentum training
*/
Storage.DenseRowMatrix _wm;
Storage.DenseVector _bm;
/**
* References for ADADELTA
*/
Storage.DenseRowMatrix _ada_dx_g;
Storage.DenseVector _bias_ada_dx_g;
/**
* For Dropout training
*/
protected Dropout _dropout;
/**
* Helper to shortcut bprop
*/
private boolean _shortcut = false;
public Storage.DenseVector _avg_a;
/**
* Helper to check sanity of Neuron layers
* @param training whether training or testing is done
*/
void sanityCheck(boolean training) {
if (this instanceof Input) {
assert(_previous == null);
} else {
assert(_previous != null);
if (_minfo.has_momenta()) {
assert(_wm != null);
assert(_bm != null);
assert(_ada_dx_g == null);
}
if (_minfo.adaDelta()) {
if (params._rho == 0) throw new IllegalArgumentException("rho must be > 0 if epsilon is >0.");
if (params._epsilon == 0) throw new IllegalArgumentException("epsilon must be > 0 if rho is >0.");
assert(_minfo.adaDelta());
assert(_bias_ada_dx_g != null);
assert(_wm == null);
assert(_bm == null);
}
if (this instanceof MaxoutDropout || this instanceof TanhDropout || this instanceof RectifierDropout) {
assert (!training || _dropout != null);
}
}
}
/**
* Initialization of the parameters and connectivity of a Neuron layer
* @param neurons Array of all neuron layers, to establish feed-forward connectivity
* @param index Which layer am I?
* @param p User-given parameters (Job parental object hierarchy is not used)
* @param minfo Model information (weights/biases and their momenta)
* @param training Whether training is done or just testing (no need for dropout)
*/
public final void init(Neurons[] neurons, int index, DeepLearningParameters p, final DeepLearningModelInfo minfo, boolean training) {
_index = index-1;
params = (DeepLearningParameters)p.clone();
params._hidden_dropout_ratios = minfo.get_params()._hidden_dropout_ratios;
params._rate *= Math.pow(params._rate_decay, index-1);
params._distribution = minfo.get_params()._distribution;
_dist = DistributionFactory.getDistribution(params);
_a = new Storage.DenseVector[params._mini_batch_size];
for (int mb=0;mb<_a.length;++mb) _a[mb] = new Storage.DenseVector(units);
if (!(this instanceof Input)) {
_e = new Storage.DenseVector[params._mini_batch_size];
for (int mb=0;mb<_e.length;++mb) _e[mb] = new Storage.DenseVector(units);
} else if (params._autoencoder && params._input_dropout_ratio > 0) {
_origa = new Storage.DenseVector[params._mini_batch_size];
for (int mb=0;mb<_origa.length;++mb) _origa[mb] = new Storage.DenseVector(units);
}
if (training && (this instanceof MaxoutDropout || this instanceof TanhDropout
|| this instanceof RectifierDropout || this instanceof ExpRectifierDropout || this instanceof Input) ) {
_dropout = this instanceof Input ?
(params._input_dropout_ratio==0 ? null : new Dropout(units, params._input_dropout_ratio)) //input dropout
: new Dropout(units, params._hidden_dropout_ratios[_index]); //hidden dropout
}
if (!(this instanceof Input)) {
_previous = neurons[_index]; //incoming neurons
_minfo = minfo;
_w = minfo.get_weights(_index); //incoming weights
_b = minfo.get_biases(_index); //bias for this layer (starting at hidden layer)
if(params._autoencoder && params._sparsity_beta > 0 && _index < params._hidden.length) {
_avg_a = minfo.get_avg_activations(_index);
}
if (minfo.has_momenta()) {
_wm = minfo.get_weights_momenta(_index); //incoming weights
_bm = minfo.get_biases_momenta(_index); //bias for this layer (starting at hidden layer)
}
if (minfo.adaDelta()) {
_ada_dx_g = minfo.get_ada_dx_g(_index);
_bias_ada_dx_g = minfo.get_biases_ada_dx_g(_index);
}
_shortcut = (params._fast_mode || (
// not doing fast mode, but also don't have anything else to update (neither momentum nor ADADELTA history), and no L1/L2
!params._adaptive_rate && !_minfo.has_momenta() && params._l1 == 0.0 && params._l2 == 0.0));
}
sanityCheck(training);
}
/**
* Forward propagation
* @param seed For seeding the RNG inside (for dropout)
* @param training Whether training is done or just testing (no need for dropout)
* @param n number of actually trained samples in this mini-batch
*/
protected abstract void fprop(long seed, boolean training, int n);
/**
* Back propagation of error terms stored in _e (for non-final layers)
*/
protected abstract void bprop(int n);
/**
* Back-propagate gradient in output layer
*/
final protected void bpropOutputLayer(int n) {
assert(_index == params._hidden.length);
assert(_a.length == params._mini_batch_size);
final int rows = _a[0].size();
float m = _minfo.adaDelta() ? 0 : momentum();
float r = _minfo.adaDelta() ? 0 : rate(_minfo.get_processed_total()) * (1f - m);
for( int row = 0; row < rows; row++ ) {
double[] g = new double[n];
for (int mb=0;mb<n;++mb)
g[mb]=_e[mb].raw()[row];
bprop(row, g, r, m, n);
}
}
/**
* Accumulation of reconstruction errors for a generic Neurons class
* (This is only used for AutoEncoders)
*/
protected void setOutputLayerGradient(double ignored, int mb, int n) {
assert (_minfo.get_params()._autoencoder && _index == _minfo.get_params()._hidden.length);
final int rows = _a[mb].size();
for (int row = 0; row < rows; row++)
_e[mb].set(row, autoEncoderGradient(row, mb)/n);
}
/**
* Backpropagation: w -= rate * dE/dw, where dE/dw = dE/dy * dy/dnet * dnet/dw
* This method adds the dnet/dw = activation term per unit
* @param row row index (update weights feeding to this neuron)
* @param partial_grad partial derivative dE/dnet = dE/dy * dy/net
* @param rate learning rate
* @param momentum momentum factor (needed only if ADADELTA isn't used)
* @param n Actual mini-batch size
*/
final void bprop(final int row, final double[/*actual mini-batch size*/] partial_grad, final float rate, final float momentum, int n) {
final float rho = (float)params._rho;
final float eps = (float)params._epsilon;
final float l1 = (float)params._l1;
final float l2 = (float)params._l2;
final float max_w2 = params._max_w2;
final boolean have_momenta = _minfo.has_momenta();
final boolean have_ada = _minfo.adaDelta();
final boolean nesterov = params._nesterov_accelerated_gradient;
final boolean fast_mode = params._fast_mode;
final int cols = _previous._a[0].size();
assert(partial_grad.length == n);
double avg_grad2 = 0;
final int idx = row * cols;
for( int mb = 0; mb < n; mb++ ) {
if (_shortcut && partial_grad[mb] == 0f) return;
final boolean update_prev = _previous._e != null && _previous._e[mb] != null;
for( int col = 0; col < cols; col++ ) {
int w = idx + col;
// for Maxout, use the "winning" channel's linear index into the weight matrix
if (_k != 0) w = _k * w + _maxIncoming[mb][row];
final double weight = _w.raw()[w];
if( update_prev ) _previous._e[mb].add(col, partial_grad[mb] * weight); // propagate the error dE/dnet to the previous layer, via connecting weights
final double previous_a = _previous._a[mb].get(col);
if (fast_mode && previous_a == 0) continue;
//this is the actual gradient dE/dw
double grad = partial_grad[mb] * previous_a + Math.signum(weight) * l1 + weight * l2;
if (_wEA !=null) {
grad += params._elastic_averaging_regularization * (_w.raw()[w] -_wEA.raw()[w]);
// Log.info("weight: my: " + _w.raw()[w] + ", consensus: " + _wEA.raw()[w] + ", delta: " + (_w.raw()[w] -_wEA.raw()[w]) + ", relative delta: " + (_w.raw()[w] -_wEA.raw()[w])/_w.raw()[w]);
}
// store the gradient
if (DeepLearningModelInfo.gradientCheck != null)
DeepLearningModelInfo.gradientCheck.apply(_index, row, col, grad);
if (have_ada) {
final double grad2 = grad*grad;
avg_grad2 += grad2;
float brate = computeAdaDeltaRateForWeight(grad, w, _ada_dx_g, rho, eps);
_w.raw()[w] -= brate * grad;
} else {
if (!nesterov) {
final double delta = -rate * grad;
_w.raw()[w] += delta;
if( have_momenta ) {
_w.raw()[w] += momentum * _wm.raw()[w];
_wm.raw()[w] = (float)delta;
}
} else {
double tmp = -grad;
if( have_momenta ) {
_wm.raw()[w] *= momentum;
_wm.raw()[w] += tmp;
tmp = _wm.raw()[w];
}
_w.raw()[w] += rate * tmp;
}
}
}
}
if (max_w2 != Float.POSITIVE_INFINITY)
for( int mb = 0; mb < n; mb++ )
rescale_weights(_w, row, max_w2, mb);
if (have_ada) avg_grad2 /= cols * n;
for( int mb = 0; mb < n; mb++ ) {
update_bias(_b, _bEA, _bm, row, partial_grad, avg_grad2, rate, momentum, mb);
}
}
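// Illustrative sketch, not part of the original H2O source: the plain SGD update for a single
// weight, matching the gradient assembled in bprop above (no momentum, no AdaDelta, no Nesterov):
// dE/dw = dE/dnet * previous_activation + l1 * sign(w) + l2 * w, then w -= rate * dE/dw.
static double exampleSgdWeightUpdate(double w, double partialGrad, double previousActivation,
                                     double rate, double l1, double l2) {
  final double grad = partialGrad * previousActivation + Math.signum(w) * l1 + w * l2;
  return w - rate * grad;
}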
private void rescale_weights(final Storage.DenseRowMatrix w, final int row, final float max_w2, int mb) {
final int cols = _previous._a[0].size();
int start;
int end;
if (_k != 0) {
start = _k * (row*cols ) + _maxIncoming[mb][row];
end = _k * (row*cols + (cols-1)) + _maxIncoming[mb][row];
} else {
if (mb>0) return; //already done rescaling for mb=0
start = row * cols;
end = row * cols + cols;
}
float r2 = MathUtils.sumSquares(w.raw(), start, end);
// float r2 = MathUtils.approxSumSquares(w.raw(), idx, idx + cols);
if( r2 > max_w2) {
final float scale = MathUtils.approxSqrt(max_w2 / r2);
for( int c = start; c < end; c++ )
w.raw()[c] *= scale;
}
}
/**
* Helper to compute the reconstruction error for auto-encoders (part of the gradient computation)
* @param row neuron index
* @param mb minibatch-internal index
* @return difference between the output (auto-encoder output layer activation) and the target (input layer activation)
*/
protected double autoEncoderGradient(int row, int mb) {
assert (_minfo.get_params()._autoencoder && _index == _minfo.get_params()._hidden.length);
final double t = _input._origa != null ? _input._origa[mb].get(row) : _input._a[mb].get(row);
final double y = _a[mb].get(row);
return -2*_dist.negHalfGradient(t, y);
}
/**
* Compute learning rate with AdaDelta, specialized for DenseRowMatrix
* http://www.matthewzeiler.com/pubs/googleTR2012/googleTR2012.pdf
* @param grad gradient
* @param w neuron index
* @param ada_dx_g Matrix holding helper values (2 floats per weight)
* @param rho hyper-parameter #1
* @param eps hyper-parameter #2
* @return learning rate
*/
private static float computeAdaDeltaRateForWeight(final double grad, final int w,
final Storage.DenseRowMatrix ada_dx_g,
final float rho, final float eps) {
final double grad2 = grad*grad;
ada_dx_g.raw()[2*w+1] = (float)(rho * ada_dx_g.raw()[2*w+1] + (1 - rho) * grad2);
final float rate = MathUtils.approxSqrt((ada_dx_g.raw()[2 * w] + eps) / (ada_dx_g.raw()[2 * w + 1] + eps));
ada_dx_g.raw()[2*w ] = (float)(rho * ada_dx_g.raw()[2*w] + (1 - rho) * rate * rate * grad2);
return rate;
}
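// Illustrative sketch, not part of the original H2O source: the same AdaDelta step for one scalar
// weight, with the two running averages kept in a hypothetical 2-element state array instead of the
// interleaved layout above (ada_dx_g[2*w] = E[dx^2], ada_dx_g[2*w+1] = E[g^2]); uses Math.sqrt
// instead of the approximate square root.
static double exampleAdaDeltaStep(double w, double grad, double[] state /*{Edx2, Eg2}*/,
                                  double rho, double eps) {
  state[1] = rho * state[1] + (1 - rho) * grad * grad;                // update E[g^2]
  final double rate = Math.sqrt((state[0] + eps) / (state[1] + eps)); // adaptive per-weight rate
  state[0] = rho * state[0] + (1 - rho) * rate * rate * grad * grad;  // update E[dx^2]
  return w - rate * grad;                                             // apply the update
}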
/**
* Compute learning rate with AdaDelta, specialized for DenseVector (Bias)
* @param grad2 squared gradient
* @param row neuron index
* @param bias_ada_dx_g Matrix holding helper values (2 floats per weight)
* @param rho hyper-parameter #1
* @param eps hyper-parameter #2
* @return learning rate
*/
private static double computeAdaDeltaRateForBias(final double grad2, final int row,
final Storage.DenseVector bias_ada_dx_g,
final float rho, final float eps) {
bias_ada_dx_g.raw()[2*row+1] = rho * bias_ada_dx_g.raw()[2*row+1] + (1f - rho) * grad2;
final double rate = MathUtils.approxSqrt((bias_ada_dx_g.raw()[2 * row] + eps) / (bias_ada_dx_g.raw()[2 * row + 1] + eps));
bias_ada_dx_g.raw()[2*row] = rho * bias_ada_dx_g.raw()[2*row ] + (1f - rho) * rate * rate * grad2;
return rate;
}
/**
* Helper to enforce learning rule to satisfy sparsity constraint:
* Computes the (rolling) average activation for each (hidden) neuron.
*/
void compute_sparsity() {
if (_avg_a != null) {
if (params._mini_batch_size > 1) throw H2O.unimpl("Sparsity constraint is not yet implemented for mini-batch size > 1.");
for (int mb = 0; mb < _minfo.get_params()._mini_batch_size; ++mb) {
for (int row = 0; row < _avg_a.size(); row++) {
_avg_a.set(row, 0.999 * (_avg_a.get(row)) + 0.001 * (_a[mb].get(row))); //TODO: fix for mini-batch size > 1
}
}
}
}
/**
* Helper to update the bias values
* @param _b bias vector
* @param _bEA elastic average bias vector
* @param _bm bias momentum vector
* @param row index of the neuron for which we back-propagate
* @param partial_grad partial derivative dE/dnet = dE/dy * dy/net
* @param avg_grad2 average squared gradient for this neuron's incoming weights (only for ADADELTA)
* @param rate learning rate
* @param momentum momentum factor (needed only if ADADELTA isn't used)
* @param mb which mini-batch index
*/
private void update_bias(final Storage.DenseVector _b, final Storage.DenseVector _bEA, final Storage.DenseVector _bm, final int row,
double[/*actual mini-batch size*/] partial_grad, final double avg_grad2, double rate, final double momentum, int mb) {
final boolean have_momenta = _minfo.has_momenta();
final boolean have_ada = _minfo.adaDelta();
final float l1 = (float)params._l1;
final float l2 = (float)params._l2;
final int b = _k != 0 ? _k*row+_maxIncoming[mb][row] : row;
final double bias = _b.get(b);
partial_grad[mb] += Math.signum(bias) * l1 + bias * l2;
if (_bEA != null) partial_grad[mb] += (bias - _bEA.get(b)) * params._elastic_averaging_regularization;
// store the gradient
if (DeepLearningModelInfo.gradientCheck != null)
DeepLearningModelInfo.gradientCheck.apply(_index, row, -1, partial_grad[mb]);
if (have_ada) {
final float rho = (float)params._rho;
final float eps = (float)params._epsilon;
rate = computeAdaDeltaRateForBias(avg_grad2, b, _bias_ada_dx_g, rho, eps);
}
if (!params._nesterov_accelerated_gradient) {
final double delta = -rate * partial_grad[mb];
_b.add(b, delta);
if (have_momenta) {
_b.add(b, momentum * _bm.get(b));
_bm.set(b, delta);
}
} else {
double d = -partial_grad[mb];
if (have_momenta) {
_bm.set(b, _bm.get(b) * momentum);
_bm.add(b, d);
d = _bm.get(b);
}
_b.add(b, rate * d);
}
//update for sparsity constraint
if (params._autoencoder && params._sparsity_beta > 0 && !(this instanceof Output) && !(this instanceof Input) && (_index != params._hidden.length)) {
_b.add(b, -(rate * params._sparsity_beta * (_avg_a.raw()[b] - params._average_activation)));
}
if (Double.isInfinite(_b.get(b))) _minfo.setUnstable();
}
/**
* The learning rate
* @param n The number of training samples seen so far (for rate_annealing greater than 0)
* @return Learning rate
*/
public float rate(double n) {
return (float)(params._rate / (1 + params._rate_annealing * n));
}
protected float momentum() {
return momentum(-1);
}
/**
* The momentum - real number in [0, 1)
* Can be a linear ramp from momentum_start to momentum_stable, over momentum_ramp training samples
* @param n The number of training samples seen so far
* @return momentum
*/
final public float momentum(double n) {
double m = params._momentum_start;
if( params._momentum_ramp > 0 ) {
final double num = n != -1 ? _minfo.get_processed_total() : n;
if( num >= params._momentum_ramp)
m = params._momentum_stable;
else
m += (params._momentum_stable - params._momentum_start) * num / params._momentum_ramp;
}
return (float)m;
}
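// Illustrative sketch, not part of the original H2O source: the same linear momentum ramp as a
// standalone helper, with hypothetical start/stable/ramp arguments instead of the params fields.
static double exampleRampedMomentum(double samplesSeen, double start, double stable, double ramp) {
  if (ramp <= 0) return start;                           // no ramp configured
  if (samplesSeen >= ramp) return stable;                // ramp finished
  return start + (stable - start) * samplesSeen / ramp;  // linear interpolation in between
}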
/**
* Input layer of the Neural Network
* This layer is different from other layers as it has no incoming weights,
* but instead gets its activation values from the training points.
*/
public static class Input extends Neurons {
private DataInfo _dinfo; //training data
Input(DeepLearningParameters params, int units, final DataInfo d) {
super(units);
_dinfo = d;
_a = new Storage.DenseVector[params._mini_batch_size];
for (int i=0;i<_a.length;++i) _a[i] = new Storage.DenseVector(units);
}
@Override protected void bprop(int n) { throw new UnsupportedOperationException(); }
@Override protected void fprop(long seed, boolean training, int n) { throw new UnsupportedOperationException(); }
/**
* One of two methods to set layer input values. This one is for raw double data, e.g. for scoring
* @param seed For seeding the RNG inside (for input dropout)
* @param data Data (training columns and responses) to extract the training columns
* from to be mapped into the input neuron layer
* @param mb Mini-Batch index (which point inside this mini-batch)
*/
public void setInput(long seed, final double[] data, int mb) {
// Log.info("Data: " + ArrayUtils.toString(data));
assert(_dinfo != null);
double [] nums = MemoryManager.malloc8d(_dinfo._nums); // a bit wasteful - reallocated each time
int [] cats = MemoryManager.malloc4(_dinfo._cats); // a bit wasteful - reallocated each time
int i = 0, ncats = 0;
for(; i < _dinfo._cats; ++i){
assert(_dinfo._catMissing[i]); //we now *always* have a categorical level for NAs, just in case.
if (Double.isNaN(data[i])) {
cats[ncats] = (_dinfo._catOffsets[i+1]-1); //use the extra level for NAs made during training
} else {
int c = (int)data[i];
if (_dinfo._useAllFactorLevels)
cats[ncats] = c + _dinfo._catOffsets[i];
else if (c!=0)
cats[ncats] = c + _dinfo._catOffsets[i] - 1;
else {
// if useAllFactorLevels is not used, mark the zero level as -1
// so the net can compute the right encoding later
cats[ncats] = -1;
}
// If factor level in test set was not seen by training, then turn it into an NA
if (cats[ncats] >= _dinfo._catOffsets[i+1]) {
cats[ncats] = (_dinfo._catOffsets[i+1]-1);
}
}
ncats++;
}
for(;i < data.length;++i){
double d = data[i];
if(_dinfo._normMul != null) d = (d - _dinfo._normSub[i-_dinfo._cats])*_dinfo._normMul[i-_dinfo._cats];
nums[i-_dinfo._cats] = d; //can be NaN for missing numerical data
}
setInput(seed, null, nums, ncats, cats, mb);
}
/**
* The second method used to set input layer values. This one is used directly by FrameTask.processRow() and by the method above.
* @param seed For seeding the RNG inside (for input dropout)
* @param numIds Array of input layer unit indices for the sparse numerical values (null for dense input)
* @param nums Array containing numerical values, can be NaN
* @param numcat Number of horizontalized categorical non-zero values (i.e., those not being the first factor of a class)
* @param cats Array of indices, the first numcat values are the input layer unit (==column) indices for the non-zero categorical values
* (This allows this array to be re-usable by the caller, without re-allocating each time)
* @param mb Mini-Batch index (which point inside this mini-batch)
*/
public void setInput(long seed, final int[] numIds, final double[] nums, final int numcat, final int[] cats, int mb) {
Arrays.fill(_a[mb].raw(), 0f);
// random projection from fullN down to max_categorical_features
if (params._max_categorical_features < _dinfo.fullN() - _dinfo._nums) {
assert(nums.length == _dinfo._nums);
final int M = nums.length + params._max_categorical_features;
// final boolean random_projection = false;
// final boolean hash_trick = true;
// if (random_projection) {
// final int N = _dinfo.fullN();
// assert (_a.size() == M);
//
// // sparse random projection
// for (int i = 0; i < M; ++i) {
// for (int c = 0; c < numcat; ++c) {
// int j = cats[c];
// Random rng = RandomUtils.getRNG(params._seed + i * N + j);
// double val = 0;
// final float rnd = rng.nextFloat();
// if (rnd < 1. / 6.) val = Math.sqrt(3);
// if (rnd > 5. / 6.) val = -Math.sqrt(3);
// _a.add(i, 1f * val);
// }
// Random rng = RandomUtils.getRNG(params._seed + i*N + _dinfo.numStart());
// for (int n = 0; n < nums.length; ++n) {
// double val = 0;
// final float rnd = rng.nextFloat();
// if (rnd < 1. / 6.) val = Math.sqrt(3);
// if (rnd > 5. / 6.) val = - Math.sqrt(3);
// _a.set(i, (Double.isNaN(nums[n]) ? 0f /*Always do MeanImputation during scoring*/ : nums[n]) * val);
// }
// }
// } else if (hash_trick) {
// Use hash trick for categorical features
assert (_a[mb].size() == M);
// hash N-nums.length down to M-nums.length = cM (#categorical slots - always use all numerical features)
final int cM = params._max_categorical_features;
assert (_a[mb].size() == M);
MurmurHash murmur = MurmurHash.getInstance();
for (int i = 0; i < numcat; ++i) {
ByteBuffer buf = ByteBuffer.allocate(4);
int hashval = murmur.hash(buf.putInt(cats[i]).array(), 4, (int)params._seed); // turn horizontalized categorical integer into another integer, based on seed
_a[mb].add(Math.abs(hashval % cM), 1f); // restrict to limited range
}
for (int i = 0; i < nums.length; ++i)
_a[mb].set(cM + i, Double.isNaN(nums[i]) ? 0f /*Always do MeanImputation during scoring*/ : nums[i]);
// }
} else {
assert(_a[mb].size() == _dinfo.fullN());
for (int i = 0; i < numcat; ++i) {
if(cats[i] >= 0) {
_a[mb].set(cats[i], 1f); // one-hot encode categoricals
}
}
if (numIds != null) {
//sparse
for (int i = 0; i < numIds.length; ++i)
_a[mb].set(numIds[i], Double.isNaN(nums[i]) ? 0f /*Always do MeanImputation during scoring*/ : nums[i]);
} else {
//dense
for (int i = 0; i < nums.length; ++i)
_a[mb].set(_dinfo.numStart() + i, Double.isNaN(nums[i]) ? 0f /*Always do MeanImputation during scoring*/ : nums[i]);
}
}
// Input Dropout
if (_dropout == null) return;
if (params._autoencoder && params._input_dropout_ratio > 0) {
// copy input into _origa -- needed for reconstruction error
System.arraycopy(_a[mb].raw(), 0, _origa[mb].raw(), 0, _a[mb].raw().length);
}
seed += params._seed + 0x1337B4BE;
_dropout.randomlySparsifyActivation(_a[mb], seed);
}
}
/**
* Tanh neurons - most common, most stable
*/
public static class Tanh extends Neurons {
public Tanh(int units) { super(units); }
@Override protected void fprop(long seed, boolean training, int n) {
// TODO: implement GEMM
for (int mb=0;mb<n;++mb)
gemv(_a[mb], _w, _previous._a[mb], _b, _dropout != null ? _dropout.bits() : null);
final int rows = _a[0].size();
for (int mb=0;mb<n;++mb)
for( int row = 0; row < rows; row++ )
_a[mb].set(row, 1. - 2. / (1. + Math.exp(2*_a[mb].get(row)))); //evals faster than tanh(x), but is slightly less numerically stable - OK
compute_sparsity();
}
// Computing partial derivative g = dE/dnet = dE/dy * dy/dnet, where dE/dy is the backpropagated error
// dy/dnet = (1 - a^2) for y(net) = tanh(net)
@Override protected void bprop(int n) {
assert (_index < _minfo.get_params()._hidden.length);
float m = _minfo.adaDelta() ? 0 : momentum();
float r = _minfo.adaDelta() ? 0 : rate(_minfo.get_processed_total()) * (1f - m);
final int rows = _a[0].size();
double[] g = new double[n];
for (int row = 0; row < rows; row++) {
for (int mb=0;mb<n;++mb)
g[mb] = _e[mb].get(row) * (1 - _a[mb].get(row) * _a[mb].get(row));
bprop(row, g, r, m, n);
}
}
}
/**
* Tanh neurons with dropout
*/
public static class TanhDropout extends Tanh {
public TanhDropout(int units) { super(units); }
@Override protected void fprop(long seed, boolean training, int n) {
if (training) {
seed += params._seed + 0xDA7A6000;
_dropout.fillBytes(seed);
super.fprop(seed, true, n);
}
else {
super.fprop(seed, false, n);
for (int mb=0;mb<n;++mb)
ArrayUtils.mult(_a[mb].raw(), 1-params._hidden_dropout_ratios[_index]);
}
}
}
/**
* Maxout neurons (picks the max out of the k activation_j = sum(A_ij*x_i) + b_j)
* Requires k times the model parameters (weights/biases) as a "normal" neuron
*/
public static class Maxout extends Neurons {
public Maxout(DeepLearningParameters params, short k, int units) { super(units);
_k = k;
_maxIncoming=new int[params._mini_batch_size][];
for (int i=0;i<_maxIncoming.length;++i) _maxIncoming[i]=new int[units];
if (_k!=2) throw H2O.unimpl("Maxout is currently hardcoded for 2 channels. Trivial to enable k > 2 though.");
}
@Override protected void fprop(long seed, boolean training, int n) {
assert(_b.size() == _a[0].size() * _k);
assert(_w.size() == _a[0].size() * _previous._a[0].size() * _k);
final int rows = _a[0].size();
double[] channel = new double[_k];
for( int row = 0; row < rows; row++ ) {
for (int mb=0;mb<n;++mb) {
_a[mb].set(row, 0);
if( !training || _dropout == null || _dropout.unit_active(row) ) {
final int cols = _previous._a[mb].size();
// For each neuron in the previous layer, there's k channels
// Each channel has its own weight and bias values
// The channel leading to the highest incoming value (W*x + b) is the "winner" and will activate this neuron
short maxK = 0;
for( short k = 0; k < _k; k++ ) {
channel[k] = 0;
for( int col = 0; col < cols; col++ ) {
channel[k] += _w.raw()[_k*(row * cols + col) + k] * _previous._a[mb].get(col);
}
channel[k] += _b.raw()[_k*row+k];
if (channel[k] > channel[maxK]) maxK=k;
}
_maxIncoming[mb][row] = maxK;
_a[mb].set(row, channel[maxK]);
}
}
compute_sparsity();
}
}
@Override protected void bprop(int n) {
assert(_index != params._hidden.length);
float m = _minfo.adaDelta() ? 0 : momentum();
float r = _minfo.adaDelta() ? 0 : rate(_minfo.get_processed_total()) * (1f - m);
double[] g = new double[n];
final int rows = _a[0].size();
for (int row = 0; row < rows; row++) {
for (int mb=0;mb<n;++mb)
g[mb] = _e[mb].get(row);
bprop(row, g, r, m, n);
}
}
}
/**
* Maxout neurons with dropout
*/
public static class MaxoutDropout extends Maxout {
public MaxoutDropout(DeepLearningParameters params, short k, int units) { super(params,k,units); }
@Override protected void fprop(long seed, boolean training, int n) {
if (training) {
seed += params._seed + 0x51C8D00D;
_dropout.fillBytes(seed);
super.fprop(seed, true, n);
}
else {
super.fprop(seed, false, n);
for (int mb=0;mb<n;++mb)
ArrayUtils.mult(_a[mb].raw(), 1-params._hidden_dropout_ratios[_index]);
}
}
}
/**
* Rectifier linear unit (ReLU) neurons
*/
public static class Rectifier extends Neurons {
public Rectifier(int units) { super(units); }
@Override protected void fprop(long seed, boolean training, int n) {
// TODO: implement GEMM
for (int mb=0;mb<n;++mb)
gemv(_a[mb], _w, _previous._a[mb], _b, _dropout != null ? _dropout.bits() : null);
final int rows = _a[0].size();
for (int mb=0;mb<n;++mb) {
for( int row = 0; row < rows; row++ ) {
_a[mb].set(row, 0.5f* (_a[mb].get(row) + Math.abs(_a[mb].get(row)))); //faster than max(a, 0)
// _a.set(row, Math.max(_a.get(row), 0f));
}
}
compute_sparsity();
}
@Override protected void bprop(int n) {
assert (_index < _minfo.get_params()._hidden.length);
float m = _minfo.adaDelta() ? 0 : momentum();
float r = _minfo.adaDelta() ? 0 : rate(_minfo.get_processed_total()) * (1f - m);
final int rows = _a[0].size();
double[] g = new double[n];
for (int row = 0; row < rows; row++) {
for (int mb=0;mb<n;++mb)
//(d/dx)(max(0,x)) = 1 if x > 0, otherwise 0
g[mb] = _a[mb].get(row) > 0f ? _e[mb].get(row) : 0f;
bprop(row, g, r, m, n);
}
}
}
/**
* Rectifier linear unit (ReLU) neurons with dropout
*/
public static class RectifierDropout extends Rectifier {
public RectifierDropout(int units) { super(units); }
@Override protected void fprop(long seed, boolean training, int n) {
if (training) {
seed += params._seed + 0x3C71F1ED;
_dropout.fillBytes(seed);
super.fprop(seed, true, n);
}
else {
super.fprop(seed, false, n);
for (int mb=0;mb<n;++mb)
ArrayUtils.mult(_a[mb].raw(), 1-params._hidden_dropout_ratios[_index]);
}
}
}
public static class ExpRectifier extends Neurons {
public ExpRectifier(int units) { super(units); }
@Override protected void fprop(long seed, boolean training, int n) {
for (int mb=0;mb<n;++mb)
gemv(_a[mb], _w, _previous._a[mb], _b, _dropout != null ? _dropout.bits() : null);
final int rows = _a[0].size();
for( int row = 0; row < rows; row++ ) {
for (int mb=0;mb<n;++mb) {
double x = _a[mb].get(row);
double val = x >= 0 ? x : Math.exp(x) - 1;
_a[mb].set(row, val);
}
}
compute_sparsity();
}
// Computing partial derivative g = dE/dnet = dE/dy * dy/dnet, where dE/dy is the backpropagated error
@Override protected void bprop(int n) {
assert (_index < _minfo.get_params()._hidden.length);
float m = _minfo.adaDelta() ? 0 : momentum();
float r = _minfo.adaDelta() ? 0 : rate(_minfo.get_processed_total()) * (1f - m);
final int rows = _a[0].size();
for (int row = 0; row < rows; row++) {
double [] g = new double[n];
for (int mb=0;mb<n;++mb) {
double x = _a[mb].get(row);
double val = x >= 0 ? 1 : Math.exp(x);
g[mb] = _e[mb].get(row) * val;
}
bprop(row, g, r, m, n);
}
}
}
/**
* Exponential Rectifier with dropout
*/
public static class ExpRectifierDropout extends ExpRectifier {
public ExpRectifierDropout(int units) { super(units); }
@Override protected void fprop(long seed, boolean training, int n) {
if (training) {
seed += params._seed + 0xDA7A6000;
_dropout.fillBytes(seed);
super.fprop(seed, true, n);
}
else {
super.fprop(seed, false, n);
for (int mb=0;mb<n;++mb)
ArrayUtils.mult(_a[mb].raw(), 1-params._hidden_dropout_ratios[_index]);
}
}
}
/**
* Abstract class for Output neurons
*/
public static abstract class Output extends Neurons {
Output(int units) { super(units); }
protected void bprop(int n) { throw new UnsupportedOperationException(); }
}
/**
* Output neurons for classification - Softmax
*/
public static class Softmax extends Output {
public Softmax(int units) { super(units); }
protected void fprop(long seed, boolean training, int n) {
for (int mb=0;mb<n;++mb)
gemv(_a[mb], _w, _previous._a[mb], _b, null);
for (int mb=0;mb<n;++mb) {
final double max = ArrayUtils.maxValue(_a[mb].raw());
double scaling = 0;
final int rows = _a[mb].size();
for( int row = 0; row < rows; row++ ) {
_a[mb].set(row, Math.exp(_a[mb].get(row) - max));
scaling += _a[mb].get(row);
}
for( int row = 0; row < rows; row++ ) {
_a[mb].raw()[row] /= scaling;
}
}
}
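// Illustrative sketch, not part of the original H2O source: the same numerically stable softmax
// on a plain double[] of logits; subtracting the max before exponentiating (as fprop does above)
// keeps Math.exp from overflowing.
static double[] exampleSoftmax(double[] logits) {
  double max = Double.NEGATIVE_INFINITY;
  for (double v : logits) max = Math.max(max, v);
  double[] out = new double[logits.length];
  double scaling = 0;
  for (int i = 0; i < logits.length; i++) { out[i] = Math.exp(logits[i] - max); scaling += out[i]; }
  for (int i = 0; i < logits.length; i++) out[i] /= scaling;
  return out;
}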
/**
* Part of backpropagation for classification
* Update every weight as follows: w += -rate * dE/dw
* Compute dE/dw via chain rule: dE/dw = dE/dy * dy/dnet * dnet/dw, where net = sum(xi*wi)+b and y = activation function
* @param target actual class label (integer)
*/
@Override protected void setOutputLayerGradient(double target, int mb, int n) {
assert(target == (int)target);
double g; //partial derivative dE/dy * dy/dnet
final int rows = _a[mb].size();
for( int row = 0; row < rows; row++ ) {
final double t = (row == (int)target ? 1 : 0);
final double y = _a[mb].get(row);
//dy/dnet = derivative of softmax = (1-y)*y
switch(params._loss) {
case CrossEntropy:
//shortcut possible -dCE/dy * dy/dnet = target - y
g = y - t;
break;
case ModifiedHuber:
g = -2*_dist.negHalfGradient(t, y) * (1 - y) * y;
break;
case Quadratic:
g = (y - t) * (1f - y) * y;
break;
default:
throw H2O.unimpl();
}
_e[mb].set(row, g/n); //minibatch normalization
}
}
}
/**
* Output neurons for regression - Linear units
*/
public static class Linear extends Output {
public Linear() {
super(1);
}
protected void fprop(long seed, boolean training, int n) {
for (int mb=0;mb<n;++mb)
gemv(_a[mb], _w, _previous._a[mb], _b, _dropout != null ? _dropout.bits() : null);
}
/**
* Backpropagation for regression
* @param target floating-point target value
*/
@Override protected void setOutputLayerGradient(double target, int mb, int n) {
final int row = 0;
final double y = _a[mb].get(row);
double g = -2*_dist.negHalfGradient(target, y);
_e[mb].set(row, g/n); //minibatch normalization
}
}
/**
* Mat-Vec Plus Add (with optional row dropout)
* @param res = a*x+y (pre-allocated, will be overwritten)
* @param a matrix of size rows x cols
* @param x vector of length cols
* @param y vector of length rows
* @param row_bits if not null, check bits of this byte[] to determine whether a row is used or not
*/
static void gemv_naive(final double[] res, final float[] a, final double[] x, final double[] y, byte[] row_bits) {
final int cols = x.length;
final int rows = y.length;
assert(res.length == rows);
for(int row = 0; row<rows; row++) {
res[row] = 0;
if( row_bits != null && (row_bits[row / 8] & (1 << (row % 8))) == 0) continue;
for(int col = 0; col<cols; col++)
res[row] += a[row*cols+col] * x[col];
res[row] += y[row];
}
}
/**
* Optimized Mat-Vec Plus Add (with optional row dropout)
* Optimization: Partial sums can be evaluated in parallel
* @param res = a*x+y (pre-allocated, will be overwritten)
* @param a matrix of size rows x cols
* @param x vector of length cols
* @param y vector of length rows
* @param row_bits if not null, check bits of this byte[] to determine whether a row is used or not
*/
static void gemv_row_optimized(final double[] res, final float[] a, final double[] x, final double[] y, final byte[] row_bits) {
final int cols = x.length;
final int rows = y.length;
assert(res.length == rows);
final int extra=cols-cols%8;
final int multiple = (cols/8)*8-1;
int idx = 0;
for (int row = 0; row<rows; row++) {
res[row] = 0;
if( row_bits == null || (row_bits[row / 8] & (1 << (row % 8))) != 0) {
double psum0 = 0, psum1 = 0, psum2 = 0, psum3 = 0, psum4 = 0, psum5 = 0, psum6 = 0, psum7 = 0;
for (int col = 0; col < multiple; col += 8) {
int off = idx + col;
psum0 += a[off ] * x[col ];
psum1 += a[off + 1] * x[col + 1];
psum2 += a[off + 2] * x[col + 2];
psum3 += a[off + 3] * x[col + 3];
psum4 += a[off + 4] * x[col + 4];
psum5 += a[off + 5] * x[col + 5];
psum6 += a[off + 6] * x[col + 6];
psum7 += a[off + 7] * x[col + 7];
}
res[row] += psum0 + psum1 + psum2 + psum3;
res[row] += psum4 + psum5 + psum6 + psum7;
for (int col = extra; col < cols; col++)
res[row] += a[idx + col] * x[col];
res[row] += y[row];
}
idx += cols;
}
}
/**
* Helper to do a generic gemv: res = a*x + y
* @param res Dense result
* @param a DenseMatrix
* @param x DenseVector
* @param y Dense vector to add to result
* @param row_bits Bit mask for which rows to use
*/
static void gemv(final Storage.DenseVector res, final Storage.DenseRowMatrix a, final Storage.DenseVector x, final Storage.DenseVector y, byte[] row_bits) {
gemv_row_optimized(res.raw(), a.raw(), x.raw(), y.raw(), row_bits);
}
static void gemv_naive(final Storage.DenseVector res, final Storage.DenseRowMatrix a, final Storage.DenseVector x, final Storage.DenseVector y, byte[] row_bits) {
gemv_naive(res.raw(), a.raw(), x.raw(), y.raw(), row_bits);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/deeplearning/Storage.java
|
package hex.deeplearning;
import water.DKV;
import water.Iced;
import water.Key;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.FileVec;
import water.fvec.Frame;
import water.fvec.Vec;
import static water.fvec.Vec.makeCon;
import java.util.Arrays;
import java.util.TreeMap;
public class Storage {
/**
* Abstract vector interface
*/
public abstract interface Vector {
public abstract double get(int i);
public abstract void set(int i, double val);
public abstract void add(int i, double val);
public abstract int size();
public abstract double[] raw();
public abstract Frame toFrame(Key key);
}
/**
* Abstract matrix interface
*/
public abstract interface Matrix {
abstract float get(int row, int col);
abstract void set(int row, int col, float val);
abstract void add(int row, int col, float val);
abstract int cols();
abstract int rows();
abstract long size();
abstract float[] raw();
public Frame toFrame(Key key);
}
/**
* Abstract tensor interface
*/
public abstract interface Tensor {
abstract float get(int slice, int row, int col);
abstract void set(int slice, int row, int col, float val);
abstract void add(int slice, int row, int col, float val);
abstract int slices();
abstract int cols();
abstract int rows();
abstract long size();
abstract float[] raw();
public Frame toFrame(int slice, Key key);
}
/**
* Dense vector implementation
*/
public static class DenseVector extends Iced implements Vector {
private double[] _data;
DenseVector(int len) { _data = new double[len]; }
DenseVector(double[] v) { _data = v; }
@Override public double get(int i) { return _data[i]; }
@Override public void set(int i, double val) { _data[i] = val; }
@Override public void add(int i, double val) { _data[i] += val; }
@Override public int size() { return _data.length; }
@Override public double[] raw() { return _data; }
@Override public Frame toFrame(Key key) { return Storage.toFrame(this, key); }
}
/**
* Dense row matrix implementation
*/
public final static class DenseRowMatrix extends Iced implements Matrix {
private float[] _data;
private int _cols;
private int _rows;
DenseRowMatrix(int rows, int cols) { this(new float[cols*rows], rows, cols); }
DenseRowMatrix(float[] v, int rows, int cols) { _data = v; _rows = rows; _cols = cols; }
@Override public float get(int row, int col) {
assert(row<_rows && col<_cols) : "_data.length: " + _data.length + ", checking: " + row + " < " + _rows + " && " + col + " < " + _cols;
return _data[row*_cols + col];
}
@Override public void set(int row, int col, float val) { assert(row<_rows && col<_cols); _data[row*_cols + col] = val; }
@Override public void add(int row, int col, float val) { assert(row<_rows && col<_cols); _data[row*_cols + col] += val; }
@Override public int cols() { return _cols; }
@Override public int rows() { return _rows; }
@Override public long size() { return (long)_rows*(long)_cols; }
public float[] raw() { return _data; }
@Override public Frame toFrame(Key key) { return Storage.toFrame(this, key); }
}
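// Illustrative sketch, not part of the original H2O source: the row-major addressing used by
// DenseRowMatrix, i.e. element (row, col) lives at raw()[row * cols() + col], which is what
// get/set/add above compute (minus the bounds assertions).
static float exampleRowMajorGet(DenseRowMatrix m, int row, int col) {
  return m.raw()[row * m.cols() + col];
}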
/**
* Dense column matrix implementation
*/
public final static class DenseColMatrix extends Iced implements Matrix {
private float[] _data;
private int _cols;
private int _rows;
DenseColMatrix(int rows, int cols) { this(new float[cols*rows], rows, cols); }
DenseColMatrix(float[] v, int rows, int cols) { _data = v; _rows = rows; _cols = cols; }
DenseColMatrix(DenseRowMatrix m, int rows, int cols) {
this(rows, cols);
for (int row=0;row<rows;++row)
for (int col=0;col<cols;++col)
set(row,col, m.get(row,col));
}
@Override public float get(int row, int col) { assert(row<_rows && col<_cols); return _data[col*_rows + row]; }
@Override public void set(int row, int col, float val) { assert(row<_rows && col<_cols); _data[col*_rows + row] = val; }
@Override public void add(int row, int col, float val) { assert(row<_rows && col<_cols); _data[col*_rows + row] += val; }
@Override public int cols() { return _cols; }
@Override public int rows() { return _rows; }
@Override public long size() { return (long)_rows*(long)_cols; }
public float[] raw() { return _data; }
@Override public Frame toFrame(Key key) { return Storage.toFrame(this, key); }
}
/**
* Sparse row matrix implementation
*/
public final static class SparseRowMatrix extends Iced implements Matrix {
private TreeMap<Integer, Float>[] _rows;
private int _cols;
SparseRowMatrix(int rows, int cols) { this(null, rows, cols); }
SparseRowMatrix(Matrix v, int rows, int cols) {
_rows = new TreeMap[rows];
for (int row=0;row<rows;++row) _rows[row] = new TreeMap<>();
_cols = cols;
if (v!=null)
for (int row=0;row<rows;++row)
for (int col=0;col<cols;++col)
if (v.get(row,col) != 0f)
add(row,col, v.get(row,col));
}
@Override public float get(int row, int col) { Float v = _rows[row].get(col); if (v == null) return 0f; else return v; }
@Override public void add(int row, int col, float val) { set(row,col,get(row,col)+val); }
@Override public void set(int row, int col, float val) { _rows[row].put(col, val); }
@Override public int cols() { return _cols; }
@Override public int rows() { return _rows.length; }
@Override public long size() { return (long)_rows.length*(long)_cols; }
TreeMap<Integer, Float> row(int row) { return _rows[row]; }
public float[] raw() { throw new UnsupportedOperationException("raw access to the data in a sparse matrix is not implemented."); }
@Override public Frame toFrame(Key key) { return Storage.toFrame(this, key); }
}
/**
* Sparse column matrix implementation
*/
static final class SparseColMatrix extends Iced implements Matrix {
private TreeMap<Integer, Float>[] _cols;
private int _rows;
SparseColMatrix(int rows, int cols) { this(null, rows, cols); }
SparseColMatrix(Matrix v, int rows, int cols) {
_rows = rows;
_cols = new TreeMap[cols];
for (int col=0;col<cols;++col) _cols[col] = new TreeMap<>();
if (v!=null)
for (int row=0;row<rows;++row)
for (int col=0;col<cols;++col)
if (v.get(row,col) != 0f)
add(row,col, v.get(row,col));
}
@Override public float get(int row, int col) { Float v = _cols[col].get(row); if (v == null) return 0f; else return v; }
@Override public void add(int row, int col, float val) { set(row,col,get(row,col)+val); }
@Override public void set(int row, int col, float val) { _cols[col].put(row, val); }
@Override public int cols() { return _cols.length; }
@Override public int rows() { return _rows; }
@Override public long size() { return (long)_rows*(long)_cols.length; }
TreeMap<Integer, Float> col(int col) { return _cols[col]; }
public float[] raw() { throw new UnsupportedOperationException("raw access to the data in a sparse matrix is not implemented."); }
@Override public Frame toFrame(Key key) { return Storage.toFrame(this, key); }
}
/**
* Helper to convert the Matrix to a Frame using MRTask
*/
static class FrameFiller extends MRTask<FrameFiller> {
final DenseColMatrix dcm;
final DenseRowMatrix drm;
final SparseRowMatrix srm;
final SparseColMatrix scm;
FrameFiller(Matrix m) {
if (m instanceof DenseColMatrix) {
dcm = (DenseColMatrix)m;
drm = null;
srm = null;
scm = null;
}
else if (m instanceof DenseRowMatrix) {
dcm = null;
drm = (DenseRowMatrix)m;
srm = null;
scm = null;
}
else if (m instanceof SparseRowMatrix) {
dcm = null;
drm = null;
srm = (SparseRowMatrix)m;
scm = null;
}
else {
dcm = null;
drm = null;
srm = null;
scm = (SparseColMatrix)m;
}
}
@Override public void map(Chunk[] cs) {
Matrix m=null;
if (dcm != null) m = dcm;
if (drm != null) m = drm;
if (scm != null) m = scm;
if (srm != null) m = srm;
int off = (int)cs[0].start();
assert(m.cols() == cs.length);
for (int c = 0; c < cs.length; ++c) {
for (int r = 0; r < cs[0]._len; ++r) {
cs[c].set(r, m.get(off + r, c));
}
}
}
}
/**
* Helper to convert a Vector into a Frame
* @param v Vector
* @param key Key for output Frame
* @return Reference to Frame (which is also in DKV)
*/
static Frame toFrame(Vector v, Key key) {
final int log_rows_per_chunk = Math.max(1, FileVec.DFLT_LOG2_CHUNK_SIZE - (int) Math.floor(Math.log(1) / Math.log(2.)));
Vec vv = makeCon(0, v.size(), log_rows_per_chunk, false /* no rebalancing! */);
Frame f = new Frame(key, new Vec[]{vv});
try( Vec.Writer vw = f.vecs()[0].open() ) {
for (int r = 0; r < v.size(); ++r)
vw.set(r, v.get(r));
}
DKV.put(key, f);
return f;
}
/**
* Helper to convert a Matrix into a Frame
* @param m Matrix
* @param key Key for output Frame
* @return Reference to Frame (which is also in DKV)
*/
static Frame toFrame(Matrix m, Key key) {
final int log_rows_per_chunk = Math.max(1, FileVec.DFLT_LOG2_CHUNK_SIZE - (int) Math.floor(Math.log(m.cols()) / Math.log(2.)));
Vec v[] = new Vec[m.cols()];
for (int i = 0; i < m.cols(); ++i) {
v[i] = makeCon(0, m.rows(), log_rows_per_chunk);
}
Frame f = new FrameFiller(m).doAll(new Frame(key, v))._fr;
DKV.put(key, f);
return f;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/ensemble/Metalearner.java
|
package hex.ensemble;
import hex.Model;
import hex.ModelBuilder;
import hex.ensemble.StackedEnsembleModel.StackedEnsembleParameters;
import water.DKV;
import water.Job;
import water.Key;
import water.fvec.Frame;
import water.util.Log;
public abstract class Metalearner<B extends ModelBuilder<M, P, ?>, M extends Model<M, P, ?>, P extends Model.Parameters> {
/**
* Using an enum to list possible algos is not the greatest idea here
* as it forces us to hardcode supported algos and creates a dependency on metalearners provided in extensions (XGBoost).
* Also, it prevents us from loading custom metalearners.
*/
public enum Algorithm {
AUTO,
deeplearning,
drf,
gbm,
glm,
naivebayes,
xgboost,
}
protected Frame _levelOneTrainingFrame;
protected Frame _levelOneValidationFrame;
protected StackedEnsembleModel _model;
protected StackedEnsembleParameters _parms;
protected Job _job;
protected Key<Model> _metalearnerKey;
protected Job _metalearnerJob;
protected P _metalearner_parameters;
protected boolean _hasMetalearnerParams;
protected long _metalearnerSeed;
protected long _maxRuntimeSecs;
void init(Frame levelOneTrainingFrame,
Frame levelOneValidationFrame,
P metalearner_parameters,
StackedEnsembleModel model,
Job StackedEnsembleJob,
Key<Model> metalearnerKey,
Job metalearnerJob,
StackedEnsembleParameters parms,
boolean hasMetalearnerParams,
long metalearnerSeed,
long maxRuntimeSecs) {
_levelOneTrainingFrame = levelOneTrainingFrame;
_levelOneValidationFrame = levelOneValidationFrame;
_metalearner_parameters = metalearner_parameters;
_model = model;
_job = StackedEnsembleJob;
_metalearnerKey = metalearnerKey;
_metalearnerJob = metalearnerJob;
_parms = parms;
_hasMetalearnerParams = hasMetalearnerParams;
_metalearnerSeed = metalearnerSeed;
_maxRuntimeSecs = maxRuntimeSecs;
}
void compute() {
try {
_model.write_lock(_job);
B builder = createBuilder();
if (_hasMetalearnerParams) {
builder._parms = _metalearner_parameters;
}
setCommonParams(builder._parms);
setCrossValidationParams(builder._parms);
setCustomParams(builder._parms);
validateParams(builder._parms);
builder.init(false);
Job<M> j = builder.trainModel();
while (j.isRunning()) {
try {
_job.update(j.getWork(), "training metalearner(" + _model._parms._metalearner_algorithm + ")");
Thread.sleep(100);
} catch (InterruptedException ignored) {
}
}
Log.info("Finished training metalearner model(" + _model._parms._metalearner_algorithm + ").");
_model._output._metalearner = builder.get();
_model._dist = _model._output._metalearner._dist;
_model.doScoreOrCopyMetrics(_job);
if (_parms._keep_levelone_frame) {
_model._output._levelone_frame_id = _levelOneTrainingFrame; //Keep Level One Training Frame in Stacked Ensemble model object
}
} finally {
cleanup();
_model.update(_job);
_model.unlock(_job);
}
}
abstract B createBuilder();
protected void setCommonParams(P parms) {
if (parms._seed == -1) { //use _metalearnerSeed only as legacy fallback if not set on metalearner_parameters
parms._seed = _metalearnerSeed;
}
parms._train = _levelOneTrainingFrame._key;
parms._valid = (_levelOneValidationFrame == null ? null : _levelOneValidationFrame._key);
parms._response_column = _model.responseColumn;
parms._max_runtime_secs = _maxRuntimeSecs;
parms._weights_column = _model._parms._weights_column;
parms._offset_column = _model._parms._offset_column;
parms._main_model_time_budget_factor = _model._parms._main_model_time_budget_factor;
parms._custom_metric_func = _model._parms._custom_metric_func;
parms._gainslift_bins = _model._parms._gainslift_bins;
}
protected void setCrossValidationParams(P parms) {
if (_model._parms._metalearner_fold_column == null) {
parms._nfolds = _model._parms._metalearner_nfolds;
if (_model._parms._metalearner_nfolds > 1) {
if (_model._parms._metalearner_fold_assignment == null) {
parms._fold_assignment = Model.Parameters.FoldAssignmentScheme.AUTO;
} else {
parms._fold_assignment = _model._parms._metalearner_fold_assignment;
}
}
} else {
parms._fold_column = _model._parms._metalearner_fold_column;
}
}
protected void setCustomParams(P parms) { }
protected void validateParams(P parms) { }
protected void cleanup() {
if (!_parms._keep_base_model_predictions) {
_model.deleteBaseModelPredictions();
}
if (!_parms._keep_levelone_frame) {
DKV.remove(_levelOneTrainingFrame._key); //Remove Level One Training Frame from DKV
}
if (null != _levelOneValidationFrame) {
DKV.remove(_levelOneValidationFrame._key); //Remove Level One Validation Frame from DKV
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/ensemble/MetalearnerProvider.java
|
package hex.ensemble;
import water.api.Schema;
public interface MetalearnerProvider<M extends Metalearner> {
String getName();
M newInstance();
Schema newParametersSchemaInstance();
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/ensemble/Metalearners.java
|
package hex.ensemble;
import hex.Model;
import hex.ModelBuilder;
import hex.ensemble.Metalearner.Algorithm;
import hex.genmodel.utils.DistributionFamily;
import hex.glm.GLM;
import hex.glm.GLMModel;
import hex.glm.GLMModel.GLMParameters;
import hex.schemas.*;
import water.api.Schema;
import water.exceptions.H2OIllegalArgumentException;
import water.nbhm.NonBlockingHashMap;
import water.util.Log;
import java.util.ServiceLoader;
import java.util.function.Supplier;
import java.util.stream.Stream;
/**
* Entry point class to load and access the supported metalearners.
* Most of them are defined in this class, but some others can be loaded dynamically from the classpath;
* this is, for example, the case with the XGBoostMetalearner.
*/
public class Metalearners {
static final NonBlockingHashMap<String, MetalearnerProvider> providersByName = new NonBlockingHashMap<>();
static {
LocalProvider[] localProviders = new LocalProvider[] {
new LocalProvider<>(Algorithm.AUTO, AUTOMetalearner::new, GLMV3.GLMParametersV3::new),
new LocalProvider<>(Algorithm.deeplearning, DLMetalearner::new, DeepLearningV3.DeepLearningParametersV3::new),
new LocalProvider<>(Algorithm.drf, DRFMetalearner::new, DRFV3.DRFParametersV3::new),
new LocalProvider<>(Algorithm.gbm, GBMMetalearner::new, GBMV3.GBMParametersV3::new),
new LocalProvider<>(Algorithm.glm, GLMMetalearner::new, GLMV3.GLMParametersV3::new),
new LocalProvider<>(Algorithm.naivebayes, NaiveBayesMetalearner::new, NaiveBayesV3.NaiveBayesParametersV3::new),
};
for (MetalearnerProvider provider : localProviders) {
providersByName.put(provider.getName(), provider);
}
ServiceLoader<MetalearnerProvider> extensionProviders = ServiceLoader.load(MetalearnerProvider.class);
for (MetalearnerProvider provider : extensionProviders) {
providersByName.put(provider.getName(), provider);
}
}
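// Hedged sketch of how an extension (e.g. the XGBoost metalearner mentioned in the class Javadoc) plugs in:
// it implements MetalearnerProvider and registers the implementation's fully qualified class name in
// META-INF/services/hex.ensemble.MetalearnerProvider so that the ServiceLoader call above discovers it.
// The class, algo and schema names below are hypothetical, not part of this codebase:
//
//   class MyMetalearner extends Metalearners.SimpleMetalearner {
//       MyMetalearner() { super("my_algo"); }
//   }
//   public class MyMetalearnerProvider implements MetalearnerProvider<MyMetalearner> {
//       @Override public String getName() { return "my_algo"; }
//       @Override public MyMetalearner newInstance() { return new MyMetalearner(); }
//       @Override public Schema newParametersSchemaInstance() { return new MyAlgoV3.MyAlgoParametersV3(); }
//   }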
static Algorithm getActualMetalearnerAlgo(Algorithm algo) {
assertAvailable(algo.name());
return algo == Algorithm.AUTO ? Algorithm.glm : algo;
}
public static Model.Parameters createParameters(String name) {
assertAvailable(name);
return createInstance(name).createBuilder()._parms;
}
public static Schema createParametersSchema(String name) {
assertAvailable(name);
return providersByName.get(name).newParametersSchemaInstance();
}
static Metalearner createInstance(String name) {
assertAvailable(name);
return providersByName.get(name).newInstance();
}
private static void assertAvailable(String algo) {
if (!providersByName.containsKey(algo))
throw new H2OIllegalArgumentException("'"+algo+"' metalearner is not supported or available.");
}
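// Hedged usage sketch of the entry points above, using "gbm" as an example name:
//   Model.Parameters parms = Metalearners.createParameters("gbm"); // fresh GBM parameter object with defaults
//   Metalearner ml = Metalearners.createInstance("gbm");           // a GBMMetalearner instance
// An unknown name fails fast in assertAvailable with an H2OIllegalArgumentException.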
/**
* A local implementation of {@link MetalearnerProvider} to expose the {@link Metalearner}s defined in this class.
*/
static class LocalProvider<M extends Metalearner> implements MetalearnerProvider<M> {
private Algorithm _algorithm;
private Supplier<M> _instanceFactory;
private Supplier<Schema> _parameterSchemaInstanceFactory;
public LocalProvider(Algorithm algorithm,
Supplier<M> instanceFactory,
Supplier<Schema> parameterSchemaInstanceFactory) {
_algorithm = algorithm;
_instanceFactory = instanceFactory;
_parameterSchemaInstanceFactory = parameterSchemaInstanceFactory;
}
@Override
public String getName() {
return _algorithm.name();
}
@Override
public M newInstance() {
return _instanceFactory.get();
}
@Override
public Schema newParametersSchemaInstance() {
return _parameterSchemaInstanceFactory.get();
}
}
/**
* A simple implementation of {@link Metalearner} suitable for any algo; it simply uses the algo with its default parameters.
*/
public static class SimpleMetalearner extends Metalearner {
private String _algo;
protected SimpleMetalearner(String algo) {
_algo = algo;
}
@Override
ModelBuilder createBuilder() {
return ModelBuilder.make(_algo, _metalearnerJob, _metalearnerKey);
}
protected String getAlgo() {
return _algo;
}
}
static class MetalearnerWithDistribution extends SimpleMetalearner {
protected MetalearnerWithDistribution(String algo) {
super(algo);
}
@Override
protected void validateParams(Model.Parameters parms) {
super.validateParams(parms);
// Check if distribution family is supported and if not pick a basic one
ModelBuilder mb = ModelBuilder.make(parms);
mb.init(false);
if (!Stream.of("_distribution", "_family")
.allMatch((field) -> mb.getMessagesByFieldAndSeverity(field, Log.ERRR).length == 0)) {
DistributionFamily distribution;
if (_model._output.nclasses() == 1) {
distribution = DistributionFamily.gaussian;
} else if (_model._output.nclasses() == 2) {
distribution = DistributionFamily.bernoulli;
} else {
distribution = DistributionFamily.multinomial;
}
Log.warn("Distribution \"" + parms._distribution +
"\" is not supported by metalearner algorithm \"" + getAlgo() +
"\". Using \"" + distribution + "\" instead.");
parms._distribution = distribution;
}
}
}
static class DLMetalearner extends MetalearnerWithDistribution {
public DLMetalearner() {
super(Algorithm.deeplearning.name());
}
}
static class DRFMetalearner extends MetalearnerWithDistribution {
public DRFMetalearner() {
super(Algorithm.drf.name());
}
}
static class GBMMetalearner extends MetalearnerWithDistribution {
public GBMMetalearner() {
super(Algorithm.gbm.name());
}
}
static class GLMMetalearner extends Metalearner<GLM, GLMModel, GLMParameters> {
@Override
GLM createBuilder() {
return ModelBuilder.make("GLM", _metalearnerJob, _metalearnerKey);
}
}
static class NaiveBayesMetalearner extends SimpleMetalearner {
public NaiveBayesMetalearner() {
super(Algorithm.naivebayes.name());
}
}
static class AUTOMetalearner extends GLMMetalearner {
@Override
protected void setCustomParams(GLMParameters parms) {
//add GLM custom params
super.setCustomParams(parms);
parms._generate_scoring_history = true;
parms._score_iteration_interval = (parms._valid == null) ? 5 : -1;
//specific to AUTO mode
// beta_constraints/non_negative are not supported for multinomial and ordinal families
parms._non_negative = !(
parms._family.equals(GLMParameters.Family.multinomial) ||
parms._family.equals(GLMParameters.Family.ordinal)
);
//parms._alpha = new double[] {0.0, 0.25, 0.5, 0.75, 1.0};
// feature columns are already homogeneous (probabilities); when standardization is enabled,
// there can be information loss if some columns have very low probabilities compared with others (e.g. from a bad model),
// because standardization would give those columns more weight than they deserve.
parms._standardize = false;
// Enable lambda search if a validation frame is passed in to get a better GLM fit.
// Since we may also be setting non_negative = true, we should also set early_stopping = false.
if (parms._valid != null) {
parms._lambda_search = true;
parms._early_stopping = false;
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/ensemble/StackedEnsemble.java
|
package hex.ensemble;
import hex.Distribution;
import hex.Model;
import hex.ModelBuilder;
import hex.ModelCategory;
import hex.genmodel.utils.DistributionFamily;
import hex.glm.GLMModel;
import hex.grid.Grid;
import hex.tree.drf.DRFModel;
import jsr166y.CountedCompleter;
import water.DKV;
import water.Job;
import water.Key;
import water.Scope;
import water.exceptions.H2OIllegalArgumentException;
import water.exceptions.H2OModelBuilderIllegalArgumentException;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.ArrayUtils;
import water.util.Log;
import water.util.ReflectionUtils;
import water.util.TwoDimTable;
import java.lang.reflect.Field;
import java.util.*;
import java.util.stream.Stream;
import static hex.Model.Parameters.FoldAssignmentScheme.AUTO;
import static hex.Model.Parameters.FoldAssignmentScheme.Random;
import static hex.genmodel.utils.DistributionFamily.*;
import static hex.util.DistributionUtils.familyToDistribution;
/**
* An ensemble of other models, created by <i>stacking</i> with the SuperLearner algorithm or a variation.
*/
public class StackedEnsemble extends ModelBuilder<StackedEnsembleModel,StackedEnsembleModel.StackedEnsembleParameters,StackedEnsembleModel.StackedEnsembleOutput> {
StackedEnsembleDriver _driver;
// The in-progress model being built
protected StackedEnsembleModel _model;
public StackedEnsemble(StackedEnsembleModel.StackedEnsembleParameters parms) {
super(parms);
init(false);
}
public StackedEnsemble(boolean startup_once) {
super(new StackedEnsembleModel.StackedEnsembleParameters(), startup_once);
}
@Override
public ModelCategory[] can_build() {
return new ModelCategory[]{
ModelCategory.Regression,
ModelCategory.Binomial,
ModelCategory.Multinomial
};
}
@Override
public BuilderVisibility builderVisibility() {
return BuilderVisibility.Stable;
}
@Override
public boolean isSupervised() {
return true;
}
@Override
protected void ignoreBadColumns(int npredictors, boolean expensive){
HashSet<String> usedColumns = new HashSet<>();
for(Key k: _parms._base_models) {
Model model = (Model) DKV.getGet(k);
usedColumns.add(model._parms._response_column);
usedColumns.addAll(Arrays.asList(model._parms.getNonPredictors()));
if (model._output._origNames != null)
usedColumns.addAll(Arrays.asList(model._output._origNames));
else
usedColumns.addAll(Arrays.asList(model._output._names));
}
usedColumns.addAll(Arrays.asList(_parms.getNonPredictors()));
// FilterCols(n=0) because there is no guarantee that non-predictors are
// at the end of the frame, e.g., `metalearner_fold` column can be anywhere,
// and `usedColumns` contains all used columns, even the non-predictor ones
new FilterCols(0) {
@Override protected boolean filter(Vec v, String name) {
return !usedColumns.contains(name);
}
}.doIt(_train,"Dropping unused columns: ",expensive);
}
@Override
protected StackedEnsembleDriver trainModelImpl() {
return _driver = _parms._blending == null ? new StackedEnsembleCVStackingDriver() : new StackedEnsembleBlendingDriver();
}
@Override
public boolean haveMojo() {
return true;
}
@Override
public int nclasses() {
if (_parms._metalearner_parameters != null) {
DistributionFamily distribution = _parms._metalearner_parameters.getDistributionFamily();
if (Arrays.asList(multinomial, ordinal, AUTO).contains(distribution))
return _nclass;
if (Arrays.asList(bernoulli, quasibinomial, fractionalbinomial).contains(distribution))
return 2;
return 1;
}
return super.nclasses();
}
@Override
public void init(boolean expensive) {
expandBaseModels();
super.init(expensive);
if (_parms._distribution != DistributionFamily.AUTO) {
throw new H2OIllegalArgumentException("Setting \"distribution\" to StackedEnsemble is unsupported. Please set it in \"metalearner_parameters\".");
}
checkColumnPresent("fold", _parms._metalearner_fold_column, train(), valid(), _parms.blending());
checkColumnPresent("weights", _parms._weights_column, train(), valid(), _parms.blending());
checkColumnPresent("offset", _parms._offset_column, train(), valid(), _parms.blending());
validateBaseModels();
}
/**
* Expand base models - if a grid is provided instead of a model, it gets expanded into individual models.
*/
private void expandBaseModels() {
// H2O Flow initializes SE with no base_models
if (_parms._base_models == null) return;
List<Key> baseModels = new ArrayList<Key>();
for (Key baseModelKey : _parms._base_models) {
Object retrievedObject = DKV.getGet(baseModelKey);
if (retrievedObject instanceof Model) {
baseModels.add(baseModelKey);
} else if (retrievedObject instanceof Grid) {
Grid grid = (Grid) retrievedObject;
Collections.addAll(baseModels, grid.getModelKeys());
} else if (retrievedObject == null) {
throw new IllegalArgumentException(String.format("Specified id \"%s\" does not exist.", baseModelKey));
} else {
throw new IllegalArgumentException(String.format("Unsupported type \"%s\" as a base model.", retrievedObject.getClass().toString()));
}
}
_parms._base_models = baseModels.toArray(new Key[0]);
}
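// e.g. (hypothetical keys) _base_models = [my_gbm, my_grid] is expanded to
// [my_gbm, my_grid_model_1, ..., my_grid_model_n], so grids and individual models can be mixed freely;
// an id that resolves to neither a Model nor a Grid is rejected above.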
/**
* Validates base models.
*/
private void validateBaseModels() {
// H2O Flow initializes SE with no base_models
if (_parms._base_models == null) return;
boolean warnSameWeightsColumns = true;
String referenceWeightsColumn = null;
for (int i = 0; i < _parms._base_models.length; i++) {
Model baseModel = DKV.getGet(_parms._base_models[i]);
if (i == 0) {
if ((_parms._offset_column == null))
_parms._offset_column = baseModel._parms._offset_column;
referenceWeightsColumn = baseModel._parms._weights_column;
warnSameWeightsColumns = referenceWeightsColumn != null; // We don't want to warn if no weights are set
}
if (!Objects.equals(referenceWeightsColumn, baseModel._parms._weights_column)) {
warnSameWeightsColumns = false;
}
if (!Objects.equals(_parms._offset_column, baseModel._parms._offset_column))
throw new IllegalArgumentException("All base models must have the same offset_column!");
}
if (_parms._weights_column == null && warnSameWeightsColumns && _parms._base_models.length > 0) {
warn("_weights_column", "All base models use weights_column=\"" + referenceWeightsColumn +
"\" but Stacked Ensemble does not. If you want to use the same " +
"weights_column for the meta learner, please specify it as an argument " +
"in the h2o.stackedEnsemble call.");
}
}
/**
* Checks for presence of a column in given {@link Frame}s. Null column means no checks are done.
*
* @param columnName Name of the column, such as fold, weight, etc.
* @param columnId Actual column name in the frame. Null means no column has been specified.
* @param frames A list of frames to check for the presence of the column
*/
private static void checkColumnPresent(final String columnName, final String columnId, final Frame... frames) {
if (columnId == null) return; // Unspecified column implies no checks are needed on the provided frames
for (Frame frame : frames) {
if (frame == null) continue; // No frame provided, no checks required
if (frame.vec(columnId) == null) {
throw new IllegalArgumentException(String.format("Specified %s column '%s' not found in one of the supplied data frames. Available column names are: %s",
columnName, columnId, Arrays.toString(frame.names())));
}
}
}
static void addModelPredictionsToLevelOneFrame(Model aModel, Frame aModelsPredictions, Frame levelOneFrame) {
if (aModel._output.isBinomialClassifier()) {
// GLM uses a different column name than the other algos
Vec preds = aModelsPredictions.vec(2); // Predictions column names have been changed...
levelOneFrame.add(aModel._key.toString(), preds);
} else if (aModel._output.isMultinomialClassifier()) { //Multinomial
//Need to remove 'predict' column from multinomial since it contains outcome
Frame probabilities = aModelsPredictions.subframe(ArrayUtils.remove(aModelsPredictions.names(), "predict"));
probabilities.setNames(
Stream.of(probabilities.names())
.map((name) -> aModel._key.toString().concat("/").concat(name))
.toArray(String[]::new)
);
levelOneFrame.add(probabilities);
} else if (aModel._output.isAutoencoder()) {
throw new H2OIllegalArgumentException("Don't yet know how to stack autoencoders: " + aModel._key);
} else if (!aModel._output.isSupervised()) {
throw new H2OIllegalArgumentException("Don't yet know how to stack unsupervised models: " + aModel._key);
} else {
Vec preds = aModelsPredictions.vec("predict");
levelOneFrame.add(aModel._key.toString(), preds);
}
}
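// Illustrative column naming (model keys are hypothetical): a binomial model "GBM_1" contributes a single
// column "GBM_1" holding its p1 probability (prediction column index 2); a 3-class model "DRF_2" contributes
// "DRF_2/classA", "DRF_2/classB", "DRF_2/classC"; a regression model contributes its "predict" column
// under the model key name.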
/**
* Add non-predictor columns to levelOneFrame, i.e., all but those generated by base models. For example:
* response_column, metalearner_fold_column, weights_column
*
* @param parms StackedEnsembleParameters
* @param fr Source frame from which the non-predictor columns are copied (e.g. the training or blending frame)
* @param levelOneFrame The level-one frame being assembled
* @param training Used to determine which columns are necessary to add
*/
static void addNonPredictorsToLevelOneFrame(final StackedEnsembleModel.StackedEnsembleParameters parms, Frame fr, Frame levelOneFrame, boolean training) {
if (training) {
if (parms._metalearner_fold_column != null)
levelOneFrame.add(parms._metalearner_fold_column, fr.vec(parms._metalearner_fold_column));
}
if (parms._weights_column != null)
levelOneFrame.add(parms._weights_column, fr.vec(parms._weights_column));
if (parms._offset_column != null)
levelOneFrame.add(parms._offset_column, fr.vec(parms._offset_column));
levelOneFrame.add(parms._response_column, fr.vec(parms._response_column));
}
/**
* Inherit distribution and its parameters
* @param baseModelParms
*/
private void inheritDistributionAndParms(StackedEnsembleModel seModel, Model.Parameters baseModelParms) {
if (baseModelParms instanceof GLMModel.GLMParameters) {
try {
_parms._metalearner_parameters.setDistributionFamily(familyToDistribution(((GLMModel.GLMParameters) baseModelParms)._family));
} catch (IllegalArgumentException e) {
warn("distribution", "Stacked Ensemble is not able to inherit distribution from GLM's family " + ((GLMModel.GLMParameters) baseModelParms)._family + ".");
}
} else if (baseModelParms instanceof DRFModel.DRFParameters) {
inferBasicDistribution(seModel);
} else {
_parms._metalearner_parameters.setDistributionFamily(baseModelParms._distribution);
}
// deal with parameterized distributions
switch (baseModelParms._distribution) {
case custom:
_parms._metalearner_parameters._custom_distribution_func = baseModelParms._custom_distribution_func;
break;
case huber:
_parms._metalearner_parameters._huber_alpha = baseModelParms._huber_alpha;
break;
case tweedie:
_parms._metalearner_parameters._tweedie_power = baseModelParms._tweedie_power;
break;
case quantile:
_parms._metalearner_parameters._quantile_alpha = baseModelParms._quantile_alpha;
break;
}
}
void inferBasicDistribution(StackedEnsembleModel seModel) {
if (seModel._output.isBinomialClassifier()) {
_parms._metalearner_parameters.setDistributionFamily(DistributionFamily.bernoulli);
} else if (seModel._output.isClassifier()) {
_parms._metalearner_parameters.setDistributionFamily(DistributionFamily.multinomial);
} else {
_parms._metalearner_parameters.setDistributionFamily(DistributionFamily.gaussian);
}
}
/**
* Inherit family and its parameters
* @param baseModelParms
*/
private void inheritFamilyAndParms(StackedEnsembleModel seModel, Model.Parameters baseModelParms) {
GLMModel.GLMParameters metaParams = (GLMModel.GLMParameters) _parms._metalearner_parameters;
if (baseModelParms instanceof GLMModel.GLMParameters) {
GLMModel.GLMParameters glmParams = (GLMModel.GLMParameters) baseModelParms;
metaParams._family = glmParams._family;
metaParams._link = glmParams._link;
} else if (baseModelParms instanceof DRFModel.DRFParameters) {
inferBasicDistribution(seModel);
} else {
try {
metaParams.setDistributionFamily(baseModelParms._distribution);
} catch (H2OIllegalArgumentException e) {
warn("distribution", "Stacked Ensemble is not able to inherit family from a distribution " + baseModelParms._distribution + ".");
inferBasicDistribution(seModel);
}
}
// deal with parameterized distributions
if (metaParams._family == GLMModel.GLMParameters.Family.tweedie) {
_parms._metalearner_parameters._tweedie_power = baseModelParms._tweedie_power;
}
}
/**
* Infers distribution/family from a model
* @param aModel
* @return True if the distribution or family was inferred from a model
*/
boolean inferDistributionOrFamily(StackedEnsembleModel seModel, Model aModel) {
if (Metalearners.getActualMetalearnerAlgo(_parms._metalearner_algorithm) == Metalearner.Algorithm.glm) { //use family
if (((GLMModel.GLMParameters)_parms._metalearner_parameters)._family != GLMModel.GLMParameters.Family.AUTO) {
return false; // User specified family - no need to infer one; Link will be also used properly if it is specified
}
inheritFamilyAndParms(seModel,aModel._parms);
} else { // use distribution
if (_parms._metalearner_parameters._distribution != DistributionFamily.AUTO) {
return false; // User specified distribution; no need to infer one
}
inheritDistributionAndParms(seModel, aModel._parms);
}
return true;
}
private DistributionFamily distributionFamily(Model aModel) {
// TODO: hack alert: In DRF, _parms._distribution is always set to multinomial. Yay.
if (aModel instanceof DRFModel)
if (aModel._output.isBinomialClassifier())
return DistributionFamily.bernoulli;
else if (aModel._output.isClassifier())
return DistributionFamily.multinomial;
else
return DistributionFamily.gaussian;
if (aModel instanceof StackedEnsembleModel) {
StackedEnsembleModel seModel = (StackedEnsembleModel) aModel;
if (Metalearners.getActualMetalearnerAlgo(seModel._parms._metalearner_algorithm) == Metalearner.Algorithm.glm) {
return familyToDistribution(((GLMModel.GLMParameters) seModel._parms._metalearner_parameters)._family);
}
if (seModel._parms._metalearner_parameters._distribution != DistributionFamily.AUTO) {
return seModel._parms._metalearner_parameters._distribution;
}
}
try {
Field familyField = ReflectionUtils.findNamedField(aModel._parms, "_family");
Field distributionField = (familyField != null ? null : ReflectionUtils.findNamedField(aModel, "_dist"));
if (null != familyField) {
// GLM only, for now
GLMModel.GLMParameters.Family thisFamily = (GLMModel.GLMParameters.Family) familyField.get(aModel._parms);
return familyToDistribution(thisFamily);
}
if (null != distributionField) {
Distribution distribution = ((Distribution)distributionField.get(aModel));
DistributionFamily distributionFamily;
if (null != distribution)
distributionFamily = distribution._family;
else
distributionFamily = aModel._parms._distribution;
// NOTE: If the algo does smart guessing of the distribution family we need to duplicate the logic here.
if (distributionFamily == DistributionFamily.AUTO) {
if (aModel._output.isBinomialClassifier())
distributionFamily = DistributionFamily.bernoulli;
else if (aModel._output.isClassifier())
distributionFamily = DistributionFamily.multinomial;
else
distributionFamily = DistributionFamily.gaussian;
} // DistributionFamily.AUTO
return distributionFamily;
}
throw new H2OIllegalArgumentException("Don't know how to stack models that have neither a distribution hyperparameter nor a family hyperparameter.");
}
catch (Exception e) {
throw new H2OIllegalArgumentException(e.toString(), e.toString());
}
}
void checkAndInheritModelProperties(StackedEnsembleModel seModel) {
if (null == _parms._base_models || 0 == _parms._base_models.length)
throw new H2OIllegalArgumentException("When creating a StackedEnsemble you must specify one or more models; found 0.");
if (null != _parms._metalearner_fold_column && 0 != _parms._metalearner_nfolds)
throw new H2OIllegalArgumentException("Cannot specify fold_column and nfolds at the same time.");
Model aModel = null;
boolean retrievedFirstModelParams = false;
boolean inferredDistributionFromFirstModel = false;
GLMModel firstGLM = null;
boolean blending_mode = _parms._blending != null;
boolean cv_required_on_base_model = !blending_mode;
boolean require_consistent_training_frames = !blending_mode && !_parms._is_cv_model;
//following variables are collected from the 1st base model (should be identical across base models), i.e. when retrievedFirstModelParams=false
int basemodel_nfolds = -1;
Model.Parameters.FoldAssignmentScheme basemodel_fold_assignment = null;
String basemodel_fold_column = null;
long seed = -1;
//end 1st model collected fields
// Make sure we can set metalearner's family and link if needed
if (_parms._metalearner_parameters == null) {
_parms.initMetalearnerParams();
}
for (Key<Model> k : _parms._base_models) {
aModel = DKV.getGet(k);
if (null == aModel) {
warn("base_models", "Failed to find base model; skipping: "+k);
continue;
}
Log.debug("Checking properties for model "+k);
if (!aModel.isSupervised()) {
throw new H2OIllegalArgumentException("Base model is not supervised: "+aModel._key.toString());
}
if (retrievedFirstModelParams) {
// check that the base models are all consistent with first based model
if (seModel.modelCategory != aModel._output.getModelCategory())
throw new H2OIllegalArgumentException("Base models are inconsistent: "
+"there is a mix of different categories of models among "+Arrays.toString(_parms._base_models));
if (! seModel.responseColumn.equals(aModel._parms._response_column))
throw new H2OIllegalArgumentException("Base models are inconsistent: they use different response columns."
+" Found: " + seModel.responseColumn + " (StackedEnsemble) and "+aModel._parms._response_column+" (model "+k+").");
if (require_consistent_training_frames) {
if (seModel.trainingFrameRows < 0) seModel.trainingFrameRows = _parms.train().numRows();
long numOfRowsUsedToTrain = aModel._parms.train() == null ?
aModel._output._cross_validation_holdout_predictions_frame_id.get().numRows() :
aModel._parms.train().numRows();
if (seModel.trainingFrameRows != numOfRowsUsedToTrain)
throw new H2OIllegalArgumentException("Base models are inconsistent: they use different size (number of rows) training frames."
+" Found: "+seModel.trainingFrameRows+" (StackedEnsemble) and "+numOfRowsUsedToTrain+" (model "+k+").");
}
if (cv_required_on_base_model) {
if (aModel._parms._fold_assignment != basemodel_fold_assignment
&& !(aModel._parms._fold_assignment == AUTO && basemodel_fold_assignment == Random)
) {
warn("base_models", "Base models are inconsistent: they use different fold_assignments. This can lead to data leakage.");
}
if (aModel._parms._fold_column == null) {
// If we don't have a fold_column require:
// nfolds > 1
// nfolds consistent across base models
if (aModel._parms._nfolds < 2)
throw new H2OIllegalArgumentException("Base model does not use cross-validation: "+aModel._parms._nfolds);
if (basemodel_nfolds != aModel._parms._nfolds)
warn("base_models", "Base models are inconsistent: they use different values for nfolds. This can lead to data leakage.");
if (basemodel_fold_assignment == Random && aModel._parms._seed != seed)
warn("base_models", "Base models are inconsistent: they use random-seeded k-fold cross-validation but have different seeds. This can lead to data leakage.");
} else {
if (!aModel._parms._fold_column.equals(basemodel_fold_column))
warn("base_models", "Base models are inconsistent: they use different fold_columns. This can lead to data leakage.");
}
if (! aModel._parms._keep_cross_validation_predictions)
throw new H2OIllegalArgumentException("Base model does not keep cross-validation predictions: "+aModel._parms._nfolds);
}
if (inferredDistributionFromFirstModel) {
// Check inferred params and if they differ fallback to basic distribution of model category
if (!(aModel instanceof DRFModel) && distributionFamily(aModel) == distributionFamily(seModel)) {
boolean sameParams = true;
switch (_parms._metalearner_parameters._distribution) {
case custom:
sameParams = _parms._metalearner_parameters._custom_distribution_func
.equals(aModel._parms._custom_distribution_func);
break;
case huber:
sameParams = _parms._metalearner_parameters._huber_alpha == aModel._parms._huber_alpha;
break;
case tweedie:
sameParams = _parms._metalearner_parameters._tweedie_power == aModel._parms._tweedie_power;
break;
case quantile:
sameParams = _parms._metalearner_parameters._quantile_alpha == aModel._parms._quantile_alpha;
break;
}
if ((aModel instanceof GLMModel) && (Metalearners.getActualMetalearnerAlgo(_parms._metalearner_algorithm) == Metalearner.Algorithm.glm)) {
if (firstGLM == null) {
firstGLM = (GLMModel) aModel;
inheritFamilyAndParms(seModel, firstGLM._parms);
} else {
sameParams = ((GLMModel.GLMParameters) _parms._metalearner_parameters)._link.equals(((GLMModel) aModel)._parms._link);
}
}
if (!sameParams) {
warn("distribution", "Base models are inconsistent; they use same distribution but different parameters of " +
"the distribution. Reverting to default distribution.");
inferBasicDistribution(seModel);
inferredDistributionFromFirstModel = false;
}
} else {
if (distributionFamily(aModel) != distributionFamily(seModel)) {
// Distribution of base models differ
warn("distribution","Base models are inconsistent; they use different distributions: "
+ distributionFamily(seModel) + " and: " + distributionFamily(aModel) +
". Reverting to default distribution.");
} // else the first model was DRF/XRT so we don't want to warn
inferBasicDistribution(seModel);
inferredDistributionFromFirstModel = false;
}
}
} else {
// !retrievedFirstModelParams: this is the first base_model
seModel.modelCategory = aModel._output.getModelCategory();
inferredDistributionFromFirstModel = inferDistributionOrFamily(seModel, aModel);
firstGLM = aModel instanceof GLMModel && inferredDistributionFromFirstModel ? (GLMModel) aModel : null;
seModel.responseColumn = aModel._parms._response_column;
if (! _parms._response_column.equals(seModel.responseColumn)) // _parms._response_column can't be null, validated by ModelBuilder
throw new H2OIllegalArgumentException("StackedModel response_column must match the response_column of each base model."
+" Found: "+_parms._response_column+"(StackedEnsemble) and: "+seModel.responseColumn+" (model "+k+").");
basemodel_nfolds = aModel._parms._nfolds;
basemodel_fold_assignment = aModel._parms._fold_assignment;
if (basemodel_fold_assignment == AUTO) basemodel_fold_assignment = Random;
basemodel_fold_column = aModel._parms._fold_column;
seed = aModel._parms._seed;
retrievedFirstModelParams = true;
}
} // for all base_models
if (null == aModel)
throw new H2OIllegalArgumentException("When creating a StackedEnsemble you must specify one or more models; "
+_parms._base_models.length+" were specified but none of those were found: "+Arrays.toString(_parms._base_models));
}
private abstract class StackedEnsembleDriver extends Driver {
/**
* Prepare a "level one" frame for a given set of models, predictions-frames and actuals. Used for preparing
* training and validation frames for the metalearning step, and could also be used for bulk predictions for
* a StackedEnsemble.
*/
private Frame prepareLevelOneFrame(String levelOneKey, Model[] baseModels, Frame[] baseModelPredictions, Frame actuals) {
if (null == baseModels) throw new H2OIllegalArgumentException("Base models array is null.");
if (null == baseModelPredictions) throw new H2OIllegalArgumentException("Base model predictions array is null.");
if (baseModels.length == 0) throw new H2OIllegalArgumentException("Base models array is empty.");
if (baseModelPredictions.length == 0)
throw new H2OIllegalArgumentException("Base model predictions array is empty.");
if (baseModels.length != baseModelPredictions.length)
throw new H2OIllegalArgumentException("Base models and prediction arrays are different lengths.");
final StackedEnsembleModel.StackedEnsembleParameters.MetalearnerTransform transform;
if (_parms._metalearner_transform != null && _parms._metalearner_transform != StackedEnsembleModel.StackedEnsembleParameters.MetalearnerTransform.NONE) {
if (!(_model._output.isBinomialClassifier() || _model._output.isMultinomialClassifier()))
throw new H2OIllegalArgumentException("Metalearner transform is supported only for classification!");
transform = _parms._metalearner_transform;
} else {
transform = null;
}
if (null == levelOneKey) levelOneKey = "levelone_" + _model._key.toString() + "_" + _parms._metalearner_transform.toString();
// TODO: what if we're running multiple in parallel and have a name collision?
Frame old = DKV.getGet(levelOneKey);
if (old != null && old instanceof Frame) {
Frame oldFrame = (Frame) old;
oldFrame.write_lock(_job);
// Remove ALL the columns, so we don't delete them in remove_impl. Their
// lifetime is controlled by their model.
oldFrame.removeAll();
oldFrame.update(_job);
oldFrame.unlock(_job);
}
Frame levelOneFrame = transform == null ?
new Frame(Key.make(levelOneKey)) // no transform -> this will be the final frame
:
new Frame(); // transform -> this is only an intermediate result
for (int i = 0; i < baseModels.length; i++) {
Model baseModel = baseModels[i];
Frame baseModelPreds = baseModelPredictions[i];
if (null == baseModel) {
Log.warn("Failed to find base model; skipping: " + baseModels[i]);
continue;
}
if (null == baseModelPreds) {
Log.warn("Failed to find base model " + baseModel + " predictions; skipping: " + baseModelPreds._key);
continue;
}
StackedEnsemble.addModelPredictionsToLevelOneFrame(baseModel, baseModelPreds, levelOneFrame);
Scope.untrack(baseModelPredictions);
}
if (transform != null) {
levelOneFrame = _parms._metalearner_transform.transform(_model, levelOneFrame, Key.make(levelOneKey));
}
// Add metalearner fold column, weights column to level one frame if it exists
addNonPredictorsToLevelOneFrame(_model._parms, actuals, levelOneFrame, true);
Log.info("Finished creating \"level one\" frame for stacking: " + levelOneFrame.toString());
DKV.put(levelOneFrame);
return levelOneFrame;
}
/**
* Prepare a "level one" frame for a given set of models and actuals.
* Used for preparing training and validation frames for the metalearning step, and could also be used for bulk predictions for a StackedEnsemble.
*/
private Frame prepareLevelOneFrame(String levelOneKey, Key<Model>[] baseModelKeys, Frame actuals, boolean isTraining) {
List<Model> baseModels = new ArrayList<>();
List<Frame> baseModelPredictions = new ArrayList<>();
for (Key<Model> k : baseModelKeys) {
if (_model._output._metalearner == null || _model.isUsefulBaseModel(k)) {
Model aModel = DKV.getGet(k);
if (null == aModel)
throw new H2OIllegalArgumentException("Failed to find base model: " + k);
Frame predictions = getPredictionsForBaseModel(aModel, actuals, isTraining);
baseModels.add(aModel);
baseModelPredictions.add(predictions);
}
}
boolean keepLevelOneFrame = isTraining && _parms._keep_levelone_frame;
Frame levelOneFrame = prepareLevelOneFrame(levelOneKey, baseModels.toArray(new Model[0]), baseModelPredictions.toArray(new Frame[0]), actuals);
if (keepLevelOneFrame) {
levelOneFrame = levelOneFrame.deepCopy(levelOneFrame._key.toString());
levelOneFrame.write_lock(_job);
levelOneFrame.update(_job);
levelOneFrame.unlock(_job);
Scope.untrack(levelOneFrame.keysList());
}
return levelOneFrame;
}
@Override
public boolean onExceptionalCompletion(Throwable ex, CountedCompleter caller) {
if (_model != null) _model.delete();
return super.onExceptionalCompletion(ex, caller);
}
protected Frame buildPredictionsForBaseModel(Model model, Frame frame) {
Key<Frame> predsKey = buildPredsKey(model, frame);
Frame preds = DKV.getGet(predsKey);
if (preds == null) {
preds = model.score(frame, predsKey.toString(), null, false); // no need for metrics here (leaks in client mode)
Scope.untrack(preds.keysList());
}
if (_model._output._base_model_predictions_keys == null)
_model._output._base_model_predictions_keys = new Key[0];
if (!ArrayUtils.contains(_model._output._base_model_predictions_keys, predsKey)){
_model._output._base_model_predictions_keys = ArrayUtils.append(_model._output._base_model_predictions_keys, predsKey);
}
//predictions are cleaned up by metalearner if necessary
return preds;
}
TwoDimTable generateModelSummary() {
HashMap<String, Integer> baseModelTypes = new HashMap<>();
HashMap<String, Integer> usedBaseModelTypes = new HashMap<>();
for (Key bmk : _model._parms._base_models) {
Model bm = (Model) bmk.get();
if (_model.isUsefulBaseModel(bmk))
usedBaseModelTypes.put(bm._parms.algoName(), usedBaseModelTypes.containsKey(bm._parms.algoName()) ? usedBaseModelTypes.get(bm._parms.algoName()) + 1 : 1);
baseModelTypes.put(bm._parms.algoName(), baseModelTypes.containsKey(bm._parms.algoName()) ? baseModelTypes.get(bm._parms.algoName()) + 1 : 1);
}
List<String> rowHeaders = new ArrayList<>();
List<String> rowValues = new ArrayList<>();
rowHeaders.add("Stacking strategy");
rowValues.add(_model._output._stacking_strategy.toString());
rowHeaders.add("Number of base models (used / total)");
rowValues.add(Arrays.stream(_model._parms._base_models).filter(_model::isUsefulBaseModel).count() + "/" + _model._parms._base_models.length);
for (Map.Entry<String, Integer> baseModelType : baseModelTypes.entrySet()) {
rowHeaders.add("# " + baseModelType.getKey() + " base models (used / total)");
rowValues.add(((usedBaseModelTypes.containsKey(baseModelType.getKey())) ?
usedBaseModelTypes.get(baseModelType.getKey()) : "0") + "/" + baseModelType.getValue());
}
// Metalearner
rowHeaders.add("Metalearner algorithm");
rowValues.add(_model._output._metalearner._parms.algoName());
rowHeaders.add("Metalearner fold assignment scheme");
rowValues.add(_model._output._metalearner._parms._fold_assignment == null ? "AUTO" : _model._output._metalearner._parms._fold_assignment.name());
rowHeaders.add("Metalearner nfolds");
rowValues.add(""+_model._output._metalearner._parms._nfolds);
rowHeaders.add("Metalearner fold_column");
rowValues.add(_model._output._metalearner._parms._fold_column);
rowHeaders.add("Custom metalearner hyperparameters");
rowValues.add(_model._parms._metalearner_params.isEmpty()? "None" : _model._parms._metalearner_params);
TwoDimTable ms = new TwoDimTable("Model Summary for Stacked Ensemble", "",
rowHeaders.toArray(new String[]{}),
new String[]{"Value"},
new String[]{"string"},
new String[]{"%s"},
"Key"
);
int i = 0;
for (String val : rowValues){
ms.set(i++, 0, val);
}
return ms;
}
protected abstract StackedEnsembleModel.StackingStrategy strategy();
/**
* @return the frame that is used to compute the predictions for the level-one training frame.
*/
protected abstract Frame getActualTrainingFrame();
protected abstract Frame getPredictionsForBaseModel(Model model, Frame actualsFrame, boolean isTrainingFrame);
private Key<Frame> buildPredsKey(Key model_key, long model_checksum, Key frame_key, long frame_checksum) {
return Key.make("preds_" + model_checksum + "_on_" + frame_checksum);
}
protected Key<Frame> buildPredsKey(Model model, Frame frame) {
return frame == null || model == null ? null : buildPredsKey(model._key, model.checksum(), frame._key, frame.checksum());
}
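// e.g. with (hypothetical) checksums model.checksum() = 123 and frame.checksum() = 456 the cache key is
// "preds_123_on_456", so scoring the same base model on the same frame reuses the predictions frame
// already stored in DKV by buildPredictionsForBaseModel above.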
public void computeImpl() {
init(true);
if (error_count() > 0) throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(StackedEnsemble.this);
_model = new StackedEnsembleModel(dest(), _parms, new StackedEnsembleModel.StackedEnsembleOutput(StackedEnsemble.this));
_model._output._stacking_strategy = strategy();
try {
_model.delete_and_lock(_job); // and clear & write-lock it (smashing any prior)
checkAndInheritModelProperties(_model);
_model.update(_job);
} finally {
_model.unlock(_job);
}
String levelOneTrainKey = "levelone_training_" + _model._key.toString();
Frame levelOneTrainingFrame = prepareLevelOneFrame(levelOneTrainKey, _model._parms._base_models, getActualTrainingFrame(), true);
Frame levelOneValidationFrame = null;
if (_model._parms.valid() != null) {
String levelOneValidKey = "levelone_validation_" + _model._key.toString();
levelOneValidationFrame = prepareLevelOneFrame(levelOneValidKey, _model._parms._base_models, _model._parms.valid(), false);
}
Metalearner.Algorithm metalearnerAlgoSpec = _model._parms._metalearner_algorithm;
Metalearner.Algorithm metalearnerAlgoImpl = Metalearners.getActualMetalearnerAlgo(metalearnerAlgoSpec);
// Compute metalearner
if(metalearnerAlgoImpl != null) {
Key<Model> metalearnerKey = Key.<Model>make("metalearner_" + metalearnerAlgoSpec + "_" + _model._key);
Job metalearnerJob = new Job<>(metalearnerKey, ModelBuilder.javaName(metalearnerAlgoImpl.toString()),
"StackingEnsemble metalearner (" + metalearnerAlgoSpec + ")");
//Check if metalearner_params are passed in
boolean hasMetaLearnerParams = _model._parms._metalearner_parameters != null;
long metalearnerSeed = _model._parms._seed;
Metalearner metalearner = Metalearners.createInstance(metalearnerAlgoSpec.name());
metalearner.init(
levelOneTrainingFrame,
levelOneValidationFrame,
_model._parms._metalearner_parameters,
_model,
_job,
metalearnerKey,
metalearnerJob,
_parms,
hasMetaLearnerParams,
metalearnerSeed,
_parms._max_runtime_secs == 0 ? 0 : Math.max(remainingTimeSecs(), 1)
);
metalearner.compute();
} else {
throw new H2OIllegalArgumentException("Invalid `metalearner_algorithm`. Passed in " + metalearnerAlgoSpec +
" but must be one of " + Arrays.toString(Metalearner.Algorithm.values()));
}
if (_model.evalAutoParamsEnabled && _model._parms._metalearner_algorithm == Metalearner.Algorithm.AUTO)
_model._parms._metalearner_algorithm = metalearnerAlgoImpl;
_model._output._model_summary = generateModelSummary();
} // computeImpl
}
private class StackedEnsembleCVStackingDriver extends StackedEnsembleDriver {
@Override
protected StackedEnsembleModel.StackingStrategy strategy() {
return StackedEnsembleModel.StackingStrategy.cross_validation;
}
@Override
protected Frame getActualTrainingFrame() {
return _model._parms.train();
}
@Override
protected Frame getPredictionsForBaseModel(Model model, Frame actualsFrame, boolean isTraining) {
Frame fr;
if (isTraining) {
// for training, retrieve predictions from the cv holdout predictions frame, as all base models are required to be built with keep_cross_validation_predictions=true
if (null == model._output._cross_validation_holdout_predictions_frame_id)
throw new H2OIllegalArgumentException("Failed to find the xval predictions frame id. . . Looks like keep_cross_validation_predictions wasn't set when building the models.");
fr = DKV.getGet(model._output._cross_validation_holdout_predictions_frame_id);
if (null == fr)
throw new H2OIllegalArgumentException("Failed to find the xval predictions frame. . . Looks like keep_cross_validation_predictions wasn't set when building the models, or the frame was deleted.");
} else {
fr = buildPredictionsForBaseModel(model, actualsFrame);
}
return fr;
}
}
private class StackedEnsembleBlendingDriver extends StackedEnsembleDriver {
@Override
protected StackedEnsembleModel.StackingStrategy strategy() {
return StackedEnsembleModel.StackingStrategy.blending;
}
@Override
protected Frame getActualTrainingFrame() {
return _model._parms.blending();
}
@Override
protected Frame getPredictionsForBaseModel(Model model, Frame actualsFrame, boolean isTrainingFrame) {
// if training we can stop prematurely due to a timeout but computing validation scores should be allowed to finish
if (stop_requested() && isTrainingFrame) {
throw new Job.JobCancelledException();
}
return buildPredictionsForBaseModel(model, actualsFrame);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/ensemble/StackedEnsembleModel.java
|
package hex.ensemble;
import hex.*;
import hex.genmodel.utils.DistributionFamily;
import hex.genmodel.utils.LinkFunctionType;
import water.*;
import water.exceptions.H2OIllegalArgumentException;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.udf.CFuncRef;
import water.util.Log;
import water.util.MRUtils;
import water.util.TwoDimTable;
import water.util.fp.Function2;
import java.util.*;
import java.util.stream.Stream;
import static hex.Model.Contributions.ContributionsOutputFormat.Original;
import static hex.Model.Parameters.FoldAssignmentScheme.AUTO;
import static hex.Model.Parameters.FoldAssignmentScheme.Random;
/**
* An ensemble of other models, created by <i>stacking</i> with the SuperLearner algorithm or a variation.
*/
public class StackedEnsembleModel
extends Model<StackedEnsembleModel,StackedEnsembleModel.StackedEnsembleParameters,StackedEnsembleModel.StackedEnsembleOutput>
implements Model.Contributions{
// common parameters for the base models (keeping them public for backwards compatibility, although it's nonsense)
public ModelCategory modelCategory;
public long trainingFrameRows = -1;
public String responseColumn = null;
class GDeepSHAP extends MRTask<GDeepSHAP> {
final String[] _columns;
final int[][] _baseIdx;
final int[] _metaIdx;
final int[] _levelOneIdx;
final int _biasTermIdx;
final int _biasTermSrc;
final Integer[] _baseModelIdx;
final int[] _biasTermIndices;
final int[] _rowIndices;
final int[] _rowBgIndices;
final StackedEnsembleParameters.MetalearnerTransform _metaLearnerTransform;
GDeepSHAP(String[] columns, String[] baseModels, String[] bigFrameColumnsArr, Integer[] baseModelIdx, StackedEnsembleParameters.MetalearnerTransform metaLearnerTransform) {
_columns = columns;
_baseIdx = new int[columns.length][baseModels.length];
_metaIdx = new int[baseModels.length];
_levelOneIdx = new int[baseModels.length];
_biasTermIdx = columns.length;
List<String> bigFrameColumns = Arrays.asList(bigFrameColumnsArr);
_biasTermSrc = bigFrameColumns.indexOf("metalearner_BiasTerm");
_baseModelIdx = baseModelIdx;
_metaLearnerTransform = metaLearnerTransform;
_biasTermIndices = new int[baseModels.length];
_rowIndices = new int[baseModels.length + 1];
_rowBgIndices = new int[baseModels.length + 1];
for (int i = 0; i < columns.length; i++) {
for (int j = 0; j < baseModels.length; j++) {
_baseIdx[i][j] = bigFrameColumns.indexOf(baseModels[j] + "_" + columns[i]);
}
}
for (int i = 0; i < baseModels.length; i++) {
_metaIdx[i] = bigFrameColumns.indexOf("metalearner_" + baseModels[i]);
_levelOneIdx[i] = bigFrameColumns.indexOf(baseModels[i]);
_biasTermIndices[i] = bigFrameColumns.indexOf(baseModels[i]+"_RowIdx");
_rowIndices[i] = bigFrameColumns.indexOf(baseModels[i]+"_RowIdx");
_rowBgIndices[i] = bigFrameColumns.indexOf(baseModels[i]+"_BackgroundRowIdx");
}
_rowIndices[baseModels.length] = bigFrameColumns.indexOf("metalearner_RowIdx");
_rowBgIndices[baseModels.length] = bigFrameColumns.indexOf("metalearner_BackgroundRowIdx");
}
private double baseModelContribution(Chunk[] chunks, int rowIdx, int baseModelIdx, int featureIdx) {
return chunks[_baseIdx[featureIdx][baseModelIdx]].atd(rowIdx);
}
private double metalearnerContribution(Chunk[] chunks, int rowIdx, int baseModelIdx) {
return chunks[_metaIdx[baseModelIdx]].atd(rowIdx);
}
private double baseModelBiasTerm(Chunk[] chunks, int rowIdx, int baseModelIdx) {
return chunks[_biasTermIndices[baseModelIdx]].atd(rowIdx);
}
private double div(double a, double b) {
return Math.abs(b) < 1e-6 ? 0 : a/b;
}
@Override
public void map(Chunk[] cs, NewChunk[] ncs) {
// Multiplier is array of basemodel's contribution to metalearners prediction divided by the difference between
// prediction on the explained point and the background point. Simplified GDeepSHAP rescale rule for 2-layers:
//
// / phi_{i+1}_1_1 .. phi_{i+1}_1_m \ / f_i_1(x) - f_i_1(b) \
// phi_i (psi_{i+1} (/) (f_i(x)-f_i(b)) = (phi_i_1, ..., phi_i_n) | ... ... | (/) | ... | =
// \ phi_{i+1}_n_1 .. phi_{i+1}_n_m / \ f_i_n(x) - f_i_n(b) /
//
// = (sum_{k <= n} phi_i_k * phi_{i+1}_k_1 / (f_i_k(x) - f_i_k(b)), ..., sum_{k <= n} phi_i_k * phi_{i+1}_k_m / (f_i_k(x) - f_i_k(b)))
//
// in this notation multiplier[k] = phi_i_k / (f_i_k(x) - f_i_k(b))
double[] multiplier = MemoryManager.malloc8d(_metaIdx.length);
double result = 0;
for (int row = 0; row < cs[0]._len; row++) {
// sanity check: every base model's RowIdx/BackgroundRowIdx columns must agree with the metalearner's for this row
long rowIdx = cs[_rowIndices[0]].at8(row);
long rowBgIdx = cs[_rowBgIndices[0]].at8(row);
for (int i = 0; i < _rowIndices.length; i++) {
assert rowIdx == cs[_rowIndices[i]].at8(row);
assert rowBgIdx == cs[_rowBgIndices[i]].at8(row);
}
Arrays.fill(multiplier, 0);
for (int bm = 0; bm < _baseModelIdx.length-1; bm++) {
for (int col = 0; col < _columns.length; col++) {
multiplier[bm] += baseModelContribution(cs, row, bm, col);
}
multiplier[bm] = div(metalearnerContribution(cs, row, bm), multiplier[bm]);
}
// Should we deal here with metalearner transform? No, since the transformation is one dimensional and
// transforms all the inputs independently, the generalized rescale rule cancels it out.
//
// Let B stand for basemodel, L for logit metalearner transform, M for metalearner.
// Contributions of the transform sum up to f_L(x) - f_L(b) and since the transform is one dimensional
// we know that phi_L = f_L(x) - f_L(b).
// Let denote the final contributions as phi, (/) as Hadamard division and (*) Hadamard product.
//
// Without the metalearner transform the generalized rescale rule gives us:
// phi = phi_B (phi_M (/) (f_B(x)-f_B(b)))
//
// With metalearner transform we have:
// phi = (phi_B(phi_L(phi_M (/) (f_L(x)-f_L(b))) (/) (f_B(x)-f_B(b)))) =
// = (phi_B((f_L(x) - f_L(b))(phi_M (*) (1 (/) (f_L(x)-f_L(b)))) (/) (f_B(x)-f_B(b)))) =
// # Hadamard product is commutative and associative
// = (phi_B( (f_L(x) - f_L(b)) (/) (f_L(x)-f_L(b)) (*) phi_M)) (/) (f_B(x)-f_B(b)))) =
// = (phi_B( 1 (*) phi_M) (/) (f_B(x)-f_B(b)))) =
// = phi_B (phi_M (/) (f_B(x)-f_B(b)))
for (int col = 0; col < ncs.length-3; col++) {
result = 0;
for (int bm = 0; bm < multiplier.length; bm++) {
result += multiplier[bm]*baseModelContribution(cs, row, bm, col);
}
ncs[col].addNum(result);
}
ncs[ncs.length-3].addNum(cs[_biasTermSrc].atd(row));
ncs[ncs.length-2].addNum(cs[_rowIndices[0]].at8(row));
ncs[ncs.length-1].addNum(cs[_rowBgIndices[0]].at8(row));
}
}
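// Worked toy example of the rescale rule implemented in map() above (numbers are illustrative only):
// with two base models whose level-one predictions change by f_1(x)-f_1(b) = 0.2 and f_2(x)-f_2(b) = -0.1,
// metalearner contributions phi_M = (0.4, -0.05), and base-model feature contributions
// phi_1 = (0.15, 0.05), phi_2 = (-0.06, -0.04), the multipliers are 0.4/0.2 = 2 and -0.05/-0.1 = 0.5,
// so the ensemble contribution of feature 1 is 2*0.15 + 0.5*(-0.06) = 0.27 and of feature 2 is
// 2*0.05 + 0.5*(-0.04) = 0.08; they sum to 0.35 = 0.4 + (-0.05), matching the metalearner's total.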
}
int numOfUsefulBaseModels(){
int result = 0;
for (Key<Model> bm : _parms._base_models)
if (isUsefulBaseModel(bm))
result++;
return result;
}
private Frame baseLineContributions(Frame frame, Key<Frame> destination_key, Job<Frame> j, ContributionsOptions options, Frame backgroundFrame) {
List<String> baseModels = new ArrayList<>();
List<Integer> baseModelsIdx = new ArrayList<>();
String[] columns = null;
baseModelsIdx.add(0);
try (Scope.Safe s = Scope.safe(frame, backgroundFrame)) {
Frame fr = new Frame();
for (Key<Model> bm : _parms._base_models) {
if (isUsefulBaseModel(bm)) {
baseModels.add(bm.toString());
Frame contributions = ((Model.Contributions) bm.get()).scoreContributions(
frame,
Key.make(destination_key.toString()+"_"+bm),
j,
new ContributionsOptions()
.setOutputFormat(options._outputFormat)
.setOutputSpace(true)
.setOutputPerReference(true),
backgroundFrame);
Scope.track(contributions);
if (null == columns)
columns = contributions._names;
if (!Arrays.equals(columns, contributions._names)) {
if (columns.length == contributions._names.length) {
HashSet<String> colSet = new HashSet<>();
List<String> colList = Arrays.asList(columns);
List<String> contrList = Arrays.asList(contributions._names);
colSet.addAll(colList);
if (colSet.containsAll(contrList)) {
int[] perm = new int[columns.length];
for (int i = 0; i < columns.length; i++) {
perm[i] = contrList.indexOf(columns[i]);
}
contributions.reOrder(perm);
}
}
if (!Arrays.equals(columns, contributions._names)) {
if (Original.equals(options._outputFormat)) {
throw new IllegalArgumentException("Base model contributions have different columns likely due to models using different categorical encoding. Please use output_format=\"compact\".");
}
throw new RuntimeException("Base model contributions have different columns. This is not expected. Please fill in a bug report.");
}
}
contributions.setNames(
Arrays.stream(contributions._names)
.map(name -> bm+"_"+name)
.toArray(String[]::new)
);
fr.add(contributions);
baseModelsIdx.add(fr.numCols());
}
}
if (baseModels.isEmpty())
throw new RuntimeException("Stacked Ensemble \"" + this._key + "\" doesn't use any base models. Stopping contribution calculation as no feature contributes.");
assert columns[columns.length - 3].equals("BiasTerm") && columns[columns.length - 2].equals("RowIdx") && columns[columns.length - 1].equals("BackgroundRowIdx");
String[] colsWithRows = columns;
columns = Arrays.copyOfRange(columns, 0, columns.length - 3);
Frame adaptFr = adaptFrameForScore(frame, false);
Frame levelOneFrame = makeLevelOnePredictFrame(frame, adaptFr, j);
Frame adaptFrBg = adaptFrameForScore(backgroundFrame, false);
Frame levelOneFrameBg = makeLevelOnePredictFrame(backgroundFrame, adaptFrBg, j);
Frame metalearnerContrib = ((Model.Contributions) _output._metalearner).scoreContributions(levelOneFrame,
Key.make(destination_key + "_" + _output._metalearner._key), j,
new ContributionsOptions()
.setOutputFormat(options._outputFormat)
.setOutputSpace(options._outputSpace)
.setOutputPerReference(true),
levelOneFrameBg);
Scope.track(metalearnerContrib);
metalearnerContrib.setNames(Arrays.stream(metalearnerContrib._names)
.map(name -> "metalearner_" + name)
.toArray(String[]::new));
fr.add(metalearnerContrib);
DKV.remove(metalearnerContrib.getKey());
return Scope.untrack(new GDeepSHAP(columns, baseModels.toArray(new String[0]),
fr._names, baseModelsIdx.toArray(new Integer[0]), _parms._metalearner_transform)
.withPostMapAction(JobUpdatePostMap.forJob(j))
.doAll(colsWithRows.length, Vec.T_NUM, fr)
.outputFrame(destination_key, colsWithRows, null));
}
}
@Override
public long scoreContributionsWorkEstimate(Frame frame, Frame backgroundFrame, boolean outputPerReference) {
long workAmount = Math.max(frame.numRows(), backgroundFrame.numRows()); // Maps over the bigger frame while the smaller is sent across the cluster
workAmount *= numOfUsefulBaseModels() + 1; // by each BaseModel and the metalearner
workAmount += frame.numRows() * backgroundFrame.numRows(); // G-DeepSHAP work
if (!outputPerReference)
workAmount += frame.numRows() * backgroundFrame.numRows(); // Aggregating over the baselines
return workAmount;
}
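// Illustrative estimate (numbers are hypothetical): for frame.numRows() = 1000, backgroundFrame.numRows() = 100,
// 3 useful base models and aggregated output (outputPerReference = false) this yields
// 1000 * (3 + 1) + 1000 * 100 + 1000 * 100 = 204000 units of work.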
@Override
public Frame scoreContributions(Frame frame, Key<Frame> destination_key, Job<Frame> j, ContributionsOptions options, Frame backgroundFrame) {
if (null == backgroundFrame)
throw H2O.unimpl("StackedEnsemble supports contribution calculation only with a background frame.");
Log.info("Starting contributions calculation for " + this._key + "...");
try (Scope.Safe safe = Scope.safe(frame, backgroundFrame)) {
Frame contributions;
if (options._outputPerReference) {
contributions = baseLineContributions(frame, destination_key, j, options, backgroundFrame);
} else {
Function2<Frame, Boolean, Frame> fun = (subFrame, resultIsFinalFrame) -> {
String[] columns = null;
String[] colsWithBiasTerm = null;
Frame indivContribs = baseLineContributions(subFrame, Key.make(destination_key+"_individual_contribs_for_subframe_"+subFrame._key), j, options, backgroundFrame);
columns = Arrays.copyOf(indivContribs.names(), indivContribs.names().length-3);
colsWithBiasTerm = Arrays.copyOf(indivContribs.names(), indivContribs.names().length-2);
assert colsWithBiasTerm[colsWithBiasTerm.length-1].equals("BiasTerm");
try {
return new ContributionsMeanAggregator(j, (int) subFrame.numRows(), columns.length+1 /* (bias term) */, (int) backgroundFrame.numRows())
.withPostMapAction(JobUpdatePostMap.forJob(j))
.doAll(columns.length+1, Vec.T_NUM, indivContribs)
.outputFrame(resultIsFinalFrame
? destination_key // no subframes -> one result with the destination key
: Key.make(destination_key+"_for_subframe_"+subFrame._key),
colsWithBiasTerm, null);
} finally {
indivContribs.delete(true);
}
};
if (backgroundFrame.anyVec().nChunks() > H2O.CLOUD._memary.length || // could be map-reduced over the bg frame
!ContributionsWithBackgroundFrameTask.enoughMinMemory(numOfUsefulBaseModels() *
ContributionsWithBackgroundFrameTask.estimatePerNodeMinimalMemory(frame.numCols(), frame, backgroundFrame))) // or we have no other choice due to memory
contributions = SplitToChunksApplyCombine.splitApplyCombine(frame, (fr -> fun.apply(fr, false)), destination_key);
else {
contributions = fun.apply(frame, true);
DKV.put(contributions);
}
}
return Scope.untrack(contributions);
} finally {
Log.info("Finished contributions calculation for " + this._key + "...");
}
}
public enum StackingStrategy {
cross_validation,
blending
}
// TODO: add a separate holdout dataset for the ensemble
// TODO: add a separate overall cross-validation for the ensemble, including _fold_column and FoldAssignmentScheme / _fold_assignment
public StackedEnsembleModel(Key selfKey, StackedEnsembleParameters parms, StackedEnsembleOutput output) {
super(selfKey, parms, output);
}
@Override
public void initActualParamValues() {
super.initActualParamValues();
if (_parms._metalearner_fold_assignment == AUTO) {
_parms._metalearner_fold_assignment = Random;
}
}
@Override
public boolean haveMojo() {
return super.haveMojo()
&& Stream.of(_parms._base_models)
.filter(this::isUsefulBaseModel)
.map(DKV::<Model>getGet)
.allMatch(Model::haveMojo);
}
public static class StackedEnsembleParameters extends Model.Parameters {
public String algoName() { return "StackedEnsemble"; }
public String fullName() { return "Stacked Ensemble"; }
public String javaName() { return StackedEnsembleModel.class.getName(); }
@Override public long progressUnits() { return 1; } // TODO
// base_models is a list of base model keys to ensemble (must have been cross-validated)
public Key<Model> _base_models[] = new Key[0];
// Should we keep the level-one frame of cv preds + response col?
public boolean _keep_levelone_frame = false;
// internal flag if we want to avoid having the base model predictions rescored multiple times, esp. for blending.
public boolean _keep_base_model_predictions = false;
// Metalearner params
//for stacking using cross-validation
public int _metalearner_nfolds;
public Parameters.FoldAssignmentScheme _metalearner_fold_assignment;
public String _metalearner_fold_column;
//the training frame used for blending (from which predictions columns are computed)
public Key<Frame> _blending;
public enum MetalearnerTransform {
NONE,
Logit;
private LinkFunction logitLink = LinkFunctionFactory.getLinkFunction(LinkFunctionType.logit);
public Frame transform(StackedEnsembleModel model, Frame frame, Key<Frame> destKey) {
if (this == Logit) {
return new MRTask() {
@Override
public void map(Chunk[] cs, NewChunk[] ncs) {
for (int c = 0; c < cs.length; c++) {
for (int i = 0; i < cs[c]._len; i++) {
final double p = Math.min(1 - 1e-9, Math.max(cs[c].atd(i), 1e-9)); // 0 and 1 don't work well with logit
ncs[c].addNum(logitLink.link(p));
}
}
}
}.doAll(frame.numCols(), Vec.T_NUM, frame)
.outputFrame(destKey, frame._names, null);
} else {
throw H2O.unimpl("Transformation "+this.name()+" is not supported.");
}
}
}
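    // A minimal sketch of what the Logit transform above does to a single base-model probability
    // value (the 'probs' array below is hypothetical, not part of this class): each probability p
    // is clamped away from 0 and 1 and mapped through the logit link, i.e. log(p / (1 - p)).
    //
    //   double[] probs = {0.0, 0.2, 0.9, 1.0};                      // hypothetical base-model predictions
    //   for (int i = 0; i < probs.length; i++) {
    //     double p = Math.min(1 - 1e-9, Math.max(probs[i], 1e-9));  // same clamping as in map() above
    //     double logit = Math.log(p / (1 - p));                     // what logitLink.link(p) computes
    //   }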
public MetalearnerTransform _metalearner_transform = MetalearnerTransform.NONE;
public Metalearner.Algorithm _metalearner_algorithm = Metalearner.Algorithm.AUTO;
    public String _metalearner_params = new String(); //used for client code-gen only.
public Model.Parameters _metalearner_parameters;
public long _score_training_samples = 10_000;
/**
* initialize {@link #_metalearner_parameters} with default parameters for the current {@link #_metalearner_algorithm}.
*/
public void initMetalearnerParams() {
initMetalearnerParams(_metalearner_algorithm);
}
/**
* initialize {@link #_metalearner_parameters} with default parameters for the given algorithm
* @param algo the metalearner algorithm we want to use and for which parameters are initialized.
*/
public void initMetalearnerParams(Metalearner.Algorithm algo) {
_metalearner_algorithm = algo;
_metalearner_parameters = Metalearners.createParameters(algo.name());
}
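    // A minimal usage sketch (assuming Metalearner.Algorithm exposes a glm constant; that name is
    // an assumption here): the second call overwrites _metalearner_parameters with GLM defaults.
    //
    //   StackedEnsembleParameters p = new StackedEnsembleParameters();
    //   p.initMetalearnerParams();                           // uses _metalearner_algorithm (AUTO by default)
    //   p.initMetalearnerParams(Metalearner.Algorithm.glm);  // explicitly request a GLM metalearner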
public final Frame blending() { return _blending == null ? null : _blending.get(); }
@Override
public String[] getNonPredictors() {
HashSet<String> nonPredictors = new HashSet<>();
nonPredictors.addAll(Arrays.asList(super.getNonPredictors()));
if (null != _metalearner_fold_column)
nonPredictors.add(_metalearner_fold_column);
return nonPredictors.toArray(new String[0]);
}
@Override
public DistributionFamily getDistributionFamily() {
if (_metalearner_parameters != null)
return _metalearner_parameters.getDistributionFamily();
return super.getDistributionFamily();
}
@Override
public void setDistributionFamily(DistributionFamily distributionFamily) {
assert _metalearner_parameters != null;
_metalearner_parameters.setDistributionFamily(distributionFamily);
}
}
public static class StackedEnsembleOutput extends Model.Output {
public StackedEnsembleOutput() { super(); }
public StackedEnsembleOutput(StackedEnsemble b) { super(b); }
public StackedEnsembleOutput(Job job) { _job = job; }
// The metalearner model (e.g., a GLM that has a coefficient for each of the base_learners).
public Model _metalearner;
public Frame _levelone_frame_id; //set only if StackedEnsembleParameters#_keep_levelone_frame=true
public StackingStrategy _stacking_strategy;
    //Set of base model predictions that have been cached in DKV to avoid scoring the same model multiple times;
    // it is then the responsibility of the client code to delete those frames from DKV.
    //This is especially useful when building SE models incrementally (e.g. in AutoML).
    //The Set is instantiated and filled only if StackedEnsembleParameters#_keep_base_model_predictions=true.
public Key<Frame>[] _base_model_predictions_keys;
@Override
public int nfeatures() {
return super.nfeatures() - (_metalearner._parms._fold_column == null ? 0 : 1);
}
}
/**
* For StackedEnsemble we call score on all the base_models and then combine the results
* with the metalearner to create the final predictions frame.
*
* @see Model#predictScoreImpl(Frame, Frame, String, Job, boolean, CFuncRef)
* @param adaptFrm Already adapted frame
   * @param computeMetrics whether model metrics should be computed and stored for this scoring run
* @return A Frame containing the prediction column, and class distribution
*/
@Override
protected PredictScoreResult predictScoreImpl(Frame fr, Frame adaptFrm, String destination_key, Job j, boolean computeMetrics, CFuncRef customMetricFunc) {
try (Scope.Safe safe = Scope.safe(fr, adaptFrm)) {
Frame levelOneFrame = makeLevelOnePredictFrame(fr, adaptFrm, j);
// TODO: what if we're running multiple in parallel and have a name collision?
Log.info("Finished creating \"level one\" frame for scoring: "+levelOneFrame.toString());
// Score the dataset, building the class distribution & predictions
Model metalearner = this._output._metalearner;
Frame predictFr = metalearner.score(
levelOneFrame,
destination_key,
j,
computeMetrics,
CFuncRef.from(_parms._custom_metric_func)
);
ModelMetrics mmStackedEnsemble = null;
if (computeMetrics) {
// #score has just stored a ModelMetrics object for the (metalearner, preds_levelone) Model/Frame pair.
// We need to be able to look it up by the (this, fr) pair.
// The ModelMetrics object for the metalearner will be removed when the metalearner is removed.
Key<ModelMetrics>[] mms = metalearner._output.getModelMetrics();
ModelMetrics lastComputedMetric = mms[mms.length-1].get();
mmStackedEnsemble = lastComputedMetric.deepCloneWithDifferentModelAndFrame(this, fr);
this.addModelMetrics(mmStackedEnsemble);
//now that we have the metric set on the SE model, removing the one we just computed on metalearner (otherwise it leaks in client mode)
for (Key<ModelMetrics> mm : metalearner._output.clearModelMetrics(true)) {
DKV.remove(mm);
}
}
Scope.untrack(predictFr); // needed in the result
return new StackedEnsemblePredictScoreResult(predictFr, mmStackedEnsemble);
}
}
private Frame makeLevelOnePredictFrame(Frame fr, Frame adaptFrm, Job j) {
final StackedEnsembleParameters.MetalearnerTransform transform;
if (_parms._metalearner_transform != null && _parms._metalearner_transform != StackedEnsembleParameters.MetalearnerTransform.NONE) {
if (!(_output.isBinomialClassifier() || _output.isMultinomialClassifier()))
throw new H2OIllegalArgumentException("Metalearner transform is supported only for classification!");
transform = _parms._metalearner_transform;
} else {
transform = null;
}
final String seKey = this._key.toString();
final String frId = ""+(fr._key == null ? fr.checksum() : fr._key);
final Key<Frame> levelOneFrameKey = Key.make("preds_levelone_"+seKey+"_"+frId);
Frame levelOneFrame = transform == null
? new Frame(levelOneFrameKey) // no transform -> this will be the final frame
: new Frame(); // transform -> this is only an intermediate result
Model[] usefulBaseModels = Stream.of(_parms._base_models)
.filter(this::isUsefulBaseModel)
.map(Key::get)
.toArray(Model[]::new);
if (usefulBaseModels.length > 0) {
Frame[] baseModelPredictions = new Frame[usefulBaseModels.length];
// Run scoring of base models in parallel
H2O.submitTask(new LocalMR(new MrFun() {
@Override
protected void map(int id) {
baseModelPredictions[id] = usefulBaseModels[id].score(
fr,
"preds_base_"+seKey+"_"+usefulBaseModels[id]._key+"_"+frId,
j,
false
);
}
}, usefulBaseModels.length)).join();
for (int i = 0; i < usefulBaseModels.length; i++) {
StackedEnsemble.addModelPredictionsToLevelOneFrame(usefulBaseModels[i], baseModelPredictions[i], levelOneFrame);
DKV.remove(baseModelPredictions[i]._key); //Cleanup
Frame.deleteTempFrameAndItsNonSharedVecs(baseModelPredictions[i], levelOneFrame);
}
}
if (transform != null) {
Frame oldLOF = levelOneFrame;
levelOneFrame = transform.transform(this, levelOneFrame, levelOneFrameKey);
oldLOF.remove();
}
// Add response column, weights columns to level one frame
StackedEnsemble.addNonPredictorsToLevelOneFrame(_parms, adaptFrm, levelOneFrame, false);
Scope.track(levelOneFrame); // level-one frame is always temporary and must be used in a scoped context.
return levelOneFrame;
}
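  // Illustrative layout of the level-one frame built above for a binomial problem with two useful
  // base models (column names are schematic; the real names come from
  // StackedEnsemble.addModelPredictionsToLevelOneFrame and addNonPredictorsToLevelOneFrame):
  //
  //   <base_model_1 preds>   <base_model_2 preds>   [weights]   response
  //   0.83                   0.79                   1.0         yes
  //   0.12                   0.20                   1.0         no
  //
  // The metalearner is then scored on exactly this frame (optionally logit-transformed first).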
private class StackedEnsemblePredictScoreResult extends PredictScoreResult {
private final ModelMetrics _modelMetrics;
public StackedEnsemblePredictScoreResult(Frame preds, ModelMetrics modelMetrics) {
super(null, preds, preds);
_modelMetrics = modelMetrics;
}
@Override
public ModelMetrics makeModelMetrics(Frame fr, Frame adaptFrm) {
return _modelMetrics;
}
@Override
public ModelMetrics.MetricBuilder<?> getMetricBuilder() {
throw new UnsupportedOperationException("Stacked Ensemble model doesn't implement MetricBuilder infrastructure code, " +
"retrieve your metrics by calling getOrMakeMetrics method.");
}
}
  /**
   * Is the baseModel's prediction used in the metalearner?
   *
   * @param baseModelKey key of the base model to check
   * @return true if the metalearner uses at least one prediction column coming from the given base model
   */
boolean isUsefulBaseModel(Key<Model> baseModelKey) {
Model metalearner = _output._metalearner;
assert metalearner != null : "can't use isUsefulBaseModel during training";
if (modelCategory == ModelCategory.Multinomial) {
// Multinomial models output multiple columns and a base model
// might be useful just for one category...
for (String feature : metalearner._output._names) {
if (feature.startsWith(baseModelKey.toString().concat("/"))){
if (metalearner.isFeatureUsedInPredict(feature)) {
return true;
}
}
}
return false;
} else {
return metalearner.isFeatureUsedInPredict(baseModelKey.toString());
}
}
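  // Illustrative example of the multinomial branch above (keys and class names are hypothetical):
  // for a 3-class problem, a base model with key "GBM_1" contributes level-one columns named like
  // "GBM_1/classA", "GBM_1/classB", "GBM_1/classC". The loop treats the base model as useful as soon
  // as the metalearner uses any one of them, e.g.:
  //
  //   "GBM_1/classB".startsWith("GBM_1" + "/")   // true -> isFeatureUsedInPredict("GBM_1/classB") is checked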
/**
* Should never be called: the code paths that normally go here should call predictScoreImpl().
* @see Model#score0(double[], double[])
*/
@Override
protected double[] score0(double data[/*ncols*/], double preds[/*nclasses+1*/]) {
throw new UnsupportedOperationException("StackedEnsembleModel.score0() should never be called: the code paths that normally go here should call predictScoreImpl().");
}
@Override public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) {
throw new UnsupportedOperationException("StackedEnsembleModel.makeMetricBuilder should never be called!");
}
private ModelMetrics doScoreTrainingMetrics(Frame frame, Job job) {
Frame scoredFrame = (_parms._score_training_samples > 0 && _parms._score_training_samples < frame.numRows())
? MRUtils.sampleFrame(frame, _parms._score_training_samples, _parms._seed)
: frame;
try {
Frame adaptedFrame = new Frame(scoredFrame);
PredictScoreResult result = predictScoreImpl(scoredFrame, adaptedFrame, null, job, true, CFuncRef.from(_parms._custom_metric_func));
result.getPredictions().delete();
return result.makeModelMetrics(scoredFrame, adaptedFrame);
} finally {
if (scoredFrame != frame) scoredFrame.delete();
}
}
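  // Sampling note for doScoreTrainingMetrics above: with the default _score_training_samples = 10_000
  // and, say, a 1M-row training frame, metrics are computed on a ~10k-row sample drawn with _parms._seed;
  // frames at or below the limit (or a limit of 0) are scored in full.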
void doScoreOrCopyMetrics(Job job) {
// To get ensemble training metrics, the training frame needs to be re-scored since
// training metrics from metalearner are not equal to ensemble training metrics.
// The training metrics for the metalearner do not reflect true ensemble training metrics because
// the metalearner was trained on cv preds, not training preds. So, rather than clone the metalearner
// training metrics, we have to re-score the training frame on all the base models, then send these
// biased preds through to the metalearner, and then compute the metrics there.
//
// Job set to null since `stop_requested()` due to timeout would invalidate the whole SE at this point
// which would be unfortunate since this is the last step of SE training and it also should be relatively fast.
this._output._training_metrics = doScoreTrainingMetrics(this._parms.train(), null);
// Validation metrics can be copied from metalearner (may be null).
// Validation frame was already piped through so there's no need to re-do that to get the same results.
this._output._validation_metrics = this._output._metalearner._output._validation_metrics;
// Cross-validation metrics can be copied from metalearner (may be null).
// For cross-validation metrics, we use metalearner cross-validation metrics as a proxy for the ensemble
// cross-validation metrics -- the true k-fold cv metrics for the ensemble would require training k sets of
// cross-validated base models (rather than a single set of cross-validated base models), which is extremely
// computationally expensive and awkward from the standpoint of the existing Stacked Ensemble API.
// More info: https://github.com/h2oai/h2o-3/issues/10864
    // Need to deep-clone because otherwise the frame key would be wrong (the metalearner's training frame is the level-one frame, not the original training frame)
if (null != this._output._metalearner._output._cross_validation_metrics) {
this._output._cross_validation_metrics = this._output._metalearner._output._cross_validation_metrics
.deepCloneWithDifferentModelAndFrame(this, this._output._metalearner._parms.train());
this._output._cross_validation_metrics_summary = (TwoDimTable) this._output._metalearner._output._cross_validation_metrics_summary.clone();
}
}
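  // Metric provenance after doScoreOrCopyMetrics above:
  //   _training_metrics         -> recomputed by re-scoring the (possibly sampled) training frame through the whole ensemble
  //   _validation_metrics       -> copied from the metalearner (may be null)
  //   _cross_validation_metrics -> metalearner CV metrics, deep-cloned so they reference this model and its training frame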
public void deleteBaseModelPredictions() {
if (_output._base_model_predictions_keys != null) {
for (Key<Frame> key : _output._base_model_predictions_keys) {
if (_output._levelone_frame_id != null && key.get() != null)
Frame.deleteTempFrameAndItsNonSharedVecs(key.get(), _output._levelone_frame_id);
else
Keyed.remove(key);
}
_output._base_model_predictions_keys = null;
}
}
@Override protected Futures remove_impl(Futures fs, boolean cascade) {
deleteBaseModelPredictions();
if (_output._metalearner != null)
_output._metalearner.remove(fs);
if (_output._levelone_frame_id != null)
_output._levelone_frame_id.remove(fs);
return super.remove_impl(fs, cascade);
}
/** Write out models (base + metalearner) */
@Override protected AutoBuffer writeAll_impl(AutoBuffer ab) {
//Metalearner
ab.putKey(_output._metalearner._key);
//Base Models
for (Key<Model> ks : _parms._base_models)
ab.putKey(ks);
return super.writeAll_impl(ab);
}
/** Read in models (base + metalearner) */
@Override protected Keyed readAll_impl(AutoBuffer ab, Futures fs) {
//Metalearner
ab.getKey(_output._metalearner._key,fs);
//Base Models
for (Key<Model> ks : _parms._base_models)
ab.getKey(ks,fs);
return super.readAll_impl(ab,fs);
}
@Override
public StackedEnsembleMojoWriter getMojo() {
return new StackedEnsembleMojoWriter(this);
}
@Override
public void deleteCrossValidationModels() {
if (_output._metalearner != null)
_output._metalearner.deleteCrossValidationModels();
}
@Override
public void deleteCrossValidationPreds() {
if (_output._metalearner != null)
_output._metalearner.deleteCrossValidationPreds();
}
@Override
public void deleteCrossValidationFoldAssignment() {
if (_output._metalearner != null)
_output._metalearner.deleteCrossValidationFoldAssignment();
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/ensemble/StackedEnsembleMojoWriter.java
|
package hex.ensemble;
import hex.Model;
import hex.MultiModelMojoWriter;
import water.DKV;
import water.Key;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
public class StackedEnsembleMojoWriter extends MultiModelMojoWriter<StackedEnsembleModel,
StackedEnsembleModel.StackedEnsembleParameters, StackedEnsembleModel.StackedEnsembleOutput> {
@SuppressWarnings("unused") // Called through reflection in ModelBuildersHandler
public StackedEnsembleMojoWriter() {}
public StackedEnsembleMojoWriter(StackedEnsembleModel model) {
super(model);
    // Whitelist of supported metalearner transforms
if (!(model._parms._metalearner_transform == StackedEnsembleModel.StackedEnsembleParameters.MetalearnerTransform.NONE ||
model._parms._metalearner_transform == StackedEnsembleModel.StackedEnsembleParameters.MetalearnerTransform.Logit)) {
throw new UnsupportedOperationException("Cannot save Stacked Ensemble with metalearner_transform = \"" +
model._parms._metalearner_transform.name() + "\" to MOJO.");
}
}
@Override
public String mojoVersion() {
return "1.01";
}
@Override
protected List<Model> getSubModels() {
LinkedList<Model> subModels = new LinkedList<>();
if (model._output._metalearner != null)
subModels.add(model._output._metalearner);
for (Key<Model> baseModelKey : model._parms._base_models)
if (baseModelKey != null && model.isUsefulBaseModel(baseModelKey)) {
Model aModel = DKV.getGet(baseModelKey);
subModels.add(aModel);
}
return subModels;
}
@Override
protected void writeParentModelData() throws IOException {
writekv("base_models_num", model._parms._base_models.length);
writekv("metalearner", model._output._metalearner._key);
writekv("metalearner_transform", model._parms._metalearner_transform.toString());
for (int i = 0; i < model._parms._base_models.length; i++) {
if (model.isUsefulBaseModel(model._parms._base_models[i])) {
writekv("base_model" + i, model._parms._base_models[i]);
}
}
}
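  // Sketch of the parent-model entries written above for an ensemble with three base models where
  // only models 0 and 2 are used by the metalearner (the key names shown are hypothetical):
  //
  //   base_models_num       = 3
  //   metalearner           = metalearner_AUTO_StackedEnsemble_1
  //   metalearner_transform = NONE
  //   base_model0           = GBM_1
  //   base_model2           = DRF_1
  //
  // Unused base models leave gaps in the numbering (base_model1 is absent here), while
  // base_models_num still records the total length of _base_models.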
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam/GAM.java
|
package hex.gam;
import hex.*;
import hex.gam.GAMModel.GAMParameters;
import hex.gam.GamSplines.ThinPlateDistanceWithKnots;
import hex.gam.GamSplines.ThinPlatePolynomialWithKnots;
import hex.gam.MatrixFrameUtils.GamUtils;
import hex.gam.MatrixFrameUtils.GenCSSplineGamOneColumn;
import hex.gam.MatrixFrameUtils.GenISplineGamOneColumn;
import hex.gam.MatrixFrameUtils.GenMSplineGamOneColumn;
import hex.glm.GLM;
import hex.glm.GLMModel;
import hex.glm.GLMModel.GLMParameters;
import hex.gram.Gram;
import jsr166y.ForkJoinTask;
import jsr166y.RecursiveAction;
import org.apache.commons.lang.NotImplementedException;
import water.*;
import water.exceptions.H2OModelBuilderIllegalArgumentException;
import water.fvec.Frame;
import water.fvec.Vec;
import water.rapids.Rapids;
import water.rapids.Val;
import water.util.ArrayUtils;
import water.util.IcedHashSet;
import water.util.Log;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.DoubleStream;
import java.util.stream.Stream;
import static hex.gam.GAMModel.adaptValidFrame;
import static hex.gam.GamSplines.ThinPlateRegressionUtils.*;
import static hex.gam.MatrixFrameUtils.GAMModelUtils.*;
import static hex.gam.MatrixFrameUtils.GamUtils.AllocateType.*;
import static hex.gam.MatrixFrameUtils.GamUtils.*;
import static hex.gam.MatrixFrameUtils.GenCSSplineGamOneColumn.generateZTransp;
import static hex.genmodel.algos.gam.GamMojoModel.*;
import static hex.genmodel.utils.ArrayUtils.flat;
import static hex.glm.GLMModel.GLMParameters.Family.multinomial;
import static hex.glm.GLMModel.GLMParameters.Family.ordinal;
import static hex.glm.GLMModel.GLMParameters.GLMType.gam;
import static hex.util.LinearAlgebraUtils.generateOrthogonalComplement;
import static hex.util.LinearAlgebraUtils.generateQR;
import static water.util.ArrayUtils.*;
public class GAM extends ModelBuilder<GAMModel, GAMModel.GAMParameters, GAMModel.GAMModelOutput> {
private static final int MIN_CSPLINE_NUM_KNOTS = 3;
private static final int MIN_MorI_SPLINE_KNOTS = 2;
private double[][][] _knots; // Knots for splines
private int _thinPlateSmoothersWithKnotsNum = 0;
private int _cubicSplineNum = 0;
private int _iSplineNum = 0;
private int _mSplineNum = 0;
double[][] _gamColMeansRaw; // store raw gam column means in gam_column_sorted order and only for thin plate smoothers
public double[][] _oneOGamColStd;
public double[] _penaltyScale;
public int _glmNFolds = 0;
Model.Parameters.FoldAssignmentScheme _foldAssignment = null;
String _foldColumn = null;
boolean _cvOn = false;
@Override
public ModelCategory[] can_build() {
return new ModelCategory[]{ModelCategory.Regression};
}
@Override
public boolean isSupervised() {
return true;
}
@Override
public BuilderVisibility builderVisibility() {
return BuilderVisibility.Experimental;
}
@Override
public boolean havePojo() {
return false;
}
@Override
public boolean haveMojo() {
return true;
}
public GAM(boolean startup_once) {
super(new GAMModel.GAMParameters(), startup_once);
}
public GAM(GAMModel.GAMParameters parms) {
super(parms);
init(false);
}
public GAM(GAMModel.GAMParameters parms, Key<GAMModel> key) {
super(parms, key);
init(false);
}
  /***
   * This method will look at the keys of knots stored in _parms._knot_ids and copy them over to a double[][][]
   * array. Note that smoothers can take different numbers of columns. We keep the gam columns
   * of single-predictor smoothers at the front and multiple-predictor smoothers at the back of the array. For
   * smoothers that take more than one predictor column, knot locations are determined by first sorting the first
   * gam_column and then extracting the quantiles of that sorted gam_column. Here, instead of taking the value of
   * one gam column, we take the whole row with all the predictors for that smoother.
   *
   * @return double[][][] array containing the knots specified by users
   */
  public double[][][] generateKnotsFromKeys() { // todo: parallelize this operation
int numGamCols = _parms._gam_columns.length; // total number of predictors in all smoothers
    double[][][] knots = new double[numGamCols][][]; // 1st index: smoother, 2nd index: predictor within the smoother, 3rd index: knot locations
boolean allNull = _parms._knot_ids == null;
int csInd = 0;
int isInd = _cubicSplineNum;
int msInd = _cubicSplineNum+_iSplineNum;
int tpInd = _cubicSplineNum+_iSplineNum+_mSplineNum;
int gamIndex; // index into the sorted arrays with CS/I-splines/M front, TP back.
for (int outIndex = 0; outIndex < _parms._gam_columns.length; outIndex++) { // go through each gam_column group
String tempKey = allNull ? null : _parms._knot_ids[outIndex]; // one knot_id for each smoother
if (_parms._bs[outIndex] == TP_SPLINE_TYPE) { // thin plate regression
gamIndex = tpInd++;
} else if (_parms._bs[outIndex] == CS_SPLINE_TYPE) {
gamIndex = csInd++;
} else if (_parms._bs[outIndex] == IS_SPLINE_TYPE) {
gamIndex = isInd++;
} else if (_parms._bs[outIndex] == MS_SPLINE_TYPE) { // m-spline
gamIndex = msInd++;
} else {
throw new NotImplementedException(SPLINENOTIMPL);
}
knots[gamIndex] = new double[_parms._gam_columns[outIndex].length][];
if (tempKey != null) { // read knots location from Frame given by user
final Frame knotFrame = DKV.getGet(tempKey);
double[][] knotContent = new double[(int) knotFrame.numRows()][_parms._gam_columns[outIndex].length];
final ArrayUtils.FrameToArray f2a = new ArrayUtils.FrameToArray(0,
_parms._gam_columns[outIndex].length - 1, knotFrame.numRows(), knotContent);
knotContent = f2a.doAll(knotFrame).getArray(); // first index is row, second index is column
final double[][] knotCTranspose = ArrayUtils.transpose(knotContent);// change knots to correct order
for (int innerIndex = 0; innerIndex < knotCTranspose.length; innerIndex++) {
knots[gamIndex][innerIndex] = new double[knotContent.length];
System.arraycopy(knotCTranspose[innerIndex], 0, knots[gamIndex][innerIndex], 0,
knots[gamIndex][innerIndex].length);
if (knotCTranspose.length == 1 && (_parms._bs[outIndex] == CS_SPLINE_TYPE ||
                  _parms._bs[outIndex] == MS_SPLINE_TYPE || _parms._bs[outIndex] == IS_SPLINE_TYPE)) // only check knot ordering for single-predictor smoothers
failVerifyKnots(knots[gamIndex][innerIndex], outIndex);
}
_parms._num_knots[outIndex] = knotContent.length;
} else { // current column knot key is null, we will use default method to generate knots
final Frame predictVec = new Frame(_parms._gam_columns[outIndex],
_parms.train().vecs(_parms._gam_columns[outIndex]));
if (_parms._bs[outIndex] == CS_SPLINE_TYPE || _parms._bs[outIndex] == IS_SPLINE_TYPE ||
_parms._bs[outIndex] == MS_SPLINE_TYPE) {
knots[gamIndex][0] = generateKnotsOneColumn(predictVec, _parms._num_knots[outIndex]);
failVerifyKnots(knots[gamIndex][0], outIndex);
} else { // generate knots for multi-predictor smoothers
knots[gamIndex] = genKnotsMultiplePreds(predictVec, _parms, outIndex);
failVerifyKnots(knots[gamIndex][0], outIndex);
}
}
if (_parms._bs[outIndex] == MS_SPLINE_TYPE) {
int numBasis = _parms._spline_orders[outIndex] + _parms._num_knots[outIndex] - 2;
if (numBasis < 2)
error("spline_orders and num_knots", "M-spline for column "+
_parms._gam_columns[outIndex][0]+" with spline_orders=1 must have more than 2 knots.");
}
}
return knots; // CS/I-splines come first, TP is at the back
}
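  // Shape sketch of the returned array (values below are hypothetical): with
  // gam_columns = {{"x1"}, {"x2", "x3"}}, where "x1" uses a CS spline with 3 knots and
  // {"x2", "x3"} uses a thin plate spline with 4 knots, single-predictor smoothers come first:
  //
  //   knots[0] = { {1.0, 2.5, 4.0} };                      // CS smoother on x1: [1 predictor][3 knots]
  //   knots[1] = { {0.1, 0.4, 0.7, 0.9},                   // TP smoother: one row of knot locations
  //                {5.0, 6.0, 8.0, 9.5} };                 //   per predictor (x2, x3)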
// this function will check and make sure the knots location specified in knots are valid in the following sense:
// 1. They do not contain NaN
// 2. They are sorted in ascending order.
public void failVerifyKnots(double[] knots, int gam_column_index) {
for (int index = 0; index < knots.length; index++) {
if (Double.isNaN(knots[index])) {
error("gam_columns/knots_id", String.format("Knots generated by default or specified in knots_id " +
"ended up containing a NaN value for gam_column %s. Please specify alternate knots_id" +
" or choose other columns.", _parms._gam_columns[gam_column_index][0]));
return;
}
if (index > 0 && knots[index - 1] > knots[index]) {
error("knots_id", String.format("knots not sorted in ascending order for gam_column %s. " +
"Knots at index %d: %f. Knots at index %d: %f",_parms._gam_columns[gam_column_index][0], index-1,
knots[index-1], index, knots[index]));
return;
}
if (index > 0 && knots[index - 1] == knots[index]) {
error("gam_columns/knots_id", String.format("chosen gam_column %s does have not enough values to " +
"generate well-defined knots. Please choose other columns or reduce " +
"the number of knots. If knots are specified in knots_id, choose alternate knots_id as the" +
" knots are not in ascending order. Knots at index %d: %f. Knots at index %d: %f",
_parms._gam_columns[gam_column_index][0], index-1, knots[index-1], index, knots[index]));
return;
}
}
}
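  // Examples of the three checks above (knot values are hypothetical):
  //   {1.0, 2.0, 3.5}        -> passes: finite and strictly increasing
  //   {1.0, Double.NaN, 3.5} -> fails the NaN check
  //   {1.0, 3.5, 2.0}        -> fails the ascending-order check
  //   {1.0, 1.0, 3.5}        -> fails the duplicate-knot check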
@Override
public void init(boolean expensive) {
if (_parms._nfolds > 0 || _parms._fold_column != null) {
_parms._glmCvOn = true; // added for client mode
_parms._glmNFolds = _parms._fold_column == null ? _parms._nfolds
: _parms.train().vec(_parms._fold_column).toCategoricalVec().domain().length;
_cvOn = true;
_glmNFolds = _parms._glmNFolds;
if (_parms._fold_assignment != null) {
_parms._glmFoldAssignment = _parms._fold_assignment; // added for client mode
_foldAssignment = _parms._fold_assignment;
_parms._fold_assignment = null;
}
if (_parms._fold_column != null) {
_parms._glmFoldColumn = _parms._fold_column; // added for client mode
_foldColumn = _parms._fold_column;
_parms._fold_column = null;
}
_parms._nfolds = 0;
}
super.init(expensive);
if (_parms._bs != null) {
boolean allMonotoneSplines = Arrays.stream(_parms._bs).filter(x -> x == 2).count() == _parms._bs.length;
boolean containsMonotoneSplines = Arrays.stream(_parms._bs).filter(x -> x == 2).count() > 0;
if (allMonotoneSplines && containsMonotoneSplines && !_parms._non_negative) {
warn("non_negative", " is not set to true when I-spline/monotone-spline (bs=2) is chosen." +
" You will not get monotonic output in this case even though you choose I-spline.");
}
}
if (expensive && (_knots == null)) // add GAM specific check here, only do it once especially during CV
validateGAMParameters();
}
private void validateGAMParameters() {
if (_parms._max_iterations == 0)
error("_max_iterations", H2O.technote(2, "if specified, must be >= 1."));
if (_parms._gam_columns == null) { // check _gam_columns contains valid columns
error("_gam_columns", "must specify columns names to apply GAM to. If you don't have any," +
" use GLM.");
} else { // check and make sure gam_columns column types are legal
checkGAMParamsLengths();
if (_parms._bs == null)
setDefaultBSType(_parms); // default to cs spline and thin plate for higher dimension
assertValidGAMColumnsCountSplineTypes(); // also number of CS, TP, I-spline, M-splines smoothers determined.
}
if (error_count() > 0)
throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(GAM.this);
if (_parms._scale == null)
setDefaultScale(_parms);
setGamPredSize(_parms, _cubicSplineNum+_iSplineNum+_mSplineNum);
if (_thinPlateSmoothersWithKnotsNum > 0)
setThinPlateParameters(_parms, _thinPlateSmoothersWithKnotsNum); // set the m, M for thin plate regression smoothers
checkOrChooseNumKnots(); // check valid num_knot assignment or choose num_knots
checkTrainRowNumKnots();
_knots = generateKnotsFromKeys(); // generate knots and verify that they are given correctly
sortGAMParameters(_parms, _cubicSplineNum, _iSplineNum, _mSplineNum); // move single predictor spline to front and thin plate to back
checkThinPlateParams();
if (_parms._saveZMatrix && ((_train.numCols() - 1 + _parms._num_knots.length) < 2))
error("_saveZMatrix", "can only be enabled if the number of predictors plus" +
" Gam columns in gam_columns exceeds 2");
if ((_parms._lambda_search || !_parms._intercept || _parms._lambda == null || _parms._lambda[0] > 0))
_parms._use_all_factor_levels = true;
checkNFamilyNLinkAssignment();
}
  /**
   * Check and make sure there are enough rows in the training dataset to accommodate the num_knots
   * settings.
   */
public void checkTrainRowNumKnots() {
for (int index = 0; index < _parms._gam_columns.length; index++) {
Frame dataset = _parms.train();
String cname = _parms._gam_columns[index][0]; // only check the first gam_column
      if (_parms._bs[index] < 0 || _parms._bs[index] > 3)
        error("bs", " bs can only be 0, 1, 2 or 3 but is "+_parms._bs[index]);
if (dataset.vec(cname).isInt() && ((dataset.vec(cname).max() - dataset.vec(cname).min() + 1) < _parms._num_knots[index]))
error("gam_columns", "column " + cname + " has cardinality lower than the number of knots and cannot be used as a gam" +
" column.");
}
}
  /***
   * Check and make sure that, if related parameters are defined, they are of the correct length. Their length must
   * equal the number of gam column groups specified, i.e. _parms._gam_columns.length.
   */
public void checkGAMParamsLengths() {
if ((_parms._bs != null) && (_parms._gam_columns.length != _parms._bs.length)) // check length
error("bs", "Number of spline types in bs must match the number of gam column groups " +
"(gam_columns.length) specified in gam_columns");
if (_parms._knot_ids != null && (_parms._knot_ids.length != _parms._gam_columns.length)) // check knots location specification
error("knot_ids", "Number of knot_ids specified must match the number of gam column groups " +
"(gam_columns.length) specified in gam_columns");
if (_parms._num_knots != null && (_parms._num_knots.length != _parms._gam_columns.length))
error("num_knots", "Number of num_knots specified must match the number of gam column groups " +
"(gam_columns.length) specified in gam_columns");
if (_parms._scale != null && (_parms._scale.length != _parms._gam_columns.length))
error("scale", "Number of scale specified must match the number of gam column groups " +
"(gam_columns.length) specified in gam_columns");
if (_parms._splines_non_negative != null && (_parms._splines_non_negative.length != _parms._gam_columns.length))
error("splines_non_negative", "Number of splines_non_negative specified must match the number" +
" of gam column groups (gam_columns.length) specified in gam_columns");
}
  /***
   * If _parms._family == AUTO, check that the user-specified link is compatible with the response type, and assign
   * the family automatically according to the response type. If no link is given or it is family_default, the
   * chosen family's default link is used.
   */
public void checkNFamilyNLinkAssignment() {
if (_parms._family == GLMParameters.Family.AUTO) {
if (nclasses() == 1 & _parms._link != GLMParameters.Link.family_default && _parms._link != GLMParameters.Link.identity
&& _parms._link != GLMParameters.Link.log && _parms._link != GLMParameters.Link.inverse && _parms._link != null) {
error("_family", H2O.technote(2, "AUTO for undelying response requires the link to" +
" be family_default, identity, log or inverse."));
} else if (nclasses() == 2 & _parms._link != GLMParameters.Link.family_default && _parms._link != GLMParameters.Link.logit
&& _parms._link != null) {
error("_family", H2O.technote(2, "AUTO for undelying response requires the link to" +
" be family_default or logit."));
} else if (nclasses() > 2 & _parms._link != GLMParameters.Link.family_default & _parms._link != GLMParameters.Link.multinomial
&& _parms._link != null) {
error("_family", H2O.technote(2, "AUTO for undelying response requires the link to" +
" be family_default or multinomial."));
}
if (_nclass == 1) {
_parms._family = GLMParameters.Family.gaussian;
} else if (_nclass == 2) {
_parms._family = GLMParameters.Family.binomial;
} else {
_parms._family = GLMParameters.Family.multinomial;
}
}
if (_parms._link == null || _parms._link.equals(GLMParameters.Link.family_default))
_parms._link = _parms._family.defaultLink;
if ((_parms._family == GLMParameters.Family.multinomial || _parms._family == GLMParameters.Family.ordinal ||
_parms._family == GLMParameters.Family.binomial)
&& response().get_type() != Vec.T_CAT) {
error("_response_column", String.format("For given response family '%s', please provide a categorical" +
" response column. Current response column type is '%s'.", _parms._family, response().get_type_str()));
}
}
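  // Summary of the AUTO resolution above:
  //   1 response class (numeric response)   -> family = gaussian,    allowed links: family_default, identity, log, inverse
  //   2 response classes (categorical)      -> family = binomial,    allowed links: family_default, logit
  //   > 2 response classes (categorical)    -> family = multinomial, allowed links: family_default, multinomial
  // An unset or family_default link is then replaced by the chosen family's default link.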
  /**
   * Verify and check thin plate regression smoother-specific parameters.
   **/
public void checkThinPlateParams() {
if (_thinPlateSmoothersWithKnotsNum ==0)
return;
_parms._num_knots_tp = new int[_thinPlateSmoothersWithKnotsNum];
System.arraycopy(_parms._num_knots_sorted, _cubicSplineNum+_iSplineNum+_mSplineNum, _parms._num_knots_tp, 0,
_thinPlateSmoothersWithKnotsNum);
int tpIndex = 0;
for (int index = 0; index < _parms._gam_columns.length; index++) {
if (_parms._bs_sorted[index] == TP_SPLINE_TYPE) {
if (_parms._num_knots_sorted[index] < _parms._M[tpIndex] + 1) {
error("num_knots", "num_knots for gam column start with " + _parms._gam_columns_sorted[index][0] +
" did not specify enough num_knots. It should be equal or greater than " + (_parms._M[tpIndex] + 1) + ".");
}
tpIndex++;
}
}
}
  /**
   * Set default num_knots for gam_columns where no knot_id is specified:
   * for CS smoothers, the default is 10;
   * for TP smoothers, the default is max(10, _M+2);
   * for I-splines, the default is 2, which is the minimum;
   * for M-splines, the default is 2, which is the minimum.
   */
public void checkOrChooseNumKnots() {
if (_parms._num_knots == null)
_parms._num_knots = new int[_parms._gam_columns.length]; // different columns may have different num knots
if (_parms._spline_orders == null) {
_parms._spline_orders = new int[_parms._gam_columns.length];
Arrays.fill(_parms._spline_orders, 3);
} else {
for (int index=0; index<_parms._spline_orders.length; index++)
if ((_parms._bs[index] == IS_SPLINE_TYPE || _parms._bs[index] == MS_SPLINE_TYPE) && _parms._spline_orders[index] < 1)
error("spline_orders", "GAM I-spline spline_orders must be >= 1");
}
int tpCount = 0;
    for (int index = 0; index < _parms._num_knots.length; index++) { // fill in zero num_knots entries and validate user-specified values
if (_parms._knot_ids == null || (_parms._knot_ids != null && _parms._knot_ids[index] == null)) { // knots are not specified
int numKnots = _parms._num_knots[index];
if (_parms._bs[index] == IS_SPLINE_TYPE || _parms._bs[index] == MS_SPLINE_TYPE) {
if (_parms._num_knots[index] == 0) {
_parms._num_knots[index] = MIN_MorI_SPLINE_KNOTS;
if (_parms._bs[index] == MS_SPLINE_TYPE && _parms._spline_orders[index] == 1)
_parms._num_knots[index] += 1;
} else if (_parms._num_knots[index] < MIN_MorI_SPLINE_KNOTS) {
error("num_knots", " must >= "+MIN_MorI_SPLINE_KNOTS+" for M or I-splines.");
}
}
int naSum = 0;
for (int innerIndex = 0; innerIndex < _parms._gam_columns[index].length; innerIndex++) {
naSum += _parms.train().vec(_parms._gam_columns[index][innerIndex]).naCnt();
}
long eligibleRows = _train.numRows()-naSum;
if (_parms._num_knots[index] == 0) { // set num_knots to default
int defaultRows = 10;
if (_parms._bs[index] == TP_SPLINE_TYPE) {
defaultRows = Math.max(defaultRows, _parms._M[tpCount] + 2);
tpCount++;
}
if (_parms._bs[index] == IS_SPLINE_TYPE || _parms._bs[index] == MS_SPLINE_TYPE)
defaultRows = MIN_MorI_SPLINE_KNOTS;
_parms._num_knots[index] = eligibleRows < defaultRows ? (int) eligibleRows : defaultRows;
} else { // num_knots assigned by user and check to make sure it is legal
if (numKnots > eligibleRows) {
error("num_knots", " number of knots specified in num_knots: "+numKnots+" for smoother" +
" with first predictor "+_parms._gam_columns[index][0]+". Reduce _num_knots.");
}
if (_parms._bs[index] == CS_SPLINE_TYPE && _parms._num_knots[index] < MIN_CSPLINE_NUM_KNOTS)
error("num_knots", " number of knots specified in num_knots "+numKnots+
" for cs splines must be >= " + MIN_CSPLINE_NUM_KNOTS + ".");
if ((_parms._bs[index] == IS_SPLINE_TYPE || _parms._bs[index] == MS_SPLINE_TYPE)
&& _parms._num_knots[index] < MIN_MorI_SPLINE_KNOTS)
error("num_knots", " number of knots specified "+numKnots+" for M or I-splines must be" +
" >= "+MIN_MorI_SPLINE_KNOTS);
}
}
}
}
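  // Default num_knots sketch (applies only when knots are not supplied via knot_ids):
  //   CS spline           -> 10 (or the number of usable rows, if smaller); user-specified values must be >= 3
  //   thin plate spline   -> max(10, M + 2), capped by the number of usable rows
  //   I-spline / M-spline -> 2 (3 for an M-spline of order 1); user-specified values must be >= 2
  // "usable rows" means training rows with no NA in any predictor column of that smoother.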
  /**
   * Check and make sure the correct bs type is assigned to the various gam_columns specified, and that the gam
   * columns specified are actually found in the training dataset. In addition, the numbers of CS, TP, I-spline and
   * M-spline smoothers are counted here as well.
   */
public void assertValidGAMColumnsCountSplineTypes() {
Frame dataset = _parms.train();
List<String> cNames = Arrays.asList(dataset.names());
for (int index = 0; index < _parms._gam_columns.length; index++) {
if (_parms._bs != null) { // check and make sure the correct bs type is chosen
if (_parms._gam_columns[index].length > 1 && _parms._bs[index] != 1)
error("bs", "Smoother with multiple predictors can only use with thin plate spines, i.e., " +
"bs = 1");
if (_parms._bs[index] == TP_SPLINE_TYPE)
_thinPlateSmoothersWithKnotsNum++; // record number of thin plate
if (_parms._bs[index] == CS_SPLINE_TYPE)
_cubicSplineNum++;
if (_parms._bs[index] == IS_SPLINE_TYPE) {
if (multinomial.equals(_parms._family) || ordinal.equals(_parms._family))
error("family", "multinomial and ordinal families cannot be used with I-splines.");
_iSplineNum++;
}
if (_parms._bs[index] == MS_SPLINE_TYPE)
_mSplineNum++;
for (int innerIndex = 0; innerIndex < _parms._gam_columns[index].length; innerIndex++) {
String cname = _parms._gam_columns[index][innerIndex];
if (!cNames.contains(cname))
error("gam_columns", "column name: " + cname + " does not exist in your dataset.");
if (dataset.vec(cname).isCategorical())
error("gam_columns", "column " + cname + " is categorical and cannot be used as a gam " +
"column.");
if (dataset.vec(cname).isBad() || dataset.vec(cname).isTime() || dataset.vec(cname).isUUID() ||
dataset.vec(cname).isConst())
error("gam_columns", String.format("Column '%s' of type '%s' cannot be used as GAM column. Column types " +
"BAD, TIME, CONSTANT and UUID cannot be used.", cname, dataset.vec(cname).get_type_str()));
if (!dataset.vec(cname).isNumeric())
error("gam_columns", "column " + cname + " is not numerical and cannot be used as a gam" +
" column.");
}
}
}
}
@Override
protected boolean computePriorClassDistribution() {
return (_parms._family== multinomial)||(_parms._family== ordinal);
}
@Override
protected GAMDriver trainModelImpl() {
if (_parms._glmCvOn) { // for client mode, copy over the cv settings
_cvOn = true;
if (_parms._glmFoldAssignment != null)
_foldAssignment = _parms._glmFoldAssignment;
if (_parms._glmFoldColumn != null)
_foldColumn = _parms._glmFoldColumn;
_glmNFolds = _parms._glmNFolds;
}
return new GAMDriver();
}
@Override
protected int nModelsInParallel(int folds) {
return nModelsInParallel(folds,2);
}
private class GAMDriver extends Driver {
double[][][] _zTranspose; // store transpose(Z) matrices for CS and TP smoothers
double[][][] _zTransposeCS; // store transpose(zCS) for thin plate smoother to remove optimization constraint
double[][][] _penaltyMatCenter; // store centered penalty matrices of all smoothers
double[][][] _penaltyMat; // penalty matrix before any kind of processing
double[][][] _penaltyMatCS; // penalty matrix after removing optimization constraint, only for thin plate
double[][][] _starT; // store T* as in 3.2.3
public double[][][] _binvD; // store BinvD for each CS smoother specified for scoring
public int[] _numKnots; // store number of knots per smoother
String[][] _gamColNames; // store column names of all smoothers before any processing
String[][] _gamColNamesCenter; // gamColNames after centering is performed.
Key<Frame>[] _gamFrameKeysCenter;
double[][] _gamColMeans; // store gam column means without centering.
int[][][] _allPolyBasisList; // store polynomial basis function for all TP smoothers
DataInfo _dinfo = null;
    /***
     * This method takes _train, which contains only the predictor and response columns, and adds to it
     * the following:
     * 1. For each smoother included in gam_columns, expand it out to calculate f(x) and attach it to the frame.
     * 2. For TP smoothers, calculate the zCS transpose.
     * 3. Calculate the ztranspose that is used to center each smoother.
     * 4. Calculate a penalty matrix used to control the smoothness of GAM.
     *
     * @return the training frame augmented with the gamified columns
     */
Frame adaptTrain() {
int numGamFrame = _parms._gam_columns.length;
_zTranspose = GamUtils.allocate3DArray(numGamFrame, _parms, firstOneLess); // for centering for all smoothers
zeroOutIStranspose(_parms._bs_sorted, _zTranspose);
_penaltyMat = _parms._savePenaltyMat?GamUtils.allocate3DArray(numGamFrame, _parms, sameOrig):null;
_penaltyMatCenter = GamUtils.allocate3DArray(numGamFrame, _parms, bothOneLess);
removeCenteringIS(_penaltyMatCenter, _parms);
if (_cubicSplineNum > 0) // CS-spline only
_binvD = GamUtils.allocate3DArrayCS(_cubicSplineNum, _parms, firstTwoLess);
_numKnots = MemoryManager.malloc4(numGamFrame);
_gamColNames = new String[numGamFrame][];
_gamColNamesCenter = new String[numGamFrame][];
_gamFrameKeysCenter = new Key[numGamFrame];
_gamColMeans = new double[numGamFrame][]; // means of gamified columns
_penaltyScale = new double[numGamFrame];
if (_thinPlateSmoothersWithKnotsNum > 0) { // only allocate if there are thin plate smoothers
int[] kMinusM = subtract(_parms._num_knots_tp, _parms._M);
_zTransposeCS = GamUtils.allocate3DArrayTP(_thinPlateSmoothersWithKnotsNum, _parms, kMinusM, _parms._num_knots_tp);
_penaltyMatCS = GamUtils.allocate3DArrayTP(_thinPlateSmoothersWithKnotsNum, _parms, kMinusM, kMinusM);
_allPolyBasisList = new int[_thinPlateSmoothersWithKnotsNum][][];
_gamColMeansRaw = new double[_thinPlateSmoothersWithKnotsNum][];
_oneOGamColStd = new double[_thinPlateSmoothersWithKnotsNum][];
if (_parms._savePenaltyMat)
_starT = GamUtils.allocate3DArrayTP(_thinPlateSmoothersWithKnotsNum, _parms, _parms._num_knots_tp, _parms._M);
}
addGAM2Train(); // add GAM columns to training frame
return buildGamFrame(_parms, _train, _gamFrameKeysCenter, _foldColumn); // add gam cols to _train
}
    // This class generates the thin plate regression smoothers as denoted in GamThinPlateRegressionH2O.pdf
public class ThinPlateRegressionSmootherWithKnots extends RecursiveAction {
final Frame _predictVec;
final int _numKnots;
final int _numKnotsM1;
final int _numKnotsMM; // store k-M
final int _splineType;
final boolean _savePenaltyMat;
final double[][] _knots;
final GAMParameters _parms;
final int _gamColIndex;
final int _thinPlateGamColIndex;
final int _numPred; // number of predictors (d)
final int _M;
public ThinPlateRegressionSmootherWithKnots(Frame predV, GAMParameters parms, int gamColIndex, double[][] knots,
int thinPlateInd) {
_predictVec = predV;
_knots = knots;
_numKnots = parms._num_knots_sorted[gamColIndex];
_numKnotsM1 = _numKnots-1;
_parms = parms;
_splineType = _parms._bs_sorted[gamColIndex];
_gamColIndex = gamColIndex;
_thinPlateGamColIndex = thinPlateInd;
_savePenaltyMat = _parms._savePenaltyMat;
_numPred = parms._gam_columns_sorted[gamColIndex].length;
_M = _parms._M[_thinPlateGamColIndex];
_numKnotsMM = _numKnots-_M;
}
@Override
protected void compute() {
double[] rawColMeans = new double[_numPred];
double[] oneOverColStd = new double[_numPred];
for (int colInd = 0; colInd < _numPred; colInd++) {
rawColMeans[colInd] = _predictVec.vec(colInd).mean();
oneOverColStd[colInd] = 1.0/_predictVec.vec(colInd).sigma(); // std
}
System.arraycopy(rawColMeans, 0, _gamColMeansRaw[_thinPlateGamColIndex], 0, rawColMeans.length);
System.arraycopy(oneOverColStd, 0, _oneOGamColStd[_thinPlateGamColIndex], 0, oneOverColStd.length);
ThinPlateDistanceWithKnots distanceMeasure =
new ThinPlateDistanceWithKnots(_knots, _numPred, oneOverColStd,
_parms._standardize_tp_gam_cols).doAll(_numKnots, Vec.T_NUM, _predictVec); // Xnmd in 3.1
List<Integer[]> polyBasisDegree = findPolyBasis(_numPred, calculatem(_numPred));// polynomial basis lists in 3.2
int[][] polyBasisArray = convertList2Array(polyBasisDegree, _M, _numPred);
copy2DArray(polyBasisArray, _allPolyBasisList[_thinPlateGamColIndex]);
String colNameStub = genThinPlateNameStart(_parms, _gamColIndex); // gam column names before processing
String[] gamColNames = generateGamColNamesThinPlateKnots(_gamColIndex, _parms, polyBasisArray, colNameStub);
System.arraycopy(gamColNames, 0, _gamColNames[_gamColIndex], 0, gamColNames.length);
String[] distanceColNames = extractColNames(gamColNames, 0, 0, _numKnots);
String[] polyNames = extractColNames(gamColNames, _numKnots, 0, _M);
Frame thinPlateFrame = distanceMeasure.outputFrame(Key.make(), distanceColNames, null);
for (int index = 0; index < _numKnots; index++)
_gamColMeans[_gamColIndex][index] = thinPlateFrame.vec(index).mean();
double[][] starT = generateStarT(_knots, polyBasisDegree, rawColMeans, oneOverColStd,
_parms._standardize_tp_gam_cols); // generate T* in 3.2.3
double[][] qmat = generateQR(starT);
double[][] penaltyMat = distanceMeasure.generatePenalty(); // penalty matrix 3.1.1
double[][] zCST = generateOrthogonalComplement(qmat, starT, _numKnotsMM, _parms._seed);
copy2DArray(zCST, _zTransposeCS[_thinPlateGamColIndex]);
ThinPlatePolynomialWithKnots thinPlatePoly = new ThinPlatePolynomialWithKnots(_numPred,
polyBasisArray, rawColMeans, oneOverColStd,
_parms._standardize_tp_gam_cols).doAll(_M, Vec.T_NUM, _predictVec);// generate polynomial basis T in 3.2
Frame thinPlatePolyBasis = thinPlatePoly.outputFrame(null, polyNames, null);
for (int index = 0; index < _M; index++) // calculate gamified column means
_gamColMeans[_gamColIndex][index+_numKnots] = thinPlatePolyBasis.vec(index).mean();
thinPlateFrame = ThinPlateDistanceWithKnots.applyTransform(thinPlateFrame, colNameStub
+"TPKnots_", _parms, zCST, _numKnotsMM); // generate Xcs as in 3.3
thinPlateFrame.add(thinPlatePolyBasis.names(), thinPlatePolyBasis.removeAll()); // concatenate Xcs and T
double[][] ztranspose = generateZTransp(thinPlateFrame, _numKnots); // generate Z for centering as in 3.4
copy2DArray(ztranspose, _zTranspose[_gamColIndex]);
double[][] penaltyMatCS = ArrayUtils.multArrArr(ArrayUtils.multArrArr(zCST, penaltyMat),
ArrayUtils.transpose(zCST)); // transform penalty matrix to transpose(Zcs)*Xnmd*Zcs, 3.3
if (_parms._scale_tp_penalty_mat) { // R does this scaling of penalty matrix. I left it to users to choose
ScaleTPPenalty scaleTPPenaltyCS = new ScaleTPPenalty(penaltyMatCS, thinPlateFrame).doAll(thinPlateFrame);
_penaltyScale[_gamColIndex] = scaleTPPenaltyCS._s_scale;
penaltyMatCS = scaleTPPenaltyCS._penaltyMat;
}
double[][] expandPenaltyCS = expandArray(penaltyMatCS, _numKnots); // used for penalty matrix
if (_savePenaltyMat) { // save intermediate steps for debugging
copy2DArray(penaltyMat, _penaltyMat[_gamColIndex]);
copy2DArray(starT, _starT[_thinPlateGamColIndex]);
copy2DArray(penaltyMatCS, _penaltyMatCS[_thinPlateGamColIndex]);
}
double[][] penaltyCenter = ArrayUtils.multArrArr(ArrayUtils.multArrArr(ztranspose, expandPenaltyCS),
ArrayUtils.transpose(ztranspose));
copy2DArray(penaltyCenter, _penaltyMatCenter[_gamColIndex]);
thinPlateFrame = ThinPlateDistanceWithKnots.applyTransform(thinPlateFrame, colNameStub+"center",
_parms, ztranspose, _numKnotsM1); // generate Xz as in 3.4
_gamFrameKeysCenter[_gamColIndex] = thinPlateFrame._key;
DKV.put(thinPlateFrame);
System.arraycopy(thinPlateFrame.names(), 0, _gamColNamesCenter[_gamColIndex], 0, _numKnotsM1);
}
}
public class ISplineSmoother extends RecursiveAction {
final Frame _predictVec;
final int _numKnots; // not counting knot duplication here
final int _order;
final double[] _knots; // not counting knot duplication here
final boolean _savePenaltyMat;
final String[] _newGAMColNames;
final int _gamColIndex; // gam column order from user input
final int _singlePredSplineInd; // gam column index after moving tp to the back
final int _splineType;
public ISplineSmoother(Frame gamPred, GAMParameters parms, int gamColIndex, String[] gamColNames, double[] knots,
int singlePredInd) {
_predictVec = gamPred;
_numKnots = parms._num_knots_sorted[gamColIndex];
_knots = knots;
_order = parms._spline_orders_sorted[gamColIndex];
_savePenaltyMat = parms._savePenaltyMat;
_newGAMColNames = gamColNames;
_gamColIndex = gamColIndex;
_singlePredSplineInd = singlePredInd;
_splineType = parms._bs_sorted[gamColIndex];
}
@Override
protected void compute() {
// generate GAM basis functions
int order = _parms._spline_orders_sorted[_gamColIndex];
int numBasis = _knots.length+order-2;
int totKnots = numBasis + order;
GenISplineGamOneColumn oneGAMCol = new GenISplineGamOneColumn(_parms, _knots, _gamColIndex, _predictVec,
numBasis, totKnots);
oneGAMCol.doAll(oneGAMCol._numBasis, Vec.T_NUM, _predictVec);
if (_savePenaltyMat) {
copy2DArray(oneGAMCol._penaltyMat, _penaltyMat[_gamColIndex]);
_penaltyScale[_gamColIndex] = oneGAMCol._s_scale;
}
// extract generated gam columns
Frame oneGamifiedColumn = oneGAMCol.outputFrame(Key.make(), _newGAMColNames, null);
for (int index=0; index<numBasis; index++)
_gamColMeans[_gamColIndex][index] = oneGamifiedColumn.vec(index).mean();
DKV.put(oneGamifiedColumn);
_gamFrameKeysCenter[_gamColIndex] = oneGamifiedColumn._key;
System.arraycopy(oneGamifiedColumn.names(), 0, _gamColNamesCenter[_gamColIndex], 0,
numBasis);
copy2DArray(oneGAMCol._penaltyMat, _penaltyMatCenter[_gamColIndex]);
}
}
public class MSplineSmoother extends RecursiveAction {
final Frame _predictVec;
final int _numKnots; // not counting knot duplication here
final int _order;
final double[] _knots; // not counting knot duplication here
final boolean _savePenaltyMat;
final String[] _newGAMColNames;
final int _gamColIndex; // gam column order from user input
final int _singlePredSplineInd; // gam column index after moving tp to the back
final int _splineType;
public MSplineSmoother(Frame gamPred, GAMParameters parms, int gamColIndex, String[] gamColNames, double[] knots,
int singlePredInd) {
_predictVec = gamPred;
_numKnots = parms._num_knots_sorted[gamColIndex];
_knots = knots;
_order = parms._spline_orders_sorted[gamColIndex];
_savePenaltyMat = parms._savePenaltyMat;
_newGAMColNames = gamColNames;
_gamColIndex = gamColIndex;
_singlePredSplineInd = singlePredInd;
_splineType = parms._bs_sorted[gamColIndex];
}
@Override
protected void compute() {
// generate GAM basis functions
int order = _parms._spline_orders_sorted[_gamColIndex];
int numBasis = _knots.length+order-2;
int numBasisM1 = numBasis-1;
int totKnots = numBasis + order;
GenMSplineGamOneColumn oneGAMCol = new GenMSplineGamOneColumn(_parms, _knots, _gamColIndex, _predictVec,
numBasis, totKnots);
oneGAMCol.doAll(oneGAMCol._numBasis, Vec.T_NUM, _predictVec);
if (_savePenaltyMat) {
copy2DArray(oneGAMCol._penaltyMat, _penaltyMat[_gamColIndex]);
_penaltyScale[_gamColIndex] = oneGAMCol._s_scale;
}
// extract generated gam columns
Frame oneGamifiedColCenter = oneGAMCol.outputFrame(Key.make(), _newGAMColNames, null);
for (int index=0; index<numBasis; index++)
_gamColMeans[_gamColIndex][index] = oneGamifiedColCenter.vec(index).mean();
oneGamifiedColCenter = oneGAMCol.centralizeFrame(oneGamifiedColCenter,
_predictVec.name(0)+"_"+_splineType+"_center", _parms);
copy2DArray(oneGAMCol._ZTransp, _zTranspose[_gamColIndex]); // copy transpose(Z)
DKV.put(oneGamifiedColCenter);
_gamFrameKeysCenter[_gamColIndex] = oneGamifiedColCenter._key;
System.arraycopy(oneGamifiedColCenter.names(), 0, _gamColNamesCenter[_gamColIndex], 0,
numBasisM1);
double[][] transformedPenalty = ArrayUtils.multArrArr(ArrayUtils.multArrArr(oneGAMCol._ZTransp,
oneGAMCol._penaltyMat), ArrayUtils.transpose(oneGAMCol._ZTransp)); // transform penalty as zt*S*z
copy2DArray(transformedPenalty, _penaltyMatCenter[_gamColIndex]);
System.arraycopy(oneGamifiedColCenter.names(), 0, _gamColNamesCenter[_gamColIndex], 0,
numBasisM1);
}
}
public class CubicSplineSmoother extends RecursiveAction {
final Frame _predictVec;
final int _numKnots;
final int _numKnotsM1;
final int _splineType;
final boolean _savePenaltyMat;
final String[] _newColNames;
final double[] _knots;
final GAMParameters _parms;
final int _gamColIndex;
final int _singlePredSplineInd;
public CubicSplineSmoother(Frame predV, GAMParameters parms, int gamColIndex, String[] gamColNames, double[] knots,
int csInd) {
_predictVec = predV;
_numKnots = parms._num_knots_sorted[gamColIndex];
_numKnotsM1 = _numKnots-1;
_splineType = parms._bs_sorted[gamColIndex];
_savePenaltyMat = parms._savePenaltyMat;
_newColNames = gamColNames;
_knots = knots;
_parms = parms;
_gamColIndex = gamColIndex;
_singlePredSplineInd = csInd;
}
@Override
protected void compute() {
GenCSSplineGamOneColumn genOneGamCol = new GenCSSplineGamOneColumn(_splineType, _numKnots,
_knots, _predictVec).doAll(_numKnots, Vec.T_NUM, _predictVec);
if (_savePenaltyMat) { // only save this for debugging
copy2DArray(genOneGamCol._penaltyMat, _penaltyMat[_gamColIndex]); // copy penalty matrix
_penaltyScale[_gamColIndex] = genOneGamCol._s_scale; // penaltyMat is scaled by 1/_s_scale
}
Frame oneAugmentedColumnCenter = genOneGamCol.outputFrame(Key.make(), _newColNames,
null); // one gamified frame
for (int index = 0; index < _numKnots; index++)
_gamColMeans[_gamColIndex][index] = oneAugmentedColumnCenter.vec(index).mean();
oneAugmentedColumnCenter = genOneGamCol.centralizeFrame(oneAugmentedColumnCenter,
_predictVec.name(0) + "_" + _splineType + "_center_cs_", _parms);
copy2DArray(genOneGamCol._ZTransp, _zTranspose[_gamColIndex]); // copy transpose(Z)
double[][] transformedPenalty = ArrayUtils.multArrArr(ArrayUtils.multArrArr(genOneGamCol._ZTransp,
genOneGamCol._penaltyMat), ArrayUtils.transpose(genOneGamCol._ZTransp)); // transform penalty as zt*S*z
copy2DArray(transformedPenalty, _penaltyMatCenter[_gamColIndex]);
_gamFrameKeysCenter[_gamColIndex] = oneAugmentedColumnCenter._key;
DKV.put(oneAugmentedColumnCenter);
System.arraycopy(oneAugmentedColumnCenter.names(), 0, _gamColNamesCenter[_gamColIndex], 0,
_numKnotsM1);
copy2DArray(genOneGamCol._bInvD, _binvD[_singlePredSplineInd]);
}
}
void addGAM2Train() {
final int numGamFrame = _parms._gam_columns.length; // number of smoothers to generate
RecursiveAction[] generateGamColumn = new RecursiveAction[numGamFrame];
int thinPlateInd = 0;
int singlePredictorSmootherInd = 0;
Frame trainFrame = _parms.train();
for (int index = 0; index < numGamFrame; index++) { // generate smoothers/splines
final Frame predictVec = prepareGamVec(index, _parms, trainFrame);// extract predictors from frame
// numKnots for M or I-spline will be the number of basis
final int numKnots = _parms._bs_sorted[index] == IS_SPLINE_TYPE || _parms._bs_sorted[index] == MS_SPLINE_TYPE ?
_parms._num_knots_sorted[index] + _parms._spline_orders_sorted[index] - 2 :
_parms._num_knots_sorted[index];
final int numKnotsM1 = numKnots - 1;
if (_parms._bs_sorted[index] == TP_SPLINE_TYPE) { // for TP splines
final int kPlusM = _parms._num_knots_sorted[index]+_parms._M[thinPlateInd];
_gamColNames[index] = new String[kPlusM];
_gamColNamesCenter[index] = new String[numKnotsM1];
_gamColMeans[index] = new double[kPlusM];
_allPolyBasisList[thinPlateInd] = new int[_parms._M[thinPlateInd]][_parms._gamPredSize[index]];
_gamColMeansRaw[thinPlateInd] = new double[_parms._gamPredSize[index]];
_oneOGamColStd[thinPlateInd] = new double[_parms._gamPredSize[index]];
generateGamColumn[index] = new ThinPlateRegressionSmootherWithKnots(predictVec, _parms, index, _knots[index],
thinPlateInd++);
} else { // for single predictor GAM columns
_gamColNames[index] = generateGamColNames(index, _parms);
_gamColMeans[index] = new double[numKnots];
if (_parms._bs_sorted[index] == CS_SPLINE_TYPE) { // cs spline
_gamColNamesCenter[index] = new String[numKnotsM1];
generateGamColumn[index] = new CubicSplineSmoother(predictVec, _parms, index, _gamColNames[index],
_knots[index][0], singlePredictorSmootherInd++);
} else if (_parms._bs_sorted[index] == IS_SPLINE_TYPE){ // I-splines
_gamColNamesCenter[index] = new String[numKnots];
generateGamColumn[index] = new ISplineSmoother(predictVec, _parms, index, _gamColNames[index],
_knots[index][0], singlePredictorSmootherInd++);
} else if (_parms._bs_sorted[index] == MS_SPLINE_TYPE){ // M-spline here
_gamColNamesCenter[index] = new String[numKnotsM1];
generateGamColumn[index] = new MSplineSmoother(predictVec, _parms, index, _gamColNames[index],
_knots[index][0], singlePredictorSmootherInd++);
} else
throw new NotImplementedException(SPLINENOTIMPL);
}
}
ForkJoinTask.invokeAll(generateGamColumn);
if (_iSplineNum > 0 && !_parms._betaConstraintsOff) { // set up coefficient constraints >= 0 or <= 0 for I-splines
Frame constraintF = genConstraints();
Scope.track(constraintF);
if (_parms._beta_constraints != null) {
DKV.put(constraintF);
Frame origConstraints = DKV.getGet(_parms._beta_constraints);
String tree = "(rbind "+origConstraints.getKey().toString()+" "+constraintF.getKey().toString()+" )";
Val val = Rapids.exec(tree);
Frame newConstraints = new Frame(val.getFrame());
DKV.put(newConstraints);
Scope.track(newConstraints);
_parms._beta_constraints = newConstraints._key;
} else {
_parms._beta_constraints = constraintF._key;
DKV.put(constraintF);
}
}
}
    /**
     * For all gamified columns with I-splines, put in beta constraints to make sure the coefficients are non-negative
     * or non-positive. This ensures that contributions from I-splines are either monotonically increasing or
     * decreasing.
     */
public Frame genConstraints() {
int numGamCols = _parms._gam_columns.length;
String[] colNames = new String[]{"names", "lower_bounds", "upper_bounds"};
Vec.VectorGroup vg = Vec.VectorGroup.VG_LEN1;
List<String> iSplineColNames = new ArrayList<>();
List<Double> upperBList = new ArrayList<>();
List<Double> lowerBList = new ArrayList<>();
for (int index=0; index<numGamCols; index++) {
if (_parms._bs_sorted[index] == IS_SPLINE_TYPE) { // I-splines
int numCols = _gamColNamesCenter[index].length;
iSplineColNames.addAll(Stream.of(_gamColNamesCenter[index]).collect(Collectors.toList()));
if (_parms._splines_non_negative_sorted[index]) { // monotonically increasing
upperBList.addAll(DoubleStream.generate(()->Double.POSITIVE_INFINITY ).limit(numCols).boxed().collect(Collectors.toList()));
lowerBList.addAll(DoubleStream.generate(()->0.0).limit(numCols).boxed().collect(Collectors.toList()));
} else { // monotonically decreasing
upperBList.addAll(DoubleStream.generate(()->0.0).limit(numCols).boxed().collect(Collectors.toList()));
lowerBList.addAll(DoubleStream.generate(()->Double.NEGATIVE_INFINITY).limit(numCols).boxed().collect(Collectors.toList()));
}
}
}
int numConstraints = iSplineColNames.size();
if (numConstraints > 0) {
String[] constraintNames = iSplineColNames.stream().toArray(String[]::new);
double[] lowerBounds = lowerBList.stream().mapToDouble(Double::doubleValue).toArray();
double[] upperBounds = upperBList.stream().mapToDouble(Double::doubleValue).toArray();
Vec gamNames = Scope.track(Vec.makeVec(constraintNames, vg.addVec()));
Vec lowBounds = Scope.track(Vec.makeVec(lowerBounds, vg.addVec()));
Vec upBounds = Scope.track(Vec.makeVec(upperBounds, vg.addVec()));
return new Frame(Key.<Frame>make(), colNames, new Vec[]{gamNames, lowBounds, upBounds});
}
return null;
}
void verifyGamTransformedFrame(Frame gamTransformed) {
final int numGamFrame = _parms._gam_columns.length;
for (int findex = 0; findex < numGamFrame; findex++) {
final int numGamCols = _gamColNamesCenter[findex].length;
for (int index = 0; index < numGamCols; index++) {
if (gamTransformed.vec(_gamColNamesCenter[findex][index]).isConst())
error(_gamColNamesCenter[findex][index], "gam column transformation generated constant columns" +
" for " + _parms._gam_columns[findex]);
}
}
}
@Override
public void computeImpl() {
init(true);
if (error_count() > 0) // if something goes wrong, let's throw a fit
throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(GAM.this);
// add gamified columns to training frame
Frame newTFrame = new Frame(rebalance(adaptTrain(), false, Key.make()+".temporary.train"));
verifyGamTransformedFrame(newTFrame);
if (error_count() > 0) // if something goes wrong during gam transformation, let's throw a fit again!
throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(GAM.this);
if (valid() != null) { // transform the validation frame if present
int[] singleGamColsCount = new int[]{_cubicSplineNum, _iSplineNum, _mSplineNum};
_valid = rebalance(adaptValidFrame(_parms.valid(), _valid, _parms, _gamColNamesCenter, _binvD,
_zTranspose, _knots, _zTransposeCS, _allPolyBasisList, _gamColMeansRaw, _oneOGamColStd, singleGamColsCount),
false, Key.make() + ".temporary.valid");
}
    DKV.put(newTFrame); // adding this frame to Scope.track would cause its vectors to be deleted
Frame newValidFrame = _valid == null ? null : new Frame(_valid);
if (newValidFrame != null) {
DKV.put(newValidFrame);
}
_job.update(0, "Initializing model training");
buildModel(newTFrame, newValidFrame); // build gam model
}
public final void buildModel(Frame newTFrame, Frame newValidFrame) {
GAMModel model = null;
final IcedHashSet<Key<Frame>> validKeys = new IcedHashSet<>();
try {
_job.update(0, "Adding GAM columns to training dataset...");
if (_foldColumn != null)
_parms._fold_column = _foldColumn;
_dinfo = new DataInfo(_train.clone(), _valid, 1, _parms._use_all_factor_levels
|| _parms._lambda_search, _parms._standardize ?
DataInfo.TransformType.STANDARDIZE : DataInfo.TransformType.NONE, DataInfo.TransformType.NONE,
_parms.missingValuesHandling() == GLMParameters.MissingValuesHandling.Skip,
_parms.missingValuesHandling() == GLMParameters.MissingValuesHandling.MeanImputation
|| _parms.missingValuesHandling() == GLMParameters.MissingValuesHandling.PlugValues,
_parms.makeImputer(), false, hasWeightCol(), hasOffsetCol(), hasFoldCol(),
_parms.interactionSpec());
DKV.put(_dinfo._key, _dinfo);
if (_foldColumn != null)
_parms._fold_column = null;
model = new GAMModel(dest(), _parms, new GAMModel.GAMModelOutput(GAM.this, _dinfo));
model.write_lock(_job);
if (_parms._keep_gam_cols) { // save gam column keys
model._output._gamTransformedTrainCenter = newTFrame._key;
}
_job.update(1, "calling GLM to build GAM model...");
GLMModel glmModel = buildGLMModel(_parms, newTFrame, newValidFrame); // obtained GLM model
if (model.evalAutoParamsEnabled) {
model.initActualParamValuesAfterGlmCreation();
}
Scope.track_generic(glmModel);
_job.update(0, "Building out GAM model...");
model.update(_job);
fillOutGAMModel(glmModel, model); // build up GAM model by copying over results in glmModel
// build GAM Model Metrics
_job.update(0, "Scoring training frame");
scoreGenModelMetrics(model, glmModel,train(), true); // score training dataset and generate model metrics
if (valid() != null) {
scoreGenModelMetrics(model, glmModel, valid(), false); // score validation dataset and generate model metrics
}
} catch(Gram.NonSPDMatrixException exception) {
      throw new Gram.NonSPDMatrixException("Consider enabling lambda_search, decreasing the scale parameter value for TP " +
              "smoothers, \ndisabling scaling for the TP penalty matrices, or not using thin plate regression smoothers at all.");
} finally {
try {
final List<Key> keep = new ArrayList<>();
if (model != null) {
if (_parms._keep_gam_cols) {
keepFrameKeys(keep, newTFrame._key);
} else {
DKV.remove(newTFrame._key);
}
if (_cvOn) {
if (_parms._keep_cross_validation_predictions) {
keepFrameKeys(keep, model._output._cross_validation_holdout_predictions_frame_id);
for (int fInd = 0; fInd < _glmNFolds; fInd++)
keepFrameKeys(keep, model._output._cross_validation_predictions[fInd]);
}
if (_parms._keep_cross_validation_fold_assignment)
keepFrameKeys(keep, model._output._cross_validation_fold_assignment_frame_id);
}
}
if (_dinfo != null)
_dinfo.remove();
if (newValidFrame != null && validKeys != null) {
keepFrameKeys(keep, newValidFrame._key); // save valid frame keys for scoring later
validKeys.addIfAbsent(newValidFrame._key); // save valid frame keys from folds to remove later
model._validKeys = validKeys; // move valid keys here to model._validKeys to be removed later
}
Scope.untrack(keep.toArray(new Key[keep.size()]));
} finally {
// Make sure Model is unlocked, as if an exception is thrown, the `ModelBuilder` expects the underlying model to be unlocked.
model.update(_job);
model.unlock(_job);
}
}
}
  /**
   * Performs scoring and generates the model metrics for the training data, and for the validation data if
   * provided by the user.
   *
   * @param model       GAM model being built
   * @param glmModel    underlying GLM model whose metrics are copied over
   * @param scoreFrame  frame to score (training or validation frame)
   * @param forTraining true for the training dataset and false for the validation dataset
   */
private void scoreGenModelMetrics(GAMModel model, GLMModel glmModel, Frame scoreFrame, boolean forTraining) {
Frame scoringTrain = new Frame(scoreFrame);
model.adaptTestForTrain(scoringTrain, true, true);
Frame scoredResult = model.score(scoringTrain);
scoredResult.delete();
ModelMetrics glmMetrics = forTraining ? glmModel._output._training_metrics : glmModel._output._validation_metrics;
if (forTraining) {
model._output.copyMetrics(model, scoringTrain, forTraining, glmMetrics);
Log.info("GAM[dest=" + dest() + "]" + model._output._training_metrics.toString());
} else {
model._output.copyMetrics(model, scoringTrain, forTraining, glmMetrics);
Log.info("GAM[dest=" + dest() + "]" + model._output._validation_metrics.toString());
}
}
GLMModel buildGLMModel(GAMParameters parms, Frame trainData, Frame validFrame) {
GLMParameters glmParam = copyGAMParams2GLMParams(parms, trainData, validFrame); // copy parameter from GAM to GLM
int numGamCols = _parms._gam_columns.length;
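    // scale each centered penalty matrix by its user-supplied scale value; a larger scale penalizes
    // wiggliness more heavily and therefore yields a smoother fit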
for (int find = 0; find < numGamCols; find++) {
if ((_parms._scale_sorted != null) && (_parms._scale_sorted[find] != 1.0))
_penaltyMatCenter[find] = ArrayUtils.mult(_penaltyMatCenter[find], _parms._scale_sorted[find]);
}
glmParam._glmType = gam;
if (_foldColumn == null) {
glmParam._nfolds = _glmNFolds;
} else {
glmParam._fold_column = _foldColumn;
glmParam._nfolds = 0;
}
glmParam._fold_assignment = _foldAssignment;
return new GLM(glmParam, _penaltyMatCenter, _gamColNamesCenter).trainModel().get();
}
void fillOutGAMModel(GLMModel glm, GAMModel model) {
model._gamColNamesNoCentering = _gamColNames; // copy over gam column names
model._gamColNames = _gamColNamesCenter;
model._output._gamColNames = _gamColNamesCenter;
model._output._zTranspose = _zTranspose;
model._output._zTransposeCS = _zTransposeCS;
model._output._allPolyBasisList = _allPolyBasisList;
model._gamFrameKeysCenter = _gamFrameKeysCenter;
model._nclass = _nclass;
model._output._binvD = _binvD;
model._output._knots = _knots;
model._output._numKnots = _numKnots;
model._cubicSplineNum = _cubicSplineNum;
model._mSplineNum = _mSplineNum;
model._iSplineNum = _iSplineNum;
model._thinPlateSmoothersWithKnotsNum = _thinPlateSmoothersWithKnotsNum;
model._output._gamColMeansRaw = _gamColMeansRaw;
model._output._oneOGamColStd = _oneOGamColStd;
// extract and store best_alpha/lambda/devianceTrain/devianceValid from best submodel of GLM model
model._output._best_alpha = glm._output.getSubmodel(glm._output._selected_submodel_idx).alpha_value;
model._output._best_lambda = glm._output.getSubmodel(glm._output._selected_submodel_idx).lambda_value;
model._output._devianceTrain = glm._output.getSubmodel(glm._output._selected_submodel_idx).devianceTrain;
model._output._devianceValid = glm._output.getSubmodel(glm._output._selected_submodel_idx).devianceValid;
model._gamColMeans = flat(_gamColMeans);
if (_parms._lambda == null) // copy over lambdas used
_parms._lambda = glm._parms._lambda.clone();
if (_parms._keep_gam_cols)
model._output._gam_transformed_center_key = model._output._gamTransformedTrainCenter.toString();
if (_parms._savePenaltyMat) {
model._output._penaltyMatricesCenter = _penaltyMatCenter;
model._output._penaltyMatrices = _penaltyMat;
model._output._penaltyScale = _penaltyScale;
if (_thinPlateSmoothersWithKnotsNum > 0) {
model._output._penaltyMatCS = _penaltyMatCS;
model._output._starT = _starT;
}
}
if (_parms._store_knot_locations)
model._output.copyKnots(_knots, _parms._gam_columns_sorted);
copyGLMCoeffs(glm, model, _parms, nclasses()); // copy over coefficient names and generate coefficients as beta = z*GLM_beta
copyGLMtoGAMModel(model, glm, _parms, valid()!=null); // copy over fields from glm model to gam model
if (_cvOn) {
_parms._betaConstraintsOff = true;
copyCVGLMtoGAMModel(model, glm, _parms, _foldColumn); // copy over fields from cross-validation
_parms._betaConstraintsOff = false;
_parms._nfolds = _foldColumn == null ? _glmNFolds : 0; // restore original cross-validation parameter values
_parms._fold_assignment = _foldAssignment;
_parms._fold_column = _foldColumn;
}
}
public GLMParameters copyGAMParams2GLMParams(GAMParameters parms, Frame trainData, Frame valid) {
GLMParameters glmParam = new GLMParameters();
List<String> gamOnlyList = Arrays.asList(
"_num_knots", "_gam_columns", "_bs", "_scale", "_train",
"_saveZMatrix", "_saveGamCols", "_savePenaltyMat"
);
Field[] field1 = GAMParameters.class.getDeclaredFields();
setParamField(parms, glmParam, false, field1, gamOnlyList);
Field[] field2 = Model.Parameters.class.getDeclaredFields();
setParamField(parms, glmParam, true, field2, gamOnlyList);
glmParam._train = trainData._key;
glmParam._valid = valid==null?null:valid._key;
glmParam._nfolds = _glmNFolds; // will do cv in GLM and not in GAM
glmParam._fold_assignment = _foldAssignment;
return glmParam;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam/GAMModel.java
|
package hex.gam;
import hex.*;
import hex.deeplearning.DeepLearningModel;
import hex.gam.MatrixFrameUtils.AddCSGamColumns;
import hex.gam.MatrixFrameUtils.AddISGamColumns;
import hex.gam.MatrixFrameUtils.AddMSGamColumns;
import hex.gam.MatrixFrameUtils.AddTPKnotsGamColumns;
import hex.genmodel.utils.DistributionFamily;
import hex.glm.GLM;
import hex.glm.GLMModel;
import hex.glm.GLMModel.GLMParameters.Family;
import hex.glm.GLMModel.GLMParameters.Link;
import hex.glm.GLMModel.GLMParameters.Solver;
import hex.util.EffectiveParametersUtils;
import water.*;
import water.exceptions.H2OColumnNotFoundArgumentException;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.udf.CFuncRef;
import water.util.*;
import java.io.Serializable;
import java.util.Arrays;
import static hex.gam.MatrixFrameUtils.GamUtils.*;
import static hex.genmodel.algos.gam.GamMojoModel.*;
import static hex.glm.GLMModel.GLMParameters.MissingValuesHandling;
import static hex.util.DistributionUtils.distributionToFamily;
import static hex.util.DistributionUtils.familyToDistribution;
public class GAMModel extends Model<GAMModel, GAMModel.GAMParameters, GAMModel.GAMModelOutput> {
private static final String[] BINOMIAL_CLASS_NAMES = new String[]{"0", "1"};
private static final int CS_NUM_INDEX = 0;
private static final int IS_NUM_INDEX = 1;
private static final int MS_NUM_INDEX = 2;
private static final int NUM_SINGLE_SPLINE_TYPES = 3;
public String[][] _gamColNamesNoCentering; // store column names only for GAM columns
public String[][] _gamColNames; // store column names only for GAM columns after decentering
public int[] _gamPredSize; // store size of predictors for gam smoother
public int[] _m; // parameter related to gamPredSize;
public int[] _M; // size of polynomial basis for thin plate regression smoothers
public int _cubicSplineNum;
public int _iSplineNum;
public int _mSplineNum;
public int _thinPlateSmoothersWithKnotsNum;
public Key<Frame>[] _gamFrameKeysCenter;
public double[] _gamColMeans;
public int _nclass; // 2 for binomial, > 2 for multinomial and ordinal
public double[] _ymu;
public long _nobs;
public long _nullDOF;
public int _rank;
public IcedHashSet<Key<Frame>> _validKeys = null;
@Override public String[] makeScoringNames() {
String[] names = super.makeScoringNames();
if (_output._glm_vcov != null)
names = ArrayUtils.append(names, "StdErr");
return names;
}
@Override public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) {
if (domain==null && (_parms._family==Family.binomial || _parms._family==Family.quasibinomial ||
_parms._family==Family.negativebinomial || _parms._family==Family.fractionalbinomial)) {
if (_parms._family == Family.fractionalbinomial)
domain = BINOMIAL_CLASS_NAMES;
else
domain = _output._responseDomains;
}
GLMModel.GLMWeightsFun glmf = new GLMModel.GLMWeightsFun(_parms._family, _parms._link, _parms._tweedie_variance_power,
_parms._tweedie_link_power, _parms._theta, 1, false);
return new MetricBuilderGAM(domain, _ymu, glmf, _rank, true, _parms._intercept, _nclass, _parms._auc_type);
}
public GAMModel(Key<GAMModel> selfKey, GAMParameters parms, GAMModelOutput output) {
super(selfKey, parms, output);
assert(Arrays.equals(_key._kb, selfKey._kb));
}
public void initActualParamValuesAfterGlmCreation(){
EffectiveParametersUtils.initFoldAssignment(_parms);
}
public TwoDimTable genCoefficientMagTableMultinomial(String[] colHeaders, double[][] coefficients,
String[] coefficientNames, String tableHeader) {
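    // for multinomial models the magnitude of a coefficient is the sum of |coefficient| across all classes,
    // and every sign is reported as POS (a single per-coefficient sign is not well defined across classes)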
String[] colTypes = new String[]{ "double", "string"};
String[] colFormat = new String[]{"%5f", ""};
int nCoeff = coefficients[0].length;
int nClass = coefficients.length;
String[] coeffNames = new String[nCoeff - 1];
String[] coeffNames2 = new String[coeffNames.length];
double[] coeffMags = new double[coeffNames.length];
double[] coeffMags2 = new double[coeffNames.length];
String[] coeffSigns = new String[coeffNames.length];
Log.info("genCoefficientMagTableMultinomial", String.format("coeffNames length: %d. coeffMags " +
"length: %d, coeffSigns length: %d", coeffNames.length, coeffMags.length, coeffSigns.length));
int countIndex = 0;
for (int index = 0; index < nCoeff; index++) {
if (!coefficientNames[index].equals("Intercept")) {
for (int classInd = 0; classInd < nClass; classInd++) {
coeffMags[countIndex] += Math.abs(coefficients[classInd][index]); // add abs(coefficients) of diff classes
}
coeffNames[countIndex] = coefficientNames[index];
coeffSigns[countIndex] = "POS"; // assign all signs to positive for multinomial
countIndex++;
}
}
// sort in descending order of the magnitudes
Integer[] indices = sortCoeffMags(coeffMags.length, coeffMags);
// reorder names and coeffMags with indices
for (int index = 0; index < coeffMags.length; index++) {
coeffMags2[index] = coeffMags[indices[index]];
coeffNames2[index] = coeffNames[indices[index]];
}
Log.info("genCoefficientMagTableMultinomial", String.format("coeffNames2 length: %d. coeffMags2 " +
"length: %d, coeffSigns length: %d", coeffNames2.length, coeffMags2.length, coeffSigns.length));
    TwoDimTable table = new TwoDimTable(tableHeader, "Standardized Coefficient Magnitudes", coeffNames2, colHeaders, colTypes, colFormat,
"names");
fillUpCoeffsMag(coeffMags2, coeffSigns, table, 0);
return table;
}
public TwoDimTable genCoefficientMagTable(String[] colHeaders, double[] coefficients,
String[] coefficientNames, String tableHeader) {
String[] colTypes = new String[]{ "double", "string"};
String[] colFormat = new String[]{"%5f", ""};
int nCoeff = coefficients.length;
String[] coeffNames = new String[nCoeff-1];
double[] coeffMags = new double[nCoeff-1]; // skip over intercepts
String[] coeffSigns = new String[nCoeff-1];
int countMagIndex = 0;
for (int index = 0; index < nCoeff; index++) {
if (!coefficientNames[index].equals("Intercept")) {
coeffMags[countMagIndex] = Math.abs(coefficients[index]);
coeffSigns[countMagIndex] = coefficients[index] > 0 ? "POS" : "NEG";
coeffNames[countMagIndex++] = coefficientNames[index];
}
}
Integer[] indices = sortCoeffMags(coeffMags.length, coeffMags); // sort magnitude indices in decreasing magnitude
String[] names2 = new String[coeffNames.length];
double[] mag2 = new double[coeffNames.length];
String[] sign2 = new String[coeffNames.length];
for (int i = 0; i < coeffNames.length; ++i) {
names2[i] = coeffNames[indices[i]];
mag2[i] = coeffMags[indices[i]];
sign2[i] = coeffSigns[indices[i]];
}
Log.info("genCoefficientMagTableMultinomial", String.format("coeffNames length: %d. coeffMags " +
"length: %d, coeffSigns length: %d", coeffNames.length, coeffMags.length, coeffSigns.length));
TwoDimTable table = new TwoDimTable(tableHeader, "", names2, colHeaders, colTypes, colFormat,
"names");
fillUpCoeffsMag( mag2, sign2, table, 0);
return table;
}
private void fillUpCoeffsMag(double[] coeffMags, String[] coeffSigns, TwoDimTable tdt, int rowStart) {
int arrLength = coeffMags.length+rowStart;
int arrCounter=0;
for (int i=rowStart; i<arrLength; i++) {
tdt.set(i, 0, coeffMags[arrCounter]);
tdt.set(i, 1, coeffSigns[arrCounter]);
arrCounter++;
}
}
  /** Score on an already adapted validation frame during cross-validation. This function is not expected to be called
   * by other methods. Returns a MetricBuilder that can be used to make model metrics.
* @param adaptFrm Already adapted frame with gamified columns
* @return MetricBuilder
*/
@Override
protected ModelMetrics.MetricBuilder scoreMetrics(Frame adaptFrm) {
GAMScore gs = makeScoringTask(adaptFrm,false,null, true);
assert gs._dinfo._valid:"_valid flag should be set on data info when doing scoring";
return gs.doAll(gs._dinfo._adaptedFrame)._mb;
}
@SuppressWarnings("WeakerAccess")
public static class GAMParameters extends Model.Parameters {
// the following parameters will be passed to GLM algos
public boolean _standardize = false; // pass to GLM algo
public Family _family = Family.AUTO;
public Link _link = Link.family_default;
public Solver _solver = Solver.AUTO;
public double _tweedie_variance_power;
public double _tweedie_link_power;
public double _theta; // 1/k and is used by negative binomial distribution only
public double [] _alpha;
public double [] _lambda;
public double[] _startval;
public Serializable _missing_values_handling = MissingValuesHandling.MeanImputation;
public boolean _lambda_search = false;
public boolean _use_all_factor_levels = false;
public int _max_iterations = -1;
public boolean _intercept = true;
public double _beta_epsilon = 1e-4;
public double _objective_epsilon = -1;
public double _obj_reg = -1;
public boolean _compute_p_values = false;
public boolean _scale_tp_penalty_mat = false;
public boolean _standardize_tp_gam_cols = false;
public String[] _interactions=null;
public StringPair[] _interaction_pairs=null;
public Key<Frame> _plug_values = null;
    // internal parameter, handle with care. GLM will stop when there are more than this number of active predictors (after strong rule screening)
public int _max_active_predictors = -1; // not used in GAM, copied over to GLM params
public boolean _generate_scoring_history = false; // if true, will generate GLM scoring history but will slow algo down
// the following parameters are for GAM
public int[] _num_knots; // array storing number of knots per smoother
public int[] _spline_orders; // storing I-spline orders for each predictor
public int[] _spline_orders_sorted;
public int[] _num_knots_sorted;
public int[] _num_knots_tp; // store num_knots for thin plate regression
    public String[] _knot_ids; // store frame keys that contain the knot locations for each smoother in gam_X
public String[][] _gam_columns; // array storing which predictor columns are specified
public String[][] _gam_columns_sorted; // move CS spline to the front and tp to the back in gam_columns
public int[] _gamPredSize; // store size of predictors for gam smoother
public int[] _m; // parameter related to gamPredSize;
public int[] _M; // size of polynomial basis for thin plate regression smoothers
public int[] _bs; // choose spline function for gam column, 0 = cr, 1 = thin plate regression with knots,
// 2 = monotone I-spline, 3 = NBSplineTypeI M-splines
public int[] _bs_sorted; // choose spline function for gam column, 0 = cr, 1 = thin plate regression with knots,
// 2 = monotone I-spline, 3 = NBSplineTypeI M-splines
    public double[] _scale; // array storing scaling values to control the wiggliness of the fit
public double[] _scale_sorted;
public boolean _saveZMatrix = false; // if asserted will save Z matrix
public boolean _keep_gam_cols = false; // if true will save the keys to gam Columns only
public boolean _savePenaltyMat = false; // if true will save penalty matrices as triple array
public String algoName() { return "GAM"; }
public String fullName() { return "Generalized Additive Model"; }
public String javaName() { return GAMModel.class.getName(); }
public double _prior = -1;
public boolean _cold_start = false; // start building GLM model from scratch if true
public int _nlambdas = -1;
public boolean _non_negative = false;
public boolean _remove_collinear_columns = false;
public double _gradient_epsilon = -1;
public boolean _early_stopping = true; // internal GLM early stopping.
public Key<Frame> _beta_constraints = null;
public double _lambda_min_ratio = -1;
public boolean _betaConstraintsOff = false; // used for cross-validations
// internal parameters added to support client mode
int _glmNFolds = 0;
Model.Parameters.FoldAssignmentScheme _glmFoldAssignment = null;
String _glmFoldColumn = null;
boolean _glmCvOn = false;
public boolean[] _splines_non_negative;
public boolean[] _splines_non_negative_sorted;
public boolean _store_knot_locations = false;
@Override
public long progressUnits() {
return 1;
}
public InteractionSpec interactionSpec() {
return InteractionSpec.create(_interactions, _interaction_pairs);
}
public MissingValuesHandling missingValuesHandling() {
if (_missing_values_handling instanceof MissingValuesHandling)
return (MissingValuesHandling) _missing_values_handling;
assert _missing_values_handling instanceof DeepLearningModel.DeepLearningParameters.MissingValuesHandling;
switch ((DeepLearningModel.DeepLearningParameters.MissingValuesHandling) _missing_values_handling) {
case MeanImputation:
return MissingValuesHandling.MeanImputation;
case Skip:
return MissingValuesHandling.Skip;
default:
throw new IllegalStateException("Unsupported missing values handling value: " + _missing_values_handling);
}
}
public DataInfo.Imputer makeImputer() {
if (missingValuesHandling() == MissingValuesHandling.PlugValues) {
if (_plug_values == null || _plug_values.get() == null) {
throw new IllegalStateException("Plug values frame needs to be specified when Missing Value Handling = PlugValues.");
}
return new GLM.PlugValuesImputer(_plug_values.get());
} else { // mean/mode imputation and skip (even skip needs an imputer right now! PUBDEV-6809)
return new DataInfo.MeanImputer();
}
}
public double linkInv(double x) {
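      // computes the inverse link (mean) function for the configured _link; worked examples (illustrative):
      // logit: linkInv(0.0) = 1/(exp(0)+1) = 0.5; log: linkInv(1.0) = exp(1.0) ~ 2.718;
      // tweedie with _tweedie_link_power = 0.5: linkInv(x) = x^(1/0.5) = x*x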
switch(_link) {
case identity:
return x;
case ologlog:
return 1.0-Math.exp(-1.0*Math.exp(x));
case ologit:
case logit:
return 1.0 / (Math.exp(-x) + 1.0);
case log:
return Math.exp(x);
case inverse:
double xx = (x < 0) ? Math.min(-1e-5, x) : Math.max(1e-5, x);
return 1.0 / xx;
case tweedie:
return _tweedie_link_power == 0
?Math.max(2e-16,Math.exp(x))
:Math.pow(x, 1/ _tweedie_link_power);
default:
throw new RuntimeException("unexpected link function " + _link.toString());
}
}
@Override
public void setDistributionFamily(DistributionFamily distributionFamily) {
_family = distributionToFamily(distributionFamily);
_link = Link.family_default;
}
@Override
public DistributionFamily getDistributionFamily() {
return familyToDistribution(_family);
}
}
@Override
protected String[][] scoringDomains(){
int responseColIdx = _output._dinfo.responseChunkId(0);
String [][] domains = _output._domains;
if ((_parms._family == Family.binomial || _parms._family == Family.quasibinomial ||
_parms._family == Family.fractionalbinomial)
&& _output._domains[responseColIdx] == null) {
domains = domains.clone();
if (_parms._family == Family.fractionalbinomial)
domains[responseColIdx] = BINOMIAL_CLASS_NAMES;
else
domains[responseColIdx] = _output._responseDomains;
}
return domains;
}
public static class GAMModelOutput extends Model.Output {
public String[] _coefficient_names_no_centering;
public String[] _coefficient_names;
public TwoDimTable _glm_model_summary;
public ModelMetrics _glm_training_metrics;
public ModelMetrics _glm_validation_metrics;
public double _glm_dispersion;
public double[] _glm_zvalues;
public double[] _glm_pvalues;
public double[][] _glm_vcov;
public double[] _glm_stdErr;
public double _glm_best_lamda_value;
public TwoDimTable _glm_scoring_history;
public TwoDimTable[] _glm_cv_scoring_history;
public TwoDimTable _coefficients_table;
public TwoDimTable _coefficients_table_no_centering;
public TwoDimTable _standardized_coefficient_magnitudes;
public TwoDimTable _variable_importances;
public VarImp _varimp; // should contain the same content as standardized coefficients
public double[] _model_beta_no_centering; // coefficients generated during model training
public double[] _standardized_model_beta_no_centering; // standardized coefficients generated during model training
public double[] _model_beta; // coefficients generated during model training
public double[] _standardized_model_beta; // standardized coefficients generated during model training
public double[][] _model_beta_multinomial_no_centering; // store multinomial coefficients during model training
public double[][] _standardized_model_beta_multinomial_no_centering; // store standardized multinomial coefficients during model training
public double[][] _model_beta_multinomial; // store multinomial coefficients during model training
public double[][] _standardized_model_beta_multinomial; // store standardized multinomial coefficients during model training
public double _best_alpha;
public double _best_lambda;
public double _devianceValid = Double.NaN;
public double _devianceTrain = Double.NaN;
private double[] _zvalues;
private double _dispersion;
private boolean _dispersionEstimated;
public String[][] _gamColNames; // store gam column names after transformation and centering
public double[][][] _zTranspose; // Z matrix for centralization, can be null
public double[][][] _penaltyMatricesCenter; // stores t(Z)*t(D)*Binv*D*Z and can be null
public double[][][] _penaltyMatrices; // store t(D)*Binv*D and can be null
public double[][][] _binvD; // store BinvD for each gam column specified for scoring
public double[][][] _knots; // store knots location for each gam smoother
int[][][] _allPolyBasisList; // store polynomial basis function for all tp smoothers
double[][][] _penaltyMatCS; // penalty matrix after removing optimization constraint, only for thin plate
double[][][] _zTransposeCS; // store for each thin plate smoother for removing optimization constraint
public int[] _numKnots; // store number of knots per gam smoother
public double[][][] _starT;
public double[][] _gamColMeansRaw;
public double[][] _oneOGamColStd;
public double[] _penaltyScale;
public Key<Frame> _gamTransformedTrainCenter; // contain key of predictors, all gamified columns centered
public DataInfo _dinfo;
public String[] _responseDomains;
public String _gam_transformed_center_key;
final Family _family;
public String[] _gam_knot_column_names;
public double[][] _knot_locations;
/***
* The function will copy over the knot locations into _knot_locations and the gam column names corresponding to
* the knot locations into _gam_knot_column_names.
*/
public void copyKnots(double[][][] knots, String[][] gam_columns_sorted) {
int numGam = gam_columns_sorted.length;
int trueGamNum = 0;
for (int index=0; index<numGam; index++) {
trueGamNum += gam_columns_sorted[index].length;
}
_gam_knot_column_names = new String[trueGamNum];
_knot_locations = new double[trueGamNum][];
int knotIndex=0;
for (int index=0; index<numGam; index++) {
if (knots[index].length==1) {
_gam_knot_column_names[knotIndex] = gam_columns_sorted[index][0];
_knot_locations[knotIndex++] = knots[index][0].clone();
} else {
int dupKnots = knots[index].length;
for (int index2=0; index2<dupKnots; index2++) {
_gam_knot_column_names[knotIndex] = gam_columns_sorted[index][index2];
_knot_locations[knotIndex++] = knots[index][index2].clone();
}
}
}
}
@Override
public int nclasses() {
if (_family == Family.multinomial || _family == Family.ordinal) {
return super.nclasses();
} else if (Family.binomial == _family || Family.quasibinomial == _family
|| Family.fractionalbinomial == _family) {
return 2;
} else {
return 1;
}
}
/** Names of levels for a categorical response column. */
@Override
public String[] classNames() {
if (_family == Family.quasibinomial || _family == Family.binomial)
return _responseDomains;
else if (_family == Family.fractionalbinomial)
return BINOMIAL_CLASS_NAMES;
else
return super.classNames();
}
public GAMModelOutput(GAM b, DataInfo dinfo) {
super(b, dinfo._adaptedFrame);
_dinfo = dinfo;
_domains = dinfo._adaptedFrame.domains(); // get domain of dataset predictors
_family = b._parms._family;
if (_family.equals(Family.quasibinomial)) {
_responseDomains = new VecUtils.CollectDoubleDomain(null, 2).doAll(dinfo._adaptedFrame.vec(b._parms._response_column)).stringDomain(dinfo._adaptedFrame.vec(b._parms._response_column).isInt());
} else {
_responseDomains = dinfo._adaptedFrame.lastVec().domain();
}
}
@Override public ModelCategory getModelCategory() {
switch (_family) {
case quasibinomial:
case fractionalbinomial:
case binomial: return ModelCategory.Binomial;
case multinomial: return ModelCategory.Multinomial;
case ordinal: return ModelCategory.Ordinal;
default: return ModelCategory.Regression;
}
}
public void copyMetrics(GAMModel gamModel, Frame train, boolean forTrain, ModelMetrics glmMetrics) {
ModelMetrics tmpMetrics = glmMetrics.deepCloneWithDifferentModelAndFrame(gamModel, train);
if (forTrain)
gamModel._output._training_metrics = tmpMetrics;
else
gamModel._output._validation_metrics = tmpMetrics;
}
}
/**
   * This method will massage the input test frame so that it can be used for scoring with a GAM model.
*
* @param test Testing Frame, updated in-place
* @param expensive Try hard to adapt; this might involve the creation of
* whole Vecs and thus get expensive. If {@code false}, then only adapt if
* no warnings and errors; otherwise just the messages are produced.
* Created Vecs have to be deleted by the caller (e.g. Scope.enter/exit).
   * @param computeMetrics true if model metrics should be computed on the adapted frame
   * @return warning/error messages produced while adapting the frame, if any
*/
@Override
public String[] adaptTestForTrain(Frame test, boolean expensive, boolean computeMetrics) {
// compare column names with test frame. If equal, call adaptTestForTrain. Otherwise, need to adapt it first
String[] testNames = test.names();
if (!equalColNames(testNames, _output._dinfo._adaptedFrame.names(), _parms._response_column)) { // shallow check: column number, column names only
Frame adptedF = cleanUpInputFrame(test); // column names here need to be in same sequence of dinfo._adaptedFrame
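      // replace the columns of test in place with the adapted (gamified) columns so that the caller's frame
      // reference still points at the frame that super.adaptTestForTrain() will see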
int testNumCols = test.numCols();
for (int index = 0; index < testNumCols; index++)
test.remove(0);
int adaptNumCols = adptedF.numCols();
for (int index = 0; index < adaptNumCols; index++)
test.add(adptedF.name(index), adptedF.vec(index));
return super.adaptTestForTrain(test, expensive, computeMetrics);
}
return super.adaptTestForTrain(test, expensive, computeMetrics);
}
public Frame cleanUpInputFrame(Frame test) {
Frame adptedF = new Frame(Key.make(), test.names(), test.vecs().clone()); // clone test dataset
int[] singleSplineNum = new int[]{_cubicSplineNum, _iSplineNum, _mSplineNum};
return cleanUpInputFrame(adptedF, _parms, _gamColNames, _output._binvD, _output._zTranspose,
_output._knots, _output._zTransposeCS, _output._allPolyBasisList, _output._gamColMeansRaw,
_output._oneOGamColStd, singleSplineNum);
}
public static Frame cleanUpInputFrame(Frame adptedF, GAMParameters parms, String[][] gamColNames, double[][][] binvD,
double[][][] zTranspose, double[][][] knots,
double[][][] zTransposeCS, int[][][] polyBasisList, double[][] gamColMeansRaw,
double[][] oneOGamColStd, int[] singleSplineNum) {
String[] testNames = adptedF.names(); // adptedF contains predictors, gam_columns and extras
// add gam columns for CS smoothers
Frame csAugmentedColumns = addSingleVariableGamColumns(adptedF, parms, gamColNames, binvD, zTranspose, knots,
singleSplineNum);
// add gam columns for TP smoothers
Frame tpAugmentedColumns = addTPGamColumns(adptedF, parms, zTransposeCS, zTranspose, polyBasisList,
knots, gamColMeansRaw, oneOGamColStd);
if (csAugmentedColumns == null)
csAugmentedColumns = tpAugmentedColumns;
else if (tpAugmentedColumns != null)
csAugmentedColumns.add(tpAugmentedColumns.names(), tpAugmentedColumns.removeAll());
if (parms._ignored_columns != null) { // remove ignored columns
for (String iname:parms._ignored_columns) {
if (ArrayUtils.contains(testNames, iname)) {
adptedF.remove(iname);
}
}
}
Vec respV = null;
Vec weightV = null;
Vec offsetV = null;
if (parms._weights_column != null && ArrayUtils.contains(testNames, parms._weights_column)) // move weight column to be last column before response column
weightV = adptedF.remove(parms._weights_column);
if (parms._offset_column != null && ArrayUtils.contains(testNames, parms._offset_column))
offsetV = adptedF.remove(parms._offset_column);
if (ArrayUtils.contains(testNames, parms._response_column))
respV = adptedF.remove(parms._response_column);
adptedF.add(csAugmentedColumns.names(), csAugmentedColumns.removeAll());
Scope.track(csAugmentedColumns);
if (weightV != null)
adptedF.add(parms._weights_column, weightV);
if (offsetV != null)
adptedF.add(parms._offset_column, offsetV);
if (respV != null)
adptedF.add(parms._response_column, respV);
return adptedF;
}
public static Frame adaptValidFrame(Frame adptedF, Frame valid, GAMParameters parms, String[][] gamColNames, double[][][] binvD,
double[][][] zTranspose, double[][][] knots,
double[][][] zTransposeCS, int[][][] polyBasisList, double[][] gamColMeansRaw,
double[][] oneOGamColStd, int[] singleGAMArrays) {
// add gam columns for single predictor splines
Frame singleVariableGamColumns = addSingleVariableGamColumns(adptedF, parms, gamColNames, binvD, zTranspose, knots,
singleGAMArrays);
// add gam columns for TP smoothers
Frame tpAugmentedColumns = addTPGamColumns(adptedF, parms, zTransposeCS, zTranspose, polyBasisList,
knots, gamColMeansRaw, oneOGamColStd);
if (singleVariableGamColumns == null)
singleVariableGamColumns = tpAugmentedColumns;
else if (tpAugmentedColumns != null)
singleVariableGamColumns.add(tpAugmentedColumns.names(), tpAugmentedColumns.removeAll());
Vec respV = null;
Vec weightV = null;
Vec offsetV = null;
if (parms._weights_column != null && ArrayUtils.contains(valid.names(), parms._weights_column)) // move weight column to be last column before response column
weightV = valid.remove(parms._weights_column);
if (parms._offset_column != null && ArrayUtils.contains(valid.names(), parms._offset_column))
offsetV = valid.remove(parms._offset_column);
if (ArrayUtils.contains(valid.names(), parms._response_column))
respV = valid.remove(parms._response_column);
valid.add(singleVariableGamColumns.names(), singleVariableGamColumns.removeAll());
Scope.track(singleVariableGamColumns);
if (weightV != null)
valid.add(parms._weights_column, weightV);
if (offsetV != null)
valid.add(parms._offset_column, offsetV);
if (respV != null)
valid.add(parms._response_column, respV);
return valid;
}
public static Frame addTPGamColumns(Frame adaptedF, GAMParameters parms, double[][][] zTransposeCS,
double[][][] zTranspose, int[][][] polyBasisList, double[][][] knots,
double[][] gamColMeansRaw, double[][] oneOColStd) {
int numTPCols = parms._M==null?0:parms._M.length;
if (numTPCols == 0)
return null;
AddTPKnotsGamColumns addTPCols = new AddTPKnotsGamColumns(parms, zTransposeCS, zTranspose, polyBasisList, knots,
adaptedF);
addTPCols.addTPGamCols(gamColMeansRaw, oneOColStd); // generate thin plate regression smoothers
return concateGamVecs(addTPCols._gamFrameKeysCenter);
}
public static Frame addSingleVariableGamColumns(Frame adptedF, GAMParameters parms, String[][] gamColNames,
double[][][] binvD, double[][][] zTranspose, double[][][] knots,
int[] singleGAMColArrays) {
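    // partition the sorted gam columns by spline type (CS, I-spline, M-spline), gamify each group separately,
    // then merge the resulting gamified frames into one frame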
int numCSGamCol = singleGAMColArrays[CS_NUM_INDEX];
int numISGamCol = singleGAMColArrays[IS_NUM_INDEX];
int numMSGamCol = singleGAMColArrays[MS_NUM_INDEX];
int numSingleVariableGamCols = ArrayUtils.sum(singleGAMColArrays);
if (numSingleVariableGamCols == 0) // no single variable GAM columns
return null;
Vec[] gamColCSSplines = new Vec[numCSGamCol];
Vec[] gamColISplines = new Vec[numISGamCol];
Vec[] gamColMSplines = new Vec[numMSGamCol];
String[] gamColCSNames = new String[numCSGamCol];
String[] gamColISNames = new String[numISGamCol];
String[] gamColMSNames = new String[numMSGamCol];
int countCS = 0;
int countIS = 0;
int countMS = 0;
for (int vind=0; vind<numSingleVariableGamCols; vind++) { // separate predictors to the different splines
if (adptedF.vec(parms._gam_columns_sorted[vind][0]) == null)
throw new H2OColumnNotFoundArgumentException("gam_columns", adptedF, parms._gam_columns_sorted[vind][0]);
if (parms._bs_sorted[vind] == CS_SPLINE_TYPE) {
gamColCSSplines[countCS] = adptedF.vec(parms._gam_columns_sorted[vind][0]).clone();
gamColCSNames[countCS++] = parms._gam_columns_sorted[vind][0];
} else if (parms._bs_sorted[vind] == IS_SPLINE_TYPE) {
gamColISplines[countIS] = adptedF.vec(parms._gam_columns_sorted[vind][0]).clone();
gamColISNames[countIS++] = parms._gam_columns_sorted[vind][0];
} else if (parms._bs_sorted[vind] == MS_SPLINE_TYPE) {
gamColMSplines[countMS] = adptedF.vec(parms._gam_columns_sorted[vind][0]).clone();
gamColMSNames[countMS++] = parms._gam_columns_sorted[vind][0];
}
}
Frame gamifiedCSCols = null;
Frame gamifiedISCols = null;
Frame gamifiedMSCols = null;
if (numCSGamCol > 0)
gamifiedCSCols = gamifiedSinglePredictors(gamColCSNames, gamColCSSplines, binvD, CS_SPLINE_TYPE, zTranspose, knots,
parms, gamColNames);
if (numISGamCol > 0)
gamifiedISCols = gamifiedSinglePredictors(gamColISNames, gamColISplines, null, IS_SPLINE_TYPE, null,
knots, parms, gamColNames);
if (numMSGamCol > 0)
gamifiedMSCols = gamifiedSinglePredictors(gamColMSNames, gamColMSplines, null, MS_SPLINE_TYPE, zTranspose, knots,
parms, gamColNames);
return mergedGamifiedCols(new Frame[]{gamifiedCSCols, gamifiedISCols, gamifiedMSCols});
}
private static Frame mergedGamifiedCols(Frame[] allGamifiedCols) {
Frame mergedFrame = null;
int numGams = allGamifiedCols.length;
for (int index = 0; index < numGams; index++) {
if (allGamifiedCols[index] != null) {
if (mergedFrame == null) {
mergedFrame = allGamifiedCols[index];
} else {
mergedFrame.add(allGamifiedCols[index].names(), allGamifiedCols[index].removeAll());
Scope.track(allGamifiedCols[index]);
}
}
}
Scope.track(mergedFrame);
return mergedFrame;
}
private static Frame gamifiedSinglePredictors(String[] gamifiedColNames, Vec[] gamColCSSplines, double[][][] binvD, int bsType,
double[][][] zTranspose, double[][][] knots, GAMParameters parms, String[][] gamColNames) {
Frame onlyGamifiedPredictors = new Frame(gamifiedColNames, gamColCSSplines);
int numGamCentered = 0;
AddCSGamColumns genCSGamCols = null;
AddISGamColumns genISGamCols = null;
AddMSGamColumns genMSGamCols = null;
if (bsType == CS_SPLINE_TYPE) {
genCSGamCols = new AddCSGamColumns(binvD, zTranspose, knots, parms._num_knots_sorted, onlyGamifiedPredictors,
parms._bs_sorted);
genCSGamCols.doAll(genCSGamCols._gamCols2Add, Vec.T_NUM, onlyGamifiedPredictors);
numGamCentered = genCSGamCols._gamCols2Add;
} else if (bsType == IS_SPLINE_TYPE) {
genISGamCols = new AddISGamColumns(knots, parms._num_knots_sorted, parms._bs_sorted, parms._spline_orders_sorted,
onlyGamifiedPredictors);
genISGamCols.doAll(genISGamCols._totGamifiedColCentered, Vec.T_NUM, onlyGamifiedPredictors);
numGamCentered = genISGamCols._totGamifiedColCentered;
} else if (bsType == MS_SPLINE_TYPE) {
genMSGamCols = new AddMSGamColumns(knots, zTranspose, parms._num_knots_sorted, parms._bs_sorted,
parms._spline_orders_sorted, onlyGamifiedPredictors);
genMSGamCols.doAll(genMSGamCols._totGamifiedColCentered, Vec.T_NUM, onlyGamifiedPredictors);
numGamCentered = genMSGamCols._totGamifiedColCentered;
}
String[] gamColsNamesCentered = new String[numGamCentered];
int offset = 0;
int numGamCols = parms._gam_columns.length;
for (int ind = 0; ind < numGamCols; ind++) {
if (bsType == parms._bs_sorted[ind]) {
System.arraycopy(gamColNames[ind], 0, gamColsNamesCentered, offset, gamColNames[ind].length);
offset += gamColNames[ind].length;
}
}
if (bsType == CS_SPLINE_TYPE)
return genCSGamCols.outputFrame(Key.make(), gamColsNamesCentered, null);
else if (bsType == IS_SPLINE_TYPE)
return genISGamCols.outputFrame(Key.make(), gamColsNamesCentered, null);
else if (bsType == MS_SPLINE_TYPE)
return genMSGamCols.outputFrame(Key.make(), gamColsNamesCentered, null);
else
return null;
}
@Override
protected PredictScoreResult predictScoreImpl(Frame fr, Frame adaptFrm, String destination_key, Job j,
boolean computeMetrics, CFuncRef customMetricFunc) {
String[] predictNames = makeScoringNames();
String[][] domains = new String[predictNames.length][];
GAMScore gs = makeScoringTask(adaptFrm, true, j, computeMetrics);
gs.doAll(predictNames.length, Vec.T_NUM, gs._dinfo._adaptedFrame);
ModelMetrics.MetricBuilder<?> mb = null;
Frame rawFrame = null;
if (gs._computeMetrics) {
mb = gs._mb;
rawFrame = gs.outputFrame();
}
domains[0] = gs._predDomains;
Frame outputFrame = gs.outputFrame(Key.make(destination_key), predictNames, domains);
return new PredictScoreResult(mb, rawFrame, outputFrame);
}
private GAMScore makeScoringTask(Frame adaptFrm, boolean makePredictions, Job j, boolean computeMetrics) {
int responseId = adaptFrm.find(_output.responseName());
if(responseId > -1 && adaptFrm.vec(responseId).isBad()) { // remove inserted invalid response
adaptFrm = new Frame(adaptFrm.names(),adaptFrm.vecs());
adaptFrm.remove(responseId);
}
final boolean detectedComputeMetrics = computeMetrics && (adaptFrm.vec(_output.responseName()) != null && !adaptFrm.vec(_output.responseName()).isBad());
String [] domain = _output.nclasses()<=1 ? null : (!detectedComputeMetrics ? _output._domains[_output._domains.length-1] : adaptFrm.lastVec().domain());
if (_parms._family.equals(Family.quasibinomial))
domain = _output._responseDomains;
return new GAMScore(j, this, _output._dinfo.scoringInfo(_output._names,adaptFrm),domain,detectedComputeMetrics,
makePredictions);
}
private class GAMScore extends MRTask<GAMScore> {
private DataInfo _dinfo;
private double[] _coeffs;
private double[][] _coeffs_multinomial;
private int _nclass;
private boolean _computeMetrics;
private final Job _j;
private Family _family;
private transient double[] _eta; // store eta calculation
private String[] _predDomains;
private final GAMModel _m;
private final double _defaultThreshold;
private int _lastClass;
private ModelMetrics.MetricBuilder _mb;
final boolean _generatePredictions;
private transient double[][] _vcov;
private transient double[] _tmp;
private boolean _classifier2class;
private GAMScore(final Job j, final GAMModel m, DataInfo dinfo, final String[] domain, final boolean computeMetrics,
final boolean makePredictions) {
_j = j;
_m = m;
_computeMetrics = computeMetrics;
_predDomains = domain;
_nclass = m._output.nclasses();
_generatePredictions = makePredictions;
_classifier2class = _m._parms._family == GLMModel.GLMParameters.Family.binomial ||
_m._parms._family == Family.quasibinomial || _m._parms._family == Family.fractionalbinomial;
if(_m._parms._family == GLMModel.GLMParameters.Family.multinomial ||
_m._parms._family == GLMModel.GLMParameters.Family.ordinal){
_coeffs = null;
_coeffs_multinomial = m._output._model_beta_multinomial;
} else {
double [] beta = m._output._model_beta;
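        // keep only predictors with non-zero coefficients (e.g. those not dropped by regularization) and
        // shrink dinfo and beta accordingly so scoring only touches active predictors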
int [] ids = new int[beta.length-1];
int k = 0;
for(int i = 0; i < beta.length-1; ++i){ // pick out beta that is not zero in ids
if(beta[i] != 0) ids[k++] = i;
}
if (k < beta.length - 1) {
ids = Arrays.copyOf(ids, k);
dinfo = dinfo.filterExpandedColumns(ids);
double[] beta2 = MemoryManager.malloc8d(ids.length + 1);
int l = 0;
for (int x : ids) {
beta2[l++] = beta[x];
}
beta2[l] = beta[beta.length - 1];
beta = beta2;
}
_coeffs_multinomial = null;
_coeffs = beta;
}
_dinfo = dinfo;
_dinfo._valid = true; // marking dinfo as validation data set disables an assert on unseen levels (which should not happen in train)
_defaultThreshold = m.defaultThreshold();
_family = m._parms._family;
_lastClass = _nclass-1;
}
@Override
public void map(Chunk[]chks, NewChunk[] nc) {
if (isCancelled() || _j != null && _j.stop_requested()) return;
if (_family.equals(Family.ordinal)||_family.equals(Family.multinomial))
_eta = MemoryManager.malloc8d(_nclass);
_vcov = _m._output._glm_vcov;
if (_vcov != null)
_tmp = MemoryManager.malloc8d(_vcov.length);
int numPredVals = _nclass<=1?1:_nclass+1; // number of predictor values expected.
double[] predictVals = MemoryManager.malloc8d(numPredVals);
float[] trueResponse = null;
if (_computeMetrics) {
_mb = _m.makeMetricBuilder(_predDomains);
trueResponse = new float[1];
}
DataInfo.Row r = _dinfo.newDenseRow();
int chkLen = chks[0]._len;
for (int rid = 0; rid < chkLen; rid++) { // extract each row
_dinfo.extractDenseRow(chks, rid, r);
processRow(r, predictVals, nc, numPredVals);
if (_computeMetrics && !r.response_bad) {
trueResponse[0] = (float) r.response[0];
_mb.perRow(predictVals, trueResponse, r.weight, r.offset, _m);
}
}
if (_j != null) _j.update(1);
}
private void processRow(DataInfo.Row r, double[] ps, NewChunk[] preds, int ncols) {
if (r.predictors_bad)
Arrays.fill(ps, Double.NaN); // output NaN with bad predictor entries
else if (r.weight == 0)
Arrays.fill(ps, 0.0); // zero weight entries got 0 too
switch (_family) {
case multinomial: ps = scoreMultinomialRow(r, r.offset, ps); break;
case ordinal: ps = scoreOrdinalRow(r, r.offset, ps); break;
default: ps = scoreRow(r, r.offset, ps); break;
}
if (_generatePredictions) {
for (int predCol = 0; predCol < ncols; predCol++) { // write prediction to NewChunk
preds[predCol].addNum(ps[predCol]);
}
if (_vcov != null)
preds[ncols].addNum(Math.sqrt(r.innerProduct(r.mtrxMul(_vcov, _tmp))));
}
}
public double[] scoreRow(DataInfo.Row r, double offset, double[] preds) {
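      // for binomial-like families preds = {predicted label, P(class 0), P(class 1)}; e.g. (illustrative)
      // mu = 0.7 with the default threshold 0.5 gives preds = {1, 0.3, 0.7}. For regression preds[0] = mu.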
double mu = _m._parms.linkInv(r.innerProduct(_coeffs) + offset);
if (_classifier2class) { // threshold for prediction
preds[0] = mu >= _defaultThreshold ? 1 : 0;
preds[1] = 1.0 - mu; // class 0
preds[2] = mu; // class 1
} else
preds[0] = mu;
return preds;
}
public double[] scoreOrdinalRow(DataInfo.Row r, double offset, double[] preds) {
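      // ordinal scoring with cumulative logits: the CDF of class c is 1/(1+exp(-eta_c)) and per-class
      // probabilities are consecutive CDF differences; preds[0] is the first class whose eta is positive,
      // defaulting to the last class when none is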
final double[][] bm = _coeffs_multinomial;
      Arrays.fill(preds, 0); // initialize all class probabilities to zero
preds[0] = _lastClass; // initialize to last class by default here
double previousCDF = 0.0;
for (int cInd = 0; cInd < _lastClass; cInd++) { // classify row and calculate PDF of each class
double eta = r.innerProduct(bm[cInd]) + offset;
double currCDF = 1.0 / (1 + Math.exp(-eta));
preds[cInd + 1] = currCDF - previousCDF;
previousCDF = currCDF;
if (eta > 0) { // found the correct class
preds[0] = cInd;
break;
}
}
for (int cInd = (int) preds[0] + 1; cInd < _lastClass; cInd++) { // continue PDF calculation
double currCDF = 1.0 / (1 + Math.exp(-r.innerProduct(bm[cInd]) + offset));
preds[cInd + 1] = currCDF - previousCDF;
previousCDF = currCDF;
}
preds[_nclass] = 1-previousCDF;
return preds;
}
public double[] scoreMultinomialRow(DataInfo.Row r, double offset, double[] preds) {
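      // numerically stable softmax: the per-row maximum eta is subtracted before exponentiation so exp()
      // cannot overflow; preds[1..nclass] hold class probabilities and preds[0] holds the argmax class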
double[] eta = _eta;
final double[][] bm = _coeffs_multinomial;
double sumExp = 0;
double maxRow = Double.NEGATIVE_INFINITY;
for (int c = 0; c < bm.length; ++c) {
eta[c] = r.innerProduct(bm[c]) + offset;
if(eta[c] > maxRow)
maxRow = eta[c];
}
for (int c = 0; c < bm.length; ++c)
        sumExp += eta[c] = Math.exp(eta[c] - maxRow); // subtract maxRow for numerical stability
sumExp = 1.0 / sumExp;
for (int c = 0; c < bm.length; ++c)
preds[c + 1] = eta[c] * sumExp;
preds[0] = ArrayUtils.maxIndex(eta);
return preds;
}
@Override
public void reduce(GAMScore other) {
if (_mb !=null)
_mb.reduce(other._mb);
}
@Override
protected void postGlobal() {
if (_mb != null)
_mb.postGlobal();
}
}
@Override
public double[] score0(double[] data, double[] preds) {
throw new UnsupportedOperationException("GAMModel.score0 should never be called");
}
@Override
public GAMMojoWriter getMojo() {
return new GAMMojoWriter(this);
}
@Override
protected Futures remove_impl(Futures fs, boolean cascade) {
super.remove_impl(fs, cascade);
Keyed.remove(_output._gamTransformedTrainCenter, fs, true);
if (_validKeys != null)
for (Key oneKey:_validKeys) {
Keyed.remove(oneKey, fs, true);
}
if (_parms._keep_cross_validation_predictions)
Keyed.remove(_output._cross_validation_holdout_predictions_frame_id, fs, true);
if (_parms._keep_cross_validation_fold_assignment)
Keyed.remove(_output._cross_validation_fold_assignment_frame_id, fs, true);
if (_parms._keep_cross_validation_models && _output._cross_validation_models!=null) {
for (Key oneModelKey : _output._cross_validation_models)
Keyed.remove(oneModelKey, fs, true);
}
return fs;
}
@Override protected AutoBuffer writeAll_impl(AutoBuffer ab) {
if (_output._gamTransformedTrainCenter!=null)
ab.putKey(_output._gamTransformedTrainCenter);
if (_parms._keep_cross_validation_predictions)
ab.putKey(_output._cross_validation_holdout_predictions_frame_id);
if (_parms._keep_cross_validation_fold_assignment)
ab.putKey(_output._cross_validation_fold_assignment_frame_id);
if (_parms._keep_cross_validation_models && _output._cross_validation_models!=null) {
for (Key oneModelKey : _output._cross_validation_models)
ab.putKey(oneModelKey);
}
return super.writeAll_impl(ab);
}
@Override protected Keyed readAll_impl(AutoBuffer ab, Futures fs) {
if (_output._gamTransformedTrainCenter!=null)
ab.getKey(_output._gamTransformedTrainCenter, fs);
if (_parms._keep_cross_validation_predictions)
ab.getKey(_output._cross_validation_holdout_predictions_frame_id, fs);
if (_parms._keep_cross_validation_fold_assignment)
ab.getKey(_output._cross_validation_fold_assignment_frame_id, fs);
if (_parms._keep_cross_validation_models && _output._cross_validation_models!=null) {
for (Key oneModelKey : _output._cross_validation_models)
ab.getKey(oneModelKey, fs);
}
return super.readAll_impl(ab, fs);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam/GAMMojoWriter.java
|
package hex.gam;
import hex.ModelMojoWriter;
import hex.glm.GLMModel;
import java.io.IOException;
import static hex.glm.GLMModel.GLMParameters.Family.*;
public class GAMMojoWriter extends ModelMojoWriter<GAMModel, GAMModel.GAMParameters, GAMModel.GAMModelOutput> {
@Override
public String mojoVersion() {
return "1.00";
}
@SuppressWarnings("unused")
public GAMMojoWriter(){}
public GAMMojoWriter(GAMModel model) {
super(model);
}
@Override
protected void writeModelData() throws IOException {
int numGamCols = model._parms._gam_columns.length;
writekv("use_all_factor_levels", model._parms._use_all_factor_levels);
writekv("cats", model._output._dinfo._cats);
writekv("cat_offsets", model._output._dinfo._catOffsets);
writekv("numsCenter", model._output._dinfo._nums);
writekv("num", model._output._dinfo._nums+numGamCols);
boolean imputeMeans = model._parms.missingValuesHandling().equals(GLMModel.GLMParameters.MissingValuesHandling.MeanImputation);
writekv("mean_imputation", imputeMeans);
if (imputeMeans) {
writekv("numNAFillsCenter", model._output._dinfo.numNAFill());
writekv("catNAFills", model._output._dinfo.catNAFill());
}
if (model._parms._family.equals(binomial))
writekv("family", "bernoulli");
else
writekv("family", model._parms._family);
writekv("link", model._parms._link);
if (model._parms._family.equals(GLMModel.GLMParameters.Family.tweedie))
writekv("tweedie_link_power", model._parms._tweedie_link_power);
// add GAM specific parameters
writekv("num_knots", model._parms._num_knots); // an array
writekv("num_knots_sorted", model._parms._num_knots_sorted); // an array
write2DStringArrays(model._parms._gam_columns, "gam_columns"); // gam_columns specified by users
write2DStringArrays(model._parms._gam_columns_sorted, "gam_columns_sorted"); // gam_columns specified by users
int numGamLength = 0;
int numGamCLength = 0;
for (int cInd=0; cInd < numGamCols; cInd++) { // contains expanded gam column names center and not centered
numGamLength += model._gamColNamesNoCentering[cInd].length;
numGamCLength += model._gamColNames[cInd].length;
}
int[] gamColumnDim = genGamColumnDim(model._parms._gam_columns);
writekv("gam_column_dim", gamColumnDim); // an array indicating array size of parms._gam_columns
int[] gamColumnDimSorted = genGamColumnDim(model._parms._gam_columns_sorted);
writekv("gam_column_dim_sorted", gamColumnDimSorted); // an array
String[] trainColGamColNoCenter = genTrainColGamCols(numGamLength, numGamCLength);
writekv("num_expanded_gam_columns", numGamLength);
writekv("num_expanded_gam_columns_center", numGamCLength);
writeStringArrays(trainColGamColNoCenter, "_names_no_centering"); // column names without centering
writekv("total feature size", trainColGamColNoCenter.length);
int[] gamColNamesDim = genGamColumnDim(model._gamColNamesNoCentering);
writekv("gamColName_dim", gamColNamesDim);
write2DStringArrays(model._gamColNames, "gamColNamesCenter");// numGamCol by numKnots for CS, by numKnots+M for TP
write2DStringArrays(model._gamColNamesNoCentering,"gamColNames"); // numGamCol by numKnots-1
if (model._parms._family==multinomial || model._parms._family==ordinal) {
write2DArray(model._output._model_beta_multinomial_no_centering, "beta_multinomial");
writekv("beta length per class", model._output._model_beta_multinomial_no_centering[0].length);
write2DArray(model._output._model_beta_multinomial, "beta_multinomial_centering");
writekv("beta center length per class", model._output._model_beta_multinomial[0].length);
} else {
writekv("beta", model._output._model_beta_no_centering); // beta without centering
writekv("beta length per class", model._output._model_beta_no_centering.length);
writekv("beta_center", model._output._model_beta);
writekv("beta center length per class", model._output._model_beta.length);
}
writekv("bs", model._parms._bs); // an array of choice of spline function types
writekv("bs_sorted", model._parms._bs_sorted); // an array of choice of spline functions
write3DArray(model._output._knots, "knots");
write3DArray(model._output._zTranspose, "zTranspose");
writekv("_d", model._parms._gamPredSize);
writekv("num_CS_col", model._cubicSplineNum);
writekv("num_IS_col", model._iSplineNum);
writekv("num_MS_col", model._mSplineNum);
if (model._iSplineNum > 0 || model._mSplineNum > 0) {
writekv("spline_orders_sorted", model._parms._spline_orders_sorted);
writekv("spline_orders", model._parms._spline_orders);
}
if (model._output._zTransposeCS != null) { // only for thin plate regression splines
write3DIntArray(model._output._allPolyBasisList, "polynomialBasisList");
write3DArray(model._output._zTransposeCS, "zTransposeCS");
write2DArray(model._output._gamColMeansRaw, "gamColMeansRaw");
write2DArray(model._output._oneOGamColStd, "gamColStdRaw");
writekv("_M", model._parms._M);
writekv("_m", model._parms._m);
writekv("num_knots_TP", model._parms._num_knots_tp); // an array
writekv("standardize", model._parms._standardize);
writekv("num_TP_col", model._thinPlateSmoothersWithKnotsNum);
} else {
writekv("num_TP_col", 0);
}
if (model._cubicSplineNum > 0)
write3DArray(model._output._binvD, "_binvD");
}
public int[] genGamColumnDim(String[][] gamColumnNames) {
int numGamCols = gamColumnNames.length;
int[] gamColDim = new int[numGamCols];
for (int index = 0; index < numGamCols; index++)
gamColDim[index] = gamColumnNames[index].length;
return gamColDim;
}
public String[] genTrainColGamCols(int gamColLength, int gamCColLength) {
int colLength = model._output._names.length-gamCColLength+gamColLength-1;// to exclude response
int normalColLength = model._output._names.length-gamCColLength-1;
String[] trainNamesNGamNames = new String[colLength];
System.arraycopy(model._output._names, 0, trainNamesNGamNames, 0, normalColLength);
int startInd = normalColLength;
for (int gind = 0; gind < model._gamColNamesNoCentering.length; gind++) {
int copyLen = model._gamColNamesNoCentering[gind].length;
System.arraycopy(model._gamColNamesNoCentering[gind], 0, trainNamesNGamNames, startInd, copyLen);
startInd += copyLen;
}
return trainNamesNGamNames;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam/MetricBuilderGAM.java
|
package hex.gam;
import hex.*;
import hex.glm.GLMModel;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.ArrayUtils;
import water.util.MathUtils;
import java.util.Arrays;
import static hex.glm.GLMModel.GLMParameters.Family.*;
public class MetricBuilderGAM extends ModelMetricsSupervised.MetricBuilderSupervised<MetricBuilderGAM> {
double _residual_deviance;
double _null_deviance;
long _nobs;
double _log_likelihood;
double _aic;
private double _aic2;
final GLMModel.GLMWeightsFun _glmf;
ModelMetrics.MetricBuilder _metricBuilder; // point to generic model metric classes
final boolean _intercept;
private final double[] _ymu;
final boolean _computeMetrics;
final private int _rank;
int _nclass;
public MetricBuilderGAM(String[] domain, double[] ymu, GLMModel.GLMWeightsFun glmf, int rank, boolean computeMetrics, boolean intercept, int nclass, MultinomialAucType aucType) {
super(domain==null?0:domain.length, domain);
_intercept = intercept;
_computeMetrics = computeMetrics;
_glmf = glmf;
_rank = rank;
_nclass = nclass;
_ymu = ymu;
switch (_glmf._family) {
case binomial:
case quasibinomial:
case fractionalbinomial:
_metricBuilder = new ModelMetricsBinomial.MetricBuilderBinomial(domain); break;
case multinomial:
_metricBuilder = new ModelMetricsMultinomial.MetricBuilderMultinomial(nclass, domain, aucType); break;
case ordinal:
_metricBuilder = new ModelMetricsOrdinal.MetricBuilderOrdinal(nclass, domain); break;
default:
_metricBuilder = new ModelMetricsRegression.MetricBuilderRegression(); // everything else falls back to regression
}
}
@Override
public double[] perRow(double[] ds, float[] yact, double weight, double offset, Model m) {
if (weight == 0) return ds;
_metricBuilder.perRow(ds, yact, weight, offset, m); // grab the generic terms
if (_glmf._family.equals(GLMModel.GLMParameters.Family.negativebinomial))
_log_likelihood += m.likelihood(weight, yact[0], ds);
if (!ArrayUtils.hasNaNsOrInfs(ds) && !ArrayUtils.hasNaNsOrInfs(yact)) {
if (_glmf._family.equals(GLMModel.GLMParameters.Family.multinomial) || _glmf._family.equals(GLMModel.GLMParameters.Family.ordinal))
add2(yact[0], ds[0], weight, offset);
else if (_glmf._family.equals(binomial) || _glmf._family.equals(quasibinomial) ||
_glmf._family.equals(fractionalbinomial))
add2(yact[0], ds[2], weight, offset);
else
add2(yact[0], ds[0], weight, offset);
}
return ds;
}
private void add2(double yresp, double ypredict, double weight, double offset) {
_wcount += weight;
++_nobs;
if (!_glmf._family.equals(multinomial) && !_glmf._family.equals(ordinal)) {
_residual_deviance += weight * _glmf.deviance(yresp, ypredict);
if (offset == 0)
_null_deviance += weight * _glmf.deviance(yresp, _ymu[0]);
else
_null_deviance += weight * _glmf.deviance(yresp, _glmf.linkInv(offset + _glmf.link(_ymu[0])));
}
if (_glmf._family.equals(poisson)) { // AIC for poisson
long y = Math.round(yresp);
double logfactorial = MathUtils.logFactorial(y);
_aic2 += weight*(yresp*Math.log(ypredict)-logfactorial-ypredict);
}
}
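// Note on the Poisson branch above: _aic2 accumulates the weighted Poisson log-likelihood
// sum_i w_i*(y_i*log(mu_i) - log(y_i!) - mu_i); computeAIC() later converts it to AIC via
// -2*_aic2 + 2*rank. For example, a single row with weight 1, y = 2 and mu = 2 contributes
// 2*log(2) - log(2!) - 2 = log(2) - 2.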
public void reduce(MetricBuilderGAM other) {
if (_computeMetrics)
_metricBuilder.reduce(other._metricBuilder);
_residual_deviance += other._residual_deviance;
_null_deviance += other._null_deviance;
if (Arrays.asList(gaussian, binomial, quasibinomial, fractionalbinomial,
poisson, negativebinomial, gamma, tweedie).contains(_glmf._family)) {
_log_likelihood += other._log_likelihood;
}
_nobs += other._nobs;
_aic2 += other._aic2;
_wcount += other._wcount;
}
public final double residualDeviance() { return _residual_deviance;}
public final long nullDOF() { return _nobs-(_intercept?1:0);}
public final long resDOF() {
if (_glmf._family.equals(ordinal))
return _nobs-(_rank/(_nclasses-1)+_nclasses-2);
else
return _nobs-_rank;
}
protected void computeAIC(){
_aic = 0;
switch( _glmf._family) {
case gaussian:
_aic = _nobs * (Math.log(_residual_deviance / _nobs * 2 * Math.PI) + 1) + 2;
break;
case quasibinomial:
case fractionalbinomial:
case binomial:
_aic = _residual_deviance;
break;
case poisson:
_aic = -2*_aic2;
break; // AIC is set during the validation task
case gamma:
_aic = Double.NaN;
break;
case ordinal:
case tweedie:
case multinomial:
_aic = Double.NaN;
break;
case negativebinomial:
_aic = 2* _log_likelihood;
break;
default:
assert false : "missing implementation for family " + _glmf._family;
}
_aic += 2*_rank;
}
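// The switch above supplies the family-specific term and the final line adds the 2*rank penalty.
// For example, for the binomial family the result is AIC = residual_deviance + 2*rank, and for the
// gaussian family it is AIC = nobs*(log(residual_deviance/nobs * 2*PI) + 1) + 2 + 2*rank.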
@Override
public double[] perRow(double[] ds, float[] yact, Model m) {
return perRow(ds, yact, 1, 0, m);
}
@Override
public ModelMetrics makeModelMetrics(Model m, Frame f, Frame adaptedFrame, Frame preds) {
GAMModel gamM = (GAMModel) m;
computeAIC();
ModelMetrics mm=_metricBuilder.makeModelMetrics(gamM, f, null, null);
if (_glmf._family.equals(GLMModel.GLMParameters.Family.binomial) || _glmf._family.equals(quasibinomial) ||
_glmf._family.equals(fractionalbinomial)) {
ModelMetricsBinomial metricsBinomial = (ModelMetricsBinomial) mm;
GainsLift gl = null;
if (preds != null) {
Vec resp = f.vec(gamM._parms._response_column);
Vec weights = f.vec(gamM._parms._weights_column);
if (resp != null && fractionalbinomial != _glmf._family) {
gl = new GainsLift(preds.lastVec(), resp, weights);
gl._groups = m._parms._gainslift_bins;
gl.exec(gamM._output._job);
}
}
mm = new ModelMetricsBinomialGLM(m, f, mm._nobs, mm._MSE, _domain, metricsBinomial._sigma,
metricsBinomial._auc, metricsBinomial._logloss, residualDeviance(), _null_deviance, _aic, nullDOF(),
resDOF(), gl, _customMetric, _log_likelihood);
} else if (_glmf._family.equals(multinomial)) {
ModelMetricsMultinomial metricsMultinomial = (ModelMetricsMultinomial) mm;
mm = new ModelMetricsBinomialGLM.ModelMetricsMultinomialGLM(m, f, metricsMultinomial._nobs,
metricsMultinomial._MSE, metricsMultinomial._domain, metricsMultinomial._sigma, metricsMultinomial._cm,
metricsMultinomial._hit_ratios, metricsMultinomial._logloss, residualDeviance(),_null_deviance, _aic,
nullDOF(), resDOF(), metricsMultinomial._auc, _customMetric, _log_likelihood);
} else if (_glmf._family == GLMModel.GLMParameters.Family.ordinal) { // ordinal should have a different resDOF()
ModelMetricsOrdinal metricsOrdinal = (ModelMetricsOrdinal) mm;
mm = new ModelMetricsBinomialGLM.ModelMetricsOrdinalGLM(m, f, metricsOrdinal._nobs, metricsOrdinal._MSE,
metricsOrdinal._domain, metricsOrdinal._sigma, metricsOrdinal._cm, metricsOrdinal._hit_ratios,
metricsOrdinal._logloss, residualDeviance(), _null_deviance, _aic, nullDOF(), resDOF(), _customMetric, _log_likelihood);
} else {
ModelMetricsRegression metricsRegression = (ModelMetricsRegression) mm;
mm = new ModelMetricsRegressionGLM(m, f, metricsRegression._nobs, metricsRegression._MSE,
metricsRegression._sigma, metricsRegression._mean_absolute_error,
metricsRegression._root_mean_squared_log_error, residualDeviance(),
residualDeviance() / _wcount, _null_deviance, _aic, nullDOF(), resDOF(), _customMetric, _log_likelihood);
}
return gamM.addModelMetrics(mm);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam/GamSplines/CubicRegressionSplines.java
|
package hex.gam.GamSplines;
import hex.gam.MatrixFrameUtils.TriDiagonalMatrix;
import hex.util.LinearAlgebraUtils;
import water.util.ArrayUtils;
import static hex.genmodel.utils.ArrayUtils.eleDiff;
import static hex.util.LinearAlgebraUtils.generateTriDiagMatrix;
public class CubicRegressionSplines {
public double[] _knots; // store knot values for the spline class
public double[] _hj; // store difference between knots, length _knotNum-1
int _knotNum; // number of knot values
public CubicRegressionSplines(int knotNum, double[] knots) {
_knotNum = knotNum;
_knots = knots;
_hj = eleDiff(_knots);
}
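// For illustration (hypothetical knot values): new CubicRegressionSplines(3, new double[]{0.0, 0.5, 2.0})
// stores _knots = {0.0, 0.5, 2.0} and _hj = {0.5, 1.5}, the widths of the two knot intervals.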
public double[][] gen_BIndvD(double[] hj) { // generate matrix bInvD
TriDiagonalMatrix matrixD = new TriDiagonalMatrix(hj); // of dimension (_knotNum-2) by _knotNum
double[][] matB = generateTriDiagMatrix(hj);
// obtain cholesky of matB
LinearAlgebraUtils.choleskySymDiagMat(matB); // verified
// expand matB from being a lower diagonal matrix only to a full blown square matrix
double[][] fullmatB = LinearAlgebraUtils.expandLowTrian2Ful(matB);
// obtain inverse of matB
double[][] bInve = LinearAlgebraUtils.chol2Inv(fullmatB, false); // verified with small matrix
// perform inverse(matB)*matD and return it
return LinearAlgebraUtils.matrixMultiplyTriagonal(bInve, matrixD, true);
}
public double[][] gen_penalty_matrix(double[] hj, double[][] binvD) {
TriDiagonalMatrix matrixD = new TriDiagonalMatrix(hj); // of dimension (_knotNum-2) by _knotNum
return LinearAlgebraUtils.matrixMultiplyTriagonal(ArrayUtils.transpose(binvD), matrixD, false);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam/GamSplines/NBSplinesTypeIDerivative.java
|
package hex.gam.GamSplines;
import hex.genmodel.algos.gam.NBSplinesTypeI;
import static hex.gam.GamSplines.NBSplinesUtils.integratePolynomial;
import static hex.genmodel.algos.gam.GamUtilsISplines.*;
import static hex.genmodel.algos.gam.NBSplinesTypeI.*;
public class NBSplinesTypeIDerivative {
/***
* This class implements the first or second derivative of NBSpline Type I (the derivative of Mi,k(t)) in order to
* generate the penalty function described in Section VI.I, equation 16, of the doc attached to
* the GitHub issue: https://github.com/h2oai/h2o-3/issues/7261.
* Doc 2 is the doc for the M-spline implementation and can be found here:
* https://github.com/h2oai/h2o-3/issues/6926
*/
private final int _order; // order k as in derivative of Mi,k(t)
private final int _basisIndex; // index i
private final double[] _knots; // knots sequence with duplication
private final double _commonConst; // k/(ti+k-ti)
public double[][] _coeffs; // store coefficients for the derivative of Mi,k(t) for knot intervals where the spline is non-zero
private NBSplinesTypeI _left; // point to spline Mi,k-1(t)
private NBSplinesTypeI _right; // point to spline Mi+1,k-1(t)
public NBSplinesTypeIDerivative(int basisIndex, int order, double[] fullKnots) {
_order = order;
_basisIndex = basisIndex;
_knots = extractKnots(_basisIndex, order, fullKnots); // extract knot sequence over which spline is non-zero
_commonConst = _order * ((_knots[_order] == _knots[0]) ? 0 : 1.0 / (_knots[_order] - _knots[0]));
_left = formBasisDeriv(fullKnots, _order - 1, basisIndex, fullKnots.length - 1);
_right = formBasisDeriv(fullKnots, _order - 1, basisIndex + 1, fullKnots.length - 1);
_coeffs = extractDerivativeCoeff(_left, _right, fullKnots, basisIndex, _commonConst);
}
/***
* This function extracts the coefficients for the derivative of a NBSplineTypeI (Mi,k(t)) as described in Section
* VI of doc.
*/
public static double[][] extractDerivativeCoeff(NBSplinesTypeI left, NBSplinesTypeI rite, double[] knots,
int basisIndex, double parentConst) {
double[][] coeffsLeft = extractCoeffs(left, basisIndex, parentConst);
double[][] coeffsRite = extractCoeffs(rite, basisIndex + 1, -parentConst);
double[][] combinedCoeffs = new double[knots.length - 1][];
sumCoeffs(coeffsLeft, coeffsRite, combinedCoeffs);
return combinedCoeffs;
}
/***
* Generate penalty matrix for I-spline as described in Section VI of doc.
*
* @param knots : containing all knots without duplication
* @param order : order of original I-splines
* @return double[][] penalty matrix of size (number of basis functions) by (number of basis functions)
*/
public static double[][] genISPenaltyMatrix(double[] knots, int order) {
int numBasis = knots.length + order - 2;
if (order <= 1)
return new double[numBasis][numBasis]; // derivative of order 1 NBSpline will generate all 0
double[] knotsWithDuplicates = fillKnots(knots, order); // knot sequence over which to perform integration
NBSplinesTypeIDerivative[] allDerivatives = form1stOrderDerivatives(numBasis, order, knotsWithDuplicates);
double[][] penaltyMat = new double[numBasis][numBasis];
for (int i = 0; i < numBasis; i++) {
for (int j = i; j < numBasis; j++) {
double[][] coeffProduct = formDerivateProduct(allDerivatives[i]._coeffs, allDerivatives[j]._coeffs);
penaltyMat[i][j] = integratePolynomial(knotsWithDuplicates, coeffProduct);
penaltyMat[j][i] = penaltyMat[i][j];
}
}
return penaltyMat;
}
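// Each entry penaltyMat[i][j] above is the integral over the full knot sequence of the product of the
// first derivatives of basis functions i and j (equation 16 of the doc): formDerivateProduct multiplies
// their piecewise-polynomial coefficients and integratePolynomial integrates the product interval by interval.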
/***
* Generate penalty matrix for M-spline as described in Section III of doc 2.
*
* @param knots : containing all knots without duplication
* @param order : order of original M-splines
* @return double[][] penalty matrix of size (number of basis functions) by (number of basis functions)
*/
public static double[][] genMSPenaltyMatrix(double[] knots, int order) {
int numBasis = knots.length + order - 2;
if (order <= 2)
return new double[numBasis][numBasis]; // derivative of order 2 NBSpline will generate all 0
double[] knotsWithDuplicates = fillKnots(knots, order); // knot sequence over which to perform integration
double[][][] allDerivCoeffs = form2ndDerivCoeffs(numBasis, order, knotsWithDuplicates);
double[][] penaltyMat = new double[numBasis][numBasis];
for (int i = 0; i < numBasis; i++) {
for (int j = i; j < numBasis; j++) {
double[][] coeffProduct = formDerivateProduct(allDerivCoeffs[i], allDerivCoeffs[j]);
penaltyMat[i][j] = integratePolynomial(knotsWithDuplicates, coeffProduct);
penaltyMat[j][i] = penaltyMat[i][j];
}
}
return penaltyMat;
}
public static double[][][] form2ndDerivCoeffs(int numBasis, int order, double[] fullKnots) {
double[][][] derivCoeffs = new double[numBasis][][];
NBSplinesTypeI[] msBasis = new NBSplinesTypeI[numBasis];
int numKnotInt = fullKnots.length-1;
for (int index = 0; index < numBasis; index++) {
msBasis[index] = formBasisDeriv(fullKnots, order, index, numKnotInt);
// extract coefficients of spline
extractNBSplineCoeffs(msBasis[index], order, new double[]{1}, 1, index);
// take 2nd derivative of Mspline by dealing with the coefficients
derivCoeffs[index] = derivativeCoeffs(msBasis[index]._nodeCoeffs);
}
return derivCoeffs;
}
public static double[][] derivativeCoeffs(double[][] origCoeffs) {
int numCoeffs = origCoeffs.length;
double[][] derivCoeffs = new double[numCoeffs][];
for (int index=0; index<numCoeffs; index++) {
double[] currCoeffs = origCoeffs[index];
if (currCoeffs != null && currCoeffs.length > 2) {
int count = 0;
int currCoeffLen = currCoeffs.length;
derivCoeffs[index] = new double[currCoeffLen-2];
for (int index2=2; index2<currCoeffLen; index2++)
derivCoeffs[index][count++] = currCoeffs[index2]*index2*(index2-1);
}
}
return derivCoeffs;
}
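// Worked example: the cubic piece 1 + 4t + 3t^2 + 2t^3 has second derivative 6 + 12t, so
// derivativeCoeffs(new double[][]{{1, 4, 3, 2}}) returns {{6, 12}}. Pieces with fewer than three
// coefficients (constant or linear) are left null since their second derivative is zero.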
/***
* Method to generate an array of derivatives of NBSplineTypeI. See Section VI.I of doc.
*
* @param numBasis: integer representing number of basis functions for knot sequence
* @param order: order of NBSplineTypeI to generate
* @param fullKnots: complete knot sequence with duplicate knots at both ends
*/
public static NBSplinesTypeIDerivative[] form1stOrderDerivatives(int numBasis, int order, double[] fullKnots) {
NBSplinesTypeIDerivative[] allDerivs = new NBSplinesTypeIDerivative[numBasis];
for (int index=0; index<numBasis; index++)
allDerivs[index] = new NBSplinesTypeIDerivative(index, order, fullKnots); // dMi,k(t)/dt
return allDerivs;
}
/***
* Form product of derivative basis function for index firstIndex, secondIndex like M'i,k(t)*M'j,k(t). See Section
* VI.I of doc.
*/
public static double[][] formDerivateProduct(double[][] firstCoeff, double[][] secondCoeff) {
int numBasis = firstCoeff.length;
double[][] polyProduct = new double[numBasis][];
for (int index=0; index<numBasis; index++) {
if (firstCoeff[index] != null && secondCoeff[index] != null)
polyProduct[index] = polynomialProduct(firstCoeff[index], secondCoeff[index]);
}
return polyProduct;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam/GamSplines/NBSplinesUtils.java
|
package hex.gam.GamSplines;
import water.util.ArrayUtils;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.IntStream;
public class NBSplinesUtils {
/***
* Perform integration of polynomials as described in Section VI.IV, equation 17 of doc I.
*/
public static double integratePolynomial(double[] knotsWithDuplicates, double[][] coeffProduct) {
double sumValue = 0;
int numBasis = coeffProduct.length;
for (int index=0; index < numBasis; index++) {
if (coeffProduct[index] != null) {
int orderSize = coeffProduct[index].length;
double firstKnot = knotsWithDuplicates[index];
double secondKnot = knotsWithDuplicates[index+1];
double[] coeffs = coeffProduct[index];
double tempSum = 0;
for (int orderIndex = 0; orderIndex < orderSize; orderIndex++) {
tempSum += coeffs[orderIndex]/(orderIndex+1)*(Math.pow(secondKnot, orderIndex+1)-
Math.pow(firstKnot, orderIndex+1));
}
sumValue += tempSum;
}
}
return sumValue;
}
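/*
 * Minimal illustrative sketch (hypothetical helper, not part of the original API): integrate
 * f(t) = 1 + 2t, represented piecewise with the same coefficients {1, 2} on the knot intervals
 * [0, 1] and [1, 2]. The expected result is (1 + 1) + (1 + 3) = 6.0, the exact integral of
 * 1 + 2t over [0, 2].
 */
static double integratePolynomialExample() {
double[] knotsWithDuplicates = new double[]{0.0, 1.0, 2.0};
double[][] coeffProduct = new double[][]{{1.0, 2.0}, {1.0, 2.0}}; // constant and linear coefficient per interval
return integratePolynomial(knotsWithDuplicates, coeffProduct); // expected: 6.0
}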
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam/GamSplines/ThinPlateDistanceWithKnots.java
|
package hex.gam.GamSplines;
import hex.DataInfo;
import hex.glm.GLMModel.GLMParameters.MissingValuesHandling;
import hex.util.LinearAlgebraUtils.BMulInPlaceTask;
import water.MRTask;
import water.MemoryManager;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import static hex.gam.GAMModel.GAMParameters;
import static hex.gam.GamSplines.ThinPlateRegressionUtils.*;
import static hex.genmodel.algos.gam.GamUtilsThinPlateRegression.calculateDistance;
import static org.apache.commons.math3.util.CombinatoricsUtils.factorial;
import static water.util.ArrayUtils.transpose;
/**
* Implementation details of this class can be found in GamThinPlateRegressionH2O.doc attached to this
* GitHub issue: https://github.com/h2oai/h2o-3/issues/7783
**/
public class ThinPlateDistanceWithKnots extends MRTask<ThinPlateDistanceWithKnots> {
final double[][] _knots; // store knot values for the spline class
final int _knotNum; // number of knot values
final int _d; // number of predictors for smoothers
final int _m; // highest degree of polynomial basis +1
final public double _constantTerms;
final int _weightID;
final boolean _dEven;
final double[] _oneOverGamColStd;
final boolean _standardizeGAM;
public ThinPlateDistanceWithKnots(double[][] knots, int d, double[] oneOGamColStd, boolean standardizeGAM) {
_knots = knots;
_knotNum = _knots[0].length;
_d = d;
_dEven = _d%2==0;
_m = calculatem(_d);
_weightID = _d; // weight column index
_oneOverGamColStd = oneOGamColStd;
_standardizeGAM = standardizeGAM;
if (_dEven)
_constantTerms = Math.pow(-1, _m+1+_d/2.0)/(Math.pow(2, 2*_m-1)*Math.pow(Math.PI, _d/2.0)*factorial(_m-1)*
factorial(_m-_d/2));
else
_constantTerms = Math.pow(-1, _m)*_m/(factorial(2*_m)*Math.pow(Math.PI, (_d-1)/2.0));
}
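// Worked example: for d = 2 predictors, calculatem(2) gives m = 2; d is even, so
// _constantTerms = (-1)^(2+1+1) / (2^3 * PI^1 * 1! * 1!) = 1/(8*PI), the constant of the classical
// two-dimensional thin plate spline basis r^2*log(r)/(8*PI).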
@Override
public void map(Chunk[] chk, NewChunk[] newGamCols) {
int nrows = chk[0].len();
double[] rowValues = MemoryManager.malloc8d(_knotNum);
double[] chkRowValues = MemoryManager.malloc8d(_d);
for (int rowIndex = 0; rowIndex < nrows; rowIndex++) {
if (chk[_weightID].atd(rowIndex) != 0) {
if (checkRowNA(chk, rowIndex)) {
fillRowOneValue(newGamCols, _knotNum, Double.NaN);
} else { // calculate distance measure as in 3.1
fillRowData(chkRowValues, chk, rowIndex, _d);
calculateDistance(rowValues, chkRowValues, _knotNum, _knots, _d, _m, _dEven, _constantTerms,
_oneOverGamColStd, _standardizeGAM);
fillRowArray(newGamCols, _knotNum, rowValues);
}
} else { // insert 0 to newChunk for weight == 0
fillRowOneValue(newGamCols, _knotNum, 0.0);
}
}
}
public static void fillRowData(double[] rowHolder, Chunk[] chk, int rowIndex, int d) {
for (int colIndex = 0; colIndex < d; colIndex++)
rowHolder[colIndex] = chk[colIndex].atd(rowIndex);
}
/**
* This function performs the operation described in 3.3 on the data part Xnmd.
*
* @param fr: H2OFrame to add gamified columns to.
* @param colNameStart start of column names for gamified columns
* @param parms GAMParameters
* @param zCST transpose of zCS transform matrix
* @param newColNum number of gamified columns to be added
* @return the input Frame with the newColNum transformed columns appended and the original gam columns removed
*/
public static Frame applyTransform(Frame fr, String colNameStart, GAMParameters parms, double[][] zCST, int newColNum) {
int numCols = fr.numCols(); // == numKnots
DataInfo frInfo = new DataInfo(fr, null, 0, false, DataInfo.TransformType.NONE,
DataInfo.TransformType.NONE, MissingValuesHandling.Skip == parms._missing_values_handling,
(parms._missing_values_handling == MissingValuesHandling.MeanImputation) ||
(parms._missing_values_handling == MissingValuesHandling.PlugValues), parms.makeImputer(),
false, false, false, false, null);
// expand the frame with k-M columns which will contain the product of Xnmd*ZCS
for (int colInd = 0; colInd < newColNum; colInd++) {
fr.add(colNameStart+"_tp_"+colInd, fr.anyVec().makeZero());
}
new BMulInPlaceTask(frInfo, zCST, numCols, false).doAll(fr);
for (int index=0; index < numCols; index++) { // remove the original gam columns
Vec temp = fr.remove(0);
temp.remove();
}
return fr;
}
public double[][] generatePenalty() {
double[][] penaltyMat = new double[_knotNum][_knotNum];
double[][] knotsTranspose = transpose(_knots);
double[] tempVal = MemoryManager.malloc8d(_knotNum);
for (int index = 0; index < _knotNum; index++) {
calculateDistance(tempVal, knotsTranspose[index], _knotNum, _knots, _d, _m, _dEven, _constantTerms,
_oneOverGamColStd, _standardizeGAM);
System.arraycopy(tempVal, 0, penaltyMat[index], 0, _knotNum);
} // penaltyMat is hollow at this point (zeros on the diagonal)
return penaltyMat;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam/GamSplines/ThinPlatePolynomialWithKnots.java
|
package hex.gam.GamSplines;
import water.MRTask;
import water.MemoryManager;
import water.fvec.Chunk;
import water.fvec.NewChunk;
import static hex.gam.GamSplines.ThinPlateRegressionUtils.*;
import static hex.genmodel.algos.gam.GamUtilsThinPlateRegression.calculatePolynomialBasis;
public class ThinPlatePolynomialWithKnots extends MRTask<ThinPlatePolynomialWithKnots> {
final int _weightID;
final int[][] _polyBasisList;
final int _M; // size of polynomial basis
final int _d; // number of predictors used
final double[] _gamColMeanRaw;
final double[] _oneOverColStd;
final boolean _standardizeGAM;
public ThinPlatePolynomialWithKnots(int weightID, int[][] polyBasis, double[] gamColMeanRaw, double[] oneOverColStd, boolean standardizeGAM) {
_weightID = weightID;
_d = weightID;
_polyBasisList = polyBasis;
_M = polyBasis.length;
_gamColMeanRaw = gamColMeanRaw;
_oneOverColStd = oneOverColStd;
_standardizeGAM = standardizeGAM;
}
@Override
public void map(Chunk[] chk, NewChunk[] newGamCols) {
int numRow = chk[0].len();
double[] onePolyRow = MemoryManager.malloc8d(_M);
double[] oneDataRow = MemoryManager.malloc8d(_d);
for (int rowIndex = 0; rowIndex < numRow; rowIndex++) {
if (chk[_weightID].atd(rowIndex) != 0) {
if (checkRowNA(chk, rowIndex)) {
fillRowOneValue(newGamCols, _M, Double.NaN);
} else {
extractNDemeanOneRowFromChunk(chk, rowIndex, oneDataRow, _d); // extract data to oneDataRow
calculatePolynomialBasis(onePolyRow, oneDataRow, _d, _M, _polyBasisList, _gamColMeanRaw, _oneOverColStd,
_standardizeGAM); // generate polynomial basis for oneDataRow
fillRowArray(newGamCols, _M, onePolyRow); // fill newChunk with array onePolyRow
}
} else { // set the row to zero
fillRowOneValue(newGamCols, _M, 0.0);
}
}
}
// grab data in each chunk into an array
public static void extractNDemeanOneRowFromChunk(Chunk[] chk, int rowIndex, double[] oneRow, int d) {
for (int colInd = 0; colInd < d; colInd++)
oneRow[colInd] = chk[colInd].atd(rowIndex);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam/GamSplines/ThinPlateRegressionUtils.java
|
package hex.gam.GamSplines;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.util.ArrayUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import static hex.gam.GAMModel.GAMParameters;
import static water.util.ArrayUtils.maxValue;
/**
* This class contains utility functions for generating the thin plate regression splines
* and the polynomial basis functions.
*/
public class ThinPlateRegressionUtils {
/**
* For thin plate regression, given d (the number of predictors for a smooth), return m, where (m-1) is the
* maximum polynomial degree in the polynomial basis functions. The formula used is m = floor((d+1)/2) + 1,
* i.e. the smallest integer m satisfying 2m > d + 1.
*
* @param d : integer denoting number of predictors for thin plate regression smooth.
* @return m : integer denoting the maximum polynomial degree + 1 for polynomial basis function.
*/
public static int calculatem(int d) {
return ((int) Math.floor((d+1.0)*0.5))+1;
}
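// For example: d = 1 or 2 gives m = 2, d = 3 or 4 gives m = 3, and d = 5 or 6 gives m = 4.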
public static int calculateM(int d, int m) {
int topComb = d+m-1;
return hex.genmodel.utils.MathUtils.combinatorial(topComb, d);
}
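// For example, d = 2 with m = 2 gives M = C(3, 2) = 3 (the basis 1, x1, x2), and d = 3 with m = 3
// gives M = C(5, 3) = 10.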
/**
* This method, given the number of predictors d in the smooth and m (where m-1 is the maximum polynomial degree),
* generates a list of integer arrays specifying, for each predictor, the degree that predictor will have. For
* instance, if the degree for a predictor is 0, a constant 1 is used; if the degree is 2,
* predictor*predictor is used.
*
* @param d number of predictors in the smooth
* @param m integer such that (m-1) is the maximum polynomial degree
* @return list of degree arrays, one per polynomial basis function
*/
public static List<Integer[]> findPolyBasis(int d, int m) {
int polyOrder = m-1;
int[] possibleDegree = new int[polyOrder];
for (int index = 1; index < m; index++) // generate all polynomial order combinations
possibleDegree[index-1] = index;
Integer[] basisPolyOrder = new Integer[d]; // store one combination
List<Integer[]> totPolyBasis = new ArrayList<>(); // store all combination
for (int degree : possibleDegree) {
ArrayList<int[]> oneCombo = new ArrayList<>();
findOnePerm(degree, possibleDegree, 0, oneCombo, null);
mergeCombos(oneCombo, basisPolyOrder, possibleDegree, totPolyBasis);// merge all combos found for all possibleDegree
}
return findAllPolybasis(totPolyBasis);
}
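// Worked example: findPolyBasis(3, 3) first collects the degree assignments {1,0,0}, {0,2,0} and {1,1,0}
// (total degree 1 and 2 spread over the three predictors); findAllPolybasis then adds the missing
// permutations plus the all-zero entry, yielding 10 degree vectors in total, i.e. the basis
// 1, x1, x2, x3, x1^2, x2^2, x3^2, x1*x2, x1*x3, x2*x3, which matches calculateM(3, 3) = 10.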
/**
* For each list in onePolyBasis, we still need to find all the permutations of that list. In addition, we need to
* add the all-zero (0th order) combination. For instance, if the list contains {0,0,1}, we need to add the
* lists {0,1,0} and {1,0,0} as well.
* @param onePolyBasis list of degree arrays before permutation
* @return complete list of degree arrays, including the all-zero entry
*/
public static List<Integer[]> findAllPolybasis(List<Integer[]> onePolyBasis) {
int listSize = onePolyBasis.size();
List<Integer[]> allPermutes = new ArrayList<>();
for (int index = 0; index < listSize; index++) {
Integer[] oneBasis = onePolyBasis.get(index);
int[] freqTable = generateOrderFreq(oneBasis); // find polynomial basis order and count
List<List<Integer>> basisPermuations = new ArrayList<>();
List<Integer> prefix = new ArrayList<>();
findPermute(freqTable, prefix, oneBasis.length, basisPermuations);
addPermutationList(allPermutes, basisPermuations);
}
// add the list of all zeros
Integer[] allZeros = new Integer[onePolyBasis.get(0).length];
for (int index = 0; index < allZeros.length; index++)
allZeros[index] = 0;
allPermutes.add(0, allZeros); // add all zero degree to the front
return allPermutes;
}
public static void addPermutationList(List<Integer[]> onePolyBasis, List<List<Integer>> permute1Basis) {
for (List<Integer> onePermute : permute1Basis) {
Integer[] oneCombo = onePermute.toArray(new Integer[0]);
onePolyBasis.add(oneCombo);
}
}
public static void findPermute(int[] freqMap, List<Integer> prefix, int remaining,
List<List<Integer>> basisPerm) {
if (remaining == 0) { // done with choosing all permutation
basisPerm.add(prefix);
} else {
for (int index=0; index < freqMap.length; index++) {
int val = freqMap[index];
if (val > 0) {
freqMap[index]--;
ArrayList<Integer> newPrefix = new ArrayList<>(prefix);
newPrefix.add(index);
findPermute(freqMap, newPrefix, remaining-1, basisPerm);
freqMap[index] = val;
}
}
}
}
public static int[] generateOrderFreq(Integer[] oneBasis) {
int maxVal = maxValue(oneBasis);
int[] mapFreq = new int[maxVal+1];
for (int val : oneBasis)
mapFreq[val]++;
return mapFreq;
}
public static void mergeCombos(ArrayList<int[]> oneCombo, Integer[] basisOrder, int[] polyBasis, List<Integer[]> polyBasisSet) {
for (int[] oneList : oneCombo) {
Arrays.fill(basisOrder, 0);
expandCombo(oneList, polyBasis, basisOrder);
polyBasisSet.add(basisOrder.clone());
}
}
/**
* Given a combo found by findOnePerm, say for d = 5, m = 4, and degree = 1 to m-1 (3 in this case), the allowed
* degrees are {3,2,1}. The returned list of combos is: {{0, 0, 1}, {0, 1, 0}, {0, 0, 2}, {1,0,0}, {0,1,1}, {0,0,3}}.
* However, we need to convert this list back to the perspective of the predictors. In this case, we have 5
* predictors. This function translates the list from findOnePerm to the perspective of the predictors. For
* instance, for degree = 1, we need one predictor to have degree 1, and hence the list should be
* {1, 0, 0, 0, 0}. For degree = 2, we can have one predictor taking degree 2 or two predictors each taking
* degree 1. The same applies to the other degrees. Hence, this function will return the following list:
* {{1, 0, 0, 0, 0}, {0, 2, 0, 0, 0}, {1, 1, 0, 0, 0}, {0, 0, 3, 0, 0}, {1, 2, 0, 0, 0} and {1, 1, 1, 0, 0}}.
* @param oneList
* @param polyBasis
* @param basisOrder
*/
public static void expandCombo(int[] oneList, int[] polyBasis, Integer[] basisOrder) {
int expandIndex = 0;
for (int index = 0; index < polyBasis.length; index++) {
int count = 0;
if (oneList[index] == 0) {
basisOrder[expandIndex++] = 0;
} else {
while (count < oneList[index]) {
basisOrder[expandIndex++] = polyBasis[index];
count++;
}
}
}
}
/**
* For a fixed degree totDegree, find the combinations of the allowed degrees in degreeCombo that sum to totDegree.
* For instance, if degreeCombo = {1,2,3} and totDegree is 1, there is only one way to achieve it, the array
* {1,0,0}. If totDegree = 2, there are two arrays that work: {0,1,0} or {2,0,0}. If totDegree = 3, there are
* 3 arrays that work: {3,0,0}, {1,1,0}, {0,0,1}.
*
* @param totDegree : integer representing degree of polynomial basis
* @param degreeCombo : degrees allowed for polynomial basis
* @param index
* @param allCombos
* @param currCombo
*/
public static void findOnePerm(int totDegree, int[] degreeCombo, int index, ArrayList<int[]> allCombos,
int[] currCombo) {
if (totDegree == 0) {
if (currCombo != null)
allCombos.add(currCombo.clone());
} else if (totDegree >= 0 && index < degreeCombo.length){
int totPass = totDegree / degreeCombo[index];
int degreeCount = 0;
if (currCombo == null)
currCombo = degreeCombo.clone();
while (degreeCount <= totPass) {
setCombo(currCombo, index, degreeCount);
findOnePerm(totDegree - degreeCount * degreeCombo[index], degreeCombo, index + 1,
allCombos, currCombo);
degreeCount++;
}
}
}
public static void setCombo(int[] currCombo, int index, int degreeCount) {
currCombo[index] = degreeCount;
int combSize = currCombo.length;
for (int tempIndex = index+1; tempIndex < combSize; tempIndex++)
currCombo[tempIndex] = 0;
}
public static double[][] generateStarT(double[][] knots, List<Integer[]> polyBasisDegree, double[] gamColMeanRaw,
double[] oneOColStd, boolean standardizeTPSmoothers) {
int numKnots = knots[0].length;
int M = polyBasisDegree.size();
int d = knots.length;
double[][] knotsDemean = new double[d][numKnots];
for (int predInd = 0; predInd < d; predInd++)
for (int index = 0; index < numKnots; index++) {
knotsDemean[predInd][index] = standardizeTPSmoothers
? (knots[predInd][index]-gamColMeanRaw[predInd])*oneOColStd[predInd]
: (knots[predInd][index]-gamColMeanRaw[predInd]);
}
double[][] starT = new double[numKnots][M];
for (int rowInd = 0; rowInd < numKnots; rowInd++) {
for (int polyBasisInd = 0; polyBasisInd < M; polyBasisInd++) {
Integer[] oneBasis = polyBasisDegree.get(polyBasisInd);
double polyBasisVal = 1.0;
for (int predInd = 0; predInd < d; predInd++) {
polyBasisVal *= Math.pow(knotsDemean[predInd][rowInd], oneBasis[predInd]);
}
starT[rowInd][polyBasisInd] = polyBasisVal;
}
}
return starT;
}
public static void fillRowOneValue(NewChunk[] newChk, int colWidth, double fillValue) {
for (int colInd = 0; colInd < colWidth; colInd++)
newChk[colInd].addNum(fillValue);
}
public static void fillRowArray(NewChunk[] newChk, int colWidth, double[] fillValue) {
for (int colInd = 0; colInd < colWidth; colInd++)
newChk[colInd].addNum(fillValue[colInd]);
}
public static boolean checkRowNA(Chunk[] chk, int rowIndex) {
int numCol = chk.length;
for (int colIndex = 0; colIndex < numCol; colIndex++) {
if (Double.isNaN(chk[colIndex].atd(rowIndex)))
return true;
}
return false;
}
public static boolean checkFrameRowNA(Frame chk, long rowIndex) {
int numCol = chk.numCols();
for (int colIndex = 0; colIndex < numCol; colIndex++) {
if (Double.isNaN(chk.vec(colIndex).at(rowIndex)))
return true;
}
return false;
}
public static String genThinPlateNameStart(GAMParameters parms, int gamColIndex) {
StringBuffer colNameStub = new StringBuffer();
for (int gColInd = 0; gColInd < parms._gam_columns_sorted[gamColIndex].length; gColInd++) {
colNameStub.append(parms._gam_columns_sorted[gamColIndex][gColInd]);
colNameStub.append("_");
}
colNameStub.append(parms._bs_sorted[gamColIndex]);
colNameStub.append("_");
return colNameStub.toString();
}
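// For example, with parms._gam_columns_sorted[gamColIndex] = {"x1", "x2"} (hypothetical column names)
// and parms._bs_sorted[gamColIndex] = 1, the returned stub is "x1_x2_1_".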
public static String[] extractColNames(String[] src, int srcStart, int destStart, int length) {
String[] distanceColNames = new String[length]; // exclude the polynomial basis names
System.arraycopy(src, srcStart, distanceColNames, destStart, length);
return distanceColNames;
}
public static int[][] convertList2Array(List<Integer[]> list2Convert, int M, int d) {
int[][] polyBasisArr = new int[M][d];
for (int index = 0; index < M; index++) {
List<Integer> oneList = Arrays.asList(list2Convert.get(index));
polyBasisArr[index] = oneList.stream().mapToInt(Integer::intValue).toArray();
}
return polyBasisArr;
}
/**
* Generate knots for thin plate (TP) smoothers. Sort the first predictor column, take quantiles of the sorted first
* column and grab the corresponding rows of the second, third, ... predictor columns.
* @param predictVec H2OFrame containing predictor columns used to build the TP smoothers.
* @param parms GAMParameters
* @param predIndex integer denoting the GAM column specification in parms._gam_columns
* @return array of knot values for predictor columns specified in parms._gam_columns[predIndex]
*/
public static double[][] genKnotsMultiplePreds(Frame predictVec, GAMParameters parms, int predIndex) {
Frame sortedFirstDim = predictVec.sort(new int[]{0}); // sort with first GAM Columns
double stepProb = 1.0 / parms._num_knots[predIndex];
long rowSteps = (long) Math.floor(stepProb * sortedFirstDim.numRows());
int numPred = parms._gam_columns[predIndex].length;
double[][] knots = new double[numPred][parms._num_knots[predIndex]];
long nrow = sortedFirstDim.numRows();
long nextRow = 1;
long currRow = 0;
for (int knotIndex = 0; knotIndex < parms._num_knots[predIndex]; knotIndex++) {
currRow = knotIndex*rowSteps;
nextRow = (knotIndex+1)*rowSteps;
while (currRow < nrow && currRow < nextRow) { // look for knots that do not contain NAs
if (!checkFrameRowNA(sortedFirstDim, currRow)) {
for (int colIndex = 0; colIndex < numPred; colIndex++) {
knots[colIndex][knotIndex] = sortedFirstDim.vec(colIndex).at(currRow);
}
break;
}
currRow++;
}
}
sortedFirstDim.remove(); // remove sorted frame
parms._num_knots[predIndex] = knots[0].length;
return knots;
}
/**
* This class performs the same scaling of the TP penalty matrices that is done in R.
*/
public static class ScaleTPPenalty extends MRTask<ScaleTPPenalty> {
public double[][] _penaltyMat;
double[] _maxAbsRowSum; // store maximum row sum per chunk
public int _initChunks; // number of chunks
public double _s_scale;
public ScaleTPPenalty(double[][] origPenaltyMat, Frame distancePlusPoly) {
_penaltyMat = origPenaltyMat;
_initChunks = distancePlusPoly.vec(0).nChunks();
}
@Override
public void map(Chunk[] chk, NewChunk[] newGamCols) {
_maxAbsRowSum = new double[_initChunks];
int cIndex = chk[0].cidx();
_maxAbsRowSum[cIndex] = Double.NEGATIVE_INFINITY;
int numRow = chk[0]._len;
for (int rowIndex = 0; rowIndex < numRow; rowIndex++) {
double rowSum = 0.0;
for (int colIndex = 0; colIndex < chk.length; colIndex++) {
rowSum += Math.abs(chk[colIndex].atd(rowIndex));
}
if (rowSum > _maxAbsRowSum[cIndex])
_maxAbsRowSum[cIndex] = rowSum;
}
}
@Override
public void reduce(ScaleTPPenalty other) {
ArrayUtils.add(_maxAbsRowSum, other._maxAbsRowSum);
}
@Override
public void postGlobal() { // scale the _penalty function according to R
double tempMaxValue = ArrayUtils.maxValue(_maxAbsRowSum);
_s_scale = tempMaxValue*tempMaxValue/ArrayUtils.rNorm(_penaltyMat, 'i'); // symmetric matrix
ArrayUtils.mult(_penaltyMat, _s_scale);
_s_scale = 1.0 / _s_scale;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam/MatrixFrameUtils/AddCSGamColumns.java
|
package hex.gam.MatrixFrameUtils;
import hex.gam.GamSplines.CubicRegressionSplines;
import water.MRTask;
import water.MemoryManager;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.util.ArrayUtils;
import static hex.genmodel.algos.gam.GamMojoModel.CS_SPLINE_TYPE;
import static hex.genmodel.algos.gam.GamUtilsCubicRegression.*;
/**
* Given a Frame, this class generates all the gamified columns for the cubic spline (CS) smoothers.
*/
public class AddCSGamColumns extends MRTask<AddCSGamColumns> {
double[][][] _binvD;
double[][][] _knotsMat;
double[][][] _ztransp;
int[] _numKnots;
public int _numGAMcols;
public int _gamCols2Add = 0;
double[] _vmax;
double[] _vmin;
int[] _gamColsOffsets;
Frame _gamFrame;
public AddCSGamColumns(double[][][] binvD, double[][][] ztransp, double[][][] knotsMat, int[] numKnots,
Frame gamColFrames, int[] bsSorted) {
_numGAMcols = gamColFrames.numCols(); // only for CS splines
_binvD = new double[_numGAMcols][][];
_knotsMat = new double[_numGAMcols][][];
_ztransp = new double[_numGAMcols][][];
_numKnots = new int[_numGAMcols];
int numTotGamCols = numKnots.length;
_vmax = MemoryManager.malloc8d(_numGAMcols);
_vmin = MemoryManager.malloc8d(_numGAMcols);
_gamColsOffsets = MemoryManager.malloc4(_numGAMcols);
_gamFrame = gamColFrames; // contain predictor columns, response column
int countCSGam = 0;
for (int ind = 0; ind < numTotGamCols; ind++) {
if (bsSorted[ind] == CS_SPLINE_TYPE) {
_vmax[countCSGam] = gamColFrames.vec(countCSGam).max();
_vmin[countCSGam] = gamColFrames.vec(countCSGam).min();
_ztransp[countCSGam] = ztransp[ind];
_binvD[countCSGam] = binvD[ind];
_knotsMat[countCSGam] = knotsMat[ind];
_numKnots[countCSGam] = numKnots[ind];
_gamColsOffsets[countCSGam++] += _gamCols2Add;
_gamCols2Add += _numKnots[ind] - 1; // minus one from centering
}
}
}
@Override
public void map(Chunk[] chk, NewChunk[] newChunks) {
CubicRegressionSplines[] crSplines = new CubicRegressionSplines[_numGAMcols];
double[][] basisVals = new double[_numGAMcols][];
double[][] basisValsCenter = new double[_numGAMcols][];
for (int gcolInd = 0; gcolInd < _numGAMcols; gcolInd++) { // prepare splines
crSplines[gcolInd] = new CubicRegressionSplines(_numKnots[gcolInd], _knotsMat[gcolInd][0]);
basisValsCenter[gcolInd] = MemoryManager.malloc8d(_numKnots[gcolInd]-1); // with centering, it is one less
basisVals[gcolInd] = MemoryManager.malloc8d(_numKnots[gcolInd]); // without centering
}
int chkLen = chk[0]._len;
for (int rInd = 0; rInd < chkLen; rInd++) { // go through each row
for (int cInd = 0; cInd < _numGAMcols; cInd++) { // add each column
generateOneGAMcols(cInd, _gamColsOffsets[cInd], basisVals[cInd], basisValsCenter[cInd], _binvD[cInd],
crSplines[cInd], chk[cInd].atd(rInd), newChunks);
}
}
}
public void generateOneGAMcols(int colInd, int colOffset, double[] basisVals, double[] basisValCenter,
double[][] bInvD, CubicRegressionSplines splines, double xval, NewChunk[] newChunks) {
int centerKnots = _numKnots[colInd]-1; // number of columns after gamification
if (!Double.isNaN(xval)) {
int binIndex = locateBin(xval, splines._knots); // location to update
// update from F matrix F matrix = [0;invB*D;0] and c functions
updateFMatrixCFunc(basisVals, xval, binIndex, splines._knots, splines._hj, bInvD);
// update from a+ and a- functions
updateAFunc(basisVals, xval, binIndex, splines._knots, splines._hj);
// add centering
basisValCenter = ArrayUtils.multArrVec(_ztransp[colInd], basisVals, basisValCenter);
// copy updates to the newChunk row
for (int colIndex = 0; colIndex < centerKnots; colIndex++) {
newChunks[colIndex + colOffset].addNum(basisValCenter[colIndex]);
}
} else { // set NaN
for (int colIndex = 0; colIndex < centerKnots; colIndex++)
newChunks[colIndex + colOffset].addNum(Double.NaN);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam/MatrixFrameUtils/AddISGamColumns.java
|
package hex.gam.MatrixFrameUtils;
import hex.genmodel.algos.gam.ISplines;
import water.MRTask;
import water.MemoryManager;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
/**
* Class to gamify all gam_columns with bs set to 2 (I-splines). For details regarding I-splines, please refer to doc I in
* the GitHub issue: https://github.com/h2oai/h2o-3/issues/7261. This class performs gamification only.
*/
public class AddISGamColumns extends MRTask<AddISGamColumns> {
double[][][] _knotsMat; // knots without duplication for I-spline
int[] _numKnots;
int[] _numBasis;
public int _numGAMCols; // count number of I-Spline gam columns
int[] _gamColsOffsets;
Frame _gamFrame;
int[] _bs; // for I-spline only
int[] _splineOrder; // for I-spline only
int _totGamifiedCols=0;
public int _totGamifiedColCentered=0;
public AddISGamColumns(double[][][] knotsMat, int[] numKnot, int[] bs, int[] splineOrder,
Frame gamColFrames) {
_gamFrame = gamColFrames;
_numGAMCols = gamColFrames.numCols();
_gamColsOffsets = MemoryManager.malloc4(_numGAMCols);
_knotsMat = new double[_numGAMCols][][];
_bs = new int[_numGAMCols];
_splineOrder = new int[_numGAMCols];
_numKnots = new int[_numGAMCols];
_numBasis = new int[_numGAMCols];
int totGamCols = bs.length;
int countIS = 0;
int offset = 0;
for (int index=0; index<totGamCols; index++) {
if (bs[index]==2) {
int numBasis = numKnot[index]+splineOrder[index]-2;
_totGamifiedCols += numBasis;
_totGamifiedColCentered += numBasis;
_knotsMat[countIS] = knotsMat[index];
_bs[countIS] = bs[index];
_numKnots[countIS] = numKnot[index];
_numBasis[countIS] = numBasis;
_splineOrder[countIS] = splineOrder[index];
_gamColsOffsets[countIS++] = offset;
offset += numBasis; // I-spline gamified columns are not centered, so no column is dropped
}
}
}
@Override
public void map(Chunk[] chk, NewChunk[] newChunks) {
ISplines[] isBasis = new ISplines[_numGAMCols];
double[][] basisVals = new double[_numGAMCols][];
for (int index=0; index<_numGAMCols; index++) {
isBasis[index] = new ISplines(_splineOrder[index], _knotsMat[index][0]);
basisVals[index] = MemoryManager.malloc8d(_numBasis[index]);
}
int chkLen = chk[0].len();
for (int rInd=0; rInd<chkLen; rInd++) {
for (int cInd=0; cInd<_numGAMCols; cInd++)
generateOneISGAMCols(cInd, _gamColsOffsets[cInd], basisVals[cInd], isBasis[cInd], chk[cInd].atd(rInd),
newChunks);
}
}
/***
* Perform gamification of one column using I-spline basis function described in Section V of doc I.
*/
public void generateOneISGAMCols(int colInd, int colOffset, double[] basisVals, ISplines isBasis, double xval,
NewChunk[] newChunks) {
int numVals = _numBasis[colInd];
if (!Double.isNaN(xval)) {
isBasis.gamifyVal(basisVals, xval);
// basisValsCenter = ArrayUtils.multArrVec(_ztransp[colInd], basisVals, basisValsCenter);
for (int colIndex=0; colIndex < numVals; colIndex++)
newChunks[colIndex+colOffset].addNum(basisVals[colIndex]);
} else {
for (int colIndex=0; colIndex < numVals; colIndex++)
newChunks[colIndex+colOffset].addNum(Double.NaN);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam/MatrixFrameUtils/AddMSGamColumns.java
|
package hex.gam.MatrixFrameUtils;
import hex.genmodel.algos.gam.MSplines;
import water.MRTask;
import water.MemoryManager;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.util.ArrayUtils;
import static hex.genmodel.algos.gam.GamMojoModel.MS_SPLINE_TYPE;
/**
* This task gamifies all gam predictors with bs=3 (M-splines). It does not generate the penalty matrix or the Z matrix;
* those are assumed to have been generated earlier.
*/
public class AddMSGamColumns extends MRTask<AddMSGamColumns> {
double[][][] _knotsMat; // knots without duplication for M-spline
int[] _numKnots;
int[] _numBasis;
public int _numGAMCols; // count number of M-Spline gam columns
int[] _gamColsOffsets;
Frame _gamFrame;
int[] _bs; // for M-spline only
int[] _splineOrder; // for M-spline only
int _totGamifiedCols=0;
public int _totGamifiedColCentered=0;
final double[][][] _ztransp;
public AddMSGamColumns(double[][][] knotsMat, double[][][] ztransp, int[] numKnot, int[] bs, int[] splineOrder,
Frame gamColFrames) {
_gamFrame = gamColFrames;
_numGAMCols = gamColFrames.numCols();
_gamColsOffsets = MemoryManager.malloc4(_numGAMCols);
_knotsMat = new double[_numGAMCols][][];
_bs = new int[_numGAMCols];
_splineOrder = new int[_numGAMCols];
_numKnots = new int[_numGAMCols];
_numBasis = new int[_numGAMCols];
_ztransp = new double[_numGAMCols][][];
int totGamCols = bs.length;
int countMS = 0;
int offset = 0;
for (int index=0; index<totGamCols; index++) {
if (bs[index]==MS_SPLINE_TYPE) {
int numBasis = numKnot[index]+splineOrder[index]-2;
int numBasisM1 = numBasis - 1;
_totGamifiedCols += numBasis;
_totGamifiedColCentered += numBasisM1;
_knotsMat[countMS] = knotsMat[index];
_bs[countMS] = bs[index];
_numKnots[countMS] = numKnot[index];
_numBasis[countMS] = numBasis;
_splineOrder[countMS] = splineOrder[index];
_ztransp[countMS] = ztransp[index];
_gamColsOffsets[countMS++] = offset;
offset += numBasisM1; // minus 1 for centering
}
}
}
@Override
public void map(Chunk[] chk, NewChunk[] newChunks) {
MSplines[] msBasis = new MSplines[_numGAMCols];
double[][] basisVals = new double[_numGAMCols][];
double[][] basisValsCenter = new double[_numGAMCols][];
for (int index=0; index<_numGAMCols; index++) {
msBasis[index] = new MSplines(_splineOrder[index], _knotsMat[index][0]);
basisVals[index] = MemoryManager.malloc8d(_numBasis[index]);
basisValsCenter[index] = MemoryManager.malloc8d(_numBasis[index]-1);
}
int chkLen = chk[0].len();
for (int rInd=0; rInd<chkLen; rInd++) {
for (int cInd=0; cInd<_numGAMCols; cInd++)
generateOneMSGAMCols(cInd, _gamColsOffsets[cInd], basisVals[cInd], basisValsCenter[cInd], msBasis[cInd], chk[cInd].atd(rInd),
newChunks);
}
}
/***
* Perform gamification of one column using the M-spline basis functions (see doc 2, the M-spline doc) and center the result with the Z transpose matrix.
*/
public void generateOneMSGAMCols(int colInd, int colOffset, double[] basisVals, double[] basisValsCenter,
MSplines msBasis, double xval, NewChunk[] newChunks) {
int numVals = _numBasis[colInd]-1; // shrink after centralize
if (!Double.isNaN(xval)) {
msBasis.gamifyVal(basisVals, xval);
basisValsCenter = ArrayUtils.multArrVec(_ztransp[colInd], basisVals, basisValsCenter);
for (int colIndex=0; colIndex < numVals; colIndex++)
newChunks[colIndex+colOffset].addNum(basisValsCenter[colIndex]);
} else {
for (int colIndex=0; colIndex < numVals; colIndex++)
newChunks[colIndex+colOffset].addNum(Double.NaN);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam/MatrixFrameUtils/AddTPKnotsGamColumns.java
|
package hex.gam.MatrixFrameUtils;
import hex.gam.GamSplines.ThinPlateDistanceWithKnots;
import hex.gam.GamSplines.ThinPlatePolynomialWithKnots;
import water.DKV;
import water.Key;
import water.fvec.Frame;
import water.fvec.Vec;
import static hex.gam.GAMModel.GAMParameters;
import static hex.gam.GamSplines.ThinPlateRegressionUtils.extractColNames;
import static hex.gam.GamSplines.ThinPlateRegressionUtils.genThinPlateNameStart;
import static hex.gam.MatrixFrameUtils.GamUtils.generateGamColNamesThinPlateKnots;
import static hex.gam.MatrixFrameUtils.GamUtils.prepareGamVec;
import static org.apache.commons.math3.util.CombinatoricsUtils.factorial;
import static water.util.ArrayUtils.sum;
// This class generates all TP gamified columns
public class AddTPKnotsGamColumns {
final double[][][] _zCS;
final double[][][] _z;
final int[][][] _polyBasisList;
final int[] _numKnots;
final int[] _d;
final int[] _M;
final int[] _m;
final GAMParameters _parms;
public final int _gamCols2Add;
final double[][][] _knots;
final int _numTPCols;
final int _numCSCols;
final double[] _constantTerms;
final boolean[] _dEven;
final Frame _adapted;
public Key<Frame>[] _gamFrameKeysCenter; // store frame keys of transformed gam columns
public AddTPKnotsGamColumns(GAMParameters parms, double[][][] zcs, double[][][] z, int[][][] polyBasis,
double[][][] knots, Frame fr) {
_zCS = zcs;
_z = z;
_polyBasisList = polyBasis;
_numKnots = parms._num_knots_tp;
_d = parms._gamPredSize;
_M = parms._M;
_m = parms._m;
_parms = parms;
_gamCols2Add = sum(_numKnots) - _numKnots.length;
_knots = knots;
_numTPCols = _M.length;
_numCSCols = parms._gam_columns_sorted.length - _numTPCols;
_dEven = new boolean[_numTPCols];
_constantTerms = new double[_numTPCols];
_gamFrameKeysCenter = new Key[_numTPCols];
for (int index = 0; index < _numTPCols; index++) {
_dEven[index] = (_parms._gamPredSize[index] % 2) == 0;
if (_dEven[index])
_constantTerms[index] = Math.pow(-1, _m[index]+1+_d[index]/2.0)/(Math.pow(2, _m[index]-1)*Math.pow(Math.PI,
_d[index]/2.0)*factorial(_m[index]-_d[index]/2));
else
_constantTerms[index] = Math.pow(-1, _m[index])*_m[index]/(factorial(2*_m[index])*Math.pow(Math.PI,
(_d[index]-1)/2.0));
}
_adapted = fr;
}
public void addTPGamCols(double[][] gamColMeansRaw, double[][] oneOColStd) {
for (int index = 0; index < _numTPCols; index++) { // generate smoothers/splines for each gam smoother
final int offsetIndex = index + _numCSCols;
final Frame predictVec = prepareGamVec(offsetIndex, _parms, _adapted); // extract predictors from training frame
ApplyTPRegressionSmootherWithKnots addSmoother = new ApplyTPRegressionSmootherWithKnots(predictVec, _parms, offsetIndex,
_knots[offsetIndex], index, _zCS[index], _z[offsetIndex], _polyBasisList[index], gamColMeansRaw[index],
oneOColStd[index]);
addSmoother.applySmoothers();
}
}
public class ApplyTPRegressionSmootherWithKnots {
final Frame _predictVec;
final int _numKnots;
final int _numKnotsM1;
final int _numKnotsMM; // store k-M
final double[][] _knots;
final GAMParameters _parms;
final int _gamColIndex;
final int _thinPlateGamColIndex;
final int _numPred; // number of predictors == d
final int _M;
final double[][] _zCST;
final double[][] _zT;
final int[][] _polyBasisList;
final double[] _gamColMeanRaw;
final double[] _oneOColStd;
public ApplyTPRegressionSmootherWithKnots(Frame predV, GAMParameters parms, int gamColIndex, double[][] knots,
int thinPlateInd, double[][] zCST, double[][] zT, int[][] polyBasis,
double[] gamColMeanRaw, double[] oneOColStd) {
_predictVec = predV;
_knots = knots;
_numKnots = knots[0].length;
_numKnotsM1 = _numKnots-1;
_parms = parms;
_gamColIndex = gamColIndex;
_thinPlateGamColIndex = thinPlateInd;
_numPred = parms._gam_columns_sorted[gamColIndex].length;
_M = _parms._M[_thinPlateGamColIndex];
_numKnotsMM = _numKnots-_M;
_zCST = zCST;
_zT = zT;
_polyBasisList = polyBasis;
_gamColMeanRaw = gamColMeanRaw;
_oneOColStd = oneOColStd;
}
void applySmoothers() {
ThinPlateDistanceWithKnots distanceMeasure =
new ThinPlateDistanceWithKnots(_knots, _numPred, _oneOColStd, _parms._standardize).doAll(_numKnots,
Vec.T_NUM, _predictVec); // Xnmd in 3.1
String colNameStub = genThinPlateNameStart(_parms, _gamColIndex); // gam column names before processing
String[] gamColNames = generateGamColNamesThinPlateKnots(_gamColIndex, _parms, _polyBasisList, colNameStub);
String[] distanceColNames = extractColNames(gamColNames, 0, 0, _numKnots);
String[] polyNames = extractColNames(gamColNames, _numKnots, 0, _M);
Frame thinPlateFrame = distanceMeasure.outputFrame(Key.make(), distanceColNames, null);
thinPlateFrame = ThinPlateDistanceWithKnots.applyTransform(thinPlateFrame, colNameStub
+"CS_", _parms, _zCST, _numKnotsMM); // generate Xcs as in 3.3
ThinPlatePolynomialWithKnots thinPlatePoly = new ThinPlatePolynomialWithKnots(_numPred, _polyBasisList,
_gamColMeanRaw, _oneOColStd, _parms._standardize).doAll(_M,
Vec.T_NUM, _predictVec); // generate polynomial basis T as in 3.2
Frame thinPlatePolyBasis = thinPlatePoly.outputFrame(null, polyNames, null);
thinPlateFrame.add(thinPlatePolyBasis.names(), thinPlatePolyBasis.removeAll()); // concatenate Xcs and T
thinPlateFrame = ThinPlateDistanceWithKnots.applyTransform(thinPlateFrame, colNameStub+"center",
_parms, _zT, _numKnotsM1); // generate Xz as in 3.4
_gamFrameKeysCenter[_thinPlateGamColIndex] = thinPlateFrame._key;
DKV.put(thinPlateFrame);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam/MatrixFrameUtils/GAMModelUtils.java
|
package hex.gam.MatrixFrameUtils;
import hex.VarImp;
import hex.gam.GAMModel;
import hex.glm.GLMModel;
import water.util.ArrayUtils;
import water.util.Log;
import water.util.TwoDimTable;
import static hex.ModelMetrics.calcVarImp;
import static hex.gam.GAMModel.*;
import static hex.genmodel.algos.gam.GamMojoModel.*;
import static hex.glm.GLMModel.GLMParameters;
import static hex.glm.GLMModel.GLMParameters.Family.multinomial;
import static hex.glm.GLMModel.GLMParameters.Family.ordinal;
import static water.util.ArrayUtils.find;
public class GAMModelUtils {
public static void copyGLMCoeffs(GLMModel glm, GAMModel model, GAMParameters parms, int nclass) {
boolean multiClass = parms._family == multinomial || parms._family == ordinal;
int totCoefNumsNoCenter = (multiClass?glm.coefficients().size()/nclass:glm.coefficients().size())
+gamNoCenterCoeffLength(parms);
model._output._coefficient_names_no_centering = new String[totCoefNumsNoCenter]; // copy coefficient names from GLM to GAM
int gamNumStart = copyGLMCoeffNames2GAMCoeffNames(model, glm);
copyGLMCoeffs2GAMCoeffs(model, glm, parms._family, gamNumStart, nclass, parms._intercept); // obtain beta without centering
// copy over GLM coefficients
int glmCoeffLen = glm._output._coefficient_names.length;
model._output._coefficient_names = new String[glmCoeffLen];
System.arraycopy(glm._output._coefficient_names, 0, model._output._coefficient_names, 0,
glmCoeffLen);
if (multiClass) {
double[][] model_beta_multinomial = glm._output.get_global_beta_multinomial();
double[][] standardized_model_beta_multinomial = glm._output.getNormBetaMultinomial();
model._output._model_beta_multinomial = new double[nclass][glmCoeffLen];
model._output._standardized_model_beta_multinomial = new double[nclass][glmCoeffLen];
for (int classInd = 0; classInd < nclass; classInd++) {
System.arraycopy(model_beta_multinomial[classInd], 0, model._output._model_beta_multinomial[classInd],
0, glmCoeffLen);
System.arraycopy(standardized_model_beta_multinomial[classInd], 0,
model._output._standardized_model_beta_multinomial[classInd], 0, glmCoeffLen);
}
} else {
model._output._model_beta = new double[glmCoeffLen];
model._output._standardized_model_beta = new double[glmCoeffLen];
System.arraycopy(glm._output.beta(), 0, model._output._model_beta, 0, glmCoeffLen);
System.arraycopy(glm._output.getNormBeta(), 0, model._output._standardized_model_beta, 0,
glmCoeffLen);
}
}
/***
* Find the number of gamified column coefficients. This is more involved for thin plate regression smoothers,
* where each smoother has _numKnots + _M columns.
* @param parms
*/
public static int gamNoCenterCoeffLength(GAMParameters parms) {
int tpCount = 0;
int numGam = parms._gam_columns.length;
int gamifiedColCount = 0;
for (int index = 0; index < numGam; index++) {
if (parms._bs_sorted[index]==CS_SPLINE_TYPE || parms._bs_sorted[index]==IS_SPLINE_TYPE ||
parms._bs_sorted[index]==MS_SPLINE_TYPE) { // CS, I-spline or M-spline smoothers
gamifiedColCount++;
} else if (parms._bs_sorted[index] == TP_SPLINE_TYPE) {
gamifiedColCount += (1+parms._M[tpCount++]);
}
}
return gamifiedColCount;
}
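// Illustrative note (added for clarity, not part of the original source; the counts are hypothetical):
// with two cubic-spline smoothers and one thin-plate smoother with _M = 3, gamNoCenterCoeffLength returns
// 1 + 1 + (1 + 3) = 6, and copyGLMCoeffs adds this value to the per-class GLM coefficient count to size
// the _coefficient_names_no_centering array.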
public static void copyGLMtoGAMModel(GAMModel model, GLMModel glmModel, GAMParameters parms, boolean validNotNull) {
model._output._glm_best_lamda_value = glmModel._output.bestSubmodel().lambda_value; // expose the best lambda value used
model._output._glm_training_metrics = glmModel._output._training_metrics;
if (validNotNull)
model._output._glm_validation_metrics = glmModel._output._validation_metrics;
model._output._glm_model_summary = copyTwoDimTable(glmModel._output._model_summary, "glm model summary");
model._output._glm_scoring_history = copyTwoDimTable(glmModel._output._scoring_history, "glm scoring history");
if (parms._family == multinomial || parms._family == ordinal) {
model._output._coefficients_table = genCoefficientTableMultinomial(new String[]{"Coefficients",
"Standardized Coefficients"}, model._output._model_beta_multinomial,
model._output._standardized_model_beta_multinomial, model._output._coefficient_names,"GAM Coefficients");
model._output._coefficients_table_no_centering = genCoefficientTableMultinomial(new String[]{"coefficients " +
"no centering", "standardized coefficients no centering"},
model._output._model_beta_multinomial_no_centering, model._output._standardized_model_beta_multinomial_no_centering,
model._output._coefficient_names_no_centering,"GAM Coefficients No Centering");
model._output._standardized_coefficient_magnitudes = model.genCoefficientMagTableMultinomial(new String[]{"coefficients", "signs"},
model._output._standardized_model_beta_multinomial, model._output._coefficient_names, "standardized coefficients magnitude");
} else{
model._output._coefficients_table = genCoefficientTable(new String[]{"coefficients", "standardized coefficients"}, model._output._model_beta,
model._output._standardized_model_beta, model._output._coefficient_names, "GAM Coefficients");
model._output._coefficients_table_no_centering = genCoefficientTable(new String[]{"coefficients no centering",
"standardized coefficients no centering"}, model._output._model_beta_no_centering,
model._output._standardized_model_beta_no_centering,
model._output._coefficient_names_no_centering,
"GAM Coefficients No Centering");
model._output._standardized_coefficient_magnitudes = model.genCoefficientMagTable(new String[]{"coefficients", "signs"},
model._output._standardized_model_beta, model._output._coefficient_names, "standardized coefficients magnitude");
}
if (parms._compute_p_values) {
model._output._glm_zvalues = glmModel._output.zValues().clone();
model._output._glm_pvalues = glmModel._output.pValues().clone();
model._output._glm_stdErr = glmModel._output.stdErr().clone();
model._output._glm_vcov = glmModel._output.vcov().clone();
}
model._output._glm_dispersion = glmModel._output.dispersion();
model._nobs = glmModel._nobs;
model._nullDOF = glmModel._nullDOF;
model._ymu = new double[glmModel._ymu.length];
model._rank = glmModel._output.bestSubmodel().rank();
System.arraycopy(glmModel._ymu, 0, model._ymu, 0, glmModel._ymu.length);
// pass GLM _solver value to GAM so that GAM effective _solver value can be set
if (model.evalAutoParamsEnabled && model._parms._solver == GLMParameters.Solver.AUTO) {
model._parms._solver = glmModel._parms._solver;
}
model._output._varimp = new VarImp(glmModel._output._varimp._varimp, glmModel._output._varimp._names);
model._output._variable_importances = calcVarImp(model._output._varimp);
}
public static TwoDimTable copyTwoDimTable(TwoDimTable table, String tableHeader) {
String[] rowHeaders = table.getRowHeaders();
String[] colTypes = table.getColTypes();
int tableSize = rowHeaders.length;
int colSize = colTypes.length;
TwoDimTable tableCopy = new TwoDimTable(tableHeader, "",
rowHeaders, table.getColHeaders(), colTypes, table.getColFormats(),
"names");
for (int rowIndex = 0; rowIndex < tableSize; rowIndex++) {
for (int colIndex = 0; colIndex < colSize; colIndex++) {
tableCopy.set(rowIndex, colIndex,table.get(rowIndex, colIndex));
}
}
return tableCopy;
}
public static TwoDimTable genCoefficientTable(String[] colHeaders, double[] coefficients, double[] coefficientsStand,
String[] coefficientNames, String tableHeader) {
String[] colTypes = new String[]{ "double", "double"};
String[] colFormat = new String[]{"%5f", "%5f"};
int nCoeff = coefficients.length;
String[] coeffNames = new String[nCoeff];
System.arraycopy(coefficientNames, 0, coeffNames, 0, nCoeff);
Log.info("genCoefficientMagTable", String.format("coemffNames length: %d. coefficients " +
"length: %d, coeffSigns length: %d", coeffNames.length, coefficients.length, coefficientsStand.length));
TwoDimTable table = new TwoDimTable(tableHeader, "", coeffNames, colHeaders, colTypes, colFormat,
"names");
fillUpCoeffs(coefficients, coefficientsStand, table, 0);
return table;
}
public static TwoDimTable genCoefficientTableMultinomial(String[] colHeaders, double[][] coefficients, double[][] coefficients_stand,
String[] coefficientNames, String tableHeader) {
String[] colTypes = new String[]{"double", "double"};
String[] colFormat = new String[]{"%5f", "%5f"};
int nCoeff = coefficients[0].length;
int nclass = coefficients.length;
int totCoeff = nCoeff*nclass;
String[] coeffNames = new String[totCoeff];
int coeffCounter=0;
for (int classInd=0; classInd < nclass; classInd++){
for (int ind=0; ind < nCoeff; ind++) {
coeffNames[coeffCounter++] = coefficientNames[ind]+"_class_"+classInd;
}
}
TwoDimTable table = new TwoDimTable(tableHeader, "", coeffNames, colHeaders, colTypes, colFormat,
"names");
for (int classInd=0; classInd<nclass; classInd++)
fillUpCoeffs(coefficients[classInd], coefficients_stand[classInd], table, classInd*nCoeff);
return table;
}
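// Illustrative note (added, not part of the original source): genCoefficientTableMultinomial names each row
// per class, e.g. coefficientNames = {"x1", "Intercept"} with nclass = 2 gives row names x1_class_0,
// Intercept_class_0, x1_class_1, Intercept_class_1, and fillUpCoeffs writes each class block starting at
// rowStart = classInd * nCoeff.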
public static void fillUpCoeffs(double[] coeffValues, double[] coeffValuesStand, TwoDimTable tdt, int rowStart) {
int arrLength = coeffValues.length+rowStart;
int arrCounter=0;
for (int i=rowStart; i<arrLength; i++) {
tdt.set(i, 0, coeffValues[arrCounter]);
tdt.set(i, 1, coeffValuesStand[arrCounter]);
arrCounter++;
}
}
public static int copyGLMCoeffNames2GAMCoeffNames(GAMModel model, GLMModel glm) {
int numGamCols = model._gamColNamesNoCentering.length;
String[] glmColNames = glm._output.coefficientNames();
int lastGLMCoeffIndex = glmColNames.length-1;
int lastGAMCoeffIndex = lastGLMCoeffIndex+gamNoCenterCoeffLength(model._parms);
int gamNumColStart = find(glmColNames, model._gamColNames[0][0]);
int gamLengthCopied = gamNumColStart;
System.arraycopy(glmColNames, 0, model._output._coefficient_names_no_centering, 0, gamLengthCopied); // copy coeff names before gam columns
for (int gamColInd = 0; gamColInd < numGamCols; gamColInd++) {
System.arraycopy(
model._gamColNamesNoCentering[gamColInd], 0,
model._output._coefficient_names_no_centering, gamLengthCopied,
model._gamColNamesNoCentering[gamColInd].length
);
gamLengthCopied += model._gamColNamesNoCentering[gamColInd].length;
}
model._output._coefficient_names_no_centering[lastGAMCoeffIndex] = glmColNames[lastGLMCoeffIndex];// copy intercept
return gamNumColStart;
}
public static void copyGLMCoeffs2GAMCoeffs(GAMModel model, GLMModel glm, GLMParameters.Family family,
int gamNumStart, int nclass, boolean hasIntercept) {
int numCoeffPerClass = model._output._coefficient_names_no_centering.length;
if (family.equals(GLMParameters.Family.multinomial) || family.equals(GLMParameters.Family.ordinal)) {
double[][] model_beta_multinomial = glm._output.get_global_beta_multinomial();
double[][] standardized_model_beta_multinomial = glm._output.getNormBetaMultinomial();
model._output._model_beta_multinomial_no_centering = new double[nclass][];
model._output._standardized_model_beta_multinomial_no_centering = new double[nclass][];
for (int classInd = 0; classInd < nclass; classInd++) {
model._output._model_beta_multinomial_no_centering[classInd] = convertCenterBeta2Beta(model._output._zTranspose,
gamNumStart, model_beta_multinomial[classInd], numCoeffPerClass, model._output._gamColNames, hasIntercept);
model._output._standardized_model_beta_multinomial_no_centering[classInd] = convertCenterBeta2Beta(model._output._zTranspose,
gamNumStart, standardized_model_beta_multinomial[classInd], numCoeffPerClass, model._output._gamColNames, hasIntercept);
}
} else { // other families
model._output._model_beta_no_centering = convertCenterBeta2Beta(model._output._zTranspose, gamNumStart,
glm.beta(), numCoeffPerClass, model._output._gamColNames, hasIntercept);
model._output._standardized_model_beta_no_centering = convertCenterBeta2Beta(model._output._zTranspose, gamNumStart,
glm._output.getNormBeta(), numCoeffPerClass, model._output._gamColNames, hasIntercept);
}
}
// This method carries out the evaluation of beta = Z betaCenter as explained in documentation 7.2
public static double[] convertCenterBeta2Beta(double[][][] ztranspose, int gamNumStart, double[] centerBeta,
int betaSize, String[][] gamColNames, boolean hasIntercept) {
double[] originalBeta = new double[betaSize];
if (ztranspose!=null) { // centering is performed
int numGamCols = ztranspose.length;
int gamColStart = gamNumStart;
int origGamColStart = gamNumStart;
System.arraycopy(centerBeta,0, originalBeta, 0, gamColStart); // copy everything before gamCols
int numGamCoef;
for (int colInd=0; colInd < numGamCols; colInd++) {
numGamCoef = gamColNames[colInd].length;
double[] tempCbeta = new double[ztranspose[colInd].length];
if (tempCbeta.length > 0) {
System.arraycopy(centerBeta, gamColStart, tempCbeta, 0, tempCbeta.length);
double[] tempBeta = ArrayUtils.multVecArr(tempCbeta, ztranspose[colInd]);
System.arraycopy(tempBeta, 0, originalBeta, origGamColStart, tempBeta.length);
gamColStart += tempCbeta.length;
origGamColStart += tempBeta.length;
} else { // no centering needed for these GAM coefficients
System.arraycopy(centerBeta, gamColStart, originalBeta, origGamColStart, numGamCoef);
origGamColStart += numGamCoef;
gamColStart += numGamCoef;
}
}
if (hasIntercept)
originalBeta[betaSize-1]=centerBeta[centerBeta.length-1];
} else { // no centering for all gam columns
System.arraycopy(centerBeta, 0, originalBeta, 0, betaSize); // no change needed, just copy over
}
return originalBeta;
}
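// Illustrative sketch (added, not part of the original source; dimensions and values are hypothetical): for a
// single CS smoother with 4 knots, the centered solution holds 3 coefficients for that smoother and
// ztranspose[colInd] is a 3x4 matrix, so the expansion above amounts to
// double[] tempCbeta = {0.5, -0.2, 0.1}; // 3 centered coefficients
// double[] tempBeta = ArrayUtils.multVecArr(tempCbeta, ztranspose[colInd]); // length 4, un-centered
// which is the per-smoother evaluation of beta = Z * betaCenter.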
public static void zeroOutIStranspose(int[] bs_sorted, double[][][] zTranspose) {
int numGam = bs_sorted.length;
for (int index=0; index<numGam; index++)
if (bs_sorted[index] == IS_SPLINE_TYPE) // I-splines are not centered, so no Z transpose is kept
zTranspose[index] = new double[0][0];
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam/MatrixFrameUtils/GamUtils.java
|
package hex.gam.MatrixFrameUtils;
import hex.Model;
import hex.gam.GAM;
import hex.gam.GAMModel;
import hex.gam.GAMModel.GAMParameters;
import hex.glm.GLMModel;
import hex.quantile.Quantile;
import hex.quantile.QuantileModel;
import org.apache.commons.lang.NotImplementedException;
import water.DKV;
import water.Key;
import water.MemoryManager;
import water.Scope;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.ArrayUtils;
import water.util.TwoDimTable;
import java.lang.reflect.Field;
import java.util.*;
import static hex.gam.GamSplines.ThinPlateRegressionUtils.calculateM;
import static hex.gam.GamSplines.ThinPlateRegressionUtils.calculatem;
import static hex.gam.MatrixFrameUtils.GAMModelUtils.*;
import static hex.genmodel.algos.gam.GamMojoModel.*;
public class GamUtils {
public final static String SPLINENOTIMPL = "Spline type not implemented.";
public static final double EPS = 1e-12;
// allocate 3D array holding one 2D array per single-predictor (non thin-plate) smoother
public static double[][][] allocate3DArrayCS(int num2DArrays, GAMParameters parms, AllocateType fileMode) {
double[][][] array3D = new double[num2DArrays][][];
int gamColCount = 0;
for (int frameIdx = 0; frameIdx < num2DArrays; frameIdx++) {
if (parms._gam_columns_sorted[frameIdx].length == 1) {
int numKnots = parms._num_knots_sorted[frameIdx];
array3D[gamColCount++] = allocate2DArray(fileMode, numKnots);
}
}
return array3D;
}
public static double[][][] allocate3DArray(int num2DArrays, GAMParameters parms, AllocateType fileMode) {
double[][][] array3D = new double[num2DArrays][][];
for (int frameIdx = 0; frameIdx < num2DArrays; frameIdx++) {
if (parms._bs_sorted[frameIdx] == IS_SPLINE_TYPE) { // I-spline, no centering needed
int totBasis = parms._num_knots_sorted[frameIdx] + parms._spline_orders_sorted[frameIdx] - 2; // I-spline order=NBSplineTypeII order
array3D[frameIdx] = allocate2DArray(fileMode, totBasis);
} else { // centering needed for other spline types
if (parms._bs_sorted[frameIdx] == MS_SPLINE_TYPE) {
int totBasis = parms._num_knots_sorted[frameIdx] + parms._spline_orders_sorted[frameIdx] - 2;
array3D[frameIdx] = allocate2DArray(fileMode, totBasis);
} else {
array3D[frameIdx] = allocate2DArray(fileMode, parms._num_knots_sorted[frameIdx]);
}
}
}
return array3D;
}
/***
* Undo the dimension change caused by centering for I-splines, which are not centered: the penalty matrix of an
* I-spline smoother keeps its original numBasis by numBasis size.
*/
public static void removeCenteringIS(double[][][] penaltyMatCenter, GAMParameters parms) {
int numGamCol = parms._bs_sorted.length;
for (int index=0; index<numGamCol; index++)
if (parms._bs_sorted[index]==IS_SPLINE_TYPE) {
int numBasis = parms._num_knots_sorted[index]+parms._spline_orders_sorted[index]-2;
penaltyMatCenter[index] = allocate2DArray(AllocateType.sameOrig, numBasis);
}
}
// allocate 3D array holding one 2D array per thin-plate smoother
public static double[][][] allocate3DArrayTP(int num2DArrays, GAMParameters parms, int[] secondDim, int[] thirdDim) {
double[][][] array3D = new double[num2DArrays][][];
int gamColCount = 0;
int numGamCols = parms._gam_columns.length;
for (int frameIdx = 0; frameIdx < numGamCols; frameIdx++) {
if (parms._bs_sorted[frameIdx] == TP_SPLINE_TYPE) {
array3D[gamColCount] = MemoryManager.malloc8d(secondDim[gamColCount], thirdDim[gamColCount]);
gamColCount++;
}
}
return array3D;
}
// allocate a 2D array whose dimensions depend on the AllocateType
public static double[][] allocate2DArray(AllocateType fileMode, int numKnots) {
double[][] array2D;
switch (fileMode) {
case firstOneLess: array2D = MemoryManager.malloc8d(numKnots-1, numKnots); break;
case sameOrig: array2D = MemoryManager.malloc8d(numKnots, numKnots); break;
case bothOneLess: array2D = MemoryManager.malloc8d(numKnots-1, numKnots-1); break;
case firstTwoLess: array2D = MemoryManager.malloc8d(numKnots-2, numKnots); break;
default: throw new IllegalArgumentException("fileMode can only be firstOneLess, sameOrig, bothOneLess or " +
"firstTwoLess.");
}
return array2D;
}
public enum AllocateType {firstOneLess, sameOrig, bothOneLess, firstTwoLess} // controls the dimensions of the allocated 2D arrays. Internal use
public static Integer[] sortCoeffMags(int arrayLength, double[] coeffMags) {
Integer[] indices = new Integer[arrayLength];
for (int i = 0; i < indices.length; ++i)
indices[i] = i;
Arrays.sort(indices, new Comparator<Integer>() {
@Override
public int compare(Integer o1, Integer o2) {
if (coeffMags[o1] < coeffMags[o2]) return +1;
if (coeffMags[o1] > coeffMags[o2]) return -1;
return 0;
}
});
return indices;
}
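// Illustrative note (added, not part of the original source): sortCoeffMags returns the indices of the
// coefficient magnitudes sorted in descending order, e.g. coeffMags = {0.1, 0.9, 0.5} yields {1, 2, 0}.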
public static boolean equalColNames(String[] name1, String[] standardN, String response_column) {
boolean name1ContainsResp = ArrayUtils.contains(name1, response_column);
boolean standarNContainsResp = ArrayUtils.contains(standardN, response_column);
boolean equalNames = name1.length==standardN.length;
if (name1ContainsResp && !standarNContainsResp) // if name1 contains response but standardN does not
equalNames = name1.length==(standardN.length+1);
else if (!name1ContainsResp && standarNContainsResp) // if name1 does not contain response but standardN does
equalNames = (name1.length+1)==standardN.length;
if (equalNames) { // column counts match, but do the column names match as well?
for (String name : name1) {
if (name.equals(response_column)) // leave out the response column in this comparison; only compare predictors
continue;
if (!ArrayUtils.contains(standardN, name))
return false;
}
return true;
} else
return equalNames;
}
public static void copyCVGLMtoGAMModel(GAMModel model, GLMModel glmModel, GAMParameters parms, String foldColumn) {
// copy over cross-validation metrics
model._output._cross_validation_metrics = glmModel._output._cross_validation_metrics;
model._output._cross_validation_metrics_summary =
copyTwoDimTable(glmModel._output._cross_validation_metrics_summary,
"GLM cross-validation metrics summary");
int nFolds = glmModel._output._cv_scoring_history.length;
model._output._glm_cv_scoring_history = new TwoDimTable[nFolds];
if (parms._keep_cross_validation_predictions)
model._output._cross_validation_predictions = new Key[nFolds];
for (int fInd = 0; fInd < nFolds; fInd++) {
model._output._glm_cv_scoring_history[fInd] = copyTwoDimTable(glmModel._output._cv_scoring_history[fInd],
glmModel._output._cv_scoring_history[fInd].getTableHeader());
// copy over hold-out predictions
if (parms._keep_cross_validation_predictions) {
Frame pred = DKV.getGet(glmModel._output._cross_validation_predictions[fInd]);
Frame newPred = pred.deepCopy(Key.make().toString());
DKV.put(newPred);
model._output._cross_validation_predictions[fInd] = newPred.getKey();
}
}
// copy over cross-validation models
if (parms._keep_cross_validation_models)
model._output._cross_validation_models = buildCVGamModels(model, glmModel, parms, foldColumn);
// copy over fold_assignments
if (parms._keep_cross_validation_predictions) {
Frame cvPred = DKV.getGet(glmModel._output._cross_validation_holdout_predictions_frame_id);
Frame newPred = cvPred.deepCopy(Key.make().toString());
DKV.put(newPred);
model._output._cross_validation_holdout_predictions_frame_id = newPred.getKey();
}
if (parms._keep_cross_validation_fold_assignment) {
Frame foldAssignment = DKV.getGet(glmModel._output._cross_validation_fold_assignment_frame_id);
Frame newFold = foldAssignment.deepCopy((Key.make()).toString());
DKV.put(newFold);
model._output._cross_validation_fold_assignment_frame_id = newFold.getKey();
}
}
public static Key[] buildCVGamModels(GAMModel model, GLMModel glmModel, GAMParameters parms, String foldColumn) {
int nFolds = glmModel._output._cross_validation_models.length;
Key[] cvModelKeys = new Key[nFolds];
for (int fInd=0; fInd<nFolds; fInd++) {
GLMModel cvModel = DKV.getGet(glmModel._output._cross_validation_models[fInd]);
// set up GAMParameters
GAMParameters gamParams = makeGAMParameters(parms);
if (foldColumn != null) {
if (gamParams._ignored_columns != null) {
List<String> ignoredCols = new ArrayList<>(Arrays.asList(gamParams._ignored_columns));
ignoredCols.add(foldColumn);
gamParams._ignored_columns = ignoredCols.toArray(new String[0]);
} else {
gamParams._ignored_columns = new String[]{foldColumn};
}
}
int maxIterations = gamParams._max_iterations;
gamParams._max_iterations = 1;
// instantiate GAMModels
GAMModel gamModel = new GAM(gamParams).trainModel().get();
gamParams._max_iterations = maxIterations;
// extract GLM CV model run results to GAMModels
copyGLMCoeffs(cvModel, gamModel, gamParams, model._nclass);
copyGLMtoGAMModel(gamModel, cvModel, parms, true);
cvModelKeys[fInd] = gamModel.getKey();
DKV.put(gamModel);
}
return cvModelKeys;
}
public static GAMParameters makeGAMParameters(GAMParameters parms) {
GAMParameters gamParams = new GAMParameters();
final Field[] field1 = GAMParameters.class.getDeclaredFields();
final Field[] field2 = Model.Parameters.class.getDeclaredFields();
setParamField(parms, gamParams, false, field1, Collections.emptyList());
setParamField(parms, gamParams, true, field2, Collections.emptyList());
gamParams._nfolds = 0;
gamParams._keep_cross_validation_predictions = false;
gamParams._keep_cross_validation_fold_assignment = false;
gamParams._keep_cross_validation_models = false;
gamParams._train = parms._train;
return gamParams;
}
public static void setParamField(Model.Parameters parms, Model.Parameters glmParam, boolean superClassParams,
Field[] gamFields, List<String> excludeList) {
// assign relevant GAMParameter fields to GLMParameter fields
Field glmField;
boolean emptyExcludeList = excludeList == null || excludeList.size() == 0;
for (Field oneField : gamFields) {
try {
if (emptyExcludeList || !excludeList.contains(oneField.getName())) {
if (superClassParams)
glmField = glmParam.getClass().getSuperclass().getDeclaredField(oneField.getName());
else
glmField = glmParam.getClass().getDeclaredField(oneField.getName());
glmField.set(glmParam, oneField.get(parms));
}
} catch (IllegalAccessException|NoSuchFieldException e) {
// ignore: only fields that exist in both parameter classes and are accessible get copied
}
}
}
public static void keepFrameKeys(List<Key> keep, Key<Frame> ... keyNames) {
for (Key<Frame> keyName:keyNames) {
Frame loadingFrm = DKV.getGet(keyName);
if (loadingFrm != null) for (Vec vec : loadingFrm.vecs()) keep.add(vec._key);
}
}
public static void setDefaultBSType(GAMParameters parms) {
parms._bs = new int[parms._gam_columns.length];
for (int index = 0; index < parms._bs.length; index++) {
if (parms._gam_columns[index].length > 1) {
parms._bs[index] = TP_SPLINE_TYPE;
} else {
parms._bs[index] = CS_SPLINE_TYPE;
}
}
}
public static void setThinPlateParameters(GAMParameters parms, int thinPlateNum) {
int numGamCols = parms._gam_columns.length;
parms._m = MemoryManager.malloc4(thinPlateNum);
parms._M = MemoryManager.malloc4(thinPlateNum);
int countThinPlate = 0;
for (int index = 0; index < numGamCols; index++) {
if (parms._bs[index] == TP_SPLINE_TYPE) {
int d = parms._gam_columns[index].length;
parms._m[countThinPlate] = calculatem(d);
parms._M[countThinPlate] = calculateM(d, parms._m[countThinPlate]);
countThinPlate++;
}
}
}
/***
* For each gam column group, record the number of predictors (gam columns) it contains. For thin-plate splines
* this can be 1, 2, or more; for all other spline types it is always one.
*/
public static void setGamPredSize(GAMParameters parms, int singleSplineOffset) {
int numGamCols = parms._gam_columns.length;
int tpCount = singleSplineOffset;
int singleSplineCount = 0;
parms._gamPredSize = MemoryManager.malloc4(numGamCols);
for (int index = 0; index < numGamCols; index++) {
if (parms._bs[index] == TP_SPLINE_TYPE) { // tp
parms._gamPredSize[tpCount++] = parms._gam_columns[index].length;
} else { // single predictor gam column
parms._gamPredSize[singleSplineCount++] = 1;
}
}
}
// This method generates knot locations by placing them at equally spaced quantiles of the chosen column.
public static double[] generateKnotsOneColumn(Frame gamFrame, int knotNum) {
double[] knots = MemoryManager.malloc8d(knotNum);
try {
Scope.enter();
Frame tempFrame = new Frame(gamFrame); // make sure we have a frame key
DKV.put(tempFrame);
double[] prob = MemoryManager.malloc8d(knotNum);
assert knotNum > 1;
double stepProb = 1.0 / (knotNum - 1);
for (int knotInd = 0; knotInd < knotNum; knotInd++)
prob[knotInd] = knotInd * stepProb;
QuantileModel.QuantileParameters parms = new QuantileModel.QuantileParameters();
parms._train = tempFrame._key;
parms._probs = prob;
QuantileModel qModel = new Quantile(parms).trainModel().get();
DKV.remove(tempFrame._key);
Scope.track_generic(qModel);
// make boundary values to be slightly wider
qModel._output._quantiles[0][0] -= EPS;
qModel._output._quantiles[0][qModel._output._quantiles[0].length-1] += EPS;
System.arraycopy(qModel._output._quantiles[0], 0, knots, 0, knotNum);
} finally {
Scope.exit();
}
return knots;
}
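// Illustrative note (added, not part of the original source): for knotNum = 5 the quantile probabilities used
// above are {0.0, 0.25, 0.5, 0.75, 1.0}; the two boundary knots are then widened by EPS as noted in the code.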
// grab all predictors to build a smoother
public static Frame prepareGamVec(int gam_column_index, GAMParameters parms, Frame fr) {
final Vec weights_column = ((parms._weights_column == null) || (fr.vec(parms._weights_column) == null))
? Scope.track(Vec.makeOne(fr.numRows())) : fr.vec(parms._weights_column);
final Frame predictVec = new Frame();
int numPredictors = parms._gam_columns_sorted[gam_column_index].length;
for (int colInd = 0; colInd < numPredictors; colInd++)
predictVec.add(parms._gam_columns_sorted[gam_column_index][colInd],
fr.vec(parms._gam_columns_sorted[gam_column_index][colInd]));
predictVec.add("weights_column", weights_column); // add weight columns for CV support
return predictVec;
}
public static String[] generateGamColNames(int gamColIndex, GAMParameters parms) {
String[] newColNames = null;
if (parms._bs_sorted[gamColIndex] == CS_SPLINE_TYPE)
newColNames = new String[parms._num_knots_sorted[gamColIndex]];
else
newColNames = new String[parms._num_knots_sorted[gamColIndex]+parms._spline_orders_sorted[gamColIndex]-2];
String stubName = parms._gam_columns_sorted[gamColIndex][0]+"_";
if (parms._bs_sorted[gamColIndex]==CS_SPLINE_TYPE)
stubName += "cr_";
else if (parms._bs_sorted[gamColIndex]==IS_SPLINE_TYPE)
stubName += "is_";
else if (parms._bs_sorted[gamColIndex]==MS_SPLINE_TYPE)
stubName += "ms_";
else if (parms._bs_sorted[gamColIndex]==TP_SPLINE_TYPE)
stubName += "tp_";
else
throw new NotImplementedException(SPLINENOTIMPL);
for (int knotIndex = 0; knotIndex < newColNames.length; knotIndex++) {
newColNames[knotIndex] = stubName+knotIndex;
}
return newColNames;
}
public static String[] generateGamColNamesThinPlateKnots(int gamColIndex, GAMParameters parms,
int[][] polyBasisDegree, String nameStub) {
int num_knots = parms._num_knots_sorted[gamColIndex];
int polyBasisSize = polyBasisDegree.length;
String[] gamColNames = new String[num_knots+polyBasisSize];
for (int index = 0; index < num_knots; index++)
gamColNames[index] = nameStub+index;
for (int index = 0; index < polyBasisSize; index++) {
gamColNames[index+num_knots] = genPolyBasisNames(parms._gam_columns_sorted[gamColIndex], polyBasisDegree[index]);
}
return gamColNames;
}
public static String genPolyBasisNames(String[] gam_columns, int[] oneBasis) {
StringBuffer polyBasisName = new StringBuffer();
int numGamCols = gam_columns.length;
int beforeLastIndex = numGamCols-1;
for (int index = 0; index < numGamCols; index++) {
polyBasisName.append(gam_columns[index]);
polyBasisName.append("_");
polyBasisName.append(oneBasis[index]);
if (index < beforeLastIndex)
polyBasisName.append("_");
}
return polyBasisName.toString();
}
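// Illustrative note (added, not part of the original source): genPolyBasisNames with gam_columns = {"x1", "x2"}
// and oneBasis = {1, 0} produces the column name "x1_1_x2_0".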
public static Frame buildGamFrame(GAMParameters parms, Frame train, Key<Frame>[] gamFrameKeysCenter, String foldColumn) {
Vec responseVec = train.remove(parms._response_column);
List<String> ignored_cols = parms._ignored_columns == null?new ArrayList<>():Arrays.asList(parms._ignored_columns);
Vec weightsVec = null;
Vec offsetVec = null;
Vec foldVec = null;
if (parms._offset_column != null)
offsetVec = train.remove(parms._offset_column);
if (parms._weights_column != null) // move weight vector to be the last vector before response variable
weightsVec = train.remove(parms._weights_column);
if (foldColumn != null)
foldVec = train.remove(foldColumn);
for (int colIdx = 0; colIdx < parms._gam_columns_sorted.length; colIdx++) { // append the augmented columns to _train
Frame gamFrame = Scope.track(gamFrameKeysCenter[colIdx].get());
train.add(gamFrame.names(), gamFrame.removeAll());
if (ignored_cols.contains(parms._gam_columns_sorted[colIdx]))
train.remove(parms._gam_columns_sorted[colIdx]);
}
if (foldColumn != null)
train.add(foldColumn, foldVec);
if (weightsVec != null)
train.add(parms._weights_column, weightsVec);
if (offsetVec != null)
train.add(parms._offset_column, offsetVec);
if (responseVec != null)
train.add(parms._response_column, responseVec);
return train;
}
public static Frame concateGamVecs(Key<Frame>[] gamFrameKeysCenter) {
Frame gamVecs = new Frame(Key.make());
for (int index = 0; index < gamFrameKeysCenter.length; index++) {
Frame tempCols = Scope.track(gamFrameKeysCenter[index].get());
gamVecs.add(tempCols.names(), tempCols.removeAll());
}
return gamVecs;
}
/**
* Move CS spline smoothers to the front and TP spline smoothers to the back for the arrays
* gam_columns, bs, scale and num_knots.
* The knots array has already been reordered with CS spline/I-spline smoothers in the front and TP splines at the back.
*/
public static void sortGAMParameters(GAMParameters parms, int csGamCol, int isGamCol, int msGamCol) {
int gamColNum = parms._gam_columns.length; // all gam cols regardless of types
int csIndex = 0;
int isIndex = csGamCol;
int msIndex = isIndex+isGamCol;
int tpIndex = msIndex+msGamCol;
parms._gam_columns_sorted = new String[gamColNum][];
parms._num_knots_sorted = MemoryManager.malloc4(gamColNum);
parms._scale_sorted = MemoryManager.malloc8d(gamColNum);
parms._bs_sorted = MemoryManager.malloc4(gamColNum);
parms._gamPredSize = MemoryManager.malloc4(gamColNum);
parms._spline_orders_sorted = MemoryManager.malloc4(gamColNum);
if (parms._splines_non_negative == null) {
parms._splines_non_negative = new boolean[parms._gam_columns.length];
Arrays.fill(parms._splines_non_negative, true);
}
parms._splines_non_negative_sorted = MemoryManager.mallocZ(gamColNum);
for (int index = 0; index < gamColNum; index++) {
if (parms._bs[index] == CS_SPLINE_TYPE) { // CS spline
setGamParameters(parms, index, csIndex++);
} else if (parms._bs[index] == IS_SPLINE_TYPE) {
setGamParameters(parms, index, isIndex);
parms._spline_orders_sorted[isIndex++] = parms._spline_orders[index];
} else if (parms._bs[index] == MS_SPLINE_TYPE) {
setGamParameters(parms, index, msIndex);
parms._spline_orders_sorted[msIndex++] = parms._spline_orders[index];
} else if (parms._bs[index] == TP_SPLINE_TYPE) { // thin plate spline
setGamParameters(parms, index, tpIndex++);
} else {
throw new NotImplementedException(SPLINENOTIMPL);
}
}
}
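// Illustrative note (added, not part of the original source): with _bs = {TP, CS, IS} and counts csGamCol = 1,
// isGamCol = 1, msGamCol = 0, the sorted arrays place the CS smoother at index 0, the I-spline smoother at
// index 1 and the thin-plate smoother at index 2.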
public static void setGamParameters(GAMParameters parms, int gamIndex, int splineIndex) {
parms._gam_columns_sorted[splineIndex] = parms._gam_columns[gamIndex].clone();
parms._num_knots_sorted[splineIndex] = parms._num_knots[gamIndex];
parms._scale_sorted[splineIndex] = parms._scale[gamIndex];
parms._gamPredSize[splineIndex] = parms._gam_columns_sorted[splineIndex].length;
parms._bs_sorted[splineIndex] = parms._bs[gamIndex];
parms._splines_non_negative_sorted[splineIndex] = parms._splines_non_negative[gamIndex];
}
// default value of scale is 1.0
public static void setDefaultScale(GAMParameters parms) {
int numGamCol = parms._gam_columns.length;
parms._scale = new double[numGamCol];
for (int index = 0; index < numGamCol; index++)
parms._scale[index] = 1.0;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam/MatrixFrameUtils/GenCSSplineGamOneColumn.java
|
package hex.gam.MatrixFrameUtils;
import hex.DataInfo;
import hex.gam.GAMModel.GAMParameters;
import hex.gam.GamSplines.CubicRegressionSplines;
import hex.genmodel.algos.gam.GamUtilsCubicRegression;
import hex.glm.GLMModel.GLMParameters.MissingValuesHandling;
import hex.util.LinearAlgebraUtils.BMulInPlaceTask;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.util.ArrayUtils;
import static hex.genmodel.algos.gam.GamUtilsCubicRegression.locateBin;
public class GenCSSplineGamOneColumn extends MRTask<GenCSSplineGamOneColumn> {
int _splineType;
public int _numKnots; // number of knots
public double[][] _bInvD; // store inv(B)*D
public int _initChunks;
public double[][] _ZTransp; // store Z matrix transpose
public double[][] _penaltyMat; // store penalty matrix
public double[] _knots;
double[] _maxAbsRowSum; // store maximum row sum
public double _s_scale;
public GenCSSplineGamOneColumn(int splineType, int numKnots, double[] knots, Frame gamx) {
_splineType = splineType;
_numKnots = numKnots;
CubicRegressionSplines crSplines = new CubicRegressionSplines(numKnots, knots);
_bInvD = crSplines.gen_BIndvD(crSplines._hj);
_penaltyMat = crSplines.gen_penalty_matrix(crSplines._hj, _bInvD);
_initChunks = gamx.vec(0).nChunks();
_knots = knots;
}
@Override
public void map(Chunk[] chk, NewChunk[] newGamCols) {
_maxAbsRowSum = new double[_initChunks];
int cIndex = chk[0].cidx();
_maxAbsRowSum[cIndex] = Double.NEGATIVE_INFINITY;
int chunkRows = chk[0].len(); // number of rows in chunk
CubicRegressionSplines crSplines = new CubicRegressionSplines(_numKnots, _knots); // not Iced; each map call needs its own instance
double[] basisVals = new double[_numKnots];
for (int rowIndex = 0; rowIndex < chunkRows; rowIndex++) {
double gamRowSum = 0.0;
// find index of knot bin where row value belongs to
if (chk[1].atd(rowIndex) != 0) { // consider weight column value during gamification. If 0, insert rows of zeros.
double xval = chk[0].atd(rowIndex);
if (Double.isNaN(xval)) { // fill with NaN
for (int colIndex = 0; colIndex < _numKnots; colIndex++)
newGamCols[colIndex].addNum(Double.NaN);
} else {
int binIndex = locateBin(xval, _knots); // location to update
// update from the F matrix (F = [0; invB*D; 0]) and the c functions
GamUtilsCubicRegression.updateFMatrixCFunc(basisVals, xval, binIndex, _knots, crSplines._hj, _bInvD);
// update from a+ and a- functions
GamUtilsCubicRegression.updateAFunc(basisVals, xval, binIndex, _knots, crSplines._hj);
// copy updates to the newChunk row
for (int colIndex = 0; colIndex < _numKnots; colIndex++) {
newGamCols[colIndex].addNum(basisVals[colIndex]);
gamRowSum += Math.abs(basisVals[colIndex]);
}
if (gamRowSum > _maxAbsRowSum[cIndex])
_maxAbsRowSum[cIndex] = gamRowSum;
}
} else { // zero weight, fill entries with zeros and skip all that processing
for (int colIndex = 0; colIndex < _numKnots; colIndex++)
newGamCols[colIndex].addNum(0.0);
}
}
}
@Override
public void reduce(GenCSSplineGamOneColumn other) {
ArrayUtils.add(_maxAbsRowSum, other._maxAbsRowSum);
}
@Override
public void postGlobal() { // scale the penalty matrix _penaltyMat as done in R
double tempMaxValue = ArrayUtils.maxValue(_maxAbsRowSum);
_s_scale = tempMaxValue*tempMaxValue/ArrayUtils.rNorm(_penaltyMat, 'i');
if (Double.isFinite(_s_scale))
ArrayUtils.mult(_penaltyMat, _s_scale);
_s_scale = 1.0/ _s_scale;
}
public static double[][] generateZTransp(Frame gamX, int numKnots) {
double[] u = new double[numKnots];
for (int cind = 0; cind < numKnots; cind++)
u[cind] = gamX.vec(cind).mean();
double[][] ZTransp = new double[numKnots - 1][numKnots];
double mag = ArrayUtils.innerProduct(u, u);
u[0] = u[0] - (u[0] > 0 ? -1 : 1) * Math.sqrt(mag); // form the Householder vector a = u - v and store it back in u
double twoOmagSq = 2.0 / ArrayUtils.innerProduct(u, u);
for (int rowIndex = 0; rowIndex < numKnots; rowIndex++) { // form Z matrix transpose here
for (int colIndex = 1; colIndex < numKnots; colIndex++) { // skip the first column of the Householder matrix
ZTransp[colIndex - 1][rowIndex] = (colIndex == rowIndex ? 1 : 0) - u[rowIndex] * u[colIndex] * twoOmagSq;
}
}
return ZTransp;
}
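// Illustrative note (added, not part of the original source): the loop above keeps columns 1..numKnots-1 of the
// Householder reflection H = I - 2*a*a'/(a'a), where a is derived from the column-mean vector u, so the stored
// Z transpose satisfies Z'u = 0 for the original mean vector and the centered basis X*Z has (up to floating
// point) zero column means.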
public Frame centralizeFrame(Frame fr, String colNameStart, GAMParameters parms) {
_ZTransp = generateZTransp(fr, _numKnots);
return centralizeFrame(fr, colNameStart, parms, _ZTransp);
}
public static Frame centralizeFrame(Frame fr, String colNameStart, GAMParameters parms, double[][] zTransp) {
int numCols = fr.numCols();
int ncolExp = numCols-1;
DataInfo frInfo = new DataInfo(fr, null, 0, false, DataInfo.TransformType.NONE
, DataInfo.TransformType.NONE, MissingValuesHandling.Skip == parms._missing_values_handling,
(parms._missing_values_handling == MissingValuesHandling.MeanImputation) ||
(parms._missing_values_handling == MissingValuesHandling.PlugValues), parms.makeImputer(),
false, false, false, false, null);
for (int index=0; index < ncolExp; index++) {
fr.add(colNameStart+"_"+index, fr.anyVec().makeZero()); // add numCols-1 columns to fr
}
new BMulInPlaceTask(frInfo, zTransp, numCols, false).doAll(fr);
for (int index=0; index < numCols; index++) { // remove the original gam columns
Vec temp = fr.remove(0);
temp.remove();
}
return fr;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam/MatrixFrameUtils/GenISplineGamOneColumn.java
|
package hex.gam.MatrixFrameUtils;
import hex.gam.GAMModel;
import hex.genmodel.algos.gam.ISplines;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.util.ArrayUtils;
import static hex.gam.GamSplines.NBSplinesTypeIDerivative.genISPenaltyMatrix;
/**
* Gamifies one gam column at a time using I-splines. See doc in the GH issue: https://github.com/h2oai/h2o-3/issues/7261.
* This class differs from AddISGamColumns in that, apart from performing the gamification itself, it also generates
* the penalty matrix, the Z transform if applicable, and the scaling of the penalty matrix.
*/
public class GenISplineGamOneColumn extends MRTask<GenISplineGamOneColumn> {
private final double[] _knots; // knots without duplication
private final int _order;
double[] _maxAbsRowSum; // store maximum row sum
public double _s_scale;
private final int _gamColNChunks;
public double[][] _penaltyMat; // store penalty matrix
public final int _numBasis;
public final int _totKnots;
public GenISplineGamOneColumn(GAMModel.GAMParameters parm, double[] knots, int gamColIndex, Frame gamCol,
int nBasis, int totKnots) {
_knots = knots;
_order = parm._spline_orders_sorted[gamColIndex];
_numBasis = nBasis > 0 ? nBasis : knots.length+_order-2;
_totKnots = totKnots > 0 ? totKnots : knots.length+2*_order-2;
_gamColNChunks = gamCol.vec(0).nChunks();
_penaltyMat = genISPenaltyMatrix(knots, parm._spline_orders_sorted[gamColIndex]);
}
@Override
public void map(Chunk[] chk, NewChunk[] newGamCols) {
ISplines basisFuncs = new ISplines(_order, _knots);
_maxAbsRowSum = new double[_gamColNChunks];
double[] basisVals = new double[_numBasis]; // array to hold each gamified row
int cIndex = chk[0].cidx();
_maxAbsRowSum[cIndex] = Double.NEGATIVE_INFINITY;
int chkRows = chk[0].len();
for (int rowIndex=0; rowIndex < chkRows; rowIndex++) {
double gamRowSum = 0.0;
if (chk[1].atd(rowIndex) != 0) {
double xval = chk[0].atd(rowIndex);
if (Double.isNaN(xval)) {
for (int colIndex = 0; colIndex < _numBasis; colIndex++)
newGamCols[colIndex].addNum(Double.NaN);
} else {
basisFuncs.gamifyVal(basisVals, xval);
// copy updates to the newChunk row
for (int colIndex = 0; colIndex < _numBasis; colIndex++) {
newGamCols[colIndex].addNum(basisVals[colIndex]);
gamRowSum += Math.abs(basisVals[colIndex]);
}
if (gamRowSum > _maxAbsRowSum[cIndex])
_maxAbsRowSum[cIndex] = gamRowSum;
}
} else {
for (int colIndex = 0; colIndex < _numBasis; colIndex++)
newGamCols[colIndex].addNum(0.0);
}
}
}
@Override
public void reduce(GenISplineGamOneColumn other) {
ArrayUtils.add(_maxAbsRowSum, other._maxAbsRowSum);
}
@Override
public void postGlobal() { // scale the penalty matrix _penaltyMat as done in R
double tempMaxValue = ArrayUtils.maxValue(_maxAbsRowSum);
_s_scale = tempMaxValue*tempMaxValue/ArrayUtils.rNorm(_penaltyMat, 'i');
if (Double.isFinite(_s_scale))
ArrayUtils.mult(_penaltyMat, _s_scale);
_s_scale = 1.0/ _s_scale;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam/MatrixFrameUtils/GenMSplineGamOneColumn.java
|
package hex.gam.MatrixFrameUtils;
import hex.gam.GAMModel;
import hex.genmodel.algos.gam.MSplines;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.util.ArrayUtils;
import static hex.gam.GamSplines.NBSplinesTypeIDerivative.genMSPenaltyMatrix;
import static hex.gam.MatrixFrameUtils.GenCSSplineGamOneColumn.generateZTransp;
public class GenMSplineGamOneColumn extends MRTask<GenMSplineGamOneColumn> {
private final double[] _knots; // knots without duplication/extension
private final int _order; // actual polynomial spline has order _order-1
double[] _maxAbsRowSum;
public double _s_scale;
private final int _gamColNChunks;
public double[][] _ZTransp; // store Z matrix transpose, keep for now
public double[][] _penaltyMat; // store penalty matrix
public final int _numBasis;
public final int _totKnots;
/**
* Perform gamification on one predictor.
*
* @param parm GAM parameters
* @param knots double array of knots without duplication
* @param gamColIndex index into the sorted gam columns currently being processed
* @param gamCol frame containing the predictor to be gamified
* @param nBasis number of basis functions
* @param totKnots total number of knots with duplication
*/
public GenMSplineGamOneColumn(GAMModel.GAMParameters parm, double[] knots, int gamColIndex, Frame gamCol,
int nBasis, int totKnots) {
_knots = knots;
_order = parm._spline_orders_sorted[gamColIndex];
_numBasis = nBasis > 0 ? nBasis : knots.length+_order-2;
_totKnots = totKnots > 0 ? totKnots : knots.length+2*_order-2;
_gamColNChunks = gamCol.vec(0).nChunks();
_penaltyMat = genMSPenaltyMatrix(knots, parm._spline_orders_sorted[gamColIndex]);
}
@Override
public void map(Chunk[] chk, NewChunk[] newGamCols) {
MSplines basisFuncs = new MSplines(_order, _knots);
_maxAbsRowSum = new double[_gamColNChunks];
double[] basisVals = new double[_numBasis]; // array to hold each gamified row
int cIndex = chk[0].cidx();
_maxAbsRowSum[cIndex] = Double.NEGATIVE_INFINITY;
int chkRows = chk[0].len();
for (int rowIndex=0; rowIndex < chkRows; rowIndex++) {
double gamRowSum = 0.0;
if (chk[1].atd(rowIndex) != 0) {
double xval = chk[0].atd(rowIndex);
if (Double.isNaN(xval)) {
for (int colIndex = 0; colIndex < _numBasis; colIndex++)
newGamCols[colIndex].addNum(Double.NaN);
} else {
basisFuncs.gamifyVal(basisVals, xval);
// copy updates to the newChunk row
for (int colIndex = 0; colIndex < _numBasis; colIndex++) {
newGamCols[colIndex].addNum(basisVals[colIndex]);
gamRowSum += Math.abs(basisVals[colIndex]);
}
if (gamRowSum > _maxAbsRowSum[cIndex])
_maxAbsRowSum[cIndex] = gamRowSum;
}
} else {
for (int colIndex = 0; colIndex < _numBasis; colIndex++)
newGamCols[colIndex].addNum(0.0);
}
}
}
@Override
public void reduce(GenMSplineGamOneColumn other) {
ArrayUtils.add(_maxAbsRowSum, other._maxAbsRowSum);
}
@Override
public void postGlobal() { // scale the penalty matrix _penaltyMat as done in R
double tempMaxValue = ArrayUtils.maxValue(_maxAbsRowSum);
_s_scale = tempMaxValue*tempMaxValue/ArrayUtils.rNorm(_penaltyMat, 'i');
if (Double.isFinite(_s_scale))
ArrayUtils.mult(_penaltyMat, _s_scale);
_s_scale = 1.0/ _s_scale;
}
public Frame centralizeFrame(Frame fr, String colNameStart, GAMModel.GAMParameters parms) {
_ZTransp = generateZTransp(fr, _numBasis);
return GenCSSplineGamOneColumn.centralizeFrame(fr, colNameStart, parms, _ZTransp);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gam/MatrixFrameUtils/TriDiagonalMatrix.java
|
package hex.gam.MatrixFrameUtils;
import water.MemoryManager;
public class TriDiagonalMatrix {
public double[] _first_diag;
public double[] _second_diag;
public double[] _third_diag;
public int _size; // number of diagonal elements. Matrix size is _size by _size+2
public TriDiagonalMatrix(int size) {
assert size>0:"Size of BiDiagonalMatrix must be > 0 but is "+size;
_size = size;
_first_diag = MemoryManager.malloc8d(size);
_second_diag = MemoryManager.malloc8d(size);
_third_diag = MemoryManager.malloc8d(size);
}
// Generate the D matrix. Refer to doc 6.1
public TriDiagonalMatrix(double[] hj) {
this(hj.length-1); // hj has k-1 entries for k knots, so D is (k-2) by k
int diagSize = _size;
for (int index=0; index < diagSize; index++) {
double oneOhj = 1.0/hj[index];
double oneOhjP1 = 1/hj[index+1];
_first_diag[index] = oneOhj;
_second_diag[index] = -oneOhj-oneOhjP1;
_third_diag[index] = oneOhjP1;
}
}
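// Illustrative note (added, not part of the original source; the knot spacings are hypothetical): for
// hj = {0.5, 0.5, 1.0} (four knots), _size = 2 and the stored diagonals encode the 2-by-4 matrix
// D = [ 2 -4  2  0 ]
//     [ 0  2 -3  1 ]
// i.e. row i holds 1/hj[i], -(1/hj[i] + 1/hj[i+1]), 1/hj[i+1] starting at column i.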
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/generic/Generic.java
|
package hex.generic;
import hex.ModelBuilder;
import hex.ModelCategory;
import hex.genmodel.*;
import hex.genmodel.descriptor.ModelDescriptor;
import hex.genmodel.descriptor.ModelDescriptorBuilder;
import water.H2O;
import water.Key;
import water.fvec.ByteVec;
import water.fvec.Frame;
import water.parser.ZipUtil;
import water.util.Log;
import java.io.IOException;
import java.net.URI;
import java.util.*;
/**
* Generic model able to do scoring with any underlying model deserializable into a format known by the {@link GenericModel}.
* Only H2O Mojos are currently supported.
*/
public class Generic extends ModelBuilder<GenericModel, GenericModelParameters, GenericModelOutput> {
/**
* Unmodifiable {@link Set} of Algorithm MOJOs which are allowed to be imported as generic model
*/
private static final Set<String> ALLOWED_MOJO_ALGOS;
static{
final Set<String> allowedAlgos = new HashSet<>(6);
allowedAlgos.add("gbm");
allowedAlgos.add("glm");
allowedAlgos.add("xgboost");
allowedAlgos.add("isolationforest");
allowedAlgos.add("extendedisolationforest");
allowedAlgos.add("drf");
allowedAlgos.add("deeplearning");
allowedAlgos.add("stackedensemble");
allowedAlgos.add("coxph");
allowedAlgos.add("rulefit");
allowedAlgos.add("gam");
allowedAlgos.add("upliftdrf");
ALLOWED_MOJO_ALGOS = Collections.unmodifiableSet(allowedAlgos);
}
public Generic(GenericModelParameters genericParameters){
super(genericParameters);
init(false);
}
public Generic(boolean startup_once) {
super(new GenericModelParameters(), startup_once);
}
@Override
public void init(boolean expensive) {
super.init(expensive);
if (_parms._path != null && _parms._model_key != null) {
error("_path",
"Path cannot be set for MOJO that is supposed to be loaded from distributed memory (key=" + _parms._model_key + ").");
}
}
@Override
protected Driver trainModelImpl() {
return new MojoDelegatingModelDriver();
}
@Override
public ModelCategory[] can_build() {
return ModelCategory.values();
}
@Override
public boolean haveMojo() {
return true;
}
@Override
public boolean isSupervised() {
return false;
}
class MojoDelegatingModelDriver extends Driver {
@Override
public void compute2() {
if (_parms._path != null) { // If there is a file to be imported, do the import before the scope is entered
_parms._model_key = importFile();
}
super.compute2();
}
@Override
public void computeImpl() {
final Key<Frame> dataKey;
if (_parms._model_key != null) {
dataKey = _parms._model_key;
} else {
throw new IllegalArgumentException("Either MOJO zip path or key to the uploaded MOJO frame must be specified");
}
final ByteVec modelBytes = readModelData(dataKey);
try {
final GenericModel genericModel;
if (ZipUtil.isCompressed(modelBytes)) {
genericModel = importMojo(modelBytes, dataKey);
} else {
if (H2O.getSysBoolProperty("pojo.import.enabled", false)) {
warn("_path", "Trying to import a POJO model - this is currently an experimental feature.");
genericModel = importPojo(modelBytes, dataKey, _result.toString());
} else {
throw new SecurityException("POJO import is disabled since it brings a security risk. " +
"To enable the feature, set the java property `sys.ai.h2o.pojo.import.enabled` to true.");
}
}
genericModel.write_lock(_job);
genericModel.unlock(_job);
} catch (IOException e) {
throw new IllegalStateException("Unreachable model file: " + dataKey, e);
}
}
private GenericModel importMojo(ByteVec mojoBytes, Key<Frame> dataKey) throws IOException {
final MojoReaderBackend readerBackend = MojoReaderBackendFactory.createReaderBackend(
mojoBytes.openStream(_job._key), MojoReaderBackendFactory.CachingStrategy.MEMORY);
final MojoModel mojoModel = ModelMojoReader.readFrom(readerBackend, true);
if(! ALLOWED_MOJO_ALGOS.contains(mojoModel._modelDescriptor.algoName().toLowerCase())) {
if (_parms._disable_algo_check)
Log.warn(String.format("MOJO model '%s' is not supported but user disabled white-list check. Trying to load anyway.", mojoModel._modelDescriptor.algoName()));
else
throw new IllegalArgumentException(String.format("Unsupported MOJO model '%s'. ", mojoModel._modelDescriptor.algoName()));
}
final GenericModelOutput genericModelOutput = new GenericModelOutput(mojoModel._modelDescriptor, mojoModel._modelAttributes, mojoModel._reproducibilityInformation);
return new GenericModel(_result, _parms, genericModelOutput, mojoModel, dataKey);
}
private GenericModel importPojo(ByteVec pojoBytes, Key<Frame> pojoKey, String modelId) throws IOException {
GenModel genmodel = PojoLoader.loadPojoFromSourceCode(pojoBytes, pojoKey, modelId);
ModelDescriptor pojoDescriptor = ModelDescriptorBuilder.makeDescriptor(genmodel);
final GenericModelOutput genericModelOutput = new GenericModelOutput(pojoDescriptor);
return new GenericModel(_result, _parms, genericModelOutput, genmodel, pojoKey);
}
}
private Key<Frame> importFile() {
ArrayList<String> files = new ArrayList<>();
ArrayList<String> keys = new ArrayList<>();
ArrayList<String> fails = new ArrayList<>();
ArrayList<String> dels = new ArrayList<>();
H2O.getPM().importFiles(_parms._path, null, files, keys, fails, dels);
if (!fails.isEmpty()) {
throw new RuntimeException("Failed to import file: " + Arrays.toString(fails.toArray()));
}
assert keys.size() == 1;
return Key.make(keys.get(0));
}
/**
* Retrieves pre-uploaded MOJO archive and performs basic verifications, if present.
*
* @param key Key to MOJO bytes in DKV
* @return An instance of {@link ByteVec} containing the bytes of an uploaded MOJO, if present. Or exception. Never returns null.
* @throws IllegalArgumentException In case the supplied key is invalid (MOJO missing, empty key etc.)
*/
private ByteVec readModelData(final Key<Frame> key) throws IllegalArgumentException {
Objects.requireNonNull(key); // Nicer null pointer exception in case null key is accidentally provided
Frame mojoFrame = key.get();
if (mojoFrame.numCols() > 1)
throw new IllegalArgumentException(String.format("Given model frame with key '%s' should contain only 1 column with model bytes. More columns found. Incorrect key provided ?", key));
ByteVec mojoData = (ByteVec) mojoFrame.anyVec();
if (mojoData.length() < 1)
throw new IllegalArgumentException(String.format("Given model frame with key '%s' is empty (0 bytes). Please provide a non-empty model file.", key));
return mojoData;
}
@Override
public BuilderVisibility builderVisibility() {
return BuilderVisibility.Stable;
}
/**
* Convenience method for importing MOJO into H2O.
*
* @param location absolute path to MOJO file
* @param disableAlgoCheck if true skip the check of white-listed MOJO models, use at your own risk - some features might not work.
* @return instance of H2O Model wrapping a MOJO
*/
public static GenericModel importMojoModel(String location, boolean disableAlgoCheck) {
GenericModelParameters p = new GenericModelParameters();
p._path = location;
p._disable_algo_check = disableAlgoCheck;
return new Generic(p).trainModel().get();
}
public static GenericModel importMojoModel(URI location) {
return importMojoModel(location.toString(), false);
}
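// Minimal usage sketch (added, not part of the original source; the MOJO path and test frame are hypothetical):
// GenericModel mojo = Generic.importMojoModel("/tmp/gbm_model.zip", false);
// Frame preds = mojo.score(testFrame); // testFrame is assumed to be an already-parsed Frame in the cluster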
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/generic/GenericModel.java
|
package hex.generic;
import hex.*;
import hex.genmodel.*;
import hex.genmodel.algos.glm.GlmMojoModelBase;
import hex.genmodel.algos.kmeans.KMeansMojoModel;
import hex.genmodel.descriptor.ModelDescriptor;
import hex.genmodel.descriptor.ModelDescriptorBuilder;
import hex.genmodel.easy.EasyPredictModelWrapper;
import hex.genmodel.easy.RowData;
import hex.genmodel.easy.exception.PredictException;
import hex.glm.GLMModel;
import hex.tree.isofor.ModelMetricsAnomaly;
import water.*;
import water.fvec.*;
import water.udf.CFuncRef;
import water.util.ArrayUtils;
import water.util.Log;
import water.util.RowDataUtils;
import java.io.IOException;
import java.util.*;
public class GenericModel extends Model<GenericModel, GenericModelParameters, GenericModelOutput>
implements Model.Contributions {
/**
* Captures model-specific behaviors
*/
private static final Map<String, ModelBehavior[]> DEFAULT_MODEL_BEHAVIORS;
static{
final Map<String, ModelBehavior[]> behaviors = new HashMap<>();
behaviors.put(
"gam", new ModelBehavior[]{
ModelBehavior.USE_MOJO_PREDICT // GAM score0 cannot be used directly because it introduces special columns in the raw-data conversion phase
}
);
DEFAULT_MODEL_BEHAVIORS = Collections.unmodifiableMap(behaviors);
}
/**
* name of the algo for MOJO, "pojo" for POJO models
*/
private final String _algoName;
private final GenModelSource<?> _genModelSource;
private GLMModel.GLMParameters _glmParameters;
/**
* Full constructor
*
*/
public GenericModel(Key<GenericModel> selfKey, GenericModelParameters parms, GenericModelOutput output,
MojoModel mojoModel, Key<Frame> mojoSource) {
super(selfKey, parms, output);
_algoName = mojoModel._algoName;
_genModelSource = new MojoModelSource(mojoSource, mojoModel, defaultModelBehaviors(_algoName));
_output = new GenericModelOutput(mojoModel._modelDescriptor, mojoModel._modelAttributes, mojoModel._reproducibilityInformation);
if (mojoModel._modelAttributes != null && mojoModel._modelAttributes.getModelParameters() != null) {
_parms._modelParameters = GenericModelParameters.convertParameters(mojoModel._modelAttributes.getModelParameters());
}
_glmParameters = null;
if(_algoName.toLowerCase().contains("glm")) {
GlmMojoModelBase glmModel = (GlmMojoModelBase) mojoModel;
// create GLM parameters instance
_glmParameters = new GLMModel.GLMParameters(
GLMModel.GLMParameters.Family.valueOf(getParamByName("family").toString()),
GLMModel.GLMParameters.Link.valueOf(getParamByName("link").toString()),
Arrays.stream(getParamByName("lambda").toString().trim().replaceAll("\\[", "")
.replaceAll("\\]", "").split(",\\s*"))
.mapToDouble(Double::parseDouble).toArray(),
Arrays.stream(getParamByName("alpha").toString().trim().replaceAll("\\[", "")
.replaceAll("\\]", "").split(",\\s*"))
.mapToDouble(Double::parseDouble).toArray(),
Double.parseDouble(getParamByName("tweedie_variance_power").toString()),
Double.parseDouble(getParamByName("tweedie_link_power").toString()),
null,
Double.parseDouble(getParamByName("theta").toString()),
glmModel.getDispersionEstimated()
);
}
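// Illustrative note (added, not part of the original source): the string parsing above turns a stored
// parameter value such as "[0.001, 5.0E-4]" into the double array {0.001, 5.0E-4} before it is handed to the
// reconstructed GLMParameters, which is used later when evaluating the GLM likelihood.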
}
public GenericModel(Key<GenericModel> selfKey, GenericModelParameters parms, GenericModelOutput output,
GenModel pojoModel, Key<Frame> pojoSource) {
super(selfKey, parms, output);
_algoName = "pojo";
_genModelSource = new PojoModelSource(selfKey.toString(), pojoSource, pojoModel);
_output = new GenericModelOutput(ModelDescriptorBuilder.makeDescriptor(pojoModel));
}
@Override
public boolean isGeneric() {
return true;
}
static ModelBehavior[] defaultModelBehaviors(String algoName) {
return DEFAULT_MODEL_BEHAVIORS.get(algoName);
}
private static MojoModel reconstructMojo(ByteVec mojoBytes) {
try {
final MojoReaderBackend readerBackend = MojoReaderBackendFactory.createReaderBackend(mojoBytes.openStream(null), MojoReaderBackendFactory.CachingStrategy.MEMORY);
return ModelMojoReader.readFrom(readerBackend, true);
} catch (IOException e) {
throw new IllegalStateException("Unreachable MOJO file: " + mojoBytes._key, e);
}
}
@Override
public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) {
switch(_output.getModelCategory()) {
case Unknown:
throw new IllegalStateException("Model category is unknown");
case Binomial:
return new ModelMetricsBinomial.MetricBuilderBinomial(domain);
case Multinomial:
return new ModelMetricsMultinomial.MetricBuilderMultinomial(_output.nclasses(), domain, _parms._auc_type);
case Ordinal:
return new ModelMetricsOrdinal.MetricBuilderOrdinal(_output.nclasses(), domain);
case Regression: return new ModelMetricsRegression.MetricBuilderRegression();
case Clustering:
if (genModel() instanceof KMeansMojoModel) {
KMeansMojoModel kMeansMojoModel = (KMeansMojoModel) genModel();
return new ModelMetricsClustering.MetricBuilderClustering(_output.nfeatures(), kMeansMojoModel.getNumClusters());
} else {
return unsupportedMetricsBuilder();
}
case AutoEncoder:
return new ModelMetricsAutoEncoder.MetricBuilderAutoEncoder(_output.nfeatures());
case DimReduction:
return unsupportedMetricsBuilder();
case WordEmbedding:
return unsupportedMetricsBuilder();
case CoxPH:
return new ModelMetricsRegressionCoxPH.MetricBuilderRegressionCoxPH("start", "stop", false, new String[0]);
case AnomalyDetection:
return new ModelMetricsAnomaly.MetricBuilderAnomaly();
case BinomialUplift:
return new ModelMetricsBinomialUplift.MetricBuilderBinomialUplift(domain, null);
default:
throw H2O.unimpl();
}
}
@Override
protected Frame adaptFrameForScore(Frame fr, boolean computeMetrics) {
if (hasBehavior(ModelBehavior.USE_MOJO_PREDICT)) {
// We do not need to adapt the frame in any way, MOJO will handle it itself
return fr;
} else
return super.adaptFrameForScore(fr, computeMetrics);
}
@Override
protected PredictScoreResult predictScoreImpl(Frame fr, Frame adaptFrm, String destination_key, Job j,
boolean computeMetrics, CFuncRef customMetricFunc) {
if (hasBehavior(ModelBehavior.USE_MOJO_PREDICT)) {
return predictScoreMojoImpl(fr, destination_key, j, computeMetrics);
} else
return super.predictScoreImpl(fr, adaptFrm, destination_key, j, computeMetrics, customMetricFunc);
}
private Iced getParamByName(String name) {
return Arrays.stream(this._parms._modelParameters)
.filter(p -> Objects.equals(p.name, name)).findAny().get().actual_value;
}
@Override
public double aic(double likelihood) {
// calculate AIC from the log-likelihood, specifically for GLM
if (!_algoName.equals("glm")) {
return Double.NaN;
} else {
long betasCount = Arrays.stream(((GlmMojoModelBase) this.genModel()).getBeta()).filter(b -> b != 0).count();
return -2 * likelihood + 2 * betasCount;
}
}
@Override
public double likelihood(double w, double y, double[] f) {
// calculate negative loglikelihood specifically for GLM
if(!_algoName.equals("glm")) {
return Double.NaN;
} else if (w == 0) {
return 0;
} else {
// time-consuming calculation for the final scoring for GLM model
return _glmParameters.likelihood(w, y, f);
}
}
PredictScoreResult predictScoreMojoImpl(Frame fr, String destination_key, Job<?> j, boolean computeMetrics) {
GenModel model = genModel();
String[] names = model.getOutputNames();
String[][] domains = model.getOutputDomains();
byte[] type = new byte[domains.length];
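    // columns that have a domain are categorical, everything else is numeric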
for (int i = 0; i < type.length; i++) {
type[i] = domains[i] != null ? Vec.T_CAT : Vec.T_NUM;
}
PredictScoreMojoTask bs = new PredictScoreMojoTask(computeMetrics, j);
Frame predictFr = bs.doAll(type, fr).outputFrame(Key.make(destination_key), names, domains);
return new PredictScoreResult(bs._mb, predictFr, predictFr);
}
private class PredictScoreMojoTask extends MRTask<PredictScoreMojoTask> {
private final boolean _computeMetrics;
private final Job<?> _j;
/** Output parameter: Metric builder */
private ModelMetrics.MetricBuilder<?> _mb;
public PredictScoreMojoTask(boolean computeMetrics, Job<?> j) {
_computeMetrics = computeMetrics;
_j = j;
}
@Override
public void map(Chunk[] cs, NewChunk[] ncs) {
if (isCancelled() || (_j != null && _j.stop_requested()))
return;
EasyPredictModelWrapper wrapper = makeWrapper();
GenModel model = wrapper.getModel();
String[] responseDomain = model.isSupervised() ? model.getDomainValues(model.getResponseName()) : null;
AdaptFrameParameters adaptFrameParameters = makeAdaptFrameParameters(
Parameters.CategoricalEncodingScheme.AUTO); // encoding will actually be handled by the MOJO itself
_mb = _computeMetrics ? GenericModel.this.makeMetricBuilder(responseDomain) : null;
try {
predict(wrapper, adaptFrameParameters, responseDomain, cs, ncs);
} catch (PredictException e) {
throw new RuntimeException(e);
}
}
private void predict(EasyPredictModelWrapper wrapper, AdaptFrameParameters adaptFrameParameters, String[] responseDomain,
Chunk[] cs, NewChunk[] ncs) throws PredictException {
final byte[] types = _fr.types();
final String offsetColumn = adaptFrameParameters.getOffsetColumn();
final String weightsColumn = adaptFrameParameters.getWeightsColumn();
final String responseColumn = adaptFrameParameters.getResponseColumn();
final String treatmentColumn = adaptFrameParameters.getTreatmentColumn();
final boolean isClassifier = wrapper.getModel().isClassifier();
final boolean isUplift = treatmentColumn != null;
final float[] yact;
if (isUplift) {
yact = new float[2];
} else {
yact = new float[1];
}
for (int row = 0; row < cs[0]._len; row++) {
RowData rowData = new RowData();
RowDataUtils.extractChunkRow(cs, _fr._names, types, row, rowData);
double offset = offsetColumn != null && rowData.containsKey(offsetColumn) ?
(double) rowData.get(offsetColumn) : 0.0;
double[] result = wrapper.predictRaw(rowData, offset);
for (int i = 0; i < ncs.length; i++) {
ncs[i].addNum(result[i]);
}
if (_mb != null) {
Object response = responseColumn != null && rowData.containsKey(responseColumn) ?
rowData.get(responseColumn) : null;
if (response == null)
continue;
double weight = weightsColumn != null && rowData.containsKey(weightsColumn) ?
(double) rowData.get(weightsColumn) : 1.0;
if (isClassifier) {
int idx = ArrayUtils.find(responseDomain, String.valueOf(response));
if (idx < 0)
continue;
yact[0] = (float) idx;
} else
yact[0] = ((Number) response).floatValue();
if (isUplift){
yact[1] = (float) rowData.get(treatmentColumn);
}
_mb.perRow(result, yact, weight, offset, GenericModel.this);
}
}
}
@Override
public void reduce(PredictScoreMojoTask bs) {
super.reduce(bs);
if (_mb != null) {
_mb.reduce(bs._mb);
}
}
EasyPredictModelWrapper makeWrapper() {
final EasyPredictModelWrapper.Config config = new EasyPredictModelWrapper.Config()
.setModel(genModel().internal_threadSafeInstance())
.setConvertUnknownCategoricalLevelsToNa(true);
return new EasyPredictModelWrapper(config);
}
}
private ModelMetrics.MetricBuilder<?> unsupportedMetricsBuilder() {
if (_parms._disable_algo_check) {
Log.warn("Model category `" + _output._modelCategory + "` currently doesn't support calculating model metrics. " +
"Model metrics will not be available.");
return new MetricBuilderGeneric(genModel().getPredsSize(_output._modelCategory));
} else {
throw new UnsupportedOperationException(_output._modelCategory + " is not supported.");
}
}
@Override
protected double[] score0(double[] data, double[] preds) {
return genModel().score0(data, preds);
}
@Override
protected double[] score0(double[] data, double[] preds, double offset) {
if (offset == 0) // MOJO doesn't like when score0 is called with 0 offset for problems that were trained without offset
return score0(data, preds);
else
return genModel().score0(data, offset, preds);
}
@Override
protected AdaptFrameParameters makeAdaptFrameParameters() {
CategoricalEncoding encoding = genModel().getCategoricalEncoding();
if (encoding.isParametrized()) {
throw new UnsupportedOperationException(
"Models with categorical encoding '" + encoding + "' are not currently supported for predicting and/or calculating metrics.");
}
return makeAdaptFrameParameters(Parameters.CategoricalEncodingScheme.fromGenModel(encoding));
}
protected AdaptFrameParameters makeAdaptFrameParameters(final Parameters.CategoricalEncodingScheme encodingScheme) {
final GenModel genModel = genModel();
final ModelDescriptor descriptor = getModelDescriptor();
return new AdaptFrameParameters() {
@Override
public Parameters.CategoricalEncodingScheme getCategoricalEncoding() {
return encodingScheme;
}
@Override
public String getWeightsColumn() {
return descriptor != null ? descriptor.weightsColumn() : null;
}
@Override
public String getOffsetColumn() {
return descriptor != null ? descriptor.offsetColumn() : null;
}
@Override
public String getFoldColumn() {
return descriptor != null ? descriptor.foldColumn() : null;
}
@Override
public String getResponseColumn() {
return genModel.isSupervised() ? genModel.getResponseName() : null;
}
@Override
public String getTreatmentColumn() {return descriptor != null ? descriptor.treatmentColumn() : null;}
@Override
public double missingColumnsType() {
return Double.NaN;
}
@Override
public int getMaxCategoricalLevels() {
return -1; // returned but won't be used
}
};
}
private ModelDescriptor getModelDescriptor() {
final GenModel genModel = genModel();
return genModel instanceof MojoModel ? ((MojoModel) genModel)._modelDescriptor : null;
}
@Override
protected String[] makeScoringNames() {
return genModel().getOutputNames();
}
@Override
protected boolean needsPostProcess() {
return false; // MOJO scoring includes post-processing
}
@Override
public GenericModelMojoWriter getMojo() {
if (_genModelSource instanceof MojoModelSource) {
return new GenericModelMojoWriter(_genModelSource.backingByteVec());
}
throw new IllegalStateException("Cannot create a MOJO from a POJO");
}
private GenModel genModel() {
GenericModel self = DKV.getGet(_key); // trick - always use instance cached in DKV to avoid model-reloading
return self._genModelSource.get();
}
@Override
protected BigScorePredict setupBigScorePredict(Model<GenericModel, GenericModelParameters, GenericModelOutput>.BigScore bs) {
GenModel genmodel = genModel();
assert genmodel != null;
return super.setupBigScorePredict(bs);
}
private static class MetricBuilderGeneric extends ModelMetrics.MetricBuilder<MetricBuilderGeneric> {
private MetricBuilderGeneric(int predsSize) {
_work = new double[predsSize];
}
@Override
public double[] perRow(double[] ds, float[] yact, Model m) {
return ds;
}
@Override
public ModelMetrics makeModelMetrics(Model m, Frame f, Frame adaptedFrame, Frame preds) {
return null;
}
}
@Override
protected Futures remove_impl(Futures fs, boolean cascade) {
if (_parms._path != null) {
// user loaded the model by providing a path (not a Frame holding MOJO data) => we need to do the clean-up
_genModelSource.remove(fs, cascade);
}
return super.remove_impl(fs, cascade);
}
private static abstract class GenModelSource<T extends Iced<T>> extends Iced<T> {
private final Key<Frame> _source;
private transient volatile GenModel _genModel;
GenModelSource(Key<Frame> source, GenModel genModel) {
_source = source;
_genModel = genModel;
}
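    // Lazily re-materializes the GenModel: the field is transient (not serialized with the Iced object), so after
    // deserialization it is reconstructed from the backing byte Vec using double-checked locking on the
    // volatile _genModel field.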
GenModel get() {
if (_genModel == null) {
synchronized (this) {
if (_genModel == null) {
_genModel = reconstructGenModel(backingByteVec());
}
}
}
assert _genModel != null;
return _genModel;
}
void remove(Futures fs, boolean cascade) {
Frame mojoFrame = _source.get();
if (mojoFrame != null) {
mojoFrame.remove(fs, cascade);
}
}
abstract GenModel reconstructGenModel(ByteVec bv);
ByteVec backingByteVec() {
return (ByteVec) _source.get().anyVec();
}
Key<Frame> getSourceKey() {
return _source;
}
ModelBehavior[] getModelBehaviors() {
return null;
}
}
private static class MojoModelSource extends GenModelSource<MojoModelSource> {
private final ModelBehavior[] _modelBehaviors;
MojoModelSource(Key<Frame> mojoSource, MojoModel mojoModel, ModelBehavior[] defaultModelBehaviors) {
super(mojoSource, mojoModel);
_modelBehaviors = mojoModeBehaviors(mojoModel, defaultModelBehaviors);
}
@Override
GenModel reconstructGenModel(ByteVec bv) {
return reconstructMojo(bv);
}
@Override
ModelBehavior[] getModelBehaviors() {
return _modelBehaviors;
}
static ModelBehavior[] mojoModeBehaviors(MojoModel mojoModel, ModelBehavior[] defaultModelBehaviors) {
boolean useMojoPredict = mojoModel.getCategoricalEncoding().isParametrized();
return useMojoPredict ?
ArrayUtils.append(defaultModelBehaviors, ModelBehavior.USE_MOJO_PREDICT)
:
defaultModelBehaviors;
}
}
private static class PojoModelSource extends GenModelSource<PojoModelSource> {
final String _model_id;
PojoModelSource(String modelId, Key<Frame> pojoSource, GenModel pojoModel) {
super(pojoSource, pojoModel);
_model_id = modelId;
}
@Override
GenModel reconstructGenModel(ByteVec bv) {
Key<Frame> pojoKey = getSourceKey();
try {
return PojoLoader.loadPojoFromSourceCode(bv, pojoKey, _model_id);
} catch (IOException e) {
throw new RuntimeException("Unable to load POJO source code from Vec " + pojoKey);
}
}
}
@Override
public Frame scoreContributions(Frame frame, Key<Frame> destination_key) {
return scoreContributions(frame, destination_key, null);
}
@Override
public Frame scoreContributions(Frame frame, Key<Frame> destination_key, Job<Frame> job) {
EasyPredictModelWrapper wrapper = makeWrapperWithContributions();
// keep only columns that the model actually needs
Frame adaptFrm = new Frame(frame);
GenModel model = wrapper.getModel();
String[] columnNames = model.getOrigNames() != null ? model.getOrigNames() : model.getNames();
adaptFrm.remove(ArrayUtils.difference(frame._names, columnNames));
String[] outputNames = wrapper.getContributionNames();
return new GenericScoreContributionsTask(wrapper)
.withPostMapAction(JobUpdatePostMap.forJob(job))
.doAll(outputNames.length, Vec.T_NUM, adaptFrm)
.outputFrame(destination_key, outputNames, null);
}
private class GenericScoreContributionsTask extends MRTask<GenericScoreContributionsTask> {
private transient EasyPredictModelWrapper _wrapper;
GenericScoreContributionsTask(EasyPredictModelWrapper wrapper) {
_wrapper = wrapper;
}
@Override
protected void setupLocal() {
if (_wrapper == null) {
_wrapper = makeWrapperWithContributions();
}
}
@Override
public void map(Chunk[] cs, NewChunk[] ncs) {
try {
predict(cs, ncs);
} catch (PredictException e) {
throw new RuntimeException(e);
}
}
private void predict(Chunk[] cs, NewChunk[] ncs) throws PredictException {
RowData rowData = new RowData();
byte[] types = _fr.types();
for (int i = 0; i < cs[0]._len; i++) {
RowDataUtils.extractChunkRow(cs, _fr._names, types, i, rowData);
float[] contributions = _wrapper.predictContributions(rowData);
NewChunk.addNums(ncs, contributions);
}
}
}
EasyPredictModelWrapper makeWrapperWithContributions() {
final EasyPredictModelWrapper.Config config;
try {
config = new EasyPredictModelWrapper.Config()
.setModel(genModel())
.setConvertUnknownCategoricalLevelsToNa(true)
.setEnableContributions(true);
} catch (IOException e) {
throw new RuntimeException(e);
}
return new EasyPredictModelWrapper(config);
}
@Override
protected String toJavaModelClassName() {
return ModelBuilder.make(_output._original_model_identifier, null, null).getClass()
.getSimpleName() + "Model";
}
@Override
protected String toJavaAlgo() {
return _output._original_model_identifier;
}
@Override
protected String toJavaUUID() {
return genModel().getUUID();
}
@Override
protected PojoWriter makePojoWriter() {
GenModel genModel = genModel();
if (!havePojo()) {
throw new UnsupportedOperationException("Only MOJO models can be converted to POJO.");
}
MojoModel mojoModel = (MojoModel) genModel;
ModelBuilder<?, ?, ?> builder = ModelBuilder.make(mojoModel._algoName, null, null);
return builder.makePojoWriter(this, mojoModel);
}
@Override
public boolean havePojo() {
GenModel genModel = genModel();
return genModel instanceof MojoModel;
}
boolean hasBehavior(ModelBehavior b) {
ModelBehavior[] modelBehaviors = _genModelSource.getModelBehaviors();
if (modelBehaviors == null)
return false;
return ArrayUtils.find(modelBehaviors, b) >= 0;
}
enum ModelBehavior {
USE_MOJO_PREDICT
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/generic/GenericModelMojoWriter.java
|
package hex.generic;
import hex.ModelMojoWriter;
import hex.genmodel.utils.IOUtils;
import water.api.StreamWriteOption;
import water.fvec.ByteVec;
import water.util.Log;
import java.io.*;
public class GenericModelMojoWriter extends ModelMojoWriter<GenericModel, GenericModelParameters, GenericModelOutput> {
private ByteVec _mojoBytes;
@SuppressWarnings("unused") // Called through reflection in ModelBuildersHandler
public GenericModelMojoWriter() {
}
public GenericModelMojoWriter(ByteVec mojoBytes) {
this._mojoBytes = mojoBytes;
}
@Override
public String mojoVersion() {
return "1.00";
}
@Override
protected void writeModelData() throws IOException {
// Do nothing on purpose
}
@Override
public void writeTo(final OutputStream os, StreamWriteOption... options) {
try (final InputStream inputStream = _mojoBytes.openStream(null); OutputStream outputStream = os) {
IOUtils.copyStream(inputStream, outputStream);
} catch (IOException e) {
Log.throwErr(e);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/generic/GenericModelOutput.java
|
package hex.generic;
import hex.*;
import hex.genmodel.attributes.*;
import hex.genmodel.attributes.metrics.*;
import hex.genmodel.descriptor.ModelDescriptor;
import hex.tree.isofor.ModelMetricsAnomaly;
import water.util.ArrayUtils;
import water.util.Log;
import water.util.TwoDimTable;
import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.Map;
public class GenericModelOutput extends Model.Output {
public final String _original_model_identifier;
public final String _original_model_full_name;
public final ModelCategory _modelCategory;
public final int _nfeatures;
public final double _defaultThreshold;
public TwoDimTable _variable_importances;
public GenericModelOutput(final ModelDescriptor modelDescriptor) {
_isSupervised = modelDescriptor.isSupervised();
_domains = modelDescriptor.scoringDomains();
_origDomains = modelDescriptor.getOrigDomains();
_hasOffset = modelDescriptor.offsetColumn() != null;
_hasWeights = modelDescriptor.weightsColumn() != null;
_hasFold = modelDescriptor.foldColumn() != null;
_hasTreatment = modelDescriptor.treatmentColumn() != null;
_modelClassDist = modelDescriptor.modelClassDist();
_priorClassDist = modelDescriptor.priorClassDist();
_names = modelDescriptor.columnNames();
_origNames = modelDescriptor.getOrigNames();
_modelCategory = modelDescriptor.getModelCategory();
_nfeatures = modelDescriptor.nfeatures();
_defaultThreshold = modelDescriptor.defaultThreshold();
_original_model_identifier = modelDescriptor.algoName();
_original_model_full_name = modelDescriptor.algoFullName();
}
public GenericModelOutput(final ModelDescriptor modelDescriptor, final ModelAttributes modelAttributes,
final Table[] reproducibilityInformation) {
this(modelDescriptor);
if (modelAttributes != null) {
_model_summary = convertTable(modelAttributes.getModelSummary());
_cross_validation_metrics_summary = convertTable(modelAttributes.getCrossValidationMetricsSummary());
if (modelAttributes instanceof SharedTreeModelAttributes) {
_variable_importances = convertVariableImportances(((SharedTreeModelAttributes) modelAttributes).getVariableImportances());
} else if (modelAttributes instanceof DeepLearningModelAttributes) {
_variable_importances = convertVariableImportances(((DeepLearningModelAttributes) modelAttributes).getVariableImportances());
} else if (modelAttributes instanceof ModelAttributesGLM) {
_variable_importances = convertVariableImportances(((ModelAttributesGLM) modelAttributes).getVariableImportances());
} else {
_variable_importances = null;
}
convertMetrics(modelAttributes, modelDescriptor);
_scoring_history = convertTable(modelAttributes.getScoringHistory());
}
if (reproducibilityInformation != null) {
_reproducibility_information_table = convertTables(reproducibilityInformation);
}
}
private void convertMetrics(final ModelAttributes modelAttributes, final ModelDescriptor modelDescriptor) {
// Training metrics
if (modelAttributes.getTrainingMetrics() != null) {
_training_metrics = convertModelMetrics(modelAttributes.getTrainingMetrics(), modelDescriptor, modelAttributes);
}
if (modelAttributes.getValidationMetrics() != null) {
_validation_metrics = (ModelMetrics) convertObjects(modelAttributes.getValidationMetrics(),
convertModelMetrics(modelAttributes.getValidationMetrics(), modelDescriptor, modelAttributes));
}
if (modelAttributes.getCrossValidationMetrics() != null) {
_cross_validation_metrics = (ModelMetrics) convertObjects(modelAttributes.getCrossValidationMetrics(),
convertModelMetrics(modelAttributes.getCrossValidationMetrics(), modelDescriptor, modelAttributes));
}
}
private ModelMetrics convertModelMetrics(final MojoModelMetrics mojoMetrics, final ModelDescriptor modelDescriptor,
final ModelAttributes modelAttributes) {
final ModelCategory modelCategory = modelDescriptor.getModelCategory();
switch (modelCategory) {
case Binomial:
assert mojoMetrics instanceof MojoModelMetricsBinomial;
final MojoModelMetricsBinomial binomial = (MojoModelMetricsBinomial) mojoMetrics;
final AUC2 auc = AUC2.emptyAUC();
auc._auc = binomial._auc;
auc._pr_auc = binomial._pr_auc;
auc._gini = binomial._gini;
if (mojoMetrics instanceof MojoModelMetricsBinomialGLM) {
assert modelAttributes instanceof ModelAttributesGLM;
final ModelAttributesGLM modelAttributesGLM = (ModelAttributesGLM) modelAttributes;
final MojoModelMetricsBinomialGLM glmBinomial = (MojoModelMetricsBinomialGLM) binomial;
return new ModelMetricsBinomialGLMGeneric(null, null, mojoMetrics._nobs, mojoMetrics._MSE,
_domains[_domains.length - 1], glmBinomial._sigma,
auc, binomial._logloss, convertTable(binomial._gains_lift_table),
customMetric(mojoMetrics), binomial._mean_per_class_error,
convertTable(binomial._thresholds_and_metric_scores), convertTable(binomial._max_criteria_and_metric_scores),
convertTable(binomial._confusion_matrix), glmBinomial._nullDegreesOfFreedom, glmBinomial._residualDegreesOfFreedom,
glmBinomial._resDev, glmBinomial._nullDev, glmBinomial._AIC, convertTable(modelAttributesGLM._coefficients_table),
glmBinomial._r2, glmBinomial._description, glmBinomial._loglikelihood);
} else {
return new ModelMetricsBinomialGeneric(null, null, mojoMetrics._nobs, mojoMetrics._MSE,
_domains[_domains.length - 1], binomial._sigma,
auc, binomial._logloss, convertTable(binomial._gains_lift_table),
customMetric(mojoMetrics), binomial._mean_per_class_error,
convertTable(binomial._thresholds_and_metric_scores), convertTable(binomial._max_criteria_and_metric_scores),
convertTable(binomial._confusion_matrix), binomial._r2, binomial._description);
}
case Multinomial:
assert mojoMetrics instanceof MojoModelMetricsMultinomial;
if (mojoMetrics instanceof MojoModelMetricsMultinomialGLM) {
assert modelAttributes instanceof ModelAttributesGLM;
final ModelAttributesGLM modelAttributesGLM = (ModelAttributesGLM) modelAttributes;
modelAttributesGLM.getModelParameters();
final MojoModelMetricsMultinomialGLM glmMultinomial = (MojoModelMetricsMultinomialGLM) mojoMetrics;
return new ModelMetricsMultinomialGLMGeneric(null, null, mojoMetrics._nobs, mojoMetrics._MSE,
_domains[_domains.length - 1], glmMultinomial._sigma,
convertTable(glmMultinomial._confusion_matrix), convertTable(glmMultinomial._hit_ratios),
glmMultinomial._logloss, customMetric(mojoMetrics),
glmMultinomial._mean_per_class_error, glmMultinomial._nullDegreesOfFreedom, glmMultinomial._residualDegreesOfFreedom,
glmMultinomial._resDev, glmMultinomial._nullDev, glmMultinomial._AIC, convertTable(modelAttributesGLM._coefficients_table),
glmMultinomial._r2, convertTable(glmMultinomial._multinomial_auc), convertTable(glmMultinomial._multinomial_aucpr),
MultinomialAucType.valueOf((String)modelAttributes.getParameterValueByName("auc_type")), glmMultinomial._description, glmMultinomial._loglikelihood);
} else {
final MojoModelMetricsMultinomial multinomial = (MojoModelMetricsMultinomial) mojoMetrics;
return new ModelMetricsMultinomialGeneric(null, null, mojoMetrics._nobs, mojoMetrics._MSE,
_domains[_domains.length - 1], multinomial._sigma,
convertTable(multinomial._confusion_matrix), convertTable(multinomial._hit_ratios),
multinomial._logloss, customMetric(mojoMetrics),
multinomial._mean_per_class_error, multinomial._r2, convertTable(multinomial._multinomial_auc), convertTable(multinomial._multinomial_aucpr),
MultinomialAucType.valueOf((String)modelAttributes.getParameterValueByName("auc_type")), multinomial._description);
}
case Regression:
assert mojoMetrics instanceof MojoModelMetricsRegression;
if (mojoMetrics instanceof MojoModelMetricsRegressionGLM) {
assert modelAttributes instanceof ModelAttributesGLM;
final ModelAttributesGLM modelAttributesGLM = (ModelAttributesGLM) modelAttributes;
final MojoModelMetricsRegressionGLM regressionGLM = (MojoModelMetricsRegressionGLM) mojoMetrics;
return new ModelMetricsRegressionGLMGeneric(null, null, regressionGLM._nobs, regressionGLM._MSE,
regressionGLM._sigma, regressionGLM._mae, regressionGLM._root_mean_squared_log_error, regressionGLM._mean_residual_deviance,
customMetric(regressionGLM), regressionGLM._r2,
regressionGLM._nullDegreesOfFreedom, regressionGLM._residualDegreesOfFreedom, regressionGLM._resDev,
regressionGLM._nullDev, regressionGLM._AIC, regressionGLM._loglikelihood, convertTable(modelAttributesGLM._coefficients_table));
} else {
MojoModelMetricsRegression metricsRegression = (MojoModelMetricsRegression) mojoMetrics;
return new ModelMetricsRegressionGeneric(null, null, metricsRegression._nobs, metricsRegression._MSE,
metricsRegression._sigma, metricsRegression._mae, metricsRegression._root_mean_squared_log_error, metricsRegression._mean_residual_deviance,
customMetric(mojoMetrics), mojoMetrics._description);
}
case AnomalyDetection:
assert mojoMetrics instanceof MojoModelMetricsAnomaly;
// There is no need to introduce new Generic alternatives to the original metric objects at the moment.
// The total values can be simply calculated. The extra calculation time is negligible.
MojoModelMetricsAnomaly metricsAnomaly = (MojoModelMetricsAnomaly) mojoMetrics;
return new ModelMetricsAnomaly(null, null, customMetric(mojoMetrics),
mojoMetrics._nobs, metricsAnomaly._mean_score * metricsAnomaly._nobs, metricsAnomaly._mean_normalized_score * metricsAnomaly._nobs,
metricsAnomaly._description);
case Ordinal:
assert mojoMetrics instanceof MojoModelMetricsOrdinal;
if (mojoMetrics instanceof MojoModelMetricsOrdinalGLM) {
assert modelAttributes instanceof ModelAttributesGLM;
final ModelAttributesGLM modelAttributesGLM = (ModelAttributesGLM) modelAttributes;
MojoModelMetricsOrdinalGLM ordinalMetrics = (MojoModelMetricsOrdinalGLM) mojoMetrics;
return new ModelMetricsOrdinalGLMGeneric(null, null, ordinalMetrics._nobs, ordinalMetrics._MSE,
ordinalMetrics._domain, ordinalMetrics._sigma, convertTable(ordinalMetrics._cm), ordinalMetrics._hit_ratios,
ordinalMetrics._logloss, customMetric(ordinalMetrics),
ordinalMetrics._r2, ordinalMetrics._nullDegreesOfFreedom, ordinalMetrics._residualDegreesOfFreedom, ordinalMetrics._resDev,
ordinalMetrics._nullDev, ordinalMetrics._AIC, ordinalMetrics._loglikelihood, convertTable(modelAttributesGLM._coefficients_table),
convertTable(ordinalMetrics._hit_ratio_table), ordinalMetrics._mean_per_class_error, ordinalMetrics._description);
} else {
MojoModelMetricsOrdinal ordinalMetrics = (MojoModelMetricsOrdinal) mojoMetrics;
return new ModelMetricsOrdinalGeneric(null, null, ordinalMetrics._nobs, ordinalMetrics._MSE,
ordinalMetrics._domain, ordinalMetrics._sigma, convertTable(ordinalMetrics._cm), ordinalMetrics._hit_ratios,
ordinalMetrics._logloss, customMetric(ordinalMetrics),
convertTable(ordinalMetrics._hit_ratio_table), ordinalMetrics._mean_per_class_error, ordinalMetrics._description);
}
case CoxPH:
assert mojoMetrics instanceof MojoModelMetricsRegressionCoxPH;
MojoModelMetricsRegressionCoxPH metricsCoxPH = (MojoModelMetricsRegressionCoxPH) mojoMetrics;
return new ModelMetricsRegressionCoxPH(null, null, metricsCoxPH._nobs, metricsCoxPH._MSE,
metricsCoxPH._sigma, metricsCoxPH._mae, metricsCoxPH._root_mean_squared_log_error, metricsCoxPH._mean_residual_deviance,
customMetric(mojoMetrics),
metricsCoxPH._concordance, metricsCoxPH._concordant, metricsCoxPH._discordant, metricsCoxPH._tied_y);
case BinomialUplift:
assert mojoMetrics instanceof MojoModelMetricsBinomialUplift;
MojoModelMetricsBinomialUplift metricsUplift = (MojoModelMetricsBinomialUplift) mojoMetrics;
AUUC.AUUCType auucType = AUUC.AUUCType.valueOf((String) modelAttributes.getParameterValueByName("auuc_type"));
AUUC auuc = createAUUC(auucType, metricsUplift._thresholds_and_metric_scores, metricsUplift._auuc_table, metricsUplift._aecu_table);
return new ModelMetricsBinomialUpliftGeneric(null, null, metricsUplift._nobs, _domains[_domains.length - 1],
metricsUplift._ate, metricsUplift._att, metricsUplift._atc, metricsUplift._sigma, auuc, customMetric(metricsUplift),
convertTable(metricsUplift._thresholds_and_metric_scores), convertTable(metricsUplift._auuc_table),
convertTable(metricsUplift._aecu_table), metricsUplift._description);
case Unknown:
case Clustering:
case AutoEncoder:
case DimReduction:
case WordEmbedding:
default:
return new ModelMetrics(null, null, mojoMetrics._nobs, mojoMetrics._MSE, mojoMetrics._description,
customMetric(mojoMetrics));
}
}
private static CustomMetric customMetric(MojoModelMetrics mojoModelMetrics) {
if (mojoModelMetrics._custom_metric_name == null)
return null;
return new CustomMetric(mojoModelMetrics._custom_metric_name, mojoModelMetrics._custom_metric_value);
}
@Override
public double defaultThreshold() {
return _defaultThreshold;
}
@Override
public ModelCategory getModelCategory() {
return _modelCategory; // Might be calculated as well, but the information in MOJO is the one to display.
}
@Override
public int nfeatures() {
return _nfeatures;
}
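  // Copies the values of same-named, assignment-compatible public fields from source to target via reflection.
  // Used above to copy any matching fields from the MOJO metrics object onto the converted ModelMetrics instance.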
private static Object convertObjects(final Object source, final Object target) {
final Class<?> targetClass = target.getClass();
final Field[] targetDeclaredFields = targetClass.getFields();
final Class<?> sourceClass = source.getClass();
final Field[] sourceDeclaredFields = sourceClass.getFields();
// Create a map for faster search afterwards
final Map<String, Field> sourceFieldMap = new HashMap<>(sourceDeclaredFields.length);
for (Field sourceField : sourceDeclaredFields) {
sourceFieldMap.put(sourceField.getName(), sourceField);
}
for (int i = 0; i < targetDeclaredFields.length; i++) {
final Field targetField = targetDeclaredFields[i];
final String targetFieldName = targetField.getName();
final Field sourceField = sourceFieldMap.get(targetFieldName);
if(sourceField == null) {
Log.debug(String.format("Field '%s' not found in the source object. Ignoring.", targetFieldName));
continue;
}
final boolean targetAccessible = targetField.isAccessible();
final boolean sourceAccessible = sourceField.isAccessible();
try{
targetField.setAccessible(true);
sourceField.setAccessible(true);
if(targetField.getType().isAssignableFrom(sourceField.getType())){
targetField.set(target, sourceField.get(source));
}
} catch (IllegalAccessException e) {
Log.err(e);
continue;
} finally {
targetField.setAccessible(targetAccessible);
sourceField.setAccessible(sourceAccessible);
}
}
return target;
}
private static TwoDimTable convertVariableImportances(final VariableImportances variableImportances) {
if(variableImportances == null) return null;
TwoDimTable varImps = ModelMetrics.calcVarImp(variableImportances._importances, variableImportances._variables);
return varImps;
}
private static TwoDimTable[] convertTables(final Table[] inputTables) {
if (inputTables == null)
return null;
TwoDimTable[] tables = new TwoDimTable[inputTables.length];
for (int i = 0; i < inputTables.length; i++) {
tables[i] = convertTable(inputTables[i]);
}
return tables;
}
private static TwoDimTable convertTable(final Table convertedTable){
if(convertedTable == null) return null;
final TwoDimTable table = new TwoDimTable(convertedTable.getTableHeader(), convertedTable.getTableDescription(),
convertedTable.getRowHeaders(), convertedTable.getColHeaders(), convertedTable.getColTypesString(),
convertedTable.getColumnFormats(), convertedTable.getColHeaderForRowHeaders());
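    // note: the copy loop below transposes the indices - the genmodel Table is addressed as getCell(column, row),
    // while TwoDimTable.set expects (row, column)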
for (int i = 0; i < convertedTable.columns(); i++) {
for (int j = 0; j < convertedTable.rows(); j++) {
table.set(j, i, convertedTable.getCell(i,j));
}
}
return table;
}
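  // Rebuilds the AUUC object from the MOJO's serialized tables: columns are located by header name for each
  // AUUC type, and index 0 is filled with the values of whichever type corresponds to the AUTO setting.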
private static AUUC createAUUC(AUUC.AUUCType auucType, Table thresholds_and_metric_scores, Table auuc_table, Table aecu_table){
int nbins = thresholds_and_metric_scores.rows();
double[] ths = new double[nbins];
long[] freq = new long[nbins];
AUUC.AUUCType[] auucTypes = AUUC.AUUCType.values();
double[][] uplift = new double[auucTypes.length][nbins];
double[][] upliftNorm = new double[auucTypes.length][nbins];
double[][] upliftRand = new double[auucTypes.length][nbins];
double[] auuc = new double[auucTypes.length];
double[] auucNorm = new double[auucTypes.length];
double[] auucRand = new double[auucTypes.length];
double[] aecu = new double[auucTypes.length];
String[] thrHeader = thresholds_and_metric_scores.getColHeaders();
// threshold column index
int thrIndex = ArrayUtils.find(thrHeader, "thresholds");
int freqIndex = ArrayUtils.find(thrHeader, "n");
// uplift type indices
int[] upliftIndices = new int[auucTypes.length];
int[] upliftNormIndices = new int[auucTypes.length];
int[] upliftRandIndices = new int[auucTypes.length];
for (int i = 1; i < auucTypes.length; i++) {
String auucTypeName = auucTypes[i].name();
upliftIndices[i] = ArrayUtils.find(thrHeader, auucTypeName);
upliftNormIndices[i] = ArrayUtils.find(thrHeader, auucTypeName+"_normalized");
upliftRandIndices[i] = ArrayUtils.find(thrHeader, auucTypeName+"_random");
// AUTO setting
if(auucTypeName.equals(AUUC.AUUCType.nameAuto())){
upliftIndices[0] = upliftIndices[i];
upliftNormIndices[0] = upliftNormIndices[i];
upliftRandIndices[0] = upliftRandIndices[i];
}
}
// fill thresholds and uplift values from table
for (int i = 0; i < thresholds_and_metric_scores.rows(); i++) {
ths[i] = (double) thresholds_and_metric_scores.getCell(thrIndex, i);
freq[i] = (long) thresholds_and_metric_scores.getCell(freqIndex, i);
for (int j = 0; j < auucTypes.length; j++) {
uplift[j][i] = (double) thresholds_and_metric_scores.getCell(upliftIndices[j], i);
upliftNorm[j][i] = (double) thresholds_and_metric_scores.getCell(upliftNormIndices[j], i);
upliftRand[j][i] = (double) thresholds_and_metric_scores.getCell(upliftRandIndices[j], i);
}
}
// fill auuc values and aecu values
String[] auucHeader = auuc_table.getColHeaders();
String[] aecuHeader = aecu_table.getColHeaders();
for (int i = 1; i < auucTypes.length; i++) {
AUUC.AUUCType type = auucTypes[i];
String auucTypeName = type.name();
int colIndex = ArrayUtils.find(auucHeader, auucTypeName);
auuc[i] = (double) auuc_table.getCell(colIndex, 0);
auucNorm[i] = (double) auuc_table.getCell(colIndex, 1);
auucRand[i] = (double) auuc_table.getCell(colIndex, 2);
colIndex = ArrayUtils.find(aecuHeader, auucTypeName);
aecu[i] = (double) aecu_table.getCell(colIndex, 0);
if(auucTypeName.equals(AUUC.AUUCType.nameAuto())){
auuc[0] = auuc[i];
auucNorm[0] = auucNorm[i];
auucRand[0] = auucRand[i];
aecu[0] = aecu[i];
}
}
return new AUUC(ths, freq, auuc, auucNorm, auucRand, aecu, auucType, uplift, upliftNorm, upliftRand);
}
@Override
public boolean hasTreatment() {
return super.hasTreatment();
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/generic/GenericModelParameters.java
|
package hex.generic;
import hex.Model;
import hex.genmodel.attributes.parameters.ColumnSpecifier;
import hex.genmodel.attributes.parameters.KeyValue;
import hex.genmodel.attributes.parameters.ModelParameter;
import hex.genmodel.attributes.parameters.ParameterKey;
import water.Iced;
import water.IcedWrapper;
import water.Key;
import water.api.schemas3.FrameV3;
import water.api.schemas3.KeyValueV3;
import water.api.schemas3.ModelParameterSchemaV3;
import water.fvec.Frame;
import water.util.Log;
public class GenericModelParameters extends Model.Parameters {
/**
* Path of the file with embedded model
*/
public String _path;
/**
* Key to the file with embedded model
*/
public Key<Frame> _model_key;
/**
* Skip the check for white-listed algorithms; this allows loading any MOJO.
* Use at your own risk - unsupported.
*/
public boolean _disable_algo_check;
/**
* Generic model parameters - might contain any parameters based on the state of the model in the training phase.
*/
public ModelParameterSchemaV3[] _modelParameters;
protected static ModelParameterSchemaV3[] convertParameters(final ModelParameter[] originalParams) {
final ModelParameterSchemaV3[] convertedParams = new ModelParameterSchemaV3[originalParams.length];
for (int i = 0; i < originalParams.length; i++) {
final ModelParameter originalParam = originalParams[i];
final ModelParameterSchemaV3 convertedParam = new ModelParameterSchemaV3();
// Hand-built mapping for better performance compared to reflection
convertedParam.name = originalParam.name;
convertedParam.label = originalParam.label;
convertedParam.is_mutually_exclusive_with = originalParam.is_mutually_exclusive_with;
convertedParam.is_member_of_frames = originalParam.is_member_of_frames;
convertedParam.values = originalParam.values;
convertedParam.help = originalParam.help;
convertedParam.level = originalParam.level;
convertedParam.gridable = originalParam.gridable;
convertedParam.required = originalParam.required;
convertedParam.type = originalParam.type;
convertedParam.actual_value = convertObjectToIced(originalParam.actual_value);
convertedParam.default_value = convertObjectToIced(originalParam.default_value);
convertedParam.input_value = convertObjectToIced(originalParam.input_value);
convertedParams[i] = convertedParam;
}
return convertedParams;
}
private static Iced convertObjectToIced(final Object original) {
final Iced converted;
if (original == null) {
converted = null;
} else if (original instanceof ParameterKey) {
final ParameterKey parameterKey = (ParameterKey) original;
converted = Key.makeUserHidden(parameterKey.getName());
} else if (original instanceof ColumnSpecifier) {
final ColumnSpecifier columnSpecifier = (ColumnSpecifier) original;
converted = new FrameV3.ColSpecifierV3(columnSpecifier.getColumnName(), columnSpecifier.getIsMemberOfFrames());
} else if (original instanceof KeyValue) {
final KeyValue keyValue = (KeyValue) original;
converted = new hex.KeyValue(keyValue.key, keyValue.value);
} else if ((original instanceof Object[]) && !(original instanceof String[])) {
Object[] originalArr = (Object[]) original;
Iced[] convertedArr = new Iced[originalArr.length];
for (int i = 0; i < originalArr.length; i++) {
convertedArr[i] = convertObjectToIced(originalArr[i]);
}
converted = new IcedWrapper(convertedArr);
} else {
converted = new IcedWrapper(original);
}
return converted;
}
@Override
public String algoName() {
return "Generic";
}
@Override
public String fullName() {
return "Import MOJO Model";
}
@Override
public String javaName() {
return GenericModel.class.getName();
}
@Override
public long progressUnits() {
return 100;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/generic/PojoLoader.java
|
package hex.generic;
import hex.genmodel.GenModel;
import org.apache.commons.io.IOUtils;
import water.Key;
import water.fvec.ByteVec;
import water.fvec.Frame;
import water.util.JCodeGen;
import water.util.Log;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Method;
import java.net.URI;
import java.nio.charset.Charset;
import java.util.UUID;
class PojoLoader {
private static final String POJO_EXT = ".java";
static GenModel loadPojoFromSourceCode(ByteVec sourceVec, Key<Frame> pojoKey, String modelId) throws IOException {
final String pojoCode;
try (InputStream is = sourceVec.openStream()) {
pojoCode = IOUtils.toString(is, Charset.defaultCharset());
}
String className = null;
try {
className = inferClassName(pojoKey);
} catch (Exception e) {
Log.warn("Exception while trying to automatically infer POJO class name", e);
}
if (className == null) {
Log.warn("Unable automatically infer POJO class name, model_id = `" + modelId + "` will be used instead. " +
"If you encounter further errors make sure you set model_id to the correct class name in import/upload call.");
className = modelId;
}
try {
return compileAndInstantiate(className, pojoCode);
} catch (Exception e) {
boolean canCompile = JCodeGen.canCompile();
boolean selfCheck = compilationSelfCheck();
throw new IllegalArgumentException(String.format(
"POJO compilation failed: " +
"Please make sure key '%s' contains a valid POJO source code for class '%s' and you are running a Java JDK " +
"(compiler present: '%s', self-check passed: '%s').",
pojoKey, className, canCompile, selfCheck), e);
}
}
static String inferClassName(Key<Frame> pojoKey) {
String path = URI.create(pojoKey.toString()).getPath();
String fileName = new File(path).getName();
if (fileName.endsWith(POJO_EXT)) {
return fileName.substring(0, fileName.length() - POJO_EXT.length());
}
return null;
}
@SuppressWarnings("unchecked")
static <T> T compileAndInstantiate(String className, String src) throws Exception {
Class<?> clz = JCodeGen.compile(className, src, false);
return (T) clz.newInstance();
}
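  // Sanity check that a Java compiler is actually available at runtime: compiles and invokes a trivial
  // generated class and verifies the returned constant.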
static boolean compilationSelfCheck() {
final String cls = "SelfCheck_" + UUID.randomUUID().toString().replaceAll("-","_");
final String src = "public class " + cls + " { public double score0() { return Math.E; } }";
try {
Object o = compileAndInstantiate(cls, src);
Method m = o.getClass().getMethod("score0");
Object result = m.invoke(o);
return result instanceof Double && (Double) result == Math.E;
} catch (Exception e) {
Log.err("Compilation self-check failed", e);
return false;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/glm/CoefIndices.java
|
package hex.glm;
public interface CoefIndices {
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/glm/ComputationState.java
|
package hex.glm;
import hex.DataInfo;
import hex.glm.GLM.BetaConstraint;
import hex.glm.GLM.GLMGradientInfo;
import hex.glm.GLM.GLMGradientSolver;
import hex.glm.GLMModel.GLMParameters;
import hex.glm.GLMModel.GLMParameters.Family;
import hex.gram.Gram;
import hex.optimization.ADMM;
import hex.optimization.OptimizationUtils.GradientInfo;
import hex.optimization.OptimizationUtils.GradientSolver;
import jsr166y.ForkJoinTask;
import jsr166y.RecursiveAction;
import water.H2O;
import water.H2ORuntime;
import water.Job;
import water.MemoryManager;
import water.util.ArrayUtils;
import water.util.IcedHashMap;
import water.util.Log;
import water.util.MathUtils;
import java.util.*;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static hex.glm.ComputationState.GramGrad.findZeroCols;
import static hex.glm.ConstrainedGLMUtils.*;
import static hex.glm.GLMModel.GLMParameters.Family.gaussian;
import static hex.glm.GLMUtils.calSmoothNess;
import static hex.glm.GLMUtils.copyGInfo;
import static water.util.ArrayUtils.*;
public final class ComputationState {
private static final double R2_EPS = 1e-7;
public static final double EPS_CS = 1e-6;
public static final double EPS_CS_SQUARE = EPS_CS*EPS_CS;
private static final int MIN_PAR = 1000;
final boolean _intercept;
final int _nbetas;
public final GLMParameters _parms;
private BetaConstraint _bc;
double _alpha;
double[] _ymu;
double [] _u;
private double [] _zValues;
private boolean _dispersionEstimated;
boolean _allIn;
int _iter;
private double _lambda = 0;
private double _lambdaMax = Double.NaN;
private GLMGradientInfo _ginfo; // gradient info excluding l1 penalty
private double _likelihood;
private double _gradientErr;
private boolean _lambdaNull; // true if lambda was not provided by user
private double _gMax; // store max value of original gradient without dividing by Math.max(1e-2, _parms._alpha[0])
private DataInfo _activeData;
private BetaConstraint _activeBC;
LinearConstraints[] _equalityConstraintsLinear = null;
LinearConstraints[] _lessThanEqualToConstraintsLinear = null;
LinearConstraints[] _equalityConstraintsBeta = null;
LinearConstraints[] _lessThanEqualToConstraintsBeta = null;
LinearConstraints[] _equalityConstraints = null;
LinearConstraints[] _lessThanEqualToConstraints = null;
double[] _lambdaEqual;
double[] _lambdaLessThanEqualTo;
ConstraintsDerivatives[] _derivativeEqual = null;
ConstraintsDerivatives[] _derivativeLess = null;
ConstraintsGram[] _gramEqual = null;
ConstraintsGram[] _gramLess = null;
private final GLM.BetaInfo _modelBetaInfo;
private double[] _beta; // vector of coefficients corresponding to active data
final DataInfo _dinfo;
private GLMGradientSolver _gslvr;
private final Job _job;
private int _activeClass = -1;
double[][][] _penaltyMatrix;
int[][] _gamBetaIndices;
int _totalBetaLength; // actual coefficient length without taking into account active columns only
int _betaLengthPerClass;
public boolean _noReg;
public ConstrainedGLMUtils.ConstraintGLMStates _csGLMState;
public ComputationState(Job job, GLMParameters parms, DataInfo dinfo, BetaConstraint bc, GLM.BetaInfo bi){
_job = job;
_parms = parms;
_bc = bc;
_activeBC = _bc;
_dinfo = dinfo;
_activeData = _dinfo;
_intercept = _parms._intercept;
_alpha = _parms._alpha[0];
_nbetas = bi._nBetas;
_betaLengthPerClass = dinfo.fullN()+1;
_totalBetaLength = _betaLengthPerClass * _nbetas;
_modelBetaInfo = bi;
}
/**
* This method calculates
* 1. the contribution of constraints to the gradient;
* 2. the contribution of ||h(beta)||^2 to the gradient and the hessian.
*
* Note that this calculation is only needed once since the contributions to the derivative and hessian depend only
* on the values of the linear constraint coefficients and not on the actual GLM model parameters. Refer to the doc,
* section VI.
*/
public void initConstraintDerivatives(LinearConstraints[] equalityConstraints, LinearConstraints[] lessThanEqualToConstraints,
List<String> coeffNames) {
boolean hasEqualityConstraints = equalityConstraints != null;
boolean hasLessConstraints = lessThanEqualToConstraints != null;
_derivativeEqual = hasEqualityConstraints ? calDerivatives(equalityConstraints, coeffNames) : null;
_derivativeLess = hasLessConstraints ? calDerivatives(lessThanEqualToConstraints, coeffNames) : null;
// contribution to gradient and hessian from ||h(beta)||^2 without C, stays constant once calculated, active status can change
_gramEqual = hasEqualityConstraints ? calGram(_derivativeEqual) : null;
_gramLess = hasLessConstraints ? calGram(_derivativeLess) : null;
}
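  // Illustrative call order (a sketch based on the Javadoc above and below; variable names are hypothetical):
  //   state.initConstraintDerivatives(eqConstraints, leConstraints, coefNames);  // once, after constraints are built
  //   // ... inside the solver loop, after the coefficients change:
  //   state.updateConstraintInfo(eqConstraints, leConstraints);                  // refresh the active flags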
/***
* Whenever the GLM coefficients change, the constraint values change and active constraints can become inactive
* and vice versa. The active status of the derivatives and 2nd derivatives, which are part of the
* ComputationState, can change as well. The purpose of this method is to update the active status of the
* constraint derivatives (transpose(lambda)*h(beta)) and of the 2nd order derivatives of
* (ck/2*transpose(h(beta))*h(beta)).
*/
public void updateConstraintInfo(LinearConstraints[] equalityConstraints, LinearConstraints[] lessThanEqualToConstraints) {
updateDerivativeActive(_derivativeEqual, _gramEqual, equalityConstraints);
updateDerivativeActive(_derivativeLess, _gramLess, lessThanEqualToConstraints);
}
public void updateDerivativeActive(ConstraintsDerivatives[] derivativesConst, ConstraintsGram[] gramConst,
LinearConstraints[] constraints) {
if (constraints != null) {
IntStream.range(0, derivativesConst.length).forEach(index -> {
derivativesConst[index]._active = constraints[index]._active;
gramConst[index]._active = constraints[index]._active;
});
}
}
public void resizeConstraintInfo(LinearConstraints[] equalityConstraints,
LinearConstraints[] lessThanEqualToConstraints) {
boolean hasEqualityConstraints = _derivativeEqual != null;
boolean hasLessConstraints = _derivativeLess != null;
List<String> coeffNames = Arrays.stream(_activeData.coefNames()).collect(Collectors.toList());
_derivativeEqual = hasEqualityConstraints ? calDerivatives(equalityConstraints, coeffNames) : null;
_derivativeLess = hasLessConstraints ? calDerivatives(lessThanEqualToConstraints, coeffNames) : null;
_gramEqual = hasEqualityConstraints ? calGram(_derivativeEqual) : null;
_gramLess = hasLessConstraints ? calGram(_derivativeLess) : null;
}
public ComputationState(Job job, GLMParameters parms, DataInfo dinfo, BetaConstraint bc, GLM.BetaInfo bi,
double[][][] penaltyMat, int[][] gamColInd){
this (job, parms, dinfo, bc, bi);
_penaltyMatrix = penaltyMat;
_gamBetaIndices = gamColInd;
_lambdaNull = (_parms._lambda==null) && !(_parms._lambda_search);
}
// copy over parameters from _model to _state for checkpointing
// gist of this method is to restore the _state to be the same as before
void copyCheckModel2State(GLMModel model, int[][] _gamColIndices) {
GLMModel.GLMOutput modelOutput = model._output;
int submodelInd;
int coefLen = _nbetas > 2 ? (_dinfo.fullN() + 1) * _nbetas : (_dinfo.fullN() + 1);
if (modelOutput._submodels.length > 1) // lambda search or multiple alpha/lambda cases
submodelInd = modelOutput._submodels.length - 1; // submodel where the model building ends
else // no lambda search or multiple alpha/lambda case
submodelInd = 0;
setIter(modelOutput._submodels[submodelInd].iteration);
setAlpha(modelOutput._submodels[submodelInd].alpha_value);
if (submodelInd > 0) {
int preCurrSubmodelInd = gaussian.equals(_parms._family) ? submodelInd : (submodelInd - 1);
_activeData._activeCols = modelOutput._submodels[preCurrSubmodelInd].idxs;
double[] betaExpand = Family.multinomial.equals(_parms._family)
? ArrayUtils.expandAndScatter(modelOutput._submodels[preCurrSubmodelInd].beta, coefLen, _activeData._activeCols)
: expandBeta(modelOutput._submodels[preCurrSubmodelInd].beta);
GLMGradientInfo ginfo = new GLMGradientSolver(_job, _parms, _dinfo, 0, activeBC(), _modelBetaInfo, _penaltyMatrix,
_gamColIndices).getGradient(betaExpand); // gradient obtained with zero penalty
_activeData._activeCols = null;
updateState(betaExpand, ginfo);
setLambdaSimple(_parms._lambda[preCurrSubmodelInd]);
}
// this part must be done for single model before setting coefficients
if (!gaussian.equals(_parms._family)) // will build for new lambda for gaussian
setLambda(modelOutput._submodels[submodelInd].lambda_value);
// update _state with last submodelInd coefficients
double[] expandedBeta = modelOutput._submodels[submodelInd].idxs == null
? modelOutput._submodels[submodelInd].beta
: ArrayUtils.expandAndScatter(modelOutput._submodels[submodelInd].beta, coefLen,
modelOutput._submodels[submodelInd].idxs);
GLMGradientInfo ginfo = new GLMGradientSolver(_job, _parms, _dinfo, 0, activeBC(), _modelBetaInfo,
_penaltyMatrix, _gamColIndices).getGradient(expandedBeta); // gradient obtained with zero penalty
updateState(expandedBeta, ginfo);
// make sure model._betaCndCheckpoint is of the right size
if (model._betaCndCheckpoint != null) {
if (_activeData._activeCols == null || (_activeData._activeCols.length != model._betaCndCheckpoint.length)) {
double[] betaCndCheckpoint = ArrayUtils.expandAndScatter(model._betaCndCheckpoint, coefLen,
modelOutput._submodels[submodelInd].idxs); // expand betaCndCheckpoint out
if (_activeData._activeCols != null) // contract the betaCndCheckpoint to the right activeCol length
betaCndCheckpoint = extractSubRange(betaCndCheckpoint.length, 0, activeData()._activeCols, betaCndCheckpoint);
model._betaCndCheckpoint = betaCndCheckpoint;
}
}
}
public void setZValues(double[] zValues, boolean dispersionEstimated) {
_zValues = zValues;
_dispersionEstimated = dispersionEstimated;
}
public boolean getLambdaNull() { return _lambdaNull; }
public GLMGradientSolver gslvr(){return _gslvr;}
public double lambda(){return _lambda;}
public double alpha() {return _alpha;}
public double[] zValues() {return _zValues;}
public boolean dispersionEstimated() {return _dispersionEstimated;}
public void setLambdaMax(double lmax) {
_lambdaMax = lmax;
}
public void setgMax(double gmax) {
_gMax = gmax;
}
public void setAlpha(double alpha) {
_alpha=alpha;
setLambdaMax(_gMax/Math.max(1e-2,alpha)); // need to set _lmax every time alpha value changes
}
public void setLambda(double lambda) {
adjustToNewLambda(0, _lambda);
// strong rules are to be applied on the gradient with no l2 penalty
// NOTE: we start with lambdaOld being 0, not lambda_max
// non-recursive strong rules should use lambdaMax instead of _lambda
// However, it seems to be working nicely to use 0 instead and be more aggressive on the predictor pruning
// (should be safe as we check the KKTs anyway)
applyStrongRules(lambda, _lambda);
_lambda = lambda;
if (_penaltyMatrix == null)
_gslvr = new GLMGradientSolver(_job, _parms, _activeData, l2pen(), _activeBC, _modelBetaInfo);
else
_gslvr = new GLMGradientSolver(_job, _parms, _activeData, l2pen(), _activeBC, _modelBetaInfo, _penaltyMatrix, _gamBetaIndices);
adjustToNewLambda(lambda, 0);
}
public double [] beta(){
if(_activeClass != -1)
return betaMultinomial(_activeClass,_beta);
return _beta;
}
public GLMGradientInfo ginfo(){return _ginfo == null?(_ginfo = gslvr().getGradient(beta())):_ginfo;}
public BetaConstraint activeBC(){return _activeBC;}
public double likelihood() {return _likelihood;}
public boolean ginfoNull() {return _ginfo==null;}
public DataInfo activeData(){
if(_activeClass != -1)
return activeDataMultinomial(_activeClass);
return _activeData;
}
public DataInfo activeDataMultinomial(){return _activeData;}
public void dropActiveData(){_activeData = null;}
public String toString() {
return "iter=" + _iter + " lmb=" + GLM.lambdaFormatter.format(_lambda) + " alpha=" +
GLM.lambdaFormatter.format(_alpha)+ " obj=" + MathUtils.roundToNDigits(objective(),4) + " imp=" +
GLM.lambdaFormatter.format(_relImprovement) + " bdf=" + GLM.lambdaFormatter.format(_betaDiff);
}
private void adjustToNewLambda(double lambdaNew, double lambdaOld) {
double ldiff = lambdaNew - lambdaOld;
if(ldiff == 0 || l2pen() == 0) return;
double l2pen = .5*ArrayUtils.l2norm2(_beta,true);
if (_parms._family==Family.ordinal)
l2pen = l2pen/ _nbetas; // need only one set of parameters
if(l2pen > 0) {
if (_ginfo == null) _ginfo = ginfo();
if(_parms._family == Family.multinomial || _parms._family == Family.ordinal) {
l2pen = 0;
int off = 0;
for(int c = 0; c < _nbetas; ++c) {
DataInfo activeData = activeDataMultinomial(c);
for (int i = 0; i < activeData.fullN(); ++i) {
double b = _beta[off + i];
_ginfo._gradient[off + i] += ldiff * b;
l2pen += b*b;
}
if (_parms._family == Family.ordinal) // one beta for all classes
break;
off += activeData.fullN()+1;
}
l2pen *= .5;
} else for(int i = 0; i < _activeData.fullN(); ++i)
_ginfo._gradient[i] += ldiff*_beta[i];
}
_ginfo = new GLMGradientInfo(_ginfo._likelihood, _ginfo._objVal + ldiff * l2pen, _ginfo._gradient);
}
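  // The elastic-net penalty used here is lambda * (alpha * ||beta||_1 + (1 - alpha)/2 * ||beta||_2^2),
  // hence the effective l1 strength below is alpha*lambda and the l2 strength is (1-alpha)*lambda.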
public double l1pen() {return _alpha*_lambda;}
public double l2pen() {return (1-_alpha)*_lambda;}
/**
* Apply strong rules to filter out expected inactive (with zero coefficient) predictors.
*
* Updates _activeData, _activeBC, _beta and _ginfo in place to reflect the expected active predictors.
*/
protected void applyStrongRules(double lambdaNew, double lambdaOld) {
lambdaNew = Math.min(_lambdaMax,lambdaNew);
lambdaOld = Math.min(_lambdaMax,lambdaOld);
if (_parms._family == Family.multinomial || _parms._family == Family.ordinal/* && _parms._solver != GLMParameters.Solver.L_BFGS */) {
applyStrongRulesMultinomial(lambdaNew, lambdaOld);
return;
}
int P = _dinfo.fullN();
_activeBC = _bc;
_activeData = _activeData != null?_activeData:_dinfo;
// keep all predictors for the case of beta constraints or linear constraints
_allIn = _allIn || _alpha*lambdaNew == 0 || _activeBC.hasBounds() || _parms._linear_constraints != null;
if (!_allIn) {
int newlySelected = 0;
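      // sequential strong-rule screening: a predictor j is kept as a candidate when |gradient_j| exceeds
      // rhs = alpha * (2*lambdaNew - lambdaOld), clamped at 0; everything below the threshold is expected
      // to have a zero coefficient at lambdaNew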
final double rhs = Math.max(0,_alpha * (2 * lambdaNew - lambdaOld));
int [] newCols = MemoryManager.malloc4(P);
int j = 0;
int[] oldActiveCols = _activeData._activeCols == null ? new int[]{P} : _activeData.activeCols();
for (int i = 0; i < P; ++i) {
if(j < oldActiveCols.length && oldActiveCols[j] == i)
j++;
else if (_ginfo._gradient[i] > rhs || -_ginfo._gradient[i] > rhs)
newCols[newlySelected++] = i; // choose active columns here
}
if(_parms._max_active_predictors != -1 && (oldActiveCols.length + newlySelected -1) > _parms._max_active_predictors){
Integer [] bigInts = ArrayUtils.toIntegers(newCols, 0, newlySelected);
Arrays.sort(bigInts, new Comparator<Integer>() {
@Override
public int compare(Integer o1, Integer o2) {
return (int)Math.signum(_ginfo._gradient[o2.intValue()]*_ginfo._gradient[o2.intValue()] - _ginfo._gradient[o1.intValue()]*_ginfo._gradient[o1.intValue()]);
}
});
newCols = ArrayUtils.toInt(bigInts,0,_parms._max_active_predictors - oldActiveCols.length + 1);
Arrays.sort(newCols);
} else newCols = Arrays.copyOf(newCols,newlySelected);
newCols = ArrayUtils.sortedMerge(oldActiveCols,newCols);
// merge already active columns in
int active = newCols.length;
_allIn = active == P;
if(!_allIn) {
int [] cols = newCols;
assert cols[active-1] == P; // intercept is always selected, even if it is false (it's gonna be dropped later, it is needed for other stuff too)
_beta = ArrayUtils.select(_beta, cols);
if(_u != null) _u = ArrayUtils.select(_u,cols);
_activeData = _dinfo.filterExpandedColumns(cols);
assert _activeData.activeCols().length == _beta.length;
assert _u == null || _activeData.activeCols().length == _u.length;
_ginfo = new GLMGradientInfo(_ginfo._likelihood, _ginfo._objVal, ArrayUtils.select(_ginfo._gradient, cols));
_activeBC = _bc.filterExpandedColumns(_activeData.activeCols());
_gslvr = _penaltyMatrix == null ? new GLMGradientSolver(_job,_parms,_activeData,(1-_alpha)*_lambda,_bc,_modelBetaInfo)
: new GLMGradientSolver(_job, _parms, _dinfo, (1 - _alpha) * _lambda, _bc, _modelBetaInfo, _penaltyMatrix,
_gamBetaIndices);
assert _beta.length == cols.length;
return;
}
}
_activeData = _dinfo;
}
public boolean _lsNeeded = false;
public DataInfo [] _activeDataMultinomial;
public DataInfo activeDataMultinomial(int c) {return _activeDataMultinomial != null?_activeDataMultinomial[c]:_dinfo;}
/**
* Returns a double array extracted from src (which spans active and non-active columns) that contains only the
* active columns listed in ids, for class c.
*
* @param N   number of coefficients per class
* @param c   class index
* @param ids active column indices within the class (null means all columns are active)
* @param src full coefficient array spanning all classes
* @return coefficients of class c restricted to the active columns
*/
public static double [] extractSubRange(int N, int c, int [] ids, double [] src) {
if(ids == null) return Arrays.copyOfRange(src,c*N,c*N+N);
double [] res = MemoryManager.malloc8d(ids.length);
int j = 0;
int off = c*N;
for(int i:ids)
res[j++] = src[off+i];
return res;
}
/**
* Scatters the compact coefficient array src (active columns only) of class c into the full-length destination
* array dst.
*
* @param N   number of coefficients per class
* @param c   class index
* @param ids active column indices within the class (null means all columns are active)
* @param src compact source array holding values for the active columns only
* @param dst full-length destination array spanning all classes
*/
static void fillSubRange(int N, int c, int [] ids, double [] src, double [] dst) {
if(ids == null) {
System.arraycopy(src,0,dst,c*N,N);
} else {
int j = 0;
int off = c * N;
for (int i : ids)
dst[off + i] = src[j++];
}
}
public double [] betaMultinomial(){return _beta;}
public double [] betaMultinomial(int c, double [] beta) {
return extractSubRange(_activeData.fullN()+1,c,_activeDataMultinomial[c].activeCols(),beta);
}
public double [] betaMultinomialFull(int c, double [] beta) {
if (_parms._remove_collinear_columns)
return extractSubRange(_betaLengthPerClass,c,_activeDataMultinomial[c].activeCols(),beta);
else
return extractSubRange(_activeData.fullN()+1,c,_activeDataMultinomial[c].activeCols(),beta);
}
public double[] shrinkFullArray(double[] fullArray) {
if (_activeData.activeCols() == null)
return fullArray;
int[] activeColsAllClass = genActiveColsAllClass(_activeData.activeCols().length* _nbetas,
_betaLengthPerClass, _activeData.activeCols(), _nbetas);
return ArrayUtils.select(fullArray, activeColsAllClass);
}
public static double[] expandToFullArray(double[] shortenArr, int[] activeCols, int _totalBetaLength, int nclasses,
int betaLengthPerClass) {
if (activeCols == null)
return shortenArr;
int[] activeColsAllClass = genActiveColsAllClass(activeCols.length*nclasses,
betaLengthPerClass, activeCols, nclasses);
double[] fullArray = new double[_totalBetaLength];
fillSubRange(_totalBetaLength, 0, activeColsAllClass, shortenArr, fullArray);
return fullArray;
}
public static int[] genActiveColsAllClass(int activeColsLen, int numBetaPerClass, int[] activeColsOrig, int nclasses) {
int[] activeCols = new int[activeColsLen];
int offset = 0;
int[] activeColsOneClass = activeColsOrig;
for (int classIndex=0; classIndex < nclasses; classIndex++) {
int finalOffset = numBetaPerClass*classIndex;
int[] activeCols1Class = IntStream.of(activeColsOneClass).map(i->i+finalOffset).toArray();
int num2Copy = activeColsOneClass.length;
System.arraycopy(activeCols1Class, 0, activeCols, offset, num2Copy);
offset += num2Copy;
}
return activeCols;
}
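  // Usage sketch for genActiveColsAllClass (illustrative only): the per-class active columns are replicated
  // for every class, each copy shifted by the per-class beta length:
  //   int[] all = genActiveColsAllClass(6, 4, new int[]{0, 2, 3}, 2);
  //   // all == {0, 2, 3, 4, 6, 7}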
public int[] genActiveColsIndClass(int activeColsLen, int numBetaPerClass, int[] activeColsOrig, int activeClass,
int nclasses) {
int[] activeCols = new int[activeColsLen];// total length
int offset = 0;
int[] activeColsOneClass = activeColsOrig;
for (int classIndex = 0; classIndex < activeClass; classIndex++) {
int finalOffset = numBetaPerClass*classIndex;
int num2Copy = activeColsOneClass.length;
int[] activeCols1Class = IntStream.of(activeColsOneClass).map(i->i+finalOffset).toArray();
System.arraycopy(activeCols1Class, 0, activeCols, offset, num2Copy);
offset += num2Copy;
}
for (int classInd = activeClass; classInd < nclasses; classInd++) {
int finalOffset = numBetaPerClass*classInd;
int[] activeCols1Class = IntStream.range(0, numBetaPerClass).map(i->i+finalOffset).toArray();
System.arraycopy(activeCols1Class, 0, activeCols, offset, numBetaPerClass);
offset += numBetaPerClass;
}
return activeCols;
}
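  // Usage sketch for genActiveColsIndClass (illustrative only): classes before activeClass contribute only
  // their active columns, while activeClass and the classes after it contribute every column:
  //   int[] cols = genActiveColsIndClass(7, 4, new int[]{0, 2, 3}, 1, 2);
  //   // cols == {0, 2, 3, 4, 5, 6, 7}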
public GLMSubsetGinfo ginfoMultinomial(int c) {
return new GLMSubsetGinfo(_ginfo,(_activeData.fullN()+1),c,_activeDataMultinomial[c].activeCols());
}
public GLMSubsetGinfo ginfoMultinomialRCC(int c) {
if (_activeData.fullN() + 1 == _activeData.activeCols().length)
return new GLMSubsetGinfo(_ginfo, (_activeData.fullN() + 1), c, IntStream.range(0,
_activeData.activeCols().length).toArray());
else
return new GLMSubsetGinfo(_ginfo, (_activeData.fullN() + 1), c, _activeData.activeCols());
}
public void setBC(BetaConstraint bc) {
_bc = bc;
_activeBC = _bc;
}
public void setLinearConstraints(LinearConstraints[] equalityC, LinearConstraints[] lessThanEqualToC, boolean forBeta) {
if (forBeta) {
_equalityConstraintsBeta = equalityC.length == 0 ? null : equalityC;
_lessThanEqualToConstraintsBeta = lessThanEqualToC.length == 0 ? null : lessThanEqualToC;
} else {
_equalityConstraintsLinear = equalityC.length == 0 ? null : equalityC;
_lessThanEqualToConstraintsLinear = lessThanEqualToC.length == 0 ? null : lessThanEqualToC;
}
}
public void setActiveClass(int activeClass) {_activeClass = activeClass;}
public double deviance() {
switch (_parms._family) {
case gaussian:
case binomial:
case quasibinomial:
case ordinal:
case multinomial:
case fractionalbinomial:
return 2*likelihood();
case poisson:
case gamma:
case negativebinomial:
case tweedie:
return likelihood();
default:
throw new RuntimeException("unknown family " + _parms._family);
}
}
/***
   * This method will grab a subset of the gradient for each multinomial class. However, if remove_collinear_columns is
   * on, fullInfo will only contain the gradient of the active columns.
*/
public static class GLMSubsetGinfo extends GLM.GLMGradientInfo {
public final GLMGradientInfo _fullInfo;
public GLMSubsetGinfo(GLMGradientInfo fullInfo, int N, int c, int [] ids) {
super(fullInfo._likelihood, fullInfo._objVal, extractSubRange(N,c,ids,fullInfo._gradient));
_fullInfo = fullInfo; // fullInfo._gradient may not be full
}
}
public GradientSolver gslvrMultinomial(final int c) {
double[] betaCopy = new double[_totalBetaLength]; // make sure fullbeta is full length
if (_beta.length < _totalBetaLength) {
if (_beta.length == _activeData.activeCols().length* _nbetas) { // all classes converted
int[] activeCols = genActiveColsAllClass(_beta.length, _betaLengthPerClass, _activeData.activeCols(), _nbetas);
fillSubRange(_totalBetaLength, 0, activeCols, _beta, betaCopy);
} else {
int[] activeCols = genActiveColsIndClass(_beta.length, _betaLengthPerClass, _activeData.activeCols(), c, _nbetas);
fillSubRange(_totalBetaLength, 0, activeCols, _beta, betaCopy);
}
} else {
System.arraycopy(_beta, 0, betaCopy, 0, _totalBetaLength);
}
final double [] fullbeta = betaCopy; // make sure fullbeta contains everything
return new GradientSolver() {
// beta is full coeff Per class. Need to return gradient with full columns
@Override
public GradientInfo getGradient(double[] beta) {
// fill fullbeta with new values of beta for class c
fillSubRange(_dinfo.fullN()+1,c,_activeDataMultinomial[c].activeCols(),beta,fullbeta); // fullbeta contains everything
GLMGradientInfo fullGinfo = _gslvr.getGradient(fullbeta); // beta contains all columns
if (fullbeta.length > fullGinfo._gradient.length) { // fullGinfo only contains gradient for active columns here
double[] fullGinfoGradient = expandToFullArray(fullGinfo._gradient, _activeData.activeCols(),
_totalBetaLength, _nbetas, _betaLengthPerClass);
fullGinfo._gradient = fullGinfoGradient; // make sure fullGinfo contains full gradient
}
return new GLMSubsetGinfo(fullGinfo,_betaLengthPerClass,c,_activeData.activeCols());// fullGinfo has full gradient
//return new GLMSubsetGinfo(fullGinfo,_activeData.fullN()+1,c,_activeDataMultinomial[c].activeCols());
}
@Override
public GradientInfo getObjective(double[] beta) {return getGradient(beta);}
};
}
public void setBetaMultinomial(int c, double [] beta, double [] bc) {
if(_u != null) Arrays.fill(_u,0);
if (_parms._remove_collinear_columns)
fillSubRange(_betaLengthPerClass,c,_activeDataMultinomial[c].activeCols(),bc,beta);
else
fillSubRange(_activeData.fullN()+1,c,_activeDataMultinomial[c].activeCols(),bc,beta);
}
  /**
   * Apply strong rules to filter out expected inactive (with zero coefficient) predictors.
   *
   * @return number of expected active predictors.
   */
protected int applyStrongRulesMultinomial_old(double lambdaNew, double lambdaOld) {
int P = _dinfo.fullN();
int N = P+1;
int selected = 0;
_activeBC = _bc;
_activeData = _dinfo;
if (!_allIn) {
if(_activeDataMultinomial == null)
_activeDataMultinomial = new DataInfo[_nbetas];
final double rhs = _alpha * (2 * lambdaNew - lambdaOld);
int[] oldActiveCols = _activeData._activeCols == null ? new int[0] : _activeData.activeCols();
int [] cols = MemoryManager.malloc4(N* _nbetas);
int j = 0;
for(int c = 0; c < _nbetas; ++c) {
int start = selected;
for (int i = 0; i < P; ++i) {
if (j < oldActiveCols.length && i == oldActiveCols[j]) {
cols[selected++] = i;
++j;
} else if (_ginfo._gradient[c*N+i] > rhs || _ginfo._gradient[c*N+i] < -rhs) {
cols[selected++] = i;
}
}
cols[selected++] = P;// intercept
_activeDataMultinomial[c] = _dinfo.filterExpandedColumns(Arrays.copyOfRange(cols,start,selected));
for(int i = start; i < selected; ++i)
cols[i] += c*N;
}
_allIn = selected == cols.length;
}
return selected;
}
  /**
   * Apply strong rules to filter out expected inactive (with zero coefficient) predictors for the multinomial
   * family. The expected active predictors are stored per class in _activeDataMultinomial.
   */
protected void applyStrongRulesMultinomial(double lambdaNew, double lambdaOld) {
int P = _dinfo.fullN();
int N = P+1;
int selected = 0;
_activeBC = _bc;
_activeData = _dinfo;
if (!_allIn) {
if(_activeDataMultinomial == null)
_activeDataMultinomial = new DataInfo[_nbetas];
final double rhs = _alpha * (2 * lambdaNew - lambdaOld);
int [] cols = MemoryManager.malloc4(N* _nbetas);
int oldActiveColsTotal = 0;
for(int c = 0; c < _nbetas; ++c) {
int j = 0;
int[] oldActiveCols = _activeDataMultinomial[c] == null ? new int[]{P} : _activeDataMultinomial[c]._activeCols;
oldActiveColsTotal += oldActiveCols.length;
for (int i = 0; i < P; ++i) {
if (j < oldActiveCols.length && i == oldActiveCols[j]) {
++j;
} else { // need access to _ginfo
if (_ginfo == null) _ginfo = ginfo();
if (_ginfo._gradient[c * N + i] > rhs || _ginfo._gradient[c * N + i] < -rhs) {
cols[selected++] = c * N + i;
}
}
}
}
if(_parms._max_active_predictors != -1 && _parms._max_active_predictors - oldActiveColsTotal + _nbetas < selected) {
Integer[] bigInts = ArrayUtils.toIntegers(cols, 0, selected);
Arrays.sort(bigInts, new Comparator<Integer>() {
@Override
public int compare(Integer o1, Integer o2) {
return (int) Math.signum(_ginfo._gradient[o2.intValue()] * _ginfo._gradient[o2.intValue()] - _ginfo._gradient[o1.intValue()] * _ginfo._gradient[o1.intValue()]);
}
});
cols = ArrayUtils.toInt(bigInts, 0, _parms._max_active_predictors - oldActiveColsTotal + _nbetas);
Arrays.sort(cols);
selected = cols.length;
}
int i = 0;
int [] cs = new int[P+1];
int sum = 0;
for(int c = 0; c < _nbetas; ++c){
int [] classcols = cs;
int[] oldActiveCols = _activeDataMultinomial[c] == null ? new int[]{P} : _activeDataMultinomial[c]._activeCols;
int k = 0;
while(i < selected && cols[i] < (c+1)*N)
classcols[k++] = cols[i++]-c*N;
classcols = ArrayUtils.sortedMerge(oldActiveCols,Arrays.copyOf(classcols,k));
sum += classcols.length;
_activeDataMultinomial[c] = _dinfo.filterExpandedColumns(classcols);
}
assert _parms._max_active_predictors == -1 || sum <= _parms._max_active_predictors + _nbetas :"sum = " + sum + " max_active_preds = " + _parms._max_active_predictors + ", nclasses = " + _nbetas;
_allIn = sum == N* _nbetas;
}
}
protected boolean checkKKTsMultinomial(){
return true;
//if(_activeData._activeCols == null) return true;
// throw H2O.unimpl();
}
protected boolean checkKKTs() {
if(_parms._family == Family.multinomial || _parms._family == Family.ordinal) // always return true?
return checkKKTsMultinomial();
double [] beta = _beta;
double [] u = _u;
if(_activeData._activeCols != null) {
beta = ArrayUtils.expandAndScatter(beta, _dinfo.fullN() + 1, _activeData._activeCols);
if(_u != null)
u = ArrayUtils.expandAndScatter(_u, _dinfo.fullN() + 1, _activeData._activeCols);
}
int [] activeCols = _activeData.activeCols();
if(beta != _beta || _ginfo == null) {
_gslvr = _penaltyMatrix == null ? new GLMGradientSolver(_job, _parms, _dinfo, (1 - _alpha) * _lambda, _bc, _modelBetaInfo)
: new GLMGradientSolver(_job, _parms, _dinfo, (1 - _alpha) * _lambda, _bc, _modelBetaInfo, _penaltyMatrix, _gamBetaIndices);
_ginfo = _gslvr.getGradient(beta);
}
double[] grad = _ginfo._gradient.clone();
double err = 1e-4;
if(u != null && u != _u){ // fill in u for missing variables
int k = 0;
for(int i = 0; i < u.length; ++i) {
if(_activeData._activeCols[k] == i){
++k; continue;
}
assert u[i] == 0;
u[i] = -grad[i];
}
}
ADMM.subgrad(_alpha * _lambda, beta, grad);
for (int c : activeCols) // set the error tolerance to the highest error of included columns
if (grad[c] > err) err = grad[c];
else if (grad[c] < -err) err = -grad[c];
_gradientErr = err;
_beta = beta;
_u = u;
_activeBC = null;
if(_parms._max_active_predictors == _activeData.fullN()){
Log.info("skipping KKT check, reached maximum number of active predictors (" + _parms._max_active_predictors + ")");
} else if(!_allIn) {
int[] failedCols = new int[64];
int fcnt = 0;
for (int i = 0; i < grad.length - 1; ++i) {
if (Arrays.binarySearch(activeCols, i) >= 0) continue; // always include all previously active columns
if (grad[i] > err || -grad[i] > err) {
if (fcnt == failedCols.length)
failedCols = Arrays.copyOf(failedCols, failedCols.length << 1);
failedCols[fcnt++] = i;
}
}
if (fcnt > 0) {
Log.info(fcnt + " variables failed KKT conditions, adding them to the model and recomputing.");
final int n = activeCols.length;
int[] newCols = Arrays.copyOf(activeCols, activeCols.length + fcnt);
for (int i = 0; i < fcnt; ++i)
newCols[n + i] = failedCols[i];
Arrays.sort(newCols);
_beta = ArrayUtils.select(beta, newCols);
if(_u != null) _u = ArrayUtils.select(_u,newCols);
_ginfo = new GLMGradientInfo(_ginfo._likelihood, _ginfo._objVal, ArrayUtils.select(_ginfo._gradient, newCols));
_activeData = _dinfo.filterExpandedColumns(newCols);
_activeBC = _bc.filterExpandedColumns(_activeData.activeCols());
_gslvr = _penaltyMatrix == null ? new GLMGradientSolver(_job, _parms, _activeData,
(1 - _alpha) * _lambda, _activeBC, _modelBetaInfo) : new GLMGradientSolver(_job, _parms, _activeData,
(1 - _alpha) * _lambda, _activeBC, _modelBetaInfo, _penaltyMatrix, _gamBetaIndices);
return false;
}
}
return true;
}
public void addOffset2Cols(int[] cols) {
int offset = _activeClass*_activeData.activeCols().length;
int colsLen = cols.length;
for (int index = 0; index < colsLen; index++)
cols[index] = cols[index]+offset;
}
public int [] removeCols(int [] cols) { // cols is per class, not overall
int[] activeCols;
int[] colsWOffset = cols.clone();
if (_nbetas > 2 && _parms._remove_collinear_columns) {
activeCols = ArrayUtils.removeIds(_activeDataMultinomial[_activeClass].activeCols(), cols);
addOffset2Cols(colsWOffset);
} else {
activeCols = ArrayUtils.removeIds(_activeData.activeCols(), cols);
}
if (_beta != null)
_beta = ArrayUtils.removeIds(_beta, colsWOffset);
if(_u != null)
_u = ArrayUtils.removeIds(_u,colsWOffset);
if(_ginfo != null && _ginfo._gradient != null)
_ginfo._gradient = ArrayUtils.removeIds(_ginfo._gradient,colsWOffset);
_activeData = _dinfo.filterExpandedColumns(activeCols); // changed _adaptedFrame to excluded inactive columns
_activeBC = _bc.filterExpandedColumns(activeCols);
_gslvr = _penaltyMatrix == null ? new GLMGradientSolver(_job, _parms, _activeData,
(1 - _alpha) * _lambda, _activeBC, _modelBetaInfo) : new GLMGradientSolver(_job, _parms, _activeData,
(1 - _alpha) * _lambda, _activeBC, _modelBetaInfo, _penaltyMatrix, _gamBetaIndices);
_currGram = null;
return activeCols;
}
private double penalty(double [] beta) {
if(_lambda == 0) return 0;
double l1norm = 0, l2norm = 0;
if(_parms._family == Family.multinomial || _parms._family == Family.ordinal) {
int len = beta.length/ _nbetas;
assert len* _nbetas == beta.length;
for(int c = 0; c < _nbetas; ++c) {
for(int i = c*len; i < (c+1)*len-1; ++i) {
double d = beta[i];
l1norm += d >= 0?d:-d;
l2norm += d*d;
}
if (_parms._family == Family.ordinal) // done for ordinal, only one set of beta but numclass-1 intercepts
break;
}
} else
for(int i = 0; i < beta.length-1; ++i) {
double d = beta[i];
l1norm += d >= 0?d:-d;
l2norm += d*d;
}
return l1pen()*l1norm + .5*l2pen()*l2norm;
}
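  // Worked example of the elastic-net penalty computed above (illustrative values only; l1pen() and l2pen()
  // are this class's l1/l2 penalty factors, presumably lambda*alpha and lambda*(1-alpha)):
  //   beta = {0.5, -0.25, 0.1} with the last entry being the intercept, which is excluded
  //   l1norm = 0.5 + 0.25 = 0.75,  l2norm = 0.25 + 0.0625 = 0.3125
  //   penalty = l1pen() * 0.75 + 0.5 * l2pen() * 0.3125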
public double objective() {return _beta == null?Double.MAX_VALUE:objective(_beta,_likelihood);}
public double objective(double [] beta, double likelihood) {
double gamVal = 0;
if (_parms._glmType == GLMParameters.GLMType.gam) {
if (beta.length == _totalBetaLength)
gamVal = calSmoothNess(beta, _penaltyMatrix, _gamBetaIndices);
else
gamVal = calSmoothNess(expandBeta(beta), _penaltyMatrix, _gamBetaIndices); // take up memory
}
if (_csGLMState != null && (_equalityConstraints != null || _lessThanEqualToConstraints != null))
return _ginfo._objVal;
else
return likelihood * _parms._obj_reg + gamVal + penalty(beta) + (_activeBC == null?0:_activeBC.proxPen(beta));
}
/***
*
   * This method will calculate the first derivative of h(beta). Refer to the doc, section VI.I
*
*/
public static ConstrainedGLMUtils.ConstraintsDerivatives[] calDerivatives(LinearConstraints[] constraints, List<String> coefNames) {
int numConstraints = constraints.length;
ConstrainedGLMUtils.ConstraintsDerivatives[] constDeriv = new ConstrainedGLMUtils.ConstraintsDerivatives[numConstraints];
LinearConstraints oneConstraint;
for (int index=0; index<numConstraints; index++) {
oneConstraint = constraints[index];
constDeriv[index] = genOneDerivative(oneConstraint, coefNames);
}
return constDeriv;
}
/***
* Given a constraint, this method will calculate the first order derivative. Note that this derivative does not
   * depend on the lambda applied to the constraint. It only changes when the number of coefficients in beta changes,
   * in which case this method needs to be called again.
*/
public static ConstrainedGLMUtils.ConstraintsDerivatives genOneDerivative(LinearConstraints oneConstraints, List<String> coeffNames) {
ConstrainedGLMUtils.ConstraintsDerivatives constraintDerivative = new ConstrainedGLMUtils.ConstraintsDerivatives(oneConstraints._active);
IcedHashMap<String, Double> coeffNameValues = oneConstraints._constraints;
int index;
for (String coefName: coeffNameValues.keySet()) {
index = coeffNames.indexOf(coefName);
if (index >= 0)
constraintDerivative._constraintsDerivative.put(index, coeffNameValues.get(coefName));
}
return constraintDerivative;
}
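  // Usage sketch for genOneDerivative (illustrative only): for the constraint 2*beta_1 - 3*beta_4 - 3 <= 0 the
  // derivative with respect to each named coefficient is simply its multiplier, and the "constant" entry is
  // skipped because it is not a coefficient name:
  //   LinearConstraints c = new LinearConstraints();
  //   c._constraints.put("beta_1", 2.0);
  //   c._constraints.put("beta_4", -3.0);
  //   c._constraints.put("constant", -3.0);
  //   ConstrainedGLMUtils.ConstraintsDerivatives d =
  //       genOneDerivative(c, Arrays.asList("beta_1", "beta_2", "beta_3", "beta_4"));
  //   // d._constraintsDerivative == {0 -> 2.0, 3 -> -3.0}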
/***
   * This method calculates the contribution of the penalty to the gram (d2H/dbidbj); refer to the doc, Section VI.II
*/
public static ConstrainedGLMUtils.ConstraintsGram[] calGram(ConstrainedGLMUtils.ConstraintsDerivatives[] derivativeEqual) {
return Arrays.stream(derivativeEqual).map(x -> constructGram(x)).toArray(ConstrainedGLMUtils.ConstraintsGram[]::new);
}
/***
* This method is not called often. If called, it will calculate the contribution of constraints to the
   * hessian. Whenever the number of predictors changes, this function should be called again because it only looks
   * at the predictor indices, and those indices change when the number of predictors changes. It calculates
   * the second derivative regardless of the active status because an inactive constraint may become active in the
   * future. Note that only half of the 2nd derivatives are calculated, namely d(transpose(h(beta))*h(beta))/dCidCj
   * and not d(transpose(h(beta))*h(beta))/dCjdCi, since they are symmetric.
*/
public static ConstrainedGLMUtils.ConstraintsGram constructGram(ConstrainedGLMUtils.ConstraintsDerivatives constDeriv) {
ConstrainedGLMUtils.ConstraintsGram cGram = new ConstrainedGLMUtils.ConstraintsGram();
List<Integer> predictorIndexc = constDeriv._constraintsDerivative.keySet().stream().collect(Collectors.toList());
Collections.sort(predictorIndexc);
while (!predictorIndexc.isEmpty()) {
Integer firstEle = predictorIndexc.get(0);
for (Integer oneCoeff : predictorIndexc) {
ConstrainedGLMUtils.CoefIndices coefPairs = new ConstrainedGLMUtils.CoefIndices(firstEle, oneCoeff);
cGram._coefIndicesValue.put(coefPairs, constDeriv._constraintsDerivative.get(firstEle)*constDeriv._constraintsDerivative.get(oneCoeff));
}
predictorIndexc.remove(0);
}
cGram._active = constDeriv._active; // calculate for active/inactive constraints, inactive may be active in future
return cGram;
}
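  // Usage sketch for constructGram (illustrative only): continuing the derivative example above with
  // d._constraintsDerivative == {0 -> 2.0, 3 -> -3.0}, the upper-triangular entries of the outer product are
  //   (0,0) -> 4.0, (0,3) -> -6.0, (3,3) -> 9.0
  // and these are exactly the entries stored in cGram._coefIndicesValue.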
protected double updateState(double [] beta, double likelihood) {
_betaDiff = ArrayUtils.linfnorm(_beta == null?beta:ArrayUtils.subtract(_beta,beta),false);
double objOld = objective();
_beta = beta;
_ginfo = null;
_likelihood = likelihood;
return (_relImprovement = (objOld - objective())/Math.abs(objOld));
}
private double _betaDiff;
private double _relImprovement;
String convergenceMsg = "";
public boolean converged(){
boolean converged = false;
if(_betaDiff < _parms._beta_epsilon) {
convergenceMsg = "betaDiff < eps; betaDiff = " + _betaDiff + ", eps = " + _parms._beta_epsilon;
converged = true;
} else if(_relImprovement < _parms._objective_epsilon) {
convergenceMsg = "relImprovement < eps; relImprovement = " + _relImprovement + ", eps = " + _parms._objective_epsilon;
converged = true;
} else convergenceMsg = "not converged, betaDiff = " + _betaDiff + ", relImprovement = " + _relImprovement;
return converged;
}
public double updateState(double [] beta,GLMGradientInfo ginfo) {
double objOld;
if (_beta != null && beta.length > _beta.length) { // beta is full while _beta only contains active columns
double[] shortBeta = shrinkFullArray(beta);
_betaDiff = ArrayUtils.linfnorm(_beta == null ? beta : ArrayUtils.subtract(_beta, shortBeta), false);
objOld = objective();
if(_beta == null)_beta = shortBeta.clone();
else System.arraycopy(shortBeta,0,_beta,0,shortBeta.length);
} else {
_betaDiff = ArrayUtils.linfnorm(_beta == null ? beta : ArrayUtils.subtract(_beta, beta), false);
objOld = objective();
if(_beta == null)_beta = beta.clone();
else System.arraycopy(beta,0,_beta,0,beta.length);
}
_ginfo = ginfo;
_likelihood = ginfo._likelihood;
_relImprovement = (objOld - objective()) / Math.abs(objOld);
return _relImprovement;
}
double getBetaDiff() {return _betaDiff;}
protected void setBetaDiff(double betaDiff) { _betaDiff = betaDiff; }
protected void setGradientErr(double gErr) { _gradientErr = gErr; }
protected void setGinfo(GLMGradientInfo ginfo) {
_ginfo = copyGInfo(ginfo);
}
protected void setBeta(double[] beta) {
if(_beta == null)_beta = beta.clone();
else System.arraycopy(beta,0, _beta, 0, beta.length);
}
protected void setIter(int iteration) {
_iter = iteration;
}
protected void setLikelihood(double llk) { _likelihood = llk; }
protected void setAllIn(boolean val) { _allIn = val; }
protected void setGslvrNull() { _gslvr = null; }
protected void setActiveDataMultinomialNull() { _activeDataMultinomial = null; }
protected void setActiveDataNull() { _activeData = null; }
protected void setLambdaSimple(double lambda) { _lambda=lambda; }
public double [] expandBeta(double [] beta) { // for multinomials
int fullCoefLen = (_dinfo.fullN() + 1) * _nbetas;
if(_activeData._activeCols == null || beta.length == fullCoefLen)
return beta;
if (_nbetas <= 2 || !_parms._remove_collinear_columns)
return ArrayUtils.expandAndScatter(beta, (_dinfo.fullN() + 1) * _nbetas,_activeData._activeCols);
else
return expandToFullArray(beta, _activeData.activeCols(), _totalBetaLength, _nbetas, _betaLengthPerClass);
}
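  // Usage sketch for expandBeta (illustrative only, assuming ArrayUtils.expandAndScatter places the compact
  // values at the active-column positions and zeros everywhere else, mirroring ArrayUtils.select):
  //   active beta {0.5, -0.2, 1.1} with active columns {0, 2, 3} and a full per-class length of 4
  //   expands to {0.5, 0.0, -0.2, 1.1}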
public static class GramGrad {
public double[][] _gram;
public double[] beta;
public double[] _grad;
public double objective;
public double _sumOfRowWeights;
public double[] _xy;
public GramGrad(double[][] gramM, double[] grad, double[] b, double obj, double sumOfRowWeights, double[] xy) {
_gram = gramM;
beta = b;
_grad = grad;
objective = obj;
_sumOfRowWeights = sumOfRowWeights;
_xy = xy;
}
public Gram.Cholesky cholesky(Gram.Cholesky chol, double[][] xx) {
if( chol == null ) {
for( int i = 0; i < xx.length; ++i )
xx[i] = xx[i].clone();
chol = new Gram.Cholesky(xx, new double[0]);
}
final Gram.Cholesky fchol = chol;
final int sparseN = 0;
final int denseN = xx.length - sparseN;
// compute the cholesky of the diagonal and diagonal*dense parts
ForkJoinTask[] fjts = new ForkJoinTask[denseN];
// compute the outer product of diagonal*dense
//Log.info("SPARSEN = " + sparseN + " DENSEN = " + denseN);
final int[][] nz = new int[denseN][];
for( int i = 0; i < denseN; ++i ) {
final int fi = i;
fjts[i] = new RecursiveAction() {
@Override protected void compute() {
int[] tmp = new int[sparseN];
double[] rowi = fchol._xx[fi];
int n = 0;
for( int k = 0; k < sparseN; ++k )
if (rowi[k] != .0) tmp[n++] = k;
nz[fi] = Arrays.copyOf(tmp, n);
}
};
}
ForkJoinTask.invokeAll(fjts);
for( int i = 0; i < denseN; ++i ) {
final int fi = i;
fjts[i] = new RecursiveAction() {
@Override protected void compute() {
double[] rowi = fchol._xx[fi];
int[] nzi = nz[fi];
for( int j = 0; j <= fi; ++j ) {
double[] rowj = fchol._xx[j];
int[] nzj = nz[j];
double s = 0;
for (int t=0,z=0; t < nzi.length && z < nzj.length; ) {
int k1 = nzi[t];
int k2 = nzj[z];
if (k1 < k2) { t++; continue; }
else if (k1 > k2) { z++; continue; }
else {
s += rowi[k1] * rowj[k1];
t++; z++;
}
}
rowi[j + sparseN] = xx[fi][j + sparseN] - s;
}
}
};
}
ForkJoinTask.invokeAll(fjts);
// compute the cholesky of dense*dense-outer_product(diagonal*dense)
double[][] arr = new double[denseN][];
for( int i = 0; i < arr.length; ++i )
arr[i] = Arrays.copyOfRange(fchol._xx[i], sparseN, sparseN + denseN);
final int p = H2ORuntime.availableProcessors();
Gram.InPlaceCholesky d = Gram.InPlaceCholesky.decompose_2(arr, 10, p);
fchol.setSPD(d.isSPD());
arr = d.getL();
for( int i = 0; i < arr.length; ++i ) {
// See PUBDEV-5585: we use a manual array copy instead of System.arraycopy because of behavior on Java 10
// Used to be: System.arraycopy(arr[i], 0, fchol._xx[i], sparseN, i + 1);
for (int j = 0; j < i + 1; j++)
fchol._xx[i][sparseN + j] = arr[i][j];
}
return chol;
}
public Gram.Cholesky qrCholesky(List<Integer> dropped_cols, double[][] Z, boolean standardized) {
final double [][] R = new double[Z.length][];
final double [] Zdiag = new double[Z.length];
final double [] ZdiagInv = new double[Z.length];
for(int i = 0; i < Z.length; ++i)
ZdiagInv[i] = 1.0/(Zdiag[i] = Z[i][i]);
for(int j = 0; j < Z.length; ++j) {
final double [] gamma = R[j] = new double[j+1];
for(int l = 0; l <= j; ++l) // compute gamma_l_j
gamma[l] = Z[j][l]*ZdiagInv[l];
double zjj = Z[j][j];
for(int k = 0; k < j; ++k) // only need the diagonal, the rest is 0 (dot product of orthogonal vectors)
zjj += gamma[k] * (gamma[k] * Z[k][k] - 2*Z[j][k]);
// Check R^2 for the current column and ignore if too high (1-R^2 too low), R^2 = 1- rs_res/rs_tot
// rs_res = zjj (the squared residual)
// rs_tot = sum((yi - mean(y))^2) = mean(y^2) - mean(y)^2,
// mean(y^2) is on diagonal
// mean(y) is in the intercept (0 if standardized)
// might not be regularized with number of observations, that's why dividing by intercept diagonal
double rs_tot = standardized
?ZdiagInv[j]
:1.0/(Zdiag[j]-Z[j][0]*ZdiagInv[0]*Z[j][0]);
if (j > 0 && zjj*rs_tot < R2_EPS) {
zjj=0;
dropped_cols.add(j-1);
ZdiagInv[j] = 0;
} else {
ZdiagInv[j] = 1. / zjj;
}
Z[j][j] = zjj;
int jchunk = Math.max(1,MIN_PAR/(Z.length-j));
int nchunks = (Z.length - j - 1)/jchunk;
nchunks = Math.min(nchunks, H2O.NUMCPUS);
if(nchunks <= 1) { // single threaded update
updateZ(gamma,Z,j);
} else { // multi-threaded update
final int fjchunk = (Z.length - 1 - j)/nchunks;
int rem = Z.length - 1 - j - fjchunk*nchunks;
for(int i = Z.length-rem; i < Z.length; ++i)
updateZij(i,j,Z,gamma);
RecursiveAction[] ras = new RecursiveAction[nchunks];
final int fj = j;
int k = 0;
for (int i = j + 1; i < Z.length-rem; i += fjchunk) { // update xj to zj //
final int fi = i;
ras[k++] = new RecursiveAction() {
@Override
protected final void compute() {
int max_i = Math.min(fi+fjchunk,Z.length);
for(int i = fi; i < max_i; ++i)
updateZij(i,fj,Z,gamma);
}
};
}
ForkJoinTask.invokeAll(ras);
}
}
// update the R - we computed Rt/sqrt(diag(Z)) which we can directly use to solve the problem
if(R.length < 500)
for(int i = 0; i < R.length; ++i)
for (int j = 0; j <= i; ++j)
R[i][j] *= Math.sqrt(Z[j][j]);
else {
RecursiveAction[] ras = new RecursiveAction[R.length];
for(int i = 0; i < ras.length; ++i) {
final int fi = i;
final double [] Rrow = R[i];
ras[i] = new RecursiveAction() {
@Override
protected void compute() {
for (int j = 0; j <= fi; ++j)
Rrow[j] *= Math.sqrt(Z[j][j]);
}
};
}
ForkJoinTask.invokeAll(ras);
}
// deal with dropped_cols if present
if (dropped_cols.isEmpty())
return new Gram.Cholesky(R, new double[0], true);
else
return new Gram.Cholesky(dropIgnoredCols(R, Z, dropped_cols),new double[0], true);
}
public static double[][] dropIgnoredCols(double[][] R, double[][] Z, List<Integer> dropped_cols) {
double[][] Rnew = new double[R.length-dropped_cols.size()][];
for(int i = 0; i < Rnew.length; ++i)
Rnew[i] = new double[i+1];
int j = 0;
for(int i = 0; i < R.length; ++i) {
if(Z[i][i] == 0) continue;
int k = 0;
for(int l = 0; l <= i; ++l) {
if(k < dropped_cols.size() && l == (dropped_cols.get(k)+1)) {
++k;
continue;
}
Rnew[j][l - k] = R[i][l];
}
++j;
}
return Rnew;
}
private final void updateZij(int i, int j, double [][] Z, double [] gamma) {
double [] Zi = Z[i];
double Zij = Zi[j];
for (int k = 0; k < j; ++k)
Zij -= gamma[k] * Zi[k];
Zi[j] = Zij;
}
private final void updateZ(final double [] gamma, final double [][] Z, int j){
for (int i = j + 1; i < Z.length; ++i) // update xj to zj //
updateZij(i,j,Z,gamma);
}
public static double[][] dropCols(int[] cols, double[][] xx) {
Arrays.sort(cols);
int newXXLen = xx.length-cols.length;
double [][] xxNew = new double[newXXLen][newXXLen];
int oldXXLen = xx.length;
List<Integer> newIndices = IntStream.range(0, newXXLen).boxed().collect(Collectors.toList());
for (int index:cols)
newIndices.add(index,-1);
int newXindexX, newXindexY;
for (int rInd=0; rInd<oldXXLen; rInd++) {
newXindexX = newIndices.get(rInd);
for (int cInd=rInd; cInd<oldXXLen; cInd++) {
newXindexY = newIndices.get(cInd);
if (newXindexY >= 0 && newXindexX >= 0) {
xxNew[newXindexX][newXindexY] = xx[rInd][cInd];
xxNew[newXindexY][newXindexX] = xx[cInd][rInd];
}
}
}
return xxNew;
}
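    // Usage sketch for dropCols (illustrative only): dropping row/column 1 of a 3x3 symmetric matrix keeps the
    // entries belonging to rows/columns {0, 2}:
    //   double[][] xx = {{4, 1, 2}, {1, 5, 3}, {2, 3, 6}};
    //   double[][] reduced = dropCols(new int[]{1}, xx);   // == {{4, 2}, {2, 6}}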
public static int[] findZeroCols(double[][] xx){
ArrayList<Integer> zeros = new ArrayList<>();
for(int i = 0; i < xx.length; ++i) {
if (sum(xx[i]) == 0)
zeros.add(i);
}
if(zeros.size() == 0) return new int[0];
int [] ary = new int[zeros.size()];
for(int i = 0; i < zeros.size(); ++i)
ary[i] = zeros.get(i);
return ary;
}
}
/**
* Cached state of COD (with covariate updates) solver.
*/
public static final class GramXY {
public final Gram gram;
final double[] beta;
final int[] activeCols;
int [] newCols;
public final double[] xy;
private double [] grads;
public double yy;
public final double likelihood;
public double sumOfRowWeights; // sum of all r.weight
public GramXY(Gram gram, double[] xy, double [] grads, double[] beta, int[] activeCols, int [] newActiveCols, double yy, double likelihood) {
this.gram = gram;
this.xy = xy;
this.grads = grads;
this.beta = beta == null ? null : beta.clone();
this.activeCols = activeCols == null ? null : activeCols.clone();
this.newCols = newActiveCols;
this.yy = yy;
this.likelihood = likelihood;
}
public final double [] getCODGradients(){
if(grads == null){
double [][] xx = gram.getXX();
grads = new double[xy.length];
for(int i = 0; i < grads.length; ++i)
grads[i] = xy[i] - ArrayUtils.innerProduct(xx[i], beta) + xx[i][i] * beta[i];
}
if(newCols != null) {
double [][] xx = gram.getXX();
for (int i : newCols)
grads[i] = xy[i] - ArrayUtils.innerProduct(xx[i], beta) + xx[i][i] * beta[i];
}
return grads;
}
public boolean match(double[] beta, int[] activeCols) {
return Arrays.equals(this.beta, beta) && Arrays.equals(this.activeCols, activeCols);
}
static double [] mergeRow(int k, double [] xrowOld, double [] xrow,int [] newColsIds, double [][] xxUpdate){
for(int i = 0; i < newColsIds.length; ++i){
int j = newColsIds[i];
xrow[j] = xxUpdate[i][k];
for(int l = i == 0?0:newColsIds[i-1]+1; l < j; ++l)
xrow[l] = xrowOld[l-i];
}
int l = newColsIds.length;
for(int j = newColsIds[newColsIds.length-1]+1; j < xrow.length; ++j)
xrow[j] = xrowOld[j-l];
return xrow;
}
public static GramXY addCols(double[] beta, final int[] newActiveCols, final int[] newColsIds, final GramXY oldGram, final double[][] xxUpdate, final double[] xyUpdate) {
// update the expanded matrix cache
final double[][] xxCacheNew = new double[newActiveCols.length][];
final double[] xyNew = new double[xxCacheNew.length];
final double[] gradsNew = oldGram.grads == null?null:new double[xxCacheNew.length];
double [][] xx = oldGram.gram.getXX();
for (int k = 0; k < newColsIds.length; ++k) {
int j = newColsIds[k];
xxCacheNew[j] = xxUpdate[k];
xyNew[j] = xyUpdate[k];
for (int i = k == 0 ? 0 : newColsIds[k - 1] + 1; i < j; i++) {
xxCacheNew[i] = mergeRow(i, xx[i - k], new double[newActiveCols.length], newColsIds, xxUpdate);
xyNew[i] = oldGram.xy[i - k];
if(oldGram.grads != null)gradsNew[i] = oldGram.grads[i - k];
}
}
int k = newColsIds.length;
for (int i = newColsIds[newColsIds.length - 1] + 1; i < xyNew.length; ++i) {
xxCacheNew[i] = mergeRow(i, xx[i - k], new double[newActiveCols.length], newColsIds, xxUpdate);
xyNew[i] = oldGram.xy[i - k];
if(oldGram.grads != null)gradsNew[i] = oldGram.grads[i - k];
}
return new GramXY(new Gram(xxCacheNew), xyNew, gradsNew, beta, newActiveCols, newColsIds, oldGram.yy, oldGram.likelihood);
}
}
protected GramXY computeNewGram(DataInfo activeData, double [] beta, GLMParameters.Solver s){
double obj_reg = _parms._obj_reg;
if(_glmw == null) _glmw = new GLMModel.GLMWeightsFun(_parms);
GLMTask.GLMIterationTask gt = new GLMTask.GLMIterationTask(_job._key, activeData, _glmw, beta,
_activeClass).doAll(activeData._adaptedFrame);
gt._gram.mul(obj_reg);
if (_parms._glmType.equals(GLMParameters.GLMType.gam)) { // add contribution from GAM smoothness factor
Integer[] activeCols=null;
int[] activeColumns = activeData.activeCols();
if (activeColumns.length<_dinfo.fullN()) { // columns are deleted
activeCols = ArrayUtils.toIntegers(activeColumns, 0, activeColumns.length);
}
gt._gram.addGAMPenalty(activeCols , _penaltyMatrix, _gamBetaIndices);
}
mult(gt._xy,obj_reg);
int [] activeCols = activeData.activeCols();
int [] zeros = gt._gram.findZeroCols();
GramXY res;
if(_parms._family != Family.multinomial && zeros.length > 0 && zeros.length <= activeData.activeCols().length) {
gt._gram.dropCols(zeros);
removeCols(zeros);
res = new ComputationState.GramXY(gt._gram,ArrayUtils.removeIds(gt._xy, zeros),null,gt._beta == null?null:ArrayUtils.removeIds(gt._beta, zeros),activeData().activeCols(),null,gt._yy,gt._likelihood);
} else res = new GramXY(gt._gram,gt._xy,null, beta,activeCols,null,gt._yy,gt._likelihood);
if (gaussian.equals(_parms._family))
res.sumOfRowWeights = gt.sumOfRowWeights;
return res;
}
GramXY _currGram;
GLMModel.GLMWeightsFun _glmw;
  /***
   * This method is used only for the multinomial family. It differs from computeGram because it works on _activeData,
   * whose _adaptedFrame contains only the active columns. Note that activeDataMultinomial(_activeClass) will
   * always contain all predictors in its _adaptedFrame.
   * @param beta coefficients for the active columns
   * @param s solver in use
   * @return gram and gradient information for the active columns
   */
public GramXY computeGramRCC(double[] beta, GLMParameters.Solver s) {
return computeNewGram(_activeData, ArrayUtils.select(beta, _activeData.activeCols()), s);
}
/***
* This function calculates the following values:
* 1. the hessian
   * 2. the xy term, which formXY computes as (hessian * old_beta - gradient)
*/
protected GramGrad computeGram(double [] beta, GLMGradientInfo gradientInfo){
DataInfo activeData = activeData();
double obj_reg = _parms._obj_reg;
if(_glmw == null) _glmw = new GLMModel.GLMWeightsFun(_parms);
GLMTask.GLMIterationTask gt = new GLMTask.GLMIterationTask(_job._key, activeData, _glmw, beta,
_activeClass).doAll(activeData._adaptedFrame);
double[][] fullGram = gt._gram.getXX(); // only extract gram matrix
mult(fullGram, obj_reg);
if (_gramEqual != null)
elementwiseSumSymmetricArrays(fullGram, mult(sumGramConstribution(_gramEqual, fullGram.length), _csGLMState._ckCS));
if (_gramLess != null)
elementwiseSumSymmetricArrays(fullGram, mult(sumGramConstribution(_gramLess, fullGram.length), _csGLMState._ckCS));
if (_parms._glmType.equals(GLMParameters.GLMType.gam)) { // add contribution from GAM smoothness factor
gt._gram.addGAMPenalty(_penaltyMatrix, _gamBetaIndices, fullGram);
}
    // form xy = Gram*beta_current - gradient (see formXY)
double[] xy = formXY(fullGram, beta, gradientInfo._gradient);
// remove zeros in Gram matrix and throw an error if that coefficient is included in the constraint
int[] zeros = findZeroCols(fullGram);
if (_parms._family != Family.multinomial && zeros.length > 0 && zeros.length <= activeData.activeCols().length) {
fullGram = GramGrad.dropCols(zeros, fullGram); // shrink gram matrix
removeCols(zeros); // update activeData.activeCols(), _beta
return new GramGrad(fullGram, ArrayUtils.removeIds(gradientInfo._gradient, zeros),
ArrayUtils.removeIds(beta, zeros), gradientInfo._objVal, gt.sumOfRowWeights, ArrayUtils.removeIds(xy, zeros));
}
return new GramGrad(fullGram, gradientInfo._gradient, beta, gradientInfo._objVal, gt.sumOfRowWeights, xy);
}
/***
*
* This method adds to objective function the contribution of
* transpose(lambda)*constraint vector + ck/2*transpose(constraint vector)*constraint vector
*/
public static double addConstraintObj(double[] lambda, LinearConstraints[] constraints, double ckHalf) {
int numConstraints = constraints.length;
LinearConstraints oneC;
double objValueAdd = 0;
for (int index=0; index<numConstraints; index++) {
oneC = constraints[index];
if (oneC._active) {
objValueAdd += lambda[index]*oneC._constraintsVal; // from linear constraints
objValueAdd += ckHalf*oneC._constraintsVal*oneC._constraintsVal; // from penalty
}
}
return objValueAdd;
}
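  // Worked example for addConstraintObj (illustrative values only): with one active constraint whose current
  // value is h(beta) = 0.2, multiplier lambda = {0.5} and ckHalf = 5 (i.e. ck = 10), the objective gains
  //   0.5 * 0.2 + 5 * 0.2 * 0.2 = 0.1 + 0.2 = 0.3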
public static double[] formXY(double[][] fullGram, double[] beta, double[] grad) {
int len = grad.length;
double[] xy = new double[len];
multArrVec(fullGram, beta, xy);
return IntStream.range(0, len).mapToDouble(x -> xy[x]-grad[x]).toArray();
}
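  // Worked example for formXY (illustrative values only): with
  //   fullGram = {{2, 0}, {0, 2}}, beta = {1.0, 0.5}, grad = {0.1, -0.2}
  // the product fullGram * beta is {2.0, 1.0}, so
  //   xy = {2.0 - 0.1, 1.0 - (-0.2)} = {1.9, 1.2}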
// get cached gram or incrementally update or compute new one
public GramXY computeGram(double [] beta, GLMParameters.Solver s){
double obj_reg = _parms._obj_reg;
boolean weighted = !gaussian.equals(_parms._family) || !GLMParameters.Link.identity.equals(_parms._link);
if(Family.multinomial.equals(_parms._family)) // no caching
return computeNewGram(activeDataMultinomial(_activeClass),beta,s); // activeDataMultinomial(_activeClass) returns all predictors
if(s != GLMParameters.Solver.COORDINATE_DESCENT)
// only cache for solver==COD
// caching only makes difference when running with lambda search
// and COD and IRLSM need matrix in different shape
// and COD is better for lambda search
return computeNewGram(activeData(),beta,s);
if(_currGram == null) // no cached value, compute new one and store
return _currGram = computeNewGram(activeData(),beta,s);
DataInfo activeData = activeData();
assert beta == null || beta.length == activeData.fullN()+1;
int [] activeCols = activeData.activeCols();
if (Arrays.equals(_currGram.activeCols,activeCols))
return (!weighted || Arrays.equals(_currGram.beta, beta)) ? _currGram : (_currGram = computeNewGram(activeData,
beta, s));
if(_glmw == null) _glmw = new GLMModel.GLMWeightsFun(_parms);
// check if we need full or just incremental update
if(_currGram != null){
int [] newCols = ArrayUtils.sorted_set_diff(activeCols,_currGram.activeCols);
int [] newColsIds = newCols.clone();
int jj = 0;
boolean matches = true;
int k = 0;
for (int i = 0; i < activeCols.length; ++i) {
if (jj < newCols.length && activeCols[i] == newCols[jj]) {
newColsIds[jj++] = i;
matches = matches && (beta == null || beta[i] == 0);
} else {
matches = matches && (beta == null || beta[i] == _currGram.beta[k++]);
}
}
if(!weighted || matches) {
GLMTask.GLMIncrementalGramTask gt = new GLMTask.GLMIncrementalGramTask(newColsIds, activeData, _glmw, beta).doAll(activeData._adaptedFrame); // dense
for (double[] d : gt._gram)
mult(d, obj_reg);
mult(gt._xy, obj_reg);
// glue the update and old gram together
return _currGram = GramXY.addCols(beta, activeCols, newColsIds, _currGram, gt._gram, gt._xy);
}
}
return _currGram = computeNewGram(activeData,beta,s);
}
public void setConstraintInfo(GLMGradientInfo gradientInfo, LinearConstraints[] equalityConstraints,
LinearConstraints[] lessThanEqualToConstraints, double[] lambdaEqual, double[] lambdaLessThan) {
_ginfo = gradientInfo;
_lessThanEqualToConstraints = lessThanEqualToConstraints;
_equalityConstraints = equalityConstraints;
_lambdaEqual = lambdaEqual;
_lambdaLessThanEqualTo = lambdaLessThan;
_likelihood = gradientInfo._likelihood;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/glm/ConstrainedGLMUtils.java
|
package hex.glm;
import Jama.Matrix;
import hex.DataInfo;
import water.DKV;
import water.Iced;
import water.Key;
import water.fvec.Frame;
import water.util.ArrayUtils;
import water.util.IcedHashMap;
import water.util.TwoDimTable;
import java.util.*;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import static java.util.Arrays.stream;
import static water.util.ArrayUtils.innerProduct;
public class ConstrainedGLMUtils {
  // constant settings refer to Michel Bierlaire, Optimization: Principles and Algorithms, Chapter 19, EPFL Press,
// second edition, 2018.
public static final double EPS = 1e-15;
public static final double EPS2 = 1e-12;
public static class LinearConstraints extends Iced { // store one linear constraint
public IcedHashMap<String, Double> _constraints; // column names, coefficient of constraints
public double _constraintsVal; // contains evaluated constraint values
public boolean _active = true; // only applied to less than and equal to zero constraints
public LinearConstraints() {
_constraints = new IcedHashMap<>();
_constraintsVal = Double.NaN; // represent constraint not evaluated.
}
}
public static class ConstraintsDerivatives extends Iced {
public IcedHashMap<Integer, Double> _constraintsDerivative;
public boolean _active;
public ConstraintsDerivatives(boolean active) {
_constraintsDerivative = new IcedHashMap<>();
_active = active;
}
}
public static class ConstraintsGram extends Iced {
public IcedHashMap<CoefIndices, Double> _coefIndicesValue;
public boolean _active;
public ConstraintsGram() {
_coefIndicesValue = new IcedHashMap<>();
}
}
public static class CoefIndices implements hex.glm.CoefIndices {
final int _firstCoefIndex;
final int _secondCoefIndex;
public CoefIndices(int firstInd, int secondInd) {
_firstCoefIndex = firstInd;
_secondCoefIndex = secondInd;
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
else if (o == null)
return false;
else if (this._firstCoefIndex == ((CoefIndices) o)._firstCoefIndex &&
this._secondCoefIndex == ((CoefIndices) o)._secondCoefIndex)
return true;
return false;
}
public String toString() {
return "first coefficient index: " + _firstCoefIndex + ", second coefficient index " + _secondCoefIndex;
}
}
public static class ConstraintGLMStates {
double _ckCS;
double _ckCSHalf; // = ck/2
double _epsilonkCS;
public double _epsilonkCSSquare;
double _etakCS;
double _etakCSSquare;
double _epsilon0;
String[] _constraintNames;
double[][] _initCSMatrix;
double _gradientMagSquare;
double _constraintMagSquare;
public ConstraintGLMStates(String[] constrainNames, double[][] initMatrix, GLMModel.GLMParameters parms) {
_constraintNames = constrainNames;
_initCSMatrix = initMatrix;
_ckCS = parms._constraint_c0;
_ckCSHalf = parms._constraint_c0*0.5;
_epsilonkCS = 1.0/parms._constraint_c0;
_epsilonkCSSquare =_epsilonkCS*_epsilonkCS;
_etakCS = parms._constraint_eta0/Math.pow(parms._constraint_c0, parms._constraint_alpha);
_etakCSSquare = _etakCS*_etakCS;
_epsilon0 = 1.0/parms._constraint_c0;
}
}
public static LinearConstraints[] combineConstraints(LinearConstraints[] const1, LinearConstraints[] const2) {
List<LinearConstraints> allList = new ArrayList<>();
if (const1 != null)
allList.addAll(stream(const1).collect(Collectors.toList()));
if (const2 != null)
allList.addAll(stream(const2).collect(Collectors.toList()));
return allList.size()==0 ? null : allList.stream().toArray(LinearConstraints[]::new);
}
/***
*
* This method will extract the constraints specified in beta constraint and combine it with the linear constraints
* later. Note that the linear constraints are only accepted in standard form, meaning we only accept the following
* constraint forms: 2*beta_1-3*beta_4-3 == 0 or 2*beta_1-3*beta_4-3 <= 0.
*
   * The beta constraints, on the other hand, are specified in several forms:
   * 1): -Infinity <= beta <= Infinity: ignored, no constraint here;
* 2): -Infinity <= beta <= high_val: transformed to beta - high_val <= 0, add to lessThanEqualTo constraint;
* 3): low_val <= beta <= Infinity: transformed to low_val - beta <= 0, add to lessThanEqualTo constraint;
* 4): low_val <= beta <= high_val: transformed to two constraints, low_val-beta <= 0, beta-high_val <= 0, add to
* lessThanEqualTo constraint.
* 5): val <= beta <= val: transformed to beta-val == 0, add to equalTo constraint.
*
* The newly extracted constraints will be added to fields in state.
*
*/
public static int[] extractBetaConstraints(ComputationState state, String[] coefNames) {
GLM.BetaConstraint betaC = state.activeBC();
List<LinearConstraints> equalityC = new ArrayList<>();
List<LinearConstraints> lessThanEqualToC = new ArrayList<>();
List<Integer> betaIndexOnOff = new ArrayList<>();
boolean bothEndsPresent = (betaC._betaUB != null) && (betaC._betaLB != null);
boolean lowerEndPresentOnly = (betaC._betaUB == null) && (betaC._betaLB != null);
boolean upperEndPresentOnly = (betaC._betaUB != null) && (betaC._betaLB == null);
int numCons = betaC._betaLB != null ? betaC._betaLB.length - 1 : betaC._betaUB.length - 1;
for (int index = 0; index < numCons; index++) {
if (bothEndsPresent && !Double.isInfinite(betaC._betaUB[index]) && (betaC._betaLB[index] == betaC._betaUB[index])) { // equality constraint
addBCEqualityConstraint(equalityC, betaC, coefNames, index);
betaIndexOnOff.add(1);
} else if (bothEndsPresent && !Double.isInfinite(betaC._betaUB[index]) && !Double.isInfinite(betaC._betaLB[index]) &&
(betaC._betaLB[index] < betaC._betaUB[index])) { // low < beta < high, generate two lessThanEqualTo constraints
addBCGreaterThanConstraint(lessThanEqualToC, betaC, coefNames, index);
addBCLessThanConstraint(lessThanEqualToC, betaC, coefNames, index);
betaIndexOnOff.add(1);
betaIndexOnOff.add(0);
} else if ((lowerEndPresentOnly || (betaC._betaUB != null && Double.isInfinite(betaC._betaUB[index]))) &&
betaC._betaLB != null && !Double.isInfinite(betaC._betaLB[index])) { // low < beta < infinity
addBCGreaterThanConstraint(lessThanEqualToC, betaC, coefNames, index);
betaIndexOnOff.add(1);
} else if ((upperEndPresentOnly || (betaC._betaLB != null && Double.isInfinite(betaC._betaLB[index]))) &&
betaC._betaUB != null && !Double.isInfinite(betaC._betaUB[index])) { // -infinity < beta < high
addBCLessThanConstraint(lessThanEqualToC, betaC, coefNames, index);
betaIndexOnOff.add(1);
}
}
state.setLinearConstraints(equalityC.toArray(new LinearConstraints[0]),
lessThanEqualToC.toArray(new LinearConstraints[0]), true);
return betaIndexOnOff.size() == 0 ? null : betaIndexOnOff.stream().mapToInt(x -> x).toArray();
}
/***
* This method will extract the equality constraint and add to equalityC from beta constraint by doing the following
* transformation: val <= beta <= val: transformed to beta-val == 0, add to equalTo constraint.
*/
public static void addBCEqualityConstraint(List<LinearConstraints> equalityC, GLM.BetaConstraint betaC,
String[] coefNames, int index) {
LinearConstraints oneEqualityConstraint = new LinearConstraints();
oneEqualityConstraint._constraints.put(coefNames[index], 1.0);
oneEqualityConstraint._constraints.put("constant", -betaC._betaLB[index]);
equalityC.add(oneEqualityConstraint);
}
/***
* This method will extract the greater than constraint and add to lessThanC from beta constraint by doing the following
* transformation: low_val <= beta <= Infinity: transformed to low_val - beta <= 0.
*/
public static void addBCGreaterThanConstraint(List<LinearConstraints> lessThanC, GLM.BetaConstraint betaC,
String[] coefNames, int index) {
LinearConstraints lessThanEqualToConstraint = new LinearConstraints();
lessThanEqualToConstraint._constraints.put(coefNames[index], -1.0);
lessThanEqualToConstraint._constraints.put("constant", betaC._betaLB[index]);
lessThanC.add(lessThanEqualToConstraint);
}
/***
* This method will extract the less than constraint and add to lessThanC from beta constraint by doing the following
* transformation: -Infinity <= beta <= high_val: transformed to beta - high_val <= 0.
*/
public static void addBCLessThanConstraint(List<LinearConstraints> lessThanC, GLM.BetaConstraint betaC,
String[] coefNames, int index) {
LinearConstraints greaterThanConstraint = new LinearConstraints();
greaterThanConstraint._constraints.put(coefNames[index], 1.0);
greaterThanConstraint._constraints.put("constant", -betaC._betaUB[index]);
lessThanC.add(greaterThanConstraint);
}
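  // Illustrative sketch (not part of the original source): a box bound 1 <= x1 <= 2.5 coming from the beta
  // constraints is turned by the two helpers above into the standard-form rows
  //   addBCGreaterThanConstraint: {x1 -> -1.0, constant -> 1.0}   i.e. 1 - x1 <= 0
  //   addBCLessThanConstraint:    {x1 ->  1.0, constant -> -2.5}  i.e. x1 - 2.5 <= 0
  // where "x1" is a hypothetical coefficient name used only for this example.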
/***
* This method will extract the constraints specified in the Frame with key linearConstraintFrameKey. For example,
* the following constraints a*beta_1+b*beta_2-c*beta_5 == 0, d*beta_2+e*beta_6-f <= 0 can be specified as the
* following rows:
* names values Type constraint_numbers
* beta_1 a Equal 0
* beta_2 b Equal 0
* beta_5 -c Equal 0
* beta_2 d LessThanEqual 1
* beta_6 e LessThanEqual 1
* constant -f LessThanEqual 1
*/
public static void extractLinearConstraints(ComputationState state, Key<Frame> linearConstraintFrameKey, DataInfo dinfo) {
List<LinearConstraints> equalityC = new ArrayList<>();
List<LinearConstraints> lessThanEqualToC = new ArrayList<>();
Frame linearConstraintF = DKV.getGet(linearConstraintFrameKey);
List<String> colNamesList = Stream.of(dinfo._adaptedFrame.names()).collect(Collectors.toList());
List<String> coefNamesList = Stream.of(dinfo.coefNames()).collect(Collectors.toList());
int numberOfConstraints = linearConstraintF.vec("constraint_numbers").toCategoricalVec().domain().length;
int numRow = (int) linearConstraintF.numRows();
List<Integer> rowIndices = IntStream.range(0,numRow).boxed().collect(Collectors.toList());
String constraintType;
int rowIndex;
for (int conInd = 0; conInd < numberOfConstraints; conInd++) {
if (!rowIndices.isEmpty()) {
rowIndex = rowIndices.get(0);
constraintType = linearConstraintF.vec("types").stringAt(rowIndex).toLowerCase();
if ("equal".equals(constraintType)) {
extractConstraint(linearConstraintF, rowIndices, equalityC, dinfo, coefNamesList, colNamesList);
} else if ("lessthanequal".equals(constraintType)) {
extractConstraint(linearConstraintF, rowIndices, lessThanEqualToC, dinfo, coefNamesList,
colNamesList);
} else {
throw new IllegalArgumentException("Type of linear constraints can only be Equal to LessThanEqualTo.");
}
}
}
state.setLinearConstraints(equalityC.toArray(new LinearConstraints[0]),
lessThanEqualToC.toArray(new LinearConstraints[0]), false);
}
public static void extractConstraint(Frame constraintF, List<Integer> rowIndices, List<LinearConstraints> equalC,
DataInfo dinfo, List<String> coefNames, List<String> colNames) {
List<Integer> processedRowIndices = new ArrayList<>();
int constraintNumberFrame = (int) constraintF.vec("constraint_numbers").at(rowIndices.get(0));
LinearConstraints currentConstraint = new LinearConstraints();
String constraintType = constraintF.vec("types").stringAt(rowIndices.get(0)).toLowerCase();
boolean standardize = dinfo._normMul != null;
boolean constantFound = false;
for (Integer rowIndex : rowIndices) {
String coefName = constraintF.vec("names").stringAt(rowIndex);
String currType = constraintF.vec("types").stringAt(rowIndex).toLowerCase();
if (!coefNames.contains(coefName) && !"constant".equals(coefName))
throw new IllegalArgumentException("Coefficient name " + coefName + " is not a valid coefficient name. It " +
"be a valid coefficient name or it can be constant");
if ((int) constraintF.vec("constraint_numbers").at(rowIndex) == constraintNumberFrame) {
if (!constraintType.equals(currType))
throw new IllegalArgumentException("Constraint type "+" of the same constraint must be the same but is not." +
" Expected type: "+constraintType+". Actual type: "+currType);
if ("constant".equals(coefName))
constantFound = true;
processedRowIndices.add(rowIndex);
// coefNames is valid
int colInd = colNames.indexOf(coefName)-dinfo._cats;
if (standardize && colNames.contains(coefName) && colInd >= 0) { // numerical column with standardization
currentConstraint._constraints.put(coefName, constraintF.vec("values").at(rowIndex)*dinfo._normMul[colInd]);
} else { // categorical column, constant or numerical column without standardization
currentConstraint._constraints.put(coefName, constraintF.vec("values").at(rowIndex));
}
}
}
if (!constantFound)
currentConstraint._constraints.put("constant", 0.0); // put constant of 0.0
if (currentConstraint._constraints.size() < 3)
throw new IllegalArgumentException("Linear constraint must have at least two coefficients. For constraints on" +
" just one coefficient: "+ constraintF.vec("names").stringAt(0)+", use betaConstraints instead.");
equalC.add(currentConstraint);
rowIndices.removeAll(processedRowIndices);
}
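  // Illustrative sketch (not part of the original source): for the frame layout shown in the javadoc of
  // extractLinearConstraints and with standardization off, the rows of constraint number 0 are collected into a
  // single LinearConstraints whose map is {beta_1 -> a, beta_2 -> b, beta_5 -> -c, constant -> 0.0} (the constant
  // of 0.0 is added automatically when no "constant" row is present) and appended to the equality list, while the
  // rows of constraint number 1 become {beta_2 -> d, beta_6 -> e, constant -> -f} in the less-than-equal-to list.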
public static double[][] formConstraintMatrix(ComputationState state, List<String> constraintNamesList, int[] betaEqualLessThanInd) {
// extract coefficient names from constraints
constraintNamesList.addAll(extractConstraintCoeffs(state));
// form double matrix
int numRow = (betaEqualLessThanInd == null ? 0 : ArrayUtils.sum(betaEqualLessThanInd)) +
(state._equalityConstraintsLinear == null ? 0 : state._equalityConstraintsLinear.length) +
(state._lessThanEqualToConstraintsLinear == null ? 0 : state._lessThanEqualToConstraintsLinear.length);
double[][] initConstraintMatrix = new double[numRow][constraintNamesList.size()];
fillConstraintValues(state, constraintNamesList, initConstraintMatrix, betaEqualLessThanInd);
return initConstraintMatrix;
}
public static void fillConstraintValues(ComputationState state, List<String> constraintNamesList,
double[][] initCMatrix, int[] betaLessThan) {
int rowIndex = 0;
if (state._equalityConstraintsBeta != null)
rowIndex = extractConstraintValues(state._equalityConstraintsBeta, constraintNamesList, initCMatrix, rowIndex,
null);
if (state._lessThanEqualToConstraintsBeta != null)
rowIndex= extractConstraintValues(state._lessThanEqualToConstraintsBeta, constraintNamesList, initCMatrix,
rowIndex, betaLessThan);
if (state._equalityConstraintsLinear != null)
rowIndex = extractConstraintValues(state._equalityConstraintsLinear, constraintNamesList, initCMatrix, rowIndex, null);
if (state._lessThanEqualToConstraintsLinear != null)
extractConstraintValues(state._lessThanEqualToConstraintsLinear, constraintNamesList, initCMatrix, rowIndex, null);
}
public static int extractConstraintValues(LinearConstraints[] constraints, List<String> constraintNamesList,
double[][] initCMatrix, int rowIndex, int[] betaLessThan) {
int numConstr = constraints.length;
for (int index=0; index<numConstr; index++) {
if (betaLessThan == null || betaLessThan[index] == 1) {
Set<String> coeffKeys = constraints[index]._constraints.keySet();
for (String oneKey : coeffKeys) {
if (constraintNamesList.contains(oneKey))
initCMatrix[rowIndex][constraintNamesList.indexOf(oneKey)] = constraints[index]._constraints.get(oneKey);
}
rowIndex++;
}
}
return rowIndex;
}
public static void printConstraintSummary(GLMModel model, ComputationState state, String[] coefNames) {
LinearConstraintConditions cCond = printConstraintSummary(state, coefNames);
model._output._linear_constraint_states = cCond._constraintDescriptions;
model._output._all_constraints_satisfied = cCond._allConstraintsSatisfied;
makeConstraintSummaryTable(model, cCond);
}
public static void makeConstraintSummaryTable(GLMModel model, LinearConstraintConditions cCond) {
int numRow = cCond._constraintBounds.length;
String[] colHeaders = new String[]{"constraint", "values", "condition", "condition_satisfied"};
String[] colTypes = new String[]{"string", "double", "string", "string"};
String[] colFormats = new String[]{"%s", "%5.2f", "%s", "%s"};
TwoDimTable cTable = new TwoDimTable("Beta (if exists) and Linear Constraints Table", null,
new String[numRow], colHeaders, colTypes, colFormats, "constraint");
for (int index=0; index<numRow; index++) {
cTable.set(index, 0, cCond._constraintNValues[index]);
cTable.set(index, 1, cCond._constraintValues[index]);
cTable.set(index, 2, cCond._constraintBounds[index]);
cTable.set(index, 3, cCond._constraintSatisfied[index]);
}
model._output._linear_constraints_table = cTable;
}
public static LinearConstraintConditions printConstraintSummary(ComputationState state, String[] coefNames) {
double[] beta = state.beta();
boolean constraintsSatisfied = true;
List<String> coefNameList = Arrays.stream(coefNames).collect(Collectors.toList());
List<String> constraintConditions = new ArrayList<>();
List<String> cSatisfied = new ArrayList<>();
List<Double> cValues = new ArrayList<>();
List<String> cConditions = new ArrayList<>();
List<String> constraintStrings = new ArrayList<>();
if (state._equalityConstraintsBeta != null)
constraintsSatisfied = evaluateConstraint(state, state._equalityConstraintsBeta, true, beta,
coefNameList, "Beta equality constraint: ", constraintConditions, cSatisfied, cValues,
cConditions, constraintStrings) && constraintsSatisfied;
if (state._lessThanEqualToConstraintsBeta != null)
constraintsSatisfied = evaluateConstraint(state, state._lessThanEqualToConstraintsBeta, false,
beta, coefNameList, "Beta inequality constraint: ", constraintConditions, cSatisfied, cValues,
cConditions, constraintStrings) && constraintsSatisfied;
if (state._equalityConstraintsLinear != null)
constraintsSatisfied = evaluateConstraint(state, state._equalityConstraintsLinear, true, beta,
coefNameList, "Linear equality constraint: ", constraintConditions, cSatisfied, cValues,
cConditions, constraintStrings) && constraintsSatisfied;
if (state._lessThanEqualToConstraints != null)
constraintsSatisfied = evaluateConstraint(state, state._lessThanEqualToConstraints, false, beta,
coefNameList, "Linear inequality constraint: ", constraintConditions, cSatisfied, cValues,
cConditions, constraintStrings) && constraintsSatisfied;
return new LinearConstraintConditions(constraintConditions.stream().toArray(String[]::new),
cSatisfied.stream().toArray(String[]::new), cValues.stream().mapToDouble(x->x).toArray(),
cConditions.stream().toArray(String[]::new), constraintStrings.stream().toArray(String[]::new),
constraintsSatisfied);
}
  /**
   * Evaluate and print the constraints in their original (non-standardized) form so that users can see the
   * settings as they were specified. The beta coefficients are non-standardized; however, when standardization
   * is enabled, the constraint values are adjusted to accommodate the standardized coefficients.
   */
public static boolean evaluateConstraint(ComputationState state, LinearConstraints[] constraints, boolean equalityConstr,
double[] beta, List<String> coefNames, String startStr,
List<String> constraintCond, List<String> cSatisfied, List<Double> cValues,
List<String> cConditions, List<String> constraintsStrings) {
int constLen = constraints.length;
LinearConstraints oneC;
String constrainStr;
boolean allSatisfied = true;
for (int index=0; index<constLen; index++) {
oneC = constraints[index];
constrainStr = constraint2Str(oneC, startStr, state);
evalOneConstraint(oneC, beta, coefNames);
constraintsStrings.add(constrainStr + " = " + oneC._constraintsVal);
if (equalityConstr) {
if (Math.abs(oneC._constraintsVal) <= EPS) { // constraint satisfied
          constraintCond.add(constrainStr + " == 0 is satisfied.");
cSatisfied.add("true");
} else {
constraintCond.add(constrainStr + " = " + oneC._constraintsVal + " and does not satisfy" +
" the condition == 0.");
cSatisfied.add("false");
allSatisfied = false;
}
cConditions.add("== 0");
} else {
if (oneC._constraintsVal <= 0) { // constraint satisfied
constraintCond.add(constrainStr + " <= " +oneC._constraintsVal + " which satisfies the" +
" constraint <= 0.");
cSatisfied.add("true");
} else {
constraintCond.add(constrainStr+" = " + oneC._constraintsVal + " and does not satisfy the" +
" condition <= 0");
cSatisfied.add("false");
allSatisfied = false;
}
cConditions.add("<= 0");
}
cValues.add(oneC._constraintsVal);
}
return allSatisfied;
}
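  // Illustrative sketch of the records produced above (hypothetical values, not taken from the source): for an
  //   equality constraint whose description string is constrainStr and whose |_constraintsVal| <= EPS, the method
  //   records constrainStr + " == 0 is satisfied." with cSatisfied = "true" and cConditions = "== 0"; for a
  //   less-than-or-equal-to constraint with _constraintsVal = 0.2 > 0, it records that the constraint does not
  //   satisfy the condition <= 0, with cSatisfied = "false" and cConditions = "<= 0".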
public static String constraint2Str(LinearConstraints oneConst, String startStr, ComputationState state) {
boolean isBetaConstraint = oneConst._constraints.size() < 3;
StringBuilder sb = new StringBuilder();
sb.append(startStr);
DataInfo dinfo = state.activeData();
boolean standardize = dinfo._normMul != null;
List<String> trainNames = stream(dinfo.coefNames()).collect(Collectors.toList());
double constantVal = 0;
int colInd = -1;
int coefOffset = (dinfo._catOffsets == null || dinfo._catOffsets.length == 0) ? 0 : dinfo._catOffsets[dinfo._catOffsets.length - 1];
for (String coefName : oneConst._constraints.keySet()) {
double constrVal = oneConst._constraints.get(coefName);
if (constrVal != 0) {
if ("constant".equals(coefName)) {
constantVal = constrVal;
} else if (trainNames.contains(coefName)) {
colInd = trainNames.indexOf(coefName) - coefOffset;
if (standardize && colInd >= 0 && !isBetaConstraint) {
if (constrVal > 0)
sb.append('+');
sb.append(constrVal / dinfo._normMul[colInd]);
} else {
sb.append(constrVal);
}
sb.append('*');
sb.append(coefName);
}
}
}
    // append the constant term at the end
if (constantVal != 0) {
if (constantVal > 0)
sb.append("+");
if (isBetaConstraint && colInd >= 0 && standardize)
sb.append(constantVal * dinfo._normMul[colInd]);
else
sb.append(constantVal);
}
return sb.toString();
}
public static List<String> extractConstraintCoeffs(ComputationState state) {
List<String> tConstraintCoeffName = new ArrayList<>();
boolean nonZeroConstant = false;
if (state._equalityConstraintsBeta != null)
nonZeroConstant = extractCoeffNames(tConstraintCoeffName, state._equalityConstraintsBeta);
if (state._lessThanEqualToConstraintsBeta != null)
nonZeroConstant = extractCoeffNames(tConstraintCoeffName, state._lessThanEqualToConstraintsBeta) || nonZeroConstant;
if (state._equalityConstraintsLinear != null)
nonZeroConstant = extractCoeffNames(tConstraintCoeffName, state._equalityConstraintsLinear) || nonZeroConstant;
if (state._lessThanEqualToConstraintsLinear != null)
nonZeroConstant = extractCoeffNames(tConstraintCoeffName, state._lessThanEqualToConstraintsLinear) || nonZeroConstant;
// remove duplicates in the constraints names
Set<String> noDuplicateNames = new HashSet<>(tConstraintCoeffName);
if (!nonZeroConstant) // no non-Zero constant present
noDuplicateNames.remove("constant");
return new ArrayList<>(noDuplicateNames);
}
public static boolean extractCoeffNames(List<String> coeffList, LinearConstraints[] constraints) {
int numConst = constraints.length;
boolean nonZeroConstant = false;
for (int index=0; index<numConst; index++) {
Set<String> keys = constraints[index]._constraints.keySet();
coeffList.addAll(keys);
if (keys.contains("constant"))
        nonZeroConstant = nonZeroConstant || constraints[index]._constraints.get("constant") != 0.0; // accumulate across constraints
}
return nonZeroConstant;
}
public static List<String> foundRedundantConstraints(ComputationState state, final double[][] initConstraintMatrix) {
Matrix constMatrix = new Matrix(initConstraintMatrix);
Matrix matrixSquare = constMatrix.times(constMatrix.transpose());
int rank = matrixSquare.rank();
if (rank < constMatrix.getRowDimension()) { // redundant constraints are specified
double[][] rMatVal = matrixSquare.qr().getR().getArray();
List<Double> diag = IntStream.range(0, rMatVal.length).mapToDouble(x->Math.abs(rMatVal[x][x])).boxed().collect(Collectors.toList());
int[] sortedIndices = IntStream.range(0, diag.size()).boxed().sorted((i, j) -> diag.get(i).compareTo(diag.get(j))).mapToInt(ele->ele).toArray();
List<Integer> duplicatedEleIndice = IntStream.range(0, diag.size()-rank).map(x -> sortedIndices[x]).boxed().collect(Collectors.toList());
return genRedundantConstraint(state, duplicatedEleIndice);
}
return null;
}
public static List<String> genRedundantConstraint(ComputationState state, List<Integer> duplicatedEleIndics) {
List<String> redundantConstraint = new ArrayList<>();
for (Integer redIndex : duplicatedEleIndics)
redundantConstraint.add(grabRedundantConstraintMessage(state, redIndex));
return redundantConstraint;
}
public static String grabRedundantConstraintMessage(ComputationState state, Integer constraintIndex) {
// figure out which constraint among state._fromBetaConstraints, state._equalityConstraints,
// state._lessThanEqualToConstraints is actually redundant
LinearConstraints redundantConst = getConstraintFromIndex(state, constraintIndex);
if (redundantConst != null) {
      boolean standardize = state.activeData()._normMul != null;
boolean isBetaConstraint = redundantConst._constraints.size() < 3;
StringBuilder sb = new StringBuilder();
DataInfo dinfo = state.activeData();
List<String> trainNames = stream(dinfo.coefNames()).collect(Collectors.toList());
sb.append("This constraint is redundant ");
double constantVal = 0;
int colInd = -1;
int coefOffset = (dinfo._catOffsets == null || dinfo._catOffsets.length == 0) ? 0 : dinfo._catOffsets[dinfo._catOffsets.length - 1];
for (String coefName : redundantConst._constraints.keySet()) {
double constrVal = redundantConst._constraints.get(coefName);
if (constrVal != 0) {
if ("constant".equals(coefName)) {
constantVal = constrVal;
} else if (trainNames.contains(coefName)) {
colInd = trainNames.indexOf(coefName) - coefOffset;
if (standardize && colInd >= 0 && !isBetaConstraint) {
if (constrVal > 0)
sb.append('+');
sb.append(constrVal * dinfo._normMul[colInd]);
} else {
sb.append(constrVal);
}
sb.append('*');
sb.append(coefName);
}
}
}
      // append the constant term at the end
if (constantVal != 0) {
if (constantVal > 0)
sb.append("+");
if (isBetaConstraint && colInd >= 0)
sb.append(constantVal * dinfo._normMul[colInd]);
else
sb.append(constantVal);
}
sb.append(" <= or == 0.");
sb.append(" Please remove it from your beta/linear constraints.");
return sb.toString();
} else {
return null;
}
}
public static LinearConstraints getConstraintFromIndex(ComputationState state, Integer constraintIndex) {
int constIndexWOffset = constraintIndex;
if (state._equalityConstraintsBeta != null) {
if (constIndexWOffset < state._equalityConstraintsBeta.length) {
return state._equalityConstraintsBeta[constIndexWOffset];
} else {
constIndexWOffset -= state._equalityConstraintsBeta.length;
}
}
if (state._lessThanEqualToConstraintsBeta != null) {
if (constIndexWOffset < state._lessThanEqualToConstraintsBeta.length) {
return state._lessThanEqualToConstraintsBeta[constIndexWOffset];
} else {
constIndexWOffset -= state._lessThanEqualToConstraintsBeta.length;
}
}
if (state._equalityConstraintsLinear != null) {
if (constIndexWOffset < state._equalityConstraintsLinear.length) {
return state._equalityConstraintsLinear[constIndexWOffset];
} else {
constIndexWOffset -= state._equalityConstraintsLinear.length;
}
}
if (state._lessThanEqualToConstraints != null && constIndexWOffset < state._lessThanEqualToConstraints.length) {
return state._lessThanEqualToConstraints[constIndexWOffset];
}
return null;
}
  /***
   *
   * This method evaluates the value of a constraint given the GLM coefficients and the coefficient name list.
   * Note that beta should be the normalized beta when standardize = true, and that the constraint coefficients
   * are assumed to be set correctly for the standardized coefficients.
   */
public static void evalOneConstraint(LinearConstraints constraint, double[] beta, List<String> coefNames) {
double sumV = 0.0;
Map<String, Double> constraints = constraint._constraints;
for (String coef : constraints.keySet()) {
if ("constant".equals(coef))
sumV += constraints.get(coef);
else
sumV += constraints.get(coef)*beta[coefNames.indexOf(coef)];
}
constraint._constraintsVal = sumV;
}
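  // Minimal worked example (hypothetical constraint and beta values, not from the source): for a constraint map
  //   {"x1" -> 0.5, "x2" -> 1.3, "constant" -> -3}, coefNames = ["x1", "x2"] and beta = {2.0, 1.0},
  //   evalOneConstraint stores _constraintsVal = 0.5*2.0 + 1.3*1.0 + (-3) = -0.7.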
  /***
   *
   * The initial lambda values do not matter very much. The lambda update will take care of giving each value
   * the right sign at the end of the iteration.
   *
   */
public static void genInitialLambda(Random randObj, LinearConstraints[] constraints, double[] lambda) {
int numC = constraints.length;
LinearConstraints oneC;
for (int index=0; index<numC; index++) {
lambda[index] = Math.abs(randObj.nextGaussian());
oneC = constraints[index];
if (oneC._active && oneC._constraintsVal < 0)
lambda[index] *= -1;
}
}
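  // Illustrative example (assumed random draw, not from the source): if randObj.nextGaussian() returns -0.7,
  //   lambda[index] is first set to 0.7; if that constraint is active with _constraintsVal = -0.2 < 0, the sign
  //   is flipped and lambda[index] becomes -0.7.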
public static void adjustLambda(LinearConstraints[] constraints, double[] lambda) {
int numC = constraints.length;
LinearConstraints oneC;
for (int index=0; index<numC; index++) {
oneC = constraints[index];
if (!oneC._active)
lambda[index]=0.0;
}
}
public static double[][] sumGramConstribution(ConstraintsGram[] gramConstraints, int numCoefs) {
if (gramConstraints == null)
return null;
double[][] gramContr = new double[numCoefs][numCoefs]; // includes intercept terms
int cGramSize = gramConstraints.length;
ConstraintsGram oneGram;
int coef1, coef2;
for (int index=0; index < cGramSize; index++) {
oneGram = gramConstraints[index];
if (oneGram._active) { // only process the contribution if the constraint is active
for (CoefIndices key : oneGram._coefIndicesValue.keySet()) {
coef1 = key._firstCoefIndex;
coef2 = key._secondCoefIndex;
gramContr[coef1][coef2] += oneGram._coefIndicesValue.get(key);
if (coef1 != coef2)
gramContr[coef2][coef1] = gramContr[coef1][coef2];
}
}
}
return gramContr;
}
/***
*
   * Add the contribution of the constraints (the transpose(lambda)*h(beta) term) to the gradient.
*
*/
public static void addConstraintGradient(double[] lambda, ConstraintsDerivatives[] constraintD,
GLM.GLMGradientInfo gradientInfo) {
int numConstraints = lambda.length;
ConstraintsDerivatives oneC;
for (int index=0; index<numConstraints; index++) {
oneC = constraintD[index];
if (oneC._active) {
for (Integer key: oneC._constraintsDerivative.keySet()) {
gradientInfo._gradient[key] += lambda[index]*oneC._constraintsDerivative.get(key);
}
}
}
}
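  // Worked example with assumed values (not from the source): with lambda[0] = 0.5 and the derivative of
  //   constraint 0 with respect to coefficient 2 equal to 1.3, the loop above adds 0.5*1.3 = 0.65 to
  //   gradientInfo._gradient[2], i.e. the gradient of the transpose(lambda)*h(beta) term.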
/***
* This method adds the contribution to the gradient from the penalty term ck/2*transpose(h(beta))*h(beta)
*/
public static void addPenaltyGradient(ConstraintsDerivatives[] constraintDeriv, LinearConstraints[] constraintD,
GLM.GLMGradientInfo gradientInfo, double ck) {
int numConstraints = constraintDeriv.length;
ConstraintsDerivatives oneD;
LinearConstraints oneConts;
for (int index=0; index<numConstraints; index++) {
oneD = constraintDeriv[index];
if (oneD._active) {
oneConts = constraintD[index];
for (Integer coefK : oneD._constraintsDerivative.keySet()) {
gradientInfo._gradient[coefK] += ck*oneConts._constraintsVal*oneD._constraintsDerivative.get(coefK);
}
}
}
}
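  // Worked example with assumed values (not from the source): with ck = 10, constraint value h(beta) = -0.7 and
  //   derivative dh/dbeta[2] = 1.3, the loop above adds 10*(-0.7)*1.3 = -9.1 to gradientInfo._gradient[2], which
  //   is the gradient of the ck/2*h(beta)^2 penalty term for that constraint.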
/***
* This method will update the constraint parameter values cKCS, epsilonkCS, etakCS. Refer to the doc, Algorithm
* 19.1
*/
public static void updateConstraintParameters(ComputationState state, double[] lambdaEqual, double[]lambdaLessThan,
LinearConstraints[] equalConst, LinearConstraints[] lessThanConst,
GLMModel.GLMParameters parms) {
// calculate ||h(beta)|| square, ||gradient|| square
double hBetaMagSquare = state._csGLMState._constraintMagSquare;
if (hBetaMagSquare <= state._csGLMState._etakCSSquare) { // implement line 26 to line 29 of Algorithm 19.1
if (equalConst != null)
updateLambda(lambdaEqual, state._csGLMState._ckCS, equalConst);
if (lessThanConst != null)
updateLambda(lambdaLessThan, state._csGLMState._ckCS, lessThanConst);
state._csGLMState._epsilonkCS = state._csGLMState._epsilonkCS/state._csGLMState._ckCS;
      state._csGLMState._etakCS = state._csGLMState._etakCS/Math.pow(state._csGLMState._ckCS, parms._constraint_beta);
} else { // implement line 31 to 34 of Algorithm 19.1
state._csGLMState._ckCS = state._csGLMState._ckCS*parms._constraint_tau; // tau belongs to [4,10]
state._csGLMState._ckCSHalf = state._csGLMState._ckCS*0.5;
state._csGLMState._epsilonkCS = state._csGLMState._epsilon0/state._csGLMState._ckCS;
state._csGLMState._etakCS = parms._constraint_eta0/Math.pow(state._csGLMState._ckCS, parms._constraint_alpha);
}
state._csGLMState._epsilonkCSSquare = state._csGLMState._epsilonkCS*state._csGLMState._epsilonkCS;
    state._csGLMState._etakCSSquare = state._csGLMState._etakCS*state._csGLMState._etakCS;
}
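  // Illustrative trace with assumed values (not from the source): suppose _ckCS = 10, _etakCSSquare = 0.01 and
  //   _constraintMagSquare = 0.004.  Since 0.004 <= 0.01, the lambdas of active constraints are updated by
  //   lambda += 10*h(beta), _epsilonkCS is divided by 10 and _etakCS by 10^_constraint_beta.  Had
  //   _constraintMagSquare been 0.04 instead, _ckCS would be multiplied by _constraint_tau and _epsilonkCS and
  //   _etakCS recomputed from _epsilon0 and _constraint_eta0.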
public static void calculateConstraintSquare(ComputationState state, LinearConstraints[] equalConst,
LinearConstraints[] lessThanConst) {
double sumSquare = 0;
if (equalConst != null)
sumSquare += stream(equalConst).mapToDouble(x -> x._constraintsVal*x._constraintsVal).sum();
if (lessThanConst != null) // only counts magnitude when the constraint is active
sumSquare += stream(lessThanConst).filter(x -> x._active).mapToDouble(x -> x._constraintsVal*x._constraintsVal).sum();
state._csGLMState._constraintMagSquare = sumSquare;
}
public static void updateLambda(double[] lambda, double ckCS, LinearConstraints[] constraints) {
int numC = constraints.length;
LinearConstraints oneC;
for (int index=0; index<numC; index++) {
oneC = constraints[index];
if (oneC._active)
lambda[index] += ckCS*oneC._constraintsVal;
}
}
  /***
   * This method checks whether the stopping conditions for constrained GLM are met, namely:
   * 1. the squared magnitude of the gradient of L with respect to beta and lambda is <= ComputationState.EPS_CS_SQUARE;
   * 2. the squared magnitude of h(beta) is <= ComputationState.EPS_CS.
   *
   * It returns true when both conditions are met, and false otherwise.
   * See the doc, Algorithm 19.1, line 36.
   */
public static boolean constraintsStop(GLM.GLMGradientInfo gradientInfo, ComputationState state) {
state._csGLMState._gradientMagSquare = innerProduct(gradientInfo._gradient, gradientInfo._gradient);
    return state._csGLMState._constraintMagSquare <= ComputationState.EPS_CS &&
            state._csGLMState._gradientMagSquare <= ComputationState.EPS_CS_SQUARE;
}
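  // Illustrative check with assumed values (not from the source): with _constraintMagSquare = 1e-14 and the
  //   computed _gradientMagSquare = 1e-13, the method returns true exactly when 1e-14 <= ComputationState.EPS_CS
  //   and 1e-13 <= ComputationState.EPS_CS_SQUARE; otherwise the outer augmented-Lagrangian iterations continue.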
public static boolean activeConstraints(LinearConstraints[] equalityC, LinearConstraints[] lessThanEqualToC) {
if (equalityC != null)
return true;
    return stream(lessThanEqualToC).anyMatch(x -> x._active);
}
/***
   * This method calls getGradient to calculate the gradient, likelihood and objective function values. In addition,
* it will add to the gradient and objective function the contribution from the linear constraints.
*/
public static GLM.GLMGradientInfo calGradient(double[] betaCnd, ComputationState state, GLM.GLMGradientSolver ginfo,
double[] lambdaE, double[] lambdaL, LinearConstraints[] constraintE,
LinearConstraints[] constraintL) {
// todo: need to add support for predictors removed for whatever reason
// calculate gradients
GLM.GLMGradientInfo gradientInfo = ginfo.getGradient(betaCnd, state); // gradient without constraints
boolean hasEqualConstraints = constraintE != null;
boolean hasLessConstraints = constraintL != null;
// add gradient, objective and likelihood contribution from constraints
if (hasEqualConstraints) {
addConstraintGradient(lambdaE, state._derivativeEqual, gradientInfo);
addPenaltyGradient(state._derivativeEqual, constraintE, gradientInfo, state._csGLMState._ckCS);
gradientInfo._objVal += state.addConstraintObj(lambdaE, constraintE, state._csGLMState._ckCSHalf);
}
if (hasLessConstraints) {
addConstraintGradient(lambdaL, state._derivativeLess, gradientInfo);
addPenaltyGradient(state._derivativeLess, constraintL, gradientInfo, state._csGLMState._ckCS);
gradientInfo._objVal += state.addConstraintObj(lambdaL, constraintL, state._csGLMState._ckCSHalf);
}
return gradientInfo;
}
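  // Sketch of the augmented objective assembled above, assuming state.addConstraintObj follows the same
  //   augmented-Lagrangian form as the gradient terms: objective = GLM objective
  //   + transpose(lambdaE)*hE(beta) + ck/2*||hE(beta)||^2 + transpose(lambdaL)*hL(beta) + ck/2*||hL(beta)||^2,
  //   where hE and hL are the equality and inequality constraint values and only active inequality constraints
  //   contribute.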
  /**
   * Simple method to evaluate all linear constraints given the coefficient values. In addition, it will determine
   * whether each linear constraint is active. Only active constraints are included in the objective function
   * calculations.
   *
   * An equality constraint is always treated as active (the magnitude check is currently disabled).
   * A less-than-or-equal-to constraint is active when its constraint value is greater than or equal to zero.
   */
public static void updateConstraintValues(double[] betaCnd, List<String> coefNames,
LinearConstraints[] equalityConstraints,
LinearConstraints[] lessThanEqualToConstraints) {
if (equalityConstraints != null) // equality constraints
Arrays.stream(equalityConstraints).forEach(constraint -> {
evalOneConstraint(constraint, betaCnd, coefNames);
// constraint._active = (Math.abs(constraint._constraintsVal) > EPS2);
constraint._active = true;
});
if (lessThanEqualToConstraints != null) // less than or equal to constraints
Arrays.stream(lessThanEqualToConstraints).forEach(constraint -> {
evalOneConstraint(constraint, betaCnd, coefNames);
constraint._active = constraint._constraintsVal >= 0;
});
}
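  // Illustrative outcome with assumed constraint values (not from the source): after evalOneConstraint, an
  //   equality constraint is always marked active, while a less-than-or-equal-to constraint with
  //   _constraintsVal = 0.4 is marked active and one with _constraintsVal = -0.1 is marked inactive.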
public static String[] collinearInConstraints(String[] collinear_cols, String[] constraintNames) {
List<String> cNames = Arrays.stream(constraintNames).collect(Collectors.toList());
return Arrays.stream(collinear_cols).filter(x -> (cNames.contains(x))).toArray(String[]::new);
}
public static int countNumConst(ComputationState state) {
int numConst = 0;
    // count constraints coming from beta constraints and linear constraints
numConst += state._equalityConstraintsBeta == null ? 0 : state._equalityConstraintsBeta.length;
numConst += state._lessThanEqualToConstraintsBeta == null ? 0 : state._lessThanEqualToConstraintsBeta.length/2;
numConst += state._equalityConstraintsLinear == null ? 0 : state._equalityConstraintsLinear.length;
numConst += state._lessThanEqualToConstraints == null ? 0 : state._lessThanEqualToConstraints.length;
return numConst;
}
public static class LinearConstraintConditions {
final String[] _constraintDescriptions; // 0.5C2 + 1.3C2+3
final String[] _constraintSatisfied;
final double[] _constraintValues;
    final String[] _constraintBounds; // == 0 for equality constraints, <= 0 for less-than-or-equal-to constraints
final String[] _constraintNValues; // 0.5C2+1.4C2-0.5 = 2.0
final boolean _allConstraintsSatisfied;
public LinearConstraintConditions(String[] constraintC, String[] cSatisfied, double[] cValues, String[] cBounds,
String[] cNV, boolean conditionS) {
_constraintDescriptions = constraintC;
_constraintSatisfied = cSatisfied;
_constraintValues = cValues;
_constraintBounds = cBounds;
_constraintNValues = cNV;
_allConstraintsSatisfied = conditionS;
}
}
}