java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/glm/DispersionTask.java
package hex.glm; import hex.DataInfo; import water.MRTask; import water.fvec.Chunk; import water.fvec.Frame; import water.fvec.NewChunk; import water.util.FrameUtils; import water.util.Log; import java.util.Arrays; import java.util.HashMap; import java.util.Map; import java.util.stream.IntStream; import static hex.glm.DispersionTask.ConstColNames.*; import static hex.glm.DispersionTask.InfoColNames.*; import static org.apache.commons.math3.special.Gamma.logGamma; public class DispersionTask { public static final int RESPIND = 0; public static final int MUIND = 1; public static final int WEIGHTIND = 2; public enum ConstColNames { JMaxConst, zConst, LogPart2Const, LogOneOverY, LogOneOverPiY, FirstOrderDerivConst, SecondOrderDerivConst } ; public enum InfoColNames { MaxValIndex, LOGZ, LOGWVMax, LOGDWVMax, LOGD2WVMax, JkL, JkU, DjkL, DjkU, D2jkL, D2jkU, SumWV, SumDWV, SumD2WV, LL, DLL, D2LL } ; /*** * Class to pre-calculate constants assocated with the following processes: * 1. maximum term index: jMaxConst (col 0) different between p < 2 and p > 2 * 2. constant term associated with z: zConst, different between p < 2 and p > 2 * 3. log likelihood: part2Const (col 1), same for all p * 4. log 1/y for 1<p<2 * 5. log 1/(Pi*y) for p>2 * 5. dlogf/dphi firstOrderDerivConst, same for all p * 6. d2logf/dphi2 secondOrderDerivConst, same for all p * In addition, we also have maximum term with maximum index: logMaxConst, not part of constFrame. */ public static class ComputeTweedieConstTsk extends MRTask<ComputeTweedieConstTsk> { double _variancePower; double _alpha; boolean _weightPresent; double _twoMinusP; double _oneOver2MinusP; double _oneMinusP; double _oneOver1MinusP; double _oneOverPi; double _pMinusOne; double _oneMinusAlpha; public ComputeTweedieConstTsk(double vPower, Frame infoFrame) { _variancePower = vPower; _alpha = (2.0 - vPower) / (1.0 - vPower); _weightPresent = infoFrame.numCols() > 2; _twoMinusP = 2 - vPower; _oneOver2MinusP = 1.0 / _twoMinusP; _oneMinusP = 1 - _variancePower; _oneOver1MinusP = 1.0 / _oneMinusP; _oneOverPi = 1.0 / Math.PI; _pMinusOne = _variancePower - 1; _oneMinusAlpha = 1.0 - _alpha; } public void map(Chunk[] chks, NewChunk[] constChks) { Map<ConstColNames, Integer> constColName2Ind = new HashMap<>(); ComputeMaxSumSeriesTsk.setConstIndices(constColName2Ind, 0); int chkLen = chks[0].len(); for (int rowInd = 0; rowInd < chkLen; rowInd++) { // calculate jMaxConst calJMaxConst(chks, constChks, rowInd, constColName2Ind.get(JMaxConst)); // calculate zConst calZConst(chks, constChks, rowInd, constColName2Ind.get(zConst)); // calculate part2Const for ll calPart2Const(chks, constChks, rowInd, constColName2Ind.get(LogPart2Const)); // calculate part1Const, the 1/y calPart1LogConst(chks, constChks, rowInd, constColName2Ind.get(LogOneOverY)); // calculate partConst, 1/(PI*y) calPart1LogPIConst(chks, constChks, rowInd, constColName2Ind.get(LogOneOverPiY)); // calculate constants for derivatives calDerivConst(chks, constChks, rowInd, new int[]{constColName2Ind.get(FirstOrderDerivConst), constColName2Ind.get(ConstColNames.SecondOrderDerivConst)}); } } public void calZConst(Chunk[] chks, NewChunk[] constChks, int rowInd, int newChkColInd) { double response = chks[RESPIND].atd(rowInd); if (Double.isFinite(response)) { if (response > 0) { double val = _variancePower < 2 ? 
Math.pow(response, -_alpha) * Math.pow(_pMinusOne, _alpha) * _oneOver2MinusP : -Math.pow(response, -_alpha) * Math.pow(_pMinusOne, _alpha) * _oneOver2MinusP; if (_weightPresent) val *= Math.pow(chks[WEIGHTIND].atd(rowInd), _oneMinusAlpha); constChks[newChkColInd].addNum(val); } else constChks[newChkColInd].addNum(0); } else { constChks[newChkColInd].addNA(); } } public void calDerivConst(Chunk[] chks, NewChunk[] constChks, int rowInd, int[] newChkColInd) { double response = chks[RESPIND].atd(rowInd); double mu = chks[MUIND].atd(rowInd); double val; double weight = _weightPresent ? chks[WEIGHTIND].atd(rowInd) : 1; if (Double.isFinite(response) && Double.isFinite(mu)) { val = -response * Math.pow(mu, _oneMinusP) * _oneOver1MinusP + Math.pow(mu, _twoMinusP) * _oneOver2MinusP; val *= weight * weight; constChks[newChkColInd[0]].addNum(val); // dll/dphi constant val *= -2 * weight; constChks[newChkColInd[1]].addNum(val); // d2ll/dphi2 constant } else { constChks[newChkColInd[0]].addNA(); constChks[newChkColInd[1]].addNA(); } } public void calPart1LogConst(Chunk[] chks, NewChunk[] constChks, int rowInd, int newChkColInd) { double response = chks[RESPIND].atd(rowInd); if (Double.isFinite(response) && response > 0) { constChks[newChkColInd].addNum(Math.log(1.0 / response)); } else { constChks[newChkColInd].addNA(); } } public void calPart1LogPIConst(Chunk[] chks, NewChunk[] constChks, int rowInd, int newChkColInd) { double response = chks[RESPIND].atd(rowInd); if (Double.isFinite(response) && response > 0) { constChks[newChkColInd].addNum(Math.log(_oneOverPi / response)); } else { constChks[newChkColInd].addNA(); } } public void calPart2Const(Chunk[] chks, NewChunk[] constChks, int rowInd, int newChkColInd) { double response = chks[RESPIND].atd(rowInd); double mu = chks[MUIND].atd(rowInd); if (Double.isFinite(response) && Double.isFinite(mu)) { double val; val = -Math.pow(mu, _twoMinusP) * _oneOver2MinusP; if (response > 0) val += response * Math.pow(mu, _oneMinusP) * _oneOver1MinusP; if (_weightPresent) val *= chks[WEIGHTIND].atd(rowInd); constChks[newChkColInd].addNum(val); } else { constChks[newChkColInd].addNA(); } } public void calJMaxConst(Chunk[] chks, NewChunk[] constChks, int rowInd, int newChkColInd) { double response = chks[RESPIND].atd(rowInd); double mu = chks[MUIND].atd(rowInd); if (Double.isFinite(response) && Double.isFinite(mu) && response > 0) { double val = _variancePower < 2 ? Math.pow(response, _twoMinusP) * _oneOver2MinusP : -Math.pow(response, _twoMinusP) * _oneOver2MinusP; if (_weightPresent) val *= chks[WEIGHTIND].atd(rowInd); constChks[newChkColInd].addNum(val); } else { constChks[newChkColInd].addNA(); } } } /*** * This class will compute the following for every row of the dataset: * 1. index of maximum magnitude of infinite series; * 2. log(z) * 3. W or V maximum * 5. dW or dV maximum * 6. d2W or d2V maximum * 7. KL or JL for W or V * 8. KU or JU for W or V * 9. KL or JL for dW or dV * 10. KU or JU for dW or dV * 11. KL or JL for d2W or d2V * 12. KU or JU for d2W or d2V * 13. log likelihood * 14. dlog likelihood / d phi * 15. 
d2log likelihood / d2 phi */ public static class ComputeMaxSumSeriesTsk extends MRTask<ComputeMaxSumSeriesTsk> { double _variancePower; double _dispersionParameter; double _alpha; boolean _weightPresent; Frame _infoFrame; int _constColOffset; int _workColOffset; int _nWorkCols; double _oneOverPhiPower; double _oneMinusAlpha; double _oneOverPhiSquare; double _oneOverPhi3; double _logLL; double _dLogLL; double _d2LogLL; boolean _debugOn; double _oneOverDispersion; double _alphaMinus1TLogDispersion; double _alphaTimesPI; double _alphaMinus1OverPhi; double _alphaMinus1SquareOverPhiSquare; int _nWVs = 3; int _indexBound; double _logDispersionEpsilon; boolean[] _computationAccuracy; // set to false when upper bound exceeds _indexBound int _constantColumnNumber; long _nobsLL; long _nobsDLL; long _nobsD2LL; long _nobNegSum; final boolean _calAll; // if false, speed up task to calculate loglikelihood only and skip other computations public ComputeMaxSumSeriesTsk(TweedieMLDispersionOnly tdispersion, GLMModel.GLMParameters parms, boolean calALL) { _variancePower = tdispersion._variancePower; _dispersionParameter = tdispersion._dispersionParameter; _alpha = (2.0 - _variancePower) / (1.0 - _variancePower); _weightPresent = tdispersion._weightPresent; _infoFrame = tdispersion._infoFrame; _nWorkCols = tdispersion._nWorkingCol; _constantColumnNumber = tdispersion._constFrameNames.length; _constColOffset = _infoFrame.numCols() - _nWorkCols - tdispersion._constNCol; _workColOffset = _infoFrame.numCols() - _nWorkCols; _oneMinusAlpha = 1 - _alpha; _oneOverPhiPower = 1.0 / Math.pow(_dispersionParameter, _oneMinusAlpha); _oneOverPhiSquare = 1.0 / (_dispersionParameter * _dispersionParameter); _oneOverPhi3 = _oneOverPhiSquare / _dispersionParameter; _debugOn = parms._debugTDispersionOnly; _oneOverDispersion = 1 / _dispersionParameter; _alphaMinus1TLogDispersion = (_alpha - 1) * Math.log(_dispersionParameter); _alphaTimesPI = _alpha * Math.PI; _indexBound = parms._max_series_index; _logDispersionEpsilon = Math.log(parms._tweedie_epsilon); _computationAccuracy = new boolean[_nWVs]; _alphaMinus1OverPhi = (_alpha - 1) / _dispersionParameter; _alphaMinus1SquareOverPhiSquare = _alphaMinus1OverPhi * _alphaMinus1OverPhi; _calAll = calALL; } public static void setInfoIndices(Map<InfoColNames, Integer> infoColName2Ind, int constOffset, boolean weightPresent) { int offset = weightPresent ? 3 : 2; InfoColNames[] infoC = InfoColNames.values(); offset += constOffset; int infoColLen = infoC.length; for (int index = 0; index < infoColLen; index++) { infoColName2Ind.put(infoC[index], index + offset); } } public static void setConstIndices(Map<ConstColNames, Integer> constColName2Ind, int offset) { ConstColNames[] constVal = ConstColNames.values(); int constantColNum = constVal.length; for (int index = 0; index < constantColNum; index++) constColName2Ind.put(constVal[index], index + offset); } public void map(Chunk[] chks) { int chkLen = chks[0].len(); int jKIndMax = 0, jKL = 0, jKU = 0, djKL = 0, djKU = 0, d2jKL = 0, d2jKU = 0; double wvMax = 0, dwvMax = 0, d2wvMax = 0, logZ = 0, sumWVj = 0, sumDWVj = 0, sumD2WVj = 0, oneOverSumWVj = 0; _logLL = 0; _dLogLL = 0; _d2LogLL = 0; _nobsLL = 0; _nobsDLL = 0; _nobsD2LL = 0; _nobNegSum = 0; double tempLL = 0, tempDLL = 0, tempD2LL = 0; Map<ConstColNames, Integer> constColName2Ind = new HashMap<>(); Map<InfoColNames, Integer> infoColName2Ind = new HashMap<>(); setConstIndices(constColName2Ind, _weightPresent ? 
3 : 2); setInfoIndices(infoColName2Ind, constColName2Ind.size(), _weightPresent); double weight; for (int rInd = 0; rInd < chkLen; rInd++) { double response = chks[0].atd(rInd); weight = _weightPresent ? chks[WEIGHTIND].atd(rInd) : 1; if (response >= 0) { // this part is only valid for response >= 0 if (response > 0) { // calculate maximum index of series; jKIndMax = findMaxTermIndex(chks, rInd, constColName2Ind.get(JMaxConst)); // calculate log(z) logZ = calLogZ(chks, rInd, constColName2Ind.get(zConst)); // calculate maximum of Wj/Vk, dWj/dVk, d2Wj/dVk2 without derivative constants of (alpha-1)/Phi // and without 1/y or 1/(PI*y) wvMax = calLogWVMax(chks, rInd, jKIndMax, logZ); double logjKIndMax = Math.log(jKIndMax); dwvMax = wvMax + logjKIndMax; d2wvMax = dwvMax + logjKIndMax; // locate jL/kL, jU/kU for W/V, dW/dV, d2W/dV2; jKL = estimateLowerBound(jKIndMax, wvMax, logZ, new EvalLogWVEnv()); jKU = estimateUpperBound(jKIndMax, wvMax, logZ, 0, new EvalLogWVEnv()); if (_calAll) { djKL = estimateLowerBound(jKIndMax, dwvMax, logZ, new EvalLogDWVEnv()); djKU = estimateUpperBound(jKIndMax, dwvMax, logZ, 1, new EvalLogDWVEnv()); d2jKL = estimateLowerBound(jKIndMax, d2wvMax, logZ, new EvalLogD2WVEnv()); d2jKU = estimateUpperBound(jKIndMax, d2wvMax, logZ, 2, new EvalLogD2WVEnv()); } // sum the series W, dW, d2W but not include 1/y or 1/(PI*y) sumWVj = sumWV(jKL, jKU, wvMax, logZ, new EvalLogWVEnv()); if (sumWVj <= 0.0) _nobNegSum++; } if (sumWVj > 0) { tempLL = evalLogLikelihood(chks, rInd, sumWVj, constColName2Ind); if (Double.isFinite(tempLL)) { _logLL += tempLL; _nobsLL += weight; } if (_calAll) { if (response > 0) { oneOverSumWVj = 1.0 / sumWVj; sumDWVj = sumWV(djKL, djKU, dwvMax, logZ, new EvalLogDWVEnv()) * _alphaMinus1OverPhi; sumD2WVj = sumWV(d2jKL, d2jKU, d2wvMax, logZ, new EvalLogD2WVEnv()) * _alphaMinus1SquareOverPhiSquare - sumDWVj * _oneOverDispersion; } tempDLL = evalDlldPhi(chks, rInd, sumDWVj, oneOverSumWVj, constColName2Ind); if (Double.isFinite(tempDLL)) { _dLogLL += tempDLL; _nobsDLL += weight; } tempD2LL = evalD2lldPhi2(chks, rInd, sumDWVj, sumD2WVj, oneOverSumWVj, constColName2Ind); if (Double.isFinite(tempD2LL)) { _d2LogLL += tempD2LL; _nobsD2LL += weight; } } } } if (_debugOn) setDebugValues(rInd, jKIndMax, logZ, wvMax, dwvMax, d2wvMax, jKL, jKU, djKL, djKU, d2jKL, d2jKU, sumWVj, sumDWVj, sumD2WVj, tempLL, tempDLL, tempD2LL, chks, infoColName2Ind, response); } if (_debugOn && _variancePower > 2) Log.info("Chunk IDX " + chks[0].cidx() + " contains " + _nobNegSum + " rows of data with series" + " sum < 0."); } public void setDebugValues(int rInd, int jkIndMax, double logZ, double wvMax, double dwvMax, double d2wvMax, int jKL, int jKU, int djKL, int djKU, int d2jKL, int d2jKU, double sumWV, double sumDWV, double sumD2WV, double ll, double dll, double d2ll, Chunk[] chks, Map<InfoColNames, Integer> infoColName2Ind, double response) { if (response == 0) { chks[infoColName2Ind.get(MaxValIndex)].set(rInd, 0); chks[infoColName2Ind.get(LOGZ)].set(rInd, 0); chks[infoColName2Ind.get(LOGWVMax)].set(rInd, 0); chks[infoColName2Ind.get(LOGDWVMax)].set(rInd, 0); chks[infoColName2Ind.get(LOGD2WVMax)].set(rInd, 0); chks[infoColName2Ind.get(JkL)].set(rInd, 0); chks[infoColName2Ind.get(JkU)].set(rInd, 0); chks[infoColName2Ind.get(DjkL)].set(rInd, 0); chks[infoColName2Ind.get(DjkU)].set(rInd, 0); chks[infoColName2Ind.get(D2jkL)].set(rInd, 0); chks[infoColName2Ind.get(D2jkU)].set(rInd, 0); chks[infoColName2Ind.get(SumWV)].set(rInd, 0); chks[infoColName2Ind.get(SumDWV)].set(rInd, 0); 
chks[infoColName2Ind.get(SumD2WV)].set(rInd, 0); chks[infoColName2Ind.get(LL)].set(rInd, ll); chks[infoColName2Ind.get(DLL)].set(rInd, dll); chks[infoColName2Ind.get(D2LL)].set(rInd, d2ll); } else { chks[infoColName2Ind.get(MaxValIndex)].set(rInd, jkIndMax); chks[infoColName2Ind.get(LOGZ)].set(rInd, logZ); chks[infoColName2Ind.get(LOGWVMax)].set(rInd, wvMax); chks[infoColName2Ind.get(LOGDWVMax)].set(rInd, dwvMax); chks[infoColName2Ind.get(LOGD2WVMax)].set(rInd, d2wvMax); chks[infoColName2Ind.get(JkL)].set(rInd, jKL); chks[infoColName2Ind.get(JkU)].set(rInd, jKU); chks[infoColName2Ind.get(DjkL)].set(rInd, djKL); chks[infoColName2Ind.get(DjkU)].set(rInd, djKU); chks[infoColName2Ind.get(D2jkL)].set(rInd, d2jKL); chks[infoColName2Ind.get(D2jkU)].set(rInd, d2jKU); chks[infoColName2Ind.get(SumWV)].set(rInd, sumWV); chks[infoColName2Ind.get(SumDWV)].set(rInd, sumDWV); chks[infoColName2Ind.get(SumD2WV)].set(rInd, sumD2WV); chks[infoColName2Ind.get(LL)].set(rInd, ll); chks[infoColName2Ind.get(DLL)].set(rInd, dll); chks[infoColName2Ind.get(D2LL)].set(rInd, d2ll); } } @Override public void reduce(ComputeMaxSumSeriesTsk other) { this._logLL += other._logLL; this._dLogLL += other._dLogLL; this._d2LogLL += other._d2LogLL; this._nobsLL += other._nobsLL; this._nobsDLL += other._nobsDLL; this._nobsD2LL += other._nobsD2LL; this._nobNegSum += other._nobNegSum; } @Override public void postGlobal() { if (_variancePower > 2 && _debugOn) Log.info("number of data rows with negative sum " + _nobNegSum); } public int estimateLowerBound(int jOrkMax, double logWorVmax, double logZ, CalWVdWVd2WV cVal) { if (jOrkMax == 1) // small speedup return 1; double logWV1 = cVal.calculate(1, _alpha, logZ, logWorVmax, _variancePower); if ((logWV1 - logWorVmax) >= _logDispersionEpsilon) return 1; else { // call recursive function int indexLow = 1; int indexHigh = jOrkMax; int indexMid = (int) Math.round(0.5 * (indexLow + indexHigh)); double logVal; while ((indexLow < indexHigh) && (indexHigh != indexMid) && (indexLow != indexMid)) { logVal = cVal.calculate(indexMid, _alpha, logZ, logWorVmax, _variancePower); if (logVal - logWorVmax < _logDispersionEpsilon) indexLow = indexMid; else indexHigh = indexMid; indexMid = (int) Math.round(0.5 * (indexLow + indexHigh)); } if (cVal.calculate(indexLow, _alpha, logZ, logWorVmax, _variancePower)-logWorVmax < _logDispersionEpsilon) return indexLow; else return indexMid; // difference between indexLow and indexHigh is only 1 } } public int estimateUpperBound(int jOrkMax, double logWorVmax, double logZ, int wvIndex, CalWVdWVd2WV cVal) { double logWj = cVal.calculate(_indexBound, _alpha, logZ, logWorVmax, _variancePower); if ((logWj - logWorVmax) > _logDispersionEpsilon) { _computationAccuracy[wvIndex] = false; return _indexBound; } else { int indexLow = jOrkMax; int indexHigh = _indexBound; int indexMid = (int) Math.round(0.5 * (indexLow + indexHigh)); while ((indexLow < indexHigh) && (indexHigh != indexMid) && (indexLow != indexMid)) { logWj = cVal.calculate(indexMid, _alpha, logZ, logWorVmax, _variancePower); if (logWj - logWorVmax < _logDispersionEpsilon) indexHigh = indexMid; else indexLow = indexMid; indexMid = (int) Math.round(0.5 * (indexLow + indexHigh)); } return indexMid; } } double sumWV(int jkL, int jkU, double logWVMax, double logZ, CalWVdWVd2WV cCal) { if (_variancePower < 2) { return Math.exp(Math.log(IntStream.rangeClosed(jkL, jkU).mapToDouble(x -> Math.exp(cCal.calculate(x, _alpha, logZ, logWVMax, _variancePower) - logWVMax)).sum()) + logWVMax); } else { // dealing with Vk, not 
using logWVMax because the sum can be slightly negative... double seriesSum = IntStream.rangeClosed(jkL, jkU).mapToDouble(x -> Math.exp(cCal.calculate(x, _alpha, logZ, logWVMax, _variancePower) - logWVMax) * Math.pow(-1, x) * Math.sin(-x * _alphaTimesPI)).sum(); if (seriesSum > 0) return Math.exp(logWVMax + Math.log(seriesSum)); else return Math.exp(logWVMax) * seriesSum; } } public int findMaxTermIndex(Chunk[] chks, int rowInd, int colInd) { if (chks[RESPIND].atd(rowInd) != 0) return (int) Math.max(1, Math.ceil(chks[colInd].atd(rowInd) * _oneOverDispersion)); else return 0; } public double calLogZ(Chunk[] chks, int rInd, int zConstCol) { if (chks[RESPIND].atd(rInd) != 0) return Math.log(chks[zConstCol].atd(rInd)) + _alphaMinus1TLogDispersion; else return 0; } public double calLogWVMax(Chunk[] chks, int rowInd, int indexMax, double logZ) { double resp = chks[RESPIND].atd(rowInd); if (_variancePower < 2 && resp != 0) { // 1 < p < 2 return indexMax * logZ - logGamma(1 + indexMax) - logGamma(-_alpha * indexMax); } else { //p > 2 if (resp != 0) return indexMax * logZ + logGamma(1 + _alpha * indexMax) - logGamma(1 + indexMax); else return 0; } } public double evalDlldPhi(Chunk[] chks, int rowInd, double sumDWVj, double oneOverSumWVj, Map<ConstColNames, Integer> constColName2Ind) { double response = chks[RESPIND].atd(rowInd); if (response == 0) return chks[constColName2Ind.get(FirstOrderDerivConst)].atd(rowInd) * _oneOverPhiSquare; else if (Double.isFinite(response)) return chks[constColName2Ind.get(FirstOrderDerivConst)].atd(rowInd) * _oneOverPhiSquare + sumDWVj * oneOverSumWVj; else return 0.0; } public double evalD2lldPhi2(Chunk[] chks, int rowInd, double sumDWVj, double sumD2WVj, double oneOverSumWVj, Map<ConstColNames, Integer> constColName2Ind) { double response = chks[RESPIND].atd(rowInd); if (response == 0) { return chks[constColName2Ind.get(SecondOrderDerivConst)].atd(rowInd) * _oneOverPhi3; } else if (Double.isFinite(response)) { return chks[constColName2Ind.get(SecondOrderDerivConst)].atd(rowInd) * _oneOverPhi3 + sumD2WVj * oneOverSumWVj - sumDWVj * sumDWVj * oneOverSumWVj * oneOverSumWVj; } else { return 0.0; } } public double evalLogLikelihood(Chunk[] chks, int rowInd, double sumWV, Map<ConstColNames, Integer> constColName2Ind) { double response = chks[RESPIND].atd(rowInd); double logPart2 = _oneOverDispersion * chks[constColName2Ind.get(LogPart2Const)].atd(rowInd); if (Double.isFinite(response)) { if (response == 0.0) { return logPart2; } else { if (_variancePower < 2) return Math.log(sumWV) + chks[constColName2Ind.get(LogOneOverY)].atd(rowInd) + logPart2; else return Math.log(sumWV) + chks[constColName2Ind.get(LogOneOverPiY)].atd(rowInd) + logPart2; } } else { return 0.0; } } /*** * This interface is used to calculate one item of the series in log. 
*/ public interface CalWVdWVd2WV { public double calculate(int jOrk, double alpha, double logZ, double funcMax, double varianceP); } public static class EvalLogWVEnv implements CalWVdWVd2WV { @Override public double calculate(int jOrk, double alpha, double logZ, double funcMax, double varianceP) { if (varianceP < 2) { return jOrk * logZ - logGamma(1 + jOrk) - logGamma(-alpha * jOrk); } else { return jOrk * logZ + logGamma(1 + alpha * jOrk) - logGamma(1 + jOrk); } } } public static class EvalLogDWVEnv implements CalWVdWVd2WV { @Override public double calculate(int jOrk, double alpha, double logZ, double funcMax, double varianceP) { return (new EvalLogWVEnv()).calculate(jOrk, alpha, logZ, funcMax, varianceP) + Math.log(jOrk); } } public static class EvalLogD2WVEnv implements CalWVdWVd2WV { @Override public double calculate(int jOrk, double alpha, double logZ, double funcMax, double varianceP) { return (new EvalLogWVEnv()).calculate(jOrk, alpha, logZ, funcMax, varianceP) + 2 * Math.log(jOrk); } } } public static class GenPrediction extends MRTask<GenPrediction> { final GLMModel _m; final DataInfo _dinfo; final boolean _sparse; private final double[] _beta; public GenPrediction(double[] beta, GLMModel m, DataInfo dinfo) { _beta = beta; _m = m; _dinfo = dinfo; _sparse = FrameUtils.sparseRatio(dinfo._adaptedFrame) < .5; } public void map(Chunk[] chks, NewChunk[] preds) { double[] ps; ps = new double[_m._output._nclasses + 1]; float[] res = new float[1]; final int nc = _m._output.nclasses(); final int ncols = nc == 1 ? 1 : nc + 1; // Regression has 1 predict col; classification also has class distribution // compute if (_sparse) { for (DataInfo.Row r : _dinfo.extractSparseRows(chks)) processRow(r, res, ps, preds, ncols); } else { DataInfo.Row r = _dinfo.newDenseRow(); for (int rid = 0; rid < chks[0]._len; ++rid) { _dinfo.extractDenseRow(chks, rid, r); processRow(r, res, ps, preds, ncols); } } } private void processRow(DataInfo.Row r, float[] res, double[] ps, NewChunk[] preds, int ncols) { if (_dinfo._responses != 0) res[0] = (float) r.response[0]; if (r.predictors_bad) { Arrays.fill(ps, Double.NaN); } else if (r.weight == 0) { Arrays.fill(ps, 0); } else { ps[0] = _m._parms.linkInv(r.innerProduct(_beta) + r.offset); } for (int c = 0; c < ncols; c++) // Output predictions; sized for train only (excludes extra test classes) preds[c].addNum(ps[c]); } } }
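/***
 * A minimal, self-contained sketch (illustrative only, not part of the H2O API) of the series
 * evaluation that ComputeTweedieConstTsk and ComputeMaxSumSeriesTsk perform chunk-wise above,
 * for a single unweighted observation with 1 < p < 2 and y > 0. The Tweedie log-density is
 * log f = log(sum_j W_j) - log(y) + (y*mu^(1-p)/(1-p) - mu^(2-p)/(2-p))/phi, with
 * log W_j = j*log(z) - logGamma(1+j) - logGamma(-alpha*j). The H2O task binary-searches lower
 * and upper index bounds (jL, jU) around the maximum term before summing; this sketch simply
 * scans upward with a hard index cap and a relative tolerance, both of which are assumed values.
 */
import static org.apache.commons.math3.special.Gamma.logGamma;

public class TweedieSeriesSketch {
  /** Log-density of one observation y > 0 under Tweedie(mu, phi, p) with 1 < p < 2. */
  static double logDensity(double y, double mu, double phi, double p) {
    double alpha = (2 - p) / (1 - p);                           // negative for 1 < p < 2
    double logZ = -alpha * Math.log(y) + alpha * Math.log(p - 1)
        - Math.log(2 - p) - (1 - alpha) * Math.log(phi);        // same z as calZConst/calLogZ
    int jMax = (int) Math.max(1, Math.ceil(Math.pow(y, 2 - p) / ((2 - p) * phi)));
    double logWMax = jMax * logZ - logGamma(1 + jMax) - logGamma(-alpha * jMax);
    double sum = 0, logEps = Math.log(1e-17);                   // cf. parms._tweedie_epsilon
    for (int j = 1; j < 20000; j++) {                           // cf. parms._max_series_index
      double logWj = j * logZ - logGamma(1 + j) - logGamma(-alpha * j);
      if (j > jMax && logWj - logWMax < logEps) break;          // past the peak and negligible
      if (logWj - logWMax >= logEps)                            // factor out the max term for stability
        sum += Math.exp(logWj - logWMax);
    }
    double logW = logWMax + Math.log(sum);
    double logPart2 = (y * Math.pow(mu, 1 - p) / (1 - p) - Math.pow(mu, 2 - p) / (2 - p)) / phi;
    return logW - Math.log(y) + logPart2;                       // cf. evalLogLikelihood
  }
}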
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/glm/DispersionUtils.java
package hex.glm; import hex.DataInfo; import water.Job; import water.Key; import water.MRTask; import water.Scope; import water.fvec.Chunk; import water.fvec.Frame; import water.fvec.Vec; import water.util.Log; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.stream.Collectors; import static org.apache.commons.math3.special.Gamma.*; public class DispersionUtils { /*** * Estimate dispersion factor using maximum likelihood. I followed section IV of the doc in * https://github.com/h2oai/h2o-3/issues/7013. */ public static double estimateGammaMLSE(GLMTask.ComputeGammaMLSETsk mlCT, double seOld, double[] beta, GLMModel.GLMParameters parms, ComputationState state, Job job, GLMModel model) { double constantValue = mlCT._wsum + mlCT._sumlnyiOui - mlCT._sumyiOverui; DataInfo dinfo = state.activeData(); Frame adaptedF = dinfo._adaptedFrame; long currTime = System.currentTimeMillis(); long modelBuiltTime = currTime - model._output._start_time; long timeLeft = parms._max_runtime_secs > 0 ? (long) (parms._max_runtime_secs * 1000 - modelBuiltTime) : Long.MAX_VALUE; // stopping condition for while loop are: // 1. magnitude of iterative change to se < EPS // 2. there are more than MAXITERATIONS of updates // 2. for every 100th iteration, we check for additional stopping condition: // a. User requests stop via stop_requested; // b. User sets max_runtime_sec and that time has been exceeded. for (int index=0; index<parms._max_iterations_dispersion; index++) { GLMTask.ComputeDiTriGammaTsk ditrigammatsk = new GLMTask.ComputeDiTriGammaTsk(null, dinfo, job._key, beta, parms, seOld).doAll(adaptedF); double numerator = mlCT._wsum*Math.log(seOld)-ditrigammatsk._sumDigamma+constantValue; // equation 2 of doc double denominator = mlCT._wsum/seOld - ditrigammatsk._sumTrigamma; // equation 3 of doc double change = numerator/denominator; if (denominator == 0 || !Double.isFinite(change)) return seOld; if (Math.abs(change) < parms._dispersion_epsilon) // stop if magnitude of iterative updates to se < EPS return seOld-change; else { double se = seOld - change; if (se < 0) // heuristic to prevent seInit <= 0 seOld *= 0.5; else seOld = se; } if ((index % 100 == 0) && // check for additional stopping conditions for every 100th iterative steps (job.stop_requested() || // user requested stop via stop_requested() (System.currentTimeMillis()-currTime) > timeLeft)) { // time taken exceeds GLM building time Log.warn("gamma dispersion parameter estimation was interrupted by user or due to time out. " + "Estimation process has not converged. Increase your max_runtime_secs if you have set maximum" + " runtime for your model building process."); return seOld; } } Log.warn("gamma dispersion parameter estimation fails to converge within "+ parms._max_iterations_dispersion+" iterations. Increase max_iterations_dispersion or decrease " + "dispersion_epsilon."); return seOld; } private static double getTweedieLogLikelihood(GLMModel.GLMParameters parms, DataInfo dinfo, double phi, Vec mu) { final double llh = new TweedieEstimator( parms._tweedie_variance_power, phi, false, false, false, false, true) .compute(mu, dinfo._adaptedFrame.vec(parms._response_column), parms._weights_column == null ? 
dinfo._adaptedFrame.makeCompatible(new Frame(Vec.makeOne(dinfo._adaptedFrame.numRows())))[0] : dinfo._adaptedFrame.vec(parms._weights_column)) ._loglikelihood; Log.debug("Tweedie LogLikelihood(p=" + parms._tweedie_variance_power + ", phi=" + phi + ") = " + llh); return llh; } private static double goldenRatioDispersionSearch(GLMModel.GLMParameters parms, DataInfo dinfo, Vec mu, List<Double> logLikelihoods, List<Double> phis, Job job) { // make monotonic List<Double> sortedPhis = phis.stream().sorted().collect(Collectors.toList()); List<Double> sortedLLHs = new ArrayList<>(); for (int i = 0; i < sortedPhis.size(); i++) { double phi = sortedPhis.get(i); int index = phis.indexOf(phi); sortedLLHs.add(logLikelihoods.get(index)); } // did we already find a region where there is the maximum? boolean increasing = true; double lowerBound = 1e-16; double upperBound = sortedPhis.get(0); for (int i = 1; i < sortedPhis.size(); i++) { upperBound = sortedPhis.get(i); if (sortedLLHs.get(i - 1) > sortedLLHs.get(i)) { increasing = false; if (i > 2) lowerBound = sortedPhis.get(i - 2); else { sortedPhis.add(0, lowerBound); sortedLLHs.add(0, getTweedieLogLikelihood(parms, dinfo, lowerBound, mu)); } break; } } int counter = sortedPhis.size(); int iterationsLeft = parms._max_iterations_dispersion - 10 * counter; while (increasing && iterationsLeft > counter && !job.stop_requested()) { // not yet counter++; upperBound *= 2; sortedPhis.add(upperBound); double newLLH = getTweedieLogLikelihood(parms, dinfo, upperBound, mu); Log.debug("Tweedie looking for the region containing the max. likelihood; upper bound = " + upperBound + "; llh = " + newLLH); sortedLLHs.add(newLLH); if (sortedLLHs.get(counter - 2) > sortedLLHs.get(counter - 1)) { if (counter > 3) lowerBound = sortedPhis.get(counter - 3); Log.debug("Tweedie found the region containing the max. likelihood; phi lower bound = " + lowerBound + "; phi upper bound = " + upperBound); break; } } // now we should have the maximum between lowerBound and upperBound double d = (upperBound - lowerBound) * 0.618; // (hiPhi - lowPhi)/golden ratio double lowPhi = lowerBound; double hiPhi = upperBound; double midLoPhi = sortedPhis.get(counter - 2); double midLoLLH = sortedLLHs.get(counter - 2); double midHiPhi = lowPhi + d; double midHiLLH = getTweedieLogLikelihood(parms, dinfo, midHiPhi, mu); if (midLoPhi > midHiPhi) { midLoPhi = hiPhi - d; midLoLLH = getTweedieLogLikelihood(parms, dinfo, midLoPhi, mu); } assert lowerBound <= midLoPhi; assert midLoPhi <= midHiPhi; assert midHiPhi <= upperBound; for (; counter < iterationsLeft; counter++) { Log.info("Tweedie golden-section search[iter=" + counter + ", phis=(" + lowPhi + ", " + midLoPhi + ", " + midHiPhi + ", " + hiPhi + "), likelihoods=(" + "..., " + midLoLLH + ", " + midHiLLH + ", ...)]"); if (job.stop_requested()) { return (hiPhi + lowPhi) / 2; } if (midHiLLH > midLoLLH) { lowPhi = midLoPhi; } else { hiPhi = midHiPhi; } d = (hiPhi - lowPhi) * 0.618; // (hiPhi - lowPhi)/golden ratio if (hiPhi - lowPhi < parms._dispersion_epsilon) { return (hiPhi + lowPhi) / 2; } midLoPhi = hiPhi - d; midHiPhi = lowPhi + d; midLoLLH = getTweedieLogLikelihood(parms, dinfo, midLoPhi, mu); midHiLLH = getTweedieLogLikelihood(parms, dinfo, midHiPhi, mu); } return (hiPhi + lowPhi) / 2; } /** * This method estimates the tweedie dispersion parameter. It will use Newton's update if the new update will * increase the loglikelihood. Otherwise, the dispersion will be updated as * dispersionNew = dispersionCurr + learningRate * update. 
* In addition, line search is used to increase the magnitude of the update when the update magnitude is too small * (< 1e-3). * * Every 10th iteration it checks if the optimization doesn't diverge. If it looks like it diverged, it uses a * different likelihood estimation that should be more accurate (combination of Series and Fourier inversion method) * but without gradients. For this reason it will use a Golden section search which doesn't require gradients and * has a linear convergence. * * For details, please see sections IV.I, IV.II, and IV.III in document here: */ public static double estimateTweedieDispersionOnly(GLMModel.GLMParameters parms, GLMModel model, Job job, double[] beta, DataInfo dinfo) { if (parms._tweedie_variance_power >= 2 && dinfo._adaptedFrame.vec(parms._response_column).min() <= 0) { Log.warn("Response contains zeros or negative values but "+ "Tweedie variance power does not support zeros. "+ "Instances with response <= 0 will be skipped."); model.addWarning("Response contains zeros or negative values but "+ "Tweedie variance power does not support zeros. "+ "Instances with response <= 0 will be skipped."); } DispersionTask.GenPrediction gPred = new DispersionTask.GenPrediction(beta, model, dinfo).doAll( 1, Vec.T_NUM, dinfo._adaptedFrame); Vec mu = Scope.track(gPred.outputFrame(Key.make(), new String[]{"prediction"}, null)).vec(0); List<Double> logLikelihoodSanityChecks = new ArrayList<>(); List<Double> dispersionsSanityChecks = new ArrayList<>(); logLikelihoodSanityChecks.add(getTweedieLogLikelihood(parms, dinfo, parms._init_dispersion_parameter, mu)); dispersionsSanityChecks.add(parms._init_dispersion_parameter); final double dispersion = goldenRatioDispersionSearch(parms, dinfo, mu, logLikelihoodSanityChecks, dispersionsSanityChecks, job); Log.info("Tweedie dispersion estimate = "+dispersion); return dispersion; /* // FIXME: The Newton's method seems not to be reproducible on jenkins (runit_GLM_tweedie_ml_dispersion_estimation_only.R) long timeLeft = parms._max_runtime_secs > 0 ? 
(long) (parms._max_runtime_secs * 1000 - modelBuiltTime) : Long.MAX_VALUE; long currTime = System.currentTimeMillis(); long modelBuiltTime = currTime - model._output._start_time; TweedieMLDispersionOnly tDispersion = new TweedieMLDispersionOnly(parms.train(), parms, model, beta, dinfo); double dispersionCurr = tDispersion._dispersionParameter; // initial value of dispersion parameter double dispersionNew; double update; double logLLCurr, logLLNext; List<Double> loglikelihoodList = new ArrayList<>(); List<Double> llChangeList = new ArrayList<>(); List<Double> dispersionList = new ArrayList<>(); double bestLogLikelihoodFromSanityCheck = getTweedieLogLikelihood(parms, dinfo,dispersionCurr,mu); List<Double> logLikelihoodSanityChecks = new ArrayList<>(); List<Double> dispersionsSanityChecks = new ArrayList<>(); logLikelihoodSanityChecks.add(bestLogLikelihoodFromSanityCheck); dispersionsSanityChecks.add(dispersionCurr); for (int index = 0; index < parms._max_iterations_dispersion; index++) { Log.info("Tweedie dispersion ML estimation [iter="+index+", phi="+dispersionCurr+"]"); tDispersion.updateDispersionP(dispersionCurr); DispersionTask.ComputeMaxSumSeriesTsk computeTask = new DispersionTask.ComputeMaxSumSeriesTsk(tDispersion, parms, true); computeTask.doAll(tDispersion._infoFrame); logLLCurr = computeTask._logLL / computeTask._nobsLL; // record loglikelihood values loglikelihoodList.add(logLLCurr); dispersionList.add(dispersionCurr); if (loglikelihoodList.size() > 1) { llChangeList.add(loglikelihoodList.get(index) - loglikelihoodList.get(index - 1)); boolean converged = (Math.abs(llChangeList.get(llChangeList.size() - 1)) < parms._dispersion_epsilon); if (index % 10 == 0 || converged) { // do a sanity check once in a while and if we think we converged double newLogLikelihood = getTweedieLogLikelihood(parms, dinfo, dispersionCurr, mu); logLikelihoodSanityChecks.add(newLogLikelihood); dispersionsSanityChecks.add(dispersionCurr); if (newLogLikelihood < bestLogLikelihoodFromSanityCheck) { // we are getting worse. Log.info("Tweedie sanity check FAIL. 
Trying Golden-section search instead of Newton's method."); tDispersion.cleanUp(); final double dispersion = goldenRatioDispersionSearch(parms, dinfo, mu, logLikelihoodSanityChecks, dispersionsSanityChecks, job); Log.info("Tweedie dispersion estimate = "+dispersion); return dispersion; } bestLogLikelihoodFromSanityCheck = Math.max(bestLogLikelihoodFromSanityCheck, newLogLikelihood); Log.debug("Tweedie sanity check OK"); } if (converged) { tDispersion.cleanUp(); // early stop if loglikelihood has'n changed by > parms._dispersion_epsilon Log.info("last dispersion "+dispersionCurr); return dispersionList.get(loglikelihoodList.indexOf(Collections.max(loglikelihoodList))); } } if (loglikelihoodList.size() > 10) { if (loglikelihoodList.stream().skip(loglikelihoodList.size() - 3).noneMatch((x) -> x != null && Double.isFinite(x))) { Log.warn("tweedie dispersion parameter estimation got stuck in numerically unstable region."); tDispersion.cleanUp(); // If there's NaN Collections.max picks it return Double.NaN; } } // get new update to dispersion update = computeTask._dLogLL / computeTask._d2LogLL; if (Math.abs(update) < 1e-3) { // line search for speedup and increase magnitude of change update = dispersionLS(computeTask, tDispersion, parms); if (!Double.isFinite(update)) { Log.info("last dispersion "+dispersionCurr); return dispersionList.get(loglikelihoodList.indexOf(Collections.max(loglikelihoodList))); } dispersionNew = dispersionCurr - update; } else { dispersionNew = dispersionCurr - update; if (dispersionNew < 0) dispersionNew = dispersionCurr*0.5; tDispersion.updateDispersionP(dispersionNew); DispersionTask.ComputeMaxSumSeriesTsk computeTaskNew = new DispersionTask.ComputeMaxSumSeriesTsk(tDispersion, parms, false); computeTaskNew.doAll(tDispersion._infoFrame); logLLNext = computeTaskNew._logLL / computeTaskNew._nobsLL; if (logLLNext <= logLLCurr) dispersionNew = dispersionCurr + parms._dispersion_learning_rate * update; } if (dispersionNew < 0) dispersionCurr *= 0.5; else dispersionCurr = dispersionNew; if ((index % 100 == 0) && // check for additional stopping conditions for every 100th iterative steps (job.stop_requested() || // user requested stop via stop_requested() (System.currentTimeMillis() - currTime) > timeLeft)) { // time taken exceeds model build time Log.warn("tweedie dispersion parameter estimation was interrupted by user or due to time out." + " Estimation process has not converged. 
Increase your max_runtime_secs if you have set " + "maximum runtime for your model building process."); tDispersion.cleanUp(); Log.info("last dispersion "+dispersionCurr); return dispersionList.get(loglikelihoodList.indexOf(Collections.max(loglikelihoodList))); } } tDispersion.cleanUp(); if (dispersionList.size()>0) { Log.info("last dispersion "+dispersionCurr); return dispersionList.get(loglikelihoodList.indexOf(Collections.max(loglikelihoodList))); } else return dispersionCurr; */ } static class NegativeBinomialGradientAndHessian extends MRTask<NegativeBinomialGradientAndHessian> { double _grad; double _hess; double _theta; double _invTheta; double _invThetaSq; double _llh; NegativeBinomialGradientAndHessian(double theta) { assert theta > 0; _theta = theta; _invTheta = 1./theta; _invThetaSq = _invTheta*_invTheta; } @Override public void map(Chunk[] cs) { // mu, y, w for (int i = 0; i < cs[0]._len; i++) { final double mu = cs[0].atd(i); final double y = cs[1].atd(i); final double w = cs[2].atd(i); _grad += w * ( -mu*(y+_invTheta)/(mu*_theta+1) + ( y + ( Math.log(mu*_theta + 1) - digamma(y+_invTheta) + digamma(_invTheta) ) * _invTheta ) * _invTheta ); _hess += w * ( (mu*mu*(y+_invTheta)/Math.pow(mu*_theta+1, 2)) + (-y + (2 * mu) / (mu*_theta+1) + ((-2 * Math.log(mu*_theta + 1)) + 2*digamma(y + _invTheta) - 2*digamma(_invTheta) + ( trigamma(y+_invTheta) - trigamma(_invTheta) ) * _invTheta ) * _invTheta ) * _invThetaSq ); _llh += logGamma(y + _invTheta) - logGamma(_invTheta) - logGamma(y + 1) + y * Math.log(_theta * mu) - (y+_invTheta) * Math.log(1 + _theta * mu); } } @Override public void reduce(NegativeBinomialGradientAndHessian mrt) { _grad += mrt._grad; _hess += mrt._hess; _llh += mrt._llh; } }; static class CalculateNegativeBinomialScoreAndInfo extends MRTask<CalculateNegativeBinomialScoreAndInfo> { double _score; double _info; double _theta; CalculateNegativeBinomialScoreAndInfo(double theta) { _theta = theta; } @Override public void map(Chunk[] cs) { // mu, y, w for (int i = 0; i < cs[0]._len; i++) { final double w = cs[2].atd(i); _score += w * (digamma(_theta + cs[1].atd(i)) - digamma(_theta) + Math.log(_theta) + 1 - Math.log(_theta + cs[0].atd(i)) - (cs[1].atd(i) + _theta) / (cs[0].atd(i) + _theta)); _info += w * (-trigamma(_theta + cs[1].atd(i)) + trigamma(_theta) - 1/_theta + 2/(cs[0].atd(i) + _theta) - (cs[1].atd(i) + _theta)/ Math.pow(cs[0].atd(i) + _theta, 2)); } } @Override public void reduce(CalculateNegativeBinomialScoreAndInfo mrt) { _score += mrt._score; _info += mrt._info; } }; static class CalculateInitialTheta extends MRTask<CalculateInitialTheta> { double _theta0; @Override public void map(Chunk[] cs) { // mu, y, w for (int i = 0; i < cs[0]._len; i++) { _theta0 += cs[2].atd(i) * Math.pow(cs[1].atd(i)/cs[0].atd(i) - 1, 2); } } @Override public void reduce(CalculateInitialTheta mrt) { _theta0 += mrt._theta0; } }; public static double estimateNegBinomialDispersionMomentMethod(GLMModel model, double[] beta, DataInfo dinfo, Vec weights, Vec response, Vec mu) { class MomentMethodThetaEstimation extends MRTask<MomentMethodThetaEstimation> { double _muSqSum; double _sSqSum; double _muSum; double _wSum; @Override public void map(Chunk[] cs) { // mu, y, w for (int i = 0; i < cs[0]._len; i++) { final double w = cs[2].atd(i); _muSqSum += w * Math.pow(cs[0].atd(i), 2); _sSqSum += w * Math.pow(cs[1].atd(i) - cs[0].atd(i), 2); _muSum += w * cs[0].atd(i); _wSum += w; } } @Override public void reduce(MomentMethodThetaEstimation mrt) { _muSqSum += mrt._muSqSum; _sSqSum += mrt._sSqSum; 
_muSum += mrt._muSum; _wSum += mrt._wSum; } } ; MomentMethodThetaEstimation mm = new MomentMethodThetaEstimation().doAll(mu, response, weights); return mm._muSqSum / (mm._sSqSum - mm._muSum / mm._wSum); } public static double estimateNegBinomialDispersionFisherScoring(GLMModel.GLMParameters parms, GLMModel model, double[] beta, DataInfo dinfo) { Vec weights = dinfo._weights ? dinfo.getWeightsVec() : dinfo._adaptedFrame.makeCompatible(new Frame(Vec.makeOne(dinfo._adaptedFrame.numRows())))[0]; final double nRows = weights == null ? dinfo._adaptedFrame.numRows() : weights.mean() * weights.length(); DispersionTask.GenPrediction gPred = new DispersionTask.GenPrediction(beta, model, dinfo).doAll( 1, Vec.T_NUM, dinfo._adaptedFrame); Vec mu = gPred.outputFrame(Key.make(), new String[]{"prediction"}, null).vec(0); Vec response = dinfo._adaptedFrame.vec(dinfo.responseChunkId(0)); double invTheta = nRows / new CalculateInitialTheta().doAll(mu, response, weights)._theta0; double delta = 1; int i = 0; for (; i < parms._max_iterations_dispersion; i++) { if (Math.abs(delta) < parms._dispersion_epsilon) break; invTheta = Math.abs(invTheta); CalculateNegativeBinomialScoreAndInfo si = new CalculateNegativeBinomialScoreAndInfo(invTheta).doAll(mu, response, weights); delta = si._score/si._info; invTheta += delta; } if (invTheta < 0) Log.warn("Dispersion estimate truncated at zero."); if (i == parms._max_iterations_dispersion) Log.warn("Iteration limit reached."); return 1./invTheta; } public static double dispersionLS(DispersionTask.ComputeMaxSumSeriesTsk computeTsk, TweedieMLDispersionOnly tDispersion, GLMModel.GLMParameters parms) { double currObj = Double.NEGATIVE_INFINITY; double newObj; double dispersionCurr = tDispersion._dispersionParameter; double dispersionNew; double update = computeTsk._dLogLL/computeTsk._d2LogLL; for (int index=0; index < parms._max_iterations_dispersion; index++){ if (Double.isFinite(update)) { dispersionNew = dispersionCurr-update; tDispersion.updateDispersionP(dispersionNew); DispersionTask.ComputeMaxSumSeriesTsk computeTskNew = new DispersionTask.ComputeMaxSumSeriesTsk(tDispersion, parms, false).doAll(tDispersion._infoFrame); newObj = computeTskNew._logLL/computeTskNew._nobsLL; if (newObj > currObj) { currObj = newObj; update = 2*update; } else { return update; } } else { return Double.NaN; } } return update; } public static double[] makeZeros(double[] sourceCoeffs, double[] targetCoeffs) { int size = targetCoeffs.length; for (int valInd = 0; valInd < size; valInd++) targetCoeffs[valInd] = targetCoeffs[valInd]-sourceCoeffs[valInd]; return targetCoeffs; } }
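/***
 * A minimal sketch (names and the functional interface are illustrative, not H2O API) of the
 * golden-section search pattern that goldenRatioDispersionSearch above applies to the Tweedie
 * dispersion phi: maximize a unimodal log-likelihood on a bracket without gradients, shrinking
 * the bracket by the golden ratio until it is narrower than a tolerance (the role played by
 * parms._dispersion_epsilon). The real code first finds the bracket by doubling the upper bound
 * until the log-likelihood starts decreasing, and it threads GLMParameters, DataInfo and a
 * prediction Vec through getTweedieLogLikelihood; here the objective is just a function.
 */
import java.util.function.DoubleUnaryOperator;

public class GoldenSectionSketch {
  static final double R = 0.618;  // 1/golden ratio, as used above

  /** Maximize llh on [lo, hi] to within eps, assuming llh is unimodal on the bracket. */
  static double maximize(DoubleUnaryOperator llh, double lo, double hi, double eps, int maxIter) {
    double d = (hi - lo) * R;
    double midLo = hi - d, midHi = lo + d;
    double fLo = llh.applyAsDouble(midLo), fHi = llh.applyAsDouble(midHi);
    for (int i = 0; i < maxIter && (hi - lo) > eps; i++) {
      if (fHi > fLo) {            // maximum lies in [midLo, hi]; reuse midHi as the new midLo
        lo = midLo;
        midLo = midHi; fLo = fHi;
        midHi = lo + (hi - lo) * R; fHi = llh.applyAsDouble(midHi);
      } else {                    // maximum lies in [lo, midHi]; reuse midLo as the new midHi
        hi = midHi;
        midHi = midLo; fHi = fLo;
        midLo = hi - (hi - lo) * R; fLo = llh.applyAsDouble(midLo);
      }
    }
    return (lo + hi) / 2;
  }
}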
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/glm/GLM.java
package hex.glm; import Jama.Matrix; import hex.*; import hex.glm.GLMModel.GLMOutput; import hex.glm.GLMModel.GLMParameters.Family; import hex.glm.GLMModel.GLMParameters.Link; import hex.glm.GLMModel.GLMParameters.MissingValuesHandling; import hex.glm.GLMModel.GLMParameters.Solver; import hex.glm.GLMModel.GLMWeightsFun; import hex.glm.GLMModel.Submodel; import hex.glm.GLMTask.*; import hex.gram.Gram; import hex.gram.Gram.Cholesky; import hex.gram.Gram.NonSPDMatrixException; import hex.optimization.ADMM; import hex.optimization.ADMM.L1Solver; import hex.optimization.ADMM.ProximalSolver; import hex.optimization.L_BFGS; import hex.optimization.L_BFGS.ProgressMonitor; import hex.optimization.L_BFGS.Result; import hex.optimization.OptimizationUtils.*; import hex.util.CheckpointUtils; import jsr166y.CountedCompleter; import org.joda.time.format.DateTimeFormat; import org.joda.time.format.DateTimeFormatter; import water.*; import water.exceptions.H2OFailException; import water.exceptions.H2OModelBuilderIllegalArgumentException; import water.fvec.Frame; import water.fvec.InteractionWrappedVec; import water.fvec.Vec; import water.parser.BufferedString; import water.udf.CFuncRef; import water.util.*; import java.text.DecimalFormat; import java.text.NumberFormat; import java.util.*; import java.util.stream.Collectors; import java.util.stream.IntStream; import static hex.ModelMetrics.calcVarImp; import static hex.gam.MatrixFrameUtils.GamUtils.keepFrameKeys; import static hex.glm.ComputationState.*; import static hex.glm.ConstrainedGLMUtils.*; import static hex.glm.DispersionUtils.*; import static hex.glm.GLMModel.GLMParameters; import static hex.glm.GLMModel.GLMParameters.CHECKPOINT_NON_MODIFIABLE_FIELDS; import static hex.glm.GLMModel.GLMParameters.DispersionMethod.*; import static hex.glm.GLMModel.GLMParameters.Family.*; import static hex.glm.GLMModel.GLMParameters.GLMType.gam; import static hex.glm.GLMModel.GLMParameters.Influence.dfbetas; import static hex.glm.GLMModel.GLMParameters.Solver.IRLSM; import static hex.glm.GLMUtils.*; import static water.util.ArrayUtils.copy2DArray; /** * Created by tomasnykodym on 8/27/14. * * Generalized linear model implementation. */ public class GLM extends ModelBuilder<GLMModel,GLMParameters,GLMOutput> { static double BAD_CONDITION_NUMBER = 20000; static NumberFormat lambdaFormatter = new DecimalFormat(".##E0"); static NumberFormat devFormatter = new DecimalFormat(".##"); private static final DateTimeFormatter DATE_TIME_FORMATTER = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss"); public static final int SCORING_INTERVAL_MSEC = 15000; // scoreAndUpdateModel every minute unless score every iteration is set public String _generatedWeights = null; public double[][][] _penaltyMatrix = null; public String[][] _gamColnames = null; public int[][] _gamColIndices = null; // corresponding column indices in dataInfo public BetaInfo _betaInfo; private boolean _earlyStopEnabled = false; private boolean _checkPointFirstIter = false; // indicate first iteration for checkpoint model private boolean _betaConstraintsOn = false; private boolean _tweedieDispersionOnly = false; private boolean _linearConstraintsOn = false; public GLM(boolean startup_once){super(new GLMParameters(),startup_once);} public GLM(GLMModel.GLMParameters parms) { super(parms); init(false); } /*** * This constructor is only called by GAM when it is trying to build a GAM model using GLM. * * Internal function, DO NOT USE. 
*/ public GLM(GLMModel.GLMParameters parms, double[][][] penaltyMatrix, String[][] gamColnames) { super(parms); init(false); _penaltyMatrix = penaltyMatrix; _gamColnames = gamColnames; } public GLM(GLMModel.GLMParameters parms,Key dest) { super(parms,dest); init(false); } private transient GLMDriver _driver; public boolean isSupervised() { return true; } @Override public ModelCategory[] can_build() { return new ModelCategory[]{ ModelCategory.Regression, ModelCategory.Binomial, }; } @Override public boolean havePojo() { return true; } @Override public boolean haveMojo() { return true; } private double _lambdaCVEstimate = Double.NaN; // lambda cross-validation estimate private int _bestCVSubmodel; // best submodel index found during cv private boolean _doInit = true; // flag setting whether or not to run init private double [] _xval_deviances; // store cross validation average deviance private double [] _xval_sd; // store the standard deviation of cross-validation private double [][] _xval_zValues; // store cross validation average p-values private double [] _xval_deviances_generate_SH;// store cv average deviance for generate_scoring_history=True private double [] _xval_sd_generate_SH; // store the standard deviation of cv for generate_scoring_historty=True private int[] _xval_iters_generate_SH; // store cv iterations combined from the various cv models private boolean _insideCVCheck = false; // detect if we are running inside cv_computeAndSetOptimalParameters private boolean _enumInCS = false; // true if there are enum columns in beta constraints private Frame _betaConstraints = null; private boolean _cvRuns = false; /** * GLM implementation of N-fold cross-validation. * We need to compute the sequence of lambdas for the main model so the folds share the same lambdas. * We also want to set the _cv flag so that the dependent jobs know they're being run withing CV (so e.g. they do not unlock the models in the end) * @return Cross-validation Job * (builds N+1 models, all have train+validation metrics, the main model has N-fold cross-validated validation metrics) */ @Override public void computeCrossValidation() { // init computes global list of lambdas init(true); _cvRuns = true; if (error_count() > 0) throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(GLM.this); super.computeCrossValidation(); } /* This method aligns the submodels across CV folds. It will keep only those alpha values that were the best at least in * one CV fold. * * In the following depiction each submodel is represented by a tuple (alpha_value, lambda_value). Ideally each CV fold * should have the same submodels but there can be differences due to max_iteration and max_runtime_secs constraint. * Iterations are reset when alpha value change which can cause missing model in the middle and max_runtime_secs constraint * can cause missing submodels at the end of the submodel array. 
* * CV1: Best submodel * +------+--------+--------+--------+--------+------+--------+--------+--------+--------+--------+-V------+ * | 0, 1 | 0, 0.9 | 0, 0.8 | 0, 0.7 | 0, 0.6 | 1, 1 | 1, 0.9 | 1, 0.8 | 1, 0.7 | 1, 0.6 | 1, 0.5 | 1, 0.4 | * +------+--------+--------+--------+--------+------+--------+--------+--------+--------+--------+--------+ * CV2: Best submodel * +------+--------+--------+--------+--------+-V------+------+--------+--------+--------+--------+----------+ * | 0, 1 | 0, 0.9 | 0, 0.8 | 0, 0.7 | 0, 0.6 | 0, 0.5 | 1, 1 | 1, 0.9 | 1, 0.8 | 1, 0.7 | 0.8, 1 | 0.8, 0.9 | * +------+--------+--------+--------+--------+--------+------+--------+--------+--------+--------+----------+ * * || * || * \ / * \/ * * Aligned Submodels: * * CV1: * +------+--------+--------+--------+--------+--------+------+--------+--------+--------+--------+--------+--------+ * | 0, 1 | 0, 0.9 | 0, 0.8 | 0, 0.7 | 0, 0.6 | NULL | 1, 1 | 1, 0.9 | 1, 0.8 | 1, 0.7 | 1, 0.6 | 1, 0.5 | 1, 0.4 | * +------+--------+--------+--------+--------+--------+------+--------+--------+--------+--------+--------+--------+ * * CV2: * +------+--------+--------+--------+--------+--------+------+--------+--------+--------+--------+--------+--------+ * | 0, 1 | 0, 0.9 | 0, 0.8 | 0, 0.7 | 0, 0.6 | 0, 0.5 | 1, 1 | 1, 0.9 | 1, 0.8 | 1, 0.7 | NULL | NULL | NULL | * +------+--------+--------+--------+--------+--------+------+--------+--------+--------+--------+--------+--------+ * */ private double[] alignSubModelsAcrossCVModels(ModelBuilder[] cvModelBuilders) { // Get only the best alphas double[] alphas = Arrays.stream(cvModelBuilders) .mapToDouble(cv -> { GLM g = (GLM)cv; return g._model._output._submodels[g._model._output._selected_submodel_idx].alpha_value; }) .distinct() .toArray(); // Get corresponding indices for the best alphas and sort them in the same order as in _parms.alpha int[] alphaIndices = new int[alphas.length]; int k = 0; for (int i = 0; i < _parms._alpha.length; i++) { for (int j = 0; j < alphas.length; j++) { if (alphas[j] == _parms._alpha[i]) { alphaIndices[k] = i; if (k < j) { // swap to keep the same order as in _parms.alpha final double tmpAlpha = alphas[k]; alphas[k] = alphas[j]; alphas[j] = tmpAlpha; } k++; } } } // maximum index of alpha change across all the folds int[] alphaChangePoints = new int[alphas.length + 1]; int[] alphaSubmodels = new int[_parms._alpha.length]; for (int i = 0; i < cvModelBuilders.length; ++i) { GLM g = (GLM) cvModelBuilders[i]; Map<Double, List<Submodel>> submodels = Arrays.stream(g._model._output._submodels) .collect(Collectors.groupingBy(sm -> sm.alpha_value)); for (int j = 0; j < _parms._alpha.length; j++) { alphaSubmodels[j] = Math.max(alphaSubmodels[j], submodels.containsKey(_parms._alpha[j]) ? 
submodels.get(_parms._alpha[j]).size() : 0); } } for (int i = 0; i < alphas.length; i++) { alphaChangePoints[i + 1] = alphaChangePoints[i] + alphaSubmodels[alphaIndices[i]]; } double[] alphasAndLambdas = new double[alphaChangePoints[alphas.length]*2]; for (int i = 0; i < cvModelBuilders.length; ++i) { GLM g = (GLM) cvModelBuilders[i]; Submodel[] alignedSubmodels = new Submodel[alphaChangePoints[alphas.length]]; double lastAlpha = -1; int alphaIdx = -1; k = 0; int nNullsUntilSelectedSubModel = 0; for (int j = 0; j < g._model._output._submodels.length; j++) { if (lastAlpha != g._model._output._submodels[j].alpha_value) { lastAlpha = g._model._output._submodels[j].alpha_value; if (alphaIdx + 1 < alphas.length && lastAlpha == alphas[alphaIdx+1]) { k = 0; alphaIdx++; if (g._model._output._selected_submodel_idx >= j) nNullsUntilSelectedSubModel = alphaChangePoints[alphaIdx] - j; } } if (alphaIdx < 0 || g._model._output._submodels[j].alpha_value != alphas[alphaIdx]) continue; alignedSubmodels[alphaChangePoints[alphaIdx] + k] = g._model._output._submodels[j]; assert alphasAndLambdas[alphaChangePoints[alphaIdx] + k] == 0 || ( alphasAndLambdas[alphaChangePoints[alphaIdx] + k] == g._model._output._submodels[j].alpha_value && alphasAndLambdas[alphaChangePoints[alphas.length] + alphaChangePoints[alphaIdx] + k] == g._model._output._submodels[j].lambda_value ); alphasAndLambdas[alphaChangePoints[alphaIdx] + k] = g._model._output._submodels[j].alpha_value; alphasAndLambdas[alphaChangePoints[alphas.length] + alphaChangePoints[alphaIdx] + k] = g._model._output._submodels[j].lambda_value; k++; } assert g._model._output._selected_submodel_idx == g._model._output._best_submodel_idx; assert g._model._output._selected_submodel_idx == g._model._output._best_lambda_idx; assert (g._model._output._submodels[g._model._output._selected_submodel_idx].alpha_value == alignedSubmodels[g._model._output._selected_submodel_idx + nNullsUntilSelectedSubModel].alpha_value) && (g._model._output._submodels[g._model._output._selected_submodel_idx].lambda_value == alignedSubmodels[g._model._output._selected_submodel_idx + nNullsUntilSelectedSubModel].lambda_value); g._model._output._submodels = alignedSubmodels; g._model._output.setSubmodelIdx(g._model._output._selected_submodel_idx + nNullsUntilSelectedSubModel, g._parms); } return alphasAndLambdas; } /** * If run with lambda search, we need to take extra action performed after cross-val models are built. * Each of the folds have been computed with ots own private validation dataset and it performed early stopping based on it. * => We need to: * 1. compute cross-validated lambda estimate * 2. set the lambda estimate to all n-folds models (might require extra model fitting if the particular model * stopped too early!) * 3. compute cross-validated scoring history (cross-validated deviance standard error per lambda) * 4. 
unlock the n-folds models (they are changed here, so the unlocking happens here) */ @Override public void cv_computeAndSetOptimalParameters(ModelBuilder[] cvModelBuilders) { setMaxRuntimeSecsForMainModel(); double bestTestDev = Double.POSITIVE_INFINITY; double[] alphasAndLambdas = alignSubModelsAcrossCVModels(cvModelBuilders); int numOfSubmodels = alphasAndLambdas.length / 2; int lmin_max = 0; boolean lambdasSorted = _parms._lambda.length >= 1; for (int i = 1; i < _parms._lambda.length; i++) { if (_parms._lambda[i] >= _parms._lambda[i - 1]) { lambdasSorted = false; break; } } if (lambdasSorted) { for (int i = 0; i < cvModelBuilders.length; ++i) { // find the highest best_submodel_idx we need to go through GLM g = (GLM) cvModelBuilders[i]; lmin_max = Math.max(lmin_max, g._model._output._selected_submodel_idx + 1); // lmin_max is exclusive upper bound } } else { lmin_max = numOfSubmodels; // Number of submodels } _xval_deviances = new double[lmin_max]; _xval_sd = new double[lmin_max]; _xval_zValues = new double[lmin_max][_state._nbetas]; int lidx = 0; // index into submodel int bestId = 0; // submodel indedx with best Deviance from xval int cnt = 0; for (; lidx < lmin_max; ++lidx) { // search through submodel with same lambda and alpha values double testDev = 0; double testDevSq = 0; double[] zValues = new double[_state._nbetas]; double[] zValuesSq = new double[_state._nbetas]; for (int i = 0; i < cvModelBuilders.length; ++i) { // run cv for each lambda value GLM g = (GLM) cvModelBuilders[i]; if (g._model._output._submodels[lidx] == null) { double alpha = g._state.alpha(); try { g._insideCVCheck = true; g._state.setAlpha(alphasAndLambdas[lidx]); // recompute the submodel using the proper alpha value g._driver.computeSubmodel(lidx, alphasAndLambdas[lidx + numOfSubmodels], Double.NaN, Double.NaN); } finally { g._insideCVCheck = false; g._state.setAlpha(alpha); } } assert alphasAndLambdas[lidx] == g._model._output._submodels[lidx].alpha_value && alphasAndLambdas[lidx + numOfSubmodels] == g._model._output._submodels[lidx].lambda_value; testDev += g._model._output._submodels[lidx].devianceValid; testDevSq += g._model._output._submodels[lidx].devianceValid * g._model._output._submodels[lidx].devianceValid; if(g._model._output._submodels[lidx].zValues != null) { for (int z = 0; z < zValues.length; z++) { zValues[z] += g._model._output._submodels[lidx].zValues[z]; zValuesSq[z] += g._model._output._submodels[lidx].zValues[z] * g._model._output._submodels[lidx].zValues[z]; } } } double testDevAvg = testDev / cvModelBuilders.length; // average testDevAvg for fixed submodel index double testDevSE = testDevSq - testDevAvg * testDev; double[] zValuesAvg = Arrays.stream(zValues).map(z -> z / cvModelBuilders.length).toArray(); _xval_sd[lidx] = Math.sqrt(testDevSE / ((cvModelBuilders.length - 1) * cvModelBuilders.length)); _xval_deviances[lidx] = testDevAvg; _xval_zValues[lidx] = zValuesAvg; if (testDevAvg < bestTestDev) { bestTestDev = testDevAvg; bestId = lidx; } // early stopping - no reason to move further if we're overfitting // cannot be used if we have multiple alphas if ((_parms._alpha == null || _parms._alpha.length <= 1) && lambdasSorted && testDevAvg > bestTestDev && ++cnt == 3) { lmin_max = lidx; break; } } for (int i = 0; i < cvModelBuilders.length; ++i) { GLM g = (GLM) cvModelBuilders[i]; if (g._toRemove != null) for (Key k : g._toRemove) Keyed.remove(k); } for (int i = 0; i < cvModelBuilders.length; ++i) { GLM g = (GLM) cvModelBuilders[i]; g._model._output.setSubmodelIdx(bestId, g._parms); } 
double bestDev = _xval_deviances[bestId]; double bestDev1se = bestDev + _xval_sd[bestId]; int finalBestId = bestId; Integer[] orderedLambdaIndices = IntStream .range(0, lmin_max) .filter(i -> alphasAndLambdas[i] == alphasAndLambdas[finalBestId]) // get just the lambdas corresponding to the selected alpha .boxed() .sorted((a,b) -> (int) Math.signum(alphasAndLambdas[b+numOfSubmodels] - alphasAndLambdas[a+numOfSubmodels])) .toArray(Integer[]::new); int bestId1se = IntStream .range(0, orderedLambdaIndices.length) .filter(i -> orderedLambdaIndices[i] == finalBestId) .findFirst() .orElse(orderedLambdaIndices.length - 1); while (bestId1se > 0 && _xval_deviances[orderedLambdaIndices[bestId1se - 1]] <= bestDev1se) --bestId1se; // get the index into _parms.lambda/_xval_deviances etc _lambdaCVEstimate = alphasAndLambdas[numOfSubmodels + bestId]; bestId1se = orderedLambdaIndices[bestId1se]; _model._output._lambda_1se = alphasAndLambdas[numOfSubmodels + bestId1se]; // submodel idx with bestDev + one sigma // set the final selected alpha and lambda _parms._alpha = new double[]{alphasAndLambdas[bestId]}; if (_parms._lambda_search) { int newLminMax = 0; int newBestId = 0; for (int i = 0; i < lmin_max; i++) { if (alphasAndLambdas[i] == _parms._alpha[0]) { newLminMax++; if (i < bestId) newBestId++; } } _parms._lambda = Arrays.copyOf(_parms._lambda, newLminMax+1); _model._output._selected_submodel_idx = newBestId; _bestCVSubmodel = newBestId; _xval_deviances = Arrays.copyOfRange(_xval_deviances, bestId-newBestId, lmin_max + 1); _xval_sd = Arrays.copyOfRange(_xval_sd, bestId-newBestId, lmin_max + 1); _xval_zValues = Arrays.copyOfRange(_xval_zValues, bestId-newBestId, lmin_max + 1); } else { _parms._lambda = new double[]{alphasAndLambdas[numOfSubmodels + bestId]}; _model._output._selected_submodel_idx = 0; // set best submodel id here _bestCVSubmodel = 0; } // set max_iteration _parms._max_iterations = (int) Math.ceil(1 + // iter >= max_iter => stop; so +1 to be able to get to the same iteration value Arrays.stream(cvModelBuilders) .mapToDouble(cv -> ((GLM) cv)._model._output._submodels[finalBestId].iteration) .filter(Double::isFinite) .max() .orElse(_parms._max_iterations) ); if (_parms._generate_scoring_history) generateCVScoringHistory(cvModelBuilders); for (int i = 0; i < cvModelBuilders.length; ++i) { GLM g = (GLM) cvModelBuilders[i]; GLMModel gm = g._model; gm.write_lock(_job); gm.update(_job); gm.unlock(_job); } if (_betaConstraints != null) { DKV.remove(_betaConstraints._key); _betaConstraints.delete(); _betaConstraints = null; } _doInit = false; // disable init for CV main model } /*** * This method is only called when _parms._generate_scoring_history=true. We leave the existing xval-deviance, xval-se * generation alone and create new xval-deviance and xval-se for the scoring history. * * @param cvModelBuilders: store model keys from models generated by cross validation.
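 * <p>A minimal sketch of the per-iteration aggregation performed in this method (hypothetical names; it
 * mirrors the mean/standard-error computation used below):</p>
 * <pre>{@code
 * double sum = 0, sumSq = 0;
 * for (double dev : foldDeviancesAtIter) { sum += dev; sumSq += dev * dev; }   // one deviance per fold
 * double avg = sum / nFolds;
 * double se  = Math.sqrt((sumSq - avg * avg) / ((nFolds - 1) * nFolds));
 * }</pre>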
*/ private void generateCVScoringHistory(ModelBuilder[] cvModelBuilders) { int devianceTestLength = Integer.MAX_VALUE; List<Integer>[] cvModelIters = new List[cvModelBuilders.length]; // find correct length for _xval_deviances_generate_SH, _xval_sd_generate_SH for (int i = 0; i < cvModelBuilders.length; ++i) { // find length of deviances from fold models GLM g = (GLM) cvModelBuilders[i]; if (_parms._lambda_search) { if (g._lambdaSearchScoringHistory._lambdaDevTest.size() < devianceTestLength) devianceTestLength = g._lambdaSearchScoringHistory._lambdaDevTest.size(); cvModelIters[i] = new ArrayList<>(g._lambdaSearchScoringHistory._lambdaIters); } else { if (g._scoringHistory._lambdaDevTest.size() < devianceTestLength) devianceTestLength = g._scoringHistory._lambdaDevTest.size(); cvModelIters[i] = new ArrayList<>(g._scoringHistory._scoringIters); } } _xval_deviances_generate_SH = new double[devianceTestLength]; _xval_sd_generate_SH = new double[devianceTestLength]; _xval_iters_generate_SH = new int[devianceTestLength]; int countIndex = 0; for (int index = 0; index < devianceTestLength; index++) { // access deviance for each fold and calculate average double testDev = 0; // and sd double testDevSq = 0; int[] foldIterIndex = findIterIndexAcrossFolds(cvModelIters, index); // find common iteration indices from folds if (foldIterIndex != null) { _xval_iters_generate_SH[countIndex] = cvModelIters[0].get(index); for (int modelIndex = 0; modelIndex < cvModelBuilders.length; modelIndex++) { GLM g = (GLM) cvModelBuilders[modelIndex]; if (_parms._lambda_search) { testDev += g._lambdaSearchScoringHistory._lambdaDevTest.get(foldIterIndex[modelIndex]); testDevSq += g._lambdaSearchScoringHistory._lambdaDevTest.get(foldIterIndex[modelIndex]) * g._lambdaSearchScoringHistory._lambdaDevTest.get(foldIterIndex[modelIndex]); } else { testDev += g._scoringHistory._lambdaDevTest.get(foldIterIndex[modelIndex]); testDevSq += g._scoringHistory._lambdaDevTest.get(foldIterIndex[modelIndex]) * g._scoringHistory._lambdaDevTest.get(foldIterIndex[modelIndex]); } } double testDevAvg = testDev / cvModelBuilders.length; double testDevSE = testDevSq - testDevAvg * testDevAvg; _xval_sd_generate_SH[countIndex] = Math.sqrt(testDevSE / ((cvModelBuilders.length - 1) * cvModelBuilders.length)); _xval_deviances_generate_SH[countIndex++] = testDevAvg; } } _xval_sd_generate_SH = Arrays.copyOf(_xval_sd_generate_SH, countIndex); _xval_deviances_generate_SH = Arrays.copyOf(_xval_deviances_generate_SH, countIndex); _xval_iters_generate_SH = Arrays.copyOf(_xval_iters_generate_SH, countIndex); } /*** * This method is used to locate common iteration indices across all folds. Since scoring history is scored by * user specifying the scoring interval and by a time interval determined by system computation speed, there can * be scoring history with different scoring indices across folds. This method is used to find the commen iteration * indices and return it. * * @param cvModelIters: store model keys from models generated by cross validation. 
* @param fold0Index: iteration index of fold 0 model * @return: array containing the indices into deviance_test for all folds */ public static int[] findIterIndexAcrossFolds(List<Integer>[] cvModelIters, int fold0Index) { int numFolds = cvModelIters.length; int[] iterIndexAcrossFolds = new int[numFolds]; int fold0Iter = cvModelIters[0].get(fold0Index); iterIndexAcrossFolds[0] = fold0Index; for (int foldIndex = 1; foldIndex < numFolds; foldIndex++) { if (cvModelIters[foldIndex].get(fold0Index) == fold0Iter) { // iterIndexAcrossFolds[foldIndex] = fold0Index; } else { // current fold model iteration index differs from fold0Iter, need to search int currFoldIterIndex = ArrayUtils.find(cvModelIters[foldIndex].toArray(), fold0Iter); if (currFoldIterIndex < 0) return null; else iterIndexAcrossFolds[foldIndex] = currFoldIterIndex; } } return iterIndexAcrossFolds; } protected void checkMemoryFootPrint(DataInfo activeData) { if (IRLSM.equals(_parms._solver)|| Solver.COORDINATE_DESCENT.equals(_parms._solver)) { int p = activeData.fullN(); HeartBeat hb = H2O.SELF._heartbeat; long mem_usage = (long) (hb._cpus_allowed * (p * p + activeData.largestCat()) * 8/*doubles*/ * (1 + .5 * Math.log((double) _train.lastVec().nChunks()) / Math.log(2.))); //one gram per core long max_mem = hb.get_free_mem(); if (mem_usage > max_mem) { String msg = "Gram matrices (one per thread) won't fit in the driver node's memory (" + PrettyPrint.bytes(mem_usage) + " > " + PrettyPrint.bytes(max_mem) + ") - try reducing the number of columns and/or the number of categorical factors (or switch to the L-BFGS solver)."; error("_train", msg); } } } DataInfo _dinfo; private transient DataInfo _validDinfo; // time per iteration in ms static class ScoringHistory { private ArrayList<Integer> _scoringIters = new ArrayList<>(); private ArrayList<Long> _scoringTimes = new ArrayList<>(); private ArrayList<Double> _likelihoods = new ArrayList<>(); private ArrayList<Double> _objectives = new ArrayList<>(); private ArrayList<Double> _lambdas; // these are only used when _parms._generate_scoring_history=true private ArrayList<Double> _lambdaDevTrain; private ArrayList<Double> _lambdaDevTest; private ArrayList<Double> _alphas; public ArrayList<Integer> getScoringIters() { return _scoringIters;} public ArrayList<Long> getScoringTimes() { return _scoringTimes;} public ArrayList<Double> getLikelihoods() { return _likelihoods;} public ArrayList<Double> getObjectives() { return _objectives;} public ScoringHistory(boolean hasTest, boolean hasXval, boolean generate_scoring_history) { if(hasTest)_lambdaDevTest = new ArrayList<>(); if (generate_scoring_history) { _lambdas = new ArrayList<>(); // these are only used when _parms._generate_scoring_history=true _lambdaDevTrain = new ArrayList<>(); if (hasTest) _lambdaDevTest = new ArrayList<>(); _alphas = new ArrayList<>(); } } public synchronized void addIterationScore(int iter, double likelihood, double obj) { if (_scoringIters.size() > 0 && _scoringIters.get(_scoringIters.size() - 1) >= iter) return; // do not record results of the same iteration more than once _scoringIters.add(iter); _scoringTimes.add(System.currentTimeMillis()); _likelihoods.add(likelihood); _objectives.add(obj); } public synchronized void addIterationScore(boolean updateTrain, boolean updateValid, int iter, double likelihood, double obj, double dev, double devValid, long nobs, long nobsValid, double lambda, double alpha) { if (_scoringIters.size() > 0 && _scoringIters.get(_scoringIters.size() - 1) >= iter) return; // do not record twice,
happens for the last iteration, need to record scoring history in checkKKTs because of gaussian fam. if (updateTrain) { _scoringIters.add(iter); _scoringTimes.add(System.currentTimeMillis()); _likelihoods.add(likelihood); _objectives.add(obj); _lambdaDevTrain.add(dev / nobs); _lambdas.add(lambda); _alphas.add(alpha); } if (updateValid) _lambdaDevTest.add(devValid/nobsValid); } public synchronized TwoDimTable to2dTable(GLMParameters parms, double[] xvalDev, double[] xvalSE) { String[] cnames = new String[]{"timestamp", "duration", "iterations", "negative_log_likelihood", "objective"}; String[] ctypes = new String[]{"string", "string", "int", "double", "double"}; String[] cformats = new String[]{"%s", "%s", "%d", "%.5f", "%.5f"}; if (parms._generate_scoring_history) { cnames = ArrayUtils.append(cnames, new String[]{"alpha", "lambda", "deviance_train"}); ctypes = ArrayUtils.append(ctypes, new String[]{"double", "double", "double"}); cformats= ArrayUtils.append(cformats, new String[]{"%.5f", "%.5f", "%.5f"}); if (_lambdaDevTest != null) { cnames = ArrayUtils.append(cnames, new String[]{"deviance_test"}); ctypes = ArrayUtils.append(ctypes, new String[]{"double"}); cformats = ArrayUtils.append(cformats, new String[]{"%.5f"}); } if (xvalDev != null && xvalDev.length > 0) { cnames = ArrayUtils.append(cnames, new String[]{"deviance_xval", "deviance_se"}); ctypes = ArrayUtils.append(ctypes, new String[]{"double", "double"}); cformats = ArrayUtils.append(cformats, new String[]{"%.5f", "%.5f"}); } } TwoDimTable res = new TwoDimTable("Scoring History", "", new String[_scoringIters.size()], cnames, ctypes, cformats, ""); for (int i = 0; i < _scoringIters.size(); ++i) { int col = 0; res.set(i, col++, DATE_TIME_FORMATTER.print(_scoringTimes.get(i))); res.set(i, col++, PrettyPrint.msecs(_scoringTimes.get(i) - _scoringTimes.get(0), true)); res.set(i, col++, _scoringIters.get(i)); res.set(i, col++, _likelihoods.get(i)); res.set(i, col++, _objectives.get(i)); if (parms._generate_scoring_history) { res.set(i, col++, _alphas.get(i)); res.set(i, col++, _lambdas.get(i)); res.set(i, col++, _lambdaDevTrain.get(i)); if (_lambdaDevTest != null) res.set(i, col++, _lambdaDevTest.get(i)); if (xvalDev != null && (i < xvalDev.length)) { // cv model may run with fewer iterations res.set(i, col++, xvalDev[i]); res.set(i, col, xvalSE[i]); } } } return res; } void restoreFromCheckpoint(TwoDimTable sHist, int[] colIndices) { int numRows = sHist.getRowDim(); for (int rowInd = 0; rowInd < numRows; rowInd++) { // if lambda_search is enabled, _sc is not updated _scoringIters.add((Integer) sHist.get(rowInd, colIndices[0])); _scoringTimes.add(DATE_TIME_FORMATTER.parseMillis((String) sHist.get(rowInd, colIndices[1]))); _likelihoods.add((Double) sHist.get(rowInd, colIndices[2])); _objectives.add((Double) sHist.get(rowInd, colIndices[3])); } } } static class LambdaSearchScoringHistory { ArrayList<Long> _scoringTimes = new ArrayList<>(); private ArrayList<Double> _lambdas = new ArrayList<>(); private ArrayList<Integer> _lambdaIters = new ArrayList<>(); private ArrayList<Integer> _lambdaPredictors = new ArrayList<>(); private ArrayList<Double> _lambdaDevTrain = new ArrayList<>(); private ArrayList<Double> _lambdaDevTest; private ArrayList<Double> _lambdaDevXval; private ArrayList<Double> _lambdaDevXvalSE; private ArrayList<Double> _alphas = new ArrayList<>(); public LambdaSearchScoringHistory(boolean hasTest, boolean hasXval) { if(hasTest) _lambdaDevTest = new ArrayList<>(); if(hasXval){ _lambdaDevXval = new ArrayList<>(); 
_lambdaDevXvalSE = new ArrayList<>(); } } public ArrayList<Integer> getScoringIters() { return _lambdaIters;} public ArrayList<Long> getScoringTimes() { return _scoringTimes;} public ArrayList<Double> getLambdas() { return _lambdas;} public ArrayList<Double> getAlphas() { return _alphas;} public ArrayList<Double> getDevTrain() { return _lambdaDevTrain;} public ArrayList<Double> getDevTest() { return _lambdaDevTest;} public ArrayList<Integer> getPredictors() { return _lambdaPredictors;} public synchronized void addLambdaScore(int iter, int predictors, double lambda, double devRatioTrain, double devRatioTest, double devRatioXval, double devRatoioXvalSE, double alpha) { if (_lambdaIters.size() > 0 && (iter <= _lambdaIters.get(_lambdaIters.size()-1))) { // prevent duplicated records return; } _scoringTimes.add(System.currentTimeMillis()); _lambdaIters.add(iter); _alphas.add(alpha); _lambdas.add(lambda); _lambdaPredictors.add(predictors); _lambdaDevTrain.add(devRatioTrain); if(_lambdaDevTest != null)_lambdaDevTest.add(devRatioTest); if(_lambdaDevXval != null)_lambdaDevXval.add(devRatioXval); if(_lambdaDevXvalSE != null)_lambdaDevXvalSE.add(devRatoioXvalSE); } public synchronized TwoDimTable to2dTable() { String[] cnames = new String[]{"timestamp", "duration", "iteration", "lambda", "predictors", "deviance_train"}; if(_lambdaDevTest != null) cnames = ArrayUtils.append(cnames,"deviance_test"); if(_lambdaDevXval != null) cnames = ArrayUtils.append(cnames,new String[]{"deviance_xval","deviance_se"}); String[] ctypes = new String[]{"string", "string", "int", "string","int", "double"}; if(_lambdaDevTest != null) ctypes = ArrayUtils.append(ctypes,"double"); if(_lambdaDevXval != null) ctypes = ArrayUtils.append(ctypes, new String[]{"double","double"}); String[] cformats = new String[]{"%s", "%s", "%d","%s", "%d", "%.3f"}; if(_lambdaDevTest != null) cformats = ArrayUtils.append(cformats,"%.3f"); if(_lambdaDevXval != null) cformats = ArrayUtils.append(cformats,new String[]{"%.3f","%.3f"}); cnames = ArrayUtils.append(cnames, "alpha"); ctypes = ArrayUtils.append(ctypes, "double"); cformats = ArrayUtils.append(cformats, "%.6f"); TwoDimTable res = new TwoDimTable("Scoring History", "", new String[_lambdaIters.size()], cnames, ctypes, cformats, ""); for (int i = 0; i < _lambdaIters.size(); ++i) { int col = 0; res.set(i, col++, DATE_TIME_FORMATTER.print(_scoringTimes.get(i))); res.set(i, col++, PrettyPrint.msecs(_scoringTimes.get(i) - _scoringTimes.get(0), true)); res.set(i, col++, _lambdaIters.get(i)); res.set(i, col++, lambdaFormatter.format(_lambdas.get(i))); res.set(i, col++, _lambdaPredictors.get(i)); res.set(i, col++, _lambdaDevTrain.get(i)); if(_lambdaDevTest != null) res.set(i, col++, _lambdaDevTest.get(i)); if(_lambdaDevXval != null && _lambdaDevXval.size() > i) { res.set(i, col++, _lambdaDevXval.get(i)); res.set(i, col++, _lambdaDevXvalSE.get(i)); } res.set(i, col++, _alphas.get(i)); } return res; } void restoreFromCheckpoint(TwoDimTable sHist, int[] colIndices) { int numRows = sHist.getRowDim(); for (int rowInd = 0; rowInd < numRows; rowInd++) { _scoringTimes.add(DATE_TIME_FORMATTER.parseMillis((String) sHist.get(rowInd, colIndices[1]))); _lambdaIters.add((int) sHist.get(rowInd, colIndices[0])); _lambdas.add(Double.valueOf((String) sHist.get(rowInd, colIndices[2]))); _alphas.add((Double) sHist.get(rowInd, colIndices[6])); _lambdaPredictors.add((int) sHist.get(rowInd, colIndices[3])); _lambdaDevTrain.add((double) sHist.get(rowInd, colIndices[4])); if (colIndices[5] > -1) // may not have deviance 
test, check before applying _lambdaDevTest.add((double) sHist.get(rowInd, colIndices[5])); } } } private transient ScoringHistory _scoringHistory; private transient LambdaSearchScoringHistory _lambdaSearchScoringHistory; long _t0 = System.currentTimeMillis(); private transient double _iceptAdjust = 0; private double _lmax; private double _gmax; // gradient max without dividing by math.max(1e-2, _parms._alpha[0]) private transient long _nobs; private transient GLMModel _model; private boolean _earlyStop = false; // set by earlyStopping private GLMGradientInfo _ginfoStart; private double _betaDiffStart; private double[] _betaStart; private int _initIter = 0; @Override public int nclasses() { if (multinomial.equals(_parms._family) || ordinal.equals(_parms._family) || AUTO.equals(_parms._family)) return _nclass; if (binomial.equals(_parms._family) || quasibinomial.equals(_parms._family) || fractionalbinomial.equals(_parms._family)) return 2; return 1; } private transient double[] _nullBeta; private double[] getNullBeta() { if (_nullBeta == null) { if (multinomial.equals(_parms._family) || ordinal.equals(_parms._family)) { _nullBeta = MemoryManager.malloc8d((_dinfo.fullN() + 1) * nclasses()); int N = _dinfo.fullN() + 1; if(_parms._intercept) if (ordinal.equals(_parms._family)) { // ordinal regression use random sorted start values Random rng = RandomUtils.getRNG(_parms._seed); int lastClass = nclasses()-1; double[] tempIcpt = new double[lastClass]; for (int i = 0; i < lastClass; i++) { // only contains nclass-2 thresholds here tempIcpt[i] = (-1+2*rng.nextDouble()) * nclasses(); // generate threshold from -nclasses to +nclasses } Arrays.sort(tempIcpt); for (int i = 0; i < lastClass; i++) _nullBeta[_dinfo.fullN() + i * N] = tempIcpt[i]; } else { for (int i = 0; i < nclasses(); ++i) _nullBeta[_dinfo.fullN() + i * N] = Math.log(_state._ymu[i]); } } else { _nullBeta = MemoryManager.malloc8d(_dinfo.fullN() + 1); if (_parms._intercept && !quasibinomial.equals(_parms._family)) _nullBeta[_dinfo.fullN()] = new GLMModel.GLMWeightsFun(_parms).link(_state._ymu[0]); else _nullBeta[_dinfo.fullN()] = 0; } } return _nullBeta; } protected boolean computePriorClassDistribution() { return multinomial.equals(_parms._family) || ordinal.equals(_parms._family) || (AUTO.equals(_parms._family) && nclasses() > 2); } @Override public void init(boolean expensive) { super.init(expensive); hide("_balance_classes", "Not applicable since class balancing is not required for GLM."); hide("_max_after_balance_size", "Not applicable since class balancing is not required for GLM."); hide("_class_sampling_factors", "Not applicable since class balancing is not required for GLM."); if (_parms._influence != null && (_parms._nfolds > 0 || _parms._fold_column != null)) { error("influence", " cross-validation is not allowed when influence is set to dfbetas."); } _parms.validate(this); if(_response != null) { if(!isClassifier() && _response.isCategorical()) error("_response", H2O.technote(2, "Regression requires numeric response, got categorical.")); if ((Solver.GRADIENT_DESCENT_LH.equals(_parms._solver) || Solver.GRADIENT_DESCENT_SQERR.equals(_parms._solver)) && !ordinal.equals(_parms._family)) error("_solver", "Solvers GRADIENT_DESCENT_LH and GRADIENT_DESCENT_SQERR are only " + "supported for ordinal regression. 
Do not choose them unless you specify your family to be ordinal"); switch (_parms._family) { case AUTO: if (nclasses() == 1 & _parms._link != Link.family_default && _parms._link != Link.identity && _parms._link != Link.log && _parms._link != Link.inverse) { error("_family", H2O.technote(2, "AUTO for underlying response requires the link to be family_default, identity, log or inverse.")); } else if (nclasses() == 2 & _parms._link != Link.family_default && _parms._link != Link.logit) { error("_family", H2O.technote(2, "AUTO for underlying response requires the link to be family_default or logit.")); } else if (nclasses() > 2 & _parms._link != Link.family_default & _parms._link != Link.multinomial) { error("_family", H2O.technote(2, "AUTO for underlying response requires the link to be family_default or multinomial.")); } break; case binomial: if (!_response.isBinary() && _nclass != 2) error("_family", H2O.technote(2, "Binomial requires the response to be a 2-class categorical or a binary column (0/1)")); break; case multinomial: if (_nclass <= 2) error("_family", H2O.technote(2, "Multinomial requires a categorical response with at least 3 levels (for 2 class problem use family=binomial.")); break; case poisson: case negativebinomial: if (_nclass != 1) error("_family", "Poisson and Negative Binomial require the response" + " to be numeric."); if (_response.min() < 0) error("_family", "Poisson and Negative Binomial require response >= 0"); if (!_response.isInt()) warn("_family", "Poisson and Negative Binomial expect non-negative integer response," + " got floats."); if (Family.negativebinomial.equals(_parms._family)) if (_parms._theta <= 0)// || _parms._theta > 1) error("_family", "Illegal Negative Binomial theta value. Valid theta values be > 0" + " and <= 1."); else _parms._invTheta = 1 / _parms._theta; break; case gamma: if (_nclass != 1) error("_distribution", H2O.technote(2, "Gamma requires the response to be numeric.")); if (_response.min() <= 0) error("_family", "Response value for gamma distribution must be greater than 0."); break; case tweedie: if (_nclass != 1) error("_family", H2O.technote(2, "Tweedie requires the response to be numeric.")); if (ml.equals(_parms._dispersion_parameter_method)) { // Check if response contains zeros if so limit variance power to (1,2) if (_response.min() <= 0) warn("_tweedie_var_power", "Response contains zeros and/or values lower than zero. "+ "Tweedie variance power ML estimation will be limited between 1 and 2. Values lower than zero will be ignored."); } break; case quasibinomial: if (_nclass != 1) error("_family", H2O.technote(2, "Quasi_binomial requires the response to be numeric.")); break; case ordinal: if (_nclass <= 2) error("_family", H2O.technote(2, "Ordinal requires a categorical response with at least 3 levels (for 2 class problem use family=binomial.")); if (_parms._link == Link.oprobit || _parms._link == Link.ologlog) error("_link", "Ordinal regression only supports ologit as link."); break; case gaussian: // if (_nclass != 1) error("_family", H2O.technote(2, "Gaussian requires the response to be numeric.")); break; case fractionalbinomial: final Vec resp = (train()).vec(_parms._response_column); if ((resp.min() < 0) || (resp.max() > 1)) { error("response", String.format("Response '%s' must be between 0 and 1 for fractional_binomial family. 
Min: %f, Max: %f", _parms._response_column, resp.min(), resp.max())); } break; default: error("_family", "Invalid distribution: " + _parms._distribution); } } if ((_parms._plug_values != null) && (_parms.missingValuesHandling() != MissingValuesHandling.PlugValues)) { error("_missing_values_handling", "When plug values are provided - Missing Values Handling needs to be explicitly set to PlugValues."); } if (_parms._plug_values == null && _parms.missingValuesHandling() == MissingValuesHandling.PlugValues) { error("_missing_values_handling", "No plug values frame provided for Missing Values Handling = PlugValues."); } if (_parms._max_iterations == 0) { warn("max_iterations", "for GLM, must be >= 1 (or -1 for unlimited or default setting) " + "to obtain proper model. Setting it to be 0 will only return the correct coefficient names and an empty" + " model."); warn("_max_iterations", H2O.technote(2 , "for GLM, if specified, must be >= 1 or == -1.")); } if (_parms._linear_constraints != null) { checkInitLinearConstraints(); } if (expensive) { if (_parms._build_null_model) { if (!(tweedie.equals(_parms._family) || gamma.equals(_parms._family) || negativebinomial.equals(_parms._family))) error("build_null_model", " is only supported for tweedie, gamma and negativebinomial familes"); else removePredictors(_parms, _train); } if (error_count() > 0) return; if (_parms._lambda_search && (_parms._stopping_rounds > 0)) { error("early stop", " cannot run when lambda_search=True. Lambda_search has its own " + "early-stopping mechanism"); } if (!_parms._lambda_search && (_parms._stopping_rounds > 0)) // early stop is on! _earlyStopEnabled = true; if (_parms._alpha == null) _parms._alpha = new double[]{_parms._solver == Solver.L_BFGS ? 0 : .5}; if (_parms._lambda_search &&_parms._nlambdas == -1) _parms._nlambdas = _parms._alpha[0] == 0?30:100; // fewer lambdas needed for ridge _lambdaSearchScoringHistory = new LambdaSearchScoringHistory(_parms._valid != null,_parms._nfolds > 1); _scoringHistory = new ScoringHistory(_parms._valid != null,_parms._nfolds > 1, _parms._generate_scoring_history); _train.bulkRollups(); // make sure we have all the rollups computed in parallel _t0 = System.currentTimeMillis(); if ((_parms._lambda_search || !_parms._intercept || _parms._lambda == null || _parms._lambda[0] > 0)) _parms._use_all_factor_levels = true; if (_parms._family == Family.AUTO) { if (_nclass == 1) { _parms._family = Family.gaussian; } else if (_nclass == 2) { _parms._family = binomial; } else { _parms._family = multinomial; } } if (_parms._link == Link.family_default) _parms._link = _parms._family.defaultLink; if (_parms._plug_values != null) { Frame plugValues = _parms._plug_values.get(); if (plugValues == null) { error("_plug_values", "Supplied plug values frame with key=`" + _parms._plug_values + "` doesn't exist."); } else if (plugValues.numRows() != 1) { error("_plug_values", "Plug values frame needs to have exactly 1 row."); } } if (hasOffsetCol() && multinomial.equals(_parms._family)) { // offset has no effect on multinomial warn("offset_column", " has no effect on multinomial and will be ignored."); if (_parms._ignored_columns != null) { List<String> ignoredCols = Arrays.asList(_parms._ignored_columns); ignoredCols.add(_parms._offset_column); _parms._ignored_columns = ignoredCols.toArray(new String[0]); } else { _parms._ignored_columns = new String[]{_parms._offset_column}; } _parms._offset_column = null; _offset = null; } if (hasOffsetCol() && ordinal.equals(_parms._family)) error("offset_column", " 
does not work with ordinal family right now. Will be fixed in" + " the future."); if ((_parms._family.equals(multinomial) || _parms._family.equals(ordinal)) && (_parms._beta_constraints != null || _parms._non_negative)) { error(_parms._non_negative ? "non_negative" : "beta_constraints", " does not work with " + _parms._family + " family."); } // maximum likelhood is only allowed for families tweedie, gamma and negativebinomial if (ml.equals(_parms._dispersion_parameter_method) && !gamma.equals(_parms._family) && !tweedie.equals(_parms._family) && !negativebinomial.equals(_parms._family)) error("dispersion_parameter_mode", " ml can only be used for family gamma, tweedie, negative binomial."); if (ml.equals(_parms._dispersion_parameter_method)) { if ((_parms._lambda == null && _parms._lambda_search) || _parms._lambda != null && Arrays.stream(_parms._lambda).anyMatch(v -> v != 0)) { error("dispersion_parameter_method", "ML is supported only without regularization!"); } else { if (_parms._lambda == null) { info("lambda", "Setting lambda to 0 to disable regularization which is unsupported with ML dispersion estimation."); _parms._lambda = new double[]{0.0}; } } if (_parms._fix_dispersion_parameter) if (!(tweedie.equals(_parms._family) || gamma.equals(_parms._family) || negativebinomial.equals(_parms._family))) error("fix_dispersion_parameter", " is only supported for gamma, tweedie, " + "negativebinomial families."); if (_parms._fix_tweedie_variance_power && tweedie.equals(_parms._family)) { double minResponse = _parms.train().vec(_parms._response_column).min(); if (minResponse < 0) error("response_column", " must >= 0 for tweedie_variance_power > 1 when using ml to" + " estimate the dispersion parameter."); if (_parms._tweedie_variance_power > 2 && minResponse <= 0) warn("response_column", " must > 0 when tweedie_variance_power > 2, such rows will be ignored."); } if (_parms._dispersion_learning_rate <= 0 && tweedie.equals(_parms._family)) error("dispersion_learning_rate", "must > 0 and is only used with tweedie dispersion" + " parameter estimation using ml."); if (_parms._fix_tweedie_variance_power && tweedie.equals(_parms._family) && _parms._tweedie_variance_power > 1 && _parms._tweedie_variance_power < 1.2) warn("tweedie_variance_power", "when tweedie_variance_power is close to 1 and < 1.2, " + "there is a potential of tweedie density function being multimodal. This will cause the optimization" + " procedure to generate a dispsersion parameter estimation that is suboptimal. To overcome this, " + "try to run the model building process with different init_dispersion_parameter values."); if (!_parms._fix_tweedie_variance_power && !tweedie.equals(_parms._family)) error("fix_tweedie_variance_power", " can only be set to false for tweedie family."); if (_parms._max_iterations_dispersion <= 0) error("max_iterations_dispersion", " must > 0."); if (_parms._dispersion_epsilon < 0) error("dispersion_epsilon", " must >= 0."); if (tweedie.equals(_parms._family)) { if (_parms._tweedie_variance_power <= 1) error("tweedie_variance_power", " must exceed 1."); if (_parms._tweedie_variance_power == 1) error("tweedie_variance_power", "Tweedie family with tweedie_variance_power=1.0 is " + "equivalent to the Poisson family. Please use Poisson family instead."); if (_parms._tweedie_variance_power == 2) error("tweedie_variance_power", "Tweedie family with tweedie_variance_power=2.0 is " + "equivalent to the Gamma family. 
Please use Gamma family instead."); if (_parms._tweedie_epsilon <= 0) error("tweedie_epsilon", " must exceed 0."); if (_parms._tweedie_variance_power == 0) // Later Null GLM model can fail if dispersion estimation is on (assert in TweedieEstimator) // This way we make sure it will yield nice error from the model builder (i.e. not an assert error) _parms._tweedie_variance_power = 1.5; } } if (_parms._init_dispersion_parameter <= 0) error("init_dispersion_parameter", " must exceed 0.0."); boolean standardizeQ = _parms._standardize; _dinfo = new DataInfo(_train.clone(), _valid, 1, _parms._use_all_factor_levels || _parms._lambda_search, standardizeQ ? DataInfo.TransformType.STANDARDIZE : DataInfo.TransformType.NONE, DataInfo.TransformType.NONE, _parms.missingValuesHandling() == MissingValuesHandling.Skip, _parms.imputeMissing(), _parms.makeImputer(), false, hasWeightCol(), hasOffsetCol(), hasFoldCol(), _parms.interactionSpec()); if (_parms._generate_variable_inflation_factors) { String[] vifPredictors = GLMModel.getVifPredictors(_train, _parms, _dinfo); if (vifPredictors == null || vifPredictors.length == 0) error("generate_variable_inflation_factors", " cannot be enabled for GLM models with " + "only non-numerical predictors."); } // for multiclass and fractional binomial we have one beta per class // for binomial and regression we have just one set of beta coefficients int nBetas = fractionalbinomial.equals(_parms._family) ? 2 : (multinomial.equals(_parms._family) || ordinal.equals(_parms._family)) ? nclasses() : 1; _betaInfo = new BetaInfo(nBetas, _dinfo.fullN() + 1); if (gam.equals(_parms._glmType)) _gamColIndices = extractAdaptedFrameIndices(_dinfo._adaptedFrame, _gamColnames, _dinfo._numOffsets[0]-_dinfo._cats); if (_parms._max_iterations == -1) { // fill in default max iterations int numclasses = (multinomial.equals(_parms._family) || ordinal.equals(_parms._family)) ? nclasses() : 1; if (_parms._solver == Solver.L_BFGS) { _parms._max_iterations = _parms._lambda_search ? _parms._nlambdas * 100 * numclasses : numclasses * Math.max(20, _dinfo.fullN() >> 2); if(_parms._alpha[0] > 0) _parms._max_iterations *= 10; } else _parms._max_iterations = _parms._lambda_search ? 10 * _parms._nlambdas : 50; } if (_valid != null) _validDinfo = _dinfo.validDinfo(_valid); _state = new ComputationState(_job, _parms, _dinfo, null, _betaInfo, _penaltyMatrix, _gamColIndices); // skipping extra rows? (outside of weights == 0)GLMT boolean skippingRows = (_parms.missingValuesHandling() == GLMParameters.MissingValuesHandling.Skip && _train.hasNAs()); if (hasWeightCol() || skippingRows) { // need to re-compute means and sd boolean setWeights = skippingRows;// && _parms._lambda_search && _parms._alpha[0] > 0; if (setWeights) { Vec wc = _weights == null ? _dinfo._adaptedFrame.anyVec().makeCon(1) : _weights.makeCopy(); _dinfo.setWeights(_generatedWeights = "__glm_gen_weights", wc); } YMUTask ymt = new YMUTask(_dinfo, (multinomial.equals(_parms._family)||ordinal.equals(_parms._family))?nclasses():1, setWeights, skippingRows,true,false).doAll(_dinfo._adaptedFrame); if (ymt.wsum() == 0) throw new IllegalArgumentException("No rows left in the dataset after filtering out rows with missing values. 
Ignore columns with many NAs or impute your missing values prior to calling glm."); Log.info(LogMsg("using " + ymt.nobs() + " nobs out of " + _dinfo._adaptedFrame.numRows() + " total")); // if sparse data, need second pass to compute variance _nobs = ymt.nobs(); if (_parms._obj_reg == -1) _parms._obj_reg = 1.0 / ymt.wsum(); if(!_parms._stdOverride) _dinfo.updateWeightedSigmaAndMean(ymt.predictorSDs(), ymt.predictorMeans()); if (multinomial.equals(_parms._family) || ordinal.equals(_parms._family)) { _state._ymu = MemoryManager.malloc8d(_nclass); for (int i = 0; i < _state._ymu.length; ++i) _state._ymu[i] = _priorClassDist[i];//ymt.responseMeans()[i]; } else _state._ymu = _parms._intercept?ymt._yMu:new double[]{_parms.linkInv(0)}; } else { _nobs = _train.numRows(); if (_parms._obj_reg == -1) _parms._obj_reg = 1.0 / _nobs; if ( multinomial.equals(_parms._family) || ordinal.equals(_parms._family)) { _state._ymu = MemoryManager.malloc8d(_nclass); for (int i = 0; i < _state._ymu.length; ++i) _state._ymu[i] = _priorClassDist[i]; } else { _state._ymu = new double[]{_parms._intercept ? _train.lastVec().mean() : _parms.linkInv(0)}; } } boolean betaContsOn = _parms._beta_constraints != null || _parms._non_negative; _linearConstraintsOn = _parms._linear_constraints != null; _betaConstraintsOn = (betaContsOn && (Solver.AUTO.equals(_parms._solver) || Solver.COORDINATE_DESCENT.equals(_parms._solver) || IRLSM.equals(_parms._solver )|| Solver.L_BFGS.equals(_parms._solver))); if (_parms._beta_constraints != null && !_enumInCS) { // will happen here if there is no CV if (findEnumInBetaCS(_parms._beta_constraints.get(), _parms)) { if (_betaConstraints == null) { _betaConstraints = expandedCatCS(_parms._beta_constraints.get(), _parms); DKV.put(_betaConstraints); } _enumInCS = true; // make sure we only do this once } } if (_parms._expose_constraints && _parms._linear_constraints == null) error("_expose_constraints", "can only be enabled when there are linear constraints."); BetaConstraint bc = _parms._beta_constraints != null ? new BetaConstraint(_parms._beta_constraints.get()) : new BetaConstraint(); if (betaContsOn && !_betaConstraintsOn) { warn("Beta Constraints", "will be disabled except for solver AUTO, COORDINATE_DESCENT, " + "IRLSM or L_BFGS. It is not available for ordinal or multinomial families."); } if (bc.hasProximalPenalty() && _parms._compute_p_values) error("_compute_p_values","P-values can not be computed for constrained problems with proximal penalty"); if (bc.hasBounds() && _parms._early_stopping) warn("beta constraint and early_stopping", "if both are enabled may degrade model performance."); _state.setBC(bc); if(hasOffsetCol() && _parms._intercept && !ordinal.equals(_parms._family)) { // fit intercept GLMGradientSolver gslvr = gam.equals(_parms._glmType) ? 
new GLMGradientSolver(_job,_parms, _dinfo.filterExpandedColumns(new int[0]), 0, _state.activeBC(), _betaInfo, _penaltyMatrix, _gamColIndices) : new GLMGradientSolver(_job,_parms, _dinfo.filterExpandedColumns(new int[0]), 0, _state.activeBC(), _betaInfo); double [] x = new L_BFGS().solve(gslvr,new double[]{-_offset.mean()}).coefs; Log.info(LogMsg("fitted intercept = " + x[0])); x[0] = _parms.linkInv(x[0]); _state._ymu = x; } if (_parms._prior > 0) _iceptAdjust = -Math.log(_state._ymu[0] * (1 - _parms._prior) / (_parms._prior * (1 - _state._ymu[0]))); ArrayList<Vec> vecs = new ArrayList<>(); if(_weights != null) vecs.add(_weights); if(_offset != null) vecs.add(_offset); vecs.add(_response); double[] beta = getNullBeta(); if (_parms._startval != null) { // allow user start set initial beta values if (_parms._startval.length != beta.length) { throw new IllegalArgumentException("Initial coefficient length (" + _parms._startval.length + ") does not " + "equal to actual GLM coefficient length(" + beta.length + ").\n The order of coefficients should" + " be the following:\n" + String.join("\n", _dinfo._adaptedFrame._names) + "\n Intercept.\n " + "Run your model without specifying startval to find out the actual coefficients names and " + "lengths."); } else { System.arraycopy(_parms._startval, 0, beta, 0, beta.length); } } else if (_parms._linear_constraints != null && _parms._init_optimal_glm) { // start value is not assigned beta = genInitBeta(); } GLMGradientInfo ginfo = gam.equals(_parms._glmType) ? new GLMGradientSolver(_job, _parms, _dinfo, 0, _state.activeBC(), _betaInfo, _penaltyMatrix, _gamColIndices).getGradient(beta) : new GLMGradientSolver(_job, _parms, _dinfo, 0, _state.activeBC(), _betaInfo).getGradient(beta); // gradient with L2 penalty, no constraints _lmax = lmax(ginfo._gradient); _gmax = _lmax * Math.max(1e-2, _parms._alpha[0]); // each alpha should have its own best lambda _state.setLambdaMax(_lmax); _state.setgMax(_gmax); if (_parms._lambda_min_ratio == -1) { _parms._lambda_min_ratio = (_nobs >> 4) > _dinfo.fullN() ? 1e-4 : 1e-2; if (_parms._alpha[0] == 0) _parms._lambda_min_ratio *= 1e-2; // smalelr lambda min for ridge as we are starting quite high } _betaStart = new double[beta.length]; System.arraycopy(beta, 0, _betaStart, 0, beta.length); _state.updateState(beta, ginfo); if (_parms._lambda == null) { // no lambda given, we will base lambda as a fraction of lambda max if (_parms._lambda_search) { _parms._lambda = new double[_parms._nlambdas]; double dec = Math.pow(_parms._lambda_min_ratio, 1.0 / (_parms._nlambdas - 1)); _parms._lambda[0] = _lmax; double l = _lmax; for (int i = 1; i < _parms._nlambdas; ++i) _parms._lambda[i] = (l *= dec); // todo set the null submodel } else _parms._lambda = new double[]{10 * _parms._lambda_min_ratio * _lmax}; } if (!Double.isNaN(_lambdaCVEstimate)) { // in main model, shrink the lambda range to search for (int i = 0; i < _parms._lambda.length; ++i) if (_parms._lambda[i] < _lambdaCVEstimate) { _parms._lambda = Arrays.copyOf(_parms._lambda, i + 1); break; } _parms._lambda[_parms._lambda.length - 1] = _lambdaCVEstimate; _parms._lambda[_parms._lambda.length - 1] = _lambdaCVEstimate; } if (_parms._objective_epsilon == -1) { if (_parms._lambda_search) _parms._objective_epsilon = 1e-4; else // lower default objective epsilon for non-standardized problems (mostly to match classical tools) _parms._objective_epsilon = _parms._lambda[0] == 0 ? 1e-6 : 1e-4; } if (_parms._gradient_epsilon == -1) { _parms._gradient_epsilon = _parms._lambda[0] == 0 ? 
1e-6 : 1e-4; if (_parms._lambda_search) _parms._gradient_epsilon *= 1e-2; } // check for correct setting for Tweedie ML dispersion parameter setting if (_parms._fix_dispersion_parameter) { // only tweeide, NB, gamma are allowed to use this if (!tweedie.equals(_parms._family) && !gamma.equals(_parms._family) && !negativebinomial.equals(_parms._family)) error("fix_dispersion_parameter", " is only allowed for tweedie, gamma and " + "negativebinomial families"); } if (_parms._fix_tweedie_variance_power && !_parms._fix_dispersion_parameter) _tweedieDispersionOnly = true; // likelihood calculation for gaussian, gamma, negativebinomial and tweedie families requires dispersion parameter estimation // _dispersion_parameter_method: gaussian - pearson (default); gamma, negativebinomial, tweedie - ml. if(_parms._calc_like) { switch (_parms._family) { case gaussian: _parms._compute_p_values = true; _parms._remove_collinear_columns = true; break; case gamma: case negativebinomial: _parms._compute_p_values = true; _parms._remove_collinear_columns = true; case tweedie: // dispersion value estimation for tweedie family does not require // parameters compute_p_values and remove_collinear_columns _parms._dispersion_parameter_method = ml; // disable regularization as ML is supported only without regularization _parms._lambda = new double[] {0.0}; default: // other families does not require dispersion parameter estimation } } if (_parms.hasCheckpoint()) { if (!Family.gaussian.equals(_parms._family)) // Gaussian it not iterative and therefore don't care _checkPointFirstIter = true; // mark the first iteration during iteration process of training if (!IRLSM.equals(_parms._solver)) error("_checkpoint", "GLM checkpoint is supported only for IRLSM. Please specify it " + "explicitly. Do not use AUTO or default"); Value cv = DKV.get(_parms._checkpoint); CheckpointUtils.getAndValidateCheckpointModel(this, CHECKPOINT_NON_MODIFIABLE_FIELDS, cv); } if (_parms._influence != null) { if (!(gaussian.equals(_parms._family) || binomial.equals(_parms._family))) error("influence", " can only be specified for the gaussian and binomial families."); if (_parms._lambda == null) _parms._lambda = new double[]{0.0}; if (_parms._lambda != null && Arrays.stream(_parms._lambda).filter(x -> x>0).count() > 0) error("regularization", "regularization is not allowed when influence is set to dfbetas. " + "Please set all lambdas to 0.0."); if (_parms._lambda_search) error("lambda_search", "lambda_search and regularization are not allowed when influence is set to dfbetas."); if (Solver.AUTO.equals(_parms._solver)) _parms._solver = IRLSM; else if (!Solver.IRLSM.equals(_parms._solver)) error("solver", "regression influence diagnostic is only calculated for IRLSM solver."); _parms._compute_p_values = true; // automatically turned these on _parms._remove_collinear_columns = true; } if (_parms._linear_constraints != null) { checkAssignLinearConstraints(); } buildModel(); } } public double[] genInitBeta() { if (_checkPointFirstIter) return _model._betaCndCheckpoint; Key<Frame> linear_constr = _parms._linear_constraints; Key<Frame> beta_constr = _parms._beta_constraints; _parms._linear_constraints = _parms._beta_constraints = null; GLMModel model = new GLM(_parms).trainModel().get(); Scope.track_generic(model); _parms._linear_constraints= linear_constr; _parms._beta_constraints = beta_constr; ScoringInfo[] scInfo = model.getScoringInfo(); _initIter = ((GLMScoringInfo) scInfo[scInfo.length-1]).iterations; return _parms._standardize ? 
model._output.getNormBeta() : model._output.beta(); // constraint values evaluation will take care of normalization } void checkInitLinearConstraints() { if (!IRLSM.equals(_parms._solver)) { // only solver irlsm is allowed error("solver", "constrained GLM is only available for IRLSM. Please set solver to" + " IRLSM/irlsm explicitly."); return; } if (_parms._constraint_eta0 <= 0) { error("constraint_eta0", "must be > 0."); return; } if (_parms._constraint_tau <= 0) { error("constraint_tau", "must be > 0."); return; } if (_parms._constraint_c0 <= 0) { error("constraint_c0", "must be > 0."); return; } if (!_parms._intercept) { error("intercept", "constrained GLM is only supported when intercept=true."); return; } // no regularization for constrained GLM except during testing if ((notZeroLambdas(_parms._lambda) || _parms._lambda_search) && !_parms._testCSZeroGram) { error("lambda or lambda_search", "Regularization is not allowed for constrained GLM. Set" + " lambda to 0.0."); return; } if ("multinomial".equals(_parms._solver) || "ordinal".equals(_parms._solver)) { error("solver", "Constrained GLM is not supported for multinomial and ordinal families"); return; } if ("ml".equals(_parms._dispersion_parameter_method)) { error("dispersion_parameter_method", "Only pearson and deviance are supported for dispersion" + " parameter calculation."); return; } } /** * This method will extract the constraints from beta_constraints followed by the constraints specified in the * linear_constraints. The constraints are extracted into equality and less-than-or-equal-to constraints from * beta_constraints and linear_constraints. * * In addition, we collect all the constraints into a matrix; if the matrix is not of full rank, some constraints * are redundant, an error is thrown, and the redundant constraints are listed in the error message * so users know which constraints to remove. */ void checkAssignLinearConstraints() { String[] coefNames = _dinfo.coefNames(); int[] betaEqualLessThanArr = null; if (_parms._beta_constraints != null) betaEqualLessThanArr = extractBetaConstraints(_state, coefNames); // extract constraints from linear_constraints into equality or less-than-or-equal-to constraints extractLinearConstraints(_state, _parms._linear_constraints, _dinfo); // make sure constraints have full rank. If not, generate messages stating which constraints are redundant and error out List<String> constraintNames = new ArrayList<>(); double[][] initConstraintMatrix = formConstraintMatrix(_state, constraintNames, betaEqualLessThanArr); String[] constraintCoefficientNames = constraintNames.toArray(new String[0]); if (countNumConst(_state) > coefNames.length) warn("number of constraints", " exceeds the number of coefficients. The system is" + " over-constrained with duplicated constraints. Consider reducing the number of constraints."); List<String> redundantConstraints = foundRedundantConstraints(_state, initConstraintMatrix); if (redundantConstraints != null) { int numRedundant = redundantConstraints.size(); for (int index = 0; index < numRedundant; index++) error("redundant linear constraints", redundantConstraints.get(index)); } else { _state._csGLMState = new ConstraintGLMStates(constraintCoefficientNames, initConstraintMatrix, _parms); } } // copy from scoring_history back to _sc or _lsc private void restoreScoringHistoryFromCheckpoint() { TwoDimTable scoringHistory = _model._output._scoring_history; String[] colHeaders2Restore = _parms._lambda_search ?
new String[]{"iteration", "timestamp", "lambda", "predictors", "deviance_train", "deviance_test", "alpha"} : new String[]{"iteration", "timestamp", "negative_log_likelihood", "objective", "sum(etai-eta0)^2", "convergence"}; int num2Copy = _parms._lambda_search ? colHeaders2Restore.length : colHeaders2Restore.length-2; int[] colHeadersIndex = grabHeaderIndex(scoringHistory, num2Copy, colHeaders2Restore); if (_parms._lambda_search) _lambdaSearchScoringHistory.restoreFromCheckpoint(scoringHistory, colHeadersIndex); else _scoringHistory.restoreFromCheckpoint(scoringHistory, colHeadersIndex); } static int[] grabHeaderIndex(TwoDimTable sHist, int numHeaders, String[] colHeadersUseful) { int[] colHeadersIndex = new int[numHeaders]; List<String> colHeadersList = Arrays.asList(sHist.getColHeaders()); for (int colInd = 0; colInd < numHeaders; colInd++) { if (colInd == 0) { int indexFound = colHeadersList.indexOf(colHeadersUseful[colInd]); if (indexFound < 0) indexFound = colHeadersList.indexOf(colHeadersUseful[colInd]+"s"); colHeadersIndex[colInd] = indexFound; } else { colHeadersIndex[colInd] = colHeadersList.indexOf(colHeadersUseful[colInd]); } } return colHeadersIndex; } // FIXME: contrary to other models, GLM output duration includes computation of CV models: // ideally the model should be instantiated in the #computeImpl() method instead of init private void buildModel() { if (_parms.hasCheckpoint()) { GLMModel model = ((GLMModel)DKV.getGet(_parms._checkpoint)).deepClone(_result); // Override original parameters by new parameters model._parms = _parms; // We create a new model _model = model; restoreScoringHistoryFromCheckpoint(); // copy over scoring history and related data structure } else { _model = new GLMModel(_result, _parms, this, _state._ymu, _dinfo._adaptedFrame.lastVec().sigma(), _lmax, _nobs); } _model._output.setLambdas(_parms); // set lambda_min and lambda_max if lambda_search is enabled _model._output._ymu = _state._ymu.clone(); _model.delete_and_lock(_job); } protected static final long WORK_TOTAL = 1000000; transient Key [] _toRemove; private Key[] removeLater(Key ...k){ _toRemove = _toRemove == null?k:ArrayUtils.append(_toRemove,k); return k; } @Override protected GLMDriver trainModelImpl() { return _driver = new GLMDriver(); } private final double lmax(double[] grad) { if (gam.equals(_parms._glmType)) { // do not take into account gam col gradients. They can be too big int totGamCols = 0; for (int numG = 0; numG < _penaltyMatrix.length; numG++) { totGamCols += _penaltyMatrix[numG].length; } int endIndex = grad.length - totGamCols; return Math.max(ArrayUtils.maxValue(grad,0,endIndex), -ArrayUtils.minValue(grad,0,endIndex)) / Math.max(1e-2, _parms._alpha[0]); } else return Math.max(ArrayUtils.maxValue(grad), -ArrayUtils.minValue(grad)) / Math.max(1e-2, _parms._alpha[0]); } private transient ComputationState _state; /** * Main loop of the glm algo. 
*/ public final class GLMDriver extends Driver implements ProgressMonitor { private long _workPerIteration; private transient double[][] _vcov; private transient GLMTask.GLMIterationTask _gramInfluence; private transient double[][] _cholInvInfluence; private void doCleanup() { try { if (_parms._lambda_search && _parms._is_cv_model) Scope.untrack(removeLater(_dinfo.getWeightsVec()._key)); } catch (Exception e) { Log.err("Error while cleaning up GLM " + _result); Log.err(e); } } private transient Cholesky _chol; private transient L1Solver _lslvr; /*** * Use Cholesky decomposition to solve for GLM coefficients from the augmented Lagrangian objective. In addition, * it will check for collinear columns and remove them when found. */ private double[] constraintGLM_solve(GramGrad gram) { if (!_parms._intercept) throw H2O.unimpl(); ArrayList<Integer> ignoredCols = new ArrayList<>(); double[] xy = gram._xy.clone(); Cholesky chol = ((_state._iter == 0) ? gram.qrCholesky(ignoredCols, copy2DArray(gram._gram), _parms._standardize) : gram.cholesky(null, gram._gram)); if (!chol.isSPD()) throw new NonSPDMatrixException(); if (!ignoredCols.isEmpty()) { int[] collinearCols = ignoredCols.stream().mapToInt(x -> x).toArray(); String[] ignoredConstraints = collinearInConstraints(ArrayUtils.select(_dinfo.coefNames(), collinearCols), _state._csGLMState._constraintNames); String collinearColNames = Arrays.toString(ArrayUtils.select(_dinfo.coefNames(), collinearCols)); if (ignoredConstraints != null && ignoredConstraints.length > 0) throw new IllegalArgumentException("Found constraints " + Arrays.toString(ignoredConstraints) + " included on collinear columns that are going to be removed. Please remove any constraints " + "involving collinear columns."); if (!_parms._remove_collinear_columns) throw new Gram.CollinearColumnsException("Found collinear columns in the dataset. Set " + "remove_collinear_columns flag to true to remove collinear columns automatically. " + "Found collinear columns " + collinearColNames); _model.addWarning("Removed collinear columns "+collinearColNames); Log.warn("Removed collinear columns "+collinearColNames); _state.removeCols(collinearCols); gram._gram = GramGrad.dropCols(collinearCols, gram._gram); gram._grad = ArrayUtils.removeIds(gram._grad, collinearCols); xy = ArrayUtils.removeIds(xy, collinearCols); } _chol = chol; chol.solve(xy); return xy; } private double[] ADMM_solve(Gram gram, double[] xy) { if (_parms._remove_collinear_columns || _parms._compute_p_values) { if (!_parms._intercept) throw H2O.unimpl(); ArrayList<Integer> ignoredCols = new ArrayList<>(); Cholesky chol = ((_state._iter == 0) ? gram.qrCholesky(ignoredCols, _parms._standardize) : gram.cholesky(null)); if (!ignoredCols.isEmpty() && !_parms._remove_collinear_columns) { int[] collinearCols = new int[ignoredCols.size()]; for (int i = 0; i < collinearCols.length; ++i) collinearCols[i] = ignoredCols.get(i); throw new Gram.CollinearColumnsException("Found collinear columns in the dataset. P-values can not be " + "computed with collinear columns in the dataset. Set remove_collinear_columns flag to true to remove " + "collinear columns automatically.
Found collinear columns " + Arrays.toString(ArrayUtils.select(_dinfo.coefNames(), collinearCols))); } if (!chol.isSPD()) throw new NonSPDMatrixException(); _chol = chol; if (!ignoredCols.isEmpty()) { // got some redundant cols int[] collinearCols = new int[ignoredCols.size()]; for (int i = 0; i < collinearCols.length; ++i) collinearCols[i] = ignoredCols.get(i); String[] collinearColNames = ArrayUtils.select(_state.activeData().coefNames(), collinearCols); // need to drop the cols from everywhere _model.addWarning("Removed collinear columns " + Arrays.toString(collinearColNames)); Log.warn("Removed collinear columns " + Arrays.toString(collinearColNames)); _state.removeCols(collinearCols); gram.dropCols(collinearCols); xy = ArrayUtils.removeIds(xy, collinearCols); } xy = xy.clone(); chol.solve(xy); } else { // ADMM solve is only used when there is l1 regularization (lasso). gram = gram.deep_clone(); xy = xy.clone(); GramSolver slvr = new GramSolver(gram.clone(), xy.clone(), _parms._intercept, _state.l2pen(), _state.l1pen(), _state.activeBC()._betaGiven, _state.activeBC()._rho, _state.activeBC()._betaLB, _state.activeBC()._betaUB); _chol = slvr._chol; if (_state.l1pen() == 0 && !_state.activeBC().hasBounds()) { slvr.solve(xy); } else { xy = MemoryManager.malloc8d(xy.length); if (_state._u == null && !multinomial.equals(_parms._family)) _state._u = MemoryManager.malloc8d(_state.activeData().fullN() + 1); (_lslvr = new ADMM.L1Solver(1e-4, 10000, _state._u)).solve(slvr, xy, _state.l1pen(), _parms._intercept, _state.activeBC()._betaLB, _state.activeBC()._betaUB); } } return xy; } private void fitCOD_multinomial(Solver s) { double[] beta = _state.betaMultinomial(); LineSearchSolver ls; do { beta = beta.clone(); for (int c = 0; c < _nclass; ++c) { _state.setActiveClass(c); boolean onlyIcpt = _state.activeDataMultinomial(c).fullN() == 0; if (_state.l1pen() == 0) { if (_state.ginfoNull()) _state.updateState(beta, _state.gslvr().getGradient(beta)); ls = new MoreThuente(_state.gslvrMultinomial(c), _state.betaMultinomial(c, beta), _state.ginfoMultinomial(c)); } else ls = new SimpleBacktrackingLS(_state.gslvrMultinomial(c), _state.betaMultinomial(c, beta), _state.l1pen()); new GLMMultinomialUpdate(_state.activeDataMultinomial(), _job._key, beta, c).doAll(_state.activeDataMultinomial()._adaptedFrame); ComputationState.GramXY gram = _state.computeGram(_state.betaMultinomial(c, beta), s); double[] betaCnd = COD_solve(gram, _state._alpha, _state.lambda()); if (!onlyIcpt && !ls.evaluate(ArrayUtils.subtract(betaCnd, ls.getX(), betaCnd))) { Log.info(LogMsg("Ls failed " + ls)); continue; } _state.setBetaMultinomial(c, beta, ls.getX()); // set new beta } _state.setActiveClass(-1); // only reset after going through a whole set of classes. 
Not sure about this } while (progress(beta, _state.gslvr().getMultinomialLikelihood(beta))); // only need likelihood inside loop if (_parms._lambda_search) { _state.updateState(beta, _state.gslvr().getGradient(beta)); // only calculate _gradient here when needed } } private void fitIRLSM_multinomial(Solver s) { assert _dinfo._responses == 3 : "IRLSM for multinomial needs extra information encoded in additional reponses, expected 3 response vecs, got " + _dinfo._responses; if (Solver.COORDINATE_DESCENT.equals(s)) { fitCOD_multinomial(s); } else { double[] beta = _state.betaMultinomial(); // full with active/inactive columns do { beta = beta.clone(); for (int c = 0; c < _nclass; ++c) { boolean onlyIcpt = _state.activeDataMultinomial(c).fullN() == 0; _state.setActiveClass(c); // _state.gslvrMultinomial(c) get beta, _state info, _state.betaMultinomial(c, beta) get coef per class // _state.ginfoMultinomial(c) get gradient for one class LineSearchSolver ls; if (_parms._remove_collinear_columns) ls = (_state.l1pen() == 0) // at first iteration, state._beta, ginfo.gradient have not shrunk here ? new MoreThuente(_state.gslvrMultinomial(c), _state.betaMultinomialFull(c, beta), _state._iter==0?_state.ginfoMultinomial(c):_state.ginfoMultinomialRCC(c)) : new SimpleBacktrackingLS(_state.gslvrMultinomial(c), _state.betaMultinomialFull(c, beta), _state.l1pen()); else ls = (_state.l1pen() == 0) // normal case with rcc = false, nothing changes ? new MoreThuente(_state.gslvrMultinomial(c), _state.betaMultinomial(c, beta), _state.ginfoMultinomial(c)) : new SimpleBacktrackingLS(_state.gslvrMultinomial(c), _state.betaMultinomial(c, beta), _state.l1pen()); /* if (_parms._remove_collinear_columns && _state._iter < 1) ls = (_state.l1pen() == 0) // at first iteration, state._beta, ginfo.gradient have not shrunk here ? new MoreThuente(_state.gslvrMultinomial(c), _state.betaMultinomialFull(c, beta), _state.ginfoMultinomial(c)) : new SimpleBacktrackingLS(_state.gslvrMultinomial(c), _state.betaMultinomialFull(c, beta), _state.l1pen()); else if (_parms._remove_collinear_columns && _state._iter > 0) ls = (_state.l1pen() == 0) // after first iteration over all classes, state._beta, ginfo.gradient are all smaller size ? new MoreThuente(_state.gslvrMultinomial(c), _state.betaMultinomialFull(c, beta), _state.ginfoMultinomialRCC(c)) : new SimpleBacktrackingLS(_state.gslvrMultinomial(c), _state.betaMultinomialFull(c, beta), _state.l1pen()); else ls = (_state.l1pen() == 0) // normal case with rcc = false, nothing changes ? 
new MoreThuente(_state.gslvrMultinomial(c), _state.betaMultinomial(c, beta), _state.ginfoMultinomial(c)) : new SimpleBacktrackingLS(_state.gslvrMultinomial(c), _state.betaMultinomial(c, beta), _state.l1pen());*/ long t1 = System.currentTimeMillis(); // GLMMultinomialUpdate needs to take beta that contains active columns described in _state.activeDataMultinomial() if (_parms._remove_collinear_columns && _state.activeDataMultinomial()._activeCols != null && _betaInfo._betaLenPerClass != _state.activeDataMultinomial().activeCols().length) { // beta full length, need short beta double[] shortBeta = _state.shrinkFullArray(beta); new GLMMultinomialUpdate(_state.activeDataMultinomial(), _job._key, shortBeta, c).doAll(_state.activeDataMultinomial()._adaptedFrame); } else { new GLMMultinomialUpdate(_state.activeDataMultinomial(), _job._key, beta, c).doAll(_state.activeDataMultinomial()._adaptedFrame); } long t2 = System.currentTimeMillis(); ComputationState.GramXY gram; if (_parms._remove_collinear_columns && _state._iter > 0) gram = _state.computeGramRCC(ls.getX(), s); // only use beta for the active columns only else gram = _state.computeGram(ls.getX(), s); long t3 = System.currentTimeMillis(); double[] betaCnd = ADMM_solve(gram.gram, gram.xy); // remove collinear columns here from ginfo._gradient, betaCnd long t4 = System.currentTimeMillis(); if (_parms._remove_collinear_columns) { // betaCnd contains only active columns but ls.getX() could be full length int lsLength = ls.getX().length; if (lsLength != betaCnd.length) { double[] wideBetaCnd = new double[lsLength]; fillSubRange(lsLength, 0, _state.activeDataMultinomial()._activeCols, betaCnd, wideBetaCnd); betaCnd = wideBetaCnd; } } if (!onlyIcpt && !ls.evaluate(ArrayUtils.subtract(betaCnd, ls.getX(), betaCnd))) {// betaCnd full size Log.info(LogMsg("Ls failed " + ls)); continue; } long t5 = System.currentTimeMillis(); _state.setBetaMultinomial(c, beta, ls.getX()); // update multinomial Log.info(LogMsg("computed in " + (t2 - t1) + "+" + (t3 - t2) + "+" + (t4 - t3) + "+" + (t5 - t4) + "=" + (t5 - t1) + "ms, step = " + ls.step() + ((_lslvr != null) ? ", l1solver " + _lslvr : ""))); } _state.setActiveClass(-1); _model._output._activeColsPerClass = _state.activeDataMultinomial().activeCols(); } while (progress(beta, _state.gslvr().getGradient(beta))); } } // use regular gradient descend here. Need to figure out how to adjust for the alpha, lambda for the elastic net private void fitIRLSM_ordinal_default(Solver s) { assert _dinfo._responses == 3 : "Solver for ordinal needs extra information encoded in additional reponses, " + "expected 3 response vecs, got " + _dinfo._responses; double[] beta = _state.betaMultinomial(); int predSize = _dinfo.fullN(); int predSizeP1 = predSize + 1; int numClass = _state._nbetas; int numIcpt = numClass - 1; double[] betaCnd = new double[predSize]; // number of predictors _state.gslvr().getGradient(beta); // get new gradient info with correct l2pen value. 
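// Informal sketch of the ordinal update below: plain gradient descent on the ordinal objective. The class
// thresholds (per-class intercepts) are moved first, and training stops if they ever lose their monotone
// ordering. The remaining coefficients are then updated with the gradient plus a subgradient of the l1 penalty
// (sign(beta) * l1pen), and, because ordinal regression shares one slope vector across classes, the updated
// coefficients are copied into every class block.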
double l1pen = _state.lambda() * _state._alpha; // l2pen already calculated in gradient boolean stopNow = false; do { beta = beta.clone(); // copy over the coefficients // perform updates only on the betas excluding the intercept double[] grads = _state.ginfo()._gradient; for (int pindex = 0; pindex < numIcpt; pindex++) { // check and then update the intercepts int icptindex = (pindex + 1) * predSizeP1 - 1; beta[icptindex] -= grads[icptindex]; if (pindex > 0) { int previousIcpt = pindex * predSizeP1 - 1; if (beta[icptindex] < beta[previousIcpt]) { warn("Ordinal regression training: ", " intercepts of previous class exceed that " + "of current class. Make sure your training parameters are set properly. Training will " + "stop now with the last eligible parameters."); stopNow = true; for (int index = 0; index <= pindex; index++) { // restore threshold value to old ones icptindex = (index + 1) * predSizeP1 - 1; beta[icptindex] += grads[icptindex]; } break; } } } if (stopNow) // break out of while loop break; // update all parameters with new gradient; for (int pindex = 0; pindex < predSize; pindex++) { // add l1pen is necessary and coefficient updates betaCnd[pindex] = grads[pindex]; if (l1pen > 0) { betaCnd[pindex] += beta[pindex] > 0 ? l1pen : (beta[pindex] == 0 ? 0 : -l1pen); } beta[pindex] -= betaCnd[pindex]; // take the negative of the gradient and stuff } for (int indC = 1; indC < numIcpt; indC++) { int indOffset = indC * predSizeP1; for (int index = 0; index < predSize; index++) { // copy beta to all classes beta[indOffset + index] = beta[index]; } } _state.setActiveClass(-1); } while (progress(beta, _state.gslvr().getGradient(beta))); } private void fitLSM(Solver s) { long t0 = System.currentTimeMillis(); ComputationState.GramXY gramXY = _state.computeGram(_state.beta(), s); Log.info(LogMsg("Gram computed in " + (System.currentTimeMillis() - t0) + "ms")); final BetaConstraint bc = _state.activeBC(); double[] beta = _parms._solver == Solver.COORDINATE_DESCENT ? COD_solve(gramXY, _state._alpha, _state.lambda()) : ADMM_solve(gramXY.gram, gramXY.xy); if (_betaConstraintsOn) // apply beta constraints bc.applyAllBounds(beta); // compute mse double[] x = ArrayUtils.mmul(gramXY.gram.getXX(), beta); for (int i = 0; i < x.length; ++i) x[i] = (x[i] - 2 * gramXY.xy[i]); double l = .5 * (ArrayUtils.innerProduct(x, beta) / _parms._obj_reg + gramXY.yy); _state._iter++; _state.updateState(beta, l); } private void fitIRLSM(Solver s) { GLMWeightsFun glmw = new GLMWeightsFun(_parms); double[] betaCnd = _checkPointFirstIter ? _model._betaCndCheckpoint : _state.beta(); LineSearchSolver ls = null; int iterCnt = _checkPointFirstIter ? _state._iter : 0; boolean firstIter = iterCnt == 0; final BetaConstraint bc = _state.activeBC(); try { while (true) { iterCnt++; long t1 = System.currentTimeMillis(); ComputationState.GramXY gram = _state.computeGram(betaCnd, s); long t2 = System.currentTimeMillis(); if (!_state._lsNeeded && (Double.isNaN(gram.likelihood) || _state.objective(gram.beta, gram.likelihood) > _state.objective() + _parms._objective_epsilon) && !_checkPointFirstIter) { _state._lsNeeded = true; } else { if (!firstIter && !_state._lsNeeded && !progress(gram.beta, gram.likelihood) && !_checkPointFirstIter) { Log.info("DONE after " + (iterCnt - 1) + " iterations (1)"); _model._betaCndCheckpoint = betaCnd; return; } if (!_checkPointFirstIter) betaCnd = s == Solver.COORDINATE_DESCENT ? 
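// The candidate step here is the standard IRLSM move: solve the weighted least-squares subproblem defined by
// the current Gram, using coordinate descent (COD_solve) when that solver is selected and the ADMM/Cholesky
// path otherwise. Line search (MoreThuente when there is no l1 penalty or bounds, simple backtracking otherwise)
// is switched on lazily, only after an iteration fails to improve the objective.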
COD_solve(gram, _state._alpha, _state.lambda()) : ADMM_solve(gram.gram, gram.xy); // this will shrink betaCnd if needed but this call may be skipped } firstIter = false; _checkPointFirstIter = false; long t3 = System.currentTimeMillis(); if (_state._lsNeeded) { if (ls == null) ls = (_state.l1pen() == 0 && !_state.activeBC().hasBounds()) ? new MoreThuente(_state.gslvr(), _state.beta(), _state.ginfo()) : new SimpleBacktrackingLS(_state.gslvr(), _state.beta().clone(), _state.l1pen(), _state.ginfo()); double[] oldBetaCnd = ls.getX(); if (betaCnd.length != oldBetaCnd.length) { // if ln 1453 is skipped and betaCnd.length != _state.beta() betaCnd = extractSubRange(betaCnd.length, 0, _state.activeData()._activeCols, betaCnd); } if (!ls.evaluate(ArrayUtils.subtract(betaCnd, oldBetaCnd, betaCnd))) { // ls.getX() gets the old beta value Log.info(LogMsg("Ls failed " + ls)); return; } betaCnd = ls.getX(); if (_betaConstraintsOn) bc.applyAllBounds(betaCnd); if (!progress(betaCnd, ls.ginfo())) return; long t4 = System.currentTimeMillis(); Log.info(LogMsg("computed in " + (t2 - t1) + "+" + (t3 - t2) + "+" + (t4 - t3) + "=" + (t4 - t1) + "ms, step = " + ls.step() + ((_lslvr != null) ? ", l1solver " + _lslvr : ""))); } else { if (_betaConstraintsOn) // apply beta constraints without LS bc.applyAllBounds(betaCnd); Log.info(LogMsg("computed in " + (t2 - t1) + "+" + (t3 - t2) + "=" + (t3 - t1) + "ms, step = " + 1 + ((_lslvr != null) ? ", l1solver " + _lslvr : ""))); } } } catch (NonSPDMatrixException e) { Log.warn(LogMsg("Got Non SPD matrix, stopped.")); } } /*** * This method fits the constrained GLM using IRLSM. We implemented the algorithm depicted in the document (H2O * Constrained GLM Implementation.pdf) attached to this issue: https://github.com/h2oai/h2o-3/issues/6722. We will * hereafter use the term "the doc" to refer to this document. In particular, we follow the algorithm described in * Section VII (and the table titled Algorithm 19.1) of the doc. This variant is not as good as the one that also considers the magnitude of the gradient. */ private void fitIRLSMCS9() { double[] betaCnd = _checkPointFirstIter ? _model._betaCndCheckpoint : _state.beta(); double[] tempBeta = _parms._separate_linear_beta ? new double[betaCnd.length] : null; List<String> coefNames = Arrays.stream(_state.activeData()._coefNames).collect(Collectors.toList()); LinearConstraints[] equalityConstraints; LinearConstraints[] lessThanEqualToConstraints; final BetaConstraint bc = _state.activeBC(); if (_parms._separate_linear_beta) { // keeping linear and beta constraints separate in this case equalityConstraints = _state._equalityConstraintsLinear; lessThanEqualToConstraints = _state._lessThanEqualToConstraintsLinear; } else { // combine beta and linear constraints together equalityConstraints = combineConstraints(_state._equalityConstraintsBeta, _state._equalityConstraintsLinear); lessThanEqualToConstraints = combineConstraints(_state._lessThanEqualToConstraintsBeta, _state._lessThanEqualToConstraintsLinear); } boolean hasEqualityConstraints = equalityConstraints != null; boolean hasLessConstraints = lessThanEqualToConstraints != null; double[] lambdaEqual = hasEqualityConstraints ? new double[equalityConstraints.length] : null; double[] lambdaLessThan = hasLessConstraints ? new double[lessThanEqualToConstraints.length] : null; Long startSeed = _parms._seed == -1 ?
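// Informal overview of the constrained solvers (fitIRLSMCS9 and its variants fitIRLSMCS8, fitIRLSMCS and
// fitIRLSMCS2): they follow an augmented-Lagrangian style scheme from the doc. The inner do-while solves for
// beta with a Newton-type step (constraintGLM_solve on the Gram) plus an exact line search until the squared
// gradient norm drops below _state._csGLMState._epsilonkCSSquare; the outer while-loop then calls
// updateConstraintParameters to update the multipliers/penalty weight and recomputes the gradient with
// calGradient.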
new Random().nextLong() : _parms._seed; Random randObj = new Random(startSeed); updateConstraintValues(betaCnd, coefNames, equalityConstraints, lessThanEqualToConstraints); if (hasEqualityConstraints) // set lambda values for constraints genInitialLambda(randObj, equalityConstraints, lambdaEqual); if (hasLessConstraints) genInitialLambda(randObj, lessThanEqualToConstraints, lambdaLessThan); ExactLineSearch ls = null; int iterCnt = (_checkPointFirstIter ? _state._iter : 0)+_initIter; // contribution to gradient and hessian from constraints _state.initConstraintDerivatives(equalityConstraints, lessThanEqualToConstraints, coefNames); GLMGradientSolver ginfo = gam.equals(_parms._glmType) ? new GLMGradientSolver(_job, _parms, _dinfo, 0, _state.activeBC(), _betaInfo, _penaltyMatrix, _gamColIndices) : new GLMGradientSolver(_job, _parms, _dinfo, 0, _state.activeBC(), _betaInfo); GLMGradientInfo gradientInfo = calGradient(betaCnd, _state, ginfo, lambdaEqual, lambdaLessThan, equalityConstraints, lessThanEqualToConstraints); // add dpenalty/dx to gradient from penalty term _state.setConstraintInfo(gradientInfo, equalityConstraints, lessThanEqualToConstraints, lambdaEqual, lambdaLessThan); // update state ginfo with contributions from GLMGradientInfo boolean predictorSizeChange; boolean applyBetaConstraints = _parms._separate_linear_beta && _betaConstraintsOn; // short circuit check here: if gradient magnitude is small and all constraints are satisfied, quit right away if (constraintsStop(gradientInfo, _state)) { Log.info(LogMsg("GLM with constraints model building completed successfully!!")); return; } double gradMagSquare = ArrayUtils.innerProduct(gradientInfo._gradient, gradientInfo._gradient); boolean done; boolean gradSmallEnough = (gradMagSquare <= _state._csGLMState._epsilonkCSSquare); int origIter = iterCnt+1; boolean lineSearchSuccess; try { while (true) { do { // implement Algorithm 11.8 of the doc to find coefficients with epsilon k as the precision iterCnt++; long t1 = System.currentTimeMillis(); ComputationState.GramGrad gram = _state.computeGram(betaCnd, gradientInfo); // calculate gram (hessian), xy, objective values if (iterCnt == origIter) { Matrix gramMatrix = new Matrix(gram._gram); if (gramMatrix.cond() >= BAD_CONDITION_NUMBER) if (_parms._init_optimal_glm) { warn("init_optimal_glm", " should be disabled. This lead to gram matrix being close to" + " singular. Please re-run with init_optimal_glm set to false."); } } predictorSizeChange = !coefNames.equals(Arrays.asList(_state.activeData().coefNames())); if (predictorSizeChange) { // reset if predictors changed coefNames = changeCoeffBetainfo(_state.activeData()._coefNames); _state.resizeConstraintInfo(equalityConstraints, lessThanEqualToConstraints); ginfo = gam.equals(_parms._glmType) ? new GLMGradientSolver(_job, _parms, _state.activeData(), 0, _state.activeBC(), _betaInfo, _penaltyMatrix, _gamColIndices) : new GLMGradientSolver(_job, _parms, _state.activeData(), 0, _state.activeBC(), _betaInfo); tempBeta = new double[coefNames.size()]; } // solve for GLM coefficients betaCnd = constraintGLM_solve(gram); // beta_k+1 = beta_k+dk where dk = beta_k+1-beta_k predictorSizeChange = !coefNames.equals(Arrays.asList(_state.activeData().coefNames())); if (predictorSizeChange) { // reset if predictors changed coefNames = changeCoeffBetainfo(_state.activeData()._coefNames); _state.resizeConstraintInfo(equalityConstraints, lessThanEqualToConstraints); ginfo = gam.equals(_parms._glmType) ? 
new GLMGradientSolver(_job, _parms, _state.activeData(), 0, _state.activeBC(), _betaInfo, _penaltyMatrix, _gamColIndices) : new GLMGradientSolver(_job, _parms, _state.activeData(), 0, _state.activeBC(), _betaInfo); tempBeta = new double[betaCnd.length]; } // add exact line search for GLM coefficients. Refer to the doc, Algorithm 11.5 if (ls == null) ls = new ExactLineSearch(betaCnd, _state, coefNames); else ls.reset(betaCnd, _state, coefNames); // line search can fail when the gradient is close to zero. In this case, we need to update the // constraint parameters. lineSearchSuccess = ls.findAlpha(lambdaEqual, lambdaLessThan, _state, equalityConstraints, lessThanEqualToConstraints, ginfo); gradMagSquare = ArrayUtils.innerProduct(ls._ginfoOriginal._gradient, ls._ginfoOriginal._gradient); gradSmallEnough = gradMagSquare <= _state._csGLMState._epsilonkCSSquare; if (lineSearchSuccess) { betaCnd = ls._newBeta; gradientInfo = ls._ginfoOriginal; } else { // ls failed, reset to if (applyBetaConstraints) // separate beta and linear constraints bc.applyAllBounds(_state.beta()); ls.setBetaConstraintsDeriv(lambdaEqual, lambdaLessThan, _state, equalityConstraints, lessThanEqualToConstraints, ginfo, _state.beta()); Log.info(LogMsg("Line search failed " + ls)); return; } if (applyBetaConstraints) { // if beta constraints are applied, may need to update constraints, derivatives, gradientInfo System.arraycopy(betaCnd, 0, tempBeta, 0, betaCnd.length); bc.applyAllBounds(betaCnd); ArrayUtils.subtract(betaCnd, tempBeta, tempBeta); ls.setBetaConstraintsDeriv(lambdaEqual, lambdaLessThan, _state, equalityConstraints, lessThanEqualToConstraints, ginfo, betaCnd); gradientInfo = ls._ginfoOriginal; } // check for stopping conditions which also updates the variables in state. // stopping condition is to stop us getting stuck in improvements that are too insignificant. // However, we will only exit the while loop when the gradMagSquare is still too high. There is no hope // for improvement here anymore since the beta values and gradient values are not changing much anymore. done = stop_requested() || (_state._iter >= _parms._max_iterations) || _earlyStop; // time to go if (!progress(betaCnd, gradientInfo)) { checkKKTConditions(betaCnd, gradientInfo, iterCnt); return; } Log.info(LogMsg("computed in " + (System.currentTimeMillis() - t1) + "ms, step = " + iterCnt + ((_lslvr != null) ? ", l1solver " + _lslvr : ""))); } while (!gradSmallEnough); // update constraint parameters, ck, lambdas and others updateConstraintParameters(_state, lambdaEqual, lambdaLessThan, equalityConstraints, lessThanEqualToConstraints, _parms); // update gradient calculation with new value (lambda and/or ck). gradientInfo = calGradient(betaCnd, _state, ginfo, lambdaEqual, lambdaLessThan, equalityConstraints, lessThanEqualToConstraints); _state.updateState(betaCnd, gradientInfo); // update computation state with new info } } catch (NonSPDMatrixException e) { Log.warn(LogMsg("Got Non SPD matrix, stopped.")); } } // original algo, set lambda = 0 for inactive constraints, no good effect. private void fitIRLSMCS8() { double[] betaCnd = _checkPointFirstIter ? _model._betaCndCheckpoint : _state.beta(); double[] tempBeta = _parms._separate_linear_beta ? 
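// Informal note: fitIRLSMCS8 mirrors fitIRLSMCS9 above almost verbatim; the only visible difference is the
// extra adjustLambda(lessThanEqualToConstraints, lambdaLessThan) calls, which appear to implement the
// 'set lambda = 0 for inactive constraints' idea mentioned in the comment above.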
new double[betaCnd.length] : null; List<String> coefNames = Arrays.stream(_state.activeData()._coefNames).collect(Collectors.toList()); LinearConstraints[] equalityConstraints; LinearConstraints[] lessThanEqualToConstraints; final BetaConstraint bc = _state.activeBC(); if (_parms._separate_linear_beta) { // keeping linear and beta constraints separate in this case equalityConstraints = _state._equalityConstraintsLinear; lessThanEqualToConstraints = _state._lessThanEqualToConstraintsLinear; } else { // combine beta and linear constraints together equalityConstraints = combineConstraints(_state._equalityConstraintsBeta, _state._equalityConstraintsLinear); lessThanEqualToConstraints = combineConstraints(_state._lessThanEqualToConstraintsBeta, _state._lessThanEqualToConstraintsLinear); } boolean hasEqualityConstraints = equalityConstraints != null; boolean hasLessConstraints = lessThanEqualToConstraints != null; double[] lambdaEqual = hasEqualityConstraints ? new double[equalityConstraints.length] : null; double[] lambdaLessThan = hasLessConstraints ? new double[lessThanEqualToConstraints.length] : null; Long startSeed = _parms._seed == -1 ? new Random().nextLong() : _parms._seed; Random randObj = new Random(startSeed); updateConstraintValues(betaCnd, coefNames, equalityConstraints, lessThanEqualToConstraints); if (hasEqualityConstraints) // set lambda values for constraints genInitialLambda(randObj, equalityConstraints, lambdaEqual); if (hasLessConstraints) { genInitialLambda(randObj, lessThanEqualToConstraints, lambdaLessThan); adjustLambda(lessThanEqualToConstraints, lambdaLessThan); } ExactLineSearch ls = null; int iterCnt = (_checkPointFirstIter ? _state._iter : 0)+_initIter; // contribution to gradient and hessian from constraints _state.initConstraintDerivatives(equalityConstraints, lessThanEqualToConstraints, coefNames); GLMGradientSolver ginfo = gam.equals(_parms._glmType) ? 
new GLMGradientSolver(_job, _parms, _dinfo, 0, _state.activeBC(), _betaInfo, _penaltyMatrix, _gamColIndices) : new GLMGradientSolver(_job, _parms, _dinfo, 0, _state.activeBC(), _betaInfo); GLMGradientInfo gradientInfo = calGradient(betaCnd, _state, ginfo, lambdaEqual, lambdaLessThan, equalityConstraints, lessThanEqualToConstraints); // add dpenalty/dx to gradient from penalty term _state.setConstraintInfo(gradientInfo, equalityConstraints, lessThanEqualToConstraints, lambdaEqual, lambdaLessThan); // update state ginfo with contributions from GLMGradientInfo boolean predictorSizeChange; boolean applyBetaConstraints = _parms._separate_linear_beta && _betaConstraintsOn; // short circuit check here: if gradient magnitude is small and all constraints are satisfied, quit right away if (constraintsStop(gradientInfo, _state)) { Log.info(LogMsg("GLM with constraints model building completed successfully!!")); return; } double gradMagSquare = ArrayUtils.innerProduct(gradientInfo._gradient, gradientInfo._gradient); boolean done; boolean gradSmallEnough = (gradMagSquare <= _state._csGLMState._epsilonkCSSquare); int origIter = iterCnt+1; boolean lineSearchSuccess; try { while (true) { do { // implement Algorithm 11.8 of the doc to find coefficients with epsilon k as the precision iterCnt++; long t1 = System.currentTimeMillis(); ComputationState.GramGrad gram = _state.computeGram(betaCnd, gradientInfo); // calculate gram (hessian), xy, objective values if (iterCnt == origIter) { Matrix gramMatrix = new Matrix(gram._gram); if (gramMatrix.cond() >= BAD_CONDITION_NUMBER) if (_parms._init_optimal_glm) { warn("init_optimal_glm", " should be disabled. This lead to gram matrix being close to" + " singular. Please re-run with init_optimal_glm set to false."); } } predictorSizeChange = !coefNames.equals(Arrays.asList(_state.activeData().coefNames())); if (predictorSizeChange) { // reset if predictors changed coefNames = changeCoeffBetainfo(_state.activeData()._coefNames); _state.resizeConstraintInfo(equalityConstraints, lessThanEqualToConstraints); ginfo = gam.equals(_parms._glmType) ? new GLMGradientSolver(_job, _parms, _state.activeData(), 0, _state.activeBC(), _betaInfo, _penaltyMatrix, _gamColIndices) : new GLMGradientSolver(_job, _parms, _state.activeData(), 0, _state.activeBC(), _betaInfo); tempBeta = new double[coefNames.size()]; } // solve for GLM coefficients betaCnd = constraintGLM_solve(gram); // beta_k+1 = beta_k+dk where dk = beta_k+1-beta_k predictorSizeChange = !coefNames.equals(Arrays.asList(_state.activeData().coefNames())); if (predictorSizeChange) { // reset if predictors changed coefNames = changeCoeffBetainfo(_state.activeData()._coefNames); _state.resizeConstraintInfo(equalityConstraints, lessThanEqualToConstraints); ginfo = gam.equals(_parms._glmType) ? new GLMGradientSolver(_job, _parms, _state.activeData(), 0, _state.activeBC(), _betaInfo, _penaltyMatrix, _gamColIndices) : new GLMGradientSolver(_job, _parms, _state.activeData(), 0, _state.activeBC(), _betaInfo); tempBeta = new double[betaCnd.length]; } // add exact line search for GLM coefficients. Refer to the doc, Algorithm 11.5 if (ls == null) ls = new ExactLineSearch(betaCnd, _state, coefNames); else ls.reset(betaCnd, _state, coefNames); // line search can fail when the gradient is close to zero. In this case, we need to update the // constraint parameters. 
lineSearchSuccess = ls.findAlpha(lambdaEqual, lambdaLessThan, _state, equalityConstraints, lessThanEqualToConstraints, ginfo); gradMagSquare = ArrayUtils.innerProduct(ls._ginfoOriginal._gradient, ls._ginfoOriginal._gradient); gradSmallEnough = gradMagSquare <= _state._csGLMState._epsilonkCSSquare; if (lineSearchSuccess) { betaCnd = ls._newBeta; gradientInfo = ls._ginfoOriginal; } else { // ls failed, reset to the previous beta if (applyBetaConstraints) // separate beta and linear constraints bc.applyAllBounds(_state.beta()); ls.setBetaConstraintsDeriv(lambdaEqual, lambdaLessThan, _state, equalityConstraints, lessThanEqualToConstraints, ginfo, _state.beta()); Log.info(LogMsg("Line search failed " + ls)); return; } if (applyBetaConstraints) { // if beta constraints are applied, may need to update constraints, derivatives, gradientInfo System.arraycopy(betaCnd, 0, tempBeta, 0, betaCnd.length); bc.applyAllBounds(betaCnd); ArrayUtils.subtract(betaCnd, tempBeta, tempBeta); ls.setBetaConstraintsDeriv(lambdaEqual, lambdaLessThan, _state, equalityConstraints, lessThanEqualToConstraints, ginfo, betaCnd); gradientInfo = ls._ginfoOriginal; } // check for stopping conditions which also updates the variables in state. // the stopping condition is to stop us from getting stuck in improvements that are too insignificant. // However, we will only exit the while loop when the gradMagSquare is still too high. There is no hope // for improvement here anymore since the beta values and gradient values are not changing much anymore. done = stop_requested() || (_state._iter >= _parms._max_iterations) || _earlyStop; // time to go if (!progress(betaCnd, gradientInfo)) { checkKKTConditions(betaCnd, gradientInfo, iterCnt); return; } Log.info(LogMsg("computed in " + (System.currentTimeMillis() - t1) + "ms, step = " + iterCnt + ((_lslvr != null) ? ", l1solver " + _lslvr : ""))); } while (!gradSmallEnough); // update constraint parameters, ck, lambdas and others updateConstraintParameters(_state, lambdaEqual, lambdaLessThan, equalityConstraints, lessThanEqualToConstraints, _parms); if (hasLessConstraints) adjustLambda(lessThanEqualToConstraints, lambdaLessThan); // update gradient calculation with new value (lambda and/or ck). gradientInfo = calGradient(betaCnd, _state, ginfo, lambdaEqual, lambdaLessThan, equalityConstraints, lessThanEqualToConstraints); _state.updateState(betaCnd, gradientInfo); // update computation state with new info } } catch (NonSPDMatrixException e) { Log.warn(LogMsg("Got Non SPD matrix, stopped.")); } } // original implementation, but it will not quit when the magnitude of the gradient is small. If an exit condition is triggered // (either ls failed or no progress is made) while the magnitude of the gradient is small, we will exit the while loop // but will still arrive at the part that changes the constrained parameters. This seems to help. private void fitIRLSMCS() { double[] betaCnd = _checkPointFirstIter ? _model._betaCndCheckpoint : _state.beta(); double[] tempBeta = _parms._separate_linear_beta ?
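// Informal note: fitIRLSMCS below differs from fitIRLSMCS9 mainly in that it also accepts the line-search
// result when the gradient is already small enough (lineSearchSuccess || gradSmallEnough), it only abandons the
// loop for lack of progress when the gradient is still large or a hard stop was requested, and it re-applies the
// beta bounds before returning.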
new double[betaCnd.length] : null; List<String> coefNames = Arrays.stream(_state.activeData()._coefNames).collect(Collectors.toList()); LinearConstraints[] equalityConstraints; LinearConstraints[] lessThanEqualToConstraints; final BetaConstraint bc = _state.activeBC(); if (_parms._separate_linear_beta) { // keeping linear and beta constraints separate in this case equalityConstraints = _state._equalityConstraintsLinear; lessThanEqualToConstraints = _state._lessThanEqualToConstraintsLinear; } else { // combine beta and linear constraints together equalityConstraints = combineConstraints(_state._equalityConstraintsBeta, _state._equalityConstraintsLinear); lessThanEqualToConstraints = combineConstraints(_state._lessThanEqualToConstraintsBeta, _state._lessThanEqualToConstraintsLinear); } boolean hasEqualityConstraints = equalityConstraints != null; boolean hasLessConstraints = lessThanEqualToConstraints != null; double[] lambdaEqual = hasEqualityConstraints ? new double[equalityConstraints.length] : null; double[] lambdaLessThan = hasLessConstraints ? new double[lessThanEqualToConstraints.length] : null; Long startSeed = _parms._seed == -1 ? new Random().nextLong() : _parms._seed; Random randObj = new Random(startSeed); updateConstraintValues(betaCnd, coefNames, equalityConstraints, lessThanEqualToConstraints); if (hasEqualityConstraints) // set lambda values for constraints genInitialLambda(randObj, equalityConstraints, lambdaEqual); if (hasLessConstraints) genInitialLambda(randObj, lessThanEqualToConstraints, lambdaLessThan); ExactLineSearch ls = null; int iterCnt = (_checkPointFirstIter ? _state._iter : 0)+_initIter; // contribution to gradient and hessian from constraints _state.initConstraintDerivatives(equalityConstraints, lessThanEqualToConstraints, coefNames); GLMGradientSolver ginfo = gam.equals(_parms._glmType) ? new GLMGradientSolver(_job, _parms, _dinfo, 0, _state.activeBC(), _betaInfo, _penaltyMatrix, _gamColIndices) : new GLMGradientSolver(_job, _parms, _dinfo, 0, _state.activeBC(), _betaInfo); GLMGradientInfo gradientInfo = calGradient(betaCnd, _state, ginfo, lambdaEqual, lambdaLessThan, equalityConstraints, lessThanEqualToConstraints); // add dpenalty/dx to gradient from penalty term _state.setConstraintInfo(gradientInfo, equalityConstraints, lessThanEqualToConstraints, lambdaEqual, lambdaLessThan); // update state ginfo with contributions from GLMGradientInfo boolean predictorSizeChange; boolean applyBetaConstraints = _parms._separate_linear_beta && _betaConstraintsOn; // short circuit check here: if gradient magnitude is small and all constraints are satisfied, quit right away if (constraintsStop(gradientInfo, _state)) { Log.info(LogMsg("GLM with constraints model building completed successfully!!")); return; } double gradMagSquare = ArrayUtils.innerProduct(gradientInfo._gradient, gradientInfo._gradient); boolean done; boolean gradSmallEnough = (gradMagSquare <= _state._csGLMState._epsilonkCSSquare); int origIter = iterCnt+1; boolean lineSearchSuccess; try { while (true) { do { // implement Algorithm 11.8 of the doc to find coefficients with epsilon k as the precision iterCnt++; long t1 = System.currentTimeMillis(); ComputationState.GramGrad gram = _state.computeGram(betaCnd, gradientInfo); // calculate gram (hessian), xy, objective values if (iterCnt == origIter) { Matrix gramMatrix = new Matrix(gram._gram); if (gramMatrix.cond() >= BAD_CONDITION_NUMBER) if (_parms._init_optimal_glm) { warn("init_optimal_glm", " should be disabled. 
This lead to gram matrix being close to" + " singular. Please re-run with init_optimal_glm set to false."); } } predictorSizeChange = !coefNames.equals(Arrays.asList(_state.activeData().coefNames())); if (predictorSizeChange) { // reset if predictors changed coefNames = changeCoeffBetainfo(_state.activeData()._coefNames); _state.resizeConstraintInfo(equalityConstraints, lessThanEqualToConstraints); ginfo = gam.equals(_parms._glmType) ? new GLMGradientSolver(_job, _parms, _state.activeData(), 0, _state.activeBC(), _betaInfo, _penaltyMatrix, _gamColIndices) : new GLMGradientSolver(_job, _parms, _state.activeData(), 0, _state.activeBC(), _betaInfo); tempBeta = new double[coefNames.size()]; } // solve for GLM coefficients betaCnd = constraintGLM_solve(gram); // beta_k+1 = beta_k+dk where dk = beta_k+1-beta_k predictorSizeChange = !coefNames.equals(Arrays.asList(_state.activeData().coefNames())); if (predictorSizeChange) { // reset if predictors changed coefNames = changeCoeffBetainfo(_state.activeData()._coefNames); _state.resizeConstraintInfo(equalityConstraints, lessThanEqualToConstraints); ginfo = gam.equals(_parms._glmType) ? new GLMGradientSolver(_job, _parms, _state.activeData(), 0, _state.activeBC(), _betaInfo, _penaltyMatrix, _gamColIndices) : new GLMGradientSolver(_job, _parms, _state.activeData(), 0, _state.activeBC(), _betaInfo); tempBeta = new double[betaCnd.length]; } // add exact line search for GLM coefficients. Refer to the doc, Algorithm 11.5 if (ls == null) ls = new ExactLineSearch(betaCnd, _state, coefNames); else ls.reset(betaCnd, _state, coefNames); // line search can fail when the gradient is close to zero. In this case, we need to update the // constraint parameters. lineSearchSuccess = ls.findAlpha(lambdaEqual, lambdaLessThan, _state, equalityConstraints, lessThanEqualToConstraints, ginfo); gradMagSquare = ArrayUtils.innerProduct(ls._ginfoOriginal._gradient, ls._ginfoOriginal._gradient); gradSmallEnough = gradMagSquare <= _state._csGLMState._epsilonkCSSquare; if (lineSearchSuccess ||gradSmallEnough) { betaCnd = ls._newBeta; gradientInfo = ls._ginfoOriginal; } else { // ls failed, reset to if (_betaConstraintsOn) // separate beta and linear constraints bc.applyAllBounds(_state.beta()); ls.setBetaConstraintsDeriv(lambdaEqual, lambdaLessThan, _state, equalityConstraints, lessThanEqualToConstraints, ginfo, _state.beta()); Log.info(LogMsg("Line search failed " + ls)); return; } if (applyBetaConstraints) { // if beta constraints are applied separately, may need to update constraints, derivatives, gradientInfo System.arraycopy(betaCnd, 0, tempBeta, 0, betaCnd.length); bc.applyAllBounds(betaCnd); ArrayUtils.subtract(betaCnd, tempBeta, tempBeta); ls.setBetaConstraintsDeriv(lambdaEqual, lambdaLessThan, _state, equalityConstraints, lessThanEqualToConstraints, ginfo, betaCnd); gradientInfo = ls._ginfoOriginal; } // check for stopping conditions which also updates the variables in state. // stopping condition is to stop us getting stuck in improvements that are too insignificant. // However, we will only exit the while loop when the gradMagSquare is still too high. There is no hope // for improvement here anymore since the beta values and gradient values are not changing much anymore. 
done = stop_requested() || (_state._iter >= _parms._max_iterations) || _earlyStop; // time to go if ((!progress(betaCnd, gradientInfo) && !gradSmallEnough) || done) { checkKKTConditions(betaCnd, gradientInfo, iterCnt); if (_betaConstraintsOn) bc.applyAllBounds(_state.beta()); return; } Log.info(LogMsg("computed in " + (System.currentTimeMillis() - t1) + "ms, step = " + iterCnt + ((_lslvr != null) ? ", l1solver " + _lslvr : ""))); } while (!gradSmallEnough); // update constraint parameters, ck, lambdas and others updateConstraintParameters(_state, lambdaEqual, lambdaLessThan, equalityConstraints, lessThanEqualToConstraints, _parms); // update gradient calculation with new value (lambda and/or ck). gradientInfo = calGradient(betaCnd, _state, ginfo, lambdaEqual, lambdaLessThan, equalityConstraints, lessThanEqualToConstraints); _state.updateState(betaCnd, gradientInfo); // update computation state with new info } } catch (NonSPDMatrixException e) { Log.warn(LogMsg("Got Non SPD matrix, stopped.")); } } // only has penalty and no constrained multipliers, original algorithm private void fitIRLSMCS2() { double[] betaCnd = _checkPointFirstIter ? _model._betaCndCheckpoint : _state.beta(); double[] tempBeta = _parms._separate_linear_beta ? new double[betaCnd.length] : null; List<String> coefNames = Arrays.stream(_state.activeData()._coefNames).collect(Collectors.toList()); LinearConstraints[] equalityConstraints; LinearConstraints[] lessThanEqualToConstraints; final BetaConstraint bc = _state.activeBC(); if (_parms._separate_linear_beta) { // keeping linear and beta constraints separate in this case equalityConstraints = _state._equalityConstraintsLinear; lessThanEqualToConstraints = _state._lessThanEqualToConstraintsLinear; } else { // combine beta and linear constraints together equalityConstraints = combineConstraints(_state._equalityConstraintsBeta, _state._equalityConstraintsLinear); lessThanEqualToConstraints = combineConstraints(_state._lessThanEqualToConstraintsBeta, _state._lessThanEqualToConstraintsLinear); } boolean hasEqualityConstraints = equalityConstraints != null; boolean hasLessConstraints = lessThanEqualToConstraints != null; double[] lambdaEqual = hasEqualityConstraints ? new double[equalityConstraints.length] : null; double[] lambdaLessThan = hasLessConstraints ? new double[lessThanEqualToConstraints.length] : null; Long startSeed = _parms._seed == -1 ? new Random().nextLong() : _parms._seed; Random randObj = new Random(startSeed); updateConstraintValues(betaCnd, coefNames, equalityConstraints, lessThanEqualToConstraints); /* if (hasEqualityConstraints) // set lambda values for constraints genInitialLambda(randObj, equalityConstraints, lambdaEqual); if (hasLessConstraints) genInitialLambda(randObj, lessThanEqualToConstraints, lambdaLessThan); */ ExactLineSearch ls = null; int iterCnt = (_checkPointFirstIter ? _state._iter : 0)+_initIter; // contribution to gradient and hessian from constraints _state.initConstraintDerivatives(equalityConstraints, lessThanEqualToConstraints, coefNames); GLMGradientSolver ginfo = gam.equals(_parms._glmType) ? 
new GLMGradientSolver(_job, _parms, _dinfo, 0, _state.activeBC(), _betaInfo, _penaltyMatrix, _gamColIndices) : new GLMGradientSolver(_job, _parms, _dinfo, 0, _state.activeBC(), _betaInfo); GLMGradientInfo gradientInfo = calGradient(betaCnd, _state, ginfo, lambdaEqual, lambdaLessThan, equalityConstraints, lessThanEqualToConstraints); // add dpenalty/dx to gradient from penalty term _state.setConstraintInfo(gradientInfo, equalityConstraints, lessThanEqualToConstraints, lambdaEqual, lambdaLessThan); // update state ginfo with contributions from GLMGradientInfo boolean predictorSizeChange; boolean applyBetaConstraints = _parms._separate_linear_beta && _betaConstraintsOn; // short circuit check here: if gradient magnitude is small and all constraints are satisfied, quit right away if (constraintsStop(gradientInfo, _state)) { Log.info(LogMsg("GLM with constraints model building completed successfully!!")); return; } double gradMagSquare = ArrayUtils.innerProduct(gradientInfo._gradient, gradientInfo._gradient); boolean done; boolean gradSmallEnough = (gradMagSquare <= _state._csGLMState._epsilonkCSSquare); int origIter = iterCnt+1; boolean lineSearchSuccess; try { while (true) { do { // implement Algorithm 11.8 of the doc to find coefficients with epsilon k as the precision iterCnt++; long t1 = System.currentTimeMillis(); ComputationState.GramGrad gram = _state.computeGram(betaCnd, gradientInfo); // calculate gram (hessian), xy, objective values if (iterCnt == origIter) { Matrix gramMatrix = new Matrix(gram._gram); if (gramMatrix.cond() >= BAD_CONDITION_NUMBER) if (_parms._init_optimal_glm) { warn("init_optimal_glm", " should be disabled. This lead to gram matrix being close to" + " singular. Please re-run with init_optimal_glm set to false."); } } predictorSizeChange = !coefNames.equals(Arrays.asList(_state.activeData().coefNames())); if (predictorSizeChange) { // reset if predictors changed coefNames = changeCoeffBetainfo(_state.activeData()._coefNames); _state.resizeConstraintInfo(equalityConstraints, lessThanEqualToConstraints); ginfo = gam.equals(_parms._glmType) ? new GLMGradientSolver(_job, _parms, _state.activeData(), 0, _state.activeBC(), _betaInfo, _penaltyMatrix, _gamColIndices) : new GLMGradientSolver(_job, _parms, _state.activeData(), 0, _state.activeBC(), _betaInfo); tempBeta = new double[coefNames.size()]; } // solve for GLM coefficients betaCnd = constraintGLM_solve(gram); // beta_k+1 = beta_k+dk where dk = beta_k+1-beta_k predictorSizeChange = !coefNames.equals(Arrays.asList(_state.activeData().coefNames())); if (predictorSizeChange) { // reset if predictors changed coefNames = changeCoeffBetainfo(_state.activeData()._coefNames); _state.resizeConstraintInfo(equalityConstraints, lessThanEqualToConstraints); ginfo = gam.equals(_parms._glmType) ? new GLMGradientSolver(_job, _parms, _state.activeData(), 0, _state.activeBC(), _betaInfo, _penaltyMatrix, _gamColIndices) : new GLMGradientSolver(_job, _parms, _state.activeData(), 0, _state.activeBC(), _betaInfo); tempBeta = new double[betaCnd.length]; } // add exact line search for GLM coefficients. Refer to the doc, Algorithm 11.5 if (ls == null) ls = new ExactLineSearch(betaCnd, _state, coefNames); else ls.reset(betaCnd, _state, coefNames); // line search can fail when the gradient is close to zero. In this case, we need to update the // constraint parameters. 
lineSearchSuccess = ls.findAlpha(lambdaEqual, lambdaLessThan, _state, equalityConstraints, lessThanEqualToConstraints, ginfo); if (lineSearchSuccess) { betaCnd = ls._newBeta; gradientInfo = ls._ginfoOriginal; gradMagSquare = ArrayUtils.innerProduct(ls._ginfoOriginal._gradient, ls._ginfoOriginal._gradient); gradSmallEnough = gradMagSquare <= _state._csGLMState._epsilonkCSSquare; } else { // ls failed, reset to if (applyBetaConstraints) // separate beta and linear constraints bc.applyAllBounds(_state.beta()); ls.setBetaConstraintsDeriv(lambdaEqual, lambdaLessThan, _state, equalityConstraints, lessThanEqualToConstraints, ginfo, _state.beta()); Log.info(LogMsg("Line search failed " + ls)); return; } if (applyBetaConstraints) { // if beta constraints are applied, may need to update constraints, derivatives, gradientInfo System.arraycopy(betaCnd, 0, tempBeta, 0, betaCnd.length); bc.applyAllBounds(betaCnd); ArrayUtils.subtract(betaCnd, tempBeta, tempBeta); ls.setBetaConstraintsDeriv(lambdaEqual, lambdaLessThan, _state, equalityConstraints, lessThanEqualToConstraints, ginfo, betaCnd); gradientInfo = ls._ginfoOriginal; } // check for stopping conditions which also updates the variables in state. // stopping condition is to stop us getting stuck in improvements that are too insignificant. // However, we will only exit the while loop when the gradMagSquare is still too high. There is no hope // for improvement here anymore since the beta values and gradient values are not changing much anymore. done = stop_requested() || (_state._iter >= _parms._max_iterations) || _earlyStop; // time to go if (!progress(betaCnd, gradientInfo)) { checkKKTConditions(betaCnd, gradientInfo, iterCnt); return; } Log.info(LogMsg("computed in " + (System.currentTimeMillis() - t1) + "ms, step = " + iterCnt + ((_lslvr != null) ? ", l1solver " + _lslvr : ""))); } while (!gradSmallEnough); // update constraint parameters, ck, lambdas and others updateConstraintParameters(_state, lambdaEqual, lambdaLessThan, equalityConstraints, lessThanEqualToConstraints, _parms); // update gradient calculation with new value (lambda and/or ck). // set lambda to all zeros lambdaEqual = hasEqualityConstraints ? new double[lambdaEqual.length] : null; lambdaLessThan = hasLessConstraints ? new double[lambdaLessThan.length] : null; gradientInfo = calGradient(betaCnd, _state, ginfo, lambdaEqual, lambdaLessThan, equalityConstraints, lessThanEqualToConstraints); _state.updateState(betaCnd, gradientInfo); // update computation state with new info } } catch (NonSPDMatrixException e) { Log.warn(LogMsg("Got Non SPD matrix, stopped.")); } } /*** * We will check it the constraint stopping conditions are met. */ public void checkKKTConditions(double[] betaCnd, GLMGradientInfo gradientInfo, int iterCnt) { // check for stopping conditions _model._betaCndCheckpoint = betaCnd; boolean kktAchieved = constraintsStop(gradientInfo, _state); if (kktAchieved) Log.info("KKT Conditions achieved after " + iterCnt + " iterations "); else Log.warn("KKT Conditions not achieved but no further progress made due to time out or no changes" + " to coefficients after " + iterCnt + " iterations"); } public List<String> changeCoeffBetainfo(String[] coefNames) { _betaInfo = new BetaInfo(fractionalbinomial.equals(_parms._family) ? 2 : (multinomial.equals(_parms._family) || ordinal.equals(_parms._family)) ? nclasses() : 1, coefNames.length); return Arrays.stream(coefNames).collect(Collectors.toList()); } private void fitIRLSMML(Solver s) { double[] betaCnd = _checkPointFirstIter ? 
_model._betaCndCheckpoint : _state.beta(); LineSearchSolver ls = null; int iterCnt = _checkPointFirstIter ? _state._iter : 0; boolean firstIter = iterCnt == 0; final BetaConstraint bc = _state.activeBC(); double previousLLH = Double.POSITIVE_INFINITY; boolean converged = false; int sameLLH = 0; Vec weights = _dinfo._weights ? _dinfo.getWeightsVec() : _dinfo._adaptedFrame.makeCompatible(new Frame(Vec.makeOne(_dinfo._adaptedFrame.numRows())))[0]; Vec response = _dinfo._adaptedFrame.vec(_dinfo.responseChunkId(0)); try { while (!converged && iterCnt < _parms._max_iterations && !_job.stop_requested()) { iterCnt++; long t1 = System.currentTimeMillis(); ComputationState.GramXY gram = _state.computeGram(betaCnd, s); long t2 = System.currentTimeMillis(); if (!_state._lsNeeded && (Double.isNaN(gram.likelihood) || _state.objective(gram.beta, gram.likelihood) > _state.objective() + _parms._objective_epsilon) && !_checkPointFirstIter) { _state._lsNeeded = true; } else { if (!firstIter && !_state._lsNeeded && !progress(gram.beta, gram.likelihood) && !_checkPointFirstIter) { Log.info("DONE after " + (iterCnt - 1) + " iterations (1)"); _model._betaCndCheckpoint = betaCnd; converged = true; } if (!_checkPointFirstIter) betaCnd = s == Solver.COORDINATE_DESCENT ? COD_solve(gram, _state._alpha, _state.lambda()) : ADMM_solve(gram.gram, gram.xy); // this will shrink betaCnd if needed but this call may be skipped } firstIter = false; _checkPointFirstIter = false; long t3 = System.currentTimeMillis(); if (_state._lsNeeded) { if (ls == null) ls = (_state.l1pen() == 0 && !_state.activeBC().hasBounds()) ? new MoreThuente(_state.gslvr(), _state.beta(), _state.ginfo()) : new SimpleBacktrackingLS(_state.gslvr(), _state.beta().clone(), _state.l1pen(), _state.ginfo()); double[] oldBetaCnd = ls.getX(); if (betaCnd.length != oldBetaCnd.length) { // if ln 1453 is skipped and betaCnd.length != _state.beta() betaCnd = extractSubRange(betaCnd.length, 0, _state.activeData()._activeCols, betaCnd); } if (!ls.evaluate(ArrayUtils.subtract(betaCnd, oldBetaCnd, betaCnd))) { // ls.getX() get the old beta value Log.info(LogMsg("Ls failed " + ls)); converged = true; } betaCnd = ls.getX(); if (_betaConstraintsOn) bc.applyAllBounds(betaCnd); if (!progress(betaCnd, ls.ginfo())) converged = true; long t4 = System.currentTimeMillis(); Log.info(LogMsg("computed in " + (t2 - t1) + "+" + (t3 - t2) + "+" + (t4 - t3) + "=" + (t4 - t1) + "ms, step = " + ls.step() + ((_lslvr != null) ? ", l1solver " + _lslvr : ""))); } else { if (_betaConstraintsOn) // apply beta constraints without LS bc.applyAllBounds(betaCnd); Log.info(LogMsg("computed in " + (t2 - t1) + "+" + (t3 - t2) + "=" + (t3 - t1) + "ms, step = " + 1 + ((_lslvr != null) ? 
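// The maximum-likelihood variant below interleaves dispersion estimation with the coefficient updates: after
// each beta step it re-estimates theta for the negative binomial family, or the Tweedie variance power p (and
// optionally the dispersion phi) for the Tweedie family. The loop only counts as converged once both the
// coefficients and these dispersion parameters stop moving, with an extra guard that exits after the likelihood
// has stayed flat for more than 10 consecutive iterations.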
", l1solver " + _lslvr : ""))); } // Dispersion estimation part if (negativebinomial.equals(_parms._family)){ converged = updateNegativeBinomialDispersion(iterCnt, _state.beta(), previousLLH, weights, response) && converged; Log.info("GLM negative binomial dispersion estimation: iteration = "+iterCnt+"; theta = " + _parms._theta); } else if (tweedie.equals(_parms._family)){ if (!_parms._fix_tweedie_variance_power) { if (!_parms._fix_dispersion_parameter) { converged = updateTweediePandPhi(iterCnt, _state.expandBeta(betaCnd), weights, response) && converged; Log.info("GLM Tweedie p and phi estimation: iteration = " + iterCnt + "; p = " + _parms._tweedie_variance_power + "; phi = " + _parms._dispersion_estimated); } else { converged = updateTweedieVariancePower(iterCnt, _state.expandBeta(betaCnd), weights, response) && converged; Log.info("GLM Tweedie variance power estimation: iteration = " + iterCnt + "; p = " + _parms._tweedie_variance_power); } } } if (Math.abs(previousLLH - gram.likelihood) < _parms._objective_epsilon) sameLLH ++; else sameLLH = 0; converged = converged || sameLLH > 10; previousLLH = gram.likelihood; } } catch (NonSPDMatrixException e) { Log.warn(LogMsg("Got Non SPD matrix, stopped.")); warn("Regression with MLE training", "Got Non SPD matrix, stopped."); } } private boolean updateTweedieVariancePower(int iterCnt, double[] betaCnd, Vec weights, Vec response) { final double newtonThreshold = 0.1; // when d < newtonThreshold => use Newton's method final double phi = _parms._init_dispersion_parameter; final double originalP = _parms._tweedie_variance_power; double bestLLH = Double.NEGATIVE_INFINITY, bestP = 1.5, lowerBound, upperBound, p = originalP; int newtonFailures = 0; boolean converged = false; Scope.enter(); DispersionTask.GenPrediction gPred = new DispersionTask.GenPrediction(betaCnd, _model, _dinfo).doAll( 1, Vec.T_NUM, _dinfo._adaptedFrame); Vec mu = Scope.track(gPred.outputFrame(Key.make(), new String[]{"prediction"}, null)).vec(0); // Golden section search for p between 1 and 2 lowerBound = 1 + 1e-16; if (response.min() <= 0) { upperBound = 2 - 1e-16; } else { upperBound = Double.POSITIVE_INFINITY; } // Let's assume the var power will be close to the one in last iteration and hopefully save some time if (iterCnt > 1) { boolean forceInversion = (originalP > 1.95 && originalP < 2.1); TweedieEstimator lo = new TweedieEstimator( Math.max(lowerBound, p - 0.01), phi, forceInversion).compute(mu, response, weights); TweedieEstimator mid = new TweedieEstimator(p, phi, forceInversion).compute(mu, response, weights); TweedieEstimator hi = new TweedieEstimator( Math.min(upperBound, p + 0.01), phi, forceInversion).compute(mu, response, weights); if (mid._loglikelihood > lo._loglikelihood && !Double.isNaN(lo._loglikelihood) && !Double.isNaN(mid._loglikelihood)) lowerBound = lo._p; if (mid._loglikelihood > hi._loglikelihood && !Double.isNaN(mid._loglikelihood) && !Double.isNaN(hi._loglikelihood)) upperBound = hi._p; if (bestLLH < lo._loglikelihood && lo._loglikelihood != 0 && !forceInversion) { bestLLH = lo._loglikelihood; bestP = lo._p; } if (bestLLH < mid._loglikelihood && mid._loglikelihood != 0 && !forceInversion) { bestLLH = mid._loglikelihood; bestP = mid._p; } if (bestLLH < hi._loglikelihood && hi._loglikelihood != 0 && !forceInversion) { bestLLH = hi._loglikelihood; bestP = hi._p; } } if (upperBound == Double.POSITIVE_INFINITY) { // look at p=2 and p=3 (cheap to compute) TweedieEstimator tvp2 = new TweedieEstimator(2, phi).compute(mu, response, weights); 
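// Informal sketch of the variance-power search in updateTweedieVariancePower: the optimum is first bracketed
// (when the response has no non-positive values the upper end is found by evaluating p = 2, 3, 6, 12, ... until
// the log likelihood starts decreasing), the bracket is then shrunk by the golden-section factor 0.618, and once
// it is narrow (d < 0.1) and safely away from p ~ 2 a Newton refinement p <- p - l'(p)/l''(p) is attempted,
// falling back to golden-section steps whenever Newton fails to improve the likelihood.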
TweedieEstimator tvp3 = new TweedieEstimator(3, phi).compute(mu, response, weights); double llhI = tvp3._loglikelihood, llhIm1 = tvp2._loglikelihood; // pI == p_i, pIm1 == p_{i-1}, ... double pI = 3, pIm1 = 2, pIm2 = lowerBound; if (bestLLH < llhI && llhI != 0) { bestLLH = llhI; bestP = pI; } if (bestLLH < llhIm1 && llhIm1 != 0) { bestLLH = llhIm1; bestP = pIm1; } // if p(2) > p(3) => search in [1, 3] // look at 3,6,12,24,48,96, ... until p(x_{i-1}) > p(x_i) => search in [x_{i-2}, x_i] while (llhIm1 < llhI) { pIm2 = pIm1; pIm1 = pI; llhIm1 = llhI; pI *= 2; TweedieEstimator tvp = new TweedieEstimator(pI, phi).compute(mu, response, weights); llhI = tvp._loglikelihood; if (bestLLH < llhI && llhI != 0) { bestLLH = llhI; bestP = pI; } } lowerBound = pIm2; upperBound = pI; } double d = upperBound - lowerBound; p = (upperBound + lowerBound) / 2; for (int i = 0; i < _parms._max_iterations_dispersion; i++) { // likelihood, grad, and hess get unstable for the series method near 2, so I'm not using Newton's method for // this region and instead use "hybrid" (series+inversion) likelihood calculation if (d < newtonThreshold && p >= lowerBound && p <= upperBound && newtonFailures < 3 && !(p >= 1.95 && p <= 2.1) && p < 2) { // Use Newton's method in bracketed space TweedieEstimator tvp = new TweedieEstimator( p, phi, false, true, true, false).compute(mu, response, weights); if (tvp._loglikelihood > bestLLH && tvp._loglikelihood != 0) { bestLLH = tvp._loglikelihood; bestP = p; } else { newtonFailures++; } double delta = tvp._llhDp / tvp._llhDpDp; p = p - delta; if (Math.abs(delta) < _parms._dispersion_epsilon) { converged = true; break; } if (_job.stop_requested()) break; } else { if (d < newtonThreshold) { newtonFailures++; } boolean forceInversion = (lowerBound > 1.95 && upperBound < 2.1); // behaves more stable - less -oo but tends to be lower sometimes than series+inversion hybrid d *= 0.618; // division by golden ratio final double lowerBoundProposal = upperBound - d; final double upperBoundProposal = lowerBound + d; TweedieEstimator lowerEst = new TweedieEstimator( lowerBoundProposal, phi, forceInversion).compute(mu, response, weights); TweedieEstimator upperEst = new TweedieEstimator( upperBoundProposal, phi, forceInversion).compute(mu, response, weights); if (forceInversion) bestLLH = Math.max(lowerEst._loglikelihood, upperEst._loglikelihood); if (lowerEst._loglikelihood >= upperEst._loglikelihood) { upperBound = upperBoundProposal; if (lowerEst._loglikelihood >= bestLLH && lowerEst._loglikelihood != 0) { bestLLH = lowerEst._loglikelihood; bestP = lowerEst._p; } } else { lowerBound = lowerBoundProposal; if (upperEst._loglikelihood >= bestLLH && upperEst._loglikelihood != 0) { bestLLH = upperEst._loglikelihood; bestP = upperEst._p; } } p = (upperBound + lowerBound) / 2; if (Math.abs((upperBoundProposal - lowerBoundProposal)) < _parms._dispersion_epsilon || _job.stop_requested()) { bestP = (upperBoundProposal + lowerBoundProposal) / 2; converged = true; break; } if (!Double.isFinite(upperEst._loglikelihood) && !Double.isFinite(lowerEst._loglikelihood)) { break; } if (upperEst._loglikelihood == 0 && lowerEst._loglikelihood == 0) { break; } } } updateTweedieParms(bestP, phi); Scope.exit(); return Math.abs(originalP - bestP) < _parms._dispersion_epsilon && converged; } private boolean updateTweediePandPhi(int iterCnt, double[] betaCnd, Vec weights, Vec response) { final double originalP = _parms._tweedie_variance_power; final double originalPhi = _parms._dispersion_estimated; final double contractRatio 
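// Informal sketch of the joint (p, phi) search below: a small Nelder-Mead simplex is run on the Tweedie log
// likelihood. After the first outer iteration the simplex is a triangle of radius 0.05 * dispersion_learning_rate
// centred on the previous (p, phi) estimate and rotated by 2*pi/15 each iteration so successive starts do not
// reuse the same vertices. Each inner iteration sorts the three vertices and applies the usual reflect / extend /
// contract / shrink moves, keeping the best (p, phi, log likelihood) triple seen so far.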
= 0.5; final double pMin = 1 + 1e-10; final double pZeroMax = 2 - 1e-10; final double phiMin = 1e-10; double bestLLH = Double.NEGATIVE_INFINITY, bestP = 1.5, bestPhi = 1, p, phi, centroidP, centroidPhi, diffP, diffPhi, diffInParams, diffInLLH; boolean converged = false; Scope.enter(); DispersionTask.GenPrediction gPred = new DispersionTask.GenPrediction(betaCnd, _model, _dinfo).doAll( 1, Vec.T_NUM, _dinfo._adaptedFrame); Vec mu = Scope.track(gPred.outputFrame(Key.make(), new String[]{"prediction"}, null)).vec(0); try { // Nelder-Mead TweedieEstimator teTmp, teReflected, teExtended, teContractedIn, teContractedOut, teHigh, teMiddle, teLow; if (iterCnt > 1) { // keep the previous estimate as the centroid of the triangle (unless we encounter some constraint) // using the previous estimate as one of the points can lead to getting stuck in local optimum since the likelihood is not perfectly smooth final double radius = 0.05 * _parms._dispersion_learning_rate; // rotate the simplex every iteration by 2/15*PI => every 5th iteration has the same points just with different names // this should make it less likely to get stuck in a local optimum double aP = originalP + Math.cos(Math.PI / 7.5 * iterCnt) * radius, aPhi = originalPhi + Math.sin(Math.PI / 7.5 * iterCnt) * radius, bP = originalP + Math.cos(Math.PI / 7.5 * iterCnt + 2 * Math.PI / 3) * radius, bPhi = originalPhi + Math.sin(Math.PI / 7.5 * iterCnt + 2 * Math.PI / 3) * radius, cP = originalP + Math.cos(Math.PI / 7.5 * iterCnt + 4 * Math.PI / 3) * radius, cPhi = originalPhi + Math.sin(Math.PI / 7.5 * iterCnt + 4 * Math.PI / 3) * radius; final double minPDiff = Math.min(aP - pMin, Math.min(bP - pMin, cP - pMin)), minPhiDiff = Math.min(aPhi - phiMin, Math.min(bPhi - phiMin, cPhi - phiMin)); if (minPDiff < 0) { // shift the simplex to be in the allowed region aP -= minPDiff; bP -= minPDiff; cP -= minPDiff; } if (minPhiDiff < 0) { // shift the simplex to be in the allowed region aPhi -= minPhiDiff; bPhi -= minPhiDiff; cPhi -= minPhiDiff; } if (response.min() <= 0) { // makes no sense to evaluate p >= 2 as it has -Infty log likelihood => shift the simplex final double maxPDiff = Math.max(aP - pZeroMax, Math.max(bP - pZeroMax, cP - pZeroMax)); if (maxPDiff > 0) { // shift the simplex aP -= maxPDiff; bP -= maxPDiff; cP -= maxPDiff; } } // high likelihood (to be sorted later) teHigh = new TweedieEstimator(aP, aPhi).compute(mu, response, weights); // middle likelihood teMiddle = new TweedieEstimator(bP, bPhi).compute(mu, response, weights); // low likelihood teLow = new TweedieEstimator(cP, cPhi).compute(mu, response, weights); } else { // high likelihood (to be sorted later) teHigh = new TweedieEstimator(1.5, 1) .compute(mu, response, weights); // middle likelihood teMiddle = new TweedieEstimator(response.min() > 0 ? 3 : 1.75, 0.5) .compute(mu, response, weights); // low likelihood teLow = new TweedieEstimator(response.min() > 0 ? 2 : 1.2, 2) .compute(mu, response, weights); } // 1. 
Sort if (teLow._loglikelihood > teHigh._loglikelihood) { teTmp = teLow; teLow = teHigh; teHigh = teTmp; } if (teMiddle._loglikelihood > teHigh._loglikelihood) { teTmp = teMiddle; teMiddle = teHigh; teHigh = teTmp; } if (teLow._loglikelihood > teMiddle._loglikelihood) { teTmp = teLow; teLow = teMiddle; teMiddle = teTmp; } for (int i = 0; i < _parms._max_iterations_dispersion; i++) { if (!(Double.isFinite(teLow._loglikelihood) || Double.isFinite(teHigh._loglikelihood))) { Log.info("Nelder-Mead dispersion (phi) and variance power (p) estimation: beta iter: " + iterCnt + "; Nelder-Mead iter: " + i + "; estimated p: " + bestP + "; estimated phi: " + bestPhi + "; log(Likelihood): " + bestLLH + "; Not finite likelihoods for both high and low point - skipping p and phi estimation for this iteration."); return false; } // 2. Reflect centroidP = (teMiddle._p + teHigh._p) / 2.0; centroidPhi = (teMiddle._phi + teHigh._phi) / 2.0; diffP = centroidP - teLow._p; diffPhi = centroidPhi - teLow._phi; // reflected p and phi p = teLow._p + 2 * diffP; phi = teLow._phi + 2 * diffPhi; p = Math.max(p, pMin); if (response.min() <= 0) p = Math.min(p, pZeroMax); phi = Math.max(phi, phiMin); teReflected = new TweedieEstimator(p, phi).compute(mu, response, weights); if (teReflected._loglikelihood > teMiddle._loglikelihood && teReflected._loglikelihood < teHigh._loglikelihood) { teLow = teReflected; } else if (teReflected._loglikelihood > teHigh._loglikelihood) { // 3. Extend p += diffP; phi += diffPhi; p = Math.max(p, pMin); if (response.min() <= 0) p = Math.min(p, pZeroMax); phi = Math.max(phi, phiMin); if (p == teReflected._p && phi == teReflected._phi) teExtended = teReflected; // if it gets out of bounds don't let it go further else teExtended = new TweedieEstimator(p, phi).compute(mu, response, weights); if (teExtended._loglikelihood > teReflected._loglikelihood) teLow = teExtended; else teLow = teReflected; } else { // 4. Contract if (teReflected._loglikelihood < teMiddle._loglikelihood) { // Contract out p = Math.max(teLow._p + (1 + contractRatio) * diffP, pMin); phi = Math.max(teLow._phi + (1 + contractRatio) * diffPhi, phiMin); teContractedOut = new TweedieEstimator(p, phi).compute(mu, response, weights); // Contract in p = Math.max(teLow._p + (1 - contractRatio) * diffP, pMin); phi = Math.max(teLow._phi + (1 - contractRatio) * diffPhi, phiMin); teContractedIn = new TweedieEstimator(p, phi).compute(mu, response, weights); if (teContractedOut._loglikelihood > teContractedIn._loglikelihood) teTmp = teContractedOut; else teTmp = teContractedIn; } else teTmp = teMiddle; if (teTmp._loglikelihood > teMiddle._loglikelihood) { teLow = teTmp; } else { // shrink p = (teLow._p + teHigh._p) / 2; phi = (teLow._phi + teHigh._phi) / 2; teLow = new TweedieEstimator(p, phi).compute(mu, response, weights); p = (teMiddle._p + teHigh._p) / 2; phi = (teMiddle._phi + teHigh._phi) / 2; teMiddle = new TweedieEstimator(p, phi).compute(mu, response, weights); } } // 1. 
// Sort so that teHigh has the largest log-likelihood (teLow <= teMiddle <= teHigh)
if (teLow._loglikelihood > teHigh._loglikelihood) {
  teTmp = teLow; teLow = teHigh; teHigh = teTmp;
}
if (teMiddle._loglikelihood > teHigh._loglikelihood) {
  teTmp = teMiddle; teMiddle = teHigh; teHigh = teTmp;
}
if (teLow._loglikelihood > teMiddle._loglikelihood) {
  teTmp = teLow; teLow = teMiddle; teMiddle = teTmp;
}
if (bestLLH < teHigh._loglikelihood) {
  bestLLH = teHigh._loglikelihood;
  bestP = teHigh._p;
  bestPhi = teHigh._phi;
}
diffInParams = Math.max(
        Math.max(Math.abs(teHigh._p - teMiddle._p), Math.abs(teHigh._p - teLow._p)),
        Math.max(
                Math.max(Math.abs(teLow._p - teMiddle._p), Math.abs(teHigh._phi - teMiddle._phi)),
                Math.max(Math.abs(teHigh._phi - teLow._phi), Math.abs(teLow._phi - teMiddle._phi))
        )
);
diffInLLH = Math.abs((teMiddle._loglikelihood - teLow._loglikelihood) / (teHigh._loglikelihood + _parms._dispersion_epsilon));
Log.info("Nelder-Mead dispersion (phi) and variance power (p) estimation: beta iter: " + iterCnt +
        "; Nelder-Mead iter: " + i + "; estimated p: " + bestP + "; estimated phi: " + bestPhi +
        "; log(Likelihood): " + bestLLH + "; diff in params: " + diffInParams + "; diff in LLH: " + diffInLLH +
        "; dispersion_epsilon: " + _parms._dispersion_epsilon);
converged = diffInParams < _parms._dispersion_epsilon && diffInLLH < _parms._dispersion_epsilon;
if (converged) break;
}
updateTweedieParms(bestP, bestPhi);
return Math.abs(originalP - bestP) < _parms._dispersion_epsilon &&
        Math.abs(originalPhi - bestPhi) < _parms._dispersion_epsilon && converged;
} finally {
  Scope.exit();
}
}

private void updateTweedieParms(double p, double dispersion) {
  if (!Double.isFinite(p)) return;
  _parms.updateTweedieParams(p, _parms._tweedie_link_power, dispersion);
  _model._parms.updateTweedieParams(p, _model._parms._tweedie_link_power, dispersion);
  if (_state._glmw != null) {
    _state._glmw = new GLMWeightsFun(_parms);
  }
}

private void updateTheta(double theta) {
  if (_state._glmw != null) {
    _state._glmw._theta = theta;
    _state._glmw._invTheta = 1. / theta;
  }
  _parms._theta = theta;
  _parms._invTheta = 1. / theta;
  _model._parms._theta = theta;
  _model._parms._invTheta = 1. / theta;
}

private boolean updateNegativeBinomialDispersion(int iterCnt, double[] betaCnd, double previousNLLH, Vec weights, Vec response) {
  double delta;
  double theta;
  boolean converged = false;
  try {
    Scope.enter();
    DispersionTask.GenPrediction gPred = new DispersionTask.GenPrediction(betaCnd, _model, _dinfo).doAll(
            1, Vec.T_NUM, _dinfo._adaptedFrame);
    Vec mu = Scope.track(gPred.outputFrame(Key.make(), new String[]{"prediction"}, null)).vec(0);
    if (iterCnt == 1) {
      theta = estimateNegBinomialDispersionMomentMethod(_model, betaCnd, _dinfo, weights, response, mu);
    } else {
      theta = _parms._theta;
      NegativeBinomialGradientAndHessian nbGrad = new NegativeBinomialGradientAndHessian(theta).doAll(mu, response, weights);
      delta = _parms._dispersion_learning_rate * nbGrad._grad / nbGrad._hess;
      double bestLLH = Math.max(-previousNLLH, nbGrad._llh);
      double bestTheta = theta;
      delta = Double.isFinite(delta) ? delta : 1; // NaN can occur in extreme datasets so try to get out of this neighborhood just by line search
      // Golden section search for the optimal size of delta
      // Set the lower bound to -10, or to the lowest value that keeps theta > 0, whichever is bigger.
      // A negative value here helps with datasets where we used to diverge; it is not yet clear whether that is
      // caused by numerical issues or by the likelihood becoming multimodal in some cases.
      double lowerBound = (theta + 10 * delta < 0) ?
(1 - 1e-15) * theta / delta : -10; double upperBound = (theta - 1e3 * delta < 0) ? (1 - 1e-15) * theta / delta : 1e3; double d = upperBound - lowerBound; for (int i = 0; i < _parms._max_iterations_dispersion; i++) { d *= 0.618; // division by golden ratio final double lowerBoundProposal = upperBound - d; final double upperBoundProposal = lowerBound + d; NegativeBinomialGradientAndHessian nbLower = new NegativeBinomialGradientAndHessian(theta - lowerBoundProposal * delta).doAll(mu, response, weights); NegativeBinomialGradientAndHessian nbUpper = new NegativeBinomialGradientAndHessian(theta - upperBoundProposal * delta).doAll(mu, response, weights); if (nbLower._llh >= nbUpper._llh) { upperBound = upperBoundProposal; if (nbLower._llh > bestLLH) { bestLLH = nbLower._llh; bestTheta = nbLower._theta; } } else { lowerBound = lowerBoundProposal; if (nbUpper._llh > bestLLH) { bestLLH = nbUpper._llh; bestTheta = nbUpper._theta; } } if (Math.abs((upperBoundProposal - lowerBoundProposal) * Math.max(1, delta / Math.max(_parms._theta, bestTheta))) < _parms._dispersion_epsilon || _job.stop_requested()) { break; } } theta = bestTheta; converged = (nbGrad._llh + previousNLLH) <= _parms._objective_epsilon || !Double.isFinite(theta); } delta = _parms._theta - theta; converged = converged && (Math.abs(delta) / Math.max(_parms._theta, theta) < _parms._dispersion_epsilon); updateTheta(theta); return converged; } finally { Scope.exit(); } } private void fitLBFGS() { double[] beta = _state.beta(); final double l1pen = _state.l1pen(); GLMGradientSolver gslvr = _state.gslvr(); GLMWeightsFun glmw = new GLMWeightsFun(_parms); if (beta == null && (multinomial.equals(_parms._family) || ordinal.equals(_parms._family))) { beta = MemoryManager.malloc8d((_state.activeData().fullN() + 1) * _nclass); int P = _state.activeData().fullN() + 1; if (_parms._intercept) for (int i = 0; i < _nclass; ++i) beta[i * P + P - 1] = glmw.link(_state._ymu[i]); } if (beta == null) { beta = MemoryManager.malloc8d(_state.activeData().fullN() + 1); if (_parms._intercept) beta[beta.length - 1] = glmw.link(_state._ymu[0]); } L_BFGS lbfgs = new L_BFGS().setObjEps(_parms._objective_epsilon).setGradEps(_parms._gradient_epsilon).setMaxIter(_parms._max_iterations); assert beta.length == _state.ginfo()._gradient.length; int P = _dinfo.fullN(); if (l1pen > 0 || _state.activeBC().hasBounds()) { double[] nullBeta = MemoryManager.malloc8d(beta.length); // compute ginfo at null beta to get estimate for rho if (_dinfo._intercept) { if (multinomial.equals(_parms._family)) { for (int c = 0; c < _nclass; c++) nullBeta[(c + 1) * (P + 1) - 1] = glmw.link(_state._ymu[c]); } else nullBeta[nullBeta.length - 1] = glmw.link(_state._ymu[0]); } GradientInfo ginfo = gslvr.getGradient(nullBeta); double[] direction = ArrayUtils.mult(ginfo._gradient.clone(), -1); double t = 1; if (l1pen > 0) { MoreThuente mt = new MoreThuente(gslvr, nullBeta); mt.evaluate(direction); t = mt.step(); } double[] rho = MemoryManager.malloc8d(beta.length); double r = _state.activeBC().hasBounds() ? 1 : .1; BetaConstraint bc = _state.activeBC(); // compute rhos for (int i = 0; i < rho.length - 1; ++i) rho[i] = r * ADMM.L1Solver.estimateRho(nullBeta[i] + t * direction[i], l1pen, bc._betaLB == null ? Double.NEGATIVE_INFINITY : bc._betaLB[i], bc._betaUB == null ? Double.POSITIVE_INFINITY : bc._betaUB[i]); for (int ii = P; ii < rho.length; ii += P + 1) rho[ii] = r * ADMM.L1Solver.estimateRho(nullBeta[ii] + t * direction[ii], 0, bc._betaLB == null ? 
Double.NEGATIVE_INFINITY : bc._betaLB[ii], bc._betaUB == null ? Double.POSITIVE_INFINITY : bc._betaUB[ii]); final double[] objvals = new double[2]; objvals[1] = Double.POSITIVE_INFINITY; double reltol = L1Solver.DEFAULT_RELTOL; double abstol = L1Solver.DEFAULT_ABSTOL; double ADMM_gradEps = 1e-3; ProximalGradientSolver innerSolver = new ProximalGradientSolver(gslvr, beta, rho, _parms._objective_epsilon * 1e-1, _parms._gradient_epsilon, _state.ginfo(), this); // new ProgressMonitor() { // @Override // public boolean progress(double[] betaDiff, GradientInfo ginfo) { // return ++_state._iter < _parms._max_iterations; // } // }); ADMM.L1Solver l1Solver = new ADMM.L1Solver(ADMM_gradEps, 250, reltol, abstol, _state._u); l1Solver._pm = this; l1Solver.solve(innerSolver, beta, l1pen, true, _state.activeBC()._betaLB, _state.activeBC()._betaUB); _state._u = l1Solver._u; _state.updateState(beta, gslvr.getGradient(beta)); } else { if (!_parms._lambda_search && _state._iter == 0) updateProgress(false); Result r = lbfgs.solve(gslvr, beta, _state.ginfo(), new ProgressMonitor() { @Override public boolean progress(double[] beta, GradientInfo ginfo) { if (_state._iter < 4 || ((_state._iter & 3) == 0)) Log.info(LogMsg("LBFGS, gradient norm = " + ArrayUtils.linfnorm(ginfo._gradient, false))); return GLMDriver.this.progress(beta, ginfo); } }); Log.info(LogMsg(r.toString())); _state.updateState(r.coefs, (GLMGradientInfo) r.ginfo); } } private void fitCOD() { double[] beta = _state.beta(); int p = _state.activeData().fullN() + 1; double wsum, wsumu; // intercept denum double[] denums; boolean skipFirstLevel = !_state.activeData()._useAllFactorLevels; double[] betaold = beta.clone(); double objold = _state.objective(); int iter2 = 0; // total cd iters // get reweighted least squares vectors Vec[] newVecs = _state.activeData()._adaptedFrame.anyVec().makeZeros(3); Vec w = newVecs[0]; // fixed before each CD loop Vec z = newVecs[1]; // fixed before each CD loop Vec zTilda = newVecs[2]; // will be updated at every variable within CD loop long startTimeTotalNaive = System.currentTimeMillis(); // generate new IRLS iteration while (iter2++ < 30) { Frame fr = new Frame(_state.activeData()._adaptedFrame); fr.add("w", w); // fr has all data fr.add("z", z); fr.add("zTilda", zTilda); GLMGenerateWeightsTask gt = new GLMGenerateWeightsTask(_job._key, _state.activeData(), _parms, beta).doAll(fr); double objVal = objVal(gt._likelihood, gt._betaw, _state.lambda()); denums = gt.denums; wsum = gt.wsum; wsumu = gt.wsumu; int iter1 = 0; // coordinate descent loop while (iter1++ < 100) { Frame fr2 = new Frame(); fr2.add("w", w); fr2.add("z", z); fr2.add("zTilda", zTilda); // original x%*%beta if first iteration for (int i = 0; i < _state.activeData()._cats; i++) { Frame fr3 = new Frame(fr2); int level_num = _state.activeData()._catOffsets[i + 1] - _state.activeData()._catOffsets[i]; int prev_level_num = 0; fr3.add("xj", _state.activeData()._adaptedFrame.vec(i)); boolean intercept = (i == 0); // prev var is intercept if (!intercept) { prev_level_num = _state.activeData()._catOffsets[i] - _state.activeData()._catOffsets[i - 1]; fr3.add("xjm1", _state.activeData()._adaptedFrame.vec(i - 1)); // add previous categorical variable } int start_old = _state.activeData()._catOffsets[i]; GLMCoordinateDescentTaskSeqNaive stupdate; if (intercept) stupdate = new GLMCoordinateDescentTaskSeqNaive(intercept, false, 4, Arrays.copyOfRange(betaold, start_old, start_old + level_num), new double[]{beta[p - 1]}, _state.activeData()._catLvls[i], null, null, 
null, null, null, skipFirstLevel).doAll(fr3); else stupdate = new GLMCoordinateDescentTaskSeqNaive(intercept, false, 1, Arrays.copyOfRange(betaold, start_old, start_old + level_num), Arrays.copyOfRange(beta, _state.activeData()._catOffsets[i - 1], _state.activeData()._catOffsets[i]), _state.activeData()._catLvls[i], _state.activeData()._catLvls[i - 1], null, null, null, null, skipFirstLevel).doAll(fr3); for (int j = 0; j < level_num; ++j) beta[_state.activeData()._catOffsets[i] + j] = ADMM.shrinkage(stupdate._temp[j] / wsumu, _state.lambda() * _parms._alpha[0]) / (denums[_state.activeData()._catOffsets[i] + j] / wsumu + _state.lambda() * (1 - _parms._alpha[0])); } int cat_num = 2; // if intercept, or not intercept but not first numeric, then both are numeric . for (int i = 0; i < _state.activeData()._nums; ++i) { GLMCoordinateDescentTaskSeqNaive stupdate; Frame fr3 = new Frame(fr2); fr3.add("xj", _state.activeData()._adaptedFrame.vec(i + _state.activeData()._cats)); // add current variable col boolean intercept = (i == 0 && _state.activeData().numStart() == 0); // if true then all numeric case and doing beta_1 double[] meannew = null, meanold = null, varnew = null, varold = null; if (i > 0 || intercept) {// previous var is a numeric var cat_num = 3; if (!intercept) fr3.add("xjm1", _state.activeData()._adaptedFrame.vec(i - 1 + _state.activeData()._cats)); // add previous one if not doing a beta_1 update, ow just pass it the intercept term if (_state.activeData()._normMul != null) { varold = new double[]{_state.activeData()._normMul[i]}; meanold = new double[]{_state.activeData()._normSub[i]}; if (i != 0) { varnew = new double[]{_state.activeData()._normMul[i - 1]}; meannew = new double[]{_state.activeData()._normSub[i - 1]}; } } stupdate = new GLMCoordinateDescentTaskSeqNaive(intercept, false, cat_num, new double[]{betaold[_state.activeData().numStart() + i]}, new double[]{beta[(_state.activeData().numStart() + i - 1 + p) % p]}, null, null, varold, meanold, varnew, meannew, skipFirstLevel).doAll(fr3); beta[i + _state.activeData().numStart()] = ADMM.shrinkage(stupdate._temp[0] / wsumu, _state.lambda() * _parms._alpha[0]) / (denums[i + _state.activeData().numStart()] / wsumu + _state.lambda() * (1 - _parms._alpha[0])); } else if (i == 0 && !intercept) { // previous one is the last categorical variable int prev_level_num = _state.activeData().numStart() - _state.activeData()._catOffsets[_state.activeData()._cats - 1]; fr3.add("xjm1", _state.activeData()._adaptedFrame.vec(_state.activeData()._cats - 1)); // add previous categorical variable if (_state.activeData()._normMul != null) { varold = new double[]{_state.activeData()._normMul[i]}; meanold = new double[]{_state.activeData()._normSub[i]}; } stupdate = new GLMCoordinateDescentTaskSeqNaive(intercept, false, cat_num, new double[]{betaold[_state.activeData().numStart()]}, Arrays.copyOfRange(beta, _state.activeData()._catOffsets[_state.activeData()._cats - 1], _state.activeData().numStart()), null, _state.activeData()._catLvls[_state.activeData()._cats - 1], varold, meanold, null, null, skipFirstLevel).doAll(fr3); beta[_state.activeData().numStart()] = ADMM.shrinkage(stupdate._temp[0] / wsumu, _state.lambda() * _parms._alpha[0]) / (denums[_state.activeData().numStart()] / wsumu + _state.lambda() * (1 - _parms._alpha[0])); } } if (_state.activeData()._nums + _state.activeData()._cats > 0) { // intercept update: preceded by a categorical or numeric variable Frame fr3 = new Frame(fr2); fr3.add("xjm1", 
_state.activeData()._adaptedFrame.vec(_state.activeData()._cats + _state.activeData()._nums - 1)); // add last variable updated in cycle to the frame GLMCoordinateDescentTaskSeqNaive iupdate; if (_state.activeData()._adaptedFrame.vec(_state.activeData()._cats + _state.activeData()._nums - 1).isCategorical()) { // only categorical vars cat_num = 2; iupdate = new GLMCoordinateDescentTaskSeqNaive(false, true, cat_num, new double[]{betaold[betaold.length - 1]}, Arrays.copyOfRange(beta, _state.activeData()._catOffsets[_state.activeData()._cats - 1], _state.activeData()._catOffsets[_state.activeData()._cats]), null, _state.activeData()._catLvls[_state.activeData()._cats - 1], null, null, null, null, skipFirstLevel).doAll(fr3); } else { // last variable is numeric cat_num = 3; double[] meannew = null, varnew = null; if (_state.activeData()._normMul != null) { varnew = new double[]{_state.activeData()._normMul[_state.activeData()._normMul.length - 1]}; meannew = new double[]{_state.activeData()._normSub[_state.activeData()._normSub.length - 1]}; } iupdate = new GLMCoordinateDescentTaskSeqNaive(false, true, cat_num, new double[]{betaold[betaold.length - 1]}, new double[]{beta[beta.length - 2]}, null, null, null, null, varnew, meannew, skipFirstLevel).doAll(fr3); } if (_parms._intercept) beta[beta.length - 1] = iupdate._temp[0] / wsum; } double maxdiff = ArrayUtils.linfnorm(ArrayUtils.subtract(beta, betaold), false); // false to keep the intercept System.arraycopy(beta, 0, betaold, 0, beta.length); if (maxdiff < _parms._beta_epsilon) break; } double percdiff = Math.abs((objold - objVal) / objold); if (percdiff < _parms._objective_epsilon & iter2 > 1) break; objold = objVal; Log.debug("iter1 = " + iter1); } Log.debug("iter2 = " + iter2); long endTimeTotalNaive = System.currentTimeMillis(); long durationTotalNaive = (endTimeTotalNaive - startTimeTotalNaive) / 1000; Log.info("Time to run Naive Coordinate Descent " + durationTotalNaive); _state._iter = iter2; for (Vec v : newVecs) v.remove(); _state.updateState(beta, objold); } private void fitModel() { Solver solver = (_parms._solver == Solver.AUTO) ? defaultSolver() : _parms._solver; switch (solver) { case COORDINATE_DESCENT: // fall through to IRLSM case IRLSM: if (multinomial.equals(_parms._family)) fitIRLSM_multinomial(solver); else if (ordinal.equals(_parms._family)) fitIRLSM_ordinal_default(solver); else if (gaussian.equals(_parms._family) && Link.identity.equals(_parms._link) && _parms._linear_constraints == null) fitLSM(solver); // not constrained GLM else { if (_parms._dispersion_parameter_method.equals(ml)) fitIRLSMML(solver); else if (_parms._linear_constraints == null) fitIRLSM(solver); else fitIRLSMCS(); // constrained GLM IRLSM } break; case GRADIENT_DESCENT_LH: case GRADIENT_DESCENT_SQERR: if (ordinal.equals(_parms._family)) fitIRLSM_ordinal_default(solver); break; case L_BFGS: fitLBFGS(); break; case COORDINATE_DESCENT_NAIVE: fitCOD(); break; default: throw H2O.unimpl(); } // Make sure if we set dispersion for Tweedie p and phi estimation even without calculating p values if (tweedie.equals(_parms._family) && !_parms._fix_dispersion_parameter && !_parms._fix_tweedie_variance_power) { _model.setDispersion(_parms._dispersion_estimated, true); } if (_parms._compute_p_values) { // compute p-values, standard error, estimate dispersion parameters... 
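      // A brief, descriptive sketch of the p-value computation that follows (nothing here is executed):
      // the dispersion estimate `se` scales the inverse information matrix obtained from the Cholesky
      // decomposition of the Gram, and each z-value is roughly
      //   z_i = beta_i / sqrt(se * [ (X'WX)^{-1} ]_ii)
      // which is what the loop filling `zvalues` below computes. Which dispersion estimator is used
      // (pearson, deviance, or ml) depends on _parms._dispersion_parameter_method.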
double se = _parms._init_dispersion_parameter; boolean seEst = false; double[] beta = _state.beta(); // standardized if _parms._standardize=true, original otherwise Log.info("estimating dispersion parameter using method: " + _parms._dispersion_parameter_method); if (_parms._family != binomial && _parms._family != Family.poisson && !_parms._fix_dispersion_parameter) { seEst = true; if (pearson.equals(_parms._dispersion_parameter_method) || deviance.equals(_parms._dispersion_parameter_method)) { if (_parms._useDispersion1) { se = 1; } else { ComputeSEorDEVIANCETsk ct = new ComputeSEorDEVIANCETsk(null, _state.activeData(), _job._key, beta, _parms, _model).doAll(_state.activeData()._adaptedFrame); se = ct._sumsqe / (_nobs - 1 - _state.activeData().fullN()); // dispersion parameter estimate } } else if (ml.equals(_parms._dispersion_parameter_method)) { if (gamma.equals(_parms._family)) { ComputeGammaMLSETsk mlCT = new ComputeGammaMLSETsk(null, _state.activeData(), _job._key, beta, _parms).doAll(_state.activeData()._adaptedFrame); double oneOverSe = estimateGammaMLSE(mlCT, 1.0 / se, beta, _parms, _state, _job, _model); se = 1.0 / oneOverSe; } else if (negativebinomial.equals(_parms._family)) { se = _parms._theta; } else if (_tweedieDispersionOnly) { se = estimateTweedieDispersionOnly(_parms, _model, _job, beta, _state.activeData()); if (!Double.isFinite(se)) Log.warn("Tweedie dispersion parameter estimation diverged. "+ "Estimation of both dispersion and variance power might have better luck."); } } // save estimation to the _params, so it is available for params.likelihood computation _parms._dispersion_estimated = se; } double[] zvalues = MemoryManager.malloc8d(_state.activeData().fullN() + 1); // double[][] inv = cholInv(); // from non-standardized predictors Cholesky chol = _chol; DataInfo activeData = _state.activeData(); if (_parms._standardize) { // compute non-standardized t(X)%*%W%*%X double[] beta_nostd = activeData.denormalizeBeta(beta); DataInfo.TransformType transform = activeData._predictor_transform; activeData.setPredictorTransform(DataInfo.TransformType.NONE); _gramInfluence = new GLMIterationTask(_job._key, activeData, new GLMWeightsFun(_parms), beta_nostd).doAll(activeData._adaptedFrame); activeData.setPredictorTransform(transform); // just in case, restore the transform beta = beta_nostd; } else { // just rebuild gram with latest GLM coefficients _gramInfluence = new GLMIterationTask(_job._key, activeData, new GLMWeightsFun(_parms), beta).doAll(activeData._adaptedFrame); } Gram g = _gramInfluence._gram; g.mul(_parms._obj_reg); chol = g.cholesky(null); double[][] inv = chol.getInv(); if (_parms._influence != null) { _cholInvInfluence = new double[inv.length][inv.length]; copy2DArray(inv, _cholInvInfluence); ArrayUtils.mult(_cholInvInfluence, _parms._obj_reg); g.mul(1.0/_parms._obj_reg); } ArrayUtils.mult(inv, _parms._obj_reg * se); _vcov = inv; for (int i = 0; i < zvalues.length; ++i) zvalues[i] = beta[i] / Math.sqrt(inv[i][i]); // set z-values for the final model (might be overwritten later) _model.setZValues(expandVec(zvalues, _state.activeData()._activeCols, _dinfo.fullN() + 1, Double.NaN), se, seEst); // save z-values to assign to the new submodel _state.setZValues(expandVec(zvalues, _state.activeData()._activeCols, _dinfo.fullN() + 1, Double.NaN), seEst); } } private long _lastScore = System.currentTimeMillis(); private long timeSinceLastScoring() { return System.currentTimeMillis() - _lastScore; } private void scoreAndUpdateModel() { // compute full validation on train 
and test Log.info(LogMsg("Scoring after " + timeSinceLastScoring() + "ms")); long t1 = System.currentTimeMillis(); Frame train = DKV.<Frame>getGet(_parms._train); // need to keep this frame to get scoring metrics back _model.score(_parms.train(), null, CFuncRef.from(_parms._custom_metric_func)).delete(); scorePostProcessing(train, t1); } private void scorePostProcessing(Frame train, long t1) { ModelMetrics mtrain = ModelMetrics.getFromDKV(_model, train); // updated by model.scoreAndUpdateModel long t2 = System.currentTimeMillis(); if (!(mtrain == null)) { _model._output._training_metrics = mtrain; _model._output._training_time_ms = t2 - _model._output._start_time; // remember training time ScoreKeeper trainScore = new ScoreKeeper(Double.NaN); trainScore.fillFrom(mtrain); Log.info(LogMsg(mtrain.toString())); } else { Log.info(LogMsg("ModelMetrics mtrain is null")); } Log.info(LogMsg("Training metrics computed in " + (t2 - t1) + "ms")); if (_valid != null) { Frame valid = DKV.<Frame>getGet(_parms._valid); _model.score(_parms.valid(), null, CFuncRef.from(_parms._custom_metric_func)).delete(); _model._output._validation_metrics = ModelMetrics.getFromDKV(_model, valid); //updated by model.scoreAndUpdateModel ScoreKeeper validScore = new ScoreKeeper(Double.NaN); validScore.fillFrom(_model._output._validation_metrics); } _model.addScoringInfo(_parms, nclasses(), t2, _state._iter); // add to scoringInfo for early stopping if (_parms._generate_scoring_history) { // update scoring history with deviance train and valid if available double xval_deviance = Double.NaN; double xval_se = Double.NaN; if (_xval_deviances_generate_SH != null) { int xval_iter_index = ArrayUtils.find(_xval_iters_generate_SH, _state._iter); if (xval_iter_index > -1) { xval_deviance = _xval_deviances_generate_SH[xval_iter_index]; xval_se = _xval_sd_generate_SH[xval_iter_index]; } } if (!(mtrain == null) && !(_valid == null)) { if (_parms._lambda_search) { double trainDev = _state.deviance() / mtrain._nobs; double validDev = ((GLMMetrics) _model._output._validation_metrics).residual_deviance() / _model._output._validation_metrics._nobs; _lambdaSearchScoringHistory.addLambdaScore(_state._iter, ArrayUtils.countNonzeros(_state.beta()), _state.lambda(), trainDev, validDev, xval_deviance, xval_se, _state.alpha()); } else { _scoringHistory.addIterationScore(!(mtrain == null), !(_valid == null), _state._iter, _state.likelihood(), _state.objective(), _state.deviance(), ((GLMMetrics) _model._output._validation_metrics).residual_deviance(), mtrain._nobs, _model._output._validation_metrics._nobs, _state.lambda(), _state.alpha()); } } else if (!(mtrain == null)) { // only doing training deviance if (_parms._lambda_search) { _lambdaSearchScoringHistory.addLambdaScore(_state._iter, ArrayUtils.countNonzeros(_state.beta()), _state.lambda(), _state.deviance() / mtrain._nobs, Double.NaN, xval_deviance, xval_se, _state.alpha()); } else { _scoringHistory.addIterationScore(!(mtrain == null), !(_valid == null), _state._iter, _state.likelihood(), _state.objective(), _state.deviance(), Double.NaN, mtrain._nobs, 1, _state.lambda(), _state.alpha()); } } _job.update(_workPerIteration, _state.toString()); } if (_parms._lambda_search) _model._output._scoring_history = _lambdaSearchScoringHistory.to2dTable(); else _model._output._scoring_history = _scoringHistory.to2dTable(_parms, _xval_deviances_generate_SH, _xval_sd_generate_SH); _model.update(_job._key); _model.generateSummary(_parms._train, _state._iter); _lastScore = System.currentTimeMillis(); long 
scoringTime = System.currentTimeMillis() - t1; _scoringInterval = Math.max(_scoringInterval, 20 * scoringTime); // at most 5% overhead for scoring } private void coldStart(double[] devHistoryTrain, double[] devHistoryTest) { _state.setBeta(_betaStart); // reset beta to original starting condition _state.setIter(0); _state.setLambdaSimple(0.0); // reset to 0 before new lambda is assigned _state._currGram = null; _state.setBetaDiff(_betaDiffStart); _state.setGradientErr(0.0); _state.setGinfo(_ginfoStart); _state.setLikelihood(_ginfoStart._likelihood); _state.setAllIn(false); _state.setGslvrNull(); _state.setActiveDataMultinomialNull(); _state.setActiveDataNull(); int histLen = devHistoryTrain.length; for (int ind = 0; ind < histLen; ind++) { devHistoryTrain[ind] = 0; devHistoryTest[ind] = 0; } } private void addGLMVec(Vec[] vecs, boolean deleteFirst, DataInfo dinfo) { String[] vecNames; if (ordinal.equals(_parms._family)) vecNames = new String[]{"__glm_ExpC","__glm_ExpNPC"}; else vecNames = new String[]{"__glm_sumExp", "__glm_maxRow"}; if (deleteFirst) { dinfo._adaptedFrame.remove(vecNames); dinfo._responses -= vecNames.length; } dinfo.addResponse(vecNames, vecs); } protected Submodel computeSubmodel(int i, double lambda, double nullDevTrain, double nullDevValid) { Submodel sm; boolean continueFromPreviousSubmodel = _parms.hasCheckpoint() && (_parms._alpha.length > 1 || _parms._lambda.length > 1) && _checkPointFirstIter && !Family.gaussian.equals(_parms._family); if (lambda >= _lmax && _state.l1pen() > 0) { if (continueFromPreviousSubmodel) sm = _model._output._submodels[i]; else _model.addSubmodel(i, sm = new Submodel(lambda, _state.alpha(), getNullBeta(), _state._iter, nullDevTrain, nullDevValid, _betaInfo.totalBetaLength(), null, false)); } else { if (continueFromPreviousSubmodel) { sm = _model._output._submodels[i]; } else { sm = new Submodel(lambda, _state.alpha(), _state.beta(), _state._iter, -1, -1, _betaInfo.totalBetaLength(), _state.zValues(), _state.dispersionEstimated());// restart from last run _model.addSubmodel(i, sm); } if (_insideCVCheck && _parms._generate_scoring_history && !Solver.L_BFGS.equals(_parms._solver) && (multinomial.equals(_parms._family) || ordinal.equals(_parms._family))) { boolean nullVecsFound = (ordinal.equals(_parms._family) && (DKV.get(_dinfo._adaptedFrame.vec("__glm_ExpC")._key)==null)) || (multinomial.equals(_parms._family) && DKV.get(_dinfo._adaptedFrame.vec("__glm_sumExp")._key)==null); if (nullVecsFound) { // check for deleted vectors and add them back DataInfo[] dataInfos = _state._activeDataMultinomial; if (dataInfos != null) { for (int cInd = 0; cInd < _nclass; cInd++) { Vec[] vecs = genGLMVectors(dataInfos[cInd], _state.beta()); addGLMVec(vecs, true, dataInfos[cInd]); } } Vec[] vecs = genGLMVectors(_dinfo, _state.beta()); addGLMVec(vecs, true, _dinfo); } } if (!_checkPointFirstIter) _state.setLambda(lambda); checkMemoryFootPrint(_state.activeData()); do { if (multinomial.equals(_parms._family) || ordinal.equals(_parms._family)) for (int c = 0; c < _nclass; ++c) Log.info(LogMsg("Class " + c + " got " + _state.activeDataMultinomial(c).fullN() + " active columns out of " + _state._dinfo.fullN() + " total")); else Log.info(LogMsg("Got " + _state.activeData().fullN() + " active columns out of " + _state._dinfo.fullN() + " total")); fitModel(); } while (!_state.checkKKTs()); Log.info(LogMsg("solution has " + ArrayUtils.countNonzeros(_state.beta()) + " nonzeros")); double trainDev = _state.deviance() / _nobs; double validDev = Double.NaN; // calculated 
from validation dataset below if present if (_validDinfo != null) { // calculate deviance for validation set and save as testDev if (ordinal.equals(_parms._family)) validDev = new GLMResDevTaskOrdinal(_job._key, _validDinfo, _dinfo.denormalizeBeta(_state.beta()), _nclass).doAll(_validDinfo._adaptedFrame).avgDev(); else validDev = multinomial.equals(_parms._family) ? new GLMResDevTaskMultinomial(_job._key, _validDinfo, _dinfo.denormalizeBeta(_state.beta()), _nclass).doAll(_validDinfo._adaptedFrame).avgDev() : new GLMResDevTask(_job._key, _validDinfo, _parms, _dinfo.denormalizeBeta(_state.beta())).doAll(_validDinfo._adaptedFrame).avgDev(); } Log.info(LogMsg("train deviance = " + trainDev + ", valid deviance = " + validDev)); double xvalDev = ((_xval_deviances == null) || (_xval_deviances.length <= i)) ? -1 : _xval_deviances[i]; double xvalDevSE = ((_xval_sd == null) || (_xval_deviances.length <= i)) ? -1 : _xval_sd[i]; if (_parms._lambda_search) _lambdaSearchScoringHistory.addLambdaScore(_state._iter, ArrayUtils.countNonzeros(_state.beta()), _state.lambda(), trainDev, validDev, xvalDev, xvalDevSE, _state.alpha()); // add to scoring history _model.updateSubmodel(i, sm = new Submodel(_state.lambda(), _state.alpha(), _state.beta(), _state._iter, trainDev, validDev, _betaInfo.totalBetaLength(), _state.zValues(), _state.dispersionEstimated())); } return sm; } @Override public void computeImpl() { try { doCompute(); } finally { final List<Key> keep = new ArrayList<>(); if ((!_doInit || !_cvRuns) && _betaConstraints != null) { DKV.remove(_betaConstraints._key); _betaConstraints.delete(); } if ((!_doInit || !_cvRuns) && _parms._linear_constraints != null) { keepFrameKeys(keep, _parms._linear_constraints); } if (_model != null) { if (_parms._influence != null) { keepFrameKeys(keep, _model._output._regression_influence_diagnostics); if (_parms._keepBetaDiffVar) keepFrameKeys(keep, _model._output._betadiff_var); Scope.untrack(keep.toArray(new Key[keep.size()])); } _model.unlock(_job); } } } private Vec[] genGLMVectors(DataInfo dinfo, double[] nb) { double maxRow = ArrayUtils.maxValue(nb); double sumExp = 0; if (_parms._family == multinomial) { int P = dinfo.fullN(); int N = dinfo.fullN() + 1; for (int i = 1; i < _nclass; ++i) sumExp += Math.exp(nb[i * N + P] - maxRow); } Vec[] vecs = dinfo._adaptedFrame.anyVec().makeDoubles(2, new double[]{sumExp, maxRow}); if (_parms._lambda_search && _parms._is_cv_model) { Scope.untrack(vecs[0]._key, vecs[1]._key); removeLater(vecs[0]._key, vecs[1]._key); } return vecs; } private void doCompute() { double nullDevTrain = Double.NaN; double nullDevValid = Double.NaN; if (_doInit) init(true); if (error_count() > 0) throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(GLM.this); _model._output._start_time = System.currentTimeMillis(); //quickfix to align output duration with other models if (_parms._expose_constraints && _parms._linear_constraints != null) { _model._output._equalityConstraintsBeta = _state._equalityConstraintsBeta; _model._output._lessThanEqualToConstraintsBeta = _state._lessThanEqualToConstraintsBeta; _model._output._equalityConstraintsLinear = _state._equalityConstraintsLinear; _model._output._lessThanEqualToConstraintsLinear = _state._lessThanEqualToConstraintsLinear; _model._output._constraintCoefficientNames = _state._csGLMState._constraintNames; _model._output._initConstraintMatrix = _state._csGLMState._initCSMatrix; } if (_parms._max_iterations == 0) { return; } else { if (_parms._lambda_search) { if (ordinal.equals(_parms._family)) 
nullDevTrain = new GLMResDevTaskOrdinal(_job._key, _state._dinfo, getNullBeta(), _nclass).doAll(_state._dinfo._adaptedFrame).avgDev(); else nullDevTrain = multinomial.equals(_parms._family) ? new GLMResDevTaskMultinomial(_job._key, _state._dinfo, getNullBeta(), _nclass).doAll(_state._dinfo._adaptedFrame).avgDev() : new GLMResDevTask(_job._key, _state._dinfo, _parms, getNullBeta()).doAll(_state._dinfo._adaptedFrame).avgDev(); if (_validDinfo != null) { if (ordinal.equals(_parms._family)) nullDevValid = new GLMResDevTaskOrdinal(_job._key, _validDinfo, getNullBeta(), _nclass).doAll(_validDinfo._adaptedFrame).avgDev(); else nullDevValid = multinomial.equals(_parms._family) ? new GLMResDevTaskMultinomial(_job._key, _validDinfo, getNullBeta(), _nclass).doAll(_validDinfo._adaptedFrame).avgDev() : new GLMResDevTask(_job._key, _validDinfo, _parms, getNullBeta()).doAll(_validDinfo._adaptedFrame).avgDev(); } _workPerIteration = WORK_TOTAL / _parms._nlambdas; } else _workPerIteration = 1 + (WORK_TOTAL / _parms._max_iterations); if (!Solver.L_BFGS.equals(_parms._solver) && (multinomial.equals(_parms._family) || ordinal.equals(_parms._family))) { Vec[] vecs = genGLMVectors(_dinfo, getNullBeta()); addGLMVec(vecs, false, _dinfo); } _ginfoStart = GLMUtils.copyGInfo(_state.ginfo()); _betaDiffStart = _state.getBetaDiff(); double oldDevTrain = nullDevTrain; double oldDevTest = nullDevValid; double[] devHistoryTrain = new double[5]; double[] devHistoryTest = new double[5]; if (_parms.hasCheckpoint()) { // restore _state parameters _state.copyCheckModel2State(_model, _gamColIndices); if (_model._output._submodels.length == 1) _model._output._submodels = null; // null out submodel only for single alpha/lambda values } if (!_parms._lambda_search) updateProgress(false); // alpha, lambda search loop int alphaStart = 0; int lambdaStart = 0; int submodelCount = 0; if (_parms.hasCheckpoint() && _model._output._submodels != null) { // multiple alpha/lambdas or lambda search submodelCount = Family.gaussian.equals(_parms._family) ? _model._output._submodels.length : _model._output._submodels.length - 1; alphaStart = submodelCount / _parms._lambda.length; lambdaStart = submodelCount % _parms._lambda.length; } _model._output._lambda_array_size = _parms._lambda.length; for (int alphaInd = alphaStart; alphaInd < _parms._alpha.length; alphaInd++) { _state.setAlpha(_parms._alpha[alphaInd]); // loop through the alphas if ((alphaInd > 0) && !_checkPointFirstIter) // no need for cold start during the first iteration coldStart(devHistoryTrain, devHistoryTest); // reset beta, lambda, currGram for (int i = lambdaStart; i < _parms._lambda.length; ++i) { // for lambda search, can quit before it is done if (_job.stop_requested() || (timeout() && _model._output._submodels.length > 0)) break; //need at least one submodel on timeout to avoid issues. 
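        // Note on the early-stopping bookkeeping used further down in this loop: devHistoryTrain/devHistoryTest
        // hold the last few *relative* deviance improvements, e.g. (oldDevTrain - trainDev) / oldDevTrain, and
        // the lambda search stops once the largest recent improvement on train falls below 1e-4 (or below 0 on
        // the validation frame), i.e. once additional lambda steps stop helping. Illustrative only:
        //   double s = ArrayUtils.maxValue(devHistoryTrain);
        //   if (s < 1e-4) break; // no meaningful improvement over the last window => started overfitting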
if (_parms._max_iterations != -1 && _state._iter >= _parms._max_iterations) break; // iterations accumulate across all lambda/alpha values when coldstart = false if ((_parms._cold_start || (!_parms._lambda_search && _parms._cold_start)) && (i > 0) && !_checkPointFirstIter) // default: cold_start for non lambda_search coldStart(devHistoryTrain, devHistoryTest); Submodel sm = computeSubmodel(submodelCount, _parms._lambda[i], nullDevTrain, nullDevValid); if (_checkPointFirstIter) _checkPointFirstIter = false; double trainDev = sm.devianceTrain; // this is stupid, they are always -1 except for lambda_search=True double testDev = sm.devianceValid; devHistoryTest[submodelCount % devHistoryTest.length] = (oldDevTest - testDev) / oldDevTest; // only remembers 5 oldDevTest = testDev; devHistoryTrain[submodelCount % devHistoryTrain.length] = (oldDevTrain - trainDev) / oldDevTrain; oldDevTrain = trainDev; if (_parms._lambda[i] < _lmax && Double.isNaN(_lambdaCVEstimate) /** if we have cv lambda estimate we should use it, can not stop before reaching it */) { if (_parms._early_stopping && _state._iter >= devHistoryTrain.length) { // implement early stopping for lambda search double s = ArrayUtils.maxValue(devHistoryTrain); if (s < 1e-4) { Log.info(LogMsg("converged at lambda[" + i + "] = " + _parms._lambda[i] + "alpha[" + alphaInd + "] = " + _parms._alpha[alphaInd] + ", improvement on train = " + s)); break; // started overfitting } if (_validDinfo != null && _parms._nfolds <= 1) { // check for early stopping on test with no xval s = ArrayUtils.maxValue(devHistoryTest); if (s < 0) { Log.info(LogMsg("converged at lambda[" + i + "] = " + _parms._lambda[i] + "alpha[" + alphaInd + "] = " + _parms._alpha[alphaInd] + ", improvement on test = " + s)); break; // started overfitting } } } } if ((_parms._lambda_search || _parms._generate_scoring_history) && (_parms._score_each_iteration || timeSinceLastScoring() > _scoringInterval || ((_parms._score_iteration_interval > 0) && ((_state._iter % _parms._score_iteration_interval) == 0)))) { _model._output.setSubmodelIdx(_model._output._best_submodel_idx = submodelCount, _parms); // quick and easy way to set submodel parameters scoreAndUpdateModel(); // update partial results } _job.update(_workPerIteration, "iter=" + _state._iter + " lmb=" + lambdaFormatter.format(_state.lambda()) + " alpha=" + lambdaFormatter.format(_state.alpha()) + "deviance trn/tst= " + devFormatter.format(trainDev) + "/" + devFormatter.format(testDev) + " P=" + ArrayUtils.countNonzeros(_state.beta())); submodelCount++; // updata submodel index count here } } // if beta constraint is enabled, check and make sure coefficients are within bounds if (_betaConstraintsOn && betaConstraintsCheckEnabled() && (!_linearConstraintsOn || _parms._separate_linear_beta)) checkCoeffsBounds(); if (stop_requested() || _earlyStop) { if (timeout()) { Log.info("Stopping GLM training because of timeout"); } else if (_earlyStop) { Log.info("Stopping GLM training due to hitting early stopping criteria."); } else { throw new Job.JobCancelledException(); } } if (_state._iter >= _parms._max_iterations) _job.warn("Reached maximum number of iterations " + _parms._max_iterations + "!"); if (_parms._nfolds > 1 && !Double.isNaN(_lambdaCVEstimate) && _bestCVSubmodel < _model._output._submodels.length) _model._output.setSubmodelIdx(_model._output._best_submodel_idx = _bestCVSubmodel, _model._parms); // reset best_submodel_idx to what xval has found else _model._output.pickBestModel(_model._parms); if (_vcov != null) { // should 
move this up, otherwise, scoring will never use info in _vcov _model.setVcov(_vcov); _model.update(_job._key); } _model._finalScoring = true; // enables likelihood calculation while scoring scoreAndUpdateModel(); _model._finalScoring = false; // avoid calculating likelihood in case of further updates if (dfbetas.equals(_parms._influence)) genRID(); if (_parms._generate_variable_inflation_factors) { _model._output._vif_predictor_names = _model.buildVariableInflationFactors(_train, _dinfo); }// build variable inflation factors for numerical predictors TwoDimTable scoring_history_early_stop = ScoringInfo.createScoringHistoryTable(_model.getScoringInfo(), (null != _parms._valid), false, _model._output.getModelCategory(), false, _parms.hasCustomMetricFunc()); _model._output._scoring_history = combineScoringHistory(_model._output._scoring_history, scoring_history_early_stop); _model._output._varimp = _model._output.calculateVarimp(); _model._output._variable_importances = calcVarImp(_model._output._varimp); if (_linearConstraintsOn) printConstraintSummary(_model, _state, _dinfo.coefNames()); _model.update(_job._key); /* if (_vcov != null) { _model.setVcov(_vcov); _model.update(_job._key); }*/ if (!(_parms)._lambda_search && _state._iter < _parms._max_iterations) { _job.update(_workPerIteration * (_parms._max_iterations - _state._iter)); } if (_iceptAdjust != 0) { // apply the intercept adjust according to prior probability assert _parms._intercept; double[] b = _model._output._global_beta; b[b.length - 1] += _iceptAdjust; for (Submodel sm : _model._output._submodels) sm.beta[sm.beta.length - 1] += _iceptAdjust; _model.update(_job._key); } } } /*** * Generate the regression influence diagnostic for gaussian and binomial families. It takes the following steps: * 1. generate the inverse of gram matrix; * 2. generate sum of all columns of gram matrix; * 3. task is called to generate the hat matrix equation 2 or equation 6 and the diagnostic in equation 3 or 7. * * Note that redundant predictors are excluded. */ public void genRID() { final double[] beta = _dinfo.denormalizeBeta(_state.beta());// if standardize=true, standardized coeff, else non-standardized coeff final double[] stdErr = _model._output.stdErr().clone(); final String[] names = genDfbetasNames(_model); // exclude redundant predictors DataInfo.TransformType transform = DataInfo.TransformType.NONE; // concatenate RIDFrame to the training data frame if (_parms._standardize) { // remove standardization transform = _dinfo._predictor_transform; _dinfo.setPredictorTransform(DataInfo.TransformType.NONE); } Frame RIDFrame = gaussian.equals(_parms._family) ? 
genRIDGaussian(_model._output.beta(), _cholInvInfluence, names) : genRIDBinomial(beta, _cholInvInfluence, stdErr, names); Scope.track(RIDFrame); Frame combinedFrame = buildRIDFrame(_parms, _train.deepCopy(Key.make().toString()), RIDFrame); _model._output._regression_influence_diagnostics = combinedFrame.getKey(); DKV.put(combinedFrame); if (_parms._standardize) _dinfo.setPredictorTransform(transform); } private Frame genRIDBinomial(double[] beta, double[][] inv, double[] stdErr, String[] names) { RegressionInfluenceDiagnosticsTasks.RegressionInfluenceDiagBinomial ridBinomial = new RegressionInfluenceDiagnosticsTasks.RegressionInfluenceDiagBinomial(_job, beta, inv, _parms, _dinfo, stdErr); ridBinomial.doAll(names.length, Vec.T_NUM, _dinfo._adaptedFrame); return ridBinomial.outputFrame(Key.make(), names, new String[names.length][]); } private Frame genRIDGaussian(final double[] beta, final double[][] inv, final String[] names) { final double[] stdErr = _model._output.stdErr(); final double[][] gramMat = _gramInfluence._gram.getXX(); // not scaled by parms._obj_reg RegressionInfluenceDiagnosticsTasks.ComputeNewBetaVarEstimatedGaussian betaMinusOne = new RegressionInfluenceDiagnosticsTasks.ComputeNewBetaVarEstimatedGaussian(inv, _gramInfluence._xy, _job, _dinfo, gramMat, _gramInfluence.sumOfRowWeights, _gramInfluence._yy, stdErr); betaMinusOne.doAll(names.length+1, Vec.T_NUM, _dinfo._adaptedFrame); String[] namesVar = new String[names.length+1]; System.arraycopy(names, 0, namesVar, 0, names.length); namesVar[names.length] = "varEstimate"; Frame newBetasVarEst = betaMinusOne.outputFrame(Key.make(), namesVar, new String[namesVar.length][]); if (_parms._keepBetaDiffVar) { DKV.put(newBetasVarEst); _model._output._betadiff_var = newBetasVarEst._key; } else { Scope.track(newBetasVarEst); } double[] betaReduced; // exclude redundant predictors if present if (names.length==beta.length) { // no redundant column betaReduced = beta; } else { betaReduced = new double[names.length]; removeRedCols(beta, betaReduced, stdErr); } RegressionInfluenceDiagnosticsTasks.RegressionInfluenceDiagGaussian genRid = new RegressionInfluenceDiagnosticsTasks.RegressionInfluenceDiagGaussian(inv, betaReduced, _job); genRid.doAll(names.length, Vec.T_NUM, newBetasVarEst); return genRid.outputFrame(Key.make(), names, new String[names.length][]); } private boolean betaConstraintsCheckEnabled() { return Boolean.parseBoolean(getSysProperty("glm.beta.constraints.checkEnabled", "true")) && !multinomial.equals(_parms._family) && !ordinal.equals(_parms._family); } /*** * When beta constraints are turned on, verify coefficients are either 0.0 or falls within the * beta constraints bounds. This check only applies when the beta constraints has a lower and upper bound. */ private void checkCoeffsBounds() { BetaConstraint bc = _parms._beta_constraints != null ? new BetaConstraint(_parms._beta_constraints.get()) : new BetaConstraint(); // bounds for columns _dinfo.fullN()+1 only double[] coeffs = _parms._standardize ? _model._output.getNormBeta() :_model._output.beta(); if (coeffs == null) return; if (bc._betaLB == null && bc._betaUB == null) return; int coeffsLen = bc._betaLB != null ? 
bc._betaLB.length : bc._betaUB.length; StringBuffer errorMessage = new StringBuffer(); boolean lowerBoundNull = bc._betaLB == null; boolean upperBoundNull = bc._betaUB == null; for (int index=0; index < coeffsLen; index++) { if (coeffs[index] != 0) { if (lowerBoundNull && !Double.isInfinite(bc._betaUB[index]) && (coeffs[index] > bc._betaUB[index])) { errorMessage.append("GLM model coefficient " + coeffs[index]+" exceeds beta constraint upper bounds: " + "upper: "+bc._betaUB[index]+"\n"); } else if (upperBoundNull && !Double.isInfinite(bc._betaLB[index]) && (coeffs[index] < bc._betaLB[index])) { errorMessage.append("GLM model coefficient " + coeffs[index]+" falls below beta constraint lower bounds: " + "upper: "+bc._betaLB[index]+"\n"); } else if (!lowerBoundNull && !upperBoundNull && (coeffs[index] < bc._betaLB[index] && coeffs[index] > bc._betaUB[index])) { errorMessage.append("GLM model coefficient " + coeffs[index]+" exceeds beta constraint bounds. Lower: " +bc._betaLB[index]+", upper: "+bc._betaUB[index]+"\n"); } } } if (errorMessage.length() > 0) throw new H2OFailException("\n"+errorMessage.toString()); } @Override public void onCompletion(CountedCompleter caller) { doCleanup(); super.onCompletion(caller); } @Override public boolean onExceptionalCompletion(Throwable t, CountedCompleter caller) { doCleanup(); return super.onExceptionalCompletion(t, caller); } @Override public boolean progress(double[] beta, GradientInfo ginfo) { _state._iter++; if (ginfo instanceof ProximalGradientInfo) { ginfo = ((ProximalGradientInfo) ginfo)._origGinfo; GLMGradientInfo gginfo = (GLMGradientInfo) ginfo; _state.updateState(beta, gginfo); if (!_parms._lambda_search) updateProgress(false); return !stop_requested() && _state._iter < _parms._max_iterations && !_earlyStop; } else { GLMGradientInfo gginfo = (GLMGradientInfo) ginfo; if (gginfo._gradient == null) _state.updateState(beta, gginfo._likelihood); else _state.updateState(beta, gginfo); if ((!_parms._lambda_search || _parms._generate_scoring_history) && !_insideCVCheck) updateProgress(true); boolean converged = !_earlyStopEnabled && _state.converged(); // GLM specific early stop. Disabled if early stop is enabled if (converged) Log.info(LogMsg(_state.convergenceMsg)); return !stop_requested() && !converged && _state._iter < _parms._max_iterations && !_earlyStop; } } public boolean progress(double[] beta, double likelihood) { _state._iter++; _state.updateState(beta, likelihood); // updateProgress is not run originally when lambda_search is not enabled inside or outside of // cv_computeAndSetOptimalParameters. However, with generate_scoring_history on, I am adding deviance_* and // hence I need to make sure that updateProgress is not run inside cv_computeAndSetOptimalParameters. 
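    // The guard below mirrors the note above: per-iteration progress/scoring updates are skipped while
    // running inside the cross-validation sub-models and, unless generate_scoring_history is enabled,
    // also during lambda search. Roughly:
    //   boolean doUpdate = (!lambda_search || generate_scoring_history) && !insideCVCheck;  // illustrative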
if ((!_parms._lambda_search || _parms._generate_scoring_history) && !_insideCVCheck) updateProgress(true); boolean converged = !_earlyStopEnabled && _state.converged(); if (converged) Log.info(LogMsg(_state.convergenceMsg)); return !stop_requested() && !converged && _state._iter < _parms._max_iterations && !_earlyStop; } private transient long _scoringInterval = SCORING_INTERVAL_MSEC; // update user visible progress protected void updateProgress(boolean canScore) { assert !_parms._lambda_search || _parms._generate_scoring_history; if (!_parms._generate_scoring_history && !_parms._lambda_search) { // same as before, _state._iter is not updated _scoringHistory.addIterationScore(_state._iter, _state.likelihood(), _state.objective()); _job.update(_workPerIteration, _state.toString()); // glm specific scoring history is updated every iteration } if (canScore && (_parms._score_each_iteration || timeSinceLastScoring() > _scoringInterval || ((_parms._score_iteration_interval > 0) && ((_state._iter % _parms._score_iteration_interval) == 0)))) { _model.update(_state.expandBeta(_state.beta()), -1, -1, _state._iter); scoreAndUpdateModel(); _earlyStop = _earlyStopEnabled && updateEarlyStop(); } } } private boolean updateEarlyStop() { return _earlyStop || ScoreKeeper.stopEarly(_model.scoreKeepers(), _parms._stopping_rounds, ScoreKeeper.ProblemType.forSupervised(_nclass > 1), _parms._stopping_metric, _parms._stopping_tolerance, "model's last", true); } private Solver defaultSolver() { Solver s = IRLSM; if (_parms._remove_collinear_columns) { // choose IRLSM if remove_collinear_columns is true Log.info(LogMsg("picked solver " + s)); _parms._solver = s; return s; } int max_active = 0; if(multinomial.equals(_parms._family)) for(int c = 0; c < _nclass; ++c) max_active += _state.activeDataMultinomial(c).fullN(); else max_active = _state.activeData().fullN(); if(max_active >= 5000) // cutoff has to be somewhere s = Solver.L_BFGS; else if(_parms._lambda_search) { // lambda search prefers coordinate descent // l1 lambda search is better with coordinate descent! 
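    // Summary of the solver-selection heuristic in this method (descriptive only):
    //   >= 5000 active predictors                                    -> L_BFGS
    //   lambda search, or box constraints without a proximal penalty -> COORDINATE_DESCENT
    //   multinomial with alpha[0] == 0 (pure ridge)                   -> L_BFGS
    //   otherwise                                                     -> IRLSM (the initial value of s)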
s = Solver.COORDINATE_DESCENT; } else if(_state.activeBC().hasBounds() && !_state.activeBC().hasProximalPenalty()) { s = Solver.COORDINATE_DESCENT; } else if(multinomial.equals(_parms._family) && _parms._alpha[0] == 0) s = Solver.L_BFGS; // multinomial does better with lbfgs else Log.info(LogMsg("picked solver " + s)); if(s != Solver.L_BFGS && _parms._max_active_predictors == -1) _parms._max_active_predictors = 5000; _parms._solver = s; return s; } double objVal(double likelihood, double[] beta, double lambda) { double alpha = _parms._alpha[0]; double proximalPen = 0; BetaConstraint bc = _state.activeBC(); if (_state.activeBC()._betaGiven != null && bc._rho != null) { for (int i = 0; i < bc._betaGiven.length; ++i) { double diff = beta[i] - bc._betaGiven[i]; proximalPen += diff * diff * bc._rho[i]; } } return (likelihood * _parms._obj_reg + .5 * proximalPen + lambda * (alpha * ArrayUtils.l1norm(beta, _parms._intercept) + (1 - alpha) * .5 * ArrayUtils.l2norm2(beta, _parms._intercept))); } private String LogMsg(String msg) {return "GLM[dest=" + dest() + ", " + _state + "] " + msg;} private static final double[] expandVec(double[] beta, final int[] activeCols, int fullN) { return expandVec(beta, activeCols, fullN, 0); } private static final double[] expandVec(double[] beta, final int[] activeCols, int fullN, double filler) { assert beta != null; if (activeCols == null) return beta; double[] res = MemoryManager.malloc8d(fullN); Arrays.fill(res, filler); int i = 0; for (int c : activeCols) res[c] = beta[i++]; res[res.length - 1] = beta[beta.length - 1]; return res; } private static double [] doUpdateCD(double [] grads, double [] ary, double diff , int variable_min, int variable_max) { for (int i = 0; i < variable_min; i++) grads[i] += diff * ary[i]; for (int i = variable_max; i < grads.length; i++) grads[i] += diff * ary[i]; return grads; } public double [] COD_solve(ComputationState.GramXY gram, double alpha, double lambda) { double [] res = COD_solve(gram.gram.getXX(),gram.xy,gram.getCODGradients(),gram.newCols,alpha,lambda); gram.newCols = new int[0]; return res; } private double [] COD_solve(double [][] xx, double [] xy, double [] grads, int [] newCols, double alpha, double lambda) { double wsumInv = 1.0/(xx[xx.length-1][xx.length-1]); final double betaEpsilon = _parms._beta_epsilon*_parms._beta_epsilon; double updateEpsilon = 0.01*betaEpsilon; double l1pen = lambda * alpha; double l2pen = lambda*(1-alpha); double [] diagInv = MemoryManager.malloc8d(xx.length); for(int i = 0; i < diagInv.length; ++i) diagInv[i] = 1.0/(xx[i][i] + l2pen); DataInfo activeData = _state.activeData(); int [][] nzs = new int[activeData.numStart()][]; int sparseCnt = 0; if(nzs.length > 1000) { final int [] nzs_ary = new int[xx.length]; for (int i = 0; i < activeData._cats; ++i) { int var_min = activeData._catOffsets[i]; int var_max = activeData._catOffsets[i + 1]; for(int l = var_min; l < var_max; ++l) { int k = 0; double [] x = xx[l]; for (int j = 0; j < var_min; ++j) if (x[j] != 0) nzs_ary[k++] = j; for (int j = var_max; j < activeData.numStart(); ++j) if (x[j] != 0) nzs_ary[k++] = j; if (k < ((nzs_ary.length - var_max + var_min) >> 3)) { sparseCnt++; nzs[l] = Arrays.copyOf(nzs_ary, k); } } } } final BetaConstraint bc = _state.activeBC(); double [] beta = _state.beta().clone(); int numStart = activeData.numStart(); if(newCols != null) { for (int id : newCols) { double b = bc.applyBounds(ADMM.shrinkage(grads[id], l1pen) * diagInv[id], id); if (b != 0) { doUpdateCD(grads, xx[id], -b, id, id + 1); beta[id] = b; } } } 
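    // A minimal sketch of the coordinate-descent update applied in the loop below, assuming ADMM.shrinkage
    // is the usual soft-thresholding operator S(g, t) = sign(g) * max(|g| - t, 0):
    //   beta_j <- applyBounds( S(grads[j], l1pen) / (xx[j][j] + l2pen) )
    // followed by updating the cached gradients by the change in beta_j; the sweep stops once the largest
    // weighted squared change, bd * bd * xx[j][j], drops below beta_epsilon^2.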
int iter1 = 0; int P = xy.length - 1; double maxDiff = 0; // // CD loop while (iter1++ < Math.max(P,500)) { maxDiff = 0; for (int i = 0; i < activeData._cats; ++i) { for(int j = activeData._catOffsets[i]; j < activeData._catOffsets[i+1]; ++j) { // can do in parallel double b = bc.applyBounds(ADMM.shrinkage(grads[j], l1pen) * diagInv[j],j); // new beta value here double bd = beta[j] - b; if(bd != 0) { double diff = bd*bd*xx[j][j]; if(diff > maxDiff) maxDiff = diff; if (nzs[j] == null) doUpdateCD(grads, xx[j], bd, activeData._catOffsets[i], activeData._catOffsets[i + 1]); else { double[] x = xx[j]; int[] ids = nzs[j]; for (int id : ids) grads[id] += bd * x[id]; doUpdateCD(grads, x, bd, 0, activeData.numStart()); } beta[j] = b; } } } for (int i = numStart; i < P; ++i) { double b = bc.applyBounds(ADMM.shrinkage(grads[i], l1pen) * diagInv[i],i); double bd = beta[i] - b; double diff = bd * bd * xx[i][i]; if (diff > maxDiff) maxDiff = diff; if(diff > updateEpsilon) { doUpdateCD(grads, xx[i], bd, i, i + 1); beta[i] = b; } } // intercept if(_parms._intercept) { double b = bc.applyBounds(grads[P] * wsumInv,P); double bd = beta[P] - b; double diff = bd * bd * xx[P][P]; if (diff > maxDiff) maxDiff = diff; doUpdateCD(grads, xx[P], bd, P, P + 1); beta[P] = b; } if (maxDiff < betaEpsilon) // stop if beta not changing much break; } return beta; } /** * Created by tomasnykodym on 3/30/15. */ public static final class GramSolver implements ProximalSolver { private final Gram _gram; private Cholesky _chol; private final double[] _xy; final double _lambda; double[] _rho; boolean _addedL2; double _betaEps; private static double boundedX(double x, double lb, double ub) { if (x < lb) x = lb; if (x > ub) x = ub; return x; } public GramSolver(Gram gram, double[] xy, double lmax, double betaEps, boolean intercept) { _gram = gram; _lambda = 0; _betaEps = betaEps; _xy = xy; double[] rhos = MemoryManager.malloc8d(xy.length); computeCholesky(gram, rhos, lmax * 1e-8,intercept); _addedL2 = rhos[0] != 0; _rho = _addedL2 ? rhos : null; } // solve non-penalized problem public void solve(double[] result) { System.arraycopy(_xy, 0, result, 0, _xy.length); _chol.solve(result); double gerr = Double.POSITIVE_INFINITY; if (_addedL2) { // had to add l2-pen to turn the gram to be SPD double[] oldRes = MemoryManager.arrayCopyOf(result, result.length); for (int i = 0; i < 1000; ++i) { solve(oldRes, result); double[] g = gradient(result)._gradient; gerr = Math.max(-ArrayUtils.minValue(g), ArrayUtils.maxValue(g)); if (gerr < 1e-4) return; System.arraycopy(result, 0, oldRes, 0, result.length); } Log.warn("Gram solver did not converge, gerr = " + gerr); } } public GramSolver(Gram gram, double[] xy, boolean intercept, double l2pen, double l1pen, double[] beta_given, double[] proxPen, double[] lb, double[] ub) { if (ub != null && lb != null) for (int i = 0; i < ub.length; ++i) { assert ub[i] >= lb[i] : i + ": ub < lb, ub = " + Arrays.toString(ub) + ", lb = " + Arrays.toString(lb); } _lambda = l2pen; _gram = gram; // Try to pick optimal rho constant here used in ADMM solver. // // Rho defines the strength of proximal-penalty and also the strentg of L1 penalty aplpied in each step. // Picking good rho constant is tricky and greatly influences the speed of convergence and precision with which we are able to solve the problem. 
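    // A small sketch of the per-coordinate rho estimate computed below (matching the loop over rhos;
    // descriptive only):
    //   x_i   ~ (xy_i - ybar * xbar_i) / (gram_ii - xbar_i^2 + l2pen)   // single-coordinate solution
    //   rho_i = ADMM.L1Solver.estimateRho(x_i, l1pen, lb_i, ub_i)
    // i.e. rho is chosen so that the proximal l2 term and the l1 penalty act on a comparable scale;
    // the intercept gets its own rho since the l1 penalty does not apply to it.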
// // Intuitively, we want the proximal l2-penalty ~ l1 penalty (l1 pen = lambda/rho, where lambda is the l1 penalty applied to the problem) // Here we compute the rho for each coordinate by using equation for computing coefficient for single coordinate and then making the two penalties equal. // int ii = intercept ? 1 : 0; int icptCol = gram.fullN()-1; double[] rhos = MemoryManager.malloc8d(xy.length); double min = Double.POSITIVE_INFINITY; for (int i = 0; i < xy.length - ii; ++i) { double d = xy[i]; d = d >= 0 ? d : -d; if (d < min && d != 0) min = d; } double ybar = xy[icptCol]; for (int i = 0; i < rhos.length - ii; ++i) { double y = xy[i]; if (y == 0) y = min; double xbar = gram.get(icptCol, i); double x = ((y - ybar * xbar) / ((gram.get(i, i) - xbar * xbar) + l2pen));///gram.get(i,i); rhos[i] = ADMM.L1Solver.estimateRho(x, l1pen, lb == null ? Double.NEGATIVE_INFINITY : lb[i], ub == null ? Double.POSITIVE_INFINITY : ub[i]); } // do the intercept separate as l1pen does not apply to it if (intercept && (lb != null && !Double.isInfinite(lb[icptCol]) || ub != null && !Double.isInfinite(ub[icptCol]))) { int icpt = xy.length - 1; rhos[icpt] = 1;//(xy[icpt] >= 0 ? xy[icpt] : -xy[icpt]); } if (l2pen > 0) gram.addDiag(l2pen); if (proxPen != null && beta_given != null) { gram.addDiag(proxPen); xy = xy.clone(); for (int i = 0; i < xy.length; ++i) xy[i] += proxPen[i] * beta_given[i]; } _xy = xy; _rho = rhos; computeCholesky(gram, rhos, 1e-5,intercept); } private void computeCholesky(Gram gram, double[] rhos, double rhoAdd, boolean intercept) { gram.addDiag(rhos); if(!intercept) { gram.dropIntercept(); rhos = Arrays.copyOf(rhos,rhos.length-1); _xy[_xy.length-1] = 0; } _chol = gram.cholesky(null, true, null); if (!_chol.isSPD()) { // make sure rho is big enough gram.addDiag(ArrayUtils.mult(rhos, -1)); gram.addDiag(rhoAdd,!intercept); Log.info("Got NonSPD matrix with original rho, re-computing with rho = " + (_rho[0]+rhoAdd)); _chol = gram.cholesky(null, true, null); int cnt = 0; double rhoAddSum = rhoAdd; while (!_chol.isSPD() && cnt++ < 5) { gram.addDiag(rhoAdd,!intercept); rhoAddSum += rhoAdd; Log.warn("Still NonSPD matrix, re-computing with rho = " + (rhos[0] + rhoAddSum)); _chol = gram.cholesky(null, true, null); } if (!_chol.isSPD()) throw new NonSPDMatrixException(); } gram.addDiag(ArrayUtils.mult(rhos, -1)); ArrayUtils.mult(rhos, -1); } @Override public double[] rho() { return _rho; } @Override public boolean solve(double[] beta_given, double[] result) { if (beta_given != null) for (int i = 0; i < _xy.length; ++i) result[i] = _xy[i] + _rho[i] * beta_given[i]; else System.arraycopy(_xy, 0, result, 0, _xy.length); _chol.solve(result); return true; } @Override public boolean hasGradient() { return false; } @Override public GradientInfo gradient(double[] beta) { double[] grad = _gram.mul(beta); for (int i = 0; i < _xy.length; ++i) grad[i] -= _xy[i]; return new GradientInfo(Double.NaN,grad); // todo compute the objective } @Override public int iter() { return 0; } } public static class ProximalGradientInfo extends GradientInfo { final GradientInfo _origGinfo; public ProximalGradientInfo(GradientInfo origGinfo, double objVal, double[] gradient) { super(objVal, gradient); _origGinfo = origGinfo; } } /** * Simple wrapper around ginfo computation, adding proximal penalty */ public static class ProximalGradientSolver implements GradientSolver, ProximalSolver { final GradientSolver _solver; double[] _betaGiven; double[] _beta; private ProximalGradientInfo _ginfo; private final ProgressMonitor _pm; final 
double[] _rho; private final double _objEps; private final double _gradEps; public ProximalGradientSolver(GradientSolver s, double[] betaStart, double[] rho, double objEps, double gradEps, GradientInfo ginfo,ProgressMonitor pm) { super(); _solver = s; _rho = rho; _objEps = objEps; _gradEps = gradEps; _pm = pm; _beta = betaStart; _betaGiven = MemoryManager.malloc8d(betaStart.length); // _ginfo = new ProximalGradientInfo(ginfo,ginfo._objVal,ginfo._gradient); } public static double proximal_gradient(double[] grad, double obj, double[] beta, double[] beta_given, double[] rho) { for (int i = 0; i < beta.length; ++i) { double diff = (beta[i] - beta_given[i]); double pen = rho[i] * diff; if(grad != null) grad[i] += pen; obj += .5 * pen * diff; } return obj; } private ProximalGradientInfo computeProxGrad(GradientInfo ginfo, double [] beta) { assert !(ginfo instanceof ProximalGradientInfo); double[] gradient = ginfo._gradient.clone(); double obj = proximal_gradient(gradient, ginfo._objVal, beta, _betaGiven, _rho); return new ProximalGradientInfo(ginfo, obj, gradient); } @Override public ProximalGradientInfo getGradient(double[] beta) { return computeProxGrad(_solver.getGradient(beta),beta); } @Override public GradientInfo getObjective(double[] beta) { GradientInfo ginfo = _solver.getObjective(beta); double obj = proximal_gradient(null, ginfo._objVal, beta, _betaGiven, _rho); return new ProximalGradientInfo(ginfo,obj,null); } @Override public double[] rho() { return _rho; } private int _iter; @Override public boolean solve(double[] beta_given, double[] beta) { GradientInfo origGinfo = (_ginfo == null || !Arrays.equals(_beta,beta)) ?_solver.getGradient(beta) :_ginfo._origGinfo; System.arraycopy(beta_given,0,_betaGiven,0,beta_given.length); L_BFGS.Result r = new L_BFGS().setObjEps(_objEps).setGradEps(_gradEps).solve(this, beta, _ginfo = computeProxGrad(origGinfo,beta), _pm); System.arraycopy(r.coefs,0,beta,0,r.coefs.length); _beta = r.coefs; _iter += r.iter; _ginfo = (ProximalGradientInfo) r.ginfo; return r.converged; } @Override public boolean hasGradient() { return true; } @Override public GradientInfo gradient(double[] beta) { return getGradient(beta)._origGinfo; } @Override public int iter() { return _iter; } } public static class GLMGradientInfo extends GradientInfo { final double _likelihood; public GLMGradientInfo(double likelihood, double objVal, double[] grad) { super(objVal, grad); _likelihood = likelihood; } public String toString(){ return "GLM grad info: likelihood = " + _likelihood + super.toString(); } } /** * Gradient and line search computation for L_BFGS and also L_BFGS solver wrapper (for ADMM) */ public static final class GLMGradientSolver implements GradientSolver { final GLMParameters _parms; final DataInfo _dinfo; final BetaConstraint _bc; final double _l2pen; // l2 penalty final Job _job; final BetaInfo _betaInfo; double[][] _betaMultinomial; double[][][] _penaltyMatrix; int[][] _gamColIndices; public GLMGradientSolver(Job job, GLMParameters glmp, DataInfo dinfo, double l2pen, BetaConstraint bc, BetaInfo bi) { _job = job; _bc = bc; _parms = glmp; _dinfo = dinfo; _l2pen = l2pen; _betaInfo = bi; } public GLMGradientSolver(Job job, GLMParameters glmp, DataInfo dinfo, double l2pen, BetaConstraint bc, BetaInfo bi, double[][][] penaltyMat, int[][] gamColInd) { this(job, glmp, dinfo, l2pen, bc, bi); _penaltyMatrix = penaltyMat; _gamColIndices=gamColInd; } /* Only update the likelihood function for multinomial while leaving all else stale and old. 
This is only used by multinomial with COD. */ public GLMGradientInfo getMultinomialLikelihood(double[] beta) { assert multinomial.equals(_parms._family) : "GLMGradientInfo.getMultinomialLikelihood is only used by multinomial GLM"; assert _betaMultinomial != null : "Multinomial coefficents cannot be null."; int off = 0; for (int i = 0; i < _betaMultinomial.length; ++i) { System.arraycopy(beta, off, _betaMultinomial[i], 0, _betaMultinomial[i].length); off += _betaMultinomial[i].length; } GLMMultinomialGradientBaseTask gt = new GLMMultinomialLikelihoodTask(_job, _dinfo, _l2pen, _betaMultinomial, _parms).doAll(_dinfo._adaptedFrame); double l2pen = 0; for (double[] b : _betaMultinomial) { l2pen += ArrayUtils.l2norm2(b, _dinfo._intercept); } double smoothval = gam.equals(_parms._glmType)?calSmoothNess(_betaMultinomial, _penaltyMatrix, _gamColIndices):0; return new GLMGradientInfo(gt._likelihood, gt._likelihood * _parms._obj_reg + .5 * _l2pen * l2pen + smoothval, null); } /*** * * This method calculates the gradient for constrained GLM without taking into account the contribution of the * constraints in this case. The likelihood, objective are calculated without the contribution of the constraints * either. */ public GLMGradientInfo getGradient(double[] beta, ComputationState state) { DataInfo dinfo = state.activeData()._activeCols == null ? _dinfo : state.activeData(); assert beta.length == dinfo.fullN() + 1; assert _parms._intercept || (beta[beta.length-1] == 0); GLMGradientTask gt; if((_parms._family == binomial && _parms._link == Link.logit) || (_parms._family == Family.fractionalbinomial && _parms._link == Link.logit)) gt = new GLMBinomialGradientTask(_job == null?null:_job._key,dinfo,_parms,_l2pen, beta, _penaltyMatrix, _gamColIndices).doAll(dinfo._adaptedFrame); else if(_parms._family == Family.gaussian && _parms._link == Link.identity) gt = new GLMGaussianGradientTask(_job == null?null:_job._key,dinfo,_parms,_l2pen, beta, _penaltyMatrix, _gamColIndices).doAll(dinfo._adaptedFrame); else if (Family.negativebinomial.equals(_parms._family)) gt = new GLMNegativeBinomialGradientTask(_job == null?null:_job._key,dinfo, _parms,_l2pen, beta, _penaltyMatrix, _gamColIndices).doAll(dinfo._adaptedFrame); else if(_parms._family == Family.poisson && _parms._link == Link.log) gt = new GLMPoissonGradientTask(_job == null?null:_job._key,dinfo,_parms,_l2pen, beta, _penaltyMatrix, _gamColIndices).doAll(dinfo._adaptedFrame); else if(_parms._family == Family.quasibinomial) gt = new GLMQuasiBinomialGradientTask(_job == null?null:_job._key,dinfo,_parms,_l2pen, beta, _penaltyMatrix, _gamColIndices).doAll(dinfo._adaptedFrame); else gt = new GLMGenericGradientTask(_job == null?null:_job._key, dinfo, _parms, _l2pen, beta, _penaltyMatrix, _gamColIndices).doAll(dinfo._adaptedFrame); double [] gradient = gt._gradient; double likelihood = gt._likelihood; if (!_parms._intercept) // no intercept, null the ginfo gradient[gradient.length - 1] = 0; double gamSmooth = gam.equals(_parms._glmType)? 
calSmoothNess(expandVec(beta, dinfo._activeCols, _betaInfo.totalBetaLength()), _penaltyMatrix, _gamColIndices):0; double obj = likelihood * _parms._obj_reg + .5 * _l2pen * ArrayUtils.l2norm2(beta, true)+gamSmooth; return new GLMGradientInfo(likelihood, obj, gradient); } @Override public GLMGradientInfo getGradient(double[] beta) { if (multinomial.equals(_parms._family) || ordinal.equals(_parms._family)) { // beta could contain active cols only for some classes and full predictors for other classes at this point if (_betaMultinomial == null) { // assert beta.length % (_dinfo.fullN() + 1) == 0:"beta len = " + beta.length + ", fullN +1 == " + (_dinfo.fullN()+1); _betaMultinomial = new double[_betaInfo._nBetas][]; // contains only active columns if rcc=true for (int i = 0; i < _betaInfo._nBetas; ++i) _betaMultinomial[i] = MemoryManager.malloc8d(_dinfo.fullN() + 1); // only active columns here } int off = 0; for (int i = 0; i < _betaMultinomial.length; ++i) { // fill _betaMultinomial class by coeffPerClass if (!_parms._remove_collinear_columns || _dinfo._activeCols == null || _dinfo._activeCols.length == _betaInfo._betaLenPerClass) System.arraycopy(beta, off, _betaMultinomial[i], 0, _betaMultinomial[i].length); else // _betaMultinomial only contains active columns _betaMultinomial[i] = extractSubRange(_betaInfo._betaLenPerClass, i, _dinfo._activeCols, beta); off += _betaMultinomial[i].length; } GLMMultinomialGradientBaseTask gt = new GLMMultinomialGradientTask(_job, _dinfo, _l2pen, _betaMultinomial, _parms, _penaltyMatrix, _gamColIndices).doAll(_dinfo._adaptedFrame); double l2pen = 0; for (double[] b : _betaMultinomial) { l2pen += ArrayUtils.l2norm2(b, _dinfo._intercept); if (ordinal.equals(_parms._family)) break; // only one beta for all classes, l2pen needs to count beta for one class only } double[] grad = gt.gradient(); // gradient is nclass by coefByClass if (!_parms._intercept) { for (int i = _dinfo.fullN(); i < beta.length; i += _dinfo.fullN() + 1) grad[i] = 0; } double smoothVal = gam.equals(_parms._glmType)?calSmoothNess(_betaMultinomial, _penaltyMatrix, _gamColIndices):0.0; return new GLMGradientInfo(gt._likelihood, gt._likelihood * _parms._obj_reg + .5 * _l2pen * l2pen + smoothVal, grad); } else { assert beta.length == _dinfo.fullN() + 1; assert _parms._intercept || (beta[beta.length-1] == 0); GLMGradientTask gt; if((_parms._family == binomial && _parms._link == Link.logit) || (_parms._family == Family.fractionalbinomial && _parms._link == Link.logit)) gt = new GLMBinomialGradientTask(_job == null?null:_job._key,_dinfo,_parms,_l2pen, beta, _penaltyMatrix, _gamColIndices).doAll(_dinfo._adaptedFrame); else if(_parms._family == Family.gaussian && _parms._link == Link.identity) gt = new GLMGaussianGradientTask(_job == null?null:_job._key,_dinfo,_parms,_l2pen, beta, _penaltyMatrix, _gamColIndices).doAll(_dinfo._adaptedFrame); else if (Family.negativebinomial.equals(_parms._family)) gt = new GLMNegativeBinomialGradientTask(_job == null?null:_job._key,_dinfo, _parms,_l2pen, beta, _penaltyMatrix, _gamColIndices).doAll(_dinfo._adaptedFrame); else if(_parms._family == Family.poisson && _parms._link == Link.log) gt = new GLMPoissonGradientTask(_job == null?null:_job._key,_dinfo,_parms,_l2pen, beta, _penaltyMatrix, _gamColIndices).doAll(_dinfo._adaptedFrame); else if(_parms._family == Family.quasibinomial) gt = new GLMQuasiBinomialGradientTask(_job == null?null:_job._key,_dinfo,_parms,_l2pen, beta, _penaltyMatrix, _gamColIndices).doAll(_dinfo._adaptedFrame); else gt = new 
GLMGenericGradientTask(_job == null?null:_job._key, _dinfo, _parms, _l2pen, beta, _penaltyMatrix, _gamColIndices).doAll(_dinfo._adaptedFrame); double [] gradient = gt._gradient; double likelihood = gt._likelihood; if (!_parms._intercept) // no intercept, null the ginfo gradient[gradient.length - 1] = 0; double gamSmooth = gam.equals(_parms._glmType)? calSmoothNess(expandVec(beta, _dinfo._activeCols, _betaInfo.totalBetaLength()), _penaltyMatrix, _gamColIndices):0; double obj = likelihood * _parms._obj_reg + .5 * _l2pen * ArrayUtils.l2norm2(beta, true)+gamSmooth; if (_bc != null && _bc._betaGiven != null && _bc._rho != null) obj = ProximalGradientSolver.proximal_gradient(gradient, obj, beta, _bc._betaGiven, _bc._rho); return new GLMGradientInfo(likelihood, obj, gradient); } } @Override public GradientInfo getObjective(double[] beta) { double l = new GLMResDevTask(_job._key,_dinfo,_parms,beta).doAll(_dinfo._adaptedFrame)._likelihood; double smoothness = gam.equals(_parms._glmType)? calSmoothNess(expandVec(beta, _dinfo._activeCols, _betaInfo.totalBetaLength()), _penaltyMatrix, _gamColIndices):0; return new GLMGradientInfo(l,l*_parms._obj_reg + .5*_l2pen*ArrayUtils.l2norm2(beta,true) +smoothness,null); } } protected static double sparseOffset(double[] beta, DataInfo dinfo) { double etaOffset = 0; if (dinfo._normMul != null && dinfo._normSub != null && beta != null) { int ns = dinfo.numStart(); for (int i = 0; i < dinfo._nums; ++i) etaOffset -= beta[i + ns] * dinfo._normSub[i] * dinfo._normMul[i]; } return etaOffset; } public static final class BetaInfo extends Iced<BetaInfo> { public final int _nBetas; public final int _betaLenPerClass; public BetaInfo(int nBetas, int betaLenPerClass) { _nBetas = nBetas; _betaLenPerClass = betaLenPerClass; } public int totalBetaLength() { return _nBetas * _betaLenPerClass; } } public final class BetaConstraint extends Iced { double[] _betaStart; double[] _betaGiven; double[] _rho; double[] _betaLB; double[] _betaUB; public BetaConstraint() { if (_parms._non_negative) setNonNegative(); } public void setNonNegative() { if (_betaLB == null) { _betaLB = MemoryManager.malloc8d(_dinfo.fullN() + 1); _betaLB[_dinfo.fullN()] = Double.NEGATIVE_INFINITY; } else for (int i = 0; i < _betaLB.length - 1; ++i) _betaLB[i] = Math.max(0, _betaLB[i]); if (_betaUB == null) { _betaUB = MemoryManager.malloc8d(_dinfo.fullN() + 1); Arrays.fill(_betaUB, Double.POSITIVE_INFINITY); } } public void setNonNegative(Frame otherConst) { List<String> constraintNames = extractVec2List(otherConst); // maximum size = number of predictors, an integer List<String> coefNames = Arrays.stream(_dinfo.coefNames()).collect(Collectors.toList()); int numCoef = coefNames.size(); for (int index=0; index<numCoef; index++) { // only changes beta constraints if not been specified before if (!constraintNames.contains(coefNames.get(index))) { _betaLB[index] = 0; _betaUB[index] = Double.POSITIVE_INFINITY; } } } /** * Extract predictor names in the constraint frame constraintF into a list. Okay to extract into a list as * the number of predictor is an integer and not long. 
*/ public List<String> extractVec2List(Frame constraintF) { List<String> constraintNames = new ArrayList<>(); Vec.Reader vr = constraintF.vec(0).new Reader(); long numRows = constraintF.numRows(); for (long index=0; index<numRows; index++) { BufferedString bs = vr.atStr(new BufferedString(), index); constraintNames.add(bs.toString()); } return constraintNames; } public void applyAllBounds(double[] beta) { int betaLength = beta.length; for (int index=0; index<betaLength; index++) beta[index] = applyBounds(beta[index], index); } public double applyBounds(double d, int i) { if(_betaLB != null && d < _betaLB[i]) return _betaLB[i]; if(_betaUB != null && d > _betaUB[i]) return _betaUB[i]; return d; } private Frame encodeCategoricalsIfPresent(Frame beta_constraints) { String[] coefNames = _dinfo.coefNames(); String[] coefOriginalNames = _dinfo.coefOriginalNames(); Frame transformedFrame = FrameUtils.encodeBetaConstraints(null, coefNames, coefOriginalNames, beta_constraints); return transformedFrame; } public BetaConstraint(Frame beta_constraints) { // beta_constraints = encodeCategoricalsIfPresent(beta_constraints); // null key if (_enumInCS) { if (_betaConstraints == null) { beta_constraints = expandedCatCS(_parms._beta_constraints.get(), _parms); DKV.put(beta_constraints); _betaConstraints = beta_constraints; } else { beta_constraints = _betaConstraints; } } Vec v = beta_constraints.vec("names"); // add v to scope.track does not work String[] dom; int[] map; if (v.isString()) { dom = new String[(int) v.length()]; map = new int[dom.length]; BufferedString tmpStr = new BufferedString(); for (int i = 0; i < dom.length; ++i) { dom[i] = v.atStr(tmpStr, i).toString(); if ("intercept".equals(dom[i])) throw new IllegalArgumentException("Beta constraints cannot be applied to the intercept right row."); map[i] = i; } // check for dups String[] sortedDom = dom.clone(); Arrays.sort(sortedDom); for (int i = 1; i < sortedDom.length; ++i) if (sortedDom[i - 1].equals(sortedDom[i])) throw new IllegalArgumentException("Illegal beta constraints file, got duplicate constraint for predictor '" + sortedDom[i - 1] + "'!"); } else if (v.isCategorical()) { dom = v.domain(); map = FrameUtils.asInts(v); // check for dups int[] sortedMap = MemoryManager.arrayCopyOf(map, map.length); Arrays.sort(sortedMap); for (int i = 1; i < sortedMap.length; ++i) if (sortedMap[i - 1] == sortedMap[i]) throw new IllegalArgumentException("Illegal beta constraints file, got duplicate constraint for predictor '" + dom[sortedMap[i - 1]] + "'!"); } else throw new IllegalArgumentException("Illegal beta constraints file, names column expected to contain column names (strings)"); // for now only categoricals allowed here String[] names = ArrayUtils.append(_dinfo.coefNames(), "Intercept"); if (!Arrays.deepEquals(dom, names)) { // need mapping HashMap<String, Integer> m = new HashMap<String, Integer>(); for (int i = 0; i < names.length; ++i) m.put(names[i], i); int[] newMap = MemoryManager.malloc4(dom.length); for (int i = 0; i < map.length; ++i) { if (_removedCols.contains(dom[map[i]])) { newMap[i] = -1; continue; } Integer I = m.get(dom[map[i]]); if (I == null) { throw new IllegalArgumentException("Unrecognized coefficient name in beta-constraint file, unknown name '" + dom[map[i]] + "'"); } newMap[i] = I; } map = newMap; } final int numoff = _dinfo.numStart(); String[] valid_col_names = new String[]{"names", "beta_given", "beta_start", "lower_bounds", "upper_bounds", "rho", "mean", "std_dev"}; Arrays.sort(valid_col_names); for (String s : 
beta_constraints.names()) if (Arrays.binarySearch(valid_col_names, s) < 0) error("beta_constraints", "Unknown column name '" + s + "'"); if ((v = beta_constraints.vec("beta_start")) != null) { _betaStart = MemoryManager.malloc8d(_dinfo.fullN() + (_dinfo._intercept ? 1 : 0)); for (int i = 0; i < (int) v.length(); ++i) if (map[i] != -1) _betaStart[map[i]] = v.at(i); } if ((v = beta_constraints.vec("beta_given")) != null) { _betaGiven = MemoryManager.malloc8d(_dinfo.fullN() + (_dinfo._intercept ? 1 : 0)); for (int i = 0; i < (int) v.length(); ++i) if (map[i] != -1) _betaGiven[map[i]] = v.at(i); } if ((v = beta_constraints.vec("upper_bounds")) != null) { _betaUB = MemoryManager.malloc8d(_dinfo.fullN() + (_dinfo._intercept ? 1 : 0)); Arrays.fill(_betaUB, Double.POSITIVE_INFINITY); for (int i = 0; i < (int) v.length(); ++i) if (map[i] != -1) _betaUB[map[i]] = v.at(i); } if ((v = beta_constraints.vec("lower_bounds")) != null) { _betaLB = MemoryManager.malloc8d(_dinfo.fullN() + (_dinfo._intercept ? 1 : 0)); Arrays.fill(_betaLB, Double.NEGATIVE_INFINITY); for (int i = 0; i < (int) v.length(); ++i) if (map[i] != -1) _betaLB[map[i]] = v.at(i); } if ((v = beta_constraints.vec("rho")) != null) { _rho = MemoryManager.malloc8d(_dinfo.fullN() + (_dinfo._intercept ? 1 : 0)); for (int i = 0; i < (int) v.length(); ++i) if (map[i] != -1) _rho[map[i]] = v.at(i); } // mean override (for data standardization) if ((v = beta_constraints.vec("mean")) != null) { _parms._stdOverride = true; for (int i = 0; i < v.length(); ++i) { if (!v.isNA(i) && map[i] != -1) { int idx = map == null ? i : map[i]; if (idx >= _dinfo.numStart() && idx < _dinfo.fullN()) { _dinfo._normSub[idx - _dinfo.numStart()] = v.at(i); } else { // categorical or Intercept, will be ignored } } } } // standard deviation override (for data standardization) if ((v = beta_constraints.vec("std_dev")) != null) { _parms._stdOverride = true; for (int i = 0; i < v.length(); ++i) { if (!v.isNA(i) && map[i] != -1) { int idx = map == null ? 
i : map[i]; if (idx >= _dinfo.numStart() && idx < _dinfo.fullN()) { _dinfo._normMul[idx - _dinfo.numStart()] = 1.0 / v.at(i); } else { // categorical or Intercept, will be ignored } } } } if (_dinfo._normMul != null) { double normG = 0, normS = 0, normLB = 0, normUB = 0; for (int i = numoff; i < _dinfo.fullN(); ++i) { double s = _dinfo._normSub[i - numoff]; double d = 1.0 / _dinfo._normMul[i - numoff]; if (_betaUB != null && !Double.isInfinite(_betaUB[i])) { normUB *= s; _betaUB[i] *= d; } if (_betaLB != null && !Double.isInfinite(_betaLB[i])) { normLB *= s; _betaLB[i] *= d; } if (_betaGiven != null) { normG += _betaGiven[i] * s; _betaGiven[i] *= d; } if (_betaStart != null) { normS += _betaStart[i] * s; _betaStart[i] *= d; } } if (_dinfo._intercept) { int n = _dinfo.fullN(); if (_betaGiven != null) _betaGiven[n] += normG; if (_betaStart != null) _betaStart[n] += normS; if (_betaLB != null) _betaLB[n] += normLB; if (_betaUB != null) _betaUB[n] += normUB; } } if (_betaStart == null && _betaGiven != null) _betaStart = _betaGiven.clone(); if (_betaStart != null) { if (_betaLB != null || _betaUB != null) { for (int i = 0; i < _betaStart.length; ++i) { if (_betaLB != null && _betaLB[i] > _betaStart[i]) _betaStart[i] = _betaLB[i]; if (_betaUB != null && _betaUB[i] < _betaStart[i]) _betaStart[i] = _betaUB[i]; } } } if (_parms._non_negative) { if (gam.equals(_parms._glmType)) setNonNegative(beta_constraints); else setNonNegative(); } check(); } public String toString() { double[][] ary = new double[_betaGiven.length][3]; for (int i = 0; i < _betaGiven.length; ++i) { ary[i][0] = _betaGiven[i]; ary[i][1] = _betaLB[i]; ary[i][2] = _betaUB[i]; } return ArrayUtils.pprint(ary); } public boolean hasBounds() { if (_betaLB != null) for (double d : _betaLB) if (!Double.isInfinite(d)) return true; if (_betaUB != null) for (double d : _betaUB) if (!Double.isInfinite(d)) return true; return false; } public boolean hasProximalPenalty() { return _betaGiven != null && _rho != null && ArrayUtils.countNonzeros(_rho) > 0; } public void adjustGradient(double[] beta, double[] grad) { if (_betaGiven != null && _rho != null) { for (int i = 0; i < _betaGiven.length; ++i) { double diff = beta[i] - _betaGiven[i]; grad[i] += _rho[i] * diff; } } } double proxPen(double[] beta) { double res = 0; if (_betaGiven != null && _rho != null) { for (int i = 0; i < _betaGiven.length; ++i) { double diff = beta[i] - _betaGiven[i]; res += _rho[i] * diff * diff; } res *= .5; } return res; } public void check() { if (_betaLB != null && _betaUB != null) for (int i = 0; i < _betaLB.length; ++i) if (!(_betaLB[i] <= _betaUB[i])) throw new IllegalArgumentException("lower bounds must be <= upper bounds, " + _betaLB[i] + " !<= " + _betaUB[i]); } public BetaConstraint filterExpandedColumns(int[] activeCols) { BetaConstraint res = new BetaConstraint(); if (_betaLB != null) res._betaLB = ArrayUtils.select(_betaLB, activeCols); if (_betaUB != null) res._betaUB = ArrayUtils.select(_betaUB, activeCols); if (_betaGiven != null) res._betaGiven = ArrayUtils.select(_betaGiven, activeCols); if (_rho != null) res._rho = ArrayUtils.select(_rho, activeCols); if (_betaStart != null) res._betaStart = ArrayUtils.select(_betaStart, activeCols); return res; } } public static class PlugValuesImputer implements DataInfo.Imputer { // make public to allow access to other algos private final Frame _plug_vals; public PlugValuesImputer(Frame plugValues) { _plug_vals = plugValues; } @Override public int imputeCat(String name, Vec v, boolean useAllFactorLevels) { String[] 
domain = v.domain(); Vec pvec = pvec(name); String value; if (pvec.isCategorical()) { value = pvec.domain()[(int) pvec.at(0)]; } else if (pvec.isString()) { value = pvec.stringAt(0); } else { throw new IllegalStateException("Plug value for a categorical column `" + name + "` cannot by of type " + pvec.get_type_str() + "!"); } int valueIndex = ArrayUtils.find(domain, value); if (valueIndex < 0) { throw new IllegalStateException("Plug value `" + value + "` of column `" + name + "` is not a member of the column's domain!"); } return valueIndex; } @Override public double imputeNum(String name, Vec v) { Vec pvec = pvec(name); if (v.isNumeric() || v.isTime()) { return pvec.at(0); } else { throw new IllegalStateException("Plug value for a column `" + name + "` of type " + v.get_type_str() + " cannot by of type " + pvec.get_type_str() + "!"); } } @Override public double[] imputeInteraction(String name, InteractionWrappedVec iv, double[] means) { if (iv.isNumericInteraction()) { return new double[]{imputeNum(name, iv)}; } assert iv.v1Domain() == null || iv.v2Domain() == null; // case when both vecs are categorical is handled by imputeCat String[] domain = iv.v1Domain() != null ? iv.v1Domain() : iv.v2Domain(); double[] vals = new double[domain.length]; for (int i = 0; i < domain.length; i++) { vals[i] = pvec(name + "." + domain[i]).at(0); } return vals; } private Vec pvec(String name) { Vec pvec = _plug_vals.vec(name); if (pvec == null) { throw new IllegalStateException("Plug value for column `" + name + "` is not defined!"); } return pvec; } } }
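The proximal penalty applied by ProximalGradientSolver.proximal_gradient above augments the objective with 0.5 * rho[i] * (beta[i] - beta_given[i])^2 and the gradient with rho[i] * (beta[i] - beta_given[i]). The standalone sketch below mirrors that arithmetic on a toy vector; the class name ProximalPenaltyDemo and the sample numbers are illustrative only and are not part of the H2O sources.

// Standalone illustration (not H2O code): reproduces the arithmetic of
// ProximalGradientSolver.proximal_gradient on a tiny example.
public class ProximalPenaltyDemo {
  // Adds 0.5 * rho[i] * (beta[i] - betaGiven[i])^2 to the objective and
  // rho[i] * (beta[i] - betaGiven[i]) to each gradient entry.
  static double addProximalPenalty(double[] grad, double obj, double[] beta,
                                   double[] betaGiven, double[] rho) {
    for (int i = 0; i < beta.length; ++i) {
      double diff = beta[i] - betaGiven[i];
      double pen = rho[i] * diff;
      if (grad != null) grad[i] += pen;
      obj += .5 * pen * diff;
    }
    return obj;
  }

  public static void main(String[] args) {
    double[] beta      = {0.4, -1.2, 0.0};
    double[] betaGiven = {0.5, -1.0, 0.0};   // "given" coefficients (e.g. from beta constraints)
    double[] rho       = {2.0,  2.0, 2.0};   // per-coordinate penalty strength
    double[] grad      = {0.1, -0.3, 0.2};   // pretend unpenalized gradient
    double obj = 1.0;                        // pretend unpenalized objective
    obj = addProximalPenalty(grad, obj, beta, betaGiven, rho);
    // obj is now 1.0 + 0.5*2*(0.01 + 0.04 + 0.0) = 1.05
    System.out.println("penalized objective = " + obj);
    System.out.println("penalized gradient  = " + java.util.Arrays.toString(grad));
  }
}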
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/glm/GLMMetricBuilder.java
package hex.glm; import hex.*; import hex.ModelMetrics.MetricBuilder; import hex.ModelMetricsBinomial.MetricBuilderBinomial; import hex.ModelMetricsBinomialGLM.ModelMetricsMultinomialGLM; import hex.ModelMetricsBinomialGLM.ModelMetricsOrdinalGLM; import hex.ModelMetricsMultinomial.MetricBuilderMultinomial; import hex.ModelMetricsOrdinal.MetricBuilderOrdinal; import hex.ModelMetricsRegression.MetricBuilderRegression; import hex.ModelMetricsSupervised.MetricBuilderSupervised; import hex.glm.GLMModel.GLMParameters.Family; import hex.glm.GLMModel.GLMWeightsFun; import water.H2O; import water.fvec.Frame; import water.fvec.Vec; import water.util.ArrayUtils; import water.util.MathUtils; import java.util.Arrays; /** * Class for GLMValidation. * * @author tomasnykodym * */ public class GLMMetricBuilder extends MetricBuilderSupervised<GLMMetricBuilder> { double residual_deviance; double null_devince; long _nobs; double _log_likelihood; double _aic; private double _aic2;// internal AIC used only for poisson family! final GLMModel.GLMWeightsFun _glmf; final private int _rank; MetricBuilder _metricBuilder; final boolean _intercept; private final double [] _ymu; final boolean _computeMetrics; private final boolean _familyAllowsFinalLikelihoodCalculation; public GLMMetricBuilder(String[] domain, double [] ymu, GLMWeightsFun glmf, int rank, boolean computeMetrics, boolean intercept, MultinomialAucType aucType){ super(domain == null?0:domain.length, domain); _glmf = glmf; _rank = rank; _computeMetrics = computeMetrics; _intercept = intercept; _ymu = ymu; _familyAllowsFinalLikelihoodCalculation = _glmf == null ? false : Arrays.asList(Family.multinomial, Family.gaussian, Family.binomial, Family.quasibinomial, Family.fractionalbinomial, Family.poisson, Family.negativebinomial, Family.gamma, Family.tweedie) .contains(_glmf._family); if (_computeMetrics) { switch (_glmf._family) { case binomial: case quasibinomial: case fractionalbinomial: _metricBuilder = new MetricBuilderBinomial(domain); break; case multinomial: _metricBuilder = new MetricBuilderMultinomial(domain.length, domain, aucType); ((MetricBuilderMultinomial) _metricBuilder)._priorDistribution = ymu; break; case ordinal: _metricBuilder = new MetricBuilderOrdinal(domain.length, domain); ((MetricBuilderOrdinal) _metricBuilder)._priorDistribution = ymu; break; default: _metricBuilder = new MetricBuilderRegression(); break; } } } public double explainedDev(){ throw H2O.unimpl(); } @Override public double[] perRow(double ds[], float[] yact, Model m) { return perRow(ds, yact, 1, 0, m); } @Override public double[] perRow(double ds[], float[] yact, double weight, double offset, Model m) { if(weight == 0)return ds; _metricBuilder.perRow(ds,yact,weight,offset,m); GLMModel gm = (GLMModel) m; if ((gm._finalScoring && gm._parms._calc_like && _familyAllowsFinalLikelihoodCalculation) /*final scoring, _calc_like flag is on*/ || (!gm._finalScoring && _glmf._family.equals(Family.negativebinomial)) /*model build*/) { _log_likelihood += m.likelihood(weight, yact[0], ds); } if(!ArrayUtils.hasNaNsOrInfs(ds) && !ArrayUtils.hasNaNsOrInfs(yact)) { if(_glmf._family == Family.multinomial || _glmf._family == Family.ordinal) add2(yact[0], ds, weight, offset); else if (_glmf._family == Family.binomial || _glmf._family == Family.quasibinomial || _glmf._family.equals(Family.fractionalbinomial)) add2(yact[0], ds[2], weight, offset); else add2(yact[0], ds[0], weight, offset); } return ds; } // public GLMValidation(Key dataKey, double ymu, GLMParameters glm, int rank){ // _rank = 
rank; // _ymu = ymu; // _glm = glm; // _auc_bldr = (glm._family == Family.binomial) ? new AUC2.AUCBuilder(AUC2.NBINS) : null; // this.dataKey = dataKey; // } // @Override public double[] perRow(double ds[], float[] yact, Model m, double[] mean) { // super.perRow(ds, yact, m, mean); // return ds; // Flow coding // } transient double [] _ds = new double[3]; transient float [] _yact = new float[1]; public void add(double yreal, double [] ymodel, double weight, double offset) { if(weight == 0)return; _yact[0] = (float) yreal; if(_computeMetrics) _metricBuilder.perRow(ymodel, _yact, weight, offset, null); add2(yreal, ymodel, weight, offset ); } public void add(double yreal, double ymodel, double weight, double offset) { if(weight == 0)return; _yact[0] = (float) yreal; if(_glmf._family == Family.binomial || _glmf._family == Family.quasibinomial) { _ds[1] = 1 - ymodel; _ds[2] = ymodel; } else { _ds[0] = ymodel; } if(_computeMetrics) { assert (!(_metricBuilder instanceof MetricBuilderMultinomial) && !(_metricBuilder instanceof MetricBuilderOrdinal)):"using incorrect add call for multinomial/ordinal"; _metricBuilder.perRow(_ds, _yact, weight, offset, null); } add2(yreal, ymodel, weight, offset ); } private void add2(double yreal, double ymodel [] , double weight, double offset) { _wcount += weight; ++_nobs; int c = (int)yreal; residual_deviance -= 2 * weight * Math.log(ymodel[c+1]); null_devince -= 2 * weight * Math.log(_ymu[c]); } private void add2(double yreal, double ymodel, double weight, double offset) { _wcount += weight; ++_nobs; residual_deviance += weight * _glmf.deviance(yreal, ymodel); if(offset == 0) null_devince += weight * _glmf.deviance(yreal, _ymu[0]); else null_devince += weight * _glmf.deviance(yreal, _glmf.linkInv(offset +_glmf.link(_ymu[0]))); if (_glmf._family == Family.poisson) { // AIC for poisson long y = Math.round(yreal); double logfactorial = MathUtils.logFactorial(y); _aic2 += weight * (yreal * Math.log(ymodel) - logfactorial - ymodel); } } public void reduce(GLMMetricBuilder v){ if(_computeMetrics) _metricBuilder.reduce(v._metricBuilder); residual_deviance += v.residual_deviance; null_devince += v.null_devince; _log_likelihood += v._log_likelihood; _nobs += v._nobs; _aic2 += v._aic2; _wcount += v._wcount; } public final double residualDeviance() { return residual_deviance;} public final long nullDOF() { return _nobs - (_intercept?1:0);} public final long resDOF() { if (_glmf._family == Family.ordinal) // rank counts all non-zero multinomial coeffs: nclasses-1 sets of non-zero coeffss return _nobs-(_rank/(_nclasses-1)+_nclasses-2); // rank/nclasses-1 represent one beta plus one intercept. Need nclasses-2 more intercepts. 
else return _nobs - _rank; } protected void computeAIC(GLMModel gm) { if (gm._parms._calc_like && gm._finalScoring) { // uses likelihood which is calculated for the final scoring _aic = 2 * _log_likelihood + 2 * Arrays.stream(gm.beta()).filter(b -> b != 0).count(); } else { // original calculation for the model build _aic = 0; switch (_glmf._family) { case gaussian: _aic = _nobs * (Math.log(residual_deviance / _nobs * 2 * Math.PI) + 1) + 2; break; case quasibinomial: case binomial: case fractionalbinomial: _aic = residual_deviance; break; case poisson: _aic = -2 * _aic2; break; // AIC is set during the validation task case gamma: _aic = Double.NaN; break; case ordinal: case tweedie: case multinomial: _aic = Double.NaN; break; case negativebinomial: _aic = 2 * _log_likelihood; break; default: assert false : "missing implementation for family " + _glmf._family; } _aic += 2 * _rank; } } @Override public ModelMetrics makeModelMetrics(Model m, Frame f, Frame adaptedFrame, Frame preds) { GLMModel gm = (GLMModel) m; computeAIC(gm); ModelMetrics metrics = _metricBuilder.makeModelMetrics(gm, f, null, null); if (_glmf._family == Family.binomial || _glmf._family == Family.quasibinomial || _glmf._family == Family.fractionalbinomial) { ModelMetricsBinomial metricsBinommial = (ModelMetricsBinomial) metrics; GainsLift gl = null; if (preds != null) { Vec resp = f.vec(m._parms._response_column); Vec weights = f.vec(m._parms._weights_column); if (resp != null && Family.fractionalbinomial != _glmf._family) { // don't calculate for frac binomial gl = new GainsLift(preds.lastVec(), resp, weights); gl._groups = m._parms._gainslift_bins; gl.exec(m._output._job); } } metrics = new ModelMetricsBinomialGLM(m, f, metrics._nobs, metrics._MSE, _domain, metricsBinommial._sigma, metricsBinommial._auc, metricsBinommial._logloss, residualDeviance(), null_devince, _aic, nullDOF(), resDOF(), gl, _customMetric, _log_likelihood); } else if (_glmf._family == Family.multinomial) { ModelMetricsMultinomial metricsMultinomial = (ModelMetricsMultinomial) metrics; metrics = new ModelMetricsMultinomialGLM(m, f, metricsMultinomial._nobs, metricsMultinomial._MSE, metricsMultinomial._domain, metricsMultinomial._sigma, metricsMultinomial._cm, metricsMultinomial._hit_ratios, metricsMultinomial._logloss, residualDeviance(), null_devince, _aic, nullDOF(), resDOF(), metricsMultinomial._auc, _customMetric, _log_likelihood); } else if (_glmf._family == Family.ordinal) { // ordinal should have a different resDOF() ModelMetricsOrdinal metricsOrdinal = (ModelMetricsOrdinal) metrics; metrics = new ModelMetricsOrdinalGLM(m, f, metricsOrdinal._nobs, metricsOrdinal._MSE, metricsOrdinal._domain, metricsOrdinal._sigma, metricsOrdinal._cm, metricsOrdinal._hit_ratios, metricsOrdinal._logloss, residualDeviance(), null_devince, _aic, nullDOF(), resDOF(), _customMetric, _log_likelihood); } else { ModelMetricsRegression metricsRegression = (ModelMetricsRegression) metrics; metrics = new ModelMetricsRegressionGLM(m, f, metricsRegression._nobs, metricsRegression._MSE, metricsRegression._sigma, metricsRegression._mean_absolute_error, metricsRegression._root_mean_squared_log_error, residualDeviance(), residualDeviance() / _wcount, null_devince, _aic, nullDOF(), resDOF(), _customMetric, _log_likelihood); } return gm.addModelMetrics(metrics); // Update the metrics in-place with the GLM version, do DKV.put } }
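For the Poisson family, the metric builder above accumulates a per-row log-likelihood term w * (y * log(mu) - log(y!) - mu) in _aic2 and later reports AIC = -2 * _aic2 + 2 * rank. A self-contained sketch of that bookkeeping follows; the class PoissonAicDemo, its helper names, and the sample data are illustrative only, not part of H2O.

// Standalone illustration (not H2O code): Poisson AIC bookkeeping analogous to
// GLMMetricBuilder.add2(...) and computeAIC(...) for the Poisson family.
public class PoissonAicDemo {
  // log(y!) computed by direct summation; adequate for the small counts used here
  static double logFactorial(long y) {
    double s = 0;
    for (long i = 2; i <= y; ++i) s += Math.log(i);
    return s;
  }

  // AIC = -2 * sum_i w_i * (y_i*log(mu_i) - log(y_i!) - mu_i) + 2 * rank
  static double poissonAic(double[] y, double[] mu, double[] w, int rank) {
    double logLik = 0;
    for (int i = 0; i < y.length; ++i)
      logLik += w[i] * (y[i] * Math.log(mu[i]) - logFactorial(Math.round(y[i])) - mu[i]);
    return -2 * logLik + 2 * rank;
  }

  public static void main(String[] args) {
    double[] y  = {1, 0, 3, 2};          // observed counts
    double[] mu = {1.2, 0.4, 2.5, 1.8};  // model predictions
    double[] w  = {1, 1, 1, 1};          // row weights
    int rank = 3;                        // number of estimated (non-zero) coefficients
    System.out.println("Poisson AIC = " + poissonAic(y, mu, w, rank));
  }
}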
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/glm/GLMModel.java
package hex.glm; import hex.*; import hex.DataInfo.TransformType; import hex.api.MakeGLMModelHandler; import hex.deeplearning.DeepLearningModel; import hex.genmodel.utils.DistributionFamily; import hex.glm.GLMModel.GLMParameters.Family; import hex.glm.GLMModel.GLMParameters.Link; import hex.util.EffectiveParametersUtils; import org.apache.commons.math3.distribution.NormalDistribution; import org.apache.commons.math3.distribution.RealDistribution; import org.apache.commons.math3.distribution.TDistribution; import org.apache.commons.math3.special.Gamma; import water.*; import water.codegen.CodeGenerator; import water.codegen.CodeGeneratorPipeline; import water.exceptions.JCodeSB; import water.fvec.Chunk; import water.fvec.Frame; import water.fvec.NewChunk; import water.fvec.Vec; import water.udf.CFuncRef; import water.util.*; import java.io.Serializable; import java.util.*; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.Stream; import static hex.DistributionFactory.LogExpUtil.log; import static hex.genmodel.utils.ArrayUtils.flat; import static hex.glm.ComputationState.expandToFullArray; import static hex.glm.GLMModel.GLMOutput.calculatePValuesFromZValues; import static hex.glm.GLMModel.GLMOutput.calculateStdErrFromZValues; import static hex.glm.GLMUtils.genGLMParameters; import static hex.modelselection.ModelSelectionUtils.extractPredictorNames; import static hex.schemas.GLMModelV3.GLMModelOutputV3.calculateVarimpMultinomial; import static hex.schemas.GLMModelV3.calculateVarimpBase; import static hex.util.DistributionUtils.distributionToFamily; import static hex.util.DistributionUtils.familyToDistribution; import static java.util.stream.Collectors.toMap; /** * Created by tomasnykodym on 8/27/14. */ public class GLMModel extends Model<GLMModel,GLMModel.GLMParameters,GLMModel.GLMOutput> implements Model.Contributions{ final static public double _EPS = 1e-6; final static public double _OneOEPS = 1e6; public GLMModel(Key selfKey, GLMParameters parms, GLM job, double [] ymu, double ySigma, double lambda_max, long nobs) { super(selfKey, parms, job == null?new GLMOutput():new GLMOutput(job)); _ymu = ymu; _ySigma = ySigma; _lambda_max = lambda_max; _nobs = nobs; _nullDOF = nobs - (parms._intercept?1:0); } public Frame getRIDFrame() { if (_output._regression_influence_diagnostics != null) return DKV.getGet(_output._regression_influence_diagnostics); else return null; } @Override public void initActualParamValues() { super.initActualParamValues(); EffectiveParametersUtils.initFoldAssignment(_parms); } public ScoreKeeper[] scoreKeepers() { int size = scoringInfo==null?0:scoringInfo.length; ScoreKeeper[] sk = new ScoreKeeper[size]; for (int i=0;i<size;++i) { if (scoringInfo[i].cross_validation) // preference is to use xval first, then valid and last train. 
sk[i] = scoringInfo[i].scored_xval; else if (scoringInfo[i].validation) sk[i] = scoringInfo[i].scored_valid; else sk[i] = scoringInfo[i].scored_train; } return sk; } public ScoringInfo[] getScoringInfo() { return scoringInfo;} public void addScoringInfo(GLMParameters parms, int nclasses, long currTime, int iter) { if (scoringInfo != null && (((GLMScoringInfo) scoringInfo[scoringInfo.length-1]).iterations() >= iter)) { // no duplication return; } GLMScoringInfo currInfo = new GLMScoringInfo(); currInfo.is_classification = nclasses > 1; currInfo.validation = parms.valid() != null; currInfo.cross_validation = parms._nfolds > 1; currInfo.iterations = iter; currInfo.time_stamp_ms = scoringInfo==null?_output._start_time:currTime; currInfo.total_training_time_ms = _output._training_time_ms; if (_output._training_metrics != null) { currInfo.scored_train = new ScoreKeeper(Double.NaN); currInfo.scored_train.fillFrom(_output._training_metrics); } if (_output._validation_metrics != null) { currInfo.scored_valid = new ScoreKeeper(Double.NaN); currInfo.scored_valid.fillFrom(_output._validation_metrics); } scoringInfo = ScoringInfo.prependScoringInfo(currInfo, scoringInfo); } public void setVcov(double[][] inv) {_output._vcov = inv;} /*** * This method will calculate the variable inflation factor of each numerical predictor using the following * procedure: * 1. For each numerical predictor, choose it as the response variable and leave all the other predictors as the * predictors; * 2. Build a GLM model and obtained the R2 of the model; * 3. the variable inflation factor for that predictor is calculated as 1.0/(1.0-R2). * * All the variable inflation factors for all valid predictors are saved in a double array. No variable inflation * factors are generated for non-numerical predictors. * */ public String[] buildVariableInflationFactors(Frame train, DataInfo dinfo) { String[] predictorNames = extractPredictorNames(_parms, dinfo, _parms._fold_column); // only calculate VIF for numerical columns String[] vifPredictors = getVifPredictors(train, _parms, dinfo); return buildVariableInflationFactors(_parms, vifPredictors, predictorNames); } static String[] getVifPredictors(Frame train, GLMParameters parms, DataInfo dinfo) { String[] predictorNames = extractPredictorNames(parms, dinfo, parms._fold_column); return Stream.of(predictorNames) .filter(x -> train.find(x) >= 0 && train.vec(x).isNumeric()) .toArray(String[]::new); } public String[] buildVariableInflationFactors(GLMParameters parms, String[] validPredictors, String[] predictorNames) { // set variable inflation factors to NaN to start with _output._variable_inflation_factors = IntStream.range(0, validPredictors.length).mapToDouble(x -> Double.NaN).boxed(). 
collect(Collectors.toList()).stream().mapToDouble(Double::doubleValue).toArray(); GLMParameters[] allParams = genGLMParameters(parms, validPredictors, predictorNames); GLM[] glmBuilder = Stream.of(allParams).map(x -> new GLM(x)).collect(Collectors.toList()).stream().toArray(GLM[]::new); int parallelization = nVIFModelsInParallel(parms); GLM[] glmResults = ModelBuilderHelper.trainModelsParallel(glmBuilder, parallelization); Double[] r2 = Arrays.stream(glmResults).mapToDouble(x ->x.get().r2()).boxed().collect(Collectors.toList()).stream() .toArray(Double[]::new); for (GLM glm : glmResults) glm.get().remove(); _output._variable_inflation_factors = IntStream.range(0, validPredictors.length) .mapToDouble(x -> 1.0 / (1.0 - r2[x])).boxed().collect(Collectors.toList()).stream() .mapToDouble(Double::doubleValue).toArray(); return validPredictors; } static int nVIFModelsInParallel(GLMParameters parms) { if (parms._is_cv_model) { // CV is already parallelized return 1; } String userSpec = H2O.getSysProperty("glm.vif." + parms._train + ".nparallelism", null); if (userSpec != null) { try { final int vifParallelization = Integer.parseInt(userSpec); if (vifParallelization <= 0 || vifParallelization > H2O.ARGS.nthreads) { Log.warn("Ignoring user-specified parallelization level for VIF calculation. " + "Value '" + userSpec + "' is out of range (0, nthreads]."); } return vifParallelization; } catch (Exception e) { Log.err("Invalid user-specified parallelization level. Cannot parse value '" + userSpec + "' as a number.", e); } } Frame train = parms.train(); if (train != null && train.byteSize() < 1e6) { return H2O.ARGS.nthreads; // VIF is relatively lightweight, on small data run all concurrently } return 2; // same strategy as in CV of "big data" - run 2 models concurrently } class GLMContributionsWithBackground extends ContributionsWithBackgroundFrameTask<GLMContributionsWithBackground> { double[] _betas; DataInfo _dinfo; boolean _compact; boolean _outputSpace; GLMContributionsWithBackground(DataInfo dinfo, double[] betas, Key<Frame> frameKey, Key<Frame> backgroundFrameKey, boolean perReference, boolean compact, boolean outputSpace) { super(frameKey, backgroundFrameKey, perReference); _dinfo = dinfo; _betas = betas; _compact = compact; _outputSpace = outputSpace; } @Override public void map(Chunk[] cs, Chunk[] bgCs, NewChunk[] ncs) { DataInfo.Row row = _dinfo.newDenseRow(); DataInfo.Row bgRow = _dinfo.newDenseRow(); int idx; double[] result = MemoryManager.malloc8d(ncs.length - 1); // used only when output_format == compact double transformationRatio = 1; // used for transforming to output space for (int j = 0; j < cs[0]._len; j++) { _dinfo.extractDenseRow(cs, j, row); for (int k = 0; k < bgCs[0]._len; k++) { _dinfo.extractDenseRow(bgCs, k, bgRow); Arrays.fill(result, 0); double biasTerm = (_dinfo._intercept ? _betas[_betas.length - 1] : 0); for (int i = 0; i < _betas.length - (_dinfo._intercept ? 1 : 0); i++) { idx = _compact ? _dinfo.coefOriginalColumnIndices()[i] : i; result[idx] += _betas[i] * (row.get(i) - bgRow.get(i)); biasTerm += _betas[i] * bgRow.get(i); } if (_outputSpace) { final double linkSpaceX = Arrays.stream(result).sum()+biasTerm; final double linkSpaceBg = biasTerm; final double outSpaceX = _parms.linkInv(linkSpaceX); final double outSpaceBg = _parms.linkInv(linkSpaceBg); transformationRatio = Math.abs(linkSpaceX - linkSpaceBg) < 1e-6 ? 
0 : (outSpaceX - outSpaceBg) / (linkSpaceX - linkSpaceBg); biasTerm = outSpaceBg; } for (int i = 0; i < result.length; i++) { ncs[i].addNum(result[i] * transformationRatio); } ncs[ncs.length - 1].addNum(biasTerm); } } } } @Override public Frame scoreContributions(Frame frame, Key<Frame> destination_key, Job<Frame> j, ContributionsOptions options, Frame backgroundFrame) { assert GLMParameters.GLMType.glm.equals(_parms._glmType); if (ContributionsOutputFormat.Compact.equals(options._outputFormat)) { if (_parms._interactions != null && _parms._interactions.length > 0) { throw H2O.unimpl("SHAP for GLM with interactions is not supported"); } } if (null == backgroundFrame) throw H2O.unimpl("GLM supports contribution calculation only with a background frame."); Log.info("Starting contributions calculation for " + this._key + "..."); try (Scope.Safe s = Scope.safe(frame, backgroundFrame)) { Frame adaptedBgFrame = adaptFrameForScore(backgroundFrame, false); Frame adaptedFrame = adaptFrameForScore(frame, false); DKV.put(adaptedBgFrame); DKV.put(adaptedFrame); DataInfo dinfo = _output._dinfo.clone(); dinfo._adaptedFrame = adaptedFrame; GLMContributionsWithBackground contributions = new GLMContributionsWithBackground(dinfo, _parms._standardize ? _output.getNormBeta() : _output.beta(), adaptedFrame._key, adaptedBgFrame._key, options._outputPerReference, ContributionsOutputFormat.Compact.equals(options._outputFormat), options._outputSpace); String[] colNames = new String[ContributionsOutputFormat.Compact.equals(options._outputFormat) ? dinfo.coefOriginalNames().length + 1 // +1 for bias term : beta().length + (_output._dinfo._intercept ? 0 : 1)]; System.arraycopy(ContributionsOutputFormat.Compact.equals(options._outputFormat) ? Arrays.stream(dinfo.coefOriginalNames()).map(name -> { if (!dinfo._useAllFactorLevels && name.contains(".") && frame.find(name) == -1) { // We can have binary vars encoded as single variable + some contribution in intercept; we need to convert // the name of the encoded variable to the original variable name (e.g., sex.female -> sex) for (int i = 0; i < name.length(); i++) { name = name.substring(0, name.lastIndexOf(".")); if (frame.find(name) >= 0) return name; } } return name; }).toArray(String[]::new) : _output._coefficient_names, 0, colNames, 0, colNames.length - 1); colNames[colNames.length - 1] = "BiasTerm"; return Scope.untrack(contributions.runAndGetOutput(j, destination_key, colNames)); } finally { Log.info("Finished contributions calculation for " + this._key + "..."); } } public static class RegularizationPath extends Iced { public double [] _lambdas; public double[] _alphas; public double [] _explained_deviance_train; public double [] _explained_deviance_valid; public double [][] _coefficients; public double [][] _coefficients_std; public String [] _coefficient_names; public double [][] _z_values; public double [][] _p_values; public double [][] _std_errs; } // go through all submodels, copy lambda, alpha, coefficient values and deviance value\s public RegularizationPath getRegularizationPath() { // will be invoked even without lambda_search=true RegularizationPath rp = new RegularizationPath(); rp._coefficient_names = _output._coefficient_names; int N = _output._submodels.length; int P = _output._dinfo.fullN() + 1; if(_parms._family == Family.multinomial || _parms._family == Family.ordinal){ String [] classNames = _output._domains[_output._domains.length-1]; String [] coefNames = new String[P*_output.nclasses()]; for(int c = 0; c < _output.nclasses(); ++c){ for(int i 
= 0; i < P; ++i) coefNames[c*P+i] = _output._coefficient_names[i] + "_" + classNames[c]; } rp._coefficient_names = coefNames; P*=_output.nclasses(); } rp._lambdas = new double[N]; rp._alphas = new double[N]; rp._coefficients = new double[N][]; rp._explained_deviance_train = new double[N]; if (_parms._valid != null) rp._explained_deviance_valid = new double[N]; if (_parms._standardize) rp._coefficients_std = new double[N][]; if (_parms._compute_p_values) { rp._z_values = new double[N][]; rp._p_values = new double[N][]; rp._std_errs = new double[N][]; } for (int i = 0; i < N; ++i) { Submodel sm = _output._submodels[i]; rp._lambdas[i] = sm.lambda_value; rp._alphas[i] = sm.alpha_value; rp._coefficients[i] = sm.getBeta(MemoryManager.malloc8d(P)); if (_parms._standardize) { rp._coefficients_std[i] = rp._coefficients[i]; rp._coefficients[i] = _output._dinfo.denormalizeBeta(rp._coefficients_std[i]); } if (_parms._compute_p_values) { // need to expand vectors to be of size numcols rp._z_values[i] = sm.getZValues(MemoryManager.malloc8d(P)); rp._p_values[i] = sm.pValues(rp._z_values[i], _output._training_metrics.residual_degrees_of_freedom()); rp._std_errs[i] = sm.stdErr(rp._z_values[i], rp._coefficients[i]); } rp._explained_deviance_train[i] = 1 - (_output._training_metrics._nobs*sm.devianceTrain)/((GLMMetrics)_output._training_metrics).null_deviance(); if (rp._explained_deviance_valid != null) rp._explained_deviance_valid[i] = 1 - _output._validation_metrics._nobs*sm.devianceValid /((GLMMetrics)_output._validation_metrics).null_deviance(); } return rp; } @Override protected boolean toJavaCheckTooBig() { if(beta() != null && beta().length > 10000) { Log.warn("toJavaCheckTooBig must be overridden for this model type to render it in the browser"); return true; } return false; } public DataInfo dinfo() { return _output._dinfo; } private int rank(double [] ds) { int res = 0; for(double d:ds) if(d != 0) ++res; return res; } @Override public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) { if(domain == null && (_parms._family == Family.binomial || _parms._family == Family.quasibinomial || _parms._family == Family.fractionalbinomial)) domain = binomialClassNames; return new GLMMetricBuilder(domain, _ymu, new GLMWeightsFun(_parms), _output.bestSubmodel().rank(), true, _parms._intercept, _parms._auc_type); } protected double [] beta_internal(){ if(_parms._family == Family.multinomial || _parms._family == Family.ordinal) return flat(_output._global_beta_multinomial); return _output._global_beta; } public double [] beta() { return beta_internal();} public double [] beta(double lambda) { for(int i = 0 ; i < _output._submodels.length; ++i) if(_output._submodels[i].lambda_value == lambda) return _output._dinfo.denormalizeBeta(_output._submodels[i].getBeta(MemoryManager.malloc8d(_output._dinfo.fullN()+1))); throw new RuntimeException("no such lambda value, lambda = " + lambda); } public double [] beta_std(double lambda) { for(int i = 0 ; i < _output._submodels.length; ++i) if(_output._submodels[i].lambda_value == lambda) return _output._submodels[i].getBeta(MemoryManager.malloc8d(_output._dinfo.fullN()+1)); throw new RuntimeException("no such lambda value, lambda = " + lambda); } public String [] names(){ return _output._names;} @Override public double deviance(double w, double y, double f) { if (w == 0) { return 0; } else { return w*_parms.deviance(y,f); } } @Override public double likelihood(double w, double y, double[] f) { if (w == 0) { return 0; } else if (_finalScoring){ // time-consuming 
calculation for the final scoring return _parms.likelihood(w, y, f); } else { // optimized calculation for model build return w*(_parms.likelihood(y, f[0])); } } public GLMModel addSubmodel(int idx, Submodel sm) { // copy from checkpoint model if (_output._submodels != null && _output._submodels.length > idx) { _output._submodels[idx] = sm; } else { assert _output._submodels == null || idx == _output._submodels.length; _output._submodels = ArrayUtils.append(_output._submodels, sm); } _output.setSubmodelIdx(idx, _parms); return this; } public GLMModel updateSubmodel(int idx, Submodel sm) { assert sm.lambda_value == _output._submodels[idx].lambda_value && sm.alpha_value == _output._submodels[idx].alpha_value; _output._submodels[idx] = sm; return this; } public void update(double [] beta, double devianceTrain, double devianceTest,int iter){ int id = _output._submodels.length-1; _output._submodels[id] = new Submodel(_output._submodels[id].lambda_value,_output._submodels[id].alpha_value,beta, iter, devianceTrain, devianceTest, _output._totalBetaLength, _output._submodels[id].zValues, _output._submodels[id].dispersionEstimated); _output.setSubmodelIdx(id, _parms); } public void update(double [] beta, double[] ubeta, double devianceTrain, double devianceTest,int iter){ int id = _output._submodels.length-1; Submodel sm = new Submodel(_output._submodels[id].lambda_value,_output._submodels[id].alpha_value,beta,iter, devianceTrain, devianceTest, _output._totalBetaLength, _output._submodels[id].zValues, _output._submodels[id].dispersionEstimated); _output._submodels[id] = sm; _output.setSubmodelIdx(id, _parms); } protected GLMModel deepClone(Key<GLMModel> result) { GLMModel newModel = IcedUtils.deepCopy(this); newModel._key = result; // Do not clone model metrics newModel._output.clearModelMetrics(false); newModel._output._training_metrics = null; newModel._output._validation_metrics = null; return newModel; } public static class GLMParameters extends Model.Parameters { static final String[] CHECKPOINT_NON_MODIFIABLE_FIELDS = {"_response_column", "_family", "_solver"}; final static double LOG2PI = Math.log(2 * Math.PI); public enum MissingValuesHandling { MeanImputation, PlugValues, Skip } public enum Constraints {EqualTo, LessThanEqualTo}; public String algoName() { return "GLM"; } public String fullName() { return "Generalized Linear Modeling"; } public String javaName() { return GLMModel.class.getName(); } @Override public long progressUnits() { return GLM.WORK_TOTAL; } public boolean _standardize = true; public boolean _useDispersion1 = false; // internal use only, not for users public Family _family; public Link _link; public Solver _solver = Solver.AUTO; public double _tweedie_variance_power; public double _tweedie_link_power; public double _dispersion_estimated; public double _theta; // 1/k and is used by negative binomial distribution only public double _invTheta; public double [] _alpha; public double [] _lambda; public double[] _startval; // initialize GLM coefficients public boolean _calc_like; public int[] _random_columns; public int _score_iteration_interval = -1; // Has to be Serializable for backwards compatibility (used to be DeepLearningModel.MissingValuesHandling) public Serializable _missing_values_handling = MissingValuesHandling.MeanImputation; public double _prior = -1; public boolean _lambda_search = false; public boolean _cold_start = false; // start GLM model from scratch if true public int _nlambdas = -1; public boolean _non_negative = false; public double _lambda_min_ratio = 
-1; // special public boolean _use_all_factor_levels = false; public int _max_iterations = -1; public boolean _intercept = true; public double _beta_epsilon = 1e-4; public double _dispersion_epsilon = 1e-4; // 1e-4 for gamma, 1e-4 for tweedie public int _max_iterations_dispersion = 3000; public double _objective_epsilon = -1; // -1 to set to default public double _gradient_epsilon = -1; // -1 to set to default public double _obj_reg = -1; public boolean _compute_p_values = false; public boolean _remove_collinear_columns = false; public String[] _interactions=null; public StringPair[] _interaction_pairs=null; public boolean _early_stopping = true; public Key<Frame> _beta_constraints = null; public Key<Frame> _linear_constraints = null; public boolean _expose_constraints = false; // internal parameter for testing only. public Key<Frame> _plug_values = null; // internal parameter, handle with care. GLM will stop when there is more than this number of active predictors (after strong rule screening) public int _max_active_predictors = -1; public boolean _stdOverride; // standardization override by beta constraints final static NormalDistribution _dprobit = new NormalDistribution(0,1); // get the normal distribution public GLMType _glmType = GLMType.glm; public boolean _generate_scoring_history = false; // if true, will generate scoring history but will slow algo down public DispersionMethod _dispersion_parameter_method = DispersionMethod.pearson; public double _init_dispersion_parameter = 1.0; public boolean _fix_dispersion_parameter = false; public boolean _build_null_model = false; public boolean _generate_variable_inflation_factors = false; // if enabled, will generate variable_inflation_factors for numeric predictors public double _tweedie_epsilon = 8e-17; // paper suggests 8e-17 public boolean _fix_tweedie_variance_power = true; public int _max_series_index = 5000; public boolean _debugTDispersionOnly = false; // debug only and will slow down model building public double _dispersion_learning_rate = 0.5; public Influence _influence; // if set to dfbetas will calculate the difference of betas obtained from including and excluding a data row public boolean _keepBetaDiffVar = false; // if true, will keep the frame generating the beta without from i and the variance estimation boolean _testCSZeroGram = false; // internal parameter, to test zero gram dropped column is correctly implemented public boolean _separate_linear_beta = false; // if true, will perform the beta and linear constraint separately public boolean _init_optimal_glm = false; // only used when there is linear constraints public double _constraint_eta0 = 0.1258925; // eta_k = constraint_eta0/pow(constraint_c0, constraint_alpha) public double _constraint_tau = 10; // ck+1 = tau*ck public double _constraint_alpha = 0.1; // eta_k = constraint_eta_0/pow(constraint_c0, constraint_alpha) public double _constraint_beta = 0.9; // eta_k+1 = eta_k/pow(c_k, beta) public double _constraint_c0 = 10; // set initial epsilon k as 1/c0 public void validate(GLM glm) { if (_remove_collinear_columns) { if (!(Solver.IRLSM.equals(_solver) || Solver.AUTO.equals(_solver))) glm.warn("remove_collinear_columns", "remove_collinear_columns only works when IRLSM (or " + "AUTO which will default to IRLSM when remove_collinear_columns=true) is chosen as the solver. " + "Otherwise, remove_collinear_columns is not enabled."); if (_lambda_search) { glm.warn("remove_collinear_columns", "remove_collinear_columns should only be used with " + "no regularization, i.e. 
lambda=0.0. It is used improperly here with lambda_search. " + "Please disable lambda_search and set lambda=0."); } else if (_lambda != null) { boolean nonZeroLambda = Arrays.stream(_lambda).sum() > 0; if (nonZeroLambda) glm.warn("remove_collinear_columns", "remove_collinear_columns should only be used with " + "no regularization, i.e. lambda=0.0. It is used improperly here. Please set lambda=0."); } } if (_solver.equals(Solver.COORDINATE_DESCENT_NAIVE) && _family.equals(Family.multinomial)) throw H2O.unimpl("Naive coordinate descent is not supported for multinomial."); if ((_lambda != null) && _lambda_search) glm.warn("lambda_search", "disabled when user specified any lambda value(s)."); if(_alpha != null && (1 < _alpha[0] || _alpha[0] < 0)) glm.error("_alpha","alpha parameter must be in the (inclusive) [0,1] range"); if(_compute_p_values && _solver != Solver.AUTO && _solver != Solver.IRLSM) glm.error("_compute_p_values","P values can only be computed with IRLSM solver, got solver = " + _solver); if(_compute_p_values && (_family == Family.multinomial || _family==Family.ordinal)) glm.error("_compute_p_values","P values are currently not supported for " + "family=multinomial or ordinal"); if(_compute_p_values && _non_negative) glm.error("_compute_p_values","P values are currently not supported with " + "non_negative=true"); if(_weights_column != null && _offset_column != null && _weights_column.equals(_offset_column)) glm.error("_offset_column", "Offset must be different from weights"); if(_alpha != null && (_alpha[0] < 0 || _alpha[0] > 1)) glm.error("_alpha", "Alpha value must be between 0 and 1"); if(_lambda != null && _lambda[0] < 0) glm.error("_lambda", "Lambda value must be >= 0"); if(_obj_reg != -1 && _obj_reg <= 0) glm.error("obj_reg","Must be positive or -1 for default"); if(_prior != -1 && (_prior <= 0 || _prior >= 1)) glm.error("_prior","Prior must be in (exclusive) range (0,1)"); if(_prior != -1 && _family != Family.binomial) glm.error("_prior","Prior is only allowed with family = binomial."); if(_family != Family.tweedie) { glm.hide("_tweedie_variance_power","Only applicable with Tweedie family"); glm.hide("_tweedie_link_power","Only applicable with Tweedie family"); } if(_family != Family.negativebinomial) { glm.hide("_theta","Only applicable with Negative Binomial family"); } if(_remove_collinear_columns && !_intercept) glm.error("_intercept","Remove collinear columns option is currently not supported without intercept"); if(_beta_constraints != null) { if(_family == Family.multinomial || _family==Family.ordinal) glm.error("beta_constraints","beta constraints are not supported for " + "family = multinomial or ordinal"); Frame f = _beta_constraints.get(); if(f == null) glm.error("beta_constraints","Missing frame for beta constraints"); Vec v = f.vec("names"); if(v == null)glm.error("beta_constraints","Beta constraints parameter must have names column with valid coefficient names"); // todo: check the coefficient names
v = f.vec("upper_bounds"); if(v != null && !v.isNumeric()) glm.error("beta_constraints","upper_bounds must be numeric if present"); v = f.vec("lower_bounds"); if(v != null && !v.isNumeric()) glm.error("beta_constraints","lower_bounds must be numeric if present"); v = f.vec("beta_given"); if(v != null && !v.isNumeric()) glm.error("beta_constraints","beta_given must be numeric if present"); v = f.vec("beta_start"); if(v != null && !v.isNumeric()) glm.error("beta_constraints","beta_start must be numeric if present"); } if(!_lambda_search) { glm.hide("_lambda_min_ratio", "only applies if lambda search is on."); glm.hide("_nlambdas", "only applies if lambda search is on."); glm.hide("_early_stopping","only applies if lambda search is on."); } if (_family==Family.ordinal) { if (!_intercept) glm.error("Ordinal regression", "must have an intercept. Set _intercept to true."); if (!(_solver.equals(Solver.AUTO) || _solver.equals(Solver.GRADIENT_DESCENT_SQERR) || _solver.equals(Solver.GRADIENT_DESCENT_LH))) glm.error("Ordinal regression","Ordinal regression only supports gradient descent. " + "Do not set Solver or set Solver to AUTO, GRADIENT_DESCENT_LH or GRADIENT_DESCENT_SQERR."); if (_lambda_search) glm.error("ordinal regression", "Ordinal regression does not support lambda search."); } if(_link != Link.family_default) { // check we have a compatible link
switch (_family) { case AUTO: if (_link != Link.family_default && _link != Link.identity && _link != Link.log && _link != Link.inverse && _link != Link.logit && _link != Link.multinomial) throw new IllegalArgumentException("Incompatible link function for selected family. Only family_default, identity, log, inverse, logit and multinomial are allowed for family=AUTO"); break; case gaussian: if (_link != Link.identity && _link != Link.log && _link != Link.inverse) throw new IllegalArgumentException("Incompatible link function for selected family. Only identity, log and inverse links are allowed for family=gaussian."); break; case quasibinomial: case binomial: case fractionalbinomial: if (_link != Link.logit) // fixme: R also allows log, but it's not clear when it can be applied and what we should do in case the predictions are outside of 0/1.
throw new IllegalArgumentException("Incompatible link function for selected family. Only logit is allowed for family=" + _family + ". Got " + _link); break; case poisson: case negativebinomial: if (_link != Link.log && _link != Link.identity) throw new IllegalArgumentException("Incompatible link function for selected family. Only log and " + "identity links are allowed for family=poisson and family=negativebinomial."); break; case gamma: if (_link != Link.inverse && _link != Link.log && _link != Link.identity) throw new IllegalArgumentException("Incompatible link function for selected family. Only inverse, log and identity links are allowed for family=gamma."); break; case tweedie: if (_link != Link.tweedie) throw new IllegalArgumentException("Incompatible link function for selected family. Only tweedie link allowed for family=tweedie."); break; case multinomial: if(_link != Link.multinomial) throw new IllegalArgumentException("Incompatible link function for selected family. Only multinomial link allowed for family=multinomial."); break; case ordinal: if (_link != Link.ologit && _link!=Link.oprobit && _link!=Link.ologlog) throw new IllegalArgumentException("Incompatible link function for selected family. 
Only ologit, oprobit or ologlog links allowed for family=ordinal."); break; default: H2O.fail(); } } if (_missing_values_handling != null) { if (!(_missing_values_handling instanceof DeepLearningModel.DeepLearningParameters.MissingValuesHandling) && !(_missing_values_handling instanceof MissingValuesHandling)) { throw new IllegalArgumentException("Missing values handling should be specified as an instance of " + MissingValuesHandling.class.getName()); } } } public GLMParameters() { this(Family.AUTO, Link.family_default); assert _link == Link.family_default; _stopping_rounds = 0; // early-stopping is disabled by default } public GLMParameters(Family f){this(f,f.defaultLink);} public GLMParameters(Family f, Link l){this(f,l, null, null, 0, 1);} public GLMParameters(Family f, Link l, double[] lambda, double[] alpha, double twVar, double twLnk) { this(f,l,lambda,alpha,twVar,twLnk,null); } public GLMParameters(Family f, Link l, double [] lambda, double [] alpha, double twVar, double twLnk, String[] interactions) { this(f,l,lambda,alpha,twVar,twLnk,interactions,GLMTask.EPS); } public GLMParameters(Family f, Link l, double [] lambda, double [] alpha, double twVar, double twLnk, String[] interactions, double theta){ this(f,l,lambda,alpha,twVar,twLnk,interactions, theta, Double.NaN); } public GLMParameters(Family f, Link l, double [] lambda, double [] alpha, double twVar, double twLnk, String[] interactions, double theta, double dispersion_estimated){ this._lambda = lambda; this._alpha = alpha; this._tweedie_variance_power = twVar; this._tweedie_link_power = twLnk; _interactions=interactions; _family = f; _link = l; this._theta=theta; this._invTheta = 1.0/theta; this._dispersion_estimated = Double.isNaN(dispersion_estimated) ? _init_dispersion_parameter : dispersion_estimated; } public final double variance(double mu){ switch(_family) { case gaussian: return 1; case binomial: case multinomial: case ordinal: case quasibinomial: case fractionalbinomial: return mu * (1 - mu); case poisson: return mu; case gamma: return mu * mu; case tweedie: return Math.pow(mu, _tweedie_variance_power); default: throw new RuntimeException("unknown family Id " + this._family); } } public final boolean canonical(){ switch(_family){ case gaussian: return _link == Link.identity; case binomial: case quasibinomial: case fractionalbinomial: return _link == Link.logit; case poisson: return _link == Link.log; case gamma: return _link == Link.inverse; // case tweedie: // return false; default: throw H2O.unimpl(); } } public final double deviance(double yr, double ym){ double y1 = yr == 0?.1:yr; switch(_family){ case gaussian: return (yr - ym) * (yr - ym); case quasibinomial: case binomial: case fractionalbinomial: return 2 * ((y_log_y(yr, ym)) + y_log_y(1 - yr, 1 - ym)); case poisson: if( yr == 0 ) return 2 * ym; return 2 * ((yr * Math.log(yr / ym)) - (yr - ym)); case negativebinomial: return (yr==0||ym==0)?0:2*((_invTheta+yr)*Math.log((1+_theta*ym)/(1+_theta*yr))+yr*Math.log(yr/ym)); case gamma: if( yr == 0 ) return -2; return -2 * (Math.log(yr / ym) - (yr - ym) / ym); case tweedie: if (DispersionMethod.ml.equals(_dispersion_parameter_method)) { return TweedieEstimator.deviance(yr, ym, _tweedie_variance_power); } double theta = _tweedie_variance_power == 1 ?Math.log(y1/ym) :(Math.pow(y1,1.-_tweedie_variance_power) - Math.pow(ym,1 - _tweedie_variance_power))/(1-_tweedie_variance_power); double kappa = _tweedie_variance_power == 2 ?Math.log(y1/ym) :(Math.pow(yr,2-_tweedie_variance_power) - 
Math.pow(ym,2-_tweedie_variance_power))/(2 - _tweedie_variance_power); return 2 * (yr * theta - kappa); default: throw new RuntimeException("unknown family " + _family); } } public final double deviance(float yr, float ym){ return deviance((double)yr,(double)ym); } // likelihood calculation used for the model building public final double likelihood(double yr, double ym){ if (_family.equals(Family.negativebinomial)) { return ((yr>0 && ym>0)? (-GLMTask.sumOper(yr, _invTheta, 0)+_invTheta*Math.log(1+_theta*ym)-yr*Math.log(ym)- yr*Math.log(_theta)+yr*Math.log(1+_theta*ym)): ((yr==0 && ym>0)?(_invTheta*Math.log(1+_theta*ym)):0)); // with everything } else if (Family.tweedie.equals(_family) && DispersionMethod.ml.equals(_dispersion_parameter_method) && !_fix_tweedie_variance_power) { return -TweedieEstimator.logLikelihood(yr, ym, _tweedie_variance_power, _dispersion_estimated); } else return .5 * deviance(yr,ym); } // more time-consuming likelihood calculation used for the final scoring public final double likelihood(double w, double yr, double[] ym) { double prediction = ym[0]; double probabilityOf1; switch (_family) { case gaussian: return -.5 * (w * Math.pow(yr - prediction , 2) / _dispersion_estimated + log(_dispersion_estimated / w) + LOG2PI); case binomial: // if probability is not given, then it is 1.0 if 1 is predicted and 0.0 if 0 is predicted probabilityOf1 = ym.length > 1 ? ym[2] : ym[0]; // probability of 1 equals prediction return w * (yr * log(probabilityOf1) + (1-yr) * log(1 - probabilityOf1)); case quasibinomial: // if probability is not given, then it is 1.0 if 1 is predicted and 0.0 if 0 is predicted probabilityOf1 = ym.length > 1 ? ym[2] : ym[0]; // probability of 1 equals prediction if (yr == prediction) return 0; else if (prediction > 1) // check what are possible values? return -w * (yr * log(probabilityOf1)); else return -w * (yr * log(probabilityOf1) + (1 - yr) * log(1 - probabilityOf1)); case fractionalbinomial: // if probability is not given, then it is 1.0 if 1 is predicted and 0.0 if 0 is predicted probabilityOf1 = ym.length > 1 ? ym[2] : ym[0]; // probability of 1 equals prediction if (yr == prediction) return 0; return w * ((MathUtils.y_log_y(yr, probabilityOf1)) + MathUtils.y_log_y(1 - yr, 1 - probabilityOf1)); case poisson: return w * (yr * log(prediction) - prediction - Gamma.logGamma(yr + 1)); // gamma(n) = (n-1)! case negativebinomial: // the estimated dispersion parameter is theta. The likelihood formula requires k. Theta=1/k. double invThetaEstimated = 1 / _dispersion_estimated; return yr * log(invThetaEstimated * prediction / w) - (yr + w/invThetaEstimated) * log(1 + invThetaEstimated * prediction / w) + log(Gamma.gamma(yr + w / invThetaEstimated) / (Gamma.gamma(yr + 1) * Gamma.gamma(w / invThetaEstimated))); case gamma: double invPhiEst = 1 / _dispersion_estimated; return w * invPhiEst * log(w * yr * invPhiEst / prediction) - w * yr * invPhiEst / prediction - log(yr) - Gamma.logGamma(w * invPhiEst); case tweedie: return -TweedieEstimator.logLikelihood(yr, ym[0], _tweedie_variance_power, _dispersion_estimated); case multinomial: // if probability is not given, then it is 1.0 if prediction equals to the real y and 0 othervice double predictedProbabilityOfActualClass = ym.length > 1 ? ym[(int) yr + 1] : (prediction == yr ? 
1.0 : 0.0); return w * log(predictedProbabilityOfActualClass); default: throw new RuntimeException("unknown family " + _family); } } public final double linkDeriv(double x) { // note: compute an inverse of what R does switch(_link) { case ologit: case logit: // case multinomial: double div = (x * (1 - x)); if(div < _EPS) return _OneOEPS; // avoid numerical instability return 1.0 / div; case identity: return 1; case log: return 1.0 / x; case inverse: return -1.0 / (x * x); case ologlog: double oneMx = 1.0-x; double divsor = -1.0*oneMx*Math.log(oneMx); return (divsor<_EPS)?_OneOEPS:(1.0/divsor); case tweedie: // double res = _tweedie_link_power == 0 // ?Math.max(2e-16,Math.exp(x)) // // (1/lambda) * eta^(1/lambda - 1) // :(1.0/_tweedie_link_power) * Math.pow(link(x), 1.0/_tweedie_link_power - 1.0); return _tweedie_link_power == 0 ?1.0/Math.max(2e-16,x) :_tweedie_link_power * Math.pow(x,_tweedie_link_power-1); default: throw H2O.unimpl(); } } public final double linkInv(double x) { switch(_link) { // case multinomial: // should not be used case identity: return x; case ologlog: return 1.0-Math.exp(-1.0*Math.exp(x)); case oprobit: return _dprobit.cumulativeProbability(x); case ologit: case logit: return 1.0 / (Math.exp(-x) + 1.0); case log: return Math.exp(x); case inverse: double xx = (x < 0) ? Math.min(-1e-5, x) : Math.max(1e-5, x); return 1.0 / xx; case tweedie: return _tweedie_link_power == 0 ? Math.max(2e-16, Math.exp(x)) : Math.pow(x, 1/ _tweedie_link_power); default: throw new RuntimeException("unexpected link function id " + this); } } // supported families public enum Family { AUTO(Link.family_default), gaussian(Link.identity), binomial(Link.logit), fractionalbinomial(Link.logit), quasibinomial(Link.logit),poisson(Link.log), gamma(Link.inverse), multinomial(Link.multinomial), tweedie(Link.tweedie), ordinal(Link.ologit), negativebinomial(Link.log); public final Link defaultLink; Family(Link link){defaultLink = link;} } public enum DispersionMethod {pearson, ml, deviance} // methods used to estimate dispersion parameter, ML = maximum likelhood public static enum GLMType {glm, gam, hglm} // special functions are performed depending on GLMType. 
Internal use public static enum Link {family_default, identity, logit, log, inverse, tweedie, multinomial, ologit, oprobit, ologlog} public static enum Influence {dfbetas}; public static enum Solver {AUTO, IRLSM, L_BFGS, COORDINATE_DESCENT_NAIVE, COORDINATE_DESCENT, GRADIENT_DESCENT_LH, GRADIENT_DESCENT_SQERR} // helper function static final double y_log_y(double y, double mu) { if(y == 0)return 0; if(mu < Double.MIN_NORMAL) mu = Double.MIN_NORMAL; return y * Math.log(y / mu); } public InteractionSpec interactionSpec() { return InteractionSpec.create(_interactions, _interaction_pairs); } public MissingValuesHandling missingValuesHandling() { if (_missing_values_handling instanceof MissingValuesHandling) return (MissingValuesHandling) _missing_values_handling; assert _missing_values_handling instanceof DeepLearningModel.DeepLearningParameters.MissingValuesHandling; switch ((DeepLearningModel.DeepLearningParameters.MissingValuesHandling) _missing_values_handling) { case MeanImputation: return MissingValuesHandling.MeanImputation; case Skip: return MissingValuesHandling.Skip; default: throw new IllegalStateException("Unsupported missing values handling value: " + _missing_values_handling); } } public boolean imputeMissing() { return missingValuesHandling() == MissingValuesHandling.MeanImputation || missingValuesHandling() == MissingValuesHandling.PlugValues; } public DataInfo.Imputer makeImputer() { if (missingValuesHandling() == MissingValuesHandling.PlugValues) { if (_plug_values == null || _plug_values.get() == null) { throw new IllegalStateException("Plug values frame needs to be specified when Missing Value Handling = PlugValues."); } return new GLM.PlugValuesImputer(_plug_values.get()); } else { // mean/mode imputation and skip (even skip needs an imputer right now! 
PUBDEV-6809) return new DataInfo.MeanImputer(); } } @Override public void setDistributionFamily(DistributionFamily distributionFamily) { _family = distributionToFamily(distributionFamily); _link = Link.family_default; } @Override public DistributionFamily getDistributionFamily() { return familyToDistribution(_family); } public void updateTweedieParams(double tweedieVariancePower, double tweedieLinkPower, double dispersion){ _tweedie_variance_power = tweedieVariancePower; _tweedie_link_power = tweedieLinkPower; _dispersion_estimated = dispersion; _init_dispersion_parameter = dispersion; } } // GLMParameters public static class GLMWeights { public double mu = 0; public double w = 1; public double z = 0; public double l = 0; public double dev = Double.NaN; } public static class GLMWeightsFun extends Iced { final public Family _family; final Link _link; final double _var_power; final double _link_power; final double _oneOoneMinusVarPower; final double _oneOtwoMinusVarPower; final double _oneMinusVarPower; final double _twoMinusVarPower; final double _oneOLinkPower; final double _oneOLinkPowerSquare; double _theta; // used by negative binomial, 0 < _theta <= 1 double _invTheta; double _dispersion; double _oneOeta; double _oneOetaSquare; boolean _varPowerEstimation; final NormalDistribution _dprobit = new NormalDistribution(0,1); // get the normal distribution public GLMWeightsFun(GLMParameters parms) { this(parms._family,parms._link, parms._tweedie_variance_power, parms._tweedie_link_power, parms._theta, parms._init_dispersion_parameter, GLMParameters.DispersionMethod.ml.equals(parms._dispersion_parameter_method) && !parms._fix_tweedie_variance_power); } public GLMWeightsFun(Family fam, Link link, double var_power, double link_power, double theta, double dispersion, boolean varPowerEstimation) { _family = fam; _link = link; _var_power = var_power; _link_power = link_power; _oneMinusVarPower = 1-_var_power; _twoMinusVarPower = 2-_var_power; _oneOoneMinusVarPower = _var_power==1?1:1.0/(1-_var_power); _oneOtwoMinusVarPower = _var_power==2?1:1.0/(2-_var_power); _oneOLinkPower = 1.0/_link_power; _oneOLinkPowerSquare = _oneOLinkPower*_oneOLinkPower; _theta = theta; _invTheta = 1/theta; _dispersion = dispersion; _varPowerEstimation = varPowerEstimation; } /*** * Given the estimated model output x, we want to find the linear part which is transpose(beta)*p+intercept if * beta does not contain the intercept. */ public final double link(double x) { switch(_link) { case identity: return x; case ologit: // note: x here is the CDF case logit: assert 0 <= x && x <= 1:"x out of bounds, expected <0,1> range, got " + x; return Math.log(x / (1 - x)); case ologlog: return Math.log(-1.0*Math.log(1-x)); // x here is CDF case oprobit: // x is normal with 0 mean and variance 1 return _dprobit.inverseCumulativeProbability(x); case multinomial: case log: return Math.log(x); case inverse: double xx = (x < 0) ? Math.min(-1e-5, x) : Math.max(1e-5, x); return 1.0 / xx; case tweedie: return _link_power == 0?Math.log(x):Math.pow(x, _link_power); default: throw new RuntimeException("unknown link function " + this); } } public final double linkInvDeriv(double x) { switch(_link) { case identity: return 1; case logit: double g = Math.exp(-x); double gg = (g + 1) * (g + 1); return g / gg; case ologit: return (x-x*x); case log: return Math.max(x, Double.MIN_NORMAL); case inverse: double xx = (x < 0) ? 
Math.min(-1e-5, x) : Math.max(1e-5, x); return -1 / (xx * xx); case tweedie: return _link_power==0?Math.max(x, Double.MIN_NORMAL):x*_oneOLinkPower*_oneOeta; default: throw new RuntimeException("unexpected link function id " + this); } } public final double linkInvDeriv2(double x) { switch(_link) { case identity: return 0; case log: return Math.max(x, Double.MIN_NORMAL); case tweedie: return _link_power==0?Math.max(x, Double.MIN_NORMAL):x*_oneOLinkPower*(_oneOLinkPower-1)*_oneOetaSquare; default: throw new RuntimeException("unexpected link function id " + this); } } // calculate the derivative of the link function public final double linkDeriv(double x) { // note: compute an inverse of what R does switch(_link) { case ologit: // note, x is CDF not PDF case logit: // case multinomial: double div = (x * (1 - x)); if(div < _EPS) return _OneOEPS; // avoid numerical instability return 1.0 / div; case ologlog: double oneMx = 1.0-x; double divsor = -1.0*oneMx*Math.log(oneMx); return (divsor<_EPS)?_OneOEPS:(1.0/divsor); case identity: return 1; case log: return 1.0 / x; case inverse: return -1.0 / (x * x); case tweedie: return _link_power == 0 ?1.0/Math.max(2e-16,x) :_link_power * Math.pow(x,_link_power-1); default: throw H2O.unimpl(); } } /*** * Given the linear combination transpose(beta)*p+intercept (if * beta does not contain the intercept), this method will provide the estimated model output. */ public final double linkInv(double x) { switch(_link) { case ologlog: return 1.0-Math.exp(-1.0*Math.exp(x)); case oprobit: return _dprobit.cumulativeProbability(x); case identity: return x; case ologit: case logit: return 1.0 / (Math.exp(-x) + 1.0); case log: return Math.exp(x); case inverse: double xx = (x < 0) ? Math.min(-1e-5, x) : Math.max(1e-5, x); return 1.0 / xx; case tweedie: return _link_power == 0 ?Math.max(2e-16,Math.exp(x)) :Math.pow(x, _oneOLinkPower); default: throw new RuntimeException("unexpected link function id " + _link); } } public final double variance(double mu){ switch(_family) { case gaussian: return 1; case quasibinomial: case binomial: case fractionalbinomial: double res = mu * (1 - mu); return res < _EPS?_EPS:res; case poisson: return mu; case negativebinomial: return (mu+mu*mu*_theta); case gamma: return mu * mu; case tweedie: return Math.pow(mu,_var_power); default: throw new RuntimeException("unknown family Id " + this._family); } } public final double deviance(double yr, double ym){ double y1 = yr == 0?0.1:yr; // this must be kept as 0.1, otherwise, answer differs from R. 
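// Reference formula (comment only, not evaluated here): when the variance power p is neither 1 nor 2,
// the unit Tweedie deviance can be written as
//   d(y, mu) = 2 * [ y^(2-p)/((1-p)(2-p)) - y*mu^(1-p)/(1-p) + mu^(2-p)/(2-p) ],
// which is what the final tweedie branch below accumulates term by term via _oneOoneMinusVarPower and
// _oneOtwoMinusVarPower; the p == 1 and p == 2 branches are the corresponding limiting cases.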
switch(_family){ case gaussian: return (yr - ym) * (yr - ym); case quasibinomial: if(yr == ym) return 0; if(ym > 1) return -2 * (yr*Math.log(ym)); double res = -2 * (yr*Math.log(ym) + (1-yr)*Math.log(1-ym)); return res; case binomial: case fractionalbinomial: return 2 * ((MathUtils.y_log_y(yr, ym)) + MathUtils.y_log_y(1 - yr, 1 - ym)); case poisson: if( yr == 0 ) return 2 * ym; return 2 * ((yr * Math.log(yr / ym)) - (yr - ym)); case negativebinomial: if( yr == 0 && ym <= 0 ) return 0; if( yr == 0 ) return 2 * _invTheta * Math.log(1 + _theta * ym); return 2*((_invTheta+yr)*Math.log((1+_theta*ym)/(1+_theta*yr))+yr*Math.log(yr/ym)); case gamma: if( yr == 0 ) return -2; return -2 * (Math.log(yr / ym) - (yr - ym) / ym); case tweedie: if (_varPowerEstimation) return TweedieEstimator.deviance(yr, ym, _var_power); double val; if (_var_power==1) { val = yr*Math.log(y1/ym)-(yr-ym); } else if (_var_power==2) { val = yr*(1/ym-1/y1)-Math.log(y1/ym); } else { val = (yr==0?0:yr*_oneOoneMinusVarPower*(Math.pow(yr,_oneMinusVarPower)-Math.pow(ym, _oneMinusVarPower)))- (Math.pow(yr,_twoMinusVarPower)-Math.pow(ym, _twoMinusVarPower))*_oneOtwoMinusVarPower; } return 2 * val; default: throw new RuntimeException("unknown family " + _family); } } public final double deviance(float yr, float ym){ return deviance((double)yr,(double)ym); } public final void likelihoodAndDeviance(double yr, GLMWeights x, double w) { double ym = x.mu; switch (_family) { case gaussian: x.dev = w * (yr - ym) * (yr - ym); x.l = .5 * x.dev; break; case quasibinomial: if(yr == ym) x.l = 0; else if (ym > 1) x.l = -(yr*Math.log(ym)); else x.l = - (yr*Math.log(ym) + (1-yr)*Math.log(1-ym)); x.dev = 2*x.l; break; case binomial: case fractionalbinomial: x.l = ym == yr?0:w*((MathUtils.y_log_y(yr, ym)) + MathUtils.y_log_y(1 - yr, 1 - ym)); x.dev = 2*x.l; break; case poisson: case gamma: case tweedie: x.dev = w*deviance(yr,ym); x.l = likelihood(w, yr, ym); break; case negativebinomial: x.dev = w*deviance(yr,ym); // CHECKED-log/CHECKED-identity x.l = w*likelihood(yr,ym); // CHECKED-log/CHECKED-identity break; default: throw new RuntimeException("unknown family " + _family); } } public final double likelihood(double w, double yr, double ym) { if (w==0) return 0; return w*likelihood(yr, ym); } public final double likelihood(double yr, double ym) { switch (_family) { case gaussian: return .5 * (yr - ym) * (yr - ym); case binomial: case quasibinomial: case fractionalbinomial: if (yr == ym) return 0; return .5 * deviance(yr, ym); case poisson: if (yr == 0) return 2 * ym; return 2 * ((yr * Math.log(yr / ym)) - (yr - ym)); case negativebinomial: return ((yr>0 && ym>0)? 
(-GLMTask.sumOper(yr, _invTheta, 0)+_invTheta*Math.log(1+_theta*ym)-yr*Math.log(ym)- yr*Math.log(_theta)+yr*Math.log(1+_theta*ym)): ((yr==0 && ym>0)?(_invTheta*Math.log(1+_theta*ym)):0)); // with everything case gamma: if (yr == 0) return -2; return -2 * (Math.log(yr / ym) - (yr - ym) / ym); case tweedie: if (_varPowerEstimation) return -TweedieEstimator.logLikelihood(yr, ym, _var_power, _dispersion); // we ignore the a(y,phi,p) term in the likelihood calculation here since we are not optimizing over them double temp = 0; if (_var_power==1) { temp = Math.pow(ym, _twoMinusVarPower)*_oneOtwoMinusVarPower-yr*Math.log(ym); } else if (_var_power==2) { temp = Math.log(ym)-yr*Math.pow(ym, _oneMinusVarPower)*_oneOoneMinusVarPower; } else { temp = Math.pow(ym, _twoMinusVarPower)*_oneOtwoMinusVarPower-yr*Math.pow(ym, _oneMinusVarPower)*_oneOoneMinusVarPower; } return temp; // ignored the a(y,phi,p) term as it is a constant for us default: throw new RuntimeException("unknown family " + _family); } } public GLMWeights computeWeights(double y, double eta, double off, double w, GLMWeights x) { double etaOff = eta + off; x.mu = linkInv(etaOff); double var = variance(x.mu);//Math.max(1e-5, variance(x.mu)); // avoid numerical problems with 0 variance double d = linkDeriv(x.mu); if (_family.equals(Family.negativebinomial)) { double invSum = 1.0/(1+_theta*x.mu); double d2 = linkInvDeriv(x.mu); if (y>0 && (x.mu>0)) { double sumr = 1.0+_theta*y; d = (y/(x.mu*x.mu)-_theta*sumr*invSum*invSum) * d2 * d2 + (sumr*invSum-y/x.mu) * linkInvDeriv2(x.mu); //CHECKED-log/CHECKED-identity x.w = w*d; x.z = eta + (y-x.mu) *invSum * d2/(d*x.mu); // CHECKED-identity } else if (y==0 && x.mu > 0) { d = linkInvDeriv2(x.mu)*invSum-_theta*invSum*invSum*d2*d2; // CHECKED //d = Math.min(1e10, Math.max(1e-10, d)); x.w = w*d; x.z = eta - invSum*d2/d; } else { x.w = 0; x.z = 0; } } else if (_family.equals(Family.tweedie)) { // here, the x.z is actually wz double oneOxmu = x.mu==0?_OneOEPS:1.0/x.mu; double oneOxmuSquare = oneOxmu*oneOxmu; _oneOeta = etaOff==0?_OneOEPS:1.0/etaOff; // use etaOff here since the derivative is wrt to eta+offset _oneOetaSquare = _oneOeta*_oneOeta; double diffOneSquare = linkInvDeriv(x.mu)*linkInvDeriv(x.mu); double xmuPowMP = Math.pow(x.mu, -_var_power); if (_var_power==1) { x.w = y*oneOxmuSquare*diffOneSquare-(y*oneOxmu-1)*linkInvDeriv2(x.mu); x.z = (x.w*eta + (y*oneOxmu-1)*linkInvDeriv(x.mu))*w; } else if (_var_power == 2) { x.w = (oneOxmu-y*xmuPowMP)*linkInvDeriv2(x.mu)+ (y*2*Math.pow(x.mu, -3)-oneOxmuSquare)*diffOneSquare; x.z = (x.w*eta+(y*oneOxmuSquare-oneOxmu)*linkInvDeriv(x.mu))*w; } else { x.w = (_var_power*y*Math.pow(x.mu, -_var_power-1)+_oneMinusVarPower*xmuPowMP)*diffOneSquare- (y*xmuPowMP-Math.pow(x.mu, _oneMinusVarPower))*linkInvDeriv2(x.mu); x.z = (x.w*eta+(y*Math.pow(x.mu, -_var_power)-Math.pow(x.mu, _oneMinusVarPower))*linkInvDeriv(x.mu))*w; } x.w *= w; } else { x.w = w / (var * d * d); // formula did not quite work with negative binomial x.z = eta + (y - x.mu) * d; // only eta and no r.offset should be applied. I derived this. 
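// Worked illustration (comment only, assuming family=poisson with the canonical log link and no offset):
// variance(mu) = mu and linkDeriv(mu) = 1/mu, so the generic branch above reduces to the textbook IRLS
// update w_work = w * mu and z = eta + (y - mu) / mu.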
} likelihoodAndDeviance(y,x,w); return x; } } public static class Submodel extends Iced { public final double lambda_value; public final double alpha_value; public final int iteration; public final double devianceTrain; public final double devianceValid; public final int [] idxs; public final double [] beta; public double[] zValues; public boolean dispersionEstimated; public double [] getBeta(double [] beta) { if(idxs != null){ for(int i = 0; i < idxs.length; ++i) beta[idxs[i]] = this.beta[i]; // beta[beta.length-1] = this.beta[this.beta.length-1]; } else System.arraycopy(this.beta,0,beta,0,beta.length); return beta; } public double [] getZValues(double [] zValues) { Arrays.fill(zValues, Double.NaN); // non-active z-values should not be 0 but Double.NaN if(idxs != null){ for(int i = 0; i < idxs.length; ++i) zValues[idxs[i]] = this.zValues[i]; } else System.arraycopy(this.zValues, 0, zValues, 0, zValues.length); return zValues; } public int rank(){ return idxs != null?idxs.length:(ArrayUtils.countNonzeros(beta)); } public double[] zValues() { return zValues.clone(); } public double[] pValues(long residualDegreesOfFreedom) { return calculatePValuesFromZValues(zValues, dispersionEstimated, residualDegreesOfFreedom); } public double[] pValues(double[] zValues, long residualDegreesOfFreedom) { return calculatePValuesFromZValues(zValues, dispersionEstimated, residualDegreesOfFreedom); } public double[] stdErr() { return calculateStdErrFromZValues(zValues, beta); } public double[] stdErr(double[] zValues, double[] beta) { return calculateStdErrFromZValues(zValues, beta); } public Submodel(double lambda, double alpha, double[] beta, int iteration, double devTrain, double devValid, int totBetaLen, double[] zValues, boolean dispersionEstimated) { this.lambda_value = lambda; this.alpha_value = alpha; this.iteration = iteration; this.devianceTrain = devTrain; this.devianceValid = devValid; this.zValues = zValues == null ? 
null : zValues.clone(); this.dispersionEstimated = dispersionEstimated; int r = 0; if(beta != null){ // grab the indices of non-zero coefficients for (int i = 0; i < beta.length; ++i) if (beta[i] != 0) ++r; if (r < beta.length && beta.length == totBetaLen) { // not been shorten before for multinomial idxs = MemoryManager.malloc4(r); int j = 0; for (int i = 0; i < beta.length; ++i) if (beta[i] != 0) idxs[j++] = i; this.beta = ArrayUtils.select(beta, idxs); if(zValues != null && zValues.length > this.beta.length) { // zValues not shorten yet this.zValues = ArrayUtils.select(zValues, idxs); // zValues must correspond to beta } } else { this.beta = beta.clone(); idxs = null; } } else { this.beta = null; idxs = null; } } } public final double _lambda_max; public final double [] _ymu; public final long _nullDOF; public final double _ySigma; public final long _nobs; public double[] _betaCndCheckpoint; // store temporary beta coefficients for checkpointing purposes public boolean _finalScoring = false; // used while scoring to indicate if it is a final or partial scoring private static String[] binomialClassNames = new String[]{"0", "1"}; @Override protected String[][] scoringDomains(){ String [][] domains = _output._domains; if ((_parms._family == Family.binomial || _parms._family == Family.quasibinomial || _parms._family == Family.fractionalbinomial) && _output._domains[_output._dinfo.responseChunkId(0)] == null) { domains = domains.clone(); domains[_output._dinfo.responseChunkId(0)] = binomialClassNames; } return domains; } public void setZValues(double [] zValues, double dispersion, boolean dispersionEstimated) { _output._zvalues = zValues; setDispersion(dispersion, dispersionEstimated); } public void setDispersion(double dispersion, boolean dispersionEstimated) { _output._dispersion = dispersion; _output._dispersionEstimated = dispersionEstimated; } public static class GLMOutput extends Model.Output { Submodel[] _submodels = new Submodel[0]; DataInfo _dinfo; double[] _ymu; public String[] _coefficient_names; public long _training_time_ms; public TwoDimTable _variable_importances; public VarImp _varimp; // should contain the same content as standardized coefficients int _lambda_array_size; // store number of lambdas to iterate over public double _lambda_1se = -1; // lambda_best+sd(lambda) submodel index; applicable if running lambda search with cv public double _lambda_min = -1; // starting lambda value when lambda search is enabled public double _lambda_max = -1; // minimum lambda value calculated when lambda search is enabled public int _selected_lambda_idx; // lambda index with best deviance public int _selected_alpha_idx; // alpha index with best deviance public int _selected_submodel_idx; // submodel index with best deviance public int _best_submodel_idx; // submodel index with best deviance public int _best_lambda_idx; // the same as best_submodel_idx, kept to ensure backward compatibility public String[] _linear_constraint_states; public boolean _all_constraints_satisfied; public TwoDimTable _linear_constraints_table; public Key<Frame> _regression_influence_diagnostics; public Key<Frame> _betadiff_var; // for debugging only, no need to serialize public double lambda_best(){return _submodels.length == 0 ? -1 : _submodels[_best_submodel_idx].lambda_value;} public double dispersion(){ return _dispersion;} public boolean dispersionEstimated() {return _dispersionEstimated;} public double alpha_best() { return _submodels.length == 0 ? 
-1 : _submodels[_selected_submodel_idx].alpha_value;} public double lambda_1se(){ return _lambda_1se; // (_lambda_1se==-1 || _submodels.length==0 || _lambda_1se>=_submodels.length) ? -1 : _submodels[_lambda_1se].lambda_value; } public DataInfo getDinfo() { return _dinfo; } public int bestSubmodelIndex() { return _selected_submodel_idx; } public double lambda_selected(){ return _submodels[_selected_submodel_idx].lambda_value; } final int _totalBetaLength; double[] _global_beta; private double[] _zvalues; double[] _variable_inflation_factors; String[] _vif_predictor_names; // predictor names corresponding to the variableInflationFactors double [][] _vcov; private double _dispersion; private boolean _dispersionEstimated; public int[] _activeColsPerClass; public ConstrainedGLMUtils.LinearConstraints[] _equalityConstraintsLinear = null; public ConstrainedGLMUtils.LinearConstraints[] _lessThanEqualToConstraintsLinear = null; public ConstrainedGLMUtils.LinearConstraints[] _equalityConstraintsBeta = null; public ConstrainedGLMUtils.LinearConstraints[] _lessThanEqualToConstraintsBeta = null; public String[] _constraintCoefficientNames = null; public double[][] _initConstraintMatrix = null; public boolean hasPValues(){return _zvalues != null;} public boolean hasVIF() { return _vif_predictor_names != null; } public double[] stdErr() { return calculateStdErrFromZValues(_zvalues, _global_beta); } public static double[] calculateStdErrFromZValues(double[] zValues, double[] beta) { double[] res = zValues.clone(); for (int i = 0; i < res.length; ++i) { if(beta[i] == 0) { res[i] = Double.NaN; } else { res[i] = beta[i] / zValues[i]; } } return res; } public double[] getZValues() { return _zvalues; } public double[] getVariableInflationFactors() { return _variable_inflation_factors; } public String[] getVIFPredictorNames() { return _vif_predictor_names; } public Map<String, Double> getVIFAndNames() { if (_variable_inflation_factors != null) return IntStream.range(0, _vif_predictor_names.length).boxed().collect(toMap(s ->_vif_predictor_names[s], s -> _variable_inflation_factors[s])); else return null; } @Override public TwoDimTable getVariableImportances() { return _variable_importances; } @Override public ModelCategory getModelCategory() { return _binomial?ModelCategory.Binomial:(_multinomial?ModelCategory.Multinomial:(_ordinal?ModelCategory.Ordinal:ModelCategory.Regression)); } @Override protected long checksum_impl() { long d = _global_beta == null?1:Arrays.hashCode(_global_beta); return d*super.checksum_impl(); } public double [] zValues(){return _zvalues.clone();} public static double[] calculatePValuesFromZValues(double[] zValues, boolean dispersionEstimated, long residualDegreesOfFreedom) { double[] res = zValues.clone(); RealDistribution rd = dispersionEstimated ? 
new TDistribution(residualDegreesOfFreedom) : new NormalDistribution(); for(int i = 0; i < res.length; ++i) { if(!Double.isNaN(zValues[i])) { // if zValues[i] is Nan, then res[i] is already set to NaN (desired value) res[i] = 2 * rd.cumulativeProbability(-Math.abs(res[i])); } } return res; } public double[] pValues() { return calculatePValuesFromZValues(_zvalues, _dispersionEstimated, _training_metrics.residual_degrees_of_freedom()); } public double[] variableInflationFactors() { return _variable_inflation_factors; // predictor orders the same as in coefficientNames } double[][] _global_beta_multinomial; final int _nclasses; public boolean _binomial; public boolean _multinomial; public boolean _ordinal; public void setLambdas(GLMParameters parms) { if (parms._lambda_search) { _lambda_max = parms._lambda[0]; _lambda_min = parms._lambda[parms._lambda.length-1]; } } public int rank() { return _submodels[_selected_submodel_idx].rank();} public double[] ymu() { return _ymu;} public boolean isStandardized() { return _dinfo._predictor_transform == TransformType.STANDARDIZE; } public String[] coefficientNames() { return _coefficient_names; } // This method is to take the coefficient names of one class and extend it to // coefficient names for all N classes. public String[] multiClassCoeffNames() { String[] responseDomain = _domains[_domains.length-1]; String[] multinomialNames = new String[_coefficient_names.length*responseDomain.length]; int coeffLen = _coefficient_names.length; int responseLen = responseDomain.length; int counter = 0; for (int respInd = 0; respInd < responseLen; respInd++) { for (int coeffInd = 0; coeffInd < coeffLen; coeffInd++) { multinomialNames[counter++] = _coefficient_names[coeffInd] + "_" + responseDomain[respInd]; } } return multinomialNames; } // GLM is always supervised public boolean isSupervised() { return true; } @Override public InteractionBuilder interactionBuilder() { return _dinfo._interactionSpec != null ? 
new GLMInteractionBuilder() : null; } private class GLMInteractionBuilder implements InteractionBuilder { @Override public Frame makeInteractions(Frame f) { InteractionPair[] interactionPairs = _dinfo._interactionSpec.makeInteractionPairs(f); f.add(Model.makeInteractions(f, false, interactionPairs, true, true, false)); return f; } } public static Frame expand(Frame fr, InteractionSpec interactions, boolean useAll, boolean standardize, boolean skipMissing) { return MakeGLMModelHandler.oneHot(fr,interactions,useAll,standardize,false,skipMissing); } public GLMOutput(DataInfo dinfo, String[] column_names, String[] column_types, String[][] domains, String[] coefficient_names, double[] beta, boolean binomial, boolean multinomial, boolean ordinal) { super(dinfo._weights, dinfo._offset, dinfo._fold); _dinfo = dinfo.clone(); setNames(column_names, column_types); _domains = domains; _coefficient_names = coefficient_names; _binomial = binomial; _multinomial = multinomial; _ordinal = ordinal; _nclasses = _binomial?2:(_multinomial || _ordinal?beta.length/coefficient_names.length:1); _totalBetaLength = beta.length; if(_binomial && domains[domains.length-1] != null) { assert domains[domains.length - 1].length == 2:"Unexpected domains " + Arrays.toString(domains); binomialClassNames = domains[domains.length - 1]; } assert !ArrayUtils.hasNaNsOrInfs(beta): "Coefficients contain NA or Infs."; if (_ordinal || _multinomial) _global_beta_multinomial=ArrayUtils.convertTo2DMatrix(beta, coefficient_names.length); else _global_beta=beta; _submodels = new Submodel[]{new Submodel(0, 0, beta, -1, Double.NaN, Double.NaN, _totalBetaLength, null, false)}; } public GLMOutput() { _isSupervised = true; _nclasses = -1; _totalBetaLength = -1; } public GLMOutput(GLM glm) { super(glm); _dinfo = glm._dinfo.clone(); _dinfo._adaptedFrame = null; String[] cnames = glm._dinfo.coefNames(); String [] names = glm._dinfo._adaptedFrame._names; String [][] domains = glm._dinfo._adaptedFrame.domains(); if(glm._parms._family == Family.quasibinomial){ double [] mins = glm._dinfo._adaptedFrame.lastVec().mins(); double [] maxs = glm._dinfo._adaptedFrame.lastVec().maxs(); double l = mins[0]; double u = maxs[0]; if(!(l<u)) throw new IllegalArgumentException("quasibinomial family expects response to have two distinct values"); for(int i = 0; i < mins.length; ++i){ if((mins[i]-l)*(mins[i]-u) != 0) throw new IllegalArgumentException("quasibinomial family expects response to have two distinct values, got mins = " + Arrays.toString(mins) + ", maxs = " + Arrays.toString(maxs)); if((maxs[i]-l)*(maxs[i]-u) != 0) throw new IllegalArgumentException("quasibinomial family expects response to have two distinct values, got mins = " + Arrays.toString(mins) + ", maxs = " + Arrays.toString(maxs)); } domains[domains.length-1] = new String[]{Double.toString(l),Double.toString(u)}; } int id = glm._generatedWeights == null?-1:ArrayUtils.find(names, glm._generatedWeights); if(id >= 0) { _dinfo._weights = false; String [] ns = new String[names.length-1]; String[][] ds = new String[domains.length-1][]; System.arraycopy(names,0,ns,0,id); System.arraycopy(domains,0,ds,0,id); System.arraycopy(names,id+1,ns,id,ns.length-id); System.arraycopy(domains,id+1,ds,id,ds.length-id); names = ns; domains = ds; } setNames(names, glm._dinfo._adaptedFrame.typesStr()); _domains = domains; _coefficient_names = Arrays.copyOf(cnames, cnames.length + 1); _coefficient_names[_coefficient_names.length-1] = "Intercept"; _nclasses = glm.nclasses(); _totalBetaLength = 
glm._betaInfo.totalBetaLength(); _binomial = (glm._parms._family == Family.binomial || glm._parms._family == Family.quasibinomial || Family.fractionalbinomial == glm._parms._family); _multinomial = glm._parms._family == Family.multinomial; _ordinal = glm._parms._family == Family.ordinal; } /** * Variance Covariance matrix accessor. Available only if odel has been built with p-values. * @return */ public double [][] vcov(){return _vcov;} @Override public int nclasses() { return _nclasses; } @Override public String[] classNames() { String [] res = super.classNames(); if(res == null && _binomial) return binomialClassNames; return res; } public Submodel pickBestModel(GLMParameters parms) { int bestId = 0; Submodel best = _submodels[0]; for(int i = 1; i < _submodels.length; ++i) { Submodel sm = _submodels[i]; if(!(sm.devianceValid > best.devianceValid) && sm.devianceTrain < best.devianceTrain){ bestId = i; best = sm; } } setSubmodelIdx(_best_submodel_idx = bestId, parms); return best; } public double[] getNormBeta() { if(this.isStandardized()) { return _submodels[_selected_submodel_idx].getBeta(MemoryManager.malloc8d(_dinfo.fullN()+1)); } else { return _dinfo.normalizeBeta(_submodels[_selected_submodel_idx].getBeta(MemoryManager.malloc8d(_dinfo.fullN()+1)), this.isStandardized()); } } public double[][] getNormBetaMultinomial() { return getNormBetaMultinomial(_selected_submodel_idx, this.isStandardized()); } public double[][] getNormBetaMultinomial(int idx) { if(_submodels == null || _submodels.length == 0) // no model yet return null; double [][] res = new double[nclasses()][]; Submodel sm = _submodels[idx]; int N = _dinfo.fullN()+1; double [] beta = sm.beta; if(sm.idxs != null) { beta = ArrayUtils.expandAndScatter(beta, nclasses() * (_dinfo.fullN() + 1), sm.idxs); } else if (beta.length < _totalBetaLength && sm.idxs == null) { // need to expand beta to full length beta = expandToFullArray(beta, _activeColsPerClass, _totalBetaLength, nclasses(), _totalBetaLength/nclasses()); } for(int i = 0; i < res.length; ++i) res[i] = Arrays.copyOfRange(beta,i*N,(i+1)*N); return res; } public double[][] getNormBetaMultinomial(int idx, boolean standardized) { if(_submodels == null || _submodels.length == 0) // no model yet return null; double [][] res = new double[nclasses()][]; Submodel sm = _submodels[idx]; int N = _dinfo.fullN()+1; double [] beta = sm.beta; if(sm.idxs != null) beta = ArrayUtils.expandAndScatter(beta,nclasses()*(_dinfo.fullN()+1),sm.idxs); else if (beta.length < _totalBetaLength) // sm.idxs is null but beta too short beta = expandToFullArray(beta, _activeColsPerClass, _totalBetaLength, nclasses(), _totalBetaLength/nclasses()); for(int i = 0; i < res.length; ++i) if(standardized) { res[i] = Arrays.copyOfRange(beta, i * N, (i + 1) * N); } else { res[i] = _dinfo.normalizeBeta(Arrays.copyOfRange(beta, i * N, (i + 1) * N), standardized); } return res; } public double[][] get_global_beta_multinomial(){return _global_beta_multinomial;} private int indexOf(double needle, double[] haystack) { for (int i = 0; i < haystack.length; i++) { if (needle == haystack[i]) return i; } return -1; } // set model coefficients to that of submodel index l public void setSubmodelIdx(int l, GLMParameters parms){ _selected_submodel_idx = l; _best_lambda_idx = l; // kept to ensure backward compatibility _selected_alpha_idx = indexOf(_submodels[l].alpha_value, parms._alpha); _selected_lambda_idx = indexOf(_submodels[l].lambda_value, parms._lambda); if(_multinomial || _ordinal) { _global_beta_multinomial = 
getNormBetaMultinomial(l); for(int i = 0; i < _global_beta_multinomial.length; ++i) _global_beta_multinomial[i] = _dinfo.denormalizeBeta(_global_beta_multinomial[i]); } else { if (_global_beta == null) _global_beta = MemoryManager.malloc8d(_coefficient_names.length); else Arrays.fill(_global_beta, 0); _submodels[l].getBeta(_global_beta); _global_beta = _dinfo.denormalizeBeta(_global_beta); } } public double [] beta() { return _global_beta;} public Submodel bestSubmodel() { return _submodels[_selected_submodel_idx]; } // given lambda value, return the corresponding submodel index public Submodel getSubmodel(double lambdaCVEstimate) { for(int i = 0; i < _submodels.length; ++i) if(_submodels[i] != null && _submodels[i].lambda_value == lambdaCVEstimate) { return _submodels[i]; } return null; } public Submodel getSubmodel(int submodel_index) { assert submodel_index < _submodels.length : "submodel_index specified exceeds the submodels length."; return _submodels[submodel_index]; } // calculate variable importance which is derived from the standardized coefficients public VarImp calculateVarimp() { String[] names = coefficientNames(); final double [] magnitudes = new double[names.length]; int len = magnitudes.length - 1; if (len == 0) // GLM model contains only intercepts and no predictor coefficients. return null; int[] indices = new int[len]; for (int i = 0; i < indices.length; ++i) indices[i] = i; float[] magnitudesSort = new float[len]; // stored sorted coefficient magnitudes String[] namesSort = new String[len]; if (_nclasses > 2) calculateVarimpMultinomial(magnitudes, indices, getNormBetaMultinomial()); else calculateVarimpBase(magnitudes, indices, getNormBeta()); for (int index = 0; index < len; index++) { magnitudesSort[index] = (float) magnitudes[indices[index]]; namesSort[index] = names[indices[index]]; } return new VarImp(magnitudesSort, namesSort); } } /** * get beta coefficients in a map indexed by name * @return the estimated coefficients */ public HashMap<String,Double> coefficients(){ HashMap<String, Double> res = new HashMap<>(); final double [] b = beta(); if(b == null) return res; if(_parms._family == Family.multinomial || _parms._family == Family.ordinal){ String [] responseDomain = _output._domains[_output._domains.length-1]; int len = b.length/_output.nclasses(); assert b.length == len*_output.nclasses(); for(int c = 0; c < _output.nclasses(); ++c) { String postfix = "_"+responseDomain[c]; for (int i = 0; i < len; ++i) res.put(_output._coefficient_names[i]+postfix, b[c*len+i]); } } else for (int i = 0; i < b.length; ++i) res.put(_output._coefficient_names[i], b[i]); return res; } public HashMap<String,Double> coefficients(boolean standardized){ HashMap<String, Double> res = new HashMap<>(); double [] b = beta(); if(_parms._family == Family.multinomial || _parms._family == Family.ordinal){ if (standardized) b = flat(this._output.getNormBetaMultinomial()); if(b == null) return res; String [] responseDomain = _output._domains[_output._domains.length-1]; int len = b.length/_output.nclasses(); assert b.length == len*_output.nclasses(); for(int c = 0; c < _output.nclasses(); ++c) { String postfix = "_" + responseDomain[c]; for (int i = 0; i < len; ++i) res.put(_output._coefficient_names[i]+postfix, b[c*len+i]); } } else { if (standardized) b = this._output.getNormBeta(); for (int i = 0; i < b.length; ++i) res.put(_output._coefficient_names[i], b[i]); } return res; } // TODO: Shouldn't this be in schema? have it here for now to be consistent with others... 
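/*
 * Usage sketch (hypothetical variable names, shown only to illustrate the two coefficient accessors above;
 * for non-multinomial families the intercept, when fitted, is stored under the name "Intercept"):
 *
 *   GLMModel model = ...;                               // a fitted GLM
 *   Map<String, Double> raw = model.coefficients();     // coefficients on the original predictor scale
 *   Map<String, Double> std = model.coefficients(true); // standardized coefficients
 *   double intercept = raw.get("Intercept");
 */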
/** * Re-do the TwoDim table generation with updated model. */ public TwoDimTable generateSummary(Key train, int iter){ String[] names = new String[]{"Family", "Link", "Regularization", "Number of Predictors Total", "Number of Active Predictors", "Number of Iterations", "Training Frame"}; String[] types = new String[]{"string", "string", "string", "int", "int", "int", "string"}; String[] formats = new String[]{"%s", "%s", "%s", "%d", "%d", "%d", "%s"}; if (_parms._lambda_search) { names = new String[]{"Family", "Link", "Regularization", "Lambda Search", "Number of Predictors Total", "Number of Active Predictors", "Number of Iterations", "Training Frame"}; types = new String[]{"string", "string", "string", "string", "int", "int", "int", "string"}; formats = new String[]{"%s", "%s", "%s", "%s", "%d", "%d", "%d", "%s"}; } _output._model_summary = new TwoDimTable("GLM Model", "summary", new String[]{""}, names, types, formats, ""); _output._model_summary.set(0, 0, _parms._family.toString()); _output._model_summary.set(0, 1, _parms._link.toString()); String regularization = "None"; if (_parms._lambda != null && !(_parms._lambda.length == 1 && _parms._lambda[0] == 0)) { // have regularization if (_output.bestSubmodel().alpha_value == 0) regularization = "Ridge ( lambda = "; else if (_output.bestSubmodel().alpha_value == 1) regularization = "Lasso (lambda = "; else regularization = "Elastic Net (alpha = " + MathUtils.roundToNDigits(_output.bestSubmodel().alpha_value, 4) + ", lambda = "; regularization = regularization + MathUtils.roundToNDigits(_output.bestSubmodel().lambda_value, 4) + " )"; } _output._model_summary.set(0, 2, regularization); int lambdaSearch = 0; if (_parms._lambda_search) { lambdaSearch = 1; iter = _output._submodels[_output._selected_submodel_idx].iteration; _output._model_summary.set(0, 3, "nlambda = " + _parms._nlambdas + ", lambda.max = " + MathUtils.roundToNDigits(_lambda_max, 4) + ", lambda.min = " + MathUtils.roundToNDigits(_output.lambda_best(), 4) + ", lambda.1se = " + MathUtils.roundToNDigits(_output.lambda_1se(), 4)); } int intercept = _parms._intercept ? 
1 : 0; if (_output.nclasses() > 2) { _output._model_summary.set(0, 3 + lambdaSearch, _output.nclasses() * _output._coefficient_names.length); _output._model_summary.set(0, 4 + lambdaSearch, Integer.toString(_output.rank() - _output.nclasses() * intercept)); } else { _output._model_summary.set(0, 3 + lambdaSearch, beta().length - 1); _output._model_summary.set(0, 4 + lambdaSearch, Integer.toString(_output.rank() - intercept)); } _output._model_summary.set(0, 5 + lambdaSearch, Integer.valueOf(iter)); _output._model_summary.set(0, 6 + lambdaSearch, train.toString()); return _output._model_summary; } @Override public long checksum_impl(){ if(_parms._train == null) return 0; return super.checksum_impl(); } private static ThreadLocal<double[]> _eta = new ThreadLocal<>(); @Override protected double[] score0(double[] data, double[] preds){return score0(data,preds,0);} @Override protected double[] score0(double[] data, double[] preds, double o) { if(_parms._family == Family.multinomial || _parms._family == Family.ordinal) { if (o != 0) throw H2O.unimpl("Offset is not implemented for multinomial/ordinal."); double[] eta = _eta.get(); Arrays.fill(preds, 0.0); if (eta == null || eta.length != _output.nclasses()) _eta.set(eta = MemoryManager.malloc8d(_output.nclasses())); final double[][] bm = _output._global_beta_multinomial; double sumExp = 0; double maxRow = 0; int classInd = bm.length; int icptInd = bm[0].length-1; if (_parms._family == Family.ordinal) // only need one eta for all classes classInd -= 1; // last class all zeros for (int c = 0; c < classInd; ++c) { double e = bm[c][icptInd]; // grab the intercept, replace the bm[0].length-1 double [] b = bm[c]; for(int i = 0; i < _output._dinfo._cats; ++i) { int l = _output._dinfo.getCategoricalId(i, data[i]); if (l >= 0) e += b[l]; } int coff = _output._dinfo._cats; int boff = _output._dinfo.numStart(); for(int i = 0; i < _output._dinfo._nums; ++i) { double d = data[coff+i]; if(!_output._dinfo._skipMissing && Double.isNaN(d)) d = _output._dinfo._numNAFill[i]; e += d*b[boff+i]; } if(e > maxRow) maxRow = e; eta[c] = e; } if (_parms._family == Family.multinomial) { for (int c = 0; c < bm.length; ++c) sumExp += eta[c] = Math.exp(eta[c]-maxRow); // intercept sumExp = 1.0 / sumExp; for (int c = 0; c < bm.length; ++c) preds[c + 1] = eta[c] * sumExp; preds[0] = ArrayUtils.maxIndex(eta); } else { // scoring for ordinal int nclasses = _output._nclasses; int lastClass = nclasses-1; // first assign the class Arrays.fill(preds,1e-10); // initialize to small number preds[0] = lastClass; // initialize to last class by default here double previousCDF = 0.0; for (int cInd = 0; cInd < lastClass; cInd++) { // classify row and calculate PDF of each class double currEta = eta[cInd]; double currCDF = 1.0 / (1 + Math.exp(-currEta)); preds[cInd + 1] = currCDF - previousCDF; previousCDF = currCDF; if (currEta > 0) { // found the correct class preds[0] = cInd; break; } } for (int cInd = (int) preds[0] + 1; cInd < lastClass; cInd++) { // continue PDF calculation double currCDF = 1.0 / (1 + Math.exp(-eta[cInd])); preds[cInd + 1] = currCDF - previousCDF; previousCDF = currCDF; } preds[nclasses] = 1-previousCDF; } } else { double[] b = beta(); double eta = b[b.length - 1] + o; // intercept + offset for (int i = 0; i < _output._dinfo._cats && !Double.isNaN(eta); ++i) { int l = _output._dinfo.getCategoricalId(i, data[i]); if (l >= 0) eta += b[l]; } int numStart = _output._dinfo.numStart(); int ncats = _output._dinfo._cats; for (int i = 0; i < _output._dinfo._nums && 
!Double.isNaN(eta); ++i) { double d = data[ncats + i]; if (!_output._dinfo._skipMissing && Double.isNaN(d)) d = _output._dinfo._numNAFill[i]; eta += b[numStart + i] * d; } double mu = _parms.linkInv(eta); if (_parms._family == Family.binomial) { // threshold for prediction preds[0] = mu >= defaultThreshold()?1:0; preds[1] = 1.0 - mu; // class 0 preds[2] = mu; // class 1 } else preds[0] = mu; } return preds; } @Override protected boolean needsPostProcess() { return false; /* pred[0] is already set by score0 */ } @Override public double score(double[] data) { double[] pred = score0(data, new double[_output.nclasses() + 1], 0); return pred[0]; } @Override protected void toJavaPredictBody(SBPrintStream body, CodeGeneratorPipeline classCtx, CodeGeneratorPipeline fileCtx, final boolean verboseCode) { // Generate static fields classCtx.add(new CodeGenerator() { @Override public void generate(JCodeSB out) { JCodeGen.toClassWithArray(out, "public static", "BETA", beta_internal()); // "The Coefficients" JCodeGen.toClassWithArray(out, "static", "NUM_MEANS", _output._dinfo._numNAFill,"Imputed numeric values"); JCodeGen.toClassWithArray(out, "static", "CAT_MODES", _output._dinfo.catNAFill(),"Imputed categorical values."); JCodeGen.toStaticVar(out, "CATOFFS", dinfo()._catOffsets, "Categorical Offsets"); } }); body.ip("final double [] b = BETA.VALUES;").nl(); if(_parms.imputeMissing()){ body.ip("for(int i = 0; i < " + _output._dinfo._cats + "; ++i) if(Double.isNaN(data[i])) data[i] = CAT_MODES.VALUES[i];").nl(); body.ip("for(int i = 0; i < " + _output._dinfo._nums + "; ++i) if(Double.isNaN(data[i + " + _output._dinfo._cats + "])) data[i+" + _output._dinfo._cats + "] = NUM_MEANS.VALUES[i];").nl(); } if(_parms._family != Family.multinomial && _parms._family != Family.ordinal) { body.ip("double eta = 0.0;").nl(); if (!_parms._use_all_factor_levels) { // skip level 0 of all factors body.ip("for(int i = 0; i < CATOFFS.length-1; ++i) if(data[i] != 0) {").nl(); body.ip(" int ival = (int)data[i] - 1;").nl(); body.ip(" if(ival != data[i] - 1) throw new IllegalArgumentException(\"categorical value out of range\");").nl(); body.ip(" ival += CATOFFS[i];").nl(); body.ip(" if(ival < CATOFFS[i + 1])").nl(); body.ip(" eta += b[ival];").nl(); } else { // do not skip any levels body.ip("for(int i = 0; i < CATOFFS.length-1; ++i) {").nl(); body.ip(" int ival = (int)data[i];").nl(); body.ip(" if(ival != data[i]) throw new IllegalArgumentException(\"categorical value out of range\");").nl(); body.ip(" ival += CATOFFS[i];").nl(); body.ip(" if(ival < CATOFFS[i + 1])").nl(); body.ip(" eta += b[ival];").nl(); } body.ip("}").nl(); final int noff = dinfo().numStart() - dinfo()._cats; body.ip("for(int i = ").p(dinfo()._cats).p("; i < b.length-1-").p(noff).p("; ++i)").nl(); body.ip("eta += b[").p(noff).p("+i]*data[i];").nl(); body.ip("eta += b[b.length-1]; // reduce intercept").nl(); if(_parms._family != Family.tweedie) body.ip("double mu = hex.genmodel.GenModel.GLM_").p(_parms._link.toString()).p("Inv(eta"); else body.ip("double mu = hex.genmodel.GenModel.GLM_tweedieInv(eta," + _parms._tweedie_link_power); body.p(");").nl(); if (_parms._family == Family.binomial || _parms._family == Family.fractionalbinomial) { body.ip("preds[0] = (mu >= ").p(defaultThreshold()).p(") ? 
1 : 0").p("; // threshold given by ROC").nl(); body.ip("preds[1] = 1.0 - mu; // class 0").nl(); body.ip("preds[2] = mu; // class 1").nl(); } else { body.ip("preds[0] = mu;").nl(); } } else { int P = _output._global_beta_multinomial[0].length; body.ip("preds[0] = 0;").nl(); body.ip("for(int c = 0; c < " + _output._nclasses + "; ++c){").nl(); body.ip(" preds[c+1] = 0;").nl(); if(dinfo()._cats > 0) { if (!_parms._use_all_factor_levels) { // skip level 0 of all factors body.ip(" for(int i = 0; i < CATOFFS.length-1; ++i) if(data[i] != 0) {").nl(); body.ip(" int ival = (int)data[i] - 1;").nl(); body.ip(" if(ival != data[i] - 1) throw new IllegalArgumentException(\"categorical value out of range\");").nl(); body.ip(" ival += CATOFFS[i];").nl(); body.ip(" if(ival < CATOFFS[i + 1])").nl(); body.ip(" preds[c+1] += b[ival+c*" + P + "];").nl(); } else { // do not skip any levels body.ip(" for(int i = 0; i < CATOFFS.length-1; ++i) {").nl(); body.ip(" int ival = (int)data[i];").nl(); body.ip(" if(ival != data[i]) throw new IllegalArgumentException(\"categorical value out of range\");").nl(); body.ip(" ival += CATOFFS[i];").nl(); body.ip(" if(ival < CATOFFS[i + 1])").nl(); body.ip(" preds[c+1] += b[ival+c*" + P + "];").nl(); } body.ip(" }").nl(); } final int noff = dinfo().numStart(); body.ip(" for(int i = 0; i < " + dinfo()._nums + "; ++i)").nl(); body.ip(" preds[c+1] += b[" + noff + "+i + c*" + P + "]*data[i+"+dinfo()._cats+"];").nl(); body.ip(" preds[c+1] += b[" + (P-1) +" + c*" + P + "]; // reduce intercept").nl(); body.ip("}").nl(); if (_parms._family == Family.multinomial) { body.ip("double max_row = 0;").nl(); body.ip("for(int c = 1; c < preds.length; ++c) if(preds[c] > max_row) max_row = preds[c];").nl(); body.ip("double sum_exp = 0;").nl(); body.ip("for(int c = 1; c < preds.length; ++c) { sum_exp += (preds[c] = Math.exp(preds[c]-max_row));}").nl(); body.ip("sum_exp = 1/sum_exp;").nl(); body.ip("double max_p = 0;").nl(); body.ip("for(int c = 1; c < preds.length; ++c) if((preds[c] *= sum_exp) > max_p){ max_p = preds[c]; preds[0] = c-1;};").nl(); } else { // special for ordinal. 
preds contains etas for all classes int lastClass = _output._nclasses-1; body.ip("int lastClass = "+lastClass+";").nl(); body.ip("preds[0]=0;").nl(); body.ip("double previousCDF = 0.0;").nl(); body.ip("for (int cInd = 0; cInd < lastClass; cInd++) { // classify row and calculate PDF of each class").nl(); body.ip(" double eta = preds[cInd+1];").nl(); body.ip(" double currCDF = 1.0/(1+Math.exp(-eta));").nl(); body.ip(" preds[cInd+1] = currCDF-previousCDF;").nl(); body.ip(" previousCDF = currCDF;").nl(); body.ip("}").nl(); body.ip("preds[nclasses()] = 1-previousCDF;").nl(); body.ip("double max_p = 0;").nl(); body.ip("for(int c = 1; c < preds.length; ++c) if(preds[c] > max_p){ max_p = preds[c]; preds[0] = c-1;};").nl(); } } } @Override protected SBPrintStream toJavaInit(SBPrintStream sb, CodeGeneratorPipeline fileCtx) { sb.nl(); sb.ip("public boolean isSupervised() { return true; }").nl(); sb.ip("public int nfeatures() { return "+_output.nfeatures()+"; }").nl(); sb.ip("public int nclasses() { return "+_output.nclasses()+"; }").nl(); return sb; } private GLMScore makeScoringTask(Frame adaptFrm, boolean generatePredictions, Job j, boolean computeMetrics, CFuncRef customMetric){ int responseId = adaptFrm.find(_output.responseName()); if(responseId > -1 && adaptFrm.vec(responseId).isBad()) { // remove inserted invalid response adaptFrm = new Frame(adaptFrm.names(),adaptFrm.vecs()); adaptFrm.remove(responseId); } // Build up the names & domains. final boolean detectedComputeMetrics = computeMetrics && (adaptFrm.vec(_output.responseName()) != null && !adaptFrm.vec(_output.responseName()).isBad()); String [] domain = _output.nclasses()<=1 ? null : !detectedComputeMetrics ? _output._domains[_output._domains.length-1] : adaptFrm.lastVec().domain(); // Score the dataset, building the class distribution & predictions return new GLMScore(j, this, _output._dinfo.scoringInfo(_output._names,adaptFrm),domain,detectedComputeMetrics, generatePredictions, customMetric); } /** Score an already adapted frame. Returns a new Frame with new result * vectors, all in the DKV. Caller responsible for deleting. Input is * already adapted to the Model's domain, so the output is also. Also * computes the metrics for this frame. * * @param adaptFrm Already adapted frame * @param computeMetrics * @return A Frame containing the prediction column, and class distribution */ @Override protected PredictScoreResult predictScoreImpl(Frame fr, Frame adaptFrm, String destination_key, Job j, boolean computeMetrics, CFuncRef customMetricFunc) { String [] names = makeScoringNames(); String [][] domains = new String[names.length][]; GLMScore gs = makeScoringTask(adaptFrm,true,j, computeMetrics, customMetricFunc); assert gs._dinfo._valid:"_valid flag should be set on data info when doing scoring"; gs.doAll(names.length,Vec.T_NUM,gs._dinfo._adaptedFrame); ModelMetrics.MetricBuilder<?> mb = null; Frame rawFrame = null; if (gs._computeMetrics) { mb = gs._mb; rawFrame = gs.outputFrame(); } domains[0] = gs._domain; Frame outputFrame = gs.outputFrame(Key.make(destination_key), names, domains); return new PredictScoreResult(mb, rawFrame, outputFrame); } @Override public String [] makeScoringNames(){ String [] res = super.makeScoringNames(); if(_output._vcov != null) res = ArrayUtils.append(res,"StdErr"); return res; } /** Score an already adapted frame. Returns a MetricBuilder that can be used to make a model metrics. 
* @param adaptFrm Already adapted frame * @return MetricBuilder */ @Override protected ModelMetrics.MetricBuilder scoreMetrics(Frame adaptFrm) { GLMScore gs = makeScoringTask(adaptFrm,false,null, true, CFuncRef.from(_parms._custom_metric_func));// doAll(names.length,Vec.T_NUM,adaptFrm); assert gs._dinfo._valid:"_valid flag should be set on data info when doing scoring"; return gs.doAll(gs._dinfo._adaptedFrame)._mb; } @Override public boolean haveMojo() { if (_parms.interactionSpec() == null) return super.haveMojo(); else return false; } @Override public boolean havePojo() { if (_parms.interactionSpec() == null && _parms._offset_column == null) return super.havePojo(); else return false; } @Override public GLMMojoWriter getMojo() { return new GLMMojoWriter(this); } private boolean isFeatureUsedInPredict(int featureIdx, double[] beta){ if (featureIdx < _output._dinfo._catOffsets.length - 1 && _output._column_types[featureIdx].equals("Enum")) { for (int i = _output._dinfo._catOffsets[featureIdx]; i < _output._dinfo._catOffsets[featureIdx + 1]; i++) { if (beta[i] != 0) return true; } return false; } else { featureIdx += _output._dinfo._numOffsets[0] - _output._dinfo._catOffsets.length + 1; } return beta[featureIdx] != 0; } @Override protected boolean isFeatureUsedInPredict(int featureIdx) { if (_parms._interactions != null) return true; if (_output.isMultinomialClassifier()) { for (double[] classBeta : _output._global_beta_multinomial) { if (isFeatureUsedInPredict(featureIdx, classBeta)) return true; } return false; } else { return isFeatureUsedInPredict(featureIdx, _output._global_beta); } } @Override protected Futures remove_impl(Futures fs, boolean cascade) { super.remove_impl(fs, cascade); Keyed.remove(_output._regression_influence_diagnostics, fs, cascade); return fs; } @Override protected AutoBuffer writeAll_impl(AutoBuffer ab) { if (_output._regression_influence_diagnostics != null) ab.putKey(_output._regression_influence_diagnostics); return super.writeAll_impl(ab); } @Override protected Keyed readAll_impl(AutoBuffer ab, Futures fs) { if (_output._regression_influence_diagnostics!= null) ab.getKey(_output._regression_influence_diagnostics, fs); return super.readAll_impl(ab, fs); } }
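The multinomial branch of score0 above turns the per-class linear predictors into a class distribution with a max-shifted softmax and writes the arg-max class into preds[0]. Below is a minimal standalone sketch of just that step (plain Java, no H2O types; the class and method names are illustrative only, not part of the H2O API):

import java.util.Arrays;

public class MultinomialSoftmaxSketch {
  /** etas = one linear predictor per class; returns [predictedClass, p_0, ..., p_{K-1}]. */
  static double[] toClassDistribution(double[] etas) {
    double[] preds = new double[etas.length + 1];
    double maxRow = 0;                                  // score0 starts the running max at 0
    for (double e : etas) if (e > maxRow) maxRow = e;
    double sumExp = 0;
    for (int c = 0; c < etas.length; ++c) {
      preds[c + 1] = Math.exp(etas[c] - maxRow);        // shift by the max for numerical stability
      sumExp += preds[c + 1];
    }
    int best = 0;
    for (int c = 0; c < etas.length; ++c) {
      preds[c + 1] /= sumExp;                           // normalize to probabilities
      if (preds[c + 1] > preds[best + 1]) best = c;
    }
    preds[0] = best;                                    // arg-max class, mirroring preds[0] = maxIndex(eta)
    return preds;
  }

  public static void main(String[] args) {
    System.out.println(Arrays.toString(toClassDistribution(new double[]{1.2, 0.3, -0.5})));
  }
}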
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/glm/GLMMojoWriter.java
package hex.glm; import hex.ModelMojoWriter; import java.io.IOException; public class GLMMojoWriter extends ModelMojoWriter<GLMModel, GLMModel.GLMParameters, GLMModel.GLMOutput> { @SuppressWarnings("unused") // Called through reflection in ModelBuildersHandler public GLMMojoWriter() {} public GLMMojoWriter(GLMModel model) { super(model); } @Override public String mojoVersion() { return "1.00"; } @Override protected void writeModelData() throws IOException { writekv("use_all_factor_levels", model._parms._use_all_factor_levels); writekv("cats", model.dinfo()._cats); writekv("cat_offsets", model.dinfo()._catOffsets); writekv("nums", model._output._dinfo._nums); boolean imputeMeans = model._parms.imputeMissing(); writekv("mean_imputation", imputeMeans); if (imputeMeans) { writekv("num_means", model.dinfo().numNAFill()); writekv("cat_modes", model.dinfo().catNAFill()); } writekv("beta", model.beta_internal()); writekv("family", model._parms._family); writekv("link", model._parms._link); if (GLMModel.GLMParameters.Family.tweedie.equals(model._parms._family)) writekv("tweedie_link_power", model._parms._tweedie_link_power); writekv("dispersion_estimated", (model._parms._compute_p_values ? model._parms._dispersion_estimated : 1.0)); } }
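GLMMojoWriter above serializes the coefficients, family/link, and imputation values into the MOJO. A typical way to consume such a MOJO is through the h2o-genmodel easy-predict wrapper; the following is a minimal usage sketch that assumes the standard h2o-genmodel API (MojoModel.load, EasyPredictModelWrapper) is on the classpath, with a placeholder file name and placeholder column names:

import hex.genmodel.MojoModel;
import hex.genmodel.easy.EasyPredictModelWrapper;
import hex.genmodel.easy.RowData;
import hex.genmodel.easy.prediction.RegressionModelPrediction;

public class GlmMojoScoringSketch {
  public static void main(String[] args) throws Exception {
    // "glm_model.zip", "x1" and "cat1" are placeholders for a real MOJO and its columns.
    EasyPredictModelWrapper model =
        new EasyPredictModelWrapper(MojoModel.load("glm_model.zip"));
    RowData row = new RowData();
    row.put("x1", "1.5");        // numeric values may be passed as strings or Doubles
    row.put("cat1", "levelA");   // categorical values are passed as level labels
    RegressionModelPrediction p = model.predictRegression(row);
    System.out.println("predicted value: " + p.value);
  }
}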
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/glm/GLMScore.java
package hex.glm; import hex.CMetricScoringTask; import hex.DataInfo; import hex.ModelMetrics; import water.Job; import water.MRTask; import water.MemoryManager; import water.fvec.Chunk; import water.fvec.NewChunk; import water.udf.CFuncRef; import water.util.ArrayUtils; import water.util.FrameUtils; import java.util.Arrays; /** * Created by tomas on 3/15/16. */ public class GLMScore extends CMetricScoringTask<GLMScore> { final GLMModel _m; final Job _j; ModelMetrics.MetricBuilder _mb; final DataInfo _dinfo; final boolean _sparse; final String[] _domain; final boolean _computeMetrics; final boolean _generatePredictions; transient double [][] _vcov; transient double [] _tmp; transient double [] _eta; final int _nclasses; private final double []_beta; private final double [][] _beta_multinomial; private final double _defaultThreshold; public GLMScore(Job j, GLMModel m, DataInfo dinfo, String[] domain, boolean computeMetrics, boolean generatePredictions, CFuncRef customMetric) { super(customMetric); _j = j; _m = m; _computeMetrics = computeMetrics; _sparse = FrameUtils.sparseRatio(dinfo._adaptedFrame) < .5; _domain = domain; _generatePredictions = generatePredictions; _m._parms = m._parms; _nclasses = m._output.nclasses(); if(_m._parms._family == GLMModel.GLMParameters.Family.multinomial || _m._parms._family == GLMModel.GLMParameters.Family.ordinal){ _beta = null; _beta_multinomial = m._output._global_beta_multinomial; } else { double [] beta = m.beta(); int [] ids = new int[beta.length-1]; int k = 0; for(int i = 0; i < beta.length-1; ++i){ // pick out beta that is not zero in ids if(beta[i] != 0) ids[k++] = i; } if(k < beta.length-1) { ids = Arrays.copyOf(ids,k); dinfo = dinfo.filterExpandedColumns(ids); double [] beta2 = MemoryManager.malloc8d(ids.length+1); int l = 0; for(int x:ids) beta2[l++] = beta[x]; beta2[l] = beta[beta.length-1]; beta = beta2; } _beta_multinomial = null; _beta = beta; } _dinfo = dinfo; _dinfo._valid = true; // marking dinfo as validation data set disables an assert on unseen levels (which should not happen in train) _defaultThreshold = m.defaultThreshold(); } public double [] scoreRow(DataInfo.Row r, double o, double [] preds) { int lastClass = _nclasses-1; if(_m._parms._family == GLMModel.GLMParameters.Family.ordinal) { // todo: change this to take various link func final double[][] bm = _beta_multinomial; Arrays.fill(preds,0); double previousCDF = 0.0; for (int cInd = 0; cInd < lastClass; cInd++) { double eta = r.innerProduct(bm[cInd]) + o; double currCDF = 1.0 / (1 + Math.exp(-eta)); preds[cInd + 1] = currCDF - previousCDF; previousCDF = currCDF; } preds[_nclasses] = 1-previousCDF; preds[0] = ArrayUtils.maxIndex(preds)-1; } else if (_m._parms._family == GLMModel.GLMParameters.Family.multinomial) { double[] eta = _eta; final double[][] bm = _beta_multinomial; double sumExp = 0; double maxRow = 0; for (int c = 0; c < bm.length; ++c) { eta[c] = r.innerProduct(bm[c]) + o; if(eta[c] > maxRow) maxRow = eta[c]; } for (int c = 0; c < bm.length; ++c) sumExp += eta[c] = Math.exp(eta[c]-maxRow); // intercept sumExp = 1.0 / sumExp; for (int c = 0; c < bm.length; ++c) preds[c + 1] = eta[c] * sumExp; preds[0] = ArrayUtils.maxIndex(eta); } else { double mu = _m._parms.linkInv(r.innerProduct(_beta) + o); if (_m._parms._family == GLMModel.GLMParameters.Family.binomial || _m._parms._family == GLMModel.GLMParameters.Family.quasibinomial || _m._parms._family == GLMModel.GLMParameters.Family.fractionalbinomial) { // threshold for prediction preds[0] = mu >= _defaultThreshold?1:0; 
preds[1] = 1.0 - mu; // class 0 preds[2] = mu; // class 1 } else preds[0] = mu; } return preds; } private void processRow(DataInfo.Row r, float [] res, double [] ps, NewChunk [] preds, int ncols) { if(_dinfo._responses != 0)res[0] = (float) r.response[0]; if (r.predictors_bad) { Arrays.fill(ps,Double.NaN); } else if(r.weight == 0) { Arrays.fill(ps,0); } else { scoreRow(r, r.offset, ps); if (_computeMetrics && !r.response_bad) { _mb.perRow(ps, res, r.weight, r.offset, _m); customMetricPerRow(ps, res, r.weight, r.offset, _m); } } if (_generatePredictions) { for (int c = 0; c < ncols; c++) // Output predictions; sized for train only (excludes extra test classes) preds[c].addNum(ps[c]); if(_vcov != null) { // compute standard error on prediction preds[ncols].addNum(Math.sqrt(r.innerProduct(r.mtrxMul(_vcov, _tmp)))); } } } public void map(Chunk[] chks, NewChunk[] preds) { if (isCancelled() || _j != null && _j.stop_requested()) return; if(_m._parms._family == GLMModel.GLMParameters.Family.multinomial || _m._parms._family == GLMModel.GLMParameters.Family.ordinal) _eta = MemoryManager.malloc8d(_nclasses); double[] ps; _vcov = _m._output._vcov; if(_generatePredictions){ if(_vcov != null){ _tmp = MemoryManager.malloc8d(_vcov.length); } } if (_computeMetrics) { _mb = _m.makeMetricBuilder(_domain); ps = _mb._work; // Sized for the union of test and train classes } else ps = new double[_m._output._nclasses+1]; float[] res = new float[1]; final int nc = _m._output.nclasses(); final int ncols = nc == 1 ? 1 : nc + 1; // Regression has 1 predict col; classification also has class distribution // compute if (_sparse) { for (DataInfo.Row r : _dinfo.extractSparseRows(chks)) processRow(r,res,ps,preds,ncols); } else { DataInfo.Row r = _dinfo.newDenseRow(); for (int rid = 0; rid < chks[0]._len; ++rid) { _dinfo.extractDenseRow(chks, rid, r); processRow(r,res,ps,preds,ncols); } } if (_j != null) _j.update(1); } @Override public void reduce(GLMScore bs) { super.reduce(bs); if (_mb != null) _mb.reduce(bs._mb); } @Override protected void postGlobal() { super.postGlobal(); if(_mb != null) { _mb.postGlobal(getComputedCustomMetric()); if (null != cFuncRef) _mb._CMetricScoringTask = (CMetricScoringTask) this; } } }
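The ordinal branch of scoreRow above builds class probabilities as differences of adjacent logistic CDFs of the cumulative-logit predictors, with the last class taking the remaining mass and the label set to the most probable class. A standalone sketch of that conversion (plain Java, logistic link only, no weights or offsets; names are illustrative):

import java.util.Arrays;

public class OrdinalScoringSketch {
  /** etas has one cumulative-logit predictor per threshold (nclasses - 1 of them);
      returns [predictedClass, p_0, ..., p_{K-1}]. */
  static double[] toClassDistribution(double[] etas) {
    int nclasses = etas.length + 1;
    double[] preds = new double[nclasses + 1];
    double previousCDF = 0.0;
    for (int c = 0; c < nclasses - 1; c++) {
      double currCDF = 1.0 / (1 + Math.exp(-etas[c]));   // logistic CDF at threshold c
      preds[c + 1] = currCDF - previousCDF;              // P(class == c)
      previousCDF = currCDF;
    }
    preds[nclasses] = 1 - previousCDF;                   // last class gets the remaining mass
    int best = 1;
    for (int c = 2; c <= nclasses; c++) if (preds[c] > preds[best]) best = c;
    preds[0] = best - 1;                                 // label, mirroring maxIndex(preds) - 1
    return preds;
  }

  public static void main(String[] args) {
    System.out.println(Arrays.toString(toClassDistribution(new double[]{-1.0, 0.5, 2.0})));
  }
}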
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/glm/GLMScoringInfo.java
package hex.glm; import hex.ScoringInfo; public class GLMScoringInfo extends ScoringInfo implements ScoringInfo.HasIterations { public int iterations; public int iterations() { return iterations; }; }
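GLMScoringInfo above only adds an iteration counter on top of ScoringInfo through the HasIterations interface. A small hedged sketch of how calling code can read that counter without depending on the GLM-specific subclass (the helper class and method names are made up; it assumes hex.ScoringInfo from h2o-core is on the classpath):

import hex.ScoringInfo;
import hex.glm.GLMScoringInfo;

public class ScoringInfoIterationsSketch {
  /** Illustrative helper, not an H2O API: read the iteration count when the subtype exposes one. */
  static int iterationsOf(ScoringInfo si) {
    return (si instanceof ScoringInfo.HasIterations)
        ? ((ScoringInfo.HasIterations) si).iterations()
        : -1;
  }

  public static void main(String[] args) {
    GLMScoringInfo si = new GLMScoringInfo();
    si.iterations = 7;
    System.out.println(iterationsOf(si));   // prints 7
  }
}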
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/glm/GLMTask.java
package hex.glm; import hex.DataInfo; import hex.DataInfo.Row; import hex.FrameTask2; import hex.glm.GLMModel.GLMParameters; import hex.glm.GLMModel.GLMParameters.Family; import hex.glm.GLMModel.GLMParameters.Link; import hex.glm.GLMModel.GLMWeights; import hex.glm.GLMModel.GLMWeightsFun; import hex.gram.Gram; import water.*; import water.H2O.H2OCountedCompleter; import water.fvec.C0DChunk; import water.fvec.Chunk; import water.util.ArrayUtils; import water.util.FrameUtils; import water.util.MathUtils; import water.util.MathUtils.BasicStats; import java.util.Arrays; import static hex.glm.GLMModel.GLMParameters.DispersionMethod.deviance; import static hex.glm.GLMModel.GLMParameters.Family.gaussian; import static hex.glm.GLMUtils.updateGradGam; import static hex.glm.GLMUtils.updateGradGamMultinomial; import static org.apache.commons.math3.special.Gamma.*; /** * All GLM related distributed tasks: * * YMUTask - computes response means on actual datasets (if some rows are ignored - e.g ignoring rows with NA and/or doing cross-validation) * GLMGradientTask - computes gradient at given Beta, used by L-BFGS, for KKT condition check * GLMLineSearchTask - computes residual deviance(s) at given beta(s), used by line search (both L-BFGS and IRLSM) * GLMIterationTask - used by IRLSM to compute Gram matrix and response t(X) W X, t(X)Wz * * @author tomasnykodym */ public abstract class GLMTask { final static double EPS=1e-10; static class NullDevTask extends MRTask<NullDevTask> { double _nullDev; final double [] _ymu; final GLMWeightsFun _glmf; final boolean _hasWeights; final boolean _hasOffset; public NullDevTask(GLMWeightsFun glmf, double [] ymu, boolean hasWeights, boolean hasOffset) { _glmf = glmf; _ymu = ymu; _hasWeights = hasWeights; _hasOffset = hasOffset; } @Override public void map(Chunk [] chks) { int i = 0; int len = chks[0]._len; Chunk w = _hasWeights?chks[i++]:new C0DChunk(1.0,len); Chunk o = _hasOffset?chks[i++]:new C0DChunk(0.0,len); Chunk r = chks[i]; if(_glmf._family != Family.multinomial) { double ymu = _glmf.link(_ymu[0]); for (int j = 0; j < len; ++j) _nullDev += w.atd(j)*_glmf.deviance(r.atd(j), _glmf.linkInv(ymu + o.atd(j))); } else { throw H2O.unimpl(); } } @Override public void reduce(NullDevTask ndt) {_nullDev += ndt._nullDev;} } static class GLMResDevTask extends FrameTask2<GLMResDevTask> { final GLMWeightsFun _glmf; final double [] _beta; double _resDev = 0; long _nobs; double _likelihood; public GLMResDevTask(Key jobKey, DataInfo dinfo,GLMParameters parms, double [] beta) { super(null,dinfo, jobKey); _glmf = new GLMWeightsFun(parms); _beta = beta; _sparseOffset = _sparse?GLM.sparseOffset(_beta,_dinfo):0; } private transient GLMWeights _glmw; private final double _sparseOffset; @Override public boolean handlesSparseData(){return true;} @Override public void chunkInit() { _glmw = new GLMWeights(); } @Override protected void processRow(Row r) { _glmf.computeWeights(r.response(0), r.innerProduct(_beta) + _sparseOffset, r.offset, r.weight, _glmw); _resDev += _glmw.dev; _likelihood += _glmw.l; ++_nobs; } @Override public void reduce(GLMResDevTask gt) {_nobs += gt._nobs; _resDev += gt._resDev; _likelihood += gt._likelihood;} public double avgDev(){return _resDev/_nobs;} public double dev(){return _resDev;} } static class GLMResDevTaskOrdinal extends FrameTask2<GLMResDevTaskOrdinal> { final double [][] _beta; double _likelihood; final int _nclasses; final int _lastClass; final int _secondToLast; long _nobs; public GLMResDevTaskOrdinal(Key jobKey, DataInfo dinfo, double [] beta, int 
nclasses) { super(null,dinfo, jobKey); _beta = ArrayUtils.convertTo2DMatrix(beta,beta.length/nclasses); _nclasses = nclasses; _lastClass = nclasses-1; _secondToLast = _lastClass - 1; } @Override public boolean handlesSparseData(){return true;} private transient double [] _sparseOffsets; @Override public void chunkInit() { _sparseOffsets = MemoryManager.malloc8d(_nclasses); if(_sparse) for(int c = 0; c < _nclasses; ++c) _sparseOffsets[c] = GLM.sparseOffset(_beta[c],_dinfo); } @Override protected void processRow(Row r) { _nobs++; int c = (int)r.response(0); // true response category if (c==0) { // for category 0 double eta = r.innerProduct(_beta[0])+ _sparseOffsets[c]; _likelihood -= r.weight * (eta-Math.log(1+Math.exp(eta))); } else if (c==_lastClass) { // for class nclass-1 _likelihood += r.weight * Math.log(1+Math.exp(r.innerProduct(_beta[_secondToLast])+ _sparseOffsets[c])); } else { // for category from 1 to nclass-2 double eta = Math.exp(r.innerProduct(_beta[c])+_sparseOffsets[c]); double etaM1 = Math.exp(r.innerProduct(_beta[c])+_sparseOffsets[c-1]); _likelihood -= r.weight * Math.log(eta/(1+eta)-etaM1/(1+etaM1)); } } @Override public void reduce(GLMResDevTaskOrdinal gt) {_nobs += gt._nobs; _likelihood += gt._likelihood;} public double avgDev(){return _likelihood*2/_nobs;} public double dev(){return _likelihood*2;} } static class GLMResDevTaskMultinomial extends FrameTask2<GLMResDevTaskMultinomial> { final double [][] _beta; double _likelihood; final int _nclasses; long _nobs; public GLMResDevTaskMultinomial(Key jobKey, DataInfo dinfo, double [] beta, int nclasses) { super(null,dinfo, jobKey); _beta = ArrayUtils.convertTo2DMatrix(beta,beta.length/nclasses); _nclasses = nclasses; } @Override public boolean handlesSparseData(){return true;} private transient double [] _sparseOffsets; @Override public void chunkInit() { _sparseOffsets = MemoryManager.malloc8d(_nclasses); if(_sparse) for(int c = 0; c < _nclasses; ++c) _sparseOffsets[c] = GLM.sparseOffset(_beta[c],_dinfo); } @Override protected void processRow(Row r) { _nobs++; double sumExp = 0; for(int c = 0; c < _nclasses; ++c) sumExp += Math.exp(r.innerProduct(_beta[c]) + _sparseOffsets[c]); int c = (int)r.response(0); _likelihood -= r.weight * ((r.innerProduct(_beta[c]) + _sparseOffsets[c]) - Math.log(sumExp)); } @Override public void reduce(GLMResDevTaskMultinomial gt) {_nobs += gt._nobs; _likelihood += gt._likelihood;} public double avgDev(){return _likelihood*2/_nobs;} public double dev(){return _likelihood*2;} } static class WeightedSDTask extends MRTask<WeightedSDTask> { final int _weightId; final double [] _mean; public double [] _varSum; public WeightedSDTask(int wId, double [] mean){ _weightId = wId; _mean = mean; } @Override public void map(Chunk [] chks){ double [] weights = null; if(_weightId != - 1){ weights = MemoryManager.malloc8d(chks[_weightId]._len); chks[_weightId].getDoubles(weights,0,weights.length); chks = ArrayUtils.remove(chks,_weightId); } _varSum = MemoryManager.malloc8d(_mean.length); double [] vals = MemoryManager.malloc8d(chks[0]._len); int [] ids = MemoryManager.malloc4(chks[0]._len); for(int c = 0; c < _mean.length; ++c){ double mu = _mean[c]; int n = chks[c].getSparseDoubles(vals,ids); double s = 0; for(int i = 0; i < n; ++i) { double d = vals[i]; if(Double.isNaN(d)) // NAs are either skipped or replaced with mean (i.e. 
can also be skipped) continue; d = d - mu; if(_weightId != -1) s += weights[ids[i]]*d*d; else s += d*d; } _varSum[c] = s; } } public void reduce(WeightedSDTask t){ ArrayUtils.add(_varSum,t._varSum); } } static public class YMUTask extends MRTask<YMUTask> { double _yMin = Double.POSITIVE_INFINITY, _yMax = Double.NEGATIVE_INFINITY; final int _responseId; final int _weightId; final int _offsetId; final int _nums; // number of numeric columns final int _numOff; final boolean _skipNAs; final boolean _computeWeightedMeanSigmaResponse; private BasicStats _basicStats; private BasicStats _basicStatsResponse; double [] _yMu; final int _nClasses; private double [] _predictorSDs; private final boolean _expandedResponse; // true iff family == multinomial and response has been maually expanded into binary columns public double [] predictorMeans(){return _basicStats.mean();} public double [] predictorSDs(){ if(_predictorSDs != null) return _predictorSDs; return (_predictorSDs = _basicStats.sigma()); } public double [] responseMeans(){return _basicStatsResponse.mean();} public double [] responseSDs(){ return _basicStatsResponse.sigma(); } public YMUTask(DataInfo dinfo, int nclasses, boolean computeWeightedMeanSigmaResponse, boolean skipNAs, boolean haveResponse, boolean expandedResponse) { _nums = dinfo._nums; _numOff = dinfo._cats; _responseId = haveResponse ? dinfo.responseChunkId(0) : -1; _weightId = dinfo._weights?dinfo.weightChunkId():-1; _offsetId = dinfo._offset?dinfo.offsetChunkId():-1; _nClasses = nclasses; _computeWeightedMeanSigmaResponse = computeWeightedMeanSigmaResponse; _skipNAs = skipNAs; _expandedResponse = _nClasses == 1 || expandedResponse; } @Override public void setupLocal(){} @Override public void map(Chunk [] chunks) { _yMu = new double[_nClasses]; double [] ws = MemoryManager.malloc8d(chunks[0].len()); if(_weightId != -1) chunks[_weightId].getDoubles(ws,0,ws.length); else Arrays.fill(ws,1); boolean changedWeights = false; if(_skipNAs) { // first find the rows to skip, need to go over all chunks including categoricals double [] vals = MemoryManager.malloc8d(chunks[0]._len); int [] ids = MemoryManager.malloc4(vals.length); for (int i = 0; i < chunks.length; ++i) { int n = vals.length; if(chunks[i].isSparseZero()) n = chunks[i].getSparseDoubles(vals,ids); else chunks[i].getDoubles(vals,0,n); for (int r = 0; r < n; ++r) { if (ws[r] != 0 && Double.isNaN(vals[r])) { ws[r] = 0; changedWeights = true; } } } if(changedWeights && _weightId != -1) chunks[_weightId].set(ws); } Chunk response = _responseId < 0 ? 
null : chunks[_responseId]; double [] numsResponse = null; _basicStats = new BasicStats(_nums); if(_computeWeightedMeanSigmaResponse) { _basicStatsResponse = new BasicStats(_nClasses); numsResponse = MemoryManager.malloc8d(_nClasses); } // compute basic stats for numeric predictors for(int i = 0; i < _nums; ++i) { Chunk c = chunks[i + _numOff]; double w; for (int r = c.nextNZ(-1); r < c._len; r = c.nextNZ(r)) { if ((w = ws[r]) == 0) continue; double d = c.atd(r); _basicStats.add(d, w, i); } } if (response == null) return; long nobs = 0; double wsum = 0; for(double w:ws) { if(w != 0)++nobs; wsum += w; } _basicStats.setNobs(nobs,wsum); // compute the mean for the response // autoexpand categoricals into binary vecs for(int r = 0; r < response._len; ++r) { double w; if((w = ws[r]) == 0) continue; if(_computeWeightedMeanSigmaResponse) { //FIXME: Add support for subtracting offset from response if(_expandedResponse) { for (int i = 0; i < _nClasses; ++i) numsResponse[i] = chunks[chunks.length - _nClasses + i].atd(r); } else { Arrays.fill(numsResponse,0); double d = response.atd(r); if(Double.isNaN(d)) Arrays.fill(numsResponse,Double.NaN); else numsResponse[(int)d] = 1; } _basicStatsResponse.add(numsResponse,w); } double d = response.atd(r); if(!Double.isNaN(d)) { if (_nClasses > 2) _yMu[(int) d] += w; else _yMu[0] += w*d; if (d < _yMin) _yMin = d; if (d > _yMax) _yMax = d; } } if(_basicStatsResponse != null)_basicStatsResponse.setNobs(nobs,wsum); for(int i = 0; i < _nums; ++i) { if(chunks[i+_numOff].isSparseZero()) _basicStats.fillSparseZeros(i); else if(chunks[i+_numOff].isSparseNA()) _basicStats.fillSparseNAs(i); } } @Override public void postGlobal() { ArrayUtils.mult(_yMu,1.0/_basicStats._wsum); } @Override public void reduce(YMUTask ymt) { if(ymt._basicStats.nobs() > 0 && ymt._basicStats.nobs() > 0) { ArrayUtils.add(_yMu,ymt._yMu); if(_yMin > ymt._yMin) _yMin = ymt._yMin; if(_yMax < ymt._yMax) _yMax = ymt._yMax; _basicStats.reduce(ymt._basicStats); if(_computeWeightedMeanSigmaResponse) _basicStatsResponse.reduce(ymt._basicStatsResponse); } else if (_basicStats.nobs() == 0) { _yMu = ymt._yMu; _yMin = ymt._yMin; _yMax = ymt._yMax; _basicStats = ymt._basicStats; _basicStatsResponse = ymt._basicStatsResponse; } } public double wsum() {return _basicStats._wsum;} public long nobs() {return _basicStats.nobs();} } static double computeMultinomialEtas(double [] etas, double [] exps) { double maxRow = ArrayUtils.maxValue(etas); double sumExp = 0; int K = etas.length; for(int c = 0; c < K; ++c) { double x = Math.exp(etas[c] - maxRow); sumExp += x; exps[c+1] = x; } double reg = 1.0/(sumExp); for(int c = 0; c < etas.length; ++c) exps[c+1] *= reg; exps[0] = 0; exps[0] = ArrayUtils.maxIndex(exps)-1; return Math.log(sumExp) + maxRow; } static abstract class GLMGradientTask extends MRTask<GLMGradientTask> { final double [] _beta; public double [] _gradient; public double _likelihood; final transient double _currentLambda; final transient double _reg; protected final DataInfo _dinfo; public double[][][] _penalty_mat; // for gam only public int[][] _gamBetaIndices; // for gam only protected GLMGradientTask(Key jobKey, DataInfo dinfo, double reg, double lambda, double[] beta){ _dinfo = dinfo; _beta = beta.clone(); _reg = reg; _currentLambda = lambda; } protected GLMGradientTask(Key jobKey, DataInfo dinfo, double reg, double lambda, double[] beta, double[][][] penaltyMat, int[][] gamBetaInd){ this(jobKey, dinfo, reg, lambda, beta); _penalty_mat = penaltyMat; _gamBetaIndices = gamBetaInd; } protected abstract 
void computeGradientMultipliers(double [] es, double [] ys, double [] ws); private final void computeCategoricalEtas(Chunk [] chks, double [] etas, double [] vals, int [] ids) { // categoricals for(int cid = 0; cid < _dinfo._cats; ++cid){ Chunk c = chks[cid]; if(c.isSparseZero()) { int nvals = c.getSparseDoubles(vals,ids,-1); for(int i = 0; i < nvals; ++i){ int id = _dinfo.getCategoricalId(cid,(int)vals[i]); if(id >=0) etas[ids[i]] += _beta[id]; } } else { c.getIntegers(ids, 0, c._len,-1); for(int i = 0; i < ids.length; ++i){ int id = _dinfo.getCategoricalId(cid,ids[i]); if(id >=0) etas[i] += _beta[id]; } } } } private final void computeCategoricalGrads(Chunk [] chks, double [] etas, double [] vals, int [] ids) { // categoricals for(int cid = 0; cid < _dinfo._cats; ++cid){ Chunk c = chks[cid]; if(c.isSparseZero()) { int nvals = c.getSparseDoubles(vals,ids,-1); for(int i = 0; i < nvals; ++i){ int id = _dinfo.getCategoricalId(cid,(int)vals[i]); if(id >=0) _gradient[id] += etas[ids[i]]; } } else { c.getIntegers(ids, 0, c._len,-1); for(int i = 0; i < ids.length; ++i){ int id = _dinfo.getCategoricalId(cid,ids[i]); if(id >=0) _gradient[id] += etas[i]; } } } } private final void computeNumericEtas(Chunk [] chks, double [] etas, double [] vals, int [] ids) { int numOff = _dinfo.numStart(); for(int cid = 0; cid < _dinfo._nums; ++cid){ double scale = _dinfo._normMul != null?_dinfo._normMul[cid]:1; double off = _dinfo._normSub != null?_dinfo._normSub[cid]:0; double NA = _dinfo._numNAFill[cid]; Chunk c = chks[cid+_dinfo._cats]; double b = scale*_beta[numOff+cid]; if(c.isSparseZero()){ int nvals = c.getSparseDoubles(vals,ids,NA); for(int i = 0; i < nvals; ++i) etas[ids[i]] += vals[i] * b; } else if(c.isSparseNA()){ int nvals = c.getSparseDoubles(vals,ids,NA); for(int i = 0; i < nvals; ++i) etas[ids[i]] += (vals[i] - off) * b; } else { c.getDoubles(vals,0,vals.length,NA); for(int i = 0; i < vals.length; ++i) etas[i] += (vals[i] - off) * b; } } } private final void computeNumericGrads(Chunk [] chks, double [] etas, double [] vals, int [] ids) { int numOff = _dinfo.numStart(); for(int cid = 0; cid < _dinfo._nums; ++cid){ double NA = _dinfo._numNAFill[cid]; Chunk c = chks[cid+_dinfo._cats]; double scale = _dinfo._normMul == null?1:_dinfo._normMul[cid]; double offset = _dinfo._normSub == null?0:_dinfo._normSub[cid]; if(c.isSparseZero()){ double g = 0; int nVals = c.getSparseDoubles(vals,ids,NA); for(int i = 0; i < nVals; ++i) g += (vals[i]-offset)*scale*etas[ids[i]]; _gradient[numOff+cid] = g; } else if(c.isSparseNA()){ double off = _dinfo._normSub == null?0:_dinfo._normSub[cid]; double g = 0; int nVals = c.getSparseDoubles(vals,ids,NA); for(int i = 0; i < nVals; ++i) g += (vals[i]-off)*scale*etas[ids[i]]; _gradient[numOff+cid] = g; } else { double off = _dinfo._normSub == null?0:_dinfo._normSub[cid]; c.getDoubles(vals,0,vals.length,NA); double g = 0; for(int i = 0; i < vals.length; ++i) g += (vals[i]-off)*scale*etas[i]; _gradient[numOff+cid] = g; } } } public void map(Chunk [] chks) { _gradient = MemoryManager.malloc8d(_beta.length); Chunk response = chks[chks.length-_dinfo._responses]; Chunk weights = _dinfo._weights?chks[_dinfo.weightChunkId()]:new C0DChunk(1,response._len); double [] ws = weights.getDoubles(MemoryManager.malloc8d(weights._len),0,weights._len); double [] ys = response.getDoubles(MemoryManager.malloc8d(weights._len),0,response._len); double [] etas = MemoryManager.malloc8d(response._len); if(_dinfo._offset) chks[_dinfo.offsetChunkId()].getDoubles(etas,0,etas.length); double sparseOffset = 
0; int numStart = _dinfo.numStart(); if(_dinfo._normSub != null) for(int i = 0; i < _dinfo._nums; ++i) if(chks[_dinfo._cats + i].isSparseZero()) sparseOffset -= _beta[numStart + i]*_dinfo._normSub[i]*_dinfo._normMul[i]; ArrayUtils.add(etas,sparseOffset + _beta[_beta.length-1]); double [] vals = MemoryManager.malloc8d(response._len); int [] ids = MemoryManager.malloc4(response._len); computeCategoricalEtas(chks,etas,vals,ids); computeNumericEtas(chks,etas,vals,ids); computeGradientMultipliers(etas,ys,ws); // walk the chunks again, add to the gradient computeCategoricalGrads(chks,etas,vals,ids); computeNumericGrads(chks,etas,vals,ids); // add intercept _gradient[_gradient.length-1] = ArrayUtils.sum(etas); if(_dinfo._normSub != null) { double icpt = _gradient[_gradient.length-1]; for(int i = 0; i < _dinfo._nums; ++i) { if(chks[_dinfo._cats+i].isSparseZero()) { double d = _dinfo._normSub[i] * _dinfo._normMul[i]; _gradient[numStart + i] -= d * icpt; } } } } @Override public final void reduce(GLMGradientTask gmgt){ ArrayUtils.add(_gradient,gmgt._gradient); _likelihood += gmgt._likelihood; } @Override public final void postGlobal(){ ArrayUtils.mult(_gradient,_reg); // reg is obj_reg for(int j = 0; j < _beta.length - 1; ++j) _gradient[j] += _currentLambda * _beta[j]; // add L2 constraint for gradient if ((_penalty_mat != null) && (_gamBetaIndices != null)) // update contribution from gam smoothness constraint updateGradGam(_gradient, _penalty_mat, _gamBetaIndices, _beta, _dinfo._activeCols); } } static class GLMGenericGradientTask extends GLMGradientTask { private final GLMWeightsFun _glmf; public GLMGenericGradientTask(Key jobKey, DataInfo dinfo, GLMParameters parms, double lambda, double[] beta) { super(jobKey, dinfo, parms._obj_reg, lambda, beta); _glmf = new GLMWeightsFun(parms); } public GLMGenericGradientTask(Key jobKey, DataInfo dinfo, GLMParameters parms, double lambda, double[] beta, double[][][] penaltyMat, int[][] gamCols) { super(jobKey, dinfo, parms._obj_reg, lambda, beta, penaltyMat, gamCols); _glmf = new GLMWeightsFun(parms); } @Override protected void computeGradientMultipliers(double [] es, double [] ys, double [] ws){ double l = 0; for(int i = 0; i < es.length; ++i) { if (Double.isNaN(ys[i]) || ws[i] == 0) { es[i] = 0; } else { double mu = _glmf.linkInv(es[i]); mu = mu==0?hex.glm.GLMModel._EPS:mu; l += ws[i] * _glmf.likelihood(ys[i], mu); double var = _glmf.variance(mu); if (var < hex.glm.GLMModel._EPS) var = hex.glm.GLMModel._EPS; // es is the gradient without the predictor term if (_glmf._family.equals(Family.tweedie)) { _glmf._oneOeta = 1.0/(es[i]==0?hex.glm.GLMModel._EPS:es[i]); _glmf._oneOetaSquare = _glmf._oneOeta*_glmf._oneOeta; es[i] = ws[i]*_glmf.linkInvDeriv(mu)*(_glmf._var_power==1?(1-ys[i]/mu): (_glmf._var_power==2?(1/mu-ys[i]*Math.pow(mu, -_glmf._var_power)): (Math.pow(mu, _glmf._oneMinusVarPower)-ys[i]*Math.pow(mu, -_glmf._var_power)))); } else { es[i] = ws[i] * (mu - ys[i]) / (var * _glmf.linkDeriv(mu)); } } } _likelihood = l; } } static class GLMPoissonGradientTask extends GLMGradientTask { private final GLMWeightsFun _glmf; public GLMPoissonGradientTask(Key jobKey, DataInfo dinfo, GLMParameters parms, double lambda, double[] beta) { super(jobKey, dinfo, parms._obj_reg, lambda, beta); _glmf = new GLMWeightsFun(parms); } public GLMPoissonGradientTask(Key jobKey, DataInfo dinfo, GLMParameters parms, double lambda, double[] beta, double[][][] penaltyMat, int[][] gamCols) { super(jobKey, dinfo, parms._obj_reg, lambda, beta, penaltyMat, gamCols); _glmf = new 
GLMWeightsFun(parms); } @Override protected void computeGradientMultipliers(double [] es, double [] ys, double [] ws){ double l = 0; for(int i = 0; i < es.length; ++i) { if (Double.isNaN(ys[i]) || ws[i] == 0) { es[i] = 0; } else { double eta = es[i]; double mu = Math.exp(eta); double yr = ys[i]; double diff = mu - yr; l += ws[i] * (yr == 0?mu:yr*Math.log(yr/mu) + diff); // todo: looks wrong to me... double check es[i] = ws[i]*diff; } } _likelihood = 2*l; } } static class GLMNegativeBinomialGradientTask extends GLMGradientTask { private final GLMWeightsFun _glmf; public GLMNegativeBinomialGradientTask(Key jobKey, DataInfo dinfo, GLMParameters parms, double lambda, double[] beta) { super(jobKey, dinfo, parms._obj_reg, lambda, beta); _glmf = new GLMWeightsFun(parms); } public GLMNegativeBinomialGradientTask(Key jobKey, DataInfo dinfo, GLMParameters parms, double lambda, double[] beta, double[][][] penaltyMat, int[][] gamCols) { super(jobKey, dinfo, parms._obj_reg, lambda, beta, penaltyMat, gamCols); _glmf = new GLMWeightsFun(parms); } @Override protected void computeGradientMultipliers(double [] es, double [] ys, double [] ws) { double l = 0; for(int i = 0; i < es.length; ++i) { if (Double.isNaN(ys[i]) || ws[i] == 0) { es[i] = 0; } else { double eta = es[i]; double mu = _glmf.linkInv(eta); double yr = ys[i]; if ((mu > 0) && (yr > 0)) { // response and predictions are all nonzeros double invSum =1.0/(1.0+mu*_glmf._theta); double muDeriv = _glmf.linkInvDeriv(mu); es[i] = ws[i] * (invSum-yr/mu+_glmf._theta*yr*invSum) * muDeriv; // gradient of -llh. CHECKED-log/CHECKED-identity l -= ws[i] * (sumOper(yr, _glmf._invTheta, 0)-(yr+_glmf._invTheta)*Math.log(1+_glmf._theta*mu)+ yr*Math.log(mu)+yr*Math.log(_glmf._theta)); // store the -llh, with everything. CHECKED-Log/CHECKED-identity } else if ((mu > 0) && (yr==0)) { es[i] = ws[i]*(_glmf.linkInvDeriv(mu)/(1.0+_glmf._theta*mu)); // CHECKED-log/CHECKED-identity l += _glmf._invTheta*Math.log(1+_glmf._theta*mu); //CHECKED-log/CHECKED-identity } // no update otherwise } } _likelihood = l; } } static double sumOper(double y, double multiplier, int opVal) { double summation = 0.0; if (opVal == 0){ return logGamma(y + multiplier) - logGamma(multiplier) - logGamma(y + 1); } for (int val = 0; val < y; val++) { double temp = 1.0/(val*multiplier*multiplier+multiplier); summation += opVal==1?temp:(opVal==2?temp*temp*(2*val*multiplier+1):Math.log(multiplier+val)); } return summation; } static class GLMQuasiBinomialGradientTask extends GLMGradientTask { private final GLMWeightsFun _glmf; public GLMQuasiBinomialGradientTask(Key jobKey, DataInfo dinfo, GLMParameters parms, double lambda, double[] beta) { super(jobKey, dinfo, parms._obj_reg, lambda, beta); _glmf = new GLMWeightsFun(parms); } public GLMQuasiBinomialGradientTask(Key jobKey, DataInfo dinfo, GLMParameters parms, double lambda, double[] beta, double[][][] penaltyMat, int[][] gamCols) { super(jobKey, dinfo, parms._obj_reg, lambda, beta, penaltyMat, gamCols); _glmf = new GLMWeightsFun(parms); } @Override protected void computeGradientMultipliers(double [] es, double [] ys, double [] ws){ double l = 0; for(int i = 0; i < es.length; ++i){ double p = _glmf.linkInv(es[i]); if(p == 0) p = 1e-15; if(p == 1) p = 1 - 1e-15; es[i] = -ws[i]*(ys[i]-p); l += ys[i]*Math.log(p) + (1-ys[i])*Math.log(1-p); } _likelihood = -l; } } static class GLMBinomialGradientTask extends GLMGradientTask { public GLMBinomialGradientTask(Key jobKey, DataInfo dinfo, GLMParameters parms, double lambda, double [] beta) { 
super(jobKey,dinfo,parms._obj_reg,lambda,beta); assert (parms._family == Family.binomial && parms._link == Link.logit) || (parms._family == Family.fractionalbinomial && parms._link == Link.logit); } public GLMBinomialGradientTask(Key jobKey, DataInfo dinfo, GLMParameters parms, double lambda, double [] beta, double[][][] penaltyMat, int[][] gamCol) { super(jobKey,dinfo,parms._obj_reg,lambda,beta, penaltyMat, gamCol); assert (parms._family == Family.binomial && parms._link == Link.logit) || (parms._family == Family.fractionalbinomial && parms._link == Link.logit); } @Override protected void computeGradientMultipliers(double[] es, double[] ys, double[] ws) { for(int i = 0; i < es.length; ++i) { if(Double.isNaN(ys[i]) || ws[i] == 0){es[i] = 0; continue;} double e = es[i], w = ws[i]; double yr = ys[i]; double ym = 1.0 / (Math.exp(-e) + 1.0); if(ym != yr) _likelihood += w*((MathUtils.y_log_y(yr, ym)) + MathUtils.y_log_y(1 - yr, 1 - ym)); es[i] = ws[i] * (ym - yr); } } } public static class GLMGaussianGradientTask extends GLMGradientTask { public GLMGaussianGradientTask(Key jobKey, DataInfo dinfo, GLMParameters parms, double lambda, double [] beta) { super(jobKey,dinfo,parms._obj_reg,lambda,beta); assert parms._family == gaussian && parms._link == Link.identity; } public GLMGaussianGradientTask(Key jobKey, DataInfo dinfo, GLMParameters parms, double lambda, double [] beta, double[][][] penaltyMat, int[][] gamCol) { super(jobKey,dinfo,parms._obj_reg,lambda,beta, penaltyMat, gamCol); assert parms._family == gaussian && parms._link == Link.identity; } @Override protected void computeGradientMultipliers(double[] es, double[] ys, double[] ws) { for(int i = 0; i < es.length; ++i) { double w = ws[i]; if(w == 0 || Double.isNaN(ys[i])){ es[i] = 0; continue; } double e = es[i], y = ys[i]; double d = (e-y); double wd = w*d; _likelihood += wd*d; es[i] = wd; } } } static class GLMMultinomialLikelihoodTask extends GLMMultinomialGradientBaseTask { public GLMMultinomialLikelihoodTask(Job job, DataInfo dinfo, double lambda, double[][] beta, double reg) { super(job, dinfo, lambda, beta, reg); } public GLMMultinomialLikelihoodTask(Job job, DataInfo dinfo, double lambda, double[][] beta, GLMParameters glmp) { super(job, dinfo, lambda, beta, glmp); } @Override public void calMultipliersNGradients(double[][] etas, double[][] etasOffset, double[] ws, double[] vals, int[] ids, Chunk response, Chunk[] chks, int M, int P, int numStart) { computeGradientMultipliers(etas, response.getDoubles(vals, 0, M), ws); } } // share between multinomial and ordinal regression public static abstract class GLMMultinomialGradientBaseTask extends MRTask<GLMMultinomialGradientBaseTask> { final double[][] _beta; final transient double _currentLambda; final transient double _reg; public double[][] _gradient; // this is arranged such that we have double[ncoeff][nclass] double _likelihood; Job _job; final boolean _sparse; final DataInfo _dinfo; // parameters used by ordinal regression Link _link; // link function, e.g. 
ologit, ologlog, oprobit GLMParameters _glmp; // parameter used to access linkinv and linkinvderiv functions int _secondToLast; // denote class label nclass-2 int _theLast; // denote class label nclass-1 int _interceptId; // index of offset/intercept in double[][] _beta double[][][] _penaltyMat; int[][] _gamBetaIndices; /** * @param job * @param dinfo * @param lambda * @param beta coefficients as 2D array [P][K] * @param reg */ public GLMMultinomialGradientBaseTask(Job job, DataInfo dinfo, double lambda, double[][] beta, double reg) { _currentLambda = lambda; _reg = reg; // need to flip the beta _beta = new double[beta[0].length][beta.length]; for (int i = 0; i < _beta.length; ++i) for (int j = 0; j < _beta[i].length; ++j) _beta[i][j] = beta[j][i]; _job = job; _sparse = FrameUtils.sparseRatio(dinfo._adaptedFrame) < .125; _dinfo = dinfo; if (_dinfo._offset) throw H2O.unimpl(); } public GLMMultinomialGradientBaseTask(Job job, DataInfo dinfo, double lambda, double[][] beta, GLMParameters glmp) { this(job, dinfo, lambda, beta, glmp._obj_reg); _theLast = beta.length - 1; // initialize ordinal regression parameters _secondToLast = _theLast - 1; _interceptId = _beta.length - 1; _link = glmp._link; _glmp = glmp; } public GLMMultinomialGradientBaseTask(Job job, DataInfo dinfo, double lambda, double[][] beta, GLMParameters glmp, double[][][] penaltyMat, int[][] gamCols) { this(job, dinfo, lambda, beta, glmp._obj_reg); _theLast = beta.length - 1; // initialize ordinal regression parameters _secondToLast = _theLast - 1; _interceptId = _beta.length - 1; _link = glmp._link; _glmp = glmp; _penaltyMat = penaltyMat; _gamBetaIndices = gamCols; } // common between multinomial and ordinal public final void computeCategoricalEtas(Chunk[] chks, double[][] etas, double[] vals, int[] ids) { // categoricals for (int cid = 0; cid < _dinfo._cats; ++cid) { Chunk c = chks[cid]; if (c.isSparseZero()) { int nvals = c.getSparseDoubles(vals, ids, -1); for (int i = 0; i < nvals; ++i) { int id = _dinfo.getCategoricalId(cid, (int) vals[i]); if (id >= 0) ArrayUtils.add(etas[ids[i]], _beta[id]); } } else { c.getIntegers(ids, 0, c._len, -1); for (int i = 0; i < ids.length; ++i) { int id = _dinfo.getCategoricalId(cid, ids[i]); if (id >= 0) ArrayUtils.add(etas[i], _beta[id]); } } } } public final void computeCategoricalGrads(Chunk[] chks, double[][] etas, double[] vals, int[] ids) { // categoricals for (int cid = 0; cid < _dinfo._cats; ++cid) { Chunk c = chks[cid]; if (c.isSparseZero()) { int nvals = c.getSparseDoubles(vals, ids, -1); for (int i = 0; i < nvals; ++i) { int id = _dinfo.getCategoricalId(cid, (int) vals[i]); if (id >= 0) ArrayUtils.add(_gradient[id], etas[ids[i]]); } } else { c.getIntegers(ids, 0, c._len, -1); for (int i = 0; i < ids.length; ++i) { int id = _dinfo.getCategoricalId(cid, ids[i]); if (id >= 0) ArrayUtils.add(_gradient[id], etas[i]); } } } } public final void computeNumericEtas(Chunk[] chks, double[][] etas, double[] vals, int[] ids) { int numOff = _dinfo.numStart(); for (int cid = 0; cid < _dinfo._nums; ++cid) { double[] b = _beta[numOff + cid]; double scale = _dinfo._normMul != null ? _dinfo._normMul[cid] : 1; double NA = _dinfo._numNAFill[cid]; Chunk c = chks[cid + _dinfo._cats]; if (c.isSparseZero() || c.isSparseNA()) { int nvals = c.getSparseDoubles(vals, ids, NA); for (int i = 0; i < nvals; ++i) { double d = vals[i] * scale; ArrayUtils.wadd(etas[ids[i]], b, d); } } else { c.getDoubles(vals, 0, vals.length, NA); double off = _dinfo._normSub != null ? 
_dinfo._normSub[cid] : 0; for (int i = 0; i < vals.length; ++i) { double d = (vals[i] - off) * scale; ArrayUtils.wadd(etas[i], b, d); } } } } public final void computeNumericGrads(Chunk[] chks, double[][] etas, double[] vals, int[] ids) { int numOff = _dinfo.numStart(); for (int cid = 0; cid < _dinfo._nums; ++cid) { double[] g = _gradient[numOff + cid]; double NA = _dinfo._numNAFill[cid]; Chunk c = chks[cid + _dinfo._cats]; double scale = _dinfo._normMul == null ? 1 : _dinfo._normMul[cid]; if (c.isSparseZero() || c.isSparseNA()) { int nVals = c.getSparseDoubles(vals, ids, NA); for (int i = 0; i < nVals; ++i) ArrayUtils.wadd(g, etas[ids[i]], vals[i] * scale); } else { double off = _dinfo._normSub == null ? 0 : _dinfo._normSub[cid]; c.getDoubles(vals, 0, vals.length, NA); for (int i = 0; i < vals.length; ++i) { ArrayUtils.wadd(g, etas[i], (vals[i] - off) * scale); } } } } // This method will compute the multipliers for gradient calculation of the betas in etas and for // the intercepts in etas_offsets for each row of data final void computeGradientMultipliersLH(double [][] etas, double [][] etasOffset, double [] ys, double [] ws) { int K = _beta[0].length; // number of class double[] tempEtas = new double[K]; // store original etas int y; // get and store response class category double yJ, yJm1; for (int row = 0; row < etas.length; row++) { // calculate the multiplier from each row double w = ws[row]; if (w==0) { Arrays.fill(etas[row], 0); // zero out etas for current row continue; } // note that, offset is different for all class, beta is shared for all classes System.arraycopy(etas[row], 0, tempEtas, 0, K); // copy data over to tempEtas Arrays.fill(etas[row], 0); // zero out etas for current row y = (int) ys[row]; // get response class category if (y==0) { // response is in 0th category etasOffset[row][0] = _glmp.linkInv(tempEtas[0])-1; etas[row][0] = etasOffset[row][0]; _likelihood -= w*tempEtas[y]-Math.log(1+Math.exp(tempEtas[y])); } else if (y==_theLast) { // response is in last category etasOffset[row][_secondToLast] = _glmp.linkInv(tempEtas[_secondToLast]); etas[row][0] = etasOffset[row][_secondToLast]; _likelihood += w*Math.log(1+Math.exp(tempEtas[_secondToLast])); } else { // perform update for response between 1 to K-2, y can affect class y and y-1 int lastC = y-1; // previous class yJ = _glmp.linkInv(tempEtas[y]); yJm1 = _glmp.linkInv(tempEtas[lastC]); double den = yJ-yJm1; den = den==0.0?1e-10:den; _likelihood -= w*Math.log(den); etas[row][0] = yJ+yJm1-1.0; // for non-intercepts double oneMcdfPC = 1-yJm1; oneMcdfPC = oneMcdfPC==0.0?1e-10:oneMcdfPC; double oneOthreshold = 1-Math.exp(_beta[_interceptId][lastC]-_beta[_interceptId][y]); oneOthreshold = oneOthreshold==0.0?1e-10:oneOthreshold; double oneOverThreshold = 1.0/oneOthreshold; etasOffset[row][y] = (yJ-1)*oneOverThreshold/oneMcdfPC; yJ = yJ==0?1e-10:yJ; etasOffset[row][lastC] = yJm1*oneOverThreshold/yJ; } for (int c=1; c<K; c++) // set beta of all classes to be the same etas[row][c]=etas[row][0]; } } // This method will compute the multipliers for gradient calculation of the betas in etas and for // the intercepts in etas_offsets for each row of data final void computeGradientMultipliersSQERR(double [][] etas, double [][] etasOffset, double [] ys, double [] ws) { int K = _beta[0].length; // number of class double[] tempEtas = new double[K]; // store original etas int y; // get and store response class category double yJ, yJm1; for (int row = 0; row < etas.length; row++) { // calculate the multiplier from each row double w = 
ws[row]; if (w==0) { Arrays.fill(etas[row], 0); // zero out etas for current row continue; } // note that, offset is different for all class, beta is shared for all classes System.arraycopy(etas[row], 0, tempEtas, 0, K); // copy data over to tempEtas Arrays.fill(etas[row], 0); // zero out etas for current row y = (int) ys[row]; // get response class category for (int c = 0; c < y; c++) { // classes < yresp, should be negative if (tempEtas[c] > 0) { etasOffset[row][c] = tempEtas[c]; etas[row][0] += tempEtas[c]; _likelihood += w*0.5*tempEtas[c]*tempEtas[c]; } } for (int c = y; c < _theLast; c++) { // class >= yresp, should be positive if (tempEtas[c] <= 0) { etasOffset[row][c] = tempEtas[c]; etas[row][0] += tempEtas[c]; _likelihood += w*0.5*tempEtas[c]*tempEtas[c]; } } for (int c=1; c<K; c++) // set beta of all classes to be the same etas[row][c]=etas[row][0]; } } final void computeGradientMultipliers(double [][] etas, double [] ys, double [] ws){ int K = _beta[0].length; double [] exps = new double[K+1]; for(int i = 0; i < etas.length; ++i) { double w = ws[i]; if(w == 0){ Arrays.fill(etas[i],0); continue; } int y = (int) ys[i]; double logSumExp = computeMultinomialEtas(etas[i], exps); _likelihood -= w * (etas[i][y] - logSumExp); for (int c = 0; c < K; ++c) etas[i][c] = w * (exps[c + 1] - (y == c ? 1 : 0)); // dllh/dbc for all class except xik } } @Override public void map(Chunk[] chks) { if(_job != null && _job.stop_requested()) throw new Job.JobCancelledException(); int numStart = _dinfo.numStart(); int K = _beta[0].length;// number of classes int P = _beta.length; // number of predictors (+ intercept) int M = chks[0]._len; // number of rows in this chunk of data _gradient = new double[P][K]; double [][] etas = new double[M][K]; // store multiplier for non-intercept parameters double [][] etasOffset = new double[M][K]; // store multiplier for intercept parameters double[] offsets = new double[K]; for(int k = 0; k < K; ++k) offsets[k] = _beta[P-1][k]; // intercept // sparse offset + intercept if(_dinfo._normSub != null) { for(int i = 0; i < _dinfo._nums; ++i) if(chks[_dinfo._cats + i].isSparseZero()) ArrayUtils.wadd(offsets,_beta[numStart + i], -_dinfo._normSub[i]*_dinfo._normMul[i]); } for (int i = 0; i < chks[0]._len; ++i) System.arraycopy(offsets, 0, etas[i], 0, K); Chunk response = chks[_dinfo.responseChunkId(0)]; double [] ws = MemoryManager.malloc8d(M); if(_dinfo._weights) ws = chks[_dinfo.weightChunkId()].getDoubles(ws,0,M); else Arrays.fill(ws,1); chks = Arrays.copyOf(chks,chks.length-1-(_dinfo._weights?1:0)); double [] vals = MemoryManager.malloc8d(M); int [] ids = MemoryManager.malloc4(M); computeCategoricalEtas(chks,etas,vals,ids); computeNumericEtas(chks,etas,vals,ids); calMultipliersNGradients(etas, etasOffset, ws, vals, ids, response, chks, M, P, numStart); } public abstract void calMultipliersNGradients(double[][] etas, double[][] etasOffset, double[] ws, double[] vals, int[] ids, Chunk response, Chunk[] chks, int M, int P, int numStart); @Override public void reduce(GLMMultinomialGradientBaseTask gmgt){ if(_gradient != gmgt._gradient) ArrayUtils.add(_gradient,gmgt._gradient); _likelihood += gmgt._likelihood; } @Override public void postGlobal(){ ArrayUtils.mult(_gradient, _reg); int P = _beta.length; // number of predictors // add l2 penalty if (_currentLambda > 0) { for (int c = 0; c < P - 1; ++c) for (int j = 0; j < _beta[0].length; ++j) // iterate over each class _gradient[c][j] += _currentLambda * _beta[c][j]; } if ((_penaltyMat!=null) && (_gamBetaIndices!=null)) 
updateGradGamMultinomial(_gradient, _penaltyMat, _gamBetaIndices, _beta); // beta is coeff index by class } /** This method changes the _gradient that is coeffPerClss by number of classes back to number of classes by * coeffPerClass. Also, if only active columns are included, that is what is returned. If both active and * non-active columns are included, both will be returned. * * @return */ public double [] gradient(){ double [] res = MemoryManager.malloc8d(_gradient.length*_gradient[0].length); int P = _gradient.length; for(int k = 0; k < _gradient[0].length; ++k) for(int i = 0; i < _gradient.length; ++i) res[k*P + i] = _gradient[i][k]; return res; } } // share between multinomial and ordinal regression public static class GLMMultinomialGradientTask extends GLMMultinomialGradientBaseTask { public GLMMultinomialGradientTask(Job job, DataInfo dinfo, double lambda, double[][] beta, double reg) { super(job, dinfo, lambda, beta, reg); } public GLMMultinomialGradientTask(Job job, DataInfo dinfo, double lambda, double[][] beta, GLMParameters glmp) { super(job, dinfo, lambda, beta, glmp); } public GLMMultinomialGradientTask(Job job, DataInfo dinfo, double lambda, double[][] beta, GLMParameters glmp, double[][][] penaltyMat, int[][] gamCols) { super(job, dinfo, lambda, beta, glmp, penaltyMat, gamCols); } @Override public void calMultipliersNGradients(double[][] etas, double[][] etasOffset, double[] ws, double[] vals, int[] ids, Chunk response, Chunk[] chks, int M, int P, int numStart) { if (_glmp != null && _link == Link.ologit && (_glmp._solver.equals(GLMParameters.Solver.AUTO) || _glmp._solver.equals((GLMParameters.Solver.GRADIENT_DESCENT_LH)))) // gradient is stored in etas computeGradientMultipliersLH(etas, etasOffset, response.getDoubles(vals, 0, M), ws); else if (_glmp != null && _link == Link.ologit && _glmp._solver.equals(GLMParameters.Solver.GRADIENT_DESCENT_SQERR)) computeGradientMultipliersSQERR(etas, etasOffset, response.getDoubles(vals, 0, M), ws); else computeGradientMultipliers(etas, response.getDoubles(vals, 0, M), ws); computeCategoricalGrads(chks, etas, vals, ids); computeNumericGrads(chks, etas, vals, ids); double [] g = _gradient[P-1]; // get the intercept gradient. 
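/* For the ordinal (ologit) solvers, computeGradientMultipliersLH/SQERR store the per-row intercept
   multipliers in etasOffset and replicate one shared non-intercept multiplier across etas; hence the
   intercept gradient row below is accumulated from etasOffset for ologit and from etas otherwise. */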
// sum up the gradient over the data rows in this chk[] if (_link == Link.ologit) { for (int i = 0; i < etasOffset.length; ++i) ArrayUtils.add(g, etasOffset[i]); } else { for (int i = 0; i < etas.length; ++i) ArrayUtils.add(g, etas[i]); } if(_dinfo._normSub != null) { double [] icpt = _gradient[P-1]; for(int i = 0; i < _dinfo._normSub.length; ++i) { if(chks[_dinfo._cats+i].isSparseZero()) ArrayUtils.wadd(_gradient[numStart+i],icpt,-_dinfo._normSub[i]*_dinfo._normMul[i]); } } } } // public static class GLMCoordinateDescentTask extends MRTask<GLMCoordinateDescentTask> { // final double [] _betaUpdate; // final double [] _beta; // final double _xOldSub; // final double _xOldMul; // final double _xNewSub; // final double _xNewMul; // // double [] _xy; // // public GLMCoordinateDescentTask(double [] betaUpdate, double [] beta, double xOldSub, double xOldMul, double xNewSub, double xNewMul) { // _betaUpdate = betaUpdate; // _beta = beta; // _xOldSub = xOldSub; // _xOldMul = xOldMul; // _xNewSub = xNewSub; // _xNewMul = xNewMul; // } // // public void map(Chunk [] chks) { // Chunk xOld = chks[0]; // Chunk xNew = chks[1]; // if(xNew.vec().isCategorical()){ // _xy = MemoryManager.malloc8d(xNew.vec().domain().length); // } else // _xy = new double[1]; // Chunk eta = chks[2]; // Chunk weights = chks[3]; // Chunk res = chks[4]; // for(int i = 0; i < eta._len; ++i) { // double w = weights.atd(i); // double e = eta.atd(i); // if(_betaUpdate != null) { // if (xOld.vec().isCategorical()) { // int cid = (int) xOld.at8(i); // e = +_betaUpdate[cid]; // } else // e += _betaUpdate[0] * (xOld.atd(i) - _xOldSub) * _xOldMul; // eta.set(i, e); // } // int cid = 0; // double x = w; // if(xNew.vec().isCategorical()) { // cid = (int) xNew.at8(i); // e -= _beta[cid]; // } else { // x = (xNew.atd(i) - _xNewSub) * _xNewMul; // e -= _beta[0] * x; // x *= w; // } // _xy[cid] += x * (res.atd(i) - e); // } // } // @Override public void reduce(GLMCoordinateDescentTask t) { // ArrayUtils.add(_xy, t._xy); // } // } // /** // * Compute initial solution for multinomial problem (Simple weighted LR with all weights = 1/4) // */ // public static final class GLMMultinomialInitTsk extends MRTask<GLMMultinomialInitTsk> { // double [] _mu; // DataInfo _dinfo; // Gram _gram; // double [][] _xy; // // @Override public void map(Chunk [] chks) { // Rows rows = _dinfo.rows(chks); // _gram = new Gram(_dinfo); // _xy = new double[_mu.length][_dinfo.fullN()+1]; // int numStart = _dinfo.numStart(); // double [] ds = new double[_mu.length]; // for(int i = 0; i < ds.length; ++i) // ds[i] = 1.0/(_mu[i] * (1-_mu[i])); // for(int i = 0; i < rows._nrows; ++i) { // Row r = rows.row(i); // double y = r.response(0); // _gram.addRow(r,.25); // for(int c = 0; c < _mu.length; ++c) { // double iY = y == c?1:0; // double z = (y-_mu[c]) * ds[i]; // for(int j = 0; j < r.nBins; ++j) // _xy[c][r.binIds[j]] += z; // for(int j = 0; j < r.nNums; ++j){ // int id = r.numIds == null?(j + numStart):r.numIds[j]; // double val = r.numVals[j]; // _xy[c][id] += z*val; // } // } // } // } // @Override public void reduce(){ // // } // } /** * Task to compute t(X) %*% W %*% X and t(X) %*% W %*% y */ public static class LSTask extends FrameTask2<LSTask> { public double[] _xy; public Gram _gram; final int numStart; public LSTask(H2OCountedCompleter cmp, DataInfo dinfo, Key jobKey) { super(cmp, dinfo, jobKey); numStart = _dinfo.numStart(); } @Override public void chunkInit() { _gram = new Gram(_dinfo.fullN(), _dinfo.largestCat(), _dinfo.numNums(), _dinfo._cats, true); _xy = 
MemoryManager.malloc8d(_dinfo.fullN() + 1); } @Override protected void processRow(Row r) { double wz = r.weight * (r.response(0) - r.offset); for (int i = 0; i < r.nBins; ++i) { _xy[r.binIds[i]] += wz; } for (int i = 0; i < r.nNums; ++i) { int id = r.numIds == null ? (i + numStart) : r.numIds[i]; double val = r.numVals[i]; _xy[id] += wz * val; } if (_dinfo._intercept) _xy[_xy.length - 1] += wz; _gram.addRow(r, r.weight); } @Override public void reduce(LSTask lst) { ArrayUtils.add(_xy, lst._xy); _gram.add(lst._gram); } @Override public void postGlobal() { if (_sparse && _dinfo._normSub != null) { // need to adjust gram for missing centering! int ns = _dinfo.numStart(); int interceptIdx = _xy.length - 1; double[] interceptRow = _gram._xx[interceptIdx - _gram._diagN]; double nobs = interceptRow[interceptRow.length - 1]; // weighted _nobs for (int i = ns; i < _dinfo.fullN(); ++i) { double iMean = _dinfo._normSub[i - ns] * _dinfo._normMul[i - ns]; for (int j = 0; j < ns; ++j) _gram._xx[i - _gram._diagN][j] -= interceptRow[j] * iMean; for (int j = ns; j <= i; ++j) { double jMean = _dinfo._normSub[j - ns] * _dinfo._normMul[j - ns]; _gram._xx[i - _gram._diagN][j] -= interceptRow[i] * jMean + interceptRow[j] * iMean - nobs * iMean * jMean; } } if (_dinfo._intercept) { // do the intercept row for (int j = ns; j < _dinfo.fullN(); ++j) interceptRow[j] -= nobs * _dinfo._normSub[j - ns] * _dinfo._normMul[j - ns]; } // and the xy vec as well for (int i = ns; i < _dinfo.fullN(); ++i) { _xy[i] -= _xy[_xy.length - 1] * _dinfo._normSub[i - ns] * _dinfo._normMul[i - ns]; } } } } public static class GLMWLSTask extends LSTask { final GLMWeightsFun _glmw; final double [] _beta; double _sparseOffset; public GLMWLSTask(H2OCountedCompleter cmp, DataInfo dinfo, Key jobKey, GLMWeightsFun glmw, double [] beta) { super(cmp, dinfo, jobKey); _glmw = glmw; _beta = beta; } private transient GLMWeights _ws; @Override public void chunkInit(){ super.chunkInit(); _ws = new GLMWeights(); } @Override public void processRow(Row r) { // update weights double eta = r.innerProduct(_beta) + _sparseOffset; _glmw.computeWeights(r.response(0),eta,r.weight,r.offset,_ws); r.weight = _ws.w; r.offset = 0; // handled offset here r.setResponse(0,_ws.z); super.processRow(r); } } public static class GLMMultinomialWLSTask extends LSTask { final GLMWeightsFun _glmw; final double [] _beta; double _sparseOffset; public GLMMultinomialWLSTask(H2OCountedCompleter cmp, DataInfo dinfo, Key jobKey, GLMWeightsFun glmw, double [] beta) { super(cmp, dinfo, jobKey); _glmw = glmw; _beta = beta; } private transient GLMWeights _ws; @Override public void chunkInit(){ super.chunkInit(); _ws = new GLMWeights(); } @Override public void processRow(Row r) { // update weights double eta = r.innerProduct(_beta) + _sparseOffset; _glmw.computeWeights(r.response(0),eta,r.weight,r.offset,_ws); r.weight = _ws.w; r.offset = 0; // handled offset here r.setResponse(0,_ws.z); super.processRow(r); } } public static class GLMIterationTaskMultinomial extends FrameTask2<GLMIterationTaskMultinomial> { final int _c; final double [] _beta; // current beta to compute update of predictors for the current class double [] _xy; Gram _gram; transient double _sparseOffset; public GLMIterationTaskMultinomial(DataInfo dinfo, Key jobKey, double [] beta, int c) { super(null, dinfo, jobKey); _beta = beta; _c = c; } @Override public void chunkInit(){ // initialize _gram = new Gram(_dinfo.fullN(), _dinfo.largestCat(), _dinfo.numNums(), _dinfo._cats,true); _xy = 
MemoryManager.malloc8d(_dinfo.fullN()+1); // + 1 is for intercept if(_sparse) _sparseOffset = GLM.sparseOffset(_beta,_dinfo); } @Override protected void processRow(Row r) { double y = r.response(0); double sumExp = r.response(1); double maxRow = r.response(2); int numStart = _dinfo.numStart(); y = (y == _c)?1:0; double eta = r.innerProduct(_beta) + _sparseOffset; if(eta > maxRow) maxRow = eta; double etaExp = Math.exp(eta-maxRow); sumExp += etaExp; double mu = (etaExp == Double.POSITIVE_INFINITY?1:(etaExp / sumExp)); if(mu < 1e-16) mu = 1e-16;// double d = mu*(1-mu); double wz = r.weight * (eta * d + (y-mu)); double w = r.weight * d; for(int i = 0; i < r.nBins; ++i) { _xy[r.binIds[i]] += wz; } for(int i = 0; i < r.nNums; ++i){ int id = r.numIds == null?(i + numStart):r.numIds[i]; double val = r.numVals[i]; _xy[id] += wz*val; } if(_dinfo._intercept) _xy[_xy.length-1] += wz; _gram.addRow(r, w); } @Override public void reduce(GLMIterationTaskMultinomial glmt) { ArrayUtils.add(_xy,glmt._xy); _gram.add(glmt._gram); } } public static class GLMMultinomialUpdate extends FrameTask2<GLMMultinomialUpdate> { private final double [][] _beta; // updated value of beta private final int _c; private transient double [] _sparseOffsets; private transient double [] _etas; public GLMMultinomialUpdate(DataInfo dinfo, Key jobKey, double [] beta, int c) { super(null, dinfo, jobKey); _beta = ArrayUtils.convertTo2DMatrix(beta,dinfo.fullN()+1); _c = c; } @Override public void chunkInit(){ // initialize _sparseOffsets = MemoryManager.malloc8d(_beta.length); _etas = MemoryManager.malloc8d(_beta.length); if(_sparse) { for(int i = 0; i < _beta.length; ++i) _sparseOffsets[i] = GLM.sparseOffset(_beta[i],_dinfo); } } private transient Chunk _sumExpChunk; private transient Chunk _maxRowChunk; @Override public void map(Chunk [] chks) { _sumExpChunk = chks[chks.length-2]; _maxRowChunk = chks[chks.length-1]; super.map(chks); } @Override protected void processRow(Row r) { double maxrow = 0; for(int i = 0; i < _beta.length; ++i) { _etas[i] = r.innerProduct(_beta[i]) + _sparseOffsets[i]; if(_etas[i] > maxrow) maxrow = _etas[i]; } double sumExp = 0; for(int i = 0; i < _beta.length; ++i) // if(i != _c) sumExp += Math.exp(_etas[i]-maxrow); _maxRowChunk.set(r.cid,_etas[_c]); _sumExpChunk.set(r.cid,Math.exp(_etas[_c]-maxrow)/sumExp); } } /** * One iteration of glm, computes weighted gram matrix and t(x)*y vector and t(y)*y scalar. 
* * @author tomasnykodym */ public static class GLMIterationTask extends FrameTask2<GLMIterationTask> { final GLMWeightsFun _glmf; double [][]_beta_multinomial; double []_beta; protected Gram _gram; // wx%*%x double [] _xy; // wx^t%*%z, double _yy; final double [] _ymu; long _nobs; public double _likelihood; private transient GLMWeights _w; private transient GLMWeightsFun _glmfTweedie; // only needed for Tweedie // final double _lambda; double wsum, sumOfRowWeights; double _sumsqe; int _c = -1; boolean _hasConstraints = false; public double[] getXY() { return _xy; } public double getYY() { return _yy; } public GLMIterationTask(Key jobKey, DataInfo dinfo, GLMWeightsFun glmw,double [] beta) { super(null,dinfo,jobKey); _beta = beta; _ymu = null; _glmf = glmw; } public GLMIterationTask(Key jobKey, DataInfo dinfo, GLMWeightsFun glmw, double [] beta, int c) { super(null,dinfo,jobKey); _beta = beta; _ymu = null; _glmf = glmw; _c = c; } @Override public boolean handlesSparseData(){return true;} transient private double _sparseOffset; @Override public void chunkInit() { // initialize _gram = new Gram(_dinfo.fullN(), _dinfo.largestCat(), _dinfo.numNums(), _dinfo._cats,true); _xy = MemoryManager.malloc8d(_dinfo.fullN()+1); // + 1 is for intercept if(_sparse) _sparseOffset = GLM.sparseOffset(_beta,_dinfo); _w = new GLMWeights(); if (_glmf._family.equals(Family.tweedie)) { _glmfTweedie = new GLMModel.GLMWeightsFun(_glmf._family, _glmf._link, _glmf._var_power, _glmf._link_power, _glmf._theta, _glmf._dispersion, _glmf._varPowerEstimation); } } public Gram getGram() { return _gram; } @Override protected void processRow(Row r) { // called for every row in the chunk if(r.isBad() || r.weight == 0) return; ++_nobs; double y = r.response(0); _yy += r.weight*y*y; final int numStart = _dinfo.numStart(); double wz,w; if(_glmf._family == Family.multinomial) { y = (y == _c)?1:0; double mu = r.response(1); double eta = r.response(2); double d = mu*(1-mu); if(d == 0) d = 1e-10; wz = r.weight * (eta * d + (y-mu)); w = r.weight * d; } else if(_beta != null) { if (_glmf._family.equals(Family.tweedie)) _glmfTweedie.computeWeights(y, r.innerProduct(_beta) + _sparseOffset, r.offset, r.weight, _w); else _glmf.computeWeights(y, r.innerProduct(_beta) + _sparseOffset, r.offset, r.weight, _w); w = _w.w; // hessian without the xij xik part if (_glmf._family.equals(Family.tweedie)) // already multiplied with w for w.z wz = _w.z; else wz = w*_w.z; _likelihood += _w.l; } else { w = r.weight; wz = w*(y - r.offset); } wsum+=w; sumOfRowWeights +=r.weight; // just add the user observation weight for the scaling. for(int i = 0; i < r.nBins; ++i) _xy[r.binIds[i]] += wz; for(int i = 0; i < r.nNums; ++i){ int id = r.numIds == null?(i + numStart):r.numIds[i]; double val = r.numVals[i]; _xy[id] += wz*val; } if(_dinfo._intercept) _xy[_xy.length-1] += wz; _gram.addRow(r,w); } @Override public void chunkDone(){adjustForSparseStandardizedZeros();} @Override public void reduce(GLMIterationTask git){ ArrayUtils.add(_xy, git._xy); _gram.add(git._gram); _nobs += git._nobs; wsum += git.wsum; sumOfRowWeights += git.sumOfRowWeights; _likelihood += git._likelihood; _sumsqe += git._sumsqe; _yy += git._yy; super.reduce(git); } private void adjustForSparseStandardizedZeros(){ if(_sparse && _dinfo._normSub != null) { // need to adjust gram for missing centering! 
int ns = _dinfo.numStart(); int interceptIdx = _xy.length - 1; double[] interceptRow = _gram._xx[interceptIdx - _gram._diagN]; double nobs = interceptRow[interceptRow.length - 1]; // weighted _nobs for (int i = ns; i < _dinfo.fullN(); ++i) { double iMean = _dinfo._normSub[i - ns] * _dinfo._normMul[i - ns]; for (int j = 0; j < ns; ++j) _gram._xx[i - _gram._diagN][j] -= interceptRow[j] * iMean; for (int j = ns; j <= i; ++j) { double jMean = _dinfo._normSub[j - ns] * _dinfo._normMul[j - ns]; _gram._xx[i - _gram._diagN][j] -= interceptRow[i] * jMean + interceptRow[j] * iMean - nobs * iMean * jMean; } } if (_dinfo._intercept) { // do the intercept row for (int j = ns; j < _dinfo.fullN(); ++j) interceptRow[j] -= nobs * _dinfo._normSub[j - ns] * _dinfo._normMul[j - ns]; } // and the xy vec as well for (int i = ns; i < _dinfo.fullN(); ++i) { _xy[i] -= _xy[_xy.length - 1] * _dinfo._normSub[i - ns] * _dinfo._normMul[i - ns]; } } } public boolean hasNaNsOrInf() { return ArrayUtils.hasNaNsOrInfs(_xy) || _gram.hasNaNsOrInfs(); } } /* public static class GLMCoordinateDescentTask extends FrameTask2<GLMCoordinateDescentTask> { final GLMParameters _params; final double [] _betaw; final double [] _betacd; public double [] _temp; public double [] _varsum; public double _ws=0; long _nobs; public double _likelihoods; public GLMCoordinateDescentTask(Key jobKey, DataInfo dinfo, double lambda, GLMModel.GLMParameters glm, boolean validate, double [] betaw, double [] betacd, double ymu, Vec rowFilter, H2OCountedCompleter cmp) { super(cmp,dinfo,jobKey,rowFilter); _params = glm; _betaw = betaw; _betacd = betacd; } @Override public boolean handlesSparseData(){return false;} @Override public void chunkInit() { _temp=MemoryManager.malloc8d(_dinfo.fullN()+1); // using h2o memory manager _varsum=MemoryManager.malloc8d(_dinfo.fullN()); } @Override protected void processRow(Row r) { if(r.bad || r.weight == 0) return; ++_nobs; final double y = r.response(0); assert ((_params._family != Family.gamma) || y > 0) : "illegal response column, y must be > 0 for family=Gamma."; assert ((_params._family != Family.binomial) || (0 <= y && y <= 1)) : "illegal response column, y must be <0,1> for family=Binomial. got " + y; final double w, eta, mu, var, z; final int numStart = _dinfo.numStart(); double d = 1; if( _params._family == Family.gaussian && _params._link == Link.identity){ w = r.weight; z = y - r.offset; mu = 0; eta = mu; } else { eta = r.innerProduct(_betaw); mu = _params.linkInv(eta + r.offset); var = Math.max(1e-6, _params.variance(mu)); // avoid numerical problems with 0 variance d = _params.linkDeriv(mu); z = eta + (y-mu)*d; w = r.weight/(var*d*d); } _likelihoods += r.weight*_params.likelihood(y,mu); assert w >= 0|| Double.isNaN(w) : "invalid weight " + w; // allow NaNs - can occur if line-search is needed! 
_ws+=w; double xb = r.innerProduct(_betacd); for(int i = 0; i < r.nBins; ++i) { // go over cat variables _temp[r.binIds[i]] += (z - xb + _betacd[r.binIds[i]]) *w; _varsum[r.binIds[i]] += w ; } for(int i = 0; i < r.nNums; ++i){ // num vars int id = r.numIds == null?(i + numStart):r.numIds[i]; _temp[id] += (z- xb + r.get(id)*_betacd[id] )*(r.get(id)*w); _varsum[id] += w*r.get(id)*r.get(id); } _temp[_temp.length-1] += w*(z-r.innerProduct(_betacd)+_betacd[_betacd.length-1]); } @Override public void reduce(GLMCoordinateDescentTask git){ // adding contribution of all the chunks ArrayUtils.add(_temp, git._temp); ArrayUtils.add(_varsum, git._varsum); _ws+= git._ws; _nobs += git._nobs; _likelihoods += git._likelihoods; super.reduce(git); } } */ public static class GLMCoordinateDescentTaskSeqNaive extends MRTask<GLMCoordinateDescentTaskSeqNaive> { public double [] _normMulold; public double [] _normSubold; public double [] _normMulnew; public double [] _normSubnew; final double [] _betaold; // current old value at j final double [] _betanew; // global beta @ j-1 that was just updated. final int [] _catLvls_new; // sorted list of indices of active levels only for one categorical variable final int [] _catLvls_old; public double [] _temp; boolean _skipFirst; long _nobs; int _cat_num; // 1: c and p categorical, 2:c numeric and p categorical, 3:c and p numeric , 4: c categorical and previous num. boolean _interceptnew; boolean _interceptold; public GLMCoordinateDescentTaskSeqNaive(boolean interceptold, boolean interceptnew, int cat_num , double [] betaold, double [] betanew, int [] catLvlsold, int [] catLvlsnew, double [] normMulold, double [] normSubold, double [] normMulnew, double [] normSubnew, boolean skipFirst ) { // pass it norm mul and norm sup - in the weights already done. norm //mul and mean will be null without standardization. _normMulold = normMulold; _normSubold = normSubold; _normMulnew = normMulnew; _normSubnew = normSubnew; _cat_num = cat_num; _betaold = betaold; _betanew = betanew; _interceptold = interceptold; // if updating beta_1, then the intercept is the previous column _interceptnew = interceptnew; // if currently updating the intercept value _catLvls_old = catLvlsold; _catLvls_new = catLvlsnew; _skipFirst = skipFirst; } @Override public void map(Chunk [] chunks) { int cnt = 0; Chunk wChunk = chunks[cnt++]; Chunk zChunk = chunks[cnt++]; Chunk ztildaChunk = chunks[cnt++]; Chunk xpChunk=null, xChunk=null; _temp = new double[_betaold.length]; if (_interceptnew) { xChunk = new C0DChunk(1,chunks[0]._len); xpChunk = chunks[cnt++]; } else { if (_interceptold) { xChunk = chunks[cnt++]; xpChunk = new C0DChunk(1,chunks[0]._len); } else { xChunk = chunks[cnt++]; xpChunk = chunks[cnt++]; } } // For each observation, add corresponding term to temp - or if categorical variable only add the term corresponding to its active level and the active level // of the most recently updated variable before it (if also cat). If for an obs the active level corresponds to an inactive column, we just dont want to include // it - same if inactive level in most recently updated var. so set these to zero ( Wont be updating a betaj which is inactive) . 
for (int i = 0; i < chunks[0]._len; ++i) { // going over all the rows in the chunk double betanew = 0; // most recently updated prev variable double betaold = 0; // old value of current variable being updated double w = wChunk.atd(i); if(w == 0) continue; ++_nobs; int observation_level = 0, observation_level_p = 0; double val = 1, valp = 1; if(_cat_num == 1) { observation_level = (int) xChunk.at8(i); // only need to change one temp value per observation. if (_catLvls_old != null) observation_level = Arrays.binarySearch(_catLvls_old, observation_level); observation_level_p = (int) xpChunk.at8(i); // both cat if (_catLvls_new != null) observation_level_p = Arrays.binarySearch(_catLvls_new, observation_level_p); if(_skipFirst){ observation_level--; observation_level_p--; } } else if(_cat_num == 2){ val = xChunk.atd(i); // current num and previous cat if (_normMulold != null && _normSubold != null) val = (val - _normSubold[0]) * _normMulold[0]; observation_level_p = (int) xpChunk.at8(i); if (_catLvls_new != null) observation_level_p = Arrays.binarySearch(_catLvls_new, observation_level_p); if(_skipFirst){ observation_level_p--; } } else if(_cat_num == 3){ val = xChunk.atd(i); // both num if (_normMulold != null && _normSubold != null) val = (val - _normSubold[0]) * _normMulold[0]; valp = xpChunk.atd(i); if (_normMulnew != null && _normSubnew != null) valp = (valp - _normSubnew[0]) * _normMulnew[0]; } else if(_cat_num == 4){ observation_level = (int) xChunk.at8(i); // current cat if (_catLvls_old != null) observation_level = Arrays.binarySearch(_catLvls_old, observation_level); // search to see if this level is active. if(_skipFirst){ observation_level--; } valp = xpChunk.atd(i); //prev numeric if (_normMulnew != null && _normSubnew != null) valp = (valp - _normSubnew[0]) * _normMulnew[0]; } if(observation_level >= 0) betaold = _betaold[observation_level]; if(observation_level_p >= 0) betanew = _betanew[observation_level_p]; if (_interceptnew) { ztildaChunk.set(i, ztildaChunk.atd(i) - betaold + valp * betanew); // _temp[0] += w * (zChunk.atd(i) - ztildaChunk.atd(i)); } else { ztildaChunk.set(i, ztildaChunk.atd(i) - val * betaold + valp * betanew); if(observation_level >=0 ) // if the active level for that observation is an "inactive column" don't want to add contribution to temp for that observation _temp[observation_level] += w * val * (zChunk.atd(i) - ztildaChunk.atd(i)); } } } @Override public void reduce(GLMCoordinateDescentTaskSeqNaive git){ ArrayUtils.add(_temp, git._temp); _nobs += git._nobs; super.reduce(git); } } public static class GLMCoordinateDescentTaskSeqIntercept extends MRTask<GLMCoordinateDescentTaskSeqIntercept> { final double [] _betaold; public double _temp; DataInfo _dinfo; public GLMCoordinateDescentTaskSeqIntercept( double [] betaold, DataInfo dinfo) { _betaold = betaold; _dinfo = dinfo; } @Override public void map(Chunk [] chunks) { int cnt = 0; Chunk wChunk = chunks[cnt++]; Chunk zChunk = chunks[cnt++]; Chunk filterChunk = chunks[cnt++]; Row r = _dinfo.newDenseRow(); for(int i = 0; i < chunks[0]._len; ++i) { if(filterChunk.atd(i)==1) continue; _dinfo.extractDenseRow(chunks,i,r); _temp = wChunk.at8(i)* (zChunk.atd(i)- r.innerProduct(_betaold) ); } } @Override public void reduce(GLMCoordinateDescentTaskSeqIntercept git){ _temp+= git._temp; super.reduce(git); } } public static class GLMGenerateWeightsTask extends MRTask<GLMGenerateWeightsTask> { final GLMParameters _params; final double [] _betaw; double [] denums; double wsum,wsumu; DataInfo _dinfo; double _likelihood; 
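// For each row, map() below evaluates eta = x'beta and mu = linkInv(eta + offset), forms the IRLS working
// response z = eta + (y - mu) * d and working weight w = weight / (var(mu) * d * d), with d = dLink/dmu
// (the Gaussian identity-link branch short-circuits to w = weight, z = y - offset), writes w and z into the
// trailing chunks, stores eta minus the intercept coefficient in zTilda, and accumulates the per-coefficient
// denominators (denums) used by the coordinate-descent updates.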
public GLMGenerateWeightsTask(Key jobKey, DataInfo dinfo, GLMModel.GLMParameters glm, double[] betaw) { _params = glm; _betaw = betaw; _dinfo = dinfo; } @Override public void map(Chunk [] chunks) { Chunk wChunk = chunks[chunks.length-3]; Chunk zChunk = chunks[chunks.length-2]; Chunk zTilda = chunks[chunks.length-1]; chunks = Arrays.copyOf(chunks,chunks.length-3); denums = new double[_dinfo.fullN()+1]; // full N is expanded variables with categories Row r = _dinfo.newDenseRow(); for(int i = 0; i < chunks[0]._len; ++i) { _dinfo.extractDenseRow(chunks,i,r); if (r.isBad() || r.weight == 0) { wChunk.set(i,0); zChunk.set(i,0); zTilda.set(i,0); continue; } final double y = r.response(0); assert ((_params._family != Family.gamma) || y > 0) : "illegal response column, y must be > 0 for family=Gamma."; assert ((_params._family != Family.binomial) || (0 <= y && y <= 1)) : "illegal response column, y must be <0,1> for family=Binomial. got " + y; final double w, eta, mu, var, z; final int numStart = _dinfo.numStart(); double d = 1; eta = r.innerProduct(_betaw); if (_params._family == gaussian && _params._link == Link.identity) { w = r.weight; z = y - r.offset; mu = 0; } else { mu = _params.linkInv(eta + r.offset); var = Math.max(1e-6, _params.variance(mu)); // avoid numerical problems with 0 variance d = _params.linkDeriv(mu); z = eta + (y - mu) * d; w = r.weight / (var * d * d); } _likelihood += _params.likelihood(y,mu); zTilda.set(i,eta-_betaw[_betaw.length-1]); assert w >= 0 || Double.isNaN(w) : "invalid weight " + w; // allow NaNs - can occur if line-search is needed! wChunk.set(i,w); zChunk.set(i,z); wsum+=w; wsumu+=r.weight; // just add the user observation weight for the scaling. for(int j = 0; j < r.nBins; ++j) { // go over cat variables denums[r.binIds[j]] += w; // binIds skips the zeros. } for(int j = 0; j < r.nNums; ++j){ // num vars int id = r.numIds == null?(j + numStart):r.numIds[j]; denums[id]+= w*r.get(id)*r.get(id); } } } @Override public void reduce(GLMGenerateWeightsTask git){ // adding contribution of all the chunks ArrayUtils.add(denums, git.denums); wsum+=git.wsum; wsumu += git.wsumu; _likelihood += git._likelihood; super.reduce(git); } } public static class ComputeSEorDEVIANCETsk extends FrameTask2<ComputeSEorDEVIANCETsk> { final double [] _betaNew; double _sumsqe; double _wsum; GLMParameters _parms; GLMModel _model; public ComputeSEorDEVIANCETsk(H2OCountedCompleter cmp, DataInfo dinfo, Key jobKey, double [] betaNew, GLMParameters parms, GLMModel model) { super(cmp, dinfo, jobKey); _glmf = new GLMWeightsFun(parms); _betaNew = betaNew; _parms = parms; _model = model; } transient double _sparseOffsetNew = 0; final GLMWeightsFun _glmf; transient GLMWeights _glmw; @Override public void chunkInit(){ if(_sparse) { _sparseOffsetNew = GLM.sparseOffset(_betaNew, _dinfo); } _glmw = new GLMWeights(); } @Override protected void processRow(Row r) { double z = r.response(0) - r.offset; double w = r.weight; if (_glmf._family != gaussian) { double etaOld = r.innerProduct(_betaNew) + _sparseOffsetNew; _glmf.computeWeights(r.response(0), etaOld, r.offset, r.weight, _glmw); z = _glmw.z; w = _glmw.w; } double eta = _glmf._family.equals(Family.tweedie) ? r.innerProduct(_betaNew) + _sparseOffsetNew + r.offset : r.innerProduct(_betaNew) + _sparseOffsetNew; double xmu = _glmf._family.equals(Family.tweedie) ? _glmf.linkInv(eta) : 0; if (deviance.equals(_parms._dispersion_parameter_method)) { // deviance option if (!gaussian.equals(_glmf._family)) { _sumsqe += Double.isNaN(_glmw.dev) ? 
0 : _glmw.dev; } else { double d = _model.deviance(r.weight, z, _glmf.linkInv(eta)); _sumsqe += Double.isNaN(d) ? 0 : d; } } else { // default or pearson _sumsqe += _glmf._family.equals(Family.tweedie) ? ((r.response(0) - xmu) * (r.response(0) - xmu)) * r.weight / Math.pow(xmu, _glmf._var_power) : w * (eta - z) * (eta - z); } _wsum += Math.sqrt(w); } @Override public void reduce(ComputeSEorDEVIANCETsk c){_sumsqe += c._sumsqe; _wsum += c._wsum;} } /*** * This function will assist in the estimation of dispersion factors using maximum likelihood */ public static class ComputeGammaMLSETsk extends FrameTask2<ComputeGammaMLSETsk> { final double [] _betaNew; double _sumlnyiOui; double _sumyiOverui; double _wsum; public ComputeGammaMLSETsk(H2OCountedCompleter cmp, DataInfo dinfo, Key jobKey, double [] betaNew, GLMParameters parms) { super(cmp, dinfo, jobKey); _glmf = new GLMWeightsFun(parms); _betaNew = betaNew; } transient double _sparseOffsetNew = 0; final GLMWeightsFun _glmf; transient GLMWeights _glmw; @Override public void chunkInit(){ if(_sparse) { _sparseOffsetNew = GLM.sparseOffset(_betaNew, _dinfo); } _glmw = new GLMWeights(); } @Override protected void processRow(Row r) { double z = r.response(0) - r.offset; // response double w = r.weight; if (z > 0 & w > 0) { double eta = _glmf._family.equals(Family.tweedie) ? r.innerProduct(_betaNew) + _sparseOffsetNew + r.offset : r.innerProduct(_betaNew) + _sparseOffsetNew; double xmu = _glmf.linkInv(eta); // ui double temp = w * z / xmu; _sumyiOverui += temp; _sumlnyiOui += w*Math.log(temp); _wsum += w; } } @Override public void reduce(ComputeGammaMLSETsk c){ _sumlnyiOui += c._sumlnyiOui; _sumyiOverui += c._sumyiOverui; _wsum += c._wsum;} } /*** * This function will assist in the estimation of dispersion factors using maximum likelihood */ public static class ComputeDiTriGammaTsk extends FrameTask2<ComputeDiTriGammaTsk> { double _sumDigamma; double _sumTrigamma; double _alpha; double[] _betaNew; public ComputeDiTriGammaTsk(H2OCountedCompleter cmp, DataInfo dinfo, Key jobKey, double[] betaNew, GLMParameters parms, double alpha) { super(cmp, dinfo, jobKey); _glmf = new GLMWeightsFun(parms); _alpha = alpha; _betaNew = betaNew; } transient double _sparseOffsetNew = 0; final GLMWeightsFun _glmf; transient GLMWeights _glmw; @Override public void chunkInit(){ if(_sparse) { _sparseOffsetNew = GLM.sparseOffset(_betaNew, _dinfo); } _glmw = new GLMWeights(); } @Override protected void processRow(Row r) { double z = r.response(0) - r.offset; // response double w = r.weight; if (z > 0 & w > 0) { _sumDigamma += w*digamma(w*_alpha); _sumTrigamma += w*w*trigamma(w*_alpha); } } @Override public void reduce(ComputeDiTriGammaTsk c){ _sumDigamma += c._sumDigamma; _sumTrigamma += c._sumTrigamma; } } static class GLMIncrementalGramTask extends MRTask<GLMIncrementalGramTask> { final int[] _newCols; final DataInfo _dinfo; double[][] _gram; double [] _xy; final double [] _beta; final GLMWeightsFun _glmf; public GLMIncrementalGramTask(int [] newCols, DataInfo dinfo, GLMWeightsFun glmf, double [] beta){ this._newCols = newCols; _glmf = glmf; _dinfo = dinfo; _beta = beta; } public void map(Chunk[] chks) { GLMWeights glmw = new GLMWeights(); double [] wsum = new double[_dinfo.fullN()+1]; double ywsum = 0; DataInfo.Rows rows = _dinfo.rows(chks); double [][] gram = new double[_newCols.length][_dinfo.fullN() + 1]; double [] xy = new double[_newCols.length]; final int ns = _dinfo.numStart(); double sparseOffset = rows._sparse?GLM.sparseOffset(_beta,_dinfo):0; for (int rid = 0; rid 
< rows._nrows; ++rid) { int j = 0; Row r = rows.row(rid); if(r.weight == 0) continue; if(_beta != null) { _glmf.computeWeights(r.response(0), r.innerProduct(_beta) + sparseOffset, r.offset, r.weight, glmw); } else { glmw.w = r.weight; glmw.z = r.response(0); } r.addToArray(glmw.w,wsum); ywsum += glmw.z*glmw.w; // first cats for (int i = 0; i < r.nBins; i++) { while (j < _newCols.length && _newCols[j] < r.binIds[i]) j++; if (j == _newCols.length || _newCols[j] >= ns) break; if (r.binIds[i] == _newCols[j]) { r.addToArray(glmw.w, gram[j]); xy[j] += glmw.w*glmw.z; j++; } } while (j < _newCols.length && _newCols[j] < ns) j++; // nums if (r.numIds != null) { // sparse for (int i = 0; i < r.nNums; i++) { while (j < _newCols.length && _newCols[j] < r.numIds[i]) j++; if (j == _newCols.length) break; if (r.numIds[i] == _newCols[j]) { double wx = glmw.w * r.numVals[i]; r.addToArray(wx, gram[j]); xy[j] += wx*glmw.z; j++; } } } else { // dense for (; j < _newCols.length; j++) { int id = _newCols[j]; double x = r.numVals[id - _dinfo.numStart()]; if(x == 0) continue; double wx = glmw.w * x; r.addToArray(wx, gram[j]); xy[j] += wx*glmw.z; } assert j == _newCols.length; } } if(rows._sparse && _dinfo._normSub != null){ // adjust for sparse zeros (skipped centering) int numstart = Arrays.binarySearch(_newCols,ns); if(numstart < 0) numstart = -numstart-1; for(int k = 0; k < numstart; ++k){ int i = _newCols[k]; double [] row = gram[k]; for(int j = ns; j < row.length-1; ++j){ double mean_j = _dinfo.normSub(j-ns); double scale_j = _dinfo.normMul(j-ns); gram[k][j] = gram[k][j] - mean_j*scale_j*wsum[i]; } } for(int k = numstart; k < gram.length; ++k){ int i = _newCols[k]; double mean_i = _dinfo.normSub(i-ns); double scale_i = _dinfo.normMul(i-ns); // categoricals for(int j = 0; j < _dinfo.numStart(); ++j){ gram[k][j]-=mean_i*scale_i*wsum[j]; } //nums for(int j = ns; j < gram[k].length-1; ++j){ double mean_j = _dinfo.normSub(j-ns); double scale_j = _dinfo.normMul(j-ns); gram[k][j] = gram[k][j] - mean_j*scale_j*wsum[i] - mean_i*scale_i*wsum[j] + mean_i*mean_j*scale_i*scale_j*wsum[wsum.length-1]; } gram[k][gram[k].length-1] -= mean_i*scale_i*wsum[gram[k].length-1]; xy[k] -= ywsum * mean_i * scale_i; } } _gram = gram; _xy = xy; } public void reduce(GLMIncrementalGramTask gt) { ArrayUtils.add(_xy,gt._xy); for(int i = 0; i< _gram.length; ++i) ArrayUtils.add(_gram[i],gt._gram[i]); } } }
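As a quick illustration of what the LSTask above accumulates per row (t(X) %*% W %*% X and t(X) %*% W %*% y), the following self-contained sketch spells out a dense version of that accumulation. It is an illustration only, not part of the H2O code: the class and method names are made up, the dense layout is assumed, the offset and standardization handling are omitted, and the intercept is treated as a trailing column of ones.

// Standalone sketch (not part of H2O): dense accumulation of X'WX and X'Wy,
// mirroring what LSTask builds row by row via Gram.addRow and _xy.
public class WeightedGramSketch {
    // xtx is (n+1) x (n+1); only the lower triangle is filled. xty has length n+1; the last slot is the intercept.
    static void addRow(double[][] xtx, double[] xty, double[] x, double y, double w) {
        int n = x.length;
        double[] xi = java.util.Arrays.copyOf(x, n + 1);
        xi[n] = 1.0;                            // intercept column
        double wy = w * y;
        for (int i = 0; i <= n; ++i) {
            xty[i] += wy * xi[i];               // t(X) %*% W %*% y
            for (int j = 0; j <= i; ++j)
                xtx[i][j] += w * xi[i] * xi[j]; // lower triangle of t(X) %*% W %*% X
        }
    }
    public static void main(String[] args) {
        double[][] xtx = new double[3][3];
        double[] xty = new double[3];
        addRow(xtx, xty, new double[]{1.0, 2.0}, 3.0, 0.5);
        addRow(xtx, xty, new double[]{0.0, 1.0}, 1.0, 2.0);
        System.out.println(java.util.Arrays.toString(xty)); // [1.5, 5.0, 3.5]
    }
}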
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/glm/GLMUtils.java
package hex.glm; import water.DKV; import water.Key; import water.MemoryManager; import water.fvec.Frame; import water.fvec.Vec; import water.util.ArrayUtils; import water.util.FrameUtils; import water.util.TwoDimTable; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; import java.util.stream.Stream; import static hex.glm.GLMModel.GLMParameters.Family.gaussian; import static water.fvec.Vec.T_NUM; import static water.fvec.Vec.T_STR; public class GLMUtils { /*** * From the gamColnames, this method attempts to translate to the column indices in adaptFrame. * @param adaptFrame * @param gamColnames * @return */ public static int[][] extractAdaptedFrameIndices(Frame adaptFrame, String[][] gamColnames, int numOffset) { String[] frameNames = adaptFrame.names(); List<String> allColNames = new ArrayList<>(); for (String name:frameNames) allColNames.add(name); int[][] gamColIndices = new int[gamColnames.length][]; int numFrame = gamColnames.length; for (int frameNum=0; frameNum < numFrame; frameNum++) { int numCols = gamColnames[frameNum].length; gamColIndices[frameNum] = MemoryManager.malloc4(numCols); for (int index=0; index < numCols; index++) { gamColIndices[frameNum][index] = numOffset+allColNames.indexOf(gamColnames[frameNum][index]); } } return gamColIndices; } public static GLMModel.GLMParameters[] genGLMParameters(GLMModel.GLMParameters parms, String[] validPreds, String[] predictorNames) { int numPreds = validPreds.length; if (numPreds > 0) { GLMModel.GLMParameters[] params = new GLMModel.GLMParameters[numPreds]; String[] frameNames = parms.train().names(); List<String> predList = Stream.of(predictorNames).collect(Collectors.toList()); if (parms._weights_column != null) predList.add(parms._weights_column); String[] ignoredCols = Arrays.stream(frameNames).filter(x -> !predList.contains(x)).collect(Collectors.toList()).toArray(new String[0]); for (int index=0; index < numPreds; index++) { params[index] = new GLMModel.GLMParameters(gaussian); params[index]._response_column = validPreds[index]; params[index]._train = parms.train()._key; params[index]._lambda = new double[]{0.0}; params[index]._alpha = new double[]{0.0}; params[index]._compute_p_values = true; params[index]._ignored_columns = ignoredCols; params[index]._weights_column = parms._weights_column; } return params; } else { return null; } } public static void removePredictors(GLMModel.GLMParameters parms, Frame train) { List<String> nonPredictors = Arrays.stream(parms.getNonPredictors()).collect(Collectors.toList()); String[] colNames = parms.train().names(); List<String> removeCols = Arrays.stream(colNames).filter(x -> !nonPredictors.contains(x)).collect(Collectors.toList()); for (String removeC : removeCols) train.remove(removeC); } public static Frame expandedCatCS(Frame beta_constraints, GLMModel.GLMParameters parms) { byte[] csByteType = new byte[]{T_STR, T_NUM, T_NUM}; String[] bsColNames = beta_constraints.names(); Frame betaCSCopy = beta_constraints.deepCopy(Key.make().toString()); betaCSCopy.replace(0, betaCSCopy.vec(0).toStringVec()).remove(); DKV.put(betaCSCopy); FrameUtils.ExpandCatBetaConstraints expandCatBS = new FrameUtils.ExpandCatBetaConstraints(beta_constraints, parms.train()).doAll(csByteType, betaCSCopy, true); Frame csWithEnum = expandCatBS.outputFrame(Key.make(), bsColNames, null); betaCSCopy.delete(); return csWithEnum; } public static boolean findEnumInBetaCS(Frame betaCS, GLMModel.GLMParameters parms) { List<String> colNames = 
Arrays.asList(parms.train().names()); String[] types = parms.train().typesStr(); Vec v = betaCS.vec("names"); int nRow = (int) betaCS.numRows(); for (int index=0; index<nRow; index++) { int colIndex = colNames.indexOf(v.stringAt(index)); if (colIndex >= 0 && "Enum".equals(types[colIndex])) return true; } return false; } public static GLM.GLMGradientInfo copyGInfo(GLM.GLMGradientInfo ginfo) { double[] gradient = ginfo._gradient.clone(); GLM.GLMGradientInfo tempGinfo = new GLM.GLMGradientInfo(ginfo._likelihood, ginfo._objVal, gradient); return tempGinfo; } public static TwoDimTable combineScoringHistory(TwoDimTable glmSc1, TwoDimTable earlyStopSc2) { String[] esColTypes = earlyStopSc2.getColTypes(); String[] esColFormats = earlyStopSc2.getColFormats(); List<String> finalColHeaders = new ArrayList<>(Arrays.asList(glmSc1.getColHeaders())); final List<String> earlyStopScHeaders = new ArrayList<>(Arrays.asList(earlyStopSc2.getColHeaders())); final int overlapSize = 3; // for "Timestamp", "Duration", "Iterations int earlyStopSCIterIndex = earlyStopScHeaders.indexOf("Iterations"); int indexOfIter = finalColHeaders.indexOf("iteration"); if (indexOfIter < 0) indexOfIter = finalColHeaders.indexOf("iterations"); List<String> finalColTypes = new ArrayList<>(Arrays.asList(glmSc1.getColTypes())); List<String> finalColFormats = new ArrayList<>(Arrays.asList(glmSc1.getColFormats())); List<Integer> earlyStopColIndices = new ArrayList<>(); int colCounter = 0; for (String colName : earlyStopSc2.getColHeaders()) { // collect final table colHeaders, RowHeaders, ColFormats, ColTypes if (!finalColHeaders.contains(colName.toLowerCase())) { finalColHeaders.add(colName); finalColTypes.add(esColTypes[colCounter]); finalColFormats.add(esColFormats[colCounter]); earlyStopColIndices.add(colCounter); } colCounter++; } final int tableSize = finalColHeaders.size(); String[] rowHeaders = generateRowHeaders(glmSc1, earlyStopSc2, indexOfIter, earlyStopSCIterIndex); TwoDimTable res = new TwoDimTable("Scoring History", "", rowHeaders, finalColHeaders.toArray(new String[tableSize]), finalColTypes.toArray(new String[tableSize]), finalColFormats.toArray(new String[tableSize]), ""); res = combineTableContents(glmSc1, earlyStopSc2, res, earlyStopColIndices, indexOfIter, earlyStopSCIterIndex, overlapSize); return res; } public static String[] generateRowHeaders(TwoDimTable glmSc1, TwoDimTable earlyStopSc2, int glmIterIndex, int earlyStopIterIndex) { int glmRowSize = glmSc1.getRowDim(); int earlyStopRowSize = earlyStopSc2.getRowDim(); List<Integer> iterList = new ArrayList<>(); for (int index = 0; index < glmRowSize; index++) iterList.add((Integer) glmSc1.get(index, glmIterIndex)); for (int index = 0; index < earlyStopRowSize; index++) { Integer iter = (Integer) earlyStopSc2.get(index, earlyStopIterIndex); if (!iterList.contains(iter)) iterList.add(iter); } String[] rowHeader = new String[iterList.size()]; for (int index=0; index < rowHeader.length; index++) rowHeader[index] = ""; return rowHeader; } // glmSc1 is updated for every iteration while earlyStopSc2 is updated per scoring interval. Hence, glmSc1 is // very likely to be longer than earlyStopSc2. We only add earlyStopSc2 to the table when the iteration // indices align with each other. 
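// For example, if glmSc1 holds iterations {1, 2, 3, 4, 5} and earlyStopSc2 holds {2, 5}, the combined table
// gets one row per distinct iteration; the early-stopping columns are filled only on the rows for iterations
// 2 and 5, while the remaining rows carry the GLM-only entries.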
public static TwoDimTable combineTableContents(final TwoDimTable glmSc1, final TwoDimTable earlyStopSc2, TwoDimTable combined, final List<Integer> earlyStopColIndices, final int indexOfIter, final int indexOfIterEarlyStop, final int overlapSize) { final int rowSize = glmSc1.getRowDim(); // array size from GLM Scoring, contains more iterations final int rowSize2 = earlyStopSc2.getRowDim(); // array size from scoringHistory final int glmColSize = glmSc1.getColDim(); final int earlyStopColSize = earlyStopColIndices.size(); int sc2RowIndex = 0; int glmRowIndex = 0; int rowIndex = 0; List<Integer> iterRecorded = new ArrayList<>(); while ((sc2RowIndex < rowSize2) && (glmRowIndex < rowSize)) { int glmScIter = (int) glmSc1.get(glmRowIndex, indexOfIter); int earlyStopScIter = (int) earlyStopSc2.get(sc2RowIndex, indexOfIterEarlyStop); if (glmScIter == earlyStopScIter) { if (!iterRecorded.contains(glmScIter)) { addOneRow2ScoringHistory(glmSc1, earlyStopSc2, glmColSize, earlyStopColSize, glmRowIndex, sc2RowIndex, rowIndex, true, true, earlyStopColIndices, combined, overlapSize); iterRecorded.add(glmScIter); } sc2RowIndex++; glmRowIndex++; } else if (glmScIter < earlyStopScIter) { // add GLM scoring history if (!iterRecorded.contains(glmScIter)) { addOneRow2ScoringHistory(glmSc1, earlyStopSc2, glmColSize, earlyStopColSize, glmRowIndex, sc2RowIndex, rowIndex, true, false, earlyStopColIndices, combined, overlapSize); iterRecorded.add(glmScIter); } glmRowIndex++; } else if (glmScIter > earlyStopScIter) { // add GLM scoring history if (!iterRecorded.contains(earlyStopScIter)) { addOneRow2ScoringHistory(glmSc1, earlyStopSc2, glmColSize, earlyStopColSize, glmRowIndex, sc2RowIndex, rowIndex, false, true, earlyStopColIndices, combined, overlapSize); iterRecorded.add(earlyStopScIter); } sc2RowIndex++; } rowIndex++; } for (int index = glmRowIndex; index < rowSize; index++) { // add left over glm scoring history int iter = (int) glmSc1.get(index, indexOfIter); if (!iterRecorded.contains(iter) && iterRecorded.get(iterRecorded.size()-1) < iter) { addOneRow2ScoringHistory(glmSc1, earlyStopSc2, glmColSize, earlyStopColSize, index, -1, rowIndex++, true, false, earlyStopColIndices, combined, overlapSize); iterRecorded.add(iter); } } for (int index = sc2RowIndex; index < rowSize2; index++) { // add left over scoring history int iter = (int) earlyStopSc2.get(index, indexOfIterEarlyStop); if (!iterRecorded.contains(iter) && iterRecorded.get(iterRecorded.size()-1) < iter) { addOneRow2ScoringHistory(glmSc1, earlyStopSc2, glmColSize, earlyStopColSize, -1, index, rowIndex++, false, true, earlyStopColIndices, combined, overlapSize); iterRecorded.add(iter); } } return combined; } public static void addOneRow2ScoringHistory(final TwoDimTable glmSc1, final TwoDimTable earlyStopSc2, int glmColSize, int earlyStopColSize, int glmRowIndex, int earlyStopRowIndex, int rowIndex, boolean addGlmSC, boolean addEarlyStopSC, final List<Integer> earlyStopColIndices, TwoDimTable combined, final int overlapSize) { if (addGlmSC) for (int glmIndex = 0; glmIndex < glmColSize; glmIndex++) combined.set(rowIndex, glmIndex, glmSc1.get(glmRowIndex, glmIndex)); if (addEarlyStopSC) for (int earlyStopIndex = 0; earlyStopIndex < earlyStopColSize; earlyStopIndex++) { if (!addGlmSC && earlyStopIndex < overlapSize) combined.set(rowIndex, earlyStopIndex, earlyStopSc2.get(earlyStopRowIndex, earlyStopIndex)); combined.set(rowIndex, earlyStopIndex + glmColSize, earlyStopSc2.get(earlyStopRowIndex, earlyStopColIndices.get(earlyStopIndex))); } } public static void 
updateGradGam(double[] gradient, double[][][] penalty_mat, int[][] gamBetaIndices, double[] beta, int[] activeCols) { // update gradient due to gam smoothness constraint int numGamCol = gamBetaIndices.length; // number of predictors used for gam for (int gamColInd = 0; gamColInd < numGamCol; gamColInd++) { // update each gam col separately int penaltyMatSize = penalty_mat[gamColInd].length; for (int betaInd = 0; betaInd < penaltyMatSize; betaInd++) { // derivative of each beta in penalty matrix int currentBetaIndex = gamBetaIndices[gamColInd][betaInd]; if (activeCols != null) { currentBetaIndex = ArrayUtils.find(activeCols, currentBetaIndex); } if (currentBetaIndex >= 0) { // only add if coefficient is active double tempGrad = 2 * beta[currentBetaIndex] * penalty_mat[gamColInd][betaInd][betaInd]; for (int rowInd = 0; rowInd < penaltyMatSize; rowInd++) { if (rowInd != betaInd) { int currBetaInd = gamBetaIndices[gamColInd][rowInd]; if (activeCols != null) { currBetaInd = ArrayUtils.find(activeCols, currBetaInd); } if (currBetaInd >= 0) tempGrad += beta[currBetaInd] * penalty_mat[gamColInd][betaInd][rowInd]; } } gradient[currentBetaIndex] += tempGrad; } } } } // Note that gradient is [ncoeff][nclass]. public static void updateGradGamMultinomial(double[][] gradient, double[][][] penaltyMat, int[][] gamBetaIndices, double[][] beta) { int numClass = beta[0].length; int numGamCol = gamBetaIndices.length; for (int classInd = 0; classInd < numClass; classInd++) { for (int gamInd = 0; gamInd < numGamCol; gamInd++) { int numKnots = gamBetaIndices[gamInd].length; for (int rowInd = 0; rowInd < numKnots; rowInd++) { // calculate dpenalty/dbeta rowInd double temp = 0.0; int betaIndR = gamBetaIndices[gamInd][rowInd]; // dGradient/dbeta_betaIndR for (int colInd = 0; colInd < numKnots; colInd++) { int betaIndC = gamBetaIndices[gamInd][colInd]; temp += (betaIndC==betaIndR)?(2*penaltyMat[gamInd][rowInd][colInd]*beta[betaIndC][classInd]) :penaltyMat[gamInd][rowInd][colInd]*beta[betaIndC][classInd]; } gradient[betaIndR][classInd] += temp; } } } } public static double calSmoothNess(double[] beta, double[][][] penaltyMatrix, int[][] gamColIndices) { int numGamCols = gamColIndices.length; double smoothval = 0; for (int gamCol=0; gamCol < numGamCols; gamCol++) { smoothval += ArrayUtils.innerProductPartial(beta, gamColIndices[gamCol], ArrayUtils.multArrVecPartial(penaltyMatrix[gamCol], beta, gamColIndices[gamCol])); } return smoothval; } /** * * @param beta multinomial number of class by number of predictors * @param penaltyMatrix * @param gamColIndices * @return */ public static double calSmoothNess(double[][] beta, double[][][] penaltyMatrix, int[][] gamColIndices) { int numClass = beta.length; double smoothval=0; for (int classInd=0; classInd < numClass; classInd++) { smoothval += calSmoothNess(beta[classInd], penaltyMatrix, gamColIndices); } return smoothval; } public static String[] genDfbetasNames(GLMModel model) { double[] stdErr = model._output.stdErr(); String[] names = Arrays.stream(model._output.coefficientNames()).map(x -> "DFBETA_"+x).toArray(String[]::new); List<String> namesList = new ArrayList<>(); int numCoeff = names.length; for (int index=0; index<numCoeff; index++) if (!Double.isNaN(stdErr[index])) namesList.add(names[index]); return namesList.stream().toArray(String[]::new); } public static double[] genNewBeta(int newBetaLength, double[] beta, double[] stdErr) { double[] newBeta = new double[newBetaLength]; int oldLen = stdErr.length; int count = 0; for (int index=0; index<oldLen; index++) if 
(!Double.isNaN(stdErr[index])) newBeta[count++] = beta[index]; return newBeta; } public static void removeRedCols(double[] row2Array, double[] reducedArray, double[] stdErr) { int count=0; int betaSize = row2Array.length; for (int index=0; index<betaSize; index++) if (!Double.isNaN(stdErr[index])) reducedArray[count++] = row2Array[index]; } public static Frame buildRIDFrame(GLMModel.GLMParameters parms, Frame train, Frame RIDFrame) { Vec responseVec = train.remove(parms._response_column); Vec weightsVec = null; Vec offsetVec = null; Vec foldVec = null; if (parms._offset_column != null) offsetVec = train.remove(parms._offset_column); if (parms._weights_column != null) // move weight vector to be the last vector before response variable weightsVec = train.remove(parms._weights_column); train.add(RIDFrame.names(), RIDFrame.removeAll()); if (weightsVec != null) train.add(parms._weights_column, weightsVec); if (offsetVec != null) train.add(parms._offset_column, offsetVec); if (responseVec != null) train.add(parms._response_column, responseVec); return train; } public static boolean notZeroLambdas(double[] lambdas) { if (lambdas == null) { return false; } else { return ((int) Arrays.stream(lambdas).filter(x -> x != 0.0).boxed().count()) > 0; } } }
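The smoothness penalty that calSmoothNess accumulates for each GAM column appears to be the quadratic form beta' * S * beta restricted to that column's coefficient indices. The sketch below spells that computation out with plain loops; it is an illustration only, the class and method names are made up, and it assumes dense arrays and that ArrayUtils.innerProductPartial / multArrVecPartial behave as their names suggest.

// Standalone sketch (not part of H2O): the restricted quadratic form
// sum_{i,j} beta[idx[i]] * S[i][j] * beta[idx[j]] used as a GAM smoothness penalty.
public class SmoothnessSketch {
    static double quadraticForm(double[] beta, double[][] penalty, int[] idx) {
        double val = 0.0;
        for (int i = 0; i < idx.length; ++i)
            for (int j = 0; j < idx.length; ++j)
                val += beta[idx[i]] * penalty[i][j] * beta[idx[j]];
        return val;
    }
    public static void main(String[] args) {
        double[] beta = {0.5, -1.0, 2.0};
        double[][] penalty = {{2.0, -1.0}, {-1.0, 2.0}}; // 2x2 penalty for coefficients 1 and 2
        System.out.println(quadraticForm(beta, penalty, new int[]{1, 2})); // prints 14.0
    }
}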
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/glm/GLMValidationTsk.java
package hex.glm; import hex.glm.GLMModel.GLMParameters; import water.MRTask; import water.fvec.Chunk; ///** // * Created by tomasnykodym on 9/12/14. // */ //public class GLMValidationTsk extends MRTask<GLMValidationTsk> { // final GLMParameters _params; // final double _ymu; // final int _rank; // GLMValidation _val; // // public GLMValidationTsk(GLMParameters params, double ymu, int rank){ // _params = params; // _ymu = ymu; // _rank = rank; // } // @Override // public void map(Chunk actual, Chunk predict){ // GLMValidation val = new GLMValidation(null,_ymu,_params,_rank); // for(int i = 0; i < actual._len; ++i) { // double predicted = predict.atd(i); // double real = actual.atd(i); // if(!Double.isNaN(real) && !Double.isNaN(predicted)) // val.add(real, predicted); // } // _val = val; // } // @Override // public void reduce(GLMValidationTsk gmt){ _val.add(gmt._val); } //}
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/glm/RegressionInfluenceDiagnosticsTasks.java
package hex.glm; import hex.DataInfo; import water.Job; import water.MRTask; import water.fvec.Chunk; import water.fvec.NewChunk; import water.util.ArrayUtils; import java.util.Arrays; import static hex.glm.GLMUtils.removeRedCols; import static hex.util.LinearAlgebraUtils.matrixMultiply; import static water.util.ArrayUtils.*; /*** * Classes defined here implemented the various pieces of regression influence diagnostics described in this doc: * https://github.com/h2oai/h2o-3/issues/7044. Hence, whenever I refer to the document, I mean the one in the * http link. */ public class RegressionInfluenceDiagnosticsTasks { public static class RegressionInfluenceDiagBinomial extends MRTask<RegressionInfluenceDiagBinomial> { final double[] _beta; final double[][] _gramInv; // could be with standardized or non-standardized predictors, not scaled with obj_reg final Job _j; final int _betaSize; final int _reducedBetaSize; final GLMModel.GLMParameters _parms; final DataInfo _dinfo; final double[] _stdErr; final boolean _foundRedCols; final double[] _oneOverStdErr; public RegressionInfluenceDiagBinomial(Job j, double[] beta, double[][] gramInv, GLMModel.GLMParameters parms, DataInfo dinfo, double[] stdErr) { _j = j; _beta = beta; // denormalized beta _betaSize = beta.length; _reducedBetaSize = gramInv.length; _foundRedCols = !(_betaSize == _reducedBetaSize); _gramInv = gramInv; // not scaled by parms._obj_reg _parms = parms; _dinfo = dinfo; _stdErr = stdErr; _oneOverStdErr = Arrays.stream(_stdErr).map(x -> 1.0/x).toArray(); } @Override public void map(Chunk[] chks, NewChunk[] nc) { if (isCancelled() || _j != null && _j.stop_requested()) return; double[] dfbetas = new double[_betaSize]; double[] dfbetasReduced = new double[_reducedBetaSize]; double[] row2Array = new double[_betaSize]; double[] row2ArrayReduced = new double[_reducedBetaSize]; double[] xTimesGramInv = new double[_reducedBetaSize]; DataInfo.Row r = _dinfo.newDenseRow(); for (int rid = 0; rid < chks[0]._len; ++rid) { _dinfo.extractDenseRow(chks, rid, r); genDfBetasRow(r, nc, row2Array, row2ArrayReduced, dfbetas, dfbetasReduced, xTimesGramInv); } if (_j != null) _j.update(1); } private void genDfBetasRow(DataInfo.Row r, NewChunk[] nc, double[] row2Array, double[] row2ArrayRed, double[] dfbetas, double[] dfbetasRed, double[] xTimesGramInv) { if (r.response_bad) { Arrays.fill(dfbetas, Double.NaN); } else if (r.weight == 0) { Arrays.fill(dfbetas, 0.0); } else { r.expandCatsPredsOnly(row2Array); // change Row to array if (_foundRedCols) { removeRedCols(row2Array, row2ArrayRed, _stdErr); genDfBeta(r, row2ArrayRed, xTimesGramInv, dfbetasRed, nc); } else { genDfBeta(r, row2Array, xTimesGramInv, dfbetas, nc); } } } private void genDfBeta(DataInfo.Row r, double[] row2Array, double[] xTimesGramInv, double[] dfbetas, NewChunk[] nc) { double mu = _parms.linkInv(r.innerProduct(_beta)+r.offset); // generate p hat // generate residual double residual = r.response(0)-mu; double oneOverMLL = gen1OverMLL(row2Array, xTimesGramInv, mu, r.weight); // 1.0/(oneOverObjReg-hjj) genDfBetas(oneOverMLL, residual, row2Array, dfbetas, r.weight); for (int c = 0; c < _reducedBetaSize; c++) // copy dfbetas over to new chunks nc[c].addNum(dfbetas[c]); } /*** * implement operations on and in between equation 5, 6 of the document */ public void genDfBetas(double oneOverMLL, double residual, double[] row2Array, double[] dfbetas, double weight) { double resOverMLL = oneOverMLL*residual*weight; int count=0; for (int index=0; index<_betaSize; index++) { if 
(!Double.isNaN(_stdErr[index])) { dfbetas[count] = resOverMLL * _oneOverStdErr[index] * ArrayUtils.innerProduct(row2Array, _gramInv[count]); count++; } } } /*** * Generate 1.0/(1.0-hjj) for each data row j. Implement equation 8 of the document for binomial family. */ public double gen1OverMLL(double[] row2Array, double[] xTimesGramInv, double mu, double weight) { for (int index = 0; index< _reducedBetaSize; index++) { // form X*invGram xTimesGramInv[index] = ArrayUtils.innerProduct(row2Array, _gramInv[index]); } double hjj = weight*mu*(1-mu)*ArrayUtils.innerProduct(xTimesGramInv, row2Array); return 1.0/(1.0-hjj); } } /*** * generate DFBETAS as in equation 4 of the document. */ public static class RegressionInfluenceDiagGaussian extends MRTask<RegressionInfluenceDiagGaussian> { final double[] _oneOverSqrtXTXDiag; final double[] _betas; // Exclude redundant columns if present final int _betaSize; final Job _j; public RegressionInfluenceDiagGaussian(double[][] xTx, double[] betas, Job j) { _betas = betas; _betaSize = betas.length; _j = j; _oneOverSqrtXTXDiag = new double[_betaSize]; for (int index = 0; index< _betaSize; index++) _oneOverSqrtXTXDiag[index] = 1.0/Math.sqrt(xTx[index][index]); } @Override public void map(Chunk[] chks, NewChunk[] ncs) { if (isCancelled() || (_j != null && _j.stop_requested())) return; double[] betaDiff = new double[_betaSize]; int numCols = chks.length; double[] row2Array = new double[numCols]; // contains new beta and var estimate of ith row int len = chks[0]._len; for (int index=0; index<len; index++) { readRow2Array(row2Array, chks, index, numCols); setBetaDiff(betaDiff, row2Array, ncs); } } private void setBetaDiff(double[] betaDiff, double[] row2Array, NewChunk[] nc) { if (!Double.isFinite(row2Array[0])) { Arrays.fill(betaDiff, Double.NaN); } else { double oneOverVarEst = 1.0 / Math.sqrt(row2Array[_betaSize]); for (int index = 0; index < _betaSize; index++) betaDiff[index] = (_betas[index] - row2Array[index]) * oneOverVarEst * _oneOverSqrtXTXDiag[index]; } for (int colIndex = 0; colIndex< _betaSize; colIndex++) // write new beta to new chunk nc[colIndex].addNum(betaDiff[colIndex]); } private void readRow2Array(double[] row2Array, Chunk[] chks, int rInd, int nCol) { for (int index=0; index<nCol; index++) row2Array[index] = chks[index].atd(rInd); } } public static class ComputeNewBetaVarEstimatedGaussian extends MRTask<ComputeNewBetaVarEstimatedGaussian> { final double[][] _cholInv; // XTX inverse: store cholInv without redundant predictors, not scaled by parms._obj_reg final double[] _xTransY; // store XTY of full dataset final double[] _xTransYReduced; // same as xTransY, but changed when there is redundant columns final int _betaSize; final int _reducedBetaSize; final int _newChunkWidth; final Job _j; final DataInfo _dinfo; final double[][] _xTx; // not scaled by parms._obj_reg final double _weightedNobs; final double _sumRespSq; final boolean _foundRedCols; final double[] _stdErr; // used to tell which predict is redundant public ComputeNewBetaVarEstimatedGaussian(double[][] cholInv, double[] xTY, Job j, DataInfo dinfo, double[][] gram, double nobs, double sumRespSq, double[] stdErr) { _cholInv = cholInv; _xTransYReduced = xTY; // if redundant columns present, is reduced _betaSize = stdErr.length; _reducedBetaSize = cholInv.length; _foundRedCols = !(_betaSize == _reducedBetaSize); _newChunkWidth = _betaSize+1; // last one is for estimated variance _j = j; _dinfo = dinfo; _xTx = gram; _weightedNobs = nobs-_reducedBetaSize; // intercept already included in 
gram/chol _sumRespSq = sumRespSq; // YTY _stdErr = stdErr; _xTransY = new double[_betaSize]; if (_foundRedCols) { // shrink xTransY int count=0; for (int index=0; index<_betaSize; index++) if (!Double.isNaN(stdErr[index])) _xTransY[index] = _xTransYReduced[count++]; } else { System.arraycopy(_xTransYReduced, 0, _xTransY, 0, _reducedBetaSize); } } @Override public void map(Chunk[] chks, NewChunk[] nc) { if (isCancelled() || (_j != null && _j.stop_requested())) return; // timeout double[] newBeta = new double[_betaSize]; double[] newBetaRed = new double[_reducedBetaSize]; double[] row2Array = new double[_betaSize]; double[] row2ArrayRed = new double[_reducedBetaSize]; double[][] tmpDoubleArray = new double[_reducedBetaSize][_reducedBetaSize]; double[] tmpArray = new double[_betaSize]; double[] tmpArrayRed = new double[_reducedBetaSize]; final int chkLen = chks[0]._len; DataInfo.Row r = _dinfo.newDenseRow(); for (int rowIndex=0; rowIndex<chkLen; rowIndex++) { _dinfo.extractDenseRow(chks, rowIndex, r); getNewBetaVarEstimate(r, nc, row2Array, row2ArrayRed, newBeta, newBetaRed, tmpArray, tmpArrayRed, tmpDoubleArray); } if (_j != null) _j.update(1); } private void getNewBetaVarEstimate(DataInfo.Row r, NewChunk[] newBetasChunk, double[] row2Array, double[] row2ArrayRed, double[] newBetas, double[] newBetaRed, double[] tmpArray, double[] tmpArrayRed, double[][] xiTransxi) { double varEstimate; if (r.response_bad) { varEstimate = Double.NaN; if (_foundRedCols) { Arrays.fill(newBetaRed, Double.NaN); writeNewChunk(newBetaRed, newBetasChunk, varEstimate); } else { Arrays.fill(newBetas, Double.NaN); writeNewChunk(newBetas, newBetasChunk, varEstimate); } } else if (r.weight == 0.0) { varEstimate = 0.0; if (_foundRedCols) { Arrays.fill(newBetaRed, 0.0); writeNewChunk(newBetaRed, newBetasChunk, varEstimate); } else { Arrays.fill(newBetas, 0.0); writeNewChunk(newBetas, newBetasChunk, varEstimate); } } else { r.expandCatsPredsOnly(row2Array); // contains redundant columns if present if (_foundRedCols) { // form xi*trans(xi) with only non-redundant columns removeRedCols(row2Array, row2ArrayRed, _stdErr); ArrayUtils.outerProduct(xiTransxi, row2ArrayRed, row2ArrayRed); } else { ArrayUtils.outerProduct(xiTransxi, row2Array, row2Array); } double[][] cholInvTimesOuterProduct = matrixMultiply(_cholInv, xiTransxi); // form inv XTX*trans(xi)*xi double[][] cholInvOuterCholInv = matrixMultiply(cholInvTimesOuterProduct, _cholInv); // form inv(XTX)*xi*trans(xi)*inv(XTX) if (_foundRedCols) { genNewBetas(row2ArrayRed, tmpArrayRed, newBetaRed, r, cholInvOuterCholInv); fillBetaRed2Full(newBetaRed, newBetas); varEstimate = genVarEstimate(r, tmpArrayRed, newBetaRed, newBetas); writeNewChunk(newBetaRed, newBetasChunk, varEstimate); } else { genNewBetas(row2Array, tmpArray, newBetas, r, cholInvOuterCholInv); varEstimate = genVarEstimate(r, tmpArray, newBetas, newBetas); writeNewChunk(newBetas, newBetasChunk, varEstimate); } } } private void fillBetaRed2Full(double[] newBetaRed, double[] newBetas) { int count=0; for (int index=0; index<_betaSize; index++) { if (Double.isNaN(_stdErr[index])) { newBetas[index] = 0.0; } else { newBetas[index] = newBetaRed[count++]; } } } /*** * calculate beta without current row r as in equation 8 of the document. 
*/ private void genNewBetas(double[] row2Array, double[] tmpArray, double[] newBetas, DataInfo.Row r, double[][] cholInvOuterCholInv) { multArrVec(_cholInv, row2Array, tmpArray); // inv(gram)*xiTrans(xi) double oneOverdenom = 1.0/(1-innerProduct(row2Array, tmpArray)); // 1.0/(1-tran(xi)*inv(gram)*xi mult(cholInvOuterCholInv, oneOverdenom); add(cholInvOuterCholInv, _cholInv); tmpArray = mult(row2Array, -r.response(0)); // xi*response add(tmpArray, _xTransYReduced); // -xi*response + xTransY multArrVec(cholInvOuterCholInv, tmpArray, newBetas); } private void writeNewChunk(double[] newBetas, NewChunk[] newBetasChunk, double varEstimate) { for (int colIndex=0; colIndex<_reducedBetaSize; colIndex++) // write new beta to new chunk newBetasChunk[colIndex].addNum(newBetas[colIndex]); newBetasChunk[_reducedBetaSize].addNum(varEstimate); } /*** * Generate the variance estimate as in equation 11 of the document. */ private double genVarEstimate(DataInfo.Row r, double[] tmpArray, double[] newBetasRed, double[] newBetas) { double temp = r.response(0)-r.innerProduct(newBetas); // r contains redundant columns here double ithVarEst = r.weight*temp*temp; multArrVec(_xTx, newBetasRed, tmpArray); return (_sumRespSq-2*ArrayUtils.innerProduct(newBetasRed, _xTransYReduced)+ ArrayUtils.innerProduct(newBetasRed, tmpArray)-ithVarEst)/(_weightedNobs-r.weight); } } }
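The binomial influence-diagnostics task above scales each row's contribution by 1 / (1 - h_jj), where h_jj = w * mu * (1 - mu) * x' * inv(X'WX) * x is the leverage-like quantity computed in gen1OverMLL. The self-contained sketch below reproduces just that factor with plain loops; the class and method names are made up for the example, and it assumes a dense row with no redundant columns.

// Standalone sketch (not part of H2O): the 1 / (1 - h_jj) factor used in the binomial
// regression influence diagnostics, with h_jj = w * mu * (1 - mu) * x' * gramInv * x.
public class LeverageSketch {
    static double oneOverOneMinusLeverage(double[] x, double[][] gramInv, double mu, double w) {
        double h = 0.0;
        for (int i = 0; i < x.length; ++i) {
            double gi = 0.0;
            for (int j = 0; j < x.length; ++j)
                gi += gramInv[i][j] * x[j];    // (gramInv * x)[i]
            h += x[i] * gi;                    // x' * gramInv * x
        }
        h *= w * mu * (1.0 - mu);
        return 1.0 / (1.0 - h);
    }
    public static void main(String[] args) {
        double[][] gramInv = {{0.5, 0.0}, {0.0, 0.25}};
        System.out.println(oneOverOneMinusLeverage(new double[]{1.0, 2.0}, gramInv, 0.3, 1.0));
    }
}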
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/glm/TweedieEstimator.java
package hex.glm; import water.MRTask; import water.MemoryManager; import water.fvec.Chunk; import water.fvec.Vec; import water.util.fp.Function2; import water.util.fp.Function3; import static hex.glm.TweedieEstimator.LikelihoodEstimator.gamma; import static hex.glm.TweedieEstimator.LikelihoodEstimator.*; import static java.lang.Math.*; import static org.apache.commons.math3.special.Gamma.*; @SuppressWarnings({"SuspiciousNameCombination", "SameParameterValue"}) public class TweedieEstimator extends MRTask<TweedieEstimator> { private final long _max_iter_cnt; double _loglikelihood; double _llhDp; double _llhDpDp; final double _p; final double _phi; private final double _pp; private final double _ppp; private final double _p1; private final double _p2; private final double _p1sq; private final double _p2sq; private final double _invp1; private final double _invp1sq; private final double _invp2; private final double _invp2sq; private final double _logp1; private final double _logp2; private final double _log_phi; private final double _alpha; private final double _logDenom_p; private final double _logPhip1inv; private final double _pialpha; private final double _logInvDenom2ConstPart; private final double _pisq; private final double _epsilon = 1e-12; // used as a stopping criterion for series method when gradient and hessian are required private double _wSum, _wDpSum, _wDpDpSum, _logWMax; double _logVSum, _logVDpSum, _logVDpDpSum, _logVMax; private double _vDpSumSgn, _vDpDpSumSgn; private final boolean _useSaddlepoint; private final boolean _needDp; private final boolean _needDpDp; private final boolean _forceInversion; private final boolean _skipZerosIfVarPowerGT2; public long _skippedRows; public long _totalRows; enum LikelihoodEstimator { series, inversion, saddlepoint, gamma, poisson, invGaussian } public LikelihoodEstimator _method; TweedieEstimator(double variancePower, double dispersion) { this(variancePower, dispersion, false, false, false, false, false); } TweedieEstimator(double variancePower, double dispersion, boolean forceInversion) { this(variancePower, dispersion, false, false, false, forceInversion, false); } TweedieEstimator(double variancePower, double dispersion, boolean useSaddlepointApprox, boolean needDp, boolean needDpDp, boolean forceInversion) { this(variancePower, dispersion, useSaddlepointApprox, needDp, needDpDp, forceInversion, false); } TweedieEstimator(double variancePower, double dispersion, boolean useSaddlepointApprox, boolean needDp, boolean needDpDp, boolean forceInversion, boolean skipZerosIfVarPowerGT2) { assert variancePower >= 1 : "Tweedie variance power has to be greater than 1!"; assert (forceInversion || useSaddlepointApprox) && !(needDp || needDpDp) || !forceInversion || !useSaddlepointApprox; _p = variancePower; _phi = dispersion; _max_iter_cnt = 25000; _pp = _p * _p; _ppp = _pp * _p; _p1 = (_p - 1); _p2 = (_p - 2); _p1sq = _p1 * _p1; _p2sq = _p2 * _p2; _invp1 = 1 / _p1; _invp1sq = _invp1 * _invp1; _invp2 = 1 / _p2; _invp2sq = _invp2 * _invp2; _logp1 = log(_p1); _logp2 = log(abs(_p2)); _log_phi = log(_phi); _alpha = _p2 / _p1; _logDenom_p = (2 * _p * _invp1 + 4) * _logp1 + 2 * _logp2; _logPhip1inv = -_invp1 * _log_phi; _pialpha = -Math.PI * _alpha; _logInvDenom2ConstPart = -4 * _logp1 - 2 * _logp2; _pisq = Math.PI * Math.PI; _useSaddlepoint = useSaddlepointApprox; _needDp = needDp; _needDpDp = needDpDp; _forceInversion = forceInversion; // useful when bracketing close to p=2 _skipZerosIfVarPowerGT2 = skipZerosIfVarPowerGT2; } public double 
logLikelihood(double y, double mu) { return logLikelihood(y, mu, 1); } public double logLikelihood(double y, double mu, double w) { return logLikelihood(y, mu, w, false); } public static double logLikelihood(double y, double mu, double p, double phi) { TweedieEstimator tweedieVariancePowerMLEstimator = new TweedieEstimator(p, phi); return tweedieVariancePowerMLEstimator.logLikelihood(y, mu); } public static double deviance(double y, double mu, double p) { double dev; if (p == 1) { if (y != 0) dev = y * log(y / mu) - (y - mu); else dev = mu; } else { if (p == 2) { dev = log(mu / y) + (y / mu) - 1; } else { if (p == 0) { dev = pow(y - mu, 2); dev = dev / 2; } else { dev = pow(y, 2-p) / ((p-1) * (p-2)) + (y * pow(mu, 1-p)) / (p-1) - pow(mu, 2-p) / (p-2); } } } return 2 * dev; } private double gammaLLH(double y, double mu, double w) { final double a = 1 / _phi; final double b = 1 / (_phi * mu); if (y == 0) return Double.NEGATIVE_INFINITY; return w * (a * log(b) - logGamma(a) + log(y) * (a - 1) + (-b * y)); } private double poissonLLH(double y, double mu, double w) { final double lambda = mu / _phi; if (abs(y / _phi - Math.round(y / _phi)) > _epsilon) return 0; return w * (y / _phi * log(lambda) - logGamma((y + 1) / _phi) - lambda); } private double invGaussLLH(double y, double mu, double w) { if (y == 0) return Double.NEGATIVE_INFINITY; y = y / mu; double dispersion = _phi * mu; return w * ((-log(dispersion) - log(2 * PI) - 3 * log(y) - pow(y - 1, 2) / dispersion / y) / 2 - log(mu)); } private void accumulate(double llh, double grad, double hess) { _loglikelihood += llh; if (Double.isFinite(grad)) _llhDp += grad; if (Double.isFinite(hess)) _llhDpDp += hess; } private double logLikelihood(double y, double mu, double w, boolean accumulate) { if (w == 0) return 0; if (_p >= 2 && y <= 0) { if (accumulate && !_skipZerosIfVarPowerGT2) accumulate(Double.NEGATIVE_INFINITY, 0, 0); return Double.NEGATIVE_INFINITY; } double[] llh_llhDp_llhDpDp = MemoryManager.malloc8d(3); if (!_useSaddlepoint) { _method = series; final double xi = _phi / pow(y, 2 - _p); // decide whether we want the Fourier inversion approach if (_p == 1) { llh_llhDp_llhDpDp[0] = poissonLLH(y, mu, w); _method = poisson; } else if (_p == 2) { llh_llhDp_llhDpDp[0] = gammaLLH(y, mu, w); _method = gamma; } else if (_p == 3) { llh_llhDp_llhDpDp[0] = invGaussLLH(y, mu, w); _method = invGaussian; } else if (_p < 2) { // this "xi-based" heuristic is proposed in section 8 in // DUNN, Peter and SMYTH, Gordon, 2008. Evaluation of Tweedie exponential dispersion model densities by Fourier inversion. if (xi <= 0.01) _method = inversion; } else if (_p > 2) { if (xi <= 1.0) _method = inversion; } if (_forceInversion) _method = inversion; if (series.equals(_method)) tweedieSeries(y, mu, w, llh_llhDp_llhDpDp); if ((inversion.equals(_method) || Double.isNaN(llh_llhDp_llhDpDp[0])) && _p != 1 && _p != 2) { llh_llhDp_llhDpDp[0] = tweedieInversion(y, mu, w); _method = inversion; if (!Double.isFinite(llh_llhDp_llhDpDp[0])) { tweedieSeries(y, mu, w, llh_llhDp_llhDpDp); _method = series; } else if (_needDp || _needDpDp) { final double llh = llh_llhDp_llhDpDp[0]; tweedieSeries(y, mu, w, llh_llhDp_llhDpDp); llh_llhDp_llhDpDp[0] = llh; } } } // Use saddlepoint approx. if the series method failed. See [1] for description and comparison of the // saddlepoint approximation. // [1] DUNN, Peter and SMYTH, Gordon, 2001. 
Tweedie Family Densities: Methods of Evaluation if (_useSaddlepoint || (Double.isNaN(llh_llhDp_llhDpDp[0]))) { // Try to use Saddlepoint approximation _method = saddlepoint; if (y == 0) { if (_p > 1 && _p < 2) llh_llhDp_llhDpDp[0] = pow(mu, 2 - _p) / (_phi * (2 - _p)); else llh_llhDp_llhDpDp[0] = Double.NEGATIVE_INFINITY; } else { double dev = deviance(mu, y, _p); if (_p < 2) y += 1. / 6.; llh_llhDp_llhDpDp[0] = -0.5 * (log(2 * PI * _phi) + _p * log(y)) + (-dev / (2 * _phi)); } } if (accumulate) accumulate(llh_llhDp_llhDpDp[0], llh_llhDp_llhDpDp[1], llh_llhDp_llhDpDp[2]); return llh_llhDp_llhDpDp[0]; } private void tweedieSeries(double y, double mu, double w, double[] out_llh_dp_dpdp) { out_llh_dp_dpdp[0] = 0; // llh out_llh_dp_dpdp[1] = 0; // llhDp - gradient with respect to p out_llh_dp_dpdp[2] = 0; // llhDpDp - hessian with respect to p double llhDpPart = (pow(mu, -_p1) * y * log(mu) * _invp1 - pow(mu, -_p2) * log(mu) * _invp2 + pow(mu, -_p1) * y * _invp1sq - pow(mu, -_p2) * _invp2sq) * w / _phi; double llhDpDpPart = -(pow(mu, -_p1) * y * pow(log(mu), 2) * _invp1 - pow(mu, -_p2) * pow(log(mu), 2) * _invp2 + 2 * (pow(mu, -_p1) * y * log(mu) * _invp1sq - pow(mu, -_p2) * log(mu) * _invp2sq + pow(mu, -_p1) * y * _invp1sq * _invp1 - pow(mu, -_p2) * _invp2sq * _invp2)) * w / _phi; if (_p < 2) { if (y == 0) { out_llh_dp_dpdp[0] = -w * pow(mu, 2 - _p) / (_phi * (2 - _p)); if (_needDp) out_llh_dp_dpdp[1] = -(pow(mu, -_p2) * w * log(mu) * _invp2 + pow(mu, -_p2) * w * _invp2sq) / _phi; if (_needDpDp) out_llh_dp_dpdp[2] = (pow(mu, -_p2) * w * pow(log(mu), 2) * _invp2 + 2 * pow(mu, -_p2) * w * log(mu) * _invp2sq + 2 * pow(mu, -_p2) * w * _invp2sq * _invp2) / _phi; } else { calculateWjSums(y, w); out_llh_dp_dpdp[0] = -log(y) + log(_wSum) + _logWMax - w * (pow(mu, -_p1) * y * _invp1 - pow(mu, -_p2) / _p2) / _phi; if (out_llh_dp_dpdp[0] == Double.POSITIVE_INFINITY) // can happen with p approaching one (wSum == Inf) out_llh_dp_dpdp[0] = Double.NEGATIVE_INFINITY; if (_needDp) out_llh_dp_dpdp[1] = llhDpPart + _wDpSum / _wSum; if (_needDpDp) out_llh_dp_dpdp[2] = llhDpDpPart + (_wDpDpSum / _wSum - _wDpSum / _wSum * _wDpSum / _wSum); } } else { // _p > 2 //mu = max(1e-16, mu); // no mass at 0 if (y == 0) return; calculateVkSums(y, w); out_llh_dp_dpdp[0] = -log(PI * y) + _logVSum + _logVMax - w * (pow(mu, -_p1) * y * _invp1 - pow(mu, -_p2) / _p2) / _phi; if (_needDp) out_llh_dp_dpdp[1] = llhDpPart + exp(_logVDpSum - _logVSum) * _vDpSumSgn; if (_needDpDp) out_llh_dp_dpdp[2] = llhDpDpPart + (exp(_logVDpDpSum - _logVSum) * _vDpDpSumSgn - exp(_logVDpSum - _logVSum + _logVDpSum - _logVSum)); } } public TweedieEstimator compute(Vec mu, Vec y, Vec weights) { if (_p >= 2 && y.min() <= 0 && !_skipZerosIfVarPowerGT2) { _loglikelihood = Double.NEGATIVE_INFINITY; _llhDp = 0; _llhDpDp = 0; return this; } return doAll(mu, y, weights); } @Override public void map(Chunk[] cs) { double mu, y, w, llh; _totalRows += cs[0]._len; // cs = {mu, response, weight} for (int i = 0; i < cs[0]._len; i++) { mu = max(0, cs[0].atd(i)); // In first iteration it sometimes generates negative responses if (!Double.isFinite(mu)) { _skippedRows++; continue; } y = cs[1].atd(i); w = cs[2].atd(i); llh = logLikelihood(y, mu, w, true); if (llh == 0 || !Double.isFinite(llh)) _skippedRows++; if (!Double.isFinite(llh)) { if ((!_needDp || !Double.isFinite(_llhDp)) && (!_needDpDp || !Double.isFinite(_llhDpDp))) { _skippedRows += cs[0]._len - i; return; } } // early stop if bestllh > current llh } } @Override public void reduce(TweedieEstimator mrt) { 
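// Reduce phase: combine the partial results from each map() call - the summed log-likelihood, its first and second derivatives with respect to the variance power, and the skipped/total row counters.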
_loglikelihood += mrt._loglikelihood; _llhDp += mrt._llhDp; _llhDpDp += mrt._llhDpDp; _skippedRows += mrt._skippedRows; _totalRows += mrt._totalRows; } void cleanSums() { _logVSum = 0; _logVDpSum = 0; _logVDpDpSum = 0; _logVMax = 0; _logWMax = 0; _wSum = 0; _wDpSum = 0; _wDpDpSum = 0; } void calculateWjSums(double y, double w) { assert y > 0; final double negwy = (-1) * pow(w, _invp1) * pow(y, 2 * _invp1); final double denom_W_dp_dp_exped = pow(_p1, 2 * _invp1) * _p2 * pow(_phi, _invp1) * pow(y, _p * _invp1); final double log_p1_phi_wy = log(_p1 * _phi / (w * y)); final double logs_sumWdp = -(2 - log(_p1 * _phi / (w * y))) * _p + 3 + 2 * log(w * y / (_p1 * _phi)); final double p1alphaw = pow(_p1, _alpha) * pow(w, _invp1); final double ps_sumWdp = _p1sq * _p2; final double pphiy_sumWdp = -_p2 * pow(_phi, _invp1) * pow(y, _alpha); final double log_y = log(y); final double log_w = log(w); boolean WjLB = false; boolean WjUB = false; boolean WjDpLB = !_needDp; boolean WjDpUB = !_needDp; boolean WjDpDpLB = !_needDpDp; boolean WjDpDpUB = !_needDpDp; cleanSums(); final long jMax = max(2, (long) Math.ceil(w * pow(y, 2 - _p) / ((2 - _p) * _phi))); long j; int cnt = 0; // Start at the maximum and spread out until the change is negligible while (!(WjLB && WjUB && WjDpLB && WjDpUB && WjDpDpLB && WjDpDpUB) && cnt < _max_iter_cnt) { j = jMax + cnt; if (!WjUB) WjUB = Wj(log_y, log_w, j); if (!WjDpUB) WjDpUB = WjDp(logs_sumWdp, p1alphaw, ps_sumWdp, pphiy_sumWdp, j); if (!WjDpDpUB) WjDpDpUB = WjDpDp(y, w, negwy, denom_W_dp_dp_exped, log_p1_phi_wy, j); j = jMax - cnt - 1; if (j < 1) { WjLB = true; WjDpLB = true; WjDpDpLB = true; } else { if (!WjLB) WjLB = Wj(log_y, log_w, j); if (!WjDpLB) WjDpLB = WjDp(logs_sumWdp, p1alphaw, ps_sumWdp, pphiy_sumWdp, j); if (!WjDpDpLB) WjDpDpLB = WjDpDp(y, w, negwy, denom_W_dp_dp_exped, log_p1_phi_wy, j); } cnt++; } } private boolean Wj(double log_y, double log_w, long j) { final double expInner = ((1 - _alpha) * j) * log_w + (_alpha * j) * (_logp1 - log_y) - j * (1 - _alpha) * _log_phi - j * _logp2 - logGamma(j + 1) - logGamma(-j * _alpha); if (_logWMax == 0) { _logWMax = expInner; } double wSumInc; wSumInc = exp(expInner - _logWMax); _wSum += wSumInc; if (_needDp || _needDpDp) // can use more precision in grad and hess return (abs(wSumInc) + _epsilon) / (abs(_wSum) + _epsilon) < _epsilon && expInner - _logWMax < -37; return expInner - _logWMax < -37; } private boolean WjDp(double logs_sumWdp, double p1alphaw, double ps_sumWdp, double pphiy_sumWdp, long j) { double wDpSumInc; final double log2_inner = _p2 * digamma(-j * _alpha) + logs_sumWdp; wDpSumInc = Math.signum(log2_inner) * Math.signum(ps_sumWdp) * exp( log(j) + log(Math.abs(log2_inner)) + j * log(p1alphaw) - log(abs(ps_sumWdp)) - j * log(pphiy_sumWdp) - logGamma(j + 1) - logGamma(-j * _alpha) - _logWMax); _wDpSum += wDpSumInc; return (Math.abs(wDpSumInc) + _epsilon) / (abs(_wDpSum) + _epsilon) < _epsilon; } private boolean WjDpDp(double y, double w, double negwy, double denom_W_dp_dp_exped, double log_p1_phi_wy, long j) { if (j <= 1) return false; // Undefined double wDpDpSumInc; final double mja = -(_alpha * j); final double psi_mja = digamma(mja); final double p1jp2p = pow(_p1, (j * _p + 2 * _p) * _invp1); final double p1jp2 = pow(_p1, (j * _p + 2) * _invp1); final double jlogp1 = j * log(_p - 1); final double logdpdp1 = ((_p * (psi_mja - 2) + _p * log_p1_phi_wy - 2 * psi_mja + 3) * j * p1jp2p * _p2 * log(y) - _p1sq * _p2 * (_p1 + _p2 - _p2 * psi_mja) * j * p1jp2 * log(w) - (_p2sq * j * pow(psi_mja, 2) - 2 * (jlogp1 
- 2 * j + 11) * pow(_p, 2) + 5 * pow(_p, 3) - ((_p1 + _p2) * _p2 - _p2sq * psi_mja) * j * log(_phi) - _p2sq * j * trigamma(mja) + (7 * jlogp1 - 12 * j + 32) * _p - 6 * jlogp1 + ((jlogp1 - 4 * j + 10) * pow(_p, 2) - 2 * pow(_p, 3) - 2 * (2 * jlogp1 - 7 * j + 8) * _p + 4 * jlogp1 - 12 * j + 8) * psi_mja + 9 * j - 15) * p1jp2p + (_p1sq * _p2 * j * p1jp2 * _p * log(w) - (j * _p2 * _p * log(_phi) + j * _p2 * _p * psi_mja + (jlogp1 - 2 * j + 4) * pow(_p, 2) - 2 * pow(_p, 3) - j * _p * (2 * log(_p - 1) - 3) - 2) * p1jp2p) * log_p1_phi_wy + 2 * (_p1sq * _p2 * j * p1jp2 * log(w) + j * p1jp2p * _p2 * log(y) - (j * _p2 * log(_phi) + j * _p2 * psi_mja + (jlogp1 - 2 * j + 8) * _p - 3 * pow(_p, 2) - 2 * jlogp1 + 3 * j - 5) * p1jp2p) * (-log_p1_phi_wy)); wDpDpSumInc = -Math.signum(logdpdp1) * pow(Math.signum(negwy) * Math.signum(denom_W_dp_dp_exped), j) * exp(log(abs(logdpdp1)) + j * log(abs(negwy)) - _logDenom_p - log(j - 1) - j * log(abs(denom_W_dp_dp_exped)) - logGamma(j - 1) - logGamma(mja) - _logWMax); _wDpDpSum += wDpDpSumInc; return (abs(wDpDpSumInc) + _epsilon) / (abs(_wDpDpSum) + _epsilon) < _epsilon; } void calculateVkSums(double y, double w) { assert y > 0; final double logs = _logp1 + log(_phi) - log(w) - log(y); final double logssq = -pow(_logp1, 2) - pow(log(_phi), 2) - pow(log(w), 2) - pow(log(y), 2); final double log_w = log(w); final double log_y = log(y); final double log_pwy = _logp2 + (_alpha - 1) * log_w + _alpha * log_y; final double vkR = (_alpha - 1) * _log_phi + _alpha * _logp1 - (_alpha - 1) * log_w - _logp2 - _alpha * log_y; // r in R's tweedie::dtweedie.series.bigp // Indicators whether lower and upper bounds were reached. boolean VkLB = false; boolean VkUB = false; boolean VkDpLB = !_needDp; boolean VkDpUB = !_needDp; boolean VkDpDpLB = !_needDpDp; boolean VkDpDpUB = !_needDpDp; cleanSums(); final long kMax = max(1, (long) (w * pow(y, 2 - _p) / ((_p - 2) * _phi))); long k; long cnt = 0; double mPiAlphaK, logGammaK1, logGammaKalpha1, digammaKalpha1; // Start at the maximum and spread out until the change is negligible while (!(VkLB && VkUB && VkDpLB && VkDpUB && VkDpDpLB && VkDpDpUB) && cnt < _max_iter_cnt) { k = kMax + cnt; mPiAlphaK = _pialpha * k; if (k > Integer.MAX_VALUE) { // prevents StackOverflowError from digamma _logVSum = Double.NEGATIVE_INFINITY; _logVMax = 0; _logVDpSum = Double.NaN; _logVDpDpSum = Double.NaN; break; } logGammaK1 = logGamma(k + 1); logGammaKalpha1 = logGamma(k * _alpha + 1); digammaKalpha1 = digamma(k * _alpha + 1); if (!VkUB) VkUB = Vk(vkR, k, mPiAlphaK, logGammaKalpha1, logGammaK1); if (!VkDpUB) VkDpUB = VkDp(log_y, log_w, k, log_pwy, mPiAlphaK, logGammaKalpha1, logGammaK1, digammaKalpha1); if (!VkDpDpUB) VkDpDpUB = VkDpDp(k, log_y, log_w, logs, logssq, mPiAlphaK, logGammaKalpha1, digammaKalpha1); k = kMax - cnt - 1; if (k < 1) { VkLB = true; VkDpLB = true; VkDpDpLB = true; } else { mPiAlphaK = _pialpha * k; logGammaK1 = logGamma(k + 1); logGammaKalpha1 = logGamma(k * _alpha + 1); digammaKalpha1 = digamma(k * _alpha + 1); if (!VkLB) VkLB = Vk(vkR, k, mPiAlphaK, logGammaKalpha1, logGammaK1); if (!VkDpLB) VkDpLB = VkDp(log_y, log_w, k, log_pwy, mPiAlphaK, logGammaKalpha1, logGammaK1, digammaKalpha1); if (!VkDpDpLB) VkDpDpLB = VkDpDp(k, log_y, log_w, logs, logssq, mPiAlphaK, logGammaKalpha1, digammaKalpha1); } cnt++; } _vDpSumSgn = Math.signum(_logVDpSum); _vDpDpSumSgn = Math.signum(_logVDpDpSum); // not adding _logVMax here since it gets eliminated in both grad and hess; in llh _logVSum = log(max(0, _logVSum)); _logVDpSum = log(abs(_logVDpSum)); 
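// _logVSum and _logVDpSum were accumulated on a linear scale relative to exp(_logVMax) and have just been moved to the log scale; the second-derivative sum below gets the same treatment, with its sign kept separately in _vDpDpSumSgn.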
_logVDpDpSum = log(abs(_logVDpDpSum)); } boolean Vk(double r, long k, double mPiAlphaK, double logGammaKalpha1, double logGammaK1) { double expInner = logGammaKalpha1 - logGammaK1 + k * r; if (_logVMax == 0) { _logVMax = expInner; } double vSumInc = exp(expInner - _logVMax) * sin(mPiAlphaK); if (k % 2 == 1) vSumInc *= -1; _logVSum += vSumInc; // logVSum wasn't transformed by log at this point if (_needDp || _needDpDp) // can use more precision for grad and hess return (abs(vSumInc) + _epsilon) / (abs(_logVSum) + _epsilon) < _epsilon && expInner - _logVMax < -37; return expInner - _logVMax < -37; } boolean VkDp(double log_y, double log_w, long k, double log_pwy, double mPiAlphaK, double logGammaKalpha1, double logGammaK1, double digammaKalpha1) { final double logInvDenom = -k * log_pwy - logGammaK1; final double logInner = sin(mPiAlphaK) * (_invp1sq * (_log_phi - log_w - log_y + digammaKalpha1) - _invp2 + _invp1sq * (_logp1 + _p2)) - PI * _invp1sq * cos(mPiAlphaK); double vDpSumInc = Math.signum(logInner) * exp(k * _alpha * _logp1 + logInvDenom + logGammaKalpha1 + log(abs(logInner)) + log(k) + k * _logPhip1inv - _logVMax); if (k % 2 == 1) vDpSumInc *= -1; _logVDpSum += vDpSumInc; return (abs(vDpSumInc) + _epsilon) / (abs(_logVDpSum) + _epsilon) < _epsilon; } boolean VkDpDp(long k, double log_y, double log_w, double logs, double logssq, double mPiAlphaK, double logGammaKalpha1, double digammaKalpha1) { if (k < 1) return true; double vDpDpSumInc = -(2 * PI * (k * _p2 * digammaKalpha1 + (_p * (logs - 2) - 2 * logs + 3) * k - _p2sq - _p2) * _p2 * cos(mPiAlphaK) - (_ppp * (2 * logs - 5) - _p2sq * k * pow(digammaKalpha1, 2) + 4 * _pisq * k + (_pisq * k + k * logssq - 2 * (k * _log_phi - k * log_w - k * log_y - 2 * k + 5) * _logp1 + 2 * (k * log_w + k * log_y + 2 * k - 5) * _log_phi - 2 * (k * log_y + 2 * k - 5) * log_w - 2 * (2 * k - 5) * log_y - 4 * k + 22) * _pp + 4 * k * logssq - _p2sq * k * trigamma(k * _alpha + 1) - 2 * (2 * _pisq * k + 2 * k * logssq - (4 * k * _log_phi - 4 * k * log_w - 4 * k * log_y - 7 * k + 8) * _logp1 + (4 * k * log_w + 4 * k * log_y + 7 * k - 8) * _log_phi - (4 * k * log_y + 7 * k - 8) * log_w - (7 * k - 8) * log_y - 6 * k + 16) * _p - 4 * (2 * k * _log_phi - 2 * k * log_w - 2 * k * log_y - 3 * k + 2) * _logp1 + 4 * (2 * k * log_w + 2 * k * log_y + 3 * k - 2) * _log_phi - 4 * (2 * k * log_y + 3 * k - 2) * log_w - 4 * (3 * k - 2) * log_y - 2 * ((k * logs - 2 * k + 5) * _pp - _ppp - (4 * k * logs - 7 * k + 8) * _p + 4 * k * logs - 6 * k + 4) * digammaKalpha1 - 9 * k + 15) * sin(-mPiAlphaK)) * pow(-1, k) * exp(k * _alpha * _logp1 + (k - k * _alpha) * log_w + (-k * _alpha) * log_y + logGammaKalpha1 + _logInvDenom2ConstPart - k * _logp2 - k * _invp1 * _log_phi - logGamma(k) - _logVMax); _logVDpDpSum += vDpDpSumInc; return (abs(vDpDpSumInc) + _epsilon) / (abs(_logVDpDpSum) + _epsilon) < _epsilon; } // Fourier inversion part ------------------------------------------------------------------------------------------ private final static double[] absc = new double[]{ 0.003064962185159399599837515282, 0.009194771386432905660446301965, 0.015324235084898182521206955187, 0.021453122959774875710969865850, 0.027581204711919792005314633343, 0.033708250072480593073631638390, 0.039834028811548446991075422829, 0.045958310746809061253514983036, 0.052080865752192069539905361353, 0.058201463766518225784185602834, 0.064319874802144239023249383536, 0.070435868953604666153900382142, 0.076549216406251049948927800415, 0.082659687444887164353701791697, 0.088767052462401033197103572547, 
0.094871081968392542704826553290, 0.100971546597796779654032661711, 0.107068217119502664957941817647, 0.113160864444966535735659363127, 0.119249259636820398311485291742, 0.125333173917474477443434466295, 0.131412378677713714836272629327, 0.137486645485288105916765744041, 0.143555746093496028326086388915, 0.149619452449761269896555404557, 0.155677536704201868733576930026, 0.161729771218192097670396378817, 0.167775928572916122050173726166, 0.173815781577913441857674570201, 0.179849103279615923911549657532, 0.185875666969875702472236866925, 0.191895246194484031532212497950, 0.197907614761680478165928320777, 0.203912546750652373672707540209, 0.209909816520023939645511745766, 0.215899198716335033454427616562, 0.221880468282509041300087915261, 0.227853400466309585770119383596, 0.233817770828785853609588230029, 0.239773355252706182882960206371, 0.245719929950979243393760498293, 0.251657271475063337717870126653, 0.257585156723362629360707387605, 0.263503362949610298038294331491, 0.269411667771238594326632664888, 0.275309849177735044278847453825, 0.281197685538984665232220550024, 0.287074955613597970760508815147, 0.292941438557224431704639755480, 0.298796913930850727147969791986, 0.304641161709084229425315015760, 0.310473962288420446409276109989, 0.316295096495494865163067288449, 0.322104345595318808381790631756, 0.327901491299498415443736121233, 0.333686315774437136649765989205, 0.339458601649521019005817379366, 0.345218132025286672526220854706, 0.350964690481571417457473671675, 0.356698061085645612422467820579, 0.362418028400326441840206825873, 0.368124377492073107109860075070, 0.373816893939063366048003445030, 0.379495363839250532400626525487, 0.385159573818401101963360133595, 0.390809311038112505709563038181, 0.396444363203810545837058043617, 0.402064518572726958822727283405, 0.407669565961855551172732248233, 0.413259294755887629513324554864, 0.418833494915126280933037605791, 0.424391956983378670908990670796, 0.429934472095826580861910315434, 0.435460831986874741250659326397, 0.440970828997976571628214514931, 0.446464256085437494192547092098, 0.451940906828194099986717446882, 0.457400575435571277171931114935, 0.462843056755014803371750531369, 0.468268146279799957198974880157, 0.473675640156716648565549121486, 0.479065335193728458751394327919, 0.484437028867608643345477048570, 0.489790519331549878412346288314, 0.495125605422748638062557802186, 0.500442086669964369960439398710, 0.505739763301052192012718933256, 0.511018436250469942905283460277, 0.516277907166757477064322756632, 0.521517978419990813065965085116, 0.526738453109207749314180091460, 0.531939135069806612321485772554, 0.537119828880917804525552128325, 0.542280339872746153240257172001, 0.547420474133886614254151936620, 0.552540038518610221451865527342, 0.557638840654121947792987157300, 0.562716688947789034358493154286, 0.567773392594340675643138638407, 0.572808761583037395759276932949, 0.577822606704811114752828871133, 0.582814739559374461741469986009, 0.587784972562300778164967596240, 0.592733118952072146612408687361, 0.597658992797097665672367838852, 0.602562409002699417293058559153, 0.607443183318068347098517278937, 0.612301132343186949036351052200, 0.617136073535721196847703140520, 0.621947825217879390891084767645, 0.626736206583239252587702594610, 0.631501037703541601153744977637, 0.636242139535451722842651633982, 0.640959333927286434295922390447, 0.645652443625708949426211802347, 0.650321292282389107342055467598, 0.654965704460630293581857586105, 0.659585505641960279099578201567, 0.664180522232690528916521088831, 
0.668750581570438429324099161022, 0.673295511930615209195138959331, 0.677815142532878778247606987861, 0.682309303547550927149245580949, 0.686777826101999111507723227987, 0.691220542286981598500972268084, 0.695637285162956975348436117201, 0.700027888766357242467108790152, 0.704392188115823825178551942372, 0.708730019218407059078401744046, 0.713041219075728482934550811478, 0.717325625690105272980190420640, 0.721583078070637928824737628020, 0.725813416239259323603505436040, 0.730016481236746561656048015720, 0.734192115128692979197921886225, 0.738340161011444173766449239338, 0.742460463017992289280755358050, 0.746552866323834107831203255046, 0.750617217152788063216917180398, 0.754653362782772507699746711296, 0.758661151551544898907764036267, 0.762640432862400241553757496149, 0.766591057189829894191746006982, 0.770512876085140518966909439769, 0.774405742182031731069002944423, 0.778269509202133780156884768076, 0.782104031960504153531132942589, 0.785909166371082990032448378770, 0.789684769452107193643541904748, 0.793430699331483024749900323513, 0.797146815252117502126338877133, 0.800832977577207061337105642451, 0.804489047795484579772562483413, 0.808114888526424324233232709958, 0.811710363525404265949703130900, 0.815275337688824874859960800677, 0.818809677059186835634818635299, 0.822313248830123577626238784433, 0.825785921351392504519139947661, 0.829227564133821259950707371900, 0.832638047854211360565557242808, 0.836017244360197420149916069931, 0.839365026675062742000932303199, 0.842681269002510502375002943154, 0.845965846731390636037417607440, 0.849218636440382645957924978575, 0.852439515902632671817684695270, 0.855628364090346482662141625042, 0.858785061179337283476797892945, 0.861909488553529001819697441533, 0.865001528809411501796944321541, 0.868061065760453942630192614160, 0.871087984441469842522565159015, 0.874082171112937289514377425803, 0.877043513265272300927222204336, 0.879971899623057107753254513227, 0.882867220149221032521325014386, 0.885729366049175403929893946042, 0.888558229774901842112910799187, 0.891353705028992693293332649773, 0.894115686768646500404145172070, 0.896844071209613846740182907524, 0.899538755830097902510544827237, 0.902199639374606787711741162639, 0.904826621857757973366176429408, 0.907419604568035498282085882238, 0.909978490071499224178808162833, 0.912503182215446018155091678636, 0.914993586132022862500434712274, 0.917449608241791114693342024111, 0.919871156257243582921034885658, 0.922258139186271863607657905959, 0.924610467335585606285519588710, 0.926928052314082817630946919962, 0.929210807036171093642451523920, 0.931458645725040335072719699383, 0.933671483915885391802191861643, 0.935849238459080523533373252576, 0.937991827523303123292919281084, 0.940099170598609368276754594262, 0.942171188499458911458361853875, 0.944207803367690501339382080914, 0.946208938675447974731014255667, 0.948174519228055068253979698056, 0.950104471166841935136915253679, 0.951998721971919814599516485032, 0.953857200464905963244177655724, 0.955679836811598848456128507678, 0.957466562524601938477530893579, 0.959217310465897199378559889738, 0.960932014849367743813957076782, 0.962610611243270297698870763270, 0.964253036572656041514051139529, 0.965859229121740714418820061837, 0.967429128536223759127210541919, 0.968962675825556618569578404276, 0.970459813365158741049754098640, 0.971920484898583625366086380382, 0.973344635539632463405723683536, 0.974732211774417045546670124168, 0.976083161463370263533079196350, 0.977397433843205876158322098490, 0.978674979528826316510503602331, 
0.979915750515178207713518077071, 0.981119700179057141475880143844, 0.982286783280859610023583172733, 0.983416955966283978796127485111, 0.984510175767978390481971473491, 0.985566401607137931861757351726, 0.986585593795049176080169672787, 0.987567714034582877502543851733, 0.988512725421635041200829618901, 0.989420592446515700935094628221, 0.990291280995286848920500233362, 0.991124758351048074089817419008, 0.991920993195171463163717362477, 0.992679955608486541684953863296, 0.993401617072414810927227790671, 0.994085950470055879080177874130, 0.994732930087228184312664325262, 0.995342531613465753004277303262, 0.995914732142977210394008125149, 0.996449510175577368720212234621, 0.996946845617603827349739731289, 0.997406719782849782163225427212, 0.997829115393562893210344100225, 0.998214016581612795242506308568, 0.998561408890039747809908021736, 0.998871279275449386325647083140, 0.999143616112378230020851788140, 0.999378409202599238270181558619, 0.999575649798310816862567662611, 0.999735330671042699002271092468, 0.999857446369979419031892575731, 0.999941994606845629967040167685, 0.999988990984381875826159102871 }; private final static double[] weights = new double[]{ 0.006129905175405764294893629085, 0.006129674838036492517945319491, 0.006129214171953068987508395082, 0.006128523194465529920493818139, 0.006127601931538031489188345091, 0.006126450417787949499770494555, 0.006125068696484561869830542946, 0.006123456819547496918221263229, 0.006121614847544605726714639360, 0.006119542849689838838467270676, 0.006117240903840640703359454733, 0.006114709096494903503571372028, 0.006111947522787882815242799239, 0.006108956286488514790533610466, 0.006105735499995448845034218266, 0.006102285284333060395856040969, 0.006098605769146656953305640769, 0.006094697092697685079920599804, 0.006090559401858643313876218173, 0.006086192852107506767733724473, 0.006081597607521639116401335201, 0.006076773840772089693706980995, 0.006071721733116766488158599913, 0.006066441474393663782493923975, 0.006060933263013820911091489307, 0.006055197305953895908769979428, 0.006049233818748141547350094527, 0.006043043025480822859341056841, 0.006036625158776993613218841972, 0.006029980459794644954973907858, 0.006023109178214984885113558732, 0.006016011572233289327049643447, 0.006008687908549399311897154519, 0.006001138462357170390293337192, 0.005993363517334814559445188564, 0.005985363365633674000154673678, 0.005977138307867538649653660343, 0.005968688653101249068366751516, 0.005960014718839098078750904364, 0.005951116831012847295523382485, 0.005941995323969674266950669050, 0.005932650540459486442068648415, 0.005923082831621807528565959444, 0.005913292556972981305063452595, 0.005903280084392509806379134574, 0.005893045790109107881504790782, 0.005882590058686672750132284904, 0.005871913283009922226995946914, 0.005861015864269402374231443531, 0.005849898211946678167061364206, 0.005838560743798747003363569519, 0.005827003885842343793022291010, 0.005815228072338094258975083051, 0.005803233745774116596194414086, 0.005791021356849213735928927349, 0.005778591364456383931702543322, 0.005765944235664991271428370112, 0.005753080445703687324787711788, 0.005740000477942362212824267687, 0.005726704823874017614981912772, 0.005713193983096204360550007806, 0.005699468463292455683300019587, 0.005685528780212967606133567244, 0.005671375457655504839782345528, 0.005657009027445287531465911712, 0.005642430029415474758425208535, 0.005627639011386637545031330632, 0.005612636529146218002106483169, 0.005597423146427572132610706035, 
0.005581999434888915492813943331, 0.005566365974091757977404437696, 0.005550523351479155591270409076, 0.005534472162353648236332581689, 0.005518213009854874839810179310, 0.005501746504936822455833489443, 0.005485073266345072764971213530, 0.005468193920593387644113470003, 0.005451109101940144682774125329, 0.005433819452364725861859273692, 0.005416325621543047544315108155, 0.005398628266823572718902113365, 0.005380728053202113274344764449, 0.005362625653297344377468114374, 0.005344321747325165260222856745, 0.005325817023073333225657854939, 0.005307112175875508715272577120, 0.005288207908585203231854876549, 0.005269104931549262356427210108, 0.005249803962581399939535398147, 0.005230305726934937789185386947, 0.005210610957275768270746674204, 0.005190720393654706284192190680, 0.005170634783479783128101736622, 0.005150354881487986119514843608, 0.005129881449717141328470404460, 0.005109215257477111096773292331, 0.005088357081320884003905469228, 0.005067307705015408093862649963, 0.005046067919512328692199787383, 0.005024638522917936923894988155, 0.005003020320463469512717313847, 0.004981214124474673925202505842, 0.004959220754341320605562692947, 0.004937041036486572963271068915, 0.004914675804335549846868502755, 0.004892125898284490834178050989, 0.004869392165668924923882521227, 0.004846475460731644938072726347, 0.004823376644591045349363955808, 0.004800096585208414069756432951, 0.004776636157355448886185911306, 0.004752996242581400063165197878, 0.004729177729179922379243450337, 0.004705181512155669557029291639, 0.004681008493190663179162047669, 0.004656659580610528897937072657, 0.004632135689350181870227451952, 0.004607437740919578979259529916, 0.004582566663369058192201155322, 0.004557523391254390821014652602, 0.004532308865601840722203696998, 0.004506924033872596394023624100, 0.004481369849927314963355939881, 0.004455647273990272390353784004, 0.004429757272613176269371315641, 0.004403700818638965619467029455, 0.004377478891165111941907728266, 0.004351092475507059922912311833, 0.004324542563160944756706083325, 0.004297830151766558401393858446, 0.004270956245069621078080945864, 0.004243921852884336397282449838, 0.004216727991055227442451780462, 0.004189375681419113366110718033, 0.004161865951766540415446282708, 0.004134199835803465360173358789, 0.004106378373112003904443767510, 0.004078402609111729873458962459, 0.004050273595020140865452518142, 0.004021992387813410133046154726, 0.003993560050186265031335608455, 0.003964977650512617468603338011, 0.003936246262804875099827750518, 0.003907366966673973297796695903, 0.003878340847288519813162999128, 0.003849168995334298088578650621, 0.003819852506973011631308256852, 0.003790392483801296400619529336, 0.003760790032809268722963080833, 0.003731046266338848560462082560, 0.003701162302042068034252375597, 0.003671139262839005247551771305, 0.003640978276875699460451984990, 0.003610680477481611767159863646, 0.003580247003126992098170910950, 0.003549678997380481711154676105, 0.003518977608865717348479718041, 0.003488143991218293615136358810, 0.003457179303042459076605874557, 0.003426084707867667507319442421, 0.003394861374104690254077665301, 0.003363510475001769365471782081, 0.003332033188600574697552092474, 0.003300430697691895606804557417, 0.003268704189771169145439788650, 0.003236854856993954046573414018, 0.003204883896131119781075513586, 0.003172792508523682164511825476, 0.003140581900037948612225413569, 0.003108253281020026750902651713, 0.003075807866250348642650491726, 0.003043246874898155977795521920, 0.003010571530475542479515782546, 
0.002977783060791477226514345489, 0.002944882697905871343779793392, 0.002911871678083000243575373389, 0.002878751241745274112165953184, 0.002845522633426492749991743025, 0.002812187101725080462522043945, 0.002778745899257360849748943465, 0.002745200282610227044549633391, 0.002711551512294096445698787790, 0.002677800852695407917564152100, 0.002643949572029331059747070398, 0.002609998942291816281802141475, 0.002575950239212147514084039202, 0.002541804742204615413792012646, 0.002507563734320777774911004343, 0.002473228502201043829678006603, 0.002438800336026468017908142016, 0.002404280529470125687963033556, 0.002369670379648584207510353394, 0.002334971187073220984936616773, 0.002300184255601201484958684418, 0.002265310892386644160689801453, 0.002230352407831378593050519754, 0.002195310115535779524331694290, 0.002160185332249389255493410289, 0.002124979377821471521886609324, 0.002089693575151347661178480308, 0.002054329250138731913916112504, 0.002018887731633921007318166474, 0.001983370351387804628867650436, 0.001947778444001947023567211659, 0.001912113346878267219550173728, 0.001876376400168962114978210565, 0.001840568946726033397465194241, 0.001804692332050859783151852689, 0.001768747904243635031551473702, 0.001732737013952766495436530469, 0.001696661014324110389878130789, 0.001660521260950091485333879326, 0.001624319111818701803773290493, 0.001588055927262728948823333752, 0.001551733069908424120231238419, 0.001515351904624347286962282588, 0.001478913798470224069681044909, 0.001442420120645365689757144700, 0.001405872242437510889756513421, 0.001369271537171097780430373270, 0.001332619380155811508736896087, 0.001295917148634917904354013629, 0.001259166221733550722339245453, 0.001222367980406950313532199459, 0.001185523807388666516285380403, 0.001148635087138642285956025013, 0.001111703205791432849669497784, 0.001074729551104118682389176875, 0.001037715512404510350211173098, 0.001000662480539097265105907830, 0.000963571847821184873997268916, 0.000926445007979156964772471383, 0.000889283356104514192963517161, 0.000852088288600481814569209682, 0.000814861203130772785048485662, 0.000777603498568675576864406285, 0.000740316574946985831578993853, 0.000703001833408759078253291719, 0.000665660676159933917435396200, 0.000628294506424452014331505367, 0.000590904728403224407604077406, 0.000553492747240409460780796724, 0.000516059969000759975743530816, 0.000478607800667961002377692736, 0.000441137650179552207388433693, 0.000403650926533314074063502064, 0.000366149040035628262328842863, 0.000328633402852310262977353350, 0.000291105430251488786486113725, 0.000253566543570602365847976856, 0.000216018177976967721805670597, 0.000178461805545972196102716412, 0.000140899017388190165439576518, 0.000103331903496931828490348892, 0.000065765731659236768705603660, 0.000028252637373961186168999649 }; private double tweedieInversion(double y, double mu, double w) { assert _p != 1 && _p != 2; if (_p < 2 && _p > 1) { if (y < 0) return Double.NEGATIVE_INFINITY; // skip; should be -Inf if we wouldn't skip that point else if (y == 0) { return pow(mu, 2 - _p) / (_phi * _p2); } } else if (_p > 2) { if (y <= 0) return Double.NEGATIVE_INFINITY; // skip; should be -Inf if we wouldn't skip that point } double dev = deviance(y, mu, _p); // method 3 in the paper - transform phi and estimate density on the mu=1, y=1 where it should be highest and // then transform the result so it corresponds to the value of the untransformed pdf double phi = _phi / pow(y, -_p2); if (_p > 1 && _p < 2) return w * (log(max(0, smallP(1, 1, phi))) - 
log(y) - dev / (2 * _phi)); else if (_p > 2) return w * (log(max(0, bigP(1, 1, phi))) - log(y) - dev / (2 * _phi)); return 0; } private final static class ZeroBounds { final double lowerBound; final double upperBound; final double funcLo; final double funcHi; private ZeroBounds(double lowerBound, double upperBound, double funcLo, double funcHi) { this.lowerBound = lowerBound; this.upperBound = upperBound; this.funcLo = funcLo; this.funcHi = funcHi; } } private final static class FindKMaxResult { int _mMax; double _kMax, _tMax; FindKMaxResult(double kMax, double tMax, int mMax) { _kMax = kMax; _tMax = tMax; _mMax = mMax; } } double calcCGFRe(double x, double phi) { // calccgf in fortran double psi = atan((1. - _p) * x * phi); double front = 1 / (phi * (2 - _p)); double denom = pow(cos(psi), _alpha); return front * cos(psi * _alpha) / denom - front; } double calcCGFIm(double y, double x, double phi) { // calccgf and imgcgf in fortran double psi = atan((1. - _p) * x * phi); double front = 1 / (phi * (2 - _p)); double denom = pow(cos(psi), _alpha); return front * sin(psi * _alpha) / denom - x * y; } double calcDCGFRe(double x, double phi) { double psi = atan((1. - _p) * x * phi); double alpha = 1. / (1. - _p); double denom = pow(cos(psi), alpha); return -(sin(psi * alpha) / denom); } double calcDCGFIm(double y, double x, double phi) { double psi = atan((1. - _p) * x * phi); double alpha = 1. / (1. - _p); double denom = pow(cos(psi), alpha); return cos(psi * alpha) / denom - y; } double imgdcgf(double x, double phi) { double psi = atan((1 - _p) * x * phi); double alpha = 1 / (1 - _p); return cos(psi * alpha) / exp(alpha * log(cos(psi))); } private final class ImgDDCGF implements Function2<Double, Double, Double> { private final double _transformedPhi; public ImgDDCGF(double phi) { _transformedPhi = phi; } @Override public Double apply(Double x, Double unusedHere) { final double psi = atan((1.0 - _p) * x * _transformedPhi); final double alpha = _p / (1.0 - _p); final double top = sin(psi * alpha); final double bottom = exp(alpha * log(abs(cos(psi)))); return -_transformedPhi * top / bottom; } } private final class DK implements Function2<Double, Double, Double> { private final double _transformedPhi; public DK(double phi) { _transformedPhi = phi; } @Override public Double apply(Double y, Double x) { return calcDCGFIm(y, x, _transformedPhi); } } private FindKMaxResult findKMax(double y, double phi) { final double largest = 1e30; final int largeInt = 100000000; double psi = (PI / 2) * (1 - _p) / (2 * _p - 1); double z2 = 1 / (phi * (1 - _p)) * tan(psi); double dz2 = imgdcgf(z2, phi) - y; double z1, dz1, z, dz, zLo, zHi, fLo, fHi, tMax, kMax; int mMax; DK dk = new DK(phi); if (_p > 2) { double front = -1 / (phi * (1 - _p)); double inner = (1 / y) * cos(-PI / (2. * (1. - _p))); z1 = (front * pow(inner, _p - 1.)); dz1 = imgdcgf(z1, phi) - y; if (dz1 > 0) { if (z1 > z2) { z = z1; dz = dz1; } else { z = z2; dz = dz2; } } else { if (dz2 < 0) { if (z1 > z2) { z = z2; dz = dz2; } else { z = z1; dz = dz1; } } else { z = z2; dz = dz2; } } } else { // 1 < p <2 z = z2; dz = dz2; } if (dz > 0) { zLo = z; zHi = z + 10; fLo = dk.apply(y, zLo); fHi = dk.apply(y, zHi); while (fHi > 0 && zHi <= largest / 10.) 
{ zLo = zHi; zHi = 1.1 * zHi + 1; fLo = fHi; fHi = dk.apply(y, zHi); } } else { zLo = z / 2; zHi = z; fLo = dk.apply(y, zLo); fHi = dk.apply(y, zHi); while (fLo < 0) { zHi = zLo; zLo = zLo / 2.0; fHi = fLo; fLo = dk.apply(y, zLo); } } if (zLo == zHi) z = zLo; else z = zLo - fLo * (zHi - zLo) / (fHi - fLo); z = newtonMethodWithBisection(y, zLo, zHi, z, dk, new ImgDDCGF(phi)); tMax = z; kMax = calcCGFIm(y, tMax, phi); // in fortran imgcgf( y, tMax); if (kMax < 0) { kMax = abs(kMax); mMax = largeInt; } else { int dpmMax = (int) ((kMax / PI) - 0.5); mMax = Math.min(dpmMax, largeInt); } return new FindKMaxResult(kMax, tMax, mMax); } private final class IntegrateImCGF implements Function3<Double, Double, Integer, Double> { // intim in fortran private final double _transformedPhi; public IntegrateImCGF(double phi) { _transformedPhi = phi; } @Override public Double apply(Double y, Double x, Integer m) { double im = calcCGFIm(y, x, _transformedPhi); return -PI / 2. - m * PI + im; } } private double otherZero(double y, double phi) { // othzero in fortran double psi = (PI / 2) * (1.0 - _p) / (2 * _p - 1); double inflec = atan(psi) / ((1 - _p) * phi); double smallest = 1e-30; int m; double tLo, tHi, fLo, fHi, zStep, t0, kMax, tMax; IntegrateImCGF intIm = new IntegrateImCGF(phi); DK dk = new DK(phi); if (y >= 1) { m = -1; tLo = min(1e-5, inflec); tHi = max(inflec, 1e-5); } else { FindKMaxResult fndKMaxRes = findKMax(y, phi); kMax = fndKMaxRes._kMax; tMax = fndKMaxRes._tMax; if (kMax >= PI / 2.) { m = 0; tLo = smallest; tHi = tMax; } else { m = -1; tLo = min(tMax, inflec); tHi = max(tMax, inflec); } } fLo = intIm.apply(y, tLo, m); fHi = intIm.apply(y, tHi, m); zStep = abs(tHi - tLo); while ((fLo * fHi) > 0) { tLo = tHi; tHi = tHi + 0.2 * zStep; fLo = intIm.apply(y, tLo, m); fHi = intIm.apply(y, tHi, m); } t0 = tLo - fLo * (tHi - tLo) / (fHi - fLo); return newtonMethodWithBisectionWithM(y, tLo, tHi, t0, intIm, dk, m); } private ZeroBounds findBounds(double y, double phi) { // findsp in fortran ZeroFunction zeroFunction = new ZeroFunction(phi); double t = PI / y; double f1 = zeroFunction.apply(y, 0.01); double f2; double t3 = otherZero(y, phi); double tOld = t, tStep; t = min(t, t3); f2 = zeroFunction.apply(y, t); tStep = 0.2 * t; while ((f1 * f2) > 0 && f1 != f2) { tOld = t; t = tOld + tStep; f1 = f2; f2 = zeroFunction.apply(y, t); } return new ZeroBounds(tOld, t, f1, f2); } private double newtonMethodWithBisection(double y, double x1, double x2, double x0, Function2<Double, Double, Double> fun, Function2<Double, Double, Double> dfun) { // in fortran sfzro double maxit = 100, xl, xh, result, dx, dxOld, f, df; double fl = fun.apply(y, x1); double fh = fun.apply(y, x2); if (fl == 0) return x1; else if (fh == 0) return x2; else if (fl < 0) { xl = x1; xh = x2; } else { xl = x2; xh = x1; } result = x0; dxOld = abs(x2 - x1); dx = dxOld; f = fun.apply(y, result); df = dfun.apply(y, result); for (int i = 0; i < maxit; i++) { if (((result - xh * df - f) * (result - xl) * df - f) > 0 || abs(2 * f) > abs(dxOld * df)) { // use bisection dxOld = dx; dx = 0.5 * (xh - xl); result = xl + dx; if (xl == result) return result; } else { //Newton 's method dxOld = dx; if (df == 0) return result; dx = f / df; if (result == result - dx) return result; } f = fun.apply(y, result); df = dfun.apply(y, result); if (f < 0) xl = result; else xh = result; } return result; } private double newtonMethodWithBisectionWithM(double y, double x1, double x2, double x0, Function3<Double, Double, Integer, Double> fun, Function2<Double, Double, 
Double> dfun, int m) { // in fortran sfzro2 int maxit = 100; double xl, xh, result, dxOld, dx, f, df; double fl = fun.apply(y, x1, m); double fh = fun.apply(y, x2, m); if (fl == 0) { return x1; } else if (fh == 0) { return x2; } else if (fl < 0) { xl = x1; xh = x2; } else { xl = x2; xh = x1; } if (x0 > xl && x0 < xh) result = x0; else result = (xl + xh) / 2; dxOld = abs(x2 - x1); dx = dxOld; f = fun.apply(y, result, m); df = dfun.apply(y, result); for (int i = 0; i < maxit; i++) { if (((result - xh * df - f) * (result - xl) * df - f) > 0 || abs(2 * f) > abs(dxOld * df)) { // use bisection dxOld = dx; dx = 0.5 * (xh - xl); result = xl + dx; if (xl == result) return result; } else { // Newton 's method dxOld = dx; dx = f / df; if (result == result - dx) return result; } if (abs(dx) < 1e-11) return result; f = fun.apply(y, result, m); df = dfun.apply(y, result); if (f < 0) xl = result; else xh = result; } return result; } private double gaussQuad(Function3<Double, Double, Double, Double> fun, double a, double b, double y, double mu) { double sum = 0, xLower, xUpper; for (int i = 0; i < weights.length; i++) { xLower = (b - a) / 2. * absc[i] + (b + a) / 2.; xUpper = (a - b) / 2. * absc[i] + (b + a) / 2.; sum += weights[i] * (fun.apply(y, mu, xLower) + fun.apply(y, mu, xUpper)); } return sum * (b - a) / 2.; } private final class FunctionForVariancePowerBetween1And2 implements Function3<Double, Double, Double, Double> { // f2 in fortran private final double _transformedPhi; public FunctionForVariancePowerBetween1And2(double phi) { _transformedPhi = phi; } @Override public Double apply(Double y, Double mu, Double x) { final double lambda = pow(mu, 2 - _p) / (_transformedPhi * (2 - _p)); if (x == 0) { return 1.; } else { final double rl = calcCGFRe(x, _transformedPhi); final double im = calcCGFIm(y, x, _transformedPhi); return (exp(rl) * cos(im) - exp(-lambda) * cos(x * y)); } } } private final class FunctionForVariancePowerGreaterThan2 implements Function3<Double, Double, Double, Double> { private final double _transformedPhi; public FunctionForVariancePowerGreaterThan2(double phi) { _transformedPhi = phi; } @Override public Double apply(Double y, Double mu, Double x) { final double rl = calcCGFRe(x, _transformedPhi); final double im = calcCGFIm(y, x, _transformedPhi); return exp(rl) * cos(im); } } private final class ZeroFunction implements Function2<Double, Double, Double> { private final double _transformedPhi; public ZeroFunction(double phi) { _transformedPhi = phi; } @Override public Double apply(Double y, Double x) { final double rl = calcCGFRe(x, _transformedPhi); final double im = calcCGFIm(y, x, _transformedPhi); final double lambda = 1 / (_transformedPhi * (2 - _p)); // lambda with mu == 1 return exp(rl) * cos(im) - exp(-lambda) * cos(x * y); } } private final class ZeroDerivFunction implements Function2<Double, Double, Double> { private final double _transformedPhi; public ZeroDerivFunction(double phi) { _transformedPhi = phi; } @Override public Double apply(Double y, Double x) { final double rl = calcCGFRe(x, _transformedPhi); final double im = calcCGFIm(y, x, _transformedPhi); final double drl = calcDCGFRe(x, _transformedPhi); final double dim = calcDCGFIm(y, x, _transformedPhi); final double lambda = 1 / (_transformedPhi * (2 - _p)); //lambda with mu == 1 return (exp(rl) * (-dim * sin(im)) + exp(rl) * drl * cos(im) + exp(-lambda) * y * sin(x * y)); } } private static class SidiAcceleration { final double[][] _mMatrix, _nMatrix; // references final double[] _wOld, _xVec; // references 
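// _w is the Sidi-accelerated estimate of the oscillating tail of the inversion integral; _relErr and _absErr are the convergence estimates refreshed on every call to apply().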
double _w; double _relErr, _absErr; private SidiAcceleration(double[][] mMatrix, double[][] nMatrix, double[] wOld, double[] xVec) { this._mMatrix = mMatrix; this._nMatrix = nMatrix; this._wOld = wOld; this._xVec = xVec; } void apply(double FF, double psi, double w, int znum) { final double largest = 1e30; _w = w; if (abs(psi) < 1e-31) { _w = FF; _relErr = 0; return; } _mMatrix[1][0] = FF / psi; _nMatrix[1][0] = 1 / psi; for (int i = 1; i < znum; i++) { double denom = 1. / _xVec[znum - i] - 1. / _xVec[znum]; _mMatrix[1][i] = (_mMatrix[0][i - 1] - _mMatrix[1][i - 1]) / denom; _nMatrix[1][i] = (_nMatrix[0][i - 1] - _nMatrix[1][i - 1]) / denom; } if (!(abs(_mMatrix[1][znum - 1]) > largest || abs(_nMatrix[1][znum - 1]) > largest)) { if (znum > 1) _w = _mMatrix[1][znum - 1] / _nMatrix[1][znum - 1]; _wOld[0] = _wOld[1]; _wOld[1] = _wOld[2]; _wOld[2] = _w; } if (znum > 2) { _relErr = abs(_w - _wOld[0]) + abs(_w - _wOld[1]) / _w; _absErr = abs(_wOld[2] - _wOld[1]); } else _relErr = 1.0; System.arraycopy(_mMatrix[1], 0, _mMatrix[0], 0, znum); System.arraycopy(_nMatrix[1], 0, _nMatrix[0], 0, znum); } } private double smallP(double y, double mu, double phi) { double[][] mMatrix = MemoryManager.malloc8d(2, 101); double[][] nMatrix = MemoryManager.malloc8d(2, 101); double area, area0 = 0, area1 = 0, result, w = 0, tStep, zero1, lower, upper, fLo, fHi; ZeroFunction zeroFunction = new ZeroFunction(phi); ZeroDerivFunction zeroDerivFunction = new ZeroDerivFunction(phi); ZeroBounds zb = findBounds(y, phi); upper = zb.upperBound; lower = zb.lowerBound; fHi = zb.funcHi; fLo = zb.funcLo; double t0 = (upper - fHi * (upper - lower) / (fHi - fLo)); double zero2 = newtonMethodWithBisection(y, lower, upper, t0, zeroFunction, zeroDerivFunction); int iteration; double[] wOld = new double[3]; int numZr = 20; double zDelta = zero2 / numZr; double z1Lo; double z1Hi = 0; FunctionForVariancePowerBetween1And2 f2 = new FunctionForVariancePowerBetween1And2(phi); for (int i = 0; i < numZr; i++) { z1Lo = z1Hi; z1Hi = z1Hi + zDelta; area0 += gaussQuad(f2, z1Lo, z1Hi, y, mu); } zero1 = zero2; tStep = zero2 / 2; for (int i = 0; i < 4; i++) { lower = zero1 + tStep * 0.05; upper = zero1 + tStep * 0.3; fLo = zeroFunction.apply(y, lower); fHi = zeroFunction.apply(y, upper); while (fLo * fHi > 0 && lower != upper) { lower = upper; upper = upper + 0.5 * tStep; fLo = zeroFunction.apply(y, lower); fHi = zeroFunction.apply(y, upper); } zero2 = newtonMethodWithBisection(y, lower, upper, t0, zeroFunction, zeroDerivFunction); result = gaussQuad(f2, zero1, zero2, y, mu); area1 = area1 + result; tStep = zero2 - zero1; zero1 = zero2; t0 = zero2 + (0.8 * tStep); } iteration = 0; area = 0; double[] xVec = MemoryManager.malloc8d(101); //new double[101]; //500 xVec[0] = zero2; double relErr = Double.POSITIVE_INFINITY; SidiAcceleration sidi = new SidiAcceleration(mMatrix, nMatrix, wOld, xVec); while (iteration < 3 || (iteration < 100 && abs(relErr) > 1e-10)) { iteration++; lower = zero1 + 0.05 * tStep; upper = zero1 + 0.8 * tStep; fLo = zeroFunction.apply(y, lower); fHi = zeroFunction.apply(y, upper); while (fLo * fHi > 0 && lower != upper) { lower = upper; upper = upper + 0.5 * tStep; fLo = zeroFunction.apply(y, lower); fHi = zeroFunction.apply(y, upper); } t0 = lower - fLo * (upper - lower) / (fHi - fLo); zero2 = newtonMethodWithBisection(y, lower, upper, t0, zeroFunction, zeroDerivFunction); result = gaussQuad(f2, zero1, zero2, y, mu); xVec[iteration] = zero2; sidi.apply(area, result, w, iteration); w = sidi._w; relErr = sidi._relErr; if 
(iteration >= 3) { relErr = (area0 + area1 + w) == 0 ? Double.POSITIVE_INFINITY : (abs(w - wOld[0]) + abs(w - wOld[1])) / (area0 + area1 + w); } area += result; tStep = zero2 - zero1; zero1 = zero2; } result = (area0 + area1 + w) / PI; return result; } private double bigP(double y, double mu, double phi) { final int maxit = 100; final double aimRelErr = 1e-10; double[][] mMatrix = MemoryManager.malloc8d(2, maxit + 1); //new double[2][maxit + 1]; double[][] nMatrix = MemoryManager.malloc8d(2, maxit + 1); // new double[2][maxit + 1]; double area, area0, result, w = 0, fLo, fHi, zero, zLo, zHi, zero1, zero2; IntegrateImCGF intIm = new IntegrateImCGF(phi); DK dk = new DK(phi); FunctionForVariancePowerGreaterThan2 f = new FunctionForVariancePowerGreaterThan2(phi); double largest = 1.e30; double smallest = 1.e-30; int m = -1; area = 0.0; int iteration = 0; double relErr = 1.0; boolean allOk; double[] wOld = new double[3]; if (y >= 1) { zero1 = 0; zero = PI / (2 * y); zLo = 0.9 * PI / (2.0 * y); if (y > 1.0) { zHi = PI / (2.0 * (y - 1.0)); fHi = intIm.apply(y, zHi, m); } else { zHi = zero * 2.0; fHi = intIm.apply(y, zHi, m); } fLo = intIm.apply(y, zLo, m); allOk = true; while (allOk && (fHi * fLo) > 0 && zHi != zLo) { zLo = zHi; zHi = zHi * 1.5; fLo = intIm.apply(y, zLo, m); fHi = intIm.apply(y, zHi, m); if (zHi > largest / 10.0) allOk = false; } if (zHi > largest / 10.0) allOk = false; if (!allOk) { result = 0.0; return result; } zero2 = newtonMethodWithBisectionWithM(y, zLo, zHi, zero, intIm, dk, m); double[] xVec = MemoryManager.malloc8d(maxit + 1); //new double[maxit + 1]; xVec[0] = zero2; SidiAcceleration sidi = new SidiAcceleration(mMatrix, nMatrix, wOld, xVec); area0 = gaussQuad(f, zero1, zero2, y, mu); while (iteration < 4 || (iteration < maxit && abs(relErr) > aimRelErr)) { m = m - 1; zero1 = zero2; zero = zero2; zLo = zero2; zHi = zero2 * 1.5; if (zHi > largest / 10.0) { allOk = false; fLo = 0.0; fHi = 0.0; } else { fLo = intIm.apply(y, zLo, m); fHi = intIm.apply(y, zHi, m); } while (allOk && (fHi * fLo) > 0 && zHi != zLo) { zLo = zHi; zHi = zHi * 1.5; fLo = intIm.apply(y, zLo, m); fHi = intIm.apply(y, zHi, m); zero = zHi - fHi * (zHi - zLo) / (fHi - fLo); if (zHi > largest / 10.0) allOk = false; } if (zHi > largest / 10.0) allOk = false; if (!allOk) { result = 0.0; return result; } zero2 = newtonMethodWithBisectionWithM(y, zLo, zHi, zero, intIm, dk, m); result = gaussQuad(f, zero1, zero2, y, mu); iteration += 1; xVec[iteration] = zero2; sidi.apply(area, result, w, iteration); w = sidi._w; //relErr = sidi._relErr; if ((area0 + w) == 0) relErr = Double.POSITIVE_INFINITY; else relErr = (abs(w - wOld[0]) + abs((w - wOld[1]))) / (area0 + w); area += result; } result = area0 + w; } else { // y < 1 FindKMaxResult fndKmax = findKMax(y, phi); final double kMax = fndKmax._kMax; final double tMax = fndKmax._tMax; final double mMax = fndKmax._mMax; if (kMax < PI / 2) { zero1 = 0.0; zero = tMax + PI / (2.0 * y); zLo = tMax; zHi = zero * 2.0; if (zHi > largest / 10.0) { allOk = false; fLo = 0.0; fHi = 0.0; } else { allOk = true; fLo = intIm.apply(y, zLo, m); fHi = intIm.apply(y, zHi, m); } while (allOk && (fHi * fLo) > 0 && zHi != zLo) { zLo = zHi; zHi = zHi * 1.5; fLo = intIm.apply(y, zLo, m); fHi = intIm.apply(y, zHi, m); if (zHi > largest / 10.0) allOk = false; } if (zHi > largest / 10.0) allOk = false; if (!allOk) { result = 0.0; return result; } zero2 = newtonMethodWithBisectionWithM(y, zLo, zHi, zero, intIm, dk, m); double[] xVec = MemoryManager.malloc8d(maxit + 1); xVec[0] = zero2; 
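// xVec records the successive integration breakpoints (zeros of the oscillating integrand); the Sidi extrapolation below works with their reciprocals to accelerate the alternating sequence of partial integrals.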
SidiAcceleration sidi = new SidiAcceleration(mMatrix, nMatrix, wOld, xVec); area0 = gaussQuad(f, zero1, zero2, y, mu); while (iteration < 4 || (iteration < maxit && abs(relErr) > aimRelErr)) { m = m - 1; double diff = zero2 - zero1; zero1 = zero2; zLo = zero2 - 0.01 * diff; zHi = zero2 + 2.0 * diff; if (zHi > largest / 10.) { allOk = false; fLo = 0.0; fHi = 0.0; } else { fLo = intIm.apply(y, zLo, m); fHi = intIm.apply(y, zHi, m); } while (allOk && (fHi * fLo) > 0 && zHi != zLo) { zLo = zHi; zHi = zHi * 1.5; fLo = intIm.apply(y, zLo, m); fHi = intIm.apply(y, zHi, m); if (zHi > largest / 10.0) allOk = false; } if (zHi > largest / 10.0) allOk = false; if (!allOk) { result = 0.0; return result; } zero = zLo - fLo * (zHi - zLo) / (fHi - fLo); zero2 = newtonMethodWithBisectionWithM(y, zLo, zHi, zero, intIm, dk, m); result = gaussQuad(f, zero1, zero2, y, mu); iteration += 1; xVec[iteration] = zero2; sidi.apply(area, result, w, iteration); w = sidi._w; if (area0 + w == 0) relErr = Double.POSITIVE_INFINITY; else relErr = (abs(w - wOld[0]) + abs((w - wOld[1]))) / (area0 + w); area += result; } result = area0 + w; } else { // kMax > PI/2 zero1 = 0; zero = PI / (2. * (1 - y)); m = 0; int firstM = 1; zLo = smallest; zHi = tMax; if (zHi > largest / 10.) { allOk = false; fLo = 0; fHi = 0; } else { allOk = true; fLo = intIm.apply(y, zLo, m); fHi = intIm.apply(y, zHi, m); } double diff = zHi - zLo; while (allOk && (fHi * fLo) > 0 && zHi != zLo) { zLo = zHi; zHi = zHi + 0.1 * diff; fLo = intIm.apply(y, zLo, m); fHi = intIm.apply(y, zHi, m); if (zHi > largest / 10.0) allOk = false; } if (zHi > largest / 10.0) allOk = false; if (!allOk) { result = 0.0; return result; } zero2 = newtonMethodWithBisectionWithM(y, zLo, zHi, zero, intIm, dk, m); double[] xVec = MemoryManager.malloc8d(maxit + 1); xVec[0] = zero2; area0 = gaussQuad(f, zero1, zero2, y, mu); diff = zero2 - zero1; while (iteration < 4 || (iteration < maxit && abs(relErr) > aimRelErr)) { zLo = zero2 - 1e-05 * diff; zHi = zero2 + 2.0 * diff; zero1 = zero2; // m, firstm, zlo, zhi, zero = nextm(tMax, mMax, zero2, m, firstM, zLo); // expanded below if (m < mMax) { if (firstM == 1) { m = m + 1; zHi = tMax; } else { m = m - 1; zLo = max(zLo, tMax); } } else if (m == mMax) { if (firstM == 1) { firstM++; zero = tMax + (tMax - zero2); zLo = tMax; } else m = m - 1; } if (zHi > largest / 10.) { allOk = false; fLo = 0; fHi = 0; } else { fLo = intIm.apply(y, zLo, m); fHi = intIm.apply(y, zHi, m); } while (allOk && (fHi * fLo) > 0 && zHi != zLo) { zLo = zHi; zHi = zHi * 1.5; fLo = intIm.apply(y, zLo, m); fHi = intIm.apply(y, zHi, m); if (zHi > largest / 10.0) allOk = false; } if (zHi > largest / 10.0) allOk = false; if (!allOk) { result = 0.0; return result; } zero2 = newtonMethodWithBisectionWithM(y, zLo, zHi, zero, intIm, dk, m); result = gaussQuad(f, zero1, zero2, y, mu); iteration += 1; xVec[iteration] = zero2; SidiAcceleration sidi = new SidiAcceleration(mMatrix, nMatrix, wOld, xVec); sidi.apply(area, result, w, iteration); w = sidi._w; relErr = sidi._relErr; area += result; } result = area0 + w; } } result = abs(result / PI); return result; } }
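A minimal usage sketch (illustrative only, not part of the H2O source above): it exercises the two public static helpers of TweedieEstimator and assumes h2o-algos and its commons-math3 dependency are on the classpath; the class name and the numeric values below are hypothetical.

package hex.glm;

public class TweedieEstimatorUsageSketch {
  public static void main(String[] args) {
    double y = 2.5, mu = 1.8;   // observed response and model prediction (illustrative values)
    double p = 1.5, phi = 1.2;  // Tweedie variance power and dispersion
    // Static helper defined above; it internally selects the series or Fourier-inversion evaluator.
    double llh = TweedieEstimator.logLikelihood(y, mu, p, phi);
    // deviance() returns twice the unit deviance d(y, mu) for variance power p.
    double dev = TweedieEstimator.deviance(y, mu, p);
    System.out.println("logLik = " + llh + ", deviance = " + dev);
  }
}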
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/glm/TweedieMLDispersionOnly.java
package hex.glm; import hex.DataInfo; import water.*; import water.fvec.Frame; import water.fvec.Vec; import java.util.stream.DoubleStream; /*** * Class to find bounds on the infinite-series approximation used to estimate the Tweedie dispersion parameter by * maximum likelihood, following Dunn and Smyth, "Series evaluation of Tweedie exponential dispersion model densities", * Statistics and Computing, Vol. 15, 2005. */ public class TweedieMLDispersionOnly { double _dispersionParameter; // parameter of optimization final double _variancePower; int _constNCol; int _nWorkingCol = 0; Frame _infoFrame; // contains response, mu, weightColumn, constants, max value index, ... Frame _mu; final String[] _constFrameNames; String[] _workFrameNames; boolean _weightPresent; int _indexBound; // denotes maximum index we are willing to try int _nWVs = 3; boolean[] _computationAccuracy; // set to false when upper bound exceeds _indexBound boolean _debugOn; public TweedieMLDispersionOnly(Frame train, GLMModel.GLMParameters parms, GLMModel model, double[] beta, DataInfo dinfo) { _variancePower = parms._tweedie_variance_power; _dispersionParameter = parms._init_dispersion_parameter; _constFrameNames = new String[]{"jMaxConst", "zConst", "part2Const", "oneOverY", "oneOverPiY", "firstOrderDerivConst", "secondOrderDerivConst"}; _constNCol = _constFrameNames.length; DispersionTask.GenPrediction gPred = new DispersionTask.GenPrediction(beta, model, dinfo).doAll( 1, Vec.T_NUM, dinfo._adaptedFrame); _mu = gPred.outputFrame(Key.make(), new String[]{"prediction"}, null); // generate prediction DKV.put(_mu); // form info frame which contains response, mu and weight column if specified _infoFrame = formInfoFrame(train, _mu, parms); DKV.put(_infoFrame); // generate constants used during dispersion parameter update DispersionTask.ComputeTweedieConstTsk _tweedieConst = new DispersionTask.ComputeTweedieConstTsk(_variancePower, _infoFrame); _tweedieConst.doAll(_constNCol, Vec.T_NUM, _infoFrame); _infoFrame.add(Scope.track(_tweedieConst.outputFrame(Key.make(), _constFrameNames, null))); _debugOn = parms._debugTDispersionOnly; if (_debugOn) { // only expand frame when debug is turned on _workFrameNames = new String[]{"jOrKMax", "logZ", "_WOrVMax", "dWOrVMax", "d2WOrVMax", "jOrkL", "jOrkU", "djOrkL", "djOrkU", "d2jOrkL", "d2jOrKU", "sumWV", "sumDWV", "sumD2WV", "ll", "dll", "d2ll"}; _nWorkingCol = _workFrameNames.length; Vec[] vecs = _infoFrame.anyVec().makeDoubles(_nWorkingCol, DoubleStream.generate(() -> Math.random()).limit(_nWorkingCol).map(x -> 0.0).toArray()); _infoFrame.add(_workFrameNames, vecs); DKV.put(_infoFrame); } _weightPresent = parms._weights_column != null; _indexBound = parms._max_series_index; _computationAccuracy = new boolean[_nWVs]; } public static Frame formInfoFrame(Frame train, Frame mu, GLMModel.GLMParameters parms) { Frame infoFrame = new Frame(Key.make()); String[] colNames; Vec[] vecs; if (parms._weights_column != null) { colNames = new String[]{parms._response_column, mu.names()[0], parms._weights_column}; vecs = new Vec[]{train.vec(parms._response_column), mu.vec(0), train.vec(parms._weights_column)}; } else { colNames = new String[]{parms._response_column, mu.names()[0]}; vecs = new Vec[]{train.vec(parms._response_column), mu.vec(0)}; } infoFrame.add(colNames, vecs); return infoFrame; } public void updateDispersionP(double phi) { _dispersionParameter = phi; } public void cleanUp() { DKV.remove(_mu._key); DKV.remove(_infoFrame._key); } }
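For reference, a small sketch (hypothetical helper, not part of the H2O source) of the quantity the series evaluation is centred on: the index of the approximately largest term, w*y^(2-p)/(|2-p|*phi), which matches, up to the lower cut-off, the jMax/kMax starting points used in TweedieEstimator.calculateWjSums and calculateVkSums and is the reason a cap such as _max_series_index is exposed.

package hex.glm;

final class TweedieSeriesCenter {
  // Hypothetical helper: index of the (approximately) largest term of the Dunn-Smyth series.
  static long centralIndex(double y, double w, double p, double phi) {
    if (y <= 0 || p <= 1 || p == 2 || phi <= 0)
      throw new IllegalArgumentException("requires y > 0, p > 1, p != 2, phi > 0");
    double jMax = w * Math.pow(y, 2 - p) / (Math.abs(2 - p) * phi);
    return Math.max(1L, (long) Math.ceil(jMax)); // the summation spreads outward from this index until terms become negligible
  }
}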
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/glrm/GLRM.java
package hex.glrm; import Jama.CholeskyDecomposition; import Jama.Matrix; import Jama.QRDecomposition; import Jama.SingularValueDecomposition; import hex.DMatrix; import hex.DataInfo; import hex.ModelBuilder; import hex.ModelCategory; import hex.genmodel.algos.glrm.GlrmInitialization; import hex.genmodel.algos.glrm.GlrmLoss; import hex.genmodel.algos.glrm.GlrmMojoModel; import hex.genmodel.algos.glrm.GlrmRegularizer; import hex.glrm.GLRMModel.GLRMParameters; import hex.gram.Gram; import hex.gram.Gram.Cholesky; import hex.gram.Gram.GramTask; import hex.kmeans.KMeans; import hex.kmeans.KMeansModel; import hex.svd.SVD; import hex.svd.SVDModel; import hex.svd.SVDModel.SVDParameters; import hex.util.LinearAlgebraUtils; import org.joda.time.format.DateTimeFormat; import org.joda.time.format.DateTimeFormatter; import water.*; import water.api.ModelCacheManager; import water.fvec.C0DChunk; import water.fvec.Chunk; import water.fvec.Frame; import water.fvec.Vec; import water.util.*; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Random; import static hex.genmodel.algos.glrm.GlrmLoss.Quadratic; import static hex.util.DimensionReductionUtils.generateIPC; import static water.util.ArrayUtils.transpose; /** * Generalized Low Rank Models * This is an algorithm for dimensionality reduction of a dataset. It is a general, parallelized * optimization algorithm that applies to a variety of loss and regularization functions. * Categorical columns are handled by expansion into 0/1 indicator columns for each level. * <a href = "http://web.stanford.edu/~boyd/papers/pdf/glrm.pdf">Generalized Low Rank Models</a> * @author anqi_fu */ public class GLRM extends ModelBuilder<GLRMModel, GLRMModel.GLRMParameters, GLRMModel.GLRMOutput> { // Convergence tolerance private static final double TOLERANCE = 1e-10; // Number of columns in the training set (n) private transient int _ncolA; // Number of columns in the resulting matrix Y, taking into account expansion of the categoricals private transient int _ncolY; // Number of columns in the resulting matrix X (k), also number of rows in Y. private transient int _ncolX; boolean _wideDataset = false; // default with wideDataset set to be false. // Loss function for each column private transient GlrmLoss[] _lossFunc; private ArrayList<Integer> _binaryColumnIndices; // store binary columns using binary loss functions. 
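  // Hedged illustration only (never called by the algorithm): the objective GLRM minimizes,
  // spelled out for the simplest case of quadratic loss, quadratic regularization and a dense,
  // fully observed numeric matrix A:
  //   sum_{i,j} (A[i][j] - x_i . y_j)^2 + gamma_x * ||X||_F^2 + gamma_y * ||Y||_F^2
  // where X is n-by-k (row representations) and Y is k-by-m (archetypes). Other losses and
  // regularizers simply replace the squared terms below. Method name and arguments are illustrative.
  static double quadraticObjectiveSketch(double[][] a, double[][] x, double[][] y,
                                         double gammaX, double gammaY) {
    double obj = 0;
    for (int i = 0; i < a.length; i++) {
      for (int j = 0; j < a[0].length; j++) {
        double xy = 0;
        for (int k = 0; k < y.length; k++) xy += x[i][k] * y[k][j];   // inner product x_i . y_j
        double r = a[i][j] - xy;
        obj += r * r;                                                 // quadratic loss
      }
    }
    return obj + gammaX * frobenius2(x) + gammaY * frobenius2(y);     // add both regularizers
  }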
@Override protected GLRMDriver trainModelImpl() { return new GLRMDriver(); } @Override public ModelCategory[] can_build() { return new ModelCategory[]{ModelCategory.Clustering}; } @Override public boolean isSupervised() { return false; } @Override public boolean havePojo() { return false; } @Override public boolean haveMojo() { return true; } @Override protected void checkMemoryFootPrint_impl() { HeartBeat hb = H2O.SELF._heartbeat; double p = hex.util.LinearAlgebraUtils.numColsExp(_train,true); double r = _train.numRows(); long mem_usage = (long)(hb._cpus_allowed * p*_parms._k * 8*2); // loose estimation of memory usage long mem_usage_w = (long)(hb._cpus_allowed * r*_parms._k * 8*2); // loose estimation of memory usage long max_mem = H2O.SELF._heartbeat.get_free_mem(); if ((mem_usage > max_mem) && (mem_usage_w > max_mem)) { String msg = "Archtypes in matrix Y cannot fit in the driver node's memory (" + PrettyPrint.bytes(mem_usage) + " > " + PrettyPrint.bytes(max_mem) + ") - try reducing k, the number of columns and/or the number of categorical factors."; error("_train", msg); } if (mem_usage > mem_usage_w) { // choose the most memory efficient one _wideDataset = true; // set to true if wide dataset is detected } } /* Set value of wideDataset. Note that this routine is used for test purposes only and not for users. */ public void setWideDataset(boolean isWide) { _wideDataset = isWide; } //-------------------------------------------------------------------------------------------------------------------- // Model initialization //-------------------------------------------------------------------------------------------------------------------- // Called from an http request public GLRM(GLRMParameters parms) { super(parms); init(false); } public GLRM(GLRMParameters parms, Job<GLRMModel> job) { super(parms, job); init(false); } public GLRM(boolean startup_once) { super(new GLRMParameters(), startup_once); } /** * Validate all parameters, and prepare the model for training. */ @Override public void init(boolean expensive) { super.init(expensive); _ncolX = _parms._k; _ncolA = _train == null? -1 : _train.numCols(); _ncolY = _train == null? -1 : LinearAlgebraUtils.numColsExp(_train, true); initLoss(); if (_parms._gamma_x < 0) error("_gamma_x", "gamma must be a non-negative number"); if (_parms._gamma_y < 0) error("_gamma_y", "gamma_y must be a non-negative number"); if (_parms._max_iterations < 1 || _parms._max_iterations > 1e6) error("_max_iterations", "max_iterations must be between 1 and 1e6 inclusive"); if (_parms._init_step_size <= 0) error ("_init_step_size", "init_step_size must be a positive number"); if (_parms._min_step_size < 0 || _parms._min_step_size > _parms._init_step_size) error("_min_step_size", "min_step_size must be between 0 and " + _parms._init_step_size); // Cannot recover SVD of original _train from XY of transformed _train if (_parms._recover_svd && (_parms._impute_original && _parms._transform != DataInfo.TransformType.NONE)) error("_recover_svd", "_recover_svd and _impute_original cannot both be true if _train" + " is transformed"); if (null == _train) return; if (_ncolA < 2) error("_train", "_train must have more than one column"); if (_valid != null && _valid.numRows() != _train.numRows()) error("_valid", "_valid must have same number of rows as _train"); if (_ncolY > 5000) warn("_train", "_train has " + _ncolY + " columns when categoricals are expanded. 
Algorithm" + " may be slow."); if (null != _parms._loading_name) { warn("loading_name", "loading_name is deprecated, use representation_name instead."); if (null == _parms._representation_name) _parms._representation_name = _parms._loading_name; } if ((null != _parms._representation_name) && (null != _parms._loading_name)) { if (!(_parms._representation_name.equals(_parms._loading_name))) warn("_representation_name and _loading_name", "Are not equal. _representation_name will" + " be used since _loading_name is deprecated."); } if (null != _parms._representation_name) { if (dest().toString().equals(_parms._representation_name)) error("representation_name", "representation_name and model_id cannot use the same string."); } if (null != _parms._loading_name) { if (dest().toString().equals(_parms._loading_name)) error("loading_name", "loading_name and model_id cannot use the same string."); } if (_parms._k < 1 || _parms._k > _ncolY) error("_k", "_k must be between 1 and " + _ncolY + " inclusive"); if (null != _parms._user_y) { // Check dimensions of user-specified initial Y if (_parms._init != GlrmInitialization.User) error("_init", "init must be 'User' if providing user-specified points"); Frame user_y = _parms._user_y.get(); assert user_y != null; int user_y_cols = _parms._expand_user_y ? _ncolA : _ncolY; // Check dimensions of user-specified initial Y if (user_y.numCols() != user_y_cols) error("_user_y", "The user-specified Y must have the same number of columns (" + user_y_cols + ") " + "as the training observations"); else if (user_y.numRows() != _parms._k) error("_user_y", "The user-specified Y must have k = " + _parms._k + " rows"); else { int zero_vec = 0; Vec[] centersVecs = user_y.vecs(); for (int c = 0; c < _ncolA; c++) { if (centersVecs[c].naCnt() > 0) { error("_user_y", "The user-specified Y cannot contain any missing values"); break; } else if (centersVecs[c].isConst() && centersVecs[c].max() == 0) zero_vec++; } if (zero_vec == _ncolA) error("_user_y", "The user-specified Y cannot all be zero"); } } if (_parms._user_x != null) { // Check dimensions of user-specified initial X if (_parms._init != GlrmInitialization.User) error("_init", "init must be 'User' if providing user-specified points"); Frame user_x = _parms._user_x.get(); assert user_x != null; if (user_x.numCols() != _parms._k) error("_user_x", "The user-specified X must have k = " + _parms._k + " columns"); else if (user_x.numRows() != _train.numRows()) error("_user_x", "The user-specified X must have the same number of rows " + "(" + _train.numRows() + ") as the training observations"); else { int zero_vec = 0; Vec[] centersVecs = user_x.vecs(); for (int c = 0; c < _parms._k; c++) { if (centersVecs[c].naCnt() > 0) { error("_user_x", "The user-specified X cannot contain any missing values"); break; } else if (centersVecs[c].isConst() && centersVecs[c].max() == 0) zero_vec++; } if (zero_vec == _parms._k) error("_user_x", "The user-specified X cannot all be zero"); } } for (int i = 0; i < _ncolA; i++) { if (_train.vec(i).isString() || _train.vec(i).isUUID()) throw H2O.unimpl("GLRM cannot handle String or UUID data"); } if (expensive && error_count() == 0) checkMemoryFootPrint(); // check to make sure we can fit. } /** Validate all Loss-related parameters, and fill in the `_lossFunc` array. */ private void initLoss() { int num_loss_by_cols = _parms._loss_by_col == null? 0 : _parms._loss_by_col.length; int num_loss_by_cols_idx = _parms._loss_by_col_idx == null? 
0 : _parms._loss_by_col_idx.length; // First validate the parameters that do not require access to the training frame if (_parms._period <= 0) error("_period", "_period must be a positive integer"); if (!_parms._loss.isForNumeric()) error("_loss", _parms._loss + " is not a numeric loss function"); if (!_parms._multi_loss.isForCategorical()) error("_multi_loss", _parms._multi_loss + " is not a multivariate loss function"); if (num_loss_by_cols != num_loss_by_cols_idx && num_loss_by_cols_idx > 0) error("_loss_by_col", "Sizes of arrays _loss_by_col and _loss_by_col_idx must be the same"); if (_train == null) return; _binaryColumnIndices = new ArrayList<Integer>(); // Initialize the default loss functions for each column // Note: right now for binary columns `.isCategorical()` returns true. It has the undesired consequence that // such variables will get categorical loss function, and will get expanded into 2 columns. _lossFunc = new GlrmLoss[_ncolA]; for (int i = 0; i < _ncolA; i++) { Vec vi = _train.vec(i); _lossFunc[i] = vi.isCategorical()? _parms._multi_loss : _parms._loss; } String[] origColumnNames = _parms.train().names(); // grab original frame column names before change ArrayList<String> newColumnNames = new ArrayList<String>(Arrays.asList(_train._names)); // If _loss_by_col is provided, then override loss functions on the specified columns if (num_loss_by_cols > 0) { if (num_loss_by_cols_idx == 0) { if (num_loss_by_cols == origColumnNames.length) assignLossByCol(num_loss_by_cols, newColumnNames, origColumnNames); else error("_loss_by_col", "Number of override loss functions should be the same as the " + "number of columns in the input frame; or otherwise an explicit _loss_by_col_idx should be " + "provided."); } if (num_loss_by_cols_idx == num_loss_by_cols) assignLossByCol(num_loss_by_cols, newColumnNames, origColumnNames); // Otherwise we have already reported an error at the start of this method } // Check that all loss functions correspond to their actual type for (int i = 0; i < _ncolA; i++) { Vec vi = _train.vec(i); GlrmLoss lossi = _lossFunc[i]; if (vi.isNumeric()) { // numeric columns if (!vi.isBinary()) { // non-binary numeric columns if (!lossi.isForNumeric()) error("_loss_by_col", "Loss function "+lossi+" cannot be applied to numeric column "+i); } else { // binary numeric columns if (!lossi.isForBinary() && !lossi.isForNumeric()) { error("_loss_by_col", "Loss function "+lossi+" cannot be applied to binary column "+i); } } } else if (vi.isCategorical()) { // categorical columns if (vi.isBinary()) { // categorical binary columns if (!lossi.isForBinary() && !lossi.isForCategorical()) error("_loss_by_col", "Loss function "+lossi+" cannot be applied to binary column "+i); else if (lossi.isForBinary()) _binaryColumnIndices.add(i); // collect column indices storing binary columns with binary loss function. } else { // categorical non-binary columns if (!lossi.isForCategorical()) error("_loss_by_col","Loss function "+lossi+" cannot be applied to categorical column" + " "+i); } } // For "Periodic" loss function supply the period. We currently have no support for different periods for // different columns. if (lossi == GlrmLoss.Periodic) lossi.setParameters(_parms._period); } } /* Need to assign column loss for each column. However, due to constant columns being dropping, the loss function specified for a constant columns will no longer apply since we dropped that column. Need to take care of this case to avoid errors. 
*/ private void assignLossByCol(int num_loss_by_cols, ArrayList<String> newColumnNames, String[] origColumnNames) { for (int i = 0; i < num_loss_by_cols; i++) { int cidx = _parms._loss_by_col_idx==null?i:_parms._loss_by_col_idx[i]; String colNames = origColumnNames[cidx]; if (cidx < 0 || cidx >= origColumnNames.length) error("_loss_by_col_idx", "Column index " + cidx + " must be in [0," + _ncolA + ")"); else if (newColumnNames.contains(colNames)) _lossFunc[newColumnNames.indexOf(colNames)] = _parms._loss_by_col[i]; } } // Squared Frobenius norm of a matrix (sum of squared entries) public static double frobenius2(double[][] x) { if (x == null) return 0; double frob = 0; for (double[] xs : x) for (double j : xs) frob += j*j; return frob; } // Closed-form solution for X and Y may exist if both loss and regularizers are quadratic. public final boolean hasClosedForm(long na_cnt) { if (na_cnt != 0) return false; for (GlrmLoss lossi : _lossFunc) if (lossi != Quadratic) return false; return (_parms._gamma_x == 0 || _parms._regularization_x == GlrmRegularizer.None || _parms._regularization_x == GlrmRegularizer.Quadratic) && (_parms._gamma_y == 0 || _parms._regularization_y == GlrmRegularizer.None || _parms._regularization_y == GlrmRegularizer.Quadratic); } // Transform each column of a 2-D array, assuming categoricals sorted before numeric cols public static double[][] transform(double[][] centers, double[] normSub, double[] normMul, int ncats, int nnums) { int K = centers.length; int N = centers[0].length; assert ncats + nnums == N; double[][] value = new double[K][N]; double[] means = normSub == null ? MemoryManager.malloc8d(nnums) : normSub; double[] mults = normMul == null ? MemoryManager.malloc8d(nnums) : normMul; if (normMul == null) Arrays.fill(mults, 1.0); for (int clu = 0; clu < K; clu++) { System.arraycopy(centers[clu], 0, value[clu], 0, ncats); for (int col = 0; col < nnums; col++) value[clu][ncats+col] = (centers[clu][ncats+col] - means[col]) * mults[col]; } return value; } // More efficient implementation assuming sdata cols aligned with adaptedFrame public static double[][] expandCats(double[][] sdata, DataInfo dinfo) { if (sdata == null || dinfo._cats == 0) return sdata; assert sdata[0].length == dinfo._adaptedFrame.numCols(); // Column count for expanded matrix int catsexp = dinfo._catOffsets[dinfo._catOffsets.length-1]; double[][] cexp = new double[sdata.length][catsexp + dinfo._nums]; for (int i = 0; i < sdata.length; i++) LinearAlgebraUtils.expandRow(sdata[i], dinfo, cexp[i], false); return cexp; } class GLRMDriver extends Driver { // Initialize Y and X matrices // tinfo = original training data A, dfrm = [A,X,W] where W is working copy of X (initialized here) private double[][] initialXY(DataInfo tinfo, Frame dfrm, GLRMModel model, long na_cnt) { double[][] centers, centers_exp = null; if (_parms._init == GlrmInitialization.User) { // Set X and Y to user-specified points if available, // Gaussian matrix if not Frame userYFrame = _parms._user_y == null? 
null : _parms._user_y.get(); if (userYFrame != null) { // Set Y = user-specified initial points Vec[] yVecs = userYFrame.vecs(); if (_parms._expand_user_y) { // Categorical cols must be one-hot expanded // Get the centers and put into array centers = new double[_parms._k][_ncolA]; for (int c = 0; c < _ncolA; c++) { for (int r = 0; r < _parms._k; r++) centers[r][c] = yVecs[c].at(r); } // Permute cluster columns to align with dinfo and expand out categoricals centers = ArrayUtils.permuteCols(centers, tinfo._permutation); centers_exp = expandCats(centers, tinfo); } else { // User Y already has categoricals expanded centers_exp = new double[_parms._k][_ncolY]; for (int c = 0; c < _ncolY; c++) { for (int r = 0; r < _parms._k; r++) centers_exp[r][c] = yVecs[c].at(r); } } } else centers_exp = ArrayUtils.gaussianArray(_parms._k, _ncolY, _parms._seed); if (_parms._user_x != null) { // Set X = user-specified initial points Frame tmp = new Frame(dfrm); tmp.add(_parms._user_x.get()); // [A,X,W,U] where U = user-specified X // Set X and W to the same values as user-specified initial X new MRTask() { @Override public void map(Chunk[] cs) { for (int row = 0; row < cs[0]._len; row++) { for (int i = _ncolA; i < _ncolA+_ncolX; i++) { double x = cs[2*_ncolX + i].atd(row); cs[i].set(row, x); cs[_ncolX + i].set(row, x); } } } }.doAll(tmp); } else { InitialXProj xtsk = new InitialXProj(_parms, _ncolA, _ncolX); xtsk.doAll(dfrm); } return centers_exp; // Don't project or change Y in any way if user-specified, just return it } else if (_parms._init == GlrmInitialization.Random) { // Generate X and Y from standard normal distribution centers_exp = ArrayUtils.gaussianArray(_parms._k, _ncolY, _parms._seed); InitialXProj xtsk = new InitialXProj(_parms, _ncolA, _ncolX); xtsk.doAll(dfrm); } else if (_parms._init == GlrmInitialization.SVD) { // Run SVD on A'A/n (Gram), set Y = right singular vectors SVDParameters parms = new SVDParameters(); parms._train = _parms._train; parms._ignored_columns = _parms._ignored_columns; parms._ignore_const_cols = _parms._ignore_const_cols; parms._score_each_iteration = _parms._score_each_iteration; parms._use_all_factor_levels = true; // Since GLRM requires Y matrix to have fully expanded ncols parms._nv = _parms._k; parms._transform = _parms._transform; parms._svd_method = _parms._svd_method; parms._max_iterations = parms._svd_method == SVDParameters.Method.Randomized ? _parms._k : _parms._max_iterations; parms._seed = _parms._seed; parms._keep_u = true; parms._impute_missing = true; parms._save_v_frame = false; SVDModel svd = ModelCacheManager.get(parms); if (svd == null) { SVD svdP = new SVD(parms, _job, true, model); svdP.setWideDataset(_wideDataset); // force to treat dataset as wide even though it is not. 
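          // Added note: the SVD A ~ U D V' trained below seeds the factorization as X = U D^(1/2) and
          // Y = D^(1/2) V', so the initial product X*Y already equals the best rank-k approximation of
          // the (transformed) training matrix.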
// Build an SVD model svd = svdP.trainModelNested(_rebalancedTrain); } model._output._init_key = svd._key; // Ensure SVD centers align with adapted training frame cols assert svd._output._permutation.length == tinfo._permutation.length; for (int i = 0; i < tinfo._permutation.length; i++) assert svd._output._permutation[i] == tinfo._permutation[i]; centers_exp = transpose(svd._output._v); // Set X and Y appropriately given SVD of A = UDV' // a) Set Y = D^(1/2)V'S where S = diag(\sigma) _parms._k = svd._parms._nv; // parameter k may have been reduced due to rank deficient dataset double[] dsqrt = new double[_parms._k]; for (int i = 0; i < _parms._k; i++) { dsqrt[i] = Math.sqrt(svd._output._d[i]); ArrayUtils.mult(centers_exp[i], dsqrt[i]); // This gives one row of D^(1/2)V' } // b) Set X = UD^(1/2) = AVD^(-1/2) Frame uFrm = DKV.get(svd._output._u_key).get(); assert uFrm.numCols() == _parms._k; assert uFrm.isCompatible(dfrm); Frame fullFrm = (new Frame(uFrm)).add(dfrm); // Jam matrices together into frame [U,A,X,W] InitialXSVD xtsk = new InitialXSVD(dsqrt, _parms._k, _ncolA, _ncolX); xtsk.doAll(fullFrm); } else if (_parms._init == GlrmInitialization.PlusPlus) { // Run k-means++ and set Y = resulting cluster centers, X = indicator matrix of assignments KMeansModel.KMeansParameters parms = new KMeansModel.KMeansParameters(); parms._train = _parms._train; parms._ignored_columns = _parms._ignored_columns; parms._ignore_const_cols = _parms._ignore_const_cols; parms._score_each_iteration = _parms._score_each_iteration; parms._init = KMeans.Initialization.PlusPlus; parms._k = _parms._k; parms._max_iterations = _parms._max_iterations; parms._standardize = true; parms._seed = _parms._seed; parms._pred_indicator = true; KMeansModel km = ModelCacheManager.get(parms); if (km == null) km = new KMeans(parms, _job).trainModelNested(_rebalancedTrain); model._output._init_key = km._key; // Score only if clusters well-defined and closed-form solution does not exist double frob = frobenius2(km._output._centers_raw); if (frob != 0 && !Double.isNaN(frob) && !hasClosedForm(na_cnt)) { // Frame pred = km.score(_parms.train()); Log.info("Initializing X to matrix of weights inversely correlated with cluster distances"); InitialXKMeans xtsk = new InitialXKMeans(_parms, km, _ncolA, _ncolX); xtsk.doAll(dfrm); } // Permute cluster columns to align with dinfo, normalize nums, and expand out cats to indicator cols centers = ArrayUtils.permuteCols(km._output._centers_raw, tinfo.mapNames(km._output._names)); centers = transform(centers, tinfo._normSub, tinfo._normMul, tinfo._cats, tinfo._nums); centers_exp = expandCats(centers, tinfo); // expand categorical columns to N binary columns } else error("_init", "Initialization method " + _parms._init + " is undefined"); // If all centers are zero or any are NaN, initialize to standard Gaussian random matrix assert centers_exp != null && centers_exp.length == _parms._k && centers_exp[0].length == _ncolY : "Y must have " + _parms._k + " rows and " + _ncolY + " columns"; double frob = frobenius2(centers_exp); if (frob == 0 || Double.isNaN(frob)) { warn("_init", "Initialization failed. 
Setting initial Y to standard normal random" + " matrix instead"); centers_exp = ArrayUtils.gaussianArray(_parms._k, _ncolY); } // Project rows of Y into appropriate subspace for regularizer Random rand = RandomUtils.getRNG(_parms._seed); for (int i = 0; i < _parms._k; i++) centers_exp[i] = _parms._regularization_y.project(centers_exp[i], rand); return centers_exp; } // In case of quadratic loss and regularization, initialize closed form X = AY'(YY' + \gamma)^(-1) private void initialXClosedForm(DataInfo dinfo, Archetypes yt_arch, double[] normSub, double[] normMul) { Log.info("Initializing X = AY'(YY' + gamma I)^(-1) where A = training data"); double[][] ygram = ArrayUtils.formGram(yt_arch._archetypes); if (_parms._gamma_y > 0) { for (int i = 0; i < ygram.length; i++) ygram[i][i] += _parms._gamma_y; } CholeskyDecomposition yychol = regularizedCholesky(ygram, 10, false); if(!yychol.isSPD()) Log.warn("Initialization failed: (YY' + gamma I) is non-SPD. Setting initial X to standard normal" + " random matrix. Results will be numerically unstable"); else { CholMulTask cmtsk = new CholMulTask(yychol, yt_arch, _ncolA, _ncolX, dinfo._cats, normSub, normMul); cmtsk.doAll(dinfo._adaptedFrame); } } // Stopping criteria private boolean isDone(GLRMModel model, int steps_in_row, double step) { if (stop_requested()) return true; // Stopped/cancelled // Stopped for running out of iterations if (model._output._iterations >= _parms._max_iterations) return true; if (model._output._updates >= _parms._max_updates) return true; // Stopped for falling below minimum step size if (step <= _parms._min_step_size) return true; // Stopped when enough steps and average decrease in objective per iteration < TOLERANCE return (model._output._iterations >= _parms._max_iterations) && steps_in_row > 3 && Math.abs(model._output._avg_change_obj) < TOLERANCE; } // Regularized Cholesky decomposition using H2O implementation public Cholesky regularizedCholesky(Gram gram, int max_attempts) { int attempts = 0; double addedL2 = 0; // TODO: Should I report this to the user? Cholesky chol = gram.cholesky(null); while (!chol.isSPD() && attempts < max_attempts) { if (addedL2 == 0) addedL2 = 1e-5; else addedL2 *= 10; ++attempts; gram.addDiag(addedL2); // try to add L2 penalty to make the Gram SPD Log.info("Added L2 regularization = " + addedL2 + " to diagonal of Gram matrix"); gram.cholesky(chol); } if (!chol.isSPD()) throw new Gram.NonSPDMatrixException(); return chol; } public Cholesky regularizedCholesky(Gram gram) { return regularizedCholesky(gram, 10); } // Regularized Cholesky decomposition using JAMA implementation public CholeskyDecomposition regularizedCholesky(double[][] gram, int max_attempts, boolean throw_exception) { int attempts = 0; double addedL2 = 0; Matrix gmat = new Matrix(gram); CholeskyDecomposition chol = new CholeskyDecomposition(gmat); while (!chol.isSPD() && attempts < max_attempts) { if (addedL2 == 0) addedL2 = 1e-5; else addedL2 *= 10; ++attempts; for (int i = 0; i < gram.length; i++) gmat.set(i,i,addedL2); // try to add L2 penalty to make the Gram SPD Log.info("Added L2 regularization = " + addedL2 + " to diagonal of Gram matrix"); chol = new CholeskyDecomposition(gmat); } if (!chol.isSPD() && throw_exception) throw new Gram.NonSPDMatrixException(); return chol; } // Recover singular values and eigenvectors of XY // However, they are only saved if the user has specified _parms._recover_svd to be true. 
// Otherwise, we are doing recoverSVD to just recover enough information to calculate information // for variance metrics specified in PUBDEV-3501. public void recoverSVD(GLRMModel model, DataInfo xinfo, DataInfo dinfo) { // NOTE: Gram computes X'X/n where n = nrow(A) = number of rows in training set GramTask xgram = new GramTask(_job._key, xinfo).doAll(xinfo._adaptedFrame); GramTask dgram = new GramTask(_job._key, dinfo).doAll(dinfo._adaptedFrame.subframe(0, _ncolA)); Cholesky xxchol = regularizedCholesky(xgram._gram); long nobs = xgram._nobs; // R from QR decomposition of X = QR is upper triangular factor of Cholesky of X'X // Gram = X'X/n = LL' -> X'X = (L*sqrt(n))(L'*sqrt(n)) Matrix x_r = new Matrix(xxchol.getL()).transpose(); x_r = x_r.times(Math.sqrt(nobs)); Matrix yt = new Matrix(model._output._archetypes_raw.getY(true)); QRDecomposition yt_qr = new QRDecomposition(yt); Matrix yt_r = yt_qr.getR(); // S from QR decomposition of Y' = ZS Matrix rrmul = x_r.times(yt_r.transpose()); SingularValueDecomposition rrsvd = new SingularValueDecomposition(rrmul); // RS' = U \Sigma V' double[] sval = rrsvd.getSingularValues(); // get singular values as double array double dfcorr = (nobs > 1)?nobs/(nobs-1.0):1.0; // find number of observations double oneOverNobsm1 = (nobs>1)?1.0/Math.sqrt(nobs-1):1.0; model._output._std_deviation = Arrays.copyOf(sval, sval.length); ArrayUtils.mult(model._output._std_deviation, oneOverNobsm1); model._output._total_variance = dfcorr * dgram._gram.diagSum(); double maxSumVal = ArrayUtils.l2norm2(model._output._std_deviation); if (maxSumVal > model._output._total_variance) { // scale output std to avoid cum std > 1 double catScale = Math.sqrt(model._output._total_variance/maxSumVal); ArrayUtils.mult(model._output._std_deviation, catScale); } double[] vars = new double[model._output._std_deviation.length]; double[] prop_var = new double[vars.length]; double[] cum_var = new double[vars.length]; generateIPC(model._output._std_deviation, model._output._total_variance, vars, prop_var, cum_var); String[] colTypes = new String[_parms._k]; String[] colFormats = new String[_parms._k]; String[] colHeaders = new String[_parms._k]; String[] pcHeaders = new String[_parms._k]; // header for variance metrics, set to equal PCA Arrays.fill(colTypes, "double"); Arrays.fill(colFormats, "%5f"); for (int i = 0; i < colHeaders.length; i++) { colHeaders[i] = "Vec" + String.valueOf(i + 1); pcHeaders[i] = "pc" + String.valueOf(i + 1); } model._output._importance = new TwoDimTable("Importance of components", null, new String[]{"Standard deviation", "Proportion of Variance", "Cumulative Proportion"}, pcHeaders, colTypes, colFormats, "", new String[3][], new double[][]{model._output._std_deviation, prop_var, cum_var}); if (_parms._recover_svd) { // only save the eigenvectors, singular values if _recover_svd=true // Eigenvectors are V'Z' = (ZV)' Matrix eigvec = yt_qr.getQ().times(rrsvd.getV()); // Make TwoDimTable objects for prettier output model._output._eigenvectors_raw = eigvec.getArray(); // Singular values ordered in weakly descending order by algorithm model._output._singular_vals = rrsvd.getSingularValues(); assert model._output._names_expanded.length == model._output._eigenvectors_raw.length; model._output._eigenvectors = new TwoDimTable("Eigenvectors", null, model._output._names_expanded, colHeaders, colTypes, colFormats, "", new String[model._output._eigenvectors_raw.length][], model._output._eigenvectors_raw); } } private transient Frame _rebalancedTrain; 
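    // Hedged numerical sketch (never called anywhere) of the identity recoverSVD() above relies on:
    // if X = Qx*Rx and Y' = Qy*Ry with orthonormal Qx and Qy, then X*Y = Qx*(Rx*Ry')*Qy', so the
    // singular values of the large product X*Y equal those of the small k-by-k matrix Rx*Ry'.
    // recoverSVD() obtains Rx from the Cholesky of X'X instead of a QR of X, which yields the same
    // triangular factor up to signs. The inputs here are arbitrary toy matrices; the method name is illustrative.
    private double[] singularValuesViaSmallCore(double[][] x, double[][] yTransposed) {
      Matrix rx = new QRDecomposition(new Matrix(x)).getR();            // k-by-k upper triangular
      Matrix ry = new QRDecomposition(new Matrix(yTransposed)).getR();  // k-by-k upper triangular
      return new SingularValueDecomposition(rx.times(ry.transpose())).getSingularValues();
    }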
@SuppressWarnings("ConstantConditions") // Method too complex for IntelliJ @Override public void computeImpl() { GLRMModel model = null; DataInfo dinfo = null, xinfo = null, tinfo = null, tempinfo = null; Frame fr = null; Frame frTA = null; // frame to store T(A) Frame xwF = null; // frame to store X, W matrices for wide datasets Frame xwF2 = null; Frame xVecs = null; // store X frame vectors int colCount = _ncolA; ObjCalc objtsk = null; ObjCalcW objtskw = null; Archetypes yt = null; Archetypes ytnew = null; try { init(true); // Initialize + Validate parameters if (error_count() > 0) throw new IllegalArgumentException("Found validation errors: " + validationErrors()); // The model to be built model = new GLRMModel(dest(), _parms, new GLRMModel.GLRMOutput(GLRM.this)); model.delete_and_lock(_job); _rebalancedTrain = new Frame(_train); // Save adapted frame info for scoring later tinfo = new DataInfo(_train, _valid, 0, true, _parms._transform, DataInfo.TransformType.NONE, false, false, false, /* weights */ false, /* offset */ false, /* fold */ false); DKV.put(tinfo._key, tinfo); // used later to recover SVD info tempinfo = new DataInfo(_train, null, 0, true, _parms._transform, DataInfo.TransformType.NONE, false, false, false, /* weights */ false, /* offset */ false, /* fold */ false); correctForBinaryLoss(tinfo); model._output._permutation = tinfo._permutation; model._output._nnums = tinfo._nums; model._output._ncats = tinfo._cats; model._output._catOffsets = tinfo._catOffsets; int[] numLevels = tinfo._adaptedFrame.cardinality(); // may have more numerical columns now since we may change binary columns with binary loss to numerical columns for (int colIndex = tinfo._cats; colIndex < _train.numCols(); colIndex++) { if (numLevels[colIndex] > -1) numLevels[colIndex] = -1; else break; // hit the numericcal columns already. Nothing more need to be done here. } // need to prevent binary data column being expanded into two when the loss function is logistic here if (error_count() > 0) throw new IllegalArgumentException("Found validation errors: " + validationErrors()); model._output._catOffsets = tinfo._catOffsets; model._output._names_expanded = tinfo.coefNames(); // Save training frame adaptation information for use in scoring later model._output._normSub = tinfo._normSub == null ? new double[tinfo._nums] : tinfo._normSub; if (tinfo._normMul == null) { model._output._normMul = new double[tinfo._nums]; Arrays.fill(model._output._normMul, 1.0); } else model._output._normMul = tinfo._normMul; // Save loss function for each column in adapted frame order assert _lossFunc != null && _lossFunc.length == _train.numCols(); model._output._lossFunc = new GlrmLoss[_lossFunc.length]; for (int i = 0; i < _lossFunc.length; i++) model._output._lossFunc[i] = _lossFunc[tinfo._permutation[i]]; long nobs = _train.numRows() * _train.numCols(); long na_cnt = 0; for (int i = 0; i < _train.numCols(); i++) na_cnt += _train.vec(i).naCnt(); model._output._nobs = nobs - na_cnt; // TODO: Should we count NAs? 
// 0) Initialize Y and X matrices // Jam A and X into a single frame for distributed computation // [A,X,W] A is read-only training data, X is matrix from prior iteration, W is working copy of X this iteration // for _wideDataset=true, it stores [A, YeXOld, W (YeXNew)] fr = new Frame(_train); Vec anyvec = fr.anyVec(); assert anyvec != null; for (int i = 0; i < _ncolX; i++) fr.add("xcol_" + i, anyvec.makeZero()); for (int i = 0; i < _ncolX; i++) fr.add("wcol_" + i, anyvec.makeZero()); dinfo = new DataInfo(/* train */ fr, /* validation */ null, /* nResponses */ 0, /* useAllFactorLevels */ true, /* pred. transform */ _parms._transform, /* resp. transform */ DataInfo.TransformType.NONE, /* skipMissing */ false, /* imputeMissing */ false, /* missingBucket */ false, /* weights */ false, /* offset */ false, /* fold */ false); DKV.put(dinfo._key, dinfo); // for wideDataset, contains A, Yex_old, Yex_new. For normal, contains A, XO, W fr = dinfo._adaptedFrame; int weightId = dinfo._weights ? dinfo.weightChunkId() : -1; // Use closed form solution for X if quadratic loss and regularization _job.update(1, "Initializing X and Y matrices"); // One unit of work double[/*k*/][/*features*/] yinit = initialXY(tinfo, dinfo._adaptedFrame, model, na_cnt); // on transformed A // Store Y' for more efficient matrix ops (rows = features, cols = k rank) yt = new Archetypes(transpose(yinit), true, tinfo._catOffsets, numLevels); ytnew = yt; double yreg = _parms._regularization_y.regularize(yt._archetypes); // Set X to closed-form solution of ALS equation if possible for better accuracy. No need to set to // work with wideDataset. Dealing with small matrices anymore. Optimize to use H2O Chol maybe. if (!(_parms._init == GlrmInitialization.User && _parms._user_x != null) && hasClosedForm(na_cnt)) initialXClosedForm(dinfo, yt, model._output._normSub, model._output._normMul); if (_wideDataset) { // 1. create fr as transpose(A). 2. make T(X) as double[][] array 3. 
build frame for x colCount = (int) _train.numRows(); frTA = generateFrameOfZeros(_ncolA, colCount); xwF = new water.util.ArrayUtils().frame(transpose(yinit)); // YeX stored as frame, duplicated xwF2 = new water.util.ArrayUtils().frame(transpose(yinit)); xwF.add(xwF2); new DMatrix.TransposeTsk(frTA).doAll(dinfo._adaptedFrame.subframe(0, _ncolA)); // store T(A) yinit = new double[_parms._k][colCount]; // store the X matrix from adaptedFrame as 2D double array for (int index = colCount; index < colCount+_ncolX; index++) { int trueIndex = index-colCount; yinit[trueIndex] = new FrameUtils.Vec2ArryTsk(colCount).doAll(dinfo._adaptedFrame.vec(trueIndex+_ncolA)).res; } // set weights to _weights in archetype class instead of as part of frame double[] tempWeights = new double[(int)_train.numRows()]; if (weightId < 0) { // Arrays.fill(tempWeights,1); } else { tempWeights = new FrameUtils.Vec2ArryTsk(weightId).doAll(dinfo._adaptedFrame.vec(weightId)).res; } yt = new Archetypes(transpose(yinit), true, tinfo._catOffsets, numLevels, tempWeights); ytnew = yt; setTrain(rebalance(xwF, false, _result + ".temporary.xwF")); } // Compute initial objective function _job.update(1, "Computing initial objective function"); // One unit of work // Assume regularization on initial X is finite, else objective can be NaN if \gamma_x = 0 boolean regX = _parms._regularization_x != GlrmRegularizer.None && _parms._gamma_x != 0; if (_wideDataset) { objtskw = new ObjCalcW(_parms, yt, colCount, _ncolX, tinfo._cats, model._output._normSub, model._output._normMul, model._output._lossFunc, regX, xwF, 0); objtskw.doAll(frTA); model._output._objective = objtskw._loss + _parms._gamma_x * objtskw._xold_reg + _parms._gamma_y * yreg; } else { objtsk = new ObjCalc(_parms, yt, _ncolA, _ncolX, tinfo._cats, model._output._normSub, model._output._normMul, model._output._lossFunc, weightId, regX); objtsk.doAll(fr); model._output._objective = objtsk._loss + _parms._gamma_x * objtsk._xold_reg + _parms._gamma_y * yreg; } model._output._archetypes_raw = yt; // at some point, need to switch from yt to y after all updates. model._output._iterations = 0; model._output._updates = 0; model._output._avg_change_obj = 2 * TOLERANCE; // Allow at least 1 iteration model._output._step_size = 0; // set to zero model.update(_job); // Update model in K/V store double step = _parms._init_step_size; // Initial step size int steps_in_row = 0; // Keep track of number of steps taken that decrease objective while (!isDone(model, steps_in_row, step)) { // One unit of work _job.update(1, "Iteration " + String.valueOf(model._output._iterations+1) + " of alternating minimization"); UpdateX xtsk = null; UpdateYeX yextsk = null; double alpha = step/_ncolA; // TODO: Should step be divided by number of original or expanded (with 0/1 categorical) cols? // 1) Update X matrix given fixed Y. Wide dataset, update Y. if (_wideDataset) { yextsk = new UpdateYeX(_parms, yt, alpha, _ncolA, _ncolX, tinfo._cats, model._output._normSub, model._output._normMul, model._output._lossFunc, xwF); double[][] yttmp = yextsk.doAll(frTA)._ytnew; ytnew = new Archetypes(yttmp, true, tinfo._catOffsets, numLevels, ytnew._weights); } else { // find out how much time it takes to update x, for wide dataset, it is updating Y xtsk = new UpdateX(_parms, yt, alpha, _ncolA, _ncolX, tinfo._cats, model._output._normSub, model._output._normMul, model._output._lossFunc, weightId); xtsk.doAll(dinfo._adaptedFrame); } model._output._updates++; // 2) Update Y matrix given fixed X, for wide dataset, update X then. 
if (model._output._updates < _parms._max_updates) { if (_wideDataset) { UpdateXeY xeytsk = new UpdateXeY(_parms, ytnew, alpha, colCount, _ncolX, tinfo._cats, model._output._normSub, model._output._normMul, model._output._lossFunc, frTA); xeytsk.doAll(xwF); yreg = xeytsk._yreg; } else { // If max_updates is odd, we will terminate after the X update, for wide dataset, it updates Y UpdateY ytsk = new UpdateY(_parms, yt, alpha, _ncolA, _ncolX, tinfo._cats, model._output._normSub, model._output._normMul, model._output._lossFunc, weightId); double[][] yttmp = ytsk.doAll(dinfo._adaptedFrame)._ytnew; ytnew = new Archetypes(yttmp, true, tinfo._catOffsets, numLevels); yreg = ytsk._yreg; } model._output._updates++; } double obj_new = 0; if (_wideDataset) { objtskw = new ObjCalcW(_parms, ytnew, colCount, _ncolX, tinfo._cats, model._output._normSub, model._output._normMul, model._output._lossFunc, regX, xwF, _parms._k); objtskw.doAll(frTA); obj_new = objtskw._loss + _parms._gamma_x * yextsk._xreg + _parms._gamma_y * yreg; } else { // 3) Compute average change in objective function objtsk = new ObjCalc(_parms, ytnew, _ncolA, _ncolX, tinfo._cats, model._output._normSub, model._output._normMul, model._output._lossFunc, weightId); objtsk.doAll(dinfo._adaptedFrame); obj_new = objtsk._loss + _parms._gamma_x * xtsk._xreg + _parms._gamma_y * yreg; } model._output._avg_change_obj = (model._output._objective - obj_new) / nobs; model._output._iterations++; // step = 1.0 / model._output._iterations; // Step size \alpha_k = 1/iters if (model._output._avg_change_obj > 0) { // Objective decreased this iteration yt = ytnew; model._output._archetypes_raw = ytnew; // Need full archetypes object for scoring model._output._objective = obj_new; step *= 1.05; steps_in_row = Math.max(1, steps_in_row+1); if (_wideDataset) { // update X matrix right now to avoid potential multi-thread collision. new updateXVecs(0, _ncolX).doAll(xwF); } else { new updateXVecs(_ncolA, _ncolX).doAll(dinfo._adaptedFrame); } } else { // If objective increased, re-run with smaller step size step /= Math.max(1.5, -steps_in_row); steps_in_row = Math.min(0, steps_in_row-1); if (_parms._verbose) { Log.info("Iteration " + model._output._iterations + ": Objective increased to " + obj_new + "; reducing step size to " + step); _job.update(0,"Iteration " + model._output._iterations + ": Objective increased to " + obj_new + "; reducing step size to " + step); } } // Add to scoring history model._output._training_time_ms.add(System.currentTimeMillis()); model._output._history_step_size.add(step); model._output._history_objective.add(model._output._objective); model.update(_job); // Update model in K/V store } model._output._representation_name = StringUtils.isNullOrEmpty(_parms._representation_name) ? "GLRMLoading_" + Key.rand() : _parms._representation_name; model._output._representation_key = Key.make(model._output._representation_name); model._output._x_factor_key = model._output._representation_key; // point to this key for default String[] xnames = new String[_ncolX]; for (int i=0; i<_ncolX; i++) { xnames[i] = "Arch"+String.valueOf(i+1); } Frame x = null; if (_wideDataset) { // extract X into archetype, extract Y into X frames x = ArrayUtils.frame(model._output._representation_key, xnames, yt._transposed ? 
yt._archetypes : transpose(yt._archetypes)); yt._archetypes = new FrameUtils.Vecs2ArryTsk(_ncolY, _parms._k).doAll(xwF).res; model._output._archetypes_raw = new Archetypes(yt._archetypes, yt._transposed, tinfo._catOffsets, numLevels); } else { // for normal dataset // 4) Save solution to model output // Save X frame for user reference later Vec[] xvecs = new Vec[_ncolX]; for (int i = 0; i < _ncolX; i++) { xvecs[i] = fr.vec(idx_xold(i, _ncolA)); } x = new Frame(model._output._representation_key, xnames, xvecs); } xinfo = new DataInfo(x, null, 0, true, DataInfo.TransformType.NONE, DataInfo.TransformType.NONE, false, false, false, /* weights */ false, /* offset */ false, /* fold */ false); DKV.put(x); DKV.put(xinfo); // add last step_size used model._output._step_size = step; // Add to scoring history model._output._history_step_size.add(step); // Transpose Y' to get original Y model._output._archetypes = yt.buildTable(model._output._names_expanded, false); recoverSVD(model, xinfo, tempinfo); // getting variance information here // Impute and compute error metrics on training/validation frame model._output._training_metrics = model.scoreMetricsOnly(_parms.train()); model._output._validation_metrics = model.scoreMetricsOnly(_parms.valid()); model._output._model_summary = createModelSummaryTable(model._output); model._output._scoring_history = createScoringHistoryTable(model._output); //no need to call this per iteration // model.update(_job); } finally { if (model._output._iterations ==0) { warn("_max_runtime_secs", "model may not be properly built due to timeout. Set " + "max_runtime_secs to 0 or increase its value."); } if (model != null) { Frame loadingFrm = DKV.getGet(model._output._representation_key); if (loadingFrm != null) Scope.untrack(loadingFrm); model.unlock(_job); } if (tinfo != null) tinfo.remove(); if (dinfo != null) dinfo.remove(); if (xinfo != null) xinfo.remove(); if (tempinfo != null) tempinfo.remove(); // copy what is in XeY into archetypes if (xwF != null) { xwF.delete(); } if (xwF2 != null) xwF2.delete(); // if (x != null && !_parms._keep_loading) x.delete(); // Clean up unused copy of X matrix if (xVecs!=null) xVecs.remove(); if (frTA != null) frTA.delete(); if ((fr != null) && (!_wideDataset)) { for (int i = 0; i < _ncolX; i++) fr.vec(idx_xnew(i, _ncolA, _ncolX)).remove(); } } } private Frame generateFrameOfZeros(int rowCount, int colCount) { Vec tempVec = Vec.makeZero(rowCount); return(new Frame(tempVec.makeZeros(colCount))); // return a frame of zeros with size rowCount by colCount } /* This funciton will 1. change categorical binary columns using binary loss function into numerical columns. This involves performing the following variables: _nums, _cats, _catOffsets, _catMissing, _catNAFill, _permutation, _normMul, _normSub, _normMeans, _numOffsets If no logistic loss is specified, no action will be performed. 2. for numeric columns using logistic loss, it will prevent it from being transformed to say zero mean and unit variance columns. 
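   For example (illustration only): suppose the adapted frame has categorical columns
   {C1 (3 levels), B1 (binary, assigned a binary loss)} followed by numeric columns {N1, N2}.
   After this correction B1 is treated as numeric, so _cats drops from 2 to 1, _nums grows from
   2 to 3, B1 is placed in front of the numeric block with _normMul = 1 and _normSub = 0
   (i.e. no standardization), and _catOffsets, _permutation and _numOffsets are rebuilt accordingly.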
*/ private void correctForBinaryLoss(DataInfo tinfo) { // set mean = 0 and mul = 1.0 for binary numeric columns using binary loss functions for (int index = 0; index < tinfo._nums; index++) { if (_lossFunc[tinfo._permutation[index+tinfo._cats]].isForBinary()) { // binary loss used on numeric columns if (tinfo._normMul != null) tinfo._normMul[index] = 1; if (tinfo._normSub != null) tinfo._normSub[index] = 0; } } // change binary categorical columns using binary loss functions to binary numerical columns if (!(_binaryColumnIndices == null) && (_binaryColumnIndices.size()>0)) { // change the number of categorical and numerical column counts. int binaryLossCols = _binaryColumnIndices.size(); // number of columns to change to numerics int numCatColumns = tinfo._cats; // store original categorical column number int numNumColumns = tinfo._nums; // store original numerical column number tinfo._cats -= binaryLossCols; // decrease the number of categorical columns tinfo._nums += binaryLossCols; // increase the number of numerical columns int[] catOffsetsTemp = new int[tinfo._cats+1]; // offset column indices for 1-hot expanded values (includes enum-enum interaction) boolean[] catMissingTemp = new boolean[tinfo._cats]; // bucket for missing levels int[] catNAFillTemp = new int[tinfo._cats]; // majority class of each categorical col (or last bucket if _catMissing[i] is true) int[] permutationTemp = new int[tinfo._permutation.length]; // permutation matrix mapping input col indices to adaptedFrame int[] numOffsetsTemp = new int[tinfo._nums]; int[] cardinalities = _train.cardinality(); int[] currentCardinality = new int[tinfo._cats]; double[] normMulTemp = new double[tinfo._nums]; double[] normSubTemp = new double[tinfo._nums]; double[] numMeansTemp = new double[tinfo._nums]; int newColIndex = 0; for (int colIndex = 0; colIndex < numCatColumns; colIndex++) { // go through all categoricals if (!(_binaryColumnIndices.contains(tinfo._permutation[colIndex]))) { permutationTemp[newColIndex] = tinfo._permutation[colIndex]; catMissingTemp[newColIndex] = tinfo._catMissing[colIndex]; catNAFillTemp[newColIndex] = tinfo.catNAFill(colIndex); currentCardinality[newColIndex] = cardinalities[colIndex]; catOffsetsTemp[newColIndex+1] = catOffsetsTemp[newColIndex]+currentCardinality[newColIndex]; newColIndex++; } } numOffsetsTemp[0] = catOffsetsTemp[newColIndex]; for (int colIndex = 0; colIndex < binaryLossCols; colIndex++) { // set infos for new numerical binary columns permutationTemp[colIndex + newColIndex] = _binaryColumnIndices.get(colIndex); normMulTemp[colIndex] = 1.0; normSubTemp[colIndex] = 0.0; numMeansTemp[colIndex] = 0.0; if (colIndex > 0) numOffsetsTemp[colIndex] = numOffsetsTemp[colIndex-1]+1; } // copy over original numerical columns for (int colIndex = 0; colIndex < numNumColumns; colIndex++) { int newColumnIndex = colIndex + binaryLossCols; if (tinfo._normSub != null) { normMulTemp[newColumnIndex] = tinfo._normMul[colIndex]; } if (tinfo._normSub != null) { normSubTemp[newColumnIndex] = tinfo._normSub[colIndex]; } if (tinfo._numMeans != null) { numMeansTemp[newColumnIndex] = tinfo._numMeans[colIndex]; } numOffsetsTemp[newColumnIndex] = numOffsetsTemp[newColumnIndex-1]+1; int numColIndex = newColumnIndex + tinfo._cats; permutationTemp[numColIndex] = tinfo._permutation[numColIndex]; } // copy the changed arrays back to tinfo information tinfo._catOffsets = Arrays.copyOf(catOffsetsTemp, catOffsetsTemp.length); tinfo._catMissing = Arrays.copyOf(catMissingTemp, tinfo._cats); 
tinfo.setCatNAFill(Arrays.copyOf(catNAFillTemp, tinfo._cats)); tinfo._permutation = Arrays.copyOf(permutationTemp, tinfo._permutation.length); tinfo._numOffsets = Arrays.copyOf(numOffsetsTemp, tinfo._nums); if (tinfo._normMul != null) { tinfo._normMul = Arrays.copyOf(normMulTemp, tinfo._nums); } if (tinfo._normSub != null) { tinfo._normSub = Arrays.copyOf(normSubTemp, tinfo._nums); } if (tinfo._numMeans != null) { tinfo._numMeans = Arrays.copyOf(numMeansTemp, tinfo._nums); tinfo._numNAFill = tinfo._numMeans; // NAs will be imputed with means } _ncolY = _ncolY-binaryLossCols; // adjust for binary columns with binary loss changed to numerical columns } } private TwoDimTable createModelSummaryTable(GLRMModel.GLRMOutput output) { List<String> colHeaders = new ArrayList<>(); List<String> colTypes = new ArrayList<>(); List<String> colFormat = new ArrayList<>(); // TODO: This causes overflow in R if too large // colHeaders.add("Number of Observed Entries"); colTypes.add("long"); colFormat.add("%d"); colHeaders.add("Number of Iterations"); colTypes.add("long"); colFormat.add("%d"); colHeaders.add("Final Step Size"); colTypes.add("double"); colFormat.add("%.5f"); colHeaders.add("Final Objective Value"); colTypes.add("double"); colFormat.add("%.5f"); TwoDimTable table = new TwoDimTable( "Model Summary", null, new String[1], colHeaders.toArray(new String[0]), colTypes.toArray(new String[0]), colFormat.toArray(new String[0]), ""); int row = 0; int col = 0; // table.set(row, col++, output._nobs); table.set(row, col++, output._iterations); table.set(row, col++, output._history_step_size.get(output._history_step_size.size() - 1)); table.set(row, col, output._objective); return table; } private TwoDimTable createScoringHistoryTable(GLRMModel.GLRMOutput output) { List<String> colHeaders = new ArrayList<>(); List<String> colTypes = new ArrayList<>(); List<String> colFormat = new ArrayList<>(); colHeaders.add("Timestamp"); colTypes.add("string"); colFormat.add("%s"); colHeaders.add("Duration"); colTypes.add("string"); colFormat.add("%s"); colHeaders.add("Iterations"); colTypes.add("long"); colFormat.add("%d"); colHeaders.add("Step Size"); colTypes.add("double"); colFormat.add("%.5f"); colHeaders.add("Objective"); colTypes.add("double"); colFormat.add("%.5f"); int rows = output._training_time_ms.size(); TwoDimTable table = new TwoDimTable( "Scoring History", null, new String[rows], colHeaders.toArray(new String[0]), colTypes.toArray(new String[0]), colFormat.toArray(new String[0]), ""); for (int row = 0; row<rows; row++) { int col = 0; assert(row < table.getRowDim()); assert(col < table.getColDim()); DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss"); table.set(row, col++, fmt.print(output._training_time_ms.get(row))); table.set(row, col++, PrettyPrint.msecs(output._training_time_ms.get(row) - _job.start_time(), true)); table.set(row, col++, row); table.set(row, col++, output._history_step_size.get(row)); table.set(row, col , output._history_objective.get(row)); } return table; } } @SuppressWarnings("ExternalizableWithoutPublicNoArgConstructor") protected static final class Archetypes extends Iced<Archetypes> { double[][] _archetypes; // Y has nrows = k (lower dim), ncols = n (features) boolean _transposed; // Is _archetypes = Y'? Used during model building for convenience. 
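    // Illustrative example (added): with one categorical column of 3 levels followed by two numeric
    // columns, the expanded feature order is [cat level 0, cat level 1, cat level 2, num 0, num 1],
    // so _catOffsets = {0, 3}, _numLevels = {3, -1, -1}, getNumCidx(j) = 3 + j and, assuming the
    // usual offset convention used by GlrmMojoModel.getCatCidx, getCatCidx(0, level) = level.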
final int[] _catOffsets; final int[] _numLevels; // numLevels[i] = -1 if column i is not categorical double[] _weights; // store weights per row for wide datasets; Archetypes(double[][] y, boolean transposed, int[] catOffsets, int[] numLevels) { _archetypes = y; _transposed = transposed; _catOffsets = catOffsets; _numLevels = numLevels; // TODO: Check sum(cardinality[cardinality > 0]) + nnums == nfeatures() _weights = null; } Archetypes(double[][] y, boolean transposed, int[] catOffsets, int[] numLevels, double[] weights) { _archetypes = y; _transposed = transposed; _catOffsets = catOffsets; _numLevels = numLevels; // TODO: Check sum(cardinality[cardinality > 0]) + nnums == nfeatures() _weights = Arrays.copyOf(weights, weights.length); } public int rank() { return _transposed ? _archetypes[0].length : _archetypes.length; } public int nfeatures() { return _transposed ? _archetypes.length : _archetypes[0].length; } // If transpose = true, we want to return Y' public double[][] getY(boolean transpose) { return (transpose ^ _transposed) ? transpose(_archetypes) : _archetypes; } public TwoDimTable buildTable(String[] features, boolean transpose) { // Must pass in categorical column expanded feature names int rank = rank(); int nfeat = nfeatures(); assert features != null && features.length == nfeatures(); double[][] yraw = getY(transpose); if (transpose) { // rows = features (n), columns = archetypes (k) String[] colTypes = new String[rank]; String[] colFormats = new String[rank]; String[] colHeaders = new String[rank]; Arrays.fill(colTypes, "double"); Arrays.fill(colFormats, "%5f"); for (int i = 0; i < colHeaders.length; i++) colHeaders[i] = "Arch" + String.valueOf(i + 1); return new TwoDimTable("Archetypes", null, features, colHeaders, colTypes, colFormats, "", new String[yraw.length][], yraw); } else { // rows = archetypes (k), columns = features (n) String[] rowNames = new String[rank]; String[] colTypes = new String[nfeat]; String[] colFormats = new String[nfeat]; Arrays.fill(colTypes, "double"); Arrays.fill(colFormats, "%5f"); for (int i = 0; i < rowNames.length; i++) rowNames[i] = "Arch" + String.valueOf(i + 1); return new TwoDimTable("Archetypes", null, rowNames, features, colTypes, colFormats, "", new String[yraw.length][], yraw); } } // For j = 0 to number of numeric columns - 1 public int getNumCidx(int j) { return _catOffsets[_catOffsets.length-1]+j; } // For j = 0 to number of categorical columns - 1, and level = 0 to number of levels in categorical column - 1 public int getCatCidx(int j, int level) { return GlrmMojoModel.getCatCidx(j, level, _numLevels, _catOffsets); } protected final double getNum(int j, int k) { int cidx = GlrmMojoModel.getNumCidx(j, _catOffsets); return _transposed ? _archetypes[cidx][k] : _archetypes[k][cidx]; } // Inner product x * y_j where y_j is numeric column j of Y protected final double lmulNumCol(double[] x, int j) { return GlrmMojoModel.lmulNumCol(x, j, _transposed, _archetypes, _catOffsets); } protected final double getCat(int j, int level, int k) { int cidx = getCatCidx(j, level); return _transposed ? 
_archetypes[cidx][k] : _archetypes[k][cidx]; } // Extract Y_j the k by d_j block of Y corresponding to categorical column j // Note: d_j = number of levels in categorical column j protected final double[][] getCatBlock(int j) { int catColJLevel = _numLevels[j]; assert catColJLevel != 0 : "Number of levels in categorical column cannot be zero"; double[][] block = new double[rank()][catColJLevel]; if (_transposed) { for (int level = 0; level < catColJLevel; level++) { int cidx = getCatCidx(j,level); for (int k = 0; k < rank(); k++) block[k][level] = _archetypes[cidx][k]; } } else { for (int level = 0; level < catColJLevel; level++) { int cidx = getCatCidx(j,level); for (int k = 0; k < rank(); k++) block[k][level] = _archetypes[k][cidx]; } } return block; } // Vector-matrix product x * Y_j where Y_j is block of Y corresponding to categorical column j protected final double[] lmulCatBlock(double[] x, int j) { return GlrmMojoModel.lmulCatBlock(x, j, _numLevels, _transposed, _archetypes, _catOffsets); } } // In chunk, first _ncolA cols are A, next _ncolX cols are X protected static int idx_xold(int c, int ncolA) { return ncolA+c; } protected static int idx_xnew(int c, int ncolA, int ncolX) { return ncolA+ncolX+c; } // Initialize X to standard Gaussian random matrix projected into regularizer subspace private static class InitialXProj extends MRTask<InitialXProj> { GLRMParameters _parms; final int _ncolA; // Number of cols in training frame final int _ncolX; // Number of cols in X (k) InitialXProj(GLRMParameters parms, int ncolA, int ncolX) { _parms = parms; _ncolA = ncolA; _ncolX = ncolX; } @Override public void map(Chunk[] chks) { Random rand = RandomUtils.getRNG(_parms._seed); // may have to set back to 0 for compatibility for (int row = 0; row < chks[0]._len; row++) { rand.setSeed(_parms._seed + chks[0].start() + row); // global row ID determines the seed double[] xrow = ArrayUtils.gaussianVector(_ncolX, rand); xrow = _parms._regularization_x.project(xrow, rand); for (int c = 0; c < xrow.length; c++) { chks[_ncolA+c].set(row, xrow[c]); chks[_ncolA+_ncolX+c].set(row, xrow[c]); } } } } // Initialize X = UD, where U is m x k and D is a diagonal k x k matrix private static class InitialXSVD extends MRTask<InitialXSVD> { final double[] _diag; // Diagonal of D final int _ncolU; // Number of cols in U (k) final int _offX; // Column offset to X matrix final int _offW; // Column offset to W matrix InitialXSVD(double[] diag, int ncolU, int ncolA, int ncolX) { assert diag != null && diag.length == ncolU; _diag = diag; _ncolU = ncolU; _offX = ncolU + ncolA; _offW = _offX + ncolX; } @Override public void map(Chunk[] chks) { for (int row = 0; row < chks[0]._len; row++) { for (int c = 0; c < _ncolU; c++) { double ud = chks[c].atd(row) * _diag[c]; chks[_offX+c].set(row, ud); chks[_offW+c].set(row, ud); } } } } // Initialize X to matrix of indicator columns for cluster assignments, e.g. 
k = 4, cluster = 3 -> [0, 0, 1, 0] private static class InitialXKMeans extends MRTask<InitialXKMeans> { GLRMParameters _parms; KMeansModel _model; final int _ncolA; // Number of cols in training frame final int _ncolX; // Number of cols in X (k) InitialXKMeans(GLRMParameters parms, KMeansModel model, int ncolA, int ncolX) { _parms = parms; _model = model; _ncolA = ncolA; _ncolX = ncolX; } @Override public void map(Chunk[] chks) { double[] tmp = new double[_ncolA]; Random rand = RandomUtils.getRNG(0); for (int row = 0; row < chks[0]._len; row++) { double[] p = _model.score_ratio(chks, row, tmp); rand.setSeed(_parms._seed + chks[0].start() + row); //global row ID determines the seed // TODO: Should we restrict indicator cols to regularizer subspace? p = _parms._regularization_x.project(p, rand); for (int c = 0; c < p.length; c++) { chks[_ncolA+c].set(row, p[c]); chks[_ncolA+_ncolX+c].set(row, p[c]); } } } } public static class updateXVecs extends MRTask<updateXVecs> { int _startCol; int _numCols; int _endCols; public updateXVecs(int startCol, int numCols) { assert startCol >= 0; assert numCols > 0; _startCol = startCol; _numCols = numCols; _endCols = startCol+numCols; } public void map(Chunk[] chks) { for (int colIndex = _startCol; colIndex < _endCols; colIndex++) { for (int rowIndex = 0; rowIndex < chks[0]._len; rowIndex++) { xFrameVec(chks, colIndex, 0).set(rowIndex, xFrameVec(chks, colIndex, _numCols).atd(rowIndex)); } } } } //-------------------------------------------------------------------------------------------------------------------- // Update X step //-------------------------------------------------------------------------------------------------------------------- private static class UpdateX extends MRTask<UpdateX> { // Input GLRMParameters _parms; GlrmLoss[] _lossFunc; final double _alpha; // Step size divided by num cols in A final Archetypes _yt; // _yt = Y' (transpose of Y) final int _ncolA; // Number of cols in training frame final int _ncolX; // Number of cols in X (k) final int _ncats; // Number of categorical cols in training frame final double[] _normSub; // For standardizing training data final double[] _normMul; final int _weightId; // Output double _xreg; // Regularization evaluated on new X UpdateX(GLRMParameters parms, Archetypes yt, double alpha, int ncolA, int ncolX, int ncats, double[] normSub, double[] normMul, GlrmLoss[] lossFunc, int weightId) { assert yt != null && yt.rank() == ncolX; _parms = parms; _yt = yt; _lossFunc = lossFunc; _alpha = alpha; _ncolA = ncolA; _ncolX = ncolX; // Info on A (cols 1 to ncolA of frame) assert ncats <= ncolA; _ncats = ncats; _weightId = weightId; _normSub = normSub; _normMul = normMul; } private Chunk chk_xold(Chunk[] chks, int c) { return chks[_ncolA + c]; } private Chunk chk_xnew(Chunk[] chks, int c) { return chks[_ncolA + _ncolX + c]; } @SuppressWarnings("ConstantConditions") // The method is too complex for IntelliJ @Override public void map(Chunk[] cs) { assert (_ncolA + 2*_ncolX) == cs.length; double[] a = new double[_ncolA]; double[] tgrad = new double[_ncolX]; // new gradient calculation with reduced memory allocation double[] u = new double[_ncolX]; Chunk chkweight = _weightId >= 0 ? 
cs[_weightId] : new C0DChunk(1, cs[0]._len); Random rand = RandomUtils.getRNG(0); _xreg = 0; double[] xy = null; double[] prod = null; if (_yt._numLevels[0] > 0) { xy = new double[_yt._numLevels[0]]; // maximum categorical level column is always the first one prod = new double[_yt._numLevels[0]]; } for (int row = 0; row < cs[0]._len; row++) { rand.setSeed(_parms._seed + cs[0].start() + row); //global row ID determines the seed Arrays.fill(tgrad, 0.0); // temporary gradient for comparison // Additional user-specified weight on loss for this row double cweight = chkweight.atd(row); assert !Double.isNaN(cweight) : "User-specified weight cannot be NaN"; // Compute gradient of objective at row // Categorical columns for (int j = 0; j < _ncats; j++) { a[j] = cs[j].atd(row); if (Double.isNaN(a[j])) continue; // Skip missing observations in row int catColJLevel = _yt._numLevels[j]; Arrays.fill(xy, 0, catColJLevel, 0); // reset xy before accumulate sum // Calculate x_i * Y_j where Y_j is sub-matrix corresponding to categorical col j for (int level = 0; level < catColJLevel ; level++) { for (int k = 0; k < _ncolX; k++) { xy[level] += chk_xold(cs, k).atd(row) * _yt.getCat(j, level, k); } } // Gradient wrt x_i is matrix product \grad L_{i,j}(x_i * Y_j, A_{i,j}) * Y_j' double[] weight = _lossFunc[j].mlgrad(xy, (int) a[j], prod, catColJLevel ); if (_yt._transposed) { for (int c = 0; c < catColJLevel ; c++) { int cidx = _yt.getCatCidx(j, c); double weights = cweight * weight[c]; double[] yArchetypes = _yt._archetypes[cidx]; for (int k = 0; k < _ncolX; k++) tgrad[k] += weights * yArchetypes[k]; } } else { for (int c = 0; c < catColJLevel; c++) { int cidx = _yt.getCatCidx(j, c); double weights = cweight * weight[c]; for (int k = 0; k < _ncolX; k++) tgrad[k] += weights * _yt._archetypes[k][cidx]; } } } // Numeric columns for (int j = _ncats; j < _ncolA; j++) { int js = j - _ncats; a[j] = cs[j].atd(row); if (Double.isNaN(a[j])) continue; // Skip missing observations in row // Inner product x_i * y_j double xy1 = 0; for (int k = 0; k < _ncolX; k++) xy1 += chk_xold(cs, k).atd(row) * _yt.getNum(js, k); // Sum over y_j weighted by gradient of loss \grad L_{i,j}(x_i * y_j, A_{i,j}) double weight = cweight * _lossFunc[j].lgrad(xy1, (a[j] - _normSub[js]) * _normMul[js]); for (int k = 0; k < _ncolX; k++) tgrad[k] += weight * _yt.getNum(js, k); } // Update row x_i of working copy with new values for (int k = 0; k < _ncolX; k++) { double xold = chk_xold(cs, k).atd(row); // Old value of x_i u[k] = xold - _alpha * tgrad[k]; } double[] xnew = _parms._regularization_x.rproxgrad(u, _alpha*_parms._gamma_x, rand); _xreg += _parms._regularization_x.regularize(xnew); for (int k = 0; k < _ncolX; k++) chk_xnew(cs, k).set(row,xnew[k]); } } @Override public void reduce(UpdateX other) { _xreg += other._xreg; } } //-------------------------------------------------------------------------------------------------------------------- // Update Y equivalent to X step for wide datasets. Now the Y is stored as a 2D double array instead. 
//-------------------------------------------------------------------------------------------------------------------- private static class UpdateYeX extends MRTask<UpdateYeX> { // Input GLRMParameters _parms; GlrmLoss[] _lossFunc; final double _alpha; // Step size divided by num cols in A final Archetypes _ytold; // Old Y' matrix final int _ncolA; // Number of cols in training frame final int _ncolX; // Number of cols in X (k) final int _ncats; // Number of categorical cols in training frame final double[] _normSub; // For standardizing training data final double[] _normMul; final Frame _xVecs; // store XeY old and new // Output double[][] _ytnew; // New Y matrix double _xreg; // Regularization evaluated on new Y which is essentially x UpdateYeX(GLRMParameters parms, Archetypes yt, double alpha, int ncolA, int ncolX, int ncats, double[] normSub, double[] normMul, GlrmLoss[] lossFunc, Frame xVecs) { assert yt != null && yt.rank() == ncolX; _parms = parms; _lossFunc = lossFunc; _alpha = alpha; // should be the same as normal dataset _ncolA = ncolA; _ncolX = ncolX; _ytold = yt; _xreg = 0; // Info on A (cols 1 to ncolA of frame) assert ncats <= ncolA; _ncats = ncats; _normSub = normSub; _normMul = normMul; _xVecs = xVecs; } /* instead of doing column wise, need to do row wise in order not to swap chunks containing Xs. */ @Override public void map(Chunk[] cs) { double[] chkweight = _ytold._weights; // weight per sample int numTArow = (int) cs[0]._len; // number of rows in Chunk of T(A) int tArowStart = (int) cs[0].start(); // first row of chunk T(A) int tArowEnd = numTArow + tArowStart - 1; // last row index of chunk T(A) Chunk[] xChunks = new Chunk[_parms._k*2]; // to store chunk of X _ytnew = new double[_ytold.nfeatures()][_ncolX]; // transpose of YeX double[] xy = null; // store expanded enum columns content double[] grad = null; if (_ytold._numLevels[tArowStart] > 0) { // minor optimization here xy = new double[_ytold._numLevels[tArowStart]]; grad = new double[_ytold._numLevels[tArowStart]]; } ArrayList<Integer> xChunkIndices = findXChunkIndices(_xVecs, tArowStart, tArowEnd, _ytold); // grab x chunk ind int numColIndexOffset = _ytold._catOffsets[_ncats] - _ncats; // index into xframe numerical rows getXChunk(_xVecs, xChunkIndices.remove(0), xChunks); // get the first xFrame chunk int xChunkRowStart = (int) xChunks[0].start(); // first row index of xFrame int xChunkSize = (int) xChunks[0]._len; // number of rows in xFrame int xRow = 0; // store true index of X chunk int tARow = 0; // store true index of T(A) chunk double cweight; // weight per data row.
for (int row = 0; row < cs[0]._len; row++) { // rows of T(A), columns of A tARow = row + tArowStart; // true row index into T(A) chunks if (tARow < _ncats) { // dealing with a enum int catColJLevel = _ytold._numLevels[tARow]; // Categorical row for (int j = 0; j < cs.length; j++) { // j is column of T(A) (rows of A) cweight = chkweight[j]; assert !Double.isNaN(cweight) : "User-specified weight cannot be NaN"; double a = cs[j].atd(row); // access an element of T(A) if (Double.isNaN(a)) continue; // Skip missing observations in column // Calculate x_i * Y_j where Y_j is sub-matrix corresponding to categorical col j Arrays.fill(xy, 0.0); for (int level = 0; level < catColJLevel; level++) { // finish dealing with one enum xRow = level+_ytold._catOffsets[tARow]-xChunkRowStart; // equivalent row index into X chunk if (xRow >= xChunkSize) { // need to load in the next X chunk if (xChunkIndices.size() < 1) { Log.err("GLRM train updateYeX", "Chunks mismatch between A transpose and X frame."); } else { getXChunk(_xVecs, xChunkIndices.remove(0), xChunks); xChunkRowStart = (int) xChunks[0].start(); // first row index of xFrame xChunkSize = (int) xChunks[0]._len; // number of rows in xFrame xRow = level + _ytold._catOffsets[tARow] - xChunkRowStart; } } for (int k = 0; k < _ncolX; k++) { xy[level] += xFrameVec(xChunks, k, 0).atd(xRow) * yArcheTypeVal(_ytold, j, k); } } // Gradient for level p is x_i weighted by \grad_p L_{i,j}(x_i * Y_j, A_{i,j}) double[] weight = _lossFunc[tARow].mlgrad(xy, (int) a, grad, catColJLevel); for (int level = 0; level < catColJLevel; level++) { xRow = level+_ytold._catOffsets[tARow]-xChunkRowStart; // equivalent row into X chunk for (int k = 0; k < _ncolX; k++) _ytnew[j][k] += cweight * weight[level] * xFrameVec(xChunks, k, 0).atd(xRow); } } } else { // dealing with numerical rows // Numeric columns xRow = tARow - xChunkRowStart + numColIndexOffset; //index into x frame which expanded categoricals if (xRow >= xChunkSize) { // load in new chunk of xFrame if (xChunkIndices.size() < 1) { Log.err("GLRM train", "Chunks mismatch between A transpose and X frame."); } else { getXChunk(_xVecs, xChunkIndices.remove(0), xChunks); // get a xVec chunk xChunkRowStart = (int) xChunks[0].start(); // first row index of xFrame xChunkSize = (int) xChunks[0]._len; // number of rows in xFrame xRow = tARow - xChunkRowStart + numColIndexOffset; } } int numRow = tARow - _ncats; for (int j = 0; j < cs.length; j++) { // columns of T(A) chunks cweight = chkweight[j]; assert !Double.isNaN(cweight) : "User-specified weight cannot be NaN"; // Compute gradient of objective at column double a = cs[j].atd(row); // T(A) if (Double.isNaN(a)) continue; // Skip missing observations in column // Inner product x_i * y_j double txy = 0; for (int k = 0; k < _ncolX; k++) txy += xFrameVec(xChunks, k, 0).atd(xRow) * yArcheTypeVal(_ytold, j, k); // Sum over x_i weighted by gradient of loss \grad L_{i,j}(x_i * y_j, A_{i,j}) double weight = cweight * _lossFunc[tARow].lgrad(txy, (a - _normSub[numRow]) * _normMul[numRow]); for (int k = 0; k < _ncolX; k++) _ytnew[j][k] += weight * xFrameVec(xChunks, k, 0).atd(xRow); } } } } @Override public void reduce(UpdateYeX other) { ArrayUtils.add(_ytnew, other._ytnew); } @Override protected void postGlobal() { assert _ytnew.length == _ytold.nfeatures() && _ytnew[0].length == _ytold.rank(); Random rand = RandomUtils.getRNG(_parms._seed); // Compute new y_j values using proximal gradient for (int j = 0; j < _ytnew.length; j++) { double[] u = new double[_ytnew[0].length]; // Do not touch 
this memory allocation. Needed for proper function. for (int k = 0; k < _ytnew[0].length; k++) u[k] = _ytold._archetypes[j][k] - _alpha * _ytnew[j][k]; _ytnew[j] = _parms._regularization_x.rproxgrad(u, _alpha*_parms._gamma_x, rand); _xreg += _parms._regularization_x.regularize(_ytnew[j]); } } } public static double yArcheTypeVal(Archetypes yt, int j, int k) { return yt._transposed?yt._archetypes[j][k]:yt._archetypes[k][j]; } //-------------------------------------------------------------------------------------------------------------------- // Update X equivalent to Y step for wide datasets. Now the X is stored in H2OFrame instead of Y in 2D double array //-------------------------------------------------------------------------------------------------------------------- private static class UpdateXeY extends MRTask<UpdateXeY> { // Input GLRMParameters _parms; GlrmLoss[] _lossFunc; final double _alpha; // Step size divided by num cols in A final Archetypes _yt; // _yt = Y' (transpose of Y) final int _ncolA; // Number of cols in training frame final int _ncolX; // Number of cols in X (k) final int _ncats; // Number of categorical cols in training frame final double[] _normSub; // For standardizing training data final double[] _normMul; final Frame _tAVecs; // stores T(A) in this case final int[] _finalCatIndex; // Output double _yreg; // Regularization evaluated on new X UpdateXeY(GLRMParameters parms, Archetypes yt, double alpha, int ncolA, int ncolX, int ncats, double[] normSub, double[] normMul, GlrmLoss[] lossFunc, Frame tAVecs) { assert yt != null && yt.rank() == ncolX; _parms = parms; _yt = yt; _lossFunc = lossFunc; _alpha = alpha; _ncolA = ncolA; _ncolX = ncolX; _tAVecs = tAVecs; // Info on A (cols 1 to ncolA of frame) assert ncats <= ncolA; _ncats = ncats; _normSub = normSub; _normMul = normMul; _finalCatIndex = new int[_ncats]; for (int index=0; index < _ncats; index++) { _finalCatIndex[index] = _yt._catOffsets[index+1]-1; } } @SuppressWarnings("ConstantConditions") // The method is too complex for IntelliJ @Override public void map(Chunk[] cs) { // cs contains x vectors old and new double[] chkweight = _yt._weights; // weight per sample int numXRow = (int) cs[0]._len; // number of rows in Chunk of X int xRowStart = (int) cs[0].start(); // first row of chunk X int xRowEnd = xRowStart + numXRow - 1; // last row index of chunk X Chunk[] tAChunks = new Chunk[_ncolA]; // to store chunk of T(A) Chunk[] xChunksN = new Chunk[cs.length]; // store next chunk of X just in case categoricals are cut off. 
int numCatColumns = _yt._catOffsets[_ncats]; int xTARowStart = findOriginalColIndex(xRowStart, _ncats, numCatColumns, _yt); int xTARowEnd = findOriginalColIndex(xRowEnd, _ncats, numCatColumns, _yt); double[][] xMat = null; // store a chunk of x double[] xy = null; double[] prod = null; double[][] tgradEnum = null; double[][] uEnum = null; // grab the corresponding T(A) chunks ArrayList<Integer> tAChunkIndices = findtAChunkIndices(_tAVecs, xRowStart, xRowEnd, _yt); // grab T(A) chunk ind getXChunk(_tAVecs, tAChunkIndices.remove(0), tAChunks); // get the first tAFrame chunk int tAColNum = tAChunks.length; int tAChunkRowStart = (int) tAChunks[0].start(); // first row index of T(A) Frame int tAChunkSize = (int) tAChunks[0]._len; // number of rows in T(A) Frame int xRow = 0; // store row index of X chunk if (_yt._numLevels[tAChunkRowStart] > 0) { tgradEnum = new double[_yt._numLevels[tAChunkRowStart]][_ncolX]; uEnum = new double[_yt._numLevels[tAChunkRowStart]][_ncolX]; xMat = new double[_yt._numLevels[tAChunkRowStart]][_ncolX]; xy = new double[_yt._numLevels[tAChunkRowStart]]; // maximum categorical level column is always the first one prod = new double[_yt._numLevels[tAChunkRowStart]]; } double a = 0; // store an element of T(A) double[] tgrad = new double[_ncolX]; // store a row of tgrad double[] u = new double[_ncolX]; // store a row Random rand = RandomUtils.getRNG(0); _yreg = 0; int row = 0; // relative row index into current t(A) chunk int rowX = 0; // relative row index into current X chunk int rowXTrue = 0; // true row index into X chunk from the start of an expanded enum column int currentXRowstart = 0; // denote the enum columns actually contained in current X chunk int currentXRowEnd = 0; for (int tArow = xTARowStart; tArow <= xTARowEnd; tArow++) { // each row of T(A) chunk under consideration row = tArow-tAChunkRowStart; // translate to relative row index of a T(A) chunk, change automatically if (row >= tAChunkSize) { // hitting the end of T(A) chunk, reload it getXChunk(_tAVecs, tAChunkIndices.remove(0), tAChunks); tAChunkRowStart = (int) tAChunks[0].start(); // first row index of T(A) Frame tAChunkSize = (int) tAChunks[0]._len; // number of rows in T(A) Frame row = tArow-tAChunkRowStart; // translate to relative row index of a T(A) chunk, change automatically } rand.setSeed(_parms._seed + cs[0].start() + row); //global row ID determines the seed Arrays.fill(tgrad, 0.0); // temporary gradient for comparison if (tArow < _ncats) { // dealing with a row of T(A) that is enum int catColJLevel = _yt._numLevels[tArow]; // initialize tgradEnum to all zeros for (int level=0; level<tgradEnum.length; level++) { Arrays.fill(tgradEnum[level], 0, _parms._k, 0); } for (int j=0; j < tAColNum; j++) { // go throw all columns of T(A) // compute gradient of objective for enum columns if (j == 0) { // at the start of a enum column before expansion xRow = rowX+xRowStart; // absolute true row index into X chunk, should have this one rowXTrue = _yt._catOffsets[tArow]; // absolute start of correct true expanded x chunk row index, int levelSeen = 0; if (xRow > rowXTrue) { // need info from previous X chunk ArrayList<Integer> tempXChunkCidx = findXChunkIndices(_fr, rowXTrue, xRow - 1, _yt); getXChunk(_fr, tempXChunkCidx.remove(0), xChunksN); // read in a new X chunk int tempXStart = (int) xChunksN[0].start(); int tempXSize = (int) xChunksN[0]._len; for (int rowN = rowXTrue; rowN < xRow; rowN++) { // grab start enum column upto xRow int relRowX = rowN-tempXStart; // relative row into new X chunk if (relRowX 
> tempXSize) { // need to get to next X chunk in order to get the x row if (tempXChunkCidx.size() < 1) { Log.err("GLRM train", "Chunks mismatch between A transpose and X frame."); } else { getXChunk(_fr, tempXChunkCidx.remove(0), xChunksN); // read in a new X chunk tempXStart = (int) xChunksN[0].start(); tempXSize = (int) xChunksN[0]._len; relRowX = rowN - tempXStart; } } for (int k = 0; k < _ncolX; k++) { // store a matrix of catColJLevel by _ncolX elements to form one T(A) xMat[levelSeen][k] = xFrameVec(xChunksN, k, 0).atd(relRowX); } levelSeen++; } } currentXRowstart = levelSeen; for (int level=levelSeen; level < catColJLevel; level++) { if (rowX <= xRowEnd) { for (int k = 0; k < _ncolX; k++) { // store a matrix of catColJLevel by _ncolX elements to form one T(A) xMat[levelSeen][k] = xFrameVec(cs, k, 0).atd(rowX); } levelSeen++; rowX++; // move relative row index of current X chunk as well xRow++; // move to next absolute index of current X chunk } else { // reach end of current X chunk and if calculation not finished, need more next X chunk break; } } currentXRowEnd=levelSeen; int levelLeft = catColJLevel-levelSeen; if (levelLeft > 0) { // current x chunk does not contains the enum rows needed, next chunk does int endXRow = xRow+levelLeft-1; ArrayList<Integer> tempXChunkCidx = findXChunkIndices(_fr, xRow, endXRow, _yt); getXChunk(_fr, tempXChunkCidx.remove(0), xChunksN); // read in a new X chunk int tempXStart = (int) xChunksN[0].start(); int tempXSize = (int) xChunksN[0]._len; for (int rowN = xRow; rowN <= endXRow; rowN++) { Arrays.fill(tgradEnum[levelSeen], 0, _ncolX, 0); // time to reset tgrad with all zeros int relRowX = rowN-tempXStart; if (relRowX > tempXSize) { if (tempXChunkCidx.size() < 1) { Log.err("GLRM train", "Chunks mismatch between A transpose and X frame."); } else { getXChunk(_fr, tempXChunkCidx.remove(0), xChunksN); // read in a new X chunk tempXStart = (int) xChunksN[0].start(); tempXSize = (int) xChunksN[0]._len; relRowX = rowN - tempXStart; } } for (int k = 0; k < _ncolX; k++) { // store a matrix of catColJLevel by _ncolX elements to form one T(A) xMat[levelSeen][k] = xFrameVec(xChunksN, k, 0).atd(relRowX); } levelSeen++; } } } // Additional user-specified weight on loss for this row double cweight = chkweight[j]; assert !Double.isNaN(cweight) : "User-specified weight cannot be NaN"; a = tAChunks[j].atd(row); // look at each element of T(A) if (Double.isNaN(a)) { continue; // Skip missing observations in row and adjust the corresponding relative index into X chunk } Arrays.fill(xy, 0, catColJLevel, 0); // reset xy before accumulate sum // obtained the matrix of X to form XY for one element of T(A) for (int level=0; level < catColJLevel; level++) { for (int k = 0; k < _ncolX; k++) { xy[level] += xMat[level][k] * yArcheTypeVal(_yt, j, k); } } double[] weight = _lossFunc[tArow].mlgrad(xy, (int) a, prod, catColJLevel); // catColJLevel by 1 // form tgradEnum which is catColJLevel by _ncolX for (int c = 0; c < catColJLevel; c++) { for (int k = 0; k < _ncolX; k++) { tgradEnum[c][k]+=weight[c]*cweight*yArcheTypeVal(_yt, j, k); // need to accumulate this over all columns } } } //update X which is catColJLevel by k by uEnum per one row of T(A) for (int level=currentXRowstart; level<currentXRowEnd; level++) { // only make updates to rows in current X chunk // only update for the rows that the current X chunk has for (int k = 0; k < _ncolX; k++) { uEnum[level][k] = xMat[level][k] - _alpha * tgradEnum[level][k]; } // calculate how much update is due to regularization term 
double[] xnew = _parms._regularization_y.rproxgrad(uEnum[level], _alpha*_parms._gamma_y, rand); _yreg += _parms._regularization_y.regularize(xnew); // need to update X chunks with new X values, it is of size catColJLevel by k // checking which x chunks contains the x elements that needed to be updated. int trueXRow = level+_yt._catOffsets[tArow]; // absolute Row index into X chunk xRow = trueXRow-xRowStart; for (int k=0; k<_ncolX; k++) { xFrameVec(cs, k, _parms._k).set(xRow, xnew[k]); } } } else { // dealing with numerical columns, separate from categoricals rowX = findExpColIndex(tArow, _ncats, _yt) - xRowStart; // translate to relative row index of x chunk int numRow = tArow - _ncats; // index into numerical rows of T(A) without enum columns expansion for (int j=0; j<tAColNum; j++) { double cweight = chkweight[j]; assert !Double.isNaN(cweight) : "User-specified weight cannot be NaN"; a = tAChunks[j].atd(row); // take one element of T(A) if (Double.isNaN(a)) continue; // Inner product of X_i y_j to get an element of T(A)i, j double txy = 0; for (int k = 0; k < _ncolX; k++) { txy += xFrameVec(cs, k, 0).atd(rowX)*yArcheTypeVal(_yt, j, k); } double weight = cweight * _lossFunc[tArow].lgrad(txy, (a-_normSub[numRow])*_normMul[numRow]); for (int k=0; k<_ncolX; k++) { tgrad[k] += weight * yArcheTypeVal(_yt, j, k); } } //update row x for (int k=0; k<_ncolX; k++) { double xold = xFrameVec(cs, k, 0).atd(rowX); u[k] = xold-_alpha*tgrad[k]; } // calculate how much update is due to regularization term double[] xnew = _parms._regularization_y.rproxgrad(u, _alpha*_parms._gamma_y, rand); _yreg += _parms._regularization_y.regularize(xnew); for (int k=0; k<_ncolX; k++) { xFrameVec(cs, k, _parms._k).set(rowX, xnew[k]); } } } } @Override public void reduce(UpdateXeY other) { _yreg += other._yreg; } } private static class UpdateY extends MRTask<UpdateY> { // Input GLRMParameters _parms; GlrmLoss[] _lossFunc; final double _alpha; // Step size divided by num cols in A final Archetypes _ytold; // Old Y' matrix final int _ncolA; // Number of cols in training frame final int _ncolX; // Number of cols in X (k) final int _ncats; // Number of categorical cols in training frame final double[] _normSub; // For standardizing training data final double[] _normMul; final int _weightId; // Output double[][] _ytnew; // New Y matrix double _yreg; // Regularization evaluated on new Y UpdateY(GLRMParameters parms, Archetypes yt, double alpha, int ncolA, int ncolX, int ncats, double[] normSub, double[] normMul, GlrmLoss[] lossFunc, int weightId) { assert yt != null && yt.rank() == ncolX; _parms = parms; _lossFunc = lossFunc; _alpha = alpha; _ncolA = ncolA; _ncolX = ncolX; _ytold = yt; _yreg = 0; // Info on A (cols 1 to ncolA of frame) assert ncats <= ncolA; _ncats = ncats; _weightId = weightId; _normSub = normSub; _normMul = normMul; } private Chunk chk_xnew(Chunk[] chks, int c) { return chks[_ncolA + _ncolX + c]; } @Override public void map(Chunk[] cs) { assert (_ncolA + 2*_ncolX) == cs.length; _ytnew = new double[_ytold.nfeatures()][_ncolX]; Chunk chkweight = _weightId >= 0 ? 
cs[_weightId]:new C0DChunk(1,cs[0]._len); double[] xy = null; double[] grad = null; if (_ytold._numLevels[0] > 0) { xy = new double[_ytold._numLevels[0]]; grad = new double[_ytold._numLevels[0]]; } // Categorical columns for (int j = 0; j < _ncats; j++) { int catColJLevel = _ytold._numLevels[j]; // Compute gradient of objective at column for (int row = 0; row < cs[0]._len; row++) { double a = cs[j].atd(row); if (Double.isNaN(a)) continue; // Skip missing observations in column double cweight = chkweight.atd(row); assert !Double.isNaN(cweight) : "User-specified weight cannot be NaN"; // Calculate x_i * Y_j where Y_j is sub-matrix corresponding to categorical col j Arrays.fill(xy, 0.0); for (int level = 0; level < catColJLevel; level++) { for (int k = 0; k < _ncolX; k++) { xy[level] += chk_xnew(cs, k).atd(row) * _ytold.getCat(j,level,k); } } // Gradient for level p is x_i weighted by \grad_p L_{i,j}(x_i * Y_j, A_{i,j}) double[] weight = _lossFunc[j].mlgrad(xy, (int)a, grad,catColJLevel); for (int level = 0; level < catColJLevel; level++) { for (int k = 0; k < _ncolX; k++) _ytnew[_ytold.getCatCidx(j, level)][k] += cweight * weight[level] * chk_xnew(cs, k).atd(row); } } } // Numeric columns for (int j = _ncats; j < _ncolA; j++) { int js = j - _ncats; int yidx = _ytold.getNumCidx(js); // Compute gradient of objective at column for (int row = 0; row < cs[0]._len; row++) { double a = cs[j].atd(row); if (Double.isNaN(a)) continue; // Skip missing observations in column // Additional user-specified weight on loss for this row double cweight = chkweight.atd(row); assert !Double.isNaN(cweight) : "User-specified weight cannot be NaN"; // Inner product x_i * y_j double txy = 0; for (int k = 0; k < _ncolX; k++) txy += chk_xnew(cs, k).atd(row) * _ytold.getNum(js,k); // Sum over x_i weighted by gradient of loss \grad L_{i,j}(x_i * y_j, A_{i,j}) double weight = cweight * _lossFunc[j].lgrad(txy, (a - _normSub[js]) * _normMul[js]); for (int k = 0; k < _ncolX; k++) _ytnew[yidx][k] += weight * chk_xnew(cs, k).atd(row); } } } @Override public void reduce(UpdateY other) { ArrayUtils.add(_ytnew, other._ytnew); } @Override protected void postGlobal() { assert _ytnew.length == _ytold.nfeatures() && _ytnew[0].length == _ytold.rank(); Random rand = RandomUtils.getRNG(_parms._seed); // Compute new y_j values using proximal gradient for (int j = 0; j < _ytnew.length; j++) { double[] u = new double[_ytnew[0].length]; // Do not touch this memory allocation. Needed for proper function. for (int k = 0; k < _ytnew[0].length; k++) u[k] = _ytold._archetypes[j][k] - _alpha * _ytnew[j][k]; _ytnew[j] = _parms._regularization_y.rproxgrad(u, _alpha*_parms._gamma_y, rand); _yreg += _parms._regularization_y.regularize(_ytnew[j]); } } } /* Calculate the sum over the loss function in the optimization objective for wideDatasets. Basically, we are looking at T(A) = XY. In this case, Y is equivalent to T(X) and X is equivalent to T(Y) and we are looking at T(A) and not A. */ private static class ObjCalcW extends MRTask<ObjCalcW> { // Input GLRMParameters _parms; GlrmLoss[] _lossFunc; final Archetypes _yt; // _yt = Y' (transpose of Y) (YeX, stored as 2D array) final int _ncolA; // Number of cols in training frame final int _ncolX; // Number of cols in X (k) final int _ncats; // Number of categorical cols in training frame final double[] _normSub; // For standardizing training data final double[] _normMul; final boolean _regX; // Should I calculate regularization of (old) X matrix? 
final Frame _xVecs; // store XeY and XeY new final int _xOffset; // which X to use, 0 to use old new, _parms._k to use new one // Output double _loss; // Loss evaluated on A - XY using new X (and current Y) static double _xold_reg; // Regularization evaluated on old X st (Y for wide datasets) ObjCalcW(GLRMParameters parms, Archetypes yt, int ncolA, int ncolX, int ncats, double[] normSub, double[] normMul, GlrmLoss[] lossFunc, boolean regX, Frame xVecs, int xOffset) { assert yt != null && yt.rank() == ncolX; assert ncats <= ncolA; _parms = parms; _yt = yt; _lossFunc = lossFunc; _ncolA = ncolA; _ncolX = ncolX; _ncats = ncats; _regX = regX; _xVecs = xVecs; _normSub = normSub; _normMul = normMul; _xOffset = xOffset; } @SuppressWarnings("ConstantConditions") // The method is too complex @Override public void map(Chunk[] cs) { // cs now is n by m, x is n_exp by k, y is 2-D array of m by k double[] chkweight = _yt._weights; // weight per sample int numTArow = (int) cs[0]._len; int tArowStart = (int) cs[0].start(); int tArowEnd = numTArow+tArowStart-1; // last row index Chunk[] xChunks = new Chunk[_parms._k*2]; // number of columns, store old and new X int startxcidx = cs[0].cidx(); // here, reg_x is meant for the y vectors and only do it once per obj calculation if ((_regX) && (startxcidx == 0)) { calXOldReg(_yt._archetypes, _yt._archetypes.length); } ArrayList<Integer> xChunkIndices = findXChunkIndices(_xVecs, tArowStart, tArowEnd, _yt); // contains x chunks double[] xy = null; // store the vector of categoricals for one column int numColIndexOffset = _yt._catOffsets[_ncats] - _ncats; // index into xframe numerical columns if (_yt._numLevels[tArowStart] > 0) { // allocate memory only if there are categoricals in T(A) chunks xy = new double[_yt._numLevels[tArowStart]]; } getXChunk(_xVecs, xChunkIndices.remove(0), xChunks); // get the first xFrame chunk int xChunkRowStart = (int) xChunks[0].start(); // first row index of xFrame int xChunkSize = (int) xChunks[0]._len; // number of rows in xFrame int xRow = 0; int tARow = 0; assert ((tArowStart >= xChunkRowStart) && (tArowStart < (xChunkRowStart + xChunkSize))); // xFrame has T(A) start for (int rowIndex = 0; rowIndex < numTArow; rowIndex++) { // use indexing of T(A) tARow = rowIndex + tArowStart; // true row index of T(A) if (tARow < _ncats) { // dealing with categorical columns now // perform comparison for categoricals int catRowLevel = _yt._numLevels[tARow]; // number of bits to expand a categorical columns for (int colIndex = 0; colIndex < cs.length; colIndex++) { // look at one element of T(A) double a = cs[colIndex].atd(rowIndex); // grab an element of T(A) if (Double.isNaN(a)) continue; ; Arrays.fill(xy, 0, catRowLevel, 0); // reset before next accumulated sum for (int level = 0; level < catRowLevel; level++) { // one element of T(A) composed of several of XY xRow = level + _yt._catOffsets[tARow] - xChunkRowStart; // relative row if (xRow >= xChunkSize) { // load in new chunk of xFrame if (xChunkIndices.size() < 1) { Log.err("GLRM train", "Chunks mismatch between A transpose and X frame."); } else { getXChunk(_xVecs, xChunkIndices.remove(0), xChunks); // get a xVec chunk xChunkRowStart = (int) xChunks[0].start(); // first row index of xFrame xChunkSize = (int) xChunks[0]._len; // number of rows in xFrame xRow = level + _yt._catOffsets[tARow] - xChunkRowStart; } } for (int innerCol = 0; innerCol < _parms._k; innerCol++) { xy[level] += xFrameVec(xChunks, innerCol, _xOffset).atd(xRow)*yArcheTypeVal(_yt, colIndex, innerCol); } } _loss += 
chkweight[colIndex] * _lossFunc[tARow].mloss(xy, (int) a, catRowLevel); } } else { // looking into numerical columns here // perform comparison for numericals xRow = tARow - xChunkRowStart + numColIndexOffset; //index into x frame which expanded categoricals if (xRow >= xChunkSize) { // load in new chunk of xFrame if (xChunkIndices.size() < 1) { Log.err("GLRM train", "Chunks mismatch between A transpose and X frame."); } else { getXChunk(_xVecs, xChunkIndices.remove(0), xChunks); // get a xVec chunk xChunkRowStart = (int) xChunks[0].start(); // first row index of xFrame xChunkSize = (int) xChunks[0]._len; // number of rows in xFrame xRow = tARow - xChunkRowStart + numColIndexOffset; } } int numRow = tARow - _ncats; // index into T(A) without categorical type expansion for (int colIndex = 0; colIndex < cs.length; colIndex++) { double a = cs[colIndex].atd(rowIndex); // access dataset T(A) if (Double.isNaN(a)) continue; double txy = 0.0; for (int innerCol = 0; innerCol < _parms._k; innerCol++) { txy += xFrameVec(xChunks, innerCol, _xOffset).atd(xRow) * yArcheTypeVal(_yt, colIndex, innerCol); } _loss += chkweight[colIndex] * _lossFunc[tARow].loss(txy, (a - _normSub[numRow]) * _normMul[numRow]); } } } } private void calXOldReg(double[][] yvals, int yLen) { for (int j = 0; j < yLen; j++) { double[] xrow = Arrays.copyOf(yvals[j], _parms._k); _xold_reg += _parms._regularization_x.regularize(xrow); } } @Override public void reduce(ObjCalcW other) { // if (_newChunk || other._newChunk) _loss += other._loss; } } public static Chunk xFrameVec(Chunk[] chks, int c, int offset) { return chks[offset + c]; } /* For wide datasets, X is stored in another frame different from frame containing T(A). Tasks are passed chunks containing T(A). Hence, to match the row numbers in a T(A) chunk, we need to grab the X chunk correctly, hence this is what this function does. */ public static void getXChunk(Frame xVecs, int chunkIdx, Chunk[] xChunks) { int xWidth = xChunks.length; // width of x and xold matices for (int j = 0; j < xWidth; ++j) { // read in the relevant xVec chunks xChunks[j] = xVecs.vec(j).chunkForChunkIdx(chunkIdx); } } /* Recall again, tasks are passed chunks containing chunks of xVec. Need to find the corresponding chunks in TA that contains the same row number of x Vecs. The correct chunks will be stored as an array. */ public static ArrayList<Integer> findtAChunkIndices(Frame tAVecs, int xStart, int xEnd, Archetypes yt) { ArrayList<Integer> tAcidx = new ArrayList<Integer>(); int tANChunks = tAVecs.anyVec().nChunks(); // number of chunks of input frame if (tANChunks == 1) { // no need to do anything. tAcidx.add(0); return tAcidx; // only one chunk. Everything should be there. } int startTAcidx=0; int numCats = yt._catOffsets.length-1; // number of categorical columns in T(A) int numCatColumns = yt._catOffsets[numCats]; // number of cat expanded columns xStart = findOriginalColIndex(xStart, numCats, numCatColumns, yt); // translate col indices to expanded column index of enums xEnd = findOriginalColIndex(xEnd, numCats, numCatColumns, yt); findGoodCidx(tAVecs, tAcidx, false, xStart, tANChunks, startTAcidx); // find start T(A) cidx startTAcidx = tAcidx.get(0); if (startTAcidx < (tANChunks-1)) { findGoodCidx(tAVecs, tAcidx, true, xEnd, tANChunks, startTAcidx); // find all blocks } return tAcidx; } /* Recall again, tasks are passed chunks containing chunks of T(A). Need to find the corresponding chunks that contains the same row number of T(A). The correct chunks will be stored as an array. 
*/ public static ArrayList<Integer> findXChunkIndices(Frame xVecs, int taStart, int taEnd, Archetypes yt) { ArrayList<Integer> xcidx = new ArrayList<Integer>(); int xNChunks = xVecs.anyVec().nChunks(); if (xNChunks == 1) { xcidx.add(0); return xcidx; // only one chunk. Everything should be there. } int startTAcidx=0; int numCats = yt._catOffsets.length-1; taStart = findExpColIndex(taStart, numCats, yt); // translate col indices to expanded column index of enums taEnd = findExpColIndex(taEnd, numCats, yt); // last column of T(A) findGoodCidx(xVecs, xcidx, false, taStart, xNChunks, startTAcidx); // find start xcidx startTAcidx = xcidx.get(0); if (startTAcidx < (xNChunks-1)) { findGoodCidx(xVecs, xcidx, true, taEnd, xNChunks, startTAcidx); // find all blocks } return xcidx; } /* For each T(A) chunk, find the chunk indices of X frame that contains the same rows as in the T(A) chunk. It will add the correct chunk index into an array. */ public static void findGoodCidx(Frame xVecs, ArrayList<Integer> currentList, boolean addIndex, int taIndex, int xNChunks, int startTAcidx) { Chunk[] xChunks = new Chunk[1]; for (int index = startTAcidx; index < xNChunks; index++) { // check to make sure start row is there xChunks[0] = xVecs.vec(0).chunkForChunkIdx(index); long xStart = xChunks[0].start(); // start row of xVec Chunks long xEnd = xStart + xChunks[0]._len; if ((taIndex >= xStart) && (taIndex < xEnd)) { // found end chunk if (currentList.size() > 0) { // do not want to add duplicate indices if (!currentList.contains(index)) currentList.add(index); } else { currentList.add(index); // first elligible one, add it } break; } if (addIndex && (!currentList.contains(index))) { currentList.add(index); // add all chunks containing the range of rows between start row and end row. } } } /* Translate the column index into expanded column index taking care of enum 1-hot encoding expansion. */ private static int findExpColIndex(int oldIndex, int numCats, Archetypes yt) { if (oldIndex < numCats) { // find true row start considering categorical columns return yt._catOffsets[oldIndex]; } else { // taStart in the numerical columns now return oldIndex-numCats+yt._catOffsets[numCats]; } } /* Translate the expanded column index back to origin column index before enum 1-hot encoding expansion. */ private static int findOriginalColIndex(int oldIndex, int numCats, int totEnumColumns, Archetypes yt) { if (oldIndex <totEnumColumns) { // x Chunk column index is one of enum columns for (int index = 1; index < yt._catOffsets.length; index++) { if (oldIndex < yt._catOffsets[index]) { return (index-1); // return the corresponding T(A) row index } } } return (oldIndex-totEnumColumns+numCats); } // Calculate the sum over the loss function in the optimization objective private static class ObjCalc extends MRTask<ObjCalc> { // Input GLRMParameters _parms; GlrmLoss[] _lossFunc; final Archetypes _yt; // _yt = Y' (transpose of Y) final int _ncolA; // Number of cols in training frame final int _ncolX; // Number of cols in X (k) final int _ncats; // Number of categorical cols in training frame final double[] _normSub; // For standardizing training data final double[] _normMul; final int _weightId; final boolean _regX; // Should I calculate regularization of (old) X matrix? 
// Output double _loss; // Loss evaluated on A - XY using new X (and current Y) double _xold_reg; // Regularization evaluated on old X ObjCalc(GLRMParameters parms, Archetypes yt, int ncolA, int ncolX, int ncats, double[] normSub, double[] normMul, GlrmLoss[] lossFunc, int weightId) { this(parms, yt, ncolA, ncolX, ncats, normSub, normMul, lossFunc, weightId, false); } ObjCalc(GLRMParameters parms, Archetypes yt, int ncolA, int ncolX, int ncats, double[] normSub, double[] normMul, GlrmLoss[] lossFunc, int weightId, boolean regX) { assert yt != null && yt.rank() == ncolX; assert ncats <= ncolA; _parms = parms; _yt = yt; _lossFunc = lossFunc; _ncolA = ncolA; _ncolX = ncolX; _ncats = ncats; _regX = regX; _weightId = weightId; _normSub = normSub; _normMul = normMul; } private Chunk chk_xnew(Chunk[] chks, int c) { return chks[_ncolA + _ncolX + c]; } @SuppressWarnings("ConstantConditions") // The method is too complex @Override public void map(Chunk[] cs) { assert (_ncolA + 2 * _ncolX) == cs.length; Chunk chkweight = _weightId >= 0 ? cs[_weightId]:new C0DChunk(1,cs[0]._len); // weight is per data sample _loss = _xold_reg = 0; double[] xrow = null; double[] xy = null; if (_yt._numLevels[0] > 0) // only allocate xy when there are categorical columns xy = new double[_yt._numLevels[0]]; // maximum categorical level column is always the first one if (_regX) // allocation memory only if necessary xrow = new double[_ncolX]; for (int row = 0; row < cs[0]._len; row++) { // Additional user-specified weight on loss for this row double cweight = chkweight.atd(row); // weight is per row for normal dataset assert !Double.isNaN(cweight) : "User-specified weight cannot be NaN"; // Categorical columns for (int j = 0; j < _ncats; j++) { // contribution from categoricals _loss += cweight*lossDueToCategorical(cs, j, row, xy); } // Numeric columns for (int j = _ncats; j < _ncolA; j++) { _loss += cweight*lossDueToNumeric(cs, j, row, _ncats); } // Calculate regularization term for old X if requested if (_regX) { _xold_reg += regularizationTermOldX(cs, row, xrow, _ncolA, _ncolA + _ncolX, _ncolX); } } } private double regularizationTermOldX(Chunk[] cs, int row, double[] xrow, int colStart, int colEnd, int colWidth) { int idx = 0; for (int j = colStart; j < colEnd; j++) { xrow[idx] = cs[j].atd(row); idx++; } assert idx == colWidth; return _parms._regularization_x.regularize(xrow); } private double lossDueToNumeric(Chunk[] cs, int j, int row, int offsetA) { double a = cs[j].atd(row); if (Double.isNaN(a)) return 0.0; // Skip missing observations in row // Inner product x_i * y_j double txy = 0; int js = j - offsetA; for (int k = 0; k < _ncolX; k++) txy += chk_xnew(cs, k).atd(row) * _yt.getNum(js, k); return _lossFunc[j].loss(txy, (a - _normSub[js]) * _normMul[js]); } private double lossDueToCategorical(Chunk[] cs, int colInd, int row, double[] xy) { double a = cs[colInd].atd(row); if (Double.isNaN(a)) return 0.0; int catColJLevel = _yt._numLevels[colInd]; Arrays.fill(xy, 0, catColJLevel, 0); // reset before next accumulation sum // Calculate x_i * Y_j where Y_j is sub-matrix corresponding to categorical col j for (int level = 0; level < catColJLevel; level++) { // level index into extra columns due to categoricals. 
for (int k = 0; k < _ncolX; k++) { xy[level] += chk_xnew(cs, k).atd(row) * _yt.getCat(colInd, level, k); } } return _lossFunc[colInd].mloss(xy, (int)a, catColJLevel); } @Override public void reduce(ObjCalc other) { _loss += other._loss; _xold_reg += other._xold_reg; } } // Solves XD = AY' for X where A is m x n, Y is k x n, D is k x k, and m >> n > k // Resulting matrix X = (AY')D^(-1) will have dimensions m x k private static class CholMulTask extends MRTask<CholMulTask> { final Archetypes _yt; // _yt = Y' (transpose of Y) final int _ncolA; // Number of cols in training frame final int _ncolX; // Number of cols in X (k) final int _ncats; // Number of categorical cols in training frame final double[] _normSub; // For standardizing training data final double[] _normMul; CholeskyDecomposition _chol; // Cholesky decomposition of D = D', since we solve D'X' = DX' = AY' CholMulTask(CholeskyDecomposition chol, Archetypes yt, int ncolA, int ncolX, int ncats, double[] normSub, double[] normMul) { assert yt != null && yt.rank() <= ncolX; assert ncats <= ncolA; _yt = yt; _ncolA = ncolA; _ncolX = ncolX; _ncats = ncats; _chol = chol; _normSub = normSub; _normMul = normMul; } // [A,X,W] A is read-only training data, X is left matrix in A = XY decomposition, W is working copy of X @Override public void map(Chunk[] cs) { assert (_ncolA + 2*_ncolX) == cs.length; double[] xrow = new double[_ncolX]; for (int row = 0; row < cs[0]._len; row++) { // 1) Compute single row of AY' for (int k = 0; k < _ncolX; k++) { // Categorical columns double x = 0; for (int d = 0; d < _ncats; d++) { double a = cs[d].atd(row); if (Double.isNaN(a)) continue; x += _yt.getCat(d, (int)a, k); } // Numeric columns for (int d = _ncats; d < _ncolA; d++) { int ds = d - _ncats; double a = cs[d].atd(row); if (Double.isNaN(a)) continue; x += (a - _normSub[ds]) * _normMul[ds] * _yt.getNum(ds, k); } xrow[k] = x; } // 2) Cholesky solve for single row of X // _chol.solve(xrow); Matrix tmp = _chol.solve(new Matrix(new double[][] {xrow}).transpose()); xrow = tmp.getColumnPackedCopy(); // 3) Save row of solved values into X (and copy W = X) int i = 0; for (int d = _ncolA; d < _ncolA+_ncolX; d++) { cs[d].set(row, xrow[i]); cs[d+_ncolX].set(row, xrow[i++]); } assert i == xrow.length; } } } }
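Editorial note on the file above: the UpdateX/UpdateY tasks implement one sweep of alternating proximal-gradient descent, where each row x_i takes a gradient step on the separable loss and is then passed through the regularizer's proximal operator (rproxgrad). The following standalone sketch is not part of the H2O source; it assumes an all-numeric row, quadratic loss L(u, a) = (u - a)^2, and an L1 regularizer whose prox is soft-thresholding, and it uses plain arrays in place of Chunks and Archetypes.

// Standalone illustration of the per-row proximal-gradient step performed by UpdateX.
// Assumptions: all-numeric data, quadratic loss, L1 regularizer. Not H2O code.
public final class GlrmRowUpdateSketch {

  // One proximal-gradient update of representation row x given data row a and archetypes y (k x n).
  static double[] updateRow(double[] x, double[] a, double[][] y, double alpha, double gamma) {
    int k = x.length, n = a.length;
    double[] grad = new double[k];
    for (int j = 0; j < n; j++) {
      if (Double.isNaN(a[j])) continue;              // skip missing entries, as UpdateX does
      double xy = 0;
      for (int c = 0; c < k; c++) xy += x[c] * y[c][j];
      double lgrad = 2 * (xy - a[j]);                // derivative of quadratic loss (u - a)^2
      for (int c = 0; c < k; c++) grad[c] += lgrad * y[c][j];
    }
    double[] xnew = new double[k];
    double t = alpha * gamma;                        // L1 prox: soft-threshold at alpha * gamma
    for (int c = 0; c < k; c++) {
      double u = x[c] - alpha * grad[c];             // gradient step
      xnew[c] = u > t ? u - t : (u < -t ? u + t : 0.0);
    }
    return xnew;
  }

  public static void main(String[] args) {
    double[][] y = {{1, 0, 1}, {0, 1, 1}};           // k = 2 archetypes over n = 3 numeric columns
    double[] a = {1.0, 2.0, 3.0};
    double[] x = new double[2];
    for (int it = 0; it < 200; it++) x = updateRow(x, a, y, 0.05, 0.01);
    System.out.println(java.util.Arrays.toString(x));
  }
}

Running main converges x toward the least-squares fit of the row against the two archetypes, shrunk slightly by the L1 penalty; the real tasks do the analogous update per Chunk row with the configured loss and regularizer.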
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/glrm/GLRMGenX.java
package hex.glrm; import hex.genmodel.algos.glrm.GlrmMojoModel; import water.MRTask; import water.MemoryManager; import water.fvec.Chunk; import water.fvec.NewChunk; /** * GLRMGenX will generate the coefficients (X matrix) of a GLRM model given the archetype * for a dataframe. */ public class GLRMGenX extends MRTask<GLRMGenX> { final GLRMModel _m; // contains info to transfer to the glrm mojo model final int _k; // store column size of X matrix GlrmMojoModel _gMojoModel; // instantiate mojo model from GLRM model info public GLRMGenX(GLRMModel m, int k) { _m = m; _m._parms = m._parms; _k = k; } @Override protected void setupLocal() { _gMojoModel = new GlrmMojoModel(_m._output._names, _m._output._domains, null); _gMojoModel._allAlphas = GlrmMojoModel.initializeAlphas(_gMojoModel._numAlphaFactors); // set _allAlphas array GLRM.Archetypes arch = _m._output._archetypes_raw; // fill out the mojo model, no need to fill out every field _gMojoModel._ncolA = _m._output._lossFunc.length; _gMojoModel._ncolY = arch.nfeatures(); _gMojoModel._nrowY = arch.rank(); _gMojoModel._ncolX = _m._parms._k; _gMojoModel._seed = _m._parms._seed; _gMojoModel._regx = _m._parms._regularization_x; _gMojoModel._gammax = _m._parms._gamma_x; _gMojoModel._init = _m._parms._init; _gMojoModel._ncats = _m._output._ncats; _gMojoModel._nnums = _m._output._nnums; _gMojoModel._normSub = _m._output._normSub; _gMojoModel._normMul = _m._output._normMul; _gMojoModel._permutation = _m._output._permutation; _gMojoModel._reverse_transform = _m._parms._impute_original; _gMojoModel._transposed = _m._output._archetypes_raw._transposed; // loss functions _gMojoModel._losses = _m._output._lossFunc; // archetypes _gMojoModel._numLevels = arch._numLevels; _gMojoModel._catOffsets = arch._catOffsets; _gMojoModel._archetypes = arch.getY(false); } public void map(Chunk[] chks, NewChunk[] preds) { int featureLen = chks.length; long rowStart = chks[0].start(); long baseSeed = _gMojoModel._seed+rowStart; double[] rowdata = MemoryManager.malloc8d(chks.length); // read in each row of data double[] pdimensions = MemoryManager.malloc8d(_k); for (int rid = 0; rid < chks[0]._len; ++rid) { for (int col = 0; col < featureLen; col++) { rowdata[col] = chks[col].atd(rid); } _gMojoModel.score0(rowdata, pdimensions, baseSeed+rid); // make prediction for (int c=0; c<_k; c++) { preds[c].addNum(pdimensions[c]); } } } public GlrmMojoWriter getMojo() { return new GlrmMojoWriter(_m); } }
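Editorial note on GLRMGenX above: it scores every row of an already-adapted frame with the GLRM MOJO to produce that row's k-dimensional representation. A minimal driver, mirroring the pattern GLRMModel.transform uses, might look like the sketch below; the method name buildXFactor and the key/column names are illustrative only, and a running H2O cluster plus a trained GLRMModel and adapted Frame are assumed.

// Hypothetical usage sketch (not part of the H2O source); follows the calls made in GLRMModel.transform().
import water.Key;
import water.fvec.Frame;
import water.fvec.Vec;

public class GenXUsageSketch {
  // Materialize the m x k representation (X factor) of an adapted frame.
  static Frame buildXFactor(hex.glrm.GLRMModel model, Frame adaptFr) {
    int k = model._parms._k;
    hex.glrm.GLRMGenX gs = new hex.glrm.GLRMGenX(model, k);
    gs.doAll(k, Vec.T_NUM, adaptFr);                       // one numeric output column per archetype
    String[] names = new String[k];
    for (int i = 0; i < k; i++) names[i] = "Arch" + (i + 1);
    return gs.outputFrame(Key.<Frame>make("x_factor_sketch"), names, new String[k][]);
  }
}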
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/glrm/GLRMModel.java
package hex.glrm; import hex.DataInfo; import hex.Model; import hex.ModelCategory; import hex.ModelMetrics; import hex.genmodel.algos.glrm.GlrmInitialization; import hex.genmodel.algos.glrm.GlrmLoss; import hex.genmodel.algos.glrm.GlrmMojoModel; import hex.genmodel.algos.glrm.GlrmRegularizer; import hex.svd.SVDModel.SVDParameters; import water.*; import water.exceptions.H2OFailException; import water.fvec.Chunk; import water.fvec.Frame; import water.fvec.Vec; import water.udf.CFuncRef; import water.util.ArrayUtils; import water.util.TwoDimTable; import java.util.ArrayList; import java.util.List; /** * GLRM (<a href="https://web.stanford.edu/~boyd/papers/pdf/glrm.pdf">Generalized Low Rank Model</a>). * * The model seeks to represent an input frame A of dimensions m x n as a product of two smaller matrices X (m x k) * and Y (k x n) of rank k each. To this end, the model solves a generic optimization problem * Loss(A, XY) + Rx(X) + Ry(Y) -> min_{X,Y} * in other words it tries to find X and Y such that XY is close to A (as measured by the loss function), taking into * account regularization constraints on X and Y as well. * * Note that the input frame A may have columns of different types; while output matrices X and Y are always * real-valued. * * The Loss function is assumed to be separable in each element of the matrix, so that * Loss(A, XY) = Sum[L_{ij}(A_{ij}, x_i y_j) over i=1..m, j=1..n] * the individual loss functions can be different for each element; but in our implementation we assume that L_{ij}'s * are constant over rows and may only differ by columns. Thus, L_{ij} == L_j. * * The regularizers Rx and Ry are assumed to be row-separable: * Rx(X) = Sum[rx_i(x_i) for i=1..m] * Ry(Y) = Sum[ry_j(x_j) for j=1..n] * * * The output of the model consists of matrices X and Y. There are multiple interpretations of these (see section 5.4 * in Boyd's paper). In particular, * + The rows of Y (1 x n) can be interpreted as "idealized examples" of input rows (even if rows of Y are always * real-valued, while rows of the input data may have any types). Thus, we call them *archetypes* in the code. * + The rows of X (1 x k) provide an embedding of each original data row into a lower-dimensional space. Thus, we * call them "representations" of the data. 
*/ public class GLRMModel extends Model<GLRMModel, GLRMModel.GLRMParameters, GLRMModel.GLRMOutput> implements Model.GLRMArchetypes { //-------------------------------------------------------------------------------------------------------------------- // Input parameters //-------------------------------------------------------------------------------------------------------------------- public static class GLRMParameters extends Model.Parameters { @Override public String algoName() { return "GLRM"; } @Override public String fullName() { return "Generalized Low Rank Modeling"; } @Override public String javaName() { return GLRMModel.class.getName(); } @Override public long progressUnits() { return 2 + _max_iterations; } // Data transformation (demean to compare with PCA) public DataInfo.TransformType _transform = DataInfo.TransformType.NONE; public int _k = 1; // Rank of resulting XY matrix public GlrmInitialization _init = GlrmInitialization.PlusPlus; // Initialization of Y matrix, use SVD for all numerics public SVDParameters.Method _svd_method = SVDParameters.Method.Randomized; // SVD initialization method (for _init = SVD) public Key<Frame> _user_y; // User-specified Y matrix (for _init = User) public Key<Frame> _user_x; // User-specified X matrix (for _init = User) public boolean _expand_user_y = true; // Should categorical columns in _user_y be expanded via one-hot encoding? (for _init = User) // Loss functions public GlrmLoss _loss = GlrmLoss.Quadratic; // Default loss function for numeric cols public GlrmLoss _multi_loss = GlrmLoss.Categorical; // Default loss function for categorical cols public int _period = 1; // Length of the period when _loss = Periodic public GlrmLoss[] _loss_by_col; // Override default loss function for specific columns public int[] _loss_by_col_idx; // Regularization functions public GlrmRegularizer _regularization_x = GlrmRegularizer.None; // Regularization function for X matrix public GlrmRegularizer _regularization_y = GlrmRegularizer.None; // Regularization function for Y matrix public double _gamma_x = 0; // Regularization weight on X matrix public double _gamma_y = 0; // Regularization weight on Y matrix // Optional parameters public int _max_iterations = 1000; // Max iterations public int _max_updates = 2*_max_iterations; // Max number of updates (X or Y) public double _init_step_size = 1.0; // Initial step size (decrease until we hit min_step_size) public double _min_step_size = 1e-4; // Min step size @Deprecated public String _loading_name; // store x frame frame-id given by use public String _representation_name; // store x frame frame-id given by user public boolean _recover_svd = false; // Recover singular values and eigenvectors of XY at the end? public boolean _impute_original = false; // Reconstruct original training data by reversing _transform? public boolean _verbose = true; // Log when objective increases each iteration? 
} //-------------------------------------------------------------------------------------------------------------------- // Outputs //-------------------------------------------------------------------------------------------------------------------- public static class GLRMOutput extends Model.Output { // Number of iterations executed public int _iterations; // Number of updates executed public int _updates; // Current value of the objective function public double _objective; // Current value of step_size used public double _step_size; // Average change in objective function this iteration public double _avg_change_obj; public ArrayList<Double> _history_objective = new ArrayList<>(); // Mapping from lower dimensional k-space to training features (Y) public TwoDimTable _archetypes; public GLRM.Archetypes _archetypes_raw; // Needed for indexing into Y for scoring // Step size each iteration public ArrayList<Double> _history_step_size = new ArrayList<>(); // SVD of output XY public double[/*feature*/][/*k*/] _eigenvectors_raw; public TwoDimTable _eigenvectors; public double[] _singular_vals; // Frame key of X matrix public String _representation_name; // the final frame name for X frame. Equals to _parms._loading_name if user specified it. Otherwise, H2O will assign. public Key<Frame> _representation_key; public Key<? extends Model> _init_key; public Key<Frame> _x_factor_key; // store key of x factor generated from dataset prediction // Number of categorical and numeric columns public int _ncats; public int _nnums; // Number of good rows in training frame (not skipped) public long _nobs; // Categorical offset vector public int[] _catOffsets; // Standardization arrays for numeric data columns public double[] _normSub; // Mean of each numeric column public double[] _normMul; // One over standard deviation of each numeric column // Permutation array mapping adapted to original training col indices public int[] _permutation; // _permutation[i] = j means col i in _adaptedFrame maps to col j of _train // Expanded column names of adapted training frame public String[] _names_expanded; // Loss function for every column in adapted training frame public GlrmLoss[] _lossFunc; // Training time public ArrayList<Long> _training_time_ms = new ArrayList<>(); // Total column variance for expanded and transformed data public double _total_variance; // Standard deviation of each principal component public double[] _std_deviation; // Importance of principal components // Standard deviation, proportion of variance explained, and cumulative proportion of variance explained public TwoDimTable _importance; public GLRMOutput(GLRM b) { super(b); } /** Override because base class implements ncols-1 for features with the * last column as a response variable; for GLRM all the columns are features. 
*/ @Override public int nfeatures() { return _names.length; } @Override public ModelCategory getModelCategory() { return ModelCategory.DimReduction; } } public GLRMModel(Key<GLRMModel> selfKey, GLRMParameters parms, GLRMOutput output) { super(selfKey, parms, output); } @Override protected Futures remove_impl(Futures fs, boolean cascade) { Keyed.remove(_output._init_key, fs, true); Keyed.remove(_output._x_factor_key, fs, true); Keyed.remove(_output._representation_key, fs, true); return super.remove_impl(fs, cascade); } @Override protected AutoBuffer writeAll_impl(AutoBuffer ab) { ab.putKey(_output._init_key); ab.putKey(_output._representation_key); return super.writeAll_impl(ab); } @Override protected Keyed readAll_impl(AutoBuffer ab, Futures fs) { ab.getKey(_output._init_key, fs); ab.getKey(_output._representation_key, fs); return super.readAll_impl(ab, fs); } @Override public GlrmMojoWriter getMojo() { return new GlrmMojoWriter(this); } //-------------------------------------------------------------------------------------------------------------------- // Scoring //-------------------------------------------------------------------------------------------------------------------- /*** * GLRM performs the action A=X*Y during training. During prediction, given a new dataset Anew, we will have * Anew = Xnew*Y. If score/predict is called, Anew will be returned. However, when transform is called, we will * return Xnew in this case. */ @Override public Frame transform(Frame fr) { if (fr.getKey() == null) throw new H2OFailException("H2O frame must have a valid frame key/id."); if (_output._x_factor_key != null && _output._x_factor_key.toString().contains(fr.getKey().toString())) { return DKV.get(_output._x_factor_key).get(); } else if (_output._representation_key.toString().contains(fr.getKey().toString())) { return DKV.get(_output._representation_key).get(); } else { // new frame, need to generate the new X-factor before returning it try (Scope.Safe s = Scope.safe(fr)) { Frame adaptFr = adaptFrameForScore(fr, true); _output._x_factor_key = Key.make("GLRMLoading_"+fr._key); GLRMGenX gs = new GLRMGenX(this, _parms._k); gs.doAll(gs._k, Vec.T_NUM, adaptFr); String[] loadingFrmNames = new String[gs._k]; for (int index = 1; index <= gs._k; index++) loadingFrmNames[index-1] = "Arch"+index; String[][] loadingFrmDomains = new String[gs._k][]; Frame xFrame = gs.outputFrame(_output._x_factor_key, loadingFrmNames, loadingFrmDomains); DKV.put(xFrame); Scope.untrack(xFrame); return xFrame; } } } // GLRM scoring is data imputation based on feature domains using reconstructed XY (see Udell (2015), Section 5.3) // Check if the frame is the same as used in training. If yes, return the XY. Otherwise, take the archetypes and // generate new coefficients for it and then do X*Y private PredictScoreResult reconstruct(Frame orig, Frame adaptedFr, Key<Frame> destination_key, boolean save_imputed, boolean reverse_transform) { int ncols = _output._names.length; assert ncols == adaptedFr.numCols(); String prefix = "reconstr_"; _output._x_factor_key = gen_representation_key(orig); // Need [A,X,P] where A = adaptedFr, X = loading frame, P = imputed frame // Note: A is adapted to original training frame, P has columns shuffled so cats come before nums! 
Frame fullFrm = new Frame(adaptedFr); Frame loadingFrm = null; // get this from DKV or generate it from scratch // call resconstruct only if test frame key and training frame key matches plus frame dimensions match as well if (DKV.get(_output._x_factor_key) != null) { // try to use the old X frame if possible loadingFrm = DKV.get(_output._x_factor_key).get(); } else { // need to generate the X matrix and put it in as a frame ID. Mojo predict will return one row of x as a double[] GLRMGenX gs = new GLRMGenX(this, _parms._k); gs.doAll(gs._k, Vec.T_NUM, adaptedFr); String[] loadingFrmNames = new String[gs._k]; for (int index = 1; index <= gs._k; index++) loadingFrmNames[index - 1] = "Arch" + index; String[][] loadingFrmDomains = new String[gs._k][]; Scope.untrack(gs.outputFrame(_output._x_factor_key, loadingFrmNames, loadingFrmDomains)); loadingFrm = DKV.get(_output._x_factor_key).get(); } fullFrm.add(loadingFrm); String[][] adaptedDomme = adaptedFr.domains(); Vec anyVec = fullFrm.anyVec(); assert anyVec != null; for (int i = 0; i < ncols; i++) { Vec v = anyVec.makeZero(); v.setDomain(adaptedDomme[i]); fullFrm.add(prefix + _output._names[i], v); } GLRMScore gs = new GLRMScore(ncols, _parms._k, save_imputed, reverse_transform).doAll(fullFrm); // reconstruct X*Y // Return the imputed training frame int x = ncols + _parms._k, y = fullFrm.numCols(); Frame f = fullFrm.extractFrame(x, y); // this will call vec_impl() and we cannot call the delete() below just yet f = new Frame((destination_key == null ? Key.<Frame>make() : destination_key), f.names(), f.vecs()); DKV.put(f); return new PredictScoreResult(gs._mb, f, f); } public Key<Frame> gen_representation_key(Frame fr) { if ((_parms.train() != null) && (fr.checksum() == _parms.train().checksum()) && fr._key.equals(_parms.train()._key)) // use training X factor here. 
return _output._representation_key; else return Key.make("GLRMLoading_"+fr._key); } @Override protected PredictScoreResult predictScoreImpl(Frame orig, Frame adaptedFr, String destination_key, Job j, boolean computeMetrics, CFuncRef customMetricFunc) { return reconstruct(orig, adaptedFr, Key.make(destination_key), true, _parms._impute_original); } @Override public Frame scoreReconstruction(Frame frame, Key<Frame> destination_key, boolean reverse_transform) { Frame adaptedFr = new Frame(frame); adaptTestForTrain(adaptedFr, true, false); PredictScoreResult result = reconstruct(frame, adaptedFr, destination_key, true, reverse_transform); result.makeModelMetrics(frame, adaptedFr); return result.getPredictions(); } /** * Project each archetype into original feature space * @param frame Original training data with m rows and n columns * @param destination_key Frame Id for output * @return Frame containing k rows and n columns, where each row corresponds to an archetype */ @Override public Frame scoreArchetypes(Frame frame, Key<Frame> destination_key, boolean reverse_transform) { int ncols = _output._names.length; Frame adaptedFr = new Frame(frame); adaptTestForTrain(adaptedFr, true, false); assert ncols == adaptedFr.numCols(); String[][] adaptedDomme = adaptedFr.domains(); double[][] proj = new double[_parms._k][_output._nnums + _output._ncats]; // Categorical columns for (int d = 0; d < _output._ncats; d++) { double[][] block = _output._archetypes_raw.getCatBlock(d); for (int k = 0; k < _parms._k; k++) proj[k][_output._permutation[d]] = _output._lossFunc[d].mimpute(block[k]); } // Numeric columns for (int d = _output._ncats; d < (_output._ncats + _output._nnums); d++) { int ds = d - _output._ncats; for (int k = 0; k < _parms._k; k++) { double num = _output._archetypes_raw.getNum(ds, k); proj[k][_output._permutation[d]] = _output._lossFunc[d].impute(num); if (reverse_transform) proj[k][_output._permutation[d]] = proj[k][_output._permutation[d]] / _output._normMul[ds] + _output._normSub[ds]; } } // Convert projection of archetypes into a frame with correct domains Frame f = ArrayUtils.frame((destination_key == null ? Key.<Frame>make() : destination_key), adaptedFr.names(), proj); for(int i = 0; i < ncols; i++) f.vec(i).setDomain(adaptedDomme[i]); return f; } private class GLRMScore extends MRTask<GLRMScore> { final int _ncolA; // Number of cols in original data A final int _ncolX; // Number of cols in X (rank k) final boolean _save_imputed; // Save imputed data into new vecs? final boolean _reverse_transform; // Reconstruct original training data by reversing transform? 
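// Per-chunk metrics builder; merged across chunks in reduce() and later turned into the model's ModelMetricsGLRM.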
ModelMetricsGLRM.GlrmModelMetricsBuilder _mb; GLRMScore( int ncolA, int ncolX, boolean save_imputed ) { this(ncolA, ncolX, save_imputed, _parms._impute_original); } GLRMScore( int ncolA, int ncolX, boolean save_imputed, boolean reverse_transform ) { _ncolA = ncolA; _ncolX = ncolX; _save_imputed = save_imputed; _reverse_transform = reverse_transform; } @Override public void map(Chunk[] chks) { float[] atmp = new float[_ncolA]; double[] xtmp = new double[_ncolX]; double[] preds = new double[_ncolA]; _mb = GLRMModel.this.makeMetricBuilder(null); if (_save_imputed) { for (int row = 0; row < chks[0]._len; row++) { double[] p = impute_data(chks, row, xtmp, preds); compute_metrics(chks, row, atmp, p); for (int c = 0; c < preds.length; c++) chks[_ncolA + _ncolX + c].set(row, p[c]); } } else { for (int row = 0; row < chks[0]._len; row++) { double[] p = impute_data(chks, row, xtmp, preds); compute_metrics(chks, row, atmp, p); } } } @Override public void reduce(GLRMScore other) { if (_mb != null) _mb.reduce(other._mb); } @Override protected void postGlobal() { if (_mb != null) _mb.postGlobal(); } private float[] compute_metrics(Chunk[] chks, int row_in_chunk, float[] tmp, double[] preds) { for (int i = 0; i < tmp.length; i++) tmp[i] = (float)chks[i].atd(row_in_chunk); _mb.perRow(preds, tmp, GLRMModel.this); return tmp; } private double[] impute_data(Chunk[] chks, int row_in_chunk, double[] tmp, double[] preds) { for (int i = 0; i < tmp.length; i++ ) tmp[i] = chks[_ncolA+i].atd(row_in_chunk); impute_data(tmp, preds); return preds; } private double[] impute_data(double[] tmp, double[] preds) { return GlrmMojoModel.impute_data(tmp, preds, _output._nnums, _output._ncats, _output._permutation, _reverse_transform, _output._normMul, _output._normSub, _output._lossFunc, _output._archetypes_raw._transposed, _output._archetypes_raw._archetypes, _output._catOffsets, _output._archetypes_raw._numLevels); } } @Override protected double[] score0(double[] data, double[] preds) { throw H2O.unimpl(); } public ModelMetricsGLRM scoreMetricsOnly(Frame frame) { if (frame == null) return null; int ncols = _output._names.length; // Need [A,X] where A = adapted test frame, X = loading frame // Note: A is adapted to original training frame Frame adaptedFr = new Frame(frame); adaptTestForTrain(adaptedFr, true, false); assert ncols == adaptedFr.numCols(); // Append loading frame X for calculating XY Frame fullFrm = new Frame(adaptedFr); Value tempV = DKV.get(gen_representation_key(frame)); if (tempV == null) tempV = DKV.get(_output._representation_key); Frame loadingFrm = tempV.get(); fullFrm.add(loadingFrm); GLRMScore gs = new GLRMScore(ncols, _parms._k, false).doAll(fullFrm); ModelMetrics mm = gs._mb.makeModelMetrics(GLRMModel.this, frame, null, null); // save error metrics based on imputed data return (ModelMetricsGLRM) mm; } @Override public ModelMetricsGLRM.GlrmModelMetricsBuilder makeMetricBuilder(String[] domain) { return new ModelMetricsGLRM.GlrmModelMetricsBuilder(_parms._k, _output._permutation, _parms._impute_original); } }
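// --------------------------------------------------------------------------------------------------------------------
// Editor's sketch (not part of H2O): a minimal, self-contained illustration of the low-rank reconstruction described
// in the scoring comments above, i.e. A ~= X*Y where X is the n x k representation and Y the k x p archetypes.
// Plain double[][] matrices are used instead of H2O Frames; the class and method names below are hypothetical.
class GlrmReconstructionSketch {
  /** Multiplies X (n x k) by Y (k x p) to rebuild an n x p approximation of the original data A. */
  static double[][] reconstruct(double[][] x, double[][] y) {
    int n = x.length, k = y.length, p = y[0].length;
    double[][] a = new double[n][p];
    for (int i = 0; i < n; i++)
      for (int j = 0; j < p; j++)
        for (int l = 0; l < k; l++)
          a[i][j] += x[i][l] * y[l][j];
    return a;
  }
}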
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/glrm/GlrmMojoWriter.java
package hex.glrm; import hex.ModelMojoWriter; import hex.genmodel.algos.glrm.GlrmLoss; import java.io.IOException; import java.nio.ByteBuffer; /** * MOJO serializer for GLRM model. */ public class GlrmMojoWriter extends ModelMojoWriter<GLRMModel, GLRMModel.GLRMParameters, GLRMModel.GLRMOutput> { @SuppressWarnings("unused") // Called through reflection in ModelBuildersHandler public GlrmMojoWriter() {} public GlrmMojoWriter(GLRMModel model) { super(model); } @Override public String mojoVersion() { return "1.10"; } @Override protected void writeModelData() throws IOException { writekv("initialization", model._parms._init); writekv("regularizationX", model._parms._regularization_x); writekv("regularizationY", model._parms._regularization_y); writekv("gammaX", model._parms._gamma_x); writekv("gammaY", model._parms._gamma_y); writekv("ncolX", model._parms._k); writekv("seed", model._parms._seed); // store seed for later use writekv("reverse_transform", model._parms._impute_original); // DataInfo mapping writekv("cols_permutation", model._output._permutation); writekv("num_categories", model._output._ncats); writekv("num_numeric", model._output._nnums); writekv("norm_sub", model._output._normSub); writekv("norm_mul", model._output._normMul); writekv("transposed", model._output._archetypes_raw._transposed); // Loss functions writekv("ncolA", model._output._lossFunc.length); startWritingTextFile("losses"); for (GlrmLoss loss : model._output._lossFunc) { writeln(loss.toString()); } finishWritingTextFile(); // Archetypes GLRM.Archetypes arch = model._output._archetypes_raw; writekv("ncolY", arch.nfeatures()); writekv("nrowY", arch.rank()); writekv("num_levels_per_category", arch._numLevels); writekv("catOffsets", arch._catOffsets); int n = arch.rank() * arch.nfeatures(); ByteBuffer bb = ByteBuffer.wrap(new byte[n * 8]); for (double[] row : arch.getY(false)) for (double val : row) bb.putDouble(val); writeblob("archetypes", bb.array()); } }
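// Editor's sketch (not part of H2O): the "archetypes" blob written above is simply the rank-by-feature matrix laid
// out row-major as 8-byte doubles. The hypothetical helpers below show that packing and the corresponding unpacking
// a MOJO reader would perform; they rely only on java.nio.ByteBuffer, which this file already imports.
class ArchetypeBlobSketch {
  static byte[] pack(double[][] archetypes) {
    int rows = archetypes.length, cols = archetypes[0].length;
    ByteBuffer bb = ByteBuffer.wrap(new byte[rows * cols * 8]);   // 8 bytes per double, row-major order
    for (double[] row : archetypes)
      for (double v : row)
        bb.putDouble(v);
    return bb.array();
  }
  static double[][] unpack(byte[] blob, int rows, int cols) {
    ByteBuffer bb = ByteBuffer.wrap(blob);
    double[][] y = new double[rows][cols];
    for (int i = 0; i < rows; i++)
      for (int j = 0; j < cols; j++)
        y[i][j] = bb.getDouble();
    return y;
  }
}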
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/glrm/ModelMetricsGLRM.java
package hex.glrm; import hex.CustomMetric; import hex.Model; import hex.ModelMetrics; import hex.ModelMetricsUnsupervised; import water.fvec.Frame; public class ModelMetricsGLRM extends ModelMetricsUnsupervised { public double _numerr; public double _caterr; public long _numcnt; public long _catcnt; public ModelMetricsGLRM(Model model, Frame frame, double numerr, double caterr, CustomMetric customMetric) { super(model, frame, 0, Double.NaN, customMetric); _numerr = numerr; _caterr = caterr; } public ModelMetricsGLRM(Model model, Frame frame, double numerr, double caterr, long numcnt, long catcnt, CustomMetric customMetric) { this(model, frame, numerr, caterr, customMetric); _numcnt = numcnt; _catcnt = catcnt; } public static class GlrmModelMetricsBuilder extends MetricBuilderUnsupervised<GlrmModelMetricsBuilder> { public double _miscls; // Number of misclassified categorical values public long _numcnt; // Number of observed numeric entries public long _catcnt; // Number of observed categorical entries public int[] _permutation; // Permutation array for shuffling cols public boolean _impute_original; public GlrmModelMetricsBuilder(int dims, int[] permutation) { this(dims, permutation, false); } public GlrmModelMetricsBuilder(int dims, int[] permutation, boolean impute_original) { _work = new double[dims]; _miscls = _numcnt = _catcnt = 0; _permutation = permutation; _impute_original = impute_original; } @Override public double[] perRow(double[] preds, float[] dataRow, Model m) { assert m instanceof GLRMModel; GLRMModel gm = (GLRMModel) m; assert gm._output._ncats + gm._output._nnums == dataRow.length; int ncats = gm._output._ncats; double[] sub = gm._output._normSub; double[] mul = gm._output._normMul; // Permute cols so categorical before numeric since error metric different for (int i = 0; i < ncats; i++) { int idx = _permutation[i]; if (Double.isNaN(dataRow[idx])) continue; if (dataRow[idx] != preds[idx]) _miscls++; _catcnt++; } int c = 0; for (int i = ncats; i < dataRow.length; i++) { int idx = _permutation[i]; if (Double.isNaN(dataRow[idx])) { c++; continue; } double diff = (_impute_original ? dataRow[idx] : (dataRow[idx] - sub[c]) * mul[c]) - preds[idx]; _sumsqe += diff * diff; _numcnt++; c++; } assert c == gm._output._nnums; return preds; } @Override public void reduce(GlrmModelMetricsBuilder mm) { super.reduce(mm); _miscls += mm._miscls; _numcnt += mm._numcnt; _catcnt += mm._catcnt; } @Override public ModelMetrics makeModelMetrics(Model m, Frame f) { // double numerr = _numcnt > 0 ? _sumsqe / _numcnt : Double.NaN; // double caterr = _catcnt > 0 ? _miscls / _catcnt : Double.NaN; // return m._output.addModelMetrics(new ModelMetricsGLRM(m, f, numerr, caterr)); return m.addModelMetrics(new ModelMetricsGLRM(m, f, _sumsqe, _miscls, _numcnt, _catcnt, _customMetric)); } } }
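// Editor's sketch (not part of H2O): how the raw accumulators above relate to the reported errors. The builder sums
// squared numeric reconstruction error (_sumsqe) and the count of miscoded categorical entries (_miscls); as the
// commented-out code in makeModelMetrics indicates, the per-entry errors are the averages over observed entries.
// The helper names below are hypothetical.
class GlrmErrorSketch {
  static double numericError(double sumSquaredError, long numCount) {
    return numCount > 0 ? sumSquaredError / numCount : Double.NaN;   // mean squared error over observed numeric cells
  }
  static double categoricalError(double misclassified, long catCount) {
    return catCount > 0 ? misclassified / catCount : Double.NaN;     // misclassification rate over observed categorical cells
  }
}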
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/gram/Gram.java
package hex.gram; import hex.DataInfo; import hex.FrameTask2; import jsr166y.ForkJoinTask; import jsr166y.RecursiveAction; import water.*; import water.fvec.Chunk; import water.util.ArrayUtils; import java.util.ArrayList; import java.util.Arrays; public final class Gram extends Iced<Gram> { boolean _hasIntercept; public double[][] _xx; public double[] _diag; public double[][] _frame2DProduce; // store result of transpose(Aframe)*eigenvector2Darray public int _diagN; final int _denseN; int _fullN; final static int MIN_TSKSZ=10000; private static class XXCache { public final boolean lowerDiag; public final boolean icptFirst; public final double [][] xx; public XXCache(double [][] xx, boolean lowerDiag, boolean icptFirst){ this.xx = xx; this.lowerDiag = lowerDiag; this.icptFirst = icptFirst; } public boolean match(boolean lowerDiag, boolean icptFirst){ return this.lowerDiag == lowerDiag && this.icptFirst == icptFirst; } } public transient XXCache _xxCache; public Gram(DataInfo dinfo) { this(dinfo.fullN(), dinfo.largestCat(), dinfo.numNums(), dinfo._cats,true); } public Gram(int N, int diag, int dense, int sparse, boolean hasIntercept) { _hasIntercept = hasIntercept; _fullN = N + (_hasIntercept?1:0); _xx = new double[_fullN - diag][]; _diag = MemoryManager.malloc8d(_diagN = diag); _denseN = dense; for( int i = 0; i < (_fullN - _diagN); ++i ) _xx[i] = MemoryManager.malloc8d(diag + i + 1); } public Gram(double[][] xxCacheNew) { _xx = xxCacheNew; _xxCache = new XXCache(xxCacheNew,false,false); _denseN = xxCacheNew.length; _fullN = xxCacheNew.length; } public void dropIntercept(){ if(!_hasIntercept) throw new IllegalArgumentException("Has no intercept"); double [][] xx = new double[_xx.length-1][]; for(int i = 0; i < xx.length; ++i) xx[i] = _xx[i]; _xx = xx; _hasIntercept = false; --_fullN; } public Gram deep_clone(){ Gram res = clone(); if(_xx != null) res._xx = ArrayUtils.deepClone(_xx); if(_diag != null) res._diag = res._diag.clone(); return res; } public final int fullN(){return _fullN;} public double _diagAdded; public void addDiag(double [] ds) { int i = 0; for(;i < Math.min(_diagN,ds.length); ++i) _diag[i] += ds[i]; for(;i < ds.length; ++i) _xx[i-_diagN][i] += ds[i]; } /** * Add the effect of gam column smoothness factor. 
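 * The scaled penalty entries (2 * ds) are added into the lower triangle of the Gram matrix for every pair of
 * coefficients belonging to the same GAM column. When activeColumns is provided, the indices in gamIndices are first
 * translated to the active-column space and pairs whose columns were dropped are skipped.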
* @param activeColumns: store active columns * @param ds: store penalty matrix per column * @param gamIndices: penalty column indices taken into account categorical column offset */ public void addGAMPenalty(Integer[] activeColumns, double[][][] ds, int[][] gamIndices) { int numGamCols = gamIndices.length; for (int gamInd = 0; gamInd < numGamCols; gamInd++) { // deal with each GAM column separately int numKnots = gamIndices[gamInd].length; for (int betaInd = 0; betaInd < numKnots; betaInd++) { Integer betaIndex = gamIndices[gamInd][betaInd]; // for multinomial if (activeColumns != null) { // column indices in gamIndices need to be translated due to columns deleted betaIndex = Arrays.binarySearch(activeColumns, betaIndex); if (betaIndex < 0) continue; } for (int betaIndj = 0; betaIndj <= betaInd; betaIndj++) { Integer betaIndexJ = gamIndices[gamInd][betaIndj]; if (activeColumns != null) { // column indices in gamIndices need to be translated due to columns deleted betaIndexJ = Arrays.binarySearch(activeColumns, betaIndexJ); if (betaIndexJ < 0) continue; } int rowLen = _xx[betaIndex - _diagN].length; if (betaIndexJ < rowLen) { // only update the lower half, symmetric _xx[betaIndex - _diagN][betaIndexJ] += 2 * ds[gamInd][betaInd][betaIndj]; } } } } } public void addGAMPenalty(double[][][] ds, int[][] gamIndices, double[][] xmatrix) { int numGamCols = gamIndices.length; int numKnots; int betaIndex, betaIndexJ; for (int gamInd=0; gamInd<numGamCols; gamInd++) { numKnots = gamIndices[gamInd].length; for (int betaInd=0; betaInd<numKnots; betaInd++) { betaIndex = gamIndices[gamInd][betaInd]; for (int betaIndj=0; betaIndj<= betaInd; betaIndj++) { betaIndexJ = gamIndices[gamInd][betaIndj]; xmatrix[betaIndex][betaIndexJ] += 2*ds[gamInd][betaInd][betaIndj]; } } } } public double get(int i, int j) { if(j > i) { int k = i; i = j; j = k; // throw new IllegalArgumentException("Gram stored as lower diagnoal matrix, j must be < i"); } if(i < _diagN) return(j == i)?_diag[i]:0; return _xx[i-_diagN][j]; } public void addDiag(double d) {addDiag(d,false);} public void addDiag(double d, boolean add2Intercept) { _diagAdded += d; for( int i = 0; i < _diag.length; ++i ) _diag[i] += d; int ii = (!_hasIntercept || add2Intercept)?0:1; for( int i = 0; i < _xx.length - ii; ++i ) _xx[i][_xx[i].length - 1] += d; } public double sparseness(){ double [][] xx = getXX(); double nzs = 0; for(int i = 0; i < xx.length; ++i) for(int j = 0; j < xx[i].length; ++j) if(xx[i][j] != 0) nzs += 1; return nzs/(xx.length*xx.length); } public double diagSum(){ double res = 0; if(_diag != null){ for(double d:_diag) res += d; } if(_xx != null){ for(double [] x:_xx)res += x[x.length-1]; } return res; } private static double r2_eps = 1e-7; private static final int MIN_PAR = 1000; private final void updateZij(int i, int j, double [][] Z, double [] gamma) { double [] Zi = Z[i]; double Zij = Zi[j]; for (int k = 0; k < j; ++k) Zij -= gamma[k] * Zi[k]; Zi[j] = Zij; } private final void updateZ(final double [] gamma, final double [][] Z, int j){ for (int i = j + 1; i < Z.length; ++i) // update xj to zj // updateZij(i,j,Z,gamma); } /** * Compute Cholesky decompostion by computing partial QR decomposition (R == LU). * * The advantage of this method over the standard solve is that it can deal with Non-SPD matrices. * Gram matrix comes out as Non-SPD if we have collinear columns. * QR decomposition can identify collinear (redundant) columns and remove them from the dataset. 
* * QR computation: * QR is computed using Gram-Schmidt elimination, using Gram matrix instead of the underlying dataset. * * Gram-schmidt decomposition can be computed as follows: (from "The Eelements of Statistical Learning", Algorithm 3.1) * 1. set z0 = x0 = 1 (Intercept) * 2. for j = 1:p * for l = 0:j-1 * gamma_jl = dot(x_l,x_j)/dot(x_l,x_l) * zj = xj - sum(gamma_j[l]*x_l) * if(zj ~= 0) xj was redundant (collinear) * Zjs are orthogonal projections of xk and form base of the X space. (dot(z_i,z_j) == 0 for i != j) * In the end, gammas contain (Scaled) R from the QR decomp which is == LU from cholesky decomp. * * * Note that all of these operations can be be computed from the Gram matrix only, as gram matrix contains * dot(x_i,x_j) for i = 1..N, j = 1..N * * We can obviously compute gamma_lk directly, instead of replacing xk with zk, we fix the gram matrix. * When doing that, we need to replace dot(xi,xk) with dot(xi,zk) for all i. * There are two cases, * 1) dot(xk,xk) -> dot(zk,zk) * dot(xk - sum(gamma_l*x_l),xk - sum(gamma_l*x_l)) * = dot(xk,xk) - 2* sum(gamma_l*dot(x_i,x_k) + sum(gamma_l*sum(gamma_k*dot(x_l,x_k))) * (can be simplified using the fact that dot(zi,zj) == 0 for i != j * 2) dot(xi,xk) -> dot(xi,zk) for i != k * dot(xi, xj - sum(gamma_l*x_l)) * = dot(xi, xj) - dot(xi,sum(gamma_l*x_l)) * = dot(xi,xj) - sum(gamma_l*dot(xi,x_l)) (linear combination * * The algorithm then goes as follows: * for j = 1:n * for l = 1:j-1 * compute gamma_jl * update gram by replacing xk with zk = xk- sum(gamma_jl*s*xl); * * @param dropped_cols - empty list which will be filled with collinear columns removed during computation * @return Cholesky - cholesky decomposition fo the gram */ public Cholesky qrCholesky(ArrayList<Integer> dropped_cols, boolean standardized) { final double [][] Z = getXX(true,true); final double [][] R = new double[Z.length][]; final double [] Zdiag = new double[Z.length]; final double [] ZdiagInv = new double[Z.length]; for(int i = 0; i < Z.length; ++i) ZdiagInv[i] = 1.0/(Zdiag[i] = Z[i][i]); for(int j = 0; j < Z.length; ++j) { final double [] gamma = R[j] = new double[j+1]; for(int l = 0; l <= j; ++l) // compute gamma_l_j gamma[l] = Z[j][l]*ZdiagInv[l]; double zjj = Z[j][j]; for(int k = 0; k < j; ++k) // only need the diagonal, the rest is 0 (dot product of orthogonal vectors) zjj += gamma[k] * (gamma[k] * Z[k][k] - 2*Z[j][k]); // Check R^2 for the current column and ignore if too high (1-R^2 too low), R^2 = 1- rs_res/rs_tot // rs_res = zjj (the squared residual) // rs_tot = sum((yi - mean(y))^2) = mean(y^2) - mean(y)^2, // mean(y^2) is on diagonal // mean(y) is in the intercept (0 if standardized) // might not be regularized with number of observations, that's why dividing by intercept diagonal double rs_tot = standardized ?ZdiagInv[j] :1.0/(Zdiag[j]-Z[j][0]*ZdiagInv[0]*Z[j][0]); if(j > 0 && zjj*rs_tot < r2_eps) { // collinear column, drop it! 
zjj = 0; dropped_cols.add(j-1); ZdiagInv[j] = 0; } else ZdiagInv[j] = 1./zjj; Z[j][j] = zjj; int jchunk = Math.max(1,MIN_PAR/(Z.length-j)); int nchunks = (Z.length - j - 1)/jchunk; nchunks = Math.min(nchunks,H2O.NUMCPUS); if(nchunks <= 1) { // single trheaded update updateZ(gamma,Z,j); } else { // multi-threaded update final int fjchunk = (Z.length - 1 - j)/nchunks; int rem = Z.length - 1 - j - fjchunk*nchunks; for(int i = Z.length-rem; i < Z.length; ++i) updateZij(i,j,Z,gamma); RecursiveAction[] ras = new RecursiveAction[nchunks]; final int fj = j; int k = 0; for (int i = j + 1; i < Z.length-rem; i += fjchunk) { // update xj to zj // final int fi = i; ras[k++] = new RecursiveAction() { @Override protected final void compute() { int max_i = Math.min(fi+fjchunk,Z.length); for(int i = fi; i < max_i; ++i) updateZij(i,fj,Z,gamma); } }; } ForkJoinTask.invokeAll(ras); } } // update the R - we computed Rt/sqrt(diag(Z)) which we can directly use to solve the problem if(R.length < 500) for(int i = 0; i < R.length; ++i) for (int j = 0; j <= i; ++j) R[i][j] *= Math.sqrt(Z[j][j]); else { RecursiveAction [] ras = new RecursiveAction[R.length]; for(int i = 0; i < ras.length; ++i) { final int fi = i; final double [] Rrow = R[i]; ras[i] = new RecursiveAction() { @Override protected void compute() { for (int j = 0; j <= fi; ++j) Rrow[j] *= Math.sqrt(Z[j][j]); } }; } ForkJoinTask.invokeAll(ras); } // drop the ignored cols if(dropped_cols.isEmpty()) return new Cholesky(R,new double[0], true); double [][] Rnew = new double[R.length-dropped_cols.size()][]; for(int i = 0; i < Rnew.length; ++i) Rnew[i] = new double[i+1]; int j = 0; for(int i = 0; i < R.length; ++i) { if(Z[i][i] == 0) continue; int k = 0; for(int l = 0; l <= i; ++l) { if(k < dropped_cols.size() && l == (dropped_cols.get(k)+1)) { ++k; continue; } Rnew[j][l - k] = R[i][l]; } ++j; } return new Cholesky(Rnew,new double[0], true); } public void dropCols(int[] cols) { int diagCols = 0; for(int i =0; i < cols.length; ++i) if(cols[i] < _diagN) ++diagCols; else break; int j = 0; if(diagCols > 0) { double [] diag = MemoryManager.malloc8d(_diagN - diagCols); int k = 0; for(int i = 0; i < _diagN; ++i) if (j < cols.length && cols[j] == i) { ++j; } else diag[k++] = _diag[i]; _diag = diag; } double [][] xxNew = new double[_xx.length-cols.length+diagCols][]; int iNew = 0; for(int i = 0; i < _xx.length; ++i) { if(j < cols.length && (_diagN + i) == cols[j]){ ++j; continue; } if(j == 0) { xxNew[iNew++] = _xx[i]; continue; } int l = 0,m = 0; double [] x = MemoryManager.malloc8d(_xx[i].length-j); for(int k = 0; k < _xx[i].length; ++k) if(l < cols.length && k == cols[l]) { ++l; } else x[m++] = _xx[i][k]; xxNew[iNew++] = x; } _xx = xxNew; _diagN = _diag.length; _fullN = _xx[_xx.length-1].length; } public int[] findZeroCols(){ ArrayList<Integer> zeros = new ArrayList<>(); if(_diag != null) for(int i = 0; i < _diag.length; ++i) if(_diag[i] == 0)zeros.add(i); for(int i = 0; i < _xx.length; ++i) if(_xx[i][_xx[i].length-1] == 0) zeros.add(_xx[i].length-1); if(zeros.size() == 0) return new int[0]; int [] ary = new int[zeros.size()]; for(int i = 0; i < zeros.size(); ++i) ary[i] = zeros.get(i); return ary; } public String toString(){ if(_fullN >= 1000) return "Gram(" + _fullN + ")"; else return ArrayUtils.pprint(getXX(true,false)); } static public class InPlaceCholesky { final double _xx[][]; // Lower triangle of the symmetric matrix. 
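// Set by the factorization (decompose_2): stays true only if every pivot is strictly positive, i.e. the input was symmetric positive definite.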
private boolean _isSPD; private InPlaceCholesky(double xx[][], boolean isspd) { _xx = xx; _isSPD = isspd; } static private class BlockTask extends RecursiveAction { final double[][] _xx; final int _i0, _i1, _j0, _j1; public BlockTask(double xx[][], int ifr, int ito, int jfr, int jto) { _xx = xx; _i0 = ifr; _i1 = ito; _j0 = jfr; _j1 = jto; } @Override public void compute() { for (int i=_i0; i < _i1; i++) { double rowi[] = _xx[i]; for (int k=_j0; k < _j1; k++) { double rowk[] = _xx[k]; double s = 0.0; for (int jj = 0; jj < k; jj++) s += rowk[jj]*rowi[jj]; rowi[k] = (rowi[k] - s) / rowk[k]; } } } } public static InPlaceCholesky decompose_2(double xx[][], int STEP, int P) { boolean isspd = true; final int N = xx.length; P = Math.max(1, P); for (int j=0; j < N; j+=STEP) { // update the upper left triangle. final int tjR = Math.min(j+STEP, N); for (int i=j; i < tjR; i++) { double rowi[] = xx[i]; double d = 0.0; for (int k=j; k < i; k++) { double rowk[] = xx[k]; double s = 0.0; for (int jj = 0; jj < k; jj++) s += rowk[jj]*rowi[jj]; rowi[k] = s = (rowi[k] - s) / rowk[k]; d += s*s; } for (int jj = 0; jj < j; jj++) { double s = rowi[jj]; d += s*s; } d = rowi[i] - d; isspd = isspd && (d > 0.0); rowi[i] = Math.sqrt(Math.max(0.0, d)); } if (tjR == N) break; // update the lower strip int i = tjR; Futures fs = new Futures(); int rpb = 0; // rows per block int p = P; // concurrency while ( tjR*(rpb=(N - tjR)/p)<Gram.MIN_TSKSZ && p>1) --p; while (p-- > 1) { fs.add(new BlockTask(xx,i,i+rpb,j,tjR).fork()); i += rpb; } new BlockTask(xx,i,N,j,tjR).compute(); fs.blockForPending(); } return new InPlaceCholesky(xx, isspd); } public double[][] getL() { return _xx; } public boolean isSPD() { return _isSPD; } } public Cholesky cholesky(Cholesky chol) { return cholesky(chol,true,""); } /** * Compute the Cholesky decomposition. * * In case our gram starts with diagonal submatrix of dimension N, we exploit this fact to reduce the complexity of the problem. * We use the standard decomposition of the Cholesky factorization into submatrices. * * We split the Gram into 3 regions (4 but we only consider lower diagonal, sparse means diagonal region in this context): * diagonal * diagonal*dense * dense*dense * Then we can solve the Cholesky in 3 steps: * 1. We solve the diagonal part right away (just do the sqrt of the elements). * 2. The diagonal*dense part is simply divided by the sqrt of diagonal. * 3. 
Compute Cholesky of dense*dense - outer product of Cholesky of diagonal*dense computed in previous step * * @param chol * @return the Cholesky decomposition */ public Cholesky cholesky(Cholesky chol, boolean parallelize,String id) { long start = System.currentTimeMillis(); if( chol == null ) { double[][] xx = _xx.clone(); for( int i = 0; i < xx.length; ++i ) xx[i] = xx[i].clone(); chol = new Cholesky(xx, _diag.clone()); } final Cholesky fchol = chol; final int sparseN = _diag.length; final int denseN = _fullN - sparseN; // compute the cholesky of the diagonal and diagonal*dense parts if( _diag != null ) for( int i = 0; i < sparseN; ++i ) { double d = 1.0 / (chol._diag[i] = Math.sqrt(_diag[i])); for( int j = 0; j < denseN; ++j ) chol._xx[j][i] = d*_xx[j][i]; } ForkJoinTask [] fjts = new ForkJoinTask[denseN]; // compute the outer product of diagonal*dense //Log.info("SPARSEN = " + sparseN + " DENSEN = " + denseN); final int[][] nz = new int[denseN][]; for( int i = 0; i < denseN; ++i ) { final int fi = i; fjts[i] = new RecursiveAction() { @Override protected void compute() { int[] tmp = new int[sparseN]; double[] rowi = fchol._xx[fi]; int n = 0; for( int k = 0; k < sparseN; ++k ) if (rowi[k] != .0) tmp[n++] = k; nz[fi] = Arrays.copyOf(tmp, n); } }; } ForkJoinTask.invokeAll(fjts); for( int i = 0; i < denseN; ++i ) { final int fi = i; fjts[i] = new RecursiveAction() { @Override protected void compute() { double[] rowi = fchol._xx[fi]; int[] nzi = nz[fi]; for( int j = 0; j <= fi; ++j ) { double[] rowj = fchol._xx[j]; int[] nzj = nz[j]; double s = 0; for (int t=0,z=0; t < nzi.length && z < nzj.length; ) { int k1 = nzi[t]; int k2 = nzj[z]; if (k1 < k2) { t++; continue; } else if (k1 > k2) { z++; continue; } else { s += rowi[k1] * rowj[k1]; t++; z++; } } rowi[j + sparseN] = _xx[fi][j + sparseN] - s; } } }; } ForkJoinTask.invokeAll(fjts); // compute the cholesky of dense*dense-outer_product(diagonal*dense) double[][] arr = new double[denseN][]; for( int i = 0; i < arr.length; ++i ) arr[i] = Arrays.copyOfRange(fchol._xx[i], sparseN, sparseN + denseN); final int p = H2ORuntime.availableProcessors(); InPlaceCholesky d = InPlaceCholesky.decompose_2(arr, 10, p); fchol.setSPD(d.isSPD()); arr = d.getL(); for( int i = 0; i < arr.length; ++i ) { // See PUBDEV-5585: we use a manual array copy instead of System.arraycopy because of behavior on Java 10 // Used to be: System.arraycopy(arr[i], 0, fchol._xx[i], sparseN, i + 1); for (int j = 0; j < i + 1; j++) fchol._xx[i][sparseN + j] = arr[i][j]; } return chol; } public double[][] getXX(){return getXX(false, false);} public double[][] getXX(boolean lowerDiag, boolean icptFist) { if(_xxCache != null && _xxCache.match(lowerDiag,icptFist)) return _xxCache.xx; final int N = _fullN; double[][] xx = new double[N][]; for( int i = 0; i < N; ++i ) xx[i] = MemoryManager.malloc8d(lowerDiag?i+1:N); return getXX(xx, lowerDiag, icptFist); } public double[][]getXX(double[][] xalloc) { return getXX(xalloc,false, false);} public double[][] getXX(double[][] xalloc, boolean lowerDiag, boolean icptFist) { final int N = _fullN; double[][] xx = xalloc; int off = 0; if(_hasIntercept && icptFist) { double [] icptRow = _xx[_xx.length-1]; xx[0][0] = icptRow[icptRow.length-1]; for(int i = 0; i < icptRow.length-1; ++i) xx[i+1][0] = icptRow[i]; off = 1; } for( int i = 0; i < _diag.length; ++i ) { xx[i + off][i + off] = _diag[i]; if(!lowerDiag) { int col = i+off; double [] xrow = xx[i+off]; for (int j = off; j < _xx.length; ++j) xrow[j+_diagN] = _xx[j][col]; } } for( int i = 0; i < _xx.length 
- off; ++i ) { double [] xrow = xx[i+_diag.length + off]; double [] xrowOld = _xx[i]; System.arraycopy(xrowOld,0,xrow,off,xrowOld.length); if(!lowerDiag) { int col = xrowOld.length-1; int row = i+1; for (int j = col+1; j < xrow.length; ++j) xrow[j] = _xx[row++][col]; } } _xxCache = new XXCache(xx,lowerDiag,icptFist); return xx; } /** * This method will copy the xx matrix into a matrix xalloc which is of bigger size than the actual xx by 1 in both * row and column. */ public double[][] getXXCPM(double[][] xalloc, boolean lowerDiag, boolean icptFirst) { double[][] xx = xalloc; int off = 0; if(_hasIntercept && icptFirst) { double [] icptRow = _xx[_xx.length-1]; xx[0][0] = icptRow[icptRow.length-1]; for(int i = 0; i < icptRow.length-1; ++i) xx[i+1][0] = icptRow[i]; off = 1; } for( int i = 0; i < _diag.length; ++i ) { xx[i + off][i + off] = _diag[i]; if(!lowerDiag) { int col = i+off; double [] xrow = xx[i+off]; for (int j = off; j < _xx.length; ++j) xrow[j+_diagN] = _xx[j][col]; } } for( int i = 0; i < _xx.length - off; ++i ) { double [] xrow = xx[i+_diag.length + off]; double [] xrowOld = _xx[i]; System.arraycopy(xrowOld,0,xrow,off,xrowOld.length); if(!lowerDiag) { int col = xrowOld.length-1; int row = i+1; int xrowLen = xrow.length-1; for (int j = col+1; j < xrowLen; ++j) xrow[j] = _xx[row++][col]; } } return xx; } public void add(Gram grm) { ArrayUtils.add(_xx,grm._xx); ArrayUtils.add(_diag,grm._diag); } public final boolean hasNaNsOrInfs() { for( int i = 0; i < _xx.length; ++i ) for( int j = 0; j < _xx[i].length; ++j ) if( Double.isInfinite(_xx[i][j]) || Double.isNaN(_xx[i][j]) ) return true; for( double d : _diag ) if( Double.isInfinite(d) || Double.isNaN(d) ) return true; return false; } public static final class Cholesky { public final double[][] _xx; protected final double[] _diag; private boolean _isSPD; private boolean _icptFirst; public Cholesky(double[][] xx, double[] diag) { _xx = xx; _diag = diag; _icptFirst = false; } public Cholesky(double[][] xx, double[] diag, boolean icptFirst) { _xx = xx; _diag = diag; _icptFirst = icptFirst; _isSPD = true; } public void solve(final double [][] ys){ RecursiveAction [] ras = new RecursiveAction[ys.length]; for(int i = 0; i < ras.length; ++i) { final int fi = i; ras[i] = new RecursiveAction() { @Override protected void compute() { ys[fi][fi] = 1; solve(ys[fi]); } }; } ForkJoinTask.invokeAll(ras); } public double [][] getInv(){ double [][] res = new double[_xx[_xx.length-1].length][_xx[_xx.length-1].length]; for(int i = 0; i < res.length; ++i) res[i][i] = 1; solve(res); return res; } public double [] getInvDiag(){ final double [] res = new double[_xx.length + _diag.length]; RecursiveAction [] ras = new RecursiveAction[res.length]; for(int i = 0; i < ras.length; ++i) { final int fi = i; ras[i] = new RecursiveAction() { @Override protected void compute() { double [] tmp = new double[res.length]; tmp[fi] = 1; solve(tmp); res[fi] = tmp[fi]; } }; } ForkJoinTask.invokeAll(ras); return res; } public double[][] getL() { final int N = _xx.length+_diag.length; double[][] xx = new double[N][]; for( int i = 0; i < N; ++i ) xx[i] = MemoryManager.malloc8d(N); for( int i = 0; i < _diag.length; ++i ) xx[i][i] = _diag[i]; for( int i = 0; i < _xx.length; ++i ) { for( int j = 0; j < _xx[i].length; ++j ) { xx[i + _diag.length][j] = _xx[i][j]; } } return xx; } /** * Find solution to A*x = y. * * Result is stored in the y input vector. May throw NonSPDMatrix exception in case Gram is not * positive definite. 
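 *
 * A minimal usage sketch (illustrative only; {@code gram} and {@code xty} are hypothetical variables, not part of
 * this class):
 * <pre>{@code
 *   Gram.Cholesky chol = gram.cholesky(null);  // factor the Gram matrix t(X)*X
 *   double[] beta = xty.clone();               // right-hand side t(X)*y
 *   chol.solve(beta);                          // beta now solves (t(X)*X) * beta = t(X)*y, in place
 * }</pre>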
* * @param y */ public final void solve(double[] y) { if( !isSPD() ) throw new NonSPDMatrixException(); if(_icptFirst) { double icpt = y[y.length-1]; for(int i = y.length-1; i > 0; --i) y[i] = y[i-1]; y[0] = icpt; } // diagonal for( int k = 0; k < _diag.length; ++k ) y[k] /= _diag[k]; // rest final int n = _xx.length == 0?0:_xx[_xx.length-1].length; // Solve L*Y = B; for( int k = _diag.length; k < n; ++k ) { double d = 0; for( int i = 0; i < k; i++ ) d += y[i] * _xx[k - _diag.length][i]; y[k] = (y[k]-d)/_xx[k - _diag.length][k]; } // Solve L'*X = Y; for( int k = n - 1; k >= _diag.length; --k ) { y[k] /= _xx[k - _diag.length][k]; for( int i = 0; i < k; ++i ) y[i] -= y[k] * _xx[k - _diag.length][i]; } // diagonal for( int k = _diag.length - 1; k >= 0; --k ) y[k] /= _diag[k]; if(_icptFirst) { double icpt = y[0]; for(int i = 1; i < y.length; ++i) y[i-1] = y[i]; y[y.length-1] = icpt; } } public final boolean isSPD() {return _isSPD;} public final void setSPD(boolean b) {_isSPD = b;} } public final void addRowSparse(DataInfo.Row r, double w) { final int intercept = _hasIntercept?1:0; final int denseRowStart = _fullN - _denseN - _diagN - intercept; // we keep dense numbers at the right bottom of the matrix, -1 is for intercept assert _denseN + denseRowStart == _xx.length-intercept; final double [] interceptRow = _hasIntercept?_xx[_xx.length-1]:null; // nums for(int i = 0; i < r.nNums; ++i) { int cid = r.numIds[i]; final double [] mrow = _xx[cid - _diagN]; final double d = w*r.numVals[i]; for(int j = 0; j <= i; ++j) mrow[r.numIds[j]] += d*r.numVals[j]; if(_hasIntercept) interceptRow[cid] += d; // intercept*x[i] // nums * cats for(int j = 0; j < r.nBins; ++j) mrow[r.binIds[j]] += d; } if(_hasIntercept){ // intercept*intercept interceptRow[interceptRow.length-1] += w; // intercept X cat for(int j = 0; j < r.nBins; ++j) interceptRow[r.binIds[j]] += w; } final boolean hasDiag = (_diagN > 0 && r.nBins > 0 && r.binIds[0] < _diagN); // cat X cat for(int i = hasDiag?1:0; i < r.nBins; ++i){ final double [] mrow = _xx[r.binIds[i] - _diagN]; for(int j = 0; j <= i; ++j) mrow[r.binIds[j]] += w; } // DIAG if(hasDiag && r.nBins > 0) _diag[r.binIds[0]] += w; } public final void addRow(DataInfo.Row row, double w) { if(row.numIds == null) addRowDense(row,w); else addRowSparse(row, w); } public final void addRowDense(DataInfo.Row row, double w) { final int intercept = _hasIntercept?1:0; final int denseRowStart = _fullN - _denseN - _diagN - intercept; // we keep dense numbers at the right bottom of the matrix, -1 is for intercept final int denseColStart = _fullN - _denseN - intercept; assert _denseN + denseRowStart == _xx.length-intercept; final double [] interceptRow = _hasIntercept?_xx[_denseN + denseRowStart]:null; // nums for(int i = 0; i < _denseN; ++i) if(row.numVals[i] != 0) { final double [] mrow = _xx[i+denseRowStart]; final double d = w * row.numVals[i]; for(int j = 0; j <= i; ++j) if(row.numVals[j] != 0) mrow[j+denseColStart] += d* row.numVals[j]; if(_hasIntercept) interceptRow[i+denseColStart] += d; // intercept*x[i] // nums * cats for(int j = 0; j < row.nBins; ++j) mrow[row.binIds[j]] += d; } if(_hasIntercept){ // intercept*intercept interceptRow[_denseN+denseColStart] += w; // intercept X cat for(int j = 0; j < row.nBins; ++j) interceptRow[row.binIds[j]] += w; } final boolean hasDiag = (_diagN > 0 && row.nBins > 0 && row.binIds[0] < _diagN); // cat X cat for(int i = hasDiag?1:0; i < row.nBins; ++i){ final double [] mrow = _xx[row.binIds[i] - _diagN]; for(int j = 0; j <= i; ++j) mrow[row.binIds[j]] += w; 
} // DIAG if(hasDiag) _diag[row.binIds[0]] += w; } public void mul(double x){ if(_diag != null)for(int i = 0; i < _diag.length; ++i) _diag[i] *= x; for(int i = 0; i < _xx.length; ++i) for(int j = 0; j < _xx[i].length; ++j) _xx[i][j] *= x; } public double [] mul(double [] x){ double [] res = MemoryManager.malloc8d(x.length); mul(x,res); return res; } private double [][] XX = null; /* public void mul(double [] x, double [] res){ Arrays.fill(res,0); if(XX == null) XX = getXX(false,false); for(int i = 0; i < XX.length; ++i){ double d = 0; double [] xi = XX[i]; for(int j = 0; j < XX.length; ++j) d += xi[j]*x[j]; res[i] = d; } }*/ /* This method will not allocate the extra memory and hence is considered for lowMemory systems. However, need to consider case when you have categoricals! Make them part of the matrix in the multiplication process. Done! */ public void mul(double[] x, double[] res){ int colSize = fullN(); // actual gram matrix size int offsetForCat = colSize-_xx.length; // offset for categorical columns for (int rowIndex = 0; rowIndex < colSize; rowIndex++) { double d = 0; if (rowIndex >=offsetForCat) { for (int colIndex = 0; colIndex < rowIndex; colIndex++) { // below diagonal d += _xx[rowIndex - offsetForCat][colIndex] * x[colIndex]; } } // on diagonal d+= (rowIndex>=offsetForCat)?_xx[rowIndex-offsetForCat][rowIndex]*x[rowIndex]:_diag[rowIndex]*x[rowIndex]; for (int colIndex = rowIndex+1; colIndex < colSize; colIndex++) { // above diagonal if (rowIndex<offsetForCat) { if ((colIndex>=offsetForCat)) { d += _xx[colIndex-offsetForCat][rowIndex]*x[colIndex]; } } else { d += _xx[colIndex-offsetForCat][rowIndex]*x[colIndex]; } /* d += (rowIndex<offsetForCat)?((colIndex<offsetForCat)?0:_xx[colIndex-offsetForCat][rowIndex]*x[colIndex]): _xx[colIndex-offsetForCat][rowIndex]*x[colIndex];*/ } res[rowIndex] = d; } } /** * Task to compute outer product of a matrix normalized by the number of observations (not counting rows with NAs). * in R's notation g = X%*%T(X)/nobs, nobs = number of rows of X with no NA. Copied from GramTask. * @author wendycwong */ public static class OuterGramTask extends MRTask<OuterGramTask> { public Gram _gram; public long _nobs; boolean _intercept = false; int[] _catOffsets; double _scale; // 1/(number of samples) final Key<Job> _jobKey; protected final DataInfo _dinfo; public OuterGramTask(Key<Job> jobKey, DataInfo dinfo){ _dinfo = dinfo; _jobKey = jobKey; _catOffsets = dinfo._catOffsets != null?Arrays.copyOf(dinfo._catOffsets, dinfo._catOffsets.length):null; _scale = dinfo._adaptedFrame.numRows() > 0?1.0/dinfo._adaptedFrame.numRows():0.0; } /* Need to do our own thing here since we need to access and multiple different rows of a chunck. */ @Override public void map(Chunk[] chks) { // TODO: implement the sparse option. chunkInit(); DataInfo.Row rowi = _dinfo.newDenseRow(); DataInfo.Row rowj = _dinfo.newDenseRow(); Chunk[] chks2 = new Chunk[chks.length]; // perform inner product within local chunk innerProductChunk(rowi, rowj, chks, chks); // perform inner product of local chunk with other chunks with lower chunk index for (int chkIndex = 0; chkIndex < chks[0].cidx(); chkIndex++) { for (int colIndex = 0; colIndex < chks2.length; colIndex++) { // grab the alternate chunk chks2[colIndex] = _fr.vec(colIndex).chunkForChunkIdx(chkIndex); } innerProductChunk(rowi, rowj, chks, chks2); } chunkDone(); } /* This method performs inner product operation over one chunk. 
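   Rows of the local chunk are paired with rows of the alternate chunk (which may be the local chunk itself); only
   pairs with alternate-row index <= local-row index are filled, i.e. the lower triangle of X*t(X), and rows that
   are bad or have zero weight are skipped.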
*/ public void innerProductChunk(DataInfo.Row rowi, DataInfo.Row rowj, Chunk[] localChunk, Chunk[] alterChunk) { int rowOffsetLocal = (int) localChunk[0].start(); // calculate row indices for this particular chunks of data int rowOffsetAlter = (int) alterChunk[0].start(); int localChkRows = localChunk[0]._len; int alterChkRows = alterChunk[0]._len; for (int rowL = 0; rowL < localChkRows; rowL++) { _dinfo.extractDenseRow(localChunk, rowL, rowi); if (!rowi.isBad()) { ++_nobs; int rowIOffset = rowL + rowOffsetLocal; for (int j = 0; j < alterChkRows; j++) { int rowJOffset = j+rowOffsetAlter; if (rowJOffset > rowIOffset) { // we are done with this chunk, next chunk please break; } _dinfo.extractDenseRow(alterChunk, j, rowj); //grab the row from new chunk and perform inner product of rows if ((!rowi.isBad() && rowi.weight != 0) && (!rowj.isBad() && rowj.weight != 0)) { this._gram._xx[rowIOffset][rowJOffset] = rowi.dotSame(rowj); } } } } } /* Basically, every time we get an array of chunks, we will generate certain parts of the gram matrix for only this block. */ public void chunkInit(){ _gram = new Gram((int)_dinfo._adaptedFrame.numRows(), 0, _dinfo.numNums(), _dinfo._cats, _intercept); } public void chunkDone(){ _gram.mul(_scale); } /* Since each chunk only change a certain part of the gram matrix, we can add them all together when we are doing the reduce job. Hence, this part should be left alone. */ @Override public void reduce(OuterGramTask gt) { _gram.add(gt._gram); _nobs += gt._nobs; } } /** * Task to compute gram matrix normalized by the number of observations (not counting rows with NAs). * in R's notation g = t(X)%*%X/nobs, nobs = number of rows of X with no NA. * @author tomasnykodym */ public static class GramTask extends FrameTask2<GramTask> { private boolean _std = true; public Gram _gram; public long _nobs; boolean _intercept = false; public GramTask(Key<Job> jobKey, DataInfo dinfo){ super(null,dinfo,jobKey); } public GramTask(Key<Job> jobKey, DataInfo dinfo, boolean std, boolean intercept){ super(null,dinfo,jobKey); _std = std; _intercept = intercept; } @Override public void chunkInit(){ _gram = new Gram(_dinfo.fullN(), _dinfo.largestCat(), _dinfo.numNums(), _dinfo._cats, _intercept); } double _prev = 0; @Override protected void processRow(DataInfo.Row r) { _gram.addRow(r, r.weight); ++_nobs; double current = (_gram.get(_dinfo.fullN()-1,_dinfo.fullN()-1) - _prev); _prev += current; } @Override public void chunkDone(){ if(_std) { if (_nobs > 0) { // removing NA rows may produce _nobs=0 double r = 1.0 / _nobs; _gram.mul(r); } } } @Override public void reduce(GramTask gt){ if(_std) { if ((_nobs > 0) && (gt._nobs > 0)) { // removing NA rows may produce _nobs=0 double r1 = (double) _nobs / (_nobs + gt._nobs); _gram.mul(r1); double r2 = (double) gt._nobs / (_nobs + gt._nobs); gt._gram.mul(r2); } } _gram.add(gt._gram); _nobs += gt._nobs; } } public static class NonSPDMatrixException extends RuntimeException { public NonSPDMatrixException(){} public NonSPDMatrixException(String msg){super(msg);} } public static class CollinearColumnsException extends RuntimeException { public CollinearColumnsException(){} public CollinearColumnsException(String msg){super(msg);} } }
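// --------------------------------------------------------------------------------------------------------------------
// Editor's sketch (not part of H2O): a self-contained illustration of the two ideas documented in this file:
// (1) the Gram matrix is t(X)*X accumulated observation by observation (a simplified, unweighted version of what
// addRow/addRowDense do), and (2) the normal equations are solved through a Cholesky factorization G = L*t(L)
// followed by forward and backward substitution, which is what Cholesky.solve performs. Names are hypothetical and
// the dense factorization below assumes G is symmetric positive definite.
class GramCholeskySketch {
  /** Accumulates G = t(X)*X one row at a time, filling the lower triangle and mirroring it afterwards. */
  static double[][] gram(double[][] x) {
    int p = x[0].length;
    double[][] g = new double[p][p];
    for (double[] row : x)
      for (int i = 0; i < p; i++)
        for (int j = 0; j <= i; j++)
          g[i][j] += row[i] * row[j];
    for (int i = 0; i < p; i++)            // mirror to the upper triangle for the dense factorization below
      for (int j = 0; j < i; j++)
        g[j][i] = g[i][j];
    return g;
  }

  /** Plain dense Cholesky: returns lower-triangular L with G = L*t(L). */
  static double[][] cholesky(double[][] g) {
    int n = g.length;
    double[][] l = new double[n][n];
    for (int i = 0; i < n; i++) {
      for (int j = 0; j <= i; j++) {
        double s = g[i][j];
        for (int k = 0; k < j; k++) s -= l[i][k] * l[j][k];
        l[i][j] = (i == j) ? Math.sqrt(s) : s / l[j][j];
      }
    }
    return l;
  }

  /** Solves G*b = rhs in place given L: first L*z = rhs (forward), then t(L)*b = z (backward). */
  static void solve(double[][] l, double[] b) {
    int n = l.length;
    for (int i = 0; i < n; i++) {          // forward substitution
      for (int k = 0; k < i; k++) b[i] -= l[i][k] * b[k];
      b[i] /= l[i][i];
    }
    for (int i = n - 1; i >= 0; i--) {     // backward substitution
      for (int k = i + 1; k < n; k++) b[i] -= l[k][i] * b[k];
      b[i] /= l[i][i];
    }
  }
}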
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/grep/Grep.java
package hex.grep; import hex.ModelBuilder; import hex.ModelCategory; import water.*; import water.fvec.ByteVec; import water.fvec.Chunk; import water.fvec.Vec; import water.util.Log; import java.util.Arrays; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.regex.PatternSyntaxException; /** * Grep model builder... building a trivial GrepModel */ public class Grep extends ModelBuilder<GrepModel,GrepModel.GrepParameters,GrepModel.GrepOutput> { public Grep( GrepModel.GrepParameters parms ) { super(parms); init(false); } @Override protected GrepDriver trainModelImpl() { return new GrepDriver(); } @Override public ModelCategory[] can_build() { return new ModelCategory[]{ModelCategory.Unknown}; } @Override public BuilderVisibility builderVisibility() { return BuilderVisibility.Experimental; } @Override public boolean isSupervised() { return false; } Pattern _pattern = null; /** Initialize the ModelBuilder, validating all arguments and preparing the * training frame. This call is expected to be overridden in the subclasses * and each subclass will start with "super.init();". This call is made * by the front-end whenever the GUI is clicked, and needs to be fast; * heavy-weight prep needs to wait for the trainModel() call. * * Validate the regex. */ @Override public void init(boolean expensive) { super.init(expensive); if( _parms._regex == null ) { error("_regex", "regex is missing"); } else { try { _pattern = Pattern.compile(_parms._regex); } catch( PatternSyntaxException pse ) { error("regex", pse.getMessage()); } } if( _parms._train == null ) return; Vec[] vecs = _parms.train().vecs(); if( vecs.length != 1 ) error("_train","Frame must contain exactly 1 Vec (of raw text)"); if( !(vecs[0] instanceof ByteVec) ) error("_train","Frame must contain exactly 1 Vec (of raw text)"); } // ---------------------- private class GrepDriver extends Driver { @Override public void computeImpl() { GrepModel model = null; try { init(true); // The model to be built model = new GrepModel(dest(), _parms, new GrepModel.GrepOutput(Grep.this)); model.delete_and_lock(_job); // --- // Run the main Grep Loop GrepGrep gg = new GrepGrep(_pattern).doAll(train().vecs()[0]); // Fill in the model model._output._matches = Arrays.copyOf(gg._matches,gg._cnt); model._output._offsets = Arrays.copyOf(gg._offsets,gg._cnt); StringBuilder sb = new StringBuilder("Grep: \n"); sb.append(Arrays.toString(model._output._matches)).append("\n"); sb.append(Arrays.toString(model._output._offsets)).append("\n"); Log.info(sb); } finally { if( model != null ) model.unlock(_job); } } } private class ByteSeq implements CharSequence { private final byte _bs0[]; private final byte _bs1[]; ByteSeq( Chunk chk0, Chunk chk1 ) { _bs0 = chk0.getBytes(); _bs1 = chk1==null ? null : chk1.getBytes(); } @Override public char charAt(int idx ) { return (char)(idx < _bs0.length ? _bs0[idx] : _bs1[idx-_bs0.length]); } @Override public int length( ) { return _bs0.length+(_bs1==null?0:_bs1.length); } @Override public ByteSeq subSequence( int start, int end ) { throw H2O.unimpl(); } @Override public String toString() { throw H2O.unimpl(); } String str( int s, int e ) { return new String(_bs0,s,e-s); } } private class GrepGrep extends MRTask<GrepGrep> { private final Pattern _pattern; // Outputs, hopefully not too big for once machine! 
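// _matches[i] holds the i-th matched substring and _offsets[i] its byte offset in the text; _cnt is the number of
// valid entries (both arrays grow by doubling in add()).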
String[] _matches; long [] _offsets; int _cnt; GrepGrep( Pattern pattern ) { _pattern = pattern; } @Override public void map( Chunk chk ) { _matches = new String[1]; // Result holders; will lazy expand _offsets = new long [1]; ByteSeq bs = new ByteSeq(chk,chk.nextChunk()); // We already checked that this is an instance of a ByteVec, which means // all the Chunks contain raw text as byte arrays. Matcher m = _pattern.matcher(bs); while( m.find() && m.start() < bs._bs0.length ) add(bs.str(m.start(),m.end()),chk.start()+m.start()); _job.update(chk._len); // Whole chunk of work, done all at once } @Override public void reduce( GrepGrep gg1 ) { GrepGrep gg0 = this; if( gg0._cnt < gg1._cnt ) { gg0 = gg1; gg1 = this; } // Larger result on left for( int i=0; i<gg1._cnt; i++ ) gg0.add(gg1._matches[i], gg1._offsets[i]); if( gg0 != this ) { _matches = gg0._matches; _offsets = gg0._offsets; _cnt = gg0._cnt; } } private void add( String s, long off ) { if( _cnt == _matches.length ) { _matches = Arrays.copyOf(_matches,_cnt<<1); _offsets = Arrays.copyOf(_offsets,_cnt<<1); } _matches[_cnt ] = s; _offsets[_cnt++] = off; } } }
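// Editor's sketch (not part of H2O): the chunk-boundary rule used by GrepGrep above. Each map() scans the bytes of
// the current chunk concatenated with the next chunk, but keeps only matches that start inside the current chunk,
// so a match straddling the boundary is reported exactly once. Names below are hypothetical and bytes are decoded
// with the platform charset purely for illustration.
class ChunkBoundaryGrepSketch {
  static java.util.List<String> grepChunk(byte[] current, byte[] next, Pattern pattern) {
    String text = new String(current) + (next == null ? "" : new String(next));
    java.util.List<String> hits = new java.util.ArrayList<>();
    Matcher m = pattern.matcher(text);
    while (m.find() && m.start() < current.length)   // a match owned by the next chunk ends this chunk's scan
      hits.add(m.group());
    return hits;
  }
}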
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/grep/GrepModel.java
package hex.grep; import hex.Model; import hex.ModelCategory; import hex.ModelMetrics; import water.H2O; import water.Key; public class GrepModel extends Model<GrepModel,GrepModel.GrepParameters,GrepModel.GrepOutput> { public static class GrepParameters extends Model.Parameters { public String algoName() { return "Grep"; } public String fullName() { return "Grep"; } public String javaName() { return GrepModel.class.getName(); } @Override public long progressUnits() { return train() != null ? train().numRows() : 1; } public String _regex; // The regex } public static class GrepOutput extends Model.Output { // Matched strings and their byte offsets within the scanned text public String[] _matches; public long[] _offsets; public GrepOutput( Grep b ) { super(b); } @Override public ModelCategory getModelCategory() { return ModelCategory.Unknown; } } GrepModel( Key selfKey, GrepParameters parms, GrepOutput output) { super(selfKey,parms,output); } @Override public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) { throw H2O.unimpl("GrepModel does not have Model Metrics."); } @Override protected double[] score0(double data[/*ncols*/], double preds[/*nclasses+1*/]) { throw H2O.unimpl(); } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/hglm/ComputationStateHGLM.java
package hex.hglm; import Jama.Matrix; import hex.DataInfo; import water.Job; import water.util.ArrayUtils; import water.util.Log; import java.util.Random; import static hex.hglm.HGLMUtils.*; import static water.util.ArrayUtils.copy2DArray; import static water.util.ArrayUtils.gaussianVector; public class ComputationStateHGLM { /*** * the doc = document attached to https://github.com/h2oai/h2o-3/issues/8487, title HGLM_H2O_Implementation.pdf * I will be referring to the doc and different parts of it to explain my implementation. */ final int _numFixedCoeffs; // fixed coefficient length including inactive predictors final int _numRandomCoeffs; // random coefficient length including inactive predictors public final HGLMModel.HGLMParameters _parms; int _iter; private double[] _beta; // fixed, if standardized, normalized coefficients, else, non-normalized coefficients private double[][] _ubeta; // random , if standardized, normalized coefficients, else non-normalized coefficients private double[][] _T; // positive definite matrix, size random coefficient length by random coefficient length final DataInfo _dinfo; private final Job _job; double _tauEVarE10 = 0; // variance estimate of random noise calculated from equation 10 of the doc double _tauEVarE17 = 0; // variance estimate of random noise calculated from equation 17 of the doc String[] _fixedCofficientNames; // include intercept if enabled String[] _randomCoefficientNames; // include intercept only if random effect is in intercept String[] _level2UnitNames; // enum levels of group column final int _numLevel2Unit; final int _level2UnitIndex; final int _nobs; public ComputationStateHGLM(Job job, HGLMModel.HGLMParameters parms, DataInfo dinfo, HGLMTask.ComputationEngineTask engTask, int iter) { _job = job; _parms = parms; _dinfo = dinfo; _iter = iter; _fixedCofficientNames = engTask._fixedCoeffNames; _level2UnitNames = engTask._level2UnitNames; _randomCoefficientNames = engTask._randomCoeffNames; _level2UnitIndex = engTask._level2UnitIndex; initComputationStateHGLM(engTask); _numFixedCoeffs = _beta.length; _numRandomCoeffs = _ubeta[0].length; _numLevel2Unit = _ubeta.length; _nobs = engTask._nobs; } /** * set initial values for: * 1. initial fixed coefficients from user or assigned by us; * 2. initial random coefficients from user or randomly assigned; * 3. sigma square; * 4. 
T matrix value */ void initComputationStateHGLM(HGLMTask.ComputationEngineTask engineTask) { int numRandomCoeff = _randomCoefficientNames.length; int numFixCoeff = _fixedCofficientNames.length; // need to initialize the coefficients, fixed and random if (_parms._seed == -1) // set the seed if not set by user _parms._seed = new Random().nextLong(); Log.info("Random seed: "+_parms._seed); Random random = new Random(_parms._seed); if (_parms._tau_e_var_init > 0.0) _tauEVarE10 = _parms._tau_e_var_init; else _tauEVarE10 = Math.abs(random.nextGaussian()); _T = new double[numRandomCoeff][numRandomCoeff]; if (_parms._initial_t_matrix != null) { grabInitValuesFromFrame(_parms._initial_t_matrix, _T); double[][] transposeT = ArrayUtils.transpose(_T); if (!equal2DArrays(_T, transposeT, 1e-6)) throw new IllegalArgumentException("initial_t_matrix must be symmetric but is not!"); // make sure matrix is semi positive definite Matrix tMat = new Matrix(_T); if ((_parms._max_iterations > 0) && !tMat.chol().isSPD()) // only check this when we actually build the model throw new IllegalArgumentException("initial_t_matrix must be positive semi definite but is not!"); } else { if (_parms._tau_u_var_init > 0.0) { _tauEVarE10 = _parms._tau_u_var_init; } else { _tauEVarE10 = Math.abs(random.nextGaussian()); } setDiagValues(_T, _tauEVarE10); } _ubeta = new double[engineTask._numLevel2Units][engineTask._numRandomCoeffs]; if ( null != _parms._initial_random_effects) { // read in initial random values grabInitValuesFromFrame(_parms._initial_random_effects, _ubeta); } else { // randomly generating random initial values gaussianVector(random, _ubeta, _level2UnitNames.length, numRandomCoeff); ArrayUtils.mult(_ubeta, Math.sqrt(_T[0][0])); } // copy over initial fixed coefficient values if (null != _parms._initial_fixed_effects) { if (_parms._initial_fixed_effects.length != numFixCoeff) throw new IllegalArgumentException("initial_fixed_effects must be an double[] array of size "+numFixCoeff); _beta = _parms._initial_fixed_effects; } else { _beta = new double[numFixCoeff]; _beta[_beta.length-1] = _parms.train().vec(_parms._response_column).mean(); } } public double[] getBeta() { return _beta; } public double[][] getUbeta() { return _ubeta; } public double getTauUVar() { return _tauEVarE10; } public double getTauEVarE10() { return _tauEVarE10; } public String[] getFixedCofficientNames() { return _fixedCofficientNames; } public String[] getRandomCoefficientNames() { return _randomCoefficientNames; } public String[] getGroupColumnNames() { return _level2UnitNames; } public double[][] getT() { return _T; } public int getNumFixedCoeffs() { return _numFixedCoeffs; } public int getNumRandomCoeffs() { return _numRandomCoeffs; } public int getNumLevel2Units() { return _numLevel2Unit; } public int getLevel2UnitIndex() { return _level2UnitIndex; } public void setBeta(double[] beta) { System.arraycopy(beta, 0, _beta, 0, beta.length); } public void setUbeta(double[][] ubeta) { copy2DArray(ubeta, _ubeta); } public void setT(double[][] tmat) { copy2DArray(tmat, _T); } public void setTauEVarE10(double tEVar) { _tauEVarE10 = tEVar; } public static class ComputationStateSimple { final public double[] _beta; final public double[][] _ubeta; final public double[][] _tmat; final public double _tauEVar; public ComputationStateSimple(double[] beta, double[][] ubeta, double[][] tmat, double tauEVar) { _beta = beta; _ubeta = ubeta; _tmat = tmat; _tauEVar = tauEVar; } } }
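// Editor's sketch (not part of H2O): the default initialization described in initComputationStateHGLM above, written
// with plain arrays. T starts as tau_u * I, and the random coefficients start as zero-mean Gaussian draws scaled by
// sqrt(tau_u), matching the gaussianVector + mult(sqrt(T[0][0])) path in the code. All names here are hypothetical.
class HglmInitSketch {
  static double[][] initialT(int numRandomCoeff, double tauUVar) {
    double[][] t = new double[numRandomCoeff][numRandomCoeff];
    for (int i = 0; i < numRandomCoeff; i++)
      t[i][i] = tauUVar;                                 // diagonal prior covariance of the random effects
    return t;
  }
  static double[][] initialRandomCoefficients(int numLevel2Units, int numRandomCoeff, double tauUVar, java.util.Random rng) {
    double[][] ubeta = new double[numLevel2Units][numRandomCoeff];
    double scale = Math.sqrt(tauUVar);
    for (double[] row : ubeta)
      for (int j = 0; j < row.length; j++)
        row[j] = scale * rng.nextGaussian();             // zero-mean draws with variance tauUVar
    return ubeta;
  }
}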
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/hglm/HGLM.java
package hex.hglm; import hex.*; import org.joda.time.format.DateTimeFormat; import org.joda.time.format.DateTimeFormatter; import water.H2O; import water.Job; import water.Key; import water.exceptions.H2OModelBuilderIllegalArgumentException; import water.fvec.Frame; import water.udf.CFuncRef; import water.util.Log; import water.util.TwoDimTable; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; import static hex.glm.GLMModel.GLMParameters.Family.gaussian; import static hex.glm.GLMModel.GLMParameters.MissingValuesHandling.*; import static hex.hglm.HGLMModel.HGLMParameters.Method.EM; import static hex.hglm.HGLMUtils.*; import static hex.hglm.MetricBuilderHGLM.calHGLMLlg; import static water.util.ArrayUtils.*; public class HGLM extends ModelBuilder<HGLMModel, HGLMModel.HGLMParameters, HGLMModel.HGLMModelOutput> { /*** * the doc = document attached to https://github.com/h2oai/h2o-3/issues/8487, title HGLM_H2O_Implementation.pdf * I will be referring to the doc and different parts of it to explain my implementation. */ long _startTime; // model building start time; private transient ComputationStateHGLM _state; private static final DateTimeFormatter DATE_TIME_FORMATTER = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss"); @Override public ModelCategory[] can_build() { return new ModelCategory[]{ModelCategory.Regression}; } @Override public boolean isSupervised() { return true; } @Override public BuilderVisibility builderVisibility() { return BuilderVisibility.Experimental; } @Override public boolean havePojo() { return false; } @Override public boolean haveMojo() { return false; } public HGLM(boolean startup_once) { super(new HGLMModel.HGLMParameters(), startup_once); } protected HGLM(HGLMModel.HGLMParameters parms) { super(parms); init(false); } public HGLM(HGLMModel.HGLMParameters parms, Key<HGLMModel> key) { super(parms, key); init(false); } @Override protected ModelBuilder<HGLMModel, HGLMModel.HGLMParameters, HGLMModel.HGLMModelOutput>.Driver trainModelImpl() { return new HGLMDriver(); } static class ScoringHistory { private ArrayList<Integer> _scoringIters = new ArrayList<>(); private ArrayList<Long> _scoringTimes = new ArrayList<>(); private ArrayList<Double> _logLikelihood = new ArrayList<>(); private ArrayList<Double> _tauEVar = new ArrayList<>(); public ArrayList<Integer> getScoringIters() { return _scoringIters;} public void addIterationScore(int iter, double loglikelihood, double tauEVar) { _scoringIters.add(iter); _scoringTimes.add(System.currentTimeMillis()); _logLikelihood.add(loglikelihood); _tauEVar.add(tauEVar); } public TwoDimTable to2dTable() { String[] cnames = new String[]{"timestamp", "number_of_iterations", "loglikelihood", "noise_variance"}; String[] ctypes = new String[]{"string", "int", "double", "double"}; String[] cformats = new String[]{"%s", "%d", "%.5f", "%.5f"}; int tableSize = _scoringIters.size(); TwoDimTable res = new TwoDimTable("Scoring History", "", new String[tableSize], cnames, ctypes, cformats, ""); int col = 0; for (int i=0; i<tableSize; i++) { res.set(i, col++, DATE_TIME_FORMATTER.print(_scoringTimes.get(i))); res.set(i, col++, _scoringIters.get(i)); res.set(i, col++, _logLikelihood.get(i)); res.set(i, col, _tauEVar.get(i)); col = 0; } return res; } } @Override public void init(boolean expensive) { if (_parms._nfolds > 0 || _parms._fold_column != null) error("nfolds or _fold_coumn", " cross validation is not supported in HGLM right now."); if (null != _parms._family && 
!gaussian.equals(_parms._family)) error("family", " only the Gaussian family is supported for now."); if (null != _parms._method && !EM.equals(_parms._method)) error("method", " only EM (expectation maximization) is supported for now."); if (null != _parms._missing_values_handling && PlugValues == _parms._missing_values_handling && _parms._plug_values == null) error("PlugValues", " if specified, must provide a frame with plug values in plug_values."); if (_parms._tau_u_var_init < 0) error("tau_u_var_init", "if set, must be > 0.0."); if (_parms._tau_e_var_init < 0) error("tau_e_var_init", "if set, must be > 0.0."); if (_parms._seed == 0) error("seed", "cannot be set to zero."); if (_parms._em_epsilon < 0) error("em_epsilon", "if specified, must be >= 0.0."); if (_parms._score_iteration_interval <= 0) error("score_iteration_interval", "if specified, must be >= 1."); super.init(expensive); if (error_count() > 0) throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(HGLM.this); if (expensive) { if (_parms._max_iterations == 0) { warn("max_iterations", "for HGLM, must be >= 1 (or -1 for unlimited or default setting) " + "to obtain a proper model. Setting it to 0 will only return the correct coefficient names and an empty" + " model."); warn("_max_iterations", H2O.technote(2, "for HGLM, if specified, must be >= 1 or == -1.")); } if (_parms._max_iterations == -1) _parms._max_iterations = 1000; Frame trainFrame = train(); List<String> columnNames = Arrays.stream(trainFrame.names()).collect(Collectors.toList()); if (_parms._group_column == null) { error("group_column", " the column used to generate level 2 units is missing."); } else { if (!columnNames.contains(_parms._group_column)) error("group_column", " is not found in the training frame."); else if (!trainFrame.vec(_parms._group_column).isCategorical()) error("group_column", " should be a categorical column."); } if (_parms._random_columns == null && !_parms._random_intercept) { error("random_columns", " should not be null if random_intercept is false. You must " + "specify predictors in random_columns or set random_intercept to true."); } if (_parms._random_columns != null) { boolean goodRandomColumns = (Arrays.stream(_parms._random_columns).filter(x -> columnNames.contains(x)).count() == _parms._random_columns.length); if (!goodRandomColumns) error("random_columns", " can only contain columns in the training frame."); } if (_parms._gen_syn_data) { _parms._max_iterations = 0; if (_parms._tau_e_var_init <= 0) error("tau_e_var_init", "If gen_syn_data is true, tau_e_var_init must be > 0."); } } } private class HGLMDriver extends Driver { DataInfo _dinfo = null; @Override public void computeImpl() { _startTime = System.currentTimeMillis(); init(true); if (error_count() > 0) throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(HGLM.this); _job.update(0, "Initializing HGLM model training"); HGLMModel model = null; ScoringHistory scTrain = new ScoringHistory(); ScoringHistory scValid = _parms._valid == null ? null : new ScoringHistory(); try { /*** * Need to do the following things: * 1. Generate all the various coefficient names; * 2. Initialize the coefficient values (fixed and random); * 3. Set modelOutput fields. */ // _dinfo._adaptedFrame will contain group_column. Check and make sure clients will pass that along as well.
_dinfo = new DataInfo(_train.clone(), null, 1, _parms._use_all_factor_levels, DataInfo.TransformType.NONE, DataInfo.TransformType.NONE, _parms.missingValuesHandling() == Skip, _parms.missingValuesHandling() == MeanImputation || _parms.missingValuesHandling() == PlugValues, _parms.makeImputer(), false, hasWeightCol(), hasOffsetCol(), hasFoldCol(), null); model = new HGLMModel(dest(), _parms, new HGLMModel.HGLMModelOutput(HGLM.this, _dinfo)); model.write_lock(_job); _job.update(1, "Starting to build HGLM model..."); if (EM == _parms._method) fitEM(model, _job, scTrain, scValid); model._output.setModelOutputFields(_state); // must be called before calling scoring scoreAndUpdateModel(model, true, scTrain); model._output._model_summary = generateSummary(model._output); model._output._start_time = _startTime; model._output._training_time_ms = System.currentTimeMillis() - _startTime; model._output._scoring_history = scTrain.to2dTable(); if (valid() != null) { scoreAndUpdateModel(model, false, scValid); if (scValid._scoringIters.size() > 0) model._output._scoring_history_valid = scValid.to2dTable(); } } finally { model.update(_job); model.unlock(_job); } } private TwoDimTable generateSummary(HGLMModel.HGLMModelOutput modelOutput) { String[] names = new String[]{"number_of_iterations", "loglikelihood", "noise_variance"}; String[] types = new String[]{"int", "double", "double"}; String[] formats = new String[]{"%d", "%.5f", "%.5f"}; TwoDimTable summary = new TwoDimTable("HGLM Model", "summary", new String[]{""}, names, types, formats, ""); summary.set(0, 0, modelOutput._iterations); summary.set(0, 1, modelOutput._log_likelihood); summary.set(0, 2, modelOutput._tau_e_var); return summary; } private long timeSinceLastScoring(long startTime) { return System.currentTimeMillis() - startTime; } private void scoreAndUpdateModel(HGLMModel model, boolean forTraining, ScoringHistory sc) { Log.info("Scoring after " + timeSinceLastScoring(_startTime) + "ms at iteration "+model._output._iterations); long tcurrent = System.currentTimeMillis(); if (forTraining) { model.score(_parms.train(), null, CFuncRef.from(_parms._custom_metric_func)).delete(); ModelMetricsRegressionHGLM mtrain = (ModelMetricsRegressionHGLM) ModelMetrics.getFromDKV(model, _parms.train()); model._output._training_metrics = mtrain; model._output._training_time_ms = tcurrent - model._output._start_time; if (null != mtrain) { model._output._log_likelihood = mtrain._log_likelihood; model._output._icc = mtrain._icc.clone(); sc.addIterationScore(_state._iter, model._output._log_likelihood, mtrain._var_residual); } } else { Log.info("Scoring on validation dataset."); model.score(_parms.valid(), null, CFuncRef.from(_parms._custom_metric_func)).delete(); ModelMetricsRegressionHGLM mvalid = (ModelMetricsRegressionHGLM) ModelMetrics.getFromDKV(model, _parms.valid()); if (null != mvalid) { model._output._validation_metrics = mvalid; model._output._log_likelihood_valid = ((ModelMetricsRegressionHGLM) model._output._validation_metrics).llg(); sc.addIterationScore(_state._iter, model._output._log_likelihood_valid, model._output._tau_e_var); } } } /** * Build HGLM model using EM (Expectation Maximization) described in section II of the doc. 
*/ void fitEM(HGLMModel model, Job job, ScoringHistory scTrain, ScoringHistory scValid) { int iteration = 0; // form fixed arrays and matrices whose values do not change HGLMTask.ComputationEngineTask engineTask = new HGLMTask.ComputationEngineTask(job, _parms, _dinfo); engineTask.doAll(_dinfo._adaptedFrame); model._output.setModelOutput(engineTask); if (_parms._showFixedMatVecs) model._output.setModelOutputFixMatVec(engineTask); _state = new ComputationStateHGLM(_job, _parms, _dinfo, engineTask, iteration); try { if (_parms._max_iterations > 0) { // grab current values of fixed beta, tauEVar, tauUVar double[] beta = _state.getBeta().clone(); double[][] ubeta; double tauEVarE10 = _state.getTauEVarE10(); double[][] tMat = copy2DArray(_state.getT()); double[][][] cjInv; double[][] tMatInv; while (true) { iteration++; // E step: estimate the random beta (random effect coefficients); need to grab Cj inverse tMatInv = generateTInverse(tMat); cjInv = generateCJInverse(engineTask._ArjTArj, tauEVarE10, tMatInv); // for each level 2 value ubeta = estimateNewRandomEffects(cjInv, engineTask._ArjTYj, engineTask._ArjTAfj, beta);// new random coefficients // M step beta = estimateFixedCoeff(engineTask._AfTAftInv, engineTask._AfjTYjSum, engineTask._AfjTArj, ubeta);// new fixed coefficients tMat = estimateNewtMat(ubeta, tauEVarE10, cjInv, engineTask._oneOverJ); // provide better estimate of tauEVar HGLMTask.ResidualLLHTask rLlhE10 = new HGLMTask.ResidualLLHTask(_job, _parms, _dinfo, ubeta, beta, engineTask); rLlhE10.doAll(_dinfo._adaptedFrame); tauEVarE10 = rLlhE10._residualSquare * engineTask._oneOverN; // from equation 10 of the doc // check to make sure determinant of V is positive, see section II.V of the doc if (!checkPositiveG(engineTask._numLevel2Units, tMat)) Log.info("HGLM model building is stopped because matrix G in section II.V of the doc is no longer PSD"); // check if stopping conditions are satisfied if (!progress(beta, ubeta, tMat, tauEVarE10, scTrain, scValid, model, rLlhE10)) return; } } } catch(Exception ex) { // will catch singular matrix during loglikelihood calculation if (iteration > 1) // some coefficients are valid, just return return; else throw new RuntimeException(ex); // bad matrix from the start, no model is built.
} } public boolean progress(double[] beta, double[][] ubeta, double[][] tmat, double tauEVarE10, ScoringHistory scTrain, ScoringHistory scValid, HGLMModel model, HGLMTask.ResidualLLHTask rLlh) { _state._iter++; if (_state._iter >= _parms._max_iterations || stop_requested()) return false; double[] betaDiff = new double[beta.length]; minus(betaDiff, beta, _state.getBeta()); double maxBetaDiff = maxMag(betaDiff) / maxMag(beta); double[][] tmatDiff = new double[tmat.length][tmat[0].length]; minus(tmatDiff, tmat, _state.getT()); double maxTmatDiff = maxMag(tmatDiff) / maxMag(tmat); double[][] ubetaDiff = new double[ubeta.length][ubeta[0].length]; minus(ubetaDiff, ubeta, _state.getUbeta()); double maxUBetaDiff = maxMag(ubetaDiff) / maxMag(ubeta); double tauEVarDiff = Math.abs(tauEVarE10 - _state.getTauEVarE10()) / tauEVarE10; boolean converged = ((maxBetaDiff <= _parms._em_epsilon) && (maxTmatDiff <= _parms._em_epsilon) && (maxUBetaDiff <= _parms._em_epsilon) && (tauEVarDiff <= _parms._em_epsilon)); if (!converged) { // update values in _state _state.setBeta(beta); _state.setUbeta(ubeta); _state.setT(tmat); _state.setTauEVarE10(tauEVarE10); if (_parms._score_each_iteration || ((_state._iter % _parms._score_iteration_interval) == 0)) { // score every score_iteration_interval iterations model._output.setModelOutputFields(_state); scoreAndUpdateModel(model, true, scTrain); // perform scoring and update the scoring history if (_parms.valid() != null) scoreAndUpdateModel(model, false, scValid); } else { // calculate log likelihood with current parameter settings double logLikelihood = calHGLMLlg(_state._nobs, tmat, tauEVarE10, model._output._arjtarj, rLlh._sse_fixed, rLlh._yMinusXTimesZ); scTrain.addIterationScore(_state._iter, logLikelihood, tauEVarE10); } } return !converged; } } }
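The EM driver above stops when progress() reports convergence. Below is a minimal standalone sketch (hypothetical class and variable names, not the H2O API) of that stopping rule: every parameter block must change by at most _em_epsilon, where the change of an array is measured as max|new - old| / max|new|, and the residual variance uses the same relative test on a scalar.

```java
// Hypothetical standalone sketch of the EM stopping rule used in HGLM.progress:
// converged when the relative max-magnitude change of every parameter block
// falls at or below em_epsilon.
public class EmConvergenceSketch {
  static double maxMag(double[] v) {
    double m = 0;
    for (double x : v) m = Math.max(m, Math.abs(x));
    return m;
  }
  // max|cur - prev| / max|cur|, mirroring maxMag(diff) / maxMag(cur) in progress()
  static double relChange(double[] cur, double[] prev) {
    double maxDiff = 0;
    for (int i = 0; i < cur.length; i++) maxDiff = Math.max(maxDiff, Math.abs(cur[i] - prev[i]));
    return maxDiff / maxMag(cur);
  }
  public static void main(String[] args) {
    double emEpsilon = 1e-3;
    double[] betaNew = {0.512, -1.203}, betaOld = {0.511, -1.202}; // fixed effects
    double tauEVarNew = 2.0005, tauEVarOld = 2.0;                  // residual variance
    boolean converged = relChange(betaNew, betaOld) <= emEpsilon
        && Math.abs(tauEVarNew - tauEVarOld) / tauEVarNew <= emEpsilon;
    System.out.println("converged = " + converged);                // true for these numbers
  }
}
```

The real progress() applies the identical test to the random coefficients ubeta and to the T matrix, and also stops once _max_iterations is reached or a stop is requested.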
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/hglm/HGLMModel.java
package hex.hglm; import hex.DataInfo; import hex.Model; import hex.ModelCategory; import hex.ModelMetrics; import hex.deeplearning.DeepLearningModel; import hex.glm.GLM; import hex.glm.GLMModel; import water.*; import water.fvec.Frame; import water.fvec.Vec; import water.udf.CFuncRef; import water.util.TwoDimTable; import java.io.Serializable; import java.util.Arrays; import static hex.glm.GLMModel.GLMParameters.Family.gaussian; import static hex.hglm.HGLMModel.HGLMParameters.Method.EM; import static hex.hglm.HGLMUtils.*; import static water.util.ArrayUtils.copy2DArray; public class HGLMModel extends Model<HGLMModel, HGLMModel.HGLMParameters, HGLMModel.HGLMModelOutput> { /** * the doc = document attached to https://github.com/h2oai/h2o-3/issues/8487, title HGLM_H2O_Implementation.pdf * I will be referring to the doc and different parts of it to explain my implementation. */ public HGLMModel(Key<HGLMModel> selfKey, HGLMParameters parms, HGLMModelOutput output) { super(selfKey, parms, output); } @Override public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) { return new MetricBuilderHGLM(domain, true, true, _parms._random_intercept, _output); } @Override public String[] makeScoringNames() { return new String[]{"predict"}; } @Override protected double[] score0(double[] data, double[] preds) { throw new UnsupportedOperationException("HGLMModel.score0 should never be called"); } @Override protected PredictScoreResult predictScoreImpl(Frame fr, Frame adaptFrm, String destination_key, Job j, boolean computeMetrics, CFuncRef customMetricFunc) { String[] predictNames = makeScoringNames(); String[][] domains = new String[predictNames.length][]; boolean forTraining = _parms.train().getKey().equals(fr.getKey()); HGLMScore gs = makeScoringTask(adaptFrm, true, j, computeMetrics && !_parms._gen_syn_data); gs.doAll(predictNames.length, Vec.T_NUM, gs._dinfo._adaptedFrame); MetricBuilderHGLM mb = null; Frame rawFrame = null; if (gs._computeMetrics) { // only calculate log-likelihood, mse and other metrics if _computeMetrics mb = gs._mb; if (forTraining) { _output._yMinusXTimesZ = gs._yMinusXTimesZ; _output._yMinusFixPredSquare = mb._yMinusFixPredSquare; } else { // store for all frames other than the training frame _output._yMinusXTimesZValid = gs._yMinusXTimesZ; _output._yMinusFixPredSquareValid = mb._yMinusFixPredSquare; } rawFrame = gs.outputFrame(); } domains[0] = gs._predDomains; Frame outputFrame = gs.outputFrame(Key.make(destination_key), predictNames, domains); return new PredictScoreResult(mb, rawFrame, outputFrame); } private HGLMScore makeScoringTask(Frame adaptFrm, boolean makePredictions, Job j, boolean computeMetrics) { int responseId = adaptFrm.find(_output.responseName()); if(responseId > -1 && adaptFrm.vec(responseId).isBad()) { // remove inserted invalid response adaptFrm = new Frame(adaptFrm.names(),adaptFrm.vecs()); adaptFrm.remove(responseId); } final boolean detectedComputeMetrics = computeMetrics && (adaptFrm.vec(_output.responseName()) != null && !adaptFrm.vec(_output.responseName()).isBad()); String [] domain = _output.nclasses()<=1 ? null : (!detectedComputeMetrics ? 
_output._domains[_output._domains.length-1] : adaptFrm.lastVec().domain()); return new HGLMScore(j, this, _output._dinfo.scoringInfo(_output._names, adaptFrm), domain, computeMetrics, makePredictions); } public static class HGLMParameters extends Model.Parameters { public long _seed = -1; public GLMModel.GLMParameters.Family _family; public int _max_iterations = -1; public double[] _initial_fixed_effects; // initial values of fixed coefficients public Key _initial_random_effects; // frame key that contains the initial starting values of random coefficient effects public Key _initial_t_matrix; // frame key taht contains the initial starting values of T matrix public double _tau_u_var_init = 0; // initial random coefficient effects variance estimate, set by user public double _tau_e_var_init = 0; // initial random noise variance estimate, set by user public GLMModel.GLMParameters.Family _random_family = gaussian; public String[] _random_columns; // store predictors that have random components in the coefficients public Method _method; public double _em_epsilon = 1e-3; public boolean _random_intercept = true; public String _group_column; public Serializable _missing_values_handling = GLMModel.GLMParameters.MissingValuesHandling.MeanImputation; public Key<Frame> _plug_values = null; public boolean _use_all_factor_levels = false; public boolean _showFixedMatVecs = false; // internal parameter, if true, will show AfjTY, ArjTY, ArjTArj, AfjTAfj, AfjTArj public int _score_iteration_interval = 5; public boolean _score_each_iteration = false; public boolean _gen_syn_data = false; @Override public String algoName() { return "HGLM"; } @Override public String fullName() { return "Hierarchical Generalized Linear Model"; } @Override public String javaName() { return HGLMModel.class.getName(); } @Override public long progressUnits() { return 1; } public enum Method {EM}; // EM: expectation maximization public HGLMParameters() { super(); _family = gaussian; _method = EM; } public GLMModel.GLMParameters.MissingValuesHandling missingValuesHandling() { if (_missing_values_handling instanceof GLMModel.GLMParameters.MissingValuesHandling) return (GLMModel.GLMParameters.MissingValuesHandling) _missing_values_handling; assert _missing_values_handling instanceof DeepLearningModel.DeepLearningParameters.MissingValuesHandling; switch ((DeepLearningModel.DeepLearningParameters.MissingValuesHandling) _missing_values_handling) { case MeanImputation: return GLMModel.GLMParameters.MissingValuesHandling.MeanImputation; case Skip: return GLMModel.GLMParameters.MissingValuesHandling.Skip; default: throw new IllegalStateException("Unsupported missing values handling value: " + _missing_values_handling); } } public boolean imputeMissing() { return missingValuesHandling() == GLMModel.GLMParameters.MissingValuesHandling.MeanImputation || missingValuesHandling() == GLMModel.GLMParameters.MissingValuesHandling.PlugValues; } public DataInfo.Imputer makeImputer() { if (missingValuesHandling() == GLMModel.GLMParameters.MissingValuesHandling.PlugValues) { if (_plug_values == null || _plug_values.get() == null) { throw new IllegalStateException("Plug values frame needs to be specified when Missing Value Handling = PlugValues."); } return new GLM.PlugValuesImputer(_plug_values.get()); } else { // mean/mode imputation and skip (even skip needs an imputer right now! 
PUBDEV-6809) return new DataInfo.MeanImputer(); } } } public static class HGLMModelOutput extends Model.Output { public DataInfo _dinfo; final GLMModel.GLMParameters.Family _family; final GLMModel.GLMParameters.Family _random_family; public String[] _fixed_coefficient_names; // include intercept only if _parms._intercept is true public String[] _random_coefficient_names; // include intercept only if _parms._random_intercept = true public String[] _group_column_names; public long _training_time_ms; public double[] _beta; // fixed coefficients public double[][] _ubeta; // random coefficients public double[][] _tmat; // calculated with non-standardize random effects coefficients double _tauUVar; public double _tau_e_var; // test parameters public double[][] _afjtyj; public double[][] _arjtyj; public double[][][] _afjtafj; public double[][][] _arjtarj; public double[][][] _afjtarj; public double[][] _yMinusXTimesZ; // generate during training public double[][] _yMinusXTimesZValid; // store same value for frames other than training frame public int _num_fixed_coeffs; public int _num_random_coeffs; int[] _randomCatIndices; int[] _randomNumIndices; int[] _randomCatArrayStartIndices; int _predStartIndexRandom; boolean _randomSlopeToo; int[] _fixedCatIndices; int _numLevel2Units; int _level2UnitIndex; // store column index of level 2 predictor column int _predStartIndexFixed; public double[] _icc; public double _log_likelihood; public double _log_likelihood_valid; // store for frames other than training public int _iterations; public int _nobs; public int _nobs_valid; public double _yMinusFixPredSquare; public double _yMinusFixPredSquareValid; public TwoDimTable _scoring_history_valid; /** * For debugging only. Copy over the generated fixed matrices to model._output. 
*/ public void setModelOutputFixMatVec(HGLMTask.ComputationEngineTask comp) { _afjtyj = copy2DArray(comp._AfjTYj); _arjtyj = copy2DArray(comp._ArjTYj); _afjtafj = copy3DArray(comp._AfjTAfj); _afjtarj = copy3DArray(comp._AfjTArj); _nobs = comp._nobs; } public void setModelOutput(HGLMTask.ComputationEngineTask comp) { _randomCatIndices = comp._randomCatIndices; _randomNumIndices = comp._randomNumIndices; _randomCatArrayStartIndices = comp._randomCatArrayStartIndices; _predStartIndexRandom = comp._predStartIndexRandom; _randomSlopeToo = !(comp._numRandomCoeffs == 1 && comp._parms._random_intercept); _fixedCatIndices = comp._fixedCatIndices; _predStartIndexFixed = comp._predStartIndexFixed; _arjtarj = copy3DArray(comp._ArjTArj); _log_likelihood = Double.NEGATIVE_INFINITY; } public HGLMModelOutput(HGLM b, DataInfo dinfo) { super(b, dinfo._adaptedFrame); _dinfo = dinfo; _domains = dinfo._adaptedFrame.domains(); _family = b._parms._family; _random_family = b._parms._random_family; } public void setModelOutputFields(ComputationStateHGLM state) { _fixed_coefficient_names = state.getFixedCofficientNames(); _random_coefficient_names = state.getRandomCoefficientNames(); _group_column_names = state.getGroupColumnNames(); _tauUVar = state.getTauUVar(); _tau_e_var = state.getTauEVarE10(); _tmat = state.getT(); _num_fixed_coeffs = state.getNumFixedCoeffs(); _num_random_coeffs = state.getNumRandomCoeffs(); _numLevel2Units = state.getNumLevel2Units(); _level2UnitIndex = state.getLevel2UnitIndex(); _nobs = state._nobs; _beta = state.getBeta(); _ubeta = state.getUbeta(); _num_random_coeffs = _ubeta[0].length; _iterations = state._iter; } @Override public int nclasses() { // only support Gaussian now return 1; } @Override public ModelCategory getModelCategory() { return ModelCategory.Regression; } } @Override protected Futures remove_impl(Futures fs, boolean cascade) { super.remove_impl(fs, cascade); return fs; } @Override protected AutoBuffer writeAll_impl(AutoBuffer ab) { return super.writeAll_impl(ab); } @Override protected Keyed readAll_impl(AutoBuffer ab, Futures fs) { return super.readAll_impl(ab, fs); } @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append(super.toString()); sb.append(" loglikelihood: "+this._output._log_likelihood); sb.append(" fixed effect coefficients: "+ Arrays.toString(this._output._beta)); int numLevel2 = this._output._ubeta.length; for (int index=0; index<numLevel2; index++) sb.append(" standard error of random effects for level 2 index " + index + ": "+this._output._tmat[index][index]); sb.append(" standard error of residual error: "+this._output._tau_e_var); sb.append(" ICC: "+ Arrays.toString(this._output._icc)); sb.append(" loglikelihood: "+this._output._log_likelihood); sb.append(" iterations taken to build model: " + this._output._iterations); sb.append(" coefficients for fixed effect: "+Arrays.toString(this._output._beta)); for (int index=0; index<numLevel2; index++) sb.append(" coefficients for random effect for level 2 index: "+index+": "+Arrays.toString(this._output._ubeta[index])); return sb.toString(); } }
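For reference, the fields stored in HGLMModelOutput (_beta, _ubeta, _tmat, _tau_e_var) parameterize a standard two-level linear mixed model. The sketch below uses my own notation (it is not taken from the attached doc), with the A_fj / A_rj naming following HGLMTask:

$$
y_j = A_{fj}\,\beta + A_{rj}\,u_j + e_j,\qquad
u_j \sim \mathcal{N}(0,\,T),\qquad
e_j \sim \mathcal{N}(0,\,\sigma_e^2 I),\qquad j = 1,\dots,J,
$$

where beta is _beta, u_j is row j of _ubeta, T is _tmat, sigma_e^2 is _tau_e_var, and A_fj, A_rj are the fixed- and random-effect design matrices of level-2 unit j.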
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/hglm/HGLMScore.java
package hex.hglm; import hex.DataInfo; import water.Job; import water.MRTask; import water.MemoryManager; import water.fvec.Chunk; import water.fvec.NewChunk; import water.util.ArrayUtils; import java.util.Arrays; import java.util.Random; import static hex.hglm.HGLMTask.ComputationEngineTask.fillInFixedRowValues; import static hex.hglm.HGLMTask.ComputationEngineTask.fillInRandomRowValues; import static water.util.ArrayUtils.innerProduct; public class HGLMScore extends MRTask<HGLMScore> { // the doc = document attached to https://github.com/h2oai/h2o-3/issues/8487, title HGLM_H2O_Implementation.pdf // I will be referring to the doc and different parts of it to explain my implementation. DataInfo _dinfo; double[] _beta; // non-standardized coefficients double[][] _ubeta; // non-standardized coefficients final Job _job; boolean _computeMetrics; boolean _makePredictions; final HGLMModel _model; MetricBuilderHGLM _mb; String[] _predDomains; int _nclass; HGLMModel.HGLMParameters _parms; int _level2UnitIndex; int[] _fixedCatIndices; int _numLevel2Units; int _predStartIndexFixed; int[] _randomCatIndices; int[] _randomNumIndices; int[] _randomCatArrayStartIndices; int _predStartIndexRandom; final boolean _randomSlopeToo; final boolean _randomIntercept; // true if present public double[][] _yMinusXTimesZ; // use non-normalized coefficients double[][] _tmat; Random randomObj; final double _noiseStd; public HGLMScore(final Job j, final HGLMModel model, DataInfo dinfo, final String[] respDomain, final boolean computeMetrics, final boolean makePredictions) { _job = j; _model = model; _dinfo = dinfo; _computeMetrics = computeMetrics; // can be true only if the response column is available and calcualte loglikelihood _makePredictions = makePredictions; _beta = model._output._beta; // non-standardized/non-normalized coefficients _ubeta = model._output._ubeta; // non-standardized/non-normalized coefficients _predDomains = respDomain; _nclass = model._output.nclasses(); _parms = model._parms; _level2UnitIndex = model._output._level2UnitIndex; _fixedCatIndices = model._output._fixedCatIndices; _numLevel2Units = model._output._numLevel2Units; _predStartIndexFixed = model._output._predStartIndexFixed; _randomCatIndices = model._output._randomCatIndices; _randomNumIndices = model._output._randomNumIndices; _randomCatArrayStartIndices = model._output._randomCatArrayStartIndices; _predStartIndexRandom = model._output._predStartIndexRandom; _randomSlopeToo = model._output._randomSlopeToo; _randomIntercept = _parms._random_intercept; _tmat = model._output._tmat; // generated from non-standardized random coefficients randomObj = new Random(_parms._seed); _noiseStd = Math.sqrt(_parms._tau_e_var_init); // not affected by standardization/normalization } @Override public void map(Chunk[] chks, NewChunk[] nc) { if (isCancelled() || (_job != null && _job.stop_requested())) return; float[] response = null; // store response column value if exists int numPredValues = _nclass <= 1 ? 
1 : _nclass + 1; double[] predictVals = MemoryManager.malloc8d(numPredValues); double[] xji = MemoryManager.malloc8d(_model._output._beta.length); double[] zji = MemoryManager.malloc8d(_model._output._ubeta[0].length); if (_computeMetrics) { _mb = (MetricBuilderHGLM) _model.makeMetricBuilder(_predDomains); response = new float[1]; _yMinusXTimesZ = new double[_numLevel2Units][zji.length]; } DataInfo.Row r = _dinfo.newDenseRow(); if (_computeMetrics && (r.response == null || r.response.length == 0)) throw new IllegalArgumentException("computeMetrics can only be set to true if the response column exists in" + " dataset passed to prediction function."); int chkLen = chks[0].len(); int level2Index; for (int rid = 0; rid < chkLen; rid++) { _dinfo.extractDenseRow(chks, rid, r); level2Index = _parms._use_all_factor_levels ? r.binIds[_level2UnitIndex] - _dinfo._catOffsets[_level2UnitIndex] : (int) chks[_level2UnitIndex].at8(rid); processRow(r, predictVals, nc, numPredValues, xji, zji, level2Index); if (_computeMetrics && !r.response_bad) { // calculate metrics response[0] = (float) r.response[0]; _mb.perRow(predictVals, response, r.weight, r.offset, xji, zji, _yMinusXTimesZ, level2Index, _model); } } } @Override public void reduce(HGLMScore other) { if (_mb != null) _mb.reduce(other._mb); if (_computeMetrics) ArrayUtils.add(_yMinusXTimesZ, other._yMinusXTimesZ); } private void processRow(DataInfo.Row r, double[] ps, NewChunk[] preds, int numPredCols, double[] xji, double[] zji, int level2Index) { if (r.predictors_bad) { Arrays.fill(ps, Double.NaN); return; } else if (r.weight == 0) { Arrays.fill(ps, 0.0); return; } ps = scoreRow(r, ps, xji, zji, level2Index); // weight is not zero and response is valid if (_makePredictions) for (int predCol = 0; predCol < numPredCols; predCol++) { // write prediction to NewChunk preds[predCol].addNum(ps[predCol]); } } /** * only processing gaussian for now. */ public double[] scoreRow(DataInfo.Row r, double[] preds, double[] xji, double[] zji, int level2Index) { fillInFixedRowValues(r, xji, _parms, _fixedCatIndices, _level2UnitIndex, _numLevel2Units, _predStartIndexFixed, _dinfo); fillInRandomRowValues(r, zji, _parms, _randomCatIndices, _randomNumIndices, _randomCatArrayStartIndices, _predStartIndexRandom, _dinfo, _randomSlopeToo, _randomIntercept); preds[0] = innerProduct(xji, _beta) + innerProduct(zji, _ubeta[level2Index]) + r.offset; preds[0] = _parms._gen_syn_data ? preds[0]+randomObj.nextGaussian()*_noiseStd : preds[0]; return preds; } }
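scoreRow above reduces to a pair of inner products plus an optional synthetic-noise term. A minimal standalone sketch (hypothetical values; not the H2O scoring pipeline) of that per-row prediction:

```java
import java.util.Random;

// Hypothetical standalone sketch of the per-row prediction formed in HGLMScore.scoreRow:
//   prediction = x . beta  +  z . ubeta[level2Index]  +  offset,
// with Gaussian noise of std sqrt(tau_e_var_init) added only when gen_syn_data is true.
public class HglmRowPredictionSketch {
  static double innerProduct(double[] a, double[] b) {
    double s = 0;
    for (int i = 0; i < a.length; i++) s += a[i] * b[i];
    return s;
  }
  public static void main(String[] args) {
    double[] beta = {0.4, -0.2, 1.5};              // fixed coefficients (last entry = intercept)
    double[][] ubeta = {{0.1, 0.3}, {-0.2, 0.05}}; // random coefficients, one row per level-2 unit
    double[] xji = {1.0, 2.0, 1.0};                // expanded fixed-effect row (intercept appended)
    double[] zji = {2.0, 1.0};                     // expanded random-effect row (intercept appended)
    int level2Index = 1;                           // group this row belongs to
    double offset = 0.0;
    boolean genSynData = false;
    double noiseStd = Math.sqrt(2.0);              // sqrt(tau_e_var_init), only used for synthetic data
    double pred = innerProduct(xji, beta) + innerProduct(zji, ubeta[level2Index]) + offset;
    if (genSynData) pred += new Random(1234).nextGaussian() * noiseStd;
    System.out.println("prediction = " + pred);    // 0.4*1 - 0.2*2 + 1.5*1 - 0.2*2 + 0.05*1 = 1.15
  }
}
```

When _gen_syn_data is true the builder forces _max_iterations to 0 and this noise term turns the predictions into synthetic responses drawn around the linear predictor.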
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/hglm/HGLMTask.java
package hex.hglm; import Jama.Matrix; import hex.DataInfo; import water.Job; import water.MRTask; import water.MemoryManager; import water.fvec.Chunk; import water.util.ArrayUtils; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; import static hex.hglm.HGLMUtils.fillZTTimesZ; import static water.util.ArrayUtils.*; public abstract class HGLMTask { // the doc = document attached to https://github.com/h2oai/h2o-3/issues/8487, title HGLM_H2O_Implementation.pdf // I will be referring to the doc and different parts of it to explain my implementation. /*** * This class will calculate the residual Yj-Afj*beta-Arj*ubetaj for level 2 unit j. It implements step 2 of * section II.VIII of the doc. */ public static class ResidualLLHTask extends MRTask<ResidualLLHTask> { final public double[][] _ubeta; final public double[] _beta; // new fixed coefficients calculated final HGLMModel.HGLMParameters _parms; final DataInfo _dinfo; double _residualSquare; double[] _residualSquareLevel2; final int[] _fixedCatIndices; final int _level2UnitIndex; final int _numLevel2Units; final int _predStartIndexFixed; final int[] _randomCatIndices; final int[] _randomNumIndices; final int[] _randomCatArrayStartIndices; final int _predStartIndexRandom; final int _numFixedCoeffs; final int _numRandomCoeffs; double[][] _yMinusXTimesZ; // standarized if parms._standardize=true and vice versa double _sse_fixed; Job _job; final boolean _randomSlopeToo; public ResidualLLHTask(Job job, HGLMModel.HGLMParameters parms, DataInfo dataInfo, double[][] ubeta, double[] beta, ComputationEngineTask computeEngine) { _parms = parms; _dinfo = dataInfo; _ubeta = ubeta; _beta = beta; _job = job; _fixedCatIndices = computeEngine._fixedCatIndices; _level2UnitIndex = computeEngine._level2UnitIndex; _numLevel2Units = computeEngine._numLevel2Units; _predStartIndexFixed = computeEngine._predStartIndexFixed; _randomCatIndices = computeEngine._randomCatIndices; _randomNumIndices = computeEngine._randomNumIndices; _randomCatArrayStartIndices = computeEngine._randomCatArrayStartIndices; _predStartIndexRandom = computeEngine._predStartIndexRandom; _numFixedCoeffs = computeEngine._numFixedCoeffs; _numRandomCoeffs = computeEngine._numRandomCoeffs; _randomSlopeToo = _parms._random_columns != null && _parms._random_columns.length > 0; } @Override public void map(Chunk[] chks) { if(_job != null && _job.stop_requested()) return; _residualSquare = 0.0; _residualSquareLevel2 = new double[_numLevel2Units]; double[] xji = MemoryManager.malloc8d(_numFixedCoeffs); double[] zji = MemoryManager.malloc8d(_numRandomCoeffs); int chkLen = chks[0].len(); _yMinusXTimesZ = new double[_numLevel2Units][_numRandomCoeffs]; int level2Index; double residual, y, residualSquare; double residualFixed; DataInfo.Row r = _dinfo.newDenseRow(); for (int rowInd = 0; rowInd < chkLen; rowInd++) { _dinfo.extractDenseRow(chks, rowInd, r); if (!r.isBad() && !(r.weight == 0)) { y = r.response(0); level2Index = _parms._use_all_factor_levels ? 
r.binIds[_level2UnitIndex] - _dinfo._catOffsets[_level2UnitIndex] : (int) chks[_level2UnitIndex].at8(rowInd); ComputationEngineTask.fillInFixedRowValues(r, xji, _parms, _fixedCatIndices, _level2UnitIndex, _numLevel2Units, _predStartIndexFixed, _dinfo); // read in predictors for fixed coefficient effects ComputationEngineTask.fillInRandomRowValues(r, zji, _parms, _randomCatIndices, _randomNumIndices, _randomCatArrayStartIndices, _predStartIndexRandom, _dinfo, _randomSlopeToo, _parms._random_intercept); // read in random coefficient effects residualFixed = y - innerProduct(xji, _beta) - r.offset; _sse_fixed += residualFixed * residualFixed; residual = residualFixed - innerProduct(zji, _ubeta[level2Index]); residualSquare = residual*residual; _residualSquare += residualSquare; _residualSquareLevel2[level2Index] += residualSquare; add(_yMinusXTimesZ[level2Index], mult(zji, residualFixed)); } } } @Override public void reduce(ResidualLLHTask otherTask) { add(_residualSquareLevel2, otherTask._residualSquareLevel2); _residualSquare += otherTask._residualSquare; add(_yMinusXTimesZ, otherTask._yMinusXTimesZ); _sse_fixed += otherTask._sse_fixed; } } /*** * This class will pre-calculate arrays (double[]) or matrices (double[][]) that will be used in later calculations * that are part of the CDSS described in equation 11 of the doc. * */ public static class ComputationEngineTask extends MRTask<ComputationEngineTask> { double _YjTYjSum; // calculate sum of transpose(Yj)*Yj across all level 2 units public double[][] _AfjTYj; // calculate transpose(Afj)*Yj for each level 2 unit, Y public double[][] _ArjTYj; public double[][][] _AfjTAfj; // equivalent to transpose(Xj)*Xj for each j public double[][][] _ArjTArj; // equivalent to tranpose(Zj)*Zj for each j public double[][][] _AfjTArj; public double[][][] _ArjTAfj; public double[][] _AfTAftInv; public double[] _AfTAftInvAfjTYj; // vectors are represented in row array. Need to transpose it if used as Matrix public double[] _AfjTYjSum; double _oneOverJ; double _oneOverN; int _numFixedCoeffs; int _numRandomCoeffs; String[] _fixedCoeffNames; String[] _randomCoeffNames; String[] _level2UnitNames; int _numLevel2Units; final HGLMModel.HGLMParameters _parms; int _nobs; double _weightedSum; final DataInfo _dinfo; int _level2UnitIndex; int[] _randomPredXInterceptIndices; int[] _randomCatIndices; int[] _randomNumIndices; int[] _randomCatArrayStartIndices; // starting index of random cat predictors int[] _fixedPredXInterceptIndices; int[] _fixedCatIndices; int[] _fixedNumIndices; String[] _fixedPredNames; String[] _randomPredNames; int _predStartIndexFixed; int _predStartIndexRandom; Job _job; final boolean _randomSlopeToo; double[][] _zTTimesZ; public ComputationEngineTask(Job job, HGLMModel.HGLMParameters parms, DataInfo dinfo) { _parms = parms; _dinfo = dinfo; _job = job; _randomSlopeToo = _parms._random_columns != null && _parms._random_columns.length > 0; extractNamesNIndices(); } void setPredXInterceptIndices(List<String> predictorNames) { boolean randomColsExist = _parms._random_columns != null; _randomPredXInterceptIndices = randomColsExist ? 
new int[_parms._random_columns.length] : null; List<String> fixedPredNames = new ArrayList<>(); List<String> randomPredNames = new ArrayList<>(); List<Integer> randomCatPredList = new ArrayList<>(); List<Integer> randomNumPredList = new ArrayList<>(); _fixedPredXInterceptIndices = new int[predictorNames.size() - 1]; List<Integer> fixedCatPredList = new ArrayList<>(); List<Integer> fixedNumPredList = new ArrayList<>(); if (randomColsExist) { for (int index = 0; index < _randomPredXInterceptIndices.length; index++) { _randomPredXInterceptIndices[index] = predictorNames.indexOf(_parms._random_columns[index]); if (_randomPredXInterceptIndices[index] < _dinfo._cats) randomCatPredList.add(_randomPredXInterceptIndices[index]); else randomNumPredList.add(_randomPredXInterceptIndices[index]); randomPredNames.add(predictorNames.get(_randomPredXInterceptIndices[index])); } } if (randomCatPredList.size() > 0) { _randomCatIndices = randomCatPredList.stream().mapToInt(x -> x).toArray(); Arrays.sort(_randomCatIndices); List<Integer> randomCatLevels = Arrays.stream(_randomCatIndices).map(x -> _dinfo._adaptedFrame.vec(x).domain().length).boxed().collect(Collectors.toList()); randomCatLevels.add(0, _parms._use_all_factor_levels ? 0 : 1); int[] randomCatArrayStartIndices = randomCatLevels.stream().map(x -> _parms._use_all_factor_levels ? x : (x - 1)).mapToInt(x -> x).toArray(); _randomCatArrayStartIndices = ArrayUtils.cumsum(randomCatArrayStartIndices); } if (randomNumPredList.size() > 0) { _randomNumIndices = randomNumPredList.stream().mapToInt(x -> x).toArray(); Arrays.sort(_randomNumIndices); } for (int index = 0; index < _fixedPredXInterceptIndices.length; index++) { String predName = predictorNames.get(index); if (!predName.equals(_parms._group_column)) { if (index < _dinfo._cats) fixedCatPredList.add(index); else fixedNumPredList.add(index); fixedPredNames.add(predName); } } if (fixedCatPredList.size() > 0) { _fixedCatIndices = fixedCatPredList.stream().mapToInt(x -> x).toArray(); Arrays.sort(_fixedCatIndices); } if (fixedNumPredList.size() > 0) { _fixedNumIndices = fixedNumPredList.stream().mapToInt(x -> x).toArray(); Arrays.sort(_fixedNumIndices); } _fixedPredNames = fixedPredNames.stream().toArray(String[]::new); _randomPredNames = randomPredNames.stream().toArray(String[]::new); _predStartIndexFixed = fixedCatPredList.size() == 0 ? 0 : (_parms._use_all_factor_levels ? Arrays.stream(_fixedCatIndices).map(x -> _dinfo._adaptedFrame.vec(x).domain().length).sum() : Arrays.stream(_fixedCatIndices).map(x -> (_dinfo._adaptedFrame.vec(x).domain().length - 1)).sum()); _predStartIndexRandom = randomCatPredList.size() == 0 ? 0 : (_parms._use_all_factor_levels ? 
Arrays.stream(_randomCatIndices).map(x -> _dinfo._adaptedFrame.vec(x).domain().length).sum() : Arrays.stream(_randomCatIndices).map(x -> (_dinfo._adaptedFrame.vec(x).domain().length - 1)).sum()); } void extractNamesNIndices() { List<String> predictorNames = Arrays.stream(_dinfo._adaptedFrame.names()).collect(Collectors.toList()); _level2UnitIndex = predictorNames.indexOf(_parms._group_column); // assign coefficient names for fixed, random and group column List<String> allCoeffNames = Arrays.stream(_dinfo.coefNames()).collect(Collectors.toList()); String groupCoeffStarts = _parms._group_column + "."; _level2UnitNames = Arrays.stream(_dinfo._adaptedFrame.vec(_level2UnitIndex).domain()).map(x -> groupCoeffStarts + x).toArray(String[]::new); List<String> groupCoeffNames = Arrays.stream(_level2UnitNames).collect(Collectors.toList()); // fixed Coefficients are all coefficient names excluding group_column List<String> fixedCoeffNames = allCoeffNames.stream().filter(x -> !groupCoeffNames.contains(x)).collect(Collectors.toList()); fixedCoeffNames.add("intercept"); _fixedCoeffNames = fixedCoeffNames.stream().toArray(String[]::new); List<String> randomPredictorNames = new ArrayList<>(); if (_randomSlopeToo) { // random coefficients names int[] randomColumnsIndicesSorted = Arrays.stream(_parms._random_columns).mapToInt(x -> predictorNames.indexOf(x)).toArray(); Arrays.sort(randomColumnsIndicesSorted); _parms._random_columns = Arrays.stream(randomColumnsIndicesSorted).mapToObj(x -> predictorNames.get(x)).toArray(String[]::new); for (String coefName : _parms._random_columns) { String startCoef = coefName + "."; randomPredictorNames.addAll(allCoeffNames.stream().filter(x -> x.startsWith(startCoef) || x.equals(coefName)).collect(Collectors.toList())); } } if (_parms._random_intercept) randomPredictorNames.add("intercept"); _randomCoeffNames = randomPredictorNames.stream().toArray(String[]::new); _numLevel2Units = _level2UnitNames.length; _numFixedCoeffs = _fixedCoeffNames.length; _numRandomCoeffs = _randomCoeffNames.length; setPredXInterceptIndices(predictorNames); } @Override public void map(Chunk[] chks) { if(_job != null && _job.stop_requested()) return; initializeArraysVar(); double y; double[] xji = MemoryManager.malloc8d(_numFixedCoeffs); double[] zji = MemoryManager.malloc8d(_numRandomCoeffs); int level2Index; int chkLen = chks[0].len(); DataInfo.Row r = _dinfo.newDenseRow(); for (int rowInd = 0; rowInd < chkLen; rowInd++) { _dinfo.extractDenseRow(chks, rowInd, r); if (!r.isBad() && !(r.weight == 0)) { y = r.response(0); _YjTYjSum += y * y; _nobs++; _weightedSum += r.weight; level2Index = _parms._use_all_factor_levels ? r.binIds[_level2UnitIndex] - _dinfo._catOffsets[_level2UnitIndex] : (int) chks[_level2UnitIndex].at8(rowInd); fillInFixedRowValues(r, xji, _parms, _fixedCatIndices, _level2UnitIndex, _numLevel2Units, _predStartIndexFixed, _dinfo); // read in predictors for fixed coefficient effects fillInRandomRowValues(r, zji, _parms, _randomCatIndices, _randomNumIndices, _randomCatArrayStartIndices, _predStartIndexRandom, _dinfo, _randomSlopeToo, _parms._random_intercept); // read in random coefficient effects formFixedMatricesVectors(level2Index, xji, y, _AfjTYj, _AfjTAfj); // form _AfjTYj, _AfjTAfj formFixedMatricesVectors(level2Index, zji, y, _ArjTYj, _ArjTArj); // form ArjTYj, _ArjTArj outerProductCum(_AfjTArj[level2Index], xji, zji); // form AfjTArj } } } /** * It does two things: * a. form output product of one row of data set (matMat[level2Ind]) * b. 
form product of one row of data and response y. */ void formFixedMatricesVectors(int level2Ind, double[] xji, double y, double[][] matVec, double[][][] matMat) { outputProductSymCum(matMat[level2Ind], xji); multCum(xji, matVec[level2Ind], y); } static void fillInRandomRowValues(DataInfo.Row r, double[] zji, HGLMModel.HGLMParameters parms, int[] randomCatIndices, int[] randomNumIndices, int[] randomCatArrayStartIndices, int predStartIndexRandom, DataInfo dinfo, boolean randomSlopeToo, boolean randomIntercept) { // read in predictors for random coefficient effects Arrays.fill(zji, 0.0); int catPredInd; int startEnumInd = 0; int catVal; if (randomSlopeToo) { if (randomCatIndices != null) { for (int catInd = 0; catInd < randomCatIndices.length; catInd++) { catPredInd = randomCatIndices[catInd]; catVal = r.binIds[catPredInd]; if (!parms._use_all_factor_levels) { RowInfo rowInfo = grabCatIndexVal(r, startEnumInd, catPredInd, dinfo); catVal = rowInfo._catVal; startEnumInd = rowInfo._rowEnumInd; } if (catVal >= 0) zji[catVal - dinfo._catOffsets[catPredInd] + randomCatArrayStartIndices[catInd]] = 1; } } if (randomNumIndices != null) for (int numInd = 0; numInd < randomNumIndices.length; numInd++) zji[numInd + predStartIndexRandom] = r.numVals[randomNumIndices[numInd] - dinfo._cats]; } if (randomIntercept) zji[zji.length - 1] = 1.0; } public static void fillInFixedRowValues(DataInfo.Row r, double[] xji, HGLMModel.HGLMParameters parms, int[] fixedCatIndices, int level2UnitIndex, int numLevel2Units, int predStartIndexFixed, DataInfo dinfo) { // read in predictors for fixed coefficient effects Arrays.fill(xji, 0.0); int startEnumInd = 0; int catPredInd; int catVal; if (r.nBins > 1) { // will always have at least one enum column for (int catInd = 0; catInd < fixedCatIndices.length; catInd++) { catPredInd = fixedCatIndices[catInd]; catVal = r.binIds[catPredInd]; if (!parms._use_all_factor_levels) { RowInfo rowInfo = grabCatIndexVal(r, startEnumInd, catPredInd, dinfo); catVal = rowInfo._catVal; startEnumInd = rowInfo._rowEnumInd; } if (catVal > -1) { if (catPredInd < level2UnitIndex) { xji[catVal] = 1; } else if (catPredInd > level2UnitIndex) { xji[catVal - (parms._use_all_factor_levels ? 
numLevel2Units : (numLevel2Units - 1))] = 1; } } } } for (int numInd = 0; numInd < r.nNums; numInd++) { xji[numInd + predStartIndexFixed] = r.numVals[numInd]; } xji[xji.length - 1] = 1.0; // for intercept } public static RowInfo grabCatIndexVal(DataInfo.Row r, int startEnumInd, int enumIndexOfInterest, DataInfo dinfo) { int startInd = startEnumInd; for (int index = startEnumInd; index < r.nBins; index++) { if (dinfo._catOffsets[enumIndexOfInterest] <= r.binIds[index] && r.binIds[index] < dinfo._catOffsets[enumIndexOfInterest + 1]) return new RowInfo(index, r.binIds[index]); if (r.binIds[index] >= dinfo._catOffsets[enumIndexOfInterest + 1]) return new RowInfo(index, -1); startInd = index; } return new RowInfo(startInd, -1); } static class RowInfo { int _rowEnumInd; int _catVal; public RowInfo(int rowEnumInd, int catVal) { _rowEnumInd = rowEnumInd; _catVal = catVal; } } void initializeArraysVar() { _YjTYjSum = 0; _nobs = 0; _weightedSum = 0.0; _AfjTYj = MemoryManager.malloc8d(_numLevel2Units, _numFixedCoeffs); _ArjTYj = MemoryManager.malloc8d(_numLevel2Units, _numRandomCoeffs); _AfjTAfj = MemoryManager.malloc8d(_numLevel2Units, _numFixedCoeffs, _numFixedCoeffs); _ArjTArj = MemoryManager.malloc8d(_numLevel2Units, _numRandomCoeffs, _numRandomCoeffs); _AfjTArj = MemoryManager.malloc8d(_numLevel2Units, _numFixedCoeffs, _numRandomCoeffs); } @Override public void reduce(ComputationEngineTask otherTask) { _YjTYjSum += otherTask._YjTYjSum; _nobs += otherTask._nobs; _weightedSum += otherTask._weightedSum; add(_AfjTYj, otherTask._AfjTYj); add(_ArjTYj, otherTask._ArjTYj); add(_AfjTAfj, otherTask._AfjTAfj); add(_ArjTArj, otherTask._ArjTArj); add(_AfjTArj, otherTask._AfjTArj); } @Override public void postGlobal() { _ArjTAfj = new double[_numLevel2Units][][]; _AfjTYjSum = MemoryManager.malloc8d(_numFixedCoeffs); _AfTAftInvAfjTYj = MemoryManager.malloc8d(_numFixedCoeffs); _oneOverJ = 1.0 / _numLevel2Units; _oneOverN = 1.0 / _nobs; double[][] sumAfjAfj = MemoryManager.malloc8d(_numFixedCoeffs, _numFixedCoeffs); sumAfjAfjAfjTYj(_AfjTAfj, _AfjTYj, sumAfjAfj, _AfjTYjSum); for (int index = 0; index < _numLevel2Units; index++) _ArjTAfj[index] = new Matrix(_AfjTArj[index]).transpose().getArray(); _zTTimesZ = fillZTTimesZ(_ArjTArj); if (_parms._max_iterations > 0) { // only proceed if max_iterations is not zero _AfTAftInv = (new Matrix(sumAfjAfj)).inverse().getArray(); matrixVectorMult(_AfTAftInvAfjTYj, _AfTAftInv, _AfjTYjSum); } } public static void sumAfjAfjAfjTYj(double[][][] afjTAfj, double[][] afjTYj, double[][] sumAfjAfj, double[] sumAfjTYj) { int numLevel2 = afjTAfj.length; for (int index=0; index<numLevel2; index++) { add(sumAfjAfj, afjTAfj[index]); add(sumAfjTYj, afjTYj[index]); } } } }
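ComputationEngineTask above walks the training frame once and accumulates, per level-2 unit j, the cross products needed later (AfjTAfj, AfjTYj, ArjTArj, ArjTYj, AfjTArj), then reduces the partial sums across chunks. A minimal standalone sketch (hypothetical names, plain arrays instead of the MRTask machinery) of that accumulation for the fixed-effect blocks:

```java
// Hypothetical standalone sketch of the per-level-2-unit accumulation done by
// ComputationEngineTask.formFixedMatricesVectors: for every row (x, y) of group j,
//   AfjTAfj[j] += x * x^T   (outer product of the expanded predictor row)
//   AfjTYj[j]  += y * x     (predictor row scaled by the response)
public class CrossProductAccumSketch {
  public static void main(String[] args) {
    int numGroups = 2, numCoefs = 3;
    double[][][] afjTAfj = new double[numGroups][numCoefs][numCoefs];
    double[][] afjTYj = new double[numGroups][numCoefs];
    // toy rows: {group, x0, x1, intercept, y}
    double[][] rows = {
        {0, 1.0, 2.0, 1.0, 3.0},
        {0, 0.5, 1.0, 1.0, 2.0},
        {1, 2.0, 0.0, 1.0, 1.0},
    };
    for (double[] row : rows) {
      int j = (int) row[0];
      double y = row[4];
      double[] x = {row[1], row[2], row[3]};
      for (int a = 0; a < numCoefs; a++) {
        afjTYj[j][a] += y * x[a];
        for (int b = 0; b < numCoefs; b++) afjTAfj[j][a][b] += x[a] * x[b];
      }
    }
    System.out.println("AfjTYj[0] = " + java.util.Arrays.toString(afjTYj[0])); // [4.0, 8.0, 5.0]
  }
}
```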
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/hglm/HGLMUtils.java
package hex.hglm; import Jama.Matrix; import water.DKV; import water.Key; import water.fvec.Frame; import water.util.ArrayUtils; import java.util.Arrays; import static water.util.ArrayUtils.*; public class HGLMUtils { // the doc = document attached to https://github.com/h2oai/h2o-3/issues/8487, title HGLM_H2O_Implementation.pdf // I will be referring to the doc and different parts of it to explain my implementation. public static double[][][] copy3DArray(double[][][] src) { int firstInd = src.length; double[][][] dest = new double[firstInd][][]; for (int index=0; index<firstInd; index++) dest[index] = copy2DArray(src[index]); return dest; } public static void grabInitValuesFromFrame(Key frameKey, double[][] ubeta) { int numRow = ubeta.length; int numCol = ubeta[0].length; Frame randomEffects = DKV.getGet(frameKey); if (randomEffects.numRows() != numRow || randomEffects.numCols() != numCol) throw new IllegalArgumentException("initial_random_effects: Initial random coefficients must be" + " a double[][] array of size "+numRow+" rows and "+numCol+" columns" + " but is not."); final ArrayUtils.FrameToArray f2a = new ArrayUtils.FrameToArray(0, numCol-1, numRow, ubeta); f2a.doAll(randomEffects).getArray(); } public static void setDiagValues(double[][] tMat, double tauUVar) { int matSize = tMat.length; for (int index=0; index<matSize; index++) { assert matSize == tMat[index].length; tMat[index][index] = tauUVar; } } public static boolean equal2DArrays(double[][] arr1, double[][] arr2, double threshold) { int dim1 = arr1.length; int dim2; assert dim1 == arr2.length : "arrays first dimension are different."; for (int ind1 = 0; ind1 < dim1; ind1++) { dim2 = arr1[ind1].length; assert dim2 == arr2[ind1].length : "arrays second dimension are different."; for (int ind2 = 0; ind2 < dim2; ind2++) if (Math.abs(arr1[ind1][ind2]-arr2[ind1][ind2]) > threshold) return false; } return true; } public static double[][] generateTInverse(double[][] tMat) { Matrix tMatrix = new Matrix(tMat); return tMatrix.inverse().getArray(); } public static double[][][] generateCJInverse(double[][][] arjTArj, double tauEVar, double[][] tMatInv) { int numLevel2Unit = arjTArj.length; double[][][] cJInverse = new double[numLevel2Unit][][]; int arjTArjSize = arjTArj[0].length; double[][] tempResult = new double[arjTArjSize][arjTArjSize]; double[][] sigmaTimestMatInv = new double[arjTArjSize][arjTArjSize]; mult(tMatInv, sigmaTimestMatInv, tauEVar); for (int index = 0; index < numLevel2Unit; index++) { add(tempResult, arjTArj[index], sigmaTimestMatInv); cJInverse[index] = new Matrix(tempResult).inverse().getArray(); } return cJInverse; } /** * Note that the term ArjTYj and ArjTAfj are fixed and won't change. 
They are stored in engineTask */ public static double[][] estimateNewRandomEffects(double[][][] cjInv, double[][] ArjTYj, double[][][] ArjTAfj, double[] beta) { int numLevel2Unit = cjInv.length; int numRandCoef = cjInv[0].length; double[][] ubeta = new double[numLevel2Unit][numRandCoef]; double[] arjTafjbeta = new double[numRandCoef]; double[] result = new double[numRandCoef]; for (int index=0; index < numLevel2Unit; index++) { matrixVectorMult(arjTafjbeta, ArjTAfj[index], beta); // ArjTAfj*betaUtil minus(result, ArjTYj[index], arjTafjbeta); // (ArjTYj-ArjTAfj*beta) matrixVectorMult(ubeta[index], cjInv[index], result); Arrays.fill(arjTafjbeta, 0.0); } return ubeta; } public static double[] estimateFixedCoeff(double[][] AfjTAfjSumInv, double[] AfjTYjSum, double[][][] AfjTArj, double[][] ubeta) { int numLevel2 = ubeta.length; int numFixedCoeffs = AfjTAfjSumInv.length; double[] betaFixed = new double[numFixedCoeffs]; double[] AfjTArjTimesBrj = new double[numFixedCoeffs]; for (int index=0; index<numLevel2; index++) { matrixVectorMult(AfjTArjTimesBrj, AfjTArj[index], ubeta[index]); } minus(AfjTArjTimesBrj, AfjTYjSum, AfjTArjTimesBrj); matrixVectorMult(betaFixed, AfjTAfjSumInv, AfjTArjTimesBrj); return betaFixed; } public static double[][] estimateNewtMat(double[][] ubeta, double tauEVar, double[][][] cJInv, double oneOverNumLevel2) { int numLevel2 = ubeta.length; int numRandCoef = ubeta[0].length; double[][] tmat = new double[numRandCoef][numRandCoef]; double[][] tempCInvj = new double[numRandCoef][numRandCoef]; for (int index=0; index<numLevel2; index++) { outputProductSymCum(tmat, ubeta[index]); add(tempCInvj, cJInv[index]); } mult(tempCInvj, tauEVar); add(tmat, tempCInvj); mult(tmat, oneOverNumLevel2); return tmat; } public static double calTauEvarEq17(double residualSquare, double tauEVar, double[][][] cjInv, double[][][] arjTArj, double oneOverN) { int numLevel2 = cjInv.length; int numRandCoef = arjTArj[0].length; double[][] cInvArjTArj = new double[numRandCoef][numRandCoef]; for (int index=0; index<numLevel2; index++) matrixMult(cInvArjTArj, cjInv[index], arjTArj[index]); double sigmaTrace = tauEVar * trace(cInvArjTArj) ; return (residualSquare + sigmaTrace)*oneOverN; } public static double[][] fillZTTimesZ(double[][][] arjTArj) { int numLevel2 = arjTArj.length; int numRandCoef = arjTArj[0].length; int zSize = numLevel2 * numRandCoef; double[][] zTTimesZ = new double[zSize][zSize]; int startRowIndex; for (int leveIndex=0; leveIndex<numLevel2; leveIndex++) { startRowIndex = leveIndex*numRandCoef; for (int rInd=0; rInd<numRandCoef; rInd++) { System.arraycopy(arjTArj[leveIndex][rInd], 0, zTTimesZ[startRowIndex+rInd], startRowIndex, numRandCoef); } } return zTTimesZ; } public static boolean checkPositiveG(int numLevel2Units, double[][] tMat) { double[][] gMat = expandMat(tMat, numLevel2Units); return (new Matrix(gMat).det()) >= 0; } public static double[][] generateNewTmat(double[][] ubeta) { int numIndex2 = ubeta.length; double oneOverJ = 1.0/numIndex2; int numRandCoeff = ubeta[0].length; double[][] newTmat = new double[numRandCoeff][numRandCoeff]; for (int index=0; index<numIndex2; index++) { outerProductCum(newTmat, ubeta[index], ubeta[index]); } mult(newTmat, oneOverJ); return newTmat; } }
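The E-step helpers above (generateCJInverse and estimateNewRandomEffects) combine the precomputed cross products into the per-unit update u_j = (Arj^T Arj + tauEVar * T^-1)^-1 (Arj^T y_j - Arj^T Afj * beta). A minimal standalone sketch for a single 2-coefficient unit (hypothetical numbers; the Jama inverse is replaced by a hand-written 2x2 inverse):

```java
// Hypothetical standalone sketch of the E-step update implemented by
// HGLMUtils.generateCJInverse + estimateNewRandomEffects for one level-2 unit j:
//   Cj      = Arj^T Arj + tauEVar * T^-1
//   ubeta_j = Cj^-1 * (Arj^T yj - Arj^T Afj * beta)
public class RandomEffectUpdateSketch {
  // 2x2 matrix inverse, enough for this toy example
  static double[][] inv2x2(double[][] m) {
    double det = m[0][0] * m[1][1] - m[0][1] * m[1][0];
    return new double[][]{{m[1][1] / det, -m[0][1] / det}, {-m[1][0] / det, m[0][0] / det}};
  }
  static double[] matVec(double[][] m, double[] v) {
    double[] r = new double[m.length];
    for (int i = 0; i < m.length; i++)
      for (int k = 0; k < v.length; k++) r[i] += m[i][k] * v[k];
    return r;
  }
  public static void main(String[] args) {
    double tauEVar = 0.5;
    double[][] tMatInv = {{2.0, 0.0}, {0.0, 2.0}}; // inverse of T = 0.5 * I
    double[][] arjTArj = {{4.0, 1.0}, {1.0, 3.0}}; // Arj^T Arj for unit j
    double[] arjTYj = {2.0, 1.5};                  // Arj^T yj
    double[] arjTAfjBeta = {0.5, 0.25};            // Arj^T Afj * beta, precomputed
    double[][] cj = new double[2][2];
    for (int a = 0; a < 2; a++)
      for (int b = 0; b < 2; b++) cj[a][b] = arjTArj[a][b] + tauEVar * tMatInv[a][b];
    double[] rhs = {arjTYj[0] - arjTAfjBeta[0], arjTYj[1] - arjTAfjBeta[1]};
    double[] ubetaJ = matVec(inv2x2(cj), rhs);
    System.out.println(java.util.Arrays.toString(ubetaJ)); // [0.25, 0.25]
  }
}
```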
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/hglm/MetricBuilderHGLM.java
package hex.hglm; import Jama.Matrix; import hex.*; import water.fvec.Frame; import water.util.ArrayUtils; import java.util.Arrays; import java.util.List; import static hex.glm.GLMModel.GLMParameters.MissingValuesHandling.*; public class MetricBuilderHGLM extends ModelMetricsSupervised.MetricBuilderSupervised<MetricBuilderHGLM> { // the doc = document attached to https://github.com/h2oai/h2o-3/issues/8487, title HGLM_H2O_Implementation.pdf // I will be referring to the doc and different parts of it to explain my implementation. public static final double LOG_2PI = Math.log(2*Math.PI); ModelMetrics.MetricBuilder _metricBuilder; // point to generic model metric classes final boolean _intercept; final boolean _random_intercept; final boolean _computeMetrics; public double[] _beta; public double[][] _ubeta; public double[][] _tmat; public double _yMinusFixPredSquare; public double _sse; public int _nobs; public MetricBuilderHGLM(String[] domain, boolean computeMetrics, boolean intercept, boolean random_intercept, HGLMModel.HGLMModelOutput output) { super(domain == null ? 0 : domain.length, domain); _intercept = intercept; _computeMetrics = computeMetrics; _random_intercept = random_intercept; _metricBuilder = new ModelMetricsRegression.MetricBuilderRegression(); // everything else goes back regression _beta = output._beta; _ubeta = output._ubeta; _tmat = output._tmat; } public double[] perRow(double[] ds, float[] yact, double weight, double offset, double[] xji, double[] zji, double[][] yMinusXTimesZ, int level2Index, Model m) { if (weight == 0) return ds; _metricBuilder.perRow(ds, yact, weight, offset, m); add2(yact[0], ds[0], weight, xji, zji, yMinusXTimesZ, level2Index, offset); return ds; } private void add2(double yresp, double predictedVal, double weight, double[] input, double[] randomInput, double[][] yMinusXTimesZ, int level2Index, double offset) { double temp = yresp- ArrayUtils.innerProduct(_beta, input)-offset; _yMinusFixPredSquare += temp*temp; ArrayUtils.add(yMinusXTimesZ[level2Index], ArrayUtils.mult(randomInput, temp)); _nobs++; temp = yresp-predictedVal; _sse += temp*temp; } @Override public void reduce(MetricBuilderHGLM other) { _metricBuilder.reduce(other._metricBuilder); _yMinusFixPredSquare += other._yMinusFixPredSquare; _sse += other._sse; _nobs += other._nobs; } @Override public double[] perRow(double[] ds, float[] yact, Model m) { return ds; } @Override public ModelMetrics makeModelMetrics(Model m, Frame f, Frame adaptedFrame, Frame preds) { HGLMModel hglmM = (HGLMModel) m; ModelMetrics mm = _metricBuilder.makeModelMetrics(hglmM, f, null, null); ModelMetricsRegression metricsRegression = (ModelMetricsRegression) mm; boolean forTraining = m._parms.train().getKey().equals(f.getKey()); double[][] tmat = hglmM._output._tmat; // already set with non-standardized random coefficients if (forTraining) { double loglikelihood = calHGLMLlg(metricsRegression._nobs, tmat, hglmM._output._tau_e_var, hglmM._output._arjtarj, this._yMinusFixPredSquare, hglmM._output._yMinusXTimesZ); mm = new ModelMetricsRegressionHGLM(m, f, metricsRegression._nobs, this.weightedSigma(), loglikelihood, this._customMetric, hglmM._output._iterations, hglmM._output._beta, hglmM._output._ubeta, tmat, hglmM._output._tau_e_var, metricsRegression._MSE, this._yMinusFixPredSquare / metricsRegression._nobs, metricsRegression.mae(), metricsRegression._root_mean_squared_log_error, metricsRegression._mean_residual_deviance, metricsRegression.aic()); } else { List<String> colNames = Arrays.asList(f.names()); boolean 
hasWeights = hglmM._parms._weights_column != null && colNames.contains(hglmM._parms._weights_column); boolean hasOffsets = hglmM._parms._offset_column != null && colNames.contains(hglmM._parms._offset_column); DataInfo dinfo = new DataInfo(adaptedFrame, null, 1, hglmM._parms._use_all_factor_levels, DataInfo.TransformType.NONE, DataInfo.TransformType.NONE, hglmM._parms.missingValuesHandling() == Skip, hglmM._parms.missingValuesHandling() == MeanImputation || hglmM._parms.missingValuesHandling() == PlugValues, hglmM._parms.makeImputer(), false, hasWeights, hasOffsets, false, null); HGLMTask.ComputationEngineTask engineTask = new HGLMTask.ComputationEngineTask(null, hglmM._parms, dinfo); engineTask.doAll(dinfo._adaptedFrame); double loglikelihood = calHGLMLlg(engineTask._nobs, tmat, hglmM._output._tau_e_var, engineTask._ArjTArj, this._yMinusFixPredSquare, hglmM._output._yMinusXTimesZValid); mm = new ModelMetricsRegressionHGLM(m, f, metricsRegression._nobs, this.weightedSigma(), loglikelihood, this._customMetric, hglmM._output._iterations, hglmM._output._beta, hglmM._output._ubeta, tmat, hglmM._output._tau_e_var,metricsRegression._MSE, this._yMinusFixPredSquare /metricsRegression._nobs, metricsRegression.mae(), metricsRegression._root_mean_squared_log_error, metricsRegression._mean_residual_deviance, metricsRegression.aic()); hglmM._output._nobs_valid = engineTask._nobs; } if (m != null) m.addModelMetrics(mm); return mm; } /** * * This method calculates the log-likelihood as described in section II.V of the doc. */ public static double calHGLMLlg(long nobs, double[][] tmat, double varResidual, double[][][] zjTTimesZj, double yMinsXFixSquared, double[][] yMinusXFixTimesZ) { int numLevel2 = zjTTimesZj.length; double[][] tmatInv = new Matrix(tmat).inverse().getArray(); double tmatDeterminant = new Matrix(tmat).det(); double oneOVar = 1.0 / varResidual; double oneOVarSq = oneOVar * oneOVar; double llg = nobs * LOG_2PI + oneOVar * yMinsXFixSquared; double[][] invTPlusZjTZ; Matrix yMinusXjFixed; Matrix yjMinusXjFixed; for (int ind2 = 0; ind2 < numLevel2; ind2++) { invTPlusZjTZ = calInvTPZjTZ(tmatInv, zjTTimesZj[ind2], oneOVar); llg += Math.log(varResidual * new Matrix(invTPlusZjTZ).det() * tmatDeterminant); yMinusXjFixed = new Matrix(new double[][]{yMinusXFixTimesZ[ind2]}); yjMinusXjFixed = yMinusXjFixed.times(new Matrix(invTPlusZjTZ).inverse().times(yMinusXjFixed.transpose())); llg -= oneOVarSq * yjMinusXjFixed.getArray()[0][0]; } return -0.5 * llg; } public static double[][] calInvTPZjTZ(double[][] tmatInv, double[][] zjTTimesZj, double oneOVar) { return new Matrix(tmatInv).plus(new Matrix(zjTTimesZj).times(oneOVar)).getArray(); } }
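For readability, this is the quantity that calHGLMLlg above computes, reconstructed directly from the code; the symbol names are my own. With sigma_e^2 = varResidual, T = tmat, Z_j^T Z_j = zjTTimesZj[j], r_j = yMinusXFixTimesZ[j] (that is, Z_j^T(y_j - X_j beta)), S = yMinsXFixSquared, and n = nobs:

$$
\log L \;=\; -\tfrac{1}{2}\Bigg[\, n\log(2\pi) \;+\; \frac{S}{\sigma_e^2}
\;+\; \sum_{j}\log\!\Big(\sigma_e^2\,\det\!\big(T^{-1} + \tfrac{1}{\sigma_e^2}Z_j^{\top}Z_j\big)\det(T)\Big)
\;-\; \frac{1}{\sigma_e^4}\sum_{j} r_j^{\top}\big(T^{-1} + \tfrac{1}{\sigma_e^2}Z_j^{\top}Z_j\big)^{-1} r_j \Bigg]
$$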
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/isotonic/IsotonicRegression.java
package hex.isotonic; import hex.ModelBuilder; import hex.ModelCategory; import hex.ModelMetrics; import water.fvec.Frame; import water.fvec.Vec; import water.udf.CFuncRef; import water.util.ArrayUtils; import water.util.FrameUtils; import water.util.TwoDimTable; import water.util.VecUtils; public class IsotonicRegression extends ModelBuilder<IsotonicRegressionModel, IsotonicRegressionModel.IsotonicRegressionParameters, IsotonicRegressionModel.IsotonicRegressionOutput> { @Override public ModelCategory[] can_build() { return new ModelCategory[]{ModelCategory.Regression}; } @Override public BuilderVisibility builderVisibility() { return BuilderVisibility.Experimental; } @Override public boolean isSupervised() { return true; } // for ModelBuilder registration public IsotonicRegression(boolean startup_once) { super(new IsotonicRegressionModel.IsotonicRegressionParameters(), startup_once); } public IsotonicRegression(IsotonicRegressionModel.IsotonicRegressionParameters parms) { super(parms); init(false); } @Override public void init(boolean expensive) { super.init(expensive); if (train() != null) { if (numFeatureCols() != 1) { error("_train", "Training frame for Isotonic Regression can only have a single feature column, " + "training frame columns: " + ArrayUtils.toStringQuotedElements(train().names())); } if (expensive) { Vec resp = response(); if (resp != null && (resp.naCnt() > 0)) { error("_response_column", "Isotonic Regression doesn't support NA values in response."); } if (numFeatureCols() == 1) { Vec xVec = train().vec(0); if (xVec != null && (xVec.naCnt() > 0)) { error("_response_column", "Isotonic Regression doesn't support NA values in feature column '" + train().name(0) + "'."); } } } } } private int numFeatureCols() { return train().numCols() - (numSpecialCols() + 1 /*response*/); } @Override protected IsotonicRegressionDriver trainModelImpl() { return new IsotonicRegressionDriver(); } private class IsotonicRegressionDriver extends Driver { @Override public void computeImpl() { IsotonicRegressionModel model = null; Frame thresholds = null; Vec weights = null; try { init(true); // The model to be built model = new IsotonicRegressionModel(dest(), _parms, new IsotonicRegressionModel.IsotonicRegressionOutput(IsotonicRegression.this)); model.delete_and_lock(_job); Vec xVec = _train.vec(0); weights = hasWeightCol() ? 
_weights : _train.anyVec().makeCon(1.0); VecUtils.MinMaxTask minMax = VecUtils.findMinMax(xVec, weights); model._output._min_x = minMax._min; model._output._max_x = minMax._max; Frame fr = new Frame(); fr.add("y", response()); fr.add("X", xVec); fr.add("w", weights); thresholds = PoolAdjacentViolatorsDriver.runPAV(fr); model._output._nobs = weights.nzCnt(); model._output._thresholds_y = FrameUtils.asDoubles(thresholds.vec(0)); model._output._thresholds_x = FrameUtils.asDoubles(thresholds.vec(1)); _job.update(1); model.update(_job); model.score(_parms.train(), null, CFuncRef.from(_parms._custom_metric_func)).delete(); model._output._training_metrics = ModelMetrics.getFromDKV(model, _parms.train()); if (valid() != null) { _job.update(0,"Scoring validation frame"); model.score(_parms.valid(), null, CFuncRef.from(_parms._custom_metric_func)).delete(); model._output._validation_metrics = ModelMetrics.getFromDKV(model, _parms.valid()); } model._output._model_summary = generateSummary(model._output); model.update(_job); } finally { if (model != null) { model.unlock(_job); } if (thresholds != null) { thresholds.delete(); } if (weights != null && !hasWeightCol()) { weights.remove(); } } } } private TwoDimTable generateSummary(IsotonicRegressionModel.IsotonicRegressionOutput output) { String[] names = new String[]{"Number of Observations", "Number of Thresholds"}; String[] types = new String[]{"long", "long"}; String[] formats = new String[]{"%d", "%d"}; TwoDimTable summary = new TwoDimTable("Isotonic Regression Model", "summary", new String[]{""}, names, types, formats, ""); summary.set(0, 0, output._nobs); summary.set(0, 1, output._thresholds_x.length); return summary; } @Override public boolean haveMojo() { return true; } }
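A minimal sketch of how the builder above is typically driven from Java, assuming a Frame fr with a single numeric feature column and a response column "y" is already in the DKV; the class name, variable names and column names here are hypothetical, and cluster startup / Scope management is omitted.

import hex.isotonic.IsotonicRegression;
import hex.isotonic.IsotonicRegressionModel;
import water.fvec.Frame;

public class IsotonicFitSketch {
    // Train an isotonic regression on a frame that has exactly one feature column plus the response.
    public static IsotonicRegressionModel fit(Frame fr) {
        IsotonicRegressionModel.IsotonicRegressionParameters parms =
                new IsotonicRegressionModel.IsotonicRegressionParameters();
        parms._train = fr._key;
        parms._response_column = "y";
        IsotonicRegressionModel model = new IsotonicRegression(parms).trainModel().get();
        // The PAV thresholds found by the driver are exposed on the model output.
        System.out.println("learned " + model._output._thresholds_x.length + " thresholds on ["
                + model._output._min_x + ", " + model._output._max_x + "]");
        return model;
    }
}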
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/isotonic/IsotonicRegressionModel.java
package hex.isotonic; import hex.*; import hex.genmodel.algos.isotonic.IsotonicCalibrator; import hex.genmodel.algos.isotonic.IsotonicRegressionUtils; import water.Job; import water.Key; import water.fvec.Frame; import water.udf.CFuncRef; public class IsotonicRegressionModel extends Model<IsotonicRegressionModel, IsotonicRegressionModel.IsotonicRegressionParameters, IsotonicRegressionModel.IsotonicRegressionOutput> { public IsotonicRegressionModel(Key<IsotonicRegressionModel> selfKey, IsotonicRegressionParameters parms, IsotonicRegressionOutput output) { super(selfKey, parms, output); } @Override protected Model<IsotonicRegressionModel, IsotonicRegressionParameters, IsotonicRegressionOutput>.BigScore makeBigScoreTask(String[][] domains, String[] names, Frame adaptFrm, boolean computeMetrics, boolean makePrediction, Job j, CFuncRef customMetricFunc) { return super.makeBigScoreTask(domains, names, adaptFrm, computeMetrics, makePrediction, j, customMetricFunc); } public enum OutOfBoundsHandling { NA, Clip } public static class IsotonicRegressionParameters extends Model.Parameters { public String algoName() { return "IsotonicRegression"; } public String fullName() { return "Isotonic Regression"; } public String javaName() { return IsotonicRegressionModel.class.getName(); } @Override public long progressUnits() { return 1; } public OutOfBoundsHandling _out_of_bounds = OutOfBoundsHandling.NA; } public static class IsotonicRegressionOutput extends Model.Output { public long _nobs; public double[] _thresholds_y; public double[] _thresholds_x; public double _min_x; public double _max_x; public IsotonicRegressionOutput(IsotonicRegression b) { super(b); } @Override public ModelCategory getModelCategory() { return ModelCategory.Regression; } @Override public String[] classNames() { return null; } } @Override public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) { return new ModelMetricsRegression.MetricBuilderRegression<>(); } @Override protected double[] score0(double[] data, double[] preds) { final double x = _parms._out_of_bounds == OutOfBoundsHandling.Clip ? clip(data[0]) : data[0]; preds[0] = IsotonicRegressionUtils.score(x, _output._min_x, _output._max_x, _output._thresholds_x, _output._thresholds_y); return preds; } private double clip(double x) { return IsotonicRegressionUtils.clip(x, _output._min_x, _output._max_x); } @Override protected String[] makeScoringNames() { return new String[]{"predict"}; } @Override protected String[][] makeScoringDomains(Frame adaptFrm, boolean computeMetrics, String[] names) { return new String[1][]; } public IsotonicCalibrator toIsotonicCalibrator() { return new IsotonicCalibrator( _output._min_x, _output._max_x, _output._thresholds_x, _output._thresholds_y ); } @Override public boolean haveMojo() { return true; } @Override public IsotonicRegressionMojoWriter getMojo() { return new IsotonicRegressionMojoWriter(this); } }
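For orientation, the scoring path in score0 above reduces to a lookup against the stored thresholds. The sketch below spells out the behaviour this implies; the linear interpolation and the NaN outside [_min_x, _max_x] are assumptions about IsotonicRegressionUtils.score made for illustration (it also assumes the first and last thresholds coincide with _min_x and _max_x), not a copy of the genmodel code.

import java.util.Arrays;

public class IsotonicScoreSketch {
    // Hypothetical re-statement of the scoring rule: NaN outside the training range
    // (the OutOfBoundsHandling.NA path), otherwise interpolate between neighbouring thresholds.
    static double scoreSketch(double x, double minX, double maxX, double[] tx, double[] ty) {
        if (Double.isNaN(x) || x < minX || x > maxX) return Double.NaN;
        int i = Arrays.binarySearch(tx, x);
        if (i >= 0) return ty[i];                 // exact threshold hit
        int hi = -i - 1, lo = hi - 1;             // bracketing thresholds
        double t = (x - tx[lo]) / (tx[hi] - tx[lo]);
        return ty[lo] + t * (ty[hi] - ty[lo]);
    }

    public static void main(String[] args) {
        double[] tx = {0.0, 1.0, 3.0}, ty = {0.5, 0.5, 2.0};
        System.out.println(scoreSketch(2.0, 0.0, 3.0, tx, ty)); // 1.25 - halfway between 0.5 and 2.0
        System.out.println(scoreSketch(5.0, 0.0, 3.0, tx, ty)); // NaN  - outside [min_x, max_x]
        // With OutOfBoundsHandling.Clip, score0 first clamps x into [min_x, max_x],
        // so 5.0 would be scored as 3.0 and, under these assumptions, return 2.0 instead of NaN.
    }
}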
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/isotonic/IsotonicRegressionMojoWriter.java
package hex.isotonic; import hex.ModelMojoWriter; import java.io.IOException; public class IsotonicRegressionMojoWriter extends ModelMojoWriter<IsotonicRegressionModel, IsotonicRegressionModel.IsotonicRegressionParameters, IsotonicRegressionModel.IsotonicRegressionOutput> { @SuppressWarnings("unused") // Called through reflection in ModelBuildersHandler public IsotonicRegressionMojoWriter() {} public IsotonicRegressionMojoWriter(IsotonicRegressionModel model) { super(model); } @Override public String mojoVersion() { return "1.00"; } @Override protected void writeModelData() throws IOException { write(model.toIsotonicCalibrator()); } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/isotonic/PoolAdjacentViolators.java
package hex.isotonic; import water.fvec.NewChunk; import water.util.ArrayUtils; /** * Implements Pool Adjacent Violators algorithm suitable for parallelization * using <a href="https://link.springer.com/chapter/10.1007/978-3-642-99789-1_10">An Approach to Parallelizing Isotonic Regression</a>. * * Loosely follows <a href="https://github.com/apache/spark/blob/a2c7b2133cfee7fa9abfaa2bfbfb637155466783/mllib/src/main/scala/org/apache/spark/mllib/regression/IsotonicRegression.scala">Spark implementation</a> */ class PoolAdjacentViolators { /** * Block weights */ private final double[] _ws; /** * mean target/response in the block */ private final double[] _wYs; /** * Block metadata: * - there are 2 types of blocks - unmerged and merged * - for unmerged: * for i (= start of the block): * _blocks[i] species where the given block ends (inclusive) * initially blocks are singletons [i, i], meaning _blocks[i] = i * _blocks[i] + 1 is thus beginning of the next block * - for merged: * for i (= current end of the block) * _blocks[i] is a negative reference to the original start of the block * (- _blocks[i - 1] - 1) maps to the current start of the block * allows us to (quickly) walk backwards - get reference to the previous block * we are using negative numbers for debugging purposes (we can quickly see which * blocks were merged) */ private final int[] _blocks; PoolAdjacentViolators(double[] ys) { this(ys, null); } public PoolAdjacentViolators(double[] ys, double[] ws) { _ws = ws != null ? ws.clone() // make a copy - we will modify the weights : ArrayUtils.constAry(ys.length, 1.0); _wYs = new double[_ws.length]; for (int i = 0; i < _ws.length; i++) { _wYs[i] = _ws[i] * ys[i]; } _blocks = ArrayUtils.seq(0, ys.length); } void findThresholds(double[] xs, NewChunk[] ncs) { findThresholds(xs, ncs[0], ncs[1], ncs[2]); } void findThresholds(double[] xs, NewChunk outYs, NewChunk outXs, NewChunk outWs) { mergeViolators(); outputThresholds(xs, outYs, outXs, outWs); } void mergeViolators() { for (int block = 0; next(block) < _blocks.length; ) { if (meanY(block) >= meanY(next(block))) { mergeWithNext(block); while ((block > 0) && (meanY(prev(block)) >= meanY(block))) { block = prev(block); mergeWithNext(block); } } else { block = next(block); } } } void outputThresholds(double[] xs, NewChunk outYs, NewChunk outXs, NewChunk outWs) { for (int i = 0; i < xs.length; i = next(i)) { if (xs[_blocks[i]] > xs[i]) { outYs.addNum(meanY(i)); outXs.addNum(xs[i]); outWs.addNum(_ws[i] / 2); outYs.addNum(meanY(i)); outXs.addNum(xs[_blocks[i]]); outWs.addNum(_ws[i] / 2); } else { outYs.addNum(meanY(i)); outXs.addNum(xs[i]); outWs.addNum(_ws[i]); } } } int next(int b) { return _blocks[b] + 1; } int prev(int b) { if (_blocks[b - 1] == b - 1) // unmerged singleton block return b - 1; int ref = _blocks[b - 1]; if (ref >= 0) throw new IllegalStateException("Block representation is broken, " + "expected a negative encoded block reference, instead got: " + ref + " for block " + b + "."); return -ref-1; } void mergeWithNext(int b1) { final int b2 = _blocks[b1] + 1; _blocks[b1] = _blocks[b2]; _blocks[_blocks[b2]] = -b1-1; _ws[b1] += _ws[b2]; _wYs[b1] += _wYs[b2]; } double meanY(int b) { return _wYs[b] / _ws[b]; } }
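To make the pooling concrete: for ys = {1, 3, 2, 4} with unit weights, the only adjacent violation is (3, 2), so mergeWithNext(1) turns _blocks from [0, 1, 2, 3] into [0, 2, -2, 3] (the block starting at 1 now ends at 2, and _blocks[2] = -2 encodes the back-reference -1-1 to its start), and the fitted values become 1, 2.5, 2.5, 4. The standalone sketch below, a hypothetical array-based simplification rather than the chunk-based class above, reproduces the same weighted-mean pooling for quick experimentation.

import java.util.Arrays;

public class PavArraySketch {
    // Weighted pool-adjacent-violators on plain arrays (inputs already sorted by x).
    // Returns the isotonic fit, i.e. every row replaced by the weighted mean of its block.
    static double[] isotonicFit(double[] ys, double[] ws) {
        int n = ys.length;
        double[] w = ws.clone(), wy = new double[n];
        int[] len = new int[n];                       // block length, kept at each block start
        int[] start = new int[n];                     // stack of block start indices
        int blocks = 0;
        for (int i = 0; i < n; i++) {
            wy[i] = ws[i] * ys[i];
            len[i] = 1;
            start[blocks++] = i;
            // merge while the previous block's mean >= the newest block's mean (ties merge too)
            while (blocks > 1
                    && wy[start[blocks - 2]] / w[start[blocks - 2]] >= wy[start[blocks - 1]] / w[start[blocks - 1]]) {
                int a = start[blocks - 2], b = start[blocks - 1];
                w[a] += w[b]; wy[a] += wy[b]; len[a] += len[b];
                blocks--;
            }
        }
        double[] fit = new double[n];
        for (int b = 0; b < blocks; b++) {
            double mean = wy[start[b]] / w[start[b]];
            for (int j = 0; j < len[start[b]]; j++) fit[start[b] + j] = mean;
        }
        return fit;
    }

    public static void main(String[] args) {
        System.out.println(Arrays.toString(
                isotonicFit(new double[]{1, 3, 2, 4}, new double[]{1, 1, 1, 1})));
        // -> [1.0, 2.5, 2.5, 4.0]
    }
}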
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/isotonic/PoolAdjacentViolatorsDriver.java
package hex.isotonic; import water.*; import water.fvec.*; import static water.rapids.Merge.sort; import java.util.Arrays; /** * Distributed implementation of Pool Adjacent Violators algorithm * for H2O Frames */ public class PoolAdjacentViolatorsDriver { public static Frame runPAV(Frame fr) { // y, X, w if (fr.numCols() != 3) { throw new IllegalArgumentException("Input frame is expected to have 3 columns: y, X, weights."); } if (fr.lastVec().min() < 0) { throw new IllegalArgumentException("Weights cannot be negative."); } Frame sorted = null; Frame local = null; Frame single = null; try { sorted = sort(fr, new int[]{1, 0}); local = pav(sorted); single = RebalanceDataSet.toSingleChunk(local); return pav(single); } finally { Futures fs = new Futures(); if (sorted != null) sorted.remove(fs); if (local != null) local.remove(fs); if (single != null) single.remove(fs); fs.blockForPending(); } } static Frame pav(Frame fr) { return new PoolAdjacentViolatorsTask() .doAll(3, Vec.T_NUM, fr).outputFrame(); } static class PoolAdjacentViolatorsTask extends MRTask<PoolAdjacentViolatorsTask> { @Override public void map(Chunk[] cs, NewChunk[] ncs) { assert cs.length == 3; Chunk weightChunk = cs[2]; int len = 0; int[] idx = new int[weightChunk._len]; for (int i = 0; i < idx.length; i++) { if (weightChunk.isNA(i)) continue; double w = weightChunk.atd(i); if (w != 0) { idx[len++] = i; } } idx = Arrays.copyOf(idx, len); double[] ys = cs[0].getDoubles(MemoryManager.malloc8d(len), idx); double[] xs = cs[1].getDoubles(MemoryManager.malloc8d(len), idx); double[] ws = cs[2].getDoubles(MemoryManager.malloc8d(len), idx); new PoolAdjacentViolators(ys, ws).findThresholds(xs, ncs); } } }
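runPAV above follows the two-stage recipe from the cited parallelization paper: each chunk is pooled locally, the pooled blocks are treated as single weighted points, and one more pass over the rebalanced, single-chunk partial results finishes the job. The hypothetical sketch below walks that recipe through on plain arrays so the weight bookkeeping is visible; on this example the two-stage result matches a single pass over the full data.

import java.util.ArrayList;
import java.util.List;

public class TwoStagePavSketch {
    // Weighted PAV on plain arrays (already sorted by x); returns pooled blocks as {mean, weight}.
    static List<double[]> pool(double[] ys, double[] ws) {
        List<double[]> blocks = new ArrayList<>();               // each entry: {sum(w*y), sum(w)}
        for (int i = 0; i < ys.length; i++) {
            blocks.add(new double[]{ws[i] * ys[i], ws[i]});
            while (blocks.size() > 1) {
                double[] a = blocks.get(blocks.size() - 2), b = blocks.get(blocks.size() - 1);
                if (a[0] / a[1] < b[0] / b[1]) break;            // monotone - stop merging
                a[0] += b[0]; a[1] += b[1];                      // pool the violating pair
                blocks.remove(blocks.size() - 1);
            }
        }
        List<double[]> out = new ArrayList<>();
        for (double[] blk : blocks) out.add(new double[]{blk[0] / blk[1], blk[1]});
        return out;
    }

    public static void main(String[] args) {
        // Stage 1: pool each "chunk" of the sorted data on its own.
        List<double[]> partial = new ArrayList<>();
        partial.addAll(pool(new double[]{1, 3, 2}, new double[]{1, 1, 1}));   // -> (1.0, 1), (2.5, 2)
        partial.addAll(pool(new double[]{4, 0, 5}, new double[]{1, 1, 1}));   // -> (2.0, 2), (5.0, 1)
        // Stage 2: pool the concatenated partial results, carrying the block weights along.
        double[] ys = new double[partial.size()], ws = new double[partial.size()];
        for (int i = 0; i < partial.size(); i++) { ys[i] = partial.get(i)[0]; ws[i] = partial.get(i)[1]; }
        for (double[] blk : pool(ys, ws))
            System.out.println(blk[0] + " (weight " + blk[1] + ")");
        // Prints means 1.0, 2.25, 5.0 - the same blocks a single pass over {1,3,2,4,0,5} produces.
    }
}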
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/kmeans/KMeans.java
package hex.kmeans; import hex.*; import hex.util.LinearAlgebraUtils; import org.joda.time.format.DateTimeFormat; import org.joda.time.format.DateTimeFormatter; import water.*; import water.exceptions.H2OModelBuilderIllegalArgumentException; import water.fvec.Chunk; import water.fvec.Frame; import water.fvec.Vec; import water.util.*; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Random; import static hex.genmodel.GenModel.Kmeans_preprocessData; /** * Scalable K-Means++ (KMeans||)<br> * http://theory.stanford.edu/~sergei/papers/vldb12-kmpar.pdf<br> * http://www.youtube.com/watch?v=cigXAxV3XcY */ public class KMeans extends ClusteringModelBuilder<KMeansModel,KMeansModel.KMeansParameters,KMeansModel.KMeansOutput> { @Override public ToEigenVec getToEigenVec() { return LinearAlgebraUtils.toEigen; } // Convergence tolerance final static private double TOLERANCE = 1e-4; @Override public ModelCategory[] can_build() { return new ModelCategory[]{ ModelCategory.Clustering }; } @Override public boolean havePojo() { return true; } @Override public boolean haveMojo() { return true; } public enum Initialization { Random, PlusPlus, Furthest, User } /** Start the KMeans training Job on an F/J thread. */ @Override protected KMeansDriver trainModelImpl() { return new KMeansDriver(); } // Called from an http request public KMeans( KMeansModel.KMeansParameters parms ) { super(parms ); init(false); } public KMeans( KMeansModel.KMeansParameters parms, Job job) { super(parms,job); init(false); } public KMeans(boolean startup_once) { super(new KMeansModel.KMeansParameters(),startup_once); } @Override protected void checkMemoryFootPrint_impl() { long mem_usage = 8 /*doubles*/ * _parms._k * _train.numCols() * (_parms._standardize ? 2 : 1); long max_mem = H2O.SELF._heartbeat.get_free_mem(); if (mem_usage > max_mem) { String msg = "Centroids won't fit in the driver node's memory (" + PrettyPrint.bytes(mem_usage) + " > " + PrettyPrint.bytes(max_mem) + ") - try reducing the number of columns and/or the number of categorical factors."; error("_train", msg); } } /** Initialize the ModelBuilder, validating all arguments and preparing the * training frame. This call is expected to be overridden in the subclasses * and each subclass will start with "super.init();". * * Validate K, max_iterations and the number of rows. 
*/ @Override public void init(boolean expensive) { super.init(expensive); if(expensive) if(_parms._fold_column != null) _train.remove(_parms._fold_column); if( _parms._max_iterations <= 0 || _parms._max_iterations > 1e6) error("_max_iterations", " max_iterations must be between 1 and 1e6"); if (_train == null) return; if (_parms._init == Initialization.User && _parms._user_points == null) error("_user_y","Must specify initial cluster centers"); if (_parms._user_points != null) { // Check dimensions of user-specified centers Frame user_points = _parms._user_points.get(); if (user_points == null) error("_user_y", "User-specified points do not refer to a valid frame"); else if (user_points.numCols() != _train.numCols() - numSpecialCols()) error("_user_y", "The user-specified points must have the same number of columns (" + (_train.numCols() - numSpecialCols()) + ") as the training observations"); else if( user_points.numRows() != _parms._k) error("_user_y", "The number of rows in the user-specified points is not equal to k = " + _parms._k); } if (_parms._estimate_k) { if (_parms._user_points!=null) error("_estimate_k", "Cannot estimate k if user_points are provided."); if(_parms._cluster_size_constraints != null){ error("_estimate_k", "Cannot estimate k if cluster_size_constraints are provided."); } info("_seed", "seed is ignored when estimate_k is enabled."); info("_init", "Initialization scheme is ignored when estimate_k is enabled - algorithm is deterministic."); if (expensive) { boolean numeric = false; for (Vec v : _train.vecs()) { if (v.isNumeric()) { numeric = true; break; } } if (!numeric) { error("_estimate_k", "Cannot estimate k if data has no numeric columns."); } } } if(_parms._cluster_size_constraints != null){ if(_parms._cluster_size_constraints.length != _parms._k){ error("_cluster_size_constraints", "\"The number of cluster size constraints is not equal to k = \" + _parms._k"); } } if(_parms._fold_assignment == Model.Parameters.FoldAssignmentScheme.Stratified){ error("fold_assignment", "K-means is an unsupervised algorithm; the stratified fold assignment cannot be used because of the missing response column."); } if (expensive && error_count() == 0) checkMemoryFootPrint(); } public void cv_makeAggregateModelMetrics(ModelMetrics.MetricBuilder[] mbs){ super.cv_makeAggregateModelMetrics(mbs); ((ModelMetricsClustering.MetricBuilderClustering) mbs[0])._within_sumsqe = null; ((ModelMetricsClustering.MetricBuilderClustering) mbs[0])._size = null; } // ---------------------- private final class KMeansDriver extends Driver { private String[][] _isCats; // Categorical columns // Initialize cluster centers double[][] initial_centers(KMeansModel model, final Vec[] vecs, final double[] means, final double[] mults, final int[] modes, int k) { // Categoricals use a different distance metric than numeric columns. model._output._categorical_column_count=0; _isCats = new String[vecs.length][]; for( int v=0; v<vecs.length; v++ ) { _isCats[v] = vecs[v].isCategorical() ? 
new String[0] : null; if (_isCats[v] != null) model._output._categorical_column_count++; } Random rand = water.util.RandomUtils.getRNG(_parms._seed-1); double centers[][]; // Cluster centers if( null != _parms._user_points ) { // User-specified starting points Frame user_points = _parms._user_points.get(); int numCenters = (int)user_points.numRows(); int numCols = model._output.nfeatures(); centers = new double[numCenters][numCols]; Vec[] centersVecs = user_points.vecs(); // Get the centers and standardize them if requested for (int r=0; r<numCenters; r++) { for (int c=0; c<numCols; c++){ centers[r][c] = centersVecs[c].at(r); centers[r][c] = Kmeans_preprocessData(centers[r][c], c, means, mults, modes); } } } else { // Random, Furthest, or PlusPlus initialization if (_parms._init == Initialization.Random) { // Initialize all cluster centers to random rows centers = new double[k][model._output.nfeatures()]; for (double[] center : centers) randomRow(vecs, rand, center, means, mults, modes); } else { centers = new double[1][model._output.nfeatures()]; // Initialize first cluster center to random row randomRow(vecs, rand, centers[0], means, mults, modes); model._output._iterations = 0; while (model._output._iterations < 5) { // Sum squares distances to cluster center SumSqr sqr = new SumSqr(centers, means, mults, modes, _isCats).doAll(vecs); // Sample with probability inverse to square distance Sampler sampler = new Sampler(centers, means, mults, modes, _isCats, sqr._sqr, k * 3, _parms.getOrMakeRealSeed(), hasWeightCol()).doAll(vecs); centers = ArrayUtils.append(centers, sampler._sampled); // Fill in sample centers into the model model._output._centers_raw = destandardize(centers, _isCats, means, mults); model._output._tot_withinss = sqr._sqr / _train.numRows(); model._output._iterations++; // One iteration done model.update(_job); // Make early version of model visible, but don't update progress using update(1) if (stop_requested()) { if (timeout()) warn("_max_runtime_secs reached.", "KMeans exited before finishing all iterations."); break; // Stopped/cancelled } } // Recluster down to k cluster centers centers = recluster(centers, rand, k, _parms._init, _isCats); model._output._iterations = 0; // Reset iteration count } } assert(centers.length == k); return centers; } // Number of reinitialization attempts for preventing empty clusters transient private int _reinit_attempts; // Handle the case where some centers go dry. Rescue only 1 cluster // per iteration ('cause we only tracked the 1 worst row) boolean cleanupBadClusters( IterationTask task, final Vec[] vecs, final double[][] centers, final double[] means, final double[] mults, final int[] modes ) { // Find any bad clusters int clu; for( clu=0; clu<centers.length; clu++ ) if( task._size[clu] == 0 ) break; if( clu == centers.length ) return false; // No bad clusters long row = task._worst_row; Log.warn("KMeans: Re-initializing cluster " + clu + " to row " + row); data(centers[clu] = task._cMeans[clu], vecs, row, means, mults, modes); task._size[clu] = 1; //FIXME: PUBDEV-871 Some other cluster had their membership count reduced by one! (which one?) // Find any MORE bad clusters; we only fixed the first one for( clu=0; clu<centers.length; clu++ ) if( task._size[clu] == 0 ) break; if( clu == centers.length ) return false; // No MORE bad clusters // If we see 2 or more bad rows, just re-run Lloyds to get the // next-worst row. 
We don't count this as an iteration, because // we're not really adjusting the centers, we're trying to get // some centers *at-all*. Log.warn("KMeans: Re-running Lloyds to re-init another cluster"); if (_reinit_attempts++ < centers.length) { return true; // Rerun Lloyds, and assign points to centroids } else { _reinit_attempts = 0; return false; } } // Compute all interesting KMeans stats (errors & variances of clusters, // etc). Return new centers. double[][] computeStatsFillModel(IterationTask task, KMeansModel model, final Vec[] vecs, final double[] means, final double[] mults, final int[] modes, int k) { // Fill in the model based on original destandardized centers if (model._parms._standardize) { model._output._centers_std_raw = task._cMeans; } model._output._centers_raw = destandardize(task._cMeans, _isCats, means, mults); model._output._size = task._size; model._output._withinss = task._cSqr; double ssq = 0; // sum squared error for( int i=0; i<k; i++ ) ssq += model._output._withinss[i]; // sum squared error all clusters model._output._tot_withinss = ssq; // Sum-of-square distance from grand mean if(k == 1) { model._output._totss = model._output._tot_withinss; } else { // If data already standardized, grand mean is just the origin TotSS totss = new TotSS(means,mults,modes, train().domains(), train().cardinality()).doAll(vecs); model._output._totss = totss._tss; } model._output._betweenss = model._output._totss - model._output._tot_withinss; // MSE between-cluster model._output._iterations++; model._output._history_withinss = ArrayUtils.copyAndFillOf( model._output._history_withinss, model._output._history_withinss.length+1, model._output._tot_withinss); model._output._k = ArrayUtils.copyAndFillOf(model._output._k, model._output._k.length+1, k); model._output._training_time_ms = ArrayUtils.copyAndFillOf(model._output._training_time_ms, model._output._training_time_ms.length+1, System.currentTimeMillis()); model._output._reassigned_count = ArrayUtils.copyAndFillOf(model._output._reassigned_count, model._output._reassigned_count.length+1, task._reassigned_count); // Two small TwoDimTables - cheap model._output._model_summary = createModelSummaryTable(model._output); model._output._scoring_history = createScoringHistoryTable(model._output); // Take the cluster stats from the model, and assemble them into a model metrics object model._output._training_metrics = makeTrainingMetrics(model); return task._cMeans; // New centers } // Main worker thread @Override public void computeImpl() { KMeansModel model = null; Key bestOutputKey = Key.make(); try { init(true); // Do lock even before checking the errors, since this block is finalized by unlock // (not the best solution, but the code is more readable) // Something goes wrong if( error_count() > 0 ) throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(KMeans.this); // The model to be built // Set fold_column to null and will be added back into model parameter after String fold_column = _parms._fold_column; _parms._fold_column = null; model = new KMeansModel(dest(), _parms, new KMeansModel.KMeansOutput(KMeans.this)); model.delete_and_lock(_job); int startK = _parms._estimate_k ? 1 : _parms._k; final Vec vecs[] = _train.vecs(); // mults & means for standardization final double[] means = _train.means(); // means are used to impute NAs final double[] mults = _parms._standardize ? _train.mults() : null; final int [] impute_cat = new int[vecs.length]; for(int i = 0; i < vecs.length; i++) impute_cat[i] = vecs[i].isCategorical() ? 
DataInfo.imputeCat(vecs[i],true) : -1; model._output._normSub = means; model._output._normMul = mults; model._output._mode = impute_cat; // Initialize cluster centers and standardize if requested double[][] centers = initial_centers(model,vecs,means,mults,impute_cat, startK); if( centers==null ) return; // Stopped/cancelled during center-finding boolean work_unit_iter = !_parms._estimate_k; boolean constrained = _parms._cluster_size_constraints != null; // --- // Run the main KMeans Clustering loop // Stop after enough iterations or reassigned_count < TOLERANCE * num_rows double sum_squares = 0; final double rel_improvement_cutoff = Math.min(0.02 + 10. / _train.numRows() + 2.5 / Math.pow(model._output.nfeatures(), 2), 0.8); if (_parms._estimate_k) Log.info("Cutoff for relative improvement in within_cluster_sum_of_squares: " + rel_improvement_cutoff); Vec[] vecs2; long csum = 0; if(!constrained) { vecs2 = Arrays.copyOf(vecs, vecs.length+1); vecs2[vecs2.length-1] = vecs2[0].makeCon(-1); } else { int newVecLength = vecs.length + 2 * centers.length + 3; // data (+ weight column) + distances + edge indices + result distance + old assignment + new assignment vecs2 = Arrays.copyOf(vecs, newVecLength); for (int i = vecs.length; i < newVecLength; i++) { vecs2[i] = vecs2[0].makeCon(Double.MAX_VALUE); } // Check sum of constrains for(int i = 0; i<_parms._cluster_size_constraints.length; i++){ assert _parms._cluster_size_constraints[i] > 0: "The value of constraint should be higher then zero."; csum += _parms._cluster_size_constraints[i]; assert csum <= vecs[0].length(): "The sum of constraints ("+csum+") is higher than the number of data rows ("+vecs[0].length()+")."; } } for (int k = startK; k <= _parms._k; ++k) { if(!constrained){ Log.info("Running Lloyds iteration for " + k + " centroids."); } else { Log.info("Running Constrained K-means iteration for " + k + " centroids."); } model._output._iterations = 0; // Loop ends only when iterations > max_iterations with strict inequality double[][] lo=null, hi=null; boolean stop = false; do { assert(centers.length == k); IterationTask task; if(!constrained) { //Lloyds algorithm task = new LloydsIterationTask(centers, means, mults, impute_cat, _isCats, k, hasWeightCol()).doAll(vecs2); //1 PASS OVER THE DATA } else { // Constrained K-means // Get distances and aggregated values CalculateDistancesTask countDistancesTask = new CalculateDistancesTask(centers, means, mults, impute_cat, _isCats, k, hasWeightCol()).doAll(vecs2); // Check if the constraint setting does not break cross validation setting assert !hasWeightCol() || csum <= countDistancesTask._non_zero_weights : "The sum of constraints ("+csum+") is higher than the number of data rows with non zero weights ("+countDistancesTask._non_zero_weights+") because cross validation is set."; // Calculate center assignments // Experimental code. Polynomial implementation - slow performance. Need to be parallelize! KMeansSimplexSolver solver = new KMeansSimplexSolver(_parms._cluster_size_constraints, new Frame(vecs2), countDistancesTask._sum, hasWeightCol(), countDistancesTask._non_zero_weights); // Get cluster assignments Frame result = solver.assignClusters(); // Count statistics and result task task = new CalculateMetricTask(centers, means, mults, impute_cat, _isCats, k, hasWeightCol()).doAll(result); } // Pick the max categorical level for cluster center max_cats(task._cMeans, task._cats, _isCats); // Handle the case where some centers go dry. 
Rescue only 1 cluster // per iteration ('cause we only tracked the 1 worst row) // If constrained K-meas is set, clusters with zero points are allowed if(!_parms._estimate_k && _parms._cluster_size_constraints == null && cleanupBadClusters(task,vecs,centers,means,mults,impute_cat) ) continue; // Compute model stats; update standardized cluster centers centers = computeStatsFillModel(task, model, vecs, means, mults, impute_cat, k); if (model._parms._score_each_iteration) Log.info(model._output._model_summary); lo = task._lo; hi = task._hi; if (work_unit_iter) { model.update(_job); // Update model in K/V store _job.update(1); //1 more iteration } stop = (task._reassigned_count < Math.max(1,train().numRows()*TOLERANCE) || model._output._iterations >= _parms._max_iterations || stop_requested()); if (stop) { if (model._output._iterations < _parms._max_iterations) Log.info("K-means converged after " + model._output._iterations + " iterations."); else Log.info("K-means stopped after " + model._output._iterations + " iterations."); } } while (!stop); double sum_squares_now = model._output._tot_withinss; double rel_improvement; if (sum_squares==0) { rel_improvement = 1; } else { rel_improvement = (sum_squares - sum_squares_now) / sum_squares; } Log.info("Relative improvement in total withinss: " + rel_improvement); sum_squares = sum_squares_now; if (_parms._estimate_k && k > 1) { boolean outerConverged = rel_improvement < rel_improvement_cutoff; if (outerConverged) { KMeansModel.KMeansOutput best = DKV.getGet(bestOutputKey); model._output = best; Log.info("Converged. Retrieving the best model with k=" + model._output._k[model._output._k.length-1]); break; } } if (!work_unit_iter) { DKV.put(bestOutputKey, IcedUtils.deepCopy(model._output)); //store a clone to avoid sharing the state between DKV and here model.update(_job); // Update model in K/V store _job.update(1); //1 more round for auto-clustering } if (lo != null && hi != null && _parms._estimate_k) centers = splitLargestCluster(centers, lo, hi, means, mults, impute_cat, vecs2, k); } //k-finder vecs2[vecs2.length-1].remove(); // Create metrics by scoring on training set otherwise scores are based on last Lloyd iteration // These lines cause the training metrics are recalculated on strange model values. 
// Especially for Constrained Kmeans, it returns a result that does not meet the constraints set // because scoring is based on calculated centroids and does not preserve the constraints // There is a GH issue to explore this part of code: https://github.com/h2oai/h2o-3/issues/8543 if(!constrained) { model.score(_parms.train()).delete(); model._output._training_metrics = ModelMetrics.getFromDKV(model,_parms.train()); } model.update(_job); // Update model in K/V store Log.info(model._output._model_summary); Log.info(model._output._scoring_history); Log.info(((ModelMetricsClustering)model._output._training_metrics).createCentroidStatsTable().toString()); // At the end: validation scoring (no need to gather scoring history) if (_valid != null) { model.score(_parms.valid()).delete(); //this appends a ModelMetrics on the validation set model._output._validation_metrics = ModelMetrics.getFromDKV(model,_parms.valid()); } model._parms._fold_column = fold_column; model.update(_job); // Update model in K/V store } finally { if( model != null ) model.unlock(_job); DKV.remove(bestOutputKey); } } double[][] splitLargestCluster(double[][] centers, double[][] lo, double[][] hi, double[] means, double[] mults, int[] impute_cat, Vec[] vecs2, int k) { double[][] newCenters = Arrays.copyOf(centers, centers.length + 1); for (int i = 0; i < centers.length; ++i) newCenters[i] = centers[i].clone(); double maxRange=0; int clusterToSplit=0; int dimToSplit=0; for (int i = 0; i < centers.length; ++i) { double[] range = new double[hi[i].length]; for( int col=0; col<hi[i].length; col++ ) { if (_isCats[col]!=null) continue; // can't split a cluster along categorical direction range[col] = hi[i][col] - lo[i][col]; if ((float)range[col] > (float)maxRange) { //break ties clusterToSplit = i; dimToSplit = col; maxRange = range[col]; } } // Log.info("Range for cluster " + i + ": " + Arrays.toString(range)); } // start out new centroid as a copy of the one to split assert (_isCats[dimToSplit] == null); double splitPoint = newCenters[clusterToSplit][dimToSplit]; // Log.info("Splitting cluster " + clusterToSplit + " in half in dimension " + dimToSplit + " at splitpoint: " + splitPoint); // compute the centroids of the two sub-clusters SplitTask task = new SplitTask(newCenters, means, mults, impute_cat, _isCats, k+1, hasWeightCol(), clusterToSplit, dimToSplit, splitPoint).doAll(vecs2); // Log.info("Splitting: " + Arrays.toString(newCenters[clusterToSplit])); newCenters[clusterToSplit] = task._cMeans[clusterToSplit].clone(); // Log.info("Into One: " + Arrays.toString(newCenters[clusterToSplit])); newCenters[newCenters.length-1] = task._cMeans[newCenters.length-1].clone(); // Log.info(" Two: " + Arrays.toString(newCenters[newCenters.length-1])); return newCenters; } private TwoDimTable createModelSummaryTable(KMeansModel.KMeansOutput output) { List<String> colHeaders = new ArrayList<>(); List<String> colTypes = new ArrayList<>(); List<String> colFormat = new ArrayList<>(); colHeaders.add("Number of Rows"); colTypes.add("long"); colFormat.add("%d"); colHeaders.add("Number of Clusters"); colTypes.add("long"); colFormat.add("%d"); colHeaders.add("Number of Categorical Columns"); colTypes.add("long"); colFormat.add("%d"); colHeaders.add("Number of Iterations"); colTypes.add("long"); colFormat.add("%d"); colHeaders.add("Within Cluster Sum of Squares"); colTypes.add("double"); colFormat.add("%.5f"); colHeaders.add("Total Sum of Squares"); colTypes.add("double"); colFormat.add("%.5f"); colHeaders.add("Between Cluster Sum of Squares"); 
colTypes.add("double"); colFormat.add("%.5f"); final int rows = 1; TwoDimTable table = new TwoDimTable( "Model Summary", null, new String[rows], colHeaders.toArray(new String[0]), colTypes.toArray(new String[0]), colFormat.toArray(new String[0]), ""); int row = 0; int col = 0; table.set(row, col++, Math.round(_train.numRows() * (hasWeightCol() ? _train.lastVec().mean() : 1))); table.set(row, col++, output._centers_raw.length); table.set(row, col++, output._categorical_column_count); table.set(row, col++, output._k.length-1); table.set(row, col++, output._tot_withinss); table.set(row, col++, output._totss); table.set(row, col++, output._betweenss); return table; } private TwoDimTable createScoringHistoryTable(KMeansModel.KMeansOutput output) { List<String> colHeaders = new ArrayList<>(); List<String> colTypes = new ArrayList<>(); List<String> colFormat = new ArrayList<>(); colHeaders.add("Timestamp"); colTypes.add("string"); colFormat.add("%s"); colHeaders.add("Duration"); colTypes.add("string"); colFormat.add("%s"); colHeaders.add("Iterations"); colTypes.add("long"); colFormat.add("%d"); if (_parms._estimate_k) { colHeaders.add("Number of Clusters"); colTypes.add("long"); colFormat.add("%d"); } colHeaders.add("Number of Reassigned Observations"); colTypes.add("long"); colFormat.add("%d"); colHeaders.add("Within Cluster Sum Of Squares"); colTypes.add("double"); colFormat.add("%.5f"); final int rows = output._history_withinss.length; TwoDimTable table = new TwoDimTable( "Scoring History", null, new String[rows], colHeaders.toArray(new String[0]), colTypes.toArray(new String[0]), colFormat.toArray(new String[0]), ""); int row = 0; for( int i = 0; i<rows; i++ ) { int col = 0; assert(row < table.getRowDim()); assert(col < table.getColDim()); DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss"); table.set(row, col++, fmt.print(output._training_time_ms[i])); table.set(row, col++, PrettyPrint.msecs(output._training_time_ms[i]-_job.start_time(), true)); table.set(row, col++, i); if (_parms._estimate_k) table.set(row, col++, output._k[i]); table.set(row, col++, output._reassigned_count[i]); table.set(row, col++, output._history_withinss[i]); row++; } return table; } } // ------------------------------------------------------------------------- // Initial sum-of-square-distance to nearest cluster center private static class TotSS extends MRTask<TotSS> { // IN final double[] _means, _mults; final int[] _modes; final String[][] _isCats; final int[] _card; // OUT double _tss; double[] _gc; // Grand center (mean of cols) TotSS(double[] means, double[] mults, int[] modes, String[][] isCats, int[] card) { _means = means; _mults = mults; _modes = modes; _tss = 0; _isCats = isCats; _card = card; // Mean of numeric col is zero when standardized _gc = mults!=null ? 
new double[means.length] : Arrays.copyOf(means, means.length); for(int i=0; i<means.length; i++) { if(isCats[i] != null) _gc[i] = _modes[i]; } } @Override public void map(Chunk[] cs) { for( int row = 0; row < cs[0]._len; row++ ) { double[] values = new double[cs.length]; // fetch the data - using consistent NA and categorical data handling (same as for training) data(values, cs, row, _means, _mults, _modes); // compute the distance from the (standardized) cluster centroids _tss += hex.genmodel.GenModel.KMeans_distance(_gc, values, _isCats); } } @Override public void reduce(TotSS other) { _tss += other._tss; } } // ------------------------------------------------------------------------- // Initial sum-of-square-distance to nearest cluster center private static class SumSqr extends MRTask<SumSqr> { // IN double[][] _centers; double[] _means, _mults; // Standardization int[] _modes; // Imputation of missing categoricals final String[][] _isCats; // OUT double _sqr; SumSqr( double[][] centers, double[] means, double[] mults, int[] modes, String[][] isCats ) { _centers = centers; _means = means; _mults = mults; _modes = modes; _isCats = isCats; } @Override public void map(Chunk[] cs) { double[] values = new double[cs.length]; ClusterDist cd = new ClusterDist(); for( int row = 0; row < cs[0]._len; row++ ) { data(values, cs, row, _means, _mults, _modes); _sqr += minSqr(_centers, values, _isCats, cd); } _means = _mults = null; _modes = null; _centers = null; } @Override public void reduce(SumSqr other) { _sqr += other._sqr; } } // ------------------------------------------------------------------------- // Sample rows with increasing probability the farther they are from any // cluster center. private static class Sampler extends MRTask<Sampler> { // IN double[][] _centers; double[] _means, _mults; // Standardization int[] _modes; // Imputation of missing categoricals final String[][] _isCats; final double _sqr; // Min-square-error final double _probability; // Odds to select this point final long _seed; boolean _hasWeight; // OUT double[][] _sampled; // New cluster centers Sampler( double[][] centers, double[] means, double[] mults, int[] modes, String[][] isCats, double sqr, double prob, long seed, boolean hasWeight ) { _centers = centers; _means = means; _mults = mults; _modes = modes; _isCats = isCats; _sqr = sqr; _probability = prob; _seed = seed; _hasWeight = hasWeight; } @Override public void map(Chunk[] cs) { int N = cs.length - (_hasWeight?1:0); double[] values = new double[N]; ArrayList<double[]> list = new ArrayList<>(); Random rand = RandomUtils.getRNG(0); ClusterDist cd = new ClusterDist(); for( int row = 0; row < cs[0]._len; row++ ) { rand.setSeed(_seed + cs[0].start()+row); data(values, cs, row, _means, _mults, _modes); double sqr = minSqr(_centers, values, _isCats, cd); if( _probability * sqr > rand.nextDouble() * _sqr ) list.add(values.clone()); } _sampled = new double[list.size()][]; list.toArray(_sampled); _centers = null; _means = _mults = null; _modes = null; } @Override public void reduce(Sampler other) { _sampled = ArrayUtils.append(_sampled, other._sampled); } } public static class IterationTask extends MRTask<IterationTask> { // IN double[][] _centers; double[] _means, _mults; // Standardization int[] _modes; // Imputation of missing categoricals final int _k; final String[][] _isCats; boolean _hasWeight; // OUT double[][] _lo, _hi; // Bounding box double _reassigned_count; double[][] _cMeans; // Means for each cluster long[/*k*/][/*features*/][/*nfactors*/] _cats; // 
Histogram of cat levels double[] _cSqr; // Sum of squares for each cluster long[] _size; // Number of rows in each cluster long _worst_row; // Row with max err double _worst_err; // Max-err-row's max-err IterationTask(double[][] centers, double[] means, double[] mults, int[] modes, String[][] isCats, int k, boolean hasWeight ) { _centers = centers; _means = means; _mults = mults; _modes = modes; _isCats = isCats; _k = k; _hasWeight = hasWeight; } } // --------------------------------------- // A Lloyd's pass: // Find nearest cluster center for every point // Compute new mean/center & variance & rows for each cluster // Compute distance between clusters // Compute total sqr distance private static class LloydsIterationTask extends IterationTask { LloydsIterationTask(double[][] centers, double[] means, double[] mults, int[] modes, String[][] isCats, int k, boolean hasWeight ) { super(centers, means, mults, modes, isCats, k, hasWeight); } @Override public void map(Chunk[] cs) { int N = cs.length - (_hasWeight ? 1:0) - 1 /*clusterassignment*/; assert _centers[0].length==N; _lo = new double[_k][N]; for( int clu=0; clu< _k; clu++ ) Arrays.fill(_lo[clu], Double.MAX_VALUE); _hi = new double[_k][N]; for( int clu=0; clu< _k; clu++ ) Arrays.fill(_hi[clu], -Double.MAX_VALUE); _cMeans = new double[_k][N]; _cSqr = new double[_k]; _size = new long[_k]; // Space for cat histograms _cats = new long[_k][N][]; for( int clu=0; clu< _k; clu++ ) for( int col=0; col<N; col++ ) _cats[clu][col] = _isCats[col]==null ? null : new long[cs[col].vec().cardinality()]; _worst_err = 0; Chunk assignment = cs[cs.length-1]; // Find closest cluster center for each row double[] values = new double[N]; // Temp data to hold row as doubles ClusterDist cd = new ClusterDist(); for( int row = 0; row < cs[0]._len; row++ ) { double weight = _hasWeight ? 
cs[N].atd(row) : 1; if (weight == 0) continue; //skip holdout rows assert(weight == 1); //K-Means only works for weight 1 (or weight 0 for holdout) data(values, cs, row, _means, _mults, _modes); // Load row as doubles closest(_centers, values, _isCats, cd); // Find closest cluster center if (cd._cluster != assignment.at8(row)) { _reassigned_count+=weight; assignment.set(row, cd._cluster); } for( int clu=0; clu< _k; clu++ ) { for( int col=0; col<N; col++ ) { if (cd._cluster == clu) { _lo[clu][col] = Math.min(values[col], _lo[clu][col]); _hi[clu][col] = Math.max(values[col], _hi[clu][col]); } } } int clu = cd._cluster; assert clu != -1; // No broken rows _cSqr[clu] += cd._dist; // Add values and increment counter for chosen cluster for( int col = 0; col < N; col++ ) if( _isCats[col] != null ) _cats[clu][col][(int)values[col]]++; // Histogram the cats else _cMeans[clu][col] += values[col]; // Sum the column centers _size[clu]++; // Track worst row if( cd._dist > _worst_err) { _worst_err = cd._dist; _worst_row = cs[0].start()+row; } } // Scale back down to local mean for( int clu = 0; clu < _k; clu++ ) if( _size[clu] != 0 ) ArrayUtils.div(_cMeans[clu], _size[clu]); _centers = null; _means = _mults = null; _modes = null; } @Override public void reduce(IterationTask mr) { _reassigned_count += mr._reassigned_count; for( int clu = 0; clu < _k; clu++ ) { long ra = _size[clu]; long rb = mr._size[clu]; double[] ma = _cMeans[clu]; double[] mb = mr._cMeans[clu]; for( int c = 0; c < ma.length; c++ ) // Recursive mean if( ra+rb > 0 ) ma[c] = (ma[c] * ra + mb[c] * rb) / (ra + rb); } ArrayUtils.add(_cats, mr._cats); ArrayUtils.add(_cSqr, mr._cSqr); ArrayUtils.add(_size, mr._size); for( int clu=0; clu< _k; clu++ ) { for( int col=0; col<_lo[clu].length; col++ ) { _lo[clu][col] = Math.min(mr._lo[clu][col], _lo[clu][col]); _hi[clu][col] = Math.max(mr._hi[clu][col], _hi[clu][col]); } } // track global worst-row if( _worst_err < mr._worst_err) { _worst_err = mr._worst_err; _worst_row = mr._worst_row; } } } private static class CalculateDistancesTask extends MRTask<CalculateDistancesTask> { // IN double[][] _centers; double[] _means, _mults; // Standardization int[] _modes; // Imputation of missing categoricals final int _k; boolean _hasWeight; final String[][] _isCats; double _sum; long _non_zero_weights; CalculateDistancesTask(double[][] centers, double[] means, double[] mults, int[] modes, String[][] isCats, int k, boolean hasWeight) { _centers = centers; _means = means; _mults = mults; _modes = modes; _k = k; _hasWeight = hasWeight; _isCats = isCats; _sum = 0; _non_zero_weights = 0; } @Override public void map(Chunk[] cs) { int N = cs.length - (_hasWeight ? 1 : 0) - 3 - 2*_centers.length /*data + weight column + distances + edge indices + old assignment + new assignment */; assert _centers[0].length == N; int vecsStart = _hasWeight ? N+1 : N; double[] values = new double[N]; // Temp data to hold row as doubles for (int row = 0; row < cs[0]._len; row++) { double weight = _hasWeight ? 
cs[N].atd(row) : 1; if (weight == 0) continue; //skip holdout rows _non_zero_weights++; assert (weight == 1); //K-Means only works for weight 1 (or weight 0 for holdout) data(values, cs, row, _means, _mults, _modes); // Load row as doubles double[] distances = getDistances(_centers, values, _isCats); for(int cluster=0; cluster<distances.length; cluster++){ double tmpDist = distances[cluster]; cs[vecsStart+cluster].set(row, tmpDist); _sum += tmpDist; } } } @Override public void reduce(CalculateDistancesTask mrt) { _sum += mrt._sum; _non_zero_weights += mrt._non_zero_weights; } } private static class CalculateMetricTask extends IterationTask { CalculateMetricTask(double[][] centers, double[] means, double[] mults, int[] modes, String[][] isCats, int k, boolean hasWeight) { super(centers, means, mults, modes, isCats, k, hasWeight); } @Override public void map(Chunk[] cs) { int N = cs.length - (_hasWeight ? 1:0) - 3 /*clusterassignment*/; assert _centers[0].length==N; _lo = new double[_k][N]; for( int clu=0; clu< _k; clu++ ) Arrays.fill(_lo[clu], Double.MAX_VALUE); _hi = new double[_k][N]; for( int clu=0; clu< _k; clu++ ) Arrays.fill(_hi[clu], -Double.MAX_VALUE); _cMeans = new double[_k][N]; _cSqr = new double[_k]; _size = new long[_k]; // Space for cat histograms _cats = new long[_k][N][]; for( int clu=0; clu< _k; clu++ ) for( int col=0; col<N; col++ ) _cats[clu][col] = _isCats[col]==null ? null : new long[cs[col].vec().cardinality()]; _worst_err = 0; Chunk distances = cs[cs.length-3]; Chunk oldAssignments = cs[cs.length-2]; Chunk newAssignments = cs[cs.length-1]; // Find closest cluster center for each row double[] values = new double[N]; // Temp data to hold row as doubles for( int row = 0; row < cs[0]._len; row++ ) { double weight = _hasWeight ? cs[N].atd(row) : 1; if (weight == 0) continue; //skip holdout rows assert(weight == 1); //K-Means only works for weight 1 (or weight 0 for holdout) data(values, cs, row, _means, _mults, _modes); // Load row as doubles int cluster = (int) newAssignments.at8(row); double distance = distances.atd(row); if (cluster != oldAssignments.at8(row)) { _reassigned_count+=weight; oldAssignments.set(row, cluster); } for( int clu=0; clu< _k; clu++ ) { for( int col=0; col<N; col++ ) { if (cluster == clu) { _lo[clu][col] = Math.min(values[col], _lo[clu][col]); _hi[clu][col] = Math.max(values[col], _hi[clu][col]); } } } assert cluster != -1 : "cluster "+cluster+" is not set for row "+row; // No broken rows _cSqr[cluster] += distance; // Add values and increment counter for chosen cluster for( int col = 0; col < N; col++ ) if( _isCats[col] != null ) _cats[cluster][col][(int)values[col]]++; // Histogram the cats else _cMeans[cluster][col] += values[col]; // Sum the column centers _size[cluster]++; // Track worst row if( distance > _worst_err) { _worst_err = distance; _worst_row = cs[0].start()+row; } } // Scale back down to local mean for( int clu = 0; clu < _k; clu++ ) if( _size[clu] != 0 ) ArrayUtils.div(_cMeans[clu], _size[clu]); _centers = null; _means = _mults = null; _modes = null; } @Override public void reduce(IterationTask mr) { _reassigned_count += mr._reassigned_count; for( int clu = 0; clu < _k; clu++ ) { long ra = _size[clu]; long rb = mr._size[clu]; double[] ma = _cMeans[clu]; double[] mb = mr._cMeans[clu]; for( int c = 0; c < ma.length; c++ ) // Recursive mean if( ra+rb > 0 ) ma[c] = (ma[c] * ra + mb[c] * rb) / (ra + rb); } ArrayUtils.add(_cats, mr._cats); ArrayUtils.add(_cSqr, mr._cSqr); ArrayUtils.add(_size, mr._size); for( int clu=0; clu< _k; clu++ ) 
{ for( int col=0; col<_lo[clu].length; col++ ) { _lo[clu][col] = Math.min(mr._lo[clu][col], _lo[clu][col]); _hi[clu][col] = Math.max(mr._hi[clu][col], _hi[clu][col]); } } // track global worst-row if( _worst_err < mr._worst_err) { _worst_err = mr._worst_err; _worst_row = mr._worst_row; } } } // A pair result: nearest cluster center and the square distance private static final class ClusterDist { int _cluster; double _dist; } private static double minSqr(double[][] centers, double[] point, String[][] isCats, ClusterDist cd) { return closest(centers, point, isCats, cd, centers.length)._dist; } private static double minSqr(double[][] centers, double[] point, String[][] isCats, ClusterDist cd, int count) { return closest(centers,point,isCats,cd,count)._dist; } private static ClusterDist closest(double[][] centers, double[] point, String[][] isCats, ClusterDist cd) { return closest(centers, point, isCats, cd, centers.length); } /** Return both nearest of N cluster center/centroids, and the square-distance. */ private static ClusterDist closest(double[][] centers, double[] point, String[][] isCats, ClusterDist cd, int count) { int min = -1; double minSqr = Double.MAX_VALUE; for( int cluster = 0; cluster < count; cluster++ ) { double sqr = hex.genmodel.GenModel.KMeans_distance(centers[cluster],point,isCats); if( sqr < minSqr ) { // Record nearest cluster min = cluster; minSqr = sqr; } } cd._cluster = min; // Record nearest cluster cd._dist = minSqr; // Record square-distance return cd; // Return for flow-coding } /** Return square-distance of point to all clusters. */ private static double[] getDistances(double[][] centers, double[] point, String[][] isCats) { double[] distances = new double[centers.length]; for( int cluster = 0; cluster < centers.length; cluster++ ) { distances[cluster] = hex.genmodel.GenModel.KMeans_distance(centers[cluster],point,isCats); } return distances; } // KMeans++ re-clustering private static double[][] recluster(double[][] points, Random rand, int N, Initialization init, String[][] isCats) { double[][] res = new double[N][]; res[0] = points[0]; int count = 1; ClusterDist cd = new ClusterDist(); switch( init ) { case Random: break; case PlusPlus: { // k-means++ while( count < res.length ) { double sum = 0; for (double[] point1 : points) sum += minSqr(res, point1, isCats, cd, count); for (double[] point : points) { if (minSqr(res, point, isCats, cd, count) >= rand.nextDouble() * sum) { res[count++] = point; break; } } } break; } case Furthest: { // Takes cluster center further from any already chosen ones while( count < res.length ) { double max = 0; int index = 0; for( int i = 0; i < points.length; i++ ) { double sqr = minSqr(res, points[i], isCats, cd, count); if( sqr > max ) { max = sqr; index = i; } } res[count++] = points[index]; } break; } default: throw H2O.fail(); } return res; } private void randomRow(Vec[] vecs, Random rand, double[] center, double[] means, double[] mults, int[] modes) { long row = Math.max(0, (long) (rand.nextDouble() * vecs[0].length()) - 1); data(center, vecs, row, means, mults, modes); } // Pick most common cat level for each cluster_centers' cat columns private static double[][] max_cats(double[][] centers, long[][][] cats, String[][] isCats) { for( int clu = 0; clu < centers.length; clu++ ) for( int col = 0; col < centers[0].length; col++ ) if( isCats[col] != null ) centers[clu][col] = ArrayUtils.maxIndex(cats[clu][col]); return centers; } private static double[][] destandardize(double[][] centers, String[][] isCats, double[] means, 
double[] mults) { int K = centers.length; int N = centers[0].length; double[][] value = new double[K][N]; for( int clu = 0; clu < K; clu++ ) { System.arraycopy(centers[clu],0,value[clu],0,N); if( mults!=null ) { // Reverse standardization for( int col = 0; col < N; col++) if( isCats[col] == null ) value[clu][col] = value[clu][col] / mults[col] + means[col]; } } return value; } private static void data(double[] values, Vec[] vecs, long row, double[] means, double[] mults, int[] modes) { for( int i = 0; i < values.length; i++ ) { values[i] = Kmeans_preprocessData(vecs[i].at(row), i, means, mults, modes); } } private static void data(double[] values, Chunk[] chks, int row, double[] means, double[] mults, int[] modes) { for( int i = 0; i < values.length; i++ ) { values[i] = Kmeans_preprocessData(chks[i].atd(row), i, means, mults, modes); } } /** * This helper creates a ModelMetricsClustering from a trained model * @param model, must contain valid statistics from training, such as _betweenss etc. */ private ModelMetricsClustering makeTrainingMetrics(KMeansModel model) { ModelMetricsClustering mm = new ModelMetricsClustering(model, train(), CustomMetric.EMPTY); mm._size = model._output._size; mm._withinss = model._output._withinss; mm._betweenss = model._output._betweenss; mm._totss = model._output._totss; mm._tot_withinss = model._output._tot_withinss; model.addMetrics(mm); return mm; } private static class SplitTask extends MRTask<SplitTask> { // IN double[][] _centers; double[] _means, _mults; // Standardization int[] _modes; // Imputation of missing categoricals final int _k; final String[][] _isCats; final boolean _hasWeight; final int _clusterToSplit; final int _dimToSplit; final double _splitPoint; // OUT double[][] _cMeans; // Means for each cluster long[] _size; // Number of rows in each cluster SplitTask(double[][] centers, double[] means, double[] mults, int[] modes, String[][] isCats, int k, boolean hasWeight, int clusterToSplit, int dimToSplit, double splitPoint) { _centers = centers; _means = means; _mults = mults; _modes = modes; _isCats = isCats; _k = k; _hasWeight = hasWeight; _clusterToSplit = clusterToSplit; _dimToSplit = dimToSplit; _splitPoint = splitPoint; } @Override public void map(Chunk[] cs) { int N = cs.length - (_hasWeight ? 1:0) - 1 /*clusterassignment*/; assert _centers[0].length==N; _cMeans = new double[_k][N]; _size = new long[_k]; Chunk assignment = cs[cs.length-1]; // Find closest cluster center for each row double[] values = new double[N]; // Temp data to hold row as doubles ClusterDist cd = new ClusterDist(); for( int row = 0; row < cs[0]._len; row++ ) { if (assignment.at8(row) != _clusterToSplit) continue; double weight = _hasWeight ? 
cs[N].atd(row) : 1; if (weight == 0) continue; //skip holdout rows assert(weight == 1); //K-Means only works for weight 1 (or weight 0 for holdout) data(values, cs, row, _means, _mults, _modes); // Load row as doubles assert (_isCats[_dimToSplit]==null); if (values[_dimToSplit] > _centers[_clusterToSplit][_dimToSplit]) { cd._cluster = _centers.length-1; assignment.set(row, cd._cluster); } else { cd._cluster = _clusterToSplit; } int clu = cd._cluster; assert clu != -1; // No broken rows // Add values and increment counter for chosen cluster for( int col = 0; col < N; col++ ) _cMeans[clu][col] += values[col]; // Sum the column centers _size[clu]++; } // Scale back down to local mean for( int clu = 0; clu < _k; clu++ ) if( _size[clu] != 0 ) ArrayUtils.div(_cMeans[clu], _size[clu]); _centers = null; _means = _mults = null; _modes = null; } @Override public void reduce(SplitTask mr) { for( int clu = 0; clu < _k; clu++ ) { long ra = _size[clu]; long rb = mr._size[clu]; double[] ma = _cMeans[clu]; double[] mb = mr._cMeans[clu]; for( int c = 0; c < ma.length; c++ ) // Recursive mean if( ra+rb > 0 ) ma[c] = (ma[c] * ra + mb[c] * rb) / (ra + rb); } ArrayUtils.add(_size, mr._size); } } }
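The estimate_k search in computeImpl above stops adding centroids once the relative drop in tot_withinss falls below rel_improvement_cutoff. Plugging hypothetical dataset sizes into that formula makes the stopping rule tangible; the row and feature counts below are made up for illustration.

public class EstimateKCutoffSketch {
    // rel_improvement_cutoff = min(0.02 + 10/numRows + 2.5/numFeatures^2, 0.8), as in computeImpl above.
    static double cutoff(long numRows, int numFeatures) {
        return Math.min(0.02 + 10. / numRows + 2.5 / Math.pow(numFeatures, 2), 0.8);
    }

    public static void main(String[] args) {
        // 100,000 rows, 10 features: 0.02 + 0.0001 + 0.025 = 0.0451, so adding a centroid must cut
        // the within-cluster sum of squares by roughly 4.5% (relative) for the search to continue.
        System.out.println(cutoff(100_000, 10));
        // Small, low-dimensional data gets a much stricter bar and stops earlier: 200 rows, 2 features -> 0.695.
        System.out.println(cutoff(200, 2));
    }
}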
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/kmeans/KMeansModel.java
package hex.kmeans; import hex.*; import hex.genmodel.IClusteringModel; import hex.util.EffectiveParametersUtils; import hex.util.LinearAlgebraUtils; import water.DKV; import water.Job; import water.Key; import water.MRTask; import water.codegen.CodeGenerator; import water.codegen.CodeGeneratorPipeline; import water.exceptions.JCodeSB; import water.fvec.Chunk; import water.fvec.Frame; import water.udf.CFuncRef; import water.util.ArrayUtils; import water.util.JCodeGen; import water.util.SBPrintStream; import java.util.Arrays; import static hex.genmodel.GenModel.Kmeans_preprocessData; public class KMeansModel extends ClusteringModel<KMeansModel,KMeansModel.KMeansParameters,KMeansModel.KMeansOutput> { @Override public ToEigenVec getToEigenVec() { return LinearAlgebraUtils.toEigen; } public static class KMeansParameters extends ClusteringModel.ClusteringParameters { public String algoName() { return "KMeans"; } public String fullName() { return "K-means"; } public String javaName() { return KMeansModel.class.getName(); } @Override public long progressUnits() { return _estimate_k ? _k : _max_iterations; } public int _max_iterations = 10; // Max iterations for Lloyds public boolean _standardize = true; // Standardize columns public KMeans.Initialization _init = KMeans.Initialization.Furthest; public Key<Frame> _user_points; public boolean _pred_indicator = false; // For internal use only: generate indicator cols during prediction // Ex: k = 4, cluster = 3 -> [0, 0, 1, 0] public boolean _estimate_k = false; // If enabled, iteratively find up to _k clusters public int[] _cluster_size_constraints = null; } public static class KMeansOutput extends ClusteringModel.ClusteringOutput { // Iterations executed public int _iterations; // Sum squared distance between each point and its cluster center. public double[/*k*/] _withinss; // Within-cluster sum of square error // Sum squared distance between each point and its cluster center. public double _tot_withinss; // Within-cluster sum-of-square error public double[/*iterations*/] _history_withinss = new double[]{Double.NaN}; // Sum squared distance between each point and grand mean. public double _totss; // Total sum-of-square error to grand mean centroid // Sum squared distance between each cluster center and grand mean, divided by total number of observations. 
public double _betweenss; // Total between-cluster sum-of-square error (totss - tot_withinss) // Number of categorical columns trained on public int _categorical_column_count; // Training time public long[/*iterations*/] _training_time_ms = new long[]{System.currentTimeMillis()}; public double[/*iterations*/] _reassigned_count = new double[]{Double.NaN}; public int[/*iterations*/] _k = new int[]{0}; public KMeansOutput( KMeans b ) { super(b); } } public KMeansModel(Key selfKey, KMeansParameters parms, KMeansOutput output) { super(selfKey,parms,output); } @Override public void initActualParamValues() { super.initActualParamValues(); EffectiveParametersUtils.initFoldAssignment(_parms); EffectiveParametersUtils.initCategoricalEncoding(_parms, Model.Parameters.CategoricalEncodingScheme.Enum); } @Override public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) { assert domain == null; return new ModelMetricsClustering.MetricBuilderClustering(_output.nfeatures(),_output._k[_output._k.length-1]); } @Override protected PredictScoreResult predictScoreImpl(Frame orig, Frame adaptedFr, String destination_key, final Job j, boolean computeMetrics, CFuncRef customMetricFunc) { if (!_parms._pred_indicator) { return super.predictScoreImpl(orig, adaptedFr, destination_key, j, computeMetrics, customMetricFunc); } else { final int len = _output._k[_output._k.length-1]; String prefix = "cluster_"; Frame adaptFrm = new Frame(adaptedFr); for(int c = 0; c < len; c++) adaptFrm.add(prefix + Double.toString(c+1), adaptFrm.anyVec().makeZero()); new MRTask() { @Override public void map( Chunk chks[] ) { if (isCancelled() || j != null && j.stop_requested()) return; double tmp [] = new double[_output._names.length]; double preds[] = new double[len]; for(int row = 0; row < chks[0]._len; row++) { Arrays.fill(preds,0); double p[] = score_indicator(chks, row, tmp, preds); for(int c = 0; c < preds.length; c++) chks[_output._names.length + c].set(row, p[c]); } if (j != null) j.update(1); } }.doAll(adaptFrm); // Return the predicted columns int x = _output._names.length, y = adaptFrm.numCols(); Frame f = adaptFrm.extractFrame(x, y); // this will call vec_impl() and we cannot call the delete() below just yet f = new Frame(Key.<Frame>make(destination_key), f.names(), f.vecs()); DKV.put(f); ModelMetrics.MetricBuilder<?> mb = makeMetricBuilder(null); return new PredictScoreResult(mb, f, f); } } public double[] score_indicator(Chunk[] chks, int row_in_chunk, double[] tmp, double[] preds) { assert _parms._pred_indicator; assert tmp.length == _output._names.length && preds.length == _output._centers_raw.length; for(int i = 0; i < tmp.length; i++) tmp[i] = chks[i].atd(row_in_chunk); double[] clus = new double[1]; score0(tmp, clus); // this saves cluster number into clus[0] assert preds != null && ArrayUtils.l2norm2(preds) == 0 : "preds must be a vector of all zeros, got " + Arrays.toString(preds); assert clus[0] >= 0 && clus[0] < preds.length : "Cluster number must be an integer in [0," + String.valueOf(preds.length) + ")"; preds[(int)clus[0]] = 1; return preds; } public double[] score_ratio(Chunk[] chks, int row_in_chunk, double[] tmp) { assert _parms._pred_indicator; assert tmp.length == _output._names.length; for(int i = 0; i < tmp.length; i++) tmp[i] = chks[i].atd(row_in_chunk); double[][] centers = _parms._standardize ? 
_output._centers_std_raw : _output._centers_raw; double[] preds = hex.genmodel.GenModel.KMeans_simplex(centers,tmp,_output._domains); assert preds.length == _output._k[_output._k.length-1]; assert Math.abs(ArrayUtils.sum(preds) - 1) < 1e-6 : "Sum of k-means distance ratios should equal 1"; return preds; } @Override protected double[] score0(double[] data, double[] preds, double offset) { return score0(data, preds); } @Override protected double[] score0(double data[/*ncols*/], double preds[/*nclasses+1*/]) { double[][] centers = _parms._standardize ? _output._centers_std_raw : _output._centers_raw; Kmeans_preprocessData(data, _output._normSub, _output._normMul, _output._mode); preds[0] = hex.genmodel.GenModel.KMeans_closest(centers,data,_output._domains); return preds; } @Override protected double data(Chunk[] chks, int row, int col){ return Kmeans_preprocessData(chks[col].atd(row),col,_output._normSub,_output._normMul,_output._mode); } @Override protected Class<?>[] getPojoInterfaces() { return new Class<?>[]{IClusteringModel.class}; } // Override in subclasses to provide some top-level model-specific goodness @Override protected void toJavaPredictBody(SBPrintStream body, CodeGeneratorPipeline classCtx, CodeGeneratorPipeline fileCtx, final boolean verboseCode) { // This is model name final String mname = JCodeGen.toJavaId(_key.toString()); if(_parms._standardize) { fileCtx.add(new CodeGenerator() { @Override public void generate(JCodeSB out) { JCodeGen.toClassWithArray(out, null, mname + "_MEANS", _output._normSub, "Column means of training data"); JCodeGen.toClassWithArray(out, null, mname + "_MULTS", _output._normMul, "Reciprocal of column standard deviations of training data"); JCodeGen.toClassWithArray(out, null, mname + "_MODES", _output._mode, "Mode for categorical columns"); JCodeGen.toClassWithArray(out, null, mname + "_CENTERS", _output._centers_std_raw, "Normalized cluster centers[K][features]"); } }); // Predict function body: Standardize data first body.ip("Kmeans_preprocessData(data,") .pj(mname + "_MEANS", "VALUES,") .pj(mname + "_MULTS", "VALUES,") .pj(mname + "_MODES", "VALUES") .p(");").nl(); // Predict function body: main work function is a utility in GenModel class. body.ip("preds[0] = KMeans_closest(") .pj(mname + "_CENTERS", "VALUES") .p(", data, DOMAINS); ").nl(); // at function level } else { fileCtx.add(new CodeGenerator() { @Override public void generate(JCodeSB out) { JCodeGen.toClassWithArray(out, null, mname + "_CENTERS", _output._centers_raw, "Denormalized cluster centers[K][features]"); } }); // Predict function body: main work function is a utility in GenModel class. body.ip("preds[0] = KMeans_closest(") .pj(mname + "_CENTERS", "VALUES") .p(",data, DOMAINS);").nl(); // at function level } } @Override protected SBPrintStream toJavaTransform(SBPrintStream ccsb, CodeGeneratorPipeline fileCtx, boolean verboseCode) { // ccsb = classContext ccsb.nl(); ccsb.ip("// Pass in data in a double[], in a same way as to the score0 function.").nl(); ccsb.ip("// Cluster distances will be stored into the distances[] array. Function").nl(); ccsb.ip("// will return the closest cluster. This way the caller can avoid to call").nl(); ccsb.ip("// score0(..) 
to retrieve the cluster where the data point belongs.").nl(); ccsb.ip("public final int distances( double[] data, double[] distances ) {").nl(); toJavaDistancesBody(ccsb.ii(1)); ccsb.ip("return cluster;").nl(); ccsb.di(1).ip("}").nl(); ccsb.nl(); ccsb.ip("// Returns number of cluster used by this model.").nl(); ccsb.ip("public final int getNumClusters() {").nl(); toJavaGetNumClustersBody(ccsb.ii(1)); ccsb.ip("return nclusters;").nl(); ccsb.di(1).ip("}").nl(); // Output class context CodeGeneratorPipeline classCtx = new CodeGeneratorPipeline(); //new SB().ii(1); classCtx.generate(ccsb.ii(1)); ccsb.di(1); return ccsb; } private void toJavaDistancesBody(SBPrintStream body) { // This is model name final String mname = JCodeGen.toJavaId(_key.toString()); if(_parms._standardize) { // Distances function body: Standardize data first body.ip("Kmeans_preprocessData(data,") .pj(mname + "_MEANS", "VALUES,") .pj(mname + "_MULTS", "VALUES,") .pj(mname + "_MODES", "VALUES") .p(");").nl(); // Distances function body: main work function is a utility in GenModel class. body.ip("int cluster = KMeans_distances(") .pj(mname + "_CENTERS", "VALUES") .p(", data, DOMAINS, distances); ").nl(); // at function level } else { // Distances function body: main work function is a utility in GenModel class. body.ip("int cluster = KMeans_distances(") .pj(mname + "_CENTERS", "VALUES") .p(",data, DOMAINS, distances);").nl(); // at function level } } private void toJavaGetNumClustersBody(SBPrintStream body) { // This is model name final String mname = JCodeGen.toJavaId(_key.toString()); body.ip("int nclusters = ").pj(mname + "_CENTERS", "VALUES").p(".length;").nl(); } @Override protected boolean toJavaCheckTooBig() { return _parms._standardize ? _output._centers_std_raw.length * _output._centers_std_raw[0].length > 1e6 : _output._centers_raw.length * _output._centers_raw[0].length > 1e6; } @Override public KMeansMojoWriter getMojo() { return new KMeansMojoWriter(this); } }
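KMeansModel.score0 above standardizes an incoming row with the stored means and reciprocal standard deviations and then picks the nearest standardized center; score_indicator turns the chosen cluster into a one-hot vector. The sketch below walks through that scoring path under simplifying assumptions: purely numeric columns, no missing values, and hypothetical names throughout; it does not call the real GenModel helpers such as KMeans_closest or Kmeans_preprocessData.

import java.util.Arrays;

// Standalone sketch (not the H2O GenModel API) of the conceptual scoring path:
// standardize the row, find the nearest standardized center by squared Euclidean
// distance, then optionally one-hot encode the winning cluster.
public class KMeansScoringSketch {
    static int closest(double[][] centers, double[] row, double[] means, double[] mults) {
        double[] x = row.clone();
        for (int i = 0; i < x.length; i++) x[i] = (x[i] - means[i]) * mults[i]; // standardize (mult = 1/sd)
        int best = -1;
        double bestD = Double.MAX_VALUE;
        for (int k = 0; k < centers.length; k++) {
            double d = 0;
            for (int i = 0; i < x.length; i++) {
                double diff = x[i] - centers[k][i];
                d += diff * diff;
            }
            if (d < bestD) { bestD = d; best = k; }
        }
        return best;
    }

    static double[] indicator(int cluster, int k) {   // _pred_indicator-style output
        double[] preds = new double[k];
        preds[cluster] = 1;
        return preds;
    }

    public static void main(String[] args) {
        double[][] stdCenters = {{-1.0, -1.0}, {1.0, 1.0}};
        double[] means = {5.0, 10.0}, mults = {0.5, 0.25};
        double[] row = {7.0, 14.0};                          // standardizes to (1.0, 1.0)
        int c = closest(stdCenters, row, means, mults);
        System.out.println(c + " -> " + Arrays.toString(indicator(c, stdCenters.length)));  // 1 -> [0.0, 1.0]
    }
}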
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/kmeans/KMeansMojoWriter.java
package hex.kmeans; import hex.ModelMojoWriter; import java.io.IOException; public class KMeansMojoWriter extends ModelMojoWriter<KMeansModel, KMeansModel.KMeansParameters, KMeansModel.KMeansOutput> { @SuppressWarnings("unused") // Called through reflection in ModelBuildersHandler public KMeansMojoWriter() {} public KMeansMojoWriter(KMeansModel model) { super(model); } @Override public String mojoVersion() { return "1.00"; } @Override protected void writeModelData() throws IOException { writekv("standardize", model._parms._standardize); double[][] centers; if (model._parms._standardize) { writekv("standardize_means", model._output._normSub); writekv("standardize_mults", model._output._normMul); writekv("standardize_modes", model._output._mode); centers = model._output._centers_std_raw; } else centers = model._output._centers_raw; writekv("center_num", centers.length); for (int i = 0; i < centers.length; i++) writekv("center_" + i, centers[i]); } }
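When standardization is enabled, KMeansMojoWriter stores the standardized centers together with the "standardize_means" and "standardize_mults" keys (the mults being reciprocal standard deviations). A consumer that wants centers on the original scale has to reverse the transform for numeric columns, mirroring the de-standardization shown earlier in the KMeans centers() helper: original = standardized / mult + mean. Below is a minimal sketch of that reversal, assuming all-numeric columns; it is not the MOJO reader API.

// Standalone sketch (not the MOJO reader API): recover centers on the original scale
// from standardized centers plus the stored means and reciprocal standard deviations.
public class CenterDenormalizeSketch {
    static double[][] denormalize(double[][] stdCenters, double[] means, double[] mults) {
        double[][] out = new double[stdCenters.length][];
        for (int k = 0; k < stdCenters.length; k++) {
            out[k] = stdCenters[k].clone();
            for (int j = 0; j < out[k].length; j++)
                out[k][j] = out[k][j] / mults[j] + means[j];   // reverse standardization
        }
        return out;
    }

    public static void main(String[] args) {
        double[][] std = {{1.0, -0.5}};
        double[] means = {5.0, 10.0}, mults = {0.5, 0.25};
        double[][] orig = denormalize(std, means, mults);
        System.out.println(orig[0][0] + ", " + orig[0][1]);    // expected: 7.0, 8.0
    }
}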
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/kmeans/KMeansSimplexSolver.java
package hex.kmeans; import water.Iced; import water.MRTask; import water.fvec.Chunk; import water.fvec.Frame; import water.fvec.Vec; import water.util.ArrayUtils; import java.util.ArrayList; import java.util.Collections; /** * Polynomial time on average, exponential in the worst case - slow performance. * Solves the Minimal Cost Flow (MCF) problem using the simplex method over a spanning tree. * The smaller the sum of the constraints, the faster it runs - it uses MCF until all constraints are satisfied and then uses standard K-means. */ class KMeansSimplexSolver { public Frame _weights; // input data + weight column + calculated distances from all points to all centres + edge indices + columns to store result of cluster assignments public double _sumWeights; // calculated sum of all weights to calculate maximal capacity value public boolean _hasWeightsColumn; // weight column existence flag public long _numberOfNonZeroWeightPoints; // if the weights column is set, how many rows have non-zero weight public int _constraintsLength; public long _numberOfPoints; public long _edgeSize; public long _nodeSize; public long _resultSize; // Input graph to store K-means configuration public Vec.Reader _demandsReader; // store demand of all nodes (-1 for data points, constraint values for constraint nodes) public Vec.Reader _capacitiesReader; // store capacities of all edges + edges from every node to the leader node public double _maxAbsDemand; // maximal absolute demand to calculate maximal capacity value // Spanning tree to calculate min cost flow public SpanningTree tree; /** * Construct K-means simplex solver. * @param constrains array of constraints * @param weights input data + weight column + calculated distances from all points to all centres + edge indices + columns to store result of cluster assignments * @param sumDistances calculated sum of all weights to calculate maximal capacity value * @param hasWeights weight column existence flag * @param numberOfNonZeroWeightPoints if the weights column is set, how many rows have non-zero weight */ public KMeansSimplexSolver(int[] constrains, Frame weights, double sumDistances, boolean hasWeights, long numberOfNonZeroWeightPoints) { this._numberOfPoints = weights.numRows(); this._nodeSize = this._numberOfPoints + constrains.length + 1; this._edgeSize = _numberOfPoints * constrains.length + constrains.length; this._constraintsLength = constrains.length; Vec demands = Vec.makeCon(0, _nodeSize, Vec.T_NUM); Vec capacities = Vec.makeCon(0, _edgeSize + _nodeSize, Vec.T_NUM); this._resultSize = this._numberOfPoints * _constraintsLength; this._hasWeightsColumn = hasWeights; this._numberOfNonZeroWeightPoints = numberOfNonZeroWeightPoints; this._weights = weights; this._sumWeights = sumDistances; long constraintsSum = 0; _maxAbsDemand = Double.MIN_VALUE; Vec.Writer demandWriter = demands.open(); for (long i = 0; i < _nodeSize; i++) { if (i < _numberOfPoints) { demandWriter.set(i, -1); } else { long tmpDemand; if (i < _nodeSize - 1) { tmpDemand = constrains[(int)(i - _numberOfPoints)]; constraintsSum += constrains[(int)(i - _numberOfPoints)]; } else { tmpDemand = _numberOfNonZeroWeightPoints - constraintsSum; } demandWriter.set(i, tmpDemand); if (Math.abs(tmpDemand) > _maxAbsDemand) { _maxAbsDemand = Math.abs(tmpDemand); } } } demandWriter.close(); int edgeIndexStart = _weights.numCols() - 3 - _constraintsLength; long edgeIndex = 0; for (long i = 0; i < _weights.numRows(); i++) { for(int j=0; j < _constraintsLength; j++){ _weights.vec(edgeIndexStart + j).set(i,
edgeIndex++); } } Vec.Writer capacitiesWriter = capacities.open(); // Initialize graph and spanning tree. // always start with infinity _capacities for (long i = 0; i < _edgeSize; i++) { capacitiesWriter.set(i, Long.MAX_VALUE); } // find maximum value for capacity double maxCapacity = 3 * (_sumWeights > _maxAbsDemand ? _sumWeights : _maxAbsDemand); // fill max capacity from the leader node to all other nodes for (long i = 0; i < _nodeSize; i++) { capacitiesWriter.set(i + _edgeSize, maxCapacity); } capacitiesWriter.close(); this._capacitiesReader = capacities.new Reader(); //this._additiveWeightsReader = additiveWeights.new Reader(); this._demandsReader = demands.new Reader(); this.tree = new SpanningTree(_nodeSize, _edgeSize, _constraintsLength); tree.init(_numberOfPoints, maxCapacity, demands); } /** * Get weight based on edge index from weights data or from additive weights. * @param edgeIndex * @return weight by edge index */ public double getWeight(long edgeIndex) { long numberOfFrameWeights = this._numberOfPoints * this._constraintsLength; if (edgeIndex < numberOfFrameWeights) { int i = _weights.numCols() - 2 * _constraintsLength - 3 + (int)(edgeIndex % _constraintsLength); long j = Math.round(edgeIndex / _constraintsLength); return _weights.vec(i).at(j); } return 0; } /** * Check whether the weight at the given edge index is non-zero (only relevant when a weights column is present). * @param edgeIndex * @return true if the weight at edge index is not zero */ public boolean isNonZeroWeight(long edgeIndex) { if(_hasWeightsColumn) { long numberOfFrameWeights = this._numberOfPoints * this._constraintsLength; if (edgeIndex < numberOfFrameWeights) { long i = Math.round(edgeIndex / _constraintsLength); int j = _weights.numCols() - 1 - 2 * _constraintsLength - 3; return _weights.vec(j).at8(i) == 1; } } return true; } /** * Find the edge with the minimal reduced weight. * @return edge index */ public long findMinimalReducedWeight() { FindMinimalWeightTask t = new FindMinimalWeightTask(tree, _hasWeightsColumn, _constraintsLength).doAll(_weights); double minimalWeight = t.minimalWeight; long minimalIndex = t.minimalIndex; long additiveEdgesIndexStart = _weights.vec(0).length() * _constraintsLength; // Iterate over number of constraints, it is size K, MR task is not optimal here for(long i = additiveEdgesIndexStart; i < _edgeSize; i++){ double tmpWeight = tree.reduceWeight(i, getWeight(i)); boolean countValue = !_hasWeightsColumn || isNonZeroWeight(i); if (countValue && tmpWeight < minimalWeight) { minimalWeight = tmpWeight; minimalIndex = i; } } return minimalIndex; } /** * Find the next optimal entering edge used to form a cycle. * @return index of the edge */ public Edge findNextEnteringEdge() { // Check whether to continue if(!tree.areConstraintsSatisfied()) { long minimalIndex = findMinimalReducedWeight(); if (tree.getFlowByEdgeIndex(minimalIndex) == 0) { return new Edge(minimalIndex, tree._sources.at8(minimalIndex), tree._targets.at8(minimalIndex)); } else { return new Edge(minimalIndex, tree._targets.at8(minimalIndex), tree._sources.at8(minimalIndex)); } } // if all constraints are satisfied, return null return null; } /** * Find the cycle from the edge defined by the source and target nodes to the leader node and back.
* @param edgeIndex * @param sourceIndex source node index * @param targetIndex target node index * @return cycle in spanning tree */ public NodesEdgesObject getCycle(long edgeIndex, long sourceIndex, long targetIndex) { long ancestor = tree.findAncestor(sourceIndex, targetIndex); NodesEdgesObject resultPath = tree.getPath(sourceIndex, ancestor); resultPath.reverseNodes(); resultPath.reverseEdges(); if (resultPath.edgeSize() != 1 || resultPath.getEdge(0) != edgeIndex) { resultPath.addEdge(edgeIndex); } NodesEdgesObject resultPathBack = tree.getPath(targetIndex, ancestor); resultPathBack.removeLastNode(); resultPath.addAllNodes(resultPathBack.getNodes()); resultPath.addAllEdges(resultPathBack.getEdges()); return resultPath; } /** * Find the leaving edge with minimal residual capacity. * @param cycle input cycle of edges and nodes to determine leaving edge * @return the edge with minimal residual capacity */ public Edge getLeavingEdge(NodesEdgesObject cycle) { cycle.reverseNodes(); cycle.reverseEdges(); double minResidualCapacity = Double.MAX_VALUE; int minIndex = -1; for (int i = 0; i < cycle.edgeSize(); i++) { double tmpResidualCapacity = tree.getResidualCapacity(cycle.getEdge(i), cycle.getNode(i), _capacitiesReader.at(cycle.getEdge(i))); boolean countValue = !_hasWeightsColumn || isNonZeroWeight(cycle.getEdge(i)); if (countValue && tmpResidualCapacity < minResidualCapacity) { minResidualCapacity = tmpResidualCapacity; minIndex = i; } } assert minIndex != -1; long nodeIndex = cycle.getNode(minIndex); long edgeIndex = cycle.getEdge(minIndex); return new Edge(edgeIndex, nodeIndex, nodeIndex == tree._sources.at8(edgeIndex) ? tree._targets.at8(edgeIndex) : tree._sources.at8(edgeIndex)); } /** * Calculate the minimal cost flow using a pivot loop over the spanning tree: * - Loop over all entering edges to find the minimal cost flow in the spanning tree. * - When an edge is found, edit the spanning tree. * - If constraints are satisfied or no edge is found, stop.
*/ public void calculateMinimalCostFlow() { Edge edge = findNextEnteringEdge(); while (edge != null) { long enteringEdgeIndex = edge.getEdgeIndex(); long enteringEdgeSourceIndex = edge.getSourceIndex(); long enteringEdgeTargetIndex = edge.getTargetIndex(); NodesEdgesObject cycle = getCycle(enteringEdgeIndex, enteringEdgeSourceIndex, enteringEdgeTargetIndex); Edge leavingEdge = getLeavingEdge(cycle); long leavingEdgeIndex = leavingEdge.getEdgeIndex(); long leavingEdgeSourceIndex = leavingEdge.getSourceIndex(); long leavingEdgeTargetIndex = leavingEdge.getTargetIndex(); double residualCap = tree.getResidualCapacity(leavingEdgeIndex, leavingEdgeSourceIndex, _capacitiesReader.at(leavingEdgeIndex)); if(residualCap != 0) { tree.augmentFlow(cycle, residualCap); } if (enteringEdgeIndex != leavingEdgeIndex) { if (leavingEdgeSourceIndex != tree._parents.at8(leavingEdgeTargetIndex)) { long tmpS = leavingEdgeSourceIndex; leavingEdgeSourceIndex = leavingEdgeTargetIndex; leavingEdgeTargetIndex = tmpS; } if (cycle.indexOfEdge(enteringEdgeIndex) < cycle.indexOfEdge(leavingEdgeIndex)) { long tmpP = enteringEdgeSourceIndex; enteringEdgeSourceIndex = enteringEdgeTargetIndex; enteringEdgeTargetIndex = tmpP; } tree.removeParentEdge(leavingEdgeSourceIndex, leavingEdgeTargetIndex); tree.makeRoot(enteringEdgeTargetIndex); tree.addEdge(enteringEdgeIndex, enteringEdgeSourceIndex, enteringEdgeTargetIndex); tree.updatePotentials(enteringEdgeIndex, enteringEdgeSourceIndex, enteringEdgeTargetIndex, getWeight(enteringEdgeIndex)); } edge = findNextEnteringEdge(); } } public void checkConstraintsCondition(int[] numberOfPointsInCluster){ for(int i = 0; i<_constraintsLength; i++){ assert numberOfPointsInCluster[i] >= _demandsReader.at8(_numberOfPoints+i) : String.format("Cluster %d has %d assigned points however should has assigned at least %d points.", i+1, numberOfPointsInCluster[i], _demandsReader.at8(_numberOfPoints+i)); } } /** * Calculate minimal cost flow and based on flow assign cluster to all data points. * @return input data with new cluster assignments */ public Frame assignClusters() { // run minimal cost flow calculation calculateMinimalCostFlow(); // add flow columns to assign clusters _weights = _weights.add(new Frame(tree._edgeFlowDataPoints)); int dataStopLength = _weights.numCols() - (_hasWeightsColumn ? 1 : 0) - 3 * _constraintsLength - 3; // assign cluster based on calculated flow AssignClusterTask task = new AssignClusterTask(_constraintsLength, _hasWeightsColumn, _weights.numCols()); task.doAll(_weights); // check constraints are satisfied checkConstraintsCondition(task._numberOfPointsInCluster); // remove distances columns + edge indices columns for(int i = 0; i < 2 * _constraintsLength; i++) { _weights.remove(dataStopLength+(_hasWeightsColumn ? 1 : 0)); } // remove flow columns for(int i = 0; i < _constraintsLength; i++) { _weights.remove(_weights.numCols()-1); } return _weights; } } /** * Class to store structures for calculation of flow for minimal cost flow problem. 
*/ class SpanningTree extends Iced<SpanningTree> { public long _nodeSize; public long _edgeSize; public int _secondLayerSize; public long _dataPointSize; public Vec[] _edgeFlowDataPoints; // [constraints size] nodeSize - secondLayerSize - 1 (number of data) public Vec _edgeFlowRest; // secondLayerSize size + node size public Vec _nodePotentials; // node size, long public Vec _parents; // node size + 1, integer public Vec _parentEdges; // node size + 1, integer public Vec _subtreeSize; // node size + 1, integer public Vec _nextDepthFirst; // node size + 1, integer public Vec _previousNodes; // node size + 1, integer public Vec _lastDescendants; // node size + 1, integer public Vec _sources; // edge size + node size public Vec _targets; // edge size + node size SpanningTree(long nodeSize, long edgeSize, int secondLayerSize){ this._nodeSize = nodeSize; this._edgeSize = edgeSize; this._secondLayerSize = secondLayerSize; this._dataPointSize = nodeSize - secondLayerSize - 1; this._edgeFlowDataPoints = new Vec[secondLayerSize]; for(int i=0; i < secondLayerSize; i++){ this._edgeFlowDataPoints[i] = Vec.makeCon(0, _dataPointSize, Vec.T_NUM); } this._edgeFlowRest = Vec.makeCon(0, secondLayerSize + nodeSize, Vec.T_NUM); this._nodePotentials = Vec.makeCon(0, nodeSize, Vec.T_NUM); this._parents = Vec.makeCon(0, nodeSize+1, Vec.T_NUM); this._parentEdges = Vec.makeCon(0, nodeSize+1, Vec.T_NUM); this._subtreeSize = Vec.makeCon(1, nodeSize+1, Vec.T_NUM); this._nextDepthFirst = Vec.makeCon(0, nodeSize+1, Vec.T_NUM); this._previousNodes = Vec.makeCon(0, nodeSize+1, Vec.T_NUM); this._lastDescendants = Vec.makeCon(0, nodeSize+1, Vec.T_NUM); } public void init(long numberOfPoints, double maxCapacity, Vec demands){ _sources = Vec.makeCon(0, _edgeSize + _nodeSize, Vec.T_NUM); _targets = Vec.makeCon(0, _edgeSize + _nodeSize, Vec.T_NUM); for (long i = 0; i < _nodeSize; i++) { if (i < numberOfPoints) { for (int j = 0; j < _secondLayerSize; j++) { _sources.set(i * _secondLayerSize + j, i); _targets.set(i * _secondLayerSize + j, numberOfPoints + j); } } else { if (i < _nodeSize - 1) { _sources.set(numberOfPoints* _secondLayerSize +i-numberOfPoints, i); _targets.set(numberOfPoints* _secondLayerSize +i-numberOfPoints, _nodeSize - 1); } } } for (long i = 0; i < _nodeSize; i++) { long demand = demands.at8(i); if (demand >= 0) { _sources.set(_edgeSize + i, _nodeSize); _targets.set(_edgeSize + i, i); } else { _sources.set(_edgeSize + i, i); _targets.set(_edgeSize + i, _nodeSize); } if (i < _nodeSize - 1) { _nextDepthFirst.set(i, i + 1); } _edgeFlowRest.set(_secondLayerSize+i, Math.abs(demand)); _nodePotentials.set(i, demand < 0 ? maxCapacity : -maxCapacity); _parents.set(i, _nodeSize); _parentEdges.set(i, i + _edgeSize); _previousNodes.set(i, i - 1); _lastDescendants.set(i, i); } _parents.set(_nodeSize, -1); _subtreeSize.set(_nodeSize, _nodeSize + 1); _nextDepthFirst.set(_nodeSize - 1, _nodeSize); _previousNodes.set(0, _nodeSize); _previousNodes.set(_nodeSize, _nodeSize - 1); _lastDescendants.set(_nodeSize, _nodeSize - 1); } /** * Check if the constraints are satisfied. * If yes, the algorithm can continue as standard K-means and save time. 
Useful when constraints are small numbers * @return true if the constraints are satisfied */ public boolean areConstraintsSatisfied() { Vec.Reader flowReader = _edgeFlowRest.new Reader(); long length = flowReader.length(); for(long i = 2; i < _secondLayerSize + 2; i++) { if(flowReader.at8(length - i) > 0) { return false; } } return true; } public long findAncestor(long sourceIndex, long targetIndex) { long subtreeSizeSource = _subtreeSize.at8(sourceIndex); long subtreeSizeTarget = _subtreeSize.at8(targetIndex); while (true) { while (subtreeSizeSource < subtreeSizeTarget) { sourceIndex = _parents.at8(sourceIndex); subtreeSizeSource = _subtreeSize.at8(sourceIndex); } while (subtreeSizeSource > subtreeSizeTarget) { targetIndex = _parents.at8(targetIndex); subtreeSizeTarget = _subtreeSize.at8(targetIndex); } if (subtreeSizeSource == subtreeSizeTarget) { if (sourceIndex !=targetIndex) { sourceIndex = _parents.at8(sourceIndex); subtreeSizeSource = _subtreeSize.at8(sourceIndex); targetIndex = _parents.at8(targetIndex); subtreeSizeTarget = _subtreeSize.at8(targetIndex); } else { return sourceIndex; } } } } public long getFlowByEdgeIndex(long edgeIndex){ if(edgeIndex < _dataPointSize * _secondLayerSize) { int i = (int)(edgeIndex % _secondLayerSize); long j = Math.round(edgeIndex / _secondLayerSize); return _edgeFlowDataPoints[i].at8(j); } else { return _edgeFlowRest.at8(edgeIndex-_dataPointSize * _secondLayerSize); } } public void setFlowByEdgeIndex(long edgeIndex, long value){ if(edgeIndex < _dataPointSize * _secondLayerSize) { int i = (int)(edgeIndex % _secondLayerSize); long j = Math.round(edgeIndex / _secondLayerSize); _edgeFlowDataPoints[i].set(j, value); } else { _edgeFlowRest.set(edgeIndex - _dataPointSize * _secondLayerSize, value); } } public double reduceWeight(long edgeIndex, double weight) { double newWeight = weight - _nodePotentials.at(_sources.at8(edgeIndex)) + _nodePotentials.at(_targets.at8(edgeIndex)); return getFlowByEdgeIndex(edgeIndex) == 0 ? newWeight : - newWeight; } public NodesEdgesObject getPath(long node, long ancestor) { NodesEdgesObject result = new NodesEdgesObject(); result.addNode(node); while (node != ancestor) { result.addEdge(_parentEdges.at8(node)); node = _parents.at8(node); result.addNode(node); } return result; } public double getResidualCapacity(long edgeIndex, long nodeIndex, double capacity) { long flow = getFlowByEdgeIndex(edgeIndex); return nodeIndex == _sources.at8(edgeIndex) ? 
capacity - flow : flow; } public void augmentFlow(NodesEdgesObject nodesEdges, double flow) { for (int i = 0; i < nodesEdges.edgeSize(); i++) { long edge = nodesEdges.getEdge(i); long node = nodesEdges.getNode(i); long edgeFlow = getFlowByEdgeIndex(edge); if (node == _sources.at8(edge)) { setFlowByEdgeIndex(edge, edgeFlow + (int)flow); } else { setFlowByEdgeIndex(edge, edgeFlow - (int)flow); } } } public void removeParentEdge(long sourceIndex, long targetIndex) { long subtreeSizeTarget = _subtreeSize.at8(targetIndex); long previousTargetIndex = _previousNodes.at8(targetIndex); long lastTargetIndex = _lastDescendants.at8(targetIndex); long nextTargetIndex = _nextDepthFirst.at8(lastTargetIndex); _parents.set(targetIndex, -1); _parentEdges.set(targetIndex, -1); _nextDepthFirst.set(previousTargetIndex, nextTargetIndex); _previousNodes.set(nextTargetIndex, previousTargetIndex); _nextDepthFirst.set(lastTargetIndex, targetIndex); _previousNodes.set(targetIndex, lastTargetIndex); while (sourceIndex != -1) { _subtreeSize.set(sourceIndex, _subtreeSize.at8(sourceIndex) - subtreeSizeTarget); if (lastTargetIndex == _lastDescendants.at8(sourceIndex)) { _lastDescendants.set(sourceIndex, previousTargetIndex); } sourceIndex = _parents.at8(sourceIndex); } } public void makeRoot(long nodeIndex) { ArrayList<Long> ancestors = new ArrayList<>(); while (nodeIndex != -1) { ancestors.add(nodeIndex); nodeIndex = _parents.at8(nodeIndex); } Collections.reverse(ancestors); for (int i = 0; i < ancestors.size() - 1; i++) { long sourceIndex = ancestors.get(i); long targetIndex = ancestors.get(i + 1); long subtreeSizeSource = _subtreeSize.at8(sourceIndex); long lastSourceIndex = _lastDescendants.at8(sourceIndex); long prevTargetIndex = _previousNodes.at8(targetIndex); long lastTargetIndex = _lastDescendants.at8(targetIndex); long nextTargetIndex = _nextDepthFirst.at8(lastTargetIndex); _parents.set(sourceIndex, targetIndex); _parents.set(targetIndex, -1); _parentEdges.set(sourceIndex, _parentEdges.at8(targetIndex)); _parentEdges.set(targetIndex, -1); _subtreeSize.set(sourceIndex, subtreeSizeSource - _subtreeSize.at8(targetIndex)); _subtreeSize.set(targetIndex, subtreeSizeSource); _nextDepthFirst.set(prevTargetIndex, nextTargetIndex); _previousNodes.set(nextTargetIndex, prevTargetIndex); _nextDepthFirst.set(lastTargetIndex, targetIndex); _previousNodes.set(targetIndex, lastTargetIndex); if (lastSourceIndex == lastTargetIndex) { _lastDescendants.set(sourceIndex, prevTargetIndex); lastSourceIndex = prevTargetIndex; } _previousNodes.set(sourceIndex, lastTargetIndex); _nextDepthFirst.set(lastTargetIndex, sourceIndex); _nextDepthFirst.set(lastSourceIndex, targetIndex); _previousNodes.set(targetIndex, lastSourceIndex); _lastDescendants.set(targetIndex, lastSourceIndex); } } public void addEdge(long edgeIndex, long sourceIndex, long targetIndex) { long lastSourceIndex = _lastDescendants.at8(sourceIndex); long nextSourceIndex = _nextDepthFirst.at8(lastSourceIndex); long subtreeSizeTarget = _subtreeSize.at8(targetIndex); long lastTargetIndex = _lastDescendants.at8(targetIndex); _parents.set(targetIndex, sourceIndex); _parentEdges.set(targetIndex, edgeIndex); _nextDepthFirst.set(lastSourceIndex, targetIndex); _previousNodes.set(targetIndex, lastSourceIndex); _previousNodes.set(nextSourceIndex, lastTargetIndex); _nextDepthFirst.set(lastTargetIndex, nextSourceIndex); while (sourceIndex != -1) { _subtreeSize.set(sourceIndex, _subtreeSize.at8(sourceIndex) + subtreeSizeTarget); if (lastSourceIndex == _lastDescendants.at8(sourceIndex)) { 
_lastDescendants.set(sourceIndex, lastTargetIndex); } sourceIndex = _parents.at8(sourceIndex); } } public void updatePotentials(long edgeIndex, long sourceIndex, long targetIndex, double weight) { double potential; if (targetIndex == _targets.at8(edgeIndex)) { potential = _nodePotentials.at(sourceIndex) - weight - _nodePotentials.at(targetIndex); } else { potential = _nodePotentials.at(sourceIndex) + weight - _nodePotentials.at(targetIndex); } _nodePotentials.set(targetIndex, _nodePotentials.at(targetIndex) + potential); long last = _lastDescendants.at8(targetIndex); while (targetIndex != last) { targetIndex = _nextDepthFirst.at8(targetIndex); _nodePotentials.set(targetIndex, _nodePotentials.at(targetIndex) + potential); } } } /** * Helper class to store edges in Spanning tree net */ class Edge { private long _edgeIndex; private long _sourceIndex; private long _targetIndex; public Edge(long edgeIndex, long sourceIndex, long targetIndex) { this._edgeIndex = edgeIndex; this._sourceIndex = sourceIndex; this._targetIndex = targetIndex; } public long getEdgeIndex() { return _edgeIndex; } public long getSourceIndex() { return _sourceIndex; } public long getTargetIndex() { return _targetIndex; } @Override public String toString() { return _edgeIndex+" "+_sourceIndex+" "+_targetIndex; } } /** * Helper class to store edges and nodes of one cycle in Spanning tree net */ class NodesEdgesObject { private ArrayList<Long> _nodes; private ArrayList<Long> _edges; public NodesEdgesObject() { this._nodes = new ArrayList<>(); this._edges = new ArrayList<>(); } public void addNode(long node){ _nodes.add(node); } public void removeLastNode(){ _nodes.remove(_nodes.size()-1); } public long getNode(int index){ return _nodes.get(index); } public ArrayList<Long> getNodes() { return _nodes; } public void addEdge(long edge){ _edges.add(edge); } public long getEdge(int index){ return _edges.get(index); } public ArrayList<Long> getEdges() { return _edges; } public int edgeSize(){ return _edges.size(); } public int indexOfEdge(long value){ return _edges.indexOf(value); } public void reverseNodes(){ Collections.reverse(_nodes); } public void reverseEdges(){ Collections.reverse(_edges); } public void addAllNodes(ArrayList<Long> newNodes){ _nodes.addAll(newNodes); } public void addAllEdges(ArrayList<Long> newEdges){ _edges.addAll(newEdges); } @Override public String toString() { StringBuilder sb = new StringBuilder("NEO: nodes: "); for (long i: _nodes) { sb.append(i+" "); } sb.append("edges: "); for (long i: _edges) { sb.append(i+" "); } sb.deleteCharAt(sb.length()-1); return sb.toString(); } } /** * Map Reduce task to find minimal reduced weight (distance). 
*/ class FindMinimalWeightTask extends MRTask<FindMinimalWeightTask> { // IN private SpanningTree _tree; private boolean _hasWeightsColumn; private int _constraintsLength; //OUT double minimalWeight = Double.MAX_VALUE; long minimalIndex = -1; FindMinimalWeightTask(SpanningTree tree, boolean hasWeightsColumn, int constraintsLength) { _tree = tree; _hasWeightsColumn = hasWeightsColumn; _constraintsLength = constraintsLength; } @Override public void map(Chunk[] cs) { int startDistancesIndex = cs.length - 2 * _constraintsLength - 3; int startEdgeIndex = cs.length - 3 - _constraintsLength; for (int i = 0; i < cs[0]._len; i++) { for (int j = 0; j < _constraintsLength; j++) { double weight = cs[startDistancesIndex + j].atd(i); long edgeIndex = cs[startEdgeIndex + j].at8(i); double tmpWeight = _tree.reduceWeight(edgeIndex, weight); boolean countValue = !_hasWeightsColumn || cs[startDistancesIndex-1].at8(i) == 1; if (countValue && tmpWeight < minimalWeight) { minimalWeight = tmpWeight; minimalIndex = edgeIndex; } } } } @Override public void reduce(FindMinimalWeightTask mrt) { if (mrt.minimalWeight < minimalWeight) { minimalIndex = mrt.minimalIndex; minimalWeight = mrt.minimalWeight; } } } /** * Map Reduce task to assign cluster index based on calculated flow. * If no cluster assigned - assign cluster by minimal distance. * Return number of points in each cluster and changed input frame based on new cluster assignment. */ class AssignClusterTask extends MRTask<AssignClusterTask> { // IN private int _constraintsLength; private boolean _hasWeightsColumn; private int _weightIndex; private int _distanceIndexStart; private int _flowIndexStart; private int _oldAssignmentIndex; private int _newAssignmentIndex; private int _distanceAssignmentIndex; private int _dataStopIndex; // OUT int[] _numberOfPointsInCluster; // changed input chunks AssignClusterTask(int constraintsLength, boolean hasWeightsColumn, int numCols){ // Input data structure should be: // - data points (number of columns from training dataset) // - weight (1 column if CV is enabled) // - distances from data points to each cluster (k columns) // - edge indices (k columns of columns, not useful here) // - result distance (1 column, if the cluster is assigned there is distance to this cluster) // - old assignment (1 column, assignment from the previous iteration) // - new assignment (1 column, assignment form the current iteration) // - flow (k columns, calculated assignment from the MCF algorithm) // Based on this structure indices are calculated and used _constraintsLength = constraintsLength; _hasWeightsColumn = hasWeightsColumn; _distanceAssignmentIndex = numCols - 3 - constraintsLength; _oldAssignmentIndex = numCols - 2 - constraintsLength; _newAssignmentIndex = numCols - 1 - constraintsLength; _dataStopIndex = numCols - (_hasWeightsColumn ? 1 : 0) - 3 * _constraintsLength - 3; _weightIndex = _dataStopIndex; _distanceIndexStart = _dataStopIndex + (_hasWeightsColumn ? 1 : 0); _flowIndexStart = numCols - constraintsLength; } public void assignCluster(Chunk[] cs, int row, int clusterIndex){ // old assignment cs[_oldAssignmentIndex].set(row, cs[_newAssignmentIndex].at8(row)); // new assignment cs[_newAssignmentIndex].set(row, clusterIndex); // distances cs[_distanceAssignmentIndex].set(row, cs[_dataStopIndex + (_hasWeightsColumn ? 
1 : 0) + clusterIndex].atd(row)); _numberOfPointsInCluster[clusterIndex]++; } @Override public void map(Chunk[] cs) { _numberOfPointsInCluster = new int[_constraintsLength]; for (int i = 0; i < cs[0].len(); i++) { if (!_hasWeightsColumn || cs[_weightIndex].at8(i) == 1) { // CV is not enabled or weight is 1 boolean assigned = false; for (int j = 0; j < _constraintsLength; j++) { if (cs[_flowIndexStart + j].at8(i) == 1) { // data point has assignment from MCF algorithm assignCluster(cs, i, j); assigned = true; break; } } if(!assigned){ // data point has no assignment from MCF -> min distance is used double minDistance = cs[_distanceIndexStart].atd(i); int minIndex = 0; for (int j = 1; j < _constraintsLength; j++) { double tmpDistance = cs[_distanceIndexStart + j].atd(i); if(minDistance > tmpDistance){ minDistance = tmpDistance; minIndex = j; } } assignCluster(cs, i, minIndex); } } } } @Override public void reduce(AssignClusterTask mrt) { ArrayUtils.add(this._numberOfPointsInCluster, mrt._numberOfPointsInCluster); } }
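AssignClusterTask above resolves each row's cluster in two stages: take the cluster whose flow column from the min-cost-flow solution equals 1, otherwise fall back to the nearest cluster by distance. The standalone sketch below (hypothetical names, no MRTask or Chunk plumbing) isolates that decision rule.

// Standalone sketch (not an MRTask) of the assignment rule in AssignClusterTask: a row
// takes the cluster whose min-cost-flow edge carries flow 1; if the MCF solution left the
// row unassigned, it falls back to the cluster with the smallest distance.
public class ConstrainedAssignSketch {
    /** flow[j] == 1 when MCF routed this row to cluster j; distance[j] is the row's distance to cluster j. */
    static int assign(long[] flow, double[] distance) {
        for (int j = 0; j < flow.length; j++)
            if (flow[j] == 1) return j;          // assignment decided by the MCF solution
        int best = 0;                            // fallback: plain nearest-cluster rule
        for (int j = 1; j < distance.length; j++)
            if (distance[j] < distance[best]) best = j;
        return best;
    }

    public static void main(String[] args) {
        System.out.println(assign(new long[]{0, 1, 0}, new double[]{0.1, 9.0, 3.0})); // 1 (flow wins over distance)
        System.out.println(assign(new long[]{0, 0, 0}, new double[]{0.4, 0.2, 0.9})); // 1 (minimum distance)
    }
}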
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/modelselection/ModelSelection.java
package hex.modelselection; import hex.*; import hex.glm.GLM; import hex.glm.GLMModel; import water.*; import water.exceptions.H2OModelBuilderIllegalArgumentException; import water.fvec.Frame; import water.util.ArrayUtils; import water.util.PrettyPrint; import java.lang.reflect.Field; import java.util.*; import java.util.stream.Collectors; import java.util.stream.DoubleStream; import java.util.stream.IntStream; import java.util.stream.Stream; import static hex.genmodel.utils.MathUtils.combinatorial; import static hex.glm.GLMModel.GLMParameters.Family.*; import static hex.modelselection.ModelSelectionModel.ModelSelectionParameters.Mode.*; import static hex.modelselection.ModelSelectionUtils.*; public class ModelSelection extends ModelBuilder<hex.modelselection.ModelSelectionModel, hex.modelselection.ModelSelectionModel.ModelSelectionParameters, hex.modelselection.ModelSelectionModel.ModelSelectionModelOutput> { public String[][] _bestModelPredictors; // store for each predictor number, the best model predictors public double[] _bestR2Values; // store the best R2 values of the best models with fix number of predictors public String[][] _predictorsAdd; public String[][] _predictorsRemoved; DataInfo _dinfo; String[] _coefNames; public int _numPredictors; public String[] _predictorNames; double[][] _currCPM; Frame _currCPMFrame; int[] _trackSweep; public int _glmNFolds = 0; Model.Parameters.FoldAssignmentScheme _foldAssignment = null; String _foldColumn = null; public ModelSelection(boolean startup_once) { super(new hex.modelselection.ModelSelectionModel.ModelSelectionParameters(), startup_once); } public ModelSelection(hex.modelselection.ModelSelectionModel.ModelSelectionParameters parms) { super(parms); init(false); } public ModelSelection(hex.modelselection.ModelSelectionModel.ModelSelectionParameters parms, Key<hex.modelselection.ModelSelectionModel> key) { super(parms, key); init(false); } @Override protected int nModelsInParallel(int folds) { return nModelsInParallel(1,2); // disallow nfold cross-validation } @Override protected ModelSelectionDriver trainModelImpl() { return new ModelSelectionDriver(); } @Override public ModelCategory[] can_build() { return new ModelCategory[]{ModelCategory.Regression}; } // because of r2 usage @Override public boolean isSupervised() { return true; } @Override public boolean haveMojo() { return false; } @Override public boolean havePojo() { return false; } public void init(boolean expensive) { if (_parms._nfolds > 0 || _parms._fold_column != null) { // cv enabled if (backward.equals(_parms._mode)) { error("nfolds/fold_column", "cross-validation is not supported for backward " + "selection."); } else if (maxrsweep.equals(_parms._mode)) { error("nfolds/fold_column", "cross-validation is not supported for maxrsweep, " + " maxrsweepsmall, maxrsweep and maxrsweepfull."); } else { _glmNFolds = _parms._nfolds; if (_parms._fold_assignment != null) { _foldAssignment = _parms._fold_assignment; _parms._fold_assignment = null; } if (_parms._fold_column != null) { _foldColumn = _parms._fold_column; _parms._fold_column = null; } _parms._nfolds = 0; } } super.init(expensive); if (error_count() > 0) return; if (expensive) { initModelSelectionParameters(); if (error_count() > 0) return; initModelParameters(); } } private void initModelParameters() { if (!backward.equals(_parms._mode)) { _bestR2Values = new double[_parms._max_predictor_number]; _bestModelPredictors = new String[_parms._max_predictor_number][]; if (!backward.equals(_parms._mode)) _predictorsAdd = new 
String[_parms._max_predictor_number][]; _predictorsRemoved = new String[_parms._max_predictor_number][]; } } private void initModelSelectionParameters() { _predictorNames = extractPredictorNames(_parms, _dinfo, _foldColumn); _numPredictors = _predictorNames.length; if (maxr.equals(_parms._mode) || allsubsets.equals(_parms._mode) || maxrsweep.equals(_parms._mode)) { // check for maxr and allsubsets if (_parms._lambda == null && !_parms._lambda_search && _parms._alpha == null && !maxrsweep.equals(_parms._mode)) _parms._lambda = new double[]{0.0}; // disable regularization if not specified if (nclasses() > 1) error("response", "'allsubsets', 'maxr', 'maxrsweep', " + "'maxrsweep' only works with regression."); if (!(AUTO.equals(_parms._family) || gaussian.equals(_parms._family))) error("_family", "ModelSelection only supports Gaussian family for 'allsubset' and 'maxr' mode."); if (AUTO.equals(_parms._family)) _parms._family = gaussian; if (_parms._max_predictor_number < 1 || _parms._max_predictor_number > _numPredictors) error("max_predictor_number", "max_predictor_number must exceed 0 and be no greater" + " than the number of predictors of the training frame."); } else { // checks for backward selection only _parms._compute_p_values = true; if (_parms._valid != null) error("validation_frame", " is not supported for ModelSelection mode='backward'"); if (_parms._lambda_search) error("lambda_search", "backward selection does not support lambda_search."); if (_parms._lambda != null) { if (_parms._lambda.length > 1) error("lambda", "if set must be set to 0 and cannot be an array or more than" + " length one for backward selection."); if (_parms._lambda[0] != 0) error("lambda", "must be set to 0 for backward selection"); } else { _parms._lambda = new double[]{0.0}; } if (multinomial.equals(_parms._family) || ordinal.equals(_parms._family)) error("family", "backward selection does not support multinomial or ordinal"); if (_parms._min_predictor_number <= 0) error("min_predictor_number", "must be >= 1."); if (_parms._min_predictor_number > _numPredictors) error("min_predictor_number", "cannot exceed the total number of predictors (" + _numPredictors + ")in the dataset."); } if (_parms._nparallelism < 0) error("nparallelism", "must be >= 0."); if (_parms._nparallelism == 0) _parms._nparallelism = H2O.NUMCPUS; if (maxrsweep.equals(_parms._mode)) warn("validation_frame", " is not used in choosing the best k subset for ModelSelection" + " models with maxrsweep."); if (maxrsweep.equals(_parms._mode) && !_parms._build_glm_model && _parms._influence != null) error("influence", " can only be set if glm models are built. 
With maxrsweep model without" + " build_glm_model = true, no GLM models will be built and hence no regression influence diagnostics" + " can be calculated."); } protected void checkMemoryFootPrint(int p) { if (maxrsweep.equals(_parms._mode)) p = (int) Math.ceil(p*(_parms._max_predictor_number+2)/_dinfo.fullN()); HeartBeat hb = H2O.SELF._heartbeat; long mem_usage = (long) (hb._cpus_allowed * (p * p * p)); long max_mem = hb.get_free_mem(); if (mem_usage > max_mem) { String msg = "Gram matrices (one per thread) won't fit in the driver node's memory (" + PrettyPrint.bytes(mem_usage) + " > " + PrettyPrint.bytes(max_mem) + ") - try reducing the number of columns and/or the number of categorical factors (or switch to the L-BFGS solver)."; error("_train", msg); } } public class ModelSelectionDriver extends Driver { public final void buildModel() { hex.modelselection.ModelSelectionModel model = null; try { int numModelBuilt = 0; model = new hex.modelselection.ModelSelectionModel(dest(), _parms, new hex.modelselection.ModelSelectionModel.ModelSelectionModelOutput(ModelSelection.this, _dinfo)); model.write_lock(_job); model._output._mode = _parms._mode; if (backward.equals(_parms._mode)) { model._output._best_model_ids = new Key[_numPredictors]; model._output._coef_p_values = new double[_numPredictors][]; model._output._z_values = new double[_numPredictors][]; model._output._best_predictors_subset = new String[_numPredictors][]; model._output._coefficient_names = new String[_numPredictors][]; model._output._predictors_removed_per_step = new String[_numPredictors][]; model._output._predictors_added_per_step = new String[_numPredictors][]; } else { // maxr, maxrsweep, allsubset model._output._best_r2_values = new double[_parms._max_predictor_number]; model._output._best_predictors_subset = new String[_parms._max_predictor_number][]; model._output._coefficient_names = new String[_parms._max_predictor_number][]; model._output._predictors_removed_per_step = new String[_parms._max_predictor_number][]; model._output._predictors_added_per_step = new String[_parms._max_predictor_number][]; if (maxrsweep.equals(_parms._mode) && !_parms._build_glm_model) { model._output._coefficient_values = new double[_parms._max_predictor_number][]; model._output._coefficient_values_normalized = new double[_parms._max_predictor_number][]; model._output._best_model_ids = null; } else { model._output._best_model_ids = new Key[_parms._max_predictor_number]; } } // build glm model with num_predictors and find one with best R2 if (allsubsets.equals(_parms._mode)) buildAllSubsetsModels(model); else if (maxr.equals(_parms._mode)) buildMaxRModels(model); else if (backward.equals(_parms._mode)) numModelBuilt = buildBackwardModels(model); else if (maxrsweep.equals(_parms._mode)) buildMaxRSweepModels(model); _job.update(0, "Completed GLM model building. Extracting results now."); model.update(_job); // copy best R2 and best predictors to model._output if (backward.equals(_parms._mode)) { model._output.shrinkArrays(numModelBuilt); model._output.generateSummary(numModelBuilt); } else { model._output.generateSummary(); } } finally { model.update(_job); model.unlock(_job); } } Frame extractCPM(double[][] cpmA, String[] coefNames) { String[] coefnames = (coefNames == null || coefNames.length ==0) ? 
_dinfo.coefNames() : coefNames; List<String> predList = Arrays.stream(coefnames).collect(Collectors.toList()); if (_parms._intercept) predList.add("intercept"); predList.add("XTYnYTY"); Frame cpm = new water.util.ArrayUtils().frame(Key.make(), predList.stream().toArray(String[]::new), cpmA); Scope.track(cpm); return rebalance(cpm, false, Key.make().toString()); } /*** * The maxrsweep mode is explained here in the pdf doc: https://github.com/h2oai/h2o-3/issues/6538. * Apart from actions specific to the sweeping implementation, the logic in this function is very similar to * that of maxr mode. */ void buildMaxRSweepModels(ModelSelectionModel model) { _coefNames = _dinfo.coefNames(); // generate cross-product matrix (CPM) as in section III of doc CPMnPredNames cpmPredIndex = genCPMPredNamesIndex(_job._key, _dinfo, _predictorNames, _parms); _predictorNames = cpmPredIndex._predNames; if (_predictorNames.length < _parms._max_predictor_number) error("max_predictor_number", "Your dataset contains duplicated predictors. " + "After removal, reduce your max_predictor_number to " + _predictorNames.length + " or less."); if (error_count() > 0) throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(ModelSelection.this); int cpmSize = cpmPredIndex._cpm.length; if (_parms._multinode_mode) { _currCPMFrame = extractCPM(cpmPredIndex._cpm, cpmPredIndex._coefNames); Scope.track(_currCPMFrame); DKV.put(_currCPMFrame); _trackSweep = IntStream.range(0, cpmPredIndex._cpm.length).map(x -> 1).toArray(); if (_parms._intercept) sweepCPMParallel(_currCPMFrame, new int[]{cpmSize - 2}, _trackSweep); cpmPredIndex._cpm = null; // array no longer needed } else { _currCPM = cpmPredIndex._cpm; if (_parms._intercept) sweepCPM(_currCPM, new int[]{cpmSize - 2}, false); } int[][] pred2CPMIndice = cpmPredIndex._pred2CPMMapping; checkMemoryFootPrint(cpmSize); // generate mapping of predictor index to CPM indices due to enum columns add multiple rows/columns to CPM double r2Scale = 1.0 / calR2Scale(train(), _parms._response_column); CoeffNormalization coefNorm = generateScale(_dinfo, _parms._standardize); List<Integer> currSubsetIndices = new ArrayList<>(); // store best k predictor subsets for 1 to k predictors List<String> predNames = new ArrayList<>(Arrays.asList(_predictorNames)); // store predictor indices that are still available to be added to the bigger subset List<Integer> validSubset = IntStream.rangeClosed(0, predNames.size() - 1).boxed().collect(Collectors.toList()); SweepModel bestModel = null; List<String> allCoefList = Stream.of(_coefNames).collect(Collectors.toList()); BitSet predictorIndices = new BitSet(_predictorNames.length); // pre-allocate memory to use for (int predNum = 1; predNum <= _parms._max_predictor_number; predNum++) { // find best predictor subset for each subset size Set<BitSet> usedCombos = new HashSet<>(); bestModel = forwardStep(currSubsetIndices, validSubset, usedCombos, predictorIndices, pred2CPMIndice, bestModel, _parms._intercept); validSubset.removeAll(currSubsetIndices); _job.update(predNum, "Finished forward step with " + predNum + " predictors."); if (predNum <= _numPredictors && predNum > 1) { // implement the replacement part bestModel = replacement(currSubsetIndices, validSubset, usedCombos, predictorIndices, bestModel, pred2CPMIndice); // reset validSubset currSubsetIndices = IntStream.of(bestModel._predSubset).boxed().collect(Collectors.toList()); validSubset = IntStream.rangeClosed(0, predNames.size() - 1).boxed().collect(Collectors.toList()); 
validSubset.removeAll(currSubsetIndices); } // build glm model with best subcarrier subsets for size and record the update if (_parms._build_glm_model) { GLMModel bestR2Model = buildGLMModel(currSubsetIndices); DKV.put(bestR2Model); model._output.updateBestModels(bestR2Model, predNum - 1); } else { /* if (bestModel._CPM == null) { if (predNum < _parms._max_predictor_number) { throw new RuntimeException("sweeping has failed due to zero pivot values at predictor size " + predNum + ". To avoid this, perform experiment with max_predictor_number to be: " + (predNum - 1) + " or less."); } else { warn("maxrsweep process", " has failed when all predictors are chosen. R2 values is set" + " to -1 to indicate that. All predictors are chosen."); bestModel._CPM = cpmPredIndex._cpm; bestModel._predSubset = currSubsetIndices.stream().mapToInt(x->x).toArray(); int cpmLastInd = bestModel._CPM.length-1; bestModel._CPM[cpmLastInd][cpmLastInd] = Double.MAX_VALUE; } }*/ model._output.updateBestModels(_predictorNames, allCoefList, predNum - 1, _parms._intercept, bestModel._CPM.length, bestModel._predSubset, bestModel._CPM, r2Scale, coefNorm, pred2CPMIndice, _dinfo); } } } public GLMModel buildGLMModel(List<Integer> bestSubsetIndices) { // generate training frame int[] subsetIndices = bestSubsetIndices.stream().mapToInt(Integer::intValue).toArray(); Frame trainFrame = generateOneFrame(subsetIndices, _parms, _predictorNames, null); DKV.put(trainFrame); // generate training parameters final Field[] field1 = ModelSelectionModel.ModelSelectionParameters.class.getDeclaredFields(); final Field[] field2 = Model.Parameters.class.getDeclaredFields(); GLMModel.GLMParameters params = new GLMModel.GLMParameters(); setParamField(_parms, params, false, field1, Collections.emptyList()); setParamField(_parms, params, true, field2, Collections.emptyList()); params._train = trainFrame._key; if (_parms._valid != null) params._valid = _parms._valid; // build and return model GLMModel model = new GLM(params).trainModel().get(); DKV.remove(trainFrame._key); return model; } /** * Perform variable selection using MaxR implementing the sequential replacement method: * 1. forward step: find the initial subset of predictors with highest R2 values. When subset size = 1, this * means basically choosing the one predictor that generates a model with highest R2. When subset size > 1, * this means we choose one predictor to add to the subset which generates a model with hightest R2. * 2. Replacement step: consider the predictors in subset as pred0, pred1, pred2 (using subset size 3 as example): * a. Keep pred1, pred2 and replace pred0 with remaining predictors, find replacement with highest R2 * b. keep pred0, pred2 and replace pred1 with remaining predictors, find replacement with highest R2 * c. keeep pred0, pred1 and replace pred2 with remaining predictors, find replacement with highest R2 * d. from step 2a, 2b, 2c, choose the predictor subset with highest R2, say the subset is pred0, pred4, pred2 * e. Take subset pred0, pred4, pred2 and go through steps a,b,c,d again and only stop when the best R2 does * not improve anymore. * * see doc at https://github.com/h2oai/h2o-3/issues/7217 for details. 
* * @param model */ void buildMaxRModels(ModelSelectionModel model) { List<Integer> currSubsetIndices = new ArrayList<>(); List<String> coefNames = new ArrayList<>(Arrays.asList(_predictorNames)); List<Integer> validSubset = IntStream.rangeClosed(0, coefNames.size() - 1).boxed().collect(Collectors.toList()); for (int predNum = 1; predNum <= _parms._max_predictor_number; predNum++) { // perform for each subset size Set<BitSet> usedCombos = new HashSet<>(); GLMModel bestR2Model = forwardStep(currSubsetIndices, coefNames, predNum - 1, validSubset, _parms, _foldColumn, _glmNFolds, _foldAssignment, usedCombos); // forward step validSubset.removeAll(currSubsetIndices); _job.update(predNum, "Finished building all models with "+predNum+" predictors."); if (predNum < _numPredictors && predNum > 1) { double bestR2ofModel = 0; if (bestR2Model != null) bestR2ofModel = bestR2Model.r2(); GLMModel currBestR2Model = replacement(currSubsetIndices, coefNames, bestR2ofModel, _parms, _glmNFolds, _foldColumn, validSubset, _foldAssignment, usedCombos); if (currBestR2Model != null) { bestR2Model.delete(); bestR2Model = currBestR2Model; } validSubset.removeAll(currSubsetIndices); } DKV.put(bestR2Model); model._output.updateBestModels(bestR2Model, predNum - 1); } } /** * Implements the backward selection mode. Refer to III of ModelSelectionTutorial.pdf in * https://github.com/h2oai/h2o-3/issues/7232 */ private int buildBackwardModels(ModelSelectionModel model) { List<String> predNames = new ArrayList<>(Arrays.asList(_predictorNames)); Frame train = DKV.getGet(_parms._train); List<String> numPredNames = predNames.stream().filter(x -> train.vec(x).isNumeric()).collect(Collectors.toList()); List<String> catPredNames = predNames.stream().filter(x -> !numPredNames.contains(x)).collect(Collectors.toList()); int numModelsBuilt = 0; String[] coefName = predNames.toArray(new String[0]); for (int predNum = _numPredictors; predNum >= _parms._min_predictor_number; predNum--) { int modelIndex = predNum-1; Frame trainingFrame = generateOneFrame(null, _parms, coefName, _foldColumn); DKV.put(trainingFrame); GLMModel.GLMParameters[] glmParam = generateGLMParameters(new Frame[]{trainingFrame}, _parms, _glmNFolds, _foldColumn, _foldAssignment); GLMModel glmModel = new GLM(glmParam[0]).trainModel().get(); DKV.put(glmModel); // track model // evaluate which variable to drop for next round of testing and store corresponding values // if p_values_threshold is specified, model building may stop model._output.extractPredictors4NextModel(glmModel, modelIndex, predNames, numPredNames, catPredNames); numModelsBuilt++; DKV.remove(trainingFrame._key); coefName = predNames.toArray(new String[0]); _job.update(predNum, "Finished building all models with "+predNum+" predictors."); if (_parms._p_values_threshold > 0) { // check if p-values are used to stop model building if (DoubleStream.of(model._output._coef_p_values[modelIndex]) .limit(model._output._coef_p_values[modelIndex].length-1) .allMatch(x -> x <= _parms._p_values_threshold)) break; } if (predNames.size() == 0) // no more predictors available to build models with break; } return numModelsBuilt; } /*** * Find the subset of predictors of sizes 1, 2, ..., _parm._max_predictor_number that generate the * highest R2 value using brute-force. Basically for each subset size, all combinations of predictors * are considered. This method is guaranteed to find the best predictor subset at the cost of computation * complexity. 
* * @param model */ void buildAllSubsetsModels(ModelSelectionModel model) { for (int predNum=1; predNum <= _parms._max_predictor_number; predNum++) { int numModels = combinatorial(_numPredictors, predNum); // generate the training frames with num_predictor predictors in the frame Frame[] trainingFrames = generateTrainingFrames(_parms, predNum, _predictorNames, numModels, _foldColumn); // find best GLM Model with highest R2 value GLMModel bestModel = buildExtractBestR2Model(trainingFrames,_parms, _glmNFolds, _foldColumn, _foldAssignment); DKV.put(bestModel); // extract R2 and collect the best R2 and the predictors set int index = predNum-1; model._output.updateBestModels(bestModel, index); // remove training frames from DKV removeTrainingFrames(trainingFrames); _job.update(predNum, "Finished building all models with "+predNum+" predictors."); } } @Override public void computeImpl() { if (_parms._lambda_search || !_parms._intercept || _parms._lambda == null || _parms._lambda[0] > 0) _parms._use_all_factor_levels = true; _dinfo = new DataInfo(_train.clone(), _valid, 1, _parms._use_all_factor_levels, _parms._standardize ? DataInfo.TransformType.STANDARDIZE : DataInfo.TransformType.NONE, DataInfo.TransformType.NONE, _parms.missingValuesHandling() == GLMModel.GLMParameters.MissingValuesHandling.Skip, _parms.imputeMissing(), _parms.makeImputer(), false, hasWeightCol(), hasOffsetCol(), hasFoldCol(), null); init(true); if (error_count() > 0) throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(ModelSelection.this); _job.update(0, "finished init and ready to build models"); buildModel(); } } /*** * Given current predictor subset in currSubsetIndices, this method will add one more predictor to the subset and * choose the one that will increase the R2 by the most. */ public SweepModel forwardStep(List<Integer> currSubsetIndices, List<Integer> validSubsets, Set<BitSet> usedCombo, BitSet predIndices, int[][] predInd2CPMInd, SweepModel bestModel, boolean hasIntercept) { // generate all models double[] subsetErrVar = bestModel == null ? generateAllErrVar(_currCPM, _currCPMFrame, -1, currSubsetIndices, validSubsets, usedCombo, predIndices, predInd2CPMInd, hasIntercept) : generateAllErrVar(_currCPM, _currCPMFrame, bestModel._CPM.length - 1, currSubsetIndices, validSubsets, usedCombo, predIndices, predInd2CPMInd, hasIntercept); // find the best subset and the corresponding cpm by checking for lowest error variance int bestInd = -1; double errorVarianceMin = Double.MAX_VALUE; int numModel = subsetErrVar.length; for (int index=0; index<numModel; index++) { if (subsetErrVar[index] < errorVarianceMin) { errorVarianceMin = subsetErrVar[index]; bestInd = index; } } if (bestInd == -1) { // Predictor sets are duplicates. Return SweepModel for findBestMSEModel stream operations return new SweepModel(null, null, errorVarianceMin); } else { // set new predictor subset to curSubsetIndices, int newPredictor = validSubsets.get(bestInd); List<Integer> newIndices = extractCPMIndexFromPredOnly(predInd2CPMInd, new int[]{newPredictor}); if (_parms._multinode_mode) sweepCPMParallel(_currCPMFrame, newIndices.stream().mapToInt(x -> x).toArray(), _trackSweep); else sweepCPM(_currCPM, newIndices.stream().mapToInt(x -> x).toArray(), false); currSubsetIndices.add(newPredictor); int[] subsetPred = currSubsetIndices.stream().mapToInt(x->x).toArray(); double[][] subsetCPM = _parms._multinode_mode ? 
extractPredSubsetsCPMFrame(_currCPMFrame, subsetPred, predInd2CPMInd, hasIntercept) : extractPredSubsetsCPM(_currCPM, subsetPred, predInd2CPMInd, hasIntercept); return new SweepModel(subsetPred, subsetCPM, errorVarianceMin); } } /** * consider the predictors in subset as pred0, pred1, pred2 (using subset size 3 as example): * a. Keep pred1, pred2 and replace pred0 with remaining predictors, find replacement with highest R2 * b. keep pred0, pred2 and replace pred1 with remaining predictors, find replacement with highest R2 * c. keep pred0, pred1 and replace pred2 with remaining predictors, find replacement with highest R2 * d. from step 2a, 2b, 2c, choose the predictor subset with highest R2, say the subset is pred0, pred4, pred2 * e. Take subset pred0, pred4, pred2 and go through steps a,b,c,d again and only stop when the best R2 does * not improve anymore. * * see doc at https://github.com/h2oai/h2o-3/issues/7217 for details. * * The most important thing here is to make sure validSubset contains the true eligible predictors to choose * from. Inside the for loop, I will remove and add predictors that have been chosen in oneLessSubset and add * the removed predictor back to validSubset after it is no longer selected in the predictor subset. * * I also will reset the validSubset from time to time to just start to include all predictors as * valid, then I will remove all predictors that have been chosen in currSubsetIndices. * */ public SweepModel replacement(List<Integer> currSubsetIndices, List<Integer> validSubset, Set<BitSet> usedCombos, BitSet predIndices, SweepModel bestModel, int[][] predictorIndex2CPMIndices) { double errorVarianceMin = bestModel._errorVariance; int currSubsetSize = currSubsetIndices.size(); // predictor subset size int lastBestErrVarPosIndex = -1; SweepModel currModel = new SweepModel(bestModel._predSubset, bestModel._CPM, bestModel._errorVariance); SweepModel bestErrVarModel = new SweepModel(bestModel._predSubset, bestModel._CPM, bestModel._errorVariance); SweepModel tempModel; while (true) { // loop to find better predictor subset via sequential replacement for (int index = 0; index < currSubsetSize; index++) { // go through each predictor position ArrayList<Integer> oneLessSubset = new ArrayList<>(currSubsetIndices); int removedPred = oneLessSubset.remove(index); validSubset.removeAll(oneLessSubset); currModel._predSubset = oneLessSubset.stream().mapToInt(x -> x).toArray(); tempModel = forwardStepR(currSubsetIndices, validSubset, usedCombos, predIndices, predictorIndex2CPMIndices, currModel, errorVarianceMin, index); if (tempModel._CPM != null && errorVarianceMin > tempModel._errorVariance) { currModel = tempModel; errorVarianceMin = currModel._errorVariance; lastBestErrVarPosIndex = index; bestErrVarModel = new SweepModel(currModel._predSubset, currModel._CPM, currModel._errorVariance); validSubset.add(removedPred); } } if (lastBestErrVarPosIndex >= 0) // improvement was found, continue lastBestErrVarPosIndex = -1; else break; } return bestErrVarModel; } /*** * Given a currSubsetIndices and a predPos, this function will try to look for new predictor that will decrease the * error variance compared to bestErrVar. This is slightly different than the forwardStep. This is what happens, * given the bestModel with the predictor subset stored in currSubsetIndices, it will do the following: * 1. extract a subsetCPM which contains the cpm rows/columns corresponding to predictors in currSubsetIndices; * 2. 
perform a sweeping for the removed predictors to undo the effect of the removed predictors. This subset is * stored in subsetCPMO. * 3. Next, in generateAllErrVarR, a new predictor is added to currentSubsetIndices to decide which one will provide * the lowest error variance. * 4. If the lowest error variance is lower than bestErrVar, the currentSubsetIndices will replace the predictor * at predPos with the new predictor. * 5. If the lowest error variance found from generateAllErrVarR is higher than bestErrVar, nothing is done. */ public SweepModel forwardStepR(List<Integer> currSubsetIndices, List<Integer> validSubsets, Set<BitSet> usedCombo, BitSet predIndices, int[][] predInd2CPMInd, SweepModel bestModel, double bestErrVar, int predPos) { // generate all models double[][] subsetCPMO = ArrayUtils.deepClone(bestModel._CPM); // grab cpm that swept by all in currSubsetIndices int predRemoved = currSubsetIndices.get(predPos); int[] removedPredSweepInd = extractSweepIndices(currSubsetIndices, predPos, predRemoved, predInd2CPMInd, _parms._intercept); SweepVector[][] removedPredSV = sweepCPM(subsetCPMO, removedPredSweepInd, true); // undo sweep by removed pred double[] subsetErrVar = generateAllErrVarR(_currCPM, _currCPMFrame, subsetCPMO, predPos, currSubsetIndices, validSubsets, usedCombo, predIndices, predInd2CPMInd, _parms._intercept, removedPredSweepInd, removedPredSV); // find the best subset and the corresponding cpm by checking for lowest error variance int bestInd = -1; double errorVarianceMin = Double.MAX_VALUE; int numModel = subsetErrVar.length; for (int index=0; index<numModel; index++) { if (subsetErrVar[index] < errorVarianceMin) { errorVarianceMin = subsetErrVar[index]; bestInd = index; } } if (bestInd == -1 || errorVarianceMin > bestErrVar) { // Predictor sets are duplicates. Return SweepModel for findBestMSEModel stream operations return new SweepModel(null, null, errorVarianceMin); } else { // new predictor in replacement performs better than before int newPredictor = validSubsets.get(bestInd); currSubsetIndices.remove(predPos); // removed the predictor at position predPos currSubsetIndices.add(predPos, newPredictor); // add new replaced predictor into predictor subset int[] subsetPred = currSubsetIndices.stream().mapToInt(Integer::intValue).toArray(); bestModel._predSubset = subsetPred; double[][] subsetCPM; if (_parms._multinode_mode) { sweepCPMParallel(_currCPMFrame, predInd2CPMInd[predRemoved], _trackSweep); sweepCPMParallel(_currCPMFrame, predInd2CPMInd[newPredictor], _trackSweep); subsetCPM = extractPredSubsetsCPMFrame(_currCPMFrame, subsetPred, predInd2CPMInd, _parms._intercept); } else { sweepCPM(_currCPM, predInd2CPMInd[predRemoved], false); // undo sweeping by replaced predictor on currCPM sweepCPM(_currCPM, predInd2CPMInd[newPredictor], false);// perform sweeping by newly chosen predictor on currCPM subsetCPM = extractPredSubsetsCPM(_currCPM, subsetPred, predInd2CPMInd, _parms._intercept); } bestModel._CPM = subsetCPM; bestModel._errorVariance = errorVarianceMin; return bestModel; } } /** * Given the training Frame array, build models for each training frame and return the GLMModel with the best * R2 values. 
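 *
 * The selection step at the end is a plain arg-max over the candidates' R2 values; a toy,
 * generic helper (an assumption, not the findBestModel utility used here) shows the shape:
 * <pre>{@code
 * static <M> M bestByR2(M[] models, java.util.function.ToDoubleFunction<M> r2) {
 *   M best = null;
 *   double bestR2 = Double.NEGATIVE_INFINITY;
 *   for (M m : models) {
 *     double v = r2.applyAsDouble(m);
 *     if (v > bestR2) { bestR2 = v; best = m; }  // keep the model with the highest R2 so far
 *   }
 *   return best;
 * }
 * }</pre>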
* * @param trainingFrames * @return */ public static GLMModel buildExtractBestR2Model(Frame[] trainingFrames, ModelSelectionModel.ModelSelectionParameters parms, int glmNFolds, String foldColumn, Model.Parameters.FoldAssignmentScheme foldAssignment) { GLMModel.GLMParameters[] trainingParams = generateGLMParameters(trainingFrames, parms, glmNFolds, foldColumn, foldAssignment); // generate the builder; GLM[] glmBuilder = buildGLMBuilders(trainingParams); // call parallel build GLM[] glmResults = ModelBuilderHelper.trainModelsParallel(glmBuilder, parms._nparallelism); // find best GLM Model with highest R2 value return findBestModel(glmResults); } /** * Given a predictor subset with indices stored in currSubsetIndices, one more predictor from the coefNames * that was not found in currSubsetIndices was added to the subset to form a new Training frame. An array of * training frame are built training frames from all elligible predictors from coefNames. GLMParameters are * built for all TrainingFrames, GLM models are built from those parameters. The GLM model with the highest * R2 value is returned. The added predictor which resulted in the highest R2 value will be added to * currSubsetIndices. * * see doc at https://github.com/h2oai/h2o-3/issues/7217 for details. * * @param currSubsetIndices: stored predictors that are chosen in the subset * @param coefNames: predictor names of full training frame * @param predPos: index/location of predictor to be added into currSubsetIndices * @return GLMModel with highest R2 value */ public static GLMModel forwardStep(List<Integer> currSubsetIndices, List<String> coefNames, int predPos, List<Integer> validSubsets, ModelSelectionModel.ModelSelectionParameters parms, String foldColumn, int glmNFolds, Model.Parameters.FoldAssignmentScheme foldAssignment, Set<BitSet> usedCombo) { String[] predictorNames = coefNames.stream().toArray(String[]::new); // generate training frames Frame[] trainingFrames = generateMaxRTrainingFrames(parms, predictorNames, foldColumn, currSubsetIndices, predPos, validSubsets, usedCombo); if (trainingFrames.length > 0) { // find GLM model with best R2 value and return it GLMModel bestModel = buildExtractBestR2Model(trainingFrames, parms, glmNFolds, foldColumn, foldAssignment); List<String> coefUsed = extraModelColumnNames(coefNames, bestModel); for (int predIndex = coefUsed.size() - 1; predIndex >= 0; predIndex--) { int index = coefNames.indexOf(coefUsed.get(predIndex)); if (!currSubsetIndices.contains(index)) { currSubsetIndices.add(predPos, index); break; } } removeTrainingFrames(trainingFrames); return bestModel; } else { return null; } } /** * Contains information of a predictor subsets like predictor indices of the subset (with the newest predictor as * the last element of the array), CPM associated with predictor subset minus the latest element and the error * variance of the CPM. 
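 *
 * A short, purely illustrative example of how the container is filled and read (all values
 * below are made up):
 * <pre>{@code
 * int[] subset = {0, 4, 2};                                 // predictor indices, newest last
 * double[][] cpm = {{2.0, 0.5, 1.1}, {0.5, 3.0, 0.7}, {1.1, 0.7, 9.4}};
 * SweepModel candidate = new SweepModel(subset, cpm, 9.4);  // 9.4 plays the role of the error variance
 * double errVar = candidate._errorVariance;                 // read back when ranking candidates
 * }</pre>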
*/ public static class SweepModel { int[] _predSubset; double[][] _CPM; double _errorVariance; public SweepModel(int[] predSubset, double[][] cpm, double mse) { _predSubset = predSubset; _CPM = cpm; _errorVariance = mse; } } public static GLMModel forwardStep(List<Integer> currSubsetIndices, List<String> coefNames, int predPos, List<Integer> validSubsets, ModelSelectionModel.ModelSelectionParameters parms, String foldColumn, int glmNFolds, Model.Parameters.FoldAssignmentScheme foldAssignment) { return forwardStep(currSubsetIndices, coefNames, predPos, validSubsets, parms, foldColumn, glmNFolds, foldAssignment, null); } /** * consider the predictors in subset as pred0, pred1, pred2 (using subset size 3 as example): * a. Keep pred1, pred2 and replace pred0 with remaining predictors, find replacement with highest R2 * b. keep pred0, pred2 and replace pred1 with remaining predictors, find replacement with highest R2 * c. keeep pred0, pred1 and replace pred2 with remaining predictors, find replacement with highest R2 * d. from step 2a, 2b, 2c, choose the predictor subset with highest R2, say the subset is pred0, pred4, pred2 * e. Take subset pred0, pred4, pred2 and go through steps a,b,c,d again and only stop when the best R2 does * not improve anymore. * * see doc at https://github.com/h2oai/h2o-3/issues/7217 for details. * */ public static GLMModel replacement(List<Integer> currSubsetIndices, List<String> coefNames, double bestR2, ModelSelectionModel.ModelSelectionParameters parms, int glmNFolds, String foldColumn, List<Integer> validSubset, Model.Parameters.FoldAssignmentScheme foldAssignment, Set<BitSet> usedCombos) { int currSubsetSize = currSubsetIndices.size(); int lastInd = currSubsetSize-1; int lastBestR2PosIndex = -1; GLMModel bestR2Model = null; GLMModel oneModel; while (true) { for (int index = 0; index < currSubsetSize; index++) { // go through all predictor position in subset ArrayList<Integer> oneLessSubset = new ArrayList<>(currSubsetIndices); int predIndexRemoved = oneLessSubset.remove(index); oneModel = forwardStep(oneLessSubset, coefNames, index, validSubset, parms, foldColumn, glmNFolds, foldAssignment, usedCombos); if (oneModel != null) { if (oneModel.r2() > bestR2) { lastBestR2PosIndex = index; validSubset.remove(oneLessSubset.get(lastInd)); if (bestR2Model != null) bestR2Model.delete(); bestR2Model = oneModel; bestR2 = bestR2Model.r2(); currSubsetIndices.clear(); currSubsetIndices.addAll(oneLessSubset); validSubset.add(predIndexRemoved); } else { oneModel.delete(); } } } if (lastBestR2PosIndex >= 0) lastBestR2PosIndex = -1; else break; } return bestR2Model; } }
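
/*
 * A small, self-contained sketch (added for illustration, not part of the original source) of why
 * the sweep-based steps above minimize error variance while the GLM-based steps maximize R2: for a
 * fixed response the total sum of squares is a constant, so R2 = 1 - SSE/SST, and the candidate
 * subset with the lowest SSE is also the one with the highest R2. The class name and the numbers
 * in main are illustrative assumptions.
 */
class ErrorVarianceVersusR2Sketch {
    static double r2(double sse, double sst) { return 1.0 - sse / sst; }

    public static void main(String[] args) {
        double sst = 250.0;                               // total sum of squares of the response
        double[] ssePerCandidate = {80.0, 60.0, 95.0};    // swept-CPM bottom-right entry per candidate subset
        int best = 0;
        for (int i = 1; i < ssePerCandidate.length; i++)
            if (ssePerCandidate[i] < ssePerCandidate[best]) best = i;
        // candidate 1 has the lowest SSE (60.0) and therefore the highest R2 (0.76)
        System.out.println("best candidate: " + best + ", R2 = " + r2(ssePerCandidate[best], sst));
    }
}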
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/modelselection/ModelSelectionModel.java
package hex.modelselection; import hex.*; import hex.deeplearning.DeepLearningModel; import hex.glm.GLM; import hex.glm.GLMModel; import water.*; import water.fvec.Frame; import water.fvec.Vec; import water.udf.CFuncRef; import water.util.TwoDimTable; import java.io.Serializable; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.Stream; import static hex.glm.GLMModel.GLMParameters.Family.AUTO; import static hex.modelselection.ModelSelectionUtils.*; public class ModelSelectionModel extends Model<ModelSelectionModel, ModelSelectionModel.ModelSelectionParameters, ModelSelectionModel.ModelSelectionModelOutput> { public ModelSelectionModel(Key<ModelSelectionModel> selfKey, ModelSelectionParameters parms, ModelSelectionModelOutput output) { super(selfKey, parms, output); } @Override public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) { assert domain == null; switch (_output.getModelCategory()) { case Regression: return new ModelMetricsRegression.MetricBuilderRegression(); default: throw H2O.unimpl("Invalid ModelCategory " + _output.getModelCategory()); } } @Override protected double[] score0(double[] data, double[] preds) { throw new UnsupportedOperationException("ModelSelection does not support scoring on data. It only provide " + "information on predictor relevance"); } @Override public Frame score(Frame fr, String destination_key, Job j, boolean computeMetrics, CFuncRef customMetricFunc) { throw new UnsupportedOperationException("AnovaGLM does not support scoring on data. It only provide " + "information on predictor relevance"); } @Override public Frame result() { return _output.generateResultFrame(); } public static class ModelSelectionParameters extends Model.Parameters { public double[] _alpha; public boolean _standardize = true; public boolean _intercept = true; GLMModel.GLMParameters.Family _family = AUTO; public boolean _lambda_search; public GLMModel.GLMParameters.Link _link = GLMModel.GLMParameters.Link.family_default; public GLMModel.GLMParameters.Solver _solver = GLMModel.GLMParameters.Solver.IRLSM; public String[] _interactions=null; public Serializable _missing_values_handling = GLMModel.GLMParameters.MissingValuesHandling.MeanImputation; public boolean _compute_p_values = false; public boolean _remove_collinear_columns = false; public int _nfolds = 0; // disable cross-validation public Key<Frame> _plug_values = null; public int _max_predictor_number = 1; public int _min_predictor_number = 1; public int _nparallelism = 0; public double _p_values_threshold = 0; public double _tweedie_variance_power; public double _tweedie_link_power; public Mode _mode = Mode.maxr; // mode chosen to perform model selection public double _beta_epsilon = 1e-4; public double _objective_epsilon = -1; // -1 to use default setting public double _gradient_epsilon = -1; // -1 to use default setting public double _obj_reg = -1.0; public double[] _lambda = new double[]{0.0}; public boolean _use_all_factor_levels = false; public boolean _build_glm_model = false; public GLMModel.GLMParameters.Influence _influence; // if set to dfbetas will calculate the difference of betas obtained from including and excluding a data row public boolean _multinode_mode = false; // for maxrsweep only, if true will run on multiple nodes in cluster public enum Mode { allsubsets, // use combinatorial, exponential runtime maxr, // use sequential replacement but calls GLM 
to build all models, slow but can use cross-validation and validation dataset to build more robust results maxrsweep, // perform incremental maxrsweep without using sweeping vectors, only on CPM. backward // use backward selection } @Override public String algoName() { return "ModelSelection"; } @Override public String fullName() { return "Model Selection"; } @Override public String javaName() { return ModelSelectionModel.class.getName(); } @Override public long progressUnits() { return 1; } public GLMModel.GLMParameters.MissingValuesHandling missingValuesHandling() { if (_missing_values_handling instanceof GLMModel.GLMParameters.MissingValuesHandling) return (GLMModel.GLMParameters.MissingValuesHandling) _missing_values_handling; assert _missing_values_handling instanceof DeepLearningModel.DeepLearningParameters.MissingValuesHandling; switch ((DeepLearningModel.DeepLearningParameters.MissingValuesHandling) _missing_values_handling) { case MeanImputation: return GLMModel.GLMParameters.MissingValuesHandling.MeanImputation; case Skip: return GLMModel.GLMParameters.MissingValuesHandling.Skip; default: throw new IllegalStateException("Unsupported missing values handling value: " + _missing_values_handling); } } public boolean imputeMissing() { return missingValuesHandling() == GLMModel.GLMParameters.MissingValuesHandling.MeanImputation || missingValuesHandling() == GLMModel.GLMParameters.MissingValuesHandling.PlugValues; } public DataInfo.Imputer makeImputer() { if (missingValuesHandling() == GLMModel.GLMParameters.MissingValuesHandling.PlugValues) { if (_plug_values == null || _plug_values.get() == null) { throw new IllegalStateException("Plug values frame needs to be specified when Missing Value " + "Handling = PlugValues."); } return new GLM.PlugValuesImputer(_plug_values.get()); } else { // mean/mode imputation and skip (even skip needs an imputer right now! 
PUBDEV-6809) return new DataInfo.MeanImputer(); } } } public static class ModelSelectionModelOutput extends Model.Output { GLMModel.GLMParameters.Family _family; DataInfo _dinfo; String[][] _coefficient_names; // store for each predictor number, the best model predictors double[] _best_r2_values; // store the best R2 values of the best models with fix number of predictors String[][] _predictors_added_per_step; String[][] _predictors_removed_per_step; public Key[] _best_model_ids; double[][] _coef_p_values; double[][] _coefficient_values; // store best predictor subset coefficient values double[][] _coefficient_values_normalized; // store best predictor subset coefficient values double[][] _z_values; public ModelSelectionParameters.Mode _mode; String[][] _best_predictors_subset; // predictor names for subset of each size public ModelSelectionModelOutput(hex.modelselection.ModelSelection b, DataInfo dinfo) { super(b, dinfo._adaptedFrame); _dinfo = dinfo; } public String[][] coefficientNames() { return _coefficient_names; } public double[][] beta() { int numModel = _best_model_ids.length; double[][] coeffs = new double[numModel][]; for (int index=0; index < numModel; index++) { GLMModel oneModel = DKV.getGet(_best_model_ids[index]); coeffs[index] = oneModel._output.beta().clone(); } return coeffs; } public double[][] getNormBeta() { int numModel = _best_model_ids.length; double[][] coeffs = new double[numModel][]; for (int index=0; index < numModel; index++) { GLMModel oneModel = DKV.getGet(_best_model_ids[index]); coeffs[index] = oneModel._output.getNormBeta().clone(); } return coeffs; } @Override public ModelCategory getModelCategory() { return ModelCategory.Regression; } private Frame generateResultFrame() { int numRows = _coefficient_names.length; String[] modelNames = new String[numRows]; String[] coefNames = new String[numRows]; String[] predNames = new String[numRows]; String[] modelIds = _best_model_ids == null ? null : Stream.of(_best_model_ids).map(Key::toString).toArray(String[]::new); String[] zvalues = new String[numRows]; String[] pvalues = new String[numRows]; String[] predAddedNames = new String[numRows]; String[] predRemovedNames = new String[numRows]; boolean backwardMode = _z_values!=null; // generate model names and predictor names for (int index=0; index < numRows; index++) { int numPred = _best_predictors_subset[index].length; modelNames[index] = "best "+numPred+" predictors model"; coefNames[index] = backwardMode ? String.join(", ", _coefficient_names[index]) :String.join(", ", _coefficient_names[index]); predAddedNames[index] = backwardMode ? "" : String.join(", ", _predictors_added_per_step[index]); predRemovedNames[index] = _predictors_removed_per_step[index] == null ? "" : String.join(", ", _predictors_removed_per_step[index]); predNames[index] = String.join(", ", _best_predictors_subset[index]); if (backwardMode) { zvalues[index] = joinDouble(_z_values[index]); pvalues[index] = joinDouble(_coef_p_values[index]); } } // generate vectors before forming frame Vec.VectorGroup vg = Vec.VectorGroup.VG_LEN1; Vec modNames = Vec.makeVec(modelNames, vg.addVec()); Vec modelIDV = modelIds == null ? 
null : Vec.makeVec(modelIds, vg.addVec()); Vec r2=null; Vec zval=null; Vec pval=null; Vec predAdded=null; Vec predRemoved; if (backwardMode) { zval = Vec.makeVec(zvalues, vg.addVec()); pval = Vec.makeVec(pvalues, vg.addVec()); } else { r2 = Vec.makeVec(_best_r2_values, vg.addVec()); predAdded = Vec.makeVec(predAddedNames, vg.addVec()); } predRemoved = Vec.makeVec(predRemovedNames, vg.addVec()); Vec coefN = Vec.makeVec(coefNames, vg.addVec()); Vec predN = Vec.makeVec(predNames, vg.addVec()); if (backwardMode) { String[] colNames = new String[]{"model_name", "model_id", "z_values", "p_values", "coefficient_names", "predictor_names", "predictors_removed"}; return new Frame(Key.<Frame>make(), colNames, new Vec[]{modNames, modelIDV, zval, pval, coefN, predN, predRemoved}); } else { if (modelIds == null) { String[] colNames = new String[]{"model_name", "best_r2_value", "coefficient_names", "predictor_names", "predictors_removed", "predictors_added"}; return new Frame(Key.<Frame>make(), colNames, new Vec[]{modNames, r2, coefN, predN, predRemoved, predAdded}); } else { String[] colNames = new String[]{"model_name", "model_id", "best_r2_value", "coefficient_names", "predictor_names", "predictors_removed", "predictors_added"}; return new Frame(Key.<Frame>make(), colNames, new Vec[]{modNames, modelIDV, r2, coefN, predN, predRemoved, predAdded}); } } } public void shrinkArrays(int numModelsBuilt) { if (_coefficient_names.length > numModelsBuilt) { _coefficient_names = shrinkStringArray(_coefficient_names, numModelsBuilt); _best_predictors_subset = shrinkStringArray(_best_predictors_subset, numModelsBuilt); _coefficient_names = shrinkStringArray(_coefficient_names, numModelsBuilt); _z_values = shrinkDoubleArray(_z_values, numModelsBuilt); _coef_p_values = shrinkDoubleArray(_coef_p_values, numModelsBuilt); _best_model_ids = shrinkKeyArray(_best_model_ids, numModelsBuilt); _predictors_removed_per_step = shrinkStringArray(_predictors_removed_per_step, numModelsBuilt); } } public void generateSummary() { int numModels = _best_r2_values.length; String[] names = new String[]{"best_r2_value", "coefficient_names", "predictor_names", "predictors_removed", "predictors_added"}; String[] types = new String[]{"double", "String", "String", "String", "String"}; String[] formats = new String[]{"%d", "%s", "%s", "%s", "%s"}; String[] rowHeaders = new String[numModels]; for (int index=1; index<=numModels; index++) rowHeaders[index-1] = "with "+_best_predictors_subset[index-1].length+" predictors"; _model_summary = new TwoDimTable("ModelSelection Model Summary", "summary", rowHeaders, names, types, formats, ""); for (int rIndex=0; rIndex < numModels; rIndex++) { int colInd = 0; _model_summary.set(rIndex, colInd++, _best_r2_values[rIndex]); _model_summary.set(rIndex, colInd++, String.join(", ", _coefficient_names[rIndex])); _model_summary.set(rIndex, colInd++, String.join(", ", _best_predictors_subset[rIndex])); if (_predictors_removed_per_step[rIndex] != null) _model_summary.set(rIndex, colInd++, String.join(", ", _predictors_removed_per_step[rIndex])); else _model_summary.set(rIndex, colInd++, ""); _model_summary.set(rIndex, colInd++, String.join(", ", _predictors_added_per_step[rIndex])); } } // for backward model only public void generateSummary(int numModels) { String[] names = new String[]{"coefficient_names", "predictor_names", "z_values", "p_values", "predictors_removed"}; String[] types = new String[]{"string", "string", "string", "string", "string"}; String[] formats = new String[]{"%s", "%s", "%s", "%s", "%s"}; 
String[] rowHeaders = new String[numModels]; for (int index=0; index < numModels; index++) { rowHeaders[index] = "with "+_best_predictors_subset[index].length+" predictors"; } _model_summary = new TwoDimTable("ModelSelection Model Summary", "summary", rowHeaders, names, types, formats, ""); for (int rIndex=0; rIndex < numModels; rIndex++) { int colInd = 0; String coeffNames = String.join(", ", _coefficient_names[rIndex]); String predNames = String.join(", ", _best_predictors_subset[rIndex]); String pValue = joinDouble(_coef_p_values[rIndex]); String zValue = joinDouble(_z_values[rIndex]); _model_summary.set(rIndex, colInd++, coeffNames); _model_summary.set(rIndex, colInd++, predNames); _model_summary.set(rIndex, colInd++, zValue); _model_summary.set(rIndex, colInd++, pValue); _model_summary.set(rIndex, colInd, _predictors_removed_per_step[rIndex][0]); } } void updateBestModels(GLMModel bestModel, int index) { _best_model_ids[index] = bestModel.getKey(); if (bestModel._parms._nfolds > 0) { int r2Index = Arrays.asList(bestModel._output._cross_validation_metrics_summary.getRowHeaders()).indexOf("r2"); Float tempR2 = (Float) bestModel._output._cross_validation_metrics_summary.get(r2Index, 0); _best_r2_values[index] = tempR2.doubleValue(); } else { _best_r2_values[index] = bestModel.r2(); } extractCoeffs(bestModel, index); updateAddedRemovedPredictors(index); } void extractCoeffs(GLMModel model, int index) { _coefficient_names[index] = model._output.coefficientNames().clone(); // all coefficients ArrayList<String> coeffNames = new ArrayList<>(Arrays.asList(model._output.coefficientNames())); _coefficient_names[index] = coeffNames.toArray(new String[0]); // without intercept List<String> predNames = Stream.of(model.names()).collect(Collectors.toList()); predNames.remove(model._parms._response_column); _best_predictors_subset[index] = predNames.stream().toArray(String[]::new); } void updateBestModels(String[] predictorNames, List<String> allCoefNames, int index, boolean hasIntercept, int actualCPMSize, int[] predsubset, double[][] lastCPM, double r2Scale, CoeffNormalization coeffN, int[][] pred2CPMIndex, DataInfo dinfo) { int lastCPMIndex = actualCPMSize-1; if (lastCPM[lastCPMIndex][lastCPMIndex] == Double.MAX_VALUE) _best_r2_values[index] = -1; else _best_r2_values[index] = 1-r2Scale * lastCPM[lastCPMIndex][lastCPMIndex]; extractCoeffs(predictorNames, allCoefNames, lastCPM, index, hasIntercept, actualCPMSize, predsubset, coeffN, pred2CPMIndex, dinfo); updateAddedRemovedPredictors(index); } void extractCoeffs(String[] predNames, List<String> allCoefNames, double[][] cpm, int index, boolean hasIntercept, int actualCPMSize, int[] predSubset, CoeffNormalization coeffN, int[][] predsubset2CPMIndices, DataInfo dinfo) { _best_predictors_subset[index] = extractPredsFromPredIndices(predNames, predSubset); _coefficient_names[index] = extractCoefsFromPred(allCoefNames, hasIntercept, dinfo, predSubset); extractCoefsValues(cpm, _coefficient_names[index].length, hasIntercept, actualCPMSize, coeffN, index, predSubset, predsubset2CPMIndices); } public void extractCoefsValues(double[][] cpm, int coefValLen, boolean hasIntercept, int actualCPMSize, CoeffNormalization coeffN, int predIndex, int[] predSubset, int[][] pred2CPMIndices) { _coefficient_values[predIndex] = new double[coefValLen]; _coefficient_values_normalized[predIndex] = new double[coefValLen]; int lastCPMIndex = actualCPMSize-1; int cpmIndexOffset = hasIntercept?1:0; boolean standardize = coeffN._standardize; double[] sigmaOrOneOSigma = 
coeffN._sigmaOrOneOSigma; double[] meanOverSigma = coeffN._meanOverSigma; double sumBetaMeanOverSigma = 0; int numIndexStart = _dinfo._cats; int offset =0; int predSubsetLen = predSubset.length; int cpmInd, coefIndex; for (int pIndex = 0; pIndex < predSubsetLen; pIndex++) { int predictor = predSubset[pIndex]; if (predictor >= numIndexStart) { // numerical columns coefIndex = pIndex+offset; cpmInd = cpmIndexOffset+pIndex; if (standardize) { _coefficient_values[predIndex][coefIndex] = cpm[cpmInd][lastCPMIndex]*sigmaOrOneOSigma[predictor-numIndexStart]; _coefficient_values_normalized[predIndex][coefIndex] = cpm[cpmInd][lastCPMIndex]; } else { _coefficient_values[predIndex][coefIndex] = cpm[cpmInd][lastCPMIndex]; _coefficient_values_normalized[predIndex][coefIndex] = cpm[cpmInd][lastCPMIndex]*sigmaOrOneOSigma[predictor-numIndexStart]; } sumBetaMeanOverSigma += _coefficient_values_normalized[predIndex][coefIndex]*meanOverSigma[predictor-numIndexStart]; } else { // categorical columns int cpmLen = pred2CPMIndices[predictor].length; // indices of cpm to grab for coefficients info for (int cpmIndex = 0; cpmIndex < cpmLen; cpmIndex++) { coefIndex = offset + cpmIndex + pIndex; cpmInd = cpmIndexOffset + cpmIndex + pIndex; _coefficient_values[predIndex][coefIndex] = cpm[cpmInd][lastCPMIndex]; _coefficient_values_normalized[predIndex][coefIndex] = cpm[cpmInd][lastCPMIndex]; } offset += cpmLen-1; cpmIndexOffset += cpmLen-1; } } if (hasIntercept) { // extract intercept value int lastCoefInd = _coefficient_values[predIndex].length-1; if (coeffN._standardize) { _coefficient_values_normalized[predIndex][lastCoefInd] = cpm[0][lastCPMIndex]; _coefficient_values[predIndex][lastCoefInd] = cpm[0][lastCPMIndex]-sumBetaMeanOverSigma; } else { _coefficient_values_normalized[predIndex][lastCoefInd] = cpm[0][lastCPMIndex]+sumBetaMeanOverSigma; _coefficient_values[predIndex][lastCoefInd] = cpm[0][lastCPMIndex]; } } } public static String[] extractCoefsFromPred(List<String> allCoefList, boolean hasIntercept, DataInfo dinfo, int[] predSubset) { List<String> coefNames = new ArrayList<>(); int numPred = predSubset.length; int predIndex; int numCats = dinfo._cats; int catOffsets = dinfo._catOffsets[dinfo._catOffsets.length-1]; int numCatLevel; for (int index=0; index<numPred; index++) { predIndex = predSubset[index]; if (predIndex < numCats) { // categorical columns numCatLevel = dinfo._catOffsets[predIndex+1]-dinfo._catOffsets[predIndex]; final int predictorInd=predIndex; List<String> coeffs = IntStream.range(0, numCatLevel).mapToObj(x -> allCoefList.get(x+dinfo._catOffsets[predictorInd])).collect(Collectors.toList()); coefNames.addAll(coeffs); } else { // numerical columns coefNames.add(allCoefList.get(predIndex+catOffsets-numCats)); } } if (hasIntercept) coefNames.add("Intercept"); return coefNames.toArray(new String[0]); } public static String[] extractPredsFromPredIndices(String[] allPreds, int[] predSubset) { int numPreds = predSubset.length; String[] predSubsetNames = new String[numPreds]; for (int index=0; index<numPreds; index++) predSubsetNames[index] = allPreds[predSubset[index]]; return predSubsetNames; } void updateAddedRemovedPredictors(int index) { final List<String> newSet = Stream.of(_coefficient_names[index]).collect(Collectors.toList()); if (index > 0) { final List<String> oldSet = Stream.of(_coefficient_names[index - 1]).collect(Collectors.toList()); List<String> predDeleted = oldSet.stream().filter(x -> (!newSet.contains(x) && !"Intercept".equals(x))).collect(Collectors.toList()); 
_predictors_removed_per_step[index] = predDeleted == null || predDeleted.size()==0 ? new String[]{""} : predDeleted.toArray(new String[predDeleted.size()]); if (!ModelSelectionParameters.Mode.backward.equals(_mode)) { List<String> predAdded = newSet.stream().filter(x -> (!oldSet.contains(x) && !"Intercept".equals(x))).collect(Collectors.toList()); _predictors_added_per_step[index] = predAdded.toArray(new String[0]); } return; } else if (!ModelSelectionParameters.Mode.backward.equals(_mode)) { _predictors_added_per_step[index] = new String[]{_coefficient_names[index][0]}; _predictors_removed_per_step[index] = new String[]{""}; return; } _predictors_removed_per_step[index] = new String[]{""}; _predictors_added_per_step[index] = new String[]{""}; } /** * Method to remove redundant predictors at the beginning of backward method. */ void resetCoeffs(GLMModel model, List<String> predNames, List<String> numPredNames, List<String> catPredNames) { final String[] coeffName = model._output.coefficientNames(); int[] idxs = model._output.bestSubmodel().idxs; if (idxs == null) // no redundant predictors return; List<String> coeffNames = Arrays.stream(idxs).mapToObj(x -> coeffName[x]).collect(Collectors.toList()); resetAllPreds(predNames, catPredNames, numPredNames, model, coeffNames); // remove redundant preds } void resetAllPreds(List<String> predNames, List<String> catPredNames, List<String> numPredNames, GLMModel model, List<String> coeffNames) { if (model._output.bestSubmodel().idxs.length == model.coefficients().size()) // no redundant predictors return; resetNumPredNames(numPredNames, coeffNames); resetCatPredNames(model.dinfo(), model._output.bestSubmodel().idxs, catPredNames); if (predNames.size() > (numPredNames.size() + catPredNames.size())) { predNames.clear(); predNames.addAll(catPredNames); predNames.addAll(numPredNames); } } public void resetNumPredNames(List<String> numPredNames, List<String> coeffNames) { List<String> newNumPredNames = numPredNames.stream().filter(x -> coeffNames.contains(x)).collect(Collectors.toList()); numPredNames.clear(); numPredNames.addAll(newNumPredNames); } public void resetCatPredNames(DataInfo dinfo, int[] idxs, List<String> catPredNames) { List<String> newCatPredNames = new ArrayList<>(); List<Integer> idxsList = Arrays.stream(idxs).boxed().collect(Collectors.toList()); int[] catOffset = dinfo._catOffsets; int catIndex = catOffset.length; int maxCatOffset = catOffset[catIndex-1]; for (int index=1; index<catIndex; index++) { int offsetedIndex = index-1; List<Integer> currCatList = IntStream.range(catOffset[offsetedIndex], catOffset[index]).boxed().collect(Collectors.toList()); if (currCatList.stream().filter(x -> idxsList.contains(x)).count() > 0 && currCatList.get(currCatList.size()-1) < maxCatOffset) { newCatPredNames.add(catPredNames.get(offsetedIndex)); } } if (newCatPredNames.size() < catPredNames.size()) { catPredNames.clear(); catPredNames.addAll(newCatPredNames); } } /*** * Eliminate predictors with lowest z-value (z-score) magnitude as described in III of * ModelSelectionTutorial.pdf in https://github.com/h2oai/h2o-3/issues/7232 */ void extractPredictors4NextModel(GLMModel model, int index, List<String> predNames, List<String> numPredNames, List<String> catPredNames) { boolean firstRun = (index+1) == predNames.size(); List<String> oldPredNames = firstRun ? 
new ArrayList<>(predNames) : null; extractCoeffs(model, index); int predIndex2Remove = findMinZValue(model, numPredNames, catPredNames, predNames); String pred2Remove = predNames.get(predIndex2Remove); if (firstRun) // remove redundant predictors if present resetCoeffs(model, predNames, numPredNames, catPredNames); List<String> redundantPred = firstRun ? oldPredNames.stream().filter(x -> !predNames.contains(x)).collect(Collectors.toList()) : null; _best_model_ids[index] = model.getKey(); if (redundantPred != null && redundantPred.size() > 0) { redundantPred = redundantPred.stream().map(x -> x+"(redundant_predictor)").collect(Collectors.toList()); redundantPred.add(pred2Remove); _predictors_removed_per_step[index] = redundantPred.stream().toArray(String[]::new); } else { _predictors_removed_per_step[index] = new String[]{pred2Remove}; } _z_values[index] = model._output.zValues().clone(); _coef_p_values[index] = model._output.pValues().clone(); predNames.remove(pred2Remove); if (catPredNames.contains(pred2Remove)) catPredNames.remove(pred2Remove); else numPredNames.remove(pred2Remove); } } @Override protected Futures remove_impl(Futures fs, boolean cascade) { super.remove_impl(fs, cascade); if (cascade && _output._best_model_ids != null && _output._best_model_ids.length > 0) { for (Key oneModelID : _output._best_model_ids) if (null != oneModelID) Keyed.remove(oneModelID, fs, cascade); // remove model key } return fs; } @Override protected AutoBuffer writeAll_impl(AutoBuffer ab) { if (_output._best_model_ids != null && _output._best_model_ids.length > 0) { for (Key oneModelID : _output._best_model_ids) if (null != oneModelID) ab.putKey(oneModelID); // add GLM model key } return super.writeAll_impl(ab); } @Override protected Keyed readAll_impl(AutoBuffer ab, Futures fs) { if (_output._best_model_ids != null && _output._best_model_ids.length > 0) { for (Key oneModelID : _output._best_model_ids) { if (null != oneModelID) ab.getKey(oneModelID, fs); // add GLM model key } } return super.readAll_impl(ab, fs); } public HashMap<String, Double>[] coefficients() { return coefficients(false); } public HashMap<String, Double>[] coefficients(boolean standardize) { int numModel = _output._best_model_ids.length; HashMap<String, Double>[] coeffs = new HashMap[numModel]; for (int index=0; index < numModel; index++) { coeffs[index] = coefficients(index+1, standardize); } return coeffs; } public HashMap<String, Double> coefficients(int predictorSize) { return coefficients(predictorSize, false); } public HashMap<String, Double> coefficients(int predictorSize, boolean standardize) { int numModel = _output._best_model_ids.length; if (predictorSize <= 0 || predictorSize > numModel) throw new IllegalArgumentException("predictorSize must be between 1 and maximum size of predictor subset" + " size."); GLMModel oneModel = DKV.getGet(_output._best_model_ids[predictorSize-1]); return oneModel.coefficients(standardize); } }
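
/*
 * A self-contained toy sketch (added for illustration, not part of the original source) of the
 * backward-mode rule documented above: at each step the predictor whose coefficient has the
 * smallest z-value magnitude is dropped. It treats every predictor as a single coefficient; the
 * class name and the z-values in main are assumptions.
 */
class SmallestAbsZValueSketch {
    static int indexOfSmallestAbsZ(double[] zValues) {
        int worst = 0;
        for (int i = 1; i < zValues.length; i++)
            if (Math.abs(zValues[i]) < Math.abs(zValues[worst])) worst = i;
        return worst;
    }

    public static void main(String[] args) {
        double[] z = {4.2, -0.3, 2.8, 1.1};   // one z-value per predictor (toy numbers)
        System.out.println("predictor to drop next: " + indexOfSmallestAbsZ(z)); // prints 1
    }
}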
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/modelselection/ModelSelectionTasks.java
package hex.modelselection; import water.MRTask; import water.fvec.Chunk; import water.fvec.Frame; import water.fvec.Vec; import java.util.stream.LongStream; public class ModelSelectionTasks { public static class SweepFrameParallel extends MRTask<SweepFrameParallel> { final double _oneOPivot; final int[] _trackPivotSweeps; final int _sweepIndex; final double[] _ariCol; // store column of a_ir final int _cpmLen; public SweepFrameParallel(int[] trackPSweeps, int sweepInd, Frame cpm) { _trackPivotSweeps = trackPSweeps; _sweepIndex = sweepInd; _cpmLen = cpm.numCols(); // extract row corresponding to sweeping row Vec sweepVec = cpm.vec(_sweepIndex); _ariCol = LongStream.range(0, _cpmLen).mapToDouble(x -> sweepVec.at(x)).toArray(); _oneOPivot = 1.0 / _ariCol[_sweepIndex]; } public void map(Chunk[] chks) { int chunkNRows = chks[0]._len; int rowOffset = (int) chks[0].start(); int numCols = chks.length; double currEle; for (int rInd = 0; rInd < chunkNRows; rInd++) { int trueRowInd = rInd + rowOffset; for (int cInd = 0; cInd < numCols; cInd++) { currEle = chks[cInd].atd(rInd); if (trueRowInd != _sweepIndex && cInd != _sweepIndex) { // not working on the sweeping row/column currEle = currEle - _ariCol[trueRowInd] * _trackPivotSweeps[cInd] * _trackPivotSweeps[_sweepIndex] * _ariCol[cInd] * _oneOPivot; } else if (trueRowInd == _sweepIndex && cInd == _sweepIndex) { // working on the sweeping element currEle = _oneOPivot; } else if (trueRowInd == _sweepIndex && cInd != _sweepIndex) { // working on sweeping row currEle = _trackPivotSweeps[cInd] * _trackPivotSweeps[_sweepIndex] * _ariCol[cInd] * _oneOPivot; } else if (trueRowInd != _sweepIndex && cInd == _sweepIndex) { // working on sweeping column currEle = -_oneOPivot * _ariCol[trueRowInd]; } chks[cInd].set(rInd, currEle); } } } } }
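
/*
 * A self-contained dense-matrix sketch (added for illustration, not part of the original source)
 * of the single sweep step that SweepFrameParallel applies chunk by chunk to the distributed CPM:
 * the pivot becomes 1/pivot, the pivot row is divided by the pivot, the pivot column is negated
 * and divided by the pivot, and every other entry receives the usual rank-one update. The
 * _trackPivotSweeps sign bookkeeping is deliberately left out to keep the sketch short.
 */
class DenseSweepSketch {
    /** Sweep the symmetric matrix a in place on pivot index k. */
    static void sweep(double[][] a, int k) {
        int n = a.length;
        double pivot = a[k][k];
        double[] rowK = a[k].clone();                 // save the pivot row before overwriting it
        double[] colK = new double[n];                // save the pivot column before overwriting it
        for (int i = 0; i < n; i++) colK[i] = a[i][k];
        for (int i = 0; i < n; i++) {
            for (int j = 0; j < n; j++) {
                if (i == k && j == k)      a[i][j] = 1.0 / pivot;
                else if (i == k)           a[i][j] = rowK[j] / pivot;              // pivot row
                else if (j == k)           a[i][j] = -colK[i] / pivot;             // pivot column
                else                       a[i][j] = a[i][j] - colK[i] * rowK[j] / pivot;
            }
        }
    }

    public static void main(String[] args) {
        // 2x2 toy CPM [[x'x, x'y], [y'x, y'y]]; sweeping index 0 leaves the least-squares slope
        // in a[0][1] and the residual sum of squares in a[1][1]
        double[][] a = {{4.0, 6.0}, {6.0, 14.0}};
        sweep(a, 0);
        System.out.println("beta = " + a[0][1] + ", SSE = " + a[1][1]); // beta = 1.5, SSE = 5.0
    }
}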
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/modelselection/ModelSelectionUtils.java
package hex.modelselection; import hex.DataInfo; import hex.Model; import hex.glm.GLM; import hex.glm.GLMModel; import hex.glm.GLMTask; import hex.gram.Gram; import jsr166y.ForkJoinTask; import jsr166y.RecursiveAction; import water.DKV; import water.Key; import water.MemoryManager; import water.fvec.Frame; import water.fvec.Vec; import water.util.ArrayUtils; import java.lang.reflect.Field; import java.util.*; import java.util.stream.Collectors; import java.util.stream.IntStream; import static hex.glm.GLMModel.GLMParameters.Family.gaussian; public class ModelSelectionUtils { public static Frame[] generateTrainingFrames(ModelSelectionModel.ModelSelectionParameters parms, int predNum, String[] predNames, int numModels, String foldColumn) { int maxPredNum = predNames.length; Frame[] trainFrames = new Frame[numModels]; int[] predIndices = IntStream.range(0, predNum).toArray(); // contains indices to predictor names int zeroBound = maxPredNum-predNum; int[] bounds = IntStream.range(zeroBound, maxPredNum).toArray(); // highest combo value for (int frameCount = 0; frameCount < numModels; frameCount++) { // generate one combo trainFrames[frameCount] = generateOneFrame(predIndices, parms, predNames, foldColumn); DKV.put(trainFrames[frameCount]); updatePredIndices(predIndices, bounds); } return trainFrames; } /*** * Given predictor indices stored in currentPredIndices, we need to find the next combination of predictor indices * to use to generate the next combination. For example, if we have 4 predictors and we are looking to take two * predictors, predictor indices can change in the following sequence [0,1]->[0,2]->[0,3]->[1,2]->[1,2]->[2,3]. * * @param currentPredIndices * @param indicesBounds */ public static void updatePredIndices(int[] currentPredIndices, int[] indicesBounds) { int lastPredInd = currentPredIndices.length-1; for (int index = lastPredInd; index >= 0; index--) { if (currentPredIndices[index] < indicesBounds[index]) { // increase LSB first currentPredIndices[index]++; updateLaterIndices(currentPredIndices, index, lastPredInd); break; } } } /*** * Give 5 predictors and say we want the combo of 3 predictors, this function will properly reset the prediction * combination indices say from [0, 1, 4] -> [0, 2, 3] or [0, 3, 4] -> [1, 2, 3]. Given an index that was just * updated, it will update the indices that come later in the list correctly. * * @param currentPredIndices * @param indexUpdated * @param lastPredInd */ public static void updateLaterIndices(int[] currentPredIndices, int indexUpdated, int lastPredInd) { for (int index = indexUpdated; index < lastPredInd; index++) { currentPredIndices[index+1] = currentPredIndices[index]+1; } } /*** * Given a predictor indices set, this function will generate a training frame containing the predictors with * indices in predIndices. * * @param predIndices * @param parms * @param predNames * @return */ public static Frame generateOneFrame(int[] predIndices, Model.Parameters parms, String[] predNames, String foldColumn) { final Frame predVecs = new Frame(Key.make()); final Frame train = parms.train(); boolean usePredIndices = predIndices != null; int numPreds = usePredIndices? predIndices.length : predNames.length; for (int index = 0; index < numPreds; index++) { int predVecNum = usePredIndices ? 
predIndices[index] : index; predVecs.add(predNames[predVecNum], train.vec(predNames[predVecNum])); } if (parms._weights_column != null) predVecs.add(parms._weights_column, train.vec(parms._weights_column)); if (parms._offset_column != null) predVecs.add(parms._offset_column, train.vec(parms._offset_column)); if (foldColumn != null) predVecs.add(foldColumn, train.vec(foldColumn)); predVecs.add(parms._response_column, train.vec(parms._response_column)); return predVecs; } public static void setBitSet(BitSet predBitSet, int[] currIndices) { for (int predIndex : currIndices) predBitSet.set(predIndex); } /*** * Class to store the CPM produced, predNames and pred2CPMMapping after the removal of redundant predictors * if present */ static class CPMnPredNames { double[][] _cpm; String[] _predNames; String[] _coefNames; int[][] _pred2CPMMapping; public CPMnPredNames(double[][] cpm, String[] predNames, String[] coeffNames, int[][] pred2CPMM) { _cpm = cpm; // cpm with duplicated predictors removed _predNames = predNames; // predictor names with duplicated predictors removed _pred2CPMMapping = pred2CPMM; // mapping of predictors to cpm indices _coefNames = coeffNames; } } public static CPMnPredNames genCPMPredNamesIndex(Key jobKey, DataInfo dinfo, String[] predictorNames, ModelSelectionModel.ModelSelectionParameters parms) { // check if there are redundant predictors ArrayList<Integer> ignoredCols = new ArrayList<>(); GLMTask.GLMIterationTask gtask = genGramCheckDup(jobKey, dinfo, ignoredCols, parms); double[] xTransposey; Gram gram = gtask.getGram(); List<Integer> ignoredFullPredCols = new ArrayList<>(); String[] coefNames = dinfo.coefNames(); // drop unwanted predictors if (ignoredCols.size() > 0) { List<String> ignoredPredNames = new ArrayList<>(); List<String> ignoredCoefNames = new ArrayList<>(); ignoredFullPredCols = findFullDupPred(dinfo, ignoredCols, ignoredPredNames, ignoredCoefNames, predictorNames); coefNames = Arrays.stream(coefNames).filter(x -> !ignoredCoefNames.contains(x)).toArray(String[]::new); predictorNames = Arrays.stream(predictorNames).filter(x -> !ignoredPredNames.contains(x)).toArray(String[]::new); // drop cols from gram and XTY xTransposey = dropIgnoredCols(gtask, ignoredFullPredCols); } else { xTransposey = gtask.getXY(); } return new CPMnPredNames(formCPM(gram, xTransposey, gtask.getYY()), predictorNames, coefNames, mapPredIndex2CPMIndices(dinfo, predictorNames.length, ignoredFullPredCols)); } /** * This method attempts to map all predictors into the corresponding cpm indices that refer to that predictor. * This is complicated by two things: * a. the presence of duplicated predictors that are removed; * b. the presence of enum predictors that will map one predictor to multiple consecutive cpm indices. 
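 *
 * A toy version of the mapping for the simple case with nothing ignored (the column counts below
 * are assumptions): an enum predictor owning three coefficient columns and one owning two,
 * followed by two numeric predictors, each map to one consecutive block of CPM indices.
 * <pre>{@code
 * int[] colsPerPredictor = {3, 2, 1, 1};          // coefficient columns owned by each predictor
 * int[][] mapping = new int[colsPerPredictor.length][];
 * int offset = 0;
 * for (int p = 0; p < colsPerPredictor.length; p++) {
 *   mapping[p] = new int[colsPerPredictor[p]];
 *   for (int c = 0; c < colsPerPredictor[p]; c++) mapping[p][c] = offset++;
 * }
 * // mapping == { {0,1,2}, {3,4}, {5}, {6} }
 * }</pre>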
* Note that ignoredPredInd is at the level of coefficient indexing and not predictor indexing */ public static int[][] mapPredIndex2CPMIndices(DataInfo dinfo, int numPreds, List<Integer> ignoredPredInd) { int[][] pred2CPMMapping = new int[numPreds][]; int offset = 0; int countPreds = 0; for (int index=0; index < dinfo._cats; index++) { // take care of categorical columns int catStartLevel = dinfo._catOffsets[index]; if (!ignoredPredInd.contains(catStartLevel)) { // enum pred not ignored int numLevels = dinfo._catOffsets[index + 1] - dinfo._catOffsets[index]; // number of catLevels pred2CPMMapping[countPreds++] = IntStream.iterate(offset, n -> n + 1).limit(numLevels).toArray(); offset += numLevels; } if (countPreds >= numPreds) break; } int totPreds = dinfo._catOffsets[dinfo._cats]+dinfo._nums; for (int index=dinfo._catOffsets[dinfo._cats]; index < totPreds; index++) { if (countPreds >= numPreds) break; if (!ignoredPredInd.contains(index)) pred2CPMMapping[countPreds++] = new int[]{offset++}; } return pred2CPMMapping; } public static double[][] formCPM(Gram gram, double[] xTransposey, double yy) { int coeffSize = xTransposey.length; int cPMsize = coeffSize+1; double[][] crossProductMatrix = MemoryManager.malloc8d(cPMsize, cPMsize); gram.getXXCPM(crossProductMatrix, false, false); // copy xZTransposex, xTransposey, yy to crossProductMatrix for (int rowIndex=0; rowIndex<coeffSize; rowIndex++) { crossProductMatrix[rowIndex][coeffSize] = xTransposey[rowIndex]; } System.arraycopy(xTransposey, 0, crossProductMatrix[coeffSize], 0, coeffSize); crossProductMatrix[coeffSize][coeffSize] = yy; return crossProductMatrix; } public static double[] dropIgnoredCols(GLMTask.GLMIterationTask gtask, List<Integer> ignoredCols) { Gram gram = gtask.getGram(); int[] droppedCols = ignoredCols.stream().mapToInt(x->x).toArray(); gram.dropCols(droppedCols); return ArrayUtils.removeIds(gtask.getXY(), droppedCols); } /*** * The duplicated columns generated by qr-cholesky is at the level of coefficients. This means for enum predictors * there could be multiple coefficients, one for each level of the enum level. For enum predictors, I will * remove the predictor only if all its columns are duplicated. This make it a little more complicated. The * returned ignored list is at the level of coefficients. 
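 *
 * A concrete toy walk-through (the offsets and indices are made-up numbers): enum A owns
 * coefficient columns 0..2, enum B owns columns 3..4, and the duplicate check flags columns
 * 0, 1, 2 and 4.
 * <pre>{@code
 * int[] catOffsets = {0, 3, 5};                                     // A -> columns 0..2, B -> columns 3..4
 * java.util.List<Integer> ignoredCols = java.util.Arrays.asList(0, 1, 2, 4);
 * // A has every one of its columns flagged, so A is dropped and columns 0, 1, 2 are returned;
 * // B is only partially duplicated (column 4 but not 3), so B stays and column 4 is not returned.
 * }</pre>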
*/ public static List<Integer> findFullDupPred(DataInfo dinfo, List<Integer> ignoredCols, List<String> ignoredPredNames, List<String> ignoredCoefNames, String[] prednames) { List<Integer> ignoredColsCopy = new ArrayList<>(ignoredCols); List<Integer> fullIgnoredCols = new ArrayList<>(); int[] catOffsets = dinfo._catOffsets; String[] allCoefNames = dinfo.coefNames(); if (dinfo._cats > 0) { // there are enum columns in dataset int catOffsetsLen = catOffsets.length; for (int index = 1; index < catOffsetsLen; index++) { final int counter = index; List<Integer> discarded = ignoredColsCopy.stream().filter(x -> x < catOffsets[counter]).collect(Collectors.toList()); if ((discarded != null) && (discarded.size() == (catOffsets[index]-catOffsets[index-1]))) { // full enum predictors found in ignored columns fullIgnoredCols.addAll(discarded); ignoredPredNames.add(prednames[index-1]); ignoredCoefNames.addAll(discarded.stream().map(x -> allCoefNames[x]).collect(Collectors.toList())); ; } if (discarded != null && discarded.size() > 0) ignoredColsCopy.removeAll(discarded); } } if (ignoredColsCopy != null && ignoredColsCopy.size()>0) { int offsetNum = dinfo._numOffsets[0]-dinfo._cats; ignoredPredNames.addAll(ignoredColsCopy.stream().map(x -> prednames[x-offsetNum]).collect(Collectors.toList())); ignoredCoefNames.addAll(ignoredColsCopy.stream().map(x -> allCoefNames[x]).collect(Collectors.toList())); fullIgnoredCols.addAll(ignoredColsCopy); // add all remaining numerical ignored predictors columns } return fullIgnoredCols; } public static GLMTask.GLMIterationTask genGramCheckDup(Key jobKey, DataInfo dinfo, ArrayList<Integer> ignoredCols, ModelSelectionModel.ModelSelectionParameters parms) { double[] beta = new double[dinfo.coefNames().length]; beta = Arrays.stream(beta).map(x -> 1.0).toArray(); // set coefficient to all 1 GLMTask.GLMIterationTask gtask = new GLMTask.GLMIterationTask(jobKey, dinfo, new GLMModel.GLMWeightsFun(gaussian, GLMModel.GLMParameters.Link.identity, 1, 0.1, 0.1, 1, false), beta).doAll(dinfo._adaptedFrame); Gram gram = gtask.getGram(); Gram.Cholesky chol = gram.qrCholesky(ignoredCols, parms._standardize); if (!chol.isSPD()) throw new Gram.NonSPDMatrixException(); return gtask; } public static double calR2Scale(Frame train, String resp) { Vec respV = train.vec(resp); double sigma = respV.sigma(); double var = sigma*sigma; long nobs = train.numRows()-respV.naCnt()-1; return nobs*var; } static class CoeffNormalization { double[] _sigmaOrOneOSigma; // only for the numerical predictors double[] _meanOverSigma; boolean _standardize; public CoeffNormalization(double[] oOSigma, double[] mOSigma, boolean standardize) { _sigmaOrOneOSigma = oOSigma; _meanOverSigma = mOSigma; _standardize = standardize; } } static CoeffNormalization generateScale(DataInfo dinfo, boolean standardize) { int numCols = dinfo._nums; double[] sigmaOrOneOverSigma = new double[numCols]; double[] mOverSigma = new double[numCols]; for (int index = 0; index < numCols; index++) { if (standardize) { sigmaOrOneOverSigma[index] = dinfo._normMul[index]; // 1/sigma mOverSigma[index] = dinfo._numMeans[index] * dinfo._normMul[index]; } else { sigmaOrOneOverSigma[index] = dinfo._normSigmaStandardizationOff[index]; // sigma mOverSigma[index] = dinfo._numMeans[index] / dinfo._normSigmaStandardizationOff[index]; } } return new CoeffNormalization(sigmaOrOneOverSigma, mOverSigma, standardize); } /**double * @param predictorNames * @param foldColumn * @param currSubsetIndices * @param validSubsets Lists containing only valid predictor 
indices to choose from * @return */ public static Frame[] generateMaxRTrainingFrames(ModelSelectionModel.ModelSelectionParameters parms, String[] predictorNames, String foldColumn, List<Integer> currSubsetIndices, int newPredPos, List<Integer> validSubsets, Set<BitSet> usedCombo) { List<Frame> trainFramesList = new ArrayList<>(); List<Integer> changedSubset = new ArrayList<>(currSubsetIndices); changedSubset.add(newPredPos, -1); // value irrelevant int[] predIndices = changedSubset.stream().mapToInt(Integer::intValue).toArray(); int predNum = predictorNames.length; BitSet tempIndices = new BitSet(predNum); int predSizes = changedSubset.size(); boolean emptyUsedCombo = (usedCombo != null) && (usedCombo.size() == 0); for (int predIndex : validSubsets) { // consider valid predictor indices only predIndices[newPredPos] = predIndex; if (emptyUsedCombo && predSizes > 1) { // add all indices set into usedCombo tempIndices.clear(); setBitSet(tempIndices, predIndices); usedCombo.add((BitSet) tempIndices.clone()); Frame trainFrame = generateOneFrame(predIndices, parms, predictorNames, foldColumn); DKV.put(trainFrame); trainFramesList.add(trainFrame); } else if (usedCombo != null && predSizes > 1) { // only need to check for forward and replacement step for maxR tempIndices.clear(); setBitSet(tempIndices, predIndices); if (usedCombo.add((BitSet) tempIndices.clone())) { // returns true if not in keyset Frame trainFrame = generateOneFrame(predIndices, parms, predictorNames, foldColumn); DKV.put(trainFrame); trainFramesList.add(trainFrame); } } else { // just build without checking duplicates for other modes Frame trainFrame = generateOneFrame(predIndices, parms, predictorNames, foldColumn); DKV.put(trainFrame); trainFramesList.add(trainFrame); } } return trainFramesList.stream().toArray(Frame[]::new); } /*** * Given the original predictor subset, this function will go into a for loop and choose one predictor out of the * remaining predictor set validSubsets and put it into the array allPreds. Then, it will spin off a process to * calculation the error variance with the addition of the new predictor. All error variances will be stored in * an array and returned for further processing. This is very similar to generateAllErrVar but they are not the * same. For starters, the replaced predictors are included in the predictor subset allPreds. For details, refer to * https://github.com/h2oai/h2o-3/issues/6538, section VI. 
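 *
 * The fan-out itself follows the usual fork/join pattern, one RecursiveAction per remaining
 * candidate predictor; a self-contained toy version (the candidate values and the stand-in
 * computation are assumptions) looks like this:
 * <pre>{@code
 * int[] candidates = {3, 7, 9};                        // eligible predictor indices (toy values)
 * double[] errVar = new double[candidates.length];     // one error variance per candidate
 * java.util.Arrays.fill(errVar, Double.MAX_VALUE);
 * jsr166y.RecursiveAction[] actions = new jsr166y.RecursiveAction[candidates.length];
 * for (int i = 0; i < candidates.length; i++) {
 *   final int slot = i;
 *   actions[slot] = new jsr166y.RecursiveAction() {
 *     protected void compute() {                       // the real tasks sweep a CPM copy here
 *       errVar[slot] = 100.0 / (1 + candidates[slot]); // stand-in for the swept bottom-right entry
 *     }
 *   };
 * }
 * jsr166y.ForkJoinTask.invokeAll(actions);             // evaluate every candidate in parallel
 * }</pre>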
*/ public static double[] generateAllErrVarR(final double[][] allCPM, final Frame allCPMFrame, double[][] prevCPM, int predPos, List<Integer> currSubsetIndices, List<Integer> validSubsets, Set<BitSet> usedCombo, BitSet tempIndices, final int[][] pred2CPMIndices, final boolean hasIntercept, int[] removedPredSweepInd, SweepVector[][] removedPredSV) { int[] allPreds = new int[currSubsetIndices.size() + 1]; // store the bigger predictor subset int lastPredInd = allPreds.length - 1; System.arraycopy(currSubsetIndices.stream().mapToInt(Integer::intValue).toArray(), 0, allPreds, 0, allPreds.length - 1); int maxModelCount = validSubsets.size(); RecursiveAction[] resA = new RecursiveAction[maxModelCount]; final double[] subsetMSE = new double[maxModelCount]; Arrays.fill(subsetMSE, Double.MAX_VALUE); int modelCount = 0; int[] oneLessSub = new int[lastPredInd]; List<Integer> oneLessSubset = new ArrayList<>(currSubsetIndices); oneLessSubset.remove(predPos); System.arraycopy(oneLessSubset.stream().mapToInt(Integer::intValue).toArray(), 0, oneLessSub, 0, oneLessSub.length - 1); int oneLessSubInd = lastPredInd-1; for (int predIndex : validSubsets) { // consider valid predictor indices only allPreds[lastPredInd] = predIndex; oneLessSub[oneLessSubInd] = predIndex; tempIndices.clear(); setBitSet(tempIndices, oneLessSub); if (usedCombo.add((BitSet) tempIndices.clone())) { final int resCount = modelCount++; genMSE4MorePredsR(pred2CPMIndices, allCPM, allCPMFrame, prevCPM, allPreds, subsetMSE, resA, resCount, hasIntercept, removedPredSV, removedPredSweepInd); } } ForkJoinTask.invokeAll(Arrays.stream(resA).filter(Objects::nonNull).toArray(RecursiveAction[]::new)); return subsetMSE; } /*** * Generate the error variance for one predictor subset setting in allPreds. It will do the following: * 1. add rows/columns corresponding to new predictor and store the partial CPM in subsetCPM; * 2. sweep the new rows/columns from 1 with the sweep vectors generated for the removed predictor and store * everything in subsetCPM; * 3. sweep the subsetCPM with rows/columns associated with the new predictor; * 4. record the new error variance. * * For details, refer to https://github.com/h2oai/h2o-3/issues/6538, section VI. 
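 *
 * Why step 4 yields the error variance (a sketch of the underlying identity, using the CPM layout assumed in
 * the doc): with CPM = [[X'X, X'y], [y'X, y'y]], sweeping all rows/columns of the X'X block leaves
 * y'y - y'X (X'X)^{-1} X'y in the bottom-right corner, i.e. the residual sum of squares of the current subset
 * model, which is the quantity recorded into subsetMSE.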
*/ public static void genMSE4MorePredsR(final int[][] pred2CPMIndices, final double[][] allCPM, final Frame allCPMFrame, double[][] prevCPM, final int[] allPreds, final double[] subsetMSE, RecursiveAction[] resA, final int resCount, final boolean hasIntercept, SweepVector[][] removePredSV, int[] removedPredSweepInd) { final int[] subsetIndices = allPreds.clone(); resA[resCount] = new RecursiveAction() { @Override protected void compute() { double[][] subsetCPM = addNewPred2CPM(allCPM, allCPMFrame, prevCPM, subsetIndices, pred2CPMIndices, hasIntercept); // new pred added to a CPM that was already swept with the removed predictor // sweep just the new pred's rows/columns with the removed pred's sweep vectors to undo its effect int newPredInd = subsetIndices[subsetIndices.length - 1]; int newPredCPMLength = pred2CPMIndices[newPredInd].length; int lastSweepIndex = prevCPM.length-1; if (newPredCPMLength == 1) { applySweepVectors2NewPred(removePredSV, subsetCPM, newPredCPMLength, removedPredSweepInd); } else { SweepVector[][] newSV = mapBasicVector2Multiple(removePredSV, newPredCPMLength); applySweepVectors2NewPred(newSV, subsetCPM, newPredCPMLength, removedPredSweepInd); } // sweep subsetCPM with newly added predictor int[] newPredSweepInd = IntStream.range(0,newPredCPMLength).map(x -> x+lastSweepIndex).toArray(); sweepCPM(subsetCPM, newPredSweepInd, false); // after sweeping, the error variance is the last diagonal element of the CPM int lastInd = subsetCPM.length-1; subsetMSE[resCount] = subsetCPM[lastInd][lastInd]; } }; } /*** * Given the original predictor subset, this function will go into a for loop and choose one predictor out of the * remaining predictor set validSubsets and put it into the array allPreds. Then, it will spin off a task to * calculate the error variance with the addition of the new predictor. All error variances will be stored in * an array and returned for further processing. For details, refer to * https://github.com/h2oai/h2o-3/issues/6538, section V.
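 *
 * A minimal sketch of the evaluation pattern used by this method (simplified; the real code also filters
 * duplicate subsets through usedCombo and special-cases the very first predictor):
 * <pre>{@code
 * for (int predIndex : validSubsets) {
 *   allPreds[lastPredInd] = predIndex;       // candidate subset = current best subset plus one new predictor
 *   final int slot = modelCount++;
 *   genMSE4MorePreds(pred2CPMIndices, allCPM, allCPMFrame, allPreds, prevCPMSize,
 *                    subsetMSE, resA, slot, hasIntercept);      // queues one RecursiveAction per candidate
 * }
 * ForkJoinTask.invokeAll(resA);               // evaluate all candidates in parallel
 * }</pre>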
*/ public static double[] generateAllErrVar(final double[][] allCPM, Frame allCPMFrame, int prevCPMSize, List<Integer> currSubsetIndices, List<Integer> validSubsets, Set<BitSet> usedCombo, BitSet tempIndices, final int[][] pred2CPMIndices, final boolean hasIntercept) { int[] allPreds = new int[currSubsetIndices.size() + 1]; // store the bigger predictor subset int lastPredInd = allPreds.length - 1; if (currSubsetIndices.size() > 0) // copy over last best predictor subset with smaller subset size System.arraycopy(currSubsetIndices.stream().mapToInt(Integer::intValue).toArray(), 0, allPreds, 0, allPreds.length - 1); int predSizes = allPreds.length; int maxModelCount = validSubsets.size(); RecursiveAction[] resA = new RecursiveAction[maxModelCount]; final double[] subsetMSE = Arrays.stream(new double[maxModelCount]).map(x -> Double.MAX_VALUE).toArray(); int modelCount = 0; for (int predIndex : validSubsets) { // consider valid predictor indices only allPreds[lastPredInd] = predIndex; if (predSizes > 1) { tempIndices.clear(); setBitSet(tempIndices, allPreds); if (usedCombo.add((BitSet) tempIndices.clone())) { final int resCount = modelCount++; genMSE4MorePreds(pred2CPMIndices, allCPM, allCPMFrame, allPreds, prevCPMSize, subsetMSE, resA, resCount, hasIntercept); } } else { // start from first predictor final int resCount = modelCount++; genMSE1stPred(pred2CPMIndices, allCPM, allCPMFrame, allPreds, subsetMSE, resA, resCount, hasIntercept); } } ForkJoinTask.invokeAll(Arrays.stream(resA).filter(Objects::nonNull).toArray(RecursiveAction[]::new)); return subsetMSE; } /*** * This method will calculate the error variance value for all predictors in the allPreds. For details, * refer to https://github.com/h2oai/h2o-3/issues/6538, section V. */ public static void genMSE4MorePreds(final int[][] pred2CPMIndices, final double[][] allCPM, final Frame allCPMFrame, final int[] allPreds, int lastSweepIndex, final double[] subsetMSE, RecursiveAction[] resA, final int resCount, final boolean hasIntercept) { final int[] subsetIndices = allPreds.clone(); resA[resCount] = new RecursiveAction() { @Override protected void compute() { boolean multinodeMode = allCPM == null && allCPMFrame != null; double[][] subsetCPM = multinodeMode ? extractPredSubsetsCPMFrame(allCPMFrame, subsetIndices, pred2CPMIndices, hasIntercept) : extractPredSubsetsCPM(allCPM, subsetIndices, pred2CPMIndices, hasIntercept); int lastPredInd = subsetIndices[subsetIndices.length - 1]; int newPredCPMLength = pred2CPMIndices[lastPredInd].length; int[] sweepIndices = IntStream.range(0,newPredCPMLength).map(x -> x+lastSweepIndex).toArray(); sweepCPM(subsetCPM, sweepIndices, false); // only apply the sweepIndices to only the last element of the CPM int lastInd = subsetCPM.length-1; subsetMSE[resCount] = subsetCPM[lastInd][lastInd]; } }; } /*** * This function performs sweeping on the last row and column only to update the variance error to reduce * computation time. 
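 *
 * For a single sweep index k the update collapses to one scalar operation (the quick path at the top of the
 * method), shown here with n denoting the last row/column of the CPM:
 * <pre>{@code
 * // error variance after sweeping index k, touching only the last diagonal element
 * mse = cpm[n][n] - cpm[n][k] * cpm[k][n] / cpm[k][k];
 * }</pre>
 * For several sweep indices the same element is expanded recursively into the partially swept values it depends
 * on, which is what the SweepElement bookkeeping below implements.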
*/ public static double sweepMSE(double[][] subsetCPM, List<Integer> sweepIndices) { int sweepLen = sweepIndices.size(); int cpmLen = subsetCPM.length; int lastInd = cpmLen-1; if (sweepLen == 1) { // quick stop for one sweep only int sweepInd = sweepIndices.get(0); return subsetCPM[lastInd][lastInd] -subsetCPM[lastInd][sweepInd]*subsetCPM[sweepInd][lastInd]/subsetCPM[sweepInd][sweepInd]; } Set<SweepElement>[] sweepElements = new Set[sweepLen]; List<SweepElement> tempElements = new ArrayList<>(); tempElements.add(new SweepElement(lastInd, lastInd, new ArrayList<>(sweepIndices))); while (tempElements.size() > 0) { SweepElement oneEle = tempElements.remove(0); if (oneEle._sweepIndices.size() == 1) { if (sweepElements[0] == null) sweepElements[0] = new HashSet<>(); sweepElements[0].add(oneEle); } else { // sweepIndices size > 1 int arrIndex = oneEle._sweepIndices.size() - 1; if (sweepElements[arrIndex] == null) sweepElements[arrIndex] = new HashSet<>(); sweepElements[arrIndex].add(oneEle); process(oneEle, tempElements); } } sweepCPMElements(sweepElements, subsetCPM); return subsetCPM[lastInd][lastInd]; } public static void sweepCPMElements(Set<SweepElement>[] sweepElements, double[][] subsetCPM) { int numSweeps = sweepElements.length; int row, col, oneIndex; for (int index = 0; index < numSweeps; index++) { Set<SweepElement> oneSweepAction = sweepElements[index]; for (SweepElement oneElement : oneSweepAction) { oneIndex = oneElement._sweepIndices.get(oneElement._sweepIndices.size() - 1); row = oneElement._row; col = oneElement._col; subsetCPM[row][col] = subsetCPM[row][col] - subsetCPM[row][oneIndex] * subsetCPM[oneIndex][col] / subsetCPM[oneIndex][oneIndex]; } } } /*** * This method will generate all the elements that are needed to perform sweeping on the currEle. The * formula for sweeping is: * subsetCPM[row][col] = subsetCPM[row][col]-subsetCPM[row][sweepInd]*subsetCPM[sweepInd][row]/subsetCPM[sweepInd][sweepInd] * * We are not performing the actual sweeping here but rather to remember the elements of subsetCPM that we need * to perform sweeping on. 
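 *
 * For example (hypothetical indices, purely illustrative): sweeping element (n, n) with sweep indices {1, 2}
 * requires the index-1-swept values of (n, n), (n, 2), (2, n) and (2, 2), and each of those in turn is obtained
 * by applying the index-1 sweep formula to the corresponding entries of the current CPM.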
From the formula, each currEle expands into four SweepElements at one less sweep depth: the same (row, col) position plus the three positions that involve the removed sweep index. */ public static void process(SweepElement currEle, List<SweepElement> tempList) { List<Integer> newSweepIndices = new ArrayList<>(currEle._sweepIndices); int sweepIndex = newSweepIndices.remove(newSweepIndices.size()-1); // remove the last sweep index tempList.add(new SweepElement(currEle._row, currEle._col, newSweepIndices)); tempList.add(new SweepElement(currEle._row, sweepIndex, newSweepIndices)); tempList.add(new SweepElement(sweepIndex, currEle._col, newSweepIndices)); tempList.add(new SweepElement(sweepIndex, sweepIndex, newSweepIndices)); } static class SweepElement { final int _row; final int _col; final List<Integer> _sweepIndices; public SweepElement(int row, int col, List<Integer> sweepInd) { _row = row; _col = col; _sweepIndices = sweepInd; } @Override public boolean equals(Object o) { if (o instanceof SweepElement) { if (_row == ((SweepElement) o)._row && _col == ((SweepElement) o)._col) { if (_sweepIndices.equals(((SweepElement) o)._sweepIndices)) return true; } } return false; } @Override public int hashCode() { return _row+(_col+1)*10+_sweepIndices.hashCode(); } } static class CPMElement { final int _row; final int _col; public CPMElement(int row, int col) { _row = row; _col = col; } @Override public boolean equals(Object o) { if (o instanceof CPMElement) { return (_row == ((CPMElement) o)._row && _col == ((CPMElement) o)._col); } return false; } @Override public int hashCode() { Integer rowCol = _row+(_col+1)*10; return rowCol.hashCode(); } } /*** * This method will calculate the error variance when only one predictor is considered in allPreds. For details, * refer to https://github.com/h2oai/h2o-3/issues/6538, section V. */ public static void genMSE1stPred(final int[][] pred2CPMIndices, final double[][] allCPM, final Frame allCPMFrame, final int[] allPreds, final double[] subsetMSE, RecursiveAction[] resA, final int resCount, final boolean hasIntercept) { final int[] subsetIndices = allPreds.clone(); resA[resCount] = new RecursiveAction() { @Override protected void compute() { // generate CPM corresponding to the subset indices in subsetIndices boolean multinodeMode = allCPM == null && allCPMFrame != null; double[][] subsetCPM = multinodeMode ? extractPredSubsetsCPMFrame(allCPMFrame, subsetIndices, pred2CPMIndices, hasIntercept) : extractPredSubsetsCPM(allCPM, subsetIndices, pred2CPMIndices, hasIntercept); int lastSubsetIndex = subsetCPM.length-1; // sweep the predictor rows/columns and record the resulting error variance subsetMSE[resCount] = sweepMSE(subsetCPM, IntStream.range(1, lastSubsetIndex).boxed().collect(Collectors.toList())); } }; } /*** * When multiple rows/columns are added to the CPM due to the new predictor being categorical, we need to map the * old sweep vector arrays to new bigger sweep vector arrays. See section V.II.V of doc.
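 *
 * A small illustrative example (hypothetical sizes): if the stored sweep vectors were built for a CPM with one
 * appended row/column (array length 2*(N+1)) and the incoming categorical predictor contributes
 * newPredCPMLen = 3 rows/columns, the remapped vectors have length 2*(N+3); entries for pre-existing
 * rows/columns are copied over, and the extra positions reuse the stored 1/pivot value, mirroring what the loop
 * below does.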
*/ public static SweepVector[][] mapBasicVector2Multiple(SweepVector[][] sweepVec, int newPredCPMLen) { int numSweep = sweepVec.length; int oldColLen = sweepVec[0].length/2; int newColLen = oldColLen+newPredCPMLen-1; // sweepVector from old CPM was calculated when one new row/col is added int lastNewColInd = newColLen-1; int lastOldColInd = oldColLen-1; SweepVector[][] newSweepVec = new SweepVector[numSweep][newColLen*2]; for (int sInd = 0; sInd < numSweep; sInd++) { double oneOverPivot = sweepVec[sInd][lastOldColInd-1]._value; int rowColInd = sweepVec[sInd][0]._column; for (int vInd = 0; vInd < lastNewColInd; vInd++) { if (vInd < lastOldColInd) { // index within old sweep vector range newSweepVec[sInd][vInd] = new SweepVector(vInd, rowColInd, sweepVec[sInd][vInd]._value); newSweepVec[sInd][vInd+newColLen] = new SweepVector(rowColInd, vInd, sweepVec[sInd][vInd+oldColLen]._value); } else if (vInd == lastOldColInd) { // last sweep index newSweepVec[sInd][lastNewColInd] = new SweepVector(lastNewColInd, rowColInd, sweepVec[sInd][lastOldColInd]._value); newSweepVec[sInd][lastNewColInd + newColLen] = new SweepVector(rowColInd, lastNewColInd, sweepVec[sInd][lastOldColInd + oldColLen]._value); newSweepVec[sInd][vInd] = new SweepVector(vInd, rowColInd, oneOverPivot); newSweepVec[sInd][vInd+newColLen] = new SweepVector(rowColInd, vInd, oneOverPivot); } else { // new sweep vector index exceed old sweep vector index newSweepVec[sInd][vInd] = new SweepVector(vInd, rowColInd, oneOverPivot); newSweepVec[sInd][vInd+newColLen] = new SweepVector(rowColInd, vInd, oneOverPivot); } } } return newSweepVec; } /*** * This method will sweep the rows/columns added to the CPM due to the addition of the new predictor using sweep * vector arrays. See Step 3 of section V.II.IV of doc. The sweep vectors should contain sweeping for predictor * 0, for predictor 2, .... predictor s of the predictor subset. */ public static void applySweepVectors2NewPred(SweepVector[][] sweepVec, double[][] subsetCPM, int numNewRows, int[] sweepMat) { int numSweep = sweepVec.length; // number of sweeps that we need to do if (sweepMat == null) { for (int sweepInd=0; sweepInd < numSweep; sweepInd++) { oneSweepWSweepVector(sweepVec[sweepInd], subsetCPM, sweepInd, numNewRows); } } else { int sweepInd; for (int index = 0; index < numSweep; index++) { sweepInd = sweepMat[index]; oneSweepWSweepVector(sweepVec[index], subsetCPM, sweepInd, numNewRows); } } } /*** * This method perform just one sweep of the sweeping action described in Step 3 of section V.II.IV of doc. * Note that for a sweep vector array of 2*(N+1), the first N+1 elements describe the changes to the new column. * The last N+1 elements describing changes to the new row. The sweep vector arrays will contain changes to * the same element multiple times. I only change each new element once by using elementAccessCount to keep track * of which elements have been changed already. See Step 3 of section V.II.IV of doc for details. * * In addition, I did not use two arrays to implement the changes. Hence, the order of change is important here. * I used temporary variable to store element changes that are used by other element updates. I copied the * temporary elements back to the CPM at the end. 
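 *
 * Layout reminder as a sketch (following the description above): for a sweep vector array sv of length
 * 2 * (N + 1),
 * <pre>{@code
 * SweepVector colChange = sv[i];             // i in [0, N]: update applied to the new column
 * SweepVector rowChange = sv[i + N + 1];     // matching update applied to the new row
 * }</pre>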
*/ public static void oneSweepWSweepVector(SweepVector[] sweepVec, double[][] subsetCPM, int sweepIndex, int colRowsAdded) { int sweepVecLen = sweepVec.length / 2; int newLastCPMInd = sweepVecLen - 1; int oldSweepVec = sweepVecLen - colRowsAdded; int oldLastCPMInd = oldSweepVec - 1; // sweeping index before adding new rows/columns double[] colSweeps = new double[colRowsAdded]; double[] rowSweeps = new double[colRowsAdded]; Set<CPMElement> trackSweep = new HashSet<>(); CPMElement oneEle; for (int rcInd = 0; rcInd < colRowsAdded; rcInd++) { // for each newly added row/column int rowColInd = sweepVec[0]._column + rcInd; for (int svInd = 0; svInd < sweepVecLen; svInd++) { // working on each additional row/col int svIndOffset = svInd + sweepVecLen; if (sweepVec[svInd]._row == sweepIndex) { // take care of both row and column elements at sweepIndex rowSweeps[rcInd] = sweepVec[svInd]._value * subsetCPM[sweepIndex][rowColInd]; colSweeps[rcInd] = sweepVec[svIndOffset]._value * subsetCPM[rowColInd][sweepIndex]; } else if (sweepVec[svInd]._row == newLastCPMInd) { oneEle = new CPMElement(newLastCPMInd, rowColInd); if (!trackSweep.contains(oneEle)) { trackSweep.add(oneEle); subsetCPM[newLastCPMInd][rowColInd] = subsetCPM[newLastCPMInd][rowColInd] - sweepVec[svInd]._value * subsetCPM[sweepIndex][rowColInd]; } oneEle = new CPMElement(rowColInd, newLastCPMInd); if (!trackSweep.contains(oneEle)) { trackSweep.add(oneEle); subsetCPM[rowColInd][newLastCPMInd] = subsetCPM[rowColInd][newLastCPMInd] - sweepVec[svIndOffset]._value * subsetCPM[rowColInd][sweepIndex]; } } else if (sweepVec[svInd]._row == rowColInd) { oneEle = new CPMElement(rowColInd, rowColInd); if (!trackSweep.contains(oneEle)) { subsetCPM[rowColInd][rowColInd] = subsetCPM[rowColInd][rowColInd] - subsetCPM[rowColInd][sweepIndex] * subsetCPM[sweepIndex][rowColInd] * sweepVec[svInd]._value; trackSweep.add(oneEle); } } else if (sweepVec[svInd]._row < oldLastCPMInd) { oneEle = new CPMElement(sweepVec[svInd]._row, rowColInd); if (!trackSweep.contains(oneEle)) { subsetCPM[sweepVec[svInd]._row][rowColInd] = subsetCPM[sweepVec[svInd]._row][rowColInd] - subsetCPM[sweepIndex][rowColInd] * sweepVec[svInd]._value; trackSweep.add(oneEle); } oneEle = new CPMElement(rowColInd, sweepVec[svIndOffset]._column); if (!trackSweep.contains(oneEle)) { trackSweep.add(oneEle); subsetCPM[rowColInd][sweepVec[svIndOffset]._column] = subsetCPM[rowColInd][sweepVec[svIndOffset]._column] - subsetCPM[rowColInd][sweepIndex] * sweepVec[svIndOffset]._value; } } else { // considering rows/columns >= oldSweepVec oneEle = new CPMElement(sweepVec[svInd]._row, rowColInd); if (!trackSweep.contains(oneEle)) { trackSweep.add(oneEle); subsetCPM[sweepVec[svInd]._row][rowColInd] = subsetCPM[sweepVec[svInd]._row][rowColInd] - subsetCPM[sweepVec[svInd]._row][sweepIndex] * subsetCPM[sweepIndex][rowColInd] * sweepVec[svInd]._value; } oneEle = new CPMElement(rowColInd, sweepVec[svIndOffset]._column); if (!trackSweep.contains(oneEle)) { trackSweep.add(oneEle); subsetCPM[rowColInd][sweepVec[svIndOffset]._column] = subsetCPM[rowColInd][sweepVec[svIndOffset]._column] - subsetCPM[rowColInd][sweepIndex] * subsetCPM[sweepIndex][sweepVec[svIndOffset]._column] * sweepVec[svIndOffset]._value; } } } } // take care of updating elements that are not updated for (int rcInd = 0; rcInd < colRowsAdded; rcInd++) { int rowColInd = sweepVec[0]._column + rcInd; subsetCPM[sweepIndex][rowColInd] = rowSweeps[rcInd]; subsetCPM[rowColInd][sweepIndex] = colSweeps[rcInd]; } } /** * Given current CPM which has been swept 
already, we need to add the latest predictor, whose rows/columns have not been swept, to the current CPM. The new elements belonging to the newest predictor are extracted from the original allCPM. * * Basically, I just extract the CPM from the original non-swept CPM that contains rows/columns of CPM * corresponding to the predictors in subsetPredIndex. Next, I copy over the swept rows/columns of CPM * corresponding to the previous predictor subset while leaving the rows/columns corresponding to the new rows/ * columns due to the new predictor unchanged. * * See Step 2 in section V.II.IV of doc. */ public static double[][] addNewPred2CPM(double[][] allCPM, Frame allCPMFrame, double[][] currentCPM, int[] subsetPredIndex, int[][] pred2CPMIndices, boolean hasIntercept) { boolean multinodeMode = allCPM == null && allCPMFrame != null; double[][] newCPM = multinodeMode ? extractPredSubsetsCPMFrame(allCPMFrame, subsetPredIndex, pred2CPMIndices, hasIntercept) : extractPredSubsetsCPM(allCPM, subsetPredIndex, pred2CPMIndices, hasIntercept); int oldCPMDim = currentCPM.length-1; // XTX dimension int newCPMDim = newCPM.length; int lastnewCPMInd = newCPMDim-1; for (int index=0; index<oldCPMDim; index++) { // copy over the swept CPM elements of smaller predictor subset System.arraycopy(currentCPM[index], 0, newCPM[index], 0, oldCPMDim);// copy over old cpm newCPM[index][lastnewCPMInd] = currentCPM[index][oldCPMDim]; // copy over the last column of CPM } // correct last row of newCPM to be part of last row of currentCPM System.arraycopy(currentCPM[oldCPMDim], 0, newCPM[lastnewCPMInd], 0, oldCPMDim); newCPM[lastnewCPMInd][lastnewCPMInd] = currentCPM[oldCPMDim][oldCPMDim]; // copy over corner element return newCPM; } /** * Given predRemoved (the predictor that is to be removed and replaced in the forward step), this method will * calculate the locations of the CPM rows/columns associated with it. currSubsetIndices contains the original * predictor subset. */ public static int[] extractSweepIndices(List<Integer> currSubsetIndices, int predPos, int predRemoved, int[][] predInd2CPMIndices, boolean hasIntercept) { int predRemovedLen = predInd2CPMIndices[predRemoved].length; int totalSize = IntStream.range(0, predPos).map(x->predInd2CPMIndices[currSubsetIndices.get(x)].length).sum() + (hasIntercept ? 1 : 0); return IntStream.range(0, predRemovedLen).map(x -> x+totalSize).toArray(); } public static List<Integer> extractCPMIndexFromPred(int cpmLastIndex, int[][] pred2CPMIndices, int[] newPredList, boolean hasIntercept) { List<Integer> CPMIndices = extractCPMIndexFromPredOnly(pred2CPMIndices, newPredList); if (hasIntercept) CPMIndices.add(0, cpmLastIndex-1); CPMIndices.add(cpmLastIndex); return CPMIndices; } /*** * Given the predictors in subset newPredList, this function will find the rows/columns in the cpm matrix that * are contributed by the predictors in subset newPredList. */ public static List<Integer> extractCPMIndexFromPredOnly(int[][] pred2CPMIndices, int[] newPredList) { List<Integer> CPMIndices = new ArrayList<>(); for (int predInd : newPredList) { CPMIndices.addAll(Arrays.stream(pred2CPMIndices[predInd]).boxed().collect(Collectors.toList())); } return CPMIndices; } /*** * This method performs the sweeping action described in section II of doc. In addition, if genSweepVector is set * to true, it will also generate the corresponding sweep vector arrays described in section V.II of doc.
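 *
 * Minimal usage sketch (hypothetical 3x3 CPM: two predictor rows/columns followed by the response row/column,
 * no intercept):
 * <pre>{@code
 * double[][] cpm = {
 *     { 4.0, 2.0,  6.0 },    // X'X block in the top-left, X'y in the last column
 *     { 2.0, 3.0,  4.0 },
 *     { 6.0, 4.0, 10.0 }     // y'y in the bottom-right corner
 * };
 * SweepVector[][] sv = sweepCPM(cpm, new int[]{0, 1}, true);   // sweep both predictor rows/columns
 * double errVar = cpm[2][2];   // 0.5 here: the residual sum of squares (error variance) of the fit
 * }</pre>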
*/ public static SweepVector[][] sweepCPM(double[][] subsetCPM, int[] sweepIndices, boolean genSweepVector) { int currSubsetCPMSize = subsetCPM.length; int numSweep = sweepIndices.length; SweepVector[][] sweepVecs = new SweepVector[numSweep][2*(currSubsetCPMSize+1)]; for (int index=0; index < numSweep; index++) performOneSweep(subsetCPM, sweepVecs[index], sweepIndices[index], genSweepVector); return sweepVecs; } public static void sweepCPMParallel(Frame cpm, int[] sweepIndices, int[] trackPivotSweeps) { int numSweep = sweepIndices.length; for (int index=0; index < numSweep; index++) { new ModelSelectionTasks.SweepFrameParallel(trackPivotSweeps, sweepIndices[index], cpm).doAll(cpm); DKV.put(cpm); trackPivotSweeps[sweepIndices[index]] *= -1; } } /** * Stores information on sweeping actions that are to be performed on the new rows/columns added to the CPM due to the * addition of new predictors. */ public static class SweepVector { int _row; int _column; double _value; public SweepVector(int rIndex, int cIndex, double val) { _row = rIndex; _column = cIndex; _value = val; } } /*** * Perform one sweep according to section II of doc and generate sweep vector according to section V.II of doc. */ public static void performOneSweep(double[][] subsetCPM, SweepVector[] sweepVec, int sweepIndex, boolean genSweepVector) { int subsetCPMLen = subsetCPM.length; int lastSubsetInd = subsetCPMLen-1; if (subsetCPM[sweepIndex][sweepIndex]==0) { // pivot is zero, set error variance to max value subsetCPM[lastSubsetInd][lastSubsetInd] = Double.MAX_VALUE; return; } else { // subsetCPM is healthy double oneOverPivot = 1.0/subsetCPM[sweepIndex][sweepIndex]; // generate sweep vector as in section V.II of doc if (genSweepVector) { int sweepVecLen = sweepVec.length / 2; for (int index = 0; index < sweepVecLen; index++) { if (index == sweepIndex) { sweepVec[index] = new SweepVector(index, lastSubsetInd, oneOverPivot); sweepVec[index + sweepVecLen] = new SweepVector(lastSubsetInd, index, -oneOverPivot); } else if (index == subsetCPMLen) { sweepVec[index] = new SweepVector(index, lastSubsetInd, subsetCPM[lastSubsetInd][sweepIndex] * oneOverPivot); sweepVec[index + sweepVecLen] = new SweepVector(lastSubsetInd, index, subsetCPM[sweepIndex][lastSubsetInd] * oneOverPivot); } else if (index==lastSubsetInd) { sweepVec[index] = new SweepVector(index, lastSubsetInd, oneOverPivot); sweepVec[index+sweepVecLen] = new SweepVector(lastSubsetInd, index, oneOverPivot); } else { sweepVec[index] = new SweepVector(index, lastSubsetInd, subsetCPM[index][sweepIndex] * oneOverPivot); sweepVec[index + sweepVecLen] = new SweepVector(lastSubsetInd, index, subsetCPM[sweepIndex][index] * oneOverPivot); } } } // perform sweeping action as in section II of doc.
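// sweep update with pivot p = subsetCPM[sweepIndex][sweepIndex]: for i, j != sweepIndex,
// a[i][j] -= a[i][sweepIndex] * a[sweepIndex][j] / p; afterwards column sweepIndex becomes -a[i][sweepIndex] / p,
// row sweepIndex becomes a[sweepIndex][j] / p, and the pivot itself becomes 1/p, which is what the loops below do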
for (int rInd = 0; rInd < subsetCPMLen; rInd++) { for (int cInd = rInd; cInd < subsetCPMLen; cInd++) { if (rInd != sweepIndex && cInd != sweepIndex) { subsetCPM[rInd][cInd] = subsetCPM[rInd][cInd]- subsetCPM[rInd][sweepIndex]*subsetCPM[sweepIndex][cInd]*oneOverPivot; if (cInd != rInd) subsetCPM[cInd][rInd] = subsetCPM[cInd][rInd]- subsetCPM[cInd][sweepIndex]*subsetCPM[sweepIndex][rInd]*oneOverPivot; } } } for (int index=0; index < subsetCPMLen; index++) { subsetCPM[index][sweepIndex] = -subsetCPM[index][sweepIndex]*oneOverPivot; if (sweepIndex != index) subsetCPM[sweepIndex][index] = subsetCPM[sweepIndex][index]*oneOverPivot; } subsetCPM[sweepIndex][sweepIndex] = oneOverPivot; } } public static String[][] shrinkStringArray(String[][] array, int numModels) { int offset = array.length - numModels; String[][] newArray =new String[numModels][]; for (int index=0; index < numModels; index++) newArray[index] = array[offset+index].clone(); return newArray; } public static double[][] shrinkDoubleArray(double[][] array, int numModels) { int offset = array.length-numModels; double[][] newArray =new double[numModels][]; for (int index=0; index < numModels; index++) newArray[index] = array[offset+index].clone(); return newArray; } public static Key[] shrinkKeyArray(Key[] array, int numModels) { int arrLen = array.length; Key[] newArray = new Key[numModels]; System.arraycopy(array, (arrLen-numModels), newArray, 0, numModels); return newArray; } public static String joinDouble(double[] val) { int arrLen = val.length; // skip the intercept terms String[] strVal = new String[arrLen]; for (int index=0; index < arrLen; index++) strVal[index] = Double.toString(val[index]); return String.join(", ", strVal); } public static GLMModel.GLMParameters[] generateGLMParameters(Frame[] trainingFrames, ModelSelectionModel.ModelSelectionParameters parms, int nfolds, String foldColumn, Model.Parameters.FoldAssignmentScheme foldAssignment) { final int numModels = trainingFrames.length; GLMModel.GLMParameters[] params = new GLMModel.GLMParameters[numModels]; final Field[] field1 = ModelSelectionModel.ModelSelectionParameters.class.getDeclaredFields(); final Field[] field2 = Model.Parameters.class.getDeclaredFields(); for (int index = 0; index < numModels; index++) { params[index] = new GLMModel.GLMParameters(); setParamField(parms, params[index], false, field1, Collections.emptyList()); setParamField(parms, params[index], true, field2, Collections.emptyList()); params[index]._train = trainingFrames[index]._key; params[index]._nfolds = nfolds; params[index]._fold_column = foldColumn; params[index]._fold_assignment = foldAssignment; } return params; } public static void setParamField(Model.Parameters params, GLMModel.GLMParameters glmParam, boolean superClassParams, Field[] paramFields, List<String> excludeList) { // assign relevant GAMParameter fields to GLMParameter fields Field glmField; boolean emptyExcludeList = excludeList.size() == 0; for (Field oneField : paramFields) { try { if (emptyExcludeList || !excludeList.contains(oneField.getName())) { if (superClassParams) glmField = glmParam.getClass().getSuperclass().getDeclaredField(oneField.getName()); else glmField = glmParam.getClass().getDeclaredField(oneField.getName()); glmField.set(glmParam, oneField.get(params)); } } catch (IllegalAccessException|NoSuchFieldException e) { // suppress error printing, only cares about fields that are accessible ; } } } public static GLM[] buildGLMBuilders(GLMModel.GLMParameters[] trainingParams) { int numModels = 
trainingParams.length; GLM[] builders = new GLM[numModels]; for (int index=0; index<numModels; index++) builders[index] = new GLM(trainingParams[index]); return builders; } public static void removeTrainingFrames(Frame[] trainingFrames) { for (Frame oneFrame : trainingFrames) DKV.remove(oneFrame._key); } /** * Given GLM run results of a fixed number of predictors, find the model with the best R2 value. * * @param glmResults */ public static GLMModel findBestModel(GLM[] glmResults) { double bestR2Val = 0; int numModels = glmResults.length; GLMModel bestModel = null; for (int index = 0; index < numModels; index++) { GLMModel oneModel = glmResults[index].get(); double currR2 = oneModel.r2(); if (oneModel._parms._nfolds > 0) { int r2Index = Arrays.asList(oneModel._output._cross_validation_metrics_summary.getRowHeaders()).indexOf("r2"); Float tempR2 = (Float) oneModel._output._cross_validation_metrics_summary.get(r2Index, 0); currR2 = tempR2.doubleValue(); } if (currR2 > bestR2Val) { bestR2Val = currR2; if (bestModel != null) bestModel.delete(); bestModel = oneModel; } else { oneModel.delete(); } } return bestModel; } public static String[] extractPredictorNames(Model.Parameters parms, DataInfo dinfo, String foldColumn) { List<String> frameNames = Arrays.stream(dinfo._adaptedFrame.names()).collect(Collectors.toList()); String[] nonResponseCols = parms.getNonPredictors(); for (String col : nonResponseCols) frameNames.remove(col); if (foldColumn != null && frameNames.contains(foldColumn)) frameNames.remove(foldColumn); return frameNames.stream().toArray(String[]::new); } public static int findMinZValue(GLMModel model, List<String> numPredNames, List<String> catPredNames, List<String> predNames) { List<Double> zValList = Arrays.stream(model._output.zValues()).boxed().map(Math::abs).collect(Collectors.toList()); List<String> coeffNames = Arrays.stream(model._output.coefficientNames()).collect(Collectors.toList()); if (coeffNames.contains("Intercept")) { // remove intercept terms int interceptIndex = coeffNames.indexOf("Intercept"); zValList.remove(interceptIndex); coeffNames.remove(interceptIndex); } // grab min z-values for numerical and categorical columns PredNameMinZVal numericalPred = findNumMinZVal(numPredNames, zValList, coeffNames); PredNameMinZVal categoricalPred = findCatMinOfMaxZScore(model, zValList); // null if all predictors are inactive // choose the min z-value from numerical and categorical predictors and return its index in predNames if (categoricalPred != null && categoricalPred._minZVal >= 0 && categoricalPred._minZVal < numericalPred._minZVal) { // categorical pred has minimum z-value return predNames.indexOf(categoricalPred._predName); } else { // numerical pred has minimum z-value return predNames.indexOf(numericalPred._predName); } } public static PredNameMinZVal findNumMinZVal(List<String> numPredNames, List<Double> zValList, List<String> coeffNames) { double minNumVal = -1; String numPredMinZ = null; if (numPredNames != null && numPredNames.size() > 0) { List<Double> numZValues = new ArrayList<>(); for (String predName : numPredNames) { int eleInd = coeffNames.indexOf(predName); double oneZValue = zValList.get(eleInd); if (Double.isNaN(oneZValue)) { zValList.set(eleInd, Double.POSITIVE_INFINITY); numZValues.add(Double.POSITIVE_INFINITY); // NaN corresponds to inactive predictors } else { numZValues.add(oneZValue); } } minNumVal = numZValues.stream().min(Double::compare).get(); // minimum z-value of numerical predictors numPredMinZ = 
numPredNames.get(numZValues.indexOf(minNumVal)); } return new PredNameMinZVal(numPredMinZ, minNumVal); } /*** * This method extracts the categorical coefficient z-score (abs(z-value)) by using the following method: * 1. From GLMModel model, it extracts the column names of the dinfo._adaptedFrame that is used to build the glm * model and generate the glm coefficients. The column names will be in exactly the same order as the coefficient * names with the exception that each enum levels will not be given a name in the column names. * 2. To figure out which coefficient name corresponds to which column name, we use the catOffsets which will tell * us how many enum levels are used in the glm model coefficients. If the catOffset for the first coefficient * says 3, that means that column will have three enum levels represented in the glm model coefficients. * * For categorical predictors with multiple enum levels, we will look at the max z-score. This will show the best * performing enum levels. We will remove the enum predictor if its best z-score is not good enough when compared * to the z-score of other predictors. */ public static PredNameMinZVal findCatMinOfMaxZScore(GLMModel model, List<Double> zValList) { String[] columnNames = model.names(); // column names of dinfo._adaptedFrame int[] catOffsets = model._output.getDinfo()._catOffsets; List<Double> bestZValues = new ArrayList<>(); List<String> catPredNames = new ArrayList<>(); if (catOffsets != null) { int numCatCol = catOffsets.length - 1; int numNaN = (int) zValList.stream().filter(x -> Double.isNaN(x)).count(); if (numNaN == zValList.size()) { // if all levels are NaN, this predictor is redundant return null; } else { for (int catInd = 0; catInd < numCatCol; catInd++) { // go through each categorical column List<Double> catZValues = new ArrayList<>(); int nextCatOffset = catOffsets[catInd + 1]; for (int eleInd = catOffsets[catInd]; eleInd < nextCatOffset; eleInd++) { // check z-value for each level double oneZVal = zValList.get(eleInd); if (Double.isNaN(oneZVal)) { // one level is inactivity, let other levels be used zValList.set(eleInd, 0.0); catZValues.add(0.0); } else { catZValues.add(oneZVal); } } if (catZValues.size() > 0) { double oneCatMinZ = catZValues.stream().max(Double::compare).get(); // choose the best z-value here bestZValues.add(oneCatMinZ); catPredNames.add(columnNames[catInd]); } } } } if (bestZValues.size() < 1) return null; double maxCatLevel = bestZValues.stream().min(Double::compare).get(); String catPredBestZ = catPredNames.get(bestZValues.indexOf(maxCatLevel)); return new PredNameMinZVal(catPredBestZ, maxCatLevel); } static class PredNameMinZVal { String _predName; double _minZVal; public PredNameMinZVal(String predName, double minZVal) { _predName= predName; _minZVal = minZVal; } } public static List<String> extraModelColumnNames(List<String> coefNames, GLMModel bestModel) { List<String> coefUsed = new ArrayList<String>(); List<String> modelColumns = new ArrayList<>(Arrays.asList(bestModel.names())); for (String coefName : modelColumns) { if (coefNames.contains(coefName)) coefUsed.add(coefName); } return coefUsed; } /*** * Given a predictor subset and the complete CPM, we extract the CPM associated with the predictors * specified in the predictor subset (predIndices). If there is intercept, it will be moved to the first row * and column. 
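 *
 * Tiny hypothetical example (layout assumed from extractCPMIndexFromPred, i.e. the intercept row/column sits
 * just before the response row/column of allCPM): with pred2CPMIndices = {{0}, {1, 2}, {3}},
 * predIndices = {2, 0}, hasIntercept = true and allCPM of dimension 6, the extracted rows/columns are, in
 * order, {4 (intercept), 3, 0, 5 (response)}, so the intercept ends up in the first row/column of the
 * returned subset CPM.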
*/ public static double[][] extractPredSubsetsCPM(double[][] allCPM, int[] predIndices, int[][] pred2CPMIndices, boolean hasIntercept) { List<Integer> CPMIndices = extractCPMIndexFromPred(allCPM.length-1, pred2CPMIndices, predIndices, hasIntercept); int subsetcpmDim = CPMIndices.size(); double[][] subsetCPM = new double[subsetcpmDim][subsetcpmDim]; for (int rIndex=0; rIndex < subsetcpmDim; rIndex++) { for (int cIndex=rIndex; cIndex < subsetcpmDim; cIndex++) { subsetCPM[rIndex][cIndex] = allCPM[CPMIndices.get(rIndex)][CPMIndices.get(cIndex)]; subsetCPM[cIndex][rIndex] = allCPM[CPMIndices.get(cIndex)][CPMIndices.get(rIndex)]; } } return subsetCPM; } /*** * Given a predictor subset and the complete CPM, we extract the CPM associated with the predictors * specified in the predictor subset (predIndices). If there is intercept, it will be moved to the first row * and column. */ public static double[][] extractPredSubsetsCPMFrame(Frame allCPM, int[] predIndices, int[][] pred2CPMIndices, boolean hasIntercept) { List<Integer> CPMIndices = extractCPMIndexFromPred(allCPM.numCols()-1, pred2CPMIndices, predIndices, hasIntercept); int subsetcpmDim = CPMIndices.size(); double[][] subsetCPM = new double[subsetcpmDim][subsetcpmDim]; for (int rIndex=0; rIndex < subsetcpmDim; rIndex++) { for (int cIndex=rIndex; cIndex < subsetcpmDim; cIndex++) { subsetCPM[rIndex][cIndex] = allCPM.vec(CPMIndices.get(cIndex)).at(CPMIndices.get(rIndex)); subsetCPM[cIndex][rIndex] = allCPM.vec(CPMIndices.get(rIndex)).at(CPMIndices.get(cIndex)); } } return subsetCPM; } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/naivebayes/NaiveBayes.java
package hex.naivebayes; import hex.*; import hex.naivebayes.NaiveBayesModel.NaiveBayesOutput; import hex.naivebayes.NaiveBayesModel.NaiveBayesParameters; import water.*; import water.exceptions.H2OModelBuilderIllegalArgumentException; import water.fvec.Chunk; import water.util.ArrayUtils; import water.util.PrettyPrint; import water.util.TwoDimTable; import java.util.ArrayList; import java.util.Arrays; import java.util.List; /** * Naive Bayes * This is an algorithm for computing the conditional a-posterior probabilities of a categorical * response from independent predictors using Bayes rule. * <a href = "http://en.wikipedia.org/wiki/Naive_Bayes_classifier">Naive Bayes on Wikipedia</a> * <a href = "http://cs229.stanford.edu/notes/cs229-notes2.pdf">Lecture Notes by Andrew Ng</a> * @author anqi_fu * */ public class NaiveBayes extends ModelBuilder<NaiveBayesModel,NaiveBayesParameters,NaiveBayesOutput> { public boolean isSupervised(){return true;} @Override protected NaiveBayesDriver trainModelImpl() { return new NaiveBayesDriver(); } @Override public ModelCategory[] can_build() { return new ModelCategory[]{ ModelCategory.Unknown }; } @Override public boolean havePojo() { return true; } @Override public boolean haveMojo() { return false; } @Override protected void checkMemoryFootPrint_impl() { // compute memory usage for pcond matrix long mem_usage = (_train.numCols() - 1) * _train.lastVec().cardinality(); String[][] domains = _train.domains(); long count = 0; for (int i = 0; i < _train.numCols() - 1; i++) { count += domains[i] == null ? 2 : domains[i].length; } mem_usage *= count; mem_usage *= 8; //doubles long max_mem = H2O.SELF._heartbeat.get_free_mem(); if (mem_usage > max_mem) { String msg = "Conditional probabilities won't fit in the driver node's memory (" + PrettyPrint.bytes(mem_usage) + " > " + PrettyPrint.bytes(max_mem) + ") - try reducing the number of columns, the number of response classes or the number of categorical factors of the predictors."; error("_train", msg); } } // Called from an http request public NaiveBayes(NaiveBayesModel.NaiveBayesParameters parms) { super(parms); init(false); } public NaiveBayes(boolean startup_once) { super(new NaiveBayesParameters(),startup_once); } @Override public void init(boolean expensive) { super.init(expensive); if (_response != null) { if (!_response.isCategorical()) error("_response", "Response must be a categorical column"); else if (_response.isConst()) error("_response", "Response must have at least two unique categorical levels"); } if (_parms._laplace < 0) error("_laplace", "Laplace smoothing must be a number >= 0"); if (_parms._min_sdev < 1e-10) error("_min_sdev", "Min. standard deviation must be at least 1e-10"); if (_parms._eps_sdev < 0) error("_eps_sdev", "Threshold for standard deviation must be positive"); if (_parms._min_prob < 1e-10) error("_min_prob", "Min. 
probability must be at least 1e-10"); if (_parms._eps_prob < 0) error("_eps_prob", "Threshold for probability must be positive"); hide("_balance_classes", "Balance classes is not applicable to NaiveBayes."); hide("_class_sampling_factors", "Class sampling factors is not applicable to NaiveBayes."); hide("_max_after_balance_size", "Max after balance size is not applicable to NaiveBayes."); if (expensive && error_count() == 0) checkMemoryFootPrint(); } class NaiveBayesDriver extends Driver { public boolean computeStatsFillModel(NaiveBayesModel model, DataInfo dinfo, NBTask tsk) { model._output._levels = _response.domain(); model._output._rescnt = tsk._rescnt; model._output._ncats = dinfo._cats; if(stop_requested() && !timeout()) return false; _job.update(1, "Initializing arrays for model statistics"); // String[][] domains = dinfo._adaptedFrame.domains(); String[][] domains = model._output._domains; double[] apriori = new double[tsk._nrescat]; double[][][] pcond = new double[tsk._npreds][][]; for(int i = 0; i < pcond.length; i++) { int ncnt = domains[i] == null ? 2 : domains[i].length; pcond[i] = new double[tsk._nrescat][ncnt]; } if(stop_requested() && !timeout()) return false; _job.update(1, "Computing probabilities for categorical cols"); // A-priori probability of response y for(int i = 0; i < apriori.length; i++) apriori[i] = ((double)tsk._rescnt[i] + _parms._laplace)/(tsk._nobs + tsk._nrescat * _parms._laplace); // apriori[i] = tsk._rescnt[i]/tsk._nobs; // Note: R doesn't apply laplace smoothing to priors, even though this is textbook definition // Probability of categorical predictor x_j conditional on response y for(int col = 0; col < dinfo._cats; col++) { assert pcond[col].length == tsk._nrescat; for(int i = 0; i < pcond[col].length; i++) { for(int j = 0; j < pcond[col][i].length; j++) pcond[col][i][j] = ((double)tsk._jntcnt[col][i][j] + _parms._laplace)/((double)tsk._rescnt[i] + domains[col].length * _parms._laplace); } } if(stop_requested() && !timeout()) return false; _job.update(1, "Computing mean and standard deviation for numeric cols"); // Mean and standard deviation of numeric predictor x_j for every level of response y for(int col = 0; col < dinfo._nums; col++) { for(int i = 0; i < pcond[0].length; i++) { int cidx = dinfo._cats + col; double num = tsk._rescnt[i]; double pmean = tsk._jntsum[col][i][0]/num; pcond[cidx][i][0] = pmean; // double pvar = tsk._jntsum[col][i][1]/num - pmean * pmean; double pvar = tsk._jntsum[col][i][1]/(num - 1) - pmean * pmean * num/(num - 1); pcond[cidx][i][1] = Math.sqrt(pvar); } } model._output._apriori_raw = apriori; model._output._pcond_raw = pcond; // Create table of conditional probabilities for every predictor model._output._pcond = new TwoDimTable[pcond.length]; String[] rowNames = _response.domain(); for(int col = 0; col < dinfo._cats; col++) { String[] colNames = _train.vec(col).domain(); String[] colTypes = new String[colNames.length]; String[] colFormats = new String[colNames.length]; Arrays.fill(colTypes, "double"); Arrays.fill(colFormats, "%5f"); model._output._pcond[col] = new TwoDimTable(_train.name(col), null, rowNames, colNames, colTypes, colFormats, "Y_by_" + _train.name(col), new String[rowNames.length][], pcond[col]); } for(int col = 0; col < dinfo._nums; col++) { int cidx = dinfo._cats + col; model._output._pcond[cidx] = new TwoDimTable(_train.name(cidx), null, rowNames, new String[] {"Mean", "Std_Dev"}, new String[] {"double", "double"}, new String[] {"%5f", "%5f"}, "Y_by_" + _train.name(cidx), new String[rowNames.length][], 
pcond[cidx]); } // Create table of a-priori probabilities for the response String[] colTypes = new String[_response.cardinality()]; String[] colFormats = new String[_response.cardinality()]; Arrays.fill(colTypes, "double"); Arrays.fill(colFormats, "%5f"); model._output._apriori = new TwoDimTable("A Priori Response Probabilities", null, new String[1], _response.domain(), colTypes, colFormats, "", new String[1][], new double[][] {apriori}); model._output._model_summary = createModelSummaryTable(model._output); if(stop_requested() && !timeout()) return false; _job.update(1, "Scoring and computing metrics on training data"); if (_parms._compute_metrics) { model.score(_parms.train()).delete(); // This scores on the training data and appends a ModelMetrics model._output._training_metrics = ModelMetrics.getFromDKV(model,_parms.train()); } // At the end: validation scoring (no need to gather scoring history) if(stop_requested() && !timeout()) return false; _job.update(1, "Scoring and computing metrics on validation data"); if (_valid != null) { model.score(_parms.valid()).delete(); //this appends a ModelMetrics on the validation set model._output._validation_metrics = ModelMetrics.getFromDKV(model,_parms.valid()); } return true; } @Override public void computeImpl() { NaiveBayesModel model = null; DataInfo dinfo = null; try { init(true); // Initialize parameters if (error_count() > 0) throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(NaiveBayes.this); dinfo = new DataInfo(_train, _valid, 1, false, DataInfo.TransformType.NONE, DataInfo.TransformType.NONE, true, false, false, _weights!=null, false, _fold!=null); // The model to be built model = new NaiveBayesModel(dest(), _parms, new NaiveBayesOutput(NaiveBayes.this)); model.delete_and_lock(_job); _job.update(1, "Begin distributed Naive Bayes calculation"); NBTask tsk = new NBTask(_job._key, dinfo, _response.cardinality()).doAll(dinfo._adaptedFrame); if (computeStatsFillModel(model, dinfo, tsk)) model.update(_job); } finally { if (model != null) model.unlock(_job); if (dinfo != null) dinfo.remove(); } } } private TwoDimTable createModelSummaryTable(NaiveBayesOutput output) { List<String> colHeaders = new ArrayList<>(); List<String> colTypes = new ArrayList<>(); List<String> colFormat = new ArrayList<>(); colHeaders.add("Number of Response Levels"); colTypes.add("long"); colFormat.add("%d"); colHeaders.add("Min Apriori Probability"); colTypes.add("double"); colFormat.add("%.5f"); colHeaders.add("Max Apriori Probability"); colTypes.add("double"); colFormat.add("%.5f"); double apriori_min = output._apriori_raw[0]; double apriori_max = output._apriori_raw[0]; for(int i = 1; i < output._apriori_raw.length; i++) { if(output._apriori_raw[i] < apriori_min) apriori_min = output._apriori_raw[i]; else if(output._apriori_raw[i] > apriori_max) apriori_max = output._apriori_raw[i]; } final int rows = 1; TwoDimTable table = new TwoDimTable( "Model Summary", null, new String[rows], colHeaders.toArray(new String[0]), colTypes.toArray(new String[0]), colFormat.toArray(new String[0]), ""); int row = 0; int col = 0; table.set(row, col++, output._apriori_raw.length); table.set(row, col++, apriori_min); table.set(row, col , apriori_max); return table; } // Note: NA handling differs from R for efficiency purposes // R's method: For each predictor x_j, skip counting that row for p(x_j|y) calculation if x_j = NA. // If response y = NA, skip counting row entirely in all calculations // H2O's method: Just skip all rows where any x_j = NA or y = NA. 
Should be more memory-efficient, but results incomparable with R. private static class NBTask extends MRTask<NBTask> { final protected Key<Job> _jobKey; final DataInfo _dinfo; final String[][] _domains; // Domains of the training frame final int _nrescat; // Number of levels for the response y final int _npreds; // Number of predictors in the training frame public int _nobs; // Number of rows counted in calculation public int[/*nrescat*/] _rescnt; // Count of each level in the response public int[/*npreds*/][/*nrescat*/][] _jntcnt; // For each categorical predictor, joint count of response and predictor levels public double[/*npreds*/][/*nrescat*/][] _jntsum; // For each numeric predictor, sum and squared sum of entries for every response level public NBTask(Key<Job> jobKey, DataInfo dinfo, int nres) { _jobKey = jobKey; _dinfo = dinfo; _nrescat = nres; _domains = dinfo._adaptedFrame.domains(); _npreds = dinfo._cats + dinfo._nums; } @Override public void map(Chunk[] chks) { if( _jobKey.get().stop_requested() ) return; _nobs = 0; _rescnt = new int[_nrescat]; if(_dinfo._cats > 0) { _jntcnt = new int[_dinfo._cats][][]; for (int i = 0; i < _dinfo._cats; i++) { _jntcnt[i] = new int[_nrescat][_domains[i].length]; } } if(_dinfo._nums > 0) { _jntsum = new double[_dinfo._nums][][]; for (int i = 0; i < _dinfo._nums; i++) { _jntsum[i] = new double[_nrescat][2]; } } Chunk res = chks[_dinfo.responseChunkId(0)]; //response OUTER: for(int row = 0; row < chks[0]._len; row++) { if (_dinfo._weights && chks[_dinfo.weightChunkId()].atd(row)==0) continue OUTER; if (_dinfo._weights && chks[_dinfo.weightChunkId()].atd(row)!=1) throw new IllegalArgumentException("Weights must be either 0 or 1 for Naive Bayes."); // Skip row if any entries in it are NA for( Chunk chk : chks ) { if(Double.isNaN(chk.atd(row))) continue OUTER; } // Record joint counts of categorical predictors and response int rlevel = (int)res.atd(row); for(int col = 0; col < _dinfo._cats; col++) { int plevel = (int)chks[col].atd(row); _jntcnt[col][rlevel][plevel]++; } // Record sum for each pair of numerical predictors and response for(int col = 0; col < _dinfo._nums; col++) { int cidx = _dinfo._cats + col; double x = chks[cidx].atd(row); _jntsum[col][rlevel][0] += x; _jntsum[col][rlevel][1] += x*x; } _rescnt[rlevel]++; _nobs++; } } @Override public void reduce(NBTask nt) { _nobs += nt._nobs; ArrayUtils.add(_rescnt, nt._rescnt); if(null != _jntcnt) { for (int col = 0; col < _jntcnt.length; col++) ArrayUtils.add(_jntcnt[col], nt._jntcnt[col]); } if(null != _jntsum) { for (int col = 0; col < _jntsum.length; col++) ArrayUtils.add(_jntsum[col], nt._jntsum[col]); } } } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/naivebayes/NaiveBayesModel.java
package hex.naivebayes; import hex.Model; import hex.ModelMetrics; import hex.ModelMetricsBinomial; import hex.ModelMetricsMultinomial; import hex.genmodel.GenModel; import hex.schemas.NaiveBayesModelV3; import hex.util.EffectiveParametersUtils; import water.H2O; import water.Key; import water.api.schemas3.ModelSchemaV3; import water.codegen.CodeGenerator; import water.codegen.CodeGeneratorPipeline; import water.exceptions.JCodeSB; import water.util.JCodeGen; import water.util.SBPrintStream; import water.util.TwoDimTable; public class NaiveBayesModel extends Model<NaiveBayesModel,NaiveBayesModel.NaiveBayesParameters,NaiveBayesModel.NaiveBayesOutput> { public static class NaiveBayesParameters extends Model.Parameters { public double _laplace = 0; // Laplace smoothing parameter public double _eps_sdev = 0; // Cutoff below which standard deviation is replaced with _min_sdev public double _min_sdev = 0.001; // Minimum standard deviation to use for observations without enough data public double _eps_prob = 0; // Cutoff below which probability is replaced with _min_prob public double _min_prob = 0.001; // Minimum conditional probability to use for observations without enough data public boolean _compute_metrics = true; // Should a second pass be made through data to compute metrics? public String algoName() { return "NaiveBayes"; } public String fullName() { return "Naive Bayes"; } public String javaName() { return NaiveBayesModel.class.getName(); } @Override public long progressUnits() { return 6; } } public static class NaiveBayesOutput extends Model.Output { // Class distribution of the response public TwoDimTable _apriori; public double[/*res level*/] _apriori_raw; // For every predictor, a table providing, for each attribute level, the conditional probabilities given the target class public TwoDimTable[/*predictor*/] _pcond; public double[/*predictor*/][/*res level*/][/*pred level*/] _pcond_raw; // Count of response levels public int[] _rescnt; // Domain of the response public String[] _levels; // Number of categorical predictors public int _ncats; public NaiveBayesOutput(NaiveBayes b) { super(b); } } public NaiveBayesModel(Key selfKey, NaiveBayesParameters parms, NaiveBayesOutput output) { super(selfKey,parms,output); } @Override public void initActualParamValues() { super.initActualParamValues(); EffectiveParametersUtils.initFoldAssignment(_parms); } public ModelSchemaV3 schema() { return new NaiveBayesModelV3(); } // TODO: Constant response shouldn't be regression. Need to override getModelCategory() @Override public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) { switch(_output.getModelCategory()) { case Binomial: return new ModelMetricsBinomial.MetricBuilderBinomial(domain); case Multinomial: return new ModelMetricsMultinomial.MetricBuilderMultinomial(domain.length,domain, _parms._auc_type); default: throw H2O.unimpl(); } } // Note: For small probabilities, product may end up zero due to underflow error. Can circumvent by taking logs. 
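// Illustration with hypothetical numbers: 300 predictors each contributing p(x_j|y) ~ 1e-2 give a raw product
// of ~1e-600, which underflows double precision to 0, while the sum of logs is ~ -1381 and remains perfectly
// representable; this is why score0 below accumulates Math.log terms instead of multiplying probabilities.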
@Override protected double[] score0(double[] data, double[] preds) { double[] nums = new double[_output._levels.length]; // log(p(x,y)) for all levels of y assert preds.length >= (_output._levels.length + 1); // Note: First column of preds is predicted response class // Compute joint probability of predictors for every response class for(int rlevel = 0; rlevel < _output._levels.length; rlevel++) { // Take logs to avoid overflow: p(x,y) = p(x|y)*p(y) -> log(p(x,y)) = log(p(x|y)) + log(p(y)) nums[rlevel] = Math.log(_output._apriori_raw[rlevel]); for(int col = 0; col < _output._ncats; col++) { if(Double.isNaN(data[col])) continue; // Skip predictor in joint x_1,...,x_m if NA int plevel = (int)data[col]; double prob = plevel < _output._pcond_raw[col][rlevel].length ? _output._pcond_raw[col][rlevel][plevel] : _parms._laplace / ((double)_output._rescnt[rlevel] + _parms._laplace * _output._domains[col].length); // Laplace smoothing if predictor level unobserved in training set nums[rlevel] += Math.log(prob <= _parms._eps_prob ? _parms._min_prob : prob); // log(p(x|y)) = \sum_{j = 1}^m p(x_j|y) } // For numeric predictors, assume Gaussian distribution with sample mean and variance from model for(int col = _output._ncats; col < data.length; col++) { if(Double.isNaN(data[col])) continue; // Skip predictor in joint x_1,...,x_m if NA double x = data[col]; double mean = Double.isNaN(_output._pcond_raw[col][rlevel][0]) ? 0 : _output._pcond_raw[col][rlevel][0]; double stddev = Double.isNaN(_output._pcond_raw[col][rlevel][1]) ? 1.0 : (_output._pcond_raw[col][rlevel][1] <= _parms._eps_sdev ? _parms._min_sdev : _output._pcond_raw[col][rlevel][1]); // double prob = Math.exp(new NormalDistribution(mean, stddev).density(data[col])); // slower double prob = Math.exp(-((x-mean)*(x-mean))/(2.*stddev*stddev)) / (stddev*Math.sqrt(2.*Math.PI)); // faster nums[rlevel] += Math.log(prob <= _parms._eps_prob ? 
_parms._min_prob : prob); } } // Numerically unstable: // p(x,y) = exp(log(p(x,y))), p(x) = \Sum_{r = levels of y} exp(log(p(x,y = r))) -> p(y|x) = p(x,y)/p(x) // Instead, we rewrite using a more stable form: // p(y|x) = p(x,y)/p(x) = exp(log(p(x,y))) / (\Sum_{r = levels of y} exp(log(p(x,y = r))) // = 1 / ( exp(-log(p(x,y))) * \Sum_{r = levels of y} exp(log(p(x,y = r))) ) // = 1 / ( \Sum_{r = levels of y} exp( log(p(x,y = r)) - log(p(x,y)) )) for(int i = 0; i < nums.length; i++) { double sum = 0; for(int j = 0; j < nums.length; j++) sum += Math.exp(nums[j] - nums[i]); preds[i+1] = 1/sum; } // Select class with highest conditional probability preds[0] = GenModel.getPrediction(preds, _output._priorClassDist, data, defaultThreshold()); return preds; } @Override protected SBPrintStream toJavaInit(SBPrintStream sb, CodeGeneratorPipeline fileCtx) { sb = super.toJavaInit(sb, fileCtx); sb.ip("public boolean isSupervised() { return " + isSupervised() + "; }").nl(); sb.ip("public int nfeatures() { return " + _output.nfeatures() + "; }").nl(); sb.ip("public int nclasses() { return " + _output.nclasses() + "; }").nl(); // This is model name final String mname = JCodeGen.toJavaId(_key.toString()); fileCtx.add(new CodeGenerator() { @Override public void generate(JCodeSB out) { JCodeGen.toClassWithArray(out, null, mname + "_RESCNT", _output._rescnt, "Count of categorical levels in response."); JCodeGen.toClassWithArray(out, null, mname + "_APRIORI", _output._apriori_raw, "Apriori class distribution of the response."); JCodeGen.toClassWithArray(out, null, mname + "_PCOND", _output._pcond_raw, "Conditional probability of predictors."); double[] dlen = null; if (_output._ncats > 0) { dlen = new double[_output._ncats]; for (int i = 0; i < _output._ncats; i++) dlen[i] = _output._domains[i].length; } JCodeGen.toClassWithArray(out, null, mname + "_DOMLEN", dlen, "Number of unique levels for each categorical predictor."); } }); return sb; } @Override protected void toJavaPredictBody(SBPrintStream bodySb, CodeGeneratorPipeline classCtx, CodeGeneratorPipeline fileCtx, final boolean verboseCode) { // This is model name final String mname = JCodeGen.toJavaId(_key.toString()); bodySb.i().p("java.util.Arrays.fill(preds,0);").nl(); bodySb.i().p("double mean, sdev, prob;").nl(); bodySb.i().p("double[] nums = new double[" + _output._levels.length + "];").nl(); bodySb.i().p("for(int i = 0; i < " + _output._levels.length + "; i++) {").nl(); bodySb.i(1).p("nums[i] = Math.log(").pj(mname+"_APRIORI", "VALUES").p("[i]);").nl(); bodySb.i(1).p("for(int j = 0; j < " + _output._ncats + "; j++) {").nl(); bodySb.i(2).p("if(Double.isNaN(data[j])) continue;").nl(); bodySb.i(2).p("int level = (int)data[j];").nl(); bodySb.i(2).p("prob = level < ").p(_output._pcond_raw.length).p(" ? " + mname + "_PCOND.VALUES[j][i][level] : ") .p(_parms._laplace == 0 ? "0" : _parms._laplace + "/("+mname+"_RESCNT.VALUES[i] + " + _parms._laplace + "*" + mname + "_DOMLEN.VALUES[j])").p(";").nl(); bodySb.i(2).p("nums[i] += Math.log(prob <= " + _parms._eps_prob + " ? " + _parms._min_prob + " : prob);").nl(); bodySb.i(1).p("}").nl(); bodySb.i(1).p("for(int j = " + _output._ncats + "; j < data.length; j++) {").nl(); bodySb.i(2).p("if(Double.isNaN(data[j])) continue;").nl(); bodySb.i(2).p("mean = Double.isNaN("+mname+"_PCOND.VALUES[j][i][0]) ? 0 : "+mname+"_PCOND.VALUES[j][i][0];").nl(); bodySb.i(2).p("sdev = Double.isNaN("+mname+"_PCOND.VALUES[j][i][1]) ? 1 : ("+mname+"_PCOND.VALUES[j][i][1] <= " + _parms._eps_sdev + " ? 
" + _parms._min_sdev + " : "+mname+"_PCOND.VALUES[j][i][1]);").nl(); bodySb.i(2).p("prob = Math.exp(-((data[j]-mean)*(data[j]-mean))/(2.*sdev*sdev)) / (sdev*Math.sqrt(2.*Math.PI));").nl(); bodySb.i(2).p("nums[i] += Math.log(prob <= " + _parms._eps_prob + " ? " + _parms._min_prob + " : prob);").nl(); bodySb.i(1).p("}").nl(); bodySb.i().p("}").nl(); bodySb.i().p("double sum;").nl(); bodySb.i().p("for(int i = 0; i < nums.length; i++) {").nl(); bodySb.i(1).p("sum = 0;").nl(); bodySb.i(1).p("for(int j = 0; j < nums.length; j++) {").nl(); bodySb.i(2).p("sum += Math.exp(nums[j]-nums[i]);").nl(); bodySb.i(1).p("}").nl(); bodySb.i(1).p("preds[i+1] = 1/sum;").nl(); bodySb.i().p("}").nl(); bodySb.i().p("preds[0] = hex.genmodel.GenModel.getPrediction(preds, PRIOR_CLASS_DISTRIB, data, " + defaultThreshold()+");").nl(); } @Override protected boolean isFeatureUsedInPredict(int featureIdx) { /** * NaiveBayes considers each feature independently so even if we would have two features * that are identical to the target NB would pick both... In the case of constant input * hex.Model#adaptTestForTrain will take care of removing the columns and the default logic * will work just fine if `_ignore_const_cols` is true else we check that the feature is * independent to the response. (P(x|resp=A) = P(x|resp=B) =... ) (I think that in the real world being "numerically" * independent will be pretty rare) */ for (int response = 0; response < _output._pcond_raw[featureIdx].length; response++) { double val = _output._pcond_raw[featureIdx][response][0]; for (double p : _output._pcond_raw[featureIdx][response]) { if (val != p) return true; } } return false; } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/optimization/ADMM.java
package hex.optimization; import water.H2O; import water.MemoryManager; import water.util.ArrayUtils; import water.util.Log; import water.util.MathUtils; import water.util.MathUtils.Norm; /** * Created by tomasnykodym on 3/2/15. */ public class ADMM { public interface ProximalSolver { public double [] rho(); public boolean solve(double [] beta_given, double [] result); public boolean hasGradient(); public OptimizationUtils.GradientInfo gradient(double [] beta); public int iter(); } public static class L1Solver { final double RELTOL; final double ABSTOL; double gerr; int iter; final double _eps; final int max_iter; MathUtils.Norm _gradientNorm = Norm.L_Infinite; public double [] _u; public static double DEFAULT_RELTOL = 1e-2; public static double DEFAULT_ABSTOL = 1e-4; public L1Solver setGradientNorm(MathUtils.Norm n) {_gradientNorm = n; return this;} public L1Solver(double eps, int max_iter, double [] u) { this(eps,max_iter,DEFAULT_RELTOL,DEFAULT_ABSTOL,u); } public L1Solver(double eps, int max_iter, double reltol, double abstol, double [] u) { _eps = eps; this.max_iter = max_iter; _u = u; RELTOL = reltol; ABSTOL = abstol; } public L_BFGS.ProgressMonitor _pm; public boolean solve(ProximalSolver solver, double[] res, double lambda, boolean hasIntercept) { return solve(solver, res, lambda, hasIntercept, null, null); } private double computeErr(double[] z, double[] grad, double lambda, double[] lb, double[] ub) { grad = grad.clone(); // check the gradient gerr = 0; if (lb != null) for (int j = 0; j < z.length; ++j) if (z[j] == lb[j] && grad[j] > 0) grad[j] = z[j] >= 0?-lambda:lambda; if (ub != null) for (int j = 0; j < z.length; ++j) if (z[j] == ub[j] && grad[j] < 0) grad[j] = z[j] >= 0?-lambda:lambda; subgrad(lambda, z, grad); switch(_gradientNorm) { case L_Infinite: gerr = ArrayUtils.linfnorm(grad,false); break; case L2_2: gerr = ArrayUtils.l2norm2(grad, false); break; case L2: gerr = Math.sqrt(ArrayUtils.l2norm2(grad, false)); break; case L1: gerr = ArrayUtils.l1norm(grad,false); break; default: throw H2O.unimpl(); } return gerr; } public boolean solve(ProximalSolver solver, double[] z, double l1pen, boolean hasIntercept, double[] lb, double[] ub) { gerr = Double.POSITIVE_INFINITY; iter = 0; if (l1pen == 0 && lb == null && ub == null) { solver.solve(null, z); return true; } int hasIcpt = hasIntercept?1:0; int N = z.length; double abstol = ABSTOL * Math.sqrt(N); double [] rho = solver.rho(); double [] x = z.clone(); double [] beta_given = MemoryManager.malloc8d(N); double [] u; if(_u != null) { u = _u; for (int i = 0; i < beta_given.length - hasIcpt; ++i) beta_given[i] = z[i] - _u[i]; } else u = _u = MemoryManager.malloc8d(z.length); double [] kappa = MemoryManager.malloc8d(rho.length); if(l1pen > 0) for(int i = 0; i < N-hasIcpt; ++i) kappa[i] = rho[i] != 0?l1pen/rho[i]:0; int i; double orlx = 1.0; // over-relaxation double reltol = RELTOL; for (i = 0; i < max_iter && solver.solve(beta_given, x); ++i) { if(_pm != null && (i + 1) % 5 == 0)_pm.progress(z,solver.gradient(z)); // compute u and z updateADMM double rnorm = 0, snorm = 0, unorm = 0, xnorm = 0; for (int j = 0; j < N - hasIcpt; ++j) { double xj = x[j]; double zjold = z[j]; double x_hat = xj * orlx + (1 - orlx) * zjold; double zj = shrinkage(x_hat + u[j], kappa[j]); if (lb != null && zj < lb[j]) zj = lb[j]; if (ub != null && zj > ub[j]) zj = ub[j]; u[j] += x_hat - zj; beta_given[j] = zj - u[j]; double r = xj - zj; double s = zj - zjold; rnorm += r * r; snorm += s * s; xnorm += xj * xj; unorm += rho[j] * rho[j] * u[j] * u[j]; z[j] = 
zj; } if (hasIntercept) { int idx = x.length - 1; double icpt = x[idx]; if (lb != null && icpt < lb[idx]) icpt = lb[idx]; if (ub != null && icpt > ub[idx]) icpt = ub[idx]; double r = x[idx] - icpt; double s = icpt - z[idx]; u[idx] += r; beta_given[idx] = icpt - u[idx]; rnorm += r * r; snorm += s * s; xnorm += icpt * icpt; unorm += rho[idx] * rho[idx] * u[idx] * u[idx]; z[idx] = icpt; } if (rnorm < (abstol + (reltol * Math.sqrt(xnorm))) && snorm < (abstol + reltol * Math.sqrt(unorm))) { double oldGerr = gerr; computeErr(z, solver.gradient(z)._gradient, l1pen, lb, ub); if ((gerr > _eps) /* || solver.improving() */){// && (allzeros || i < 5 /* let some warm up before giving up */ /*|| Math.abs(oldGerr - gerr) > _eps * 0.1*/)) { Log.debug("ADMM.L1Solver: iter = " + i + " , gerr = " + gerr + ", oldGerr = " + oldGerr + ", rnorm = " + rnorm + ", snorm " + snorm); if(abstol > 1e-12) abstol *= .1; if(reltol > 1e-10) reltol *= .1; reltol *= .1; continue; } if(gerr > _eps) Log.warn("ADMM solver finished with gerr = " + gerr + " > eps = " + _eps); iter = i; if(_pm != null && (i + 1) % 5 == 0)_pm.progress(z,solver.gradient(z)); return true; } } computeErr(z, solver.gradient(z)._gradient, l1pen, lb, ub); if(iter == max_iter) Log.warn("ADMM solver reached maximum number of iterations (" + max_iter + ")"); else Log.warn("ADMM solver stopped after " + i + " iterations. (max_iter=" + max_iter + ")"); if(gerr > _eps) Log.warn("ADMM solver finished with gerr = " + gerr + " > eps = " + _eps); iter = max_iter; if(_pm != null && (i + 1) % 5 == 0)_pm.progress(z,solver.gradient(z)); return false; } @Override public String toString(){ return "iter = " + iter + ", gerr = " + gerr; } /** * Estimate optimal rho based on the l1 penalty and (an estimate of) the solution x obtained without the l1 penalty * @param x estimate of the solution without the l1 penalty * @param l1pen l1 penalty * @return estimated rho */ public static double estimateRho(double x, double l1pen, double lb, double ub){ if(Double.isInfinite(x))return 0; // happens for all zeros double rho = 0; if(l1pen != 0 && x != 0) { if (x > 0) { double D = l1pen * (l1pen + 4 * x); if (D >= 0) { D = Math.sqrt(D); double r = (l1pen + D) / (2 * x); if (r > 0) rho = r; else Log.warn("negative rho estimate(1)! r = " + r); } } else if (x < 0) { double D = l1pen * (l1pen - 4 * x); if (D >= 0) { D = Math.sqrt(D); double r = -(l1pen + D) / (2 * x); if (r > 0) rho = r; else Log.warn("negative rho estimate(2)! r = " + r); } } rho *= .25; } if(!Double.isInfinite(lb) || !Double.isInfinite(ub)) { boolean oob = -Math.min(x - lb, ub - x) > -1e-4; rho = oob?10:1e-1; } return rho; } } public static double shrinkage(double x, double kappa) { double sign = x < 0?-1:1; double sx = x*sign; return sx <= kappa?0:sign*(sx - kappa); } public static void subgrad(final double lambda, final double [] beta, final double [] grad){ if(beta == null)return; for(int i = 0; i < grad.length-1; ++i) {// add l1 subgradient term to the gradient if(beta[i] < 0) grad[i] = shrinkage(grad[i]-lambda,lambda*1e-4); else if(beta[i] > 0) grad[i] = shrinkage(grad[i] + lambda,lambda*1e-4); else grad[i] = shrinkage(grad[i], lambda); } } }
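The z-update inside L1Solver.solve above combines the soft-thresholding operator (shrinkage) with box clipping against lb/ub and the scaled dual update. Below is a self-contained sketch of one coordinate of that update, assuming no over-relaxation (orlx = 1); the class and method names are illustrative, and kappa plays the role of l1pen/rho[j] from the code:

public class AdmmZUpdateSketch {
  // Soft-thresholding operator, same formula as ADMM.shrinkage above.
  static double shrinkage(double x, double kappa) {
    double sign = x < 0 ? -1 : 1;
    double sx = x * sign;
    return sx <= kappa ? 0 : sign * (sx - kappa);
  }
  // One coordinate of the ADMM z-update: soft-threshold x_j + u_j with kappa = l1pen/rho_j,
  // clip to [lb, ub], then accumulate the scaled dual variable u_j (passed as a 1-element array
  // so the sketch can mutate it, mirroring the persistent u[] in the solver).
  static double zUpdate(double xj, double[] uj, double kappa, double lb, double ub) {
    double zj = shrinkage(xj + uj[0], kappa);
    if (zj < lb) zj = lb;
    if (zj > ub) zj = ub;
    uj[0] += xj - zj; // dual update, as in the main loop above with orlx = 1
    return zj;
  }
  public static void main(String[] args) {
    double[] u = {0.0};
    double lo = Double.NEGATIVE_INFINITY, hi = Double.POSITIVE_INFINITY;
    System.out.println(zUpdate(0.8, u, 0.5, lo, hi)); // 0.3
    System.out.println(zUpdate(0.2, u, 0.5, lo, hi)); // 0.2 (the dual u carries over from the first update)
  }
}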
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/optimization/L_BFGS.java
package hex.optimization; import hex.optimization.OptimizationUtils.GradientInfo; import hex.optimization.OptimizationUtils.GradientSolver; import hex.optimization.OptimizationUtils.LineSearchSolver; import hex.optimization.OptimizationUtils.MoreThuente; import water.Iced; import water.MemoryManager; import water.util.ArrayUtils; import water.util.MathUtils; import java.util.Arrays; import java.util.Random; /** * Created by tomasnykodym on 9/15/14. * * Generic L-BFGS optimizer implementation. * * NOTE: The solver object keeps its state and so the same object can not be reused to solve different problems. * (but can be used for warm-starting/continuation of the same problem) * * Usage: * * To apply L-BFGS to your optimization problem, provide a GradientSolver with following 2 methods: * 1) double [] getGradient(double []): * evaluate ginfo at given coefficients, typically an MRTask * 2) double [] getObjVals(double[] beta, double[] direction): * evaluate objective value at line-search search points (e.g. objVals[k] = obj(beta + step(k)*direction), step(k) = .75^k) * typically a single MRTask * @see hex.glm.GLM.GLMGradientSolver * * L-BFGS will then perform following loop: * while(not converged): * coefs := doLineSearch(coefs, dir) // distributed, 1 pass over data * ginfo := getGradient(coefs) // distributed, 1 pass over data * history += (coefs, ginfo) // local * dir := newDir(history, ginfo) // local * * 1 L-BFGS iteration thus takes 2 passes over the (distributed) dataset. * */ public final class L_BFGS extends Iced { int _maxIter = 500; double _gradEps = 1e-8; double _objEps = 1e-10; // line search params int _historySz = 20; History _hist; public L_BFGS() {} public L_BFGS setMaxIter(int m) {_maxIter = m; return this;} public L_BFGS setGradEps(double d) {_gradEps = d; return this;} public L_BFGS setObjEps(double d) { _objEps = d; return this; } public L_BFGS setHistorySz(int sz) {_historySz = sz; return this;} public int k() {return _hist._k;} public int maxIter(){ return _maxIter;} /** * Monitor progress and enable early termination. */ public interface ProgressMonitor { boolean progress(double [] betaDiff, GradientInfo ginfo); } // constants used in line search public static final class Result { public final int iter; public final double [] coefs; public final GradientInfo ginfo; public final boolean converged; public final double rel_improvement; public Result(boolean converged, int iter, double [] coefs, GradientInfo ginfo, double rel_improvement){ this.iter = iter; this.coefs = coefs; this.ginfo = ginfo; this.converged = converged; this.rel_improvement = rel_improvement; } public String toString(){ if(coefs.length < 10) { return "L-BFGS_res(converged? " + converged + ", iter = " + iter + ", obj = " + ginfo._objVal + ", rel_improvement = " + rel_improvement + ", coefs = " + Arrays.toString(coefs) + ", grad = " + Arrays.toString(ginfo._gradient) + ")"; } else { return "L-BFGS_res(converged? " + converged + ", iter = " + iter + ", obj = " + ginfo._objVal + ", rel_improvement = " + rel_improvement + "grad_linf_norm = " + ArrayUtils.linfnorm(ginfo._gradient,false) + ")"; } } } /** * Keeps L-BFGS history ie curvature information recorded over the last m steps. 
*/ public static final class History extends Iced { private final double [][] _s; private final double [][] _y; private final double [] _rho; private final double [] _alpha; final int _m, _n; public History(int m, int n) { _m = m; _alpha = new double[_m]; _n = n; _s = new double[m][]; _y = new double[m][]; _rho = MemoryManager.malloc8d(m); Arrays.fill(_rho,Double.NaN); for (int i = 0; i < m; ++i) { _s[i] = MemoryManager.malloc8d(n); Arrays.fill(_s[i], Double.NaN); // to make sure we don't just run with zeros _y[i] = MemoryManager.malloc8d(n); Arrays.fill(_y[i], Double.NaN); } } int getId(int k) {return (_k + k) % _m;} int _k; private final void update(double [] pk, double [] gNew, double [] gOld){ int id = getId(0); double[] y = _y[id]; double[] s = _s[id]; for (int i = 0; i < gNew.length; ++i) y[i] = gNew[i] - gOld[i]; System.arraycopy(pk,0,s,0,pk.length); _rho[id] = 1.0/ArrayUtils.innerProduct(s,y); ++_k; } // the actual core of L-BFGS algo // compute new search direction using the ginfo at current beta and history protected final double [] getSearchDirection(final double [] gradient, double [] q) { System.arraycopy(gradient,0,q,0,q.length); if(_k != 0) { int k = Math.min(_k,_m); for (int i = 1; i <= k; ++i) { int id = getId(-i); _alpha[id] = _rho[id] * ArrayUtils.innerProduct(_s[id], q); MathUtils.wadd(q, _y[id], -_alpha[id]); } int lastId = getId(-1); final double[] y = _y[lastId]; double Hk0 = -1.0 / (ArrayUtils.innerProduct(y, y) * _rho[lastId]); ArrayUtils.mult(q, Hk0); for (int i = k; i > 0; --i) { int id = getId(-i); double beta = _rho[id] * ArrayUtils.innerProduct(_y[id], q); MathUtils.wadd(q, _s[id], -_alpha[id] - beta); } } else ArrayUtils.mult(q,-1); return q; } } /** * Solve the optimization problem defined by the user-supplied ginfo function using the L-BFGS algorithm. * * Will result in multiple (10s to 100s or even 1000s) calls of the user-provided ginfo function. * Outside of that it does only limited single-threaded computation (order of number of coefficients). * The ginfo is likely to be the most expensive part and key for good performance. * * @param gslvr - user ginfo function * @param beta - starting solution * @return Optimal solution (coefficients) + ginfo info returned by the user ginfo * function evaluated at the found optimum. 
*/ // public final Result solve(GradientSolver gslvr, double [] beta, GradientInfo ginfo, ProgressMonitor pm) {return solve(gslvr,beta,beta.clone(),ginfo,pm);} public final Result solve(GradientSolver gslvr, double [] beta, GradientInfo ginfo, ProgressMonitor pm) { if(_hist == null) _hist = new History(_historySz, beta.length); int iter = 0; double rel_improvement = 1; final double [] pk = new double[beta.length]; double minStep = 1e-16; LineSearchSolver lineSearch = new MoreThuente(gslvr,beta,ginfo); while(!ArrayUtils.hasNaNsOrInfs(beta) && (ArrayUtils.linfnorm(ginfo._gradient,false) > _gradEps && rel_improvement > _objEps) && iter != _maxIter) { ++iter; _hist.getSearchDirection(ginfo._gradient,pk); if(!lineSearch.evaluate(pk)) break; lineSearch.setInitialStep(Math.max(minStep, lineSearch.step())); GradientInfo newGinfo = lineSearch.ginfo(); _hist.update(pk, newGinfo._gradient, ginfo._gradient); rel_improvement = (ginfo._objVal - newGinfo._objVal)/Math.abs(ginfo._objVal); ginfo = newGinfo; if(!pm.progress(lineSearch.getX(), ginfo))break; } return new Result((ArrayUtils.linfnorm(ginfo._gradient,false) <= _gradEps || rel_improvement <= _objEps),iter,lineSearch.getX(), lineSearch.ginfo(),rel_improvement); } /** * Solve the optimization problem defined by the user-supplied ginfo function using the L-BFGS algorithm. * * Will result in multiple (10s to 100s or even 1000s) calls of the user-provided ginfo function. * Outside of that it does only limited single-threaded computation (order of number of coefficients). * The ginfo is likely to be the most expensive part and key for good performance. * * @param gslvr - user ginfo function * @param coefs - initial solution * @return Optimal solution (coefficients) + ginfo info returned by the user ginfo * function evaluated at the found optimum. */ public final Result solve(GradientSolver gslvr, double [] coefs){ return solve(gslvr, coefs, gslvr.getGradient(coefs), new ProgressMonitor(){ @Override public boolean progress(double[] beta, GradientInfo ginfo) { return true; } }); } public static double [] startCoefs(int n, long seed){ double [] res = MemoryManager.malloc8d(n); Random r = new Random(seed); for(int i = 0; i < res.length; ++i) res[i] = r.nextGaussian(); return res; } }
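As the class comment above describes, using L_BFGS only requires supplying a GradientSolver. A minimal sketch of driving it on a toy quadratic follows; it assumes the hex.optimization classes from this dump (L_BFGS, OptimizationUtils.GradientSolver, OptimizationUtils.GradientInfo) are available on the classpath, and the quadratic itself is purely illustrative:

import hex.optimization.L_BFGS;
import hex.optimization.OptimizationUtils.GradientInfo;
import hex.optimization.OptimizationUtils.GradientSolver;

public class LbfgsQuadraticSketch {
  public static void main(String[] args) {
    final double[] target = {1.0, -2.0, 3.0}; // minimizer of f(b) = 0.5*||b - target||^2
    GradientSolver gslvr = new GradientSolver() {
      @Override public GradientInfo getGradient(double[] beta) {
        double obj = 0;
        double[] grad = new double[beta.length];
        for (int i = 0; i < beta.length; i++) {
          grad[i] = beta[i] - target[i];   // gradient of the quadratic
          obj += 0.5 * grad[i] * grad[i];  // objective value
        }
        return new GradientInfo(obj, grad);
      }
      @Override public GradientInfo getObjective(double[] beta) { return getGradient(beta); }
    };
    // solve(gslvr, coefs) evaluates the starting gradient and runs the L-BFGS loop shown above
    L_BFGS.Result res = new L_BFGS().setMaxIter(100).solve(gslvr, new double[3]);
    System.out.println(res); // coefficients should end up close to {1, -2, 3}
  }
}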
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/optimization/OptimizationUtils.java
package hex.optimization; import hex.glm.ComputationState; import hex.glm.ConstrainedGLMUtils; import hex.glm.GLM; import water.Iced; import water.util.ArrayUtils; import water.util.Log; import java.util.Arrays; import java.util.List; import static hex.glm.ComputationState.EPS_CS_SQUARE; import static hex.glm.ConstrainedGLMUtils.*; /** * Created by tomasnykodym on 9/29/15. */ public class OptimizationUtils { public static class GradientInfo extends Iced { public double _objVal; public double [] _gradient; public GradientInfo(double objVal, double [] grad){ _objVal = objVal; _gradient = grad; } public boolean isValid(){ if(Double.isNaN(_objVal)) return false; return !ArrayUtils.hasNaNsOrInfs(_gradient); } @Override public String toString(){ return " objVal = " + _objVal + ", " + Arrays.toString(_gradient); } } /** * Provides ginfo computation and line search evaluation specific to given problem. * Typically just a wrapper around MRTask calls. */ public interface GradientSolver { /** * Evaluate ginfo at solution beta. * @param beta * @return */ GradientInfo getGradient(double [] beta); GradientInfo getObjective(double [] beta); } public interface LineSearchSolver { boolean evaluate(double [] direction); double step(); GradientInfo ginfo(); LineSearchSolver setInitialStep(double s); int nfeval(); double getObj(); double[] getX(); } public static final class SimpleBacktrackingLS implements LineSearchSolver { private double [] _beta; final double _stepDec = .33; private double _step; private final GradientSolver _gslvr; private GradientInfo _ginfo; // gradient info excluding l1 penalty private double _objVal; // objective including l1 penalty final double _l1pen; int _maxfev = 20; double _minStep = 1e-4; public SimpleBacktrackingLS(GradientSolver gslvr, double [] betaStart, double l1pen) { this(gslvr, betaStart, l1pen, gslvr.getObjective(betaStart)); } public SimpleBacktrackingLS(GradientSolver gslvr, double [] betaStart, double l1pen, GradientInfo ginfo) { _gslvr = gslvr; _beta = betaStart; _ginfo = ginfo; _l1pen = l1pen; _objVal = _ginfo._objVal + _l1pen * ArrayUtils.l1norm(_beta,true); // gam smoothness should have been added } public int nfeval() {return -1;} @Override public double getObj() {return _objVal;} @Override public double[] getX() {return _beta;} public LineSearchSolver setInitialStep(double s){ return this; } @Override public boolean evaluate(double[] direction) { double step = 1; double minStep = 1; for(double d:direction) { d = Math.abs(1e-4/d); if(d < minStep) minStep = d; } double [] newBeta = direction.clone(); for(int i = 0; i < _maxfev && step >= minStep; ++i, step*= _stepDec) { GradientInfo ginfo = _gslvr.getObjective(ArrayUtils.wadd(_beta,direction,newBeta,step)); double objVal = ginfo._objVal + _l1pen * ArrayUtils.l1norm(newBeta,true); if(objVal < _objVal){ _ginfo = ginfo; _objVal = objVal; _beta = newBeta; _step = step; return true; } } return false; } @Override public double step() { return _step; } @Override public GradientInfo ginfo() { return _ginfo; } @Override public String toString(){return "";} } public static final class MoreThuente implements LineSearchSolver { double _stMin, _stMax; double _initialStep = 1; double _minRelativeImprovement = 1e-8; private final GradientSolver _gslvr; private double [] _beta; public MoreThuente(GradientSolver gslvr, double [] betaStart){ this(gslvr,betaStart,gslvr.getGradient(betaStart),.1,.1,1e-2); } public MoreThuente(GradientSolver gslvr, double [] betaStart, GradientInfo ginfo){ this(gslvr,betaStart,ginfo,.1,.1,1e-8); } 
public MoreThuente(GradientSolver gslvr, double [] betaStart, GradientInfo ginfo, double ftol, double gtol, double xtol){ _gslvr = gslvr; _beta = betaStart; _ginfox = ginfo; if(ginfo._gradient == null) throw new IllegalArgumentException("GradientInfo for MoreThuente line search solver must include gradient"); _ftol = ftol; _gtol = gtol; _xtol = xtol; } public MoreThuente setInitialStep(double t) {_initialStep = t; return this;} @Override public int nfeval() { return _iter; } @Override public double getObj() {return ginfo()._objVal;} @Override public double[] getX() { return _beta;} double _xtol = 1e-8; double _ftol = .1; // .2/.25 works double _gtol = .1; double _xtrapf = 4; // fval, dg and step of the best step so far double _fvx; double _dgx; double _stx; double _bestStep; GradientInfo _betGradient; // gradient info with at least minimal relative improvement and best value of augmented function double _bestPsiVal; // best value of augmented function GradientInfo _ginfox; // fval, dg and step of the best step so far double _fvy; double _dgy; double _sty; boolean _brackt; boolean _bound; int _returnStatus; public final String [] messages = new String[]{ "In progress or not evaluated", // 0 "The sufficient decrease condition and the directional derivative condition hold.", // 1 "Relative width of the interval of uncertainty is at most xtol.", // 2 "Number of calls to gradient solver has reached the limit.", // 3 "The step is at the lower bound stpmin.", // 4 "The step is at the upper bound stpmax.", // 5 "Rounding errors prevent further progress, ftol/gtol tolerances may be too small.", // 6 "Non-negative differential." // 7 }; private double nextStep(GradientInfo ginfo, double dg, double stp, double off) { double fvp = ginfo._objVal - stp*off; double dgp = dg - off; double fvx = _fvx - _stx * off; double fvy = _fvy - _sty * off; double stx = _stx; double sty = _sty; double dgx = _dgx - off; double dgy = _dgy - off; if ((_brackt && (stp <= Math.min(stx,sty) || stp >= Math.max(stx,sty))) || dgx*(stp-stx) >= 0.0) return Double.NaN; double theta = 3 * (fvx - fvp) / (stp - stx) + dgx + dgp; double s = Math.max(Math.max(Math.abs(theta),Math.abs(dgx)),Math.abs(dgp)); double sInv = 1/s; double ts = theta*sInv; double gamma = s*Math.sqrt(Math.max(0., (ts*ts) - ((dgx * sInv) * (dgp*sInv)))); int info = 0; // case 1 double nextStep; if (fvp > fvx) { info = 1; if (stp < stx) gamma = -gamma; _bound = true; _brackt = true; double p = (gamma - dgx) + theta; double q = ((gamma - dgx) + gamma) + dgp; double r = p / q; double stpc = stx + r * (stp - stx); double stpq = stx + ((dgx / ((fvx - fvp) / (stp - stx) + dgx)) / 2) * (stp - stx); nextStep = (Math.abs(stpc - stx) < Math.abs(stpq - stx)) ? stpc : stpc + (stpq - stpc) / 2; } else if (dgp * dgx < 0) { // case 2 info = 2; if (stp > stx) gamma = -gamma; _bound = false; _brackt = true; double p = (gamma - dgp) + theta; double q = ((gamma - dgp) + gamma) + dgx; double r = p / q; double stpc = stp + r * (stx - stp); double stpq = stp + (dgp / (dgp - dgx)) * (stx - stp); nextStep = (Math.abs(stpc - stp) > Math.abs(stpq - stp)) ? 
stpc : stpq; } else if (Math.abs(dgp) < Math.abs(dgx)) { // case 3 info = 3; if (stp > stx) gamma = -gamma; _bound = true; double p = gamma - dgp + theta; double q = gamma + dgx - dgp + gamma; double r = p / q; double stpc; if (r < 0.0 && gamma != 0.0) stpc = stp + r * (stx - stp); else if (stp > stx) stpc = _stMax; else stpc = _stMin; // stpq = stp + (dp/(dp-dx))*(stx - stp); double stpq = stp + (dgp / (dgp - dgx)) * (stx - stp); if (_brackt) nextStep = (Math.abs(stp - stpc) < Math.abs(stp - stpq)) ? stpc : stpq; else nextStep = (Math.abs(stp - stpc) > Math.abs(stp - stpq)) ? stpc : stpq; } else { // case 4 info = 4; _bound = false; if (_brackt) { theta = 3 * (fvp - fvy) / (sty - stp) + dgy + dgp; gamma = Math.sqrt(theta * theta - dgy * dgp); if (stp > sty) gamma = -gamma; double p = (gamma - dgp) + theta; double q = ((gamma - dgp) + gamma) + dgy; double r = p / q; nextStep = stp + r * (sty - stp); } else nextStep = stp > stx ? _stMax : _stMin; } if(fvp > fvx) { _sty = stp; _fvy = ginfo._objVal; _dgy = dg; } else { if(dgp * dgx < 0) { _sty = _stx; _fvy = _fvx; _dgy = _dgx; } _stx = stp; _fvx = ginfo._objVal; _dgx = dg; _ginfox = ginfo; } if(nextStep > _stMax) nextStep = _stMax; if(nextStep < _stMin) nextStep = _stMin; if (_brackt & _bound) if (_sty > _stx) nextStep = Math.min(_stx + .66 * (_sty - _stx), nextStep); else nextStep = Math.max(_stx + .66 * (_sty - _stx), nextStep); return nextStep; } public String toString(){ return "MoreThuente line search, iter = " + _iter + ", status = " + messages[_returnStatus] + ", step = " + _stx + ", I = " + "[" + _stMin + ", " + _stMax + "], grad = " + _dgx + ", bestObj = " + _fvx; } private int _iter; int _maxfev = 20; double _maxStep = 1e10; double _minStep = 1e-10; @Override public boolean evaluate(double [] direction) { double oldObjval = _ginfox._objVal; double step = _initialStep; _bound = false; _brackt = false; _stx = _sty = 0; _stMin = _stMax = 0; _betGradient = null; _bestPsiVal = Double.POSITIVE_INFINITY; _bestStep = 0; double maxObj = _ginfox._objVal - _minRelativeImprovement*_ginfox._objVal; final double dgInit = ArrayUtils.innerProduct(_ginfox._gradient, direction); final double dgtest = dgInit * _ftol; if(dgtest > 1e-4) Log.warn("MoreThuente LS: got possitive differential " + dgtest); if(dgtest >= 0) { _returnStatus = 7; return false; } double [] beta = new double[_beta.length]; double width = _maxStep - _minStep; double oldWidth = 2*width; boolean stage1 = true; _fvx = _fvy = _ginfox._objVal; _dgx = _dgy = dgInit; _iter = 0; while (true) { if (_brackt) { _stMin = Math.min(_stx, _sty); _stMax = Math.max(_stx, _sty); } else { _stMin = _stx; _stMax = step + _xtrapf * (step - _stx); } step = Math.min(step,_maxStep); step = Math.max(step,_minStep); double maxFval = oldObjval + step * dgtest; for (int i = 0; i < beta.length; ++i) beta[i] = _beta[i] + step * direction[i]; GradientInfo newGinfo = _gslvr.getGradient(beta); // gradient for one class, active cols if rcc=true if(newGinfo._objVal < maxObj && (_betGradient == null || (newGinfo._objVal - maxFval) < _bestPsiVal)){ _bestPsiVal = (newGinfo._objVal - maxFval); _betGradient = newGinfo; _bestStep = step; } ++_iter; if(_iter < _maxfev && (!Double.isNaN(step) && (Double.isNaN(newGinfo._objVal) || Double.isInfinite(newGinfo._objVal) || ArrayUtils.hasNaNsOrInfs(newGinfo._gradient)))) { _brackt = true; _sty = step; _maxStep = step; _fvy = Double.POSITIVE_INFINITY; _dgy = Double.MAX_VALUE; step *= .5; continue; } double dgp = ArrayUtils.innerProduct(newGinfo._gradient, direction); 
if(Double.isNaN(step) || _brackt && (step <= _stMin || step >= _stMax)) { _returnStatus = 6; break; } if (step == _maxStep && newGinfo._objVal <= maxFval & dgp <= dgtest){ _returnStatus = 5; _stx = step; _ginfox = newGinfo; break; } if (step == _minStep && (newGinfo._objVal > maxFval | dgp >= dgtest)){ _returnStatus = 4; if(_betGradient != null) { _stx = _bestStep; _ginfox = _betGradient; } else { _stx = step; _ginfox = newGinfo; } break; } if (_iter >= _maxfev){ _returnStatus = 3; if(_betGradient != null) { _stx = _bestStep; _ginfox = _betGradient; } else { _stx = step; _ginfox = newGinfo; } break; } if (_brackt && (_stMax-_stMin) <= _xtol*_stMax) { _ginfox = newGinfo; _returnStatus = 2; break; } // check for convergence if (newGinfo._objVal < maxFval && Math.abs(dgp) <= -_gtol * dgInit) { // got solution satisfying both conditions _stx = step; _dgx = dgp; _fvx = newGinfo._objVal; _ginfox = newGinfo; _returnStatus = 1; break; } // f > ftest1 || dg < min(ftol,gtol)*dginit stage1 = stage1 && (newGinfo._objVal > maxFval || dgp < dgtest); boolean useAugmentedFuntcion = stage1 && newGinfo._objVal <= _fvx && newGinfo._objVal > maxFval; double off = useAugmentedFuntcion?dgtest:0; double nextStep = nextStep(newGinfo,dgp,step,off); if (_brackt) { if (Math.abs(_sty - _stx) >= .66 * oldWidth) nextStep = _stx + .5 * (_sty - _stx); oldWidth = width; width = Math.abs(_sty - _stx); } step = nextStep; } boolean succ = _ginfox._objVal < oldObjval; if(succ) { // make sure we have correct beta (not all return cases have valid current beta!) for (int i = 0; i < beta.length; ++i) beta[i] = _beta[i] + _stx * direction[i]; _beta = beta; } return succ; } @Override public double step() {return _stx;} @Override public GradientInfo ginfo() { return _ginfox; } } /*** * This class implements the exact line search described in the doc, Algorithm 11.5 */ public static final class ExactLineSearch { public final double _betaLS1 = 1e-4; public final double _betaLS2 = 0.99; public final double _lambdaLS = 2; public double _alphal; public double _alphar; public double _alphai; public double[] _direction; // beta_k+1 - beta_k public int _maxIteration = 50; // 40 too low for tight constraints public double[] _originalBeta; public double[] _newBeta; public GLM.GLMGradientInfo _ginfoOriginal; public double _currGradDirIP; // current gradient and direction inner product public String[] _coeffNames; public ExactLineSearch(double[] betaCnd, ComputationState state, List<String> coeffNames) { reset(betaCnd, state, coeffNames); } public void reset(double[] betaCnd, ComputationState state, List<String> coeffNames) { _direction = new double[betaCnd.length]; ArrayUtils.subtract(betaCnd, state.beta(), _direction); _ginfoOriginal = state.ginfo(); _originalBeta = state.beta(); _alphai = 1; _alphal = 0; _alphar = Double.POSITIVE_INFINITY; _coeffNames = coeffNames.toArray(new String[0]); _currGradDirIP = ArrayUtils.innerProduct(_ginfoOriginal._gradient, _direction); } /*** * Evaluate and make sure that step size alphi is not too big so that objective function is still decreasing. Refer * to the doc, Definition 11.6 */ public boolean evaluateFirstWolfe(GLM.GLMGradientInfo ginfoNew) { double newObj = ginfoNew._objVal; double rhs = _ginfoOriginal._objVal+_alphai*_betaLS1* _currGradDirIP; return (newObj <= rhs); } /*** * Evaluate and make sure that step size alphi is not too small so that good progress is made in reducing the * loss function. 
Refer to the doc, Definition 11.8 */ public boolean evaluateSecondWolfe(GLM.GLMGradientInfo ginfo) { double lhs = ArrayUtils.innerProduct(ginfo._gradient, _direction); return lhs >= _betaLS2* _currGradDirIP; } public boolean setAlphai(boolean firstWolfe, boolean secondWolfe) { if (!firstWolfe && secondWolfe) { // step is too long _alphar = _alphai; _alphai = 0.5*(_alphal+_alphar); return true; } else if (firstWolfe && !secondWolfe) { // step is too short _alphal = _alphai; if (_alphar < Double.POSITIVE_INFINITY) _alphai = 0.5*(_alphal+_alphar); else _alphai = _lambdaLS * _alphai; return true; } return false; } public void setBetaConstraintsDeriv(double[] lambdaEqual, double[] lambdaLessThan, ComputationState state, ConstrainedGLMUtils.LinearConstraints[] equalityConstraints, ConstrainedGLMUtils.LinearConstraints[] lessThanEqualToConstraints, GLM.GLMGradientSolver gradientSolver, double[] betaCnd) { _newBeta = betaCnd; updateConstraintValues(betaCnd, Arrays.asList(_coeffNames), equalityConstraints, lessThanEqualToConstraints); calculateConstraintSquare(state, equalityConstraints, lessThanEqualToConstraints); // update gradient from constraints transpose(lambda)*h(beta)(value, not changed, active status may change) // and gram contribution from ck/2*transpose(h(beta))*h(beta)), value not changed but active status may change state.updateConstraintInfo(equalityConstraints, lessThanEqualToConstraints); // calculate new gradient and objective function; _ginfoOriginal = calGradient(betaCnd, state, gradientSolver, lambdaEqual, lambdaLessThan, equalityConstraints, lessThanEqualToConstraints); } /*** * Implements the Line Search algorithm in the doc, Algorithm 11.5. */ public boolean findAlpha(double[] lambdaEqual, double[] lambdaLessThan, ComputationState state, ConstrainedGLMUtils.LinearConstraints[] equalityConstraints, ConstrainedGLMUtils.LinearConstraints[] lessThanEqualToConstraints, GLM.GLMGradientSolver gradientSolver) { if (_currGradDirIP > 0) { _newBeta = _originalBeta; return false; } GLM.GLMGradientInfo newGrad; double[] newCoef; int betaLen = _originalBeta.length; double[] tempDirection = new double[betaLen]; boolean firstWolfe; boolean secondWolfe; boolean alphaiChange; double gradMagSquare; boolean gradSmallEnough; for (int index=0; index<_maxIteration; index++) { ArrayUtils.mult(_direction, tempDirection, _alphai); // tempCoef=alpha_i*direction newCoef = ArrayUtils.add(tempDirection, _originalBeta); // newCoef = coef + alpha_i*direction // calculate constraint values with new coefficients, constraints magnitude square updateConstraintValues(newCoef, Arrays.asList(_coeffNames), equalityConstraints, lessThanEqualToConstraints); calculateConstraintSquare(state, equalityConstraints, lessThanEqualToConstraints); // update gradient from constraints transpose(lambda)*h(beta)(value, not changed, active status may change) // and gram contribution from ck/2*transpose(h(beta))*h(beta)), value not changed but active status may change state.updateConstraintInfo(equalityConstraints, lessThanEqualToConstraints); // calculate new gradient and objective function for new coefficients newCoef newGrad = calGradient(newCoef, state, gradientSolver, lambdaEqual, lambdaLessThan, equalityConstraints, lessThanEqualToConstraints); gradMagSquare = ArrayUtils.innerProduct(newGrad._gradient, newGrad._gradient); gradSmallEnough = gradMagSquare <= state._csGLMState._epsilonkCSSquare; // evaluate if first Wolfe condition is satisfied; firstWolfe = evaluateFirstWolfe(newGrad); // evaluate if second Wolfe condition is 
satisfied; secondWolfe = evaluateSecondWolfe(newGrad); // return if both conditions are satisfied; if (firstWolfe && secondWolfe) { _newBeta = newCoef; _ginfoOriginal = newGrad; return true; } // shrink alphai if the first Wolfe condition is not satisfied, grow alphai if the second Wolfe condition is not satisfied; alphaiChange = setAlphai(firstWolfe, secondWolfe); if (!alphaiChange || _alphar < EPS_CS_SQUARE) { // quit if alphai was not changed or alphar is too small if (gradSmallEnough) { _newBeta = newCoef; _ginfoOriginal = newGrad; } return false; } } return false; } } }
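ExactLineSearch above accepts a step alphai only when both Wolfe conditions hold, and otherwise bisects or expands the bracket [alphal, alphar]. The standalone sketch below mirrors evaluateFirstWolfe, evaluateSecondWolfe and a simplified form of setAlphai on a one-dimensional toy objective; the constants play the roles of _betaLS1, _betaLS2 and _lambdaLS, and everything else is illustrative:

public class WolfeLineSearchSketch {
  static final double C1 = 1e-4, C2 = 0.99, LAMBDA = 2; // same roles as _betaLS1, _betaLS2, _lambdaLS above
  static double f(double x)  { return (x - 3) * (x - 3); } // toy objective
  static double df(double x) { return 2 * (x - 3); }       // its derivative
  public static void main(String[] args) {
    double x = 0, d = 1;            // current point and (descent) direction
    double gd = df(x) * d;          // directional derivative, negative for a descent direction
    double alphal = 0, alphar = Double.POSITIVE_INFINITY, alpha = 8;
    for (int it = 0; it < 50; it++) {
      boolean firstWolfe  = f(x + alpha * d) <= f(x) + C1 * alpha * gd; // sufficient decrease
      boolean secondWolfe = df(x + alpha * d) * d >= C2 * gd;           // curvature condition
      if (firstWolfe && secondWolfe) break;
      if (!firstWolfe) {            // step too long: shrink the bracket from above and bisect
        alphar = alpha;
        alpha = 0.5 * (alphal + alphar);
      } else {                      // step too short: raise the lower end, bisect or expand
        alphal = alpha;
        alpha = Double.isInfinite(alphar) ? LAMBDA * alpha : 0.5 * (alphal + alphar);
      }
    }
    System.out.println("accepted step = " + alpha); // prints 4.0 for this toy setup
  }
}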
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/pca/ModelMetricsPCA.java
package hex.pca; import hex.CustomMetric; import hex.Model; import hex.ModelMetrics; import hex.ModelMetricsUnsupervised; import water.fvec.Frame; public class ModelMetricsPCA extends ModelMetricsUnsupervised { public ModelMetricsPCA(Model model, Frame frame, CustomMetric customMetric) { super(model, frame, 0, Double.NaN, customMetric); } // PCA currently does not have any model metrics to compute during scoring public static class PCAModelMetrics extends MetricBuilderUnsupervised<PCAModelMetrics> { public PCAModelMetrics(int dims) { _work = new double[dims]; } @Override public double[] perRow(double[] preds, float[] dataRow, Model m) { return preds; } @Override public ModelMetrics makeModelMetrics(Model m, Frame f) { return m.addModelMetrics(new ModelMetricsPCA(m, f, _customMetric)); } } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/pca/PCA.java
package hex.pca; import hex.DataInfo; import hex.ModelBuilder; import hex.ModelCategory; import hex.ModelMetrics; import hex.genmodel.algos.glrm.GlrmInitialization; import hex.genmodel.algos.glrm.GlrmLoss; import hex.genmodel.algos.glrm.GlrmRegularizer; import hex.glrm.GLRM; import hex.glrm.GLRMModel; import hex.gram.Gram; import hex.gram.Gram.GramTask; import hex.gram.Gram.OuterGramTask; import hex.pca.PCAModel.PCAParameters; import hex.svd.SVD; import hex.svd.SVDModel; import water.DKV; import water.H2O; import water.HeartBeat; import water.Job; import water.fvec.Frame; import water.rapids.Rapids; import water.util.PrettyPrint; import water.util.TwoDimTable; import java.util.ArrayList; import java.util.Arrays; import java.util.LinkedHashMap; import static hex.util.DimensionReductionUtils.*; import static water.util.ArrayUtils.mult; /** * Principal Components Analysis * It computes the principal components from the singular value decomposition using the power method. * <a href = "http://www.cs.yale.edu/homes/el327/datamining2013aFiles/07_singular_value_decomposition.pdf">SVD via Power Method Algorithm</a> * @author anqi_fu * */ public class PCA extends ModelBuilder<PCAModel,PCAModel.PCAParameters,PCAModel.PCAOutput> { // Number of columns in training set (p) private transient int _ncolExp; // With categoricals expanded into 0/1 indicator cols boolean _wideDataset = false; // default with wideDataset set to be false. @Override protected PCADriver trainModelImpl() { return new PCADriver(); } @Override public ModelCategory[] can_build() { return new ModelCategory[]{ ModelCategory.Clustering }; } @Override public boolean isSupervised() { return false; } @Override public boolean havePojo() { return true; } @Override public boolean haveMojo() { return true; } @Override protected void checkMemoryFootPrint_impl() { HeartBeat hb = H2O.SELF._heartbeat; // todo: Add to H2O object memory information so we don't have to use heartbeat. // int numCPUs= H2O.NUMCPUS; // proper way to get number of CPUs. double p = hex.util.LinearAlgebraUtils.numColsExp(_train, true); double r = _train.numRows(); boolean useGramSVD = _parms._pca_method == PCAParameters.Method.GramSVD; boolean usePower = _parms._pca_method == PCAParameters.Method.Power; boolean useRandomized = _parms._pca_method == PCAParameters.Method.Randomized; boolean useGLRM = _parms._pca_method == PCAParameters.Method.GLRM; // gets to zero if nChunks=1. Denote number of reduces to combine results from chunks. Each chunk will store // its gram matrix using half the memory needed since it is symmetrical. Hence, total number of of grams // that will be created will be multiplied by the gram matrix size * number of reduces to be done. double gramSize = _train.lastVec().nChunks()==1 ? 1 : Math.log((double) _train.lastVec().nChunks()) / Math.log(2.); long mem_usage = (useGramSVD || usePower || useRandomized || useGLRM) ? (long) (hb._cpus_allowed * p * p * 8/*doubles*/ * gramSize) : 1; //one gram per core long mem_usage_w = (useGramSVD || usePower || useRandomized || useGLRM) ? (long) (hb._cpus_allowed * r * r * 8/*doubles*/ * gramSize) : 1; long max_mem = hb.get_free_mem(); if ((mem_usage > max_mem) && (mem_usage_w > max_mem)) { String msg = "Gram matrices (one per thread) won't fit in the driver node's memory (" + PrettyPrint.bytes(mem_usage) + " > " + PrettyPrint.bytes(max_mem) + ") - try reducing the number of columns and/or the number of categorical factors."; error("_train", msg); } // _wideDataset is true if original memory does not fit. 
if (mem_usage > max_mem) { _wideDataset = true; // have to set _wideDataset in this case } else { // both ways fit into memory. Want to choose wideDataset if p is too big. if ((p > 5000) && ( r < 5000)) { _wideDataset = true; } } } /* Set value of wideDataset. Note that this routine is used for test purposes only and not for users. */ public void setWideDataset(boolean isWide) { _wideDataset = isWide; } // Called from an http request public PCA(PCAParameters parms) { super(parms); init(false); } public PCA(boolean startup_once) { super(new PCAParameters(),startup_once); } @Override public void init(boolean expensive) { super.init(expensive); if (_parms._max_iterations < 1 || _parms._max_iterations > 1e6) { error("_max_iterations", "max_iterations must be between 1 and 1e6 inclusive"); } if (_train == null) { return; } _ncolExp = hex.util.LinearAlgebraUtils.numColsExp(_train,_parms._use_all_factor_levels); // if (_ncolExp < 2) error("_train", "_train must have more than one column when categoricals are expanded"); // TODO: Initialize _parms._k = min(ncolExp(_train), nrow(_train)) if not set int k_min = (int)Math.min(_ncolExp, _train.numRows()); if (_parms._k < 1) { _parms._k = k_min; warn("_k", "_k is set to be "+k_min); } else if (_parms._k > k_min) { error("_k", "_k must be between 1 and " + k_min); } if (!_parms._use_all_factor_levels && _parms._pca_method == PCAParameters.Method.GLRM) { error("_use_all_factor_levels", "GLRM only implemented for _use_all_factor_levels = true"); } if (_parms._pca_method != PCAParameters.Method.GLRM && expensive && error_count() == 0) { if (!(_train.hasNAs()) || _parms._impute_missing) { checkMemoryFootPrint(); // perform memory check here if dataset contains no NAs or if impute_missing enabled } } } class PCADriver extends Driver { protected void buildTables(PCAModel pca, String[] rowNames) { // Eigenvectors are just the V matrix String[] colTypes = new String[_parms._k]; String[] colFormats = new String[_parms._k]; String[] colHeaders = new String[_parms._k]; Arrays.fill(colTypes, "double"); Arrays.fill(colFormats, "%5f"); assert rowNames.length == pca._output._eigenvectors_raw.length; for (int i = 0; i < colHeaders.length; i++) { colHeaders[i] = "PC" + String.valueOf(i + 1); } pca._output._eigenvectors = new TwoDimTable("Rotation", null, rowNames, colHeaders, colTypes, colFormats, "", new String[pca._output._eigenvectors_raw.length][], pca._output._eigenvectors_raw); // Importance of principal components double[] vars = new double[pca._output._std_deviation.length]; double[] prop_var = new double[pca._output._std_deviation.length]; // Proportion of total variance double[] cum_var = new double[pca._output._std_deviation.length]; // Cumulative proportion of total variance generateIPC(pca._output._std_deviation, pca._output._total_variance, vars, prop_var, cum_var); pca._output._importance = new TwoDimTable("Importance of components", null, new String[]{"Standard deviation", "Proportion of Variance", "Cumulative Proportion"}, colHeaders, colTypes, colFormats, "", new String[3][], new double[][]{pca._output._std_deviation, prop_var, cum_var}); pca._output._model_summary = pca._output._importance; } protected void computeStatsFillModel(PCAModel pca, SVDModel svd, Gram gram) { // Fill PCA model with additional info needed for scoring pca._output._normSub = svd._output._normSub; pca._output._normMul = svd._output._normMul; pca._output._permutation = svd._output._permutation; pca._output._nnums = svd._output._nnums; pca._output._ncats = svd._output._ncats; 
pca._output._catOffsets = svd._output._catOffsets; pca._output._nobs = svd._output._nobs; if (_parms._k != svd._parms._nv) { // not enough eigenvalues was found. _job.warn("_train PCA: Dataset is rank deficient. _parms._k was "+_parms._k+" and is now set to "+svd._parms._nv); pca._parms._k = svd._parms._nv; _parms._k = svd._parms._nv; } // Fill model with eigenvectors and standard deviations pca._output._std_deviation = mult(svd._output._d, 1.0 / Math.sqrt(svd._output._nobs - 1.0)); pca._output._eigenvectors_raw = svd._output._v; // Since gram = X'X/n, but variance requires n-1 in denominator pca._output._total_variance = gram != null?gram.diagSum()*pca._output._nobs/(pca._output._nobs-1.0):svd._output._total_variance; buildTables(pca, svd._output._names_expanded); } protected void computeStatsFillModel(PCAModel pca, GLRMModel glrm, Gram gram) { assert glrm._parms._recover_svd; // Fill model with additional info needed for scoring pca._output._normSub = glrm._output._normSub; pca._output._normMul = glrm._output._normMul; pca._output._permutation = glrm._output._permutation; pca._output._nnums = glrm._output._nnums; pca._output._ncats = glrm._output._ncats; pca._output._catOffsets = glrm._output._catOffsets; pca._output._objective = glrm._output._objective; // Fill model with eigenvectors and standard deviations double dfcorr = 1.0 / Math.sqrt(_train.numRows() - 1.0); pca._output._std_deviation = new double[_parms._k]; pca._output._eigenvectors_raw = glrm._output._eigenvectors_raw; pca._output._nobs = _train.numRows(); if (gram._diagN == 0) { // no categorical columns for (int i = 0; i < glrm._output._singular_vals.length; i++) { pca._output._std_deviation[i] = dfcorr * glrm._output._singular_vals[i]; } // Since gram = X'X/n, but variance requires n-1 in denominator pca._output._total_variance = gram.diagSum() * pca._output._nobs / (pca._output._nobs - 1.0); } else { // no change to eigen values for categoricals pca._output._std_deviation = glrm._output._std_deviation; pca._output._total_variance = glrm._output._total_variance; } buildTables(pca, glrm._output._names_expanded); } protected void computeStatsFillModel(PCAModel pca, DataInfo dinfo, double[] sval, double[][] eigvec, Gram gram, long nobs) { // Save adapted frame info for scoring later pca._output._normSub = dinfo._normSub == null ? 
new double[dinfo._nums] : dinfo._normSub; if(dinfo._normMul == null) { pca._output._normMul = new double[dinfo._nums]; Arrays.fill(pca._output._normMul, 1.0); } else { pca._output._normMul = dinfo._normMul; } pca._output._permutation = dinfo._permutation; pca._output._nnums = dinfo._nums; pca._output._ncats = dinfo._cats; pca._output._catOffsets = dinfo._catOffsets; double dfcorr = nobs / (nobs - 1.0); pca._output._std_deviation = new double[_parms._k]; // Only want first k standard deviations for(int i = 0; i < _parms._k; i++) { sval[i] = dfcorr * sval[i]; // Degrees of freedom = n-1, where n = nobs = # row observations processed pca._output._std_deviation[i] = Math.sqrt(sval[i]); } pca._output._eigenvectors_raw = new double[eigvec.length][_parms._k]; // Only want first k eigenvectors for(int i = 0; i < eigvec.length; i++) { System.arraycopy(eigvec[i], 0, pca._output._eigenvectors_raw[i], 0, _parms._k); } pca._output._total_variance = dfcorr * gram.diagSum(); // Since gram = X'X/n, but variance requires n-1 in denominator buildTables(pca, dinfo.coefNames()); } // Main worker thread @Override public void computeImpl() { PCAModel model = null; DataInfo dinfo = null, tinfo = null; DataInfo AE = null; Gram gram = null; try { init(true); // Initialize parameters if (error_count() > 0) { throw new IllegalArgumentException("Found validation errors: " + validationErrors()); } // The model to be built model = new PCAModel(dest(), _parms, new PCAModel.PCAOutput(PCA.this)); model.delete_and_lock(_job); // store (possibly) rebalanced input train to pass it to nested SVD job Frame tranRebalanced = new Frame(_train); if (!_parms._impute_missing) { // added warning to user per request from Nidhi _job.warn("_train: Dataset used may contain fewer number of rows due to removal of rows with " + "NA/missing values. If this is not desirable, set impute_missing argument in pca call to " + "TRUE/True/true/... 
depending on the client language."); } if ((!_parms._impute_missing) && tranRebalanced.hasNAs()) { // remove NAs rows tinfo = new DataInfo(_train, _valid, 0, _parms._use_all_factor_levels, _parms._transform, DataInfo.TransformType.NONE, /* skipMissing */ !_parms._impute_missing, /* imputeMissing */ _parms._impute_missing, /* missingBucket */ false, /* weights */ false, /* offset */ false, /* fold */ false, /* intercept */ false); DKV.put(tinfo._key, tinfo); DKV.put(tranRebalanced._key, tranRebalanced); _train = Rapids.exec(String.format("(na.omit %s)", tranRebalanced._key)).getFrame(); // remove NA rows DKV.remove(tranRebalanced._key); checkMemoryFootPrint(); // check memory footprint again to enable wideDataSet } dinfo = new DataInfo(_train, _valid, 0, _parms._use_all_factor_levels, _parms._transform, DataInfo.TransformType.NONE, /* skipMissing */ !_parms._impute_missing, /* imputeMissing */ _parms._impute_missing, /* missingBucket */ false, /* weights */ false, /* offset */ false, /* fold */ false, /* intercept */ false); DKV.put(dinfo._key, dinfo); if (!_parms._impute_missing && tranRebalanced.hasNAs()) { // fixed the std and mean of dinfo to that of the frame before removing NA rows dinfo._normMul = tinfo._normMul; dinfo._numMeans = tinfo._numMeans; dinfo._numNAFill = dinfo._numMeans; // NAs will be imputed with means dinfo._normSub = tinfo._normSub; } if(_parms._pca_method == PCAParameters.Method.GramSVD) { // Calculate and save Gram matrix of training data // NOTE: Gram computes A'A/n where n = nrow(A) = number of rows in training set (excluding rows with NAs) _job.update(1, "Begin distributed calculation of Gram matrix"); OuterGramTask ogtsk = null; GramTask gtsk = null; if (_wideDataset) { ogtsk = new OuterGramTask(_job._key, dinfo).doAll(dinfo._adaptedFrame); // 30 times slower than gram gram = ogtsk._gram; model._output._nobs = ogtsk._nobs; } else { gtsk = new GramTask(_job._key, dinfo).doAll(dinfo._adaptedFrame); gram = gtsk._gram; // TODO: This ends up with all NaNs if training data has too many missing values assert gram.fullN() == _ncolExp; model._output._nobs = gtsk._nobs; } // Cannot calculate SVD if all rows contain missing value(s) and hence were skipped // and if the user specify k to be higher than min(number of columns, number of rows) if((model._output._nobs == 0) || (model._output._nobs < _parms._k )) { error("_train", "Number of row in _train is less than k. " + "Consider setting impute_missing = TRUE or using pca_method = 'GLRM' instead or reducing the " + "value of parameter k."); } if (error_count() > 0) { throw new IllegalArgumentException("Found validation errors: " + validationErrors()); } // Compute SVD of Gram A'A/n using netlib-java (MTJ) library _job.update(1, "Calculating SVD of Gram matrix locally"); double[][] gramMatrix; gramMatrix = _wideDataset ? 
ogtsk._gram.getXX() : gtsk._gram.getXX(); PCAInterface svd = null; svd = PCAImplementationFactory.createSVDImplementation(gramMatrix, _parms._pca_implementation); assert svd != null; double[][] rightEigenvectors = svd.getPrincipalComponents(); if (_wideDataset) { // correct for the eigenvector by t(A)*eigenvector for wide dataset rightEigenvectors = getTransformedEigenvectors(dinfo, rightEigenvectors); } double[] variances = svd.getVariances(); PCA.this._job.update(1, "Computing stats from SVD using " + _parms._pca_implementation.toString()); computeStatsFillModel(model, dinfo, variances, rightEigenvectors, gram, model._output._nobs); model._output._training_time_ms.add(System.currentTimeMillis()); // generate variables for scoring_history generation LinkedHashMap<String, ArrayList> scoreTable = new LinkedHashMap<>(); scoreTable.put("Timestamp", model._output._training_time_ms); model._output._scoring_history = createScoringHistoryTableDR(scoreTable, "Scoring History for GramSVD", _job.start_time()); // model._output._scoring_history.tableHeader = "Scoring history from GLRM"; } else if(_parms._pca_method == PCAParameters.Method.Power || _parms._pca_method == PCAParameters.Method.Randomized) { SVDModel.SVDParameters parms = new SVDModel.SVDParameters(); parms._train = _parms._train; parms._valid = _parms._valid; parms._ignored_columns = _parms._ignored_columns; parms._ignore_const_cols = _parms._ignore_const_cols; parms._score_each_iteration = _parms._score_each_iteration; parms._use_all_factor_levels = _parms._use_all_factor_levels; parms._transform = _parms._transform; parms._nv = _parms._k; parms._max_iterations = _parms._max_iterations; parms._seed = _parms._seed; parms._impute_missing = _parms._impute_missing; parms._max_runtime_secs = _parms._max_runtime_secs; // Set method for computing SVD accordingly if(_parms._pca_method == PCAParameters.Method.Power) { parms._svd_method = SVDModel.SVDParameters.Method.Power; } else if(_parms._pca_method == PCAParameters.Method.Randomized) { parms._svd_method = SVDModel.SVDParameters.Method.Randomized; } // Calculate standard deviation, but not projection parms._only_v = false; parms._keep_u = false; parms._save_v_frame = false; SVD svdP = new SVD(parms, _job); svdP.setWideDataset(_wideDataset); // force to treat dataset as wide even though it is not. // Build an SVD model SVDModel svd = svdP.trainModelNested(tranRebalanced); svd.remove(); // Remove from DKV // Recover PCA results from SVD model _job.update(1, "Computing stats from SVD"); if (_parms._pca_method == PCAParameters.Method.Randomized) { // okay to use it here. 
GramTask gtsk = new GramTask(_job._key, dinfo).doAll(dinfo._adaptedFrame); gram = gtsk._gram; // TODO: This ends up with all NaNs if training data has too many missing values*/ computeStatsFillModel(model, svd, gram); } else { computeStatsFillModel(model, svd, null); } model._output._scoring_history = svd._output._scoring_history; } else if(_parms._pca_method == PCAParameters.Method.GLRM) { GLRMModel.GLRMParameters parms = new GLRMModel.GLRMParameters(); parms._train = _parms._train; parms._valid = _parms._valid; parms._ignored_columns = _parms._ignored_columns; parms._ignore_const_cols = _parms._ignore_const_cols; parms._score_each_iteration = _parms._score_each_iteration; parms._transform = _parms._transform; parms._k = _parms._k; parms._max_iterations = _parms._max_iterations; parms._seed = _parms._seed; parms._max_runtime_secs = _parms._max_runtime_secs; parms._recover_svd = true; parms._loss = GlrmLoss.Quadratic; parms._gamma_x = parms._gamma_y = 0; parms._regularization_x = GlrmRegularizer.None; parms._regularization_y = GlrmRegularizer.None; if (dinfo._cats > 0) parms._init = GlrmInitialization.PlusPlus; else parms._init = GlrmInitialization.SVD; // changed from PlusPlus to SVD. Seems to give better result // Build an SVD model // Hack: we have to resort to unsafe type casts because _job is of Job<PCAModel> type, whereas a GLRM // model requires a Job<GLRMModel> _job. If anyone knows how to avoid this hack, please fix it! GLRM glrmP = new GLRM(parms, (Job)_job); glrmP.setWideDataset(_wideDataset); // force to treat dataset as wide even though it is not. GLRMModel glrm = glrmP.trainModelNested(tranRebalanced); glrm._output._representation_key.get().delete(); glrm.remove(); // Remove from DKV // Recover PCA results from GLRM model _job.update(1, "Computing stats from GLRM decomposition"); GramTask gtsk = new GramTask(_job._key, dinfo).doAll(dinfo._adaptedFrame); gram = gtsk._gram; // TODO: This ends up with all NaNs if training data has too many missing values computeStatsFillModel(model, glrm, gram); model._output._scoring_history = glrm._output._scoring_history; model._output._scoring_history.setTableHeader("Scoring history from GLRM"); } _job.update(1, "Scoring and computing metrics on training data"); if (_parms._compute_metrics) { model.score(_parms.train()).delete(); // This scores on the training data and appends a ModelMetrics ModelMetrics mm = ModelMetrics.getFromDKV(model,_parms.train()); model._output._training_metrics = mm; } // At the end: validation scoring (no need to gather scoring history) _job.update(1, "Scoring and computing metrics on validation data"); if (_valid != null) { model.score(_parms.valid()).delete(); //this appends a ModelMetrics on the validation set model._output._validation_metrics = ModelMetrics.getFromDKV(model,_parms.valid()); } model.update(_job); } catch (Exception e) { throw new RuntimeException(e); } finally { if (model != null) { model.unlock(_job); } if (dinfo != null) { dinfo.remove(); } if (tinfo != null) { tinfo.remove(); } if (AE != null) { AE.remove(); } } } } }
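computeStatsFillModel above turns eigenvalues of the Gram matrix X'X/n into principal-component standard deviations by rescaling with n/(n-1), because the Gram uses n in its denominator while the sample variance uses n-1. A standalone sketch of just that conversion (names and values are illustrative, not part of the H2O API):

public class PcaStdDevSketch {
  // sval[i]: i-th eigenvalue of the Gram matrix X'X/n; nobs: number of rows actually used.
  static double[] stdDeviations(double[] sval, long nobs, int k) {
    double dfcorr = nobs / (nobs - 1.0);   // switch the denominator from n to n-1
    double[] sd = new double[k];
    for (int i = 0; i < k; i++)
      sd[i] = Math.sqrt(dfcorr * sval[i]); // standard deviation of the i-th principal component
    return sd;
  }
  public static void main(String[] args) {
    double[] sval = {4.0, 1.0, 0.25};      // illustrative Gram eigenvalues
    System.out.println(java.util.Arrays.toString(stdDeviations(sval, 100, 3)));
  }
}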
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/pca/PCAImplementation.java
package hex.pca; public enum PCAImplementation { MTJ_EVD_DENSEMATRIX, MTJ_EVD_SYMMMATRIX, MTJ_SVD_DENSEMATRIX, JAMA; final static PCAImplementation fastestImplementation = MTJ_EVD_SYMMMATRIX; // set to the fastest implementation public static PCAImplementation getFastestImplementation() { return fastestImplementation; } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/pca/PCAImplementationFactory.java
package hex.pca; import hex.pca.jama.PCAJama; import hex.pca.mtj.PCA_MTJ_EVD_DenseMatrix; import hex.pca.mtj.PCA_MTJ_EVD_SymmMatrix; import hex.pca.mtj.PCA_MTJ_SVD_DenseMatrix; class PCAImplementationFactory { static PCAInterface createSVDImplementation(double[][] gramMatrix, PCAImplementation implementation) throws Exception { switch (implementation) { case MTJ_EVD_DENSEMATRIX: return new PCA_MTJ_EVD_DenseMatrix(gramMatrix); case MTJ_EVD_SYMMMATRIX: return new PCA_MTJ_EVD_SymmMatrix(gramMatrix); case MTJ_SVD_DENSEMATRIX: return new PCA_MTJ_SVD_DenseMatrix(gramMatrix); case JAMA: return new PCAJama(gramMatrix); default: throw new Exception("Unrecognized svdImplementation " + implementation.toString()); } } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/pca/PCAInterface.java
package hex.pca; public interface PCAInterface { double[] getVariances(); double[][] getPrincipalComponents(); }
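Taken together, PCAImplementation, PCAImplementationFactory and PCAInterface form a small strategy-style API: choose a backend, build a decomposition of a Gram matrix, then read off variances and principal components. The sketch below assumes the h2o-algos jar (and its MTJ/JAMA dependencies) is on the classpath and, because the factory is package-private, that the class lives in package hex.pca; the class name and Gram values are illustrative:

package hex.pca;

public class PcaImplementationSketch {
  public static void main(String[] args) throws Exception {
    double[][] gram = {{2.0, 0.4}, {0.4, 1.0}};                           // toy symmetric Gram matrix
    PCAImplementation impl = PCAImplementation.getFastestImplementation(); // MTJ_EVD_SYMMMATRIX by default
    PCAInterface svd = PCAImplementationFactory.createSVDImplementation(gram, impl);
    double[] variances = svd.getVariances();                  // component variances reported by the backend
    double[][] components = svd.getPrincipalComponents();     // principal component directions
    System.out.println(java.util.Arrays.toString(variances));
    System.out.println(java.util.Arrays.deepToString(components));
  }
}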
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/pca/PCAModel.java
package hex.pca; import hex.DataInfo; import hex.Model; import hex.ModelCategory; import hex.ModelMetrics; import water.DKV; import water.Job; import water.Key; import water.MRTask; import water.codegen.CodeGenerator; import water.codegen.CodeGeneratorPipeline; import water.exceptions.JCodeSB; import water.fvec.Chunk; import water.fvec.Frame; import water.udf.CFuncRef; import water.util.JCodeGen; import water.util.SBPrintStream; import water.util.TwoDimTable; import java.util.ArrayList; public class PCAModel extends Model<PCAModel,PCAModel.PCAParameters,PCAModel.PCAOutput> { public static class PCAParameters extends Model.Parameters { public String algoName() { return "PCA"; } public String fullName() { return "Principal Components Analysis"; } public String javaName() { return PCAModel.class.getName(); } @Override public long progressUnits() { return _pca_method == PCAParameters.Method.GramSVD ? 5 : 3; } public DataInfo.TransformType _transform = DataInfo.TransformType.NONE; // Data transformation public Method _pca_method = Method.GramSVD; // Method for computing PCA public PCAImplementation _pca_implementation = PCAImplementation.getFastestImplementation(); // PCA implementation public int _k = 1; // Number of principal components public int _max_iterations = 1000; // Max iterations public boolean _use_all_factor_levels = false; // When expanding categoricals, should first level be kept or dropped? public boolean _compute_metrics = true; // Should a second pass be made through data to compute metrics? public boolean _impute_missing = false; // Should missing numeric values be imputed with the column mean? public enum Method { GramSVD, Power, Randomized, GLRM } } public static class PCAOutput extends Model.Output { // GLRM final value of loss function public double _objective; // Principal components (eigenvectors) public double[/*feature*/][/*k*/] _eigenvectors_raw; public TwoDimTable _eigenvectors; // Standard deviation of each principal component public double[] _std_deviation; // Importance of principal components // Standard deviation, proportion of variance explained, and cumulative proportion of variance explained public TwoDimTable _importance; // Number of categorical and numeric columns public int _ncats; public int _nnums; // Number of good rows in training frame (not skipped) public long _nobs; // Total column variance for expanded and transformed data public double _total_variance; // Categorical offset vector public int[] _catOffsets; // If standardized, mean of each numeric data column public double[] _normSub; // If standardized, one over standard deviation of each numeric data column public double[] _normMul; // Permutation matrix mapping training col indices to adaptedFrame public int[] _permutation; // the following fields are added for scoring history which can different fields depending on the PCA Method // here are the common fields for all PCA methods public ArrayList<Long> _training_time_ms = new ArrayList<>(); public PCAOutput(PCA b) { super(b); } /** Override because base class implements ncols-1 for features with the * last column as a response variable; for PCA all the columns are * features. 
*/ @Override public int nfeatures() { return _names.length; } @Override public ModelCategory getModelCategory() { return ModelCategory.DimReduction; } } public PCAModel(Key selfKey, PCAParameters parms, PCAOutput output) { super(selfKey,parms,output); } @Override public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) { return new ModelMetricsPCA.PCAModelMetrics(_parms._k); } @Override protected PredictScoreResult predictScoreImpl(Frame origFr, Frame adaptedFr, String destination_key, final Job j, boolean computeMetrics, CFuncRef customMetricFunc) { Frame adaptFrm = new Frame(adaptedFr); for(int i = 0; i < _parms._k; i++) adaptFrm.add("PC"+String.valueOf(i+1),adaptFrm.anyVec().makeZero()); new MRTask() { @Override public void map( Chunk chks[] ) { if (isCancelled() || j != null && j.stop_requested()) return; double tmp [] = new double[_output._names.length]; double preds[] = new double[_parms._k]; for( int row = 0; row < chks[0]._len; row++) { double p[] = score0(chks, row, tmp, preds); for( int c=0; c<preds.length; c++ ) chks[_output._names.length+c].set(row, p[c]); } if (j != null) j.update(1); } }.doAll(adaptFrm); // Return the projection into principal component space int x = _output._names.length, y = adaptFrm.numCols(); Frame f = adaptFrm.extractFrame(x, y); // this will call vec_impl() and we cannot call the delete() below just yet f = new Frame(Key.<Frame>make(destination_key), f.names(), f.vecs()); DKV.put(f); ModelMetrics.MetricBuilder<?> mb = makeMetricBuilder(null); return new PredictScoreResult(mb, f, f); } @Override protected double[] score0(double data[/*ncols*/], double preds[/*k*/]) { int numStart = _output._catOffsets[_output._catOffsets.length-1]; assert data.length == _output._nnums + _output._ncats; for(int i = 0; i < _parms._k; i++) { preds[i] = 0; for (int j = 0; j < _output._ncats; j++) { double tmp = data[_output._permutation[j]]; if (Double.isNaN(tmp)) continue; // Missing categorical values are skipped int last_cat = _output._catOffsets[j+1]-_output._catOffsets[j]-1; int level = (int)tmp - (_parms._use_all_factor_levels ? 
0:1); // Reduce index by 1 if first factor level dropped during training if (level < 0 || level > last_cat) continue; // Skip categorical level in test set but not in train preds[i] += _output._eigenvectors_raw[_output._catOffsets[j]+level][i]; } int dcol = _output._ncats; int vcol = numStart; for (int j = 0; j < _output._nnums; j++) { preds[i] += (data[_output._permutation[dcol]] - _output._normSub[j]) * _output._normMul[j] * _output._eigenvectors_raw[vcol][i]; dcol++; vcol++; } } return preds; } @Override protected SBPrintStream toJavaInit(SBPrintStream sb, CodeGeneratorPipeline fileCtx) { sb = super.toJavaInit(sb, fileCtx); sb.ip("public boolean isSupervised() { return " + isSupervised() + "; }").nl(); sb.ip("public int nfeatures() { return "+_output.nfeatures()+"; }").nl(); sb.ip("public int nclasses() { return "+_parms._k+"; }").nl(); // This is model name final String mname = JCodeGen.toJavaId(_key.toString()); fileCtx.add(new CodeGenerator() { @Override public void generate(JCodeSB out) { if (_output._nnums > 0) { JCodeGen.toClassWithArray(out, null, mname + "_NORMMUL", _output._normMul, "Standardization/Normalization scaling factor for numerical variables."); JCodeGen.toClassWithArray(out, null, mname + "_NORMSUB", _output._normSub, "Standardization/Normalization offset for numerical variables."); } JCodeGen.toClassWithArray(out, null, mname + "_CATOFFS", _output._catOffsets, "Categorical column offsets."); JCodeGen.toClassWithArray(out, null, mname + "_PERMUTE", _output._permutation, "Permutation index vector."); JCodeGen.toClassWithArray(out, null, mname + "_EIGVECS", _output._eigenvectors_raw, "Eigenvector matrix."); } }); return sb; } @Override protected void toJavaPredictBody(SBPrintStream bodySb, CodeGeneratorPipeline classCtx, CodeGeneratorPipeline fileCtx, final boolean verboseCode) { // This is model name final String mname = JCodeGen.toJavaId(_key.toString()); bodySb.i().p("java.util.Arrays.fill(preds,0);").nl(); final int cats = _output._ncats; final int nums = _output._nnums; bodySb.i().p("final int nstart = ").pj(mname+"_CATOFFS", "VALUES").p("[").pj(mname+"_CATOFFS", "VALUES").p(".length-1];").nl(); bodySb.i().p("for(int i = 0; i < ").p(_parms._k).p("; i++) {").nl(); // Categorical columns bodySb.i(1).p("for(int j = 0; j < ").p(cats).p("; j++) {").nl(); bodySb.i(2).p("double d = data[").pj(mname+"_PERMUTE", "VALUES").p("[j]];").nl(); bodySb.i(2).p("if(Double.isNaN(d)) continue;").nl(); bodySb.i(2).p("int last = ").pj(mname+"_CATOFFS", "VALUES").p("[j+1]-").pj(mname+"_CATOFFS", "VALUES").p("[j]-1;").nl(); bodySb.i(2).p("int c = (int)d").p(_parms._use_all_factor_levels ? ";":"-1;").nl(); bodySb.i(2).p("if(c < 0 || c > last) continue;").nl(); bodySb.i(2).p("preds[i] += ").pj(mname+"_EIGVECS", "VALUES").p("[").pj(mname+"_CATOFFS", "VALUES").p("[j]+c][i];").nl(); bodySb.i(1).p("}").nl(); // Numeric columns if (_output._nnums > 0) { bodySb.i(1).p("for(int j = 0; j < ").p(nums).p("; j++) {").nl(); bodySb.i(2).p("preds[i] += (data[").pj(mname + "_PERMUTE", "VALUES").p("[j" + (cats > 0 ? "+" + cats : "") + "]]-").pj(mname + "_NORMSUB", "VALUES").p("[j])*").pj(mname + "_NORMMUL", "VALUES").p("[j]*").pj(mname + "_EIGVECS", "VALUES").p("[j" + (cats > 0 ? "+ nstart" : "") + "][i];").nl(); bodySb.i(1).p("}").nl(); } bodySb.i().p("}").nl(); } @Override public PCAMojoWriter getMojo() { return new PCAMojoWriter(this); } }
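PCAModel.score0 above projects one input row onto the k principal components: each categorical column adds the eigenvector entry at catOffsets[j]+level (NAs and levels unseen in training are skipped), and each numeric column is centered by _normSub and scaled by _normMul before being multiplied by the corresponding eigenvector entry. A minimal standalone sketch of the numeric part of that projection follows; it uses plain Java arrays, omits the categorical/catOffsets handling, and all names in it are illustrative rather than H2O API.

// Minimal standalone sketch of the projection PCAModel.score0 performs for purely numeric
// data: preds[i] = sum_j ((x[j] - normSub[j]) * normMul[j]) * eigvecs[j][i].
// Plain Java, no H2O types; illustrative only.
public class PcaProjectionSketch {
  static double[] project(double[] row, double[] normSub, double[] normMul,
                          double[][] eigvecs /* [feature][component] */, int k) {
    double[] preds = new double[k];
    for (int i = 0; i < k; i++) {
      double s = 0;
      for (int j = 0; j < row.length; j++) {
        if (Double.isNaN(row[j])) continue;           // skip missing values, as score0 does for categoricals
        s += (row[j] - normSub[j]) * normMul[j] * eigvecs[j][i];
      }
      preds[i] = s;
    }
    return preds;
  }

  public static void main(String[] args) {
    double[][] eig = {{0.8, 0.6}, {0.6, -0.8}};       // two features, two components
    double[] p = project(new double[]{1.5, -0.5}, new double[]{1.0, 0.0},
                         new double[]{1.0, 1.0}, eig, 2);
    System.out.println(java.util.Arrays.toString(p)); // projection onto PC1 and PC2
  }
}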
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/pca/PCAMojoWriter.java
package hex.pca; import hex.ModelMojoWriter; import water.MemoryManager; import java.io.IOException; import java.nio.ByteBuffer; public class PCAMojoWriter extends ModelMojoWriter<PCAModel, PCAModel.PCAParameters, PCAModel.PCAOutput> { @SuppressWarnings("unused") // Called through reflection in ModelBuildersHandler public PCAMojoWriter() {} public PCAMojoWriter(PCAModel model) { super(model); } @Override public String mojoVersion() { return "1.00"; } @Override protected void writeModelData() throws IOException { writekv("pcaMethod", model._parms._pca_method.toString()); // for reference writekv("pca_impl", model._parms._pca_implementation.toString()); writekv("k", model._parms._k); writekv("use_all_factor_levels", model._parms._use_all_factor_levels); writekv("permutation", model._output._permutation); writekv("ncats", model._output._ncats); writekv("nnums", model._output._nnums); writekv("normSub", model._output._normSub); writekv("normMul", model._output._normMul); writekv("catOffsets", model._output._catOffsets); int n = model._output._eigenvectors_raw.length*model._output._eigenvectors_raw[0].length; writekv("eigenvector_size", model._output._eigenvectors_raw.length); ByteBuffer bb = ByteBuffer.wrap(MemoryManager.malloc1(n * 8)); for (double[] row : model._output._eigenvectors_raw) for (double val : row) bb.putDouble(val); writeblob("eigenvectors_raw", bb.array()); } }
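PCAMojoWriter.writeModelData packs the eigenvector matrix row-major into a single blob of doubles ("eigenvectors_raw"), with "eigenvector_size" recording the number of rows. Below is a hedged sketch of the matching read side, assuming only the layout shown above (rows * cols doubles in ByteBuffer's default big-endian order); the decoder is illustrative and not part of the MOJO reader API.

import java.nio.ByteBuffer;

// Illustrative decoder for a blob written the way PCAMojoWriter packs _eigenvectors_raw:
// rows * cols doubles in row-major order, ByteBuffer default byte order.
public class EigenvectorBlobSketch {
  static double[][] decode(byte[] blob, int rows) {
    int cols = blob.length / 8 / rows;                // 8 bytes per double
    ByteBuffer bb = ByteBuffer.wrap(blob);
    double[][] m = new double[rows][cols];
    for (int r = 0; r < rows; r++)
      for (int c = 0; c < cols; c++)
        m[r][c] = bb.getDouble();
    return m;
  }

  public static void main(String[] args) {
    double[][] eig = {{1.0, 2.0}, {3.0, 4.0}};
    ByteBuffer bb = ByteBuffer.allocate(4 * 8);
    for (double[] row : eig) for (double v : row) bb.putDouble(v);
    double[][] back = decode(bb.array(), 2);
    System.out.println(java.util.Arrays.deepToString(back)); // [[1.0, 2.0], [3.0, 4.0]]
  }
}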
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/pca
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/pca/jama/PCAJama.java
package hex.pca.jama; import Jama.Matrix; import Jama.SingularValueDecomposition; import hex.pca.PCAInterface; public class PCAJama implements PCAInterface { private Matrix gramMatrix; private SingularValueDecomposition svd; private double[][] rightEigenvectors; public PCAJama(double[][] gramMatrix) { this.gramMatrix = new Matrix(gramMatrix); runSVD(); } @Override public double[] getVariances() { return svd.getSingularValues(); } @Override public double[][] getPrincipalComponents() { return rightEigenvectors; } private void runSVD() { svd = gramMatrix.svd(); rightEigenvectors = svd.getV().getArray(); } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/pca
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/pca/mtj/PCA_MTJ_EVD_DenseMatrix.java
package hex.pca.mtj; import hex.pca.PCAInterface; import hex.util.EigenPair; import hex.util.LinearAlgebraUtils; import no.uib.cipr.matrix.DenseMatrix; import no.uib.cipr.matrix.Matrix; import no.uib.cipr.matrix.NotConvergedException; import water.util.ArrayUtils; public class PCA_MTJ_EVD_DenseMatrix implements PCAInterface { private DenseMatrix gramMatrix; private no.uib.cipr.matrix.EVD evd; private double[] eigenvalues; private double[][] eigenvectors; public PCA_MTJ_EVD_DenseMatrix(double[][] gramMatrix) { this.gramMatrix = new DenseMatrix(gramMatrix); runEVD(); } private void runEVD() { int gramDimension = gramMatrix.numRows(); try { evd = no.uib.cipr.matrix.EVD.factorize(gramMatrix); } catch (NotConvergedException e) { throw new RuntimeException(e); } // initial eigenpairs eigenvalues = evd.getRealEigenvalues(); Matrix eigenvectorMatrix = evd.getRightEigenvectors(); eigenvectors = LinearAlgebraUtils.reshape1DArray(((DenseMatrix) eigenvectorMatrix).getData(), gramDimension, gramDimension); // sort eigenpairs in descending order according to the magnitude of eigenvalues EigenPair[] eigenPairs = LinearAlgebraUtils.createReverseSortedEigenpairs(eigenvalues, eigenvectors); eigenvalues = LinearAlgebraUtils.extractEigenvaluesFromEigenpairs(eigenPairs); eigenvectors = ArrayUtils.transpose(LinearAlgebraUtils.extractEigenvectorsFromEigenpairs(eigenPairs)); } @Override public double[] getVariances() { return eigenvalues; } @Override public double[][] getPrincipalComponents() { return eigenvectors; } }
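The EVD-based implementations have to reorder their output because the eigen decomposition returns eigenvalues in no guaranteed order: the code sorts the eigenpairs in descending order (the source comment says by magnitude; for a positive semi-definite Gram matrix magnitude and value coincide) and permutes the eigenvectors to match. The following standalone sketch shows that reordering with a small helper of its own rather than LinearAlgebraUtils.

import java.util.Arrays;
import java.util.Comparator;

// Sketch of the eigenpair reordering: sort indices by descending eigenvalue and permute
// the eigenvector columns accordingly. Standalone and illustrative, not the H2O utility.
public class EigenSortSketch {
  static void sortDescending(double[] eigenvalues,
                             double[][] eigenvectors /* [i][j] = component i of eigenvector j */) {
    Integer[] order = new Integer[eigenvalues.length];
    for (int i = 0; i < order.length; i++) order[i] = i;
    Arrays.sort(order, Comparator.comparingDouble((Integer i) -> eigenvalues[i]).reversed());
    double[] ev = eigenvalues.clone();
    double[][] vec = new double[eigenvectors.length][eigenvectors[0].length];
    for (int j = 0; j < order.length; j++) {
      eigenvalues[j] = ev[order[j]];
      for (int i = 0; i < eigenvectors.length; i++) vec[i][j] = eigenvectors[i][order[j]];
    }
    for (int i = 0; i < eigenvectors.length; i++) eigenvectors[i] = vec[i];
  }

  public static void main(String[] args) {
    double[] vals = {0.5, 2.0, 1.0};
    double[][] vecs = {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}};
    sortDescending(vals, vecs);
    System.out.println(Arrays.toString(vals));        // [2.0, 1.0, 0.5]
    System.out.println(Arrays.deepToString(vecs));    // eigenvector columns permuted to match
  }
}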
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/pca
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/pca/mtj/PCA_MTJ_EVD_SymmMatrix.java
package hex.pca.mtj; import hex.pca.PCAInterface; import hex.util.EigenPair; import hex.util.LinearAlgebraUtils; import no.uib.cipr.matrix.DenseMatrix; import no.uib.cipr.matrix.NotConvergedException; import no.uib.cipr.matrix.UpperSymmDenseMatrix; import water.util.ArrayUtils; public class PCA_MTJ_EVD_SymmMatrix implements PCAInterface { private UpperSymmDenseMatrix symmGramMatrix; private no.uib.cipr.matrix.SymmDenseEVD symmDenseEVD; private double[][] eigenvectors; private double[] eigenvalues; public PCA_MTJ_EVD_SymmMatrix(double[][] gramMatrix) { this.symmGramMatrix = new UpperSymmDenseMatrix(new DenseMatrix(gramMatrix)); runEVD(); } @Override public double[] getVariances() { return eigenvalues; } @Override public double[][] getPrincipalComponents() { return eigenvectors; } private void runEVD() { int gramDimension = symmGramMatrix.numRows(); try { symmDenseEVD = no.uib.cipr.matrix.SymmDenseEVD.factorize(this.symmGramMatrix); } catch (NotConvergedException e) { throw new RuntimeException(e); } // initial eigenpairs eigenvalues = symmDenseEVD.getEigenvalues(); double[] Vt_1D = symmDenseEVD.getEigenvectors().getData(); eigenvectors = LinearAlgebraUtils.reshape1DArray(Vt_1D, gramDimension, gramDimension); // sort eigenpairs in descending order according to the magnitude of eigenvalues EigenPair[] eigenPairs = LinearAlgebraUtils.createReverseSortedEigenpairs(eigenvalues, eigenvectors); eigenvalues = LinearAlgebraUtils.extractEigenvaluesFromEigenpairs(eigenPairs); eigenvectors = ArrayUtils.transpose(LinearAlgebraUtils.extractEigenvectorsFromEigenpairs(eigenPairs)); } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/pca
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/pca/mtj/PCA_MTJ_SVD_DenseMatrix.java
package hex.pca.mtj; import hex.pca.PCAInterface; import hex.util.LinearAlgebraUtils; import no.uib.cipr.matrix.DenseMatrix; import no.uib.cipr.matrix.NotConvergedException; public class PCA_MTJ_SVD_DenseMatrix implements PCAInterface { private DenseMatrix gramMatrix; private no.uib.cipr.matrix.SVD svd; private double[][] rightEigenvectors; public PCA_MTJ_SVD_DenseMatrix(double[][] gramMatrix) { this.gramMatrix = new DenseMatrix(gramMatrix); runSVD(); } @Override public double[] getVariances() { return svd.getS(); } @Override public double[][] getPrincipalComponents() { return rightEigenvectors; } private void runSVD() { int gramDimension = gramMatrix.numRows(); try { svd = new no.uib.cipr.matrix.SVD(gramDimension, gramDimension).factor(gramMatrix); } catch (NotConvergedException e) { throw new RuntimeException(e); } double[] Vt_1D = svd.getVt().getData(); rightEigenvectors = LinearAlgebraUtils.reshape1DArray(Vt_1D, gramDimension, gramDimension); } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/psvm/BulkScorerFactory.java
package hex.psvm; import hex.genmodel.algos.psvm.KernelParameters; import hex.genmodel.algos.psvm.KernelType; public class BulkScorerFactory { /** * Creates an instance of BulkSupportVectorScorer. * * @param kt type of kernel * @param parms kernel parameters * @param svs compressed representation of the support vectors * @param svsCount number of support vectors * @param scoreRawBytes prefer a scorer that scores directly using the compressed vectors and doesn't allocate extra memory * (might not be available for all kernel types) * @return instance of BulkSupportVectorScorer */ public static BulkSupportVectorScorer makeScorer(KernelType kt, KernelParameters parms, byte[] svs, int svsCount, boolean scoreRawBytes) { switch (kt) { case gaussian: if (scoreRawBytes) return new GaussianScorerRawBytes(parms, svs); else return new GaussianScorerParsed(parms, svs, svsCount); default: throw new UnsupportedOperationException("Scoring for kernel " + kt + " is not yet implemented"); } } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/psvm/BulkSupportVectorScorer.java
package hex.psvm; import hex.genmodel.algos.psvm.KernelParameters; import hex.genmodel.utils.ByteBufferWrapper; import org.apache.commons.math3.util.FastMath; import water.fvec.Chunk; import java.io.Serializable; public interface BulkSupportVectorScorer extends Serializable { double[] bulkScore0(Chunk[] cs); } class GaussianScorerRawBytes implements BulkSupportVectorScorer { private final double _gamma; private final byte[] _svs; GaussianScorerRawBytes(KernelParameters parms, byte[] svs) { this(parms._gamma, svs); } private GaussianScorerRawBytes(double gamma, byte[] svs) { _gamma = gamma; _svs = svs; } public double[] bulkScore0(Chunk[] cs) { double[] result = new double[cs[0]._len]; double[] norms = new double[cs[0]._len]; ByteBufferWrapper bb = new ByteBufferWrapper(_svs); while (bb.hasRemaining()) { final double alpha = bb.get8d(); final int cats = bb.get4(); for (int i = 0; i < cats; i++) { int svCat = bb.get4(); for (int j = 0; j < norms.length; j++) { norms[j] += (int) cs[i].at8(j) == svCat ? 0 : 2; } } final int nums = bb.get4(); for (int i = 0; i < nums; i++) { double svNum = bb.get8d(); for (int j = 0; j < norms.length; j++) { double v = cs[i + cats].atd(j) - svNum; norms[j] += v * v; } } for (int j = 0; j < result.length; j++) { result[j] += alpha * FastMath.exp(-_gamma * norms[j]); norms[j] = 0; } } return result; } } class GaussianScorerParsed implements BulkSupportVectorScorer { private final double _gamma; private final double[] _alphas; private final double[][] _nums; private final int[][] _cats; GaussianScorerParsed(KernelParameters parms, byte[] svs, int svsCount) { this(parms._gamma, svs, svsCount); } private GaussianScorerParsed(double gamma, byte[] svs, int svsCount) { _gamma = gamma; _alphas = new double[svsCount]; _nums = new double[svsCount][]; _cats = new int[svsCount][]; ByteBufferWrapper bb = new ByteBufferWrapper(svs); for (int i = 0; i < svsCount; i++) { _alphas[i] = bb.get8d(); _cats[i] = new int[bb.get4()]; for (int j = 0; j < _cats[i].length; j++) { _cats[i][j] = bb.get4(); } _nums[i] = new double[bb.get4()]; for (int j = 0; j < _nums[i].length; j++) { _nums[i][j] = bb.get8d(); } } } public double[] bulkScore0(Chunk[] cs) { double[] result = new double[cs[0]._len]; double[] norms = new double[cs[0]._len]; for (int s = 0; s < _alphas.length; s++) { for (int i = 0; i < _cats[s].length; i++) { int svCat = _cats[s][i]; for (int j = 0; j < norms.length; j++) { norms[j] += (int) cs[i].at8(j) == svCat ? 0 : 2; } } for (int i = 0; i < _nums[s].length; i++) { double svNum = _nums[s][i]; for (int j = 0; j < norms.length; j++) { double v = cs[i + _cats[s].length].atd(j) - svNum; norms[j] += v * v; } } for (int j = 0; j < result.length; j++) { result[j] += _alphas[s] * FastMath.exp(-_gamma * norms[j]); norms[j] = 0; } } return result; } }
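Both Gaussian scorers accumulate, for every row j, sum_s alpha_s * exp(-gamma * ||x_j - sv_s||^2); a mismatching categorical contributes 2 to the squared distance because in one-hot encoding it flips two indicator bits ((1-0)^2 + (0-1)^2). A standalone sketch of that accumulation for a single row, with plain arrays standing in for Chunks and ByteBufferWrapper:

// Standalone sketch of the accumulation both Gaussian scorers perform:
//   score(x) = sum_s alpha_s * exp(-gamma * ||x - sv_s||^2)
// where each mismatching categorical adds 2 to the squared distance. Illustrative only.
public class GaussianBulkScoreSketch {
  static double score(int[] rowCats, double[] rowNums,
                      double[] alphas, int[][] svCats, double[][] svNums, double gamma) {
    double result = 0;
    for (int s = 0; s < alphas.length; s++) {
      double norm = 0;
      for (int i = 0; i < rowCats.length; i++)
        norm += rowCats[i] == svCats[s][i] ? 0 : 2;   // categorical part of ||x - sv||^2
      for (int i = 0; i < rowNums.length; i++) {
        double v = rowNums[i] - svNums[s][i];
        norm += v * v;                                // numeric part
      }
      result += alphas[s] * Math.exp(-gamma * norm);
    }
    return result;
  }

  public static void main(String[] args) {
    double s = score(new int[]{1}, new double[]{0.5},
                     new double[]{0.7, -0.3}, new int[][]{{1}, {2}},
                     new double[][]{{0.4}, {0.6}}, 0.5);
    System.out.println(s);
  }
}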
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/psvm/MetricBuilderPSVM.java
package hex.psvm; import hex.*; import water.fvec.Frame; import water.util.ArrayUtils; import water.util.MathUtils; /** * Binomial Metric builder tailored to SVM * * SVM doesn't predict probabilities, only probabilities 0-1 are returned, this renders some binomial metric misleading (eg. AUC, logloss) * For maximum code re-use we still do use AUCBuilder, AUC2 instances provide confusion matrix and all metrics based * on confusion matrix (accuracy, ...) * * This builder produces an instance of ModelMetricsBinomial, with irrelevant metrics undefined (NaN) * @param <T> */ public class MetricBuilderPSVM<T extends MetricBuilderPSVM<T>> extends ModelMetricsSupervised.MetricBuilderSupervised<T> { protected AUC2.AUCBuilder _auc; public MetricBuilderPSVM(String[] domain) { super(2, domain); _auc = new AUC2.AUCBuilder(AUC2.NBINS); } // Passed a float[] sized nclasses+1; ds[0] must be a prediction. ds[1...nclasses-1] must be a class // distribution; @Override public double[] perRow(double ds[], float[] yact, Model m) { return perRow(ds, yact, 1, 0, m); } @Override public double[] perRow(double ds[], float[] yact, double w, double o, Model m) { if (Float.isNaN(yact[0])) return ds; // No errors if actual is missing if (ArrayUtils.hasNaNs(ds)) return ds; // No errors if prediction has missing values if (w == 0 || Double.isNaN(w)) return ds; int iact = (int) yact[0]; if (iact != 0 && iact != 1) return ds; // The actual is effectively a NaN _wY += w * iact; _wYY += w * iact * iact; // Compute error double err = iact + 1 < ds.length ? 1 - ds[iact + 1] : 1; // Error: distance from predicting ycls as 1.0 _sumsqe += w * err * err; // Squared error _count++; _wcount += w; assert !Double.isNaN(_sumsqe); _auc.perRow(ds[2], iact, w); return ds; // Flow coding } @Override public void reduce(T mb) { super.reduce(mb); // sumseq, count _auc.reduce(mb._auc); } /** * Create a ModelMetrics for a given model and frame * * @param m Model * @param f Frame * @param frameWithWeights Frame that contains extra columns such as weights * @param preds Optional predictions (can be null), only used to compute Gains/Lift table for binomial problems @return * @return ModelMetricsBinomial */ @Override public ModelMetrics makeModelMetrics(Model m, Frame f, Frame frameWithWeights, Frame preds) { double mse = Double.NaN; double sigma = Double.NaN; final AUC2 auc; if (_wcount > 0) { sigma = weightedSigma(); mse = _sumsqe / _wcount; auc = AUC2.make01AUC(_auc); } else { auc = AUC2.emptyAUC(); } ModelMetricsBinomial mm = new ModelMetricsBinomial(m, f, _count, mse, _domain, sigma, auc, Double.NaN, null, _customMetric); if (m != null) m.addModelMetrics(mm); return mm; } public String toString() { if (_wcount == 0) return "empty, no rows"; return "mse = " + MathUtils.roundToNDigits(_sumsqe / _wcount, 3); } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/psvm/PSVM.java
package hex.psvm; import hex.DataInfo; import hex.FrameTask; import hex.ModelBuilder; import hex.ModelCategory; import hex.psvm.psvm.IncompleteCholeskyFactorization; import hex.psvm.psvm.Kernel; import hex.psvm.psvm.PrimalDualIPM; import water.*; import water.fvec.Chunk; import water.fvec.Frame; import water.fvec.NewChunk; import water.fvec.Vec; import water.util.ArrayUtils; import water.util.Log; import water.util.TwoDimTable; import water.util.VecUtils; import java.util.ArrayList; import java.util.Arrays; import java.util.List; public class PSVM extends ModelBuilder<PSVMModel, PSVMModel.PSVMParameters, PSVMModel.PSVMModelOutput> { @Override public ModelCategory[] can_build() { return new ModelCategory[]{ModelCategory.Binomial}; } @Override public boolean isSupervised() { return true; } @Override public BuilderVisibility builderVisibility() { return BuilderVisibility.Experimental; } public PSVM(boolean startup_once) { super(new PSVMModel.PSVMParameters(), startup_once); } public PSVM(PSVMModel.PSVMParameters parms) { super(parms); init(false); } @Override public void init(boolean expensive) { super.init(expensive); if (expensive) { } // TODO: no validation yet - everything is allowed ;) } @Override public void checkDistributions() { if (_response.isCategorical()) { if (_response.cardinality() != 2) { error("_response", "Expected a binary categorical response, but instead got response with " + _response.cardinality() + " categories."); } } else { if (_response.min() != -1 || _response.max() != 1 || !_response.isInt() || _response.nzCnt() != _response.length()) { error("_response", "Non-categorical response provided, please make sure the response is either binary categorical response or uses only values -1/+1 in case of numerical response."); } } } @Override protected boolean computePriorClassDistribution() { return false; // no use, we don't output probabilities } @Override protected int init_getNClass() { return 2; // only binomial classification is supported } @Override protected Driver trainModelImpl() { return new SVMDriver(); } private class SVMDriver extends Driver { DataInfo adaptTrain() { Frame adapted = new Frame(train()); adapted.remove(_parms._response_column); if (response().naCnt() > 0) { throw new IllegalStateException("NA values in response column are currently not supported."); } Vec numericResp; if (response().domain() == null) { numericResp = response(); } else { numericResp = new MRTask() { @Override public void map(Chunk c, NewChunk nc) { for (int i = 0; i < c._len; i++) { if (c.at8(i) == 0) nc.addNum(-1); else nc.addNum(+1); } } }.doAll(Vec.T_NUM, response()).outputFrame().vec(0); } adapted.add(_parms._response_column, numericResp); adapted.add("two_norm_sq", Scope.track(adapted.anyVec().makeZero())); // (L2 norm)^2; initialized 0 and actually calculated later // TODO: scaling / normalization return new DataInfo(adapted, null, 2, true, DataInfo.TransformType.NONE, DataInfo.TransformType.NONE, true, false, false, false, false, false, null) .disableIntercept(); } Frame prototypeFrame(DataInfo di) { Frame f = new Frame(di._adaptedFrame); f.remove("two_norm_sq"); return f; } @Override public void computeImpl() { PSVMModel model = null; try { init(true); _job.update(0, "Initializing model training"); final DataInfo di = adaptTrain(); Scope.track_generic(di); DKV.put(di); if (_parms._gamma == -1) { _parms._gamma = 1.0d / di.fullN(); Log.info("Set gamma = " + _parms._gamma); } final Vec response = di._adaptedFrame.vec(_parms._response_column); PSVMModel.PSVMModelOutput output 
= new PSVMModel.PSVMModelOutput(PSVM.this, prototypeFrame(di), response().domain()); model = new PSVMModel(_result, _parms, output); model.delete_and_lock(_job); final int rank = getRankICF(_parms._rank_ratio, di._adaptedFrame.numRows()); Log.info("Desired rank of ICF matrix = " + rank); _job.update(0, "Running Incomplete Cholesky Factorization"); Frame icf = IncompleteCholeskyFactorization.icf(di, _parms.kernel(), rank, _parms._fact_threshold); Scope.track(icf); _job.update(0, "Running IPM"); IPMInfo ipmInfo = new IPMInfo(); Vec alpha = PrimalDualIPM.solve(icf, response, _parms.ipmParms(), ipmInfo); icf.remove(); Log.info("IPM finished"); Vec svs = new RegulateAlphaTask(_parms.c_pos(), _parms.c_neg(), _parms._sv_threshold) .doAll(Vec.T_NUM, alpha, response) .updateStats(model._output); assert svs.length() == model._output._svs_count; Scope.track(svs); // save alpha Frame alphaFr = new Frame(Key.<Frame>make(model._key + "_alpha")); alphaFr.add("alpha", alpha); DKV.put(alphaFr); model._output._alpha_key = alphaFr._key; final int sampleSize = (int) Math.min(svs.length(), 1000); DataInfo.Row[] sampleSVs = new CollectSupportVecSamplesTask(di, svs, sampleSize) .doAll(di._adaptedFrame) .getSelected(); model._output._rho = new CalculateRhoTask(di, sampleSVs, alpha, _parms.kernel()) .doAll(di._adaptedFrame) .getRho(); long estimatedSize = 0; for (DataInfo.Row sv : sampleSVs) { estimatedSize += toSupportVector(0, sv).estimateSize(); } if (svs.length() > sampleSize) { // scale the estimate to the actual number of SVs estimatedSize = (estimatedSize * svs.length()) / sampleSize; } final boolean tooBig = estimatedSize >= Value.MAX; if (! tooBig) { Frame fr = new Frame(di._adaptedFrame); fr.add("__alpha", alpha); model._output._compressed_svs = new CompressVectorsTask(di).doAll(fr)._csvs; assert (svs.length() > sampleSize) || model._output._compressed_svs.length == estimatedSize; // sanity check - small models should have precise estimate } else { Log.err("Estimated model size (" + estimatedSize + "B) exceeds limits of DKV. Support vectors will not be stored."); model.addWarning("Model too big (size " + estimatedSize + "B) exceeds maximum model size. " + "Support vectors will not be stored as a part of the model. You can still inspect what vectors were " + "chosen and what are their alpha coefficients (see Frame alpha in model output)."); model._output._compressed_svs = new byte[0]; } Log.info("Total #support vectors: " + model._output._svs_count + " (size in memory " + estimatedSize + "B)"); model._output._model_summary = createModelSummaryTable(model._output, ipmInfo); model.update(_job); if (! 
tooBig) { if (_parms._disable_training_metrics) { String noMetricsWarning = "Not creating training metrics: scoring disabled (use disable_training_metrics = false to override)"; Log.warn(noMetricsWarning); model.addWarning(noMetricsWarning); } else { _job.update(0, "Scoring training frame"); Frame scoringTrain = new Frame(train()); model.adaptTestForTrain(scoringTrain, true, true); model._output._training_metrics = model.makeModelMetrics(train(), scoringTrain, "Training metrics"); } if (valid() != null) { _job.update(0,"Scoring validation frame"); Frame scoringValid = new Frame(valid()); model.adaptTestForTrain(scoringValid, true, true); model._output._validation_metrics = model.makeModelMetrics(valid(), scoringValid, "Validation metrics"); } } Scope.untrack(alpha._key); // we want to keep the alpha Vec after model is built } finally { if (model != null) model.unlock(_job); } } } private int getRankICF(double rankRatio, long numRows) { if (rankRatio == -1) { return (int) Math.sqrt(numRows); } else { return (int) (rankRatio * numRows); } } private static class CompressVectorsTask extends MRTask<CompressVectorsTask> { // IN private final DataInfo _dinfo; // OUT private byte[] _csvs; CompressVectorsTask(DataInfo dinfo) { _dinfo = dinfo; } @Override public void map(Chunk[] acs) { AutoBuffer ab = new AutoBuffer(); Chunk alpha = acs[acs.length - 1]; Chunk[] cs = Arrays.copyOf(acs, acs.length - 1); DataInfo.Row row = _dinfo.newDenseRow(); SupportVector sv = new SupportVector(); for (int i = 0; i < alpha._len; i++) { if (alpha.isNA(i)) continue; _dinfo.extractDenseRow(cs, i, row); sv.fill(alpha.atd(i), row.numVals, row.binIds); sv.compress(ab); } _csvs = ab.buf(); } @Override public void reduce(CompressVectorsTask mrt) { _csvs = ArrayUtils.append(_csvs, mrt._csvs); } } private static SupportVector toSupportVector(double alpha, DataInfo.Row row) { if (row.isSparse()) { throw new UnsupportedOperationException("Sparse rows are not yet supported"); } return new SupportVector().fill(alpha, row.numVals, row.binIds); } private static class CalculateRhoTask extends FrameTask<CalculateRhoTask> { // IN DataInfo.Row[] _selected; Vec _alpha; Kernel _kernel; // OUT double[] _rhos; // TEMP transient long _offset; transient Chunk _alphaChunk; public CalculateRhoTask(DataInfo dinfo, DataInfo.Row[] selected, Vec alpha, Kernel kernel) { super(null, dinfo); _selected = selected; _alpha = alpha; _kernel = kernel; } @Override public void map(Chunk[] chunks, NewChunk[] outputs) { _alphaChunk = _alpha.chunkForChunkIdx(chunks[0].cidx()); _offset = _alphaChunk.start(); _rhos = new double[_selected.length]; super.map(chunks, outputs); } @Override protected boolean skipRow(long gid) { return _alphaChunk.isNA((int) (gid - _offset)); } @Override protected void processRow(long gid, DataInfo.Row r) { for (int i = 0; i < _selected.length; i++) { _rhos[i] += _alphaChunk.atd((int) (gid - _offset)) * _kernel.calcKernel(r, _selected[i]); } } @Override public void reduce(CalculateRhoTask mrt) { _rhos = ArrayUtils.add(_rhos, mrt._rhos); } double getRho() { double rho = 0; for (int i = 0; i < _selected.length; i++) { rho += _selected[i].response[0] - _rhos[i]; } return rho / _selected.length; } } private static class CollectSupportVecSamplesTask extends FrameTask<CollectSupportVecSamplesTask> { // IN private Vec _svs; private int _num_selected; // OUT DataInfo.Row[][] _selected; // TEMP private transient long[] _local_selected_idxs; @Override protected void setupLocal() { super.setupLocal(); _selected = new 
DataInfo.Row[H2O.CLOUD.size()][]; int[] cids = VecUtils.getLocalChunkIds(_svs); long local_svs = 0; for (int cidx : cids) { local_svs += _svs.chunkLen(cidx); } final int local_contribution = (int) (_num_selected * local_svs / _svs.length()); DataInfo.Row[] local_selected = new DataInfo.Row[local_contribution]; _local_selected_idxs = new long[local_contribution]; _selected[H2O.SELF.index()] = local_selected; int v = 0; fill_selected: for (int cidx : cids) { Chunk svIndices = _svs.chunkForChunkIdx(cidx); for (int i = 0; i < svIndices._len; i++) { _local_selected_idxs[v++] = svIndices.at8(i); if (v == local_contribution) break fill_selected; } } Arrays.sort(_local_selected_idxs); } CollectSupportVecSamplesTask(DataInfo dinfo, Vec svs, int num_selected) { super(null, dinfo); _svs = svs; _num_selected = num_selected; } @Override protected boolean skipRow(long gid) { return Arrays.binarySearch(_local_selected_idxs, gid) < 0; } @Override protected void processRow(long gid, DataInfo.Row r) { int idx = Arrays.binarySearch(_local_selected_idxs, gid); _selected[H2O.SELF.index()][idx] = r.deepClone(); } @Override public void reduce(CollectSupportVecSamplesTask mrt) { for (int i = 0; i < H2O.CLOUD.size(); i++) { if (mrt._selected[i] != null) _selected[i] = mrt._selected[i]; } } DataInfo.Row[] getSelected() { return ArrayUtils.flat(_selected); } } private static class RegulateAlphaTask extends MRTask<RegulateAlphaTask> { // IN private double _c_pos; private double _c_neg; private double _sv_threshold; // OUT long _svs_count; // support vectors long _bsv_count; // bounded support vectors private RegulateAlphaTask(double c_pos, double c_neg, double sv_threshold) { _c_pos = c_pos; _c_neg = c_neg; _sv_threshold = sv_threshold; } @Override public void map(Chunk alpha, Chunk label, NewChunk nc) { for (int i = 0; i < alpha._len; i++) { final double x = alpha.atd(i); if (x <= _sv_threshold) { alpha.setNA(i); } else { _svs_count++; nc.addNum(alpha.start() + i); double c = label.atd(i) > 0 ? 
_c_pos : _c_neg; double out_x; if (c - x <= _sv_threshold) { out_x = c; _bsv_count++; } else { out_x = x; } alpha.set(i, out_x * label.atd(i)); } } } @Override public void reduce(RegulateAlphaTask mrt) { _svs_count += mrt._svs_count; _bsv_count += mrt._bsv_count; } private Vec updateStats(PSVMModel.PSVMModelOutput o) { o._svs_count = _svs_count; o._bsv_count = _bsv_count; return outputFrame().vec(0); } } private static TwoDimTable createModelSummaryTable(PSVMModel.PSVMModelOutput output, IPMInfo ipmInfo) { List<String> colHeaders = new ArrayList<>(); List<String> colTypes = new ArrayList<>(); List<String> colFormat = new ArrayList<>(); colHeaders.add("Number of Support Vectors"); colTypes.add("long"); colFormat.add("%d"); colHeaders.add("Number of Bounded Support Vectors"); colTypes.add("long"); colFormat.add("%d"); colHeaders.add("Raw Model Size in Bytes"); colTypes.add("long"); colFormat.add("%d"); colHeaders.add("rho"); colTypes.add("double"); colFormat.add("%.5f"); colHeaders.add("Number of Iterations"); colTypes.add("long"); colFormat.add("%d"); colHeaders.add("Surrogate Gap"); colTypes.add("double"); colFormat.add("%.5f"); colHeaders.add("Primal Residual"); colTypes.add("double"); colFormat.add("%.5f"); colHeaders.add("Dual Residual"); colTypes.add("double"); colFormat.add("%.5f"); final int rows = 1; TwoDimTable table = new TwoDimTable( "Model Summary", null, new String[rows], colHeaders.toArray(new String[0]), colTypes.toArray(new String[0]), colFormat.toArray(new String[0]), ""); int row = 0; int col = 0; table.set(row, col++, output._svs_count); table.set(row, col++, output._bsv_count); table.set(row, col++, output._compressed_svs != null ? output._compressed_svs.length : -1); table.set(row, col++, output._rho); table.set(row, col++, ipmInfo._iter); table.set(row, col++, ipmInfo._sgap); table.set(row, col++, ipmInfo._resp); table.set(row, col++, ipmInfo._resd); assert col == colHeaders.size(); return table; } private static class IPMInfo implements PrimalDualIPM.ProgressObserver { int _iter; double _sgap; double _resp; double _resd; boolean _converged; @Override public void reportProgress(int iter, double sgap, double resp, double resd, boolean converged) { _iter = iter; _sgap = sgap; _resp = resp; _resd = resd; _converged = converged; } } }
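RegulateAlphaTask in the driver above converts the raw IPM solution into support vectors: alphas at or below _sv_threshold are dropped (set to NA), alphas within _sv_threshold of the box bound C (c_pos or c_neg depending on the label) are clipped to C and counted as bounded support vectors, and the stored coefficient becomes alpha * label so scoring reduces to a single weighted kernel sum. A dense sketch of that post-processing, with NaN standing in for the NA marker:

// Sketch of the RegulateAlphaTask post-processing: drop small alphas, clip alphas near
// the box bound C (bounded SVs), and store alpha * label. Plain arrays, illustrative only.
public class RegulateAlphaSketch {
  static int svCount, bsvCount;

  static double[] regulate(double[] alpha, double[] label,
                           double cPos, double cNeg, double svThreshold) {
    double[] out = new double[alpha.length];
    for (int i = 0; i < alpha.length; i++) {
      double x = alpha[i];
      if (x <= svThreshold) { out[i] = Double.NaN; continue; }   // not a support vector
      svCount++;
      double c = label[i] > 0 ? cPos : cNeg;
      if (c - x <= svThreshold) { x = c; bsvCount++; }           // bounded support vector
      out[i] = x * label[i];
    }
    return out;
  }

  public static void main(String[] args) {
    double[] a = regulate(new double[]{1e-9, 0.3, 0.999999},
                          new double[]{1, -1, 1}, 1.0, 1.0, 1e-4);
    System.out.println(java.util.Arrays.toString(a) + " svs=" + svCount + " bounded=" + bsvCount);
  }
}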
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/psvm/PSVMModel.java
package hex.psvm; import hex.*; import hex.genmodel.algos.psvm.KernelParameters; import hex.genmodel.algos.psvm.KernelType; import hex.genmodel.algos.psvm.ScorerFactory; import hex.genmodel.algos.psvm.SupportVectorScorer; import hex.psvm.psvm.Kernel; import hex.psvm.psvm.KernelFactory; import hex.psvm.psvm.PrimalDualIPM; import water.Futures; import water.Key; import water.Keyed; import water.fvec.Chunk; import water.fvec.Frame; import water.util.Log; import java.util.Arrays; public class PSVMModel extends Model<PSVMModel, PSVMModel.PSVMParameters, PSVMModel.PSVMModelOutput> { private transient SupportVectorScorer _scorer; // initialized lazily public PSVMModel(Key<PSVMModel> selfKey, PSVMParameters params, PSVMModelOutput output) { super(selfKey, params, output); assert(Arrays.equals(_key._kb, selfKey._kb)); } ModelMetricsSupervised makeModelMetrics(Frame origFr, Frame adaptFr, String description) { Log.info("Making metrics: " + description); ModelMetrics.MetricBuilder mb = scoreMetrics(adaptFr); ModelMetricsSupervised mm = (ModelMetricsSupervised) mb.makeModelMetrics(this, origFr, adaptFr, null); mm._description = description; return mm; } @Override protected double[] score0(double[] data, double[] preds) { double svScore = getScorer().score0(data); return makePreds(svScore, preds); } private double[] makePreds(double svScore, double[] preds) { double pred = svScore + _output._rho; int label = pred < 0 ? 0 : 1; preds[0] = label; preds[1 + label] = 1; preds[2 - label] = 0; return preds; } @Override protected BigScorePredict setupBigScorePredict(BigScore bs) { BulkSupportVectorScorer bulkScorer = BulkScorerFactory.makeScorer( _parms._kernel_type, _parms.kernelParms(), _output._compressed_svs, (int) _output._svs_count, true); return new SVMBigScorePredict(bulkScorer); } private class SVMBigScorePredict implements BigScorePredict { private BulkSupportVectorScorer _bulkScorer; SVMBigScorePredict(BulkSupportVectorScorer bulkScorer) { _bulkScorer = bulkScorer; } @Override public BigScoreChunkPredict initMap(Frame fr, Chunk[] chks) { double[] scores = _bulkScorer.bulkScore0(chks); return new SVMBigScoreChunkPredict(scores); } } private class SVMBigScoreChunkPredict implements BigScoreChunkPredict { private final double[] _scores; private SVMBigScoreChunkPredict(double[] scores) { _scores = scores; } @Override public double[] score0(Chunk[] chks, double offset, int row_in_chunk, double[] tmp, double[] preds) { return makePreds(_scores[row_in_chunk], preds); } @Override public void close() { // nothing to do } } private SupportVectorScorer getScorer() { SupportVectorScorer svs = _scorer; if (svs == null) { _scorer = svs = ScorerFactory.makeScorer(_parms._kernel_type, _parms.kernelParms(), _output._compressed_svs); } return svs; } @SuppressWarnings("WeakerAccess") public static class PSVMParameters extends Model.Parameters { private static final PrimalDualIPM.Parms IPM_DEFAULTS = new PrimalDualIPM.Parms(); public String algoName() { return "PSVM"; } public String fullName() { return "PSVM"; } public String javaName() { return PSVMModel.class.getName(); } @Override public long progressUnits() { return 1; } public long _seed = -1; // SVM public double _hyper_param = 1.0; // "C" public double _positive_weight = 1.0; public double _negative_weight = 1.0; public double _sv_threshold = 1.0e-4; public double _zero_threshold = 1.0e-9; // not exposed public boolean _disable_training_metrics = true; // Kernel public KernelType _kernel_type = KernelType.gaussian; public double _gamma = -1; // by default use 
1/(#expanded features) // ** Expert ** // ICF public double _rank_ratio = -1; // by default use sqrt(#rows) public double _fact_threshold = 1e-05; // Primal-Dual IPM public int _max_iterations = IPM_DEFAULTS._max_iter; public double _feasible_threshold = IPM_DEFAULTS._feasible_threshold; public double _surrogate_gap_threshold = IPM_DEFAULTS._feasible_threshold; public double _mu_factor = IPM_DEFAULTS._mu_factor; public Kernel kernel() { return KernelFactory.make(_kernel_type, kernelParms()); } KernelParameters kernelParms() { KernelParameters kp = new KernelParameters(); kp._gamma = _gamma; return kp; } PrimalDualIPM.Parms ipmParms() { PrimalDualIPM.Parms p = new PrimalDualIPM.Parms(); p._max_iter = _max_iterations; p._mu_factor = _mu_factor; p._feasible_threshold = _feasible_threshold; p._sgap_threshold = _surrogate_gap_threshold; p._x_epsilon = _zero_threshold; p._c_pos = _hyper_param * _positive_weight; p._c_neg = _hyper_param * _negative_weight; return p; } double c_pos() { return _hyper_param * _positive_weight; } double c_neg() { return _hyper_param * _negative_weight; } } public static class PSVMModelOutput extends Model.Output { public long _svs_count; // support vectors public long _bsv_count; // bounded support vectors public double _rho; public Key<Frame> _alpha_key; public byte[] _compressed_svs; // might be empty if the model is too large (too many SVs) PSVMModelOutput(PSVM b, Frame f, String[] respDomain) { super(b, f); _domains[_domains.length - 1] = respDomain != null ? respDomain : new String[]{"-1", "+1"}; } @Override public ModelCategory getModelCategory() { return ModelCategory.Binomial; } } @Override public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) { return new MetricBuilderPSVM(domain); } @Override protected Futures remove_impl(Futures fs, boolean cascade) { Keyed.remove(_output._alpha_key, fs, true); return super.remove_impl(fs, cascade); } }
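PSVMModel.makePreds turns the support-vector sum into the final prediction: the decision value is svScore + rho, the predicted class is its sign, and the two "probability" outputs are hard 0/1 because the SVM produces no calibrated probabilities (which is also why MetricBuilderPSVM above leaves probability-based metrics undefined). A small sketch of exactly that mapping:

// Sketch of PSVMModel.makePreds: decision value = kernel-weighted SV sum + rho,
// predicted class = its sign, and hard 0/1 in the probability slots.
public class SvmDecisionSketch {
  static double[] makePreds(double svScore, double rho, double[] preds /* length 3 */) {
    double decision = svScore + rho;
    int label = decision < 0 ? 0 : 1;
    preds[0] = label;
    preds[1 + label] = 1;       // "probability" of the predicted class
    preds[2 - label] = 0;       // "probability" of the other class
    return preds;
  }

  public static void main(String[] args) {
    System.out.println(java.util.Arrays.toString(makePreds(-0.8, 0.3, new double[3]))); // [0.0, 1.0, 0.0]
    System.out.println(java.util.Arrays.toString(makePreds( 0.8, 0.3, new double[3]))); // [1.0, 0.0, 1.0]
  }
}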
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/psvm/SupportVector.java
package hex.psvm; import water.AutoBuffer; class SupportVector { private double _alpha; private double[] _numVals; private int[] _binIds; SupportVector fill(double alpha, double[] numVals, int[] binIds) { _alpha = alpha; _numVals = numVals; _binIds = binIds; return this; } int estimateSize() { return 8 + (4 + (_numVals.length * 8)) + (4 + (_binIds.length * 4)); } void compress(AutoBuffer ab) { ab.put8d(_alpha); // categorical columns ab.put4(_binIds.length); for (int v : _binIds) { ab.put4(v); } // numeric columns ab.put4(_numVals.length); for (double v : _numVals) { ab.put8d(v); } } }
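SupportVector.compress emits one record per support vector in the layout [alpha: 8 bytes][#categorical ids: 4][each id: 4][#numeric values: 4][each value: 8], which is exactly what estimateSize sums and what the bulk scorers walk with ByteBufferWrapper. The decoder sketch below uses java.nio.ByteBuffer as a stand-in for H2O's AutoBuffer, so the byte order here is an assumption of the sketch, not a statement about the real serialized format.

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

// Illustrative decoder for the per-vector layout SupportVector.compress emits:
//   [alpha: 8][nCats: 4][catId: 4 x nCats][nNums: 4][numVal: 8 x nNums]
public class SupportVectorDecodeSketch {
  static class SV { double alpha; int[] cats; double[] nums; }

  static List<SV> decode(byte[] blob) {
    ByteBuffer bb = ByteBuffer.wrap(blob);
    List<SV> svs = new ArrayList<>();
    while (bb.hasRemaining()) {
      SV sv = new SV();
      sv.alpha = bb.getDouble();
      sv.cats = new int[bb.getInt()];
      for (int i = 0; i < sv.cats.length; i++) sv.cats[i] = bb.getInt();
      sv.nums = new double[bb.getInt()];
      for (int i = 0; i < sv.nums.length; i++) sv.nums[i] = bb.getDouble();
      svs.add(sv);
    }
    return svs;
  }

  public static void main(String[] args) {
    ByteBuffer bb = ByteBuffer.allocate(8 + 4 + 4 + 4 + 8);   // estimateSize() for 1 cat, 1 num
    bb.putDouble(0.42).putInt(1).putInt(3).putInt(1).putDouble(1.5);
    SV sv = decode(bb.array()).get(0);
    System.out.println(sv.alpha + " " + sv.cats[0] + " " + sv.nums[0]);
  }
}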
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/psvm
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/psvm/psvm/IncompleteCholeskyFactorization.java
/* Copyright 2007 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package hex.psvm.psvm; import hex.DataInfo; import water.*; import water.fvec.Chunk; import water.fvec.Frame; import water.fvec.NewChunk; import water.fvec.Vec; import water.util.Log; /** * Implementation of ICF based on https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/34638.pdf * * This implementation is based on and takes clues from the reference PSVM implementation in C++: * https://code.google.com/archive/p/psvm/source/default/source * original code: Copyright 2007 Google Inc., Apache License, Version 2.0 */ public class IncompleteCholeskyFactorization { public static Frame icf(DataInfo di, Kernel kernel, int n, double threshold) { return icf(di._adaptedFrame, di, kernel, n, threshold); } static Frame icf(Frame frame, String response, Kernel kernel, int n, double threshold) { Frame adapted = new Frame(frame); try { adapted.add(response, adapted.remove(response)); // make response to the last column adapted.add("two_norm_sq", adapted.anyVec().makeZero()); // (L2 norm)^2; initialized 0 - will be calculated later - treated as second response for the lack of a better place DataInfo di = new DataInfo(adapted, null, 2, true, DataInfo.TransformType.NONE, DataInfo.TransformType.NONE, true, false, false, false, false, false, null) .disableIntercept(); return icf(di, kernel, n, threshold); } finally { Vec tns = adapted.vec("two_norm_sq"); if (tns != null) { tns.remove(); } } } private static Frame icf(Frame frame, DataInfo di, Kernel kernel, int n, double threshold) { Frame icf = new Frame(); Frame workspace = new Frame(); try { Vec diag1 = new InitICF(di, kernel).doAll(Vec.T_NUM, frame).outputFrame().anyVec(); // diag1: the diagonal part of Q (the kernel matrix diagonal) Vec diag2 = frame.anyVec().makeZero(); // diag2: the quadratic sum of a row of the ICF matrix Vec pivot_selected = frame.anyVec().makeZero(); workspace.add("pivot_selected", pivot_selected); workspace.add("diag1", diag1); workspace.add("diag2", diag2); for (int i = 0; i < n; i++) { FindPivot fp = new FindPivot(frame, di).doAll(workspace); if (fp._trace < threshold) { Log.info("ICF finished before full rank was reached in iteration " + i + ". 
Trace value = " + fp._trace + " (convergence threshold = " + threshold + ")."); break; } Log.info("ICF Iteration " + i + ": trace: " + fp._trace); Vec newCol = frame.anyVec().makeZero(); icf.add("C" + (i + 1), newCol); UpdatePivot up = new UpdatePivot(icf, pivot_selected, fp).doOnRemote(); new CalculateColumn(frame, di, kernel, icf, fp._pivot_sample, up._header_row).doAll(pivot_selected, diag2, newCol); } } finally { workspace.delete(); } return icf; } /** * Calculate a new column of the ICF matrix */ private static class CalculateColumn extends MRTask<CalculateColumn> { // IN Frame _full_frame; DataInfo _dinfo; Kernel _kernel; double[] _header_row; DataInfo.Row _pivot_sample; Frame _icf; private CalculateColumn(Frame frame, DataInfo dinfo, Kernel kernel, Frame icf, DataInfo.Row pivotSample, double[] headerRow) { _full_frame = frame; _dinfo = dinfo; _kernel = kernel; _icf = icf; _pivot_sample = pivotSample; _header_row = headerRow; } @Override public void map(Chunk pivot_selected, Chunk diag2, Chunk newColChunk) { Chunk[] icf = getLocalChunks(_icf, pivot_selected.start()); Chunk[] frameChunks = getLocalChunks(_full_frame, pivot_selected.start()); double[] newColData = MemoryManager.malloc8d(newColChunk._len); boolean[] pivotSelected = MemoryManager.mallocZ(newColChunk._len); for (int i = 0; i < newColData.length; i++) { pivotSelected[i] = pivot_selected.at8(i) != 0; newColData[i] = pivotSelected[i] ? newColChunk.atd(i) : 0; } for (int k = 0; k < icf.length - 1; k++) { for (int i = 0; i < newColChunk._len; i++) { if (pivotSelected[i]) continue; newColData[i] -= icf[k].atd(i) * _header_row[k]; } } DataInfo.Row row = _dinfo.newDenseRow(); for (int i = 0; i < newColChunk._len; i++) { if (pivotSelected[i]) continue; _dinfo.extractDenseRow(frameChunks, i, row); newColData[i] += _kernel.calcKernelWithLabel(row, _pivot_sample); } for (int i = 0; i < newColChunk._len; i++) { if (pivotSelected[i]) continue; newColData[i] /= _header_row[_header_row.length - 1]; } // Updated newCol chunk and calculate diag2 for (int i = 0; i < newColData.length; i++) { double v = newColData[i]; newColChunk.set(i, v); diag2.set(i, diag2.atd(i) + (v * v)); } } } private static class InitICF extends MRTask<InitICF> { // IN DataInfo _dinfo; Kernel _kernel; InitICF(DataInfo dinfo, Kernel kernel) { _dinfo = dinfo; _kernel = kernel; } @Override public void map(Chunk[] cs, NewChunk nc) { DataInfo.Row row = _dinfo.newDenseRow(); Chunk two_norm_sq = cs[cs.length - 1]; for (int r = 0; r < cs[0]._len; r++) { _dinfo.extractDenseRow(cs, r, row); final double tns = row.twoNormSq(); row.response[1] = tns; two_norm_sq.set(r, tns); double diag1val = _kernel.calcKernel(row, row); nc.addNum(diag1val); } } } /** * Find new pivot and calculate a trace of Q */ private static class FindPivot extends MRTask<FindPivot> { // IN Frame _full_frame; DataInfo _dinfo; // OUT long _index = -1; double _value; DataInfo.Row _pivot_sample; double _trace; FindPivot(Frame frame, DataInfo dinfo) { _full_frame = frame; _dinfo = dinfo; } @Override public void map(Chunk pivot_selected, Chunk diag1, Chunk diag2) { if (diag1._len == 0) return; int idx = -1; _value = -Double.MAX_VALUE; _trace = 0; for (int i = 0; i < diag1._len; i++) { if (pivot_selected.at8(i) != 0) continue; double diff = diag1.atd(i) - diag2.atd(i); _trace += diff; if (diff > _value) { _value = diff; idx = i; } } if (idx != -1) { _index = diag1.start() + idx; _pivot_sample = extractLocalRow(_index); } } @Override public void reduce(FindPivot mrt) { _trace += mrt._trace; if ((_index == -1) || 
((mrt._index != -1) && (mrt._value > _value))) { _index = mrt._index; _value = mrt._value; _pivot_sample = mrt._pivot_sample; } } private DataInfo.Row extractLocalRow(long idx) { Chunk[] chks = getLocalChunks(_full_frame, idx); DataInfo.Row row = _dinfo.newDenseRow(); int rid = (int) (idx - chks[0].start()); _dinfo.extractDenseRow(chks, rid, row); return row; } } /** * Update pivot on remote node and return the corresponding row of the ICF matrix */ private static class UpdatePivot extends DTask<UpdatePivot> { // IN Frame _icf; Vec _pivot_selected; long _index; double _value; // OUT double[] _header_row; UpdatePivot(Frame icf, Vec pivotSelected, FindPivot fp) { _icf = icf; _pivot_selected = pivotSelected; _index = fp._index; _value = Math.sqrt(fp._value); } @Override public void compute2() { // using global Vec API on the home node of the underlying chunks - will be fast _icf.vecs()[_icf.numCols() - 1].set(_index, _value); _pivot_selected.set(_index, 1); Chunk[] chks = getLocalChunks(_icf, _index); int row = (int) (_index - chks[0].start()); _header_row = new double[chks.length]; for (int i = 0; i < chks.length; i++) { _header_row[i] = chks[i].atd(row); } tryComplete(); } UpdatePivot doOnRemote() { Vec newCol = _icf.lastVec(); assert newCol.isConst(); H2ONode node = newCol.chunkKey(newCol.elem2ChunkIdx(_index)).home_node(); return new RPC<>(node, this).call().get(); } } private static Chunk[] getLocalChunks(Frame f, long rowId) { if (f.numCols() == 0) return new Chunk[0]; Vec[] vecs = f.vecs(); Chunk[] chks = new Chunk[vecs.length]; int cidx = vecs[0].elem2ChunkIdx(rowId); for (int i = 0; i < chks.length; i++) { assert vecs[i].chunkKey(cidx).home(); chks[i] = vecs[i].chunkForChunkIdx(cidx); } return chks; } }
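The ICF code above factorizes the (label-signed) kernel matrix Q ≈ H H^T one column at a time: diag1 holds the kernel diagonal, diag2 the running row sums of squares of H, the pivot is the unselected row with the largest residual diag1 - diag2, the total residual trace is the stopping criterion, and the new column is (Q[i][pivot] - sum_k H[i][k] H[pivot][k]) / sqrt(residual[pivot]). Below is a single-node dense sketch of the same algorithm, under the (unrealistic for H2O-scale data) assumption that the kernel matrix fits in memory.

// Single-node, dense sketch of the incomplete Cholesky factorization computed above by
// MRTasks: given a symmetric PSD kernel matrix q (n x n), build h (n x rank) with
// q ~= h * h^T, picking at each step the pivot with the largest residual diagonal and
// stopping once the residual trace drops below `threshold`. Illustrative only.
public class IcfSketch {
  static double[][] icf(double[][] q, int rank, double threshold) {
    int n = q.length;
    double[][] h = new double[n][rank];
    boolean[] selected = new boolean[n];
    double[] diag2 = new double[n];                   // row sums of squares of h, cf. the diag2 Vec
    for (int k = 0; k < rank; k++) {
      int pivot = -1; double best = -Double.MAX_VALUE, trace = 0;
      for (int i = 0; i < n; i++) {
        if (selected[i]) continue;
        double residual = q[i][i] - diag2[i];
        trace += residual;
        if (residual > best) { best = residual; pivot = i; }
      }
      if (pivot == -1 || trace < threshold) break;    // converged before reaching full rank
      selected[pivot] = true;
      double pivotVal = Math.sqrt(best);
      h[pivot][k] = pivotVal;
      for (int i = 0; i < n; i++) {
        if (selected[i]) continue;
        double v = q[i][pivot];
        for (int j = 0; j < k; j++) v -= h[i][j] * h[pivot][j];
        h[i][k] = v / pivotVal;
        diag2[i] += h[i][k] * h[i][k];
      }
    }
    return h;
  }

  public static void main(String[] args) {
    double[][] q = {{2, 1, 0}, {1, 2, 1}, {0, 1, 2}};
    double[][] h = icf(q, 3, 1e-8);
    System.out.println(java.util.Arrays.deepToString(h)); // h * h^T reproduces q up to rank/threshold
  }
}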
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/psvm
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/psvm/psvm/Kernel.java
/* Copyright 2007 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package hex.psvm.psvm; import hex.DataInfo; import hex.genmodel.algos.psvm.KernelParameters; import water.Freezable; import water.Iced; public interface Kernel extends Freezable { double calcKernelWithLabel(DataInfo.Row a, DataInfo.Row b); double calcKernel(DataInfo.Row a, DataInfo.Row b); } final class GaussianKernel extends Iced implements Kernel { private final double _rbf_gamma; GaussianKernel(KernelParameters parms) { this(parms._gamma); } GaussianKernel(double rbf_gamma) { _rbf_gamma = rbf_gamma; } @Override public double calcKernel(DataInfo.Row a, DataInfo.Row b) { // ||a - b||^2 = (||a||^2 - 2 * a.b + ||b||^2) double norm_a_b_sq = a.response[1] + b.response[1] - 2 * a.innerProduct(b); return Math.exp(-_rbf_gamma * norm_a_b_sq); } @Override public double calcKernelWithLabel(DataInfo.Row a, DataInfo.Row b) { if ((int) a.response[0] != (int) b.response[0]) { return -calcKernel(a, b); } else { return calcKernel(a, b); } } }
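GaussianKernel leans on the cached squared norms (response[1], i.e. the two_norm_sq column) so that each kernel evaluation needs only one inner product, via ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2; calcKernelWithLabel additionally flips the sign when the two labels differ, i.e. it returns y_a * y_b * K(a, b). A tiny standalone check of the norm expansion:

// Tiny check of the expansion GaussianKernel relies on:
//   ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2
// so only the inner product has to be recomputed per pair once squared norms are cached.
public class RbfIdentitySketch {
  static double dot(double[] a, double[] b) {
    double s = 0;
    for (int i = 0; i < a.length; i++) s += a[i] * b[i];
    return s;
  }

  public static void main(String[] args) {
    double[] a = {1.0, -2.0, 0.5}, b = {0.25, 1.0, -1.0};
    double direct = 0;
    for (int i = 0; i < a.length; i++) { double d = a[i] - b[i]; direct += d * d; }
    double cached = dot(a, a) - 2 * dot(a, b) + dot(b, b);
    double gamma = 0.1;
    System.out.println(Math.exp(-gamma * direct) + " == " + Math.exp(-gamma * cached));
  }
}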
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/psvm
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/psvm/psvm/KernelFactory.java
package hex.psvm.psvm; import hex.genmodel.algos.psvm.KernelParameters; import hex.genmodel.algos.psvm.KernelType; public class KernelFactory { public static Kernel make(KernelType type, KernelParameters parms) { switch (type) { case gaussian: return new GaussianKernel(parms); default: throw new UnsupportedOperationException("Kernel type '" + type.name() + "' is not yet supported."); } } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/psvm
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/psvm/psvm/LLMatrix.java
/* Copyright 2007 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package hex.psvm.psvm; import water.Iced; import water.MemoryManager; class LLMatrix extends Iced<LLMatrix> { private final int _dim; private final double[][] _data; LLMatrix(int dim) { _dim = dim; _data = new double[dim][]; for (int i = 0; i < dim; i++) { _data[i] = new double[dim - i]; } } private int dim() { return _dim; } final double get(int x, int y) { return _data[y][x-y]; } final void set(int x, int y, double value) { _data[y][x-y] = value; } final void addUnitMat() { for (double[] col : _data) col[0] += 1; } double[] cholSolve(double[] b) { double[] x = MemoryManager.malloc8d(b.length); cholForwardSub(b, x); cholBackwardSub(x, b); return b; } private void cholBackwardSub(double[] b, double[] x) { final int dim = dim(); for (int k = dim - 1; k >= 0; k--) { double tmp = b[k]; for (int i = k + 1; i < dim; i++) { tmp -= x[i] * get(i, k); } x[k] = tmp / get(k, k); } } private void cholForwardSub(double[] b, double[] x) { final int dim = dim(); for (int k = 0; k < dim; ++k) { double tmp = b[k]; for (int i = 0; i < k; ++i) { tmp -= x[i] * get(k, i); } x[k] = tmp / get(k, k); } } LLMatrix cf() { final int dim = dim(); LLMatrix m = new LLMatrix(dim); for (int i = 0; i < dim; ++i) { for (int j = i; j < dim; ++j) { double sum = get(j, i); for (int k = i-1; k >= 0; --k) { sum -= m.get(i, k) * m.get(j, k); } if (i == j) { if (sum <= 0) { // sum should be larger than 0 throw new IllegalStateException("Only symmetric positive definite matrix can perform Cholesky factorization."); } m.set(i, i, Math.sqrt(sum)); } else { m.set(j, i, sum / m.get(i, i)); } } } return m; } }
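LLMatrix stores only the lower triangle: cf() is a Cholesky factorization of a symmetric positive definite matrix, and cholSolve solves (L L^T) x = b by forward substitution (L y = b) followed by backward substitution (L^T x = y). A dense sketch of that factor-and-solve pair on plain 2-D arrays:

// Dense sketch of LLMatrix.cf() + cholSolve(): factor A = L * L^T and solve A x = b by
// forward substitution (L y = b) then backward substitution (L^T x = y). Illustrative only.
public class CholeskySolveSketch {
  static double[][] cholesky(double[][] a) {
    int n = a.length;
    double[][] l = new double[n][n];
    for (int j = 0; j < n; j++) {
      for (int i = j; i < n; i++) {
        double sum = a[i][j];
        for (int k = 0; k < j; k++) sum -= l[i][k] * l[j][k];
        if (i == j) {
          if (sum <= 0) throw new IllegalStateException("matrix is not positive definite");
          l[j][j] = Math.sqrt(sum);
        } else {
          l[i][j] = sum / l[j][j];
        }
      }
    }
    return l;
  }

  static double[] solve(double[][] l, double[] b) {
    int n = b.length;
    double[] y = new double[n], x = new double[n];
    for (int i = 0; i < n; i++) {                     // forward: L y = b
      double s = b[i];
      for (int k = 0; k < i; k++) s -= l[i][k] * y[k];
      y[i] = s / l[i][i];
    }
    for (int i = n - 1; i >= 0; i--) {                // backward: L^T x = y
      double s = y[i];
      for (int k = i + 1; k < n; k++) s -= l[k][i] * x[k];
      x[i] = s / l[i][i];
    }
    return x;
  }

  public static void main(String[] args) {
    double[][] a = {{4, 2}, {2, 3}};
    double[] x = solve(cholesky(a), new double[]{2, 1});
    System.out.println(java.util.Arrays.toString(x)); // [0.5, 0.0]
  }
}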
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/psvm
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/psvm/psvm/MatrixUtils.java
package hex.psvm.psvm; import water.MRTask; import water.fvec.*; import water.util.ArrayUtils; /** * Utils class for matrix operations. See also {code DMatrix.java} * */ public class MatrixUtils { /** * Calculates matrix product M'DM * @param m Frame representing the M matrix (m x n), M' is expected to be lower triangular * @param diagonal Vec representation of a diagonal matrix (m x m) * @return lower triangular portion of the product (the product is a symmetrical matrix, only the lower portion is represented) */ public static LLMatrix productMtDM(Frame m, Vec diagonal) { Vec[] vecs = ArrayUtils.append(m.vecs(), diagonal); double result[] = new ProductMMTask().doAll(vecs)._result; LLMatrix product = new LLMatrix(m.numCols()); int pos = 0; for (int i = 0; i < m.numCols(); i++) { for (int j = 0; j <= i; j++) { product.set(i, j, result[pos++]); } } return product; } /** * Calculates matrix-vector product M'v * * @param m Frame representing matrix M (m x n) * @param v Vec representing vector v (m x 1) * @return m-element array representing the result of the product */ public static double[] productMtv(Frame m, Vec v) { Vec[] vecs = ArrayUtils.append(m.vecs(), v); return new ProductMtvTask().doAll(vecs)._result; } /** * Task for Matrix-Matrix product. Second Matrix is given as last columns in task input. */ private static class ProductMMTask extends MRTask<ProductMMTask> { // OUT private double[] _result; @Override public void map(Chunk[] cs) { final int column = cs.length - 1; final Chunk diagonal = cs[column]; _result = new double[(column + 1) * column / 2]; double[] buff = new double[cs[0]._len]; int offset = 0; for (int i = 0; i < column; i++) { offset += i; for (int p = 0; p < buff.length; p++) { buff[p] = cs[i].atd(p) * diagonal.atd(p); } for (int j = 0; j <= i; j++) { double sum = 0; for (int p = 0; p < buff.length; p++) { sum += buff[p] * cs[j].atd(p); } _result[offset+j] = sum; } } } @Override public void reduce(ProductMMTask mrt) { ArrayUtils.add(_result, mrt._result); } } /** * Task for Matrix-Vector product. Vector is given as last column in task input. */ static class ProductMtvTask extends MRTask<ProductMtvTask> { // OUT private double[] _result; @Override public void map(Chunk[] cs) { final int column = cs.length - 1; final Chunk v = cs[column]; _result = new double[column]; for (int j = 0; j < column; ++j) { double sum = 0.0; for (int i = 0; i < cs[0]._len; i++) { sum += cs[j].atd(i) * v.atd(i); } _result[j] = sum; } } @Override public void reduce(ProductMtvTask mrt) { ArrayUtils.add(_result, mrt._result); } } }
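MatrixUtils computes the two products the IPM solver needs in single passes over the rows: productMtDM returns the lower triangle of M^T D M (D is a diagonal matrix supplied as a Vec) and productMtv returns M^T v, which is why both map cleanly onto MRTasks. A dense sketch of both products on plain arrays:

// Dense sketch of the two products MatrixUtils computes over chunks:
//   productMtDM: lower triangle of M^T * D * M, with D diagonal given as a vector
//   productMtv:  M^T * v
// Plain arrays, illustrative only.
public class GramProductSketch {
  static double[][] productMtDM(double[][] m /* rows x n */, double[] d) {
    int n = m[0].length;
    double[][] out = new double[n][n];                // only the lower triangle is filled
    for (int r = 0; r < m.length; r++)
      for (int i = 0; i < n; i++)
        for (int j = 0; j <= i; j++)
          out[i][j] += m[r][i] * d[r] * m[r][j];
    return out;
  }

  static double[] productMtv(double[][] m, double[] v) {
    int n = m[0].length;
    double[] out = new double[n];
    for (int r = 0; r < m.length; r++)
      for (int j = 0; j < n; j++)
        out[j] += m[r][j] * v[r];
    return out;
  }

  public static void main(String[] args) {
    double[][] m = {{1, 2}, {3, 4}, {5, 6}};
    System.out.println(java.util.Arrays.deepToString(productMtDM(m, new double[]{1, 1, 1})));
    System.out.println(java.util.Arrays.toString(productMtv(m, new double[]{1, 0, -1}))); // [-4.0, -4.0]
  }
}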
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/psvm
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/psvm/psvm/PrimalDualIPM.java
/* Copyright 2007 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package hex.psvm.psvm; import water.Iced; import water.MRTask; import water.fvec.*; import water.util.ArrayUtils; import water.util.Log; /** * Implementation of Primal-Dual Interior Point Method based on https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/34638.pdf * * This implementation is based on and takes clues from the reference PSVM implementation in C++: * https://code.google.com/archive/p/psvm/source/default/source * original code: Copyright 2007 Google Inc., Apache License, Version 2.0 */ public class PrimalDualIPM { public static Vec solve(Frame rbicf, Vec label, Parms params, ProgressObserver observer) { checkLabel(label); Frame volatileWorkspace = makeVolatileWorkspace(label, "z", "xi", "dxi", "la", "dla", "tlx", "tux", "xilx", "laux", "d", "dx"); try { return solve(rbicf, label, params, volatileWorkspace, observer); } finally { volatileWorkspace.remove(); } } private static Vec solve(Frame rbicf, Vec label, Parms params, Frame volatileWorkspace, ProgressObserver observer) { Frame workspace = new Frame(new String[]{"label"}, new Vec[]{label}); workspace.add("x", label.makeZero()); workspace.add(volatileWorkspace); new InitTask(params).doAll(workspace); Vec z = workspace.vec("z"); Vec la = workspace.vec("la"); Vec xi = workspace.vec("xi"); Vec x = workspace.vec("x"); Vec dxi = workspace.vec("dxi"); Vec dla = workspace.vec("dla"); Vec d = workspace.vec("d"); Vec dx = workspace.vec("dx"); double nu = 0; boolean converged = false; final long num_constraints = rbicf.numRows() * 2; for (int iter = 0; iter < params._max_iter; iter++) { final double eta = new SurrogateGapTask(params).doAll(workspace)._sum; final double t = (params._mu_factor * num_constraints) / eta; Log.info("Surrogate gap before iteration " + iter + ": " + eta + "; t: " + t); computePartialZ(rbicf, x, params._tradeoff, z); CheckConvergenceTask cct = new CheckConvergenceTask(params, nu).doAll(workspace); Log.info("Residual (primal): " + cct._resp + "; residual (dual): " + cct._resd + ". Feasible threshold: " + params._feasible_threshold); converged = cct._resp <= params._feasible_threshold && cct._resd <= params._feasible_threshold && eta <= params._sgap_threshold; if (observer != null) { observer.reportProgress(iter, eta, cct._resp, cct._resd, converged); } if (converged) { break; } new UpdateVarsTask(params, t).doAll(workspace); LLMatrix icfA = MatrixUtils.productMtDM(rbicf, d); icfA.addUnitMat(); LLMatrix lra = icfA.cf(); final double dnu = computeDeltaNu(rbicf, d, label, z, x, lra); computeDeltaX(rbicf, d, label, dnu, lra, z, dx); LineSearchTask lst = new LineSearchTask(params).doAll(workspace); new MakeStepTask(lst._ap, lst._ad).doAll(x, dx, xi, dxi, la, dla); nu += lst._ad * dnu; } if (! converged) { Log.warn("The algorithm didn't converge in the maximum number of iterations. 
" + "Please consider changing the convergence parameters or increase the maximum number of iterations (" + params._max_iter + ")."); } volatileWorkspace.remove(); return x; } private static abstract class PDIPMTask<E extends PDIPMTask<E>> extends MRTask<E> { transient Chunk _label, _x; transient Chunk _z; transient Chunk _xi, _dxi, _la, _dla; transient Chunk _tlx, _tux, _xilx, _laux, _d; transient Chunk _dx; final double _c_pos; final double _c_neg; PDIPMTask(Parms params) { _c_pos = params._c_pos; _c_neg = params._c_neg; } @Override public void map(Chunk[] cs) { _label = cs[0]; _x = cs[1]; _z = cs[2]; _xi = cs[3]; _dxi = cs[4]; _la = cs[5]; _dla = cs[6]; _tlx = cs[7]; _tux = cs[8]; _xilx = cs[9]; _laux = cs[10]; _d = cs[11]; _dx = cs[12]; map(); } abstract void map(); } static class MakeStepTask extends MRTask<MakeStepTask> { double _ap; double _ad; MakeStepTask(double ap, double ad) { _ap = ap; _ad = ad; } @Override public void map(Chunk[] cs) { map(cs[0], cs[1], cs[2], cs[3], cs[4], cs[5]); } public void map(Chunk x, Chunk dx, Chunk xi, Chunk dxi, Chunk la, Chunk dla) { for (int i = 0; i < x._len; i++) { x.set(i, x.atd(i) + (_ap * dx.atd(i))); xi.set(i, xi.atd(i) + (_ad * dxi.atd(i))); la.set(i, la.atd(i) + (_ad * dla.atd(i))); } } } static class LineSearchTask extends PDIPMTask<LineSearchTask> { // OUT private double _ap; private double _ad; LineSearchTask(Parms params) { super(params); } @Override public void map() { map(_label, _tlx, _tux, _xilx, _laux, _xi, _la, _dx, _x, ((C8DVolatileChunk) _dxi).getValues(), ((C8DVolatileChunk) _dla).getValues()); } private void map(Chunk label, Chunk tlx, Chunk tux, Chunk xilx, Chunk laux, Chunk xi, Chunk la, Chunk dx, Chunk x, double[] dxi, double[] dla) { for (int i = 0; i < dxi.length; ++i) { dxi[i] = tlx.atd(i) - xilx.atd(i) * dx.atd(i) - xi.atd(i); dla[i] = tux.atd(i) + laux.atd(i) * dx.atd(i) - la.atd(i); } double ap = Double.MAX_VALUE; double ad = Double.MAX_VALUE; for (int i = 0; i < dxi.length; i++) { double c = (label.atd(i) > 0.0) ? 
_c_pos : _c_neg; if (dx.atd(i) > 0.0) { ap = Math.min(ap, (c - x.atd(i)) / dx.atd(i)); } if (dx.atd(i) < 0.0) { ap = Math.min(ap, -x.atd(i)/dx.atd(i)); } if (dxi[i] < 0.0) { ad = Math.min(ad, -xi.atd(i) / dxi[i]); } if (dla[i] < 0.0) { ad = Math.min(ad, -la.atd(i) / dla[i]); } } _ap = ap; _ad = ad; } @Override public void reduce(LineSearchTask mrt) { _ap = Math.min(_ap, mrt._ap); _ad = Math.min(_ad, mrt._ad); } @Override public void postGlobal() { _ap = Math.min(_ap, 1.0) * 0.99; _ad = Math.min(_ad, 1.0) * 0.99; } } private static void checkLabel(Vec label) { if (label.min() != -1 || label.max() != 1) throw new IllegalArgumentException("Expected a binary response encoded as +1/-1"); } static class UpdateVarsTask extends PDIPMTask<UpdateVarsTask> { private final double _epsilon_x; private final double _t; UpdateVarsTask(Parms params, double t) { super(params); _epsilon_x = params._x_epsilon; _t = t; } @Override void map() { for (int i = 0; i < _z._len; i++) { double c = (_label.atd(i) > 0) ?_c_pos : _c_neg; double m_lx = Math.max(_x.atd(i), _epsilon_x); double m_ux = Math.max(c - _x.atd(i), _epsilon_x); double tlxi = 1.0 / (_t * m_lx); double tuxi = 1.0 / (_t * m_ux); _tlx.set(i, tlxi); _tux.set(i, tuxi); double xilxi = Math.max(_xi.atd(i) / m_lx, _epsilon_x); double lauxi = Math.max(_la.atd(i) / m_ux, _epsilon_x); _d.set(i, 1.0 / (xilxi + lauxi)); _xilx.set(i, xilxi); _laux.set(i, lauxi); _z.set(i, tlxi - tuxi - _z.atd(i)); } } } static class CheckConvergenceTask extends PDIPMTask<CheckConvergenceTask> { private final double _nu; // OUT double _resd; double _resp; CheckConvergenceTask(Parms params, double nu) { super(params); _nu = nu; } @Override void map() { for (int i = 0; i < _z._len; i++) { double zi = _z.atd(i); zi += _nu * (_label.atd(i) > 0 ? 1 : -1) - 1.0; double temp = _la.atd(i) - _xi.atd(i) + zi; _z.set(i, zi); _resd += temp * temp; _resp += _label.atd(i) * _x.atd(i); } } @Override public void reduce(CheckConvergenceTask mrt) { _resd += mrt._resd; _resp += mrt._resp; } @Override protected void postGlobal() { _resp = Math.abs(_resp); _resd = Math.sqrt(_resd); } } private static void computePartialZ(Frame rbicf, Vec x, final double tradeoff, Vec z) { final double vz[] = MatrixUtils.productMtv(rbicf, x); new MRTask() { @Override public void map(Chunk[] cs) { final int p = cs.length - 2; final Chunk x = cs[p]; final Chunk z = cs[p + 1]; for (int i = 0; i < cs[0]._len; i++) { double s = 0; for (int j = 0; j < p; j++) { s += cs[j].atd(i) * vz[j]; } z.set(i, s - tradeoff * x.atd(i)); } } }.doAll(ArrayUtils.append(rbicf.vecs(), x, z)); } static class SurrogateGapTask extends PDIPMTask<SurrogateGapTask> { // OUT private double _sum; SurrogateGapTask(Parms params) { super(params); } @Override void map() { double s = 0; for (int i = 0; i < _x._len; i++) { double c = (_label.atd(i) > 0.0) ? _c_pos : _c_neg; s += _la.atd(i) * c; } for (int i = 0; i < _x._len; i++) { s += _x.atd(i) * (_xi.atd(i) - _la.atd(i)); } _sum = s; } @Override public void reduce(SurrogateGapTask mrt) { _sum += mrt._sum; } } static class InitTask extends PDIPMTask<InitTask> { InitTask(Parms params) { super(params); } @Override public void map() { for (int i = 0; i < _label._len; i++) { double c = ((_label.atd(i) > 0) ? 
_c_pos : _c_neg) / 10; _la.set(i, c); _xi.set(i, c); } } } private static void computeDeltaX(Frame icf, Vec d, Vec label, final double dnu, LLMatrix lra, Vec z, Vec dx) { Vec tz = new TransformWrappedVec(new Vec[]{z, label}, new LinearCombTransformFactory(1.0, -dnu)); try { linearSolveViaICFCol(icf, d, tz, lra, dx); } finally { tz.remove(); } } private static class LinearCombTransformFactory extends Iced<LinearCombTransformFactory> implements TransformWrappedVec.TransformFactory<LinearCombTransformFactory> { private final double[] _coefs; public LinearCombTransformFactory() { // to avoid the "Externalizable" warning _coefs = new double[0]; } LinearCombTransformFactory(double... coefs) { _coefs = coefs; } @Override public TransformWrappedVec.Transform create(int n_inputs) { if (n_inputs != _coefs.length) { throw new IllegalArgumentException("Expected " + _coefs.length + " inputs, got: " + n_inputs); } return new LinearCombTransform(_coefs); } } private static class LinearCombTransform implements TransformWrappedVec.Transform { private final double[] _coefs; double _sum; LinearCombTransform(double[] coefs) { _coefs = coefs; } @Override public void reset() { _sum = 0; } @Override public void setInput(int i, double value) { _sum += value * _coefs[i]; } @Override public double apply() { return _sum; } } private static double computeDeltaNu(Frame icf, Vec d, Vec label, Vec z, Vec x, LLMatrix lra) { double[] vz = partialLinearSolveViaICFCol(icf, d, z, lra); double[] vl = partialLinearSolveViaICFCol(icf, d, label, lra); DeltaNuTask dnt = new DeltaNuTask(vz, vl).doAll(ArrayUtils.append(icf.vecs(), d, z, label, x)); return dnt._sum1 / dnt._sum2; } static class DeltaNuTask extends MRTask<DeltaNuTask> { // IN private final double[] _vz; private final double[] _vl; // OUT double _sum1; double _sum2; DeltaNuTask(double[] vz, double[] vl) { _vz = vz; _vl = vl; } public void map(Chunk[] cs) { final int p = cs.length - 4; Chunk d = cs[p]; Chunk z = cs[p + 1]; Chunk label = cs[p + 2]; Chunk x = cs[p + 3]; for (int i = 0; i < label._len; i++) { double tw = z.atd(i); double tl = label.atd(i); for (int j = 0; j < p; j++) { tw -= cs[j].atd(i) * _vz[j]; tl -= cs[j].atd(i) * _vl[j]; } _sum1 += label.atd(i) * (tw * d.atd(i) + x.atd(i)); _sum2 += label.atd(i) * tl * d.atd(i); } } @Override public void reduce(DeltaNuTask mrt) { _sum1 += mrt._sum1; _sum2 += mrt._sum2; } } private static double[] partialLinearSolveViaICFCol(Frame icf, Vec d, Vec b, LLMatrix lra) { final double[] vz = new LSHelper1(false).doAll(ArrayUtils.append(icf.vecs(), d, b))._row; return lra.cholSolve(vz); } private static void linearSolveViaICFCol(Frame icf, Vec d, Vec b, LLMatrix lra, Vec out) { final double tmp[] = new LSHelper1(true).doAll(ArrayUtils.append(icf.vecs(), d, b, out))._row; final double[] vz = lra.cholSolve(tmp); new MRTask() { @Override public void map(Chunk[] cs) { final int p = cs.length - 2; Chunk d = cs[p]; Chunk x = cs[p + 1]; for (int i = 0; i < cs[0]._len; i++) { double s = 0.0; for (int j = 0; j < p; j++) { s += cs[j].atd(i) * vz[j] * d.atd(i); } x.set(i, x.atd(i) - s); } } }.doAll(ArrayUtils.append(icf.vecs(), d, out)); } static class LSHelper1 extends MRTask<LSHelper1> { // IN private final boolean _output_z; // OUT double[] _row; LSHelper1(boolean output_z) { _output_z = output_z; } @Override public void map(Chunk[] cs) { final int p = cs.length - (_output_z ? 3 : 2); _row = new double[p]; Chunk d = cs[p]; Chunk b = cs[p + 1]; double[] z = _output_z ? 
((C8DVolatileChunk) cs[p + 2]).getValues() : new double[d._len]; for (int i = 0; i < z.length; i++) { z[i] = b.atd(i) * d.atd(i); } for (int j = 0; j < p; j++) { double s = 0.0; for (int i = 0; i < z.length; i++) { s += cs[j].atd(i) * z[i]; } _row[j] = s; } } @Override public void reduce(LSHelper1 mrt) { ArrayUtils.add(_row, mrt._row); } } public static class Parms { public Parms() { super(); } public Parms(double c_pos, double c_neg) { _c_pos = c_pos; _c_neg = c_neg; } public int _max_iter = 200; public double _mu_factor = 10.0; public double _tradeoff = 0; public double _feasible_threshold = 1.0e-3; public double _sgap_threshold = 1.0e-3; public double _x_epsilon = 1.0e-9; public double _c_neg = Double.NaN; public double _c_pos = Double.NaN; } private static Frame makeVolatileWorkspace(Vec blueprintVec, String... names) { return new Frame(names, blueprintVec.makeVolatileDoubles(names.length)); } public interface ProgressObserver { void reportProgress(int iter, double sgap, double resp, double resd, boolean converged); } }
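// --- Illustrative sketch (not part of PrimalDualIPM.java) -------------------
// LineSearchTask above picks primal/dual step lengths with a standard
// "fraction to the boundary" rule: the largest step that keeps every variable
// strictly feasible, capped at 1 and shrunk by 0.99. The helper below
// reproduces that rule for a single nonnegative dual vector; the standalone
// setting and the names are illustrative, not the H2O API.
class StepLengthSketch {
  /** Largest alpha in (0, 1] with v + alpha*dv >= 0 for all i, scaled by 0.99. */
  static double dualStepLength(double[] v, double[] dv) {
    double alpha = Double.MAX_VALUE;
    for (int i = 0; i < v.length; i++) {
      if (dv[i] < 0.0) {
        alpha = Math.min(alpha, -v[i] / dv[i]); // step at which coordinate i would hit zero
      }
    }
    return Math.min(alpha, 1.0) * 0.99; // same capping/backoff as LineSearchTask.postGlobal()
  }

  public static void main(String[] args) {
    double[] la = {0.2, 0.5, 0.1};
    double[] dla = {-0.4, 0.3, -0.05};
    System.out.println(dualStepLength(la, dla)); // limited by the first coordinate: 0.5 * 0.99
  }
}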
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/rulefit/Condition.java
package hex.rulefit; import water.Iced; import water.fvec.*; import water.parser.BufferedString; import water.util.ArrayUtils; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Objects; import java.util.stream.Collectors; import static hex.rulefit.Condition.Type.Numerical; public class Condition extends Iced { public enum Type {Categorical, Numerical} public enum Operator {LessThan, GreaterThanOrEqual, In} int featureIndex; Type type; public Operator operator; public String featureName; public boolean NAsIncluded; public String languageCondition; public double numTreshold; public String[] languageCatTreshold; public int[] catTreshold; public Condition(int featureIndex, Type type, Operator operator, double numTreshold, String[] languageCatTreshold, int[] catTreshold, String featureName, boolean NAsIncluded) { this.featureIndex = featureIndex; this.type = type; this.operator = operator; this.featureName = featureName; this.NAsIncluded = NAsIncluded; this.numTreshold = numTreshold; this.languageCatTreshold = languageCatTreshold; this.catTreshold = catTreshold; } public int getFeatureIndex() { return featureIndex; } public Type getType() { return type; } public Operator getOperator() { return operator; } public boolean isNAsIncluded() { return NAsIncluded; } public int getNumCatTreshold() { return catTreshold.length; } public double getNumTreshold() { return numTreshold; } String constructLanguageCondition() { StringBuilder description = new StringBuilder(); description.append("(").append(this.featureName); if (Operator.LessThan.equals(this.operator)) { description.append(" < ").append(this.numTreshold); } else if (Operator.GreaterThanOrEqual.equals(this.operator)) { description.append(" >= ").append(this.numTreshold); } else if (Operator.In.equals(this.operator)) { description.append(" in {"); for (int i = 0; i < languageCatTreshold.length; i++) { if (i != 0) description.append(", "); description.append(languageCatTreshold[i]); } description.append("}"); } if (this.NAsIncluded) { description.append(" or ").append(this.featureName).append(" is NA"); } description.append(")"); return description.toString(); } @Override public boolean equals(Object obj) { if (!(obj instanceof Condition)) return false; Condition condition = (Condition) obj; if (Numerical.equals(condition.type)) { return (this.featureIndex == condition.featureIndex && this.operator == condition.operator && this.featureName.equals(condition.featureName) && Math.abs(this.numTreshold - condition.numTreshold) < 1e-5 && this.type == condition.type); } else { return (this.NAsIncluded == condition.NAsIncluded && this.operator == condition.operator && Arrays.equals(this.catTreshold, condition.catTreshold) && this.featureIndex == condition.featureIndex && this.featureName.equals(condition.featureName) && Arrays.equals(this.languageCatTreshold, condition.languageCatTreshold) && this.type == condition.type); } } @Override public int hashCode() { if (Numerical.equals(type)) { int result = Objects.hash(featureIndex, type, operator, featureName, numTreshold); return result; } else { int result = Objects.hash(featureIndex, type, operator, featureName, NAsIncluded); result = 31 * result + Arrays.hashCode(languageCatTreshold); result = 31 * result + Arrays.hashCode(catTreshold); return result; } } public void map(Chunk[] cs, byte[] out) { Chunk col = cs[Condition.this.featureIndex]; for (int iRow = 0; iRow < col._len; ++iRow) { if (out[iRow] == 0) continue; byte newVal = 0; boolean isNA = col.isNA(iRow); 
// check whether condition is fulfilled: if (Condition.this.NAsIncluded && isNA) { newVal = 1; } else if (!isNA) { if (Numerical.equals(Condition.this.type)) { if (Condition.Operator.LessThan.equals(Condition.this.operator)) { if (col.atd(iRow) < Condition.this.numTreshold) { newVal = 1; } } else if (Condition.Operator.GreaterThanOrEqual.equals(Condition.this.operator)) { if (col.atd(iRow) >= Condition.this.numTreshold) { newVal = 1; } } } else if (Condition.Type.Categorical.equals(Condition.this.type)) { BufferedString tmpStr = new BufferedString(); for (int i = 0; i < Condition.this.catTreshold.length; i++) { // for string vecs if (col instanceof CStrChunk) { if (ArrayUtils.contains(Condition.this.languageCatTreshold, col.atStr(tmpStr,iRow))) { newVal = 1; } // for other categorical vecs } else if (Condition.this.catTreshold[i] == col.atd(iRow)) { newVal = 1; } } } } out[iRow] = newVal; } } Condition expandBy(Condition otherCondition) { assert this.type.equals(otherCondition.type); assert this.operator.equals(otherCondition.operator); assert this.featureIndex == otherCondition.featureIndex; assert this.featureName.equals(otherCondition.featureName); double expandedNumThreshold; String[] expandedlanguageCatTreshold; int[] expandedCatTreshold; boolean expandedNAsIncluded = false; if (this.type.equals(Type.Categorical)) { expandedNumThreshold = -1; List<String> expandedLanguageCatTresholdList = new ArrayList<>(); List<Integer> expandedCatTresholdList = new ArrayList<>(); expandedLanguageCatTresholdList.addAll(Arrays.asList(this.languageCatTreshold)); expandedCatTresholdList.addAll(Arrays.stream(this.catTreshold).boxed().collect(Collectors.toList())); for (int i = 0; i < otherCondition.catTreshold.length; i++) { if (!expandedCatTresholdList.contains(otherCondition.catTreshold[i])) { expandedCatTresholdList.add(otherCondition.catTreshold[i]); expandedLanguageCatTresholdList.add(otherCondition.languageCatTreshold[i]); } } expandedlanguageCatTreshold = expandedLanguageCatTresholdList.toArray(new String[0]); expandedCatTreshold = expandedCatTresholdList.stream().mapToInt(i->i).toArray(); } else { if (Operator.LessThan.equals(this.operator)) { expandedNumThreshold = Double.max(this.numTreshold, otherCondition.numTreshold); } else { assert Operator.GreaterThanOrEqual.equals(this.operator); expandedNumThreshold = Double.min(this.numTreshold, otherCondition.numTreshold); } expandedlanguageCatTreshold = null; expandedCatTreshold = null; } if (this.NAsIncluded || otherCondition.NAsIncluded) expandedNAsIncluded = true; return new Condition(this.featureIndex, this.type, this.operator, expandedNumThreshold, expandedlanguageCatTreshold, expandedCatTreshold, this.featureName, expandedNAsIncluded); } }
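// --- Illustrative sketch (not part of Condition.java) -----------------------
// Condition.map(...) above only keeps a row marked as 1 if it already passed
// every previous condition (out[iRow] != 0). The stand-alone version below
// applies a single numeric "less than" condition to a plain double column,
// with NaN standing in for NA; names are illustrative only.
class NumericConditionSketch {
  static void applyLessThan(double[] column, double threshold, boolean naIncluded, byte[] out) {
    for (int i = 0; i < column.length; i++) {
      if (out[i] == 0) continue;              // row already rejected by an earlier condition
      boolean isNA = Double.isNaN(column[i]);
      byte val = 0;
      if (naIncluded && isNA) val = 1;        // NA counts as a match when NAsIncluded is set
      else if (!isNA && column[i] < threshold) val = 1;
      out[i] = val;
    }
  }

  public static void main(String[] args) {
    double[] col = {1.0, Double.NaN, 7.5, 3.0};
    byte[] out = {1, 1, 1, 0};                // last row already excluded by a previous condition
    applyLessThan(col, 5.0, false, out);
    System.out.println(java.util.Arrays.toString(out)); // [1, 0, 0, 0]
  }
}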
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/rulefit/Rule.java
package hex.rulefit; import hex.genmodel.algos.tree.SharedTreeNode; import hex.genmodel.algos.tree.SharedTreeSubgraph; import hex.tree.SharedTreeModel; import org.apache.commons.math3.util.Precision; import water.Iced; import water.fvec.Chunk; import java.math.BigDecimal; import java.util.*; import java.util.stream.Collectors; public class Rule extends Iced { Condition[] conditions; double predictionValue; String languageRule; double coefficient; String varName; double support; public Rule(Condition[] conditions, double predictionValue, String varName) { this.conditions = conditions; this.predictionValue = predictionValue; this.varName = varName; } public Rule(Condition[] conditions, double predictionValue, String varName, double coefficient, double support) { this.conditions = conditions; this.predictionValue = predictionValue; this.varName = varName; this.coefficient = coefficient; this.support = support; } public void setCoefficient(double coefficient) { this.coefficient = coefficient; } public void setVarName(String varName) { this.varName = varName; } String generateLanguageRule() { StringBuilder languageRule = new StringBuilder(); if (!this.varName.startsWith("linear.")) { for (int i = 0; i < conditions.length; i++) { if (i != 0) languageRule.append(" & "); languageRule.append(conditions[i].constructLanguageCondition()); } } return languageRule.toString(); } public void map(Chunk[] cs, byte[] out) { for (Condition c : conditions) { c.map(cs, out); } } // two non-linear rules are considered equal when they have the same condition but can differ in varname/pred value/coefficient // two linear rules (conditions == null) are considered equal when they have the same varname @Override public boolean equals(Object obj) { if (!(obj instanceof Rule)) return false; Rule rule = (Rule) obj; if (this.conditions == null && rule.conditions == null) { if (this.varName == rule.varName) { return true; } return false; } if ((this.conditions == null && rule.conditions != null) || (this.conditions != null && rule.conditions == null)) { return false; } if (!Arrays.asList(rule.conditions).containsAll(Arrays.asList(conditions))) { return false; } return Math.abs(this.support - rule.support) < 1e-5; } @Override public int hashCode() { int result = Objects.hash(Precision.round(support, 5, BigDecimal.ROUND_HALF_UP)); if (conditions != null) { Condition[] sorted = Arrays.asList(conditions).stream().sorted( Comparator.comparing(Condition::getFeatureIndex) .thenComparing(Condition::isNAsIncluded) .thenComparing(Condition::getType) .thenComparing(Condition::getOperator) .thenComparing(Condition::getNumCatTreshold) .thenComparing(Condition::getNumTreshold) ).collect(Collectors.toList()).toArray(new Condition[0]); result = 31 * result + Arrays.hashCode(sorted); } else { result = 31 * result + varName.hashCode(); } return result; } public static List<Rule> extractRulesListFromModel(SharedTreeModel model, int modelId, int nclasses) { List<Rule> rules = new ArrayList<>(); nclasses = nclasses > 2 ? nclasses : 1; for (int i = 0; i < ((SharedTreeModel.SharedTreeParameters) model._parms)._ntrees; i++) { for (int treeClass = 0; treeClass < nclasses; treeClass++) { SharedTreeSubgraph sharedTreeSubgraph = model.getSharedTreeSubgraph(i, treeClass); if (sharedTreeSubgraph == null) continue; String classString = nclasses > 2 ? 
"_" + model._output.classNames()[treeClass] : null; rules.addAll(extractRulesFromTree(sharedTreeSubgraph, modelId, classString)); } } return rules; } public static Set<Rule> extractRulesFromTree(SharedTreeSubgraph tree, int modelId, String classString) { Set<Rule> rules = new HashSet<>(); // filter leaves List<SharedTreeNode> leaves = tree.nodesArray.stream().filter(sharedTreeNode -> sharedTreeNode.isLeaf()).collect(Collectors.toList()); // traverse paths for (SharedTreeNode leaf : leaves) { String varName = "M" + modelId + "T" + leaf.getSubgraphNumber() + "N" + leaf.getNodeNumber(); if (classString != null) { varName += classString; } traversePath(leaf, rules, varName, leaf.getPredValue()); } return rules; } private static void traversePath(SharedTreeNode node, List<Condition> conditions, Set<Rule> rules, String varName, double predValue) { SharedTreeNode parent = node.getParent(); if (parent == null) { conditions = conditions.stream().sorted(Comparator.comparing(condition -> condition.featureName)).collect(Collectors.toList()); rules.add(new Rule(conditions.toArray(new Condition[]{}), predValue, varName)); } else { Condition actualCondition; Condition newCondition; String featureName = parent.getColName(); int colId = parent.getColId(); if (node.getInclusiveLevels() != null && parent.getDomainValues() != null) { // categorical condition actualCondition = getConditionByFeatureNameAndOperator(conditions, parent.getColName(), Condition.Operator.In); CategoricalThreshold categoricalThreshold = extractCategoricalThreshold(node.getInclusiveLevels(), parent.getDomainValues()); newCondition = new Condition(colId, Condition.Type.Categorical, Condition.Operator.In, -1, categoricalThreshold.catThreshold, categoricalThreshold.catThresholdNum, featureName, node.isInclusiveNa()); } else { float splitValue = parent.getSplitValue(); Condition.Operator operator = parent.getLeftChild().equals(node) ? 
Condition.Operator.LessThan : Condition.Operator.GreaterThanOrEqual; actualCondition = getConditionByFeatureNameAndOperator(conditions, parent.getColName(), operator); newCondition = new Condition(colId, Condition.Type.Numerical, operator, splitValue, null, null, featureName, node.isInclusiveNa()); } if (actualCondition == null ) { conditions.add(newCondition); } else { actualCondition = actualCondition.expandBy(newCondition); } traversePath(node.getParent(), conditions, rules, varName, predValue); } } private static void traversePath(SharedTreeNode node, Set<Rule> rules, String varName, double predValue) { traversePath(node, new ArrayList<>(), rules, varName, predValue); } private static Condition getConditionByFeatureNameAndOperator(List<Condition> conditions, String featureName, Condition.Operator operator) { List<Condition> filteredConditions = conditions.stream().filter(condition -> condition.featureName.equals(featureName) && condition.operator.equals(operator)).collect(Collectors.toList()); if (filteredConditions.size() != 0) { return filteredConditions.get(0); } else { return null; } } static CategoricalThreshold extractCategoricalThreshold(BitSet inclusiveLevels, String[] domainValues) { List<Integer> matchedDomainValues = new ArrayList<>(); String[] catThreshold = new String[inclusiveLevels.cardinality()]; int[] catThresholdNum = new int[inclusiveLevels.cardinality()]; for (int i = inclusiveLevels.nextSetBit(0); i >= 0; i = inclusiveLevels.nextSetBit(i+1)) { matchedDomainValues.add(i); } for (int i = 0; i < catThreshold.length; i++) { catThreshold[i] = domainValues[matchedDomainValues.get(i)]; catThresholdNum[i] = matchedDomainValues.get(i); } return new CategoricalThreshold(catThreshold, catThresholdNum); } static class CategoricalThreshold { String[] catThreshold; int[] catThresholdNum; public CategoricalThreshold(String[] catThreshold, int[] catThresholdNum) { this.catThreshold = catThreshold; this.catThresholdNum = catThresholdNum; } } double getAbsCoefficient() { return Math.abs(coefficient); } }
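// --- Illustrative sketch (not part of Rule.java) ----------------------------
// A Rule is a conjunction of its Conditions: Rule.map(...) simply lets each
// Condition zero out the rows it rejects, so whatever is still 1 satisfies the
// whole root-to-leaf path. The stand-alone version below ANDs per-condition row
// masks in the same way; the arrays and names are illustrative.
class RuleConjunctionSketch {
  /** out starts at all 1s; each mask zeroes the rows its condition rejects. */
  static byte[] conjoin(int nRows, byte[]... conditionMasks) {
    byte[] out = new byte[nRows];
    java.util.Arrays.fill(out, (byte) 1);
    for (byte[] mask : conditionMasks) {
      for (int i = 0; i < nRows; i++) {
        if (out[i] == 1 && mask[i] == 0) out[i] = 0;
      }
    }
    return out;
  }

  public static void main(String[] args) {
    byte[] c1 = {1, 1, 0, 1};
    byte[] c2 = {1, 0, 1, 1};
    System.out.println(java.util.Arrays.toString(conjoin(4, c1, c2))); // [1, 0, 0, 1]
  }
}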
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/rulefit/RuleEnsemble.java
package hex.rulefit; import water.*; import water.fvec.Chunk; import water.fvec.Frame; import water.fvec.NewChunk; import water.fvec.Vec; import water.util.VecUtils; import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; public class RuleEnsemble extends Iced { Rule[] rules; public RuleEnsemble(Rule[] rules) { this.rules = rules; } public Frame createGLMTrainFrame(Frame frame, int depth, int ntrees, String[] classNames, String weights, boolean calculateSupport) { Frame glmTrainFrame = new Frame(); // filter rules and create a column for each tree boolean isMultinomial = classNames != null && classNames.length > 2; int nclasses = isMultinomial ? classNames.length : 1; for (int i = 0; i < depth; i++) { for (int j = 0; j < ntrees; j++) { for (int k = 0; k < nclasses; k++) { // filter rules according to varname // varname is of structure "M" + modelId + "T" + node.getSubgraphNumber() + "N" + node.getNodeNumber() String regex = "M" + i + "T" + j + "N" + "\\d+"; if (isMultinomial) { regex += "_" + classNames[k]; } String finalRegex = regex; List<Rule> filteredRules = Arrays.stream(rules) .filter(rule -> rule.varName.matches(finalRegex)) .collect(Collectors.toList()); if (filteredRules.size() == 0) continue; RuleEnsemble ruleEnsemble = new RuleEnsemble(filteredRules.toArray(new Rule[]{})); Frame frameToMakeCategorical = ruleEnsemble.transform(frame); if (calculateSupport) { calculateSupport(ruleEnsemble, frameToMakeCategorical, weights != null ? frame.vec(weights) : null); } try { Decoder mrtask = new Decoder(); Vec catCol = mrtask.doAll(1, Vec.T_CAT, frameToMakeCategorical) .outputFrame(null, null, new String[][]{frameToMakeCategorical.names()}).vec(0); String name = isMultinomial ? "M" + i + "T" + j + "C" + k : "M" + i + "T" + j; glmTrainFrame.add(name, catCol); } finally { frameToMakeCategorical.remove(); } } } } return glmTrainFrame; } public Frame transform(Frame frame) { RuleEnsembleConverter rc = new RuleEnsembleConverter(new String[rules.length]); Frame transformedFrame = rc.doAll(rules.length, Vec.T_NUM, frame).outputFrame(); transformedFrame.setNames(rc._names); return transformedFrame; } class RuleEnsembleConverter extends MRTask<RuleEnsembleConverter> { String[] _names; RuleEnsembleConverter(String[] names) { _names = names; } @Override public void map(Chunk[] cs, NewChunk[] nc) { byte[] out = MemoryManager.malloc1(cs[0].len()); for (int i = 0; i < rules.length; i++) { Arrays.fill(out, (byte) 1); rules[i].map(cs, out); _names[i] = rules[i].varName; for (byte b : out) { nc[i].addNum(b); } } } } public Rule getRuleByVarName(String code) { List<Rule> filteredRule = Arrays.stream(this.rules) .filter(rule -> code.equals(String.valueOf(rule.varName))) .collect(Collectors.toList()); if (filteredRule.size() == 1) return filteredRule.get(0); else if (filteredRule.size() > 1) { throw new RuntimeException("Multiple rules with the same varName in RuleEnsemble!"); } else { throw new RuntimeException("No rule with varName " + code + " found!"); } } static class Decoder extends MRTask<Decoder> { Decoder() { super(); } @Override public void map(Chunk[] cs, NewChunk[] ncs) { for (int iRow = 0; iRow < cs[0].len(); iRow++) { int newValue = -1; for (int iCol = 0; iCol < cs.length; iCol++) { if (cs[iCol].at8(iRow) == 1) { newValue = iCol; } } if (newValue >= 0) ncs[0].addNum(newValue); else ncs[0].addNA(); } } } public int size() { return rules.length; } void calculateSupport(RuleEnsemble ruleEnsemble, Frame frameToMakeCategorical, Vec weights) { for (Rule rule : 
ruleEnsemble.rules) { if (weights != null) { Frame result = new VecUtils.SequenceProduct() .doAll(Vec.T_NUM, frameToMakeCategorical.vec(rule.varName), weights) .outputFrame(); rule.support = result.vec(0).sparseRatio(); result.remove(); } else { rule.support = frameToMakeCategorical.vec(rule.varName).sparseRatio(); } } } }
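// --- Illustrative sketch (not part of RuleEnsemble.java) --------------------
// Decoder above collapses the block of 0/1 rule columns produced by one tree
// into a single categorical column: for each row it records the index of the
// (last) rule column that fired, or NA when none fired. A plain-array version,
// with -1 standing in for NA (illustrative names only):
class DecoderSketch {
  static int[] decode(int[][] indicatorColumns /* [column][row] */) {
    int nRows = indicatorColumns[0].length;
    int[] decoded = new int[nRows];
    for (int row = 0; row < nRows; row++) {
      int value = -1; // -1 plays the role of addNA()
      for (int col = 0; col < indicatorColumns.length; col++) {
        if (indicatorColumns[col][row] == 1) value = col;
      }
      decoded[row] = value;
    }
    return decoded;
  }

  public static void main(String[] args) {
    int[][] cols = {{1, 0, 0}, {0, 1, 0}};
    System.out.println(java.util.Arrays.toString(decode(cols))); // [0, 1, -1]
  }
}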
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/rulefit/RuleFit.java
package hex.rulefit; import hex.*; import hex.genmodel.utils.DistributionFamily; import hex.glm.GLM; import hex.glm.GLMModel; import hex.tree.SharedTree; import hex.tree.SharedTreeModel; import hex.tree.TreeStats; import hex.tree.drf.DRF; import hex.tree.drf.DRFModel; import hex.tree.gbm.GBM; import hex.tree.gbm.GBMModel; import org.apache.log4j.Logger; import water.*; import water.exceptions.H2OModelBuilderIllegalArgumentException; import water.fvec.Frame; import water.util.ArrayUtils; import water.util.TwoDimTable; import java.util.*; import java.util.stream.Collectors; import static hex.genmodel.utils.ArrayUtils.difference; import static hex.genmodel.utils.ArrayUtils.signum; import static hex.rulefit.RuleFitUtils.*; import static hex.util.DistributionUtils.distributionToFamily; /** * Rule Fit<br> * http://statweb.stanford.edu/~jhf/ftp/RuleFit.pdf * https://github.com/h2oai/h2o-tutorials/blob/8df6b492afa172095e2595922f0b67f8d715d1e0/best-practices/explainable-models/rulefit.py */ public class RuleFit extends ModelBuilder<RuleFitModel, RuleFitModel.RuleFitParameters, RuleFitModel.RuleFitOutput> { private static final Logger LOG = Logger.getLogger(RuleFit.class); protected static final long WORK_TOTAL = 1000000; private SharedTreeModel.SharedTreeParameters treeParameters = null; private GLMModel.GLMParameters glmParameters = null; @Override public ModelCategory[] can_build() { return new ModelCategory[]{ ModelCategory.Regression, ModelCategory.Binomial, ModelCategory.Multinomial }; } @Override public boolean isSupervised() { return true; } /** * Start the KMeans training Job on an F/J thread. */ @Override protected RuleFitDriver trainModelImpl() { return new RuleFitDriver(); } // Called from an http request public RuleFit(RuleFitModel.RuleFitParameters parms) { super(parms); init(false); } public RuleFit(boolean startup_once) { super(new RuleFitModel.RuleFitParameters(), startup_once); } @Override public void init(boolean expensive) { super.init(expensive); if (expensive) { _parms.validate(this); if (_parms._fold_column != null) { _train.remove(_parms._fold_column); } if (_parms._algorithm == RuleFitModel.Algorithm.AUTO) { _parms._algorithm = RuleFitModel.Algorithm.DRF; } initTreeParameters(); initGLMParameters(); ignoreBadColumns(separateFeatureVecs(), true); } // if (_train == null) return; // if (expensive && error_count() == 0) checkMemoryFootPrint(); } private void initTreeParameters() { if (_parms._algorithm == RuleFitModel.Algorithm.GBM) { treeParameters = new GBMModel.GBMParameters(); } else if (_parms._algorithm == RuleFitModel.Algorithm.DRF) { treeParameters = new DRFModel.DRFParameters(); } else { throw new RuntimeException("Unsupported algorithm for tree building: " + _parms._algorithm); } treeParameters._response_column = _parms._response_column; treeParameters._train = _parms._train; treeParameters._ignored_columns = _parms._ignored_columns; treeParameters._seed = _parms._seed; treeParameters._weights_column = _parms._weights_column; treeParameters._distribution = _parms._distribution; treeParameters._ntrees = _parms._rule_generation_ntrees; treeParameters._max_categorical_levels = _parms._max_categorical_levels; treeParameters._categorical_encoding = Model.Parameters.CategoricalEncodingScheme.EnumLimited; } private void initGLMParameters() { if (_parms._distribution == DistributionFamily.AUTO) { if (_nclass < 2) { // regression glmParameters = new GLMModel.GLMParameters(GLMModel.GLMParameters.Family.gaussian); } else if (_nclass == 2) { // binomial classification 
glmParameters = new GLMModel.GLMParameters(GLMModel.GLMParameters.Family.binomial); } else { // multinomial classification glmParameters = new GLMModel.GLMParameters(GLMModel.GLMParameters.Family.multinomial); } } else { glmParameters = new GLMModel.GLMParameters(distributionToFamily(_parms._distribution)); } if (RuleFitModel.ModelType.RULES_AND_LINEAR.equals(_parms._model_type) && _parms._ignored_columns != null) { glmParameters._ignored_columns = _parms._ignored_columns; } glmParameters._response_column = "linear." + _parms._response_column; glmParameters._seed = _parms._seed; // alpha ignored - set to 1 by rulefit (Lasso) glmParameters._alpha = new double[]{1}; if (_parms._weights_column != null) { glmParameters._weights_column = "linear." + _parms._weights_column; } glmParameters._auc_type = _parms._auc_type; if (_parms._lambda != null) { glmParameters._lambda = _parms._lambda; } glmParameters._ignore_const_cols = false; } private final class RuleFitDriver extends Driver { // Main worker thread @Override public void computeImpl() { String[] dataFromRulesCodes = null; RuleFitModel model = null; GLMModel glmModel; List<Rule> rulesList; RuleEnsemble ruleEnsemble = null; int ntrees = 0; TreeStats overallTreeStats = new TreeStats(); String[] classNames = null; init(true); if (error_count() > 0) throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(RuleFit.this); try { // linearTrain = frame to be used as _train for GLM in 2., will be filled in 1. Frame linearTrain = new Frame(Key.make("paths_frame" + _result)); Frame linearValid = (_valid != null ? new Frame(Key.make("valid_paths_frame" + _result)) : null); // store train frame without bad columns to pass it to tree model builders Frame trainAdapted = new Frame(_train); // 1. Rule generation // get paths from tree models int[] depths = range(_parms._min_rule_length, _parms._max_rule_length); // prepare rules if (RuleFitModel.ModelType.RULES_AND_LINEAR.equals(_parms._model_type) || RuleFitModel.ModelType.RULES.equals(_parms._model_type)) { DKV.put(trainAdapted._key, trainAdapted); treeParameters._train = trainAdapted._key; long startAllTreesTime = System.nanoTime(); SharedTree<?, ?, ?>[] builders = ModelBuilderHelper.trainModelsParallel( makeTreeModelBuilders(_parms._algorithm, depths), nTreeEnsemblesInParallel(depths.length)); rulesList = new ArrayList<>(); for (int modelId = 0; modelId < builders.length; modelId++) { long startModelTime = System.nanoTime(); SharedTreeModel<?, ?, ?> treeModel = builders[modelId].get(); long endModelTime = System.nanoTime() - startModelTime; LOG.info("Tree model n." 
+ modelId + " trained in " + ((double)endModelTime) / 1E9 + "s."); rulesList.addAll(Rule.extractRulesListFromModel(treeModel, modelId, nclasses())); overallTreeStats.mergeWith(treeModel._output._treeStats); ntrees += treeModel._output._ntrees; if (classNames == null) { classNames = treeModel._output.classNames(); } treeModel.delete(); } long endAllTreesTime = System.nanoTime() - startAllTreesTime; LOG.info("All tree models trained in " + ((double)endAllTreesTime) / 1E9 + "s."); LOG.info("Extracting rules from trees..."); ruleEnsemble = new RuleEnsemble(rulesList.toArray(new Rule[] {})); linearTrain.add(ruleEnsemble.createGLMTrainFrame(_train, depths.length, treeParameters._ntrees, classNames, _parms._weights_column, true)); if (_valid != null) linearValid.add(ruleEnsemble.createGLMTrainFrame(_valid, depths.length, treeParameters._ntrees, classNames, _parms._weights_column, false)); } // prepare linear terms if (RuleFitModel.ModelType.RULES_AND_LINEAR.equals(_parms._model_type) || RuleFitModel.ModelType.LINEAR.equals(_parms._model_type)) { String[] names = _train._names; linearTrain.add(RuleFitUtils.getLinearNames(names.length, names), _train.vecs(names)); if (_valid != null) linearValid.add(RuleFitUtils.getLinearNames(names.length, names), _valid.vecs(names)); } else { linearTrain.add(glmParameters._response_column, _train.vec(_parms._response_column)); if (_valid != null) linearValid.add(glmParameters._response_column, _valid.vec(_parms._response_column)); if (_parms._weights_column != null) { linearTrain.add(glmParameters._weights_column, _train.vec(_parms._weights_column)); if (_valid != null) linearValid.add(glmParameters._weights_column, _valid.vec(_parms._weights_column)); } } dataFromRulesCodes = linearTrain.names(); DKV.put(linearTrain); if (_valid != null) { DKV.put(linearValid); glmParameters._valid = linearValid._key; } // 2. Sparse linear model with Lasso glmParameters._train = linearTrain._key; if (_parms._max_num_rules > 0) { glmParameters._max_active_predictors = _parms._max_num_rules + 1; if (_parms._distribution != DistributionFamily.multinomial) { glmParameters._solver = GLMModel.GLMParameters.Solver.COORDINATE_DESCENT; } } else { if (glmParameters._lambda != null) glmParameters._lambda = getOptimalLambda(); } LOG.info("Training GLM..."); long startGLMTime = System.nanoTime(); GLM job = new GLM(glmParameters); glmModel = job.trainModel().get(); long endGLMTime = System.nanoTime() - startGLMTime; LOG.info("GLM trained in " + ((double)endGLMTime) / 1E9 + "s."); DKV.put(glmModel); model = new RuleFitModel(dest(), _parms, new RuleFitModel.RuleFitOutput(RuleFit.this), glmModel, ruleEnsemble); model._output.glmModelKey = glmModel._key; model._output._linear_names = linearTrain.names(); DKV.remove(linearTrain._key); if (linearValid != null) DKV.remove(linearValid._key); DKV.remove(trainAdapted._key); // 3. Step 3 (optional): Feature importance model._output._intercept = getIntercept(glmModel); // TODO: add here coverage_count and coverage percent model._output._rule_importance = convertRulesToTable(sortRules(deduplicateRules(RuleFitUtils.getRules(glmModel.coefficients(), ruleEnsemble, model._output.classNames(), nclasses()), _parms._remove_duplicates)), isClassifier() && nclasses() > 2, false); model._output._model_summary = generateSummary(glmModel, ruleEnsemble != null ? 
ruleEnsemble.size() : 0, overallTreeStats, ntrees); model._output._dataFromRulesCodes = dataFromRulesCodes; fillModelMetrics(model, glmModel); model.delete_and_lock(_job); model.update(_job); } finally { if (model != null) model.unlock(_job); } } void fillModelMetrics(RuleFitModel model, GLMModel glmModel) { model._output._validation_metrics = glmModel._output._validation_metrics; model._output._training_metrics = glmModel._output._training_metrics; model._output._cross_validation_metrics = glmModel._output._cross_validation_metrics; model._output._cross_validation_metrics_summary = glmModel._output._cross_validation_metrics_summary; Frame inputTrain = model._parms._train.get(); for (Key<ModelMetrics> modelMetricsKey : glmModel._output.getModelMetrics()) { model.addModelMetrics(modelMetricsKey.get().deepCloneWithDifferentModelAndFrame(model, inputTrain)); } } int[] range(int min, int max) { int[] array = new int[max - min + 1]; for (int i = min, j = 0; i <= max; i++, j++) { array[j] = i; } return array; } SharedTree<?, ?, ?> makeTreeModelBuilder(RuleFitModel.Algorithm algorithm, int maxDepth) { SharedTreeModel.SharedTreeParameters p = (SharedTreeModel.SharedTreeParameters) treeParameters.clone(); p._max_depth = maxDepth; final SharedTree<?, ?, ?> builder; if (algorithm.equals(RuleFitModel.Algorithm.DRF)) { builder = new DRF((DRFModel.DRFParameters) p); } else if (algorithm.equals(RuleFitModel.Algorithm.GBM)) { builder = new GBM((GBMModel.GBMParameters) p); } else { // TODO XGB throw new RuntimeException("Unsupported algorithm for tree building: " + _parms._algorithm); } return builder; } SharedTree<?, ?, ?>[] makeTreeModelBuilders(RuleFitModel.Algorithm algorithm, int[] depths) { SharedTree<?, ?, ?>[] builders = new SharedTree[depths.length]; for (int i = 0; i < depths.length; i++) { builders[i] = makeTreeModelBuilder(algorithm, depths[i]); } return builders; } double[] getOptimalLambda() { glmParameters._lambda_search = true; GLM job = new GLM(glmParameters); GLMModel lambdaModel = job.trainModel().get(); glmParameters._lambda_search = false; GLMModel.RegularizationPath regularizationPath = lambdaModel.getRegularizationPath(); double[] deviance = regularizationPath._explained_deviance_train; double[] lambdas = regularizationPath._lambdas; int bestLambdaIndex; if (deviance.length < 5) { bestLambdaIndex = deviance.length - 1; } else { bestLambdaIndex = getBestLambdaIndex(deviance); if (bestLambdaIndex >= lambdas.length) { bestLambdaIndex = getBestLambdaIndexCornerCase(deviance, lambdas); } } lambdaModel.remove(); return new double[]{lambdas[bestLambdaIndex]}; } int getBestLambdaIndex(double[] deviance) { int bestLambdaIndex = deviance.length - 1; if (deviance.length >= 5) { double[] array = difference(signum(difference(difference(deviance)))); for (int i = 0; i < array.length; i++) { if (array[i] != 0 && i > 0) { bestLambdaIndex = 3 * i; break; } } } return bestLambdaIndex; } int getBestLambdaIndexCornerCase(double[] deviance, double[] lambdas) { double[] leftUpPoint = new double[] {deviance[0], lambdas[0]}; double[] rightLowPoint = new double[] {deviance[deviance.length - 1], lambdas[lambdas.length - 1]}; double[] leftActualPoint = new double[2]; double[] rightActualPoint = new double[2]; double leftVolume, rightVolume; int leftActualId = 0; int rightActualId = deviance.length - 1; while (leftActualId < deviance.length && rightActualId < deviance.length) { if (leftActualId >= rightActualId) // volumes overlap break; // leftVolume leftActualPoint[0] = deviance[leftActualId]; 
leftActualPoint[1] = lambdas[leftActualId]; leftVolume = (leftUpPoint[1] - leftActualPoint[1]) * (leftActualPoint[0] - leftUpPoint[0]); // rightVolume rightActualPoint[0] = deviance[rightActualId]; rightActualPoint[1] = lambdas[rightActualId]; rightVolume = (rightActualPoint[1] - rightLowPoint[1]) * (rightLowPoint[0] - rightActualPoint[0]); if (Math.abs(leftVolume) > Math.abs(rightVolume)) { rightActualId--; // add point to rightvolume } else { leftActualId++; // add point to leftvolume } } return rightActualId; } double[] getIntercept(GLMModel glmModel) { HashMap<String, Double> glmCoefficients = glmModel.coefficients(); double[] intercept = nclasses() > 2 ? new double[nclasses()] : new double[1]; int i = 0; for (Map.Entry<String, Double> coefficient : glmCoefficients.entrySet()) { if ("Intercept".equals(coefficient.getKey()) || coefficient.getKey().contains("Intercept_")) { intercept[i] = coefficient.getValue(); i++; } } return intercept; } } protected int nTreeEnsemblesInParallel(int numDepths) { if (_parms._algorithm == RuleFitModel.Algorithm.GBM) { return nModelsInParallel(numDepths, 2); } else { return nModelsInParallel(numDepths, 1); } } TwoDimTable generateSummary(GLMModel glmModel, int ruleEnsembleSize, TreeStats overallTreeStats, int ntrees) { List<String> colHeaders = new ArrayList<>(); List<String> colTypes = new ArrayList<>(); List<String> colFormats = new ArrayList<>(); TwoDimTable glmModelSummary = glmModel._output._model_summary; String[] glmColHeaders = glmModelSummary.getColHeaders(); String[] glmColTypes = glmModelSummary.getColTypes(); String[] glmColFormats = glmModelSummary.getColFormats(); // linear model info for (int i = 0; i < glmModelSummary.getColDim(); i++) { if (!"Training Frame".equals(glmColHeaders[i])) { colHeaders.add(glmColHeaders[i]); colTypes.add(glmColTypes[i]); colFormats.add(glmColFormats[i]); } } // rule ensemble info colHeaders.add("Rule Ensemble Size"); colTypes.add("long"); colFormats.add("%d"); // trees info colHeaders.add("Number of Trees"); colTypes.add("long"); colFormats.add("%d"); colHeaders.add("Number of Internal Trees"); colTypes.add("long"); colFormats.add("%d"); colHeaders.add("Min. Depth"); colTypes.add("long"); colFormats.add("%d"); colHeaders.add("Max. Depth"); colTypes.add("long"); colFormats.add("%d"); colHeaders.add("Mean Depth"); colTypes.add("double"); colFormats.add("%.5f"); colHeaders.add("Min. Leaves"); colTypes.add("long"); colFormats.add("%d"); colHeaders.add("Max. 
Leaves"); colTypes.add("long"); colFormats.add("%d"); colHeaders.add("Mean Leaves"); colTypes.add("double"); colFormats.add("%.5f"); final int rows = 1; TwoDimTable summary = new TwoDimTable( "Rulefit Model Summary", null, new String[rows], colHeaders.toArray(new String[0]), colTypes.toArray(new String[0]), colFormats.toArray(new String[0]), ""); int col = 0, row = 0; for (int i = 0; i < glmModelSummary.getColDim(); i++) { if (!"Training Frame".equals(glmColHeaders[i])) { summary.set(row, col++, glmModelSummary.get(row, i)); } } summary.set(row, col++, ruleEnsembleSize); summary.set(row, col++, ntrees); summary.set(row, col++, overallTreeStats._num_trees); //internal number of trees (more for multinomial) summary.set(row, col++, overallTreeStats._min_depth); summary.set(row, col++, overallTreeStats._max_depth); summary.set(row, col++, overallTreeStats._mean_depth); summary.set(row, col++, overallTreeStats._min_leaves); summary.set(row, col++, overallTreeStats._max_leaves); summary.set(row, col++, overallTreeStats._mean_leaves); return summary; } @Override public boolean haveMojo() { return true; } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/rulefit/RuleFitModel.java
package hex.rulefit; import hex.*; import hex.glm.GLMModel; import hex.util.LinearAlgebraUtils; import water.*; import water.fvec.Frame; import water.fvec.Vec; import water.udf.CFuncRef; import water.util.TwoDimTable; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; import static hex.rulefit.RuleFitUtils.deduplicateRules; import static hex.rulefit.RuleFitUtils.sortRules; public class RuleFitModel extends Model<RuleFitModel, RuleFitModel.RuleFitParameters, RuleFitModel.RuleFitOutput> implements SignificantRulesCollector{ public enum Algorithm {DRF, GBM, AUTO} public enum ModelType {RULES, RULES_AND_LINEAR, LINEAR} @Override public ToEigenVec getToEigenVec() { return LinearAlgebraUtils.toEigen; } GLMModel glmModel; RuleEnsemble ruleEnsemble; public static class RuleFitParameters extends Model.Parameters { public String algoName() { return "RuleFit"; } public String fullName() { return "RuleFit"; } public String javaName() { return RuleFitModel.class.getName(); } @Override public long progressUnits() { return RuleFit.WORK_TOTAL; } // the algorithm to use to generate rules. Options are "DRF" (default), "GBM" public Algorithm _algorithm = Algorithm.AUTO; // minimum length of rules. Defaults to 3. public int _min_rule_length = 3; // maximum length of rules. Defaults to 3. public int _max_rule_length = 3; // the maximum number of rules to return. Defaults to -1 which means the number of rules is selected // by diminishing returns in model deviance. public int _max_num_rules = -1; // specifies type of base learners in the ensemble. Options are RULES_AND_LINEAR (initial ensemble includes both rules and linear terms, default), RULES (prediction rules only), LINEAR (linear terms only) public ModelType _model_type = ModelType.RULES_AND_LINEAR; // specifies the number of trees to build in the tree model. Defaults to 50. public int _rule_generation_ntrees = 50; // whether to remove rules which are identical to an earlier rule. Defaults to true. public boolean _remove_duplicates = true; // lambda for lasso public double[] _lambda; public void validate(RuleFit rfit) { if (rfit._parms._min_rule_length > rfit._parms._max_rule_length) { rfit.error("min_rule_length", "min_rule_length cannot be greater than max_rule_length. Current values: min_rule_length = " + rfit._parms._min_rule_length + ", max_rule_length = " + rfit._parms._max_rule_length + "."); } } } public static class RuleFitOutput extends Model.Output { // a set of rules and coefficients public double[] _intercept; String[] _linear_names; public TwoDimTable _rule_importance = null; Key glmModelKey = null; String[] _dataFromRulesCodes; // feature interactions ... 
public RuleFitOutput(RuleFit b) { super(b); } } public RuleFitModel(Key<RuleFitModel> selfKey, RuleFitParameters parms, RuleFitOutput output, GLMModel glmModel, RuleEnsemble ruleEnsemble) { super(selfKey, parms, output); this.glmModel = glmModel; this.ruleEnsemble = ruleEnsemble; } @Override public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) { assert domain == null; switch (_output.getModelCategory()) { case Binomial: return new ModelMetricsBinomial.MetricBuilderBinomial(domain); case Multinomial: return new ModelMetricsMultinomial.MetricBuilderMultinomial(_output.nclasses(), domain, _parms._auc_type); case Regression: return new ModelMetricsRegression.MetricBuilderRegression(); default: throw H2O.unimpl("Invalid ModelCategory " + _output.getModelCategory()); } } @Override protected double[] score0(double data[], double preds[]) { throw new UnsupportedOperationException("RuleFitModel doesn't support scoring on raw data. Use score() instead."); } @Override public Frame score(Frame fr, String destination_key, Job j, boolean computeMetrics, CFuncRef customMetricFunc) throws IllegalArgumentException { Frame adaptFrm = new Frame(fr); adaptTestForTrain(adaptFrm, true, false); try (Scope.Safe safe = Scope.safe(adaptFrm)) { Frame linearTest = new Frame(); if (ModelType.RULES_AND_LINEAR.equals(this._parms._model_type) || ModelType.RULES.equals(this._parms._model_type)) { linearTest.add(ruleEnsemble.createGLMTrainFrame(adaptFrm, _parms._max_rule_length - _parms._min_rule_length + 1, _parms._rule_generation_ntrees, this._output.classNames(), _parms._weights_column, false)); } if (ModelType.RULES_AND_LINEAR.equals(this._parms._model_type) || ModelType.LINEAR.equals(this._parms._model_type)) { linearTest.add(RuleFitUtils.getLinearNames(adaptFrm.numCols(), adaptFrm.names()), adaptFrm.vecs()); } else { linearTest.add(RuleFitUtils.getLinearNames(1, new String[] {this._parms._response_column})[0], adaptFrm.vec(this._parms._response_column)); } Frame scored = glmModel.score(linearTest, destination_key, null, true); updateModelMetrics(glmModel, fr); return Scope.untrack(scored); } } @Override protected Futures remove_impl(Futures fs, boolean cascade) { super.remove_impl(fs, cascade); if(cascade) { glmModel.remove(fs); } return fs; } void updateModelMetrics(GLMModel glmModel, Frame fr){ for (Key<ModelMetrics> modelMetricsKey : glmModel._output.getModelMetrics()) { // what is null here was already added to RF model from GLM submodel during hex.rulefit.RuleFit.RuleFitDriver.fillModelMetrics if (modelMetricsKey.get() != null) this.addModelMetrics(modelMetricsKey.get().deepCloneWithDifferentModelAndFrame(this, fr)); } } @Override public RuleFitMojoWriter getMojo() { return new RuleFitMojoWriter(this); } @Override public boolean haveMojo() { return true; } public Frame predictRules(Frame frame, String[] ruleIds) { Frame adaptFrm = new Frame(frame); adaptTestForTrain(adaptFrm, true, false); List<String> linVarNames = Arrays.asList(glmModel.names()).stream().filter(name -> name.startsWith("linear.")).collect(Collectors.toList()); List<Rule> rules = new ArrayList<>(); List<String> linearRules = new ArrayList<>(); for (int i = 0; i < ruleIds.length; i++) { if (ruleIds[i].startsWith("linear.") && isLinearVar(ruleIds[i], linVarNames)) { linearRules.add(ruleIds[i]); } else { rules.add(ruleEnsemble.getRuleByVarName(RuleFitUtils.readRuleId(ruleIds[i]))); } } RuleEnsemble subEnsemble = new RuleEnsemble(rules.toArray(new Rule[0])); Frame result = subEnsemble.transform(adaptFrm); // linear rules apply to all 
the rows for (int i = 0; i < linearRules.size(); i++) { result.add(linearRules.get(i), Vec.makeOne(frame.numRows())); } result = new Frame(Key.make(), result.names(), result.vecs()); DKV.put(result); return result; } private boolean isLinearVar(String potentialLinVarId, List<String> linVarNames) { for (String linVarName : linVarNames) { if (potentialLinVarId.startsWith(linVarName)) return true; } return false; } @Override public TwoDimTable getRuleImportanceTable() { return RuleFitUtils.convertRulesToTable(sortRules(deduplicateRules(RuleFitUtils.getRules(glmModel.coefficients(), ruleEnsemble, this._output.classNames(), this._output.nclasses()), _parms._remove_duplicates)), this._output.isClassifier() && this._output.nclasses() > 2, true); } }
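// --- Illustrative sketch (not part of RuleFitModel.java) --------------------
// Once the GLM is fitted, a RuleFit prediction for one row is the GLM applied
// to the rule indicators (and optional linear terms): intercept plus the sum
// of coefficient * indicator over the rules the row satisfies. The helper
// below spells that out for a plain regression (identity link) case; all names
// are illustrative, and the real scoring goes through glmModel.score(...) above.
class RuleFitPredictionSketch {
  static double predictRow(double intercept, double[] coefficients, byte[] ruleIndicators) {
    double prediction = intercept;
    for (int i = 0; i < coefficients.length; i++) {
      prediction += coefficients[i] * ruleIndicators[i]; // each indicator is 0 or 1
    }
    return prediction;
  }

  public static void main(String[] args) {
    double[] coefs = {0.8, -0.3, 0.0};
    byte[] fired = {1, 1, 0};
    System.out.println(predictRow(2.0, coefs, fired)); // 2.0 + 0.8 - 0.3 = 2.5
  }
}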
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/rulefit/RuleFitMojoWriter.java
package hex.rulefit; import hex.Model; import hex.MultiModelMojoWriter; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; public class RuleFitMojoWriter extends MultiModelMojoWriter<RuleFitModel, RuleFitModel.RuleFitParameters, RuleFitModel.RuleFitOutput> { @SuppressWarnings("unused") // Called through reflection in ModelBuildersHandler public RuleFitMojoWriter() {} public RuleFitMojoWriter(RuleFitModel model) { super(model); } @Override public String mojoVersion() { return "1.00"; } @Override protected List<Model> getSubModels() { if (model.glmModel != null) { return Collections.singletonList(model.glmModel); } return Collections.emptyList(); } @Override protected void writeParentModelData() throws IOException { writekv("linear_model", model._output.glmModelKey); if (!model._parms._model_type.equals(RuleFitModel.ModelType.LINEAR)) { writeOrderedRuleEnsemble(model.ruleEnsemble, model._parms._max_rule_length - model._parms._min_rule_length + 1, model._parms._rule_generation_ntrees, model._output.classNames()); } if (model._parms._model_type.equals(RuleFitModel.ModelType.LINEAR)) { writekv("model_type", 0); } else if (model._parms._model_type.equals(RuleFitModel.ModelType.RULES_AND_LINEAR)) { writekv("model_type", 1); } else { writekv("model_type", 2); } writekv("type", model._output.glmModelKey); writekv("depth", model._parms._max_rule_length - model._parms._min_rule_length + 1); writekv("ntrees", model._parms._rule_generation_ntrees); writekv("data_from_rules_codes_len", model._output._dataFromRulesCodes.length); for (int i = 0; i < model._output._dataFromRulesCodes.length; i++) { writekv("data_from_rules_codes_" + i, model._output._dataFromRulesCodes[i]); } if (model._parms._weights_column != null) { writekv("weights_column", model._parms._weights_column); } writekv("linear_names_len", model._output._linear_names.length); for (int i = 0; i < model._output._linear_names.length; i++) { writekv("linear_names_" + i, model._output._linear_names[i]); } } void writeOrderedRuleEnsemble(RuleEnsemble ruleEnsemble, int depth, int ntrees, String[] classes) throws IOException { for (int i = 0; i < depth; i++) { for (int j = 0; j < ntrees; j++) { // filter rules according to varname // varname is of structue "M" + modelId + "T" + node.getSubgraphNumber() + "N" + node.getNodeNumber() String regex = "M" + i + "T" + j + "N" + "\\d+"; int nclasses = classes != null && classes.length > 2 ? classes.length : 1; String[] classRegex = new String[nclasses]; List<Rule>[] filteredClassRules = new ArrayList[nclasses]; List<Rule> filteredRules = new ArrayList<>(); for (int k = 0; k < nclasses; k++) { if (nclasses > 2) { classRegex[k] = regex + "_" + classes[k]; } else { classRegex[k] = regex; } } for (int k = 0; k < nclasses; k++) { for (int l = 0; l < ruleEnsemble.rules.length; l++) { if (ruleEnsemble.rules[l].varName.matches(classRegex[k])) { if (filteredClassRules[k] == null) { filteredClassRules[k] = new ArrayList<>(); } filteredClassRules[k].add(ruleEnsemble.rules[l]); } } // filtered ordered rules // class 0 ... 
class k filteredRules.addAll(filteredClassRules[k]); } int currNumRules = filteredRules.size(); writekv("num_rules_M".concat(String.valueOf(i)).concat("T").concat(String.valueOf(j)), currNumRules); String currIdPrefix = i + "_" + j + "_"; for (int k = 0; k < currNumRules; k++) { writeRule(filteredRules.get(k), currIdPrefix + k); } } } } void writeRule(Rule rule, String ruleId) throws IOException { int numConditions = rule.conditions.length; writekv("num_conditions_rule_id_" + ruleId, numConditions); for (int i = 0; i < numConditions; i++) { writeCondition(rule.conditions[i], i, ruleId); } writekv("prediction_value_rule_id_" + ruleId, rule.predictionValue); writekv("language_rule_rule_id_" + ruleId, rule.languageRule); writekv("coefficient_rule_id_" + ruleId, rule.coefficient); writekv("var_name_rule_id_" + ruleId, rule.varName); writekv("support_rule_id_" + ruleId, rule.support); } void writeCondition(Condition condition, int conditionId, String ruleId) throws IOException { String conditionIdentifier = conditionId + "_" + ruleId; writekv("feature_index_" + conditionIdentifier, condition.featureIndex); if (Condition.Type.Categorical.equals(condition.type)) { writekv("type_" + conditionIdentifier, 0); int languageCatTresholdLength = condition.languageCatTreshold.length; writekv("language_cat_treshold_length_" + conditionIdentifier, languageCatTresholdLength); for (int i = 0; i < languageCatTresholdLength; i++) { writekv("language_cat_treshold_" + i + "_" + conditionIdentifier, condition.languageCatTreshold[i]); } int catTresholdLength = condition.catTreshold.length; writekv("cat_treshold_length_" + conditionIdentifier, catTresholdLength); for (int i = 0; i < catTresholdLength; i++) { writekv("cat_treshold_length_" + i + "_" + conditionIdentifier, condition.catTreshold[i]); } } else { writekv("type_" + conditionIdentifier, 1); // Numerical writekv("num_treshold" + conditionIdentifier, condition.numTreshold); } if (Condition.Operator.LessThan.equals(condition.operator)) { writekv("operator_" + conditionIdentifier, 0); } else if (Condition.Operator.GreaterThanOrEqual.equals(condition.operator)) { writekv("operator_" + conditionIdentifier, 1); } else { writekv("operator_" + conditionIdentifier, 2); // In } writekv("feature_name_" + conditionIdentifier, condition.featureName); writekv("nas_included_" + conditionIdentifier, condition.NAsIncluded); writekv("language_condition" + conditionIdentifier, condition.languageCondition); } }
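// --- Illustrative sketch (not part of RuleFitMojoWriter.java) ---------------
// The writer above flattens the rule ensemble into MOJO key/value pairs whose
// keys encode the position of each rule and condition. The helper below just
// reproduces that id scheme so the key layout is easy to see; it does not
// write a MOJO, and the names are illustrative.
class MojoKeySketch {
  /** ruleId used above: &lt;depth index&gt;_&lt;tree index&gt;_&lt;rule index within the tree&gt;. */
  static String ruleId(int depthIdx, int treeIdx, int ruleIdx) {
    return depthIdx + "_" + treeIdx + "_" + ruleIdx;
  }
  /** conditionIdentifier used above: &lt;condition index&gt;_&lt;ruleId&gt;. */
  static String conditionId(int conditionIdx, String ruleId) {
    return conditionIdx + "_" + ruleId;
  }

  public static void main(String[] args) {
    String rule = ruleId(0, 3, 7);
    System.out.println("num_conditions_rule_id_" + rule);        // num_conditions_rule_id_0_3_7
    System.out.println("feature_name_" + conditionId(1, rule));  // feature_name_1_0_3_7
  }
}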
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/rulefit/RuleFitUtils.java
package hex.rulefit; import water.util.TwoDimTable; import java.util.*; import java.util.stream.Collectors; public class RuleFitUtils { public static String[] getPathNames(int modelId, int numCols, String[] names) { String[] pathNames = new String[numCols]; for (int i = 0; i < numCols; i++) { pathNames[i] = "tree_" + modelId + "." + names[i]; } return pathNames; } public static String[] getLinearNames(int numCols, String[] names) { String[] pathNames = new String[numCols]; for (int i = 0; i < numCols; i++) { pathNames[i] = "linear." + names[i]; } return pathNames; } static Rule[] deduplicateRules(Rule[] rules, boolean remove_duplicates) { if (remove_duplicates) { List<Rule> transform = new ArrayList<>(); for (int i = 0; i < rules.length; i++) { Rule currRule = rules[i]; if (currRule.conditions != null) { // non linear rules: if (!transform.contains(currRule)) { transform.add(currRule); } else { for (int j = 0; j < transform.size(); j++) { if (i != j) { Rule ruleToExtend = transform.get(j); if (currRule.equals(ruleToExtend)) { transform.remove(j); Rule newRule = new Rule(ruleToExtend.conditions, ruleToExtend.predictionValue, ruleToExtend.varName + ", " + currRule.varName, ruleToExtend.coefficient + currRule.coefficient, ruleToExtend.support); transform.add(newRule); break; } } } } } else { // linear rules: transform.add(currRule); } } return transform.toArray(new Rule[0]); } else { return rules; } } static Rule[] sortRules(Rule[] rules) { Comparator<Rule> ruleAbsCoefficientComparator = Comparator.comparingDouble(Rule::getAbsCoefficient).reversed(); Arrays.sort(rules, ruleAbsCoefficientComparator); return rules; } /** * Returns a ruleId. * If the ruleId is in form after deduplication: "M0T0N1, M0T9N56, M9T34N56", meaning contains ", " * finds only first rule (other are equivalents) */ static String readRuleId(String ruleId) { if (ruleId.contains(",")) { return ruleId.split(",")[0]; } else { return ruleId; } } static Rule[] getRules(HashMap<String, Double> glmCoefficients, RuleEnsemble ruleEnsemble, String[] classNames, int nclasses) { // extract variable-coefficient map (filter out intercept and zero betas) Map<String, Double> filteredRules = glmCoefficients.entrySet() .stream() .filter(e -> !("Intercept".equals(e.getKey()) || e.getKey().contains("Intercept_")) && 0 != e.getValue()) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); List<Rule> rules = new ArrayList<>(); Rule rule; for (Map.Entry<String, Double> entry : filteredRules.entrySet()) { if (!entry.getKey().startsWith("linear.")) { rule = ruleEnsemble.getRuleByVarName(getVarName(entry.getKey(), classNames, nclasses)); } else { rule = new Rule(null, entry.getValue(), entry.getKey()); // linear rule applies to all the rows rule.support = 1.0; } rule.setCoefficient(entry.getValue()); rules.add(rule); } return rules.toArray(new Rule[] {}); } static private String getVarName(String ruleKey, String[] classNames, int nclasses) { if (nclasses > 2) { ruleKey = removeClassNameSuffix(ruleKey, classNames); } return ruleKey.substring(ruleKey.lastIndexOf(".") + 1); } private static String removeClassNameSuffix(String ruleKey, String[] classNames) { for (int i = 0; i < classNames.length; i++) { if (ruleKey.endsWith(classNames[i])) return ruleKey.substring(0, ruleKey.length() - classNames[i].length() - 1); } return ruleKey; } static TwoDimTable convertRulesToTable(Rule[] rules, boolean isMultinomial, boolean generateLanguageRule) { List<String> colHeaders = new ArrayList<>(); List<String> colTypes = new ArrayList<>(); List<String> 
colFormat = new ArrayList<>(); colHeaders.add("variable"); colTypes.add("string"); colFormat.add("%s"); if (isMultinomial) { colHeaders.add("class"); colTypes.add("string"); colFormat.add("%s"); } colHeaders.add("coefficient"); colTypes.add("double"); colFormat.add("%.5f"); colHeaders.add("support"); colTypes.add("double"); colFormat.add("%.5f"); colHeaders.add("rule"); colTypes.add("string"); colFormat.add("%s"); final int rows = rules.length; TwoDimTable table = new TwoDimTable("Rule Importance", null, new String[rows], colHeaders.toArray(new String[0]), colTypes.toArray(new String[0]), colFormat.toArray(new String[0]), ""); for (int row = 0; row < rows; row++) { int col = 0; String varname = (rules[row]).varName; table.set(row, col++, varname); if (isMultinomial) { String segments[] = varname.split("_"); table.set(row, col++, segments[segments.length - 1]); } table.set(row, col++, (rules[row]).coefficient); table.set(row, col++, (rules[row]).support); table.set(row, col, generateLanguageRule ? rules[row].generateLanguageRule() : rules[row].languageRule); } return table; } }
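deduplicateRules above collapses rules with identical conditions into a single rule whose coefficient is the sum of the originals and whose variable name lists all source rule names, comma separated. A simplified sketch of that merge behavior, using stand-in types rather than the H2O Rule class:

import java.util.*;

public class DeduplicateSketch {
    static final class SimpleRule {
        final String conditions, varName;
        final double coefficient;
        SimpleRule(String conditions, String varName, double coefficient) {
            this.conditions = conditions; this.varName = varName; this.coefficient = coefficient;
        }
    }

    public static void main(String[] args) {
        List<SimpleRule> input = Arrays.asList(
                new SimpleRule("x < 3 & y >= 1", "M0T0N1", 0.20),
                new SimpleRule("x < 3 & y >= 1", "M0T9N56", 0.05));
        // rules keyed by their conditions; equal conditions merge into one combined rule
        Map<String, SimpleRule> merged = new LinkedHashMap<>();
        for (SimpleRule r : input) {
            merged.merge(r.conditions, r,
                    (a, b) -> new SimpleRule(a.conditions, a.varName + ", " + b.varName,
                            a.coefficient + b.coefficient));
        }
        merged.values().forEach(r ->
                System.out.println(r.varName + " -> coefficient " + r.coefficient));
        // prints: M0T0N1, M0T9N56 -> coefficient 0.25
    }
}

This also shows why readRuleId above only keeps the part before the first comma: after deduplication the other names refer to equivalent rules.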
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/ANOVAGLMModelV3.java
package hex.schemas; import hex.anovaglm.ANOVAGLMModel; import water.api.API; import water.api.schemas3.KeyV3; import water.api.schemas3.ModelOutputSchemaV3; import water.api.schemas3.ModelSchemaV3; import water.api.schemas3.TwoDimTableV3; import water.fvec.Frame; import water.util.TwoDimTable; import static hex.gam.MatrixFrameUtils.GAMModelUtils.copyTwoDimTable; public class ANOVAGLMModelV3 extends ModelSchemaV3<ANOVAGLMModel, ANOVAGLMModelV3, ANOVAGLMModel.ANOVAGLMParameters, ANOVAGLMV3.ANOVAGLMParametersV3, ANOVAGLMModel.ANOVAGLMModelOutput, ANOVAGLMModelV3.ANOVAGLMModelOutputV3> { public static final class ANOVAGLMModelOutputV3 extends ModelOutputSchemaV3<ANOVAGLMModel.ANOVAGLMModelOutput, ANOVAGLMModelOutputV3> { @API(help="Table of Coefficients") TwoDimTableV3[] coefficients_table; // from all models @API(help="AnovaGLM transformed predictor frame key. For debugging purposes only") KeyV3.FrameKeyV3 transformed_columns_key; @API(help="ANOVA table frame key containing Type III SS calculation, degree of freedom, F-statistics and " + "p-values. This frame content is repeated in the model summary.") KeyV3.FrameKeyV3 result_frame_key; @Override public ANOVAGLMModelOutputV3 fillFromImpl(ANOVAGLMModel.ANOVAGLMModelOutput impl) { super.fillFromImpl(impl); coefficients_table = new TwoDimTableV3[impl._coefficients_table.length]; for (int index = 0; index < coefficients_table.length; index++) { TwoDimTable temp = copyTwoDimTable(impl._coefficients_table[index], impl._coefficients_table[index].getTableHeader()); coefficients_table[index] = new TwoDimTableV3(); coefficients_table[index].fillFromImpl(temp); } return this; } } public ANOVAGLMV3.ANOVAGLMParametersV3 createParametersSchema() { return new ANOVAGLMV3.ANOVAGLMParametersV3();} public ANOVAGLMModelOutputV3 createOutputSchema() { return new ANOVAGLMModelOutputV3();} @Override public ANOVAGLMModel createImpl() { ANOVAGLMModel.ANOVAGLMParameters parms = parameters.createImpl(); return new ANOVAGLMModel(model_id.key(), parms, null); } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/ANOVAGLMV3.java
package hex.schemas; import hex.anovaglm.ANOVAGLM; import hex.anovaglm.ANOVAGLMModel; import hex.glm.GLMModel; import water.api.API; import water.api.schemas3.KeyV3; import water.api.schemas3.ModelParametersSchemaV3; public class ANOVAGLMV3 extends ModelBuilderSchema<ANOVAGLM, ANOVAGLMV3, ANOVAGLMV3.ANOVAGLMParametersV3> { public static final class ANOVAGLMParametersV3 extends ModelParametersSchemaV3<ANOVAGLMModel.ANOVAGLMParameters, ANOVAGLMParametersV3> { public static final String[] fields = new String[] { "model_id", "training_frame", "seed", "response_column", "ignored_columns", "ignore_const_cols", "score_each_iteration", "offset_column", "weights_column", "family", "tweedie_variance_power", "tweedie_link_power", "theta", // equals to 1/r and should be > 0 and <=1, used by negative binomial "solver", "missing_values_handling", "plug_values", "compute_p_values", "standardize", "non_negative", "max_iterations", "link", "prior", "alpha", "lambda", "lambda_search", "stopping_rounds", "stopping_metric", "early_stopping", "stopping_tolerance", "balance_classes", "class_sampling_factors", "max_after_balance_size", "max_runtime_secs", "save_transformed_framekeys", "highest_interaction_term", "nparallelism", "type" // GLM SS Type, only support 3 right now }; @API(help = "Seed for pseudo random number generator (if applicable)", gridable = true) public long seed; @API(help = "Standardize numeric columns to have zero mean and unit variance", level = API.Level.critical) public boolean standardize; // Input fields @API(help = "Family. Use binomial for classification with logistic regression, others are for regression problems.", values = {"AUTO", "gaussian", "binomial", "fractionalbinomial", "quasibinomial", "poisson", "gamma", "tweedie", "negativebinomial"}, level = API.Level.critical) public GLMModel.GLMParameters.Family family; @API(help = "Tweedie variance power", level = API.Level.critical, gridable = true) public double tweedie_variance_power; @API(help = "Tweedie link power", level = API.Level.critical, gridable = true) public double tweedie_link_power; @API(help = "Theta", level = API.Level.critical, gridable = true) public double theta; // used by negative binomial distribution family @API(help = "Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties." + " A value of 1 for alpha represents Lasso regression, a value of 0 produces Ridge regression, and " + "anything in between specifies the amount of mixing between the two. Default value of alpha is 0 when" + " SOLVER = 'L-BFGS'; 0.5 otherwise.", level = API.Level.critical, gridable = true) public double[] alpha; @API(help = "Regularization strength", required = false, level = API.Level.critical, gridable = true) public double[] lambda; @API(help = "Use lambda search starting at lambda max, given lambda is then interpreted as lambda min", level = API.Level.critical) public boolean lambda_search; @API(help = "AUTO will set the solver based on given data and the other parameters. IRLSM is fast on problems" + " with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for datasets" + " with many columns.", values = {"AUTO", "IRLSM", "L_BFGS","COORDINATE_DESCENT_NAIVE", "COORDINATE_DESCENT", "GRADIENT_DESCENT_LH", "GRADIENT_DESCENT_SQERR"}, level = API.Level.critical) public GLMModel.GLMParameters.Solver solver; @API(help = "Handling of missing values. 
Either MeanImputation, Skip or PlugValues.", values = { "MeanImputation", "Skip", "PlugValues" }, level = API.Level.expert, direction=API.Direction.INOUT, gridable = true) public GLMModel.GLMParameters.MissingValuesHandling missing_values_handling; @API(help = "Plug Values (a single row frame containing values that will be used to impute missing values of the" + " training/validation frame, use with conjunction missing_values_handling = PlugValues)", direction = API.Direction.INPUT) public KeyV3.FrameKeyV3 plug_values; @API(help = "Restrict coefficients (not intercept) to be non-negative") public boolean non_negative; @API(help="Request p-values computation, p-values work only with IRLSM solver and no regularization", level = API.Level.secondary, direction = API.Direction.INPUT) public boolean compute_p_values; // _remove_collinear_columns @API(help = "Maximum number of iterations", level = API.Level.secondary) public int max_iterations; @API(help = "Link function.", level = API.Level.secondary, values = {"family_default", "identity", "logit", "log", "inverse", "tweedie", "ologit"}) //"oprobit", "ologlog": will be supported. public GLMModel.GLMParameters.Link link; @API(help = "Prior probability for y==1. To be used only for logistic regression iff the data has been sampled and" + " the mean of response does not reflect reality.", level = API.Level.expert) public double prior; // dead unused args, formely inherited from supervised model schema /** * For imbalanced data, balance training data class counts via * over/under-sampling. This can result in improved predictive accuracy. */ @API(help = "Balance training data class counts via over/under-sampling (for imbalanced data).", level = API.Level.secondary, direction = API.Direction.INOUT) public boolean balance_classes; /** * Desired over/under-sampling ratios per class (lexicographic order). * Only when balance_classes is enabled. * If not specified, they will be automatically computed to obtain class balance during training. */ @API(help = "Desired over/under-sampling ratios per class (in lexicographic order). If not specified, sampling" + " factors will be automatically computed to obtain class balance during training. Requires " + "balance_classes.", level = API.Level.expert, direction = API.Direction.INOUT) public float[] class_sampling_factors; /** * When classes are balanced, limit the resulting dataset size to the * specified multiple of the original dataset size. */ @API(help = "Maximum relative size of the training data after balancing class counts (can be less than 1.0). " + "Requires balance_classes.", /* dmin=1e-3, */ level = API.Level.expert, direction = API.Direction.INOUT) public float max_after_balance_size; @API(help = "Limit the number of interaction terms, if 2 means interaction between 2 columns only, 3 for three" + " columns and so on... Default to 2.", level = API.Level.critical) public int highest_interaction_term; // GLM SS Type, only support 3 @API(help = "Refer to the SS type 1, 2, 3, or 4. We are currently only supporting 3", level = API.Level.critical) public int type; // GLM SS Type, only support 3 @API(help="Stop early when there is no more relative improvement on train or validation (if provided).") public boolean early_stopping; @API(help="true to save the keys of transformed predictors and interaction column.") public boolean save_transformed_framekeys; @API(help="Number of models to build in parallel. Default to 4. Adjust according to your system.") public int nparallelism; } }
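The highest_interaction_term parameter above caps the interaction order ANOVA GLM considers (its help string says 2 means pairwise interactions only). As an illustrative assumption about what that implies, not H2O's internal representation, three predictors with highest_interaction_term = 2 yield the main effects plus all pairwise interactions:

import java.util.ArrayList;
import java.util.List;

public class InteractionTermsSketch {
    public static void main(String[] args) {
        String[] predictors = {"A", "B", "C"};
        int highestInteractionTerm = 2;
        List<String> terms = new ArrayList<>();
        for (String p : predictors) terms.add(p);                   // main effects
        if (highestInteractionTerm >= 2) {
            for (int i = 0; i < predictors.length; i++)
                for (int j = i + 1; j < predictors.length; j++)
                    terms.add(predictors[i] + ":" + predictors[j]); // pairwise interactions
        }
        System.out.println(terms);  // [A, B, C, A:B, A:C, B:C]
    }
}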
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/AdaBoostModelV3.java
package hex.schemas; import hex.adaboost.AdaBoostModel; import water.api.schemas3.ModelOutputSchemaV3; import water.api.schemas3.ModelSchemaV3; public class AdaBoostModelV3 extends ModelSchemaV3<AdaBoostModel, AdaBoostModelV3, AdaBoostModel.AdaBoostParameters, AdaBoostV3.AdaBoostParametersV3, AdaBoostModel.AdaBoostOutput, AdaBoostModelV3.AdaBoostModelOutputV3> { public static final class AdaBoostModelOutputV3 extends ModelOutputSchemaV3<AdaBoostModel.AdaBoostOutput, AdaBoostModelOutputV3> { // nothing } public AdaBoostV3.AdaBoostParametersV3 createParametersSchema() { return new AdaBoostV3.AdaBoostParametersV3(); } public AdaBoostModelOutputV3 createOutputSchema() { return new AdaBoostModelOutputV3(); } //========================== // Custom adapters go here // Version&Schema-specific filling into the impl @Override public AdaBoostModel createImpl() { AdaBoostV3.AdaBoostParametersV3 p = this.parameters; AdaBoostModel.AdaBoostParameters parms = p.createImpl(); return new AdaBoostModel( model_id.key(), parms, new AdaBoostModel.AdaBoostOutput(null) ); } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/AdaBoostV3.java
package hex.schemas; import hex.adaboost.AdaBoost; import hex.adaboost.AdaBoostModel; import water.api.API; import water.api.schemas3.ModelParametersSchemaV3; public class AdaBoostV3 extends ModelBuilderSchema< AdaBoost, AdaBoostV3, AdaBoostV3.AdaBoostParametersV3> { public static final class AdaBoostParametersV3 extends ModelParametersSchemaV3<AdaBoostModel.AdaBoostParameters, AdaBoostParametersV3> { static public String[] fields = new String[]{ "model_id", "training_frame", "ignored_columns", "ignore_const_cols", "categorical_encoding", "weights_column", // AdaBoost specific "nlearners", "weak_learner", "learn_rate", "weak_learner_params", "seed", }; @API(help = "Number of AdaBoost weak learners.", gridable = true) public int nlearners; @API(help = "Choose a weak learner type. Defaults to AUTO, which means DRF.", gridable = true, values = {"AUTO", "DRF", "GLM", "GBM", "DEEP_LEARNING"}) public AdaBoostModel.Algorithm weak_learner; @API(help="Learning rate (from 0.0 to 1.0)", gridable = true) public double learn_rate; @API(help = "Customized parameters for the weak_learner algorithm.", gridable=true) public String weak_learner_params; @API(help = "Seed for pseudo random number generator (if applicable)", gridable = true) public long seed; } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/AggregatorModelV99.java
package hex.schemas; import hex.aggregator.AggregatorModel; import water.api.API; import water.api.schemas3.KeyV3; import water.api.schemas3.ModelOutputSchemaV3; import water.api.schemas3.ModelSchemaV3; public class AggregatorModelV99 extends ModelSchemaV3<AggregatorModel, AggregatorModelV99, AggregatorModel.AggregatorParameters, AggregatorV99.AggregatorParametersV99, AggregatorModel.AggregatorOutput, AggregatorModelV99.AggregatorModelOutputV99> { public static final class AggregatorModelOutputV99 extends ModelOutputSchemaV3<AggregatorModel.AggregatorOutput, AggregatorModelOutputV99> { @API(help = "Aggregated Frame of Exemplars") public KeyV3.FrameKeyV3 output_frame; @API(help ="Aggregated Frame mapping to the rows in the original data") public KeyV3.FrameKeyV3 mapping_frame; } // TODO: I think we can implement the following two in ModelSchemaV3, using reflection on the type parameters. public AggregatorV99.AggregatorParametersV99 createParametersSchema() { return new AggregatorV99.AggregatorParametersV99(); } public AggregatorModelOutputV99 createOutputSchema() { return new AggregatorModelOutputV99(); } // Version&Schema-specific filling into the impl @Override public AggregatorModel createImpl() { AggregatorModel.AggregatorParameters parms = parameters.createImpl(); return new AggregatorModel( model_id.key(), parms, null ); } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/AggregatorV99.java
package hex.schemas; import hex.DataInfo; import hex.aggregator.Aggregator; import hex.aggregator.AggregatorModel; import water.api.API; import water.api.schemas3.ModelParametersSchemaV3; import static hex.pca.PCAModel.PCAParameters; public class AggregatorV99 extends ModelBuilderSchema<Aggregator,AggregatorV99,AggregatorV99.AggregatorParametersV99> { public static final class AggregatorParametersV99 extends ModelParametersSchemaV3<AggregatorModel.AggregatorParameters, AggregatorParametersV99> { static public String[] fields = new String[] { "model_id", "training_frame", "response_column", "ignored_columns", "ignore_const_cols", "target_num_exemplars", "rel_tol_num_exemplars", // "radius_scale", "transform", "categorical_encoding", "save_mapping_frame", "num_iteration_without_new_exemplar", // "pca_method", // "k", // "max_iterations", // "seed", // "use_all_factor_levels", // "max_runtime_secs" "export_checkpoints_dir" }; // @API(help = "Radius scaling", gridable = true) // public double radius_scale; @API(help = "Transformation of training data", values = { "NONE", "STANDARDIZE", "NORMALIZE", "DEMEAN", "DESCALE" }, gridable = true, level= API.Level.expert) // TODO: pull out of categorical class public DataInfo.TransformType transform; @API(help = "Method for computing PCA (Caution: GLRM is currently experimental and unstable)", values = { "GramSVD", "Power", "Randomized", "GLRM" }, gridable = true, level= API.Level.expert) public PCAParameters.Method pca_method; @API(help = "Rank of matrix approximation", direction = API.Direction.INOUT, gridable = true, level= API.Level.secondary) public int k; @API(help = "Maximum number of iterations for PCA", direction = API.Direction.INOUT, gridable = true, level= API.Level.expert) public int max_iterations; @API(help = "Targeted number of exemplars", direction = API.Direction.INOUT, gridable = true, level= API.Level.secondary) public int target_num_exemplars; @API(help = "Relative tolerance for number of exemplars (e.g., 0.5 is +/- 50 percent)", direction = API.Direction.INOUT, gridable = true, level= API.Level.secondary) public double rel_tol_num_exemplars; @API(help = "RNG seed for initialization", direction = API.Direction.INOUT, level= API.Level.secondary) public long seed; @API(help = "Whether first factor level is included in each categorical expansion", direction = API.Direction.INOUT, level= API.Level.expert) public boolean use_all_factor_levels; @API(help = "Whether to export the mapping of the aggregated frame", direction = API.Direction.INOUT, level= API.Level.expert) public boolean save_mapping_frame; @API(help = "The number of iterations to run before aggregator exits if the number of exemplars collected didn't change", direction = API.Direction.INOUT, level= API.Level.expert) public int num_iteration_without_new_exemplar; } }
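One reading of rel_tol_num_exemplars above, and this is an assumption based only on the help string, is that a target of 5000 exemplars with a tolerance of 0.5 is accepted anywhere within +/- 50 percent of the target:

public class ExemplarToleranceSketch {
    public static void main(String[] args) {
        int targetNumExemplars = 5000;
        double relTol = 0.5;                          // +/- 50 percent
        long lo = Math.round(targetNumExemplars * (1 - relTol));
        long hi = Math.round(targetNumExemplars * (1 + relTol));
        System.out.println("accepted exemplar count: [" + lo + ", " + hi + "]");  // [2500, 7500]
    }
}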
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/CoxPHModelV3.java
package hex.schemas; import hex.coxph.CoxPHModel; import water.api.API; import water.api.schemas3.KeyV3; import water.api.schemas3.ModelOutputSchemaV3; import water.api.schemas3.ModelSchemaV3; import water.api.schemas3.TwoDimTableV3; import water.util.TwoDimTable; public class CoxPHModelV3 extends ModelSchemaV3<CoxPHModel, CoxPHModelV3, CoxPHModel.CoxPHParameters, CoxPHV3.CoxPHParametersV3, CoxPHModel.CoxPHOutput, CoxPHModelV3.CoxPHModelOutputV3> { public static final class CoxPHModelOutputV3 extends ModelOutputSchemaV3<CoxPHModel.CoxPHOutput, CoxPHModelOutputV3> { @API(help="Table of Coefficients") TwoDimTableV3 coefficients_table; @API(help = "var(coefficients)") double[][] var_coef; @API(help = "null log-likelihood") double null_loglik; @API(help = "log-likelihood") double loglik; @API(help = "log-likelihood test stat") double loglik_test; @API(help = "Wald test stat") double wald_test; @API(help = "Score test stat") double score_test; @API(help = "R-square") double rsq; @API(help = "Maximum R-square") double maxrsq; @API(help = "log relative error") double lre; @API(help = "number of iterations") int iter; @API(help = "x weighted mean vector for categorical variables") double[][] x_mean_cat; @API(help = "x weighted mean vector for numeric variables") double[][] x_mean_num; @API(help = "unweighted mean vector for numeric offsets") double[] mean_offset; @API(help = "names of offsets") String[] offset_names; @API(help = "n") long n; @API(help = "number of rows with missing values") long n_missing; @API(help = "total events") long total_event; @API(help = "time") double[] time; @API(help = "number at risk") double[] n_risk; @API(help = "number of events") double[] n_event; @API(help = "number of censored obs") double[] n_censor; @API(help = "baseline cumulative hazard") double[] cumhaz_0; @API(help = "component of var(cumhaz)") double[] var_cumhaz_1; @API(help = "component of var(cumhaz)") KeyV3.FrameKeyV3 var_cumhaz_2; @API(help = "Baseline Hazard") KeyV3.FrameKeyV3 baseline_hazard; @API(help = "Baseline Survival") KeyV3.FrameKeyV3 baseline_survival; @API(help = "formula") String formula; @API(help = "ties", values = {"efron", "breslow"}) CoxPHModel.CoxPHParameters.CoxPHTies ties; @API(help = "concordance") double concordance; @Override public CoxPHModelOutputV3 fillFromImpl(CoxPHModel.CoxPHOutput impl) { super.fillFromImpl(impl); String[] names = impl._coef_names; String[] colTypes = new String[]{"double", "double", "double", "double", "double"}; String[] colFormats = new String[]{"%5f", "%5f", "%5f", "%5f", "%5f"}; String[] colNames = new String[]{"Coefficients", "exp_coef", "exp_neg_coef", "se_coef", "z_coef"}; TwoDimTable tdt = new TwoDimTable("Coefficients","CoxPH Coefficients", names, colNames, colTypes, colFormats, "names"); // fill in coefficients for (int i = 0; i < names.length; i++) { tdt.set(i, 0, impl._coef[i]); tdt.set(i, 1, impl._exp_coef[i]); tdt.set(i, 2, impl._exp_neg_coef[i]); tdt.set(i, 3, impl._se_coef[i]); tdt.set(i, 4, impl._z_coef[i]); } coefficients_table = new TwoDimTableV3().fillFromImpl(tdt); return this; } } // CoxPHModelOutputV3 public CoxPHV3.CoxPHParametersV3 createParametersSchema() { return new CoxPHV3.CoxPHParametersV3(); } public CoxPHModelOutputV3 createOutputSchema() { return new CoxPHModelOutputV3(); } }
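fillFromImpl above copies pre-computed columns (coef, exp_coef, exp_neg_coef, se_coef, z_coef) out of the model output into the coefficients table. The sketch below only illustrates how those quantities typically relate to one another for a single coefficient; it uses hypothetical values and plain JDK, not the H2O code path, and the z-statistic formula is an assumption based on the usual Wald definition.

public class CoxCoefficientRowSketch {
    public static void main(String[] args) {
        double coef = 0.8, seCoef = 0.2;     // hypothetical coefficient and standard error
        double expCoef = Math.exp(coef);     // hazard ratio
        double expNegCoef = Math.exp(-coef);
        double zCoef = coef / seCoef;        // Wald z statistic (assumed definition)
        System.out.printf("coef=%.5f exp_coef=%.5f exp_neg_coef=%.5f se_coef=%.5f z_coef=%.5f%n",
                coef, expCoef, expNegCoef, seCoef, zCoef);
    }
}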
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/CoxPHV3.java
package hex.schemas; import hex.coxph.CoxPH; import hex.coxph.CoxPHModel.CoxPHParameters; import water.api.API; import water.api.schemas3.FrameV3; import water.api.schemas3.ModelParametersSchemaV3; import water.api.schemas3.StringPairV3; public class CoxPHV3 extends ModelBuilderSchema<CoxPH,CoxPHV3,CoxPHV3.CoxPHParametersV3> { public static final class CoxPHParametersV3 extends ModelParametersSchemaV3<CoxPHParameters, CoxPHParametersV3> { public static String[] fields = new String[] { "model_id", "training_frame", "start_column", "stop_column", "response_column", "ignored_columns", "weights_column", "offset_column", "stratify_by", "ties", "init", "lre_min", "max_iterations", "interactions", "interaction_pairs", "interactions_only", "use_all_factor_levels", "export_checkpoints_dir", "single_node_mode" }; @API(help="Start Time Column.", direction = API.Direction.INOUT, is_member_of_frames = {"training_frame"}, is_mutually_exclusive_with = {"ignored_columns"}) public FrameV3.ColSpecifierV3 start_column; @API(help="Stop Time Column.", direction = API.Direction.INOUT, is_member_of_frames = {"training_frame"}, is_mutually_exclusive_with = {"ignored_columns"}) public FrameV3.ColSpecifierV3 stop_column; @API(help="List of columns to use for stratification.", direction = API.Direction.INOUT) public String[] stratify_by; @API(help="Method for Handling Ties.", values = {"efron", "breslow"}, direction = API.Direction.INOUT) public CoxPHParameters.CoxPHTies ties; @API(help="Coefficient starting value.", direction = API.Direction.INOUT) public double init; @API(help="Minimum log-relative error.", direction = API.Direction.INOUT) public double lre_min; @API(help="Maximum number of iterations.", direction = API.Direction.INOUT) public int max_iterations; @API(help="A list of columns that should only be used to create interactions but should not itself participate in model training.", direction=API.Direction.INPUT, level=API.Level.expert) public String[] interactions_only; @API(help="A list of predictor column indices to interact. All pairwise combinations will be computed for the list.", direction= API.Direction.INPUT, level= API.Level.expert) public String[] interactions; @API(help="A list of pairwise (first order) column interactions.", direction= API.Direction.INPUT, level= API.Level.expert) public StringPairV3[] interaction_pairs; @API(help="(Internal. For development only!) Indicates whether to use all factor levels.", direction = API.Direction.INPUT, level = API.Level.expert, gridable=true) public boolean use_all_factor_levels; @API(level = API.Level.expert, direction = API.Direction.INOUT, help = "Run on a single node to reduce the effect of network overhead (for smaller datasets)") public boolean single_node_mode; } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/DRFModelV3.java
package hex.schemas; import hex.tree.drf.DRFModel; public class DRFModelV3 extends SharedTreeModelV3<DRFModel, DRFModelV3, DRFModel.DRFParameters, DRFV3.DRFParametersV3, DRFModel.DRFOutput, DRFModelV3.DRFModelOutputV3> { public static final class DRFModelOutputV3 extends SharedTreeModelV3.SharedTreeModelOutputV3<DRFModel.DRFOutput, DRFModelOutputV3> {} public DRFV3.DRFParametersV3 createParametersSchema() { return new DRFV3.DRFParametersV3(); } public DRFModelOutputV3 createOutputSchema() { return new DRFModelOutputV3(); } //========================== // Custom adapters go here // Version&Schema-specific filling into the impl @Override public DRFModel createImpl() { DRFV3.DRFParametersV3 p = this.parameters; DRFModel.DRFParameters parms = p.createImpl(); return new DRFModel( model_id.key(), parms, new DRFModel.DRFOutput(null) ); } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/DRFV3.java
package hex.schemas; import hex.tree.drf.DRF; import hex.tree.drf.DRFModel.DRFParameters; import water.api.API; public class DRFV3 extends SharedTreeV3<DRF, DRFV3, DRFV3.DRFParametersV3> { public static final class DRFParametersV3 extends SharedTreeV3.SharedTreeParametersV3<DRFParameters, DRFParametersV3> { static public String[] fields = new String[]{ "model_id", "training_frame", "validation_frame", "nfolds", "keep_cross_validation_models", "keep_cross_validation_predictions", "keep_cross_validation_fold_assignment", "score_each_iteration", "score_tree_interval", "fold_assignment", "fold_column", "response_column", "ignored_columns", "ignore_const_cols", "offset_column", "weights_column", "balance_classes", "class_sampling_factors", "max_after_balance_size", "max_confusion_matrix_size", "ntrees", "max_depth", "min_rows", "nbins", "nbins_top_level", "nbins_cats", "r2_stopping", "stopping_rounds", "stopping_metric", "stopping_tolerance", "max_runtime_secs", "seed", "build_tree_one_node", "mtries", "sample_rate", "sample_rate_per_class", "binomial_double_trees", "checkpoint", "col_sample_rate_change_per_level", "col_sample_rate_per_tree", "min_split_improvement", "histogram_type", "categorical_encoding", "calibrate_model", "calibration_frame", "calibration_method", "distribution", "custom_metric_func", "export_checkpoints_dir", "check_constant_response", "gainslift_bins", "auc_type" }; // Input fields @API(help = "Number of variables randomly sampled as candidates at each split. If set to -1, defaults to sqrt{p} for classification and p/3 for regression (where p is the # of predictors).", gridable = true) public int mtries; @API(help = "For binary classification: Build 2x as many trees (one per class) - can lead to higher accuracy.", level = API.Level.expert) public boolean binomial_double_trees; @API(help = "Row sample rate per tree (from 0.0 to 1.0)", gridable = true) public double sample_rate; } }
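A small sketch of the mtries default described in the help string above: with mtries = -1, roughly sqrt(p) columns are sampled per split for classification and p/3 for regression. The exact rounding H2O applies is not shown here, so treat this as an approximation.

public class MtriesDefaultSketch {
    static int effectiveMtries(int mtries, int p, boolean classification) {
        if (mtries != -1) return mtries;                      // user override
        return classification ? (int) Math.sqrt(p) : Math.max(p / 3, 1);
    }

    public static void main(String[] args) {
        System.out.println(effectiveMtries(-1, 100, true));   // ~10 for classification
        System.out.println(effectiveMtries(-1, 100, false));  // ~33 for regression
    }
}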
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/DTModelV3.java
package hex.schemas; import hex.tree.dt.DTModel; import water.api.schemas3.ModelOutputSchemaV3; import water.api.schemas3.ModelSchemaV3; public class DTModelV3 extends ModelSchemaV3<DTModel, DTModelV3, DTModel.DTParameters, DTV3.DTParametersV3, DTModel.DTOutput, DTModelV3.DTModelOutputV3> { public static final class DTModelOutputV3 extends ModelOutputSchemaV3<DTModel.DTOutput, DTModelOutputV3> { // nothing } public DTV3.DTParametersV3 createParametersSchema() { return new DTV3.DTParametersV3(); } public DTModelOutputV3 createOutputSchema() { return new DTModelOutputV3(); } //========================== // Custom adapters go here // Version&Schema-specific filling into the impl @Override public DTModel createImpl() { DTV3.DTParametersV3 p = this.parameters; DTModel.DTParameters parms = p.createImpl(); return new DTModel(model_id.key(), parms, new DTModel.DTOutput(null)); } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/DTV3.java
package hex.schemas; import hex.tree.dt.DT; import hex.tree.dt.DTModel; import water.api.API; import water.api.schemas3.ModelParametersSchemaV3; public class DTV3 extends ModelBuilderSchema< DT, DTV3, DTV3.DTParametersV3> { public static final class DTParametersV3 extends ModelParametersSchemaV3<DTModel.DTParameters, DTParametersV3> { static public String[] fields = new String[]{ "model_id", "training_frame", "ignored_columns", "ignore_const_cols", "categorical_encoding", "response_column", "seed", // SDT specific "max_depth", "min_rows" }; @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Seed for random numbers (affects sampling)") public long seed; @API(help = "Max depth of tree.", gridable = true) public int max_depth; @API(help = "Fewest allowed (weighted) observations in a leaf.", gridable = true) public int min_rows; } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/DataInfoFrameV3.java
package hex.schemas; import water.Iced; import water.api.API; import water.api.schemas3.KeyV3; import water.api.schemas3.SchemaV3; public class DataInfoFrameV3 extends SchemaV3<Iced,DataInfoFrameV3> { @API(help="input frame") public KeyV3.FrameKeyV3 frame; @API(help="interactions") public String[] interactions; @API(help="use all factor levels") public boolean use_all; @API(help="standardize") public boolean standardize; @API(help="interactions only returned") public boolean interactions_only; @API(help="output frame", direction=API.Direction.OUTPUT) public KeyV3.FrameKeyV3 result; }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/DeepLearningModelV3.java
package hex.schemas; import hex.deeplearning.DeepLearningModel; import water.Key; import water.api.*; import water.api.schemas3.KeyV3; import water.api.schemas3.ModelOutputSchemaV3; import water.api.schemas3.ModelSchemaV3; import water.api.schemas3.TwoDimTableV3; public class DeepLearningModelV3 extends ModelSchemaV3<DeepLearningModel, DeepLearningModelV3, DeepLearningModel.DeepLearningParameters, DeepLearningV3.DeepLearningParametersV3, DeepLearningModel.DeepLearningModelOutput, DeepLearningModelV3.DeepLearningModelOutputV3> { public static final class DeepLearningModelOutputV3 extends ModelOutputSchemaV3<DeepLearningModel.DeepLearningModelOutput, DeepLearningModelOutputV3> { @API(help="Frame keys for weight matrices", level = API.Level.expert) KeyV3.FrameKeyV3[] weights; @API(help="Frame keys for bias vectors", level = API.Level.expert) KeyV3.FrameKeyV3[] biases; @API(help="Normalization/Standardization multipliers for numeric predictors", direction=API.Direction.OUTPUT, level = API.Level.expert) double[] normmul; @API(help="Normalization/Standardization offsets for numeric predictors", direction=API.Direction.OUTPUT, level = API.Level.expert) double[] normsub; @API(help="Normalization/Standardization multipliers for numeric response", direction=API.Direction.OUTPUT, level = API.Level.expert) double[] normrespmul; @API(help="Normalization/Standardization offsets for numeric response", direction=API.Direction.OUTPUT, level = API.Level.expert) double[] normrespsub; @API(help="Categorical offsets for one-hot encoding", direction=API.Direction.OUTPUT, level = API.Level.expert) int[] catoffsets; @API(help="Variable Importances", direction=API.Direction.OUTPUT, level = API.Level.secondary) TwoDimTableV3 variable_importances; } // TODO: I think we can implement the following two in ModelSchemaV3, using reflection on the type parameters. public DeepLearningV3.DeepLearningParametersV3 createParametersSchema() { return new DeepLearningV3.DeepLearningParametersV3(); } public DeepLearningModelOutputV3 createOutputSchema() { return new DeepLearningModelOutputV3(); } //========================== // Custom adapters go here // Version&Schema-specific filling into the impl @Override public DeepLearningModel createImpl() { DeepLearningModel.DeepLearningParameters parms = parameters.createImpl(); return new DeepLearningModel(Key.make() /*dest*/, parms, new DeepLearningModel.DeepLearningModelOutput(null), null, null, 0); } }
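The normsub/normmul (and normrespsub/normrespmul) vectors exposed above are the standardization offsets and multipliers for numeric columns. A plausible way they would be applied to a numeric predictor is sketched below; this is an assumption based on the field names and help strings, not a verbatim H2O code path.

public class StandardizeSketch {
    public static void main(String[] args) {
        double[] x       = {1.0, 10.0, 100.0};   // raw numeric predictors
        double[] normsub = {0.5,  5.0,  50.0};   // offsets (e.g. column means)
        double[] normmul = {2.0,  0.2,  0.02};   // multipliers (e.g. 1 / std dev)
        double[] standardized = new double[x.length];
        for (int i = 0; i < x.length; i++) {
            standardized[i] = (x[i] - normsub[i]) * normmul[i];  // subtract offset, then scale
        }
        System.out.println(java.util.Arrays.toString(standardized));  // [1.0, 1.0, 1.0]
    }
}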
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/DeepLearningV3.java
package hex.schemas; import hex.Distribution; import hex.deeplearning.DeepLearning; import hex.deeplearning.DeepLearningModel.DeepLearningParameters; import water.api.API; import water.api.schemas3.ModelParametersSchemaV3; import water.api.schemas3.KeyV3; public class DeepLearningV3 extends ModelBuilderSchema<DeepLearning,DeepLearningV3,DeepLearningV3.DeepLearningParametersV3> { public static final class DeepLearningParametersV3 extends ModelParametersSchemaV3<DeepLearningParameters, DeepLearningParametersV3> { // Determines the order of parameters in the GUI public static String[] fields = { "model_id", "training_frame", "validation_frame", "nfolds", "keep_cross_validation_models", "keep_cross_validation_predictions", "keep_cross_validation_fold_assignment", "fold_assignment", "fold_column", "response_column", "ignored_columns", "ignore_const_cols", "score_each_iteration", "weights_column", "offset_column", "balance_classes", "class_sampling_factors", "max_after_balance_size", "max_confusion_matrix_size", "checkpoint", "pretrained_autoencoder", "overwrite_with_best_model", "use_all_factor_levels", "standardize", "activation", "hidden", "epochs", "train_samples_per_iteration", "target_ratio_comm_to_comp", "seed", "adaptive_rate", "rho", "epsilon", "rate", "rate_annealing", "rate_decay", "momentum_start", "momentum_ramp", "momentum_stable", "nesterov_accelerated_gradient", "input_dropout_ratio", "hidden_dropout_ratios", "l1", "l2", "max_w2", "initial_weight_distribution", "initial_weight_scale", "initial_weights", "initial_biases", "loss", "distribution", "quantile_alpha", "tweedie_power", "huber_alpha", "score_interval", "score_training_samples", "score_validation_samples", "score_duty_cycle", "classification_stop", "regression_stop", "stopping_rounds", "stopping_metric", "stopping_tolerance", "max_runtime_secs", "score_validation_sampling", "diagnostics", "fast_mode", "force_load_balance", "variable_importances", "replicate_training_data", "single_node_mode", "shuffle_training_data", "missing_values_handling", "quiet_mode", "autoencoder", "sparse", "col_major", "average_activation", "sparsity_beta", "max_categorical_features", "reproducible", "export_weights_and_biases", "mini_batch_size", "categorical_encoding", "elastic_averaging", "elastic_averaging_moving_rate", "elastic_averaging_regularization", "export_checkpoints_dir", "auc_type", "custom_metric_func", "gainslift_bins", }; /* Imbalanced Classes */ /** * For imbalanced data, balance training data class counts via * over/under-sampling. This can result in improved predictive accuracy. */ @API(level = API.Level.secondary, direction = API.Direction.INOUT, gridable = true, help = "Balance training data class counts via over/under-sampling (for imbalanced data).") public boolean balance_classes; /** * Desired over/under-sampling ratios per class (lexicographic order). * Only when balance_classes is enabled. * If not specified, they will be automatically computed to obtain class balance during training. */ @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Desired over/under-sampling ratios per class (in lexicographic order). If not specified, sampling " + "factors will be automatically computed to obtain class balance during training. Requires balance_classes.") public float[] class_sampling_factors; /** * When classes are balanced, limit the resulting dataset size to the * specified multiple of the original dataset size. 
*/ @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = false, help = "Maximum relative size of the training data after balancing class counts (can be less than 1.0). " + "Requires balance_classes.") public float max_after_balance_size; /** For classification models, the maximum size (in terms of classes) of * the confusion matrix for it to be printed. This option is meant to * avoid printing extremely large confusion matrices. * */ @API(level = API.Level.secondary, direction = API.Direction.INOUT, gridable = false, help = "[Deprecated] Maximum size (# classes) for confusion matrices to be printed in the Logs.") public int max_confusion_matrix_size; /* Neural Net Topology */ /** * The activation function (non-linearity) to be used by the neurons in the hidden layers. * Tanh: Hyperbolic tangent function (same as scaled and shifted sigmoid). * Rectifier: Rectifier Linear Unit: Chooses the maximum of (0, x) where x is the input value. * Maxout: Choose the maximum coordinate of the input vector. * ExpRectifier: Exponential Rectifier Linear Unit function (http://arxiv.org/pdf/1511.07289v2.pdf) * With Dropout: Zero out a random user-given fraction of the * incoming weights to each hidden layer during training, for each * training row. This effectively trains exponentially many models at * once, and can improve generalization. */ @API(level = API.Level.critical, direction = API.Direction.INOUT, gridable = true, values = {"Tanh", "TanhWithDropout", "Rectifier", "RectifierWithDropout", "Maxout", "MaxoutWithDropout"}, help = "Activation function.") public DeepLearningParameters.Activation activation; /** * The number and size of each hidden layer in the model. * For example, if a user specifies "100,200,100" a model with 3 hidden * layers will be produced, and the middle hidden layer will have 200 * neurons. */ @API(level = API.Level.critical, direction = API.Direction.INOUT, gridable = true, help = "Hidden layer sizes (e.g. [100, 100]).") public int[] hidden; /** * The number of passes over the training dataset to be carried out. * It is recommended to start with lower values for initial grid searches. * This value can be modified during checkpoint restarts and allows continuation * of selected models. */ @API(level = API.Level.critical, direction = API.Direction.INOUT, gridable = true, help = "How many times the dataset should be iterated (streamed), can be fractional.") public double epochs; /** * The number of training data rows to be processed per iteration. Note that * independent of this parameter, each row is used immediately to update the model * with (online) stochastic gradient descent. This parameter controls the * synchronization period between nodes in a distributed environment and the * frequency at which scoring and model cancellation can happen. For example, if * it is set to 10,000 on H2O running on 4 nodes, then each node will * process 2,500 rows per iteration, sampling randomly from their local data. * Then, model averaging between the nodes takes place, and scoring can happen * (dependent on scoring interval and duty factor). Special values are 0 for * one epoch per iteration, -1 for processing the maximum amount of data * per iteration (if **replicate training data** is enabled, N epochs * will be trained per iteration on N nodes, otherwise one epoch). Special value * of -2 turns on automatic mode (auto-tuning). 
*/ @API(level = API.Level.secondary, direction = API.Direction.INOUT, gridable = true, help = "Number of training samples (globally) per MapReduce iteration. Special values are 0: one epoch, -1: " + "all available data (e.g., replicated training data), -2: automatic.") public long train_samples_per_iteration; @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Target ratio of communication overhead to computation. Only for multi-node operation and " + "train_samples_per_iteration = -2 (auto-tuning).") public double target_ratio_comm_to_comp; /** * The random seed controls sampling and initialization. Reproducible * results are only expected with single-threaded operation (i.e., * when running on one node, turning off load balancing and providing * a small dataset that fits in one chunk). In general, the * multi-threaded asynchronous updates to the model parameters will * result in (intentional) race conditions and non-reproducible * results. Note that deterministic sampling and initialization might * still lead to some weak sense of determinism in the model. */ @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Seed for random numbers (affects sampling) - Note: only reproducible when running single threaded.") public long seed; /*Adaptive Learning Rate*/ /** * The implemented adaptive learning rate algorithm (ADADELTA) automatically * combines the benefits of learning rate annealing and momentum * training to avoid slow convergence. Specification of only two * parameters (rho and epsilon) simplifies hyper parameter search. * In some cases, manually controlled (non-adaptive) learning rate and * momentum specifications can lead to better results, but require the * specification (and hyper parameter search) of up to 7 parameters. * If the model is built on a topology with many local minima or * long plateaus, it is possible for a constant learning rate to produce * sub-optimal results. Learning rate annealing allows digging deeper into * local minima, while rate decay allows specification of different * learning rates per layer. When the gradient is being estimated in * a long valley in the optimization landscape, a large learning rate * can cause the gradient to oscillate and move in the wrong * direction. When the gradient is computed on a relatively flat * surface with small learning rates, the model can converge far * slower than necessary. */ @API(level = API.Level.secondary, direction = API.Direction.INOUT, gridable = true, help = "Adaptive learning rate.") public boolean adaptive_rate; /** * The first of two hyper parameters for adaptive learning rate (ADADELTA). * It is similar to momentum and relates to the memory to prior weight updates. * Typical values are between 0.9 and 0.999. * This parameter is only active if adaptive learning rate is enabled. */ @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Adaptive learning rate time decay factor (similarity to prior updates).") public double rho; /** * The second of two hyper parameters for adaptive learning rate (ADADELTA). * It is similar to learning rate annealing during initial training * and momentum at later stages where it allows forward progress. * Typical values are between 1e-10 and 1e-4. * This parameter is only active if adaptive learning rate is enabled. 
*/ @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Adaptive learning rate smoothing factor (to avoid divisions by zero and allow progress).") public double epsilon; /*Learning Rate*/ /** * When adaptive learning rate is disabled, the magnitude of the weight * updates are determined by the user specified learning rate * (potentially annealed), and are a function of the difference * between the predicted value and the target value. That difference, * generally called delta, is only available at the output layer. To * correct the output at each hidden layer, back propagation is * used. Momentum modifies back propagation by allowing prior * iterations to influence the current update. Using the momentum * parameter can aid in avoiding local minima and the associated * instability. Too much momentum can lead to instabilities, that's * why the momentum is best ramped up slowly. * This parameter is only active if adaptive learning rate is disabled. */ @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Learning rate (higher => less stable, lower => slower convergence).") public double rate; /** * Learning rate annealing reduces the learning rate to "freeze" into * local minima in the optimization landscape. The annealing rate is the * inverse of the number of training samples it takes to cut the learning rate in half * (e.g., 1e-6 means that it takes 1e6 training samples to halve the learning rate). * This parameter is only active if adaptive learning rate is disabled. */ @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Learning rate annealing: rate / (1 + rate_annealing * samples).") public double rate_annealing; /** * The learning rate decay parameter controls the change of learning rate across layers. * For example, assume the rate parameter is set to 0.01, and the rate_decay parameter is set to 0.5. * Then the learning rate for the weights connecting the input and first hidden layer will be 0.01, * the learning rate for the weights connecting the first and the second hidden layer will be 0.005, * and the learning rate for the weights connecting the second and third hidden layer will be 0.0025, etc. * This parameter is only active if adaptive learning rate is disabled. */ @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Learning rate decay factor between layers (N-th layer: rate * rate_decay ^ (n - 1).") public double rate_decay; /*Momentum*/ /** * The momentum_start parameter controls the amount of momentum at the beginning of training. * This parameter is only active if adaptive learning rate is disabled. */ @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Initial momentum at the beginning of training (try 0.5).") public double momentum_start; /** * The momentum_ramp parameter controls the amount of learning for which momentum increases * (assuming momentum_stable is larger than momentum_start). The ramp is measured in the number * of training samples. * This parameter is only active if adaptive learning rate is disabled. */ @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Number of training samples for which momentum increases.") public double momentum_ramp; /** * The momentum_stable parameter controls the final momentum value reached after momentum_ramp training samples. 
* The momentum used for training will remain the same for training beyond reaching that point. * This parameter is only active if adaptive learning rate is disabled. */ @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Final momentum after the ramp is over (try 0.99).") public double momentum_stable; /** * The Nesterov accelerated gradient descent method is a modification to * traditional gradient descent for convex functions. The method relies on * gradient information at various points to build a polynomial approximation that * minimizes the residuals in fewer iterations of the descent. * This parameter is only active if adaptive learning rate is disabled. */ @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Use Nesterov accelerated gradient (recommended).") public boolean nesterov_accelerated_gradient; /*Regularization*/ /** * A fraction of the features for each training row to be omitted from training in order * to improve generalization (dimension sampling). */ @API(level = API.Level.secondary, direction = API.Direction.INOUT, gridable = true, help = "Input layer dropout ratio (can improve generalization, try 0.1 or 0.2).") public double input_dropout_ratio; /** * A fraction of the inputs for each hidden layer to be omitted from training in order * to improve generalization. Defaults to 0.5 for each hidden layer if omitted. */ @API(level = API.Level.secondary, direction = API.Direction.INOUT, gridable = true, help = "Hidden layer dropout ratios (can improve generalization), specify one value per hidden layer, " + "defaults to 0.5.") public double[] hidden_dropout_ratios; /** * A regularization method that constrains the absolute value of the weights and * has the net effect of dropping some weights (setting them to zero) from a model * to reduce complexity and avoid overfitting. */ @API(level = API.Level.secondary, direction = API.Direction.INOUT, gridable = true, help = "L1 regularization (can add stability and improve generalization, causes many weights to become 0).") public double l1; /** * A regularization method that constrains the sum of the squared * weights. This method introduces bias into parameter estimates, but * frequently produces substantial gains in modeling as estimate variance is * reduced. */ @API(level = API.Level.secondary, direction = API.Direction.INOUT, gridable = true, help = "L2 regularization (can add stability and improve generalization, causes many weights to be small.") public double l2; /** * A maximum on the sum of the squared incoming weights into * any one neuron. This tuning parameter is especially useful for unbound * activation functions such as Maxout or Rectifier. */ @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Constraint for squared sum of incoming weights per unit (e.g. for Rectifier).") public float max_w2; /*Initialization*/ /** * The distribution from which initial weights are to be drawn. The default * option is an optimized initialization that considers the size of the network. * The "uniform" option uses a uniform distribution with a mean of 0 and a given * interval. The "normal" option draws weights from the standard normal * distribution with a mean of 0 and given standard deviation. 
*/ @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, values = {"UniformAdaptive", "Uniform", "Normal"}, help = "Initial weight distribution.") public DeepLearningParameters.InitialWeightDistribution initial_weight_distribution; /** * The scale of the distribution function for Uniform or Normal distributions. * For Uniform, the values are drawn uniformly from -initial_weight_scale...initial_weight_scale. * For Normal, the values are drawn from a Normal distribution with a standard deviation of initial_weight_scale. */ @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Uniform: -value...value, Normal: stddev.") public double initial_weight_scale; @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable=true, help = "A list of H2OFrame ids to initialize the weight matrices of this model with.") public KeyV3.FrameKeyV3[] initial_weights; @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable=true, help = "A list of H2OFrame ids to initialize the bias vectors of this model with.") public KeyV3.FrameKeyV3[] initial_biases; /** * The loss (error) function to be minimized by the model. * CrossEntropy loss is used when the model output consists of independent * hypotheses, and the outputs can be interpreted as the probability that each * hypothesis is true. Cross entropy is the recommended loss function when the * target values are class labels, and especially for imbalanced data. * It strongly penalizes error in the prediction of the actual class label. * Quadratic loss is used when the model output are continuous real values, but can * be used for classification as well (where it emphasizes the error on all * output classes, not just for the actual class). */ @API(level = API.Level.secondary, direction = API.Direction.INOUT, gridable = true, required = false, values = {"Automatic", "CrossEntropy", "Quadratic", "Huber", "Absolute", "Quantile"}, help = "Loss function.") public DeepLearningParameters.Loss loss; /*Scoring*/ /** * The minimum time (in seconds) to elapse between model scoring. The actual * interval is determined by the number of training samples per iteration and the scoring duty cycle. */ @API(level = API.Level.secondary, direction = API.Direction.INOUT, gridable = true, help = "Shortest time interval (in seconds) between model scoring.") public double score_interval; /** * The number of training dataset points to be used for scoring. Will be * randomly sampled. Use 0 for selecting the entire training dataset. */ @API(level = API.Level.secondary, direction = API.Direction.INOUT, gridable = true, help = "Number of training set samples for scoring (0 for all).") public long score_training_samples; /** * The number of validation dataset points to be used for scoring. Can be * randomly sampled or stratified (if "balance classes" is set and "score * validation sampling" is set to stratify). Use 0 for selecting the entire * training dataset. */ @API(level = API.Level.secondary, direction = API.Direction.INOUT, gridable = true, help = "Number of validation set samples for scoring (0 for all).") public long score_validation_samples; /** * Maximum fraction of wall clock time spent on model scoring on training and validation samples, * and on diagnostics such as computation of feature importances (i.e., not on training). 
*/ @API(level = API.Level.secondary, direction = API.Direction.INOUT, gridable = true, help = "Maximum duty cycle fraction for scoring (lower: more training, higher: more scoring).") public double score_duty_cycle; /** * The stopping criteria in terms of classification error (1-accuracy) on the * training data scoring dataset. When the error is at or below this threshold, * training stops. */ @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Stopping criterion for classification error fraction on training data (-1 to disable).") public double classification_stop; /** * The stopping criteria in terms of regression error (MSE) on the training * data scoring dataset. When the error is at or below this threshold, training * stops. */ @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Stopping criterion for regression error (MSE) on training data (-1 to disable).") public double regression_stop; /** * Enable quiet mode for less output to standard output. */ @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Enable quiet mode for less output to standard output.") public boolean quiet_mode; /** * Method used to sample the validation dataset for scoring, see Score Validation Samples above. */ @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, values = {"Uniform", "Stratified"}, help = "Method used to sample validation dataset for scoring.") public DeepLearningParameters.ClassSamplingMethod score_validation_sampling; /* Miscellaneous */ /** * If enabled, store the best model under the destination key of this model at the end of training. * Only applicable if training is not cancelled. */ @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "If enabled, override the final model with the best model found during training.") public boolean overwrite_with_best_model; @API(level = API.Level.secondary, direction = API.Direction.INOUT, help = "Auto-Encoder.") public boolean autoencoder; @API(level = API.Level.secondary, direction = API.Direction.INOUT, gridable = true, help = "Use all factor levels of categorical variables. Otherwise, the first factor level is omitted (without" + " loss of accuracy). Useful for variable importances and auto-enabled for autoencoder.") public boolean use_all_factor_levels; @API(level = API.Level.secondary, direction = API.Direction.INOUT, gridable = true, help = "If enabled, automatically standardize the data. If disabled, the user must provide properly scaled " + "input data.") public boolean standardize; /** * Gather diagnostics for hidden layers, such as mean and RMS values of learning * rate, momentum, weights and biases. */ @API(level = API.Level.expert, direction = API.Direction.INOUT, help = "Enable diagnostics for hidden layers.") public boolean diagnostics; /** * Whether to compute variable importances for input features. * The implemented method (by Gedeon) considers the weights connecting the * input features to the first two hidden layers. */ @API(level = API.Level.critical, direction = API.Direction.INOUT, gridable = true, help = "Compute variable importances for input features (Gedeon method) - can be slow for large networks.") public boolean variable_importances; /** * Enable fast mode (minor approximation in back-propagation), should not affect results significantly. 
*/ @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Enable fast mode (minor approximation in back-propagation).") public boolean fast_mode; /** * Increase training speed on small datasets by splitting the data into many chunks * to allow utilization of all cores. */ @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Force extra load balancing to increase training speed for small datasets (to keep all cores busy).") public boolean force_load_balance; /** * Replicate the entire training dataset onto every node for faster training on small datasets. */ @API(level = API.Level.secondary, direction = API.Direction.INOUT, gridable = true, help = "Replicate the entire training dataset onto every node for faster training on small datasets.") public boolean replicate_training_data; /** * Run on a single node for fine-tuning of model parameters. Can be useful for * checkpoint resumes after training on multiple nodes for fast initial * convergence. */ @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Run on a single node for fine-tuning of model parameters.") public boolean single_node_mode; /** * Enable shuffling of training data (on each node). This option is * recommended if training data is replicated on N nodes, and the number of training samples per iteration * is close to N times the dataset size, where all nodes train with (almost) all * the data. It is automatically enabled if the number of training samples per iteration is set to -1 (or to N * times the dataset size or larger). */ @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Enable shuffling of training data (recommended if training data is replicated and " + "train_samples_per_iteration is close to #nodes x #rows, or if using balance_classes).") public boolean shuffle_training_data; @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, values = {"MeanImputation", "Skip"}, help = "Handling of missing values. Either MeanImputation or Skip.") public DeepLearningParameters.MissingValuesHandling missing_values_handling; @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Sparse data handling (more efficient for data with lots of 0 values).") public boolean sparse; @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "#DEPRECATED Use a column major weight matrix for input layer. Can speed up forward propagation, but " + "might slow down backpropagation.") public boolean col_major; @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Average activation for sparse auto-encoder. #Experimental") public double average_activation; @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Sparsity regularization. #Experimental") public double sparsity_beta; @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Max. number of categorical features, enforced via hashing. 
#Experimental") public int max_categorical_features; @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Force reproducibility on small data (will be slow - only uses 1 thread).") public boolean reproducible; @API(level = API.Level.expert, direction=API.Direction.INOUT, help = "Whether to export Neural Network weights and biases to H2O Frames.") public boolean export_weights_and_biases; @API(level = API.Level.expert, direction=API.Direction.INOUT, help = "Mini-batch size (smaller leads to better fit, larger can speed up and generalize better).") public int mini_batch_size; @API(level = API.Level.expert, direction=API.Direction.INOUT, gridable = true, help = "Elastic averaging between compute nodes can improve distributed model convergence. #Experimental") public boolean elastic_averaging; @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Elastic averaging moving rate (only if elastic averaging is enabled).") public double elastic_averaging_moving_rate; @API(level = API.Level.expert, direction = API.Direction.INOUT, gridable = true, help = "Elastic averaging regularization strength (only if elastic averaging is enabled).") public double elastic_averaging_regularization; @API(level = API.Level.expert, direction = API.Direction.INOUT, help = "Pretrained autoencoder model to initialize this model with.") public KeyV3.ModelKeyV3 pretrained_autoencoder; } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/ExtendedIsolationForestModelV3.java
package hex.schemas; import hex.tree.isoforextended.ExtendedIsolationForestModel; import water.api.schemas3.ModelOutputSchemaV3; import water.api.schemas3.ModelSchemaV3; public class ExtendedIsolationForestModelV3 extends ModelSchemaV3<ExtendedIsolationForestModel, ExtendedIsolationForestModelV3, ExtendedIsolationForestModel.ExtendedIsolationForestParameters, ExtendedIsolationForestV3.ExtendedIsolationForestParametersV3, ExtendedIsolationForestModel.ExtendedIsolationForestOutput, ExtendedIsolationForestModelV3.ExtendedIsolationForestModelOutputV3> { public static final class ExtendedIsolationForestModelOutputV3 extends ModelOutputSchemaV3<ExtendedIsolationForestModel.ExtendedIsolationForestOutput, ExtendedIsolationForestModelOutputV3> { // nothing } public ExtendedIsolationForestV3.ExtendedIsolationForestParametersV3 createParametersSchema() { return new ExtendedIsolationForestV3.ExtendedIsolationForestParametersV3(); } public ExtendedIsolationForestModelOutputV3 createOutputSchema() { return new ExtendedIsolationForestModelOutputV3(); } //========================== // Custom adapters go here // Version&Schema-specific filling into the impl @Override public ExtendedIsolationForestModel createImpl() { ExtendedIsolationForestV3.ExtendedIsolationForestParametersV3 p = this.parameters; ExtendedIsolationForestModel.ExtendedIsolationForestParameters parms = p.createImpl(); return new ExtendedIsolationForestModel( model_id.key(), parms, new ExtendedIsolationForestModel.ExtendedIsolationForestOutput(null) ); } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/ExtendedIsolationForestV3.java
package hex.schemas; import hex.tree.isoforextended.ExtendedIsolationForest; import hex.tree.isoforextended.ExtendedIsolationForestModel; import water.api.API; import water.api.schemas3.ModelParametersSchemaV3; public class ExtendedIsolationForestV3 extends ModelBuilderSchema< ExtendedIsolationForest, ExtendedIsolationForestV3, ExtendedIsolationForestV3.ExtendedIsolationForestParametersV3> { public static final class ExtendedIsolationForestParametersV3 extends ModelParametersSchemaV3<ExtendedIsolationForestModel.ExtendedIsolationForestParameters, ExtendedIsolationForestParametersV3> { static public String[] fields = new String[]{ "model_id", "training_frame", "ignored_columns", "ignore_const_cols", "categorical_encoding", "score_each_iteration", "score_tree_interval", // Extended Isolation Forest specific "ntrees", "sample_size", "extension_level", "seed", "disable_training_metrics" }; @API(help = "Number of Extended Isolation Forest trees.", gridable = true) public int ntrees; @API(help = "Number of randomly sampled observations used to train each Extended Isolation Forest tree.", gridable = true) public int sample_size; @API(help = "Maximum is N - 1 (N = numCols). Minimum is 0. Extended Isolation Forest " + "with extension_level = 0 behaves like Isolation Forest.", gridable = true) public int extension_level; @API(help = "Seed for pseudo random number generator (if applicable)", gridable = true) public long seed; @API(help="Score the model after every so many trees. Disabled if set to 0.", level = API.Level.secondary, gridable = false) public int score_tree_interval; @API(help = "Disable calculating training metrics (expensive on large datasets)") public boolean disable_training_metrics; } }
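To illustrate the constraints stated in the help strings (extension_level must lie between 0 and numCols - 1, and score_tree_interval = 0 disables periodic scoring), here is a hedged sketch that only assigns the public fields declared above; the concrete numbers are arbitrary.
import hex.schemas.ExtendedIsolationForestV3;

public class EIFParamsSketch {
  public static void main(String[] args) {
    ExtendedIsolationForestV3.ExtendedIsolationForestParametersV3 p =
        new ExtendedIsolationForestV3.ExtendedIsolationForestParametersV3();
    p.ntrees = 100;                     // number of Extended Isolation Forest trees
    p.sample_size = 256;                // observations sampled to build each tree
    p.extension_level = 1;              // must be in [0, numCols - 1]; 0 behaves like plain Isolation Forest
    p.seed = 42L;
    p.score_tree_interval = 0;          // 0 disables periodic scoring
    p.disable_training_metrics = true;  // skip training metrics (expensive on large datasets)
  }
}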
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/GAMModelV3.java
package hex.schemas; import hex.gam.GAMModel; import water.api.API; import water.api.schemas3.ModelOutputSchemaV3; import water.api.schemas3.ModelSchemaV3; import water.api.schemas3.TwoDimTableV3; public class GAMModelV3 extends ModelSchemaV3<GAMModel, GAMModelV3, GAMModel.GAMParameters, GAMV3.GAMParametersV3, GAMModel.GAMModelOutput, GAMModelV3.GAMModelOutputV3> { public static final class GAMModelOutputV3 extends ModelOutputSchemaV3<GAMModel.GAMModelOutput, GAMModelOutputV3> { @API(help="Table of Coefficients") TwoDimTableV3 coefficients_table; @API(help="Table of Coefficients without centering") TwoDimTableV3 coefficients_table_no_centering; @API(help="GLM scoring history") TwoDimTableV3 glm_scoring_history; @API(help = "GLM model summary") TwoDimTableV3 glm_model_summary; @API(help="Table of Standardized Coefficients Magnitudes") TwoDimTableV3 standardized_coefficient_magnitudes; @API(help="Variable Importances", direction=API.Direction.OUTPUT, level = API.Level.secondary) TwoDimTableV3 variable_importances; @API(help="key storing gam columns and predictor columns. For debugging purposes only") String gam_transformed_center_key; @API(help="GLM Z values. For debugging purposes only") double[] glm_zvalues; @API(help="GLM p values. For debugging purposes only") double[] glm_pvalues; @API(help="GLM standard error values. For debugging purposes only") double[] glm_std_err; @API(help = "knot locations for all gam columns.") double[][] knot_locations; @API(help = "Gam column names for knots stored in knot_locations") String[] gam_knot_column_names; } public GAMV3.GAMParametersV3 createParametersSchema() { return new GAMV3.GAMParametersV3();} public GAMModelOutputV3 createOutputSchema() { return new GAMModelOutputV3();} @Override public GAMModel createImpl() { GAMModel.GAMParameters parms = parameters.createImpl(); return new GAMModel(model_id.key(), parms, null); } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/GAMV3.java
package hex.schemas; import hex.gam.GAM; import hex.gam.GAMModel; import hex.glm.GLMModel.GLMParameters; import hex.glm.GLMModel.GLMParameters.Solver; import water.api.API; import water.api.API.Direction; import water.api.API.Level; import water.api.schemas3.KeyV3.FrameKeyV3; import water.api.schemas3.ModelParametersSchemaV3; import water.api.schemas3.StringPairV3; public class GAMV3 extends ModelBuilderSchema<GAM, GAMV3, GAMV3.GAMParametersV3> { public static final class GAMParametersV3 extends ModelParametersSchemaV3<GAMModel.GAMParameters, GAMParametersV3> { public static final String[] fields = new String[] { "model_id", "training_frame", "validation_frame", "nfolds", "seed", "keep_cross_validation_models", "keep_cross_validation_predictions", "keep_cross_validation_fold_assignment", "fold_assignment", "fold_column", "response_column", "ignored_columns", "ignore_const_cols", "score_each_iteration", "offset_column", "weights_column", "family", "tweedie_variance_power", "tweedie_link_power", "theta", // equals to 1/r and should be > 0 and <=1, used by negative binomial "solver", "alpha", "lambda", "lambda_search", "early_stopping", "nlambdas", "standardize", "missing_values_handling", "plug_values", "compute_p_values", "remove_collinear_columns", "splines_non_negative", "intercept", "non_negative", "max_iterations", "objective_epsilon", "beta_epsilon", "gradient_epsilon", "link", "startval", // initial starting values for fixed and randomized coefficients, double array "prior", "cold_start", // if true, will start GLM model from initial values and conditions "lambda_min_ratio", "beta_constraints", "max_active_predictors", "interactions", "interaction_pairs", "obj_reg", "export_checkpoints_dir", "stopping_rounds", "stopping_metric", "stopping_tolerance", // dead unused args forced here by backwards compatibility, remove in V4 "balance_classes", "class_sampling_factors", "max_after_balance_size", "max_confusion_matrix_size", "max_runtime_secs", "num_knots", // array: number of knots for each predictor "spline_orders", // order of I-splines "knot_ids", // string array storing frame keys that contains knot location "gam_columns", // array: predictor column names array "standardize_tp_gam_cols", // standardize TP gam columns before transformation "scale_tp_penalty_mat", // scale penalty matrix "bs", // array, name of basis functions used "scale", // array, smoothing parameter for GAM, "keep_gam_cols", "store_knot_locations", "auc_type", "gainslift_bins", }; @API(help = "Seed for pseudo random number generator (if applicable)", gridable = true) public long seed; // Input fields @API(help = "Family. Use binomial for classification with logistic regression, others are for regression problems.", values = {"AUTO", "gaussian", "binomial","quasibinomial","ordinal", "multinomial", "poisson", "gamma", "tweedie", "negativebinomial", "fractionalbinomial"}, level = Level.critical) // took tweedie out since it's not reliable public GLMParameters.Family family; @API(help = "Tweedie variance power", level = Level.critical, gridable = true) public double tweedie_variance_power; @API(help = "Tweedie link power", level = Level.critical, gridable = true) public double tweedie_link_power; @API(help = "Theta", level = Level.critical, gridable = true) public double theta; // used by negtaive binomial distribution family @API(help = "AUTO will set the solver based on given data and the other parameters. 
IRLSM is fast on problems with a small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for datasets with many columns.", values = {"AUTO", "IRLSM", "L_BFGS","COORDINATE_DESCENT_NAIVE", "COORDINATE_DESCENT", "GRADIENT_DESCENT_LH", "GRADIENT_DESCENT_SQERR"}, level = Level.critical) public Solver solver; @API(help = "Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS'; 0.5 otherwise.", level = Level.critical, gridable = true) public double[] alpha; @API(help = "Regularization strength", level = Level.critical, gridable = true) public double[] lambda; @API(help = "double array to initialize coefficients for GAM.", gridable=true) public double[] startval; @API(help = "Use lambda search starting at lambda max, given lambda is then interpreted as lambda min", level = Level.critical) public boolean lambda_search; @API(help="Stop early when there is no more relative improvement on train or validation (if provided)") public boolean early_stopping; @API(help = "Number of lambdas to be used in a search." + " Default indicates: If alpha is zero, with lambda search" + " set to True, the value of nlambdas is set to 30 (fewer lambdas" + " are needed for ridge regression) otherwise it is set to 100.", level = Level.critical) public int nlambdas; @API(help = "Standardize numeric columns to have zero mean and unit variance", level = Level.critical) public boolean standardize; @API(help = "Handling of missing values. Either MeanImputation, Skip or PlugValues.", values = { "MeanImputation", "Skip", "PlugValues" }, level = API.Level.expert, direction=API.Direction.INOUT, gridable = true) public GLMParameters.MissingValuesHandling missing_values_handling; @API(help = "Plug Values (a single row frame containing values that will be used to impute missing values of the training/validation frame, use in conjunction with missing_values_handling = PlugValues)", direction = API.Direction.INPUT) public FrameKeyV3 plug_values; @API(help = "Restrict coefficients (not intercept) to be non-negative") public boolean non_negative; @API(help = "Maximum number of iterations", level = Level.secondary) public int max_iterations; @API(help = "Converge if beta changes less (using L-infinity norm) than beta epsilon, ONLY applies to IRLSM solver ", level = Level.expert) public double beta_epsilon; @API(help = "Converge if objective value changes less than this."+ " Default indicates: If lambda_search"+ " is set to True the value of objective_epsilon is set to .0001. If the lambda_search is set to False and" + " lambda is equal to zero, the value of objective_epsilon is set to .000001, for any other value of lambda the" + " default value of objective_epsilon is set to .0001.", level = Level.expert) public double objective_epsilon; @API(help = "Converge if objective changes less (using L-infinity norm) than this, ONLY applies to L-BFGS solver."+ " Default indicates: If lambda_search is set to False and lambda is equal to zero, the default value" + " of gradient_epsilon is equal to .000001, otherwise the default value is .0001. 
If lambda_search is set to True," + " the conditional values above are 1E-8 and 1E-6 respectively.", level = Level.expert) public double gradient_epsilon; @API(help="Likelihood divider in objective value computation, default is 1/nobs") public double obj_reg; @API(help = "Link function.", level = Level.secondary, values = {"family_default", "identity", "logit", "log", "inverse", "tweedie", "ologit"}) //"oprobit", "ologlog": will be supported. public GLMParameters.Link link; @API(help="Include constant term in the model", level = Level.expert) public boolean intercept; @API(help = "Prior probability for y==1. To be used only for logistic regression iff the data has been sampled and the mean of response does not reflect reality.", level = Level.expert) public double prior; @API(help = "Only applicable to multiple alpha/lambda values when calling GLM from GAM. If false, build the next" + " model for next set of alpha/lambda values starting from the values provided by current model. If true" + " will start GLM model from scratch.", level = Level.critical) public boolean cold_start; @API(help = "Minimum lambda used in lambda search, specified as a ratio of lambda_max (the smallest lambda that drives all coefficients to zero)." + " Default indicates: if the number of observations is greater than the number of variables, then lambda_min_ratio" + " is set to 0.0001; if the number of observations is less than the number of variables, then lambda_min_ratio" + " is set to 0.01.", level = Level.expert) public double lambda_min_ratio; @API(help = "Beta constraints", direction = API.Direction.INPUT /* Not required, to allow initial params validation: , required=true */) public FrameKeyV3 beta_constraints; @API(help="Maximum number of active predictors during computation. Use as a stopping criterion" + " to prevent expensive model building with many predictors." + " Default indicates: If the IRLSM solver is used," + " the value of max_active_predictors is set to 5000 otherwise it is set to 100000000.", direction = Direction.INPUT, level = Level.expert) public int max_active_predictors = -1; @API(help="A list of predictor column indices to interact. All pairwise combinations will be computed for the list.", direction=Direction.INPUT, level=Level.expert) public String[] interactions; @API(help="A list of pairwise (first order) column interactions.", direction=Direction.INPUT, level=Level.expert) public StringPairV3[] interaction_pairs; // dead unused args, formely inherited from supervised model schema /** * For imbalanced data, balance training data class counts via * over/under-sampling. This can result in improved predictive accuracy. */ @API(help = "Balance training data class counts via over/under-sampling (for imbalanced data).", level = API.Level.secondary, direction = API.Direction.INOUT) public boolean balance_classes; /** * Desired over/under-sampling ratios per class (lexicographic order). * Only when balance_classes is enabled. * If not specified, they will be automatically computed to obtain class balance during training. */ @API(help = "Desired over/under-sampling ratios per class (in lexicographic order). If not specified, sampling factors will be automatically computed to obtain class balance during training. Requires balance_classes.", level = API.Level.expert, direction = API.Direction.INOUT) public float[] class_sampling_factors; /** * When classes are balanced, limit the resulting dataset size to the * specified multiple of the original dataset size. 
*/ @API(help = "Maximum relative size of the training data after balancing class counts (can be less than 1.0). Requires balance_classes.", /* dmin=1e-3, */ level = API.Level.expert, direction = API.Direction.INOUT) public float max_after_balance_size; /** For classification models, the maximum size (in terms of classes) of * the confusion matrix for it to be printed. This option is meant to * avoid printing extremely large confusion matrices. */ @API(help = "[Deprecated] Maximum size (# classes) for confusion matrices to be printed in the Logs", level = API.Level.secondary, direction = API.Direction.INOUT) public int max_confusion_matrix_size; @API(help="Request p-values computation, p-values work only with IRLSM solver and no regularization", level = Level.secondary, direction = Direction.INPUT) public boolean compute_p_values; // _remove_collinear_columns @API(help="In case of linearly dependent columns, remove some of the dependent columns", level = Level.secondary, direction = Direction.INPUT) public boolean remove_collinear_columns; // _remove_collinear_columns @API(help="If set to true, will return knot locations as double[][] array for gam column names found knots_for_gam." + " Default to false.", level = Level.secondary, direction = Direction.INPUT) public boolean store_knot_locations; @API(help = "Number of knots for gam predictors. If specified, must specify one for each gam predictor. For " + "monotone I-splines, mininum = 2, for cs spline, minimum = 3. For thin plate, minimum is size of " + "polynomial basis + 2.", level = Level.critical, gridable = true) public int[] num_knots; @API(help = "Order of I-splines or NBSplineTypeI M-splines used for gam predictors. If specified, must be the " + "same size as gam_columns. For I-splines, the spline_orders will be the same as the polynomials used to " + "generate the splines. For M-splines, the polynomials used to generate the splines will be " + "spline_order-1. Values for bs=0 or 1 will be ignored.", level = Level.critical, gridable = true) public int[] spline_orders; @API(help = "Valid for I-spline (bs=2) only. True if the I-splines are monotonically increasing (and monotonically " + "non-decreasing) and False if the I-splines are monotonically decreasing (and monotonically non-increasing)." + " If specified, must be the same size as gam_columns. Values for other spline types " + "will be ignored. Default to true.", level = Level.critical, gridable = true) public boolean[] splines_non_negative; @API(help = "Arrays of predictor column names for gam for smoothers using single or multiple predictors like " + "{{'c1'},{'c2','c3'},{'c4'},...}", required = true, level = Level.critical, gridable = true) public String[][] gam_columns; @API(help = "Smoothing parameter for gam predictors. If specified, must be of the same length as gam_columns", level = Level.critical, gridable = true) public double[] scale; @API(help = "Basis function type for each gam predictors, 0 for cr, 1 for thin plate regression with knots, 2 for" + " monotone I-splines, 3 for NBSplineTypeI M-splines (refer to doc " + "here: https://github.com/h2oai/h2o-3/issues/6926). 
If specified, must be the same size as " + "gam_columns", level = Level.critical, gridable = true) public int[] bs; @API(help="Save keys of model matrix", level = Level.secondary, direction = Direction.INPUT) public boolean keep_gam_cols; // if true will save keys storing GAM columns @API(help="standardize tp (thin plate) predictor columns", level = Level.secondary, direction = Direction.INPUT) public boolean standardize_tp_gam_cols; // if true, will standardize predictor columns before gamification @API(help="Scale penalty matrix for tp (thin plate) smoothers as in R", level = Level.secondary, direction = Direction.INPUT) public boolean scale_tp_penalty_mat; // if true, will apply scaling to the penalty matrix CS @API(help="Array storing frame keys of knots. One for each gam column set specified in gam_columns", level = Level.secondary, direction = Direction.INPUT) public String[] knot_ids; } }
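Several of the GAM arrays above are required to be parallel to gam_columns (bs, num_knots, scale, spline_orders, splines_non_negative). The sketch below shows one internally consistent combination using only the public fields declared above; the column names c1, c2, c3 are hypothetical and the numeric values are illustrative.
import hex.schemas.GAMV3;

public class GamSmootherSketch {
  public static void main(String[] args) {
    GAMV3.GAMParametersV3 p = new GAMV3.GAMParametersV3();
    p.gam_columns = new String[][]{{"c1"}, {"c2", "c3"}}; // one single-predictor smoother, one multi-predictor smoother
    p.bs          = new int[]{0, 1};        // 0 = cr spline, 1 = thin plate regression with knots (parallel to gam_columns)
    p.num_knots   = new int[]{5, 10};       // cr needs >= 3 knots; thin plate needs polynomial basis size + 2
    p.scale       = new double[]{1.0, 1.0}; // one smoothing parameter per smoother
    p.splines_non_negative = new boolean[]{true, true}; // only consulted for I-splines (bs = 2)
    p.store_knot_locations = true;          // return knot locations in the model output
    p.lambda_search = true;
    p.alpha    = new double[]{0.0};
    p.nlambdas = 30;                        // ridge-style search (alpha = 0) needs fewer lambdas
  }
}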
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/GBMModelV3.java
package hex.schemas; import hex.tree.gbm.GBMModel; public class GBMModelV3 extends SharedTreeModelV3< GBMModel, GBMModelV3, GBMModel.GBMParameters, GBMV3.GBMParametersV3, GBMModel.GBMOutput, GBMModelV3.GBMModelOutputV3> { public static final class GBMModelOutputV3 extends SharedTreeModelV3.SharedTreeModelOutputV3<GBMModel.GBMOutput, GBMModelOutputV3> {} public GBMV3.GBMParametersV3 createParametersSchema() { return new GBMV3.GBMParametersV3(); } public GBMModelOutputV3 createOutputSchema() { return new GBMModelOutputV3(); } //========================== // Custom adapters go here // Version&Schema-specific filling into the impl @Override public GBMModel createImpl() { GBMV3.GBMParametersV3 p = this.parameters; GBMModel.GBMParameters parms = p.createImpl(); return new GBMModel(model_id.key(), parms, new GBMModel.GBMOutput(null)); } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/GBMV3.java
package hex.schemas; import hex.tree.gbm.GBM; import hex.tree.gbm.GBMModel.GBMParameters; import water.api.API; import water.api.schemas3.KeyValueV3; public class GBMV3 extends SharedTreeV3<GBM,GBMV3,GBMV3.GBMParametersV3> { public static final class GBMParametersV3 extends SharedTreeV3.SharedTreeParametersV3<GBMParameters, GBMParametersV3> { static public String[] fields = new String[] { "model_id", "training_frame", "validation_frame", "nfolds", "keep_cross_validation_models", "keep_cross_validation_predictions", "keep_cross_validation_fold_assignment", "score_each_iteration", "score_tree_interval", "fold_assignment", "fold_column", "response_column", "ignored_columns", "ignore_const_cols", "offset_column", "weights_column", "balance_classes", "class_sampling_factors", "max_after_balance_size", "max_confusion_matrix_size", "ntrees", "max_depth", "min_rows", "nbins", "nbins_top_level", "nbins_cats", "r2_stopping", "stopping_rounds", "stopping_metric", "stopping_tolerance", "max_runtime_secs", "seed", "build_tree_one_node", "learn_rate", "learn_rate_annealing", "distribution", "quantile_alpha", "tweedie_power", "huber_alpha", "checkpoint", "sample_rate", "sample_rate_per_class", "col_sample_rate", "col_sample_rate_change_per_level", "col_sample_rate_per_tree", "min_split_improvement", "histogram_type", "max_abs_leafnode_pred", "pred_noise_bandwidth", "categorical_encoding", "calibrate_model", "calibration_frame", "calibration_method", "custom_metric_func", "custom_distribution_func", "export_checkpoints_dir", "in_training_checkpoints_dir", "in_training_checkpoints_tree_interval", "monotone_constraints", "check_constant_response", "gainslift_bins", "auc_type", "interaction_constraints", "auto_rebalance" }; // Input fields @API(help="Learning rate (from 0.0 to 1.0)", gridable = true) public double learn_rate; @API(help="Scale the learning rate by this factor after each tree (e.g., 0.99 or 0.999) ", level = API.Level.secondary, gridable = true) public double learn_rate_annealing; @API(help = "Row sample rate per tree (from 0.0 to 1.0)", gridable = true) public double sample_rate; @API(help="Column sample rate (from 0.0 to 1.0)", level = API.Level.critical, gridable = true) public double col_sample_rate; @API(help = "A mapping representing monotonic constraints. Use +1 to enforce an increasing constraint and -1 to specify a decreasing constraint.", level = API.Level.secondary) public KeyValueV3[] monotone_constraints; @API(help="Maximum absolute value of a leaf node prediction", level = API.Level.expert, gridable = true) public double max_abs_leafnode_pred; @API(help="Bandwidth (sigma) of Gaussian multiplicative noise ~N(1,sigma) for tree node predictions", level = API.Level.expert, gridable = true) public double pred_noise_bandwidth; @API(help="A set of allowed column interactions.", level= API.Level.expert) public String[][] interaction_constraints; @API(help="Allow automatic rebalancing of training and validation datasets", level = API.Level.expert) public boolean auto_rebalance = true; // // TODO debug only, remove! // @API(help="Internal flag, use new version of histo tsk if set", level = API.Level.expert, gridable = false) // public boolean use_new_histo_tsk; // @API(help="Use with new histo task only! Internal flag, number of columns processed in parallel", level = API.Level.expert, gridable = false) // public int col_block_sz = 5; // @API(help="Use with new histo task only! 
Min threads to be run in parallel", level = API.Level.expert, gridable = false) // public int min_threads = -1; // @API(help="Use with new histo task only! Share histo (and use CAS) instead of making private copies", level = API.Level.expert, gridable = false) // public boolean shared_histo; // @API(help="Use with new histo task only! Access rows in order of the dataset, not in order of leafs ", level = API.Level.expert, gridable = false) // public boolean unordered; } }
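A short, hedged sketch of the boosting-specific fields declared above. The monotone_constraints array and the commented-out histogram flags are left out so the example relies only on what is visible here; the interaction-constraint column names are hypothetical.
import hex.schemas.GBMV3;

public class GbmParamsSketch {
  public static void main(String[] args) {
    GBMV3.GBMParametersV3 p = new GBMV3.GBMParametersV3();
    p.learn_rate = 0.05;            // learning rate, between 0.0 and 1.0
    p.learn_rate_annealing = 0.99;  // multiply the learning rate by 0.99 after every tree
    p.sample_rate = 0.8;            // row sample rate per tree
    p.col_sample_rate = 0.7;        // column sample rate
    p.max_abs_leafnode_pred = 4.0;  // cap on the absolute value of a leaf node prediction
    p.pred_noise_bandwidth = 0.0;   // no multiplicative Gaussian noise on tree node predictions
    // Hypothetical column names: only interactions within each group are allowed.
    p.interaction_constraints = new String[][]{{"age", "income"}, {"zip"}};
    p.auto_rebalance = true;        // allow automatic rebalancing of training/validation data
  }
}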
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/GLMModelV3.java
package hex.schemas; import hex.glm.GLMModel; import hex.glm.GLMModel.GLMOutput; import water.MemoryManager; import water.api.API; import water.api.schemas3.KeyV3; import water.api.schemas3.ModelOutputSchemaV3; import water.api.schemas3.ModelSchemaV3; import water.api.schemas3.TwoDimTableV3; import water.util.ArrayUtils; import water.util.TwoDimTable; import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; import java.util.stream.Stream; import static water.util.ArrayUtils.sort; //import water.util.DocGen.HTML; public class GLMModelV3 extends ModelSchemaV3<GLMModel, GLMModelV3, GLMModel.GLMParameters, GLMV3.GLMParametersV3, GLMOutput, GLMModelV3.GLMModelOutputV3> { public static final class GLMModelOutputV3 extends ModelOutputSchemaV3<GLMOutput, GLMModelOutputV3> { @API(help="Table of Coefficients") TwoDimTableV3 coefficients_table; @API(help="Table of Coefficients with coefficients denoted with class names for GLM multinomials only.") TwoDimTableV3 coefficients_table_multinomials_with_class_names; // same as coefficients_table but with real class names. @API(help="Standardized Coefficient Magnitudes") TwoDimTableV3 standardized_coefficient_magnitudes; @API(help = "Variable Importances", direction = API.Direction.OUTPUT, level = API.Level.secondary) TwoDimTableV3 variable_importances; @API(help="Lambda minimizing the objective value, only applicable with lambda search or when arrays of alpha and " + "lambdas are provided") double lambda_best; @API(help="Alpha minimizing the objective value, only applicable when arrays of alphas are given ") double alpha_best; @API(help="submodel index minimizing the objective value, only applicable for arrays of alphas/lambda ") int best_submodel_index; // denotes the submodel index that yields the best result @API(help="Lambda best + 1 standard error. Only applicable with lambda search and cross-validation") double lambda_1se; @API(help="Minimum lambda value calculated that may be used for lambda search. Early-stop may happen and " + "the minimum lambda value will not be used in this case.") double lambda_min; @API(help="Starting lambda value used when lambda search is enabled.") double lambda_max; @API(help = "Dispersion parameter, only applicable to Tweedie family (input/output) and fractional Binomial (output only)") double dispersion; @API(help = "Predictor names where variable inflation factors are calculated.") String[] vif_predictor_names; @API(help = "GLM model coefficient names.") String[] coefficient_names; @API(help = "predictor variable inflation factors.") double[] variable_inflation_factors; @API(help = "Beta (if exists) and linear constraints states") String[] linear_constraint_states; @API(help = "Table of beta (if exists) and linear constraints values and status") TwoDimTableV3 linear_constraints_table; @API(help="Contains the original dataset and the dfbetas calculated for each predictor.") KeyV3.FrameKeyV3 regression_influence_diagnostics; @API(help="True if all constraint conditions are satisfied. 
Otherwise, false.") boolean all_constraints_satisfied; private GLMModelOutputV3 fillMultinomial(GLMOutput impl) { if(impl.get_global_beta_multinomial() == null) return this; // no coefificients yet String [] names = impl.coefficientNames().clone(); int len = names.length-1; String [] names2 = new String[len]; // this one decides the length of standardized table length int[] indices = new int[len]; for (int i = 0; i < indices.length; ++i) indices[i] = i; // put intercept as the first String [] ns = ArrayUtils.append(new String[]{"Intercept"},Arrays.copyOf(names,names.length-1)); coefficients_table = new TwoDimTableV3(); if (impl.nclasses() > 2) // only change coefficient names for multinomials coefficients_table_multinomials_with_class_names = new TwoDimTableV3(); int n = impl.nclasses(); String[] cols = impl.hasVIF() ? new String[2*n+1] : new String[n*2]; // coefficients per class and standardized coefficients String[] cols2=null; if (n>2) { cols2 = impl.hasVIF() ? new String[n*2+1] : new String[n*2]; String[] classNames = impl._domains[impl.responseIdx()]; for (int i = 0; i < n; ++i) { cols2[i] = "coefs_class_" + classNames[i]; cols2[n + i] = "std_coefs_class_" + classNames[i]; } if (impl.hasVIF()) cols2[2*n] = "variable_inflation_factor"; } for (int i = 0; i < n; ++i) { cols[i] = "coefs_class_" +i; cols[n + i] = "std_coefs_class_" +i; } if (impl.hasVIF()) cols[2*n] = "variable_inflation_factor"; String [] colTypes = new String[cols.length]; Arrays.fill(colTypes, "double"); String [] colFormats = new String[cols.length]; Arrays.fill(colFormats,"%5f"); double [][] betaNorm = impl.getNormBetaMultinomial(); if(betaNorm != null) { TwoDimTable tdt = new TwoDimTable("Coefficients", "glm multinomial coefficients", ns, cols, colTypes, colFormats, "names"); for (int c = 0; c < n; ++c) { double[] beta = impl.get_global_beta_multinomial()[c]; tdt.set(0, c, beta[beta.length - 1]); tdt.set(0, n + c, betaNorm[c][beta.length - 1]); for (int i = 0; i < beta.length - 1; ++i) { tdt.set(i + 1, c, beta[i]); tdt.set(i + 1, n + c, betaNorm[c][i]); } } if (impl.hasVIF()) { List<String> vifPredictors = Stream.of(impl.getVIFPredictorNames()).collect(Collectors.toList()); double[] varInFactors = impl.variableInflationFactors(); for (int row=0; row < ns.length; row++) { if (vifPredictors.contains(ns[row])) { int index = vifPredictors.indexOf(ns[row]); tdt.set(row, 2*n, varInFactors[index]); } else { tdt.set(row, 2*n, Double.NaN); } } } coefficients_table.fillFromImpl(tdt); if (n>2) { // restore column names from pythonized ones coefficients_table_multinomials_with_class_names.fillFromImpl(tdt); revertCoeffNames(cols2, n, coefficients_table_multinomials_with_class_names); } final double [] magnitudes = new double[betaNorm[0].length]; calculateVarimpMultinomial(magnitudes, indices, betaNorm); for(int i = 0; i < len; ++i) names2[i] = names[indices[i]]; tdt = new TwoDimTable("Standardized Coefficient Magnitudes", "standardized coefficient magnitudes", names2, new String[]{"Coefficients", "Sign"}, new String[]{"double", "string"}, new String[]{"%5f", "%s"}, "names"); for (int i = 0; i < magnitudes.length - 1; ++i) { tdt.set(i, 0, magnitudes[indices[i]]); tdt.set(i, 1, "POS"); } standardized_coefficient_magnitudes = new TwoDimTableV3(); standardized_coefficient_magnitudes.fillFromImpl(tdt); } return this; } public static void calculateVarimpMultinomial(double[] magnitudes, int[] indices, double[][] betaNorm) { for (int i = 0; i < betaNorm.length; ++i) { for (int j = 0; j < betaNorm[i].length; ++j) { double d = 
betaNorm[i][j]; magnitudes[j] += d < 0 ? -d : d; } } sort(indices, magnitudes, -1, -1); } public void revertCoeffNames(String[] colNames, int nclass, TwoDimTableV3 coeffs_table) { String newName = coeffs_table.name+" with class names"; coeffs_table.name = newName; boolean bothCoeffStd = colNames.length==(2*nclass); for (int tableIndex = 1; tableIndex <= nclass; tableIndex++) { coeffs_table.columns[tableIndex].name = colNames[tableIndex-1]; if (bothCoeffStd) coeffs_table.columns[tableIndex+nclass].name = colNames[tableIndex-1+nclass]; } } @Override public GLMModelOutputV3 fillFromImpl(GLMModel.GLMOutput impl) { super.fillFromImpl(impl); lambda_1se = impl.lambda_1se(); lambda_best = impl.lambda_best(); alpha_best = impl.alpha_best(); best_submodel_index = impl.bestSubmodelIndex(); dispersion = impl.dispersion(); coefficient_names = impl.coefficientNames().clone(); if (impl._linear_constraint_states != null) // pass constraint conditions linear_constraint_states = impl._linear_constraint_states.clone(); variable_inflation_factors = impl.getVariableInflationFactors(); vif_predictor_names = impl.hasVIF() ? impl.getVIFPredictorNames() : null; List<String> validVIFNames = impl.hasVIF() ? Stream.of(vif_predictor_names).collect(Collectors.toList()) : null; if(impl._multinomial || impl._ordinal) return fillMultinomial(impl); String [] names = impl.coefficientNames().clone(); // put intercept as the first String [] ns = ArrayUtils.append(new String[]{"Intercept"},Arrays.copyOf(names,names.length-1)); coefficients_table = new TwoDimTableV3(); double [] beta = impl.beta(); final double [] magnitudes = beta==null?null:new double[beta.length]; int len = beta==null?0:magnitudes.length - 1; int[] indices = beta==null?null:new int[len]; if (beta != null) { for (int i = 0; i < indices.length; ++i) indices[i] = i; } if(beta == null) beta = MemoryManager.malloc8d(names.length); String [] colTypes = new String[]{"double"}; String [] colFormats = new String[]{"%5f"}; String [] colnames = new String[]{"Coefficients"}; if(impl.hasPValues()){ if (impl.hasVIF()) { colTypes = new String[]{"double", "double", "double", "double","double"}; colFormats = new String[]{"%5f", "%5f", "%5f", "%5f", "%5f"}; colnames = new String[]{"Coefficients", "Std. Error", "z value", "p value", "variable_inflation_factor"}; } else { colTypes = new String[]{"double", "double", "double", "double"}; colFormats = new String[]{"%5f", "%5f", "%5f", "%5f"}; colnames = new String[]{"Coefficients", "Std. 
Error", "z value", "p value"}; } } else if (impl.hasVIF()) { colTypes = new String[]{"double", "double"}; colFormats = new String[]{"%5f", "%5f"}; colnames = new String[]{"Coefficients", "variable_inflation_factor"}; } int stdOff = colnames.length; colTypes = ArrayUtils.append(colTypes,"double"); colFormats = ArrayUtils.append(colFormats,"%5f"); colnames = ArrayUtils.append(colnames,"Standardized Coefficients"); // as last column TwoDimTable tdt = new TwoDimTable("Coefficients","glm coefficients", ns, colnames, colTypes, colFormats, "names"); tdt.set(0, 0, beta[beta.length - 1]); for (int i = 0; i < beta.length - 1; ++i) { tdt.set(i + 1, 0, beta[i]); } double[] norm_beta = null; if(impl.beta() != null) { norm_beta = impl.getNormBeta(); tdt.set(0, stdOff, norm_beta[norm_beta.length - 1]); for (int i = 0; i < norm_beta.length - 1; ++i) tdt.set(i + 1, stdOff, norm_beta[i]); } if(impl.hasPValues()) { // fill in p values double [] stdErr = impl.stdErr(); double [] zVals = impl.zValues(); double [] pVals = impl.pValues(); tdt.set(0, 1, stdErr[stdErr.length - 1]); tdt.set(0, 2, zVals[zVals.length - 1]); tdt.set(0, 3, pVals[pVals.length - 1]); for(int i = 0; i < stdErr.length - 1; ++i) { tdt.set(i + 1, 1, stdErr[i]); tdt.set(i + 1, 2, zVals[i]); tdt.set(i + 1, 3, pVals[i]); } if (impl.hasVIF()) { for (int i=0; i < stdErr.length; i++) if (validVIFNames.contains(ns[i])) { int index = validVIFNames.indexOf(ns[i]); tdt.set(i, 4, variable_inflation_factors[index]); } else { tdt.set(i, 4, Double.NaN); } } } else if (impl.hasVIF()) { // has VIF but without p-values and stuff for (int i=0; i<ns.length; i++) { if (validVIFNames.contains(ns[i])) { int index = validVIFNames.indexOf(ns[i]); tdt.set(i, 1, variable_inflation_factors[index]); } else { tdt.set(i, 1, Double.NaN); } } } coefficients_table.fillFromImpl(tdt); if(impl.beta() != null) { // get varImp calculateVarimpBase(magnitudes, indices, impl.getNormBeta()); String[] names2 = new String[len]; for (int i = 0; i < len; ++i) names2[i] = names[indices[i]]; tdt = new TwoDimTable("Standardized Coefficient Magnitudes", "standardized coefficient magnitudes", names2, new String[]{"Coefficients", "Sign"}, new String[]{"double", "string"}, new String[]{"%5f", "%s"}, "names"); for (int i = 0; i < beta.length - 1; ++i) { tdt.set(i, 0, magnitudes[indices[i]]); tdt.set(i, 1, beta[indices[i]] < 0 ? "NEG" : "POS"); } standardized_coefficient_magnitudes = new TwoDimTableV3(); standardized_coefficient_magnitudes.fillFromImpl(tdt); } return this; } } // GLMModelOutputV2 public static void calculateVarimpBase(double[] magnitudes, int[] indices, double[] betaNorm) { for (int i = 0; i < magnitudes.length; ++i) { magnitudes[i] = (float) betaNorm[i]; if (magnitudes[i] < 0) magnitudes[i] *= -1; } sort(indices, magnitudes, -1, -1); } public GLMV3.GLMParametersV3 createParametersSchema() { return new GLMV3.GLMParametersV3(); } public GLMModelOutputV3 createOutputSchema() { return new GLMModelOutputV3(); } @Override public GLMModel createImpl() { GLMModel.GLMParameters parms = parameters.createImpl(); return new GLMModel( model_id.key(), parms, null, new double[]{0.0}, 0.0, 0.0, 0); } }
0
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/GLMRegularizationPathV3.java
package hex.schemas; import hex.glm.GLMModel; import water.api.API; import water.api.schemas3.KeyV3; import water.api.schemas3.SchemaV3; /** */ public class GLMRegularizationPathV3 extends SchemaV3<GLMModel.RegularizationPath,GLMRegularizationPathV3>{ @API(help="source model", required = true, direction = API.Direction.INPUT) public KeyV3.ModelKeyV3 model; @API(help="Computed lambda values") public double [] lambdas; @API(help="alpha values used in building submodels") public double [] alphas; @API(help="explained deviance on the training set") public double [] explained_deviance_train; @API(help="explained deviance on the validation set") public double [] explained_deviance_valid; @API(help="coefficients for all lambdas") public double [][] coefficients; @API(help="standardized coefficients for all lambdas") public double [][] coefficients_std; @API(help="coefficient names") public String [] coefficient_names; @API(help="z-values") public double [][] z_values; @API(help="p-values") public double [][] p_values; @API(help="standard error") public double [][] std_errs; }
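Since GLMRegularizationPathV3 is a plain schema of parallel arrays (one entry per submodel along the path), a small helper can pick out the submodel with the best validation explained deviance. The sketch below only reads the public fields declared above and assumes the schema has already been filled in by the REST layer; how it gets populated is outside this fragment.
import hex.schemas.GLMRegularizationPathV3;

public class RegPathScan {
  /** Index of the submodel with the highest explained deviance on the validation set. */
  static int bestByValidDeviance(GLMRegularizationPathV3 path) {
    int best = 0;
    for (int i = 1; i < path.explained_deviance_valid.length; i++)
      if (path.explained_deviance_valid[i] > path.explained_deviance_valid[best])
        best = i;
    // path.lambdas[best], path.alphas[best], and path.coefficients[best] (aligned with
    // path.coefficient_names) all describe this same submodel.
    return best;
  }
}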