Columns: index (int64), repo_id (string), file_path (string), content (string)
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/Logistic.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Logistic.java * Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.pmml.producer.LogisticProducerHelper; import weka.core.Aggregateable; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.ConjugateGradientOptimization; import weka.core.Instance; import weka.core.Instances; import weka.core.Optimization; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.core.pmml.PMMLProducer; import weka.filters.Filter; import weka.filters.unsupervised.attribute.NominalToBinary; import weka.filters.unsupervised.attribute.RemoveUseless; import weka.filters.unsupervised.attribute.ReplaceMissingValues; /** * <!-- globalinfo-start --> Class for building and using a multinomial logistic regression model with a ridge estimator.<br/> * <br/> * There are some modifications, however, compared to the paper of leCessie and van Houwelingen(1992): <br/> * <br/> * If there are k classes for n instances with m attributes, the parameter matrix B to be calculated will be an m*(k-1) matrix.<br/> * <br/> * The probability for class j with the exception of the last class is<br/> * <br/> * Pj(Xi) = exp(XiBj)/((sum[j=1..(k-1)]exp(Xi*Bj))+1) <br/> * <br/> * The last class has probability<br/> * <br/> * 1-(sum[j=1..(k-1)]Pj(Xi)) <br/> * = 1/((sum[j=1..(k-1)]exp(Xi*Bj))+1)<br/> * <br/> * The (negative) multinomial log-likelihood is thus: <br/> * <br/> * L = -sum[i=1..n]{<br/> * sum[j=1..(k-1)](Yij * ln(Pj(Xi)))<br/> * +(1 - (sum[j=1..(k-1)]Yij)) <br/> * * ln(1 - sum[j=1..(k-1)]Pj(Xi))<br/> * } + ridge * (B^2)<br/> * <br/> * In order to find the matrix B for which L is minimised, a Quasi-Newton Method is used to search for the optimized values of the m*(k-1) variables. Note that before we use the optimization procedure, we 'squeeze' the matrix B into a * m*(k-1) vector. For details of the optimization procedure, please check weka.core.Optimization class.<br/> * <br/> * Although original Logistic Regression does not deal with instance weights, we modify the algorithm a little bit to handle the instance weights.<br/> * <br/> * For more information see:<br/> * <br/> * le Cessie, S., van Houwelingen, J.C. (1992). Ridge Estimators in Logistic Regression. Applied Statistics. 41(1):191-201.<br/> * <br/> * Note: Missing values are replaced using a ReplaceMissingValuesFilter, and nominal attributes are transformed into numeric attributes using a NominalToBinaryFilter. 
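* <br/>
* Equivalently, writing z_j = Xi*Bj for j = 1..(k-1) and fixing z_k = 0, the class probabilities form a softmax: Pj(Xi) = exp(z_j)/(sum[l=1..k]exp(z_l)), which reproduces both expressions above.<br/>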
* <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;article{leCessie1992, * author = {le Cessie, S. and van Houwelingen, J.C.}, * journal = {Applied Statistics}, * number = {1}, * pages = {191-201}, * title = {Ridge Estimators in Logistic Regression}, * volume = {41}, * year = {1992} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -D * Turn on debugging output. * </pre> * * <pre> * -R &lt;ridge&gt; * Set the ridge in the log-likelihood. * </pre> * * <pre> * -M &lt;number&gt; * Set the maximum number of iterations (default -1, until convergence). * </pre> * * <!-- options-end --> * * @author Xin Xu (xx5@cs.waikato.ac.nz) * @version $Revision$ */ public class Logistic extends AbstractClassifier implements OptionHandler, WeightedInstancesHandler, TechnicalInformationHandler, PMMLProducer, Aggregateable<Logistic> { /** for serialization */ static final long serialVersionUID = 3932117032546553727L; /** The coefficients (optimized parameters) of the model */ protected double[][] m_Par; /** The data saved as a matrix */ protected double[][] m_Data; /** The number of attributes in the model */ protected int m_NumPredictors; /** The index of the class attribute */ protected int m_ClassIndex; /** The number of the class labels */ protected int m_NumClasses; /** The ridge parameter. */ protected double m_Ridge = 1e-8; /** An attribute filter */ private RemoveUseless m_AttFilter; /** The filter used to make attributes numeric. */ private NominalToBinary m_NominalToBinary; /** The filter used to get rid of missing values. */ private ReplaceMissingValues m_ReplaceMissingValues; /** Log-likelihood of the searched model */ protected double m_LL; /** The maximum number of iterations. */ private int m_MaxIts = -1; /** Wether to use conjugate gradient descent rather than BFGS updates. */ private boolean m_useConjugateGradientDescent = false; private Instances m_structure; /** * Constructor that sets the default number of decimal places to 4. */ public Logistic() { this.setNumDecimalPlaces(4); } /** * Returns a string describing this classifier * * @return a description of the classifier suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for building and using a multinomial logistic " + "regression model with a ridge estimator.\n\n" + "There are some modifications, however, compared to the paper of " + "leCessie and van Houwelingen(1992): \n\n" + "If there are k classes for n instances with m attributes, the " + "parameter matrix B to be calculated will be an m*(k-1) matrix.\n\n" + "The probability for class j with the exception of the last class is\n\n" + "Pj(Xi) = exp(XiBj)/((sum[j=1..(k-1)]exp(Xi*Bj))+1) \n\n" + "The last class has probability\n\n" + "1-(sum[j=1..(k-1)]Pj(Xi)) \n\t= 1/((sum[j=1..(k-1)]exp(Xi*Bj))+1)\n\n" + "The (negative) multinomial log-likelihood is thus: \n\n" + "L = -sum[i=1..n]{\n\tsum[j=1..(k-1)](Yij * ln(Pj(Xi)))" + "\n\t+(1 - (sum[j=1..(k-1)]Yij)) \n\t* ln(1 - sum[j=1..(k-1)]Pj(Xi))" + "\n\t} + ridge * (B^2)\n\n" + "In order to find the matrix B for which L is minimised, a " + "Quasi-Newton Method is used to search for the optimized values of " + "the m*(k-1) variables. Note that before we use the optimization " + "procedure, we 'squeeze' the matrix B into a m*(k-1) vector. 
For " + "details of the optimization procedure, please check " + "weka.core.Optimization class.\n\n" + "Although original Logistic Regression does not deal with instance " + "weights, we modify the algorithm a little bit to handle the " + "instance weights.\n\n" + "For more information see:\n\n" + this.getTechnicalInformation().toString() + "\n\n" + "Note: Missing values are replaced using a ReplaceMissingValuesFilter, and " + "nominal attributes are transformed into numeric attributes using a " + "NominalToBinaryFilter."; } /** * Returns an instance of a TechnicalInformation object, containing detailed information about the technical background of this class, e.g., paper reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "le Cessie, S. and van Houwelingen, J.C."); result.setValue(Field.YEAR, "1992"); result.setValue(Field.TITLE, "Ridge Estimators in Logistic Regression"); result.setValue(Field.JOURNAL, "Applied Statistics"); result.setValue(Field.VOLUME, "41"); result.setValue(Field.NUMBER, "1"); result.setValue(Field.PAGES, "191-201"); return result; } /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<>(4); newVector.addElement(new Option("\tUse conjugate gradient descent rather than BFGS updates.", "C", 0, "-C")); newVector.addElement(new Option("\tSet the ridge in the log-likelihood.", "R", 1, "-R <ridge>")); newVector.addElement(new Option("\tSet the maximum number of iterations" + " (default -1, until convergence).", "M", 1, "-M <number>")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -D * Turn on debugging output. * </pre> * * <pre> * -R &lt;ridge&gt; * Set the ridge in the log-likelihood. * </pre> * * <pre> * -M &lt;number&gt; * Set the maximum number of iterations (default -1, until convergence). * </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { this.setUseConjugateGradientDescent(Utils.getFlag('C', options)); String ridgeString = Utils.getOption('R', options); if (ridgeString.length() != 0) { this.m_Ridge = Double.parseDouble(ridgeString); } else { this.m_Ridge = 1.0e-8; } String maxItsString = Utils.getOption('M', options); if (maxItsString.length() != 0) { this.m_MaxIts = Integer.parseInt(maxItsString); } else { this.m_MaxIts = -1; } super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the classifier. 
* * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<>(); if (this.getUseConjugateGradientDescent()) { options.add("-C"); } options.add("-R"); options.add("" + this.m_Ridge); options.add("-M"); options.add("" + this.m_MaxIts); Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ @Override public String debugTipText() { return "Output debug information to the console."; } /** * Sets whether debugging output will be printed. * * @param debug * true if debugging output should be printed */ @Override public void setDebug(final boolean debug) { this.m_Debug = debug; } /** * Gets whether debugging output will be printed. * * @return true if debugging output will be printed */ @Override public boolean getDebug() { return this.m_Debug; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String useConjugateGradientDescentTipText() { return "Use conjugate gradient descent rather than BFGS updates; faster for problems with many parameters."; } /** * Sets whether conjugate gradient descent is used. * * @param useConjugateGradientDescent * true if CGD is to be used. */ public void setUseConjugateGradientDescent(final boolean useConjugateGradientDescent) { this.m_useConjugateGradientDescent = useConjugateGradientDescent; } /** * Gets whether to use conjugate gradient descent rather than BFGS updates. * * @return true if CGD is used */ public boolean getUseConjugateGradientDescent() { return this.m_useConjugateGradientDescent; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String ridgeTipText() { return "Set the Ridge value in the log-likelihood."; } /** * Sets the ridge in the log-likelihood. * * @param ridge * the ridge */ public void setRidge(final double ridge) { this.m_Ridge = ridge; } /** * Gets the ridge in the log-likelihood. * * @return the ridge */ public double getRidge() { return this.m_Ridge; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String maxItsTipText() { return "Maximum number of iterations to perform."; } /** * Get the value of MaxIts. * * @return Value of MaxIts. */ public int getMaxIts() { return this.m_MaxIts; } /** * Set the value of MaxIts. * * @param newMaxIts * Value to assign to MaxIts. 
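* (-1, the default, means iterate until convergence.)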
*/ public void setMaxIts(final int newMaxIts) { this.m_MaxIts = newMaxIts; } private class OptEng extends Optimization { OptObject m_oO = null; private OptEng(final OptObject oO) { this.m_oO = oO; } @Override protected double objectiveFunction(final double[] x) throws InterruptedException { return this.m_oO.objectiveFunction(x); } @Override protected double[] evaluateGradient(final double[] x) throws InterruptedException { return this.m_oO.evaluateGradient(x); } @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } private class OptEngCG extends ConjugateGradientOptimization { OptObject m_oO = null; private OptEngCG(final OptObject oO) { this.m_oO = oO; } @Override protected double objectiveFunction(final double[] x) throws InterruptedException { return this.m_oO.objectiveFunction(x); } @Override protected double[] evaluateGradient(final double[] x) throws InterruptedException { return this.m_oO.evaluateGradient(x); } @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } private class OptObject { /** Weights of instances in the data */ private double[] weights; /** Class labels of instances */ private int[] cls; /** * Set the weights of instances * * @param w * the weights to be set */ public void setWeights(final double[] w) { this.weights = w; } /** * Set the class labels of instances * * @param c * the class labels to be set */ public void setClassLabels(final int[] c) { this.cls = c; } /** * Computes the logarithm of x plus y given the logarithms of x and y. * * This is based on Tobias P. Mann's description in "Numerically Stable Hidden Markov Implementation" (2006). */ protected double logOfSum(final double logOfX, final double logOfY) { // Check for cases where log of zero is present if (Double.isNaN(logOfX)) { return logOfY; } if (Double.isNaN(logOfY)) { return logOfX; } // Otherwise return proper result, taken care of overflows if (logOfX > logOfY) { return logOfX + Math.log(1 + Math.exp(logOfY - logOfX)); } else { return logOfY + Math.log(1 + Math.exp(logOfX - logOfY)); } } /** * Evaluate objective function * * @param x * the current values of variables * @return the value of the objective function * @throws InterruptedException */ protected double objectiveFunction(final double[] x) throws InterruptedException { double nll = 0; // -LogLikelihood int dim = Logistic.this.m_NumPredictors + 1; // Number of variables per class for (int i = 0; i < this.cls.length; i++) { // ith instance if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } double[] exp = new double[Logistic.this.m_NumClasses - 1]; int index; for (int offset = 0; offset < Logistic.this.m_NumClasses - 1; offset++) { index = offset * dim; for (int j = 0; j < dim; j++) { exp[offset] += Logistic.this.m_Data[i][j] * x[index + j]; } } double num = 0; if (this.cls[i] < Logistic.this.m_NumClasses - 1) { // Class of this instance num = exp[this.cls[i]]; } double denom = 0; for (int offset = 0; offset < Logistic.this.m_NumClasses - 1; offset++) { denom = this.logOfSum(denom, exp[offset]); } nll -= this.weights[i] * (num - denom); // Weighted NLL } // Ridge: note that intercepts NOT included for (int offset = 0; offset < Logistic.this.m_NumClasses - 1; offset++) { for (int r = 1; r < dim; r++) { nll += Logistic.this.m_Ridge * x[offset * dim + r] * x[offset * dim + r]; } } return nll; } /** * Evaluate Jacobian vector * * @param x * the current values of variables * @return the gradient vector * @throws InterruptedException */ protected 
double[] evaluateGradient(final double[] x) throws InterruptedException { double[] grad = new double[x.length]; int dim = Logistic.this.m_NumPredictors + 1; // Number of variables per class for (int i = 0; i < this.cls.length; i++) { // ith instance double[] num = new double[Logistic.this.m_NumClasses - 1]; // numerator of // [-log(1+sum(exp))]' int index; for (int offset = 0; offset < Logistic.this.m_NumClasses - 1; offset++) { // Which if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } // part of x double exp = 0.0; index = offset * dim; for (int j = 0; j < dim; j++) { exp += Logistic.this.m_Data[i][j] * x[index + j]; } num[offset] = exp; } double max = num[Utils.maxIndex(num)]; double denom = Math.exp(-max); // Denominator of [-log(1+sum(exp))]' for (int offset = 0; offset < Logistic.this.m_NumClasses - 1; offset++) { num[offset] = Math.exp(num[offset] - max); denom += num[offset]; } Utils.normalize(num, denom); // Update denominator of the gradient of -log(Posterior) double firstTerm; for (int offset = 0; offset < Logistic.this.m_NumClasses - 1; offset++) { // Which // part of x index = offset * dim; firstTerm = this.weights[i] * num[offset]; for (int q = 0; q < dim; q++) { grad[index + q] += firstTerm * Logistic.this.m_Data[i][q]; } } if (this.cls[i] != Logistic.this.m_NumClasses - 1) { // Not the last class for (int p = 0; p < dim; p++) { grad[this.cls[i] * dim + p] -= this.weights[i] * Logistic.this.m_Data[i][p]; } } } // Ridge: note that intercepts NOT included for (int offset = 0; offset < Logistic.this.m_NumClasses - 1; offset++) { for (int r = 1; r < dim; r++) { grad[offset * dim + r] += 2 * Logistic.this.m_Ridge * x[offset * dim + r]; } } return grad; } } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Builds the classifier * * @param train * the training data to be used for generating the boosted classifier. * @throws Exception * if the classifier could not be built successfully */ @Override public void buildClassifier(Instances train) throws Exception { // can classifier handle the data? 
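// After the capability check, training proceeds in three stages: (1) filter
// the data (delete instances with a missing class, replace remaining missing
// values, drop useless attributes, binarize nominal attributes); (2)
// standardize each predictor to zero mean and unit variance; (3) minimize the
// ridge-penalized negative log-likelihood with BFGS or conjugate-gradient
// updates, then rescale the fitted coefficients back to the original
// attribute units. The fitted filters are reused at prediction time in
// distributionForInstance(), so train and test transformations stay consistent.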
this.getCapabilities().testWithFail(train); // remove instances with missing class train = new Instances(train); train.deleteWithMissingClass(); // Replace missing values this.m_ReplaceMissingValues = new ReplaceMissingValues(); this.m_ReplaceMissingValues.setInputFormat(train); train = Filter.useFilter(train, this.m_ReplaceMissingValues); // Remove useless attributes this.m_AttFilter = new RemoveUseless(); this.m_AttFilter.setInputFormat(train); train = Filter.useFilter(train, this.m_AttFilter); // Transform attributes this.m_NominalToBinary = new NominalToBinary(); this.m_NominalToBinary.setInputFormat(train); train = Filter.useFilter(train, this.m_NominalToBinary); // Save the structure for printing the model this.m_structure = new Instances(train, 0); // Extract data this.m_ClassIndex = train.classIndex(); this.m_NumClasses = train.numClasses(); int nK = this.m_NumClasses - 1; // Only K-1 class labels needed int nR = this.m_NumPredictors = train.numAttributes() - 1; int nC = train.numInstances(); this.m_Data = new double[nC][nR + 1]; // Data values int[] Y = new int[nC]; // Class labels double[] xMean = new double[nR + 1]; // Attribute means double[] xSD = new double[nR + 1]; // Attribute stddev's double[] sY = new double[nK + 1]; // Number of classes double[] weights = new double[nC]; // Weights of instances double totWeights = 0; // Total weights of the instances this.m_Par = new double[nR + 1][nK]; // Optimized parameter values if (this.m_Debug) { System.out.println("Extracting data..."); } for (int i = 0; i < nC; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } // initialize X[][] Instance current = train.instance(i); Y[i] = (int) current.classValue(); // Class value starts from 0 weights[i] = current.weight(); // Dealing with weights totWeights += weights[i]; this.m_Data[i][0] = 1; int j = 1; for (int k = 0; k <= nR; k++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (k != this.m_ClassIndex) { double x = current.value(k); this.m_Data[i][j] = x; xMean[j] += weights[i] * x; xSD[j] += weights[i] * x * x; j++; } } // Class count sY[Y[i]]++; } if ((totWeights <= 1) && (nC > 1)) { throw new Exception("Sum of weights of instances less than 1, please reweight!"); } xMean[0] = 0; xSD[0] = 1; for (int j = 1; j <= nR; j++) { xMean[j] = xMean[j] / totWeights; if (totWeights > 1) { xSD[j] = Math.sqrt(Math.abs(xSD[j] - totWeights * xMean[j] * xMean[j]) / (totWeights - 1)); } else { xSD[j] = 0; } } if (this.m_Debug) { // Output stats about input data System.out.println("Descriptives..."); for (int m = 0; m <= nK; m++) { System.out.println(sY[m] + " cases have class " + m); } System.out.println("\n Variable Avg SD "); for (int j = 1; j <= nR; j++) { System.out.println(Utils.doubleToString(j, 8, 4) + Utils.doubleToString(xMean[j], 10, 4) + Utils.doubleToString(xSD[j], 10, 4)); } } // Normalise input data for (int i = 0; i < nC; i++) { for (int j = 0; j <= nR; j++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (xSD[j] != 0) { this.m_Data[i][j] = (this.m_Data[i][j] - xMean[j]) / xSD[j]; } } } if (this.m_Debug) { System.out.println("\nIteration History..."); } double x[] = new double[(nR + 1) * nK]; double[][] b = new double[2][x.length]; // Boundary constraints, N/A here // Initialize for (int p = 0; p < nK; p++) { // XXX kill 
weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } int offset = p * (nR + 1); x[offset] = Math.log(sY[p] + 1.0) - Math.log(sY[nK] + 1.0); // Null model b[0][offset] = Double.NaN; b[1][offset] = Double.NaN; for (int q = 1; q <= nR; q++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } x[offset + q] = 0.0; b[0][offset + q] = Double.NaN; b[1][offset + q] = Double.NaN; } } OptObject oO = new OptObject(); oO.setWeights(weights); oO.setClassLabels(Y); Optimization opt = null; if (this.m_useConjugateGradientDescent) { opt = new OptEngCG(oO); } else { opt = new OptEng(oO); } opt.setDebug(this.m_Debug); if (this.m_MaxIts == -1) { // Search until convergence x = opt.findArgmin(x, b); while (x == null) { x = opt.getVarbValues(); if (this.m_Debug) { System.out.println("First set of iterations finished, not enough!"); } x = opt.findArgmin(x, b); } if (this.m_Debug) { System.out.println(" -------------<Converged>--------------"); } } else { opt.setMaxIteration(this.m_MaxIts); x = opt.findArgmin(x, b); if (x == null) { x = opt.getVarbValues(); } } this.m_LL = -opt.getMinFunction(); // Log-likelihood // Don't need data matrix anymore this.m_Data = null; // Convert coefficients back to non-normalized attribute units for (int i = 0; i < nK; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } this.m_Par[0][i] = x[i * (nR + 1)]; for (int j = 1; j <= nR; j++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } this.m_Par[j][i] = x[i * (nR + 1) + j]; if (xSD[j] != 0) { this.m_Par[j][i] /= xSD[j]; this.m_Par[0][i] -= this.m_Par[j][i] * xMean[j]; } } } } /** * Computes the distribution for a given instance * * @param instance * the instance for which distribution is computed * @return the distribution * @throws Exception * if the distribution can't be computed successfully */ @Override public double[] distributionForInstance(Instance instance) throws Exception { this.m_ReplaceMissingValues.input(instance); instance = this.m_ReplaceMissingValues.output(); this.m_AttFilter.input(instance); instance = this.m_AttFilter.output(); this.m_NominalToBinary.input(instance); instance = this.m_NominalToBinary.output(); // Extract the predictor columns into an array double[] instDat = new double[this.m_NumPredictors + 1]; int j = 1; instDat[0] = 1; for (int k = 0; k <= this.m_NumPredictors; k++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (k != this.m_ClassIndex) { instDat[j++] = instance.value(k); } } double[] distribution = this.evaluateProbability(instDat); return distribution; } /** * Compute the posterior distribution using optimized parameter values and the testing instance. 
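* For numerical stability each probability is computed relative to its own logit: P_m = 1/(sum[n=1..(k-1)]exp(v_n - v_m) + exp(-v_m)), with v_k fixed at 0. This equals the usual softmax exp(v_m)/sum[n=1..k]exp(v_n) while never exponentiating a large positive value directly.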
* * @param data * the testing instance * @return the posterior probability distribution * @throws InterruptedException */ private double[] evaluateProbability(final double[] data) throws InterruptedException { double[] prob = new double[this.m_NumClasses], v = new double[this.m_NumClasses]; // Log-posterior before normalizing for (int j = 0; j < this.m_NumClasses - 1; j++) { for (int k = 0; k <= this.m_NumPredictors; k++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } v[j] += this.m_Par[k][j] * data[k]; } } v[this.m_NumClasses - 1] = 0; // Do so to avoid scaling problems for (int m = 0; m < this.m_NumClasses; m++) { double sum = 0; for (int n = 0; n < this.m_NumClasses - 1; n++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } sum += Math.exp(v[n] - v[m]); } prob[m] = 1 / (sum + Math.exp(-v[m])); } return prob; } /** * Returns the coefficients for this logistic model. The first dimension indexes the attributes, and the second the classes. * * @return the coefficients for this logistic model */ public double[][] coefficients() { return this.m_Par; } /** * Gets a string describing the classifier. * * @return a string describing the classifer built. */ @Override public String toString() { StringBuffer temp = new StringBuffer(); String result = ""; temp.append("Logistic Regression with ridge parameter of " + this.m_Ridge); if (this.m_Par == null) { return result + ": No model built yet."; } // find longest attribute name int attLength = 0; for (int i = 0; i < this.m_structure.numAttributes(); i++) { if (i != this.m_structure.classIndex() && this.m_structure.attribute(i).name().length() > attLength) { attLength = this.m_structure.attribute(i).name().length(); } } if ("Intercept".length() > attLength) { attLength = "Intercept".length(); } if ("Variable".length() > attLength) { attLength = "Variable".length(); } attLength += 2; int colWidth = 0; // check length of class names for (int i = 0; i < this.m_structure.classAttribute().numValues() - 1; i++) { if (this.m_structure.classAttribute().value(i).length() > colWidth) { colWidth = this.m_structure.classAttribute().value(i).length(); } } // check against coefficients and odds ratios for (int j = 1; j <= this.m_NumPredictors; j++) { for (int k = 0; k < this.m_NumClasses - 1; k++) { if (Utils.doubleToString(this.m_Par[j][k], 8 + this.getNumDecimalPlaces(), this.getNumDecimalPlaces()).trim().length() > colWidth) { colWidth = Utils.doubleToString(this.m_Par[j][k], 8 + this.getNumDecimalPlaces(), this.getNumDecimalPlaces()).trim().length(); } double ORc = Math.exp(this.m_Par[j][k]); String t = " " + ((ORc > 1e10) ? 
"" + ORc : Utils.doubleToString(ORc, 8 + this.getNumDecimalPlaces(), this.getNumDecimalPlaces())); if (t.trim().length() > colWidth) { colWidth = t.trim().length(); } } } if ("Class".length() > colWidth) { colWidth = "Class".length(); } colWidth += 2; temp.append("\nCoefficients...\n"); temp.append(Utils.padLeft(" ", attLength) + Utils.padLeft("Class", colWidth) + "\n"); temp.append(Utils.padRight("Variable", attLength)); for (int i = 0; i < this.m_NumClasses - 1; i++) { String className = this.m_structure.classAttribute().value(i); temp.append(Utils.padLeft(className, colWidth)); } temp.append("\n"); int separatorL = attLength + ((this.m_NumClasses - 1) * colWidth); for (int i = 0; i < separatorL; i++) { temp.append("="); } temp.append("\n"); int j = 1; for (int i = 0; i < this.m_structure.numAttributes(); i++) { if (i != this.m_structure.classIndex()) { temp.append(Utils.padRight(this.m_structure.attribute(i).name(), attLength)); for (int k = 0; k < this.m_NumClasses - 1; k++) { temp.append(Utils.padLeft(Utils.doubleToString(this.m_Par[j][k], 8 + this.getNumDecimalPlaces(), this.getNumDecimalPlaces()).trim(), colWidth)); } temp.append("\n"); j++; } } temp.append(Utils.padRight("Intercept", attLength)); for (int k = 0; k < this.m_NumClasses - 1; k++) { temp.append(Utils.padLeft(Utils.doubleToString(this.m_Par[0][k], 6 + this.getNumDecimalPlaces(), this.getNumDecimalPlaces()).trim(), colWidth)); } temp.append("\n"); temp.append("\n\nOdds Ratios...\n"); temp.append(Utils.padLeft(" ", attLength) + Utils.padLeft("Class", colWidth) + "\n"); temp.append(Utils.padRight("Variable", attLength)); for (int i = 0; i < this.m_NumClasses - 1; i++) { String className = this.m_structure.classAttribute().value(i); temp.append(Utils.padLeft(className, colWidth)); } temp.append("\n"); for (int i = 0; i < separatorL; i++) { temp.append("="); } temp.append("\n"); j = 1; for (int i = 0; i < this.m_structure.numAttributes(); i++) { if (i != this.m_structure.classIndex()) { temp.append(Utils.padRight(this.m_structure.attribute(i).name(), attLength)); for (int k = 0; k < this.m_NumClasses - 1; k++) { double ORc = Math.exp(this.m_Par[j][k]); String ORs = " " + ((ORc > 1e10) ? "" + ORc : Utils.doubleToString(ORc, 8 + this.getNumDecimalPlaces(), this.getNumDecimalPlaces())); temp.append(Utils.padLeft(ORs.trim(), colWidth)); } temp.append("\n"); j++; } } return temp.toString(); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } protected int m_numModels = 0; /** * Aggregate an object with this one * * @param toAggregate * the object to aggregate * @return the result of aggregation * @throws Exception * if the supplied object can't be aggregated for some reason */ @Override public Logistic aggregate(final Logistic toAggregate) throws Exception { if (this.m_numModels == Integer.MIN_VALUE) { throw new Exception("Can't aggregate further - model has already been " + "aggregated and finalized"); } if (this.m_Par == null) { throw new Exception("No model built yet, can't aggregate"); } if (!this.m_structure.equalHeaders(toAggregate.m_structure)) { throw new Exception("Can't aggregate - data headers dont match: " + this.m_structure.equalHeadersMsg(toAggregate.m_structure)); } for (int i = 0; i < this.m_Par.length; i++) { for (int j = 0; j < this.m_Par[i].length; j++) { this.m_Par[i][j] += toAggregate.m_Par[i][j]; } } this.m_numModels++; return this; } /** * Call to complete the aggregation process. 
Allows implementers to do any final processing based on how many objects were aggregated. * * @throws Exception * if the aggregation can't be finalized for some reason */ @Override public void finalizeAggregation() throws Exception { if (this.m_numModels == Integer.MIN_VALUE) { throw new Exception("Aggregation has already been finalized"); } if (this.m_numModels == 0) { throw new Exception("Unable to finalize aggregation - " + "haven't seen any models to aggregate"); } for (int i = 0; i < this.m_Par.length; i++) { for (int j = 0; j < this.m_Par[i].length; j++) { this.m_Par[i][j] /= (this.m_numModels + 1); } } // aggregation complete this.m_numModels = Integer.MIN_VALUE; } /** * Main method for testing this class. * * @param argv * should contain the command line arguments to the scheme (see Evaluation) */ public static void main(final String[] argv) { runClassifier(new Logistic(), argv); } /** * Produce a PMML representation of this logistic model * * @param train * the training data that was used to construct the model * * @return a string containing the PMML representation */ @Override public String toPMML(final Instances train) { return LogisticProducerHelper.toPMML(train, this.m_structure, this.m_Par, this.m_NumClasses); } }
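A usage illustration (not part of the source file): the sketch below trains the Logistic classifier above on an ARFF dataset. The file name is hypothetical, and the DataSource loader is assumed from the standard Weka runtime API; any dataset with a nominal class attribute will do.

import weka.classifiers.functions.Logistic;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class LogisticDemo {
  public static void main(String[] args) throws Exception {
    // Hypothetical file name; substitute any nominal-class ARFF dataset.
    Instances data = new DataSource("iris.arff").getDataSet();
    data.setClassIndex(data.numAttributes() - 1);

    Logistic model = new Logistic();
    model.setRidge(1e-8); // default ridge penalty (intercepts are not penalized)
    model.setMaxIts(-1);  // -1 = iterate until convergence
    model.buildClassifier(data);

    // Posterior class probabilities for the first training instance.
    double[] dist = model.distributionForInstance(data.instance(0));
    System.out.println(java.util.Arrays.toString(dist));
  }
}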
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/MultilayerPerceptron.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * MultilayerPerceptron.java * Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.functions; import java.awt.BorderLayout; import java.awt.Color; import java.awt.Component; import java.awt.Dimension; import java.awt.FontMetrics; import java.awt.Graphics; import java.awt.event.ActionEvent; import java.awt.event.ActionListener; import java.awt.event.MouseAdapter; import java.awt.event.MouseEvent; import java.awt.event.WindowAdapter; import java.awt.event.WindowEvent; import java.util.ArrayList; import java.util.Collections; import java.util.Enumeration; import java.util.Random; import java.util.StringTokenizer; import java.util.Vector; import javax.swing.BorderFactory; import javax.swing.Box; import javax.swing.BoxLayout; import javax.swing.JButton; import javax.swing.JFrame; import javax.swing.JLabel; import javax.swing.JOptionPane; import javax.swing.JPanel; import javax.swing.JScrollPane; import javax.swing.JTextField; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.IterativeClassifier; import weka.classifiers.functions.neural.LinearUnit; import weka.classifiers.functions.neural.NeuralConnection; import weka.classifiers.functions.neural.NeuralNode; import weka.classifiers.functions.neural.SigmoidUnit; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.DenseInstance; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.Randomizable; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.filters.Filter; import weka.filters.unsupervised.attribute.NominalToBinary; /** * <!-- globalinfo-start --> A Classifier that uses backpropagation to classify instances.<br/> * This network can be built by hand, created by an algorithm or both. The network can also be * monitored and modified during training time. The nodes in this network are all sigmoid (except * for when the class is numeric in which case the the output nodes become unthresholded linear * units). * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -L &lt;learning rate&gt; * Learning Rate for the backpropagation algorithm. * (Value should be between 0 - 1, Default = 0.3). * </pre> * * <pre> * -M &lt;momentum&gt; * Momentum Rate for the backpropagation algorithm. * (Value should be between 0 - 1, Default = 0.2). * </pre> * * <pre> * -N &lt;number of epochs&gt; * Number of epochs to train through. * (Default = 500). * </pre> * * <pre> * -V &lt;percentage size of validation set&gt; * Percentage size of validation set to use to terminate * training (if this is non zero it can pre-empt num of epochs. * (Value should be between 0 - 100, Default = 0). 
* </pre> * * <pre> * -S &lt;seed&gt; * The value used to seed the random number generator * (Value should be &gt;= 0 and and a long, Default = 0). * </pre> * * <pre> * -E &lt;threshold for number of consequetive errors&gt; * The consequetive number of errors allowed for validation * testing before the netwrok terminates. * (Value should be &gt; 0, Default = 20). * </pre> * * <pre> * -G * GUI will be opened. * (Use this to bring up a GUI). * </pre> * * <pre> * -A * Autocreation of the network connections will NOT be done. * (This will be ignored if -G is NOT set) * </pre> * * <pre> * -B * A NominalToBinary filter will NOT automatically be used. * (Set this to not use a NominalToBinary filter). * </pre> * * <pre> * -H &lt;comma seperated numbers for nodes on each layer&gt; * The hidden layers to be created for the network. * (Value should be a list of comma separated Natural * numbers or the letters 'a' = (attribs + classes) / 2, * 'i' = attribs, 'o' = classes, 't' = attribs .+ classes) * for wildcard values, Default = a). * </pre> * * <pre> * -C * Normalizing a numeric class will NOT be done. * (Set this to not normalize the class if it's numeric). * </pre> * * <pre> * -I * Normalizing the attributes will NOT be done. * (Set this to not normalize the attributes). * </pre> * * <pre> * -R * Reseting the network will NOT be allowed. * (Set this to not allow the network to reset). * </pre> * * <pre> * -D * Learning rate decay will occur. * (Set this to cause the learning rate to decay). * </pre> * * <!-- options-end --> * * @author Malcolm Ware (mfw4@cs.waikato.ac.nz) * @version $Revision$ */ public class MultilayerPerceptron extends AbstractClassifier implements OptionHandler, WeightedInstancesHandler, Randomizable, IterativeClassifier { /** for serialization */ private static final long serialVersionUID = -5990607817048210779L; /** * Main method for testing this class. * * @param argv * should contain command line options (see setOptions) */ public static void main(final String[] argv) { runClassifier(new MultilayerPerceptron(), argv); } /** * This inner class is used to connect the nodes in the network up to the data that they are * classifying, Note that objects of this class are only suitable to go on the attribute side or * class side of the network and not both. */ protected class NeuralEnd extends NeuralConnection { /** for serialization */ static final long serialVersionUID = 7305185603191183338L; /** * the value that represents the instance value this node represents. For an input it is the * attribute number, for an output, if nominal it is the class value. */ private int m_link; /** True if node is an input, False if it's an output. */ private boolean m_input; /** * Constructor */ public NeuralEnd(final String id) { super(id); this.m_link = 0; this.m_input = true; } /** * Call this function to determine if the point at x,y is on the unit. * * @param g * The graphics context for font size info. * @param x * The x coord. * @param y * The y coord. * @param w * The width of the display. * @param h * The height of the display. * @return True if the point is on the unit, false otherwise. 
*/ @Override public boolean onUnit(final Graphics g, final int x, final int y, final int w, final int h) { FontMetrics fm = g.getFontMetrics(); int l = (int) (this.m_x * w) - fm.stringWidth(this.m_id) / 2; int t = (int) (this.m_y * h) - fm.getHeight() / 2; if (x < l || x > l + fm.stringWidth(this.m_id) + 4 || y < t || y > t + fm.getHeight() + fm.getDescent() + 4) { return false; } return true; } /** * This will draw the node id to the graphics context. * * @param g * The graphics context. * @param w * The width of the drawing area. * @param h * The height of the drawing area. */ @Override public void drawNode(final Graphics g, final int w, final int h) { if ((this.m_type & PURE_INPUT) == PURE_INPUT) { g.setColor(Color.green); } else { g.setColor(Color.orange); } FontMetrics fm = g.getFontMetrics(); int l = (int) (this.m_x * w) - fm.stringWidth(this.m_id) / 2; int t = (int) (this.m_y * h) - fm.getHeight() / 2; g.fill3DRect(l, t, fm.stringWidth(this.m_id) + 4, fm.getHeight() + fm.getDescent() + 4, true); g.setColor(Color.black); g.drawString(this.m_id, l + 2, t + fm.getHeight() + 2); } /** * Call this function to draw the node highlighted. * * @param g * The graphics context. * @param w * The width of the drawing area. * @param h * The height of the drawing area. */ @Override public void drawHighlight(final Graphics g, final int w, final int h) { g.setColor(Color.black); FontMetrics fm = g.getFontMetrics(); int l = (int) (this.m_x * w) - fm.stringWidth(this.m_id) / 2; int t = (int) (this.m_y * h) - fm.getHeight() / 2; g.fillRect(l - 2, t - 2, fm.stringWidth(this.m_id) + 8, fm.getHeight() + fm.getDescent() + 8); this.drawNode(g, w, h); } /** * Call this to get the output value of this unit. * * @param calculate * True if the value should be calculated if it hasn't been already. * @return The output value, or NaN, if the value has not been calculated. */ @Override public double outputValue(final boolean calculate) { if (Double.isNaN(this.m_unitValue) && calculate) { if (this.m_input) { if (MultilayerPerceptron.this.m_currentInstance.isMissing(this.m_link)) { this.m_unitValue = 0; } else { this.m_unitValue = MultilayerPerceptron.this.m_currentInstance.value(this.m_link); } } else { // node is an output. this.m_unitValue = 0; for (int noa = 0; noa < this.m_numInputs; noa++) { this.m_unitValue += this.m_inputList[noa].outputValue(true); } if (MultilayerPerceptron.this.m_numeric && MultilayerPerceptron.this.m_normalizeClass) { // then scale the value; // this scales linearly from between -1 and 1 this.m_unitValue = this.m_unitValue * MultilayerPerceptron.this.m_attributeRanges[MultilayerPerceptron.this.m_instances.classIndex()] + MultilayerPerceptron.this.m_attributeBases[MultilayerPerceptron.this.m_instances.classIndex()]; } } } return this.m_unitValue; } /** * Call this to get the error value of this unit, which in this case is the difference between the * predicted class, and the actual class. * * @param calculate * True if the value should be calculated if it hasn't been already. * @return The error value, or NaN, if the value has not been calculated. 
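* For an output unit this is target minus output: (1 - value) when this unit's class link matches the instance's class and (0 - value) otherwise; for a numeric class it is the (optionally range-normalized) residual.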
*/ @Override public double errorValue(final boolean calculate) { if (!Double.isNaN(this.m_unitValue) && Double.isNaN(this.m_unitError) && calculate) { if (this.m_input) { this.m_unitError = 0; for (int noa = 0; noa < this.m_numOutputs; noa++) { this.m_unitError += this.m_outputList[noa].errorValue(true); } } else { if (MultilayerPerceptron.this.m_currentInstance.classIsMissing()) { this.m_unitError = .1; } else if (MultilayerPerceptron.this.m_instances.classAttribute().isNominal()) { if (MultilayerPerceptron.this.m_currentInstance.classValue() == this.m_link) { this.m_unitError = 1 - this.m_unitValue; } else { this.m_unitError = 0 - this.m_unitValue; } } else if (MultilayerPerceptron.this.m_numeric) { if (MultilayerPerceptron.this.m_normalizeClass) { if (MultilayerPerceptron.this.m_attributeRanges[MultilayerPerceptron.this.m_instances.classIndex()] == 0) { this.m_unitError = 0; } else { this.m_unitError = (MultilayerPerceptron.this.m_currentInstance.classValue() - this.m_unitValue) / MultilayerPerceptron.this.m_attributeRanges[MultilayerPerceptron.this.m_instances.classIndex()]; // m_numericRange; } } else { this.m_unitError = MultilayerPerceptron.this.m_currentInstance.classValue() - this.m_unitValue; } } } } return this.m_unitError; } /** * Call this to reset the value and error for this unit, ready for the next run. This will also call * the reset function of all units that are connected as inputs to this one. This is also the time * that the update for the listeners will be performed. */ @Override public void reset() { if (!Double.isNaN(this.m_unitValue) || !Double.isNaN(this.m_unitError)) { this.m_unitValue = Double.NaN; this.m_unitError = Double.NaN; this.m_weightsUpdated = false; for (int noa = 0; noa < this.m_numInputs; noa++) { this.m_inputList[noa].reset(); } } } /** * Call this to have the connection save the current weights. */ @Override public void saveWeights() { for (int i = 0; i < this.m_numInputs; i++) { this.m_inputList[i].saveWeights(); } } /** * Call this to have the connection restore from the saved weights. */ @Override public void restoreWeights() { for (int i = 0; i < this.m_numInputs; i++) { this.m_inputList[i].restoreWeights(); } } /** * Call this function to set What this end unit represents. * * @param input * True if this unit is used for entering an attribute, False if it's used for determining * a class value. * @param val * The attribute number or class type that this unit represents. (for nominal attributes). */ public void setLink(final boolean input, final int val) throws Exception { this.m_input = input; if (input) { this.m_type = PURE_INPUT; } else { this.m_type = PURE_OUTPUT; } if (val < 0 || (input && val > MultilayerPerceptron.this.m_instances.numAttributes()) || (!input && MultilayerPerceptron.this.m_instances.classAttribute().isNominal() && val > MultilayerPerceptron.this.m_instances.classAttribute().numValues())) { this.m_link = 0; } else { this.m_link = val; } } /** * @return link for this node. */ public int getLink() { return this.m_link; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } /** * Inner class used to draw the nodes onto.(uses the node lists!!) This will also handle the user * input. */ private class NodePanel extends JPanel implements RevisionHandler { /** for serialization */ static final long serialVersionUID = -3067621833388149984L; /** * The constructor. 
*/ public NodePanel() { this.addMouseListener(new MouseAdapter() { @Override public void mousePressed(final MouseEvent e) { if (!MultilayerPerceptron.this.m_stopped) { return; } if ((e.getModifiers() & MouseEvent.BUTTON1_MASK) == MouseEvent.BUTTON1_MASK && !e.isAltDown()) { Graphics g = NodePanel.this.getGraphics(); int x = e.getX(); int y = e.getY(); int w = NodePanel.this.getWidth(); int h = NodePanel.this.getHeight(); ArrayList<NeuralConnection> tmp = new ArrayList<>(4); for (int noa = 0; noa < MultilayerPerceptron.this.m_numAttributes; noa++) { if (MultilayerPerceptron.this.m_inputs[noa].onUnit(g, x, y, w, h)) { tmp.add(MultilayerPerceptron.this.m_inputs[noa]); NodePanel.this.selection(tmp, (e.getModifiers() & MouseEvent.CTRL_MASK) == MouseEvent.CTRL_MASK, true); return; } } for (int noa = 0; noa < MultilayerPerceptron.this.m_numClasses; noa++) { if (MultilayerPerceptron.this.m_outputs[noa].onUnit(g, x, y, w, h)) { tmp.add(MultilayerPerceptron.this.m_outputs[noa]); NodePanel.this.selection(tmp, (e.getModifiers() & MouseEvent.CTRL_MASK) == MouseEvent.CTRL_MASK, true); return; } } for (NeuralConnection m_neuralNode : MultilayerPerceptron.this.m_neuralNodes) { if (m_neuralNode.onUnit(g, x, y, w, h)) { tmp.add(m_neuralNode); NodePanel.this.selection(tmp, (e.getModifiers() & MouseEvent.CTRL_MASK) == MouseEvent.CTRL_MASK, true); return; } } NeuralNode temp = new NeuralNode(String.valueOf(MultilayerPerceptron.this.m_nextId), MultilayerPerceptron.this.m_random, MultilayerPerceptron.this.m_sigmoidUnit); MultilayerPerceptron.this.m_nextId++; temp.setX((double) e.getX() / w); temp.setY((double) e.getY() / h); tmp.add(temp); MultilayerPerceptron.this.addNode(temp); NodePanel.this.selection(tmp, (e.getModifiers() & MouseEvent.CTRL_MASK) == MouseEvent.CTRL_MASK, true); } else { // then right click Graphics g = NodePanel.this.getGraphics(); int x = e.getX(); int y = e.getY(); int w = NodePanel.this.getWidth(); int h = NodePanel.this.getHeight(); ArrayList<NeuralConnection> tmp = new ArrayList<>(4); for (int noa = 0; noa < MultilayerPerceptron.this.m_numAttributes; noa++) { if (MultilayerPerceptron.this.m_inputs[noa].onUnit(g, x, y, w, h)) { tmp.add(MultilayerPerceptron.this.m_inputs[noa]); NodePanel.this.selection(tmp, (e.getModifiers() & MouseEvent.CTRL_MASK) == MouseEvent.CTRL_MASK, false); return; } } for (int noa = 0; noa < MultilayerPerceptron.this.m_numClasses; noa++) { if (MultilayerPerceptron.this.m_outputs[noa].onUnit(g, x, y, w, h)) { tmp.add(MultilayerPerceptron.this.m_outputs[noa]); NodePanel.this.selection(tmp, (e.getModifiers() & MouseEvent.CTRL_MASK) == MouseEvent.CTRL_MASK, false); return; } } for (NeuralConnection m_neuralNode : MultilayerPerceptron.this.m_neuralNodes) { if (m_neuralNode.onUnit(g, x, y, w, h)) { tmp.add(m_neuralNode); NodePanel.this.selection(tmp, (e.getModifiers() & MouseEvent.CTRL_MASK) == MouseEvent.CTRL_MASK, false); return; } } NodePanel.this.selection(null, (e.getModifiers() & MouseEvent.CTRL_MASK) == MouseEvent.CTRL_MASK, false); } } }); } /** * This function gets called when the user has clicked something It will amend the current selection * or connect the current selection to the new selection. Or if nothing was selected and the right * button was used it will delete the node. * * @param v * The units that were selected. * @param ctrl * True if ctrl was held down. * @param left * True if it was the left mouse button. 
*/ private void selection(final ArrayList<NeuralConnection> v, final boolean ctrl, final boolean left) { if (v == null) { // then unselect all. MultilayerPerceptron.this.m_selected.clear(); this.repaint(); return; } // then exclusive or the new selection with the current one. if ((ctrl || MultilayerPerceptron.this.m_selected.size() == 0) && left) { boolean removed = false; for (int noa = 0; noa < v.size(); noa++) { removed = false; for (int nob = 0; nob < MultilayerPerceptron.this.m_selected.size(); nob++) { if (v.get(noa) == MultilayerPerceptron.this.m_selected.get(nob)) { // then remove that element MultilayerPerceptron.this.m_selected.remove(nob); removed = true; break; } } if (!removed) { MultilayerPerceptron.this.m_selected.add(v.get(noa)); } } this.repaint(); return; } if (left) { // then connect the current selection to the new one. for (int noa = 0; noa < MultilayerPerceptron.this.m_selected.size(); noa++) { for (int nob = 0; nob < v.size(); nob++) { NeuralConnection.connect(MultilayerPerceptron.this.m_selected.get(noa), v.get(nob)); } } } else if (MultilayerPerceptron.this.m_selected.size() > 0) { // then disconnect the current selection from the new one. for (int noa = 0; noa < MultilayerPerceptron.this.m_selected.size(); noa++) { for (int nob = 0; nob < v.size(); nob++) { NeuralConnection.disconnect(MultilayerPerceptron.this.m_selected.get(noa), v.get(nob)); NeuralConnection.disconnect(v.get(nob), MultilayerPerceptron.this.m_selected.get(noa)); } } } else { // then remove the selected node. (it was right clicked while // no other units were selected for (int noa = 0; noa < v.size(); noa++) { v.get(noa).removeAllInputs(); v.get(noa).removeAllOutputs(); MultilayerPerceptron.this.removeNode(v.get(noa)); } } this.repaint(); } /** * This will paint the nodes ontot the panel. * * @param g * The graphics context. */ @Override public void paintComponent(final Graphics g) { super.paintComponent(g); int x = this.getWidth(); int y = this.getHeight(); if (25 * MultilayerPerceptron.this.m_numAttributes > 25 * MultilayerPerceptron.this.m_numClasses && 25 * MultilayerPerceptron.this.m_numAttributes > y) { this.setSize(x, 25 * MultilayerPerceptron.this.m_numAttributes); } else if (25 * MultilayerPerceptron.this.m_numClasses > y) { this.setSize(x, 25 * MultilayerPerceptron.this.m_numClasses); } else { this.setSize(x, y); } y = this.getHeight(); for (int noa = 0; noa < MultilayerPerceptron.this.m_numAttributes; noa++) { MultilayerPerceptron.this.m_inputs[noa].drawInputLines(g, x, y); } for (int noa = 0; noa < MultilayerPerceptron.this.m_numClasses; noa++) { MultilayerPerceptron.this.m_outputs[noa].drawInputLines(g, x, y); MultilayerPerceptron.this.m_outputs[noa].drawOutputLines(g, x, y); } for (NeuralConnection m_neuralNode : MultilayerPerceptron.this.m_neuralNodes) { m_neuralNode.drawInputLines(g, x, y); } for (int noa = 0; noa < MultilayerPerceptron.this.m_numAttributes; noa++) { MultilayerPerceptron.this.m_inputs[noa].drawNode(g, x, y); } for (int noa = 0; noa < MultilayerPerceptron.this.m_numClasses; noa++) { MultilayerPerceptron.this.m_outputs[noa].drawNode(g, x, y); } for (NeuralConnection m_neuralNode : MultilayerPerceptron.this.m_neuralNodes) { m_neuralNode.drawNode(g, x, y); } for (int noa = 0; noa < MultilayerPerceptron.this.m_selected.size(); noa++) { MultilayerPerceptron.this.m_selected.get(noa).drawHighlight(g, x, y); } } /** * Returns the revision string. 
* * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } /** * This provides the basic controls for working with the neuralnetwork * * @author Malcolm Ware (mfw4@cs.waikato.ac.nz) * @version $Revision$ */ class ControlPanel extends JPanel implements RevisionHandler { /** for serialization */ static final long serialVersionUID = 7393543302294142271L; /** The start stop button. */ public JButton m_startStop; /** The button to accept the network (even if it hasn't done all epochs. */ public JButton m_acceptButton; /** A label to state the number of epochs processed so far. */ public JPanel m_epochsLabel; /** A label to state the total number of epochs to be processed. */ public JLabel m_totalEpochsLabel; /** A text field to allow the changing of the total number of epochs. */ public JTextField m_changeEpochs; /** A label to state the learning rate. */ public JLabel m_learningLabel; /** A label to state the momentum. */ public JLabel m_momentumLabel; /** A text field to allow the changing of the learning rate. */ public JTextField m_changeLearning; /** A text field to allow the changing of the momentum. */ public JTextField m_changeMomentum; /** * A label to state roughly the accuracy of the network.(because the accuracy is calculated per * epoch, but the network is changing throughout each epoch train). */ public JPanel m_errorLabel; /** The constructor. */ public ControlPanel() { this.setBorder(BorderFactory.createTitledBorder("Controls")); this.m_totalEpochsLabel = new JLabel("Num Of Epochs "); this.m_epochsLabel = new JPanel() { /** for serialization */ private static final long serialVersionUID = 2562773937093221399L; @Override public void paintComponent(final Graphics g) { super.paintComponent(g); g.setColor(MultilayerPerceptron.this.m_controlPanel.m_totalEpochsLabel.getForeground()); g.drawString("Epoch " + MultilayerPerceptron.this.m_epoch, 0, 10); } }; this.m_epochsLabel.setFont(this.m_totalEpochsLabel.getFont()); this.m_changeEpochs = new JTextField(); this.m_changeEpochs.setText("" + MultilayerPerceptron.this.m_numEpochs); this.m_errorLabel = new JPanel() { /** for serialization */ private static final long serialVersionUID = 4390239056336679189L; @Override public void paintComponent(final Graphics g) { super.paintComponent(g); g.setColor(MultilayerPerceptron.this.m_controlPanel.m_totalEpochsLabel.getForeground()); if (MultilayerPerceptron.this.m_valSize == 0) { g.drawString("Error per Epoch = " + Utils.doubleToString(MultilayerPerceptron.this.m_error, 7), 0, 10); } else { g.drawString("Validation Error per Epoch = " + Utils.doubleToString(MultilayerPerceptron.this.m_error, 7), 0, 10); } } }; this.m_errorLabel.setFont(this.m_epochsLabel.getFont()); this.m_learningLabel = new JLabel("Learning Rate = "); this.m_momentumLabel = new JLabel("Momentum = "); this.m_changeLearning = new JTextField(); this.m_changeMomentum = new JTextField(); this.m_changeLearning.setText("" + MultilayerPerceptron.this.m_learningRate); this.m_changeMomentum.setText("" + MultilayerPerceptron.this.m_momentum); this.setLayout(new BorderLayout(15, 10)); MultilayerPerceptron.this.m_stopIt = true; MultilayerPerceptron.this.m_accepted = false; this.m_startStop = new JButton("Start"); this.m_startStop.setActionCommand("Start"); this.m_acceptButton = new JButton("Accept"); this.m_acceptButton.setActionCommand("Accept"); JPanel buttons = new JPanel(); buttons.setLayout(new BoxLayout(buttons, BoxLayout.Y_AXIS)); buttons.add(this.m_startStop); 
buttons.add(this.m_acceptButton); this.add(buttons, BorderLayout.WEST); JPanel data = new JPanel(); data.setLayout(new BoxLayout(data, BoxLayout.Y_AXIS)); Box ab = new Box(BoxLayout.X_AXIS); ab.add(this.m_epochsLabel); data.add(ab); ab = new Box(BoxLayout.X_AXIS); Component b = Box.createGlue(); ab.add(this.m_totalEpochsLabel); ab.add(this.m_changeEpochs); this.m_changeEpochs.setMaximumSize(new Dimension(200, 20)); ab.add(b); data.add(ab); ab = new Box(BoxLayout.X_AXIS); ab.add(this.m_errorLabel); data.add(ab); this.add(data, BorderLayout.CENTER); data = new JPanel(); data.setLayout(new BoxLayout(data, BoxLayout.Y_AXIS)); ab = new Box(BoxLayout.X_AXIS); b = Box.createGlue(); ab.add(this.m_learningLabel); ab.add(this.m_changeLearning); this.m_changeLearning.setMaximumSize(new Dimension(200, 20)); ab.add(b); data.add(ab); ab = new Box(BoxLayout.X_AXIS); b = Box.createGlue(); ab.add(this.m_momentumLabel); ab.add(this.m_changeMomentum); this.m_changeMomentum.setMaximumSize(new Dimension(200, 20)); ab.add(b); data.add(ab); this.add(data, BorderLayout.EAST); this.m_startStop.addActionListener(new ActionListener() { @Override public void actionPerformed(final ActionEvent e) { if (e.getActionCommand().equals("Start")) { MultilayerPerceptron.this.m_stopIt = false; ControlPanel.this.m_startStop.setText("Stop"); ControlPanel.this.m_startStop.setActionCommand("Stop"); int n = Integer.valueOf(ControlPanel.this.m_changeEpochs.getText()).intValue(); MultilayerPerceptron.this.m_numEpochs = n; ControlPanel.this.m_changeEpochs.setText("" + MultilayerPerceptron.this.m_numEpochs); double m = Double.valueOf(ControlPanel.this.m_changeLearning.getText()).doubleValue(); MultilayerPerceptron.this.setLearningRate(m); ControlPanel.this.m_changeLearning.setText("" + MultilayerPerceptron.this.m_learningRate); m = Double.valueOf(ControlPanel.this.m_changeMomentum.getText()).doubleValue(); MultilayerPerceptron.this.setMomentum(m); ControlPanel.this.m_changeMomentum.setText("" + MultilayerPerceptron.this.m_momentum); MultilayerPerceptron.this.blocker(false); } else if (e.getActionCommand().equals("Stop")) { MultilayerPerceptron.this.m_stopIt = true; ControlPanel.this.m_startStop.setText("Start"); ControlPanel.this.m_startStop.setActionCommand("Start"); } } }); this.m_acceptButton.addActionListener(new ActionListener() { @Override public void actionPerformed(final ActionEvent e) { MultilayerPerceptron.this.m_accepted = true; MultilayerPerceptron.this.blocker(false); } }); this.m_changeEpochs.addActionListener(new ActionListener() { @Override public void actionPerformed(final ActionEvent e) { int n = Integer.valueOf(ControlPanel.this.m_changeEpochs.getText()).intValue(); if (n > 0) { MultilayerPerceptron.this.m_numEpochs = n; MultilayerPerceptron.this.blocker(false); } } }); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } /** * a ZeroR model in case no model can be built from the data or the network predicts all zeros for * the classes */ private Classifier m_ZeroR; /** Whether to use the default ZeroR model */ private boolean m_useDefaultModel = false; /** The training instances. */ private Instances m_instances; /** The current instance running through the network. */ private Instance m_currentInstance; /** A flag to say that it's a numeric class. */ private boolean m_numeric; /** The ranges for all the attributes. */ private double[] m_attributeRanges; /** The base values for all the attributes. 
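* (Computed in setClassType() as (max + min) / 2 per attribute; paired with m_attributeRanges = (max - min) / 2 for the [-1, 1] normalization.)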
*/ private double[] m_attributeBases; /** The output units. (only feed the errors, do no calcs) */ private NeuralEnd[] m_outputs; /** The input units. (only feed the inputs, do no calcs) */ private NeuralEnd[] m_inputs; /** All the nodes that actually comprise the logical neural net. */ private NeuralConnection[] m_neuralNodes; /** The number of classes. */ private int m_numClasses = 0; /** The number of attributes. */ private int m_numAttributes = 0; // note the number doesn't include the class. /** The panel the nodes are displayed on. */ private NodePanel m_nodePanel; /** The control panel. */ private ControlPanel m_controlPanel; /** The next id number available for default naming. */ private int m_nextId; /** A Vector list of the units currently selected. */ private ArrayList<NeuralConnection> m_selected; /** The number of epochs to train through. */ private int m_numEpochs; /** a flag to state if the network should be running or stopped. */ private boolean m_stopIt; /** a flag to state that the network has in fact stopped. */ private boolean m_stopped; /** a flag to state that the network should be accepted the way it is. */ private boolean m_accepted; /** The window for the network. */ private JFrame m_win; /** * A flag to tell the build classifier to automatically build a neural net. */ private boolean m_autoBuild; /** * A flag to state that the gui for the network should be brought up. To allow interaction while * training. */ private boolean m_gui; /** An int to say how big the validation set should be. */ private int m_valSize; /** The number to use to quit on validation testing. */ private int m_driftThreshold; /** The number used to seed the random number generator. */ private int m_randomSeed; /** The actual random number generator. */ private Random m_random; /** A flag to state that a nominal to binary filter should be used. */ private boolean m_useNomToBin; /** The actual filter. */ private NominalToBinary m_nominalToBinaryFilter; /** The string that defines the hidden layers */ private String m_hiddenLayers; /** This flag states that the user wants the input values normalized. */ private boolean m_normalizeAttributes; /** This flag states that the user wants the learning rate to decay. */ private boolean m_decay; /** This is the learning rate for the network. */ private double m_learningRate; /** This is the momentum for the network. */ private double m_momentum; /** Shows the number of the epoch that the network just finished. */ private int m_epoch; /** Shows the error of the epoch that the network just finished. */ private double m_error; /** * This flag states that the user wants the network to restart if it is found to be generating * infinity or NaN for the error value. This would restart the network with the current options * except that the learning rate would be smaller than before (perhaps half of its current value). * This option will not be available if the gui is chosen (if the gui is open the user can fix the * network themselves; it is an architectural minefield for the network to be reset with the gui * open). */ private boolean m_reset; /** * This flag states that the user wants the class to be normalized while processing in the network * is done. (the final answer will be in the original range regardless). This option will only be * used when the class is numeric. */ private boolean m_normalizeClass; /** * This is a sigmoid unit. */ private final SigmoidUnit m_sigmoidUnit; /** * This is a linear unit.
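* (Used for the output nodes when the class is numeric; see setEndsToLinear().)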
*/ private final LinearUnit m_linearUnit; /** * The constructor. */ public MultilayerPerceptron() { this.m_instances = null; this.m_currentInstance = null; this.m_controlPanel = null; this.m_nodePanel = null; this.m_epoch = 0; this.m_error = 0; this.m_outputs = new NeuralEnd[0]; this.m_inputs = new NeuralEnd[0]; this.m_numAttributes = 0; this.m_numClasses = 0; this.m_neuralNodes = new NeuralConnection[0]; this.m_selected = new ArrayList<>(4); this.m_nextId = 0; this.m_stopIt = true; this.m_stopped = true; this.m_accepted = false; this.m_numeric = false; this.m_random = null; this.m_nominalToBinaryFilter = new NominalToBinary(); this.m_sigmoidUnit = new SigmoidUnit(); this.m_linearUnit = new LinearUnit(); // setting all the options to their defaults. To completely change these // defaults they will also need to be changed down the bottom in the // setOptions function (the text info in the accompanying functions should // also be changed to reflect the new defaults) this.m_normalizeClass = true; this.m_normalizeAttributes = true; this.m_autoBuild = true; this.m_gui = false; this.m_useNomToBin = true; this.m_driftThreshold = 20; this.m_numEpochs = 500; this.m_valSize = 0; this.m_randomSeed = 0; this.m_hiddenLayers = "a"; this.m_learningRate = .3; this.m_momentum = .2; this.m_reset = true; this.m_decay = false; } /** * @param d * True if the learning rate should decay. */ public void setDecay(final boolean d) { this.m_decay = d; } /** * @return the flag for having the learning rate decay. */ public boolean getDecay() { return this.m_decay; } /** * This sets the network up to be able to reset itself with the current settings and the learning * rate at half of what it is currently. This will only happen if the network creates NaN or * infinite errors. Also this will continue to happen until the network is trained properly. The * learning rate will also get set back to its original value at the end of this. This can only be * set to true if the GUI is not brought up. * * @param r * True if the network should restart with its current options and set the learning rate * to half what it currently is. */ public void setReset(boolean r) { if (this.m_gui) { r = false; } this.m_reset = r; } /** * @return The flag for resetting the network. */ public boolean getReset() { return this.m_reset; } /** * @param c * True if the class should be normalized (the class will only ever be normalized if it is * numeric). (Normalization puts the range between -1 and 1). */ public void setNormalizeNumericClass(final boolean c) { this.m_normalizeClass = c; } /** * @return The flag for normalizing a numeric class. */ public boolean getNormalizeNumericClass() { return this.m_normalizeClass; } /** * @param a * True if the attributes should be normalized (even nominal attributes will get normalized * here) (range goes between -1 and 1). */ public void setNormalizeAttributes(final boolean a) { this.m_normalizeAttributes = a; } /** * @return The flag for normalizing attributes. */ public boolean getNormalizeAttributes() { return this.m_normalizeAttributes; } /** * @param f * True if a nominalToBinary filter should be used on the data. */ public void setNominalToBinaryFilter(final boolean f) { this.m_useNomToBin = f; } /** * @return The flag for nominal to binary filter use. */ public boolean getNominalToBinaryFilter() { return this.m_useNomToBin; } /** * This seeds the random number generator that is used when a random number is needed for the * network. * * @param l * The seed.
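* (Negative values are ignored; the seed also determines how the training data is shuffled.)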
*/ @Override public void setSeed(final int l) { if (l >= 0) { this.m_randomSeed = l; } } /** * @return The seed for the random number generator. */ @Override public int getSeed() { return this.m_randomSeed; } /** * This sets the threshold to use for when validation testing is being done. It works by ending * testing once the error on the validation set has consecutively increased a certain number of * times. * * @param t * The threshold to use for this. */ public void setValidationThreshold(final int t) { if (t > 0) { this.m_driftThreshold = t; } } /** * @return The threshold used for validation testing. */ public int getValidationThreshold() { return this.m_driftThreshold; } /** * The learning rate can be set using this command. Must be greater than 0 and no more than 1. * * @param l * The new learning rate. */ public void setLearningRate(final double l) { if (l > 0 && l <= 1) { this.m_learningRate = l; if (this.m_controlPanel != null) { this.m_controlPanel.m_changeLearning.setText("" + l); } } } /** * @return The learning rate for the nodes. */ public double getLearningRate() { return this.m_learningRate; } /** * The momentum can be set using this command. The same conditions apply to this as to the learning * rate. * * @param m * The new momentum. */ public void setMomentum(final double m) { if (m >= 0 && m <= 1) { this.m_momentum = m; if (this.m_controlPanel != null) { this.m_controlPanel.m_changeMomentum.setText("" + m); } } } /** * @return The momentum for the nodes. */ public double getMomentum() { return this.m_momentum; } /** * This will set whether the network is automatically built or if it is left up to the user. (there * is nothing to stop a user from altering an autobuilt network however). * * @param a * True if the network should be auto built. */ public void setAutoBuild(boolean a) { if (!this.m_gui) { a = true; } this.m_autoBuild = a; } /** * @return The auto build state. */ public boolean getAutoBuild() { return this.m_autoBuild; } /** * This will set what the hidden layers are made up of when auto build is enabled. Note: to have no * hidden units, just put a single 0. Any more 0s will indicate that the string is badly formed and * make it unaccepted. Negative numbers and floats will do the same. There are also some wildcards. * These are 'a' = (number of attributes + number of classes) / 2, 'i' = number of attributes, 'o' = * number of classes, and 't' = number of attributes + number of classes. * * @param h * A string with a comma separated list of numbers. Each number is the number of nodes to * be on a hidden layer. */ public void setHiddenLayers(final String h) { String tmp = ""; StringTokenizer tok = new StringTokenizer(h, ","); if (tok.countTokens() == 0) { return; } double dval; int val; String c; boolean first = true; while (tok.hasMoreTokens()) { c = tok.nextToken().trim(); if (c.equals("a") || c.equals("i") || c.equals("o") || c.equals("t")) { tmp += c; } else { dval = Double.valueOf(c).doubleValue(); val = (int) dval; if ((val == dval && (val != 0 || (tok.countTokens() == 0 && first)) && val >= 0)) { tmp += val; } else { return; } } first = false; if (tok.hasMoreTokens()) { tmp += ", "; } } this.m_hiddenLayers = tmp; } /** * @return A string representing the hidden layers, each number is the number of nodes on a hidden * layer.
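* For example, "4,2" describes two hidden layers with four and two nodes; wildcard letters ('a', 'i', 'o', 't') are kept verbatim and resolved later in setupHiddenLayer().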
*/ public String getHiddenLayers() { return this.m_hiddenLayers; } /** * This will set whether a GUI is brought up to allow interaction by the user with the neural * network during training. * * @param a * True if a GUI should be created. */ public void setGUI(final boolean a) { this.m_gui = a; if (!a) { this.setAutoBuild(true); } else { this.setReset(false); } } /** * @return True if the GUI should be shown. */ public boolean getGUI() { return this.m_gui; } /** * This will set the size of the validation set. * * @param a * The size of the validation set, as a percentage of the whole. */ public void setValidationSetSize(final int a) { if (a < 0 || a > 99) { return; } this.m_valSize = a; } /** * @return The percentage size of the validation set. */ public int getValidationSetSize() { return this.m_valSize; } /** * Set the number of training epochs to perform. Must be greater than 0. * * @param n * The number of epochs to train through. */ public void setTrainingTime(final int n) { if (n > 0) { this.m_numEpochs = n; } } /** * @return The number of epochs to train through. */ public int getTrainingTime() { return this.m_numEpochs; } /** * Call this function to place a node into the network list. * * @param n * The node to place in the list. */ private void addNode(final NeuralConnection n) { NeuralConnection[] temp1 = new NeuralConnection[this.m_neuralNodes.length + 1]; for (int noa = 0; noa < this.m_neuralNodes.length; noa++) { temp1[noa] = this.m_neuralNodes[noa]; } temp1[temp1.length - 1] = n; this.m_neuralNodes = temp1; } /** * Call this function to remove the passed node from the list. This will only remove the node if it * is in the neural nodes list. * * @param n * The neuralConnection to remove. * @return True if removed, false if not (because it wasn't there). */ private boolean removeNode(final NeuralConnection n) { NeuralConnection[] temp1 = new NeuralConnection[this.m_neuralNodes.length - 1]; int skip = 0; for (int noa = 0; noa < this.m_neuralNodes.length; noa++) { if (n == this.m_neuralNodes[noa]) { skip++; } else if (!((noa - skip) >= temp1.length)) { temp1[noa - skip] = this.m_neuralNodes[noa]; } else { return false; } } this.m_neuralNodes = temp1; return true; } /** * This function sets the m_numeric flag to represent the passed class. It also performs the * normalization of the attributes if applicable and sets up the info to normalize the class. (Note * that regardless of the options it will fill an array with the range and base, set to normalize * all attributes and the class to be between -1 and 1.) * * @param inst * the instances. * @return The modified instances. This needs to be done: if the attributes are normalized, deep * copies will be made of all the instances, which will need to be passed back out.
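* (For reference, the normalization applied below maps value -> (value - base) / range with base = (max + min) / 2 and range = (max - min) / 2, so an attribute spanning [2, 10] maps 2 to -1, 6 to 0 and 10 to 1.)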
*/ private Instances setClassType(final Instances inst) throws Exception { if (inst != null) { // x bounds this.m_attributeRanges = new double[inst.numAttributes()]; this.m_attributeBases = new double[inst.numAttributes()]; for (int noa = 0; noa < inst.numAttributes(); noa++) { double min = Double.POSITIVE_INFINITY; double max = Double.NEGATIVE_INFINITY; for (int i = 0; i < inst.numInstances(); i++) { if (!inst.instance(i).isMissing(noa)) { double value = inst.instance(i).value(noa); if (value < min) { min = value; } if (value > max) { max = value; } } } this.m_attributeRanges[noa] = (max - min) / 2; this.m_attributeBases[noa] = (max + min) / 2; } if (this.m_normalizeAttributes) { for (int i = 0; i < inst.numInstances(); i++) { Instance currentInstance = inst.instance(i); double[] instance = new double[inst.numAttributes()]; for (int noa = 0; noa < inst.numAttributes(); noa++) { if (noa != inst.classIndex()) { if (this.m_attributeRanges[noa] != 0) { instance[noa] = (currentInstance.value(noa) - this.m_attributeBases[noa]) / this.m_attributeRanges[noa]; } else { instance[noa] = currentInstance.value(noa) - this.m_attributeBases[noa]; } } else { instance[noa] = currentInstance.value(noa); } } inst.set(i, new DenseInstance(currentInstance.weight(), instance)); } } if (inst.classAttribute().isNumeric()) { this.m_numeric = true; } else { this.m_numeric = false; } } return inst; } /** * A function used to stop the code that called buildClassifier from continuing on before the user * has finished with the network. * * @param tf * True to stop the thread, False to release the thread that is waiting there (if any). */ public synchronized void blocker(final boolean tf) { if (tf) { try { this.wait(); } catch (InterruptedException e) { } } else { this.notifyAll(); } } /** * Call this function to update the control panel for the gui. */ private void updateDisplay() { if (this.m_gui) { this.m_controlPanel.m_errorLabel.repaint(); this.m_controlPanel.m_epochsLabel.repaint(); } } /** * This will reset all the nodes in the network. */ private void resetNetwork() { for (int noc = 0; noc < this.m_numClasses; noc++) { this.m_outputs[noc].reset(); } } /** * This will cause the output values of all the nodes to be calculated. Note that the * m_currentInstance is used to calculate these values. */ private void calculateOutputs() { for (int noc = 0; noc < this.m_numClasses; noc++) { // get the values. this.m_outputs[noc].outputValue(true); } } /** * This will cause the error values to be calculated for all nodes. Note that the m_currentInstance * is used to calculate these values. Also the output values should have been calculated first. * * @return The squared error. */ private double calculateErrors() throws Exception { double ret = 0, temp = 0; for (int noc = 0; noc < this.m_numAttributes; noc++) { // get the errors. this.m_inputs[noc].errorValue(true); } for (int noc = 0; noc < this.m_numClasses; noc++) { temp = this.m_outputs[noc].errorValue(false); ret += temp * temp; } return ret; } /** * This will cause the weight values to be updated based on the learning rate, momentum and the * errors that have been calculated for each node. * * @param l * The learning rate to update with. * @param m * The momentum to update with. */ private void updateNetworkWeights(final double l, final double m) { for (int noc = 0; noc < this.m_numClasses; noc++) { // update weights this.m_outputs[noc].updateWeights(l, m); } } /** * This creates the required input units.
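* (One NeuralEnd is created per non-class attribute and linked to its attribute index; the class attribute is skipped via the 'now' offset.)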
*/ private void setupInputs() throws Exception { this.m_inputs = new NeuralEnd[this.m_numAttributes]; int now = 0; for (int noa = 0; noa < this.m_numAttributes + 1; noa++) { if (this.m_instances.classIndex() != noa) { this.m_inputs[noa - now] = new NeuralEnd(this.m_instances.attribute(noa).name()); this.m_inputs[noa - now].setX(.1); this.m_inputs[noa - now].setY((noa - now + 1.0) / (this.m_numAttributes + 1)); this.m_inputs[noa - now].setLink(true, noa); } else { now = 1; } } } /** * This creates the required output units. */ private void setupOutputs() throws Exception { this.m_outputs = new NeuralEnd[this.m_numClasses]; for (int noa = 0; noa < this.m_numClasses; noa++) { if (this.m_numeric) { this.m_outputs[noa] = new NeuralEnd(this.m_instances.classAttribute().name()); } else { this.m_outputs[noa] = new NeuralEnd(this.m_instances.classAttribute().value(noa)); } this.m_outputs[noa].setX(.9); this.m_outputs[noa].setY((noa + 1.0) / (this.m_numClasses + 1)); this.m_outputs[noa].setLink(false, noa); NeuralNode temp = new NeuralNode(String.valueOf(this.m_nextId), this.m_random, this.m_sigmoidUnit); this.m_nextId++; temp.setX(.75); temp.setY((noa + 1.0) / (this.m_numClasses + 1)); this.addNode(temp); NeuralConnection.connect(temp, this.m_outputs[noa]); } } /** * Call this function to automatically generate the hidden units * * @throws InterruptedException */ private void setupHiddenLayer() throws InterruptedException { StringTokenizer tok = new StringTokenizer(this.m_hiddenLayers, ","); int val = 0; // num of nodes in a layer int prev = 0; // used to remember the previous layer int num = tok.countTokens(); // number of layers String c; for (int noa = 0; noa < num; noa++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } // note that I am using the Double to get the value rather than the // Integer class, because for some reason the Double implementation can // handle leading white space and the integer version can't!?! 
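// Illustrative wildcard expansion (example values, not from the source): with
// 4 attributes and 3 classes, "a" -> (4 + 3) / 2 = 3 nodes, "i" -> 4, "o" -> 3
// and "t" -> 7, so "a,o" would build two hidden layers of 3 and 3 nodes.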
c = tok.nextToken().trim(); if (c.equals("a")) { val = (this.m_numAttributes + this.m_numClasses) / 2; } else if (c.equals("i")) { val = this.m_numAttributes; } else if (c.equals("o")) { val = this.m_numClasses; } else if (c.equals("t")) { val = this.m_numAttributes + this.m_numClasses; } else { val = Double.valueOf(c).intValue(); } for (int nob = 0; nob < val; nob++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } NeuralNode temp = new NeuralNode(String.valueOf(this.m_nextId), this.m_random, this.m_sigmoidUnit); this.m_nextId++; temp.setX(.5 / (num) * noa + .25); temp.setY((nob + 1.0) / (val + 1)); this.addNode(temp); if (noa > 0) { // then do connections for (int noc = this.m_neuralNodes.length - nob - 1 - prev; noc < this.m_neuralNodes.length - nob - 1; noc++) { NeuralConnection.connect(this.m_neuralNodes[noc], temp); } } } prev = val; } tok = new StringTokenizer(this.m_hiddenLayers, ","); c = tok.nextToken(); if (c.equals("a")) { val = (this.m_numAttributes + this.m_numClasses) / 2; } else if (c.equals("i")) { val = this.m_numAttributes; } else if (c.equals("o")) { val = this.m_numClasses; } else if (c.equals("t")) { val = this.m_numAttributes + this.m_numClasses; } else { val = Double.valueOf(c).intValue(); } if (val == 0) { for (int noa = 0; noa < this.m_numAttributes; noa++) { for (int nob = 0; nob < this.m_numClasses; nob++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } NeuralConnection.connect(this.m_inputs[noa], this.m_neuralNodes[nob]); } } } else { for (int noa = 0; noa < this.m_numAttributes; noa++) { for (int nob = this.m_numClasses; nob < this.m_numClasses + val; nob++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } NeuralConnection.connect(this.m_inputs[noa], this.m_neuralNodes[nob]); } } for (int noa = this.m_neuralNodes.length - prev; noa < this.m_neuralNodes.length; noa++) { for (int nob = 0; nob < this.m_numClasses; nob++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } NeuralConnection.connect(this.m_neuralNodes[noa], this.m_neuralNodes[nob]); } } } } /** * This will go through all the nodes and check if they are connected to a pure output unit. If so * they will be set to be linear units. If not they will be set to be sigmoid units. */ private void setEndsToLinear() { for (NeuralConnection m_neuralNode : this.m_neuralNodes) { if ((m_neuralNode.getType() & NeuralConnection.OUTPUT) == NeuralConnection.OUTPUT) { ((NeuralNode) m_neuralNode).setMethod(this.m_linearUnit); } else { ((NeuralNode) m_neuralNode).setMethod(this.m_sigmoidUnit); } } } /** * Returns default capabilities of the classifier. 
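* (The network accepts nominal, numeric, date and missing attribute values, and nominal, numeric or date classes; everything else is disabled below.)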
* * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** The instances in the validation set (if any) */ protected transient Instances valSet = null; /** The number of instances in the validation set (if any) */ protected transient int numInVal = 0; /** Total weight of the instances in the training set */ protected transient double totalWeight = 0; /** Total weight of the instances in the validation set (if any) */ protected transient double totalValWeight = 0; /** Drift off counter */ protected transient double driftOff = 0; /** To keep track of error */ protected transient double lastRight = Double.POSITIVE_INFINITY; protected transient double bestError = Double.POSITIVE_INFINITY; /** Data in original format (in case the learning rate gets reset) */ protected transient Instances originalFormatData = null; /** * Initializes an iterative classifier. * * @param data * the instances to be used in induction * @exception Exception * if the model cannot be initialized */ @Override public void initializeClassifier(Instances data) throws Exception { // can classifier handle the data? this.getCapabilities().testWithFail(data); // remove instances with missing class data = new Instances(data); data.deleteWithMissingClass(); this.originalFormatData = data; this.m_ZeroR = new weka.classifiers.rules.ZeroR(); this.m_ZeroR.buildClassifier(data); // only class? -> use ZeroR model if (data.numAttributes() == 1) { System.err.println("Cannot build model (only class attribute present in data!), " + "using ZeroR model instead!"); this.m_useDefaultModel = true; return; } else { this.m_useDefaultModel = false; } this.m_epoch = 0; this.m_error = 0; this.m_instances = null; this.m_currentInstance = null; this.m_controlPanel = null; this.m_nodePanel = null; this.m_outputs = new NeuralEnd[0]; this.m_inputs = new NeuralEnd[0]; this.m_numAttributes = 0; this.m_numClasses = 0; this.m_neuralNodes = new NeuralConnection[0]; this.m_selected = new ArrayList<>(4); this.m_nextId = 0; this.m_stopIt = true; this.m_stopped = true; this.m_accepted = false; this.m_instances = new Instances(data); this.m_random = new Random(this.m_randomSeed); if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } this.m_instances.randomize(this.m_random); if (this.m_useNomToBin) { this.m_nominalToBinaryFilter = new NominalToBinary(); this.m_nominalToBinaryFilter.setInputFormat(this.m_instances); this.m_instances = Filter.useFilter(this.m_instances, this.m_nominalToBinaryFilter); } this.m_numAttributes = this.m_instances.numAttributes() - 1; this.m_numClasses = this.m_instances.numClasses(); this.setClassType(this.m_instances); // this sets up the validation set.
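// Illustrative split (example values, not from the source): with m_valSize = 10
// and 200 randomized instances, numInVal = (int) (10 / 100.0 * 200) = 20, so
// instances 0..19 form the validation set and 20..199 drive the weight updates.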
// numinval is needed later this.numInVal = (int) (this.m_valSize / 100.0 * this.m_instances.numInstances()); if (this.m_valSize > 0) { if (this.numInVal == 0) { this.numInVal = 1; } this.valSet = new Instances(this.m_instances, 0, this.numInVal); } // ///////// this.setupInputs(); this.setupOutputs(); if (this.m_autoBuild) { this.setupHiddenLayer(); } // /////////////////////////// // this sets up the gui for usage if (this.m_gui) { this.m_win = Utils.getWekaJFrame("Neural Network", null); this.m_win.addWindowListener(new WindowAdapter() { @Override public void windowClosing(final WindowEvent e) { boolean k = MultilayerPerceptron.this.m_stopIt; MultilayerPerceptron.this.m_stopIt = true; int well = JOptionPane.showConfirmDialog(MultilayerPerceptron.this.m_win, "Are You Sure...\n" + "Click Yes To Accept" + " The Neural Network" + "\n Click No To Return", "Accept Neural Network", JOptionPane.YES_NO_OPTION); if (well == 0) { MultilayerPerceptron.this.m_win.setDefaultCloseOperation(JFrame.DISPOSE_ON_CLOSE); MultilayerPerceptron.this.m_accepted = true; MultilayerPerceptron.this.blocker(false); } else { MultilayerPerceptron.this.m_win.setDefaultCloseOperation(JFrame.DO_NOTHING_ON_CLOSE); } MultilayerPerceptron.this.m_stopIt = k; } }); this.m_win.getContentPane().setLayout(new BorderLayout()); this.m_nodePanel = new NodePanel(); // without the following two lines, the // NodePanel.paintComponents(Graphics) // method will go berserk if the network doesn't fit completely: it will // get called on a constant basis, using 100% of the CPU // see the following forum thread: // http://forum.java.sun.com/thread.jspa?threadID=580929&messageID=2945011 this.m_nodePanel.setPreferredSize(new Dimension(640, 480)); this.m_nodePanel.revalidate(); JScrollPane sp = new JScrollPane(this.m_nodePanel, JScrollPane.VERTICAL_SCROLLBAR_ALWAYS, JScrollPane.HORIZONTAL_SCROLLBAR_NEVER); this.m_controlPanel = new ControlPanel(); this.m_win.getContentPane().add(sp, BorderLayout.CENTER); this.m_win.getContentPane().add(this.m_controlPanel, BorderLayout.SOUTH); this.m_win.setSize(640, 480); this.m_win.setVisible(true); } // This sets up the initial state of the gui if (this.m_gui) { this.blocker(true); this.m_controlPanel.m_changeEpochs.setEnabled(false); this.m_controlPanel.m_changeLearning.setEnabled(false); this.m_controlPanel.m_changeMomentum.setEnabled(false); } // For silly situations in which the network gets accepted before training // commences if (this.m_numeric) { this.setEndsToLinear(); } if (this.m_accepted) { return; } // connections done. this.totalWeight = 0; this.totalValWeight = 0; this.driftOff = 0; this.lastRight = Double.POSITIVE_INFINITY; this.bestError = Double.POSITIVE_INFINITY; // ensure that at least 1 instance is trained through. if (this.numInVal == this.m_instances.numInstances()) { this.numInVal--; } if (this.numInVal < 0) { this.numInVal = 0; } for (int noa = this.numInVal; noa < this.m_instances.numInstances(); noa++) { if (!this.m_instances.instance(noa).classIsMissing()) { this.totalWeight += this.m_instances.instance(noa).weight(); } } if (this.m_valSize != 0) { for (int noa = 0; noa < this.valSet.numInstances(); noa++) { if (!this.valSet.instance(noa).classIsMissing()) { this.totalValWeight += this.valSet.instance(noa).weight(); } } } this.m_stopped = false; } /** * Performs one iteration.
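* One call corresponds to a single training epoch over the non-validation instances, followed by an evaluation pass over the validation set if one is in use.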
* * @return false if no further iterations could be performed, true otherwise * @exception Exception * if this iteration fails for unexpected reasons */ @Override public boolean next() throws Exception { if (this.m_accepted || this.m_useDefaultModel) { // Has user accepted the network already or do we need to use default model? return false; } this.m_epoch++; double right = 0; for (int nob = this.numInVal; nob < this.m_instances.numInstances(); nob++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } this.m_currentInstance = this.m_instances.instance(nob); if (!this.m_currentInstance.classIsMissing()) { // this is where the network updating (and training) occurs for the // training set this.resetNetwork(); this.calculateOutputs(); double tempRate = this.m_learningRate * this.m_currentInstance.weight(); if (this.m_decay) { tempRate /= this.m_epoch; } right += (this.calculateErrors() / this.m_instances.numClasses()) * this.m_currentInstance.weight(); this.updateNetworkWeights(tempRate, this.m_momentum); } } right /= this.totalWeight; if (Double.isInfinite(right) || Double.isNaN(right)) { if ((!this.m_reset) || (this.originalFormatData == null)) { this.m_instances = null; throw new Exception("Network cannot train. Try restarting with a smaller learning rate."); } else { // reset the network if possible if (this.m_learningRate <= Utils.SMALL) { throw new IllegalStateException("Learning rate got too small (" + this.m_learningRate + " <= " + Utils.SMALL + ")!"); } double origRate = this.m_learningRate; // only used for when reset this.m_learningRate /= 2; this.buildClassifier(this.originalFormatData); this.m_learningRate = origRate; return false; } } // //////////////////////do validation testing if applicable if (this.m_valSize != 0) { right = 0; if (this.valSet == null) { throw new IllegalArgumentException("Trying to use validation set but validation set is null."); } for (int nob = 0; nob < this.valSet.numInstances(); nob++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } this.m_currentInstance = this.valSet.instance(nob); if (!this.m_currentInstance.classIsMissing()) { // this is where the network updating occurs, for the validation set this.resetNetwork(); this.calculateOutputs(); right += (this.calculateErrors() / this.valSet.numClasses()) * this.m_currentInstance.weight(); // note 'right' could be calculated here just using // the calculated output values. This would be faster but // less modular } } if (right < this.lastRight) { if (right < this.bestError) { this.bestError = right; // save the network weights at this point for (int noc = 0; noc < this.m_numClasses; noc++) { this.m_outputs[noc].saveWeights(); } this.driftOff = 0; } } else { this.driftOff++; } this.lastRight = right; if (this.driftOff > this.m_driftThreshold || this.m_epoch + 1 >= this.m_numEpochs) { for (int noc = 0; noc < this.m_numClasses; noc++) { this.m_outputs[noc].restoreWeights(); } this.m_accepted = true; } right /= this.totalValWeight; } this.m_error = right; // shows what the neural net is up to if a gui exists. this.updateDisplay(); // This junction controls what state the gui is in at the end of each // epoch, such as whether it is paused or resumable, etc.
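// Sketch of the pause protocol (descriptive note, not from the source): while
// m_stopIt is set and the network has not been accepted, the training thread
// parks in blocker(true) until a button handler calls blocker(false), whose
// notifyAll() releases the wait().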
if (this.m_gui) { while ((this.m_stopIt || (this.m_epoch >= this.m_numEpochs && this.m_valSize == 0)) && !this.m_accepted) { this.m_stopIt = true; this.m_stopped = true; if (this.m_epoch >= this.m_numEpochs && this.m_valSize == 0) { this.m_controlPanel.m_startStop.setEnabled(false); } else { this.m_controlPanel.m_startStop.setEnabled(true); } this.m_controlPanel.m_startStop.setText("Start"); this.m_controlPanel.m_startStop.setActionCommand("Start"); this.m_controlPanel.m_changeEpochs.setEnabled(true); this.m_controlPanel.m_changeLearning.setEnabled(true); this.m_controlPanel.m_changeMomentum.setEnabled(true); this.blocker(true); if (this.m_numeric) { this.setEndsToLinear(); } } this.m_controlPanel.m_changeEpochs.setEnabled(false); this.m_controlPanel.m_changeLearning.setEnabled(false); this.m_controlPanel.m_changeMomentum.setEnabled(false); this.m_stopped = false; // if the network has been accepted stop the training loop if (this.m_accepted) { return false; } } if (this.m_accepted) { return false; } if (this.m_epoch < this.m_numEpochs) { return true; // We can keep iterating } else { return false; } } /** * Signal end of iterating, useful for any house-keeping/cleanup * * @exception Exception * if cleanup fails */ @Override public void done() throws Exception { if (this.m_gui) { this.m_win.dispose(); this.m_controlPanel = null; this.m_nodePanel = null; } if (!this.m_useDefaultModel) { this.m_instances = new Instances(this.m_instances, 0); } this.m_currentInstance = null; this.originalFormatData = null; } /** * Call this function to build and train a neural network for the training data provided. * * @param i * The training data. * @throws Exception * if can't build classification properly. */ @Override public void buildClassifier(final Instances i) throws Exception { // Initialize classifier this.initializeClassifier(i); // For the given number of iterations while (this.next()) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } } // Clean up this.done(); } /** * Call this function to predict the class of an instance once a classification model has been built * with the buildClassifier call. * * @param i * The instance to classify. * @return A double array filled with the probabilities of each class type. * @throws Exception * if can't classify instance. */ @Override public double[] distributionForInstance(final Instance i) throws Exception { // default model? 
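// (falls back to the ZeroR prior distribution when no real network was built)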
if (this.m_useDefaultModel) { return this.m_ZeroR.distributionForInstance(i); } if (this.m_useNomToBin) { this.m_nominalToBinaryFilter.input(i); this.m_currentInstance = this.m_nominalToBinaryFilter.output(); } else { this.m_currentInstance = i; } // Make a copy of the instance so that it isn't modified this.m_currentInstance = (Instance) this.m_currentInstance.copy(); if (this.m_normalizeAttributes) { double[] instance = new double[this.m_currentInstance.numAttributes()]; for (int noa = 0; noa < this.m_instances.numAttributes(); noa++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (noa != this.m_instances.classIndex()) { if (this.m_attributeRanges[noa] != 0) { instance[noa] = (this.m_currentInstance.value(noa) - this.m_attributeBases[noa]) / this.m_attributeRanges[noa]; } else { instance[noa] = this.m_currentInstance.value(noa) - this.m_attributeBases[noa]; } } else { instance[noa] = this.m_currentInstance.value(noa); } } this.m_currentInstance = new DenseInstance(this.m_currentInstance.weight(), instance); this.m_currentInstance.setDataset(this.m_instances); } this.resetNetwork(); // since all the output values are needed. // They are calculated manually here and the values collected. double[] theArray = new double[this.m_numClasses]; for (int noa = 0; noa < this.m_numClasses; noa++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } theArray[noa] = this.m_outputs[noa].outputValue(true); } if (this.m_instances.classAttribute().isNumeric()) { return theArray; } // now normalize the array double count = 0; for (int noa = 0; noa < this.m_numClasses; noa++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } count += theArray[noa]; } if (count <= 0) { return this.m_ZeroR.distributionForInstance(i); } for (int noa = 0; noa < this.m_numClasses; noa++) { theArray[noa] /= count; } return theArray; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<>(14); newVector.addElement(new Option("\tLearning Rate for the backpropagation algorithm.\n" + "\t(Value should be between 0 - 1, Default = 0.3).", "L", 1, "-L <learning rate>")); newVector.addElement(new Option("\tMomentum Rate for the backpropagation algorithm.\n" + "\t(Value should be between 0 - 1, Default = 0.2).", "M", 1, "-M <momentum>")); newVector.addElement(new Option("\tNumber of epochs to train through.\n" + "\t(Default = 500).", "N", 1, "-N <number of epochs>")); newVector.addElement(new Option("\tPercentage size of validation set to use to terminate\n" + "\ttraining (if this is non zero it can pre-empt num of epochs).\n" + "\t(Value should be between 0 - 100, Default = 0).", "V", 1, "-V <percentage size of validation set>")); newVector.addElement(new Option("\tThe value used to seed the random number generator\n" + "\t(Value should be >= 0 and a long, Default = 0).", "S", 1, "-S <seed>")); newVector.addElement(new Option("\tThe consecutive number of errors allowed for validation\n" + "\ttesting before the network terminates.\n" + "\t(Value should be > 0, Default = 20).", "E", 1, "-E <threshold for number of consecutive errors>")); newVector.addElement(new Option("\tGUI will be opened.\n" + "\t(Use this to bring up a GUI).", "G", 0, "-G")); newVector.addElement(new Option("\tAutocreation of the network connections will NOT be done.\n" + "\t(This will be ignored if -G is NOT set)", "A", 0, "-A")); newVector.addElement(new Option("\tA NominalToBinary filter will NOT automatically be used.\n" + "\t(Set this to not use a NominalToBinary filter).", "B", 0, "-B")); newVector.addElement(new Option("\tThe hidden layers to be created for the network.\n" + "\t(Value should be a list of comma separated Natural \n" + "\tnumbers or the letters 'a' = (attribs + classes) / 2, \n" + "\t'i' = attribs, 'o' = classes, 't' = attribs + classes)\n" + "\tfor wildcard values, Default = a).", "H", 1, "-H <comma separated numbers for nodes on each layer>")); newVector.addElement(new Option("\tNormalizing a numeric class will NOT be done.\n" + "\t(Set this to not normalize the class if it's numeric).", "C", 0, "-C")); newVector.addElement(new Option("\tNormalizing the attributes will NOT be done.\n" + "\t(Set this to not normalize the attributes).", "I", 0, "-I")); newVector.addElement(new Option("\tResetting the network will NOT be allowed.\n" + "\t(Set this to not allow the network to reset).", "R", 0, "-R")); newVector.addElement(new Option("\tLearning rate decay will occur.\n" + "\t(Set this to cause the learning rate to decay).", "D", 0, "-D")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -L &lt;learning rate&gt; * Learning Rate for the backpropagation algorithm. * (Value should be between 0 - 1, Default = 0.3). * </pre> * * <pre> * -M &lt;momentum&gt; * Momentum Rate for the backpropagation algorithm. * (Value should be between 0 - 1, Default = 0.2). * </pre> * * <pre> * -N &lt;number of epochs&gt; * Number of epochs to train through. * (Default = 500). * </pre> * * <pre> * -V &lt;percentage size of validation set&gt; * Percentage size of validation set to use to terminate * training (if this is non zero it can pre-empt num of epochs). * (Value should be between 0 - 100, Default = 0).
* </pre> * * <pre> * -S &lt;seed&gt; * The value used to seed the random number generator * (Value should be &gt;= 0 and a long, Default = 0). * </pre> * * <pre> * -E &lt;threshold for number of consecutive errors&gt; * The consecutive number of errors allowed for validation * testing before the network terminates. * (Value should be &gt; 0, Default = 20). * </pre> * * <pre> * -G * GUI will be opened. * (Use this to bring up a GUI). * </pre> * * <pre> * -A * Autocreation of the network connections will NOT be done. * (This will be ignored if -G is NOT set) * </pre> * * <pre> * -B * A NominalToBinary filter will NOT automatically be used. * (Set this to not use a NominalToBinary filter). * </pre> * * <pre> * -H &lt;comma separated numbers for nodes on each layer&gt; * The hidden layers to be created for the network. * (Value should be a list of comma separated Natural * numbers or the letters 'a' = (attribs + classes) / 2, * 'i' = attribs, 'o' = classes, 't' = attribs + classes) * for wildcard values, Default = a). * </pre> * * <pre> * -C * Normalizing a numeric class will NOT be done. * (Set this to not normalize the class if it's numeric). * </pre> * * <pre> * -I * Normalizing the attributes will NOT be done. * (Set this to not normalize the attributes). * </pre> * * <pre> * -R * Resetting the network will NOT be allowed. * (Set this to not allow the network to reset). * </pre> * * <pre> * -D * Learning rate decay will occur. * (Set this to cause the learning rate to decay). * </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { // the defaults can be found here!!!! String learningString = Utils.getOption('L', options); if (learningString.length() != 0) { this.setLearningRate((new Double(learningString)).doubleValue()); } else { this.setLearningRate(0.3); } String momentumString = Utils.getOption('M', options); if (momentumString.length() != 0) { this.setMomentum((new Double(momentumString)).doubleValue()); } else { this.setMomentum(0.2); } String epochsString = Utils.getOption('N', options); if (epochsString.length() != 0) { this.setTrainingTime(Integer.parseInt(epochsString)); } else { this.setTrainingTime(500); } String valSizeString = Utils.getOption('V', options); if (valSizeString.length() != 0) { this.setValidationSetSize(Integer.parseInt(valSizeString)); } else { this.setValidationSetSize(0); } String seedString = Utils.getOption('S', options); if (seedString.length() != 0) { this.setSeed(Integer.parseInt(seedString)); } else { this.setSeed(0); } String thresholdString = Utils.getOption('E', options); if (thresholdString.length() != 0) { this.setValidationThreshold(Integer.parseInt(thresholdString)); } else { this.setValidationThreshold(20); } String hiddenLayers = Utils.getOption('H', options); if (hiddenLayers.length() != 0) { this.setHiddenLayers(hiddenLayers); } else { this.setHiddenLayers("a"); } if (Utils.getFlag('G', options)) { this.setGUI(true); } else { this.setGUI(false); } // small note.
since the gui is the only option that can change the other // options, this should be set first to allow the other options to be set // properly if (Utils.getFlag('A', options)) { this.setAutoBuild(false); } else { this.setAutoBuild(true); } if (Utils.getFlag('B', options)) { this.setNominalToBinaryFilter(false); } else { this.setNominalToBinaryFilter(true); } if (Utils.getFlag('C', options)) { this.setNormalizeNumericClass(false); } else { this.setNormalizeNumericClass(true); } if (Utils.getFlag('I', options)) { this.setNormalizeAttributes(false); } else { this.setNormalizeAttributes(true); } if (Utils.getFlag('R', options)) { this.setReset(false); } else { this.setReset(true); } if (Utils.getFlag('D', options)) { this.setDecay(true); } else { this.setDecay(false); } super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of MultilayerPerceptron. * * @return an array of strings suitable for passing to setOptions() */ @Override public String[] getOptions() { Vector<String> options = new Vector<>(); options.add("-L"); options.add("" + this.getLearningRate()); options.add("-M"); options.add("" + this.getMomentum()); options.add("-N"); options.add("" + this.getTrainingTime()); options.add("-V"); options.add("" + this.getValidationSetSize()); options.add("-S"); options.add("" + this.getSeed()); options.add("-E"); options.add("" + this.getValidationThreshold()); options.add("-H"); options.add(this.getHiddenLayers()); if (this.getGUI()) { options.add("-G"); } if (!this.getAutoBuild()) { options.add("-A"); } if (!this.getNominalToBinaryFilter()) { options.add("-B"); } if (!this.getNormalizeNumericClass()) { options.add("-C"); } if (!this.getNormalizeAttributes()) { options.add("-I"); } if (!this.getReset()) { options.add("-R"); } if (this.getDecay()) { options.add("-D"); } Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * @return string describing the model. */ @Override public String toString() { // only ZeroR model? if (this.m_useDefaultModel) { StringBuffer buf = new StringBuffer(); buf.append(this.getClass().getName().replaceAll(".*\\.", "") + "\n"); buf.append(this.getClass().getName().replaceAll(".*\\.", "").replaceAll(".", "=") + "\n\n"); buf.append("Warning: No model could be built, hence ZeroR model is used:\n\n"); buf.append(this.m_ZeroR.toString()); return buf.toString(); } StringBuffer model = new StringBuffer(this.m_neuralNodes.length * 100); // just a rough size guess NeuralNode con; double[] weights; NeuralConnection[] inputs; for (NeuralConnection m_neuralNode : this.m_neuralNodes) { con = (NeuralNode) m_neuralNode; // this would need a change // for items other than nodes!!!
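// Illustrative output fragment (hypothetical values, not from the source):
//   Sigmoid Node 0
//       Inputs    Weights
//       Threshold    -0.42
//       Attrib petalwidth    1.37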
weights = con.getWeights(); inputs = con.getInputs(); if (con.getMethod() instanceof SigmoidUnit) { model.append("Sigmoid "); } else if (con.getMethod() instanceof LinearUnit) { model.append("Linear "); } model.append("Node " + con.getId() + "\n    Inputs    Weights\n"); model.append("    Threshold    " + weights[0] + "\n"); for (int nob = 1; nob < con.getNumInputs() + 1; nob++) { if ((inputs[nob - 1].getType() & NeuralConnection.PURE_INPUT) == NeuralConnection.PURE_INPUT) { model.append("    Attrib " + this.m_instances.attribute(((NeuralEnd) inputs[nob - 1]).getLink()).name() + "    " + weights[nob] + "\n"); } else { model.append("    Node " + inputs[nob - 1].getId() + "    " + weights[nob] + "\n"); } } } // now put in the ends for (NeuralEnd m_output : this.m_outputs) { inputs = m_output.getInputs(); model.append("Class " + this.m_instances.classAttribute().value(m_output.getLink()) + "\n    Input\n"); for (int nob = 0; nob < m_output.getNumInputs(); nob++) { if ((inputs[nob].getType() & NeuralConnection.PURE_INPUT) == NeuralConnection.PURE_INPUT) { model.append("    Attrib " + this.m_instances.attribute(((NeuralEnd) inputs[nob]).getLink()).name() + "\n"); } else { model.append("    Node " + inputs[nob].getId() + "\n"); } } } return model.toString(); } /** * This will return a string describing the classifier. * * @return The string. */ public String globalInfo() { return "A Classifier that uses backpropagation to classify instances.\n" + "This network can be built by hand, created by an algorithm or both. " + "The network can also be monitored and modified during training time. " + "The nodes in this network are all sigmoid (except for when the class " + "is numeric in which case the output nodes become unthresholded " + "linear units)."; } /** * @return a string to describe the learning rate option. */ public String learningRateTipText() { return "The amount the" + " weights are updated."; } /** * @return a string to describe the momentum option. */ public String momentumTipText() { return "Momentum applied to the weights during updating."; } /** * @return a string to describe the AutoBuild option. */ public String autoBuildTipText() { return "Adds and connects up hidden layers in the network."; } /** * @return a string to describe the random seed option. */ public String seedTipText() { return "Seed used to initialise the random number generator. " + "Random numbers are used for setting the initial weights of the" + " connections between nodes, and also for shuffling the training data."; } /** * @return a string to describe the validation threshold option. */ public String validationThresholdTipText() { return "Used to terminate validation testing. " + "The value here dictates how many times in a row the validation set" + " error can get worse before training is terminated."; } /** * @return a string to describe the GUI option. */ public String GUITipText() { return "Brings up a gui interface." + " This will allow the pausing and altering of the neural network" + " during training.\n\n" + "* To add a node left click (this node will be automatically selected," + " ensure no other nodes were selected).\n" + "* To select a node left click on it either while no other node is" + " selected or while holding down the control key (this toggles that" + " node as being selected and not selected).\n" + "* To connect a node, first have the start node(s) selected, then click" + " either the end node or on an empty space (this will create a new node" + " that is connected with the selected nodes).
The selection status of" + " nodes will stay the same after the connection. (Note these are" + " directed connections, also a connection between two nodes will not" + " be established more than once and certain connections that are" + " deemed to be invalid will not be made).\n" + "* To remove a connection select one of the connected node(s) in the" + " connection and then right click the other node (it does not matter" + " whether the node is the start or end; the connection will be removed" + ").\n" + "* To remove a node right click it while no other nodes (including it)" + " are selected. (This will also remove all connections to it.)\n" + "* To deselect a node either left click it while holding down control," + " or right click on empty space.\n" + "* The raw inputs are provided from the labels on the left.\n" + "* The red nodes are hidden layers.\n" + "* The orange nodes are the output nodes.\n" + "* The labels on the right show the class the output node represents." + " Note that with a numeric class the output node will automatically be" + " made into an unthresholded linear unit.\n\n" + "Alterations to the neural network can only be done while the network" + " is not running. This also applies to the learning rate and other" + " fields on the control panel.\n\n" + "* You can accept the network as being finished at any time.\n" + "* The network is automatically paused at the beginning.\n" + "* There is a running indication of what epoch the network is up to" + " and what the (rough) error for that epoch was (or for" + " the validation if that is being used). Note that this error value" + " is based on a network that changes as the value is computed." + " (Also, whether" + " the class is normalized will affect the error reported for numeric" + " classes.)\n" + "* Once the network is done it will pause again and either wait to be" + " accepted or trained more.\n\n" + "Note that if the gui is not set the network will not require any" + " interaction.\n"; } /** * @return a string to describe the validation size option. */ public String validationSetSizeTipText() { return "The percentage size of the validation set. " + "(The training will continue until it is observed that" + " the error on the validation set has been consistently getting" + " worse, or if the training time is reached).\n" + "If this is set to zero no validation set will be used and instead" + " the network will train for the specified number of epochs."; } /** * @return a string to describe the training time option. */ public String trainingTimeTipText() { return "The number of epochs to train through." + " If the validation set is non-zero then it can terminate the network" + " early."; } /** * @return a string to describe the nominal to binary option. */ public String nominalToBinaryFilterTipText() { return "This will preprocess the instances with the filter." + " This could help improve performance if there are nominal attributes" + " in the data."; } /** * @return a string to describe the hidden layers in the network. */ public String hiddenLayersTipText() { return "This defines the hidden layers of the neural network." + " This is a list of positive whole numbers. 1 for each hidden layer." + " Comma separated. To have no hidden layers put a single 0 here." + " This will only be used if autobuild is set. There are also wildcard" + " values 'a' = (attribs + classes) / 2, 'i' = attribs, 'o' = classes" + " , 't' = attribs + classes."; } /** * @return a string to describe the numeric class normalization option.
*/ public String normalizeNumericClassTipText() { return "This will normalize the class if it's numeric." + " This could help improve performance of the network. It normalizes" + " the class to be between -1 and 1. Note that this is only done internally" + "; the output will be scaled back to the original range."; } /** * @return a string to describe the attribute normalization option. */ public String normalizeAttributesTipText() { return "This will normalize the attributes." + " This could help improve performance of the network." + " This is not reliant on the class being numeric. This will also" + " normalize nominal attributes as well (after they have been run" + " through the nominal to binary filter if that is in use) so that the" + " nominal values are between -1 and 1."; } /** * @return a string to describe the Reset option. */ public String resetTipText() { return "This will allow the network to reset with a lower learning rate." + " If the network diverges from the answer this will automatically" + " reset the network with a lower learning rate and begin training" + " again. This option is only available if the gui is not set. Note" + " that if the network diverges but isn't allowed to reset it will" + " fail the training process and return an error message."; } /** * @return a string to describe the Decay option. */ public String decayTipText() { return "This will cause the learning rate to decrease." + " This will divide the starting learning rate by the epoch number, to" + " determine what the current learning rate should be. This may help" + " to stop the network from diverging from the target output, as well" + " as improve general performance. Note that the decaying learning" + " rate will not be shown in the gui, only the original learning rate" + ". If the learning rate is changed in the gui, this is treated as the" + " starting learning rate."; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
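// Usage sketch (illustrative only; assumes an ARFF file with the class as the
// last attribute, and is not part of the original source):
//
//   Instances data = new Instances(
//       new java.io.BufferedReader(new java.io.FileReader("iris.arff")));
//   data.setClassIndex(data.numAttributes() - 1);
//   MultilayerPerceptron mlp = new MultilayerPerceptron();
//   mlp.setOptions(weka.core.Utils.splitOptions("-L 0.3 -M 0.2 -N 500 -H a"));
//   mlp.buildClassifier(data);
//   double[] dist = mlp.distributionForInstance(data.instance(0));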
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/QDA.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * QDA.java * Copyright (C) 2016 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions; import weka.classifiers.AbstractClassifier; import weka.core.*; import weka.core.Capabilities.Capability; import weka.estimators.MultivariateGaussianEstimator; import weka.filters.Filter; import weka.filters.unsupervised.attribute.RemoveUseless; import java.util.Collections; import java.util.Enumeration; /** * <!-- globalinfo-start --> * Generates a QDA model. The covariance matrices are estimated using maximum likelihood from the per-class data. * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> * Valid options are: <p/> * * <pre> -R &lt;double&gt; * The ridge parameter. * (default is 1e-6)</pre> * * <pre> -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution).</pre> * * <!-- options-end --> * * @author Eibe Frank, University of Waikato * @version $Revision: 10382 $ */ public class QDA extends AbstractClassifier implements WeightedInstancesHandler { /** for serialization */ static final long serialVersionUID = -9113383498193689291L; /** Holds header of training data */ protected Instances m_Data; /** The per-class estimators */ protected MultivariateGaussianEstimator[] m_Estimators; /** The logs of the prior probabilities */ protected double[] m_LogPriors; /** Ridge parameter */ protected double m_Ridge = 1e-6; /** Remove useless filter */ protected RemoveUseless m_RemoveUseless; /** * Global info for this classifier. */ public String globalInfo() { return "Generates a QDA model. The covariance matrices are estimated using maximum likelihood from the per-class data."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String ridgeTipText() { return "The value of the ridge parameter."; } /** * Get the value of Ridge. * * @return Value of Ridge. */ public double getRidge() { return m_Ridge; } /** * Set the value of Ridge. * * @param newRidge Value to assign to Ridge. */ public void setRidge(double newRidge) { m_Ridge = newRidge; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration<Option> listOptions() { java.util.Vector<Option> newVector = new java.util.Vector<Option>(7); newVector.addElement(new Option( "\tThe ridge parameter.\n"+ "\t(default is 1e-6)", "R", 1, "-R <double>")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. <p/> * * <!-- options-start --> * Valid options are: <p/> * * <pre> -R &lt;double&gt; * The ridge parameter. 
* (default is 1e-6)</pre> * * <pre> -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution).</pre> * * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String ridgeString = Utils.getOption('R', options); if (ridgeString.length() != 0) { setRidge(Double.parseDouble(ridgeString)); } else { setRidge(1e-6); } super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of QDA. * * @return an array of strings suitable for passing to setOptions() */ public String [] getOptions() { java.util.Vector<String> options = new java.util.Vector<String>(); options.add("-R"); options.add("" + getRidge()); Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NUMERIC_ATTRIBUTES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(0); return result; } /** * Builds the classifier. */ public void buildClassifier(Instances insts) throws Exception { // can classifier handle the data? getCapabilities().testWithFail(insts); // Remove constant attributes m_RemoveUseless = new RemoveUseless(); m_RemoveUseless.setInputFormat(insts); insts = Filter.useFilter(insts, m_RemoveUseless); insts.deleteWithMissingClass(); // Establish class counts, etc. 
int[] counts = new int[insts.numClasses()]; double[] sumOfWeightsPerClass = new double[insts.numClasses()]; for (int i = 0; i < insts.numInstances(); i++) { Instance inst = insts.instance(i); int classIndex = (int) inst.classValue(); counts[classIndex]++; sumOfWeightsPerClass[classIndex] += inst.weight(); } // Collect relevant data into array double[][][] data = new double[insts.numClasses()][][]; double[][] weights = new double[insts.numClasses()][]; for (int i = 0; i < insts.numClasses(); i++) { data[i] = new double[counts[i]][insts.numAttributes() - 1]; weights[i] = new double[counts[i]]; } int[] currentCount = new int[insts.numClasses()]; for (int i = 0; i < insts.numInstances(); i++) { Instance inst = insts.instance(i); int classIndex = (int) inst.classValue(); weights[classIndex][currentCount[classIndex]] = inst.weight(); int index = 0; double[] row = data[classIndex][currentCount[classIndex]++]; for (int j = 0; j < inst.numAttributes(); j++) { if (j != insts.classIndex()) { row[index++] = inst.value(j); } } } // Establish estimator for each class m_Estimators = new MultivariateGaussianEstimator[insts.numClasses()]; for (int i = 0; i < insts.numClasses(); i++) { if (sumOfWeightsPerClass[i] > 0) { m_Estimators[i] = new MultivariateGaussianEstimator(); m_Estimators[i].setRidge(getRidge()); m_Estimators[i].estimate(data[i], weights[i]); } } // Establish prior probabilities for each class m_LogPriors = new double[insts.numClasses()]; double sumOfWeights = Utils.sum(sumOfWeightsPerClass); for (int i = 0; i < insts.numClasses(); i++) { if (sumOfWeightsPerClass[i] > 0) { m_LogPriors[i] = Math.log(sumOfWeightsPerClass[i]) - Math.log(sumOfWeights); } } // Store header only m_Data = new Instances(insts, 0); } /** * Output class probabilities using Bayes' rule. */ public double[] distributionForInstance(Instance inst) throws Exception { // Filter instance m_RemoveUseless.input(inst); inst = m_RemoveUseless.output(); // Convert instance to array double[] values = new double[inst.numAttributes() - 1]; int index = 0; for (int i = 0; i < m_Data.numAttributes(); i++) { if (i != m_Data.classIndex()) { values[index++] = inst.value(i); } } double[] posteriorProbs = new double[m_Data.numClasses()]; for (int i = 0; i < m_Data.numClasses(); i++) { if (m_Estimators[i] != null) { posteriorProbs[i] = m_Estimators[i].logDensity(values) + m_LogPriors[i]; } else { posteriorProbs[i] = -Double.MAX_VALUE; } } posteriorProbs = Utils.logs2probs(posteriorProbs); return posteriorProbs; } /** * Produces textual description of the classifier. * @return the textual description */ public String toString() { if (m_LogPriors == null) { return "No model has been built yet."; } StringBuffer result = new StringBuffer(); result.append("QDA model (multivariate Gaussian for each class)\n\n"); for (int i = 0; i < m_Data.numClasses(); i++) { if (m_Estimators[i] != null) { result.append("Estimates for class " + m_Data.classAttribute().value(i) + "\n\n"); result.append("Natural logarithm of class prior probability: " + Utils.doubleToString(m_LogPriors[i], getNumDecimalPlaces()) + "\n"); result.append("Class prior probability: " + Utils.doubleToString(Math.exp(m_LogPriors[i]), getNumDecimalPlaces()) + "\n\n"); result.append("Multivariate Gaussian estimator:\n\n" + m_Estimators[i] + "\n"); } } return result.toString(); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision: 10382 $"); } /** * Generates a QDA classifier. 
* * @param argv the options */ public static void main(String [] argv){ runClassifier(new QDA(), argv); } }
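A minimal usage sketch for the QDA class above (assumptions: a dataset with only numeric attributes and a nominal class in the last position; "iris.arff" is a placeholder file name; DataSource is Weka's standard loader):

import weka.classifiers.functions.QDA;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class QDADemo {
  public static void main(String[] args) throws Exception {
    Instances data = DataSource.read("iris.arff"); // placeholder path
    data.setClassIndex(data.numAttributes() - 1);
    QDA qda = new QDA();
    qda.setRidge(1e-6); // same default that the -R option documents
    qda.buildClassifier(data);
    // Posterior class probabilities for the first instance, obtained by
    // combining per-class Gaussian log densities with the log priors.
    double[] probs = qda.distributionForInstance(data.instance(0));
    for (int i = 0; i < probs.length; i++) {
      System.out.println(data.classAttribute().value(i) + ": " + probs[i]);
    }
  }
}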
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/SGD.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * SGD.java * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions; import java.util.ArrayList; import java.util.Collections; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.RandomizableClassifier; import weka.classifiers.UpdateableClassifier; import weka.core.Aggregateable; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.Utils; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Normalize; import weka.filters.unsupervised.attribute.ReplaceMissingValues; /** * <!-- globalinfo-start --> Implements stochastic gradient descent for learning various linear * models (binary class SVM, binary class logistic regression, squared loss, Huber loss and * epsilon-insensitive loss linear regression). Globally replaces all missing values and transforms * nominal attributes into binary ones. It also normalizes all attributes, so the coefficients in * the output are based on the normalized data.<br/> * For numeric class attributes, the squared, Huber or epsilon-insensitive loss function must be * used. Epsilon-insensitive and Huber loss may require a much higher learning rate. * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -F * Set the loss function to minimize. * 0 = hinge loss (SVM), 1 = log loss (logistic regression), * 2 = squared loss (regression), 3 = epsilon insensitive loss (regression), * 4 = Huber loss (regression). * (default = 0) * </pre> * * <pre> * -L * The learning rate. If normalization is * turned off (as it is automatically for streaming data), then the * default learning rate will need to be reduced (try 0.0001). * (default = 0.01). * </pre> * * <pre> * -R &lt;double&gt; * The lambda regularization constant (default = 0.0001) * </pre> * * <pre> * -E &lt;integer&gt; * The number of epochs to perform (batch learning only, default = 500) * </pre> * * <pre> * -C &lt;double&gt; * The epsilon threshold (epsilon-insensitive and Huber loss only, default = 1e-3) * </pre> * * <pre> * -N * Don't normalize the data * </pre> * * <pre> * -M * Don't replace missing values * </pre> * * <pre> * -S &lt;num&gt; * Random number seed. * (default 1) * </pre> * * <pre> * -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution). 
* </pre> * * <!-- options-end --> * * @author Eibe Frank (eibe{[at]}cs{[dot]}waikato{[dot]}ac{[dot]}nz) * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision$ * */ public class SGD extends RandomizableClassifier implements UpdateableClassifier, OptionHandler, Aggregateable<SGD> { /** For serialization */ private static final long serialVersionUID = -3732968666673530290L; /** Replace missing values */ protected ReplaceMissingValues m_replaceMissing; /** * Convert nominal attributes to numerically coded binary ones. Uses supervised NominalToBinary in * the batch learning case */ protected Filter m_nominalToBinary; /** Normalize the training data */ protected Normalize m_normalize; /** The regularization parameter */ protected double m_lambda = 0.0001; /** The learning rate */ protected double m_learningRate = 0.01; /** Stores the weights (+ bias in the last element) */ protected double[] m_weights; /** The epsilon parameter for epsilon insensitive and Huber loss */ protected double m_epsilon = 1e-3; /** Holds the current iteration number */ protected double m_t; /** The number of training instances */ protected double m_numInstances; /** * The number of epochs to perform (batch learning). Total iterations is m_epochs * num instances */ protected int m_epochs = 500; /** * Turn off normalization of the input data. This option gets forced for incremental training. */ protected boolean m_dontNormalize = false; /** * Turn off global replacement of missing values. Missing values will be ignored instead. This * option gets forced for incremental training. */ protected boolean m_dontReplaceMissing = false; /** Holds the header of the training data */ protected Instances m_data; /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class if (this.m_loss == SQUAREDLOSS || this.m_loss == EPSILON_INSENSITIVE || this.m_loss == HUBER) { result.enable(Capability.NUMERIC_CLASS); } else { result.enable(Capability.BINARY_CLASS); } result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(0); return result; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String epsilonTipText() { return "The epsilon threshold for epsilon insensitive and Huber " + "loss. An error with absolute value less than this " + "threshold has loss of 0 for epsilon insensitive loss. " + "For Huber loss this is the boundary between the quadratic " + "and linear parts of the loss function."; } /** * Set the epsilon threshold on the error for epsilon insensitive and Huber loss functions * * @param e * the value of epsilon to use */ public void setEpsilon(final double e) { this.m_epsilon = e; } /** * Get the epsilon threshold on the error for epsilon insensitive and Huber loss functions * * @return the value of epsilon to use */ public double getEpsilon() { return this.m_epsilon; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String lambdaTipText() { return "The regularization constant. 
(default = 0.0001)"; } /** * Set the value of lambda to use * * @param lambda * the value of lambda to use */ public void setLambda(final double lambda) { this.m_lambda = lambda; } /** * Get the current value of lambda * * @return the current value of lambda */ public double getLambda() { return this.m_lambda; } /** * Set the learning rate. * * @param lr * the learning rate to use. */ public void setLearningRate(final double lr) { this.m_learningRate = lr; } /** * Get the learning rate. * * @return the learning rate */ public double getLearningRate() { return this.m_learningRate; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String learningRateTipText() { return "The learning rate. If normalization is turned off " + "(as it is automatically for streaming data), then" + "the default learning rate will need to be reduced (" + "try 0.0001)."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String epochsTipText() { return "The number of epochs to perform (batch learning). " + "The total number of iterations is epochs * num" + " instances."; } /** * Set the number of epochs to use * * @param e * the number of epochs to use */ public void setEpochs(final int e) { this.m_epochs = e; } /** * Get current number of epochs * * @return the current number of epochs */ public int getEpochs() { return this.m_epochs; } /** * Turn normalization off/on. * * @param m * true if normalization is to be disabled. */ public void setDontNormalize(final boolean m) { this.m_dontNormalize = m; } /** * Get whether normalization has been turned off. * * @return true if normalization has been disabled. */ public boolean getDontNormalize() { return this.m_dontNormalize; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String dontNormalizeTipText() { return "Turn normalization off"; } /** * Turn global replacement of missing values off/on. If turned off, then missing values are * effectively ignored. * * @param m * true if global replacement of missing values is to be turned off. */ public void setDontReplaceMissing(final boolean m) { this.m_dontReplaceMissing = m; } /** * Get whether global replacement of missing values has been disabled. * * @return true if global replacement of missing values has been turned off */ public boolean getDontReplaceMissing() { return this.m_dontReplaceMissing; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String dontReplaceMissingTipText() { return "Turn off global replacement of missing values"; } /** * Set the loss function to use. * * @param function * the loss function to use. */ public void setLossFunction(final SelectedTag function) { if (function.getTags() == TAGS_SELECTION) { this.m_loss = function.getSelectedTag().getID(); } } /** * Get the current loss function. * * @return the current loss function. */ public SelectedTag getLossFunction() { return new SelectedTag(this.m_loss, TAGS_SELECTION); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String lossFunctionTipText() { return "The loss function to use. 
Hinge loss (SVM), " + "log loss (logistic regression) or " + "squared loss (regression)."; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<>(); newVector.add(new Option("\tSet the loss function to minimize.\n\t0 = " + "hinge loss (SVM), 1 = log loss (logistic regression),\n\t" + "2 = squared loss (regression), 3 = epsilon insensitive loss (regression)," + "\n\t4 = Huber loss (regression).\n\t(default = 0)", "F", 1, "-F")); newVector.add( new Option("\tThe learning rate. If normalization is\n" + "\tturned off (as it is automatically for streaming data), then the\n\t" + "default learning rate will need to be reduced " + "(try 0.0001).\n\t(default = 0.01).", "L", 1, "-L")); newVector.add(new Option("\tThe lambda regularization constant " + "(default = 0.0001)", "R", 1, "-R <double>")); newVector.add(new Option("\tThe number of epochs to perform (" + "batch learning only, default = 500)", "E", 1, "-E <integer>")); newVector.add(new Option("\tThe epsilon threshold (" + "epsilon-insenstive and Huber loss only, default = 1e-3)", "C", 1, "-C <double>")); newVector.add(new Option("\tDon't normalize the data", "N", 0, "-N")); newVector.add(new Option("\tDon't replace missing values", "M", 0, "-M")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -F * Set the loss function to minimize. * 0 = hinge loss (SVM), 1 = log loss (logistic regression), * 2 = squared loss (regression), 3 = epsilon insensitive loss (regression), * 4 = Huber loss (regression). * (default = 0) * </pre> * * <pre> * -L * The learning rate. If normalization is * turned off (as it is automatically for streaming data), then the * default learning rate will need to be reduced (try 0.0001). * (default = 0.01). * </pre> * * <pre> * -R &lt;double&gt; * The lambda regularization constant (default = 0.0001) * </pre> * * <pre> * -E &lt;integer&gt; * The number of epochs to perform (batch learning only, default = 500) * </pre> * * <pre> * -C &lt;double&gt; * The epsilon threshold (epsilon-insenstive and Huber loss only, default = 1e-3) * </pre> * * <pre> * -N * Don't normalize the data * </pre> * * <pre> * -M * Don't replace missing values * </pre> * * <pre> * -S &lt;num&gt; * Random number seed. * (default 1) * </pre> * * <pre> * -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution). 
* </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { this.reset(); super.setOptions(options); String lossString = Utils.getOption('F', options); if (lossString.length() != 0) { this.setLossFunction(new SelectedTag(Integer.parseInt(lossString), TAGS_SELECTION)); } String lambdaString = Utils.getOption('R', options); if (lambdaString.length() > 0) { this.setLambda(Double.parseDouble(lambdaString)); } String learningRateString = Utils.getOption('L', options); if (learningRateString.length() > 0) { this.setLearningRate(Double.parseDouble(learningRateString)); } String epochsString = Utils.getOption("E", options); if (epochsString.length() > 0) { this.setEpochs(Integer.parseInt(epochsString)); } String epsilonString = Utils.getOption("C", options); if (epsilonString.length() > 0) { this.setEpsilon(Double.parseDouble(epsilonString)); } this.setDontNormalize(Utils.getFlag("N", options)); this.setDontReplaceMissing(Utils.getFlag('M', options)); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { ArrayList<String> options = new ArrayList<>(); options.add("-F"); options.add("" + this.getLossFunction().getSelectedTag().getID()); options.add("-L"); options.add("" + this.getLearningRate()); options.add("-R"); options.add("" + this.getLambda()); options.add("-E"); options.add("" + this.getEpochs()); options.add("-C"); options.add("" + this.getEpsilon()); if (this.getDontNormalize()) { options.add("-N"); } if (this.getDontReplaceMissing()) { options.add("-M"); } Collections.addAll(options, super.getOptions()); return options.toArray(new String[1]); } /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "Implements stochastic gradient descent for learning" + " various linear models (binary class SVM, binary class" + " logistic regression, squared loss, Huber loss and " + "epsilon-insensitive loss linear regression)." + " Globally replaces all missing values and transforms nominal" + " attributes into binary ones. It also normalizes all attributes," + " so the coefficients in the output are based on the normalized" + " data.\n" + "For numeric class attributes, the squared, Huber or " + "epsilon-insensitive loss function must be used. Epsilon-insensitive " + "and Huber loss may require a much higher learning rate."; } /** * Reset the classifier. */ public void reset() { this.m_t = 1; this.m_weights = null; } /** * Method for building the classifier. * * @param data * the set of training instances. * @throws Exception * if the classifier can't be built successfully. */ @Override public void buildClassifier(Instances data) throws Exception { this.reset(); // can classifier handle the data? 
this.getCapabilities().testWithFail(data); data = new Instances(data); data.deleteWithMissingClass(); if (data.numInstances() > 0 && !this.m_dontReplaceMissing) { this.m_replaceMissing = new ReplaceMissingValues(); this.m_replaceMissing.setInputFormat(data); data = Filter.useFilter(data, this.m_replaceMissing); } // check for only numeric attributes boolean onlyNumeric = true; for (int i = 0; i < data.numAttributes(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (i != data.classIndex()) { if (!data.attribute(i).isNumeric()) { onlyNumeric = false; break; } } } if (!onlyNumeric) { if (data.numInstances() > 0) { this.m_nominalToBinary = new weka.filters.supervised.attribute.NominalToBinary(); } else { this.m_nominalToBinary = new weka.filters.unsupervised.attribute.NominalToBinary(); } this.m_nominalToBinary.setInputFormat(data); data = Filter.useFilter(data, this.m_nominalToBinary); } if (!this.m_dontNormalize && data.numInstances() > 0) { this.m_normalize = new Normalize(); this.m_normalize.setInputFormat(data); data = Filter.useFilter(data, this.m_normalize); } this.m_numInstances = data.numInstances(); this.m_weights = new double[data.numAttributes() + 1]; this.m_data = new Instances(data, 0); if (data.numInstances() > 0) { data.randomize(new Random(this.getSeed())); // randomize the data this.train(data); } } /** the hinge loss function. */ public static final int HINGE = 0; /** the log loss function. */ public static final int LOGLOSS = 1; /** the squared loss function. */ public static final int SQUAREDLOSS = 2; /** The epsilon insensitive loss function */ public static final int EPSILON_INSENSITIVE = 3; /** The Huber loss function */ public static final int HUBER = 4; /** The current loss function to minimize */ protected int m_loss = HINGE; /** Loss functions to choose from */ public static final Tag[] TAGS_SELECTION = { new Tag(HINGE, "Hinge loss (SVM)"), new Tag(LOGLOSS, "Log loss (logistic regression)"), new Tag(SQUAREDLOSS, "Squared loss (regression)"), new Tag(EPSILON_INSENSITIVE, "Epsilon-insensitive loss (SVM regression)"), new Tag(HUBER, "Huber loss (robust regression)") }; protected double dloss(final double z) { if (this.m_loss == HINGE) { return (z < 1) ? 
1 : 0; } if (this.m_loss == LOGLOSS) { // log loss if (z < 0) { return 1.0 / (Math.exp(z) + 1.0); } else { double t = Math.exp(-z); return t / (t + 1); } } if (this.m_loss == EPSILON_INSENSITIVE) { if (z > this.m_epsilon) { return 1.0; } if (-z > this.m_epsilon) { return -1.0; } return 0; } if (this.m_loss == HUBER) { if (Math.abs(z) <= this.m_epsilon) { return z; } else if (z > 0.0) { return this.m_epsilon; } else { return -this.m_epsilon; } } // squared loss return z; } private void train(final Instances data) throws Exception { for (int e = 0; e < this.m_epochs; e++) { for (int i = 0; i < data.numInstances(); i++) { this.updateClassifier(data.instance(i), false); } } } protected static double dotProd(final Instance inst1, final double[] weights, final int classIndex) throws InterruptedException { double result = 0; int n1 = inst1.numValues(); int n2 = weights.length - 1; for (int p1 = 0, p2 = 0; p1 < n1 && p2 < n2;) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } int ind1 = inst1.index(p1); int ind2 = p2; if (ind1 == ind2) { if (ind1 != classIndex && !inst1.isMissingSparse(p1)) { result += inst1.valueSparse(p1) * weights[p2]; } p1++; p2++; } else if (ind1 > ind2) { p2++; } else { p1++; } } return (result); } /** * Updates the classifier with the given instance. * * @param instance * the new training instance to include in the model * @param filter * true if the instance should pass through any of the filters set up in buildClassifier(). * When batch training buildClassifier() already batch filters all training instances so * don't need to filter them again here. * @exception Exception * if the instance could not be incorporated in the model. */ protected void updateClassifier(Instance instance, final boolean filter) throws Exception { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (!instance.classIsMissing()) { if (filter) { if (this.m_replaceMissing != null) { this.m_replaceMissing.input(instance); instance = this.m_replaceMissing.output(); } if (this.m_nominalToBinary != null) { this.m_nominalToBinary.input(instance); instance = this.m_nominalToBinary.output(); } if (this.m_normalize != null) { this.m_normalize.input(instance); instance = this.m_normalize.output(); } } double wx = dotProd(instance, this.m_weights, instance.classIndex()); double y; double z; if (instance.classAttribute().isNominal()) { y = (instance.classValue() == 0) ? 
-1 : 1; z = y * (wx + this.m_weights[this.m_weights.length - 1]); } else { y = instance.classValue(); z = y - (wx + this.m_weights[this.m_weights.length - 1]); y = 1; } // Compute multiplier for weight decay double multiplier = 1.0; if (this.m_numInstances == 0) { multiplier = 1.0 - (this.m_learningRate * this.m_lambda) / this.m_t; } else { multiplier = 1.0 - (this.m_learningRate * this.m_lambda) / this.m_numInstances; } for (int i = 0; i < this.m_weights.length - 1; i++) { this.m_weights[i] *= multiplier; } // Only need to do the following if the loss is non-zero // if (m_loss != HINGE || (z < 1)) { if (this.m_loss == SQUAREDLOSS || this.m_loss == LOGLOSS || this.m_loss == HUBER || (this.m_loss == HINGE && (z < 1)) || (this.m_loss == EPSILON_INSENSITIVE && Math.abs(z) > this.m_epsilon)) { // Compute factor for updates double factor = this.m_learningRate * y * this.dloss(z); // Update coefficients for attributes int n1 = instance.numValues(); for (int p1 = 0; p1 < n1; p1++) { int indS = instance.index(p1); if (indS != instance.classIndex() && !instance.isMissingSparse(p1)) { this.m_weights[indS] += factor * instance.valueSparse(p1); } } // update the bias this.m_weights[this.m_weights.length - 1] += factor; } this.m_t++; } } /** * Updates the classifier with the given instance. * * @param instance * the new training instance to include in the model * @exception Exception * if the instance could not be incorporated in the model. */ @Override public void updateClassifier(final Instance instance) throws Exception { this.updateClassifier(instance, true); } /** * Computes the distribution for a given instance * * @param inst * the instance for which the distribution is computed * @return the distribution * @throws Exception * if the distribution can't be computed successfully */ @Override public double[] distributionForInstance(Instance inst) throws Exception { double[] result = (inst.classAttribute().isNominal()) ? new double[2] : new double[1]; if (this.m_replaceMissing != null) { this.m_replaceMissing.input(inst); inst = this.m_replaceMissing.output(); } if (this.m_nominalToBinary != null) { this.m_nominalToBinary.input(inst); inst = this.m_nominalToBinary.output(); } if (this.m_normalize != null) { this.m_normalize.input(inst); inst = this.m_normalize.output(); } double wx = dotProd(inst, this.m_weights, inst.classIndex());// * m_wScale; double z = (wx + this.m_weights[this.m_weights.length - 1]); if (inst.classAttribute().isNumeric()) { result[0] = z; return result; } if (z <= 0) { // z = 0; if (this.m_loss == LOGLOSS) { result[0] = 1.0 / (1.0 + Math.exp(z)); result[1] = 1.0 - result[0]; } else { result[0] = 1; } } else { if (this.m_loss == LOGLOSS) { result[1] = 1.0 / (1.0 + Math.exp(-z)); result[0] = 1.0 - result[1]; } else { result[1] = 1; } } return result; } public double[] getWeights() { return this.m_weights; } /** * Prints out the classifier. 
* * @return a description of the classifier as a string */ @Override public String toString() { if (this.m_weights == null) { return "SGD: No model built yet.\n"; } StringBuffer buff = new StringBuffer(); buff.append("Loss function: "); if (this.m_loss == HINGE) { buff.append("Hinge loss (SVM)\n\n"); } else if (this.m_loss == LOGLOSS) { buff.append("Log loss (logistic regression)\n\n"); } else if (this.m_loss == EPSILON_INSENSITIVE) { buff.append("Epsilon insensitive loss (SVM regression)\n\n"); } else if (this.m_loss == HUBER) { buff.append("Huber loss (robust regression)\n\n"); } else { buff.append("Squared loss (linear regression)\n\n"); } buff.append(this.m_data.classAttribute().name() + " = \n\n"); int printed = 0; for (int i = 0; i < this.m_weights.length - 1; i++) { if (i != this.m_data.classIndex()) { if (printed > 0) { buff.append(" + "); } else { buff.append(" "); } buff.append(Utils.doubleToString(this.m_weights[i], 12, 4) + " " + ((this.m_normalize != null) ? "(normalized) " : "") + this.m_data.attribute(i).name() + "\n"); printed++; } } if (this.m_weights[this.m_weights.length - 1] > 0) { buff.append(" + " + Utils.doubleToString(this.m_weights[this.m_weights.length - 1], 12, 4)); } else { buff.append(" - " + Utils.doubleToString(-this.m_weights[this.m_weights.length - 1], 12, 4)); } return buff.toString(); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } protected int m_numModels = 0; /** * Aggregate an object with this one * * @param toAggregate * the object to aggregate * @return the result of aggregation * @throws Exception * if the supplied object can't be aggregated for some reason */ @Override public SGD aggregate(final SGD toAggregate) throws Exception { if (this.m_weights == null) { throw new Exception("No model built yet, can't aggregate"); } if (!this.m_data.equalHeaders(toAggregate.m_data)) { throw new Exception("Can't aggregate - data headers don't match: " + this.m_data.equalHeadersMsg(toAggregate.m_data)); } if (this.m_weights.length != toAggregate.getWeights().length) { throw new Exception("Can't aggregate - SGD to aggregate has weight vector " + "that differs in length from ours."); } for (int i = 0; i < this.m_weights.length; i++) { this.m_weights[i] += toAggregate.getWeights()[i]; } this.m_numModels++; return this; } /** * Call to complete the aggregation process. Allows implementers to do any final processing based on * how many objects were aggregated. * * @throws Exception * if the aggregation can't be finalized for some reason */ @Override public void finalizeAggregation() throws Exception { if (this.m_numModels == 0) { throw new Exception("Unable to finalize aggregation - " + "haven't seen any models to aggregate"); } for (int i = 0; i < this.m_weights.length; i++) { this.m_weights[i] /= (this.m_numModels + 1); // plus one for us } // aggregation complete this.m_numModels = 0; } /** * Main method for testing this class. */ public static void main(final String[] args) { runClassifier(new SGD(), args); } }
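The heart of updateClassifier() above is a regularised gradient step: all non-bias weights are first scaled by a decay multiplier, and if the loss is active a step of size learningRate * y * dloss(z) is added along the instance. A self-contained sketch of a single hinge-loss update on a dense vector, following those same conventions (y in {-1, +1}, bias stored in the last weight slot; all variable names and values here are illustrative only):

public class SGDStepDemo {
  public static void main(String[] args) {
    double[] w = new double[] {0.0, 0.0, 0.0}; // two attribute weights + bias in the last slot
    double[] x = new double[] {0.5, -1.0};     // one (already normalized) training instance
    double y = 1;                              // class 0 maps to -1, class 1 maps to +1
    double learningRate = 0.01, lambda = 0.0001, numInstances = 100;

    double wx = w[0] * x[0] + w[1] * x[1];
    double z = y * (wx + w[2]);                // margin; hinge loss is active when z < 1

    // weight decay on the attribute weights (the bias is not decayed)
    double multiplier = 1.0 - (learningRate * lambda) / numInstances;
    w[0] *= multiplier;
    w[1] *= multiplier;

    if (z < 1) {                               // dloss(z) = 1 for hinge loss when z < 1
      double factor = learningRate * y;        // factor = learningRate * y * dloss(z)
      w[0] += factor * x[0];
      w[1] += factor * x[1];
      w[2] += factor;                          // bias update
    }
    System.out.println("w = [" + w[0] + ", " + w[1] + ", " + w[2] + "]");
  }
}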
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/SGDText.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * SGDText.java * Copyright (C) 2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions; import java.io.Serializable; import java.util.ArrayList; import java.util.Collections; import java.util.Enumeration; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Map; import java.util.Random; import java.util.Vector; import weka.classifiers.RandomizableClassifier; import weka.classifiers.UpdateableBatchProcessor; import weka.classifiers.UpdateableClassifier; import weka.core.Aggregateable; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.DenseInstance; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.core.stemmers.NullStemmer; import weka.core.stemmers.Stemmer; import weka.core.stopwords.Null; import weka.core.stopwords.StopwordsHandler; import weka.core.tokenizers.Tokenizer; import weka.core.tokenizers.WordTokenizer; /** <!-- globalinfo-start --> * Implements stochastic gradient descent for learning a linear binary class SVM or binary class logistic regression on text data. Operates directly (and only) on String attributes. Other types of input attributes are accepted but ignored during training and classification. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -F * Set the loss function to minimize. 0 = hinge loss (SVM), 1 = log loss (logistic regression) * (default = 0)</pre> * * <pre> -outputProbs * Output probabilities for SVMs (fits a logistic * model to the output of the SVM)</pre> * * <pre> -L * The learning rate (default = 0.01).</pre> * * <pre> -R &lt;double&gt; * The lambda regularization constant (default = 0.0001)</pre> * * <pre> -E &lt;integer&gt; * The number of epochs to perform (batch learning only, default = 500)</pre> * * <pre> -W * Use word frequencies instead of binary bag of words.</pre> * * <pre> -P &lt;# instances&gt; * How often to prune the dictionary of low frequency words (default = 0, i.e. don't prune)</pre> * * <pre> -M &lt;double&gt; * Minimum word frequency. Words with less than this frequency are ignored. * If periodic pruning is turned on then this is also used to determine which * words to remove from the dictionary (default = 3).</pre> * * <pre> -min-coeff &lt;double&gt; * Minimum absolute value of coefficients in the model. 
* If periodic pruning is turned on then this * is also used to prune words from the dictionary * (default = 0.001)</pre> * * <pre> -normalize * Normalize document length (use in conjunction with -norm and -lnorm)</pre> * * <pre> -norm &lt;num&gt; * Specify the norm that each instance must have (default 1.0)</pre> * * <pre> -lnorm &lt;num&gt; * Specify L-norm to use (default 2.0)</pre> * * <pre> -lowercase * Convert all tokens to lowercase before adding to the dictionary.</pre> * * <pre> -stopwords-handler * The stopwords handler to use (default Null).</pre> * * <pre> -tokenizer &lt;spec&gt; * The tokenizing algorithm (classname plus parameters) to use. * (default: weka.core.tokenizers.WordTokenizer)</pre> * * <pre> -stemmer &lt;spec&gt; * The stemming algorithm (classname plus parameters) to use.</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution).</pre> * <!-- options-end --> * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @author Eibe Frank (eibe{[at]}cs{[dot]}waikato{[dot]}ac{[dot]}nz) * */ public class SGDText extends RandomizableClassifier implements UpdateableClassifier, UpdateableBatchProcessor, WeightedInstancesHandler, Aggregateable<SGDText> { /** For serialization */ private static final long serialVersionUID = 7200171484002029584L; public static class Count implements Serializable { /** * For serialization */ private static final long serialVersionUID = 2104201532017340967L; public double m_count; public double m_weight; public Count(double c) { m_count = c; } } /** * The number of training instances at which to periodically prune the * dictionary of min frequency words. A value of 0 indicates don't * prune */ protected int m_periodicP = 0; /** * Only consider dictionary words (features) that occur at least this many * times. */ protected double m_minWordP = 3; /** * Prune terms from the model that have a coefficient smaller than this. */ protected double m_minAbsCoefficient = 0.001; /** Use word frequencies rather than bag-of-words if true */ protected boolean m_wordFrequencies = false; /** Whether to normalize document length or not */ protected boolean m_normalize = false; /** The length that each document vector should have in the end */ protected double m_norm = 1.0; /** The L-norm to use */ protected double m_lnorm = 2.0; /** The dictionary (and term weights) */ protected LinkedHashMap<String, Count> m_dictionary; /** Stopword handler to use. */ protected StopwordsHandler m_StopwordsHandler = new Null(); /** The tokenizer to use */ protected Tokenizer m_tokenizer = new WordTokenizer(); /** Whether or not to convert all tokens to lowercase */ protected boolean m_lowercaseTokens; /** The stemming algorithm. */ protected Stemmer m_stemmer = new NullStemmer(); /** The regularization parameter */ protected double m_lambda = 0.0001; /** The learning rate */ protected double m_learningRate = 0.01; /** Holds the current iteration number */ protected double m_t; /** Holds the bias term */ protected double m_bias; /** The number of training instances */ protected double m_numInstances; /** The header of the training data */ protected Instances m_data; /** * The number of epochs to perform (batch learning). 
Total iterations is * m_epochs * num instances */ protected int m_epochs = 500; /** * Holds the current document vector (LinkedHashMap is more efficient when * iterating over EntrySet than HashMap) */ protected transient LinkedHashMap<String, Count> m_inputVector; /** the hinge loss function. */ public static final int HINGE = 0; /** the log loss function. */ public static final int LOGLOSS = 1; /** The current loss function to minimize */ protected int m_loss = HINGE; /** Loss functions to choose from */ public static final Tag[] TAGS_SELECTION = { new Tag(HINGE, "Hinge loss (SVM)"), new Tag(LOGLOSS, "Log loss (logistic regression)") }; /** Used for producing probabilities for SVM via SGD logistic regression */ protected SGD m_svmProbs; /** * True if a logistic regression is to be fit to the output of the SVM for * producing probability estimates */ protected boolean m_fitLogistic = false; protected Instances m_fitLogisticStructure; protected double dloss(double z) { if (m_loss == HINGE) { return (z < 1) ? 1 : 0; } else { // log loss if (z < 0) { return 1.0 / (Math.exp(z) + 1.0); } else { double t = Math.exp(-z); return t / (t + 1); } } } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.STRING_ATTRIBUTES); result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); result.enable(Capability.BINARY_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(0); return result; } /** * the stemming algorithm to use, null means no stemming at all (i.e., the * NullStemmer is used). * * @param value the configured stemming algorithm, or null * @see NullStemmer */ public void setStemmer(Stemmer value) { if (value != null) { m_stemmer = value; } else { m_stemmer = new NullStemmer(); } } /** * Returns the current stemming algorithm, null if none is used. * * @return the current stemming algorithm, null if none set */ public Stemmer getStemmer() { return m_stemmer; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String stemmerTipText() { return "The stemming algorithm to use on the words."; } /** * the tokenizer algorithm to use. * * @param value the configured tokenizing algorithm */ public void setTokenizer(Tokenizer value) { m_tokenizer = value; } /** * Returns the current tokenizer algorithm. * * @return the current tokenizer algorithm */ public Tokenizer getTokenizer() { return m_tokenizer; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String tokenizerTipText() { return "The tokenizing algorithm to use on the strings."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String useWordFrequenciesTipText() { return "Use word frequencies rather than binary " + "bag of words representation"; } /** * Set whether to use word frequencies rather than binary bag of words * representation. * * @param u true if word frequencies are to be used. 
*/ public void setUseWordFrequencies(boolean u) { m_wordFrequencies = u; } /** * Get whether to use word frequencies rather than binary bag of words * representation. * * @return true if word frequencies are to be used. */ public boolean getUseWordFrequencies() { return m_wordFrequencies; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String lowercaseTokensTipText() { return "Whether to convert all tokens to lowercase"; } /** * Set whether to convert all tokens to lowercase * * @param l true if all tokens are to be converted to lowercase */ public void setLowercaseTokens(boolean l) { m_lowercaseTokens = l; } /** * Get whether to convert all tokens to lowercase * * @return true if all tokens are to be converted to lowercase */ public boolean getLowercaseTokens() { return m_lowercaseTokens; } /** * Sets the stopwords handler to use. * * @param value the stopwords handler, if null, Null is used */ public void setStopwordsHandler(StopwordsHandler value) { if (value != null) { m_StopwordsHandler = value; } else { m_StopwordsHandler = new Null(); } } /** * Gets the stopwords handler. * * @return the stopwords handler */ public StopwordsHandler getStopwordsHandler() { return m_StopwordsHandler; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String stopwordsHandlerTipText() { return "The stopwords handler to use (Null means no stopwords are used)."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String periodicPruningTipText() { return "How often (number of instances) to prune " + "the dictionary of low frequency terms. " + "0 means don't prune. Setting a positive " + "integer n means prune after every n instances"; } /** * Set how often to prune the dictionary * * @param p how often to prune */ public void setPeriodicPruning(int p) { m_periodicP = p; } /** * Get how often to prune the dictionary * * @return how often to prune the dictionary */ public int getPeriodicPruning() { return m_periodicP; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String minWordFrequencyTipText() { return "Ignore any words that don't occur at least " + "min frequency times in the training data. If periodic " + "pruning is turned on, then the dictionary is pruned " + "according to this value"; } /** * Set the minimum word frequency. Words that don't occur at least min freq * times are ignored when updating weights. If periodic pruning is turned on, * then min frequency is used when removing words from the dictionary. * * @param minFreq the minimum word frequency to use */ public void setMinWordFrequency(double minFreq) { m_minWordP = minFreq; } /** * Get the minimum word frequency. Words that don't occur at least min freq * times are ignored when updating weights. If periodic pruning is turned on, * then min frequency is used when removing words from the dictionary. 
* * @return the minimum word frequency to use */ public double getMinWordFrequency() { return m_minWordP; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String minAbsoluteCoefficientValueTipText() { return "The minimum absolute magnitude for model coefficients. Terms " + "with weights smaller than this value are ignored. If periodic " + "pruning is turned on then this is also used to determine if a " + "word should be removed from the dictionary."; } /** * Set the minimum absolute magnitude for model coefficients. Terms with * weights smaller than this value are ignored. If periodic pruning is turned * on then this is also used to determine if a word should be removed from the * dictionary * * @param minCoeff the minimum absolute value of a model coefficient */ public void setMinAbsoluteCoefficientValue(double minCoeff) { m_minAbsCoefficient = minCoeff; } /** * Get the minimum absolute magnitude for model coefficients. Terms with * weights smaller than this value are ignored. If periodic pruning is turned * on then this is also used to determine if a word should be removed from the * dictionary * * @return the minimum absolute value of a model coefficient */ public double getMinAbsoluteCoefficientValue() { return m_minAbsCoefficient; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String normalizeDocLengthTipText() { return "If true then document length is normalized according " + "to the settings for norm and lnorm"; } /** * Set whether to normalize the length of each document * * @param norm true if document lengths are to be normalized */ public void setNormalizeDocLength(boolean norm) { m_normalize = norm; } /** * Get whether to normalize the length of each document * * @return true if document lengths are to be normalized */ public boolean getNormalizeDocLength() { return m_normalize; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String normTipText() { return "The norm of the instances after normalization."; } /** * Get the instance's Norm. * * @return the Norm */ public double getNorm() { return m_norm; } /** * Set the norm of the instances * * @param newNorm the norm to which the instances must be set */ public void setNorm(double newNorm) { m_norm = newNorm; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String LNormTipText() { return "The LNorm to use for document length normalization."; } /** * Get the L Norm used. * * @return the L-norm used */ public double getLNorm() { return m_lnorm; } /** * Set the L-norm to use * * @param newLNorm the L-norm */ public void setLNorm(double newLNorm) { m_lnorm = newLNorm; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String lambdaTipText() { return "The regularization constant. (default = 0.0001)"; } /** * Set the value of lambda to use * * @param lambda the value of lambda to use */ public void setLambda(double lambda) { m_lambda = lambda; } /** * Get the current value of lambda * * @return the current value of lambda */ public double getLambda() { return m_lambda; } /** * Set the learning rate. 
* * @param lr the learning rate to use. */ public void setLearningRate(double lr) { m_learningRate = lr; } /** * Get the learning rate. * * @return the learning rate */ public double getLearningRate() { return m_learningRate; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String learningRateTipText() { return "The learning rate."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String epochsTipText() { return "The number of epochs to perform (batch learning). " + "The total number of iterations is epochs * num" + " instances."; } /** * Set the number of epochs to use * * @param e the number of epochs to use */ public void setEpochs(int e) { m_epochs = e; } /** * Get current number of epochs * * @return the current number of epochs */ public int getEpochs() { return m_epochs; } /** * Set the loss function to use. * * @param function the loss function to use. */ public void setLossFunction(SelectedTag function) { if (function.getTags() == TAGS_SELECTION) { m_loss = function.getSelectedTag().getID(); } } /** * Get the current loss function. * * @return the current loss function. */ public SelectedTag getLossFunction() { return new SelectedTag(m_loss, TAGS_SELECTION); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String lossFunctionTipText() { return "The loss function to use. Hinge loss (SVM) or " + "log loss (logistic regression)."; } /** * Set whether to fit a logistic regression (itself trained using SGD) to the * outputs of the SVM (if an SVM is being learned). * * @param o true if a logistic regression is to be fit to the output of the * SVM to produce probability estimates. */ public void setOutputProbsForSVM(boolean o) { m_fitLogistic = o; } /** * Get whether to fit a logistic regression (itself trained using SGD) to the * outputs of the SVM (if an SVM is being learned). * * @return true if a logistic regression is to be fit to the output of the SVM * to produce probability estimates. */ public boolean getOutputProbsForSVM() { return m_fitLogistic; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String outputProbsForSVMTipText() { return "Fit a logistic regression to the output of SVM for " + "producing probability estimates"; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<Option>(); newVector.add(new Option("\tSet the loss function to minimize. 
0 = " + "hinge loss (SVM), 1 = log loss (logistic regression)\n\t" + "(default = 0)", "F", 1, "-F")); newVector .add(new Option("\tOutput probabilities for SVMs (fits a logsitic\n\t" + "model to the output of the SVM)", "output-probs", 0, "-outputProbs")); newVector.add(new Option("\tThe learning rate (default = 0.01).", "L", 1, "-L")); newVector.add(new Option("\tThe lambda regularization constant " + "(default = 0.0001)", "R", 1, "-R <double>")); newVector.add(new Option("\tThe number of epochs to perform (" + "batch learning only, default = 500)", "E", 1, "-E <integer>")); newVector.add(new Option("\tUse word frequencies instead of " + "binary bag of words.", "W", 0, "-W")); newVector.add(new Option("\tHow often to prune the dictionary " + "of low frequency words (default = 0, i.e. don't prune)", "P", 1, "-P <# instances>")); newVector.add(new Option("\tMinimum word frequency. Words with less " + "than this frequence are ignored.\n\tIf periodic pruning " + "is turned on then this is also used to determine which\n\t" + "words to remove from the dictionary (default = 3).", "M", 1, "-M <double>")); newVector.add(new Option("\tMinimum absolute value of coefficients " + "in the model.\n\tIf periodic pruning is turned on then this\n\t" + "is also used to prune words from the dictionary\n\t" + "(default = 0.001", "min-coeff", 1, "-min-coeff <double>")); newVector.addElement(new Option( "\tNormalize document length (use in conjunction with -norm and " + "-lnorm)", "normalize", 0, "-normalize")); newVector.addElement(new Option( "\tSpecify the norm that each instance must have (default 1.0)", "norm", 1, "-norm <num>")); newVector.addElement(new Option("\tSpecify L-norm to use (default 2.0)", "lnorm", 1, "-lnorm <num>")); newVector.addElement(new Option("\tConvert all tokens to lowercase " + "before adding to the dictionary.", "lowercase", 0, "-lowercase")); newVector.addElement(new Option( "\tThe stopwords handler to use (default Null).", "-stopwords-handler", 1, "-stopwords-handler")); newVector.addElement(new Option( "\tThe tokenizing algorihtm (classname plus parameters) to use.\n" + "\t(default: " + WordTokenizer.class.getName() + ")", "tokenizer", 1, "-tokenizer <spec>")); newVector.addElement(new Option( "\tThe stemmering algorihtm (classname plus parameters) to use.", "stemmer", 1, "-stemmer <spec>")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. * <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -F * Set the loss function to minimize. 0 = hinge loss (SVM), 1 = log loss (logistic regression) * (default = 0)</pre> * * <pre> -outputProbs * Output probabilities for SVMs (fits a logsitic * model to the output of the SVM)</pre> * * <pre> -L * The learning rate (default = 0.01).</pre> * * <pre> -R &lt;double&gt; * The lambda regularization constant (default = 0.0001)</pre> * * <pre> -E &lt;integer&gt; * The number of epochs to perform (batch learning only, default = 500)</pre> * * <pre> -W * Use word frequencies instead of binary bag of words.</pre> * * <pre> -P &lt;# instances&gt; * How often to prune the dictionary of low frequency words (default = 0, i.e. don't prune)</pre> * * <pre> -M &lt;double&gt; * Minimum word frequency. Words with less than this frequence are ignored. 
* If periodic pruning is turned on then this is also used to determine which * words to remove from the dictionary (default = 3).</pre> * * <pre> -min-coeff &lt;double&gt; * Minimum absolute value of coefficients in the model. * If periodic pruning is turned on then this * is also used to prune words from the dictionary * (default = 0.001)</pre> * * <pre> -normalize * Normalize document length (use in conjunction with -norm and -lnorm)</pre> * * <pre> -norm &lt;num&gt; * Specify the norm that each instance must have (default 1.0)</pre> * * <pre> -lnorm &lt;num&gt; * Specify L-norm to use (default 2.0)</pre> * * <pre> -lowercase * Convert all tokens to lowercase before adding to the dictionary.</pre> * * <pre> -stopwords-handler * The stopwords handler to use (default Null).</pre> * * <pre> -tokenizer &lt;spec&gt; * The tokenizing algorithm (classname plus parameters) to use. * (default: weka.core.tokenizers.WordTokenizer)</pre> * * <pre> -stemmer &lt;spec&gt; * The stemming algorithm (classname plus parameters) to use.</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution).</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { reset(); String lossString = Utils.getOption('F', options); if (lossString.length() != 0) { setLossFunction(new SelectedTag(Integer.parseInt(lossString), TAGS_SELECTION)); } setOutputProbsForSVM(Utils.getFlag("output-probs", options)); String lambdaString = Utils.getOption('R', options); if (lambdaString.length() > 0) { setLambda(Double.parseDouble(lambdaString)); } String learningRateString = Utils.getOption('L', options); if (learningRateString.length() > 0) { setLearningRate(Double.parseDouble(learningRateString)); } String epochsString = Utils.getOption("E", options); if (epochsString.length() > 0) { setEpochs(Integer.parseInt(epochsString)); } setUseWordFrequencies(Utils.getFlag("W", options)); String pruneFreqS = Utils.getOption("P", options); if (pruneFreqS.length() > 0) { setPeriodicPruning(Integer.parseInt(pruneFreqS)); } String minFreq = Utils.getOption("M", options); if (minFreq.length() > 0) { setMinWordFrequency(Double.parseDouble(minFreq)); } String minCoeff = Utils.getOption("min-coeff", options); if (minCoeff.length() > 0) { setMinAbsoluteCoefficientValue(Double.parseDouble(minCoeff)); } setNormalizeDocLength(Utils.getFlag("normalize", options)); String normFreqS = Utils.getOption("norm", options); if (normFreqS.length() > 0) { setNorm(Double.parseDouble(normFreqS)); } String lnormFreqS = Utils.getOption("lnorm", options); if (lnormFreqS.length() > 0) { setLNorm(Double.parseDouble(lnormFreqS)); } setLowercaseTokens(Utils.getFlag("lowercase", options)); String stemmerString = Utils.getOption("stemmer", options); if (stemmerString.length() == 0) { setStemmer(null); } else { String[] stemmerSpec = Utils.splitOptions(stemmerString); if (stemmerSpec.length == 0) { throw new Exception("Invalid stemmer specification string"); } String stemmerName = stemmerSpec[0]; stemmerSpec[0] = ""; Stemmer stemmer = (Stemmer) Utils.forName(Class.forName("weka.core.stemmers.Stemmer"), stemmerName, stemmerSpec); setStemmer(stemmer); } String
stopwordsHandlerString = Utils.getOption("stopwords-handler", options); if (stopwordsHandlerString.length() == 0) { setStopwordsHandler(null); } else { String[] stopwordsHandlerSpec = Utils.splitOptions(stopwordsHandlerString); if (stopwordsHandlerSpec.length == 0) { throw new Exception("Invalid StopwordsHandler specification string"); } String stopwordsHandlerName = stopwordsHandlerSpec[0]; stopwordsHandlerSpec[0] = ""; StopwordsHandler stopwordsHandler = (StopwordsHandler) Utils.forName(Class.forName("weka.core.stopwords.StopwordsHandler"), stopwordsHandlerName, stopwordsHandlerSpec); setStopwordsHandler(stopwordsHandler); } String tokenizerString = Utils.getOption("tokenizer", options); if (tokenizerString.length() == 0) { setTokenizer(new WordTokenizer()); } else { String[] tokenizerSpec = Utils.splitOptions(tokenizerString); if (tokenizerSpec.length == 0) { throw new Exception("Invalid tokenizer specification string"); } String tokenizerName = tokenizerSpec[0]; tokenizerSpec[0] = ""; Tokenizer tokenizer = (Tokenizer) Utils.forName(Class.forName("weka.core.tokenizers.Tokenizer"), tokenizerName, tokenizerSpec); setTokenizer(tokenizer); } super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { ArrayList<String> options = new ArrayList<String>(); options.add("-F"); options.add("" + getLossFunction().getSelectedTag().getID()); if (getOutputProbsForSVM()) { options.add("-output-probs"); } options.add("-L"); options.add("" + getLearningRate()); options.add("-R"); options.add("" + getLambda()); options.add("-E"); options.add("" + getEpochs()); if (getUseWordFrequencies()) { options.add("-W"); } options.add("-P"); options.add("" + getPeriodicPruning()); options.add("-M"); options.add("" + getMinWordFrequency()); options.add("-min-coeff"); options.add("" + getMinAbsoluteCoefficientValue()); if (getNormalizeDocLength()) { options.add("-normalize"); } options.add("-norm"); options.add("" + getNorm()); options.add("-lnorm"); options.add("" + getLNorm()); if (getLowercaseTokens()) { options.add("-lowercase"); } if (getStopwordsHandler() != null) { options.add("-stopwords-handler"); String spec = getStopwordsHandler().getClass().getName(); if (getStopwordsHandler() instanceof OptionHandler) { spec += " " + Utils.joinOptions(((OptionHandler) getStopwordsHandler()) .getOptions()); } options.add(spec.trim()); } options.add("-tokenizer"); String spec = getTokenizer().getClass().getName(); if (getTokenizer() instanceof OptionHandler) { spec += " " + Utils.joinOptions(((OptionHandler) getTokenizer()).getOptions()); } options.add(spec.trim()); if (getStemmer() != null) { options.add("-stemmer"); spec = getStemmer().getClass().getName(); if (getStemmer() instanceof OptionHandler) { spec += " " + Utils.joinOptions(((OptionHandler) getStemmer()).getOptions()); } options.add(spec.trim()); } Collections.addAll(options, super.getOptions()); return options.toArray(new String[1]); } /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter * gui */ public String globalInfo() { return "Implements stochastic gradient descent for learning" + " a linear binary class SVM or binary class" + " logistic regression on text data. Operates directly (and only) " + "on String attributes. 
Other types of input attributes are accepted " + "but ignored during training and classification."; } /** * Reset the classifier. */ public void reset() { m_t = 1; m_bias = 0; m_dictionary = null; } /** * Method for building the classifier. * * @param data the set of training instances. * @throws Exception if the classifier can't be built successfully. */ @Override public void buildClassifier(Instances data) throws Exception { reset(); /* * boolean hasString = false; for (int i = 0; i < data.numAttributes(); i++) * { if (data.attribute(i).isString() && data.classIndex() != i) { hasString * = true; break; } } * * if (!hasString) { throw new * Exception("Incoming data does not have any string attributes!"); } */ // can classifier handle the data? getCapabilities().testWithFail(data); m_dictionary = new LinkedHashMap<String, Count>(10000); m_numInstances = data.numInstances(); m_data = new Instances(data, 0); data = new Instances(data); if (m_fitLogistic && m_loss == HINGE) { initializeSVMProbs(data); } if (data.numInstances() > 0) { data.randomize(new Random(getSeed())); train(data); pruneDictionary(true); } } protected void initializeSVMProbs(Instances data) throws Exception { m_svmProbs = new SGD(); m_svmProbs.setLossFunction(new SelectedTag(SGD.LOGLOSS, TAGS_SELECTION)); m_svmProbs.setLearningRate(m_learningRate); m_svmProbs.setLambda(m_lambda); m_svmProbs.setEpochs(m_epochs); ArrayList<Attribute> atts = new ArrayList<Attribute>(2); atts.add(new Attribute("pred")); ArrayList<String> attVals = new ArrayList<String>(2); attVals.add(data.classAttribute().value(0)); attVals.add(data.classAttribute().value(1)); atts.add(new Attribute("class", attVals)); m_fitLogisticStructure = new Instances("data", atts, 0); m_fitLogisticStructure.setClassIndex(1); m_svmProbs.buildClassifier(m_fitLogisticStructure); } protected void train(Instances data) throws Exception { for (int e = 0; e < m_epochs; e++) { for (int i = 0; i < data.numInstances(); i++) { if (e == 0) { updateClassifier(data.instance(i), true); } else { updateClassifier(data.instance(i), false); } } } } /** * Updates the classifier with the given instance. * * @param instance the new training instance to include in the model * @exception Exception if the instance could not be incorporated in the * model. */ @Override public void updateClassifier(Instance instance) throws Exception { updateClassifier(instance, true); } protected void updateClassifier(Instance instance, boolean updateDictionary) throws Exception { if (!instance.classIsMissing()) { // tokenize tokenizeInstance(instance, updateDictionary); // make a meta instance for the logistic model before we update // the SVM if (m_loss == HINGE && m_fitLogistic) { double pred = svmOutput(); double[] vals = new double[2]; vals[0] = pred; vals[1] = instance.classValue(); DenseInstance metaI = new DenseInstance(instance.weight(), vals); metaI.setDataset(m_fitLogisticStructure); m_svmProbs.updateClassifier(metaI); } // --- double wx = dotProd(m_inputVector); double y = (instance.classValue() == 0) ? 
-1 : 1; double z = y * (wx + m_bias); // Compute multiplier for weight decay double multiplier = 1.0; if (m_numInstances == 0) { multiplier = 1.0 - (m_learningRate * m_lambda) / m_t; } else { multiplier = 1.0 - (m_learningRate * m_lambda) / m_numInstances; } for (Map.Entry<String, Count> c : m_dictionary.entrySet()) { c.getValue().m_weight *= multiplier; } // Only need to do the following if the loss is non-zero if (m_loss != HINGE || (z < 1)) { // Compute Factor for updates double dloss = dloss(z); double factor = m_learningRate * y * dloss; // Update coefficients for attributes for (Map.Entry<String, Count> feature : m_inputVector.entrySet()) { String word = feature.getKey(); double value = (m_wordFrequencies) ? feature.getValue().m_count : 1; Count c = m_dictionary.get(word); if (c != null) { c.m_weight += factor * value; } } // update the bias m_bias += factor; } m_t++; } } protected void tokenizeInstance(Instance instance, boolean updateDictionary) { if (m_inputVector == null) { m_inputVector = new LinkedHashMap<String, Count>(); } else { m_inputVector.clear(); } for (int i = 0; i < instance.numAttributes(); i++) { if (instance.attribute(i).isString() && !instance.isMissing(i)) { m_tokenizer.tokenize(instance.stringValue(i)); while (m_tokenizer.hasMoreElements()) { String word = m_tokenizer.nextElement(); if (m_lowercaseTokens) { word = word.toLowerCase(); } word = m_stemmer.stem(word); if (m_StopwordsHandler.isStopword(word)) { continue; } Count docCount = m_inputVector.get(word); if (docCount == null) { m_inputVector.put(word, new Count(instance.weight())); } else { docCount.m_count += instance.weight(); } if (updateDictionary) { Count count = m_dictionary.get(word); if (count == null) { m_dictionary.put(word, new Count(instance.weight())); } else { count.m_count += instance.weight(); } } } } } if (updateDictionary) { pruneDictionary(false); } } protected void pruneDictionary(boolean force) { if ((m_periodicP <= 0 || m_t % m_periodicP > 0) && !force) { return; } Iterator<Map.Entry<String, Count>> entries = m_dictionary.entrySet() .iterator(); while (entries.hasNext()) { Map.Entry<String, Count> entry = entries.next(); if (entry.getValue().m_count < m_minWordP || Math.abs(entry.getValue().m_weight) < m_minAbsCoefficient) { entries.remove(); } } } protected double svmOutput() { double wx = dotProd(m_inputVector); double z = (wx + m_bias); return z; } @Override public double[] distributionForInstance(Instance inst) throws Exception { double[] result = new double[2]; tokenizeInstance(inst, false); double wx = dotProd(m_inputVector); double z = (wx + m_bias); if (m_loss == HINGE && m_fitLogistic) { double pred = z; double[] vals = new double[2]; vals[0] = pred; vals[1] = Utils.missingValue(); DenseInstance metaI = new DenseInstance(inst.weight(), vals); metaI.setDataset(m_fitLogisticStructure); return m_svmProbs.distributionForInstance(metaI); } if (z <= 0) { if (m_loss == LOGLOSS) { result[0] = 1.0 / (1.0 + Math.exp(z)); result[1] = 1.0 - result[0]; } else { result[0] = 1; } } else { if (m_loss == LOGLOSS) { result[1] = 1.0 / (1.0 + Math.exp(-z)); result[0] = 1.0 - result[1]; } else { result[1] = 1; } } return result; } protected double dotProd(Map<String, Count> document) { double result = 0; // document normalization double iNorm = 0; double fv = 0; if (m_normalize) { for (Count c : document.values()) { // word counts or bag-of-words? fv = (m_wordFrequencies) ? 
c.m_count : 1.0; iNorm += Math.pow(Math.abs(fv), m_lnorm); } iNorm = Math.pow(iNorm, 1.0 / m_lnorm); } for (Map.Entry<String, Count> feature : document.entrySet()) { String word = feature.getKey(); double freq = (m_wordFrequencies) ? feature.getValue().m_count : 1.0; // double freq = (feature.getValue().m_count / iNorm * m_norm); if (m_normalize) { freq *= (m_norm / iNorm); } Count weight = m_dictionary.get(word); if (weight != null && weight.m_count >= m_minWordP && Math.abs(weight.m_weight) >= m_minAbsCoefficient) { result += freq * weight.m_weight; } } return result; } @Override public String toString() { if (m_dictionary == null) { return "SGDText: No model built yet.\n"; } StringBuffer buff = new StringBuffer(); buff.append("SGDText:\n\n"); buff.append("Loss function: "); if (m_loss == HINGE) { buff.append("Hinge loss (SVM)\n\n"); } else { buff.append("Log loss (logistic regression)\n\n"); } int dictSize = 0; Iterator<Map.Entry<String, Count>> entries = m_dictionary.entrySet() .iterator(); while (entries.hasNext()) { Map.Entry<String, Count> entry = entries.next(); if (entry.getValue().m_count >= m_minWordP && Math.abs(entry.getValue().m_weight) >= m_minAbsCoefficient) { dictSize++; } } buff.append("Dictionary size: " + dictSize + "\n\n"); buff.append(m_data.classAttribute().name() + " = \n\n"); int printed = 0; entries = m_dictionary.entrySet().iterator(); while (entries.hasNext()) { Map.Entry<String, Count> entry = entries.next(); if (entry.getValue().m_count >= m_minWordP && Math.abs(entry.getValue().m_weight) >= m_minAbsCoefficient) { if (printed > 0) { buff.append(" + "); } else { buff.append(" "); } buff.append(Utils.doubleToString(entry.getValue().m_weight, 12, 4) + " " + entry.getKey() + " " + entry.getValue().m_count + "\n"); printed++; } } if (m_bias > 0) { buff.append(" + " + Utils.doubleToString(m_bias, 12, 4)); } else { buff.append(" - " + Utils.doubleToString(-m_bias, 12, 4)); } return buff.toString(); } /** * Get this model's dictionary (including term weights). * * @return this model's dictionary. */ public LinkedHashMap<String, Count> getDictionary() { return m_dictionary; } /** * Return the size of the dictionary (minus any low frequency terms that are * below the threshold but haven't been pruned yet). * * @return the size of the dictionary. */ public int getDictionarySize() { int size = 0; if (m_dictionary != null) { Iterator<Map.Entry<String, Count>> entries = m_dictionary.entrySet() .iterator(); while (entries.hasNext()) { Map.Entry<String, Count> entry = entries.next(); if (entry.getValue().m_count >= m_minWordP && Math.abs(entry.getValue().m_weight) >= m_minAbsCoefficient) { size++; } } } return size; } public double bias() { return m_bias; } public void setBias(double bias) { m_bias = bias; } /** * Returns the revision string. 
* * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } protected int m_numModels = 0; /** * Aggregate an object with this one * * @param toAggregate the object to aggregate * @return the result of aggregation * @throws Exception if the supplied object can't be aggregated for some * reason */ @Override public SGDText aggregate(SGDText toAggregate) throws Exception { if (m_dictionary == null) { throw new Exception("No model built yet, can't aggregate"); } LinkedHashMap<String, SGDText.Count> tempDict = toAggregate.getDictionary(); Iterator<Map.Entry<String, SGDText.Count>> entries = tempDict.entrySet() .iterator(); while (entries.hasNext()) { Map.Entry<String, SGDText.Count> entry = entries.next(); Count masterCount = m_dictionary.get(entry.getKey()); if (masterCount == null) { // we haven't seen this term (or it has been pruned) masterCount = new Count(entry.getValue().m_count); masterCount.m_weight = entry.getValue().m_weight; m_dictionary.put(entry.getKey(), masterCount); } else { // add up masterCount.m_count += entry.getValue().m_count; masterCount.m_weight += entry.getValue().m_weight; } } m_bias += toAggregate.bias(); m_numModels++; return this; } /** * Call to complete the aggregation process. Allows implementers to do any * final processing based on how many objects were aggregated. * * @throws Exception if the aggregation can't be finalized for some reason */ @Override public void finalizeAggregation() throws Exception { if (m_numModels == 0) { throw new Exception("Unable to finalize aggregation - " + "haven't seen any models to aggregate"); } pruneDictionary(true); Iterator<Map.Entry<String, SGDText.Count>> entries = m_dictionary .entrySet().iterator(); while (entries.hasNext()) { Map.Entry<String, Count> entry = entries.next(); entry.getValue().m_count /= (m_numModels + 1); // plus one for us entry.getValue().m_weight /= (m_numModels + 1); } m_bias /= (m_numModels + 1); // aggregation complete m_numModels = 0; } @Override public void batchFinished() throws Exception { pruneDictionary(true); } /** * Main method for testing this class. */ public static void main(String[] args) { runClassifier(new SGDText(), args); } }
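// ------------------------------------------------------------------
// Usage sketch (added for illustration; not part of the original
// source). Shows one plausible way to configure and train SGDText
// from code. The ARFF file name "reviews.arff" and the choice of the
// last attribute as the class are placeholder assumptions, and the
// SGDText.LOGLOSS / SGDText.TAGS_SELECTION constants are assumed to
// be public as in stock Weka.
// ------------------------------------------------------------------
class SGDTextUsageSketch {
  public static void main(String[] args) throws Exception {
    // Load a text dataset (hypothetical ARFF file with a String attribute).
    weka.core.Instances data =
      new weka.core.converters.ConverterUtils.DataSource("reviews.arff").read();
    data.setClassIndex(data.numAttributes() - 1);

    SGDText model = new SGDText();
    // Log loss yields logistic-regression-style probabilities;
    // hinge loss (the default) yields a linear SVM instead.
    model.setLossFunction(new weka.core.SelectedTag(SGDText.LOGLOSS, SGDText.TAGS_SELECTION));
    model.setLearningRate(0.01); // documented default
    model.setEpochs(500);        // documented default
    model.setLowercaseTokens(true);
    model.buildClassifier(data);
    System.out.println(model);
  }
}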
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/SMO.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * SMO.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions; import java.io.File; import java.io.FileReader; import java.io.Serializable; import java.util.ArrayList; import java.util.Collections; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.functions.supportVector.Kernel; import weka.classifiers.functions.supportVector.PolyKernel; import weka.classifiers.functions.supportVector.SMOset; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.DenseInstance; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.filters.Filter; import weka.filters.unsupervised.attribute.NominalToBinary; import weka.filters.unsupervised.attribute.Normalize; import weka.filters.unsupervised.attribute.ReplaceMissingValues; import weka.filters.unsupervised.attribute.Standardize; /** * <!-- globalinfo-start --> Implements John Platt's sequential minimal optimization algorithm for training a support vector classifier.<br> * <br> * This implementation globally replaces all missing values and transforms nominal attributes into binary ones. It also normalizes all attributes by default. (In that case the coefficients in the output are based on the normalized data, not * the original data --- this is important for interpreting the classifier.)<br> * <br> * Multi-class problems are solved using pairwise classification (aka 1-vs-1).<br> * <br> * To obtain proper probability estimates, use the option that fits calibration models to the outputs of the support vector machine. In the multi-class case, the predicted probabilities are coupled using Hastie and Tibshirani's pairwise * coupling method.<br> * <br> * Note: for improved speed normalization should be turned off when operating on SparseInstances.<br> * <br> * For more information on the SMO algorithm, see<br> * <br> * J. Platt: Fast Training of Support Vector Machines using Sequential Minimal Optimization. In B. Schoelkopf and C. Burges and A. Smola, editors, Advances in Kernel Methods - Support Vector Learning, 1998.<br> * <br> * S.S. Keerthi, S.K. Shevade, C. Bhattacharyya, K.R.K. Murthy (2001). Improvements to Platt's SMO Algorithm for SVM Classifier Design. Neural Computation. 13(3):637-649.<br> * <br> * Trevor Hastie, Robert Tibshirani: Classification by Pairwise Coupling. 
In: Advances in Neural Information Processing Systems, 1998. <br> * <br> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> &#64;incollection{Platt1998, author = {J. Platt}, booktitle = {Advances in Kernel Methods - Support Vector Learning}, editor = {B. Schoelkopf and C. Burges and A. Smola}, publisher = {MIT Press}, title = {Fast Training of Support Vector Machines using Sequential Minimal Optimization}, year = {1998}, URL = {http://research.microsoft.com/\~jplatt/smo.html}, PS = {http://research.microsoft.com/\~jplatt/smo-book.ps.gz}, PDF = {http://research.microsoft.com/\~jplatt/smo-book.pdf} } &#64;article{Keerthi2001, author = {S.S. Keerthi and S.K. Shevade and C. Bhattacharyya and K.R.K. Murthy}, journal = {Neural Computation}, number = {3}, pages = {637-649}, title = {Improvements to Platt's SMO Algorithm for SVM Classifier Design}, volume = {13}, year = {2001}, PS = {http://guppy.mpe.nus.edu.sg/\~mpessk/svm/smo_mod_nc.ps.gz} } &#64;inproceedings{Hastie1998, author = {Trevor Hastie and Robert Tibshirani}, booktitle = {Advances in Neural Information Processing Systems}, editor = {Michael I. Jordan and Michael J. Kearns and Sara A. Solla}, publisher = {MIT Press}, title = {Classification by Pairwise Coupling}, volume = {10}, year = {1998}, PS = {http://www-stat.stanford.edu/\~hastie/Papers/2class.ps} } * </pre> * * <br> * <br> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p> * * <pre> * -no-checks Turns off all checks - use with caution! Turning them off assumes that data is purely numeric, doesn't contain any missing values, and has a nominal class. Turning them off also means that no header information will be stored if the machine is linear. Finally, it also assumes that no instance has a weight equal to 0. (default: checks on) * </pre> * * <pre> * -C &lt;double&gt; The complexity constant C. (default 1) * </pre> * * <pre> * -N Whether to 0=normalize/1=standardize/2=neither. (default 0=normalize) * </pre> * * <pre> * -L &lt;double&gt; The tolerance parameter. (default 1.0e-3) * </pre> * * <pre> * -P &lt;double&gt; The epsilon for round-off error. (default 1.0e-12) * </pre> * * <pre> * -M Fit calibration models to SVM outputs. * </pre> * * <pre> * -V &lt;double&gt; The number of folds for the internal cross-validation. (default -1, use training data) * </pre> * * <pre> * -W &lt;double&gt; The random number seed. (default 1) * </pre> * * <pre> * -K &lt;classname and parameters&gt; The Kernel to use. (default: weka.classifiers.functions.supportVector.PolyKernel) * </pre> * * <pre> * -calibrator &lt;scheme specification&gt; Full name of calibration model, followed by options. (default: "weka.classifiers.functions.Logistic") * </pre> * * <pre> * -output-debug-info If set, classifier is run in debug mode and may output additional info to the console * </pre> * * <pre> * -do-not-check-capabilities If set, classifier capabilities are not checked before classifier is built (use with caution). * </pre> * * <pre> * -num-decimal-places The number of decimal places for the output of numbers in the model (default 2). * </pre> * * <pre> * Options specific to kernel weka.classifiers.functions.supportVector.PolyKernel: * </pre> * * <pre> * -E &lt;num&gt; The Exponent to use. (default: 1.0) * </pre> * * <pre> * -L Use lower-order terms. (default: no) * </pre> * * <pre> * -C &lt;num&gt; The size of the cache (a prime number), 0 for full cache and -1 to turn it off. 
(default: 250007) * </pre> * * <pre> * -output-debug-info Enables debugging output (if available) to be printed. (default: off) * </pre> * * <pre> * -no-checks Turns off all checks - use with caution! (default: checks on) * </pre> * * <pre> * Options specific to calibrator weka.classifiers.functions.Logistic: * </pre> * * <pre> * -C Use conjugate gradient descent rather than BFGS updates. * </pre> * * <pre> * -R &lt;ridge&gt; Set the ridge in the log-likelihood. * </pre> * * <pre> * -M &lt;number&gt; Set the maximum number of iterations (default -1, until convergence). * </pre> * * <pre> * -output-debug-info If set, classifier is run in debug mode and may output additional info to the console * </pre> * * <pre> * -do-not-check-capabilities If set, classifier capabilities are not checked before classifier is built (use with caution). * </pre> * * <pre> * -num-decimal-places The number of decimal places for the output of numbers in the model (default 2). * </pre> * * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Shane Legg (shane@intelligenesis.net) (sparse vector code) * @author Stuart Inglis (stuart@reeltwo.com) (sparse vector code) * @version $Revision$ */ public class SMO extends AbstractClassifier implements WeightedInstancesHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -6585883636378691736L; /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "Implements John Platt's sequential minimal optimization " + "algorithm for training a support vector classifier.\n\n" + "This implementation globally replaces all missing values and " + "transforms nominal attributes into binary ones. It also " + "normalizes all attributes by default. (In that case the coefficients " + "in the output are based on the normalized data, not the " + "original data --- this is important for interpreting the classifier.)\n\n" + "Multi-class problems are solved using pairwise classification (aka 1-vs-1).\n\n" + "To obtain proper probability estimates, use the option that fits " + "calibration models to the outputs of the support vector " + "machine. In the multi-class case, the predicted probabilities " + "are coupled using Hastie and Tibshirani's pairwise coupling " + "method.\n\n" + "Note: for improved speed normalization should be turned off when " + "operating on SparseInstances.\n\n" + "For more information on the SMO algorithm, see\n\n" + this.getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed information about the technical background of this class, e.g., paper reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.INCOLLECTION); result.setValue(Field.AUTHOR, "J. Platt"); result.setValue(Field.YEAR, "1998"); result.setValue(Field.TITLE, "Fast Training of Support Vector Machines using Sequential Minimal Optimization"); result.setValue(Field.BOOKTITLE, "Advances in Kernel Methods - Support Vector Learning"); result.setValue(Field.EDITOR, "B. Schoelkopf and C. Burges and A. 
Smola"); result.setValue(Field.PUBLISHER, "MIT Press"); result.setValue(Field.URL, "http://research.microsoft.com/~jplatt/smo.html"); result.setValue(Field.PDF, "http://research.microsoft.com/~jplatt/smo-book.pdf"); result.setValue(Field.PS, "http://research.microsoft.com/~jplatt/smo-book.ps.gz"); additional = result.add(Type.ARTICLE); additional.setValue(Field.AUTHOR, "S.S. Keerthi and S.K. Shevade and C. Bhattacharyya and K.R.K. Murthy"); additional.setValue(Field.YEAR, "2001"); additional.setValue(Field.TITLE, "Improvements to Platt's SMO Algorithm for SVM Classifier Design"); additional.setValue(Field.JOURNAL, "Neural Computation"); additional.setValue(Field.VOLUME, "13"); additional.setValue(Field.NUMBER, "3"); additional.setValue(Field.PAGES, "637-649"); additional.setValue(Field.PS, "http://guppy.mpe.nus.edu.sg/~mpessk/svm/smo_mod_nc.ps.gz"); additional = result.add(Type.INPROCEEDINGS); additional.setValue(Field.AUTHOR, "Trevor Hastie and Robert Tibshirani"); additional.setValue(Field.YEAR, "1998"); additional.setValue(Field.TITLE, "Classification by Pairwise Coupling"); additional.setValue(Field.BOOKTITLE, "Advances in Neural Information Processing Systems"); additional.setValue(Field.VOLUME, "10"); additional.setValue(Field.PUBLISHER, "MIT Press"); additional.setValue(Field.EDITOR, "Michael I. Jordan and Michael J. Kearns and Sara A. Solla"); additional.setValue(Field.PS, "http://www-stat.stanford.edu/~hastie/Papers/2class.ps"); return result; } /** * Class for building a binary support vector machine. */ public class BinarySMO implements Serializable { /** for serialization */ static final long serialVersionUID = -8246163625699362456L; /** The Lagrange multipliers. */ protected double[] m_alpha; /** The thresholds. */ protected double m_b, m_bLow, m_bUp; /** The indices for m_bLow and m_bUp */ protected int m_iLow, m_iUp; /** The training data. */ protected Instances m_data; /** Weight vector for linear machine. */ protected double[] m_weights; /** Variables to hold weight vector in sparse form. (To reduce storage requirements.) */ protected double[] m_sparseWeights; protected int[] m_sparseIndices; /** Kernel to use **/ protected Kernel m_kernel; /** The transformed class values. */ protected double[] m_class; /** The current set of errors for all non-bound examples. */ protected double[] m_errors; /* The five different sets used by the algorithm. */ /** {i: 0 < m_alpha[i] < C} */ protected SMOset m_I0; /** {i: m_class[i] = 1, m_alpha[i] = 0} */ protected SMOset m_I1; /** {i: m_class[i] = -1, m_alpha[i] =C} */ protected SMOset m_I2; /** {i: m_class[i] = 1, m_alpha[i] = C} */ protected SMOset m_I3; /** {i: m_class[i] = -1, m_alpha[i] = 0} */ protected SMOset m_I4; /** The set of support vectors */ protected SMOset m_supportVectors; // {i: 0 < m_alpha[i]} /** Stores calibrator model for probability estimate */ protected Classifier m_calibrator = null; /** Reference to the header information for the calibration data */ protected Instances m_calibrationDataHeader = null; /** Stores the weight of the training instances */ protected double m_sumOfWeights = 0; /** number of kernel evaluations, used for printing statistics only **/ protected long m_nEvals = -1; /** number of kernel cache hits, used for printing statistics only **/ protected int m_nCacheHits = -1; /** * Fits calibrator model to SVM's output, so that reasonable probability estimates can be produced. If numFolds > 0, cross-validation is used to generate the training data for the calibrator. 
* * @param insts * the set of training instances * @param cl1 * the first class' index * @param cl2 * the second class' index * @param numFolds * the number of folds for cross-validation * @param random * for randomizing the data * @throws Exception * if the calibrator can't be fit successfully */ protected void fitCalibrator(Instances insts, final int cl1, final int cl2, int numFolds, final Random random) throws Exception { // Create header of instances object ArrayList<Attribute> atts = new ArrayList<>(2); atts.add(new Attribute("pred")); ArrayList<String> attVals = new ArrayList<>(2); attVals.add(insts.classAttribute().value(cl1)); attVals.add(insts.classAttribute().value(cl2)); atts.add(new Attribute("class", attVals)); Instances data = new Instances("data", atts, insts.numInstances()); data.setClassIndex(1); this.m_calibrationDataHeader = data; // Collect data for fitting the calibration model if (numFolds <= 0) { // Use training data for (int j = 0; j < insts.numInstances(); j++) { Instance inst = insts.instance(j); double[] vals = new double[2]; vals[0] = this.SVMOutput(-1, inst); if (inst.classValue() == cl2) { vals[1] = 1; } data.add(new DenseInstance(inst.weight(), vals)); } } else { // Check whether number of folds too large if (numFolds > insts.numInstances()) { numFolds = insts.numInstances(); } // Make copy of instances because we will shuffle them around insts = new Instances(insts); // Perform numFolds-fold cross-validation to collect // unbiased predictions insts.randomize(random); insts.stratify(numFolds); for (int i = 0; i < numFolds; i++) { Instances train = insts.trainCV(numFolds, i, random); /* * SerializedObject so = new SerializedObject(this); BinarySMO smo = (BinarySMO)so.getObject(); */ BinarySMO smo = new BinarySMO(); smo.setKernel(Kernel.makeCopy(SMO.this.m_kernel)); smo.buildClassifier(train, cl1, cl2, false, -1, -1); Instances test = insts.testCV(numFolds, i); for (int j = 0; j < test.numInstances(); j++) { double[] vals = new double[2]; vals[0] = smo.SVMOutput(-1, test.instance(j)); if (test.instance(j).classValue() == cl2) { vals[1] = 1; } data.add(new DenseInstance(test.instance(j).weight(), vals)); } } } // Build calibration model this.m_calibrator = AbstractClassifier.makeCopy(SMO.this.getCalibrator()); this.m_calibrator.buildClassifier(data); } /** * Sets the kernel to use * * @param value * the kernel to use */ public void setKernel(final Kernel value) { this.m_kernel = value; } /** * Returns the kernel to use * * @return the current kernel */ public Kernel getKernel() { return this.m_kernel; } /** * Method for building the binary classifier.
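 * <p>
 * Note (added for clarity): the implementation follows Keerthi et al.'s variant of SMO, maintaining the
 * dual thresholds bUp/bLow and the index sets I0..I4 instead of a single threshold; the final bias b is
 * only fixed to (bLow + bUp)/2 once the outer loop over the examples has converged.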
* * @param insts * the set of training instances * @param cl1 * the first class' index * @param cl2 * the second class' index * @param fitCalibrator * true if calibrator model is to be fit * @param numFolds * number of folds for internal cross-validation * @param randomSeed * random number generator for cross-validation * @throws Exception * if the classifier can't be built successfully */ protected void buildClassifier(final Instances insts, final int cl1, final int cl2, final boolean fitCalibrator, final int numFolds, final int randomSeed) throws Exception { // Initialize some variables this.m_bUp = -1; this.m_bLow = 1; this.m_b = 0; this.m_alpha = null; this.m_data = null; this.m_weights = null; this.m_errors = null; this.m_calibrator = null; this.m_I0 = null; this.m_I1 = null; this.m_I2 = null; this.m_I3 = null; this.m_I4 = null; this.m_sparseWeights = null; this.m_sparseIndices = null; // Store the sum of weights this.m_sumOfWeights = insts.sumOfWeights(); // Set class values this.m_class = new double[insts.numInstances()]; this.m_iUp = -1; this.m_iLow = -1; for (int i = 0; i < this.m_class.length; i++) { if ((int) insts.instance(i).classValue() == cl1) { this.m_class[i] = -1; this.m_iLow = i; } else if ((int) insts.instance(i).classValue() == cl2) { this.m_class[i] = 1; this.m_iUp = i; } else { throw new Exception("This should never happen!"); } } // Check whether one or both classes are missing if ((this.m_iUp == -1) || (this.m_iLow == -1)) { if (this.m_iUp != -1) { this.m_b = -1; } else if (this.m_iLow != -1) { this.m_b = 1; } else { this.m_class = null; return; } if (SMO.this.m_KernelIsLinear) { this.m_sparseWeights = new double[0]; this.m_sparseIndices = new int[0]; this.m_class = null; } else { this.m_supportVectors = new SMOset(0); this.m_alpha = new double[0]; this.m_class = new double[0]; } // Fit sigmoid if requested if (fitCalibrator) { this.fitCalibrator(insts, cl1, cl2, numFolds, new Random(randomSeed)); } return; } // Set the reference to the data this.m_data = insts; // If machine is linear, reserve space for weights if (SMO.this.m_KernelIsLinear) { this.m_weights = new double[this.m_data.numAttributes()]; } else { this.m_weights = null; } // Initialize alpha array to zero this.m_alpha = new double[this.m_data.numInstances()]; // Initialize sets this.m_supportVectors = new SMOset(this.m_data.numInstances()); this.m_I0 = new SMOset(this.m_data.numInstances()); this.m_I1 = new SMOset(this.m_data.numInstances()); this.m_I2 = new SMOset(this.m_data.numInstances()); this.m_I3 = new SMOset(this.m_data.numInstances()); this.m_I4 = new SMOset(this.m_data.numInstances()); // Clean out some instance variables this.m_sparseWeights = null; this.m_sparseIndices = null; // init kernel this.m_kernel.buildKernel(this.m_data); // Initialize error cache this.m_errors = new double[this.m_data.numInstances()]; this.m_errors[this.m_iLow] = 1; this.m_errors[this.m_iUp] = -1; // Build up I1 and I4 for (int i = 0; i < this.m_class.length; i++) { if (this.m_class[i] == 1) { this.m_I1.insert(i); } else { this.m_I4.insert(i); } } // Loop to find all the support vectors int numChanged = 0; boolean examineAll = true; while ((numChanged > 0) || examineAll) { numChanged = 0; if (examineAll) { for (int i = 0; i < this.m_alpha.length; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (this.examineExample(i)) { numChanged++; } } } else { // This code implements Modification 1 from Keerthi et al.'s paper for (int 
i = 0; i < this.m_alpha.length; i++) { if ((this.m_alpha[i] > 0) && (this.m_alpha[i] < SMO.this.m_C * this.m_data.instance(i).weight())) { if (this.examineExample(i)) { numChanged++; } // Is optimality on unbound vectors obtained? if (this.m_bUp > this.m_bLow - 2 * SMO.this.m_tol) { numChanged = 0; break; } } } // This is the code for Modification 2 from Keerthi et al.'s paper /* * boolean innerLoopSuccess = true; numChanged = 0; while ((m_bUp < m_bLow - 2 * m_tol) && * (innerLoopSuccess == true)) { innerLoopSuccess = takeStep(m_iUp, m_iLow, m_errors[m_iLow]); } */ } if (examineAll) { examineAll = false; } else if (numChanged == 0) { examineAll = true; } } // Set threshold this.m_b = (this.m_bLow + this.m_bUp) / 2.0; // Save some stats this.m_nEvals = this.m_kernel.numEvals(); this.m_nCacheHits = this.m_kernel.numCacheHits(); // Save memory if (SMO.this.m_KernelIsLinear) { this.m_kernel = null; } else { this.m_kernel.clean(); } this.m_errors = null; this.m_I0 = this.m_I1 = this.m_I2 = this.m_I3 = this.m_I4 = null; // If machine is linear, delete training data // and store weight vector in sparse format if (SMO.this.m_KernelIsLinear) { // We don't need to store the set of support vectors this.m_supportVectors = null; // We don't need to store the class values either this.m_class = null; // Clean out training data if (!SMO.this.m_checksTurnedOff) { this.m_data = new Instances(this.m_data, 0); } else { this.m_data = null; } // Convert weight vector double[] sparseWeights = new double[this.m_weights.length]; int[] sparseIndices = new int[this.m_weights.length]; int counter = 0; for (int i = 0; i < this.m_weights.length; i++) { if (this.m_weights[i] != 0.0) { sparseWeights[counter] = this.m_weights[i]; sparseIndices[counter] = i; counter++; } } this.m_sparseWeights = new double[counter]; this.m_sparseIndices = new int[counter]; System.arraycopy(sparseWeights, 0, this.m_sparseWeights, 0, counter); System.arraycopy(sparseIndices, 0, this.m_sparseIndices, 0, counter); // Clean out weight vector this.m_weights = null; // We don't need the alphas in the linear case this.m_alpha = null; } // Fit sigmoid if requested if (fitCalibrator) { this.fitCalibrator(insts, cl1, cl2, numFolds, new Random(randomSeed)); } } /** * Computes SVM output for given instance. * * @param index * the instance for which output is to be computed * @param inst * the instance * @return the output of the SVM for the given instance * @throws Exception * in case of an error */ public double SVMOutput(final int index, final Instance inst) throws Exception { double result = 0; // Is the machine linear? if (SMO.this.m_KernelIsLinear) { // Is weight vector stored in sparse format? 
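// (Added note: m_sparseWeights is still null while training is in progress, so the dense m_weights
// array is used directly; once buildClassifier has finished, a linear machine keeps only the non-zero
// weights and their sorted attribute indices, and the else-branch below walks both sorted index lists
// in a merge-join.)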
if (this.m_sparseWeights == null) { int n1 = inst.numValues(); for (int p = 0; p < n1; p++) { if (inst.index(p) != SMO.this.m_classIndex) { result += this.m_weights[inst.index(p)] * inst.valueSparse(p); } } } else { int n1 = inst.numValues(); int n2 = this.m_sparseWeights.length; for (int p1 = 0, p2 = 0; p1 < n1 && p2 < n2;) { int ind1 = inst.index(p1); int ind2 = this.m_sparseIndices[p2]; if (ind1 == ind2) { if (ind1 != SMO.this.m_classIndex) { result += inst.valueSparse(p1) * this.m_sparseWeights[p2]; } p1++; p2++; } else if (ind1 > ind2) { p2++; } else { p1++; } } } } else { for (int i = this.m_supportVectors.getNext(-1); i != -1; i = this.m_supportVectors.getNext(i)) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } result += this.m_class[i] * this.m_alpha[i] * this.m_kernel.eval(index, i, inst); } } result -= this.m_b; return result; } /** * Prints out the classifier. * * @return a description of the classifier as a string */ @Override public String toString() { StringBuffer text = new StringBuffer(); int printed = 0; if ((this.m_alpha == null) && (this.m_sparseWeights == null)) { return "BinarySMO: No model built yet.\n"; } try { text.append("BinarySMO\n\n"); // If machine linear, print weight vector if (SMO.this.m_KernelIsLinear) { text.append("Machine linear: showing attribute weights, "); text.append("not support vectors.\n\n"); // We can assume that the weight vector is stored in sparse // format because the classifier has been built for (int i = 0; i < this.m_sparseWeights.length; i++) { if (this.m_sparseIndices[i] != SMO.this.m_classIndex) { if (printed > 0) { text.append(" + "); } else { text.append(" "); } text.append(Utils.doubleToString(this.m_sparseWeights[i], 12, 4) + " * "); if (SMO.this.m_filterType == FILTER_STANDARDIZE) { text.append("(standardized) "); } else if (SMO.this.m_filterType == FILTER_NORMALIZE) { text.append("(normalized) "); } if (!SMO.this.m_checksTurnedOff) { text.append(this.m_data.attribute(this.m_sparseIndices[i]).name() + "\n"); } else { text.append("attribute with index " + this.m_sparseIndices[i] + "\n"); } printed++; } } } else { for (int i = 0; i < this.m_alpha.length; i++) { if (this.m_supportVectors.contains(i)) { double val = this.m_alpha[i]; if (this.m_class[i] == 1) { if (printed > 0) { text.append(" + "); } } else { text.append(" - "); } text.append(Utils.doubleToString(val, 12, 4) + " * <"); for (int j = 0; j < this.m_data.numAttributes(); j++) { if (j != this.m_data.classIndex()) { text.append(this.m_data.instance(i).toString(j)); } if (j != this.m_data.numAttributes() - 1) { text.append(" "); } } text.append("> * X]\n"); printed++; } } } if (this.m_b > 0) { text.append(" - " + Utils.doubleToString(this.m_b, 12, 4)); } else { text.append(" + " + Utils.doubleToString(-this.m_b, 12, 4)); } if (!SMO.this.m_KernelIsLinear) { text.append("\n\nNumber of support vectors: " + this.m_supportVectors.numElements()); } long numEval = this.m_nEvals; int numCacheHits = this.m_nCacheHits; text.append("\n\nNumber of kernel evaluations: " + numEval); if (numCacheHits >= 0 && numEval > 0) { double hitRatio = 1 - numEval * 1.0 / (numCacheHits + numEval); text.append(" (" + Utils.doubleToString(hitRatio * 100, 7, 3).trim() + "% cached)"); } } catch (Exception e) { e.printStackTrace(); return "Can't print BinarySMO classifier."; } return text.toString(); } /** * Examines instance. 
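 * <p>
 * Note (added for clarity): checks whether instance i2 violates the KKT conditions with respect to the
 * current thresholds bUp/bLow (within a tolerance of 2 * m_tol) and, if so, selects a partner index i1
 * and attempts a joint optimization step via takeStep.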
* * @param i2 * index of instance to examine * @return true if examination was successful * @throws Exception * if something goes wrong */ protected boolean examineExample(final int i2) throws Exception { double y2, F2; int i1 = -1; y2 = this.m_class[i2]; if (this.m_I0.contains(i2)) { F2 = this.m_errors[i2]; } else { F2 = this.SVMOutput(i2, this.m_data.instance(i2)) + this.m_b - y2; this.m_errors[i2] = F2; // Update thresholds if ((this.m_I1.contains(i2) || this.m_I2.contains(i2)) && (F2 < this.m_bUp)) { this.m_bUp = F2; this.m_iUp = i2; } else if ((this.m_I3.contains(i2) || this.m_I4.contains(i2)) && (F2 > this.m_bLow)) { this.m_bLow = F2; this.m_iLow = i2; } } // Check optimality using current bLow and bUp and, if // violated, find an index i1 to do joint optimization // with i2... boolean optimal = true; if (this.m_I0.contains(i2) || this.m_I1.contains(i2) || this.m_I2.contains(i2)) { if (this.m_bLow - F2 > 2 * SMO.this.m_tol) { optimal = false; i1 = this.m_iLow; } } if (this.m_I0.contains(i2) || this.m_I3.contains(i2) || this.m_I4.contains(i2)) { if (F2 - this.m_bUp > 2 * SMO.this.m_tol) { optimal = false; i1 = this.m_iUp; } } if (optimal) { return false; } // For i2 unbound choose the better i1... if (this.m_I0.contains(i2)) { if (this.m_bLow - F2 > F2 - this.m_bUp) { i1 = this.m_iLow; } else { i1 = this.m_iUp; } } if (i1 == -1) { throw new Exception("This should never happen!"); } return this.takeStep(i1, i2, F2); } /** * Method solving for the Lagrange multipliers for two instances. * * @param i1 * index of the first instance * @param i2 * index of the second instance * @param F2 * the cached SVM output error for the second instance * @return true if multipliers could be found * @throws Exception * if something goes wrong */ protected boolean takeStep(final int i1, final int i2, final double F2) throws Exception { double alph1, alph2, y1, y2, F1, s, L, H, k11, k12, k22, eta, a1, a2, f1, f2, v1, v2, Lobj, Hobj; double C1 = SMO.this.m_C * this.m_data.instance(i1).weight(); double C2 = SMO.this.m_C * this.m_data.instance(i2).weight(); // Don't do anything if the two instances are the same if (i1 == i2) { return false; } // Initialize variables alph1 = this.m_alpha[i1]; alph2 = this.m_alpha[i2]; y1 = this.m_class[i1]; y2 = this.m_class[i2]; F1 = this.m_errors[i1]; s = y1 * y2; // Find the constraints on a2 if (y1 != y2) { L = Math.max(0, alph2 - alph1); H = Math.min(C2, C1 + alph2 - alph1); } else { L = Math.max(0, alph1 + alph2 - C1); H = Math.min(C2, alph1 + alph2); } if (L >= H) { return false; } // Compute second derivative of objective function k11 = this.m_kernel.eval(i1, i1, this.m_data.instance(i1)); k12 = this.m_kernel.eval(i1, i2, this.m_data.instance(i1)); k22 = this.m_kernel.eval(i2, i2, this.m_data.instance(i2)); eta = 2 * k12 - k11 - k22; // Check if second derivative is negative if (eta < 0) { // Compute unconstrained maximum a2 = alph2 - y2 * (F1 - F2) / eta; // Compute constrained maximum if (a2 < L) { a2 = L; } else if (a2 > H) { a2 = H; } } else { // Look at endpoints of diagonal f1 = this.SVMOutput(i1, this.m_data.instance(i1)); f2 = this.SVMOutput(i2, this.m_data.instance(i2)); v1 = f1 + this.m_b - y1 * alph1 * k11 - y2 * alph2 * k12; v2 = f2 + this.m_b - y1 * alph1 * k12 - y2 * alph2 * k22; double gamma = alph1 + s * alph2; Lobj = (gamma - s * L) + L - 0.5 * k11 * (gamma - s * L) * (gamma - s * L) - 0.5 * k22 * L * L - s * k12 * (gamma - s * L) * L - y1 * (gamma - s * L) * v1 - y2 * L * v2; Hobj = (gamma - s * H) + H - 0.5 * k11 * (gamma - s * H) * (gamma - s * H) - 0.5 * k22 * H * H - s * k12 * (gamma - s * H) * H -
y1 * (gamma - s * H) * v1 - y2 * H * v2; if (Lobj > Hobj + SMO.this.m_eps) { a2 = L; } else if (Lobj < Hobj - SMO.this.m_eps) { a2 = H; } else { a2 = alph2; } } if (Math.abs(a2 - alph2) < SMO.this.m_eps * (a2 + alph2 + SMO.this.m_eps)) { return false; } // To prevent precision problems if (a2 > C2 - m_Del * C2) { a2 = C2; } else if (a2 <= m_Del * C2) { a2 = 0; } // Recompute a1 a1 = alph1 + s * (alph2 - a2); // To prevent precision problems if (a1 > C1 - m_Del * C1) { a1 = C1; } else if (a1 <= m_Del * C1) { a1 = 0; } // Update sets if (a1 > 0) { this.m_supportVectors.insert(i1); } else { this.m_supportVectors.delete(i1); } if ((a1 > 0) && (a1 < C1)) { this.m_I0.insert(i1); } else { this.m_I0.delete(i1); } if ((y1 == 1) && (a1 == 0)) { this.m_I1.insert(i1); } else { this.m_I1.delete(i1); } if ((y1 == -1) && (a1 == C1)) { this.m_I2.insert(i1); } else { this.m_I2.delete(i1); } if ((y1 == 1) && (a1 == C1)) { this.m_I3.insert(i1); } else { this.m_I3.delete(i1); } if ((y1 == -1) && (a1 == 0)) { this.m_I4.insert(i1); } else { this.m_I4.delete(i1); } if (a2 > 0) { this.m_supportVectors.insert(i2); } else { this.m_supportVectors.delete(i2); } if ((a2 > 0) && (a2 < C2)) { this.m_I0.insert(i2); } else { this.m_I0.delete(i2); } if ((y2 == 1) && (a2 == 0)) { this.m_I1.insert(i2); } else { this.m_I1.delete(i2); } if ((y2 == -1) && (a2 == C2)) { this.m_I2.insert(i2); } else { this.m_I2.delete(i2); } if ((y2 == 1) && (a2 == C2)) { this.m_I3.insert(i2); } else { this.m_I3.delete(i2); } if ((y2 == -1) && (a2 == 0)) { this.m_I4.insert(i2); } else { this.m_I4.delete(i2); } // Update weight vector to reflect change a1 and a2, if linear SVM if (SMO.this.m_KernelIsLinear) { Instance inst1 = this.m_data.instance(i1); for (int p1 = 0; p1 < inst1.numValues(); p1++) { if (inst1.index(p1) != this.m_data.classIndex()) { this.m_weights[inst1.index(p1)] += y1 * (a1 - alph1) * inst1.valueSparse(p1); } } Instance inst2 = this.m_data.instance(i2); for (int p2 = 0; p2 < inst2.numValues(); p2++) { if (inst2.index(p2) != this.m_data.classIndex()) { this.m_weights[inst2.index(p2)] += y2 * (a2 - alph2) * inst2.valueSparse(p2); } } } // Update error cache using new Lagrange multipliers for (int j = this.m_I0.getNext(-1); j != -1; j = this.m_I0.getNext(j)) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if ((j != i1) && (j != i2)) { this.m_errors[j] += y1 * (a1 - alph1) * this.m_kernel.eval(i1, j, this.m_data.instance(i1)) + y2 * (a2 - alph2) * this.m_kernel.eval(i2, j, this.m_data.instance(i2)); } } // Update error cache for i1 and i2 this.m_errors[i1] += y1 * (a1 - alph1) * k11 + y2 * (a2 - alph2) * k12; this.m_errors[i2] += y1 * (a1 - alph1) * k12 + y2 * (a2 - alph2) * k22; // Update array with Lagrange multipliers this.m_alpha[i1] = a1; this.m_alpha[i2] = a2; // Update thresholds this.m_bLow = -Double.MAX_VALUE; this.m_bUp = Double.MAX_VALUE; this.m_iLow = -1; this.m_iUp = -1; for (int j = this.m_I0.getNext(-1); j != -1; j = this.m_I0.getNext(j)) { if (this.m_errors[j] < this.m_bUp) { this.m_bUp = this.m_errors[j]; this.m_iUp = j; } if (this.m_errors[j] > this.m_bLow) { this.m_bLow = this.m_errors[j]; this.m_iLow = j; } } if (!this.m_I0.contains(i1)) { if (this.m_I3.contains(i1) || this.m_I4.contains(i1)) { if (this.m_errors[i1] > this.m_bLow) { this.m_bLow = this.m_errors[i1]; this.m_iLow = i1; } } else { if (this.m_errors[i1] < this.m_bUp) { this.m_bUp = this.m_errors[i1]; this.m_iUp = i1; } } } if (!this.m_I0.contains(i2)) { if 
(this.m_I3.contains(i2) || this.m_I4.contains(i2)) { if (this.m_errors[i2] > this.m_bLow) { this.m_bLow = this.m_errors[i2]; this.m_iLow = i2; } } else { if (this.m_errors[i2] < this.m_bUp) { this.m_bUp = this.m_errors[i2]; this.m_iUp = i2; } } } if ((this.m_iLow == -1) || (this.m_iUp == -1)) { throw new Exception("This should never happen!"); } // Made some progress. return true; } /** * Quick and dirty check whether the quadratic programming problem is solved. * * @throws Exception * if checking fails */ protected void checkClassifier() throws Exception { double sum = 0; for (int i = 0; i < this.m_alpha.length; i++) { if (this.m_alpha[i] > 0) { sum += this.m_class[i] * this.m_alpha[i]; } } System.err.println("Sum of y(i) * alpha(i): " + sum); for (int i = 0; i < this.m_alpha.length; i++) { double output = this.SVMOutput(i, this.m_data.instance(i)); if (Utils.eq(this.m_alpha[i], 0)) { if (Utils.sm(this.m_class[i] * output, 1)) { System.err.println("KKT condition 1 violated: " + this.m_class[i] * output); } } if (Utils.gr(this.m_alpha[i], 0) && Utils.sm(this.m_alpha[i], SMO.this.m_C * this.m_data.instance(i).weight())) { if (!Utils.eq(this.m_class[i] * output, 1)) { System.err.println("KKT condition 2 violated: " + this.m_class[i] * output); } } if (Utils.eq(this.m_alpha[i], SMO.this.m_C * this.m_data.instance(i).weight())) { if (Utils.gr(this.m_class[i] * output, 1)) { System.err.println("KKT condition 3 violated: " + this.m_class[i] * output); } } } } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } } /** filter: Normalize training data */ public static final int FILTER_NORMALIZE = 0; /** filter: Standardize training data */ public static final int FILTER_STANDARDIZE = 1; /** filter: No normalization/standardization */ public static final int FILTER_NONE = 2; /** The filter to apply to the training data */ public static final Tag[] TAGS_FILTER = { new Tag(FILTER_NORMALIZE, "Normalize training data"), new Tag(FILTER_STANDARDIZE, "Standardize training data"), new Tag(FILTER_NONE, "No normalization/standardization"), }; /** The binary classifier(s) */ protected BinarySMO[][] m_classifiers = null; /** The complexity parameter. */ protected double m_C = 1.0; /** Epsilon for rounding. */ protected double m_eps = 1.0e-12; /** Tolerance for accuracy of result. */ protected double m_tol = 1.0e-3; /** Whether to normalize/standardize/neither */ protected int m_filterType = FILTER_NORMALIZE; /** The filter used to make attributes numeric. */ protected NominalToBinary m_NominalToBinary; /** The filter used to standardize/normalize all values. */ protected Filter m_Filter = null; /** The filter used to get rid of missing values. */ protected ReplaceMissingValues m_Missing; /** The class index from the training data */ protected int m_classIndex = -1; /** The class attribute */ protected Attribute m_classAttribute; /** whether the kernel is a linear one */ protected boolean m_KernelIsLinear = false; /** * Turn off all checks and conversions? Turning them off assumes that data is purely numeric, doesn't contain any missing values, and has a nominal class. Turning them off also means that no header information will be stored if the * machine is linear. Finally, it also assumes that no instance has a weight equal to 0. 
*/ protected boolean m_checksTurnedOff; /** Precision constant for updating sets */ protected static double m_Del = 1000 * Double.MIN_VALUE; /** Whether calibrator models are to be fit */ protected boolean m_fitCalibratorModels = false; /** Determines the calibrator model to use for probability estimate */ protected Classifier m_calibrator = new Logistic(); /** The number of folds for the internal cross-validation */ protected int m_numFolds = -1; /** The random number seed */ protected int m_randomSeed = 1; /** the kernel to use */ protected Kernel m_kernel = new PolyKernel(); /** * Turns off checks for missing values, etc. Use with caution. */ public void turnChecksOff() { this.m_checksTurnedOff = true; } /** * Turns on checks for missing values, etc. */ public void turnChecksOn() { this.m_checksTurnedOff = false; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = this.getKernel().getCapabilities(); result.setOwner(this); // attribute result.enableAllAttributeDependencies(); // with NominalToBinary we can also handle nominal attributes, but only // if the kernel can handle numeric attributes if (result.handles(Capability.NUMERIC_ATTRIBUTES)) { result.enable(Capability.NOMINAL_ATTRIBUTES); } result.enable(Capability.MISSING_VALUES); // class result.disableAllClasses(); result.disableAllClassDependencies(); result.disable(Capability.NO_CLASS); result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Method for building the classifier. Implements a one-against-one wrapper for multi-class problems. * * @param insts * the set of training instances * @throws Exception * if the classifier can't be built successfully */ @Override public void buildClassifier(Instances insts) throws Exception { if (!this.m_checksTurnedOff) { // can classifier handle the data? this.getCapabilities().testWithFail(insts); // remove instances with missing class insts = new Instances(insts); insts.deleteWithMissingClass(); /* * Removes all the instances with weight equal to 0. MUST be done since condition (8) of Keerthi's * paper is made with the assertion Ci > 0 (See equation (3a). 
*/ Instances data = new Instances(insts, insts.numInstances()); for (int i = 0; i < insts.numInstances(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (insts.instance(i).weight() > 0) { data.add(insts.instance(i)); } } if (data.numInstances() == 0) { throw new Exception("No training instances left after removing " + "instances with weight 0!"); } insts = data; } if (!this.m_checksTurnedOff) { this.m_Missing = new ReplaceMissingValues(); this.m_Missing.setInputFormat(insts); insts = Filter.useFilter(insts, this.m_Missing); } else { this.m_Missing = null; } if (this.getCapabilities().handles(Capability.NUMERIC_ATTRIBUTES)) { boolean onlyNumeric = true; if (!this.m_checksTurnedOff) { for (int i = 0; i < insts.numAttributes(); i++) { if (i != insts.classIndex()) { if (!insts.attribute(i).isNumeric()) { onlyNumeric = false; break; } } } } if (!onlyNumeric) { this.m_NominalToBinary = new NominalToBinary(); this.m_NominalToBinary.setInputFormat(insts); insts = Filter.useFilter(insts, this.m_NominalToBinary); } else { this.m_NominalToBinary = null; } } else { this.m_NominalToBinary = null; } if (this.m_filterType == FILTER_STANDARDIZE) { this.m_Filter = new Standardize(); this.m_Filter.setInputFormat(insts); insts = Filter.useFilter(insts, this.m_Filter); } else if (this.m_filterType == FILTER_NORMALIZE) { this.m_Filter = new Normalize(); this.m_Filter.setInputFormat(insts); insts = Filter.useFilter(insts, this.m_Filter); } else { this.m_Filter = null; } this.m_classIndex = insts.classIndex(); this.m_classAttribute = insts.classAttribute(); this.m_KernelIsLinear = (this.m_kernel instanceof PolyKernel) && (((PolyKernel) this.m_kernel).getExponent() == 1.0); // Generate subsets representing each class Instances[] subsets = new Instances[insts.numClasses()]; for (int i = 0; i < insts.numClasses(); i++) { subsets[i] = new Instances(insts, insts.numInstances()); } for (int j = 0; j < insts.numInstances(); j++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } Instance inst = insts.instance(j); subsets[(int) inst.classValue()].add(inst); } for (int i = 0; i < insts.numClasses(); i++) { subsets[i].compactify(); } // Build the binary classifiers Random rand = new Random(this.m_randomSeed); this.m_classifiers = new BinarySMO[insts.numClasses()][insts.numClasses()]; for (int i = 0; i < insts.numClasses(); i++) { for (int j = i + 1; j < insts.numClasses(); j++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } this.m_classifiers[i][j] = new BinarySMO(); this.m_classifiers[i][j].setKernel(Kernel.makeCopy(this.getKernel())); Instances data = new Instances(insts, insts.numInstances()); for (int k = 0; k < subsets[i].numInstances(); k++) { data.add(subsets[i].instance(k)); } for (int k = 0; k < subsets[j].numInstances(); k++) { data.add(subsets[j].instance(k)); } data.compactify(); data.randomize(rand); this.m_classifiers[i][j].buildClassifier(data, i, j, this.m_fitCalibratorModels, this.m_numFolds, this.m_randomSeed); } } } /** * Estimates class probabilities for given instance. 
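* <p>
* A worked sketch of the default (uncalibrated) voting for a hypothetical 3-class
* problem: if the pairwise outputs are (0,1) &gt; 0, (0,2) &lt;= 0 and (1,2) &lt;= 0,
* then the raw votes are {1, 2, 0}, and after Utils.normalize the returned
* distribution is approximately {0.33, 0.67, 0.0}. The concrete outputs here are
* assumptions chosen only to illustrate the aggregation.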
* * @param inst * the instance to compute the probabilities for * @throws Exception * in case of an error */ @Override public double[] distributionForInstance(Instance inst) throws Exception { // Filter instance if (!this.m_checksTurnedOff) { this.m_Missing.input(inst); this.m_Missing.batchFinished(); inst = this.m_Missing.output(); } if (this.m_NominalToBinary != null) { this.m_NominalToBinary.input(inst); this.m_NominalToBinary.batchFinished(); inst = this.m_NominalToBinary.output(); } if (this.m_Filter != null) { this.m_Filter.input(inst); this.m_Filter.batchFinished(); inst = this.m_Filter.output(); } if (!this.m_fitCalibratorModels) { double[] result = new double[inst.numClasses()]; for (int i = 0; i < inst.numClasses(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } for (int j = i + 1; j < inst.numClasses(); j++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if ((this.m_classifiers[i][j].m_alpha != null) || (this.m_classifiers[i][j].m_sparseWeights != null)) { double output = this.m_classifiers[i][j].SVMOutput(-1, inst); if (output > 0) { result[j] += 1; } else { result[i] += 1; } } } } Utils.normalize(result); return result; } else { // We only need to do pairwise coupling if there are more // than two classes. if (inst.numClasses() == 2) { double[] newInst = new double[2]; newInst[0] = this.m_classifiers[0][1].SVMOutput(-1, inst); newInst[1] = Utils.missingValue(); DenseInstance d = new DenseInstance(1, newInst); d.setDataset(this.m_classifiers[0][1].m_calibrationDataHeader); return this.m_classifiers[0][1].m_calibrator.distributionForInstance(d); } double[][] r = new double[inst.numClasses()][inst.numClasses()]; double[][] n = new double[inst.numClasses()][inst.numClasses()]; for (int i = 0; i < inst.numClasses(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } for (int j = i + 1; j < inst.numClasses(); j++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if ((this.m_classifiers[i][j].m_alpha != null) || (this.m_classifiers[i][j].m_sparseWeights != null)) { double[] newInst = new double[2]; newInst[0] = this.m_classifiers[i][j].SVMOutput(-1, inst); newInst[1] = Utils.missingValue(); DenseInstance d = new DenseInstance(1, newInst); d.setDataset(this.m_classifiers[i][j].m_calibrationDataHeader); r[i][j] = this.m_classifiers[i][j].m_calibrator.distributionForInstance(d)[0]; n[i][j] = this.m_classifiers[i][j].m_sumOfWeights; } } } return weka.classifiers.meta.MultiClassClassifier.pairwiseCoupling(n, r); } } /** * Returns an array of votes for the given instance.
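* <p>
* Unlike distributionForInstance(Instance), the raw vote counts are returned without
* normalization; in the hypothetical 3-class sketch given for that method, the result
* would be {1, 2, 0}.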
* * @param inst * the instance * @return array of votes * @throws Exception * if something goes wrong */ public int[] obtainVotes(Instance inst) throws Exception { // Filter instance if (!this.m_checksTurnedOff) { this.m_Missing.input(inst); this.m_Missing.batchFinished(); inst = this.m_Missing.output(); } if (this.m_NominalToBinary != null) { this.m_NominalToBinary.input(inst); this.m_NominalToBinary.batchFinished(); inst = this.m_NominalToBinary.output(); } if (this.m_Filter != null) { this.m_Filter.input(inst); this.m_Filter.batchFinished(); inst = this.m_Filter.output(); } int[] votes = new int[inst.numClasses()]; for (int i = 0; i < inst.numClasses(); i++) { for (int j = i + 1; j < inst.numClasses(); j++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } double output = this.m_classifiers[i][j].SVMOutput(-1, inst); if (output > 0) { votes[j] += 1; } else { votes[i] += 1; } } } return votes; } /** * Returns the weights in sparse format. */ public double[][][] sparseWeights() { int numValues = this.m_classAttribute.numValues(); double[][][] sparseWeights = new double[numValues][numValues][]; for (int i = 0; i < numValues; i++) { for (int j = i + 1; j < numValues; j++) { sparseWeights[i][j] = this.m_classifiers[i][j].m_sparseWeights; } } return sparseWeights; } /** * Returns the indices in sparse format. */ public int[][][] sparseIndices() { int numValues = this.m_classAttribute.numValues(); int[][][] sparseIndices = new int[numValues][numValues][]; for (int i = 0; i < numValues; i++) { for (int j = i + 1; j < numValues; j++) { sparseIndices[i][j] = this.m_classifiers[i][j].m_sparseIndices; } } return sparseIndices; } /** * Returns the bias of each binary SMO. */ public double[][] bias() { int numValues = this.m_classAttribute.numValues(); double[][] bias = new double[numValues][numValues]; for (int i = 0; i < numValues; i++) { for (int j = i + 1; j < numValues; j++) { bias[i][j] = this.m_classifiers[i][j].m_b; } } return bias; } /** * Returns the number of values of the class attribute. */ public int numClassAttributeValues() { return this.m_classAttribute.numValues(); } /** * Returns the names of the class attribute values. */ public String[] classAttributeNames() { int numValues = this.m_classAttribute.numValues(); String[] classAttributeNames = new String[numValues]; for (int i = 0; i < numValues; i++) { classAttributeNames[i] = this.m_classAttribute.value(i); } return classAttributeNames; } /** * Returns the attribute names. */ public String[][][] attributeNames() { int numValues = this.m_classAttribute.numValues(); String[][][] attributeNames = new String[numValues][numValues][]; for (int i = 0; i < numValues; i++) { for (int j = i + 1; j < numValues; j++) { // int numAttributes = m_classifiers[i][j].m_data.numAttributes(); int numAttributes = this.m_classifiers[i][j].m_sparseIndices.length; String[] attrNames = new String[numAttributes]; for (int k = 0; k < numAttributes; k++) { attrNames[k] = this.m_classifiers[i][j].m_data.attribute(this.m_classifiers[i][j].m_sparseIndices[k]).name(); } attributeNames[i][j] = attrNames; } } return attributeNames; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options.
*/ @Override public Enumeration<Option> listOptions() { Vector<Option> result = new Vector<>(); result.addElement(new Option( "\tTurns off all checks - use with caution!\n" + "\tTurning them off assumes that data is purely numeric, doesn't\n" + "\tcontain any missing values, and has a nominal class. Turning them\n" + "\toff also means that no header information will be stored if the\n" + "\tmachine is linear. Finally, it also assumes that no instance has\n" + "\ta weight equal to 0.\n" + "\t(default: checks on)", "no-checks", 0, "-no-checks")); result.addElement(new Option("\tThe complexity constant C. (default 1)", "C", 1, "-C <double>")); result.addElement(new Option("\tWhether to 0=normalize/1=standardize/2=neither. " + "(default 0=normalize)", "N", 1, "-N")); result.addElement(new Option("\tThe tolerance parameter. " + "(default 1.0e-3)", "L", 1, "-L <double>")); result.addElement(new Option("\tThe epsilon for round-off error. " + "(default 1.0e-12)", "P", 1, "-P <double>")); result.addElement(new Option("\tFit calibration models to SVM outputs. ", "M", 0, "-M")); result.addElement(new Option("\tThe number of folds for the internal\n" + "\tcross-validation. " + "(default -1, use training data)", "V", 1, "-V <double>")); result.addElement(new Option("\tThe random number seed. " + "(default 1)", "W", 1, "-W <double>")); result.addElement(new Option("\tThe Kernel to use.\n" + "\t(default: weka.classifiers.functions.supportVector.PolyKernel)", "K", 1, "-K <classname and parameters>")); result.addElement(new Option("\tFull name of calibration model, followed by options.\n" + "\t(default: \"weka.classifiers.functions.Logistic\")", "calibrator", 0, "-calibrator <scheme specification>")); result.addAll(Collections.list(super.listOptions())); result.addElement(new Option("", "", 0, "\nOptions specific to kernel " + this.getKernel().getClass().getName() + ":")); result.addAll(Collections.list(((OptionHandler) this.getKernel()).listOptions())); if (this.getCalibrator() instanceof OptionHandler) { result.addElement(new Option("", "", 0, "\nOptions specific to calibrator " + this.getCalibrator().getClass().getName() + ":")); result.addAll(Collections.list(((OptionHandler) this.getCalibrator()).listOptions())); } return result.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p> * * <pre> * -no-checks Turns off all checks - use with caution! Turning them off assumes that data is purely numeric, doesn't contain any missing values, and has a nominal class. Turning them off also means that no header information will be stored if the machine is linear. Finally, it also assumes that no instance has a weight equal to 0. (default: checks on) * </pre> * * <pre> * -C &lt;double&gt; The complexity constant C. (default 1) * </pre> * * <pre> * -N Whether to 0=normalize/1=standardize/2=neither. (default 0=normalize) * </pre> * * <pre> * -L &lt;double&gt; The tolerance parameter. (default 1.0e-3) * </pre> * * <pre> * -P &lt;double&gt; The epsilon for round-off error. (default 1.0e-12) * </pre> * * <pre> * -M Fit calibration models to SVM outputs. * </pre> * * <pre> * -V &lt;double&gt; The number of folds for the internal cross-validation. (default -1, use training data) * </pre> * * <pre> * -W &lt;double&gt; The random number seed. (default 1) * </pre> * * <pre> * -K &lt;classname and parameters&gt; The Kernel to use. 
(default: weka.classifiers.functions.supportVector.PolyKernel) * </pre> * * <pre> * -calibrator &lt;scheme specification&gt; Full name of calibration model, followed by options. (default: "weka.classifiers.functions.Logistic") * </pre> * * <pre> * -output-debug-info If set, classifier is run in debug mode and may output additional info to the console * </pre> * * <pre> * -do-not-check-capabilities If set, classifier capabilities are not checked before classifier is built (use with caution). * </pre> * * <pre> * -num-decimal-places The number of decimal places for the output of numbers in the model (default 2). * </pre> * * <pre> * Options specific to kernel weka.classifiers.functions.supportVector.PolyKernel: * </pre> * * <pre> * -E &lt;num&gt; The Exponent to use. (default: 1.0) * </pre> * * <pre> * -L Use lower-order terms. (default: no) * </pre> * * <pre> * -C &lt;num&gt; The size of the cache (a prime number), 0 for full cache and -1 to turn it off. (default: 250007) * </pre> * * <pre> * -output-debug-info Enables debugging output (if available) to be printed. (default: off) * </pre> * * <pre> * -no-checks Turns off all checks - use with caution! (default: checks on) * </pre> * * <pre> * Options specific to calibrator weka.classifiers.functions.Logistic: * </pre> * * <pre> * -C Use conjugate gradient descent rather than BFGS updates. * </pre> * * <pre> * -R &lt;ridge&gt; Set the ridge in the log-likelihood. * </pre> * * <pre> * -M &lt;number&gt; Set the maximum number of iterations (default -1, until convergence). * </pre> * * <pre> * -output-debug-info If set, classifier is run in debug mode and may output additional info to the console * </pre> * * <pre> * -do-not-check-capabilities If set, classifier capabilities are not checked before classifier is built (use with caution). * </pre> * * <pre> * -num-decimal-places The number of decimal places for the output of numbers in the model (default 2). 
* </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String tmpStr; String[] tmpOptions; this.setChecksTurnedOff(Utils.getFlag("no-checks", options)); tmpStr = Utils.getOption('C', options); if (tmpStr.length() != 0) { this.setC(Double.parseDouble(tmpStr)); } else { this.setC(1.0); } tmpStr = Utils.getOption('L', options); if (tmpStr.length() != 0) { this.setToleranceParameter(Double.parseDouble(tmpStr)); } else { this.setToleranceParameter(1.0e-3); } tmpStr = Utils.getOption('P', options); if (tmpStr.length() != 0) { this.setEpsilon(Double.parseDouble(tmpStr)); } else { this.setEpsilon(1.0e-12); } tmpStr = Utils.getOption('N', options); if (tmpStr.length() != 0) { this.setFilterType(new SelectedTag(Integer.parseInt(tmpStr), TAGS_FILTER)); } else { this.setFilterType(new SelectedTag(FILTER_NORMALIZE, TAGS_FILTER)); } this.setBuildCalibrationModels(Utils.getFlag('M', options)); tmpStr = Utils.getOption('V', options); if (tmpStr.length() != 0) { this.setNumFolds(Integer.parseInt(tmpStr)); } else { this.setNumFolds(-1); } tmpStr = Utils.getOption('W', options); if (tmpStr.length() != 0) { this.setRandomSeed(Integer.parseInt(tmpStr)); } else { this.setRandomSeed(1); } tmpStr = Utils.getOption('K', options); tmpOptions = Utils.splitOptions(tmpStr); if (tmpOptions.length != 0) { tmpStr = tmpOptions[0]; tmpOptions[0] = ""; this.setKernel(Kernel.forName(tmpStr, tmpOptions)); } String classifierString = Utils.getOption("calibrator", options); String[] classifierSpec = Utils.splitOptions(classifierString); String classifierName; if (classifierSpec.length == 0) { classifierName = "weka.classifiers.functions.Logistic"; } else { classifierName = classifierSpec[0]; classifierSpec[0] = ""; } this.setCalibrator(AbstractClassifier.forName(classifierName, classifierSpec)); super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> result = new Vector<>(); if (this.getChecksTurnedOff()) { result.add("-no-checks"); } result.add("-C"); result.add("" + this.getC()); result.add("-L"); result.add("" + this.getToleranceParameter()); result.add("-P"); result.add("" + this.getEpsilon()); result.add("-N"); result.add("" + this.m_filterType); if (this.getBuildCalibrationModels()) { result.add("-M"); } result.add("-V"); result.add("" + this.getNumFolds()); result.add("-W"); result.add("" + this.getRandomSeed()); result.add("-K"); result.add("" + this.getKernel().getClass().getName() + " " + Utils.joinOptions(this.getKernel().getOptions())); result.add("-calibrator"); result.add(this.getCalibrator().getClass().getName() + " " + Utils.joinOptions(((OptionHandler) this.getCalibrator()).getOptions())); Collections.addAll(result, super.getOptions()); return result.toArray(new String[result.size()]); } /** * Disables or enables the checks (which could be time-consuming). Use with caution! * * @param value * if true turns off all checks */ public void setChecksTurnedOff(final boolean value) { if (value) { this.turnChecksOff(); } else { this.turnChecksOn(); } } /** * Returns whether the checks are turned off or not. 
* * @return true if the checks are turned off */ public boolean getChecksTurnedOff() { return this.m_checksTurnedOff; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String checksTurnedOffTipText() { return "Turns time-consuming checks off - use with caution."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String kernelTipText() { return "The kernel to use."; } /** * sets the kernel to use * * @param value * the kernel to use */ public void setKernel(final Kernel value) { this.m_kernel = value; } /** * Returns the kernel to use * * @return the current kernel */ public Kernel getKernel() { return this.m_kernel; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String calibratorTipText() { return "The calibration method to use."; } /** * sets the calibrator to use * * @param value * the calibrator to use */ public void setCalibrator(final Classifier value) { this.m_calibrator = value; } /** * Returns the calibrator to use * * @return the current calibrator */ public Classifier getCalibrator() { return this.m_calibrator; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String cTipText() { return "The complexity parameter C."; } /** * Get the value of C. * * @return Value of C. */ public double getC() { return this.m_C; } /** * Set the value of C. * * @param v * Value to assign to C. */ public void setC(final double v) { this.m_C = v; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String toleranceParameterTipText() { return "The tolerance parameter (shouldn't be changed)."; } /** * Get the value of tolerance parameter. * * @return Value of tolerance parameter. */ public double getToleranceParameter() { return this.m_tol; } /** * Set the value of tolerance parameter. * * @param v * Value to assign to tolerance parameter. */ public void setToleranceParameter(final double v) { this.m_tol = v; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String epsilonTipText() { return "The epsilon for round-off error (shouldn't be changed)."; } /** * Get the value of epsilon. * * @return Value of epsilon. */ public double getEpsilon() { return this.m_eps; } /** * Set the value of epsilon. * * @param v * Value to assign to epsilon. */ public void setEpsilon(final double v) { this.m_eps = v; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String filterTypeTipText() { return "Determines how/if the data will be transformed."; } /** * Gets how the training data will be transformed. Will be one of FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE. * * @return the filtering mode */ public SelectedTag getFilterType() { return new SelectedTag(this.m_filterType, TAGS_FILTER); } /** * Sets how the training data will be transformed. Should be one of FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE. 
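* <p>
* A minimal sketch (assuming an already constructed classifier {@code smo}):
* <pre>
* smo.setFilterType(new SelectedTag(SMO.FILTER_STANDARDIZE, SMO.TAGS_FILTER));
* </pre>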
* * @param newType * the new filtering mode */ public void setFilterType(final SelectedTag newType) { if (newType.getTags() == TAGS_FILTER) { this.m_filterType = newType.getSelectedTag().getID(); } } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String buildCalibrationModelsTipText() { return "Whether to fit calibration models to the SVM's outputs (for proper probability estimates)."; } /** * Get the value of buildCalibrationModels. * * @return Value of buildCalibrationModels. */ public boolean getBuildCalibrationModels() { return this.m_fitCalibratorModels; } /** * Set the value of buildCalibrationModels. * * @param newbuildCalibrationModels * Value to assign to buildCalibrationModels. */ public void setBuildCalibrationModels(final boolean newbuildCalibrationModels) { this.m_fitCalibratorModels = newbuildCalibrationModels; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String numFoldsTipText() { return "The number of folds for cross-validation used to generate " + "training data for calibration models (-1 means use training data)."; } /** * Get the value of numFolds. * * @return Value of numFolds. */ public int getNumFolds() { return this.m_numFolds; } /** * Set the value of numFolds. * * @param newnumFolds * Value to assign to numFolds. */ public void setNumFolds(final int newnumFolds) { this.m_numFolds = newnumFolds; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String randomSeedTipText() { return "Random number seed for the cross-validation."; } /** * Get the value of randomSeed. * * @return Value of randomSeed. */ public int getRandomSeed() { return this.m_randomSeed; } /** * Set the value of randomSeed. * * @param newrandomSeed * Value to assign to randomSeed. */ public void setRandomSeed(final int newrandomSeed) { this.m_randomSeed = newrandomSeed; } /** * Prints out the classifier. * * @return a description of the classifier as a string */ @Override public String toString() { StringBuffer text = new StringBuffer(); if ((this.m_classAttribute == null)) { return "SMO: No model built yet."; } try { text.append("SMO\n\n"); text.append("Kernel used:\n " + this.m_kernel.toString() + "\n\n"); for (int i = 0; i < this.m_classAttribute.numValues(); i++) { for (int j = i + 1; j < this.m_classAttribute.numValues(); j++) { text.append("Classifier for classes: " + this.m_classAttribute.value(i) + ", " + this.m_classAttribute.value(j) + "\n\n"); text.append(this.m_classifiers[i][j]); if (this.m_fitCalibratorModels) { text.append("\n\n"); if (this.m_classifiers[i][j].m_calibrator == null) { text.append("No calibration model has been fit.\n"); } else { text.append("Calibration model fit to the output:\n"); text.append(this.m_classifiers[i][j].m_calibrator); } } text.append("\n\n"); } } } catch (Exception e) { return "Can't print SMO classifier."; } return text.toString(); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. 
* * @param argv * the commandline options */ public static void main(final String[] argv) { runClassifier(new SMO(), argv); } }
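// A minimal command-line sketch (the dataset path and kernel settings below are
// assumptions for illustration only):
//
//   java weka.classifiers.functions.SMO -t train.arff \
//       -K "weka.classifiers.functions.supportVector.PolyKernel -E 2.0"
//
// See listOptions() above for the full set of supported switches.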
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/SMOreg.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * SMOreg.java * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions; import weka.classifiers.AbstractClassifier; import weka.classifiers.functions.supportVector.Kernel; import weka.classifiers.functions.supportVector.PolyKernel; import weka.classifiers.functions.supportVector.RegOptimizer; import weka.classifiers.functions.supportVector.RegSMOImproved; import weka.core.AdditionalMeasureProducer; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.filters.Filter; import weka.filters.unsupervised.attribute.NominalToBinary; import weka.filters.unsupervised.attribute.Normalize; import weka.filters.unsupervised.attribute.ReplaceMissingValues; import weka.filters.unsupervised.attribute.Standardize; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; /** <!-- globalinfo-start --> * SMOreg implements the support vector machine for regression. The parameters can be learned using various algorithms. The algorithm is selected by setting the RegOptimizer. The most popular algorithm (RegSMOImproved) is due to Shevade, Keerthi et al and this is the default RegOptimizer.<br/> * <br/> * For more information see:<br/> * <br/> * S.K. Shevade, S.S. Keerthi, C. Bhattacharyya, K.R.K. Murthy: Improvements to the SMO Algorithm for SVM Regression. In: IEEE Transactions on Neural Networks, 1999.<br/> * <br/> * A.J. Smola, B. Schoelkopf (1998). A tutorial on support vector regression. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Shevade1999, * author = {S.K. Shevade and S.S. Keerthi and C. Bhattacharyya and K.R.K. Murthy}, * booktitle = {IEEE Transactions on Neural Networks}, * title = {Improvements to the SMO Algorithm for SVM Regression}, * year = {1999}, * PS = {http://guppy.mpe.nus.edu.sg/\~mpessk/svm/ieee_smo_reg.ps.gz} * } * * &#64;techreport{Smola1998, * author = {A.J. Smola and B. Schoelkopf}, * note = {NeuroCOLT2 Technical Report NC2-TR-1998-030}, * title = {A tutorial on support vector regression}, * year = {1998} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -C &lt;double&gt; * The complexity constant C. * (default 1)</pre> * * <pre> -N * Whether to 0=normalize/1=standardize/2=neither. 
* (default 0=normalize)</pre> * * <pre> -I &lt;classname and parameters&gt; * Optimizer class used for solving quadratic optimization problem * (default weka.classifiers.functions.supportVector.RegSMOImproved)</pre> * * <pre> -K &lt;classname and parameters&gt; * The Kernel to use. * (default: weka.classifiers.functions.supportVector.PolyKernel)</pre> * * <pre> * Options specific to optimizer ('-I') weka.classifiers.functions.supportVector.RegSMOImproved: * </pre> * * <pre> -T &lt;double&gt; * The tolerance parameter for checking the stopping criterion. * (default 0.001)</pre> * * <pre> -V * Use variant 1 of the algorithm when true, otherwise use variant 2. * (default true)</pre> * * <pre> -P &lt;double&gt; * The epsilon for round-off error. * (default 1.0e-12)</pre> * * <pre> -L &lt;double&gt; * The epsilon parameter in epsilon-insensitive loss function. * (default 1.0e-3)</pre> * * <pre> -W &lt;double&gt; * The random number seed. * (default 1)</pre> * * <pre> * Options specific to kernel ('-K') weka.classifiers.functions.supportVector.PolyKernel: * </pre> * * <pre> -D * Enables debugging output (if available) to be printed. * (default: off)</pre> * * <pre> -no-checks * Turns off all checks - use with caution! * (default: checks on)</pre> * * <pre> -C &lt;num&gt; * The size of the cache (a prime number), 0 for full cache and * -1 to turn it off. * (default: 250007)</pre> * * <pre> -E &lt;num&gt; * The Exponent to use. * (default: 1.0)</pre> * * <pre> -L * Use lower-order terms. * (default: no)</pre> * <!-- options-end --> * * @author Remco Bouckaert (remco@cs.waikato.ac.nz,rrb@xm.co.nz) * @version $Revision$ */ public class SMOreg extends AbstractClassifier implements WeightedInstancesHandler, AdditionalMeasureProducer, TechnicalInformationHandler { /** for serialization */ private static final long serialVersionUID = -7149606251113102827L; /** The filter to apply to the training data: Normalize */ public static final int FILTER_NORMALIZE = 0; /** The filter to apply to the training data: Standardize */ public static final int FILTER_STANDARDIZE = 1; /** The filter to apply to the training data: None */ public static final int FILTER_NONE = 2; /** The filter to apply to the training data */ public static final Tag[] TAGS_FILTER = { new Tag(FILTER_NORMALIZE, "Normalize training data"), new Tag(FILTER_STANDARDIZE, "Standardize training data"), new Tag(FILTER_NONE, "No normalization/standardization"), }; /** Whether to normalize/standardize/neither */ protected int m_filterType = FILTER_NORMALIZE; /** The filter used to make attributes numeric. */ protected NominalToBinary m_NominalToBinary; /** The filter used to standardize/normalize all values. */ protected Filter m_Filter = null; /** The filter used to get rid of missing values. */ protected ReplaceMissingValues m_Missing; /** Only numeric attributes in the dataset?
If so, less need to filter */ protected boolean m_onlyNumeric; /** capacity parameter **/ protected double m_C = 1.0; /** coefficients used by normalization filter for doing its linear transformation * so that result = svmoutput * m_x1 + m_x0 **/ protected double m_x1 = 1.0; protected double m_x0 = 0.0; /** contains the algorithm used for learning **/ protected RegOptimizer m_optimizer = new RegSMOImproved(); /** the configured kernel */ protected Kernel m_kernel = new PolyKernel(); /** * Returns a string describing classifier * * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "SMOreg implements the support vector machine for regression. " + "The parameters can be learned using various algorithms. The " + "algorithm is selected by setting the RegOptimizer. The most " + "popular algorithm (" + RegSMOImproved.class.getName().replaceAll(".*\\.", "") + ") is due to Shevade, Keerthi " + "et al and this is the default RegOptimizer.\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "S.K. Shevade and S.S. Keerthi and C. Bhattacharyya and K.R.K. Murthy"); result.setValue(Field.TITLE, "Improvements to the SMO Algorithm for SVM Regression"); result.setValue(Field.BOOKTITLE, "IEEE Transactions on Neural Networks"); result.setValue(Field.YEAR, "1999"); result.setValue(Field.PS, "http://guppy.mpe.nus.edu.sg/~mpessk/svm/ieee_smo_reg.ps.gz"); additional = result.add(Type.TECHREPORT); additional.setValue(Field.AUTHOR, "A.J. Smola and B. Schoelkopf"); additional.setValue(Field.TITLE, "A tutorial on support vector regression"); additional.setValue(Field.NOTE, "NeuroCOLT2 Technical Report NC2-TR-1998-030"); additional.setValue(Field.YEAR, "1998"); return result; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ public Enumeration<Option> listOptions() { Vector<Option> result = new Vector<Option>(); result.addElement(new Option( "\tThe complexity constant C.\n" + "\t(default 1)", "C", 1, "-C <double>")); result.addElement(new Option( "\tWhether to 0=normalize/1=standardize/2=neither.\n" + "\t(default 0=normalize)", "N", 1, "-N")); result.addElement(new Option( "\tOptimizer class used for solving quadratic optimization problem\n" + "\t(default " + RegSMOImproved.class.getName() + ")", "I", 1, "-I <classname and parameters>")); result.addElement(new Option( "\tThe Kernel to use.\n" + "\t(default: weka.classifiers.functions.supportVector.PolyKernel)", "K", 1, "-K <classname and parameters>")); result.addAll(Collections.list(super.listOptions())); result.addElement(new Option( "", "", 0, "\nOptions specific to optimizer ('-I') " + getRegOptimizer().getClass().getName() + ":")); result.addAll(Collections.list(((OptionHandler) getRegOptimizer()).listOptions())); result.addElement(new Option( "", "", 0, "\nOptions specific to kernel ('-K') " + getKernel().getClass().getName() + ":")); result.addAll(Collections.list(((OptionHandler) getKernel()).listOptions())); return result.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -C &lt;double&gt; * The complexity constant C. * (default 1)</pre> * * <pre> -N * Whether to 0=normalize/1=standardize/2=neither. * (default 0=normalize)</pre> * * <pre> -I &lt;classname and parameters&gt; * Optimizer class used for solving quadratic optimization problem * (default weka.classifiers.functions.supportVector.RegSMOImproved)</pre> * * <pre> -K &lt;classname and parameters&gt; * The Kernel to use. * (default: weka.classifiers.functions.supportVector.PolyKernel)</pre> * * <pre> * Options specific to optimizer ('-I') weka.classifiers.functions.supportVector.RegSMOImproved: * </pre> * * <pre> -T &lt;double&gt; * The tolerance parameter for checking the stopping criterion. * (default 0.001)</pre> * * <pre> -V * Use variant 1 of the algorithm when true, otherwise use variant 2. * (default true)</pre> * * <pre> -P &lt;double&gt; * The epsilon for round-off error. * (default 1.0e-12)</pre> * * <pre> -L &lt;double&gt; * The epsilon parameter in epsilon-insensitive loss function. * (default 1.0e-3)</pre> * * <pre> -W &lt;double&gt; * The random number seed. * (default 1)</pre> * * <pre> * Options specific to kernel ('-K') weka.classifiers.functions.supportVector.PolyKernel: * </pre> * * <pre> -D * Enables debugging output (if available) to be printed. * (default: off)</pre> * * <pre> -no-checks * Turns off all checks - use with caution! * (default: checks on)</pre> * * <pre> -C &lt;num&gt; * The size of the cache (a prime number), 0 for full cache and * -1 to turn it off. * (default: 250007)</pre> * * <pre> -E &lt;num&gt; * The Exponent to use. * (default: 1.0)</pre> * * <pre> -L * Use lower-order terms. 
* (default: no)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String tmpStr; String[] tmpOptions; tmpStr = Utils.getOption('C', options); if (tmpStr.length() != 0) { setC(Double.parseDouble(tmpStr)); } else { setC(1.0); } String nString = Utils.getOption('N', options); if (nString.length() != 0) { setFilterType(new SelectedTag(Integer.parseInt(nString), TAGS_FILTER)); } else { setFilterType(new SelectedTag(FILTER_NORMALIZE, TAGS_FILTER)); } tmpStr = Utils.getOption('I', options); tmpOptions = Utils.splitOptions(tmpStr); if (tmpOptions.length != 0) { tmpStr = tmpOptions[0]; tmpOptions[0] = ""; setRegOptimizer( (RegOptimizer) Utils.forName(RegOptimizer.class, tmpStr, tmpOptions)); } else { setRegOptimizer(new RegSMOImproved()); } tmpStr = Utils.getOption('K', options); tmpOptions = Utils.splitOptions(tmpStr); if (tmpOptions.length != 0) { tmpStr = tmpOptions[0]; tmpOptions[0] = ""; setKernel(Kernel.forName(tmpStr, tmpOptions)); } else { setKernel(new PolyKernel()); } super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { Vector<String> result = new Vector<String>(); result.add("-C"); result.add("" + getC()); result.add("-N"); result.add("" + m_filterType); result.add("-I"); result.add("" + getRegOptimizer().getClass().getName() + " " + Utils.joinOptions(getRegOptimizer().getOptions())); result.add("-K"); result.add("" + getKernel().getClass().getName() + " " + Utils.joinOptions(getKernel().getOptions())); Collections.addAll(result, super.getOptions()); return (String[]) result.toArray(new String[result.size()]); } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = getKernel().getCapabilities(); result.setOwner(this); // attribute result.enableAllAttributeDependencies(); // with NominalToBinary we can also handle nominal attributes, but only // if the kernel can handle numeric attributes if (result.handles(Capability.NUMERIC_ATTRIBUTES)) result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.disableAllClasses(); result.disableAllClassDependencies(); result.disable(Capability.NO_CLASS); result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Method for building the classifier. * * @param instances the set of training instances * @throws Exception if the classifier can't be built successfully */ public void buildClassifier(Instances instances) throws Exception { // can classifier handle the data? getCapabilities().testWithFail(instances); // remove instances with missing class instances = new Instances(instances); instances.deleteWithMissingClass(); // Removes all the instances with weight equal to 0. // MUST be done since condition (8) of Keerthi's paper // is made with the assertion Ci > 0 (See equation (3a). 
Instances data = new Instances(instances, 0); for (int i = 0; i < instances.numInstances(); i++) { if (instances.instance(i).weight() > 0) { data.add(instances.instance(i)); } } if (data.numInstances() == 0) { throw new Exception("No training instances left after removing " + "instances with either zero weight or a missing class!"); } instances = data; m_onlyNumeric = true; for (int i = 0; i < instances.numAttributes(); i++) { if (i != instances.classIndex()) { if (!instances.attribute(i).isNumeric()) { m_onlyNumeric = false; break; } } } m_Missing = new ReplaceMissingValues(); m_Missing.setInputFormat(instances); instances = Filter.useFilter(instances, m_Missing); if (getCapabilities().handles(Capability.NUMERIC_ATTRIBUTES)) { if (!m_onlyNumeric) { m_NominalToBinary = new NominalToBinary(); m_NominalToBinary.setInputFormat(instances); instances = Filter.useFilter(instances, m_NominalToBinary); } else { m_NominalToBinary = null; } } else { m_NominalToBinary = null; } // retrieve two different class values used to determine filter transformation double y0 = instances.instance(0).classValue(); int index = 1; while (index < instances.numInstances() && instances.instance(index).classValue() == y0) { index++; } if (index == instances.numInstances()) { // degenerate case, all class values are equal // we don't want to deal with this, too much hassle throw new Exception("All class values are the same. At least two class values should be different"); } double y1 = instances.instance(index).classValue(); // apply filters if (m_filterType == FILTER_STANDARDIZE) { m_Filter = new Standardize(); ((Standardize)m_Filter).setIgnoreClass(true); m_Filter.setInputFormat(instances); instances = Filter.useFilter(instances, m_Filter); } else if (m_filterType == FILTER_NORMALIZE) { m_Filter = new Normalize(); ((Normalize)m_Filter).setIgnoreClass(true); m_Filter.setInputFormat(instances); instances = Filter.useFilter(instances, m_Filter); } else { m_Filter = null; } if (m_Filter != null) { double z0 = instances.instance(0).classValue(); double z1 = instances.instance(index).classValue(); m_x1 = (y0-y1) / (z0 - z1); // no division by zero: y0 != y1, and the class is transformed by a strictly monotone linear map, so z0 != z1 m_x0 = (y0 - m_x1 * z0); // = y1 - m_x1 * z1 } else { m_x1 = 1.0; m_x0 = 0.0; } m_optimizer.setSMOReg(this); m_optimizer.buildClassifier(instances); } /** * Classifies the given instance using the learned regression function.
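* <p>
* The raw SVM output is computed on the filtered instance and then mapped back to the
* original class scale with the linear transformation fitted in buildClassifier:
* <pre>
* prediction = svmOutput * m_x1 + m_x0
* </pre>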
* * @param instance the test instance * @return the classification * @throws Exception if classification can't be done successfully */ public double classifyInstance(Instance instance) throws Exception { // Filter instance m_Missing.input(instance); m_Missing.batchFinished(); instance = m_Missing.output(); if (!m_onlyNumeric && m_NominalToBinary != null) { m_NominalToBinary.input(instance); m_NominalToBinary.batchFinished(); instance = m_NominalToBinary.output(); } if (m_Filter != null) { m_Filter.input(instance); m_Filter.batchFinished(); instance = m_Filter.output(); } double result = m_optimizer.SVMOutput(instance); return result * m_x1 + m_x0; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String regOptimizerTipText() { return "The learning algorithm."; } /** * sets the learning algorithm * * @param regOptimizer the learning algorithm */ public void setRegOptimizer(RegOptimizer regOptimizer) { m_optimizer = regOptimizer; } /** * returns the learning algorithm * * @return the learning algorithm */ public RegOptimizer getRegOptimizer() { return m_optimizer; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String kernelTipText() { return "The kernel to use."; } /** * sets the kernel to use * * @param value the kernel to use */ public void setKernel(Kernel value) { m_kernel = value; } /** * Returns the kernel to use * * @return the current kernel */ public Kernel getKernel() { return m_kernel; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String cTipText() { return "The complexity parameter C."; } /** * Get the value of C. * * @return Value of C. */ public double getC() { return m_C; } /** * Set the value of C. * * @param v Value to assign to C. */ public void setC(double v) { m_C = v; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String filterTypeTipText() { return "Determines how/if the data will be transformed."; } /** * Gets how the training data will be transformed. Will be one of * FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE. * * @return the filtering mode */ public SelectedTag getFilterType() { return new SelectedTag(m_filterType, TAGS_FILTER); } /** * Sets how the training data will be transformed. Should be one of * FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE. * * @param newType the new filtering mode */ public void setFilterType(SelectedTag newType) { if (newType.getTags() == TAGS_FILTER) { m_filterType = newType.getSelectedTag().getID(); } } /** * Prints out the classifier. * * @return a description of the classifier as a string */ public String toString() { StringBuffer text = new StringBuffer(); if (m_optimizer == null || !m_optimizer.modelBuilt()) { return "SMOreg: No model built yet."; } try { text.append(m_optimizer.toString()); } catch (Exception e) { return "Can't print SMOreg classifier."; } return text.toString(); } /** * Returns an enumeration of the measure names. Additional measures * must follow the naming convention of starting with "measure", eg.
* double measureBlah() * * @return an enumeration of the measure names */ public Enumeration<String> enumerateMeasures() { Vector<String> result = new Vector<String>(); result.addElement("measureKernelEvaluations"); result.addElement("measureCacheHits"); return result.elements(); } /** * Returns the value of the named measure * @param measureName the name of the measure to query for its value * @return the value of the named measure * @throws IllegalArgumentException if the named measure is not supported */ public double getMeasure(String measureName) { if (measureName.equalsIgnoreCase("measureKernelEvaluations")) return measureKernelEvaluations(); else if (measureName.equalsIgnoreCase("measureCacheHits")) return measureCacheHits(); else throw new IllegalArgumentException("Measure '" + measureName + "' is not supported!"); } /** * number of kernel evaluations used in learning * * @return the number of kernel evaluations */ protected double measureKernelEvaluations() { if (m_optimizer != null) { return m_optimizer.getKernelEvaluations(); } else { return 0; } } /** * number of kernel cache hits used during learning * * @return the number of kernel cache hits */ protected double measureCacheHits() { if (m_optimizer != null) { return m_optimizer.getCacheHits(); } else { return 0; } } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for running this classifier. * * @param args the commandline options */ public static void main(String[] args) { runClassifier(new SMOreg(), args); } }
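// A minimal programmatic sketch (the Instances variable "train" is an assumption for
// illustration; its class attribute must be numeric):
//
//   SMOreg reg = new SMOreg();
//   reg.setC(1.0);
//   reg.buildClassifier(train);
//   double yHat = reg.classifyInstance(train.instance(0));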
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/SimpleLinearRegression.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * SimpleLinearRegression.java * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.evaluation.RegressionAnalysis; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.WeightedInstancesHandler; /** * <!-- globalinfo-start --> Learns a simple linear regression model. Picks the attribute that results in the lowest squared error. Can only deal with numeric attributes. * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -additional-stats * Output additional statistics. * </pre> * * <pre> * -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution). * </pre> * * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision$ */ public class SimpleLinearRegression extends AbstractClassifier implements WeightedInstancesHandler { /** for serialization */ static final long serialVersionUID = 1679336022895414137L; /** The chosen attribute */ private Attribute m_attribute; /** The index of the chosen attribute */ private int m_attributeIndex; /** The slope */ private double m_slope; /** The intercept */ private double m_intercept; /** The class mean for missing values */ private double m_classMeanForMissing; /** * Whether to output additional statistics such as std. dev. of coefficients and t-stats */ protected boolean m_outputAdditionalStats; /** Degrees of freedom, used in statistical calculations */ private int m_df; /** standard error of the slope */ private double m_seSlope = Double.NaN; /** standard error of the intercept */ private double m_seIntercept = Double.NaN; /** t-statistic of the slope */ private double m_tstatSlope = Double.NaN; /** t-statistic of the intercept */ private double m_tstatIntercept = Double.NaN; /** R^2 value for the regression */ private double m_rsquared = Double.NaN; /** Adjusted R^2 value for the regression */ private double m_rsquaredAdj = Double.NaN; /** F-statistic for the regression */ private double m_fstat = Double.NaN; /** If true, suppress error message if no useful attribute was found */ private boolean m_suppressErrorMessage = false; /** * Returns a string describing this classifier * * @return a description of the classifier suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "Learns a simple linear regression model. 
" + "Picks the attribute that results in the lowest squared error. " + "Can only deal with numeric attributes."; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<>(); newVector.addElement(new Option("\tOutput additional statistics.", "additional-stats", 0, "-additional-stats")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -additional-stats * Output additional statistics. * </pre> * * <pre> * -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution). * </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { this.setOutputAdditionalStats(Utils.getFlag("additional-stats", options)); super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> result = new Vector<>(); if (this.getOutputAdditionalStats()) { result.add("-additional-stats"); } Collections.addAll(result, super.getOptions()); return result.toArray(new String[result.size()]); } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String outputAdditionalStatsTipText() { return "Output additional statistics (such as " + "std deviation of coefficients and t-statistics)"; } /** * Set whether to output additional statistics (such as std. deviation of coefficients and t-statistics * * @param additional * true if additional stats are to be output */ public void setOutputAdditionalStats(final boolean additional) { this.m_outputAdditionalStats = additional; } /** * Get whether to output additional statistics (such as std. deviation of coefficients and t-statistics * * @return true if additional stats are to be output */ public boolean getOutputAdditionalStats() { return this.m_outputAdditionalStats; } /** * Generate a prediction for the supplied instance. * * @param inst * the instance to predict. * @return the prediction * @throws Exception * if an error occurs */ @Override public double classifyInstance(final Instance inst) throws Exception { if (this.m_attribute == null) { return this.m_intercept; } else { if (inst.isMissing(this.m_attributeIndex)) { return this.m_classMeanForMissing; } return this.m_intercept + this.m_slope * inst.value(this.m_attributeIndex); } } /** * Returns default capabilities of the classifier. 
* * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Builds a simple linear regression model given the supplied training data. * * @param insts * the training data. * @throws Exception * if an error occurs */ @Override public void buildClassifier(Instances insts) throws Exception { // can classifier handle the data? this.getCapabilities().testWithFail(insts); if (this.m_outputAdditionalStats) { // check that the instances weights are all 1 // because the RegressionAnalysis class does // not handle weights boolean ok = true; for (int i = 0; i < insts.numInstances(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (insts.instance(i).weight() != 1) { ok = false; break; } } if (!ok) { throw new Exception("Can only compute additional statistics on unweighted data"); } } // Compute sums and counts double[] sum = new double[insts.numAttributes()]; double[] count = new double[insts.numAttributes()]; double[] classSumForMissing = new double[insts.numAttributes()]; double[] classSumSquaredForMissing = new double[insts.numAttributes()]; double classCount = 0; double classSum = 0; for (int j = 0; j < insts.numInstances(); j++) { Instance inst = insts.instance(j); if (!inst.classIsMissing()) { for (int i = 0; i < insts.numAttributes(); i++) { if (!inst.isMissing(i)) { sum[i] += inst.weight() * inst.value(i); count[i] += inst.weight(); } else { classSumForMissing[i] += inst.classValue() * inst.weight(); classSumSquaredForMissing[i] += inst.classValue() * inst.classValue() * inst.weight(); } } classCount += inst.weight(); classSum += inst.weight() * inst.classValue(); } } // Compute means double[] mean = new double[insts.numAttributes()]; double[] classMeanForMissing = new double[insts.numAttributes()]; double[] classMeanForKnown = new double[insts.numAttributes()]; for (int i = 0; i < insts.numAttributes(); i++) { if (i != insts.classIndex()) { if (count[i] > 0) { mean[i] = sum[i] / count[i]; } if (classCount - count[i] > 0) { classMeanForMissing[i] = classSumForMissing[i] / (classCount - count[i]); } if (count[i] > 0) { classMeanForKnown[i] = (classSum - classSumForMissing[i]) / count[i]; } } } sum = null; count = null; double[] slopes = new double[insts.numAttributes()]; double[] sumWeightedDiffsSquared = new double[insts.numAttributes()]; double[] sumWeightedClassDiffsSquared = new double[insts.numAttributes()]; // For all instances for (int j = 0; j < insts.numInstances(); j++) { Instance inst = insts.instance(j); // Only need to do something if the class isn't missing if (!inst.classIsMissing()) { // For all attributes for (int i = 0; i < insts.numAttributes(); i++) { if (!inst.isMissing(i) && (i != insts.classIndex())) { double yDiff = inst.classValue() - classMeanForKnown[i]; double weightedYDiff = inst.weight() * yDiff; double diff = inst.value(i) - mean[i]; double weightedDiff = inst.weight() * diff; slopes[i] += weightedYDiff * diff; sumWeightedDiffsSquared[i] += weightedDiff * diff; sumWeightedClassDiffsSquared[i] += weightedYDiff * yDiff; } } } } // Pick the best attribute double minSSE 
= Double.MAX_VALUE; this.m_attribute = null; int chosen = -1; double chosenSlope = Double.NaN; double chosenIntercept = Double.NaN; double chosenMeanForMissing = Double.NaN; for (int i = 0; i < insts.numAttributes(); i++) { // Do we have missing values for this attribute? double sseForMissing = classSumSquaredForMissing[i] - (classSumForMissing[i] * classMeanForMissing[i]); // Should we skip this attribute? if ((i == insts.classIndex()) || (sumWeightedDiffsSquared[i] == 0)) { continue; } // Compute final slope and intercept double numerator = slopes[i]; slopes[i] /= sumWeightedDiffsSquared[i]; double intercept = classMeanForKnown[i] - slopes[i] * mean[i]; // Compute sum of squared errors double sse = sumWeightedClassDiffsSquared[i] - slopes[i] * numerator; // Add component due to missing value prediction sse += sseForMissing; // Check whether this is the best attribute if (sse < minSSE) { minSSE = sse; chosen = i; chosenSlope = slopes[i]; chosenIntercept = intercept; chosenMeanForMissing = classMeanForMissing[i]; } } // Set parameters if (chosen == -1) { if (!this.m_suppressErrorMessage) { System.err.println("----- no useful attribute found"); } this.m_attribute = null; this.m_attributeIndex = 0; this.m_slope = 0; this.m_intercept = classSum / classCount; this.m_classMeanForMissing = 0; } else { this.m_attribute = insts.attribute(chosen); this.m_attributeIndex = chosen; this.m_slope = chosenSlope; this.m_intercept = chosenIntercept; this.m_classMeanForMissing = chosenMeanForMissing; if (this.m_outputAdditionalStats) { // Reduce data so that stats are correct Instances newInsts = new Instances(insts, insts.numInstances()); for (int i = 0; i < insts.numInstances(); i++) { Instance inst = insts.instance(i); if (!inst.classIsMissing() && !inst.isMissing(this.m_attributeIndex)) { newInsts.add(inst); } } insts = newInsts; // do regression analysis this.m_df = insts.numInstances() - 2; double[] stdErrors = RegressionAnalysis.calculateStdErrorOfCoef(insts, this.m_attribute, this.m_slope, this.m_intercept, this.m_df); this.m_seSlope = stdErrors[0]; this.m_seIntercept = stdErrors[1]; double[] coef = new double[2]; coef[0] = this.m_slope; coef[1] = this.m_intercept; double[] tStats = RegressionAnalysis.calculateTStats(coef, stdErrors, 2); this.m_tstatSlope = tStats[0]; this.m_tstatIntercept = tStats[1]; double ssr = RegressionAnalysis.calculateSSR(insts, this.m_attribute, this.m_slope, this.m_intercept); this.m_rsquared = RegressionAnalysis.calculateRSquared(insts, ssr); this.m_rsquaredAdj = RegressionAnalysis.calculateAdjRSquared(this.m_rsquared, insts.numInstances(), 2); this.m_fstat = RegressionAnalysis.calculateFStat(this.m_rsquared, insts.numInstances(), 2); } } } /** * Returns true if a usable attribute was found. * * @return true if a usable attribute was found. */ public boolean foundUsefulAttribute() { return (this.m_attribute != null); } /** * Returns the index of the attribute used in the regression. * * @return the index of the attribute. */ public int getAttributeIndex() { return this.m_attributeIndex; } /** * Returns the slope of the function. * * @return the slope. */ public double getSlope() { return this.m_slope; } /** * Returns the intercept of the function. * * @return the intercept. */ public double getIntercept() { return this.m_intercept; } /** * Turn off the error message that is reported when no useful attribute is found. 
* * @param s * if set to true turns off the error message */ public void setSuppressErrorMessage(final boolean s) { this.m_suppressErrorMessage = s; } /** * Returns a description of this classifier as a string * * @return a description of the classifier. */ @Override public String toString() { StringBuffer text = new StringBuffer(); if (this.m_attribute == null) { text.append("Predicting constant " + this.m_intercept); } else { text.append("Linear regression on " + this.m_attribute.name() + "\n\n"); text.append(Utils.doubleToString(this.m_slope, 2) + " * " + this.m_attribute.name()); if (this.m_intercept > 0) { text.append(" + " + Utils.doubleToString(this.m_intercept, 2)); } else { text.append(" - " + Utils.doubleToString((-this.m_intercept), 2)); } text.append("\n\nPredicting " + Utils.doubleToString(this.m_classMeanForMissing, 2) + " if attribute value is missing."); if (this.m_outputAdditionalStats) { // put regression analysis here int attNameLength = this.m_attribute.name().length() + 3; if (attNameLength < "Variable".length() + 3) { attNameLength = "Variable".length() + 3; } text.append("\n\nRegression Analysis:\n\n" + Utils.padRight("Variable", attNameLength) + " Coefficient SE of Coef t-Stat"); text.append("\n" + Utils.padRight(this.m_attribute.name(), attNameLength)); text.append(Utils.doubleToString(this.m_slope, 12, 4)); text.append(" " + Utils.doubleToString(this.m_seSlope, 12, 5)); text.append(" " + Utils.doubleToString(this.m_tstatSlope, 12, 5)); text.append(Utils.padRight("\nconst", attNameLength + 1) + Utils.doubleToString(this.m_intercept, 12, 4)); text.append(" " + Utils.doubleToString(this.m_seIntercept, 12, 5)); text.append(" " + Utils.doubleToString(this.m_tstatIntercept, 12, 5)); text.append("\n\nDegrees of freedom = " + Integer.toString(this.m_df)); text.append("\nR^2 value = " + Utils.doubleToString(this.m_rsquared, 5)); text.append("\nAdjusted R^2 = " + Utils.doubleToString(this.m_rsquaredAdj, 5)); text.append("\nF-statistic = " + Utils.doubleToString(this.m_fstat, 5)); } } text.append("\n"); return text.toString(); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class * * @param argv * options */ public static void main(final String[] argv) { runClassifier(new SimpleLinearRegression(), argv); } }
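A minimal usage sketch for the SimpleLinearRegression class above, using only the public API shown; the dataset path, the demo class name, and the choice of class attribute are illustrative assumptions, not part of the original source.

import weka.classifiers.functions.SimpleLinearRegression;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class SimpleLinearRegressionDemo {
  public static void main(String[] args) throws Exception {
    // Load a numeric-class dataset (path is an assumption) and use the last attribute as the class.
    Instances data = DataSource.read("/tmp/data.arff");
    data.setClassIndex(data.numAttributes() - 1);

    SimpleLinearRegression slr = new SimpleLinearRegression();
    slr.buildClassifier(data);

    // The model regresses on the single attribute with the lowest sum of squared errors.
    if (slr.foundUsefulAttribute()) {
      System.out.println("attribute index = " + slr.getAttributeIndex());
      System.out.println("slope = " + slr.getSlope() + ", intercept = " + slr.getIntercept());
    }
    System.out.println(slr);
  }
}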
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/SimpleLogistic.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * SimpleLogistic.java * Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.trees.lmt.LogisticBase; import weka.core.*; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.filters.Filter; import weka.filters.unsupervised.attribute.NominalToBinary; import weka.filters.unsupervised.attribute.ReplaceMissingValues; /** <!-- globalinfo-start --> * Classifier for building linear logistic regression * models. LogitBoost with simple regression functions as base learners is used * for fitting the logistic models. The optimal number of LogitBoost iterations * to perform is cross-validated, which leads to automatic attribute selection. * For more information see:<br/> * Niels Landwehr, Mark Hall, Eibe Frank (2005). Logistic Model Trees.<br/> * <br/> * Marc Sumner, Eibe Frank, Mark Hall: Speeding up Logistic Model Tree * Induction. In: 9th European Conference on Principles and Practice of * Knowledge Discovery in Databases, 675-683, 2005. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * * <pre> * &#64;article{Landwehr2005, * author = {Niels Landwehr and Mark Hall and Eibe Frank}, * booktitle = {Machine Learning}, * number = {1-2}, * pages = {161-205}, * title = {Logistic Model Trees}, * volume = {95}, * year = {2005} * } * * &#64;inproceedings{Sumner2005, * author = {Marc Sumner and Eibe Frank and Mark Hall}, * booktitle = {9th European Conference on Principles and Practice of Knowledge Discovery in Databases}, * pages = {675-683}, * publisher = {Springer}, * title = {Speeding up Logistic Model Tree Induction}, * year = {2005} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -I &lt;iterations&gt; * Set fixed number of iterations for LogitBoost * </pre> * * <pre> * -S * Use stopping criterion on training set (instead of * cross-validation) * </pre> * * <pre> * -P * Use error on probabilities (rmse) instead of * misclassification error for stopping criterion * </pre> * * <pre> * -M &lt;iterations&gt; * Set maximum number of boosting iterations * </pre> * * <pre> * -H &lt;iterations&gt; * Set parameter for heuristic for early stopping of * LogitBoost. * If enabled, the minimum is selected greedily, stopping * if the current minimum has not changed for iter iterations. * By default, heuristic is enabled with value 50. Set to * zero to disable heuristic. * </pre> * * <pre> * -W &lt;beta&gt; * Set beta for weight trimming for LogitBoost. Set to 0 for no weight trimming. * </pre> * * <pre> * -A * The AIC is used to choose the best iteration (instead of CV or training error). 
* </pre> * <!-- options-end --> * * @author Niels Landwehr * @author Marc Sumner * @version $Revision$ */ public class SimpleLogistic extends AbstractClassifier implements OptionHandler, AdditionalMeasureProducer, WeightedInstancesHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 7397710626304705059L; /** The actual logistic regression model */ protected LogisticBase m_boostedModel; /** Filter for converting nominal attributes to binary ones */ protected NominalToBinary m_NominalToBinary = null; /** Filter for replacing missing values */ protected ReplaceMissingValues m_ReplaceMissingValues = null; /** If non-negative, use this as fixed number of LogitBoost iterations */ protected int m_numBoostingIterations; /** Maximum number of iterations for LogitBoost */ protected int m_maxBoostingIterations = 500; /** Parameter for the heuristic for early stopping of LogitBoost */ protected int m_heuristicStop = 50; /** If true, cross-validate number of LogitBoost iterations */ protected boolean m_useCrossValidation; /** * If true, minimize error on probabilities instead of misclassification * error */ protected boolean m_errorOnProbabilities; /** * Threshold for trimming weights. Instances with a weight lower than this (as * a percentage of total weights) are not included in the regression fit. */ protected double m_weightTrimBeta = 0; /** If true, the AIC is used to choose the best iteration */ private boolean m_useAIC = false; /** * Constructor for creating SimpleLogistic object with standard options. */ public SimpleLogistic() { m_numBoostingIterations = 0; m_useCrossValidation = true; m_errorOnProbabilities = false; m_weightTrimBeta = 0; m_useAIC = false; } /** * Constructor for creating SimpleLogistic object. * * @param numBoostingIterations if non-negative, use this as fixed number of * iterations for LogitBoost * @param useCrossValidation cross-validate number of LogitBoost iterations. * @param errorOnProbabilities minimize error on probabilities instead of * misclassification error */ public SimpleLogistic(int numBoostingIterations, boolean useCrossValidation, boolean errorOnProbabilities) { m_numBoostingIterations = numBoostingIterations; m_useCrossValidation = useCrossValidation; m_errorOnProbabilities = errorOnProbabilities; m_weightTrimBeta = 0; m_useAIC = false; } /** * Main method for testing this class * * @param argv commandline options */ public static void main(String[] argv) { runClassifier(new SimpleLogistic(), argv); } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Builds the logistic regression using LogitBoost. * * @param data the training data * @throws Exception if something goes wrong */ public void buildClassifier(Instances data) throws Exception { // can classifier handle the data? 
getCapabilities().testWithFail(data); // remove instances with missing class data = new Instances(data); data.deleteWithMissingClass(); // replace missing values m_ReplaceMissingValues = new ReplaceMissingValues(); m_ReplaceMissingValues.setInputFormat(data); data = Filter.useFilter(data, m_ReplaceMissingValues); // convert nominal attributes m_NominalToBinary = new NominalToBinary(); m_NominalToBinary.setInputFormat(data); data = Filter.useFilter(data, m_NominalToBinary); // create actual logistic model m_boostedModel = new LogisticBase(m_numBoostingIterations, m_useCrossValidation, m_errorOnProbabilities); m_boostedModel.setMaxIterations(m_maxBoostingIterations); m_boostedModel.setHeuristicStop(m_heuristicStop); m_boostedModel.setWeightTrimBeta(m_weightTrimBeta); m_boostedModel.setUseAIC(m_useAIC); m_boostedModel.setNumDecimalPlaces(m_numDecimalPlaces); // build logistic model m_boostedModel.buildClassifier(data); } /** * Returns class probabilities for an instance. * * @param inst the instance to compute the probabilities for * @return the probabilities * @throws Exception if distribution can't be computed successfully */ public double[] distributionForInstance(Instance inst) throws Exception { // replace missing values / convert nominal atts m_ReplaceMissingValues.input(inst); inst = m_ReplaceMissingValues.output(); m_NominalToBinary.input(inst); inst = m_NominalToBinary.output(); // obtain probs from logistic model return m_boostedModel.distributionForInstance(inst); } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<Option>(); newVector.addElement(new Option( "\tSet fixed number of iterations for LogitBoost", "I", 1, "-I <iterations>")); newVector.addElement(new Option( "\tUse stopping criterion on training set (instead of\n" + "\tcross-validation)", "S", 0, "-S")); newVector.addElement(new Option( "\tUse error on probabilities (rmse) instead of\n" + "\tmisclassification error for stopping criterion", "P", 0, "-P")); newVector .addElement(new Option("\tSet maximum number of boosting iterations", "M", 1, "-M <iterations>")); newVector.addElement(new Option( "\tSet parameter for heuristic for early stopping of\n" + "\tLogitBoost.\n" + "\tIf enabled, the minimum is selected greedily, stopping\n" + "\tif the current minimum has not changed for iter iterations.\n" + "\tBy default, heuristic is enabled with value 50. Set to\n" + "\tzero to disable heuristic.", "H", 1, "-H <iterations>")); newVector .addElement(new Option( "\tSet beta for weight trimming for LogitBoost. Set to 0 for no weight trimming.\n", "W", 1, "-W <beta>")); newVector .addElement(new Option( "\tThe AIC is used to choose the best iteration (instead of CV or training error).\n", "A", 0, "-A")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Gets the current settings of the Classifier. 
* * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { Vector<String> options = new Vector<String>(); options.add("-I"); options.add("" + getNumBoostingIterations()); if (!getUseCrossValidation()) { options.add("-S"); } if (getErrorOnProbabilities()) { options.add("-P"); } options.add("-M"); options.add("" + getMaxBoostingIterations()); options.add("-H"); options.add("" + getHeuristicStop()); options.add("-W"); options.add("" + getWeightTrimBeta()); if (getUseAIC()) { options.add("-A"); } Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * Parses a given list of options. * <p/> * <!-- options-start --> * Valid options are: * <p/> * * <pre> * -I &lt;iterations&gt; * Set fixed number of iterations for LogitBoost * </pre> * * <pre> * -S * Use stopping criterion on training set (instead of * cross-validation) * </pre> * * <pre> * -P * Use error on probabilities (rmse) instead of * misclassification error for stopping criterion * </pre> * * <pre> * -M &lt;iterations&gt; * Set maximum number of boosting iterations * </pre> * * <pre> * -H &lt;iterations&gt; * Set parameter for heuristic for early stopping of * LogitBoost. * If enabled, the minimum is selected greedily, stopping * if the current minimum has not changed for iter iterations. * By default, heuristic is enabled with value 50. Set to * zero to disable heuristic. * </pre> * * <pre> * -W &lt;beta&gt; * Set beta for weight trimming for LogitBoost. Set to 0 for no weight trimming. * </pre> * * <pre> * -A * The AIC is used to choose the best iteration (instead of CV or training error). * </pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String optionString = Utils.getOption('I', options); if (optionString.length() != 0) { setNumBoostingIterations((new Integer(optionString)).intValue()); } setUseCrossValidation(!Utils.getFlag('S', options)); setErrorOnProbabilities(Utils.getFlag('P', options)); optionString = Utils.getOption('M', options); if (optionString.length() != 0) { setMaxBoostingIterations((new Integer(optionString)).intValue()); } optionString = Utils.getOption('H', options); if (optionString.length() != 0) { setHeuristicStop((new Integer(optionString)).intValue()); } optionString = Utils.getOption('W', options); if (optionString.length() != 0) { setWeightTrimBeta((new Double(optionString)).doubleValue()); } setUseAIC(Utils.getFlag('A', options)); super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Get the value of numBoostingIterations. * * @return the number of boosting iterations */ public int getNumBoostingIterations() { return m_numBoostingIterations; } /** * Set the value of numBoostingIterations. * * @param n the number of boosting iterations */ public void setNumBoostingIterations(int n) { m_numBoostingIterations = n; } /** * Get the value of useCrossValidation. * * @return true if cross-validation is used */ public boolean getUseCrossValidation() { return m_useCrossValidation; } /** * Set the value of useCrossValidation. * * @param l whether to use cross-validation */ public void setUseCrossValidation(boolean l) { m_useCrossValidation = l; } /** * Get the value of errorOnProbabilities. 
* * @return true if error on probabilities is minimized instead of * misclassification error */ public boolean getErrorOnProbabilities() { return m_errorOnProbabilities; } /** * Set the value of errorOnProbabilities. * * @param l if true, minimize error on probabilities instead of * misclassification error */ public void setErrorOnProbabilities(boolean l) { m_errorOnProbabilities = l; } /** * Get the value of maxBoostingIterations. * * @return the maximum number of boosting iterations */ public int getMaxBoostingIterations() { return m_maxBoostingIterations; } /** * Set the value of maxBoostingIterations. * * @param n the maximum number of boosting iterations */ public void setMaxBoostingIterations(int n) { m_maxBoostingIterations = n; } /** * Get the value of heuristicStop. * * @return the value of heuristicStop */ public int getHeuristicStop() { return m_heuristicStop; } /** * Set the value of heuristicStop. * * @param n the value of heuristicStop */ public void setHeuristicStop(int n) { if (n == 0) m_heuristicStop = m_maxBoostingIterations; else m_heuristicStop = n; } /** * Get the value of weightTrimBeta. * * @return the beta value used for weight trimming */ public double getWeightTrimBeta() { return m_weightTrimBeta; } /** * Set the value of weightTrimBeta. * * @param n the beta value to use for weight trimming */ public void setWeightTrimBeta(double n) { m_weightTrimBeta = n; } /** * Get the value of useAIC. * * @return Value of useAIC. */ public boolean getUseAIC() { return m_useAIC; } /** * Set the value of useAIC. * * @param c Value to assign to useAIC. */ public void setUseAIC(boolean c) { m_useAIC = c; } /** * Get the number of LogitBoost iterations performed (= the number of * regression functions fit by LogitBoost). * * @return the number of LogitBoost iterations performed */ public int getNumRegressions() { return m_boostedModel.getNumRegressions(); } /** * Returns a description of the logistic model (attributes/coefficients). * * @return the model as string */ public String toString() { if (m_boostedModel == null) return "No model built"; return "SimpleLogistic:\n" + m_boostedModel.toString(); } /** * Returns the fraction of all attributes in the data that are used in the * logistic model (in percent). An attribute is used in the model if it is * used in any of the models for the different classes. 
* * @return percentage of attributes used in the model */ public double measureAttributesUsed() { return m_boostedModel.percentAttributesUsed(); } /** * Returns an enumeration of the additional measure names * * @return an enumeration of the measure names */ public Enumeration<String> enumerateMeasures() { Vector<String> newVector = new Vector<String>(3); newVector.addElement("measureAttributesUsed"); newVector.addElement("measureNumIterations"); return newVector.elements(); } /** * Returns the value of the named measure * * @param additionalMeasureName the name of the measure to query for its value * @return the value of the named measure * @throws IllegalArgumentException if the named measure is not supported */ public double getMeasure(String additionalMeasureName) { if (additionalMeasureName.compareToIgnoreCase("measureAttributesUsed") == 0) { return measureAttributesUsed(); } else if (additionalMeasureName .compareToIgnoreCase("measureNumIterations") == 0) { return getNumRegressions(); } else { throw new IllegalArgumentException(additionalMeasureName + " not supported (SimpleLogistic)"); } } /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter * gui */ public String globalInfo() { return "Classifier for building linear logistic regression models. LogitBoost with simple regression " + "functions as base learners is used for fitting the logistic models. The optimal number of LogitBoost " + "iterations to perform is cross-validated, which leads to automatic attribute selection. " + "For more information see:\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed * information about the technical background of this class, e.g., paper * reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.ARTICLE); result .setValue(Field.AUTHOR, "Niels Landwehr and Mark Hall and Eibe Frank"); result.setValue(Field.TITLE, "Logistic Model Trees"); result.setValue(Field.BOOKTITLE, "Machine Learning"); result.setValue(Field.YEAR, "2005"); result.setValue(Field.VOLUME, "95"); result.setValue(Field.PAGES, "161-205"); result.setValue(Field.NUMBER, "1-2"); additional = result.add(Type.INPROCEEDINGS); additional.setValue(Field.AUTHOR, "Marc Sumner and Eibe Frank and Mark Hall"); additional.setValue(Field.TITLE, "Speeding up Logistic Model Tree Induction"); additional .setValue(Field.BOOKTITLE, "9th European Conference on Principles and Practice of Knowledge Discovery in Databases"); additional.setValue(Field.YEAR, "2005"); additional.setValue(Field.PAGES, "675-683"); additional.setValue(Field.PUBLISHER, "Springer"); return result; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String numBoostingIterationsTipText() { return "Set fixed number of iterations for LogitBoost. If >= 0, this sets the number of LogitBoost iterations " + "to perform. 
If < 0, the number is cross-validated or a stopping criterion on the training set is used " + "(depending on the value of useCrossValidation)."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String useCrossValidationTipText() { return "Sets whether the number of LogitBoost iterations is to be cross-validated or the stopping criterion " + "on the training set should be used. If not set (and no fixed number of iterations was given), " + "the number of LogitBoost iterations that minimizes the error on the training set is used " + "(misclassification error or error on probabilities depending on errorOnProbabilities)."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String errorOnProbabilitiesTipText() { return "Use error on the probabilities as error measure when determining the best number of LogitBoost iterations. " + "If set, the number of LogitBoost iterations that minimizes the root mean squared error is chosen " + "(either on the training set or in the cross-validation, depending on useCrossValidation)."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String maxBoostingIterationsTipText() { return "Sets the maximum number of iterations for LogitBoost. The default value is 500; for very small/large " + "datasets a lower/higher value might be preferable."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String heuristicStopTipText() { return "If heuristicStop > 0, the heuristic for greedy stopping while cross-validating the number of " + "LogitBoost iterations is enabled. This means LogitBoost is stopped if no new error minimum " + "has been reached in the last heuristicStop iterations. It is recommended to use this heuristic, " + "as it gives a large speed-up especially on small datasets. The default value is 50."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String weightTrimBetaTipText() { return "Set the beta value used for weight trimming in LogitBoost. " + "Only instances carrying (1 - beta)% of the weight from the previous iteration " + "are used in the next iteration. Set to 0 for no weight trimming. " + "The default value is 0."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String useAICTipText() { return "The AIC is used to determine when to stop LogitBoost iterations " + "(instead of cross-validation or training error)."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String numDecimalPlacesTipText() { return "The number of decimal places to be used for the output of coefficients."; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
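A minimal usage sketch for SimpleLogistic, using only the public API shown above; the dataset path and the demo class name are illustrative assumptions.

import java.util.Random;
import weka.classifiers.Evaluation;
import weka.classifiers.functions.SimpleLogistic;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class SimpleLogisticDemo {
  public static void main(String[] args) throws Exception {
    Instances data = DataSource.read("/tmp/iris.arff"); // illustrative path, nominal class
    data.setClassIndex(data.numAttributes() - 1);

    // numBoostingIterations = 0 means the number of LogitBoost iterations
    // is determined automatically, here by internal cross-validation.
    SimpleLogistic sl = new SimpleLogistic(0, true, false);
    sl.setMaxBoostingIterations(200);
    sl.buildClassifier(data);
    System.out.println(sl);
    System.out.println("LogitBoost iterations used: " + sl.getNumRegressions());

    // Standard 10-fold cross-validation of the configured classifier.
    Evaluation eval = new Evaluation(data);
    eval.crossValidateModel(new SimpleLogistic(0, true, false), data, 10, new Random(1));
    System.out.println(eval.toSummaryString());
  }
}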
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/VotedPerceptron.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * VotedPerceptron.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions; import java.util.Collections; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.filters.Filter; import weka.filters.unsupervised.attribute.NominalToBinary; import weka.filters.unsupervised.attribute.ReplaceMissingValues; /** * <!-- globalinfo-start --> Implementation of the voted perceptron algorithm by Freund and * Schapire. Globally replaces all missing values, and transforms nominal attributes into binary * ones.<br/> * <br/> * For more information, see:<br/> * <br/> * Y. Freund, R. E. Schapire: Large margin classification using the perceptron algorithm. In: 11th * Annual Conference on Computational Learning Theory, New York, NY, 209-217, 1998. * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;inproceedings{Freund1998, * address = {New York, NY}, * author = {Y. Freund and R. E. Schapire}, * booktitle = {11th Annual Conference on Computational Learning Theory}, * pages = {209-217}, * publisher = {ACM Press}, * title = {Large margin classification using the perceptron algorithm}, * year = {1998} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -I &lt;int&gt; * The number of iterations to be performed. * (default 1) * </pre> * * <pre> * -E &lt;double&gt; * The exponent for the polynomial kernel. * (default 1) * </pre> * * <pre> * -S &lt;int&gt; * The seed for the random number generation. * (default 1) * </pre> * * <pre> * -M &lt;int&gt; * The maximum number of alterations allowed. * (default 10000) * </pre> * * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision$ */ public class VotedPerceptron extends AbstractClassifier implements OptionHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -1072429260104568698L; /** The maximum number of alterations to the perceptron */ private int m_MaxK = 10000; /** The number of iterations */ private int m_NumIterations = 1; /** The exponent */ private double m_Exponent = 1.0; /** The actual number of alterations */ private int m_K = 0; /** The training instances added to the perceptron */ private int[] m_Additions = null; /** Addition or subtraction? 
*/ private boolean[] m_IsAddition = null; /** The weights for each perceptron */ private int[] m_Weights = null; /** The training instances */ private Instances m_Train = null; /** Seed used for shuffling the dataset */ private int m_Seed = 1; /** The filter used to make attributes numeric. */ private NominalToBinary m_NominalToBinary; /** The filter used to get rid of missing values. */ private ReplaceMissingValues m_ReplaceMissingValues; /** * Returns a string describing this classifier * * @return a description of the classifier suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "Implementation of the voted perceptron algorithm by Freund and " + "Schapire. Globally replaces all missing values, and transforms " + "nominal attributes into binary ones.\n\n" + "For more information, see:\n\n" + this.getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed information about the * technical background of this class, e.g., paper reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Y. Freund and R. E. Schapire"); result.setValue(Field.TITLE, "Large margin classification using the perceptron algorithm"); result.setValue(Field.BOOKTITLE, "11th Annual Conference on Computational Learning Theory"); result.setValue(Field.YEAR, "1998"); result.setValue(Field.PAGES, "209-217"); result.setValue(Field.PUBLISHER, "ACM Press"); result.setValue(Field.ADDRESS, "New York, NY"); return result; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<>(4); newVector.addElement(new Option("\tThe number of iterations to be performed.\n" + "\t(default 1)", "I", 1, "-I <int>")); newVector.addElement(new Option("\tThe exponent for the polynomial kernel.\n" + "\t(default 1)", "E", 1, "-E <double>")); newVector.addElement(new Option("\tThe seed for the random number generation.\n" + "\t(default 1)", "S", 1, "-S <int>")); newVector.addElement(new Option("\tThe maximum number of alterations allowed.\n" + "\t(default 10000)", "M", 1, "-M <int>")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -I &lt;int&gt; * The number of iterations to be performed. * (default 1) * </pre> * * <pre> * -E &lt;double&gt; * The exponent for the polynomial kernel. * (default 1) * </pre> * * <pre> * -S &lt;int&gt; * The seed for the random number generation. * (default 1) * </pre> * * <pre> * -M &lt;int&gt; * The maximum number of alterations allowed. 
* (default 10000) * </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String iterationsString = Utils.getOption('I', options); if (iterationsString.length() != 0) { this.m_NumIterations = Integer.parseInt(iterationsString); } else { this.m_NumIterations = 1; } String exponentsString = Utils.getOption('E', options); if (exponentsString.length() != 0) { this.m_Exponent = (new Double(exponentsString)).doubleValue(); } else { this.m_Exponent = 1.0; } String seedString = Utils.getOption('S', options); if (seedString.length() != 0) { this.m_Seed = Integer.parseInt(seedString); } else { this.m_Seed = 1; } String alterationsString = Utils.getOption('M', options); if (alterationsString.length() != 0) { this.m_MaxK = Integer.parseInt(alterationsString); } else { this.m_MaxK = 10000; } super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<>(); options.add("-I"); options.add("" + this.m_NumIterations); options.add("-E"); options.add("" + this.m_Exponent); options.add("-S"); options.add("" + this.m_Seed); options.add("-M"); options.add("" + this.m_MaxK); Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.BINARY_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(0); return result; } /** * Builds the ensemble of perceptrons. * * @param insts * the data to train the classifier with * @throws Exception * if something goes wrong during building */ @Override public void buildClassifier(Instances insts) throws Exception { // can classifier handle the data? 
this.getCapabilities().testWithFail(insts); // remove instances with missing class insts = new Instances(insts); insts.deleteWithMissingClass(); // Filter data this.m_Train = new Instances(insts); this.m_ReplaceMissingValues = new ReplaceMissingValues(); this.m_ReplaceMissingValues.setInputFormat(this.m_Train); this.m_Train = Filter.useFilter(this.m_Train, this.m_ReplaceMissingValues); this.m_NominalToBinary = new NominalToBinary(); this.m_NominalToBinary.setInputFormat(this.m_Train); this.m_Train = Filter.useFilter(this.m_Train, this.m_NominalToBinary); // Randomize training data this.m_Train.randomize(new Random(this.m_Seed)); // Make space to store perceptrons this.m_Additions = new int[this.m_MaxK + 1]; this.m_IsAddition = new boolean[this.m_MaxK + 1]; this.m_Weights = new int[this.m_MaxK + 1]; // Compute perceptrons this.m_K = 0; out: for (int it = 0; it < this.m_NumIterations; it++) { for (int i = 0; i < this.m_Train.numInstances(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } Instance inst = this.m_Train.instance(i); if (!inst.classIsMissing()) { int prediction = this.makePrediction(this.m_K, inst); int classValue = (int) inst.classValue(); if (prediction == classValue) { this.m_Weights[this.m_K]++; } else { this.m_IsAddition[this.m_K] = (classValue == 1); this.m_Additions[this.m_K] = i; this.m_K++; this.m_Weights[this.m_K]++; } if (this.m_K == this.m_MaxK) { break out; } } } } } /** * Outputs the class distribution for the given instance. * * Pipes the output of the voted perceptron through a sigmoid function. * * @param inst * the instance for which the distribution is to be computed * @return the distribution * @throws Exception * if something goes wrong */ @Override public double[] distributionForInstance(Instance inst) throws Exception { // Filter instance this.m_ReplaceMissingValues.input(inst); this.m_ReplaceMissingValues.batchFinished(); inst = this.m_ReplaceMissingValues.output(); this.m_NominalToBinary.input(inst); this.m_NominalToBinary.batchFinished(); inst = this.m_NominalToBinary.output(); // Get probabilities double output = 0, sumSoFar = 0; if (this.m_K > 0) { for (int i = 0; i <= this.m_K; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (sumSoFar < 0) { output -= this.m_Weights[i]; } else { output += this.m_Weights[i]; } if (this.m_IsAddition[i]) { sumSoFar += this.innerProduct(this.m_Train.instance(this.m_Additions[i]), inst); } else { sumSoFar -= this.innerProduct(this.m_Train.instance(this.m_Additions[i]), inst); } } } double[] result = new double[2]; result[1] = 1 / (1 + Math.exp(-output)); result[0] = 1 - result[1]; return result; } /** * Returns textual description of classifier. * * @return the model as string */ @Override public String toString() { return "VotedPerceptron: Number of perceptrons=" + this.m_K; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String maxKTipText() { return "The maximum number of alterations to the perceptron."; } /** * Get the value of maxK. * * @return Value of maxK. */ public int getMaxK() { return this.m_MaxK; } /** * Set the value of maxK. * * @param v * Value to assign to maxK. 
*/ public void setMaxK(final int v) { this.m_MaxK = v; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String numIterationsTipText() { return "Number of iterations to be performed."; } /** * Get the value of NumIterations. * * @return Value of NumIterations. */ public int getNumIterations() { return this.m_NumIterations; } /** * Set the value of NumIterations. * * @param v * Value to assign to NumIterations. */ public void setNumIterations(final int v) { this.m_NumIterations = v; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String exponentTipText() { return "Exponent for the polynomial kernel."; } /** * Get the value of exponent. * * @return Value of exponent. */ public double getExponent() { return this.m_Exponent; } /** * Set the value of exponent. * * @param v * Value to assign to exponent. */ public void setExponent(final double v) { this.m_Exponent = v; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String seedTipText() { return "Seed for the random number generator."; } /** * Get the value of Seed. * * @return Value of Seed. */ public int getSeed() { return this.m_Seed; } /** * Set the value of Seed. * * @param v * Value to assign to Seed. */ public void setSeed(final int v) { this.m_Seed = v; } /** * Computes the inner product of two instances * * @param i1 * first instance * @param i2 * second instance * @return the inner product * @throws Exception * if computation fails */ private double innerProduct(final Instance i1, final Instance i2) throws Exception { // we can do a fast dot product double result = 0; int n1 = i1.numValues(); int n2 = i2.numValues(); int classIndex = this.m_Train.classIndex(); for (int p1 = 0, p2 = 0; p1 < n1 && p2 < n2;) { int ind1 = i1.index(p1); int ind2 = i2.index(p2); if (ind1 == ind2) { if (ind1 != classIndex) { result += i1.valueSparse(p1) * i2.valueSparse(p2); } p1++; p2++; } else if (ind1 > ind2) { p2++; } else { p1++; } } result += 1.0; if (this.m_Exponent != 1) { return Math.pow(result, this.m_Exponent); } else { return result; } } /** * Compute a prediction from a perceptron * * @param k * @param inst * the instance to make a prediction for * @return the prediction * @throws Exception * if computation fails */ private int makePrediction(final int k, final Instance inst) throws Exception { double result = 0; for (int i = 0; i < k; i++) { if (this.m_IsAddition[i]) { result += this.innerProduct(this.m_Train.instance(this.m_Additions[i]), inst); } else { result -= this.innerProduct(this.m_Train.instance(this.m_Additions[i]), inst); } } if (result < 0) { return 0; } else { return 1; } } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method. * * @param argv * the commandline options */ public static void main(final String[] argv) { runClassifier(new VotedPerceptron(), argv); } }
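A minimal usage sketch for VotedPerceptron; note from getCapabilities above that only binary class problems are supported. The dataset path and the demo class name are illustrative assumptions.

import weka.classifiers.functions.VotedPerceptron;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class VotedPerceptronDemo {
  public static void main(String[] args) throws Exception {
    Instances data = DataSource.read("/tmp/diabetes.arff"); // illustrative two-class dataset
    data.setClassIndex(data.numAttributes() - 1);

    VotedPerceptron vp = new VotedPerceptron();
    vp.setNumIterations(3); // -I: passes over the shuffled training data
    vp.setExponent(2.0);    // -E: exponent of the polynomial kernel
    vp.setMaxK(10000);      // -M: cap on stored perceptron alterations
    vp.setSeed(1);          // -S: seed used to shuffle the data
    vp.buildClassifier(data);

    // Probabilities come from squashing the voted output through the sigmoid.
    Instance first = data.instance(0);
    double[] dist = vp.distributionForInstance(first);
    System.out.println("P(class 0) = " + dist[0] + ", P(class 1) = " + dist[1]);
    System.out.println(vp);
  }
}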
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/XNV.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * XNV.java * Copyright (C) 2018 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions; import no.uib.cipr.matrix.*; import no.uib.cipr.matrix.Matrix; import weka.classifiers.RandomizableClassifier; import weka.classifiers.functions.supportVector.Kernel; import weka.classifiers.functions.supportVector.RBFKernel; import weka.core.*; import weka.filters.Filter; import weka.filters.unsupervised.attribute.NominalToBinary; import weka.filters.unsupervised.attribute.Nystroem; import weka.filters.unsupervised.attribute.ReplaceMissingValues; import weka.filters.unsupervised.attribute.Standardize; import weka.filters.unsupervised.instance.RemoveRange; import java.util.ArrayList; import java.util.Random; /** <!-- globalinfo-start --> * Implements the XNV method for semi-supervised learning using a kernel function (default: RBFKernel). Standardizes all attributes, including the target, by default. Applies (unsupervised) NominalToBinary and ReplaceMissingValues before anything else is done.<br> * <br> * For more information on the algorithm, see<br> * <br> * Brian McWilliams, David Balduzzi, Joachim M. Buhmann: Correlated random features for fast semi-supervised learning. In: Proc 27th Annual Conference on Neural Information Processing Systems, 440--448, 2013. * <br><br> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{McWilliams2013, * author = {Brian McWilliams and David Balduzzi and Joachim M. Buhmann}, * booktitle = {Proc 27th Annual Conference on Neural Information Processing Systems}, * pages = {440--448}, * title = {Correlated random features for fast semi-supervised learning}, * year = {2013}, * URL = {http://papers.nips.cc/paper/5000-correlated-random-features-for-fast-semi-supervised-learning.pdf} * } * </pre> * <br><br> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p> * * <pre> -S &lt;num&gt; * Random number seed. 
* (default 1)</pre> * * <pre> -G * The regularization parameter gamma.</pre> * * <pre> -M * The sample size for the Nystroem method.</pre> * * <pre> -K &lt;kernel specification&gt; * The kernel function to use.</pre> * * <pre> -S * If true, standardization will not be performed.</pre> * * <pre> -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution).</pre> * * <pre> -num-decimal-places * The number of decimal places for the output of numbers in the model (default 2).</pre> * * <pre> -batch-size * The desired batch size for batch prediction (default 100).</pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 12341 $ */ public class XNV extends RandomizableClassifier implements TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -1585383626378691736L; /** The two Nystroem filters to be used */ protected Nystroem m_N1; protected Nystroem m_N2; /** The coefficients from CCA regression */ protected Matrix m_wCCA; /** The CCA projection */ protected Matrix m_B1; /** The filter used for standardizing the data */ protected Standardize m_Standardize; /** The filter used to make attributes numeric. */ protected NominalToBinary m_NominalToBinary; /** The filter used to get rid of missing values. */ protected ReplaceMissingValues m_Missing; /** Coefficients used to compensate for standardization of the target */ protected double m_x1 = 1.0; protected double m_x0 = 0.0; /** The sample size for each Nystroem filter */ protected int m_M = 100; /** The kernel function to use. */ protected Kernel m_Kernel = new RBFKernel(); /** The regularization parameter. */ protected double m_Gamma = 0.01; /** Stores the number of labeled instances found in the training set. */ protected int m_numLabeled; /** Whether to apply standardization or not. */ protected boolean m_doNotStandardize; /** * Provides information regarding this class. * * @return string describing the method that this class implements */ public String globalInfo() { return "Implements the XNV method for semi-supervised learning using a kernel function (default: RBFKernel). " + "Standardizes all attributes, including the target, by default. Applies (unsupervised) " + "NominalToBinary and ReplaceMissingValues before anything else is done.\n\n" + "For more information on the algorithm, see\n\n" + getTechnicalInformation().toString(); } /** * Returns a reference to the algorithm implemented by this class. * * @return a reference to the algorithm implemented by this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result = new TechnicalInformation(TechnicalInformation.Type.INPROCEEDINGS); result.setValue(TechnicalInformation.Field.AUTHOR, "Brian McWilliams and David Balduzzi and Joachim M. 
Buhmann"); result.setValue(TechnicalInformation.Field.TITLE, "Correlated random features for fast semi-supervised learning"); result.setValue(TechnicalInformation.Field.BOOKTITLE, "Proc 27th Annual Conference on Neural Information Processing Systems"); result.setValue(TechnicalInformation.Field.PAGES, "440--448"); result.setValue(TechnicalInformation.Field.YEAR, "2013"); result.setValue(TechnicalInformation.Field.URL, "http://papers.nips.cc/paper/5000-correlated-random-features-for-fast-semi-supervised-learning.pdf"); return result; } @OptionMetadata( displayName = "Regularization parameter gamma", description = "The regularization parameter gamma.", displayOrder = 1, commandLineParamName = "G", commandLineParamSynopsis = "-G") public double getGamma() { return m_Gamma; } public void setGamma(double v) {m_Gamma = v; } @OptionMetadata( displayName = "Sample size for Nystroem method", description = "The sample size for the Nystroem method.", displayOrder = 2, commandLineParamName = "M", commandLineParamSynopsis = "-M") public int getM() { return m_M; } public void setM(int v) {m_M = v; } @OptionMetadata( displayName = "Kernel function", description = "The kernel function to use.", displayOrder = 3, commandLineParamName = "K", commandLineParamSynopsis = "-K <kernel specification>") public void setKernel(Kernel kernel) { m_Kernel = kernel; } public Kernel getKernel() { return m_Kernel; } @OptionMetadata( displayName = "Do not apply standardization", description = "If true, standardization will not be performed.", displayOrder = 4, commandLineParamName = "S", commandLineParamSynopsis = "-S") public boolean getDoNotStandardize() { return m_doNotStandardize; } public void setDoNotStandardize(boolean v) {m_doNotStandardize = v; } /** * Turns the given set of instances into a data matrix. * * @param data set of instances * @param center whether to center the matrix * @param transpose whether to transpose the matrix * @return the matrix */ public static DenseMatrix getMatrix(Instances data, boolean center, boolean transpose) { double[] means = new double[data.numAttributes()]; if (center) { for (int j = 0; j < data.numAttributes(); j++) { if (j != data.classIndex()) { means[j] = data.meanOrMode(j); } } } int numColumns = transpose ? data.numInstances() : data.numAttributes() - (data.classIndex() >= 0 ? 1 : 0); int numRows = transpose ? data.numAttributes() - (data.classIndex() >= 0 ? 1 : 0) : data.numInstances(); DenseMatrix X = new DenseMatrix(numRows, numColumns); for (int i = 0; i < data.numInstances(); i++) { Instance inst = data.instance(i); int index = 0; for (int j = 0; j < data.numAttributes(); j++) { if (j != data.classIndex()) { double value = inst.value(j) - means[j]; if (transpose) { X.set(index++, i, value); } else { X.set(i, index++, value); } } } } return X; } /** * Returns the inverse of the given matrix. * * @param M the matrix to invert * @return the inverse */ public static Matrix inverse(Matrix M) throws Exception { if (M.numRows() != M.numColumns()) { throw new IllegalArgumentException("Matrix is not square: cannot invert it."); } Matrix Minv = new DenseMatrix(M.numRows(), M.numRows()); Matrix I = Matrices.identity(M.numRows()); M.solve(I, Minv); return new DenseMatrix(Minv); } /** * Performs Canonical Correlation Analysis (CCA). 
* * @param X1 the first data matrix * @param X2 the second data matrix * @return the eigenvalue decomposition giving the basis in X1 */ public static EVD CCA(Matrix X1, Matrix X2) throws Exception { int M = X1.numRows(); int N = X1.numColumns(); UpperSPDDenseMatrix CX1X1 = (UpperSPDDenseMatrix) new UpperSPDDenseMatrix(M).rank1(X1); CX1X1.scale(1.0 / (N - 1.0)); for (int i = 0; i < M; i++) { CX1X1.set(i, i, CX1X1.get(i, i) + 1e-8); } UpperSPDDenseMatrix CX2X2 = (UpperSPDDenseMatrix) new UpperSPDDenseMatrix(M).rank1(X2); CX2X2.scale(1.0 / (N - 1.0)); for (int i = 0; i < M; i++) { CX2X2.set(i, i, CX2X2.get(i, i) + 1e-8); } // Compute covariance between views Matrix CX1X2 = X1.transBmult(X2, new DenseMatrix(M, M)); CX1X2.scale(1.0 / (N - 1.0)); Matrix CX2X1 = CX1X2.transpose(new DenseMatrix(M, M)); // Establish key matrix and perform eigenvalue decomposition Matrix CX1X1invMultCX1X2 = inverse(CX1X1).mult(CX1X2, new DenseMatrix(M, M)); Matrix CX2X2invMultCX2X1 = inverse(CX2X2).mult(CX2X1, new DenseMatrix(M, M)); Matrix CX1X1invMultCX1X2MultCX2X2invMultCX2X1 = CX1X1invMultCX1X2.mult(CX2X2invMultCX2X1, new DenseMatrix(M, M)); EVD evd = EVD.factorize(CX1X1invMultCX1X2MultCX2X2invMultCX2X1); return evd; } /** * Builds the XNV regressor. * * @param data set of instances serving as training data * @throws Exception */ public void buildClassifier(Instances data) throws Exception { getCapabilities().testWithFail(data); m_Missing = new ReplaceMissingValues(); m_Missing.setInputFormat(data); data = Filter.useFilter(data, m_Missing); m_NominalToBinary = new NominalToBinary(); m_NominalToBinary.setInputFormat(data); data = Filter.useFilter(data, m_NominalToBinary); // Shuffle the data data = new Instances(data); data.randomize(new Random(getSeed())); if (!getDoNotStandardize()) { // Retrieve two different class values int index0 = 0; while (index0 < data.numInstances() && data.instance(index0).classIsMissing()) { index0++; } if (index0 >= data.numInstances()) { throw new Exception("Need at least two instances with different target values."); } double y0 = data.instance(index0).classValue(); int index1 = index0 + 1; while (index1 < data.numInstances() && (data.instance(index1).classIsMissing() || data.instance(index1).classValue() == y0)) { index1++; } if (index1 >= data.numInstances()) { throw new Exception("Need at least two instances with different target values."); } double y1 = data.instance(index1).classValue(); // Apply filter m_Standardize = new Standardize(); m_Standardize.setIgnoreClass(true); m_Standardize.setInputFormat(data); data = Filter.useFilter(data, m_Standardize); // Establish coefficients enabling reversal of filter transformation for target double z0 = data.instance(index0).classValue(); double z1 = data.instance(index1).classValue(); m_x1 = (y0 - y1) / (z0 - z1); m_x0 = (y0 - m_x1 * z0); } // Reduce M if necessary int M = Math.min(m_M, data.numInstances() / 2); // Build first Nystroem filter and generate first view, including covariance matrix m_N1 = new Nystroem(); RemoveRange rr1 = new RemoveRange(); rr1.setInvertSelection(true); rr1.setInstancesIndices("first-" + M); m_N1.setFilter(rr1); m_N1.setKernel((Kernel) new SerializedObject(m_Kernel).getObject()); m_N1.setInputFormat(data); Instances N1data = Filter.useFilter(data, m_N1); Matrix X1 = getMatrix(N1data, true, true); // Build second Nystroem filter and generate second view, including covariance matrix m_N2 = new Nystroem(); RemoveRange rr2 = new RemoveRange(); rr2.setInvertSelection(true); rr2.setInstancesIndices((M + 1) + "-" 
+ (2 * M)); m_N2.setFilter(rr2); m_N2.setKernel((Kernel) new SerializedObject(m_Kernel).getObject()); m_N2.setInputFormat(data); Instances N2data = Filter.useFilter(data, m_N2); Matrix X2 = getMatrix(N2data, true, true); EVD evd = CCA(X1, X2); X1 = X2 = null; N2data = null; double[] e1 = evd.getRealEigenvalues(); m_B1 = evd.getRightEigenvectors(); // Remove eigenvalues that are not positive and corresponding eigenvectors; also take sqrt of eigenvalues ArrayList<Integer> toKeep = new ArrayList<>(e1.length); for (int i = 0; i < e1.length; i++) { if (Double.isNaN(e1[i])) { throw new IllegalStateException("XNV: Eigenvalue is NaN, aborting. Consider modifying parameters."); } if (e1[i] > 0) { toKeep.add(i); } } double[] e1New = new double[toKeep.size()]; Matrix m_B1New = new DenseMatrix(m_B1.numRows(), e1New.length); int currentColumn = 0; for (int index : toKeep) { e1New[currentColumn] = Math.sqrt(e1[index]); // Take square root of eigenvalue for (int j = 0; j < m_B1.numRows(); j++) { m_B1New.set(j, currentColumn, m_B1.get(j, index)); } currentColumn++; } e1 = e1New; m_B1 = m_B1New; // Reduce M accordingly M = toKeep.size(); // Get labeled training data Instances labeledN1 = new Instances(N1data, N1data.numInstances()); for (Instance inst : N1data) { if (!inst.classIsMissing()) { labeledN1.add(inst); } } m_numLabeled = labeledN1.numInstances(); // Get matrix with labels DenseMatrix labels = new DenseMatrix(labeledN1.numInstances(), 1); for (int i = 0; i < labeledN1.numInstances(); i++) { labels.set(i, 0, labeledN1.instance(i).classValue()); } // Compute CCA regression DenseMatrix Z1 = getMatrix(labeledN1, false, false); Matrix Z = Z1.mult(m_B1, new DenseMatrix(labeledN1.numInstances(), M)); Matrix CCA_reg = new DenseMatrix(M, M); Matrix reg = new DenseMatrix(M, M); for (int i = 0; i < e1.length; i++) { CCA_reg.set(i, i, (1.0 - e1[i]) / e1[i]); reg.set(i, i, m_Gamma); } Matrix inv = inverse(Z.transAmult(Z, new DenseMatrix(M, M)).add(CCA_reg).add(reg)); m_wCCA = inv.transBmult(Z, new DenseMatrix(Z.numColumns(), Z.numRows())).mult(labels, new DenseMatrix(M, 1)); } /** * Returns prediction for an instance. * * @param inst * @return a one-element array with the prediction */ public double[] distributionForInstance(Instance inst) throws Exception { m_Missing.input(inst); m_Missing.batchFinished(); inst = m_Missing.output(); m_NominalToBinary.input(inst); m_NominalToBinary.batchFinished(); inst = m_NominalToBinary.output(); if (!getDoNotStandardize()) { m_Standardize.input(inst); inst = m_Standardize.output(); } m_N1.input(inst); inst = m_N1.output(); Matrix result = new DenseMatrix(1, inst.numAttributes() - 1); int index = 0; for (int i = 0; i < inst.numAttributes(); i++) { if (i != inst.classIndex()) { result.set(0, index++, inst.value(i)); } } result = result.mult(m_B1, new DenseMatrix(1, m_B1.numColumns())); result = result.mult(m_wCCA, new DenseMatrix(1, 1)); double[] pred = new double[1]; if (getDoNotStandardize()) { pred[0] = result.get(0, 0); } else { pred[0] = result.get(0, 0) * m_x1 + m_x0; } return pred; } /** * This class implements efficient batch prediction. * * @return true */ public boolean implementsMoreEfficientBatchPrediction() { return true; } /** * Returns predictions for a whole set of instances. 
* * @param insts the instances to make predictions for * @return the 2D array with results */ public double[][] distributionsForInstances(Instances insts) throws Exception { m_Missing = new ReplaceMissingValues(); m_Missing.setInputFormat(insts); insts = Filter.useFilter(insts, m_Missing); m_NominalToBinary = new NominalToBinary(); m_NominalToBinary.setInputFormat(insts); insts = Filter.useFilter(insts, m_NominalToBinary); if (!getDoNotStandardize()) { insts = Filter.useFilter(insts, m_Standardize); } Matrix result = getMatrix(Filter.useFilter(insts, m_N1), false, false); result = result.mult(m_B1, new DenseMatrix(result.numRows(), m_B1.numColumns())); result = result.mult(m_wCCA, new DenseMatrix(insts.numInstances(), 1)); double[][] preds = new double[insts.numInstances()][1]; for (int i = 0; i < insts.numInstances(); i++) { if (getDoNotStandardize()) { preds[i][0] = result.get(i, 0); } else { preds[i][0] = result.get(i, 0) * m_x1 + m_x0; } } return preds; } /** * Outputs the coefficients of the classifier. * * @return a textual description of the classifier */ public String toString() { if (m_wCCA == null) { return "XNV: No model built yet."; } else { return "XNV weight vector (beta) based on " + m_numLabeled + " instances:\n\n" + m_wCCA; } } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capabilities.Capability.NUMERIC_ATTRIBUTES); result.enable(Capabilities.Capability.NOMINAL_ATTRIBUTES); result.enable(Capabilities.Capability.MISSING_VALUES); // class result.enable(Capabilities.Capability.NUMERIC_CLASS); result.enable(Capabilities.Capability.MISSING_CLASS_VALUES); return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 12037 $"); } /** * Generates an XNV predictor. * * @param argv the options */ public static void main(String argv[]) throws Exception { runClassifier(new XNV(), argv); } }
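Reading aid (a summary of the code above, not part of the original source): the dense matrix algebra in CCA() and buildClassifier() amounts to the following. The basis B1 consists of eigenvectors of

\[ C_{X_1X_1}^{-1}\, C_{X_1X_2}\, C_{X_2X_2}^{-1}\, C_{X_2X_1}\, b = \lambda\, b, \qquad e_i = \sqrt{\lambda_i}, \]

and the weight vector fitted on the labeled projections \(Z = Z_1 B_1\) is the regularized least-squares solution

\[ w_{\mathrm{CCA}} = \bigl(Z^{\top} Z + R + \gamma I\bigr)^{-1} Z^{\top} y, \qquad R_{ii} = \frac{1 - e_i}{e_i}, \]

where \(\gamma\) corresponds to m_Gamma and the per-component penalty \(R\) shrinks directions with small canonical correlation \(e_i\) most strongly.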
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/neural/LinearUnit.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * LinearUnit.java * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.functions.neural; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * This can be used by the * neuralnode to perform all its computations (as a Linear unit). * * @author Malcolm Ware (mfw4@cs.waikato.ac.nz) * @version $Revision$ */ public class LinearUnit implements NeuralMethod, RevisionHandler { /** for serialization */ private static final long serialVersionUID = 8572152807755673630L; /** * This function calculates what the output value should be. * @param node The node to calculate the value for. * @return The value. */ public double outputValue(NeuralNode node) { double[] weights = node.getWeights(); NeuralConnection[] inputs = node.getInputs(); double value = weights[0]; for (int noa = 0; noa < node.getNumInputs(); noa++) { value += inputs[noa].outputValue(true) * weights[noa+1]; } return value; } /** * This function calculates what the error value should be. * @param node The node to calculate the error for. * @return The error. */ public double errorValue(NeuralNode node) { //then calculate the error. NeuralConnection[] outputs = node.getOutputs(); int[] oNums = node.getOutputNums(); double error = 0; for (int noa = 0; noa < node.getNumOutputs(); noa++) { error += outputs[noa].errorValue(true) * outputs[noa].weightValue(oNums[noa]); } return error; } /** * This function will calculate what the change in weights should be * and also update them. * @param node The node to update the weights for. * @param learn The learning rate to use. * @param momentum The momentum to use. */ public void updateWeights(NeuralNode node, double learn, double momentum) { NeuralConnection[] inputs = node.getInputs(); double[] cWeights = node.getChangeInWeights(); double[] weights = node.getWeights(); double learnTimesError = 0; learnTimesError = learn * node.errorValue(false); double c = learnTimesError + momentum * cWeights[0]; weights[0] += c; cWeights[0] = c; int stopValue = node.getNumInputs() + 1; for (int noa = 1; noa < stopValue; noa++) { c = learnTimesError * inputs[noa-1].outputValue(false); c += momentum * cWeights[noa]; weights[noa] += c; cWeights[noa] = c; } } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
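In equation form (a paraphrase of the code above, not original documentation), the linear unit computes

\[ o = w_0 + \sum_{i=1}^{n} w_i x_i, \qquad \delta = \sum_{k \in \mathrm{outputs}} \delta_k\, w_k, \qquad \Delta w_i = \eta\, \delta\, x_i + \alpha\, \Delta w_i^{\mathrm{prev}}, \]

with learning rate \(\eta\), momentum \(\alpha\), and \(x_0 \equiv 1\) for the threshold weight. Because the activation is the identity, no derivative factor appears in \(\delta\); compare SigmoidUnit below, which multiplies by \(o(1-o)\).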
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/neural/NeuralConnection.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * NeuralConnection.java * Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.functions.neural; import java.awt.Color; import java.awt.Graphics; import java.io.Serializable; import weka.core.RevisionHandler; /** * Abstract unit in a NeuralNetwork. * * @author Malcolm Ware (mfw4@cs.waikato.ac.nz) * @version $Revision$ */ public abstract class NeuralConnection implements Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = -286208828571059163L; //bitwise flags for the types of unit. /** This unit is not connected to any others. */ public static final int UNCONNECTED = 0; /** This unit is a pure input unit. */ public static final int PURE_INPUT = 1; /** This unit is a pure output unit. */ public static final int PURE_OUTPUT = 2; /** This unit is an input unit. */ public static final int INPUT = 4; /** This unit is an output unit. */ public static final int OUTPUT = 8; /** This flag is set once the unit has a connection. */ public static final int CONNECTED = 16; /////The difference between pure and not is that pure is used to feed /////the neural network the attribute values and the errors on the outputs /////Beyond that they do no calculations, and have certain restrictions /////on the connections they can make. /** The list of inputs to this unit. */ protected NeuralConnection[] m_inputList; /** The list of outputs from this unit. */ protected NeuralConnection[] m_outputList; /** The numbering for the connections at the other end of the input lines. */ protected int[] m_inputNums; /** The numbering for the connections at the other end of the out lines. */ protected int[] m_outputNums; /** The number of inputs. */ protected int m_numInputs; /** The number of outputs. */ protected int m_numOutputs; /** The output value for this unit, NaN if not calculated. */ protected double m_unitValue; /** The error value for this unit, NaN if not calculated. */ protected double m_unitError; /** True if the weights have already been updated. */ protected boolean m_weightsUpdated; /** The string that uniquely (provided naming is done properly) identifies * this unit. */ protected String m_id; /** The type of unit this is. */ protected int m_type; /** The x coord of this unit purely for displaying purposes. */ protected double m_x; /** The y coord of this unit purely for displaying purposes. */ protected double m_y; /** * Constructs The unit with the basic connection information prepared for * use. 
* * @param id the unique id of the unit */ public NeuralConnection(String id) { m_id = id; m_inputList = new NeuralConnection[0]; m_outputList = new NeuralConnection[0]; m_inputNums = new int[0]; m_outputNums = new int[0]; m_numInputs = 0; m_numOutputs = 0; m_unitValue = Double.NaN; m_unitError = Double.NaN; m_weightsUpdated = false; m_x = 0; m_y = 0; m_type = UNCONNECTED; } /** * @return The identity string of this unit. */ public String getId() { return m_id; } /** * @return The type of this unit. */ public int getType() { return m_type; } /** * @param t The new type of this unit. */ public void setType(int t) { m_type = t; } /** * Call this to reset the unit for another run. * It is expected that this unit will call the reset functions of all * input units to it. It is also expected that this will not be done * if the unit has already been reset (or at least appears to be). */ public abstract void reset(); /** * Call this to get the output value of this unit. * @param calculate True if the value should be calculated if it hasn't been * already. * @return The output value, or NaN, if the value has not been calculated. */ public abstract double outputValue(boolean calculate); /** * Call this to get the error value of this unit. * @param calculate True if the value should be calculated if it hasn't been * already. * @return The error value, or NaN, if the value has not been calculated. */ public abstract double errorValue(boolean calculate); /** * Call this to have the connection save the current * weights. */ public abstract void saveWeights(); /** * Call this to have the connection restore from the saved * weights. */ public abstract void restoreWeights(); /** * Call this to get the weight value on a particular connection. * @param n The connection number to get the weight for, -1 if the threshold * weight should be returned. * @return This function will default to return 1. If overridden, it should * return the value for the specified connection or if -1 then it should * return the threshold value. If no value exists for the specified * connection, NaN will be returned. */ public double weightValue(int n) { return 1; } /** * Call this function to update the weight values at this unit. * After the weights have been updated at this unit, all the * input connections will then be called from this to have their * weights updated. * @param l The learning rate to use. * @param m The momentum to use. */ public void updateWeights(double l, double m) { //the action the subclasses should perform is up to them //but if they override they should make a call to this to //call the method for all their inputs. if (!m_weightsUpdated) { for (int noa = 0; noa < m_numInputs; noa++) { m_inputList[noa].updateWeights(l, m); } m_weightsUpdated = true; } } /** * Use this to get easy access to the inputs. * It is not advised to change the entries in this list * (use the connecting and disconnecting functions to do that) * @return The inputs list. */ public NeuralConnection[] getInputs() { return m_inputList; } /** * Use this to get easy access to the outputs. * It is not advised to change the entries in this list * (use the connecting and disconnecting functions to do that) * @return The outputs list. */ public NeuralConnection[] getOutputs() { return m_outputList; } /** * Use this to get easy access to the input numbers. * It is not advised to change the entries in this list * (use the connecting and disconnecting functions to do that) * @return The input nums list.
*/ public int[] getInputNums() { return m_inputNums; } /** * Use this to get easy access to the output numbers. * It is not advised to change the entries in this list * (use the connecting and disconnecting functions to do that) * @return The outputs list. */ public int[] getOutputNums() { return m_outputNums; } /** * @return the x coord. */ public double getX() { return m_x; } /** * @return the y coord. */ public double getY() { return m_y; } /** * @param x The new value for its x pos. */ public void setX(double x) { m_x = x; } /** * @param y The new value for its y pos. */ public void setY(double y) { m_y = y; } /** * Call this function to determine if the point at x,y is on the unit. * @param g The graphics context for font size info. * @param x The x coord. * @param y The y coord. * @param w The width of the display. * @param h The height of the display. * @return True if the point is on the unit, false otherwise. */ public boolean onUnit(Graphics g, int x, int y, int w, int h) { int m = (int)(m_x * w); int c = (int)(m_y * h); if (x > m + 10 || x < m - 10 || y > c + 10 || y < c - 10) { return false; } return true; } /** * Call this function to draw the node. * @param g The graphics context. * @param w The width of the drawing area. * @param h The height of the drawing area. */ public void drawNode(Graphics g, int w, int h) { if ((m_type & OUTPUT) == OUTPUT) { g.setColor(Color.orange); } else { g.setColor(Color.red); } g.fillOval((int)(m_x * w) - 9, (int)(m_y * h) - 9, 19, 19); g.setColor(Color.gray); g.fillOval((int)(m_x * w) - 5, (int)(m_y * h) - 5, 11, 11); } /** * Call this function to draw the node highlighted. * @param g The graphics context. * @param w The width of the drawing area. * @param h The height of the drawing area. */ public void drawHighlight(Graphics g, int w, int h) { drawNode(g, w, h); g.setColor(Color.yellow); g.fillOval((int)(m_x * w) - 5, (int)(m_y * h) - 5, 11, 11); } /** * Call this function to draw the node's input connections. * @param g The graphics context. * @param w The width of the drawing area. * @param h The height of the drawing area. */ public void drawInputLines(Graphics g, int w, int h) { g.setColor(Color.black); int px = (int)(m_x * w); int py = (int)(m_y * h); for (int noa = 0; noa < m_numInputs; noa++) { g.drawLine((int)(m_inputList[noa].getX() * w) , (int)(m_inputList[noa].getY() * h) , px, py); } } /** * Call this function to draw the node's output connections. * @param g The graphics context. * @param w The width of the drawing area. * @param h The height of the drawing area. */ public void drawOutputLines(Graphics g, int w, int h) { g.setColor(Color.black); int px = (int)(m_x * w); int py = (int)(m_y * h); for (int noa = 0; noa < m_numOutputs; noa++) { g.drawLine(px, py , (int)(m_outputList[noa].getX() * w) , (int)(m_outputList[noa].getY() * h)); } } /** * This will connect the specified unit to be an input to this unit. * @param i The unit. * @param n Its connection number for this connection. * @return True if the connection was made, false otherwise. */ protected boolean connectInput(NeuralConnection i, int n) { for (int noa = 0; noa < m_numInputs; noa++) { if (i == m_inputList[noa]) { return false; } } if (m_numInputs >= m_inputList.length) { //then allocate more space to it. allocateInputs(); } m_inputList[m_numInputs] = i; m_inputNums[m_numInputs] = n; m_numInputs++; return true; } /** * This will allocate more space for input connection information * if the arrays for this have been filled up.
*/ protected void allocateInputs() { NeuralConnection[] temp1 = new NeuralConnection[m_inputList.length + 15]; int[] temp2 = new int[m_inputNums.length + 15]; for (int noa = 0; noa < m_numInputs; noa++) { temp1[noa] = m_inputList[noa]; temp2[noa] = m_inputNums[noa]; } m_inputList = temp1; m_inputNums = temp2; } /** * This will connect the specified unit to be an output to this unit. * @param o The unit. * @param n Its connection number for this connection. * @return True if the connection was made, false otherwise. */ protected boolean connectOutput(NeuralConnection o, int n) { for (int noa = 0; noa < m_numOutputs; noa++) { if (o == m_outputList[noa]) { return false; } } if (m_numOutputs >= m_outputList.length) { //then allocate more space to it. allocateOutputs(); } m_outputList[m_numOutputs] = o; m_outputNums[m_numOutputs] = n; m_numOutputs++; return true; } /** * Allocates more space for output connection information * if the arrays have been filled up. */ protected void allocateOutputs() { NeuralConnection[] temp1 = new NeuralConnection[m_outputList.length + 15]; int[] temp2 = new int[m_outputNums.length + 15]; for (int noa = 0; noa < m_numOutputs; noa++) { temp1[noa] = m_outputList[noa]; temp2[noa] = m_outputNums[noa]; } m_outputList = temp1; m_outputNums = temp2; } /** * This will disconnect the input with the specific connection number * from this node (only on this end however). * @param i The unit to disconnect. * @param n The connection number at the other end, -1 if all the connections * to this unit should be severed. * @return True if the connection was removed, false if the connection was * not found. */ protected boolean disconnectInput(NeuralConnection i, int n) { int loc = -1; boolean removed = false; do { loc = -1; for (int noa = 0; noa < m_numInputs; noa++) { if (i == m_inputList[noa] && (n == -1 || n == m_inputNums[noa])) { loc = noa; break; } } if (loc >= 0) { for (int noa = loc+1; noa < m_numInputs; noa++) { m_inputList[noa-1] = m_inputList[noa]; m_inputNums[noa-1] = m_inputNums[noa]; //set the other end to have the right connection number. m_inputList[noa-1].changeOutputNum(m_inputNums[noa-1], noa-1); } m_numInputs--; removed = true; } } while (n == -1 && loc != -1); return removed; } /** * This function will remove all the inputs to this unit. * In doing so it will also terminate the connections at the other end. */ public void removeAllInputs() { for (int noa = 0; noa < m_numInputs; noa++) { //this command will simply remove any connections this node has //with the other in 1 go, rather than separately. m_inputList[noa].disconnectOutput(this, -1); } //now reset the inputs. m_inputList = new NeuralConnection[0]; setType(getType() & (~INPUT)); if (getNumOutputs() == 0) { setType(getType() & (~CONNECTED)); } m_inputNums = new int[0]; m_numInputs = 0; } /** * Changes the connection value information for one of the connections. * @param n The connection number to change. * @param v The value to change it to. */ protected void changeInputNum(int n, int v) { if (n >= m_numInputs || n < 0) { return; } m_inputNums[n] = v; } /** * This will disconnect the output with the specific connection number * from this node (only on this end however). * @param o The unit to disconnect. * @param n The connection number at the other end, -1 if all the connections * to this unit should be severed. * @return True if the connection was removed, false if the connection was * not found.
*/ protected boolean disconnectOutput(NeuralConnection o, int n) { int loc = -1; boolean removed = false; do { loc = -1; for (int noa = 0; noa < m_numOutputs; noa++) { if (o == m_outputList[noa] && (n == -1 || n == m_outputNums[noa])) { loc = noa; break; } } if (loc >= 0) { for (int noa = loc+1; noa < m_numOutputs; noa++) { m_outputList[noa-1] = m_outputList[noa]; m_outputNums[noa-1] = m_outputNums[noa]; //set the other end to have the right connection number m_outputList[noa-1].changeInputNum(m_outputNums[noa-1], noa-1); } m_numOutputs--; removed = true; } } while (n == -1 && loc != -1); return removed; } /** * This function will remove all outputs from this unit. * In doing so it will also terminate the connections at the other end. */ public void removeAllOutputs() { for (int noa = 0; noa < m_numOutputs; noa++) { //this command will simply remove any connections this node has //with the other in 1 go, rather than separately. m_outputList[noa].disconnectInput(this, -1); } //now reset the outputs. m_outputList = new NeuralConnection[0]; m_outputNums = new int[0]; setType(getType() & (~OUTPUT)); if (getNumInputs() == 0) { setType(getType() & (~CONNECTED)); } m_numOutputs = 0; } /** * Changes the connection value information for one of the connections. * @param n The connection number to change. * @param v The value to change it to. */ protected void changeOutputNum(int n, int v) { if (n >= m_numOutputs || n < 0) { return; } m_outputNums[n] = v; } /** * @return The number of input connections. */ public int getNumInputs() { return m_numInputs; } /** * @return The number of output connections. */ public int getNumOutputs() { return m_numOutputs; } /** * Connects two units together. * @param s The source unit. * @param t The target unit. * @return True if the units were connected, false otherwise. */ public static boolean connect(NeuralConnection s, NeuralConnection t) { if (s == null || t == null) { return false; } //this ensures that there is no existing connection between these //two units already. This will also cause the current weight there to be //lost disconnect(s, t); if (s == t) { return false; } if ((t.getType() & PURE_INPUT) == PURE_INPUT) { return false; //target is an input node. } if ((s.getType() & PURE_OUTPUT) == PURE_OUTPUT) { return false; //source is an output node } if ((s.getType() & PURE_INPUT) == PURE_INPUT && (t.getType() & PURE_OUTPUT) == PURE_OUTPUT) { return false; //there is no actual working node in use } if ((t.getType() & PURE_OUTPUT) == PURE_OUTPUT && t.getNumInputs() > 0) { return false; //more than 1 node is trying to feed a particular output } if ((t.getType() & PURE_OUTPUT) == PURE_OUTPUT && (s.getType() & OUTPUT) == OUTPUT) { return false; //an output node already feeding out a final answer } if (!s.connectOutput(t, t.getNumInputs())) { return false; } if (!t.connectInput(s, s.getNumOutputs() - 1)) { s.disconnectOutput(t, t.getNumInputs()); return false; } //now amend the type. if ((s.getType() & PURE_INPUT) == PURE_INPUT) { t.setType(t.getType() | INPUT); } else if ((t.getType() & PURE_OUTPUT) == PURE_OUTPUT) { s.setType(s.getType() | OUTPUT); } t.setType(t.getType() | CONNECTED); s.setType(s.getType() | CONNECTED); return true; } /** * Disconnects two units. * @param s The source unit. * @param t The target unit. * @return True if the units were disconnected, false if they weren't * (probably due to there being no connection).
*/ public static boolean disconnect(NeuralConnection s, NeuralConnection t) { if (s == null || t == null) { return false; } boolean stat1 = s.disconnectOutput(t, -1); boolean stat2 = t.disconnectInput(s, -1); if (stat1 && stat2) { if ((s.getType() & PURE_INPUT) == PURE_INPUT) { t.setType(t.getType() & (~INPUT)); } else if ((t.getType() & (PURE_OUTPUT)) == PURE_OUTPUT) { s.setType(s.getType() & (~OUTPUT)); } if (s.getNumInputs() == 0 && s.getNumOutputs() == 0) { s.setType(s.getType() & (~CONNECTED)); } if (t.getNumInputs() == 0 && t.getNumOutputs() == 0) { t.setType(t.getType() & (~CONNECTED)); } } return stat1 && stat2; } }
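A minimal wiring sketch (illustrative, not part of the library source) showing the two-sided connect/disconnect protocol implemented above, using the NeuralNode and unit classes from this package:

import java.util.Random;
import weka.classifiers.functions.neural.LinearUnit;
import weka.classifiers.functions.neural.NeuralConnection;
import weka.classifiers.functions.neural.NeuralNode;
import weka.classifiers.functions.neural.SigmoidUnit;

public class WiringSketch {
  public static void main(String[] args) {
    Random r = new Random(1);
    // a sigmoid hidden node feeding a linear output node
    NeuralNode hidden = new NeuralNode("h0", r, new SigmoidUnit());
    NeuralNode out = new NeuralNode("o0", r, new LinearUnit());
    // connect() registers the link on both ends and sets the CONNECTED flag
    boolean ok = NeuralConnection.connect(hidden, out);
    System.out.println("connected: " + ok + ", inputs at out: " + out.getNumInputs());
    // disconnect() severs both ends and clears CONNECTED once nothing remains
    NeuralConnection.disconnect(hidden, out);
  }
}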
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/neural/NeuralMethod.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * NeuralMethod.java * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.functions.neural; import java.io.Serializable; /** * This is an interface used to create classes that can be used by the * neuralnode to perform all its computations. * * @author Malcolm Ware (mfw4@cs.waikato.ac.nz) * @version $Revision$ */ public interface NeuralMethod extends Serializable { /** * This function calculates what the output value should be. * @param node The node to calculate the value for. * @return The value. */ double outputValue(NeuralNode node); /** * This function calculates what the error value should be. * @param node The node to calculate the error for. * @return The error. */ double errorValue(NeuralNode node); /** * This function will calculate what the change in weights should be * and also update them. * @param node The node to update the weights for. * @param learn The learning rate to use. * @param momentum The momentum to use. */ void updateWeights(NeuralNode node, double learn, double momentum); }
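Because NeuralMethod is the single extension point for a node's behaviour, a new activation only needs the three methods above. A hypothetical tanh unit (a sketch mirroring the shipped LinearUnit/SigmoidUnit, not part of the library) could look like this:

package weka.classifiers.functions.neural;

/** Hypothetical tanh activation, structured like the shipped units. */
public class TanhUnit implements NeuralMethod {
  private static final long serialVersionUID = 1L;

  public double outputValue(NeuralNode node) {
    double[] weights = node.getWeights();
    NeuralConnection[] inputs = node.getInputs();
    double value = weights[0]; // threshold weight
    for (int i = 0; i < node.getNumInputs(); i++) {
      value += inputs[i].outputValue(true) * weights[i + 1];
    }
    return Math.tanh(value);
  }

  public double errorValue(NeuralNode node) {
    NeuralConnection[] outputs = node.getOutputs();
    int[] oNums = node.getOutputNums();
    double error = 0;
    for (int i = 0; i < node.getNumOutputs(); i++) {
      error += outputs[i].errorValue(true) * outputs[i].weightValue(oNums[i]);
    }
    double value = node.outputValue(false);
    return error * (1 - value * value); // tanh'(x) = 1 - tanh(x)^2
  }

  public void updateWeights(NeuralNode node, double learn, double momentum) {
    NeuralConnection[] inputs = node.getInputs();
    double[] cWeights = node.getChangeInWeights();
    double[] weights = node.getWeights();
    double learnTimesError = learn * node.errorValue(false);
    // delta rule with momentum, same pattern as LinearUnit/SigmoidUnit
    double c = learnTimesError + momentum * cWeights[0];
    weights[0] += c;
    cWeights[0] = c;
    for (int i = 1; i <= node.getNumInputs(); i++) {
      c = learnTimesError * inputs[i - 1].outputValue(false) + momentum * cWeights[i];
      weights[i] += c;
      cWeights[i] = c;
    }
  }
}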
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/neural/NeuralNode.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * NeuralNode.java * Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.functions.neural; import weka.core.RevisionUtils; import java.util.Random; /** * This class is used to represent a node in the neuralnet. * * @author Malcolm Ware (mfw4@cs.waikato.ac.nz) * @version $Revision$ */ public class NeuralNode extends NeuralConnection { /** for serialization */ private static final long serialVersionUID = -1085750607680839163L; /** The weights for each of the input connections, and the threshold. */ private double[] m_weights; /** The best (lowest error) weights. Only used when validation set is used */ private double[] m_bestWeights; /** The change in the weights. */ private double[] m_changeInWeights; /** The random number generator used to set the initial weights. */ private Random m_random; /** Performs the operations for this node. Currently this * defines that the node is either a sigmoid or a linear unit. */ private NeuralMethod m_methods; /** * @param id The string name for this node (used to id this node). * @param r A random number generator used to generate initial weights. * @param m The methods this node should use to update. */ public NeuralNode(String id, Random r, NeuralMethod m) { super(id); m_weights = new double[1]; m_bestWeights = new double[1]; m_changeInWeights = new double[1]; m_random = r; m_weights[0] = m_random.nextDouble() * .1 - .05; m_changeInWeights[0] = 0; m_methods = m; } /** * Set how this node should operate (note that the neural method has no * internal state, so the same object can be used by any number of nodes). * @param m The new method. */ public void setMethod(NeuralMethod m) { m_methods = m; } public NeuralMethod getMethod() { return m_methods; } /** * Call this to get the output value of this unit. * @param calculate True if the value should be calculated if it hasn't been * already. * @return The output value, or NaN, if the value has not been calculated. */ public double outputValue(boolean calculate) { if (Double.isNaN(m_unitValue) && calculate) { //then calculate the output value; m_unitValue = m_methods.outputValue(this); } return m_unitValue; } /** * Call this to get the error value of this unit. * @param calculate True if the value should be calculated if it hasn't been * already. * @return The error value, or NaN, if the value has not been calculated. */ public double errorValue(boolean calculate) { if (!Double.isNaN(m_unitValue) && Double.isNaN(m_unitError) && calculate) { //then calculate the error. m_unitError = m_methods.errorValue(this); } return m_unitError; } /** * Call this to reset the value and error for this unit, ready for the next * run. This will also call the reset function of all units that are * connected as inputs to this one. * This is also the time that the update for the listeners will be performed.
*/ public void reset() { if (!Double.isNaN(m_unitValue) || !Double.isNaN(m_unitError)) { m_unitValue = Double.NaN; m_unitError = Double.NaN; m_weightsUpdated = false; for (int noa = 0; noa < m_numInputs; noa++) { m_inputList[noa].reset(); } } } /** * Call this to have the connection save the current * weights. */ public void saveWeights() { // copy the current weights System.arraycopy(m_weights, 0, m_bestWeights, 0, m_weights.length); // tell inputs to save weights for (int i = 0; i < m_numInputs; i++) { m_inputList[i].saveWeights(); } } /** * Call this to have the connection restore from the saved * weights. */ public void restoreWeights() { // copy the saved best weights back into the weights System.arraycopy(m_bestWeights, 0, m_weights, 0, m_weights.length); // tell inputs to restore weights for (int i = 0; i < m_numInputs; i++) { m_inputList[i].restoreWeights(); } } /** * Call this to get the weight value on a particular connection. * @param n The connection number to get the weight for, -1 if the threshold * weight should be returned. * @return The value for the specified connection or if -1 then it should * return the threshold value. If no value exists for the specified * connection, NaN will be returned. */ public double weightValue(int n) { if (n >= m_numInputs || n < -1) { return Double.NaN; } return m_weights[n + 1]; } /** * Call this function to get the weights array. * This will also allow the weights to be updated. * @return The weights array. */ public double[] getWeights() { return m_weights; } /** * Call this function to get the change in weights array. * This will also allow the change in weights to be updated. * @return The change in weights array. */ public double[] getChangeInWeights() { return m_changeInWeights; } /** * Call this function to update the weight values at this unit. * After the weights have been updated at this unit, all the * input connections will then be called from this to have their * weights updated. * @param l The learning rate to use. * @param m The momentum to use. */ public void updateWeights(double l, double m) { if (!m_weightsUpdated && !Double.isNaN(m_unitError)) { m_methods.updateWeights(this, l, m); //note that the super call to update the inputs is done here and //not in the m_method updateWeights, because it is not deemed to be //required to update the weights at this node (while the error and output //value also need to be recursively calculated) super.updateWeights(l, m); //to call all of the inputs. } } /** * This will connect the specified unit to be an input to this unit. * @param i The unit. * @param n Its connection number for this connection. * @return True if the connection was made, false otherwise. */ protected boolean connectInput(NeuralConnection i, int n) { //the function that this overrides can do most of the work. if (!super.connectInput(i, n)) { return false; } //note that the weights are shifted 1 forward in the array so //it leaves the numinputs aligned on the space the weight needs to go. m_weights[m_numInputs] = m_random.nextDouble() * .1 - .05; m_changeInWeights[m_numInputs] = 0; return true; } /** * This will allocate more space for input connection information * if the arrays for this have been filled up.
*/ protected void allocateInputs() { NeuralConnection[] temp1 = new NeuralConnection[m_inputList.length + 15]; int[] temp2 = new int[m_inputNums.length + 15]; double[] temp4 = new double[m_weights.length + 15]; double[] temp5 = new double[m_changeInWeights.length + 15]; double[] temp6 = new double[m_bestWeights.length + 15]; temp4[0] = m_weights[0]; temp5[0] = m_changeInWeights[0]; temp6[0] = m_bestWeights[0]; for (int noa = 0; noa < m_numInputs; noa++) { temp1[noa] = m_inputList[noa]; temp2[noa] = m_inputNums[noa]; temp4[noa+1] = m_weights[noa+1]; temp5[noa+1] = m_changeInWeights[noa+1]; temp6[noa+1] = m_bestWeights[noa+1]; } m_inputList = temp1; m_inputNums = temp2; m_weights = temp4; m_changeInWeights = temp5; m_bestWeights = temp6; } /** * This will disconnect the input with the specific connection number * From this node (only on this end however). * @param i The unit to disconnect. * @param n The connection number at the other end, -1 if all the connections * to this unit should be severed (not the same as removeAllInputs). * @return True if the connection was removed, false if the connection was * not found. */ protected boolean disconnectInput(NeuralConnection i, int n) { int loc = -1; boolean removed = false; do { loc = -1; for (int noa = 0; noa < m_numInputs; noa++) { if (i == m_inputList[noa] && (n == -1 || n == m_inputNums[noa])) { loc = noa; break; } } if (loc >= 0) { for (int noa = loc+1; noa < m_numInputs; noa++) { m_inputList[noa-1] = m_inputList[noa]; m_inputNums[noa-1] = m_inputNums[noa]; m_weights[noa] = m_weights[noa+1]; m_changeInWeights[noa] = m_changeInWeights[noa+1]; m_inputList[noa-1].changeOutputNum(m_inputNums[noa-1], noa-1); } m_numInputs--; removed = true; } } while (n == -1 && loc != -1); return removed; } /** * This function will remove all the inputs to this unit. * In doing so it will also terminate the connections at the other end. */ public void removeAllInputs() { super.removeAllInputs(); double temp1 = m_weights[0]; double temp2 = m_changeInWeights[0]; m_weights = new double[1]; m_changeInWeights = new double[1]; m_weights[0] = temp1; m_changeInWeights[0] = temp2; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
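A note on the indexing above (inferred from the code, not original documentation): m_weights[0] holds the threshold and m_weights[i + 1] pairs with m_inputList[i], which is why allocateInputs() and disconnectInput() shift the weight arrays by one slot relative to the connection arrays. For example:

// assuming 'out' was wired as in the earlier sketch
double bias = out.weightValue(-1); // threshold, stored in m_weights[0]
double w0 = out.weightValue(0);    // weight of input connection 0, stored in m_weights[1]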
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/neural/SigmoidUnit.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * SigmoidUnit.java * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.functions.neural; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * This can be used by the * neuralnode to perform all its computations (as a sigmoid unit). * * @author Malcolm Ware (mfw4@cs.waikato.ac.nz) * @version $Revision$ */ public class SigmoidUnit implements NeuralMethod, RevisionHandler { /** for serialization */ private static final long serialVersionUID = -5162958458177475652L; /** * This function calculates what the output value should be. * @param node The node to calculate the value for. * @return The value. */ public double outputValue(NeuralNode node) { double[] weights = node.getWeights(); NeuralConnection[] inputs = node.getInputs(); double value = weights[0]; for (int noa = 0; noa < node.getNumInputs(); noa++) { value += inputs[noa].outputValue(true) * weights[noa+1]; } //this I got from the Neural Network FAQ to combat overflow //pretty simple solution really :) if (value < -45) { value = 0; } else if (value > 45) { value = 1; } else { value = 1 / (1 + Math.exp(-value)); } return value; } /** * This function calculates what the error value should be. * @param node The node to calculate the error for. * @return The error. */ public double errorValue(NeuralNode node) { //then calculate the error. NeuralConnection[] outputs = node.getOutputs(); int[] oNums = node.getOutputNums(); double error = 0; for (int noa = 0; noa < node.getNumOutputs(); noa++) { error += outputs[noa].errorValue(true) * outputs[noa].weightValue(oNums[noa]); } double value = node.outputValue(false); error *= value * (1 - value); return error; } /** * This function will calculate what the change in weights should be * and also update them. * @param node The node to update the weights for. * @param learn The learning rate to use. * @param momentum The momentum to use. */ public void updateWeights(NeuralNode node, double learn, double momentum) { NeuralConnection[] inputs = node.getInputs(); double[] cWeights = node.getChangeInWeights(); double[] weights = node.getWeights(); double learnTimesError = 0; learnTimesError = learn * node.errorValue(false); double c = learnTimesError + momentum * cWeights[0]; weights[0] += c; cWeights[0] = c; int stopValue = node.getNumInputs() + 1; for (int noa = 1; noa < stopValue; noa++) { c = learnTimesError * inputs[noa-1].outputValue(false); c += momentum * cWeights[noa]; weights[noa] += c; cWeights[noa] = c; } } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
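In equation form (a paraphrase of the code, not original documentation):

\[ o = \sigma(\mathrm{net}) = \frac{1}{1 + e^{-\mathrm{net}}}, \qquad \delta = o\,(1 - o) \sum_{k \in \mathrm{outputs}} \delta_k\, w_k, \]

where the clamp to 0 below \(\mathrm{net} = -45\) and to 1 above \(\mathrm{net} = 45\) avoids floating-point overflow in the exponential, since \(e^{45} \approx 3.5 \times 10^{19}\) already saturates the sigmoid to machine precision.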
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/supportVector/CachedKernel.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * CachedKernel.java * Copyright (C) 2005-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.functions.supportVector; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.Utils; /** * Base class for RBFKernel and PolyKernel that implements a simple LRU (least-recently-used) cache if the cache size is set to a value > 0. Otherwise it uses a full cache. * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Shane Legg (shane@intelligenesis.net) (sparse vector code) * @author Stuart Inglis (stuart@reeltwo.com) (sparse vector code) * @author Steven Hugg (hugg@fasterlight.com) (refactored, LRU cache) * @author Bernhard Pfahringer (bernhard@cs.waikato.ac.nz) (full cache) * @version $Revision$ */ public abstract class CachedKernel extends Kernel { /** for serialization */ private static final long serialVersionUID = 702810182699015136L; /** Counts the number of kernel evaluations. */ protected int m_kernelEvals; /** Counts the number of kernel cache hits. */ protected int m_cacheHits; /** The size of the cache (a prime number) */ protected int m_cacheSize = 250007; /** Kernel cache */ protected double[] m_storage; /** The keys of the cache entries */ protected long[] m_keys; /** The kernel matrix if full cache is used (i.e. size is set to 0) */ protected double[][] m_kernelMatrix; /** The number of instances in the dataset */ protected int m_numInsts; /** number of cache slots in an entry */ protected int m_cacheSlots = 4; /** * Default constructor - does nothing. */ public CachedKernel() { super(); } /** * Initializes the kernel cache. The actual size of the cache in bytes is (64 * cacheSize). * * @param data * the data to use * @param cacheSize * the cache size * @throws Exception * if something goes wrong */ protected CachedKernel(final Instances data, final int cacheSize) throws Exception { super(); this.setCacheSize(cacheSize); this.buildKernel(data); } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> result = new Vector<Option>(); result.addElement(new Option("\tThe size of the cache (a prime number), 0 for full cache and \n" + "\t-1 to turn it off.\n" + "\t(default: 250007)", "C", 1, "-C <num>")); result.addAll(Collections.list(super.listOptions())); return result.elements(); } /** * Parses a given list of options.
* <p/> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String tmpStr; tmpStr = Utils.getOption('C', options); if (tmpStr.length() != 0) { this.setCacheSize(Integer.parseInt(tmpStr)); } else { this.setCacheSize(250007); } super.setOptions(options); } /** * Gets the current settings of the Kernel. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> result = new Vector<String>(); result.add("-C"); result.add("" + this.getCacheSize()); Collections.addAll(result, super.getOptions()); return result.toArray(new String[result.size()]); } /** * This method is overridden in subclasses to implement specific kernels. * * @param id1 * the index of instance 1 * @param id2 * the index of instance 2 * @param inst1 * the instance 1 object * @return the dot product * @throws Exception * if something goes wrong */ protected abstract double evaluate(int id1, int id2, Instance inst1) throws Exception; /** * Implements the abstract function of Kernel using the cache. This method uses the evaluate() method to do the actual dot product. * * @param id1 * the index of the first instance in the dataset * @param id2 * the index of the second instance in the dataset * @param inst1 * the instance corresponding to id1 (used if id1 == -1) * @return the result of the kernel function * @throws Exception * if something goes wrong */ @Override public double eval(final int id1, final int id2, final Instance inst1) throws Exception { double result = 0; long key = -1; int location = -1; // we can only cache if we know the indexes and caching is not // disabled (m_cacheSize == -1) if ((id1 >= 0) && (this.m_cacheSize != -1)) { // Use full cache? if (this.m_cacheSize == 0) { if (this.m_kernelMatrix == null) { this.m_kernelMatrix = new double[this.m_data.numInstances()][]; for (int i = 0; i < this.m_data.numInstances(); i++) { this.m_kernelMatrix[i] = new double[i + 1]; for (int j = 0; j <= i; j++) { this.m_kernelEvals++; this.m_kernelMatrix[i][j] = this.evaluate(i, j, this.m_data.instance(i)); } } } this.m_cacheHits++; result = (id1 > id2) ? 
this.m_kernelMatrix[id1][id2] : this.m_kernelMatrix[id2][id1]; return result; } // Use LRU cache if (id1 > id2) { key = (id1 + ((long) id2 * this.m_numInsts)); } else { key = (id2 + ((long) id1 * this.m_numInsts)); } location = (int) (key % this.m_cacheSize) * this.m_cacheSlots; int loc = location; for (int i = 0; i < this.m_cacheSlots; i++) { long thiskey = this.m_keys[loc]; if (thiskey == 0) { break; // empty slot, so break out of loop early } if (thiskey == (key + 1)) { this.m_cacheHits++; // move entry to front of cache (LRU) by swapping // only if it's not already at the front of cache if (i > 0) { double tmps = this.m_storage[loc]; this.m_storage[loc] = this.m_storage[location]; this.m_keys[loc] = this.m_keys[location]; this.m_storage[location] = tmps; this.m_keys[location] = thiskey; return tmps; } else { return this.m_storage[loc]; } } loc++; } } result = this.evaluate(id1, id2, inst1); this.m_kernelEvals++; // store result in cache if ((key != -1) && (this.m_cacheSize != -1)) { // move all cache slots forward one array index // to make room for the new entry System.arraycopy(this.m_keys, location, this.m_keys, location + 1, this.m_cacheSlots - 1); System.arraycopy(this.m_storage, location, this.m_storage, location + 1, this.m_cacheSlots - 1); this.m_storage[location] = result; this.m_keys[location] = (key + 1); } return result; } /** * Returns the number of times eval() has been called. * * @return the number of kernel evaluations. */ @Override public int numEvals() { return this.m_kernelEvals; } /** * Returns the number of cache hits on dot products. * * @return the number of cache hits. */ @Override public int numCacheHits() { return this.m_cacheHits; } /** * Frees the cache used by the kernel. */ @Override public void clean() { this.m_storage = null; this.m_keys = null; this.m_kernelMatrix = null; } /** * Calculates a dot product between two instances. * * @param inst1 * the first instance * @param inst2 * the second instance * @return the dot product of the two instances. * @throws Exception * if an error occurs */ protected final double dotProd(final Instance inst1, final Instance inst2) throws Exception { double result = 0; // we can do a fast dot product int n1 = inst1.numValues(); int n2 = inst2.numValues(); int classIndex = this.m_data.classIndex(); for (int p1 = 0, p2 = 0; p1 < n1 && p2 < n2;) { if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } int ind1 = inst1.index(p1); int ind2 = inst2.index(p2); if (ind1 == ind2) { if (ind1 != classIndex) { result += inst1.valueSparse(p1) * inst2.valueSparse(p2); } p1++; p2++; } else if (ind1 > ind2) { p2++; } else { p1++; } } return (result); } /** * Sets the size of the cache to use (a prime number) * * @param value * the size of the cache */ public void setCacheSize(final int value) { if (value >= -1) { this.m_cacheSize = value; this.clean(); } else { System.out.println("Cache size cannot be smaller than -1 (provided: " + value + ")!"); } } /** * Gets the size of the cache * * @return the cache size */ public int getCacheSize() { return this.m_cacheSize; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String cacheSizeTipText() { return "The size of the cache (a prime number), 0 for full cache and -1 to turn it off."; } /** * Initializes variables etc.
* * @param data * the data to use */ @Override protected void initVars(final Instances data) { super.initVars(data); this.m_kernelEvals = 0; this.m_cacheHits = 0; this.m_numInsts = this.m_data.numInstances(); if (this.getCacheSize() > 0) { // Use LRU cache this.m_storage = new double[this.m_cacheSize * this.m_cacheSlots]; this.m_keys = new long[this.m_cacheSize * this.m_cacheSlots]; } else { this.m_storage = null; this.m_keys = null; this.m_kernelMatrix = null; } } /** * builds the kernel with the given data. Initializes the kernel cache. The actual size of the cache in bytes is (64 * cacheSize). * * @param data * the data to base the kernel on * @throws Exception * if something goes wrong */ @Override public void buildKernel(final Instances data) throws Exception { this.initVars(data); } }
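A usage sketch (illustrative, not from the source): concrete subclasses such as PolyKernel inherit this caching behaviour, so the three cache modes are selected through setCacheSize():

import weka.classifiers.functions.supportVector.PolyKernel;
import weka.core.Instances;

// 'data' is assumed to be an Instances object, loaded elsewhere with its class attribute set
PolyKernel k = new PolyKernel();
k.setCacheSize(250007); // prime > 0: LRU cache; 0: full kernel matrix; -1: caching off
k.buildKernel(data);
double kij = k.eval(3, 7, data.instance(3)); // a repeated call with the same ids is a cache hit
System.out.println(k.numEvals() + " evals, " + k.numCacheHits() + " cache hits");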
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/supportVector/CheckKernel.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * CheckKernel.java * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions.supportVector; import java.util.ArrayList; import java.util.Collections; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.core.Attribute; import weka.core.CheckScheme; import weka.core.Instances; import weka.core.MultiInstanceCapabilitiesHandler; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SerializationHelper; import weka.core.TestInstances; import weka.core.Utils; import weka.core.WeightedInstancesHandler; /** * Class for examining the capabilities and finding problems with kernels. If * you implement a kernel using the WEKA libraries, you should run the checks * on it to ensure robustness and correct operation. Passing all the tests of * this object does not mean bugs in the kernel don't exist, but this will help * find some common ones. * <p/> * * Typical usage: * <p/> * <code>java weka.classifiers.functions.supportVector.CheckKernel -W kernel_name * -- kernel_options </code> * <p/> * * CheckKernel reports on the following: * <ul> * <li>Kernel abilities * <ul> * <li>Possible command line options to the kernels</li> * <li>Whether the kernels can predict nominal, numeric, string, date or * relational class attributes.</li> * <li>Whether the kernels can handle numeric predictor attributes</li> * <li>Whether the kernels can handle nominal predictor attributes</li> * <li>Whether the kernels can handle string predictor attributes</li> * <li>Whether the kernels can handle date predictor attributes</li> * <li>Whether the kernels can handle relational predictor attributes</li> * <li>Whether the kernels can handle multi-instance data</li> * <li>Whether the kernels can handle missing predictor values</li> * <li>Whether the kernels can handle missing class values</li> * <li>Whether a nominal kernel only handles 2-class problems</li> * <li>Whether the kernels can handle instance weights</li> * </ul> * </li> * <li>Correct functioning * <ul> * <li>Correct initialisation during buildKernel (i.e. no result changes when * buildKernel called repeatedly)</li> * <li>Whether the kernel alters the data passed to it (number of instances, * instance order, instance weights, etc)</li> * </ul> * </li> * <li>Degenerate cases * <ul> * <li>building kernels with zero training instances</li> * <li>all but one predictor attribute values missing</li> * <li>all predictor attribute values missing</li> * <li>all but one class values missing</li> * <li>all class values missing</li> * </ul> * </li> * </ul> * Running CheckKernel with the debug option set will output the training and * test datasets for any failed tests. * <p/> * * The <code>weka.classifiers.AbstractKernelTest</code> uses this class to test * all the kernels.
Any changes here, have to be checked in that abstract test * class, too. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -D * Turn on debugging output. * </pre> * * <pre> * -S * Silent mode - prints nothing to stdout. * </pre> * * <pre> * -N &lt;num&gt; * The number of instances in the datasets (default 20). * </pre> * * <pre> * -nominal &lt;num&gt; * The number of nominal attributes (default 2). * </pre> * * <pre> * -nominal-values &lt;num&gt; * The number of values for nominal attributes (default 1). * </pre> * * <pre> * -numeric &lt;num&gt; * The number of numeric attributes (default 1). * </pre> * * <pre> * -string &lt;num&gt; * The number of string attributes (default 1). * </pre> * * <pre> * -date &lt;num&gt; * The number of date attributes (default 1). * </pre> * * <pre> * -relational &lt;num&gt; * The number of relational attributes (default 1). * </pre> * * <pre> * -num-instances-relational &lt;num&gt; * The number of instances in relational/bag attributes (default 10). * </pre> * * <pre> * -words &lt;comma-separated-list&gt; * The words to use in string attributes. * </pre> * * <pre> * -word-separators &lt;chars&gt; * The word separators to use in string attributes. * </pre> * * <pre> * -W * Full name of the kernel analysed. * eg: weka.classifiers.functions.supportVector.RBFKernel * (default weka.classifiers.functions.supportVector.RBFKernel) * </pre> * * <pre> * Options specific to kernel weka.classifiers.functions.supportVector.RBFKernel: * </pre> * * <pre> * -D * Enables debugging output (if available) to be printed. * (default: off) * </pre> * * <pre> * -C &lt;num&gt; * The size of the cache (a prime number), 0 for full cache and * -1 to turn it off. * (default: 250007) * </pre> * * <pre> * -G &lt;num&gt; * The Gamma parameter. * (default: 0.01) * </pre> * * <!-- options-end --> * * Options after -- are passed to the designated kernel. * <p/> * * @author Len Trigg (trigg@cs.waikato.ac.nz) * @author FracPete (fracpete at waikato dot ac dot nz) * @version $Revision$ * @see TestInstances */ public class CheckKernel extends CheckScheme { /* * Note about test methods: - methods return array of booleans - first index: * success or not - second index: acceptable or not (e.g., Exception is OK) * * FracPete (fracpete at waikato dot ac dot nz) */ /*** The kernel to be examined */ protected Kernel m_Kernel = new weka.classifiers.functions.supportVector.RBFKernel(); /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> result = new Vector<Option>(); result.addElement(new Option("\tFull name of the kernel analysed.\n" + "\teg: weka.classifiers.functions.supportVector.RBFKernel\n" + "\t(default weka.classifiers.functions.supportVector.RBFKernel)", "W", 1, "-W")); result.addAll(Collections.list(super.listOptions())); if ((m_Kernel != null) && (m_Kernel instanceof OptionHandler)) { result.addElement(new Option("", "", 0, "\nOptions specific to kernel " + m_Kernel.getClass().getName() + ":")); result.addAll(Collections.list(((OptionHandler) m_Kernel).listOptions())); } return result.elements(); } /** * Parses a given list of options. * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -D * Turn on debugging output. * </pre> * * <pre> * -S * Silent mode - prints nothing to stdout. * </pre> * * <pre> * -N &lt;num&gt; * The number of instances in the datasets (default 20). 
* </pre> * * <pre> * -nominal &lt;num&gt; * The number of nominal attributes (default 2). * </pre> * * <pre> * -nominal-values &lt;num&gt; * The number of values for nominal attributes (default 1). * </pre> * * <pre> * -numeric &lt;num&gt; * The number of numeric attributes (default 1). * </pre> * * <pre> * -string &lt;num&gt; * The number of string attributes (default 1). * </pre> * * <pre> * -date &lt;num&gt; * The number of date attributes (default 1). * </pre> * * <pre> * -relational &lt;num&gt; * The number of relational attributes (default 1). * </pre> * * <pre> * -num-instances-relational &lt;num&gt; * The number of instances in relational/bag attributes (default 10). * </pre> * * <pre> * -words &lt;comma-separated-list&gt; * The words to use in string attributes. * </pre> * * <pre> * -word-separators &lt;chars&gt; * The word separators to use in string attributes. * </pre> * * <pre> * -W * Full name of the kernel analysed. * eg: weka.classifiers.functions.supportVector.RBFKernel * (default weka.classifiers.functions.supportVector.RBFKernel) * </pre> * * <pre> * Options specific to kernel weka.classifiers.functions.supportVector.RBFKernel: * </pre> * * <pre> * -D * Enables debugging output (if available) to be printed. * (default: off) * </pre> * <pre> * -C &lt;num&gt; * The size of the cache (a prime number), 0 for full cache and * -1 to turn it off. * (default: 250007) * </pre> * * <pre> * -G &lt;num&gt; * The Gamma parameter. * (default: 0.01) * </pre> * * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { String tmpStr; super.setOptions(options); tmpStr = Utils.getOption('W', options); if (tmpStr.length() == 0) { tmpStr = weka.classifiers.functions.supportVector.RBFKernel.class .getName(); } setKernel((Kernel) forName("weka.classifiers.functions.supportVector", Kernel.class, tmpStr, Utils.partitionOptions(options))); } /** * Gets the current settings of the CheckKernel. 
 * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> result = new Vector<String>(); Collections.addAll(result, super.getOptions()); if (getKernel() != null) { result.add("-W"); result.add(getKernel().getClass().getName()); } if ((m_Kernel != null) && (m_Kernel instanceof OptionHandler)) { String[] options = ((OptionHandler) m_Kernel).getOptions(); if (options.length > 0) { result.add("--"); } Collections.addAll(result, options); } return result.toArray(new String[result.size()]); } /** * Begin the tests, reporting results to System.out */ @Override public void doTests() { if (getKernel() == null) { println("\n=== No kernel set ==="); return; } println("\n=== Check on kernel: " + getKernel().getClass().getName() + " ===\n"); // Start tests m_ClasspathProblems = false; println("--> Checking for interfaces"); canTakeOptions(); boolean weightedInstancesHandler = weightedInstancesHandler()[0]; boolean multiInstanceHandler = multiInstanceHandler()[0]; println("--> Kernel tests"); declaresSerialVersionUID(); testsPerClassType(Attribute.NOMINAL, weightedInstancesHandler, multiInstanceHandler); testsPerClassType(Attribute.NUMERIC, weightedInstancesHandler, multiInstanceHandler); testsPerClassType(Attribute.DATE, weightedInstancesHandler, multiInstanceHandler); testsPerClassType(Attribute.STRING, weightedInstancesHandler, multiInstanceHandler); testsPerClassType(Attribute.RELATIONAL, weightedInstancesHandler, multiInstanceHandler); } /** * Set the kernel to test. * * @param value the kernel to use. */ public void setKernel(Kernel value) { m_Kernel = value; } /** * Get the kernel being tested * * @return the kernel being tested */ public Kernel getKernel() { return m_Kernel; } /** * Run a battery of tests for a given class attribute type * * @param classType the class type (NOMINAL, NUMERIC, etc.) * @param weighted true if the kernel says it handles weights * @param multiInstance true if the kernel is a multi-instance kernel */ protected void testsPerClassType(int classType, boolean weighted, boolean multiInstance) { boolean PNom = canPredict(true, false, false, false, false, multiInstance, classType)[0]; boolean PNum = canPredict(false, true, false, false, false, multiInstance, classType)[0]; boolean PStr = canPredict(false, false, true, false, false, multiInstance, classType)[0]; boolean PDat = canPredict(false, false, false, true, false, multiInstance, classType)[0]; boolean PRel; if (!multiInstance) { PRel = canPredict(false, false, false, false, true, multiInstance, classType)[0]; } else { PRel = false; } if (PNom || PNum || PStr || PDat || PRel) { if (weighted) { instanceWeights(PNom, PNum, PStr, PDat, PRel, multiInstance, classType); } if (classType == Attribute.NOMINAL) { canHandleNClasses(PNom, PNum, PStr, PDat, PRel, multiInstance, 4); } if (!multiInstance) { canHandleClassAsNthAttribute(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, 0); canHandleClassAsNthAttribute(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, 1); } canHandleZeroTraining(PNom, PNum, PStr, PDat, PRel, multiInstance, classType); boolean handleMissingPredictors = canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, true, false, 20)[0]; if (handleMissingPredictors) { canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, true, false, 100); } boolean handleMissingClass = canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, false, true, 20)[0]; if (handleMissingClass) {
canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, false, true, 100); } correctBuildInitialisation(PNom, PNum, PStr, PDat, PRel, multiInstance, classType); datasetIntegrity(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, handleMissingPredictors, handleMissingClass); } } /** * Checks whether the scheme can take command line options. * * @return index 0 is true if the kernel can take options */ protected boolean[] canTakeOptions() { boolean[] result = new boolean[2]; print("options..."); if (m_Kernel instanceof OptionHandler) { println("yes"); if (m_Debug) { println("\n=== Full report ==="); Enumeration<Option> enu = ((OptionHandler) m_Kernel).listOptions(); while (enu.hasMoreElements()) { Option option = enu.nextElement(); print(option.synopsis() + "\n" + option.description() + "\n"); } println("\n"); } result[0] = true; } else { println("no"); result[0] = false; } return result; } /** * Checks whether the scheme says it can handle instance weights. * * @return true if the kernel handles instance weights */ protected boolean[] weightedInstancesHandler() { boolean[] result = new boolean[2]; print("weighted instances kernel..."); if (m_Kernel instanceof WeightedInstancesHandler) { println("yes"); result[0] = true; } else { println("no"); result[0] = false; } return result; } /** * Checks whether the scheme handles multi-instance data. * * @return true if the kernel handles multi-instance data */ protected boolean[] multiInstanceHandler() { boolean[] result = new boolean[2]; print("multi-instance kernel..."); if (m_Kernel instanceof MultiInstanceCapabilitiesHandler) { println("yes"); result[0] = true; } else { println("no"); result[0] = false; } return result; } /** * tests for a serialVersionUID. Fails in case the scheme doesn't declare a * UID. * * @return index 0 is true if the scheme declares a UID */ protected boolean[] declaresSerialVersionUID() { boolean[] result = new boolean[2]; print("serialVersionUID..."); result[0] = !SerializationHelper.needsUID(m_Kernel.getClass()); if (result[0]) { println("yes"); } else { println("no"); } return result; } /** * Checks basic prediction of the scheme, for simple non-troublesome datasets. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NOMINAL, NUMERIC, etc.) 
* @return index 0 is true if the test was passed, index 1 is true if test was * acceptable */ protected boolean[] canPredict(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType) { print("basic predict"); printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); ArrayList<String> accepts = new ArrayList<String>(); accepts.add("unary"); accepts.add("binary"); accepts.add("nominal"); accepts.add("numeric"); accepts.add("string"); accepts.add("date"); accepts.add("relational"); accepts.add("multi-instance"); accepts.add("not in classpath"); int numTrain = getNumInstances(), numClasses = 2, missingLevel = 0; boolean predictorMissing = false, classMissing = false; return runBasicTest(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType, missingLevel, predictorMissing, classMissing, numTrain, numClasses, accepts); } /** * Checks whether nominal schemes can handle more than two classes. If a * scheme is only designed for two-class problems it should throw an * appropriate exception for multi-class problems. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param numClasses the number of classes to test * @return index 0 is true if the test was passed, index 1 is true if test was * acceptable */ protected boolean[] canHandleNClasses(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int numClasses) { print("more than two class problems"); printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, Attribute.NOMINAL); print("..."); ArrayList<String> accepts = new ArrayList<String>(); accepts.add("number"); accepts.add("class"); int numTrain = getNumInstances(), missingLevel = 0; boolean predictorMissing = false, classMissing = false; return runBasicTest(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, Attribute.NOMINAL, missingLevel, predictorMissing, classMissing, numTrain, numClasses, accepts); } /** * Checks whether the scheme can handle class attributes as Nth attribute. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) 
* @param classIndex the index of the class attribute (0-based, -1 means last * attribute) * @return index 0 is true if the test was passed, index 1 is true if test was * acceptable * @see TestInstances#CLASS_IS_LAST */ protected boolean[] canHandleClassAsNthAttribute(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType, int classIndex) { if (classIndex == TestInstances.CLASS_IS_LAST) { print("class attribute as last attribute"); } else { print("class attribute as " + (classIndex + 1) + ". attribute"); } printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); ArrayList<String> accepts = new ArrayList<String>(); int numTrain = getNumInstances(), numClasses = 2, missingLevel = 0; boolean predictorMissing = false, classMissing = false; return runBasicTest(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType, classIndex, missingLevel, predictorMissing, classMissing, numTrain, numClasses, accepts); } /** * Checks whether the scheme can handle zero training instances. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) * @return index 0 is true if the test was passed, index 1 is true if test was * acceptable */ protected boolean[] canHandleZeroTraining(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType) { print("handle zero training instances"); printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); ArrayList<String> accepts = new ArrayList<String>(); accepts.add("train"); accepts.add("value"); int numTrain = 0, numClasses = 2, missingLevel = 0; boolean predictorMissing = false, classMissing = false; return runBasicTest(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType, missingLevel, predictorMissing, classMissing, numTrain, numClasses, accepts); } /** * Checks whether the scheme correctly initialises models when buildKernel is * called. This test calls buildKernel with one training dataset. buildKernel * is then called on a training set with different structure, and then again * with the original training set. If the equals method of the * KernelEvaluation class returns false, this is noted as incorrect build * initialisation. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) 
* @return index 0 is true if the test was passed */ protected boolean[] correctBuildInitialisation(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType) { boolean[] result = new boolean[2]; print("correct initialisation during buildKernel"); printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); int numTrain = getNumInstances(), numClasses = 2, missingLevel = 0; boolean predictorMissing = false, classMissing = false; Instances train1 = null; Instances train2 = null; Kernel kernel = null; KernelEvaluation evaluation1A = null; KernelEvaluation evaluation1B = null; KernelEvaluation evaluation2 = null; int stage = 0; try { // Make two sets of train/test splits with different // numbers of attributes train1 = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal() : 0, numericPredictor ? getNumNumeric() : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? getNumRelational() : 0, numClasses, classType, multiInstance); train2 = makeTestDataset(84, numTrain, nominalPredictor ? getNumNominal() + 1 : 0, numericPredictor ? getNumNumeric() + 1 : 0, stringPredictor ? getNumString() + 1 : 0, datePredictor ? getNumDate() + 1 : 0, relationalPredictor ? getNumRelational() + 1 : 0, numClasses, classType, multiInstance); if (missingLevel > 0) { addMissing(train1, missingLevel, predictorMissing, classMissing); addMissing(train2, missingLevel, predictorMissing, classMissing); } kernel = Kernel.makeCopy(getKernel()); evaluation1A = new KernelEvaluation(); evaluation1B = new KernelEvaluation(); evaluation2 = new KernelEvaluation(); } catch (Exception ex) { throw new Error("Error setting up for tests: " + ex.getMessage()); } try { stage = 0; evaluation1A.evaluate(kernel, train1); stage = 1; evaluation2.evaluate(kernel, train2); stage = 2; evaluation1B.evaluate(kernel, train1); stage = 3; if (!evaluation1A.equals(evaluation1B)) { if (m_Debug) { println("\n=== Full report ===\n" + evaluation1A.toSummaryString("\nFirst buildKernel()") + "\n\n"); println(evaluation1B.toSummaryString("\nSecond buildKernel()") + "\n\n"); } throw new Exception("Results differ between buildKernel calls"); } println("yes"); result[0] = true; } catch (Exception ex) { println("no"); result[0] = false; if (m_Debug) { println("\n=== Full Report ==="); print("Problem during building"); switch (stage) { case 0: print(" of dataset 1"); break; case 1: print(" of dataset 2"); break; case 2: print(" of dataset 1 (2nd build)"); break; case 3: print(", comparing results from builds of dataset 1"); break; } println(": " + ex.getMessage() + "\n"); println("here are the datasets:\n"); println("=== Train1 Dataset ===\n" + train1.toString() + "\n"); println("=== Train2 Dataset ===\n" + train2.toString() + "\n"); } } return result; } /** * Checks basic missing value handling of the scheme. If the missing values * cause an exception to be thrown by the scheme, this will be recorded. 
* * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) * @param predictorMissing true if the missing values may be in the predictors * @param classMissing true if the missing values may be in the class * @param missingLevel the percentage of missing values * @return index 0 is true if the test was passed, index 1 is true if test was * acceptable */ protected boolean[] canHandleMissing(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType, boolean predictorMissing, boolean classMissing, int missingLevel) { if (missingLevel == 100) { print("100% "); } print("missing"); if (predictorMissing) { print(" predictor"); if (classMissing) { print(" and"); } } if (classMissing) { print(" class"); } print(" values"); printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); ArrayList<String> accepts = new ArrayList<String>(); accepts.add("missing"); accepts.add("value"); accepts.add("train"); int numTrain = getNumInstances(), numClasses = 2; return runBasicTest(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType, missingLevel, predictorMissing, classMissing, numTrain, numClasses, accepts); } /** * Checks whether the kernel can handle instance weights. This test compares * the kernel performance on two datasets that are identical except for the * training weights. If the results change, then the kernel must be using the * weights. It may be possible to get a false positive from this test if the * weight changes aren't significant enough to induce a change in kernel * performance (but the weights are chosen to minimize the likelihood of * this). * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) * @return index 0 true if the test was passed */ protected boolean[] instanceWeights(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType) { print("kernel uses instance weights"); printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); int numTrain = 2 * getNumInstances(), numClasses = 2, missingLevel = 0; boolean predictorMissing = false, classMissing = false; boolean[] result = new boolean[2]; Instances train = null; Kernel[] kernels = null; KernelEvaluation evaluationB = null; KernelEvaluation evaluationI = null; boolean evalFail = false; try { train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal() + 1 : 0, numericPredictor ? 
getNumNumeric() + 1 : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? getNumRelational() : 0, numClasses, classType, multiInstance); if (missingLevel > 0) { addMissing(train, missingLevel, predictorMissing, classMissing); } kernels = Kernel.makeCopies(getKernel(), 2); evaluationB = new KernelEvaluation(); evaluationI = new KernelEvaluation(); evaluationB.evaluate(kernels[0], train); } catch (Exception ex) { throw new Error("Error setting up for tests: " + ex.getMessage()); } try { // Now modify instance weights and re-built/test for (int i = 0; i < train.numInstances(); i++) { train.instance(i).setWeight(0); } Random random = new Random(1); for (int i = 0; i < train.numInstances() / 2; i++) { int inst = random.nextInt(train.numInstances()); int weight = random.nextInt(10) + 1; train.instance(inst).setWeight(weight); } evaluationI.evaluate(kernels[1], train); if (evaluationB.equals(evaluationI)) { // println("no"); evalFail = true; throw new Exception("evalFail"); } println("yes"); result[0] = true; } catch (Exception ex) { println("no"); result[0] = false; if (m_Debug) { println("\n=== Full Report ==="); if (evalFail) { println("Results don't differ between non-weighted and " + "weighted instance models."); println("Here are the results:\n"); println(evaluationB.toSummaryString("\nboth methods\n")); } else { print("Problem during building"); println(": " + ex.getMessage() + "\n"); } println("Here is the dataset:\n"); println("=== Train Dataset ===\n" + train.toString() + "\n"); println("=== Train Weights ===\n"); for (int i = 0; i < train.numInstances(); i++) { println(" " + (i + 1) + " " + train.instance(i).weight()); } } } return result; } /** * Checks whether the scheme alters the training dataset during building. If * the scheme needs to modify the data it should take a copy of the training * data. Currently checks for changes to header structure, number of * instances, order of instances, instance weights. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) * @param predictorMissing true if we know the kernel can handle (at least) * moderate missing predictor values * @param classMissing true if we know the kernel can handle (at least) * moderate missing class values * @return index 0 is true if the test was passed */ protected boolean[] datasetIntegrity(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType, boolean predictorMissing, boolean classMissing) { print("kernel doesn't alter original datasets"); printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); int numTrain = getNumInstances(), numClasses = 2, missingLevel = 20; boolean[] result = new boolean[2]; Instances train = null; Kernel kernel = null; try { train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal() : 0, numericPredictor ? getNumNumeric() : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? 
getNumRelational() : 0, numClasses, classType, multiInstance); if (missingLevel > 0) { addMissing(train, missingLevel, predictorMissing, classMissing); } kernel = Kernel.makeCopies(getKernel(), 1)[0]; } catch (Exception ex) { throw new Error("Error setting up for tests: " + ex.getMessage()); } try { Instances trainCopy = new Instances(train); kernel.getCapabilities().testWithFail(train); kernel.buildKernel(trainCopy); compareDatasets(train, trainCopy); println("yes"); result[0] = true; } catch (Exception ex) { println("no"); result[0] = false; if (m_Debug) { println("\n=== Full Report ==="); print("Problem during building"); println(": " + ex.getMessage() + "\n"); println("Here is the dataset:\n"); println("=== Train Dataset ===\n" + train.toString() + "\n"); } } return result; } /** * Runs a test on the datasets with the given characteristics. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) * @param missingLevel the percentage of missing values * @param predictorMissing true if the missing values may be in the predictors * @param classMissing true if the missing values may be in the class * @param numTrain the number of instances in the training set * @param numClasses the number of classes * @param accepts the acceptable string in an exception * @return index 0 is true if the test was passed, index 1 is true if test was * acceptable */ protected boolean[] runBasicTest(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType, int missingLevel, boolean predictorMissing, boolean classMissing, int numTrain, int numClasses, ArrayList<String> accepts) { return runBasicTest(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType, TestInstances.CLASS_IS_LAST, missingLevel, predictorMissing, classMissing, numTrain, numClasses, accepts); } /** * Runs a test on the datasets with the given characteristics. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.)
* @param classIndex the attribute index of the class * @param missingLevel the percentage of missing values * @param predictorMissing true if the missing values may be in the predictors * @param classMissing true if the missing values may be in the class * @param numTrain the number of instances in the training set * @param numClasses the number of classes * @param accepts the acceptable string in an exception * @return index 0 is true if the test was passed, index 1 is true if test was * acceptable */ protected boolean[] runBasicTest(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType, int classIndex, int missingLevel, boolean predictorMissing, boolean classMissing, int numTrain, int numClasses, ArrayList<String> accepts) { boolean[] result = new boolean[2]; Instances train = null; Kernel kernel = null; try { train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal() : 0, numericPredictor ? getNumNumeric() : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? getNumRelational() : 0, numClasses, classType, classIndex, multiInstance); if (missingLevel > 0) { addMissing(train, missingLevel, predictorMissing, classMissing); } kernel = Kernel.makeCopies(getKernel(), 1)[0]; } catch (Exception ex) { ex.printStackTrace(); throw new Error("Error setting up for tests: " + ex.getMessage()); } try { kernel.getCapabilities().testWithFail(train); kernel.buildKernel(train); println("yes"); result[0] = true; } catch (Exception ex) { boolean acceptable = false; String msg; if (ex.getMessage() == null) { msg = ""; } else { msg = ex.getMessage().toLowerCase(); } if (msg.indexOf("not in classpath") > -1) { m_ClasspathProblems = true; } for (int i = 0; i < accepts.size(); i++) { if (msg.indexOf(accepts.get(i)) >= 0) { acceptable = true; } } println("no" + (acceptable ? " (OK error message)" : "")); result[1] = acceptable; if (m_Debug) { println("\n=== Full Report ==="); print("Problem during building"); println(": " + ex.getMessage() + "\n"); if (!acceptable) { if (accepts.size() > 0) { print("Error message doesn't mention "); for (int i = 0; i < accepts.size(); i++) { if (i != 0) { print(" or "); } print('"' + accepts.get(i) + '"'); } } println("here is the dataset:\n"); println("=== Train Dataset ===\n" + train.toString() + "\n"); } } } return result; } /** * Make a simple set of instances, which can later be modified for use in * specific tests. * * @param seed the random number seed * @param numInstances the number of instances to generate * @param numNominal the number of nominal attributes * @param numNumeric the number of numeric attributes * @param numString the number of string attributes * @param numDate the number of date attributes * @param numRelational the number of relational attributes * @param numClasses the number of classes (if nominal class) * @param classType the class type (NUMERIC, NOMINAL, etc.) 
 * @param multiInstance whether the dataset should be a multi-instance dataset * @return the test dataset * @throws Exception if the dataset couldn't be generated * @see #process(Instances) */ protected Instances makeTestDataset(int seed, int numInstances, int numNominal, int numNumeric, int numString, int numDate, int numRelational, int numClasses, int classType, boolean multiInstance) throws Exception { return makeTestDataset(seed, numInstances, numNominal, numNumeric, numString, numDate, numRelational, numClasses, classType, TestInstances.CLASS_IS_LAST, multiInstance); } /** * Make a simple set of instances with variable position of the class * attribute, which can later be modified for use in specific tests. * * @param seed the random number seed * @param numInstances the number of instances to generate * @param numNominal the number of nominal attributes * @param numNumeric the number of numeric attributes * @param numString the number of string attributes * @param numDate the number of date attributes * @param numRelational the number of relational attributes * @param numClasses the number of classes (if nominal class) * @param classType the class type (NUMERIC, NOMINAL, etc.) * @param classIndex the index of the class (0-based, -1 as last) * @param multiInstance whether the dataset should be a multi-instance dataset * @return the test dataset * @throws Exception if the dataset couldn't be generated * @see TestInstances#CLASS_IS_LAST * @see #process(Instances) */ protected Instances makeTestDataset(int seed, int numInstances, int numNominal, int numNumeric, int numString, int numDate, int numRelational, int numClasses, int classType, int classIndex, boolean multiInstance) throws Exception { TestInstances dataset = new TestInstances(); dataset.setSeed(seed); dataset.setNumInstances(numInstances); dataset.setNumNominal(numNominal); dataset.setNumNumeric(numNumeric); dataset.setNumString(numString); dataset.setNumDate(numDate); dataset.setNumRelational(numRelational); dataset.setNumClasses(numClasses); dataset.setClassType(classType); dataset.setClassIndex(classIndex); dataset.setMultiInstance(multiInstance); dataset.setWords(getWords()); dataset.setWordSeparators(getWordSeparators()); return process(dataset.generate()); } /** * Print out a short summary string for the dataset characteristics * * @param nominalPredictor true if nominal predictor attributes are present * @param numericPredictor true if numeric predictor attributes are present * @param stringPredictor true if string predictor attributes are present * @param datePredictor true if date predictor attributes are present * @param relationalPredictor true if relational predictor attributes are * present * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.)
*/ protected void printAttributeSummary(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType) { String str = ""; if (numericPredictor) { str += " numeric"; } if (nominalPredictor) { if (str.length() > 0) { str += " &"; } str += " nominal"; } if (stringPredictor) { if (str.length() > 0) { str += " &"; } str += " string"; } if (datePredictor) { if (str.length() > 0) { str += " &"; } str += " date"; } if (relationalPredictor) { if (str.length() > 0) { str += " &"; } str += " relational"; } str += " predictors)"; switch (classType) { case Attribute.NUMERIC: str = " (numeric class," + str; break; case Attribute.NOMINAL: str = " (nominal class," + str; break; case Attribute.STRING: str = " (string class," + str; break; case Attribute.DATE: str = " (date class," + str; break; case Attribute.RELATIONAL: str = " (relational class," + str; break; } print(str); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Test method for this class * * @param args the commandline parameters */ public static void main(String[] args) { runCheck(new CheckKernel(), args); } }
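A short usage sketch for the harness above, not part of the original sources: it wires a kernel into CheckKernel programmatically and runs the full battery via doTests(). The import of CheckKernel assumes it lives in the same package as the kernels it tests; the wrapper class name is illustrative.

import weka.classifiers.functions.supportVector.CheckKernel;  // assumed location
import weka.classifiers.functions.supportVector.PolyKernel;

public class CheckKernelUsageSketch {
  public static void main(String[] args) throws Exception {
    CheckKernel check = new CheckKernel();

    // Kernel under test; any Kernel subclass can be plugged in here.
    PolyKernel kernel = new PolyKernel();
    kernel.setExponent(2.0);
    check.setKernel(kernel);

    // Runs the interface checks and the per-class-type test battery,
    // reporting results to System.out (see doTests() above).
    check.doTests();
  }
}

The equivalent command line uses the -W option documented above, with kernel-specific options after --: java weka.classifiers.functions.supportVector.CheckKernel -W weka.classifiers.functions.supportVector.PolyKernel -- -E 2.0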
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/supportVector/Kernel.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Kernel.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions.supportVector; import java.io.Serializable; import java.util.Enumeration; import java.util.Vector; import weka.core.Capabilities; import weka.core.CapabilitiesHandler; import weka.core.Copyable; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.SerializedObject; import weka.core.Utils; import weka.gui.ProgrammaticProperty; /** * Abstract kernel. Kernels implementing this class must respect Mercer's * condition in order to ensure a correct behaviour of SMOreg. * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author FracPete (fracpete at waikato dot ac dot nz) * @version $Revision$ */ public abstract class Kernel implements Serializable, OptionHandler, CapabilitiesHandler, RevisionHandler { /** for serialization */ private static final long serialVersionUID = -6102771099905817064L; /** The dataset */ protected Instances m_data; /** enables debugging output */ protected boolean m_Debug = false; /** This value is now ignored. Checks are always turned off as they are the responsibility * of the class using the kernel. We are keeping this to allow deserialization. */ protected boolean m_ChecksTurnedOff = false; /** This value is now ignored. Checks are always turned off as they are the responsibility * of the class using the kernel. We are keeping this to allow deserialization. */ protected boolean m_DoNotCheckCapabilities = false; /** * These methods remain for backwards compatibility. The first one does nothing, the second one * always returns true. Checking capabilities is the responsibility of the class using the kernel. */ @ProgrammaticProperty public void setDoNotCheckCapabilities(boolean doNotCheckCapabilities) { } public boolean getDoNotCheckCapabilities() { return true; } /** * Returns a string describing the kernel * * @return a description suitable for displaying in the explorer/experimenter * gui */ public abstract String globalInfo(); /** * Computes the result of the kernel function for two instances. If id1 == -1, * eval use inst1 instead of an instance in the dataset. * * @param id1 the index of the first instance in the dataset * @param id2 the index of the second instance in the dataset * @param inst1 the instance corresponding to id1 (used if id1 == -1) * @return the result of the kernel function * @throws Exception if something goes wrong */ public abstract double eval(int id1, int id2, Instance inst1) throws Exception; /** * Frees the memory used by the kernel. (Useful with kernels which use cache.) * This function is called when the training is done. i.e. after that, eval * will be called with id1 == -1. 
*/ public abstract void clean(); /** * Returns the number of kernel evaluation performed. * * @return the number of kernel evaluation performed. */ public abstract int numEvals(); /** * Returns the number of dot product cache hits. * * @return the number of dot product cache hits, or -1 if not supported by * this kernel. */ public abstract int numCacheHits(); /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> result = Option.listOptionsForClassHierarchy(this.getClass(), Kernel.class); result.addElement(new Option( "\tEnables debugging output (if available) to be printed.\n" + "\t(default: off)", "output-debug-info", 0, "-output-debug-info")); return result.elements(); } /** * Parses a given list of options. * <p/> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { Option.setOptionsForHierarchy(options, this, Kernel.class); setDebug(Utils.getFlag("output-debug-info", options)); // This one does nothing but remains for backwards compatibility setChecksTurnedOff(Utils.getFlag("no-checks", options)); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the Kernel. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> result = new Vector<String>(); for (String s : Option.getOptionsForHierarchy(this, Kernel.class)) { result.add(s); } if (getDebug()) { result.add("-output-debug-info"); } return result.toArray(new String[result.size()]); } /** * Enables or disables the output of debug information (if the derived kernel * supports that) * * @param value whether to output debugging information */ public void setDebug(boolean value) { m_Debug = value; } /** * Gets whether debugging output is turned on or not. * * @return true if debugging output is produced. */ public boolean getDebug() { return m_Debug; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String debugTipText() { return "Turns on the output of debugging information."; } /** * These methods remain for backwards compatibility. The first one does nothing, the second one * always returns true. Checking capabilities is the responsibility of the class using the kernel. */ @ProgrammaticProperty public void setChecksTurnedOff(boolean value) { } public boolean getChecksTurnedOff() { return true; } /** * initializes variables etc. * * @param data the data to use */ protected void initVars(Instances data) { m_data = data; } /** * Returns the Capabilities of this kernel. Derived kernels have to override * this method to enable capabilities. * * @return the capabilities of this object * @see Capabilities */ @Override public Capabilities getCapabilities() { Capabilities result = new Capabilities(this); result.enableAll(); return result; } /** * Returns the revision string. 
 * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * builds the kernel with the given data * * @param data the data to base the kernel on * @throws Exception if something goes wrong */ public void buildKernel(Instances data) throws Exception { initVars(data); } /** * Creates a shallow copy of the kernel (if it implements Copyable) otherwise * a deep copy using serialization. * * @param kernel the kernel to copy * @return a shallow or deep copy of the kernel * @throws Exception if an error occurs */ public static Kernel makeCopy(Kernel kernel) throws Exception { if (kernel instanceof Copyable) { return (Kernel) ((Copyable) kernel).copy(); } return (Kernel) new SerializedObject(kernel).getObject(); } /** * Creates a given number of copies of the given kernel, shallow if the * kernel implements Copyable, otherwise deep via serialization. * * @param model the kernel to copy * @param num the number of kernel copies to create. * @return an array of kernels. * @throws Exception if an error occurs */ public static Kernel[] makeCopies(Kernel model, int num) throws Exception { if (model == null) { throw new Exception("No model kernel set"); } Kernel[] kernels = new Kernel[num]; if (model instanceof Copyable) { for (int i = 0; i < kernels.length; i++) { kernels[i] = (Kernel) ((Copyable) model).copy(); } } else { SerializedObject so = new SerializedObject(model); for (int i = 0; i < kernels.length; i++) { kernels[i] = (Kernel) so.getObject(); } } return kernels; } /** * Creates a new instance of a kernel given its class name and (optional) * arguments to pass to its setOptions method. * * @param kernelName the fully qualified class name of the kernel * @param options an array of options suitable for passing to setOptions. May * be null. * @return the newly created kernel, ready for use. * @throws Exception if the kernel name is invalid, or the options * supplied are not acceptable to the kernel */ public static Kernel forName(String kernelName, String[] options) throws Exception { return (Kernel) Utils.forName(Kernel.class, kernelName, options); } }
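To make the abstract contract above concrete, here is a minimal, hedged sketch of a Kernel subclass: a plain dot-product kernel without caching. Only the abstract methods and the protected m_data field come from the class above; the class name and the attribute loop are illustrative, and the sketch assumes eval() is only called while the dataset is still set.

import weka.classifiers.functions.supportVector.Kernel;
import weka.core.Instance;

public class DotProductKernel extends Kernel {

  /** for serialization (CheckKernel verifies that subclasses declare a UID) */
  private static final long serialVersionUID = 1L;

  /** counts calls to eval() so numEvals() has something to report */
  private int m_EvalCount = 0;

  @Override
  public String globalInfo() {
    return "Plain dot-product kernel (illustrative sketch, numeric attributes only).";
  }

  @Override
  public double eval(int id1, int id2, Instance inst1) throws Exception {
    // Per the contract above: if id1 == -1, inst1 stands in for the first instance.
    Instance a = (id1 == -1) ? inst1 : m_data.instance(id1);
    Instance b = (id1 == id2) ? a : m_data.instance(id2);
    m_EvalCount++;
    double sum = 0;
    for (int i = 0; i < a.numAttributes(); i++) {
      if (i != m_data.classIndex()) {  // skip the class attribute
        sum += a.value(i) * b.value(i);
      }
    }
    return sum;
  }

  @Override
  public void clean() {
    m_data = null;  // release the dataset once training is done
  }

  @Override
  public int numEvals() {
    return m_EvalCount;
  }

  @Override
  public int numCacheHits() {
    return -1;  // no cache in this sketch, as the javadoc above allows
  }
}

Such a kernel can then be duplicated with Kernel.makeCopy() or Kernel.makeCopies() above wherever independent copies are needed.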
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/supportVector/KernelEvaluation.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * KernelEvaluation.java * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.functions.supportVector; import java.io.BufferedReader; import java.io.FileReader; import java.util.Enumeration; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Utils; /** * Class for evaluating Kernels. * * @author fracpete (fracpete at waikato dot ac dot nz) * @version $Revision$ */ public class KernelEvaluation implements RevisionHandler { /** the result string */ protected StringBuffer m_Result; /** the kernel evaluation results */ protected double[][] m_Evaluations; /** the number of performed evaluations */ protected int m_NumEvals; /** the number of cache hits */ protected int m_NumCacheHits; /** user-supplied options */ protected String[] m_Options; /** * default constructor */ public KernelEvaluation() { super(); m_Result = new StringBuffer(); m_Evaluations = new double[0][0]; m_Options = new String[0]; m_NumEvals = 0; m_NumCacheHits = 0; } /** * sets the option the user supplied for the kernel * * @param options options that were supplied for the kernel */ public void setUserOptions(String[] options) { m_Options = options.clone(); } /** * returns the options the user supplied for the kernel * * @return the user supplied options for the kernel */ public String[] getUserOptions() { return m_Options.clone(); } /** * Generates an option string to output on the commandline. * * @param Kernel the Kernel to generate the string for * @return the option string */ protected static String makeOptionString(Kernel Kernel) { StringBuffer text; text = new StringBuffer(); // general options text.append("\nGeneral options:\n\n"); text.append("-t <training file>\n"); text.append("\tThe name of the training file.\n"); text.append("-c <class index>\n"); text.append("\tSets index of class attribute (default: last).\n"); // Kernel specific options, if any if (Kernel instanceof OptionHandler) { text.append("\nOptions specific to " + Kernel.getClass().getName().replaceAll(".*\\.", "") + ":\n\n"); Enumeration<Option> enm = ((OptionHandler) Kernel).listOptions(); while (enm.hasMoreElements()) { Option option = enm.nextElement(); text.append(option.synopsis() + "\n"); text.append(option.description() + "\n"); } } return text.toString(); } /** * Evaluates the Kernel with the given commandline options and returns the * evaluation string. 
 * * @param Kernel the Kernel to evaluate * @param options the commandline options * @return the generated output string * @throws Exception if evaluation fails */ public static String evaluate(Kernel Kernel, String[] options) throws Exception { String trainFileString = ""; BufferedReader reader; KernelEvaluation eval; String classIndexString; int classIndex = -1; Instances train; String[] userOptions; // help? if (Utils.getFlag('h', options)) { throw new Exception("\nHelp requested.\n" + makeOptionString(Kernel)); } try { // general options trainFileString = Utils.getOption('t', options); if (trainFileString.length() == 0) { throw new Exception("No training file given!"); } reader = new BufferedReader(new FileReader(trainFileString)); classIndexString = Utils.getOption('c', options); if (classIndexString.length() != 0) { if (classIndexString.equals("first")) { classIndex = 1; } else if (classIndexString.equals("last")) { classIndex = -1; } else { classIndex = Integer.parseInt(classIndexString); } } // Kernel specific options userOptions = options.clone(); if (Kernel instanceof OptionHandler) { ((OptionHandler) Kernel).setOptions(options); } // left-over options? Utils.checkForRemainingOptions(options); } catch (Exception e) { throw new Exception("\nWeka exception: " + e.getMessage() + "\n" + makeOptionString(Kernel)); } // load file and build kernel eval = new KernelEvaluation(); eval.setUserOptions(userOptions); train = new Instances(reader); if (classIndex == -1) { train.setClassIndex(train.numAttributes() - 1); } else { train.setClassIndex(classIndex - 1); // the -c option is 1-based, setClassIndex() expects a 0-based index } return eval.evaluate(Kernel, train); } /** * Evaluates a kernel with the options given in an array of strings. * * @param kernelString class of kernel as a string * @param options the array of strings containing the options * @throws Exception if model could not be evaluated successfully * @return a string describing the results */ public static String evaluate(String kernelString, String[] options) throws Exception { Kernel kernel; // Create kernel try { kernel = (Kernel) Class.forName(kernelString).newInstance(); } catch (Exception e) { throw new Exception("Can't find class with name " + kernelString + '.'); } return evaluate(kernel, options); } /** * Evaluates the Kernel with the given commandline options and returns the * evaluation string.
 * * @param kernel the Kernel to evaluate * @param data the data to run the Kernel with * @return the generated output string * @throws Exception if evaluation fails */ public String evaluate(Kernel kernel, Instances data) throws Exception { long startTime; long endTime; int i; int n; m_Result = new StringBuffer(); // build kernel startTime = System.currentTimeMillis(); kernel.getCapabilities().testWithFail(data); kernel.buildKernel(data); endTime = System.currentTimeMillis(); m_Result.append("\n=== Model ===\n\n"); if (Utils.joinOptions(getUserOptions()).trim().length() != 0) { m_Result.append("Options: " + Utils.joinOptions(getUserOptions()) + "\n\n"); } m_Result.append(kernel.toString() + "\n"); // evaluate dataset m_Evaluations = new double[data.numInstances()][data.numInstances()]; for (n = 0; n < data.numInstances(); n++) { for (i = n; i < data.numInstances(); i++) { m_Evaluations[n][i] = kernel.eval(n, i, data.instance(n)); } } // test cache for cached kernels if (kernel instanceof CachedKernel) { for (n = 0; n < data.numInstances(); n++) { for (i = n; i < data.numInstances(); i++) { m_Evaluations[n][i] = kernel.eval(n, i, data.instance(n)); } } } m_NumEvals = kernel.numEvals(); m_NumCacheHits = kernel.numCacheHits(); // summary m_Result.append("\n=== Evaluation ===\n\n"); if (kernel instanceof CachedKernel) { m_Result.append("Cache size : " + ((CachedKernel) kernel).getCacheSize() + "\n"); } m_Result.append("# Evaluations: " + m_NumEvals + "\n"); m_Result.append("# Cache hits : " + m_NumCacheHits + "\n"); m_Result.append("Elapsed time : " + (((double) (endTime - startTime)) / 1000) + "s\n"); return m_Result.toString(); } /** * Tests whether the current evaluation object is equal to another evaluation * object * * @param obj the object to compare against * @return true if the two objects are equal */ @Override public boolean equals(Object obj) { if ((obj == null) || !(obj.getClass().equals(this.getClass()))) { return false; } KernelEvaluation cmp = (KernelEvaluation) obj; if (m_NumEvals != cmp.m_NumEvals) { return false; } if (m_NumCacheHits != cmp.m_NumCacheHits) { return false; } if (m_Evaluations.length != cmp.m_Evaluations.length) { return false; } for (int n = 0; n < m_Evaluations.length; n++) { for (int i = 0; i < m_Evaluations[n].length; i++) { if (Double.isNaN(m_Evaluations[n][i]) && Double.isNaN(cmp.m_Evaluations[n][i])) { continue; } if (m_Evaluations[n][i] != cmp.m_Evaluations[n][i]) { return false; } } } return true; } /** * returns a summary string of the evaluation with no title * * @return the summary string */ public String toSummaryString() { return toSummaryString(""); } /** * returns a summary string of the evaluation with the given title * * @param title the title to print before the result * @return the summary string */ public String toSummaryString(String title) { StringBuffer result; result = new StringBuffer(title); if (title.length() != 0) { result.append("\n"); } result.append(m_Result); return result.toString(); } /** * returns the current result * * @return the currently stored result * @see #toSummaryString() */ @Override public String toString() { return toSummaryString(); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * A test method for this class. Just extracts the first command line argument * as a kernel class name and calls evaluate.
* * @param args an array of command line arguments, the first of which must be * the class name of a kernel. */ public static void main(String[] args) { try { if (args.length == 0) { throw new Exception( "The first argument must be the class name of a kernel"); } String kernel = args[0]; args[0] = ""; System.out.println(evaluate(kernel, args)); } catch (Exception ex) { ex.printStackTrace(); System.err.println(ex.getMessage()); } } }
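The command-line entry points above ultimately call evaluate(Kernel, Instances); the sketch below, not part of the original sources, drives that method directly. The ARFF file name is a placeholder, as is the wrapper class name.

import java.io.BufferedReader;
import java.io.FileReader;

import weka.classifiers.functions.supportVector.KernelEvaluation;
import weka.classifiers.functions.supportVector.PolyKernel;
import weka.core.Instances;

public class KernelEvaluationSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder path; any ARFF dataset with numeric attributes will do.
    Instances train = new Instances(new BufferedReader(new FileReader("data.arff")));
    train.setClassIndex(train.numAttributes() - 1);  // class is last, matching the -c default

    KernelEvaluation eval = new KernelEvaluation();

    // Builds the kernel, fills the upper-triangular evaluation matrix, and
    // returns the "=== Model ===" / "=== Evaluation ===" report assembled above.
    System.out.println(eval.evaluate(new PolyKernel(), train));
  }
}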
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/supportVector/NormalizedPolyKernel.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * NormalizedPolyKernel.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions.supportVector; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionUtils; /** * <!-- globalinfo-start --> The normalized polynomial kernel.<br/> * K(x,y) = &lt;x,y&gt;/sqrt(&lt;x,x&gt;&lt;y,y&gt;) where &lt;x,y&gt; = PolyKernel(x,y) * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -D * Enables debugging output (if available) to be printed. * (default: off) * </pre> * * <pre> * -C &lt;num&gt; * The size of the cache (a prime number), 0 for full cache and * -1 to turn it off. * (default: 250007) * </pre> * * <pre> * -E &lt;num&gt; * The Exponent to use. * (default: 1.0) * </pre> * * <pre> * -L * Use lower-order terms. * (default: no) * </pre> * * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision$ */ public class NormalizedPolyKernel extends PolyKernel { /** for serialization */ static final long serialVersionUID = 1248574185532130851L; /** * Default constructor. Sets the exponent to 2.0, since 1.0 is not a valid * exponent for this kernel (see setExponent). */ public NormalizedPolyKernel() { super(); this.setExponent(2.0); } /** * Creates a new <code>NormalizedPolyKernel</code> instance. * * @param dataset * the training dataset used. * @param cacheSize * the size of the cache (a prime number) * @param exponent * the exponent to use * @param lowerOrder * whether to use lower-order terms * @throws Exception * if something goes wrong */ public NormalizedPolyKernel(final Instances dataset, final int cacheSize, final double exponent, final boolean lowerOrder) throws Exception { super(dataset, cacheSize, exponent, lowerOrder); } /** * Returns a string describing the kernel * * @return a description suitable for displaying in the explorer/experimenter gui */ @Override public String globalInfo() { return "The normalized polynomial kernel.\n" + "K(x,y) = <x,y>/sqrt(<x,x><y,y>) where <x,y> = PolyKernel(x,y)"; } /** * Computes the result of the kernel function for two instances. If id1 == -1, eval uses inst1 instead of an instance in the dataset. Redefines the eval function of PolyKernel. * * @param id1 * the index of the first instance in the dataset * @param id2 * the index of the second instance in the dataset * @param inst1 * the instance corresponding to id1 (used if id1 == -1) * @return the result of the kernel function * @throws Exception * if something goes wrong */ @Override public double eval(final int id1, final int id2, final Instance inst1) throws Exception { double div = Math.sqrt(super.eval(id1, id1, inst1) * ((this.m_keys != null) ? super.eval(id2, id2, this.m_data.instance(id2)) : super.eval(-1, -1, this.m_data.instance(id2)))); if (div != 0) { return super.eval(id1, id2, inst1) / div; } else { return 0; } } /** * Sets the exponent value (must be different from 1.0).
 * * @param value * the exponent value */ @Override public void setExponent(final double value) { if (value != 1.0) { // values of 1.0 are silently ignored super.setExponent(value); } } /** * returns a string representation for the Kernel * * @return a string representation of the kernel */ @Override public String toString() { String result; if (this.getUseLowerOrder()) { result = "Normalized Poly Kernel with lower order: K(x,y) = (<x,y>+1)^" + this.getExponent() + "/" + "((<x,x>+1)^" + this.getExponent() + "*" + "(<y,y>+1)^" + this.getExponent() + ")^(1/2)"; } else { result = "Normalized Poly Kernel: K(x,y) = <x,y>^" + this.getExponent() + "/" + "(<x,x>^" + this.getExponent() + "*" + "<y,y>^" + this.getExponent() + ")^(1/2)"; } return result; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
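Because eval() divides each kernel value by sqrt(K(x,x) * K(y,y)), every self-similarity comes out as 1 and all values land in [-1, 1]. A brief sketch, assuming train is an Instances object with its class index already set:

// 250007 is the default cache size documented above; exponent 2, no lower-order terms.
NormalizedPolyKernel kernel = new NormalizedPolyKernel(train, 250007, 2.0, false);

double self  = kernel.eval(0, 0, train.instance(0));  // 1.0, unless the instance is all
                                                      // zeros and the div == 0 guard fires
double cross = kernel.eval(0, 1, train.instance(0));  // some value in [-1, 1]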
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/supportVector/PolyKernel.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * PolyKernel.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions.supportVector; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.Utils; /** * <!-- globalinfo-start --> The polynomial kernel : K(x, y) = &lt;x, y&gt;^p or * K(x, y) = (&lt;x, y&gt;+1)^p * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -D * Enables debugging output (if available) to be printed. * (default: off) * </pre> * * <pre> * -C &lt;num&gt; * The size of the cache (a prime number), 0 for full cache and * -1 to turn it off. * (default: 250007) * </pre> * * <pre> * -E &lt;num&gt; * The Exponent to use. * (default: 1.0) * </pre> * * <pre> * -L * Use lower-order terms. * (default: no) * </pre> * * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Shane Legg (shane@intelligenesis.net) (sparse vector code) * @author Stuart Inglis (stuart@reeltwo.com) (sparse vector code) * @version $Revision$ */ public class PolyKernel extends CachedKernel { /** for serialization */ static final long serialVersionUID = -321831645846363201L; /** Use lower-order terms? */ protected boolean m_lowerOrder = false; /** The exponent for the polynomial kernel. */ protected double m_exponent = 1.0; /** * default constructor - does nothing. */ public PolyKernel() { super(); } /** * Creates a new <code>PolyKernel</code> instance. * * @param data the training dataset used. * @param cacheSize the size of the cache (a prime number) * @param exponent the exponent to use * @param lowerOrder whether to use lower-order terms * @throws Exception if something goes wrong */ public PolyKernel(Instances data, int cacheSize, double exponent, boolean lowerOrder) throws Exception { super(); setCacheSize(cacheSize); setExponent(exponent); setUseLowerOrder(lowerOrder); buildKernel(data); } /** * Returns a string describing the kernel * * @return a description suitable for displaying in the explorer/experimenter * gui */ @Override public String globalInfo() { return "The polynomial kernel : K(x, y) = <x, y>^p or K(x, y) = (<x, y>+1)^p"; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> result = new Vector<Option>(); result.addElement(new Option("\tThe Exponent to use.\n" + "\t(default: 1.0)", "E", 1, "-E <num>")); result.addElement(new Option("\tUse lower-order terms.\n" + "\t(default: no)", "L", 0, "-L")); result.addAll(Collections.list(super.listOptions())); return result.elements(); } /** * Parses a given list of options. 
* <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -D * Enables debugging output (if available) to be printed. * (default: off) * </pre> * * <pre> * -C &lt;num&gt; * The size of the cache (a prime number), 0 for full cache and * -1 to turn it off. * (default: 250007) * </pre> * * <pre> * -E &lt;num&gt; * The Exponent to use. * (default: 1.0) * </pre> * * <pre> * -L * Use lower-order terms. * (default: no) * </pre> * * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { String tmpStr; tmpStr = Utils.getOption('E', options); if (tmpStr.length() != 0) { setExponent(Double.parseDouble(tmpStr)); } else { setExponent(1.0); } setUseLowerOrder(Utils.getFlag('L', options)); super.setOptions(options); } /** * Gets the current settings of the Kernel. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> result = new Vector<String>(); result.add("-E"); result.add("" + getExponent()); if (getUseLowerOrder()) { result.add("-L"); } Collections.addAll(result, super.getOptions()); return result.toArray(new String[result.size()]); } /** * * @param id1 the index of instance 1 * @param id2 the index of instance 2 * @param inst1 the instance 1 object * @return the dot product * @throws Exception if something goes wrong */ @Override protected double evaluate(int id1, int id2, Instance inst1) throws Exception { double result; if (id1 == id2) { result = dotProd(inst1, inst1); } else { result = dotProd(inst1, m_data.instance(id2)); } // Use lower order terms? if (m_lowerOrder) { result += 1.0; } if (m_exponent != 1.0) { result = Math.pow(result, m_exponent); } return result; } /** * Returns the Capabilities of this kernel. * * @return the capabilities of this object * @see Capabilities */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enableAllClasses(); result.enable(Capability.MISSING_CLASS_VALUES); result.enable(Capability.NO_CLASS); return result; } /** * Sets the exponent value. * * @param value the exponent value */ public void setExponent(double value) { m_exponent = value; } /** * Gets the exponent value. * * @return the exponent value */ public double getExponent() { return m_exponent; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String exponentTipText() { return "The exponent value."; } /** * Sets whether to use lower-order terms. * * @param value true if lower-order terms will be used */ public void setUseLowerOrder(boolean value) { m_lowerOrder = value; } /** * Gets whether lower-order terms are used. 
* * @return true if lower-order terms are used */ public boolean getUseLowerOrder() { return m_lowerOrder; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String useLowerOrderTipText() { return "Whether to use lower-order terms."; } /** * returns a string representation for the Kernel * * @return a string representation of the kernel */ @Override public String toString() { String result; if (getExponent() == 1.0) { if (getUseLowerOrder()) { result = "Linear Kernel with lower order: K(x,y) = <x,y> + 1"; } else { result = "Linear Kernel: K(x,y) = <x,y>"; } } else { if (getUseLowerOrder()) { result = "Poly Kernel with lower order: K(x,y) = (<x,y> + 1)^" + getExponent(); } else { result = "Poly Kernel: K(x,y) = <x,y>^" + getExponent(); } } return result; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
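/*
 * Editor's note -- illustrative usage sketch, not part of the original Weka source.
 * It builds a tiny two-instance dataset by hand (attribute names and values are
 * made up for the example) and evaluates the quadratic kernel with lower-order
 * terms, K(x,y) = (<x,y> + 1)^2. Fully qualified names are used so the sketch
 * compiles with the imports already present in this file.
 */
class PolyKernelUsageSketch {

  public static void main(String[] args) throws Exception {
    // two numeric attributes; no class attribute is needed (NO_CLASS capability)
    java.util.ArrayList<weka.core.Attribute> atts = new java.util.ArrayList<weka.core.Attribute>();
    atts.add(new weka.core.Attribute("x1"));
    atts.add(new weka.core.Attribute("x2"));
    Instances data = new Instances("toy", atts, 2);
    data.add(new weka.core.DenseInstance(1.0, new double[] { 1.0, 2.0 }));
    data.add(new weka.core.DenseInstance(1.0, new double[] { 3.0, 4.0 }));

    // exponent 2.0 with lower-order terms: K(x,y) = (<x,y> + 1)^2
    PolyKernel kernel = new PolyKernel(data, 250007, 2.0, true);

    // <(1,2),(3,4)> = 11, so this prints (11 + 1)^2 = 144.0
    System.out.println(kernel.eval(0, 1, data.instance(0)));
  }
}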
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/supportVector/PrecomputedKernelMatrixKernel.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * PrecomputedKernelMatrixKernel.java * Copyright (C) 2008-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions.supportVector; import java.io.File; import java.io.FileReader; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Copyable; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.matrix.Matrix; /** * <!-- globalinfo-start --> This kernel is based on a static kernel matrix that * is read from a file. Instances must have a single nominal attribute * (excluding the class). This attribute must be the first attribute in the file * and its values are used to reference rows/columns in the kernel matrix. The * second attribute must be the class attribute. * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -D * Enables debugging output (if available) to be printed. * (default: off) * </pre> * * <pre> * -M &lt;file name&gt; * The file name of the file that holds the kernel matrix. * (default: kernelMatrix.matrix) * </pre> * * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision$ */ public class PrecomputedKernelMatrixKernel extends Kernel implements Copyable { /** for serialization */ static final long serialVersionUID = -321831645846363333L; /** The file holding the kernel matrix. */ protected File m_KernelMatrixFile = new File("kernelMatrix.matrix"); /** The kernel matrix. */ protected Matrix m_KernelMatrix; /** A classifier counter. */ protected int m_Counter; /** * Return a shallow copy of this kernel * * @return a shallow copy of this kernel */ @Override public Object copy() { PrecomputedKernelMatrixKernel newK = new PrecomputedKernelMatrixKernel(); newK.setKernelMatrix(m_KernelMatrix); newK.setKernelMatrixFile(m_KernelMatrixFile); newK.m_Counter = m_Counter; return newK; } /** * Returns a string describing the kernel * * @return a description suitable for displaying in the explorer/experimenter * gui */ @Override public String globalInfo() { return "This kernel is based on a static kernel matrix that is read from a file. " + "Instances must have a single nominal attribute (excluding the class). " + "This attribute must be the first attribute in the file and its values are " + "used to reference rows/columns in the kernel matrix. The second attribute " + "must be the class attribute."; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ @Override public Enumeration<Option> listOptions() { Vector<Option> result = new Vector<Option>(); result.addElement(new Option( "\tThe file name of the file that holds the kernel matrix.\n" + "\t(default: kernelMatrix.matrix)", "M", 1, "-M <file name>")); result.addAll(Collections.list(super.listOptions())); return result.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -D * Enables debugging output (if available) to be printed. * (default: off) * </pre> * * <pre> * -M &lt;file name&gt; * The file name of the file that holds the kernel matrix. * (default: kernelMatrix.matrix) * </pre> * * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { String tmpStr; tmpStr = Utils.getOption('M', options); if (tmpStr.length() != 0) { setKernelMatrixFile(new File(tmpStr)); } else { setKernelMatrixFile(new File("kernelMatrix.matrix")); } super.setOptions(options); } /** * Gets the current settings of the Kernel. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> result = new Vector<String>(); result.add("-M"); result.add("" + getKernelMatrixFile()); Collections.addAll(result, super.getOptions()); return result.toArray(new String[result.size()]); } /** * returns the corresponding entry of the precomputed kernel matrix * * @param id1 the index of instance 1 * @param id2 the index of instance 2 * @param inst1 the instance 1 object * @return the kernel matrix entry for the two instances * @throws Exception if something goes wrong */ @Override public double eval(int id1, int id2, Instance inst1) throws Exception { if (m_KernelMatrix == null) { throw new IllegalArgumentException( "Kernel matrix has not been loaded successfully."); } int index1 = -1; if (id1 > -1) { index1 = (int) m_data.instance(id1).value(0); } else { index1 = (int) inst1.value(0); } int index2 = (int) m_data.instance(id2).value(0); return m_KernelMatrix.get(index1, index2); } /** * initializes variables etc. * * @param data the data to use */ @Override protected void initVars(Instances data) { super.initVars(data); try { if (m_KernelMatrix == null) { m_KernelMatrix = new Matrix(new FileReader(m_KernelMatrixFile)); // System.err.println("Read kernel matrix."); } } catch (Exception e) { System.err.println("Problem reading matrix from " + m_KernelMatrixFile); } m_Counter++; // System.err.print("Building classifier: " + m_Counter + "\r"); } /** * Returns the Capabilities of this kernel. * * @return the capabilities of this object * @see Capabilities */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); result.enable(Capability.NOMINAL_ATTRIBUTES); result.enableAllClasses(); result.enable(Capability.MISSING_CLASS_VALUES); result.enable(Capability.NO_CLASS); return result; } /** * Sets the file holding the kernel matrix * * @param f the file holding the matrix */ public void setKernelMatrixFile(File f) { m_KernelMatrixFile = f; } /** * Gets the file containing the kernel matrix. * * @return the file containing the kernel matrix */ public File getKernelMatrixFile() { return m_KernelMatrixFile; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String kernelMatrixFileTipText() { return "The file holding the kernel matrix."; } /** * Set the kernel matrix.
This method is used by the unit test for this class, * as it loads a test matrix as a system resource. * * @param km the kernel matrix to use */ protected void setKernelMatrix(Matrix km) { m_KernelMatrix = km; } /** * returns a string representation for the Kernel * * @return a string representation of the kernel */ @Override public String toString() { return "Using kernel matrix from file with name: " + getKernelMatrixFile(); } /** * Frees the memory used by the kernel. (Useful with kernels which use cache.) * This function is called when the training is done, i.e., after that, eval * will be called with id1 == -1. */ @Override public void clean() { // do nothing } /** * Returns the number of kernel evaluations performed. * * @return the number of kernel evaluations performed. */ @Override public int numEvals() { return 0; } /** * Returns the number of dot product cache hits. * * @return the number of dot product cache hits, or -1 if not supported by * this kernel. */ @Override public int numCacheHits() { return 0; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
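/*
 * Editor's note -- illustrative usage sketch, not part of the original Weka source.
 * It shows the data layout this kernel expects: the first attribute is a nominal
 * index into the kernel matrix, the second is the class. The 2x2 matrix below is
 * made up; in normal use the matrix would be read from the kernel matrix file
 * rather than injected via the protected setKernelMatrix (which same-package
 * code like this sketch may call).
 */
class PrecomputedKernelMatrixKernelUsageSketch {

  public static void main(String[] args) throws Exception {
    java.util.ArrayList<String> indexValues = new java.util.ArrayList<String>();
    indexValues.add("0");
    indexValues.add("1");
    java.util.ArrayList<String> classValues = new java.util.ArrayList<String>();
    classValues.add("yes");
    classValues.add("no");
    java.util.ArrayList<weka.core.Attribute> atts = new java.util.ArrayList<weka.core.Attribute>();
    atts.add(new weka.core.Attribute("index", indexValues));
    atts.add(new weka.core.Attribute("class", classValues));
    Instances data = new Instances("precomputed", atts, 2);
    data.setClassIndex(1);
    data.add(new weka.core.DenseInstance(1.0, new double[] { 0.0, 0.0 }));
    data.add(new weka.core.DenseInstance(1.0, new double[] { 1.0, 1.0 }));

    PrecomputedKernelMatrixKernel kernel = new PrecomputedKernelMatrixKernel();
    kernel.setKernelMatrix(new Matrix(new double[][] { { 1.0, 0.5 }, { 0.5, 1.0 } }));
    kernel.buildKernel(data);

    // instance 0 references row 0, instance 1 references column 1, so this prints 0.5
    System.out.println(kernel.eval(0, 1, data.instance(0)));
  }
}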
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/supportVector/Puk.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Puk.java * Copyright (C) 2007-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions.supportVector; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; /** * <!-- globalinfo-start --> The Pearson VII function-based universal kernel.<br/> * <br/> * For more information see:<br/> * <br/> * B. Uestuen, W.J. Melssen, L.M.C. Buydens (2006). Facilitating the application of Support Vector Regression by using a universal Pearson VII function based kernel. Chemometrics and Intelligent Laboratory Systems. 81:29-40. * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -D * Enables debugging output (if available) to be printed. * (default: off) * </pre> * * <pre> * -C &lt;num&gt; * The size of the cache (a prime number), 0 for full cache and * -1 to turn it off. * (default: 250007) * </pre> * * <pre> * -O &lt;num&gt; * The Omega parameter. * (default: 1.0) * </pre> * * <pre> * -S &lt;num&gt; * The Sigma parameter. * (default: 1.0) * </pre> * * <!-- options-end --> * * @author Bernhard Pfahringer (bernhard@cs.waikato.ac.nz) * @version $Revision$ */ public class Puk extends CachedKernel implements TechnicalInformationHandler { /** for serialization */ private static final long serialVersionUID = 1682161522559978851L; /** The precalculated dotproducts of &lt;inst_i,inst_i&gt; */ protected double m_kernelPrecalc[]; /** Omega for the Puk kernel. */ protected double m_omega = 1.0; /** Sigma for the Puk kernel. */ protected double m_sigma = 1.0; /** Cached factor for the Puk kernel. */ protected double m_factor = 1.0; /** * default constructor - does nothing. */ public Puk() { super(); } /** * Constructor. Initializes m_kernelPrecalc[]. 
* * @param data * the data to use * @param cacheSize * the size of the cache * @param omega * the exponent * @param sigma * the bandwidth * @throws Exception * if something goes wrong */ public Puk(final Instances data, final int cacheSize, final double omega, final double sigma) throws Exception { super(); this.setCacheSize(cacheSize); this.setOmega(omega); this.setSigma(sigma); this.buildKernel(data); } /** * Returns a string describing the kernel * * @return a description suitable for displaying in the explorer/experimenter gui */ @Override public String globalInfo() { return "The Pearson VII function-based universal kernel.\n\n" + "For more information see:\n\n" + this.getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed information about the technical background of this class, e.g., paper reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "B. Uestuen and W.J. Melssen and L.M.C. Buydens"); result.setValue(Field.YEAR, "2006"); result.setValue(Field.TITLE, "Facilitating the application of Support Vector Regression by using a universal Pearson VII function based kernel"); result.setValue(Field.JOURNAL, "Chemometrics and Intelligent Laboratory Systems"); result.setValue(Field.VOLUME, "81"); result.setValue(Field.PAGES, "29-40"); result.setValue(Field.PDF, "http://www.cac.science.ru.nl/research/publications/PDFs/ustun2006.pdf"); return result; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> result = new Vector<Option>(); result.addElement(new Option("\tThe Omega parameter.\n" + "\t(default: 1.0)", "O", 1, "-O <num>")); result.addElement(new Option("\tThe Sigma parameter.\n" + "\t(default: 1.0)", "S", 1, "-S <num>")); result.addAll(Collections.list(super.listOptions())); return result.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -D * Enables debugging output (if available) to be printed. * (default: off) * </pre> * * <pre> * -C &lt;num&gt; * The size of the cache (a prime number), 0 for full cache and * -1 to turn it off. * (default: 250007) * </pre> * * <pre> * -O &lt;num&gt; * The Omega parameter. * (default: 1.0) * </pre> * * <pre> * -S &lt;num&gt; * The Sigma parameter. * (default: 1.0) * </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String tmpStr; tmpStr = Utils.getOption('O', options); if (tmpStr.length() != 0) { this.setOmega(Double.parseDouble(tmpStr)); } else { this.setOmega(1.0); } tmpStr = Utils.getOption('S', options); if (tmpStr.length() != 0) { this.setSigma(Double.parseDouble(tmpStr)); } else { this.setSigma(1.0); } super.setOptions(options); } /** * Gets the current settings of the Kernel. 
* * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> result = new Vector<String>(); result.add("-O"); result.add("" + this.getOmega()); result.add("-S"); result.add("" + this.getSigma()); Collections.addAll(result, super.getOptions()); return result.toArray(new String[result.size()]); } /** * returns the dot product * * @param id1 * the index of instance 1 * @param id2 * the index of instance 2 * @param inst1 * the instance 1 object * @return the dot product * @throws Exception * if something goes wrong */ @Override protected double evaluate(final int id1, final int id2, final Instance inst1) throws Exception { if (id1 == id2) { return 1.0; } else { double precalc1; if (id1 == -1) { precalc1 = this.dotProd(inst1, inst1); } else { precalc1 = this.m_kernelPrecalc[id1]; } Instance inst2 = this.m_data.instance(id2); double squaredDifference = -2.0 * this.dotProd(inst1, inst2) + precalc1 + this.m_kernelPrecalc[id2]; double intermediate = this.m_factor * Math.sqrt(squaredDifference); double result = 1.0 / Math.pow(1.0 + intermediate * intermediate, this.getOmega()); return result; } } /** * Sets the omega value. * * @param value * the omega value */ public void setOmega(final double value) { this.m_omega = value; this.m_factor = this.computeFactor(this.m_omega, this.m_sigma); } /** * Gets the omega value. * * @return the omega value */ public double getOmega() { return this.m_omega; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String omegaTipText() { return "The Omega value."; } /** * Sets the sigma value. * * @param value * the sigma value */ public void setSigma(final double value) { this.m_sigma = value; this.m_factor = this.computeFactor(this.m_omega, this.m_sigma); } /** * Gets the sigma value. * * @return the sigma value */ public double getSigma() { return this.m_sigma; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String sigmaTipText() { return "The Sigma value."; } /** * computes the factor for curve-fitting (see equation (13) in paper) * * @param omega * the omega to use * @param sigma * the sigma to use * @return the factor for curve-fitting */ protected double computeFactor(final double omega, final double sigma) { double root = Math.sqrt(Math.pow(2.0, 1.0 / omega) - 1); return 2.0 * root / sigma; } /** * initializes variables etc. * * @param data * the data to use */ @Override protected void initVars(final Instances data) { super.initVars(data); this.m_factor = this.computeFactor(this.m_omega, this.m_sigma); this.m_kernelPrecalc = new double[data.numInstances()]; } /** * Returns the Capabilities of this kernel. * * @return the capabilities of this object * @see Capabilities */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enableAllClasses(); result.enable(Capability.MISSING_CLASS_VALUES); result.enable(Capability.NO_CLASS); return result; } /** * builds the kernel with the given data. Initializes the kernel cache. The actual size of the cache in bytes is (64 * cacheSize). 
* * @param data * the data to base the kernel on * @throws Exception * if something goes wrong */ @Override public void buildKernel(final Instances data) throws Exception { this.initVars(data); for (int i = 0; i < data.numInstances(); i++) { if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } this.m_kernelPrecalc[i] = this.dotProd(data.instance(i), data.instance(i)); } } /** * returns a string representation for the Kernel * * @return a string representation of the kernel */ @Override public String toString() { return "Puk kernel"; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
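/*
 * Editor's note -- illustrative usage sketch, not part of the original Weka source.
 * For omega = sigma = 1 the Pearson VII kernel reduces to
 * K(x,y) = 1 / (1 + (2 * sqrt(2^(1/omega) - 1) * ||x-y|| / sigma)^2)^omega,
 * so for the two made-up points below (||x-y||^2 = 8) the expected value is
 * 1/33, roughly 0.0303.
 */
class PukUsageSketch {

  public static void main(String[] args) throws Exception {
    java.util.ArrayList<weka.core.Attribute> atts = new java.util.ArrayList<weka.core.Attribute>();
    atts.add(new weka.core.Attribute("x1"));
    atts.add(new weka.core.Attribute("x2"));
    Instances data = new Instances("toy", atts, 2);
    data.add(new weka.core.DenseInstance(1.0, new double[] { 1.0, 2.0 }));
    data.add(new weka.core.DenseInstance(1.0, new double[] { 3.0, 4.0 }));

    Puk kernel = new Puk(data, 250007, 1.0, 1.0);

    System.out.println(kernel.eval(0, 0, data.instance(0))); // diagonal entries are always 1.0
    System.out.println(kernel.eval(0, 1, data.instance(0))); // ~0.0303 (= 1/33)
  }
}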
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/supportVector/RBFKernel.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RBFKernel.java * Copyright (C) 1999-2017 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions.supportVector; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.OptionMetadata; import weka.core.RevisionUtils; /** * <!-- globalinfo-start --> The RBF kernel : K(x, y) = exp(-gamma*(x-y)^2) <br> * <br> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p> * * <pre> * -C &lt;num&gt; * The size of the cache (a prime number), 0 for full cache and * -1 to turn it off. * (default: 250007) * </pre> * * <pre> * -G &lt;double&gt; * The value to use for the gamma parameter (default: 0.01). * </pre> * * <pre> * -output-debug-info * Enables debugging output (if available) to be printed. * (default: off) * </pre> * * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Shane Legg (shane@intelligenesis.net) (sparse vector code) * @author Stuart Inglis (stuart@reeltwo.com) (sparse vector code) * @version $Revision$ */ public class RBFKernel extends CachedKernel { /** for serialization (value needs to be consistent with J. Lindgren's implementation) */ static final long serialVersionUID = 5247117544316387852L; /** The gamma parameter for the RBF kernel. */ protected double m_gamma = 0.01; /** The diagonal values of the dot product matrix (name needs to be consistent with J. Lindgren's implementation). */ protected double[] m_kernelPrecalc; /** * default constructor - does nothing. */ public RBFKernel() { super(); } /** * Creates a new <code>RBFKernel</code> instance. * * @param data * the training dataset used. * @param cacheSize * the size of the cache (a prime number) * @param gamma * the gamma to use * @throws Exception * if something goes wrong */ public RBFKernel(final Instances data, final int cacheSize, final double gamma) throws Exception { super(); this.setCacheSize(cacheSize); this.setGamma(gamma); this.buildKernel(data); } /** * Builds the kernel. Calls the super class method and then also initializes the cache for the diagonal of the dot product matrix. 
*/ @Override public void buildKernel(final Instances data) throws Exception { super.buildKernel(data); this.m_kernelPrecalc = new double[data.numInstances()]; for (int i = 0; i < data.numInstances(); i++) { if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } double sum = 0; Instance inst = data.instance(i); for (int j = 0; j < inst.numValues(); j++) { if (inst.index(j) != data.classIndex()) { sum += inst.valueSparse(j) * inst.valueSparse(j); } } this.m_kernelPrecalc[i] = sum; } } /** * Returns a string describing the kernel * * @return a description suitable for displaying in the explorer/experimenter gui */ @Override public String globalInfo() { return "The RBF kernel : K(x, y) = exp(-gamma*(x-y)^2)"; } /** * returns the result of the kernel evaluation for the two instances * * @param id1 * the index of instance 1 * @param id2 * the index of instance 2 * @param inst1 * the instance 1 object * @return the kernel evaluation result * @throws Exception * if something goes wrong */ @Override protected double evaluate(final int id1, final int id2, final Instance inst1) throws Exception { if (id1 == id2) { return 1.0; } else { if (id1 == -1) { return Math.exp(-this.m_gamma * (this.dotProd(inst1, inst1) - 2 * this.dotProd(inst1, this.m_data.instance(id2)) + this.m_kernelPrecalc[id2])); } else { return Math.exp(-this.m_gamma * (this.m_kernelPrecalc[id1] - 2 * this.dotProd(inst1, this.m_data.instance(id2)) + this.m_kernelPrecalc[id2])); } } } /** * Returns the Capabilities of this kernel. * * @return the capabilities of this object * @see Capabilities */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enableAllClasses(); result.enable(Capability.MISSING_CLASS_VALUES); result.enable(Capability.NO_CLASS); return result; } /** * Sets the gamma value. * * @param value * the gamma value */ @OptionMetadata(description = "The value to use for the gamma parameter (default: 0.01).", displayName = "gamma", commandLineParamName = "G", commandLineParamSynopsis = "-G <double>", displayOrder = 1) public void setGamma(final double value) { this.m_gamma = value; } /** * Gets the gamma value. * * @return the gamma value */ public double getGamma() { return this.m_gamma; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String gammaTipText() { return "The gamma value."; } /** * returns a string representation for the Kernel * * @return a string representation of the kernel */ @Override public String toString() { return "RBF Kernel: K(x,y) = exp(-" + this.m_gamma + "*(x-y)^2)"; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
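/*
 * Editor's note -- illustrative usage sketch, not part of the original Weka source.
 * It first evaluates the kernel directly on two made-up points (||x-y||^2 = 8,
 * so K = exp(-0.5 * 8) = exp(-4), roughly 0.0183) and then plugs an RBFKernel
 * into SMOreg. The tiny y = 2x regression set is an assumption made up for the
 * sketch; the printed prediction should be near 2.0, with the exact value
 * depending on SMOreg's default C and epsilon settings.
 */
class RBFKernelUsageSketch {

  public static void main(String[] args) throws Exception {
    java.util.ArrayList<weka.core.Attribute> atts = new java.util.ArrayList<weka.core.Attribute>();
    atts.add(new weka.core.Attribute("x1"));
    atts.add(new weka.core.Attribute("x2"));
    Instances data = new Instances("toy", atts, 2);
    data.add(new weka.core.DenseInstance(1.0, new double[] { 1.0, 2.0 }));
    data.add(new weka.core.DenseInstance(1.0, new double[] { 3.0, 4.0 }));

    RBFKernel kernel = new RBFKernel(data, 250007, 0.5);
    System.out.println(kernel.eval(0, 1, data.instance(0))); // exp(-4) ~ 0.0183

    // typical use: hand the kernel to an SVM, here SMOreg on a y = 2x toy set
    java.util.ArrayList<weka.core.Attribute> regAtts = new java.util.ArrayList<weka.core.Attribute>();
    regAtts.add(new weka.core.Attribute("x"));
    regAtts.add(new weka.core.Attribute("y"));
    Instances regData = new Instances("line", regAtts, 4);
    regData.setClassIndex(1);
    for (double x = 1; x <= 4; x++) {
      regData.add(new weka.core.DenseInstance(1.0, new double[] { x, 2 * x }));
    }
    weka.classifiers.functions.SMOreg svm = new weka.classifiers.functions.SMOreg();
    svm.setKernel(new RBFKernel());
    svm.buildClassifier(regData);
    System.out.println(svm.classifyInstance(regData.instance(0)));
  }
}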
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/supportVector/RegOptimizer.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RegOptimizer.java * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions.supportVector; import java.io.Serializable; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.functions.SMOreg; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Utils; /** * Base class implementation for learning algorithm of SMOreg * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -L &lt;double&gt; * The epsilon parameter in epsilon-insensitive loss function. * (default 1.0e-3) * </pre> * * <pre> * -W &lt;double&gt; * The random number seed. * (default 1) * </pre> * * <!-- options-end --> * * @author Remco Bouckaert (remco@cs.waikato.ac.nz,rrb@xm.co.nz) * @version $Revision$ */ public class RegOptimizer implements OptionHandler, Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = -2198266997254461814L; /** loss type **/ // protected int m_nLossType = EPSILON; /** the loss type: L1 */ // public final static int L1 = 1; /** the loss type: L2 */ // public final static int L2 = 2; /** the loss type: HUBER */ // public final static int HUBER = 3; /** the loss type: EPSILON */ // public final static int EPSILON = 4; /** the loss type */ // public static final Tag[] TAGS_LOSS_TYPE = { // new Tag(L2, "L2"), // new Tag(L1, "L1"), // new Tag(HUBER, "Huber"), // new Tag(EPSILON, "EPSILON"), // }; /** alpha and alpha* arrays containing weights for solving dual problem **/ public double[] m_alpha; public double[] m_alphaStar; /** offset **/ protected double m_b; /** epsilon of epsilon-insensitive cost function **/ protected double m_epsilon = 1e-3; /** capacity parameter, copied from SMOreg **/ protected double m_C = 1.0; /** class values/desired output vector **/ protected double[] m_target; /** points to data set **/ protected Instances m_data; /** the kernel */ protected Kernel m_kernel; /** index of class variable in data set **/ protected int m_classIndex = -1; /** number of instances in data set **/ protected int m_nInstances = -1; /** random number generator **/ protected Random m_random; /** seed for initializing random number generator **/ protected int m_nSeed = 1; /** set of support vectors, that is, vectors with alpha(*)!=0 **/ protected SMOset m_supportVectors; /** number of kernel evaluations, used for printing statistics only **/ protected long m_nEvals = 0; /** number of kernel cache hits, used for printing statistics only **/ protected int m_nCacheHits = -1; /** weights for linear kernel **/ protected double[] m_weights; /** * Variables to hold weight vector in sparse form. (To reduce storage * requirements.) 
*/ protected double[] m_sparseWeights; protected int[] m_sparseIndices; /** flag to indicate whether the model is built yet **/ protected boolean m_bModelBuilt = false; /** parent SMOreg class **/ protected SMOreg m_SVM = null; /** * the default constructor */ public RegOptimizer() { super(); m_random = new Random(m_nSeed); } /** * Gets an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> result = new Vector<Option>(); result.addElement(new Option( "\tThe epsilon parameter in epsilon-insensitive loss function.\n" + "\t(default 1.0e-3)", "L", 1, "-L <double>")); // result.addElement(new Option( // "\tLoss type (L1, L2, Huber, Epsilon insensitive loss)\n", // "L", 1, "-L [L1|L2|HUBER|EPSILON]")); result.addElement(new Option("\tThe random number seed.\n" + "\t(default 1)", "W", 1, "-W <double>")); return result.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -L &lt;double&gt; * The epsilon parameter in epsilon-insensitive loss function. * (default 1.0e-3) * </pre> * * <pre> * -W &lt;double&gt; * The random number seed. * (default 1) * </pre> * * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { String tmpStr; tmpStr = Utils.getOption('L', options); if (tmpStr.length() != 0) { setEpsilonParameter(Double.parseDouble(tmpStr)); } else { setEpsilonParameter(1.0e-3); } /* * tmpStr = Utils.getOption('S', options); if (tmpStr.length() != 0) * setLossType(new SelectedTag(tmpStr, TAGS_LOSS_TYPE)); else * setLossType(new SelectedTag(EPSILON, TAGS_LOSS_TYPE)); */ tmpStr = Utils.getOption('W', options); if (tmpStr.length() != 0) { setSeed(Integer.parseInt(tmpStr)); } else { setSeed(1); } } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> result = new Vector<String>(); result.add("-L"); result.add("" + getEpsilonParameter()); result.add("-W"); result.add("" + getSeed()); // result.add("-S"; // result.add((new SelectedTag(m_nLossType, // TAGS_LOSS_TYPE)).getSelectedTag().getReadable(); return result.toArray(new String[result.size()]); } /** * flag to indicate whether the model was built yet * * @return true if the model was built */ public boolean modelBuilt() { return m_bModelBuilt; } /** * sets the parent SVM * * @param value the parent SVM */ public void setSMOReg(SMOreg value) { m_SVM = value; } /** * returns the number of kernel evaluations * * @return the number of kernel evaluations */ public long getKernelEvaluations() { return m_nEvals; } /** * return the number of kernel cache hits * * @return the number of hits */ public int getCacheHits() { return m_nCacheHits; } /** * initializes the algorithm * * @param data the data to work with * @throws Exception if m_SVM is null */ protected void init(Instances data) throws Exception { if (m_SVM == null) { throw new Exception( "SVM not initialized in optimizer. 
Use RegOptimizer.setSMOReg()"); } m_C = m_SVM.getC(); m_data = data; m_classIndex = data.classIndex(); m_nInstances = data.numInstances(); // Initialize kernel m_kernel = Kernel.makeCopy(m_SVM.getKernel()); m_kernel.buildKernel(data); // init m_target m_target = new double[m_nInstances]; for (int i = 0; i < m_nInstances; i++) { m_target[i] = data.instance(i).classValue(); } m_random = new Random(m_nSeed); // initialize alpha and alpha* array to all zero m_alpha = new double[m_target.length]; m_alphaStar = new double[m_target.length]; m_supportVectors = new SMOset(m_nInstances); m_b = 0.0; m_nEvals = 0; m_nCacheHits = -1; } /** * wrap up various variables to save memory and do some housekeeping after * optimization has finished. * * @throws Exception if something goes wrong */ protected void wrapUp() throws Exception { m_target = null; m_nEvals = m_kernel.numEvals(); m_nCacheHits = m_kernel.numCacheHits(); if ((m_SVM.getKernel() instanceof PolyKernel) && ((PolyKernel) m_SVM.getKernel()).getExponent() == 1.0) { // convert alpha's to weights double[] weights = new double[m_data.numAttributes()]; for (int k = m_supportVectors.getNext(-1); k != -1; k = m_supportVectors .getNext(k)) { for (int j = 0; j < weights.length; j++) { if (j != m_classIndex) { weights[j] += (m_alpha[k] - m_alphaStar[k]) * m_data.instance(k).value(j); } } } m_weights = weights; // release memory m_alpha = null; m_alphaStar = null; m_kernel = null; } else { m_kernel.clean(); } m_bModelBuilt = true; } /** * Compute the value of the objective function. * * @return the score * @throws Exception if something goes wrong */ protected double getScore() throws Exception { double res = 0; double t = 0, t2 = 0; for (int i = 0; i < m_nInstances; i++) { for (int j = 0; j < m_nInstances; j++) { t += (m_alpha[i] - m_alphaStar[i]) * (m_alpha[j] - m_alphaStar[j]) * m_kernel.eval(i, j, m_data.instance(i)); } // switch(m_nLossType) { // case L1: // t2 += m_data.instance(i).classValue() * (m_alpha[i] - m_alpha_[i]); // break; // case L2: // t2 += m_data.instance(i).classValue() * (m_alpha[i] - m_alpha_[i]) - // (0.5/m_SVM.getC()) * (m_alpha[i]*m_alpha[i] + m_alpha_[i]*m_alpha_[i]); // break; // case HUBER: // t2 += m_data.instance(i).classValue() * (m_alpha[i] - m_alpha_[i]) - // (0.5*m_SVM.getEpsilon()/m_SVM.getC()) * (m_alpha[i]*m_alpha[i] + // m_alpha_[i]*m_alpha_[i]); // break; // case EPSILON: // t2 += m_data.instance(i).classValue() * (m_alpha[i] - m_alphaStar[i]) - // m_epsilon * (m_alpha[i] + m_alphaStar[i]); t2 += m_target[i] * (m_alpha[i] - m_alphaStar[i]) - m_epsilon * (m_alpha[i] + m_alphaStar[i]); // break; // } } res += -0.5 * t + t2; return res; } /** * learn SVM parameters from data. Subclasses should implement something more * interesting.
* * @param data the data to work with * @throws Exception always throws an Exception, since subclasses must override it */ public void buildClassifier(Instances data) throws Exception { throw new Exception("Don't call this directly, use subclass instead"); } /** * sets the loss type to use * * @param newLossType the loss type to use */ // public void setLossType(SelectedTag newLossType) { // if (newLossType.getTags() == TAGS_LOSS_TYPE) { // m_nLossType = newLossType.getSelectedTag().getID(); // } // } /** * returns the current loss type * * @return the loss type */ // public SelectedTag getLossType() { // return new SelectedTag(m_nLossType, TAGS_LOSS_TYPE); // } /** * SVMOutput of an instance in the training set, m_data. This uses the cache, * unlike SVMOutput(Instance) * * @param index index of the training instance in m_data * @return the SVM output * @throws Exception if something goes wrong */ protected double SVMOutput(int index) throws Exception { double result = -m_b; for (int i = m_supportVectors.getNext(-1); i != -1; i = m_supportVectors .getNext(i)) { result += (m_alpha[i] - m_alphaStar[i]) * m_kernel.eval(index, i, m_data.instance(index)); } return result; } /** * SVMOutput of an arbitrary instance. Does not use the cache; for linear * machines the stored weight vector is used instead of the support vectors. * * @param inst the instance to compute the SVM output for * @return the SVM output * @throws Exception if the kernel evaluation fails */ public double SVMOutput(Instance inst) throws Exception { double result = -m_b; // Is the machine linear? if (m_weights != null) { // Is weight vector stored in sparse format? for (int i = 0; i < inst.numValues(); i++) { if (inst.index(i) != m_classIndex) { result += m_weights[inst.index(i)] * inst.valueSparse(i); } } } else { for (int i = m_supportVectors.getNext(-1); i != -1; i = m_supportVectors .getNext(i)) { result += (m_alpha[i] - m_alphaStar[i]) * m_kernel.eval(-1, i, inst); } } return result; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String seedTipText() { return "Seed for random number generator."; } /** * Gets the current seed value for the random number generator * * @return the seed value */ public int getSeed() { return m_nSeed; } /** * Sets the seed value for the random number generator * * @param value the seed value */ public void setSeed(int value) { m_nSeed = value; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String epsilonParameterTipText() { return "The epsilon parameter of the epsilon insensitive loss function. (default 0.001)."; } /** * Get the value of epsilon parameter of the epsilon insensitive loss * function. * * @return Value of epsilon parameter. */ public double getEpsilonParameter() { return m_epsilon; } /** * Set the value of epsilon parameter of the epsilon insensitive loss * function. * * @param v Value to assign to epsilon parameter. */ public void setEpsilonParameter(double v) { m_epsilon = v; } /** * Prints out the classifier. * * @return a description of the classifier as a string */ @Override public String toString() { StringBuffer text = new StringBuffer(); text.append("SMOreg\n\n"); if (m_weights != null) { text.append("weights (not support vectors):\n"); // it's a linear machine for (int i = 0; i < m_data.numAttributes(); i++) { if (i != m_classIndex) { text.append((m_weights[i] >= 0 ?
" + " : " - ") + Utils.doubleToString(Math.abs(m_weights[i]), 12, 4) + " * "); if (m_SVM.getFilterType().getSelectedTag().getID() == SMOreg.FILTER_STANDARDIZE) { text.append("(standardized) "); } else if (m_SVM.getFilterType().getSelectedTag().getID() == SMOreg.FILTER_NORMALIZE) { text.append("(normalized) "); } text.append(m_data.attribute(i).name() + "\n"); } } } else { // non-linear, print out all support vectors text.append("Support vectors:\n"); for (int i = 0; i < m_nInstances; i++) { if (m_alpha[i] > 0) { text.append("+" + m_alpha[i] + " * k[" + i + "]\n"); } if (m_alphaStar[i] > 0) { text.append("-" + m_alphaStar[i] + " * k[" + i + "]\n"); } } } text.append((m_b <= 0 ? " + " : " - ") + Utils.doubleToString(Math.abs(m_b), 12, 4) + "\n\n"); text.append("\n\nNumber of kernel evaluations: " + m_nEvals); if (m_nCacheHits >= 0 && m_nEvals > 0) { double hitRatio = 1 - m_nEvals * 1.0 / (m_nCacheHits + m_nEvals); text.append(" (" + Utils.doubleToString(hitRatio * 100, 7, 3).trim() + "% cached)"); } return text.toString(); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/supportVector/RegSMO.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RegSMO.java * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions.supportVector; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; /** * <!-- globalinfo-start --> Implementation of SMO for support vector regression * as described in :<br/> * <br/> * A.J. Smola, B. Schoelkopf (1998). A tutorial on support vector regression. * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;misc{Smola1998, * author = {A.J. Smola and B. Schoelkopf}, * note = {NeuroCOLT2 Technical Report NC2-TR-1998-030}, * title = {A tutorial on support vector regression}, * year = {1998} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -P &lt;double&gt; * The epsilon for round-off error. * (default 1.0e-12) * </pre> * * <pre> * -L &lt;double&gt; * The epsilon parameter in epsilon-insensitive loss function. * (default 1.0e-3) * </pre> * * <pre> * -W &lt;double&gt; * The random number seed. 
* (default 1) * </pre> * * <!-- options-end --> * * @author Remco Bouckaert (remco@cs.waikato.ac.nz,rrb@xm.co.nz) * @version $Revision$ */ public class RegSMO extends RegOptimizer implements TechnicalInformationHandler { /** for serialization */ private static final long serialVersionUID = -7504070793279598638L; /** * tolerance parameter, smaller changes on alpha in inner loop will be ignored **/ protected double m_eps = 1.0e-12; /** Precision constant for updating sets */ protected final static double m_Del = 1e-10; // 1000 * Double.MIN_VALUE; /** * error cache containing m_error[i] = SVMOutput(i) - m_target[i] - m_b <br/> * note, we don't need m_b in the cache, since if we do, we need to maintain * it when m_b is updated */ double[] m_error; /** alpha value for first candidate **/ protected double m_alpha1; /** alpha* value for first candidate **/ protected double m_alpha1Star; /** alpha value for second candidate **/ protected double m_alpha2; /** alpha* value for second candidate **/ protected double m_alpha2Star; /** * default constructor */ public RegSMO() { super(); } /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter * gui */ public String globalInfo() { return "Implementation of SMO for support vector regression as described " + "in :\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed * information about the technical background of this class, e.g., paper * reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.MISC); result.setValue(Field.AUTHOR, "A.J. Smola and B. Schoelkopf"); result.setValue(Field.TITLE, "A tutorial on support vector regression"); result.setValue(Field.NOTE, "NeuroCOLT2 Technical Report NC2-TR-1998-030"); result.setValue(Field.YEAR, "1998"); return result; } /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ @Override public Enumeration<Option> listOptions() { Vector<Option> result = new Vector<Option>(); result.addElement(new Option("\tThe epsilon for round-off error.\n" + "\t(default 1.0e-12)", "P", 1, "-P <double>")); result.addAll(Collections.list(super.listOptions())); return result.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -P &lt;double&gt; * The epsilon for round-off error. * (default 1.0e-12) * </pre> * * <pre> * -L &lt;double&gt; * The epsilon parameter in epsilon-insensitive loss function. * (default 1.0e-3) * </pre> * * <pre> * -W &lt;double&gt; * The random number seed. * (default 1) * </pre> * * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { String tmpStr; tmpStr = Utils.getOption('P', options); if (tmpStr.length() != 0) { setEpsilon(Double.parseDouble(tmpStr)); } else { setEpsilon(1.0e-12); } super.setOptions(options); } /** * Gets the current settings of the classifier. 
* * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> result = new Vector<String>(); result.add("-P"); result.add("" + getEpsilon()); Collections.addAll(result, super.getOptions()); return result.toArray(new String[result.size()]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String epsilonTipText() { return "The epsilon for round-off error (shouldn't be changed)."; } /** * Get the value of epsilon. * * @return Value of epsilon. */ public double getEpsilon() { return m_eps; } /** * Set the value of epsilon. * * @param v Value to assign to epsilon. */ public void setEpsilon(double v) { m_eps = v; } /** * initialize various variables before starting the actual optimizer * * @param data data set used for learning * @throws Exception if something goes wrong */ @Override protected void init(Instances data) throws Exception { super.init(data); // init error cache m_error = new double[m_nInstances]; for (int i = 0; i < m_nInstances; i++) { m_error[i] = -m_target[i]; } } /** * wrap up various variables to save memory and do some housekeeping after * optimization has finished. * * @throws Exception if something goes wrong */ @Override protected void wrapUp() throws Exception { m_error = null; super.wrapUp(); }
/** * Finds optimal point on line constrained by first (i1) and second (i2) * candidate. Parameters correspond to pseudocode (see technical information) * * @param i1 index of the first candidate * @param alpha1 alpha value of the first candidate * @param alpha1Star alpha* value of the first candidate * @param C1 capacity bound of the first candidate * @param i2 index of the second candidate * @param alpha2 alpha value of the second candidate * @param alpha2Star alpha* value of the second candidate * @param C2 capacity bound of the second candidate * @param gamma the conserved sum alpha1 - alpha1* + alpha2 - alpha2* * @param eta curvature term computed from the kernel evaluations * @param deltaPhi difference of the errors of the two candidates * @return true if any of the Lagrange multipliers changed */ protected boolean findOptimalPointOnLine(int i1, double alpha1, double alpha1Star, double C1, int i2, double alpha2, double alpha2Star, double C2, double gamma, double eta, double deltaPhi) { if (eta <= 0) { // this may happen due to numeric instability // due to Mercer's condition, this should not happen, hence we give up return false; } boolean case1 = false; boolean case2 = false; boolean case3 = false; boolean case4 = false; boolean finished = false; // while !finished // % this loop is passed at most three times // % case variables needed to avoid attempting small changes twice while (!finished) { // if (case1 == 0) && // (alpha1 > 0 || (alpha1* == 0 && deltaPhi > 0)) && // (alpha2 > 0 || (alpha2* == 0 && deltaPhi < 0)) // compute L, H (wrt. alpha1, alpha2) // if L < H // a2 = alpha2 - deltaPhi/eta // a2 = min(a2, H) // a2 = max(L, a2) // a1 = alpha1 - (a2 - alpha2) // update alpha1, alpha2 if change is larger than some eps // else // finished = 1 // endif // case1 = 1; if ((case1 == false) && (alpha1 > 0 || (alpha1Star == 0 && deltaPhi > 0)) && (alpha2 > 0 || (alpha2Star == 0 && deltaPhi < 0))) { // compute L, H (wrt. alpha1, alpha2) double L = Math.max(0, gamma - C1); double H = Math.min(C2, gamma); if (L < H) { double a2 = alpha2 - deltaPhi / eta; a2 = Math.min(a2, H); a2 = Math.max(L, a2); // To prevent precision problems if (a2 > C2 - m_Del * C2) { a2 = C2; } else if (a2 <= m_Del * C2) { a2 = 0; } double a1 = alpha1 - (a2 - alpha2); if (a1 > C1 - m_Del * C1) { a1 = C1; } else if (a1 <= m_Del * C1) { a1 = 0; } // update alpha1, alpha2 if change is larger than some eps if (Math.abs(alpha1 - a1) > m_eps) { deltaPhi += eta * (a2 - alpha2); alpha1 = a1; alpha2 = a2; } } else { finished = true; } case1 = true; } // elseif (case2 == 0) && // (alpha1 > 0 || (alpha1* == 0 && deltaPhi > 2 epsilon)) && // (alpha2* > 0 || (alpha2 == 0 && deltaPhi > 2 epsilon)) // compute L, H (wrt. alpha1, alpha2*) // if L < H // a2 = alpha2* + (deltaPhi - 2 epsilon)/eta // a2 = min(a2, H) // a2 = max(L, a2) // a1 = alpha1 + (a2 - alpha2*) // update alpha1, alpha2* if change is larger than some eps // else // finished = 1 // endif // case2 = 1; else if ((case2 == false) && (alpha1 > 0 || (alpha1Star == 0 && deltaPhi > 2 * m_epsilon)) && (alpha2Star > 0 || (alpha2 == 0 && deltaPhi > 2 * m_epsilon))) { // compute L, H (wrt. alpha1, alpha2*) double L = Math.max(0, -gamma); double H = Math.min(C2, -gamma + C1); if (L < H) { double a2 = alpha2Star + (deltaPhi - 2 * m_epsilon) / eta; a2 = Math.min(a2, H); a2 = Math.max(L, a2); // To prevent precision problems if (a2 > C2 - m_Del * C2) { a2 = C2; } else if (a2 <= m_Del * C2) { a2 = 0; } double a1 = alpha1 + (a2 - alpha2Star); if (a1 > C1 - m_Del * C1) { a1 = C1; } else if (a1 <= m_Del * C1) { a1 = 0; } // update alpha1, alpha2* if change is larger than some eps if (Math.abs(alpha1 - a1) > m_eps) { deltaPhi += eta * (-a2 + alpha2Star); alpha1 = a1; alpha2Star = a2; } } else { finished = true; } case2 = true; } // elseif (case3 == 0) && // (alpha1* > 0 || (alpha1 == 0 && deltaPhi < -2 epsilon)) && // (alpha2 > 0 || (alpha2* == 0 && deltaPhi < -2 epsilon)) // compute L, H (wrt. alpha1*, alpha2) // if L < H // a2 = alpha2 - (deltaPhi + 2 epsilon)/eta // a2 = min(a2, H) // a2 = max(L, a2) // a1 = alpha1* + (a2 - alpha2) // update alpha1*, alpha2 if change is larger than some eps // else // finished = 1 // endif // case3 = 1; else if ((case3 == false) && (alpha1Star > 0 || (alpha1 == 0 && deltaPhi < -2 * m_epsilon)) && (alpha2 > 0 || (alpha2Star == 0 && deltaPhi < -2 * m_epsilon))) { // compute L, H (wrt. alpha1*, alpha2) double L = Math.max(0, gamma); double H = Math.min(C2, C1 + gamma); if (L < H) { // note Smola's pseudocode has a minus, where there should be a plus // in the following line, Keerthi's is correct double a2 = alpha2 - (deltaPhi + 2 * m_epsilon) / eta; a2 = Math.min(a2, H); a2 = Math.max(L, a2); // To prevent precision problems if (a2 > C2 - m_Del * C2) { a2 = C2; } else if (a2 <= m_Del * C2) { a2 = 0; } double a1 = alpha1Star + (a2 - alpha2); if (a1 > C1 - m_Del * C1) { a1 = C1; } else if (a1 <= m_Del * C1) { a1 = 0; } // update alpha1*, alpha2 if change is larger than some eps if (Math.abs(alpha1Star - a1) > m_eps) { deltaPhi += eta * (a2 - alpha2); alpha1Star = a1; alpha2 = a2; } } else { finished = true; } case3 = true; } // elseif (case4 == 0) && // (alpha1* > 0 || (alpha1 == 0 && deltaPhi < 0)) && // (alpha2* > 0 || (alpha2 == 0 && deltaPhi > 0)) // compute L, H (wrt. alpha1*, alpha2*) // if L < H // a2 = alpha2* + deltaPhi/eta // a2 = min(a2, H) // a2 = max(L, a2) // a1 = alpha1* - (a2 - alpha2*) // update alpha1*, alpha2* if change is larger than some eps // else // finished = 1 // endif // case4 = 1; // else // finished = 1 // endif else if ((case4 == false) && (alpha1Star > 0 || (alpha1 == 0 && deltaPhi < 0)) && (alpha2Star > 0 || (alpha2 == 0 && deltaPhi > 0))) { // compute L, H (wrt. alpha1*, alpha2*) double L = Math.max(0, -gamma - C1); double H = Math.min(C2, -gamma); if (L < H) { double a2 = alpha2Star + deltaPhi / eta; a2 = Math.min(a2, H); a2 = Math.max(L, a2); // To prevent precision problems if (a2 > C2 - m_Del * C2) { a2 = C2; } else if (a2 <= m_Del * C2) { a2 = 0; } double a1 = alpha1Star - (a2 - alpha2Star); if (a1 > C1 - m_Del * C1) { a1 = C1; } else if (a1 <= m_Del * C1) { a1 = 0; } // update alpha1*, alpha2* if change is larger than some eps if (Math.abs(alpha1Star - a1) > m_eps) { deltaPhi += eta * (-a2 + alpha2Star); alpha1Star = a1; alpha2Star = a2; } } else { finished = true; } case4 = true; } else { finished = true; } // update deltaPhi // using 4.36 from Smola's thesis: // deltaPhi = deltaPhi - eta * // ((alpha1New-alpha1StarNew)-(alpha1-alpha1Star)); // the update is done inside the loop, saving us from having to remember old // values of alpha1(*) // deltaPhi += eta * ((alpha2 - alpha2Star) - dAlpha2Old); // dAlpha2Old = (alpha2 - alpha2Star); // endwhile } if (Math.abs(alpha1 - m_alpha[i1]) > m_eps || Math.abs(alpha1Star - m_alphaStar[i1]) > m_eps || Math.abs(alpha2 - m_alpha[i2]) > m_eps || Math.abs(alpha2Star - m_alphaStar[i2]) > m_eps) { if (alpha1 > C1 - m_Del * C1) { alpha1 = C1; } else if (alpha1 <= m_Del * C1) { alpha1 = 0; } if (alpha1Star > C1 - m_Del * C1) { alpha1Star = C1; } else if (alpha1Star <= m_Del * C1) { alpha1Star = 0; } if (alpha2 > C2 - m_Del * C2) { alpha2 = C2; } else if (alpha2 <= m_Del * C2) { alpha2 = 0; } if (alpha2Star > C2 - m_Del * C2) { alpha2Star = C2; } else if (alpha2Star <= m_Del * C2) { alpha2Star = 0; } // store new alpha's m_alpha[i1] = alpha1; m_alphaStar[i1] = alpha1Star; m_alpha[i2] = alpha2; m_alphaStar[i2] = alpha2Star; // update support vector set if (alpha1 != 0 || alpha1Star != 0) { if (!m_supportVectors.contains(i1)) { m_supportVectors.insert(i1); } } else { m_supportVectors.delete(i1); } if (alpha2 != 0 || alpha2Star != 0) { if (!m_supportVectors.contains(i2)) { m_supportVectors.insert(i2); } } else { m_supportVectors.delete(i2); } return true; } return false; }
/** * takeStep method from pseudocode. Parameters correspond to pseudocode (see * technical information) * * @param i1 index of the first candidate * @param i2 index of the second candidate * @param alpha2 alpha value of the second candidate * @param alpha2Star alpha* value of the second candidate * @param phi2 error of the second candidate (from the error cache) * @return 1 if the step changed the multipliers, 0 otherwise * @throws Exception if the kernel evaluation fails */ protected int takeStep(int i1, int i2, double alpha2, double alpha2Star, double phi2) throws Exception { // if (i1 == i2) return 0 if (i1 == i2) { return 0; } double C1 = m_C * m_data.instance(i1).weight(); double C2 = m_C * m_data.instance(i2).weight(); // alpha1, alpha1* = Lagrange multipliers for i1 // y1 = target[i1] // phi1 = SVM output on point[i1] - y1 (in error cache) double alpha1 = m_alpha[i1]; double alpha1Star = m_alphaStar[i1]; double phi1 = m_error[i1]; // k11 = kernel(point[i1],point[i1]) // k12 = kernel(point[i1],point[i2]) // k22 = kernel(point[i2],point[i2]) // eta = 2*k12 - k11 - k22 // gamma = alpha1 - alpha1* + alpha2 - alpha2* double k11 = m_kernel.eval(i1, i1, m_data.instance(i1)); double k12 = m_kernel.eval(i1, i2, m_data.instance(i1)); double k22 = m_kernel.eval(i2, i2, m_data.instance(i2)); double eta = -2 * k12 + k11 + k22; // note, Smola's pseudocode has signs // swapped, Keerthi's doesn't if (eta < 0) { // this may happen due to numeric instability // due to Mercer's condition, this should not happen, hence we give up return 0; } double gamma = alpha1 - alpha1Star + alpha2 - alpha2Star; // % we assume eta < 0. otherwise one has to repeat the complete // % reasoning similarly (compute objective function for L and H // % and decide which one is largest // case1 = case2 = case3 = case4 = finished = 0 // alpha1old = alpha1, alpha1old* = alpha1* // alpha2old = alpha2, alpha2old* = alpha2* // deltaPhi = phi1 - phi2 double alpha1old = alpha1; double alpha1Starold = alpha1Star; double alpha2old = alpha2; double alpha2Starold = alpha2Star; double deltaPhi = phi2 - phi1; if (findOptimalPointOnLine(i1, alpha1, alpha1Star, C1, i2, alpha2, alpha2Star, C2, gamma, eta, deltaPhi)) { alpha1 = m_alpha[i1]; alpha1Star = m_alphaStar[i1]; alpha2 = m_alpha[i2]; alpha2Star = m_alphaStar[i2]; // Update error cache using new Lagrange multipliers double dAlpha1 = alpha1 - alpha1old - (alpha1Star - alpha1Starold); double dAlpha2 = alpha2 - alpha2old - (alpha2Star - alpha2Starold); for (int j = 0; j < m_nInstances; j++) { if ((j != i1) && (j != i2)/* && m_error[j] != MAXERR */) { m_error[j] += dAlpha1 * m_kernel.eval(i1, j, m_data.instance(i1)) + dAlpha2 * m_kernel.eval(i2, j, m_data.instance(i2)); } } m_error[i1] += dAlpha1 * k11 + dAlpha2 * k12; m_error[i2] += dAlpha1 * k12 + dAlpha2 * k22; // Update threshold to reflect change in Lagrange multipliers double b1 = Double.MAX_VALUE; double b2 = Double.MAX_VALUE; if ((0 < alpha1 && alpha1 < C1) || (0 < alpha1Star && alpha1Star < C1) || (0 < alpha2 && alpha2 < C2) || (0 < alpha2Star && alpha2Star < C2)) { if (0 < alpha1 && alpha1 < C1) { b1 = m_error[i1] - m_epsilon; } else if (0 < alpha1Star && alpha1Star < C1) { b1 = m_error[i1] + m_epsilon; } if (0 < alpha2 && alpha2 < C2) { b2 = m_error[i2] - m_epsilon; } else if (0 < alpha2Star && alpha2Star < C2) { b2 = m_error[i2] + m_epsilon; } if (b1 < Double.MAX_VALUE) { m_b = b1; if (b2 < Double.MAX_VALUE) { m_b = (b1 + b2) / 2.0; } } else if (b2 < Double.MAX_VALUE) { m_b = b2; } } else if (m_b == 0) { // both alpha's are on the boundary, and m_b is not initialized m_b = (m_error[i1] + m_error[i2]) / 2.0; } // if changes in alpha1(*), alpha2(*) are larger than some eps // return 1 // else // return 0 // endif return 1; } else { return 0; } // endprocedure }
/** * examineExample method from pseudocode. Parameters correspond to pseudocode * (see technical information) * * @param i2 index of the candidate to examine * @return 1 if a step was taken, 0 otherwise * @throws Exception if the kernel evaluation fails */ protected int examineExample(int i2) throws Exception { // alpha2, alpha2* = Lagrange multipliers for i2 double alpha2 = m_alpha[i2]; double alpha2Star = m_alphaStar[i2]; // C2, C2* = Constraints for i2 double C2 = m_C; double C2Star = m_C; // phi2 = SVM output on point[i2] - y2 (in error cache) double phi2 = m_error[i2]; // phi2b contains the error, taking the offset into account double phi2b = phi2 - m_b; // if ((phi2 > epsilon && alpha2* < C2*) || // (phi2 < epsilon && alpha2* > 0 ) || // (-phi2 > epsilon && alpha2 < C2 ) || // (-phi2 > epsilon && alpha2 > 0 )) if ((phi2b > m_epsilon && alpha2Star < C2Star) || (phi2b < m_epsilon && alpha2Star > 0) || (-phi2b > m_epsilon && alpha2 < C2) || (-phi2b > m_epsilon && alpha2 > 0)) { // if (number of non-zero & non-C alpha > 1) // i1 = result of second choice heuristic // if takeStep(i1,i2) return 1 // endif int i1 = secondChoiceHeuristic(i2); if (i1 >= 0 && (takeStep(i1, i2, alpha2, alpha2Star, phi2) > 0)) { return 1; } // loop over all non-zero and non-C alpha, random start // i1 = identity of current alpha // if takeStep(i1,i2) return 1 // endloop for (i1 = 0; i1 < m_target.length; i1++) { if ((m_alpha[i1] > 0 && m_alpha[i1] < m_C) || (m_alphaStar[i1] > 0 && m_alphaStar[i1] < m_C)) { if (takeStep(i1, i2, alpha2, alpha2Star, phi2) > 0) { return 1; } } } // loop over all possible i1, with random start // i1 = loop variable // if takeStep(i1,i2) return 1 // endloop for (i1 = 0; i1 < m_target.length; i1++) { if (takeStep(i1, i2, alpha2, alpha2Star, phi2) > 0) { return 1; } } // endif } // return 0 return 0; // endprocedure }
/** * applies heuristic for finding candidate that is expected to lead to good * gain when applying takeStep together with second candidate. * * @param i2 index of second candidate * @return the index of the chosen candidate, or -1 if none was found */ protected int secondChoiceHeuristic(int i2) { // randomly select an index i1 (not equal to i2) with non-zero and non-C // alpha, if any for (int i = 0; i < 59; i++) { int i1 = m_random.nextInt(m_nInstances); if ((i1 != i2) && ((m_alpha[i1] > 0 && m_alpha[i1] < m_C) || (m_alphaStar[i1] > 0 && m_alphaStar[i1] < m_C))) { return i1; } } return -1; }
/** * finds alpha and alpha* parameters that optimize the SVM target function * * @throws Exception if something goes wrong */ public void optimize() throws Exception { // main routine: // initialize threshold to zero // numChanged = 0 // examineAll = 1 // SigFig = -100 // LoopCounter = 0 int numChanged = 0; int examineAll = 1; int sigFig = -100; int loopCounter = 0; // while ((numChanged > 0 | examineAll) | (SigFig < 3)) while ((numChanged > 0 || (examineAll > 0)) || (sigFig < 3)) { // LoopCounter++ // numChanged = 0; loopCounter++; numChanged = 0; // if (examineAll) // loop I over all training examples // numChanged += examineExample(I) // else // loop I over examples where alpha is not 0 & not C // numChanged += examineExample(I) // endif int numSamples = 0; if (examineAll > 0) { for (int i = 0; i < m_nInstances; i++) { numChanged += examineExample(i); } } else { for (int i = 0; i < m_target.length; i++) { if ((m_alpha[i] > 0 && m_alpha[i] < m_C * m_data.instance(i).weight()) || (m_alphaStar[i] > 0 && m_alphaStar[i] < m_C * m_data.instance(i).weight())) { numSamples++; numChanged += examineExample(i); } } } // // if (mod(LoopCounter, 2) == 0) // MinimumNumChanged = max(1, 0.1*NumSamples) // else // MinimumNumChanged = 1 // endif int minimumNumChanged = 1; if (loopCounter % 2 == 0) { minimumNumChanged = (int) Math.max(1, 0.1 * numSamples); } // if (examineAll == 1) // examineAll = 0 // elseif (numChanged < MinimumNumChanged) // examineAll = 1 // endif if (examineAll == 1) { examineAll = 0; } else if (numChanged < minimumNumChanged) { examineAll = 1; } // endwhile if (loopCounter == 2500) { break; } } // endmain } /** * learn SVM parameters from data using Smola's SMO algorithm.
Subclasses * should implement something more interesting. * * @param instances the data to learn from * @throws Exception if something goes wrong */ @Override public void buildClassifier(Instances instances) throws Exception { // initialize variables init(instances); // solve optimization problem optimize(); // clean up wrapUp(); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
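The same clip-and-snap pattern recurs in every branch of findOptimalPointOnLine above: the unconstrained optimum for a Lagrange multiplier is first clipped to the feasible segment [L, H] and then snapped exactly onto 0 or the capacity C whenever it lands within the relative tolerance m_Del of a bound. A minimal, self-contained Java sketch of that step (all names here are illustrative, not part of the Weka source):

// Sketch of the clip-and-snap update applied to each candidate multiplier.
public final class ClipSnapSketch {

    /**
     * Clips a candidate multiplier to [lo, hi], then snaps values that are
     * numerically indistinguishable from the bounds 0 and c (within the
     * relative tolerance del) exactly onto those bounds.
     */
    static double clipAndSnap(double a, double lo, double hi, double c, double del) {
        a = Math.min(a, hi);  // enforce upper end of the feasible segment
        a = Math.max(lo, a);  // enforce lower end of the feasible segment
        if (a > c - del * c) {
            return c;         // snap to the capacity C
        } else if (a <= del * c) {
            return 0;         // snap to zero
        }
        return a;
    }

    public static void main(String[] args) {
        // With C = 1 and del = 1e-10, a nearly saturated value snaps to C.
        System.out.println(clipAndSnap(0.9999999999999, 0.0, 1.0, 1.0, 1e-10)); // 1.0
        System.out.println(clipAndSnap(0.4, 0.0, 1.0, 1.0, 1e-10));             // 0.4
    }
}

Snapping keeps the support-vector bookkeeping exact: a multiplier that is merely "almost zero" would otherwise keep its instance in m_supportVectors indefinitely.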
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/supportVector/RegSMOImproved.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RegSMOImproved.java * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions.supportVector; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; /** * <!-- globalinfo-start --> Learn SVM for regression using SMO with Shevade, * Keerthi, et al. adaption of the stopping criterion.<br/> * <br/> * For more information see:<br/> * <br/> * S.K. Shevade, S.S. Keerthi, C. Bhattacharyya, K.R.K. Murthy: Improvements to * the SMO Algorithm for SVM Regression. In: IEEE Transactions on Neural * Networks, 1999.<br/> * <br/> * S.K. Shevade, S.S. Keerthi, C. Bhattacharyya, K.R.K. Murthy (1999). * Improvements to the SMO Algorithm for SVM Regression. Control Division, Dept. * of Mechanical Engineering. * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;inproceedings{Shevade1999, * author = {S.K. Shevade and S.S. Keerthi and C. Bhattacharyya and K.R.K. Murthy}, * booktitle = {IEEE Transactions on Neural Networks}, * title = {Improvements to the SMO Algorithm for SVM Regression}, * year = {1999}, * PS = {http://guppy.mpe.nus.edu.sg/\~mpessk/svm/ieee_smo_reg.ps.gz} * } * * &#64;techreport{Shevade1999, * address = {Control Division, Dept. of Mechanical Engineering}, * author = {S.K. Shevade and S.S. Keerthi and C. Bhattacharyya and K.R.K. Murthy}, * institution = {National University of Singapore}, * number = {CD-99-16}, * title = {Improvements to the SMO Algorithm for SVM Regression}, * year = {1999}, * PS = {http://guppy.mpe.nus.edu.sg/\~mpessk/svm/smoreg_mod.ps.gz} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -T &lt;double&gt; * The tolerance parameter for checking the stopping criterion. * (default 0.001) * </pre> * * <pre> * -V * Use variant 1 of the algorithm when true, otherwise use variant 2. * (default true) * </pre> * * <pre> * -P &lt;double&gt; * The epsilon for round-off error. * (default 1.0e-12) * </pre> * * <pre> * -L &lt;double&gt; * The epsilon parameter in epsilon-insensitive loss function. * (default 1.0e-3) * </pre> * * <pre> * -W &lt;double&gt; * The random number seed. 
* (default 1) * </pre> * * <!-- options-end --> * * @author Remco Bouckaert (remco@cs.waikato.ac.nz,rrb@xm.co.nz) * @version $Revision$ */ public class RegSMOImproved extends RegSMO implements TechnicalInformationHandler { /** for serialization */ private static final long serialVersionUID = 471692841446029784L; public final static int I0 = 3; public final static int I0a = 1; public final static int I0b = 2; public final static int I1 = 4; public final static int I2 = 8; public final static int I3 = 16; /** The different sets used by the algorithm. */ protected SMOset m_I0; /** Index set {i: 0 < m_alpha[i] < C || 0 < m_alphaStar[i] < C}} */ protected int[] m_iSet; /** b.up and b.low boundaries used to determine stopping criterion */ protected double m_bUp, m_bLow; /** index of the instance that gave us b.up and b.low */ protected int m_iUp, m_iLow; /** * tolerance parameter used for checking stopping criterion b.up < b.low + 2 * tol */ double m_fTolerance = 0.001; /** set true to use variant 1 of the paper, otherwise use variant 2 */ boolean m_bUseVariant1 = true; /** * Returns a string describing the object * * @return a description suitable for displaying in the explorer/experimenter * gui */ @Override public String globalInfo() { return "Learn SVM for regression using SMO with Shevade, Keerthi, et al. " + "adaption of the stopping criterion.\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed * information about the technical background of this class, e.g., paper * reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "S.K. Shevade and S.S. Keerthi and C. Bhattacharyya and K.R.K. Murthy"); result.setValue(Field.TITLE, "Improvements to the SMO Algorithm for SVM Regression"); result.setValue(Field.BOOKTITLE, "IEEE Transactions on Neural Networks"); result.setValue(Field.YEAR, "1999"); result.setValue(Field.PS, "http://guppy.mpe.nus.edu.sg/~mpessk/svm/ieee_smo_reg.ps.gz"); additional = result.add(Type.TECHREPORT); additional.setValue(Field.AUTHOR, "S.K. Shevade and S.S. Keerthi and C. Bhattacharyya and K.R.K. Murthy"); additional.setValue(Field.TITLE, "Improvements to the SMO Algorithm for SVM Regression"); additional.setValue(Field.INSTITUTION, "National University of Singapore"); additional.setValue(Field.ADDRESS, "Control Division, Dept. of Mechanical Engineering"); additional.setValue(Field.NUMBER, "CD-99-16"); additional.setValue(Field.YEAR, "1999"); additional.setValue(Field.PS, "http://guppy.mpe.nus.edu.sg/~mpessk/svm/smoreg_mod.ps.gz"); return result; } /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ @Override public Enumeration<Option> listOptions() { Vector<Option> result = new Vector<Option>(); result.addElement(new Option( "\tThe tolerance parameter for checking the stopping criterion.\n" + "\t(default 0.001)", "T", 1, "-T <double>")); result.addElement(new Option( "\tUse variant 1 of the algorithm when true, otherwise use variant 2.\n" + "\t(default true)", "V", 0, "-V")); result.addAll(Collections.list(super.listOptions())); return result.elements(); } /** * Parses a given list of options. 
* <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -T &lt;double&gt; * The tolerance parameter for checking the stopping criterion. * (default 0.001) * </pre> * * <pre> * -V * Use variant 1 of the algorithm when true, otherwise use variant 2. * (default true) * </pre> * * <pre> * -P &lt;double&gt; * The epsilon for round-off error. * (default 1.0e-12) * </pre> * * <pre> * -L &lt;double&gt; * The epsilon parameter in epsilon-insensitive loss function. * (default 1.0e-3) * </pre> * * <pre> * -W &lt;double&gt; * The random number seed. * (default 1) * </pre> * * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { String tmpStr; tmpStr = Utils.getOption('T', options); if (tmpStr.length() != 0) { setTolerance(Double.parseDouble(tmpStr)); } else { setTolerance(0.001); } setUseVariant1(Utils.getFlag('V', options)); super.setOptions(options); } /** * Gets the current settings of the object. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> result = new Vector<String>(); result.add("-T"); result.add("" + getTolerance()); if (m_bUseVariant1) { result.add("-V"); } Collections.addAll(result, super.getOptions()); return result.toArray(new String[result.size()]); } /** * Returns the tip text for this property * * @return a description suitable for displaying in the explorer/experimenter * gui */ public String toleranceTipText() { return "tolerance parameter used for checking stopping criterion b.up < b.low + 2 tol"; } /** * returns the current tolerance * * @return the tolerance */ public double getTolerance() { return m_fTolerance; } /** * sets the tolerance * * @param d the new tolerance */ public void setTolerance(double d) { m_fTolerance = d; } /** * Returns the tip text for this property * * @return a description suitable for displaying in the explorer/experimenter * gui */ public String useVariant1TipText() { return "set true to use variant 1 of the paper, otherwise use variant 2."; } /** * Whether variant 1 is used * * @return true if variant 1 is used */ public boolean isUseVariant1() { return m_bUseVariant1; } /** * Sets whether to use variant 1 * * @param b if true then variant 1 is used */ public void setUseVariant1(boolean b) { m_bUseVariant1 = b; } /** * takeStep method from Shevade et al.s paper. parameters correspond to * pseudocode from paper. 
* * @param i1 * @param i2 * @param alpha2 * @param alpha2Star * @param phi2 * @return * @throws Exception */ @Override protected int takeStep(int i1, int i2, double alpha2, double alpha2Star, double phi2) throws Exception { // procedure takeStep(i1, i2) // // if (i1 == i2) // return 0 if (i1 == i2) { return 0; } double C1 = m_C * m_data.instance(i1).weight(); double C2 = m_C * m_data.instance(i2).weight(); // alpha1, alpha1' = Lagrange multipliers for i1 double alpha1 = m_alpha[i1]; double alpha1Star = m_alphaStar[i1]; // double y1 = m_target[i1]; // TODO: verify we do not need to recompute m_error[i1] here // TODO: since m_error is only updated for indices in m_I0 double phi1 = m_error[i1]; // if ((m_iSet[i1] & I0)==0) { // phi1 = -SVMOutput(i1) - m_b + m_target[i1]; // m_error[i1] = phi1; // } // k11 = kernel(point[i1], point[i1]) // k12 = kernel(point[i1], point[i2]) // k22 = kernel(point[i2], point[i2]) // eta = -2*k12+k11+k22 // gamma = alpha1-alpha1'+alpha2-alpha2' // double k11 = m_kernel.eval(i1, i1, m_data.instance(i1)); double k12 = m_kernel.eval(i1, i2, m_data.instance(i1)); double k22 = m_kernel.eval(i2, i2, m_data.instance(i2)); double eta = -2 * k12 + k11 + k22; double gamma = alpha1 - alpha1Star + alpha2 - alpha2Star; // if (eta < 0) { // this may happen due to numeric instability // due to Mercer's condition, this should not happen, hence we give up // return 0; // } // % We assume that eta > 0. Otherwise one has to repeat the complete // % reasoning similarly (i.e. compute objective functions at L and H // % and decide which one is largest // // case1 = case2 = case3 = case4 = finished = 0 // alpha1old = alpha1, // alpha1old' = alpha1' // alpha2old = alpha2, // alpha2old' = alpha2' // deltaphi = F1 - F2 // // while !finished // % This loop is passed at most three times // % Case variables needed to avoid attempting small changes twice // if (case1 == 0) && // (alpha1 > 0 || (alpha1' == 0 && deltaphi > 0)) && // (alpha2 > 0 || (alpha2' == 0 && deltaphi < 0)) // compute L, H (w.r.t. alpha1, alpha2) // if (L < H) // a2 = alpha2 - (deltaphi / eta ) a2 = min(a2, H) a2 = max(L, a2) a1 = // alpha1 - (a2 - alpha2) // update alpha1, alpha2 if change is larger than some eps // else // finished = 1 // endif // case1 = 1 // elseif (case2 == 0) && // (alpha1 > 0 || (alpha1' == 0 && deltaphi > 2*epsilon)) && // (alpha2' > 0 || (alpha2 == 0 && deltaphi > 2*epsilon)) // // compute L, H (w.r.t. alpha1, alpha2') // if (L < H) // a2 = alpha2' + ((deltaphi - 2*epsilon)/eta)) a2 = min(a2, H) a2 = max(L, // a2) a1 = alpha1 + (a2-alpha2') // update alpha1, alpha2' if change is larger than some eps // else // finished = 1 // endif // case2 = 1 // elseif (case3 == 0) && // (alpha1' > 0 || (alpha1 == 0 && deltaphi < -2*epsilon)) && // (alpha2 > 0 || (alpha2' == 0 && deltaphi < -2*epsilon)) // compute L, H (w.r.t. alpha1', alpha2) // if (L < H) // a2 = alpha2 - ((deltaphi + 2*epsilon)/eta) a2 = min(a2, H) a2 = max(L, // a2) a1 = alpha1' + (a2 - alpha2) // update alpha1', alpha2 if change is larger than some eps // else // finished = 1 // endif // case3 = 1 // elseif (case4 == 0) && // (alpha1' > 0) || (alpha1 == 0 && deltaphi < 0)) && // (alpha2' > 0) || (alpha2 == 0 && deltaphi > 0)) // compute L, H (w.r.t. 
alpha1', alpha2') // if (L < H) // a2 = alpha2' + deltaphi/eta a2 = min(a2, H) a2 = max(L, a2) a1 = alpha1' // - (a2 - alpha2') // update alpha1, alpha2' if change is larger than some eps // else // finished = 1 // endif // case4 = 1 // else // finished = 1 // endif // update deltaphi // endwhile double alpha1old = alpha1; double alpha1Starold = alpha1Star; double alpha2old = alpha2; double alpha2Starold = alpha2Star; double deltaPhi = phi1 - phi2; if (findOptimalPointOnLine(i1, alpha1, alpha1Star, C1, i2, alpha2, alpha2Star, C2, gamma, eta, deltaPhi)) { alpha1 = m_alpha[i1]; alpha1Star = m_alphaStar[i1]; alpha2 = m_alpha[i2]; alpha2Star = m_alphaStar[i2]; // if changes in alpha1('), alpha2(') are larger than some eps // Update f-cache[i] for i in I.0 using new Lagrange multipliers // Store the changes in alpha, alpha' array // Update I.0, I.1, I.2, I.3 // Compute (i.low, b.low) and (i.up, b.up) by applying the conditions // mentioned above, using only i1, i2 and indices in I.0 // return 1 // else // return 0 // endif endprocedure // Update error cache using new Lagrange multipliers double dAlpha1 = alpha1 - alpha1old - (alpha1Star - alpha1Starold); double dAlpha2 = alpha2 - alpha2old - (alpha2Star - alpha2Starold); for (int j = m_I0.getNext(-1); j != -1; j = m_I0.getNext(j)) { if ((j != i1) && (j != i2)) { m_error[j] -= dAlpha1 * m_kernel.eval(i1, j, m_data.instance(i1)) + dAlpha2 * m_kernel.eval(i2, j, m_data.instance(i2)); } } m_error[i1] -= dAlpha1 * k11 + dAlpha2 * k12; m_error[i2] -= dAlpha1 * k12 + dAlpha2 * k22; updateIndexSetFor(i1, C1); updateIndexSetFor(i2, C2); // Compute (i.low, b.low) and (i.up, b.up) by applying the conditions // mentioned above, using only i1, i2 and indices in I.0 m_bUp = Double.MAX_VALUE; m_bLow = -Double.MAX_VALUE; for (int j = m_I0.getNext(-1); j != -1; j = m_I0.getNext(j)) { updateBoundaries(j, m_error[j]); } if (!m_I0.contains(i1)) { updateBoundaries(i1, m_error[i1]); } if (!m_I0.contains(i2)) { updateBoundaries(i2, m_error[i2]); } return 1; } else { return 0; } } /** * updates the index sets I0a, I0b, I1, I2 and I3 for vector i * * @param i index of vector * @param C capacity for vector i * @throws Exception */ protected void updateIndexSetFor(int i, double C) throws Exception { /* * m_I0a.delete(i); m_I0b.delete(i); m_I1.delete(i); m_I2.delete(i); * m_I3.delete(i); */ if (m_alpha[i] == 0 && m_alphaStar[i] == 0) { // m_I1.insert(i); m_iSet[i] = I1; m_I0.delete(i); } else if (m_alpha[i] > 0) { if (m_alpha[i] < C) { if ((m_iSet[i] & I0) == 0) { // m_error[i] = -SVMOutput(i) - m_b + m_target[i]; m_I0.insert(i); } // m_I0a.insert(i); m_iSet[i] = I0a; } else { // m_alpha[i] == C // m_I3.insert(i); m_iSet[i] = I3; m_I0.delete(i); } } else {// m_alphaStar[i] > 0 if (m_alphaStar[i] < C) { if ((m_iSet[i] & I0) == 0) { // m_error[i] = -SVMOutput(i) - m_b + m_target[i]; m_I0.insert(i); } // m_I0b.insert(i); m_iSet[i] = I0b; } else { // m_alphaStar[i] == C // m_I2.insert(i); m_iSet[i] = I2; m_I0.delete(i); } } } /** * updates the boundaries bLow and bUp and the corresponding indexes * * @param i2 index of vector * @param F2 error of vector i2 */ protected void updateBoundaries(int i2, double F2) { int iSet = m_iSet[i2]; double FLow = m_bLow; if ((iSet & (I2 | I0b)) > 0) { FLow = F2 + m_epsilon; } else if ((iSet & (I1 | I0a)) > 0) { FLow = F2 - m_epsilon; } if (m_bLow < FLow) { m_bLow = FLow; m_iLow = i2; } double FUp = m_bUp; if ((iSet & (I3 | I0a)) > 0) { FUp = F2 - m_epsilon; } else if ((iSet & (I1 | I0b)) > 0) { FUp = F2 + m_epsilon; } if (m_bUp > FUp) { m_bUp = FUp;
m_iUp = i2; } } /** * parameters correspond to pseudocode from paper. * * @param i2 index of candidate * @return * @throws Exception */ @Override protected int examineExample(int i2) throws Exception { // if (i2 is in I.0) // F2 = f-cache[i2] // else // compute F2 = F.i2 and set f-cache[i2] = F2 // % Update (b.low, i.low) or (b.up, i.up) using (F2, i2)... // if (i2 is in I.1) // if (F2+epsilon < b.up) // b.up = F2+epsilon, // i.up = i2 // elseif (F2-epsilon > b.low) // b.low = F2-epsilon, // i.low = i2 // end if // elseif ( (i2 is in I.2) && (F2+epsilon > b.low) ) // b.low = F2+epsilon, // i.low = i2 // elseif ( (i2 is in I.3) && (F2-epsilon < b.up) ) // b.up = F2-epsilon, // i.up = i2 // endif // endif int iSet = m_iSet[i2]; double F2 = m_error[i2]; if (!m_I0.contains(i2)) { F2 = -SVMOutput(i2) - m_b + m_target[i2]; m_error[i2] = F2; if (iSet == I1) { if (F2 + m_epsilon < m_bUp) { m_bUp = F2 + m_epsilon; m_iUp = i2; } else if (F2 - m_epsilon > m_bLow) { m_bLow = F2 - m_epsilon; m_iLow = i2; } } else if ((iSet == I2) && (F2 + m_epsilon > m_bLow)) { m_bLow = F2 + m_epsilon; m_iLow = i2; } else if ((iSet == I3) && (F2 - m_epsilon < m_bUp)) { m_bUp = F2 - m_epsilon; m_iUp = i2; } } // % Check optimality using current b.low and b.up and, if // % violated, find an index i1 to do joint optimization with i2... // optimality = 1; // case 1: i2 is in I.0a // if (b.low-(F2-epsilon) > 2 * tol) // optimality = 0; // i1 = i.low; // % For i2 in I.0a choose the better i1... // if ((F2-epsilon)-b.up > b.low-(F2-epsilon)) // i1 = i.up; // endif // elseif ((F2-epsilon)-b.up > 2 * tol) // optimality = 0; // i1 = i.up; // % For i2 in I.0a choose the better i1... // if ((b.low-(F2-epsilon) > (F2-epsilon)-b.up) // i1 = i.low; // endif // endif // case 2: i2 is in I.0b // if (b.low-(F2+epsilon) > 2 * tol) // optimality = 0; // i1 = i.low; // % For i2 in I.0b choose the better i1... // if ((F2+epsilon)-b.up > b.low-(F2+epsilon)) // i1 = i.up; // endif // elseif ((F2+epsilon)-b.up > 2 * tol) // optimality = 0; // i1 = i.up; // % For i2 in I.0b choose the better i1... // if ((b.low-(F2+epsilon) > (F2+epsilon)-b.up) // i1 = i.low; // endif // endif // case 3: i2 is in I.1 // if (b.low-(F2+epsilon) > 2 * tol) // optimality = 0; // i1 = i.low; // % For i2 in I1 choose the better i1... // if ((F2+epsilon)-b.up > b.low-(F2+epsilon) // i1 = i.up; // endif // elseif ((F2-epsilon)-b.up > 2 * tol) // optimality = 0; // i1 = i.up; // % For i2 in I1 choose the better i1... // if (b.low-(F2-epsilon) > (F2-epsilon)-b.up) // i1 = i.low; // endif // endif // case 4: i2 is in I.2 // if ((F2+epsilon)-b.up > 2*tol) // optimality = 0, // i1 = i.up // endif // case 5: i2 is in I.3 // if ((b.low-(F2-epsilon) > 2*tol) // optimality = 0, i1 = i.low // endif int i1 = i2; boolean bOptimality = true; // case 1: i2 is in I.0a if (iSet == I0a) { if (m_bLow - (F2 - m_epsilon) > 2 * m_fTolerance) { bOptimality = false; i1 = m_iLow; // % For i2 in I .0 a choose the better i1... if ((F2 - m_epsilon) - m_bUp > m_bLow - (F2 - m_epsilon)) { i1 = m_iUp; } } else if ((F2 - m_epsilon) - m_bUp > 2 * m_fTolerance) { bOptimality = false; i1 = m_iUp; // % For i2 in I.0a choose the better i1... if (m_bLow - (F2 - m_epsilon) > (F2 - m_epsilon) - m_bUp) { i1 = m_iLow; } } } // case 2: i2 is in I.0b else if (iSet == I0b) { if (m_bLow - (F2 + m_epsilon) > 2 * m_fTolerance) { bOptimality = false; i1 = m_iLow; // % For i2 in I.0b choose the better i1... 
if ((F2 + m_epsilon) - m_bUp > m_bLow - (F2 + m_epsilon)) { i1 = m_iUp; } } else if ((F2 + m_epsilon) - m_bUp > 2 * m_fTolerance) { bOptimality = false; i1 = m_iUp; // % For i2 in I.0b choose the better i1... if (m_bLow - (F2 + m_epsilon) > (F2 + m_epsilon) - m_bUp) { i1 = m_iLow; } } } // case 3: i2 is in I.1 else if (iSet == I1) { if (m_bLow - (F2 + m_epsilon) > 2 * m_fTolerance) { bOptimality = false; i1 = m_iLow; // % For i2 in I1 choose the better i1... if ((F2 + m_epsilon) - m_bUp > m_bLow - (F2 + m_epsilon)) { i1 = m_iUp; } } else if ((F2 - m_epsilon) - m_bUp > 2 * m_fTolerance) { bOptimality = false; i1 = m_iUp; // % For i2 in I1 choose the better i1... if (m_bLow - (F2 - m_epsilon) > (F2 - m_epsilon) - m_bUp) { i1 = m_iLow; } } } // case 4: i2 is in I.2 else if (iSet == I2) { if ((F2 + m_epsilon) - m_bUp > 2 * m_fTolerance) { bOptimality = false; i1 = m_iUp; } } // case 5: i2 is in I.3 else if (iSet == I3) { if (m_bLow - (F2 - m_epsilon) > 2 * m_fTolerance) { bOptimality = false; i1 = m_iLow; } } // if (optimality == 1) // return 0 // if (takeStep(i1, i2)) // return 1 // else // return 0 // endif // endprocedure if (bOptimality) { return 0; } return takeStep(i1, i2, m_alpha[i2], m_alphaStar[i2], F2); } /** * initialize various variables before starting the actual optimizer * * @param data data set used for learning * @throws Exception if something goes wrong */ @Override protected void init(Instances data) throws Exception { super.init(data); // from Keerthi's pseudo code: // set alpha and alpha' to zero for every example set I.1 to contain all the // examples // Choose any example i from the training set. // set b.up = target[i]+epsilon // set b.low = target[i]-espilon // i.up = i.low = i; // Initialize sets m_I0 = new SMOset(m_data.numInstances()); m_iSet = new int[m_data.numInstances()]; for (int i = 0; i < m_nInstances; i++) { m_iSet[i] = I1; } // m_iUp = m_random.nextInt(m_nInstances); m_iUp = 0; m_bUp = m_target[m_iUp] + m_epsilon; m_iLow = m_iUp; m_bLow = m_target[m_iLow] - m_epsilon; // init error cache m_error = new double[m_nInstances]; for (int i = 0; i < m_nInstances; i++) { m_error[i] = m_target[i]; } } /** * use variant 1 of Shevade's et al.s paper * * @throws Exception if something goes wrong */ protected void optimize1() throws Exception { // % main routine for modification 1 procedure main // while (numChanged > 0 || examineAll) // numChanged = 0; int nNumChanged = 0; boolean bExamineAll = true; // while (numChanged > 0 || examineAll) // numChanged = 0; while (nNumChanged > 0 || bExamineAll) { nNumChanged = 0; // if (examineAll) // loop I over all the training examples // numChanged += examineExample(I) // else // loop I over I.0 // numChanged += examineExample(I) // % It is easy to check if optimality on I.0 is attained... 
// if (b.up > b.low - 2*tol) at any I // exit the loop after setting numChanged = 0 // endif if (bExamineAll) { for (int i = 0; i < m_nInstances; i++) { nNumChanged += examineExample(i); } } else { for (int i = m_I0.getNext(-1); i != -1; i = m_I0.getNext(i)) { nNumChanged += examineExample(i); if (m_bLow - m_bUp < 2 * m_fTolerance) { nNumChanged = 0; break; } } } // if (examineAll == 1) // examineAll = 0; // elseif (numChanged == 0) // examineAll = 1; // endif // endwhile // endprocedure if (bExamineAll) { bExamineAll = false; } else if (nNumChanged == 0) { bExamineAll = true; } } } /** * use variant 2 from Shevade et al.'s paper * * @throws Exception if something goes wrong */ protected void optimize2() throws Exception { // % main routine for modification 2 procedure main int nNumChanged = 0; boolean bExamineAll = true; // while (numChanged > 0 || examineAll) // numChanged = 0; while (nNumChanged > 0 || bExamineAll) { nNumChanged = 0; // if (examineAll) // loop I over all the training examples // numChanged += examineExample(I) // else // % The following loop is the only difference between the two // % SMO modifications. Whereas in modification 1 the type II // % loop selects i2 from I.0 sequentially, here i2 is always // % set to the current i.low and i1 is set to the current i.up; // % clearly, this corresponds to choosing the worst violating // % pair using members of I.0 and some other indices // inner.loop.success = 1; // do // i2 = i.low // alpha2, alpha2' = Lagrange multipliers for i2 // F2 = f-cache[i2] // i1 = i.up // inner.loop.success = takeStep(i.up, i.low) // numChanged += inner.loop.success // until ( (b.up > b.low - 2*tol) || inner.loop.success == 0) // numChanged = 0; // endif if (bExamineAll) { for (int i = 0; i < m_nInstances; i++) { nNumChanged += examineExample(i); } } else { boolean bInnerLoopSuccess = true; do { if (takeStep(m_iUp, m_iLow, m_alpha[m_iLow], m_alphaStar[m_iLow], m_error[m_iLow]) > 0) { bInnerLoopSuccess = true; nNumChanged += 1; } else { bInnerLoopSuccess = false; } } while ((m_bUp <= m_bLow - 2 * m_fTolerance) && bInnerLoopSuccess); nNumChanged = 0; } // // if (examineAll == 1) // examineAll = 0 // elseif (numChanged == 0) // examineAll = 1 // endif // endwhile // endprocedure // if (bExamineAll) { bExamineAll = false; } else if (nNumChanged == 0) { bExamineAll = true; } } } /** * wrap up various variables to save memory and do some housekeeping after * optimization has finished. * * @throws Exception if something goes wrong */ @Override protected void wrapUp() throws Exception { m_b = -(m_bLow + m_bUp) / 2.0; m_target = null; m_error = null; super.wrapUp(); } /** * learn SVM parameters from data using Keerthi's SMO algorithm. Subclasses * should implement something more interesting. * * @param instances the data to work with * @throws Exception if something goes wrong */ @Override public void buildClassifier(Instances instances) throws Exception { // initialize variables init(instances); // solve optimization problem if (m_bUseVariant1) { optimize1(); } else { optimize2(); } // clean up wrapUp(); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
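A hedged usage sketch showing how this optimizer is typically plugged into weka.classifiers.functions.SMOreg. This assumes the standard Weka 3.x API surrounding this class (SMOreg.setRegOptimizer and ConverterUtils.DataSource.read); the ARFF file name is a placeholder:

// Illustrative only: training SMOreg with RegSMOImproved as its optimizer.
import weka.classifiers.functions.SMOreg;
import weka.classifiers.functions.supportVector.RegSMOImproved;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class RegSMOImprovedDemo {
    public static void main(String[] args) throws Exception {
        // Load a regression dataset (placeholder file name).
        Instances data = DataSource.read("numeric-target.arff");
        data.setClassIndex(data.numAttributes() - 1);

        RegSMOImproved optimizer = new RegSMOImproved();
        optimizer.setTolerance(0.001);   // stopping criterion: b.up < b.low + 2*tol
        optimizer.setUseVariant1(true);  // variant 1 of Shevade et al.

        SMOreg svm = new SMOreg();
        svm.setRegOptimizer(optimizer);  // corresponds to SMOreg's -I option
        svm.buildClassifier(data);
    }
}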
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/supportVector/SMOset.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * SMOset.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions.supportVector; import java.io.Serializable; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * Stores a set of integer of a given size. * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision$ */ public class SMOset implements Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = -8364829283188675777L; /** The current number of elements in the set */ private int m_number; /** The first element in the set */ private int m_first; /** Indicators */ private boolean[] m_indicators; /** The next element for each element */ private int[] m_next; /** The previous element for each element */ private int[] m_previous; /** * Creates a new set of the given size. */ public SMOset(int size) { m_indicators = new boolean[size]; m_next = new int[size]; m_previous = new int[size]; m_number = 0; m_first = -1; } /** * Checks whether an element is in the set. */ public boolean contains(int index) { return m_indicators[index]; } /** * Deletes an element from the set. */ public void delete(int index) { if (m_indicators[index]) { if (m_first == index) { m_first = m_next[index]; } else { m_next[m_previous[index]] = m_next[index]; } if (m_next[index] != -1) { m_previous[m_next[index]] = m_previous[index]; } m_indicators[index] = false; m_number--; } } /** * Inserts an element into the set. */ public void insert(int index) { if (!m_indicators[index]) { if (m_number == 0) { m_first = index; m_next[index] = -1; m_previous[index] = -1; } else { m_previous[m_first] = index; m_next[index] = m_first; m_previous[index] = -1; m_first = index; } m_indicators[index] = true; m_number++; } } /** * Gets the next element in the set. -1 gets the first one. */ public int getNext(int index) { if (index == -1) { return m_first; } else { return m_next[index]; } } /** * Prints all the current elements in the set. */ public void printElements() { for (int i = getNext(-1); i != -1; i = getNext(i)) { System.err.print(i + " "); } System.err.println(); for (int i = 0; i < m_indicators.length; i++) { if (m_indicators[i]) { System.err.print(i + " "); } } System.err.println(); System.err.println(m_number); } /** * Returns the number of elements in the set. */ public int numElements() { return m_number; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
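A short usage sketch of SMOset (the demo class name and values are illustrative): it offers constant-time insert, delete and contains via the indicator array, plus ordered iteration through the internal linked list via getNext, starting from the sentinel index -1. This is exactly how the optimizers above walk the I.0 set:

// Illustrative only: basic SMOset operations and iteration.
import weka.classifiers.functions.supportVector.SMOset;

public class SMOsetDemo {
    public static void main(String[] args) {
        SMOset set = new SMOset(10);  // holds integers in the range 0..9
        set.insert(3);
        set.insert(7);
        set.delete(3);

        // Iterate all current elements; getNext(-1) yields the first one.
        for (int i = set.getNext(-1); i != -1; i = set.getNext(i)) {
            System.out.println(i);    // prints 7
        }
        System.out.println(set.contains(3));    // false
        System.out.println(set.numElements());  // 1
    }
}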
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/supportVector/StringKernel.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * StringKernel.java * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.functions.supportVector; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; /** * <!-- globalinfo-start --> Implementation of the subsequence kernel (SSK) as * described in [1] and of the subsequence kernel with lambda pruning (SSK-LP) * as described in [2].<br/> * <br/> * For more information, see<br/> * <br/> * Huma Lodhi, Craig Saunders, John Shawe-Taylor, Nello Cristianini, Christopher * J. C. H. Watkins (2002). Text Classification using String Kernels. Journal of * Machine Learning Research. 2:419-444.<br/> * <br/> * F. Kleedorfer, A. Seewald (2005). Implementation of a String Kernel for WEKA. * Wien, Austria. * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;article{Lodhi2002, * author = {Huma Lodhi and Craig Saunders and John Shawe-Taylor and Nello Cristianini and Christopher J. C. H. Watkins}, * journal = {Journal of Machine Learning Research}, * pages = {419-444}, * title = {Text Classification using String Kernels}, * volume = {2}, * year = {2002}, * HTTP = {http://www.jmlr.org/papers/v2/lodhi02a.html} * } * * &#64;techreport{Kleedorfer2005, * address = {Wien, Austria}, * author = {F. Kleedorfer and A. Seewald}, * institution = {Oesterreichisches Forschungsinstitut fuer Artificial Intelligence}, * number = {TR-2005-13}, * title = {Implementation of a String Kernel for WEKA}, * year = {2005} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -D * Enables debugging output (if available) to be printed. * (default: off) * </pre> * * <pre> * -P &lt;0|1&gt; * The pruning method to use: * 0 = No pruning * 1 = Lambda pruning * (default: 0) * </pre> * * <pre> * -C &lt;num&gt; * The size of the cache (a prime number). * (default: 250007) * </pre> * * <pre> * -IC &lt;num&gt; * The size of the internal cache (a prime number). * (default: 200003) * </pre> * * <pre> * -L &lt;num&gt; * The lambda constant. Penalizes non-continuous subsequence * matches. Must be in (0,1). * (default: 0.5) * </pre> * * <pre> * -ssl &lt;num&gt; * The length of the subsequence. * (default: 3) * </pre> * * <pre> * -ssl-max &lt;num&gt; * The maximum length of the subsequence. * (default: 9) * </pre> * * <pre> * -N * Use normalization. 
* (default: no) * </pre> * * <!-- options-end --> * * <h1>Theory</h1> * <h2>Overview</h2> * The algorithm computes a measure of similarity between two texts based on the * number and form of their common subsequences, which need not be contiguous. * This method can be parametrized by specifying the subsequence length k, the * penalty factor lambda, which penalizes non-contiguous matches, and optional * 'lambda pruning', which takes maxLambdaExponent, <code>m</code>, as * parameter. Lambda pruning causes very 'stretched' substring matches not to be * counted, thus speeding up the computation. The functionality of SSK and * SSK-LP is explained in the following using simple examples. * * <h2>Explanation &amp; Examples</h2> * For all of the following examples, we assume these parameter values: * * <pre> * k=2 * lambda=0.5 * m=8 (for SSK-LP examples) * </pre> * * <h3>SSK</h3> * * <h4>Example 1</h4> * * <pre> * SSK(2,"ab","axb")=0.5^5 = 0.03125 * </pre> * * There is one subsequence of the length of 2 that both strings have in common, * "ab". The result of SSK is computed by raising lambda to the power of L, * where L is the length of the subsequence match in the one string plus the * length of the subsequence match in the other, in our case: * * <pre> * &nbsp; ab axb * L= 2 + 3 = 5 * </pre> * * Hence, the kernel yields 0.5^5 = 0.03125 * * <h4>Example 2</h4> * * <pre> * SSK(2,"ab","abb")=0.5^5 + 0.5^4 = 0.09375 * </pre> * * Here, we also have one subsequence of the length of 2 that both strings have * in common, "ab". The result of SSK is actually computed by summing over all * values computed for each occurrence of a common subsequence match. In this * example, there are two possible cases: * * <pre> * ab abb * -- -- L=4 * -- - - L=5 * </pre> * * We have two matches, one of the length of 2+2=4, one of the length of 2+3=5, * so we get the result 0.5^5 + 0.5^4 = 0.09375. * * <h3>SSK-LP</h3> * Without lambda pruning, the string kernel finds *all* common subsequences of * the given length, whereas with lambda pruning, common subsequence matches * that are too much stretched in both strings are not taken into account. It is * argued that the value yielded for such a common subsequence is too low * (<code>lambda^(length[match_in_s] + length[match_in_t])</code>). Tests have * shown that a tremendous speedup can be achieved using this technique while * suffering from very little quality loss. <br> * Lambda pruning is parametrized by the maximum lambda exponent. It is a good * idea to choose that value to be about 3 or 4 times the subsequence length as * a rule of thumb. YMMV. * * <h4>Example 3</h4> * Without lambda pruning, one common subsequence, "AB", would be found in the * following two strings. (With k=2) * * <pre> * SSK(2,"AxxxxxxxxxB","AyB")=0.5^14 = 0.00006103515625 * </pre> * * Lambda pruning allows for the control of the match length. So, if m (the * maximum lambda exponent) is e.g. 8, these two strings would yield a kernel * value of 0: * * <pre> * with lambda pruning: SSK-LP(2,8,"AxxxxxxxxxB","AyB")= 0 * without lambda pruning: SSK(2,"AxxxxxxxxxB","AyB")= 0.5^14 = 0.00006103515625 * </pre> * * This is because the exponent for lambda (=the length of the subsequence * match) would be 14, which is &gt; 8. In contrast, the next result is &gt; 0 * * <pre> * m=8 * SSK-LP(2,8,"AxxB","AyyB")=0.5^8 = 0.00390625 * </pre> * * because the lambda exponent would be 8, which is just accepted by lambda * pruning.
* * <h3>Normalization</h3> * When the string kernel is used for its main purpose, as the kernel of a * support vector machine, it is not normalized. The normalized kernel can be * switched on by -F (feature space normalization) but is much slower. Like most * unnormalized kernels, K(x,x) is not a fixed value, see the next example. * * <h4>Example 4</h4> * * <pre> * SSK(2,"ab","ab")=0.5^4 = 0.0625 * SSK(2,"AxxxxxxxxxB","AxxxxxxxxxB") = 12.761724710464478 * </pre> * * SSK is evaluated twice, each time for two identical strings. A good measure * of similarity would produce the same value in both cases, which should * indicate the same level of similarity. The value of the normalized SSK would * be 1.0 in both cases. So for the purpose of computing string similarity the * normalized kernel should be used. For SVM the unnormalized kernel is usually * sufficient. * * <h2>Complexity of SSK and SSK-LP</h2> * The time complexity of this method (without lambda pruning and with an * infinitely large cache) is<br> * * <pre> * O(k*|s|*|t|) * </pre> * * Lambda Pruning has a complexity (without caching) of<br> * * <pre> * O(m*binom(m,k)^2*(|s|+n)*|t|) * </pre> * * <br> * * <pre> * k... subsequence length (ssl) * s,t... strings * |s|... length of string s * binom(x,y)... binomial coefficient (x!/[(x-y)!y!]) * m... maxLambdaExponent (ssl-max) * </pre> * * Keep in mind that execution time can increase fast for long strings and big * values for k, especially if you don't use lambda pruning. With lambda * pruning, computation is usually so fast that switching on the cache leads to * slower computation because of setup costs. Therefore caching is switched off * for lambda pruning. <br> * <br> * For details and qualitative experiments about SSK, see [1] <br> * For details about lambda pruning and performance comparison of SSK and SSK-LP * (SSK with lambda pruning), see [2] Note that the complexity estimation in [2] * assumes no caching of intermediate results, which has been implemented in the * meantime and greatly improves the speed of the SSK without lambda pruning. <br> * * <h1>Notes for usage within Weka</h1> * Only instances of the following form can be processed using string kernels: * * <pre> * +----------+-------------+---------------+ * |attribute#| 0 | 1 | * +----------+-------------+---------------+ * | content | [text data] | [class label] | * +----------------------------------------+ * ... or ... * +----------+---------------+-------------+ * |attribute#| 0 | 1 | * +----------+---------------+-------------+ * | content | [class label] | [text data] | * +----------------------------------------+ * </pre> * * @author Florian Kleedorfer (kleedorfer@austria.fm) * @author Alexander K. Seewald (alex@seewald.at) * @version $Revision$ */ public class StringKernel extends Kernel implements TechnicalInformationHandler { /** for serialization */ private static final long serialVersionUID = -4902954211202690123L; /** The size of the cache (a prime number) */ private int m_cacheSize = 250007; /** The size of the internal cache for intermediate results (a prime number) */ private int m_internalCacheSize = 200003; /** The attribute number of the string attribute */ private int m_strAttr; /** Kernel cache (i.e., cache for kernel evaluations) */ private double[] m_storage; private long[] m_keys; /** Counts the number of kernel evaluations. 
*/ private int m_kernelEvals; /** The number of instance in the dataset */ private int m_numInsts; /** Pruning method: No Pruning */ public final static int PRUNING_NONE = 0; /** Pruning method: Lambda See [2] for details. */ public final static int PRUNING_LAMBDA = 1; /** Pruning methods */ public static final Tag[] TAGS_PRUNING = { new Tag(PRUNING_NONE, "No pruning"), new Tag(PRUNING_LAMBDA, "Lambda pruning"), }; /** the pruning method */ protected int m_PruningMethod = PRUNING_NONE; /** * the decay factor that penalizes non-continuous substring matches. See [1] * for details. */ protected double m_lambda = 0.5; /** The substring length */ private int m_subsequenceLength = 3; /** The maximum substring length for lambda pruning */ private int m_maxSubsequenceLength = 9; /** * powers of lambda are prepared prior to kernel evaluations. all powers * between 0 and this value are precalculated */ protected static final int MAX_POWER_OF_LAMBDA = 10000; /** the precalculated powers of lambda */ protected double[] m_powersOflambda = null; /** * flag for switching normalization on or off. This defaults to false and can * be turned on by the switch for feature space normalization in SMO */ private boolean m_normalize = false; /** private cache for intermediate results */ private int maxCache; // is set in unnormalizedKernel(s1,s2) private double[] cachekh; private int[] cachekhK; private double[] cachekh2; private int[] cachekh2K; /** cached indexes for private cache */ private int m_multX; private int m_multY; private int m_multZ; private int m_multZZ; private boolean m_useRecursionCache = true; /** * default constructor */ public StringKernel() { super(); } /** * creates a new StringKernel object. Initializes the kernel cache and the * 'lambda cache', i.e. the precalculated powers of lambda from lambda^2 to * lambda^MAX_POWER_OF_LAMBDA * * @param data the dataset to use * @param cacheSize the size of the cache * @param subsequenceLength the subsequence length * @param lambda the lambda value * @param debug whether to output debug information * @throws Exception if something goes wrong */ public StringKernel(Instances data, int cacheSize, int subsequenceLength, double lambda, boolean debug) throws Exception { setDebug(debug); setCacheSize(cacheSize); setInternalCacheSize(200003); setSubsequenceLength(subsequenceLength); setMaxSubsequenceLength(-1); setLambda(lambda); buildKernel(data); } /** * Returns a string describing the kernel * * @return a description suitable for displaying in the explorer/experimenter * gui */ @Override public String globalInfo() { return "Implementation of the subsequence kernel (SSK) as described in [1] " + "and of the subsequence kernel with lambda pruning (SSK-LP) as " + "described in [2].\n\n" + "For more information, see\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed * information about the technical background of this class, e.g., paper * reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.ARTICLE); result .setValue( Field.AUTHOR, "Huma Lodhi and Craig Saunders and John Shawe-Taylor and Nello Cristianini and Christopher J. C. H. 
Watkins"); result.setValue(Field.YEAR, "2002"); result.setValue(Field.TITLE, "Text Classification using String Kernels"); result.setValue(Field.JOURNAL, "Journal of Machine Learning Research"); result.setValue(Field.VOLUME, "2"); result.setValue(Field.PAGES, "419-444"); result.setValue(Field.HTTP, "http://www.jmlr.org/papers/v2/lodhi02a.html"); additional = result.add(Type.TECHREPORT); additional.setValue(Field.AUTHOR, "F. Kleedorfer and A. Seewald"); additional.setValue(Field.YEAR, "2005"); additional.setValue(Field.TITLE, "Implementation of a String Kernel for WEKA"); additional.setValue(Field.INSTITUTION, "Oesterreichisches Forschungsinstitut fuer Artificial Intelligence"); additional.setValue(Field.ADDRESS, "Wien, Austria"); additional.setValue(Field.NUMBER, "TR-2005-13"); return result; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> result = new Vector<Option>(); String desc; String param; int i; SelectedTag tag; result.addAll(Collections.list(super.listOptions())); desc = ""; param = ""; for (i = 0; i < TAGS_PRUNING.length; i++) { if (i > 0) { param += "|"; } tag = new SelectedTag(TAGS_PRUNING[i].getID(), TAGS_PRUNING); param += "" + tag.getSelectedTag().getID(); desc += "\t" + tag.getSelectedTag().getID() + " = " + tag.getSelectedTag().getReadable() + "\n"; } result.addElement(new Option("\tThe pruning method to use:\n" + desc + "\t(default: " + PRUNING_NONE + ")", "P", 1, "-P <" + param + ">")); result.addElement(new Option("\tThe size of the cache (a prime number).\n" + "\t(default: 250007)", "C", 1, "-C <num>")); result.addElement(new Option( "\tThe size of the internal cache (a prime number).\n" + "\t(default: 200003)", "IC", 1, "-IC <num>")); result.addElement(new Option( "\tThe lambda constant. Penalizes non-continuous subsequence\n" + "\tmatches. Must be in (0,1).\n" + "\t(default: 0.5)", "L", 1, "-L <num>")); result.addElement(new Option("\tThe length of the subsequence.\n" + "\t(default: 3)", "ssl", 1, "-ssl <num>")); result.addElement(new Option("\tThe maximum length of the subsequence.\n" + "\t(default: 9)", "ssl-max", 1, "-ssl-max <num>")); result.addElement(new Option("\tUse normalization.\n" + "\t(default: no)", "N", 0, "-N")); return result.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -D * Enables debugging output (if available) to be printed. * (default: off) * </pre> * * <pre> * -P &lt;0|1&gt; * The pruning method to use: * 0 = No pruning * 1 = Lambda pruning * (default: 0) * </pre> * * <pre> * -C &lt;num&gt; * The size of the cache (a prime number). * (default: 250007) * </pre> * * <pre> * -IC &lt;num&gt; * The size of the internal cache (a prime number). * (default: 200003) * </pre> * * <pre> * -L &lt;num&gt; * The lambda constant. Penalizes non-continuous subsequence * matches. Must be in (0,1). * (default: 0.5) * </pre> * * <pre> * -ssl &lt;num&gt; * The length of the subsequence. * (default: 3) * </pre> * * <pre> * -ssl-max &lt;num&gt; * The maximum length of the subsequence. * (default: 9) * </pre> * * <pre> * -N * Use normalization. 
* (default: no) * </pre> * * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { String tmpStr; tmpStr = Utils.getOption('P', options); if (tmpStr.length() != 0) { setPruningMethod(new SelectedTag(Integer.parseInt(tmpStr), TAGS_PRUNING)); } else { setPruningMethod(new SelectedTag(PRUNING_NONE, TAGS_PRUNING)); } tmpStr = Utils.getOption('C', options); if (tmpStr.length() != 0) { setCacheSize(Integer.parseInt(tmpStr)); } else { setCacheSize(250007); } tmpStr = Utils.getOption("IC", options); if (tmpStr.length() != 0) { setInternalCacheSize(Integer.parseInt(tmpStr)); } else { setInternalCacheSize(200003); } tmpStr = Utils.getOption('L', options); if (tmpStr.length() != 0) { setLambda(Double.parseDouble(tmpStr)); } else { setLambda(0.5); } tmpStr = Utils.getOption("ssl", options); if (tmpStr.length() != 0) { setSubsequenceLength(Integer.parseInt(tmpStr)); } else { setSubsequenceLength(3); } tmpStr = Utils.getOption("ssl-max", options); if (tmpStr.length() != 0) { setMaxSubsequenceLength(Integer.parseInt(tmpStr)); } else { setMaxSubsequenceLength(9); } setUseNormalization(Utils.getFlag('N', options)); if (getMaxSubsequenceLength() < 2 * getSubsequenceLength()) { throw new IllegalArgumentException( "Lambda Pruning forbids even contiguous substring matches! " + "Use a bigger value for ssl-max (at least 2*ssl)."); } super.setOptions(options); } /** * Gets the current settings of the Kernel. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> result = new Vector<String>(); Collections.addAll(result, super.getOptions()); result.add("-P"); result.add("" + m_PruningMethod); result.add("-C"); result.add("" + getCacheSize()); result.add("-IC"); result.add("" + getInternalCacheSize()); result.add("-L"); result.add("" + getLambda()); result.add("-ssl"); result.add("" + getSubsequenceLength()); result.add("-ssl-max"); result.add("" + getMaxSubsequenceLength()); if (getUseNormalization()) { result.add("-L"); } return result.toArray(new String[result.size()]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String pruningMethodTipText() { return "The pruning method."; } /** * Sets the method used to for pruning. * * @param value the pruning method to use. */ public void setPruningMethod(SelectedTag value) { if (value.getTags() == TAGS_PRUNING) { m_PruningMethod = value.getSelectedTag().getID(); } } /** * Gets the method used for pruning. * * @return the pruning method to use. */ public SelectedTag getPruningMethod() { return new SelectedTag(m_PruningMethod, TAGS_PRUNING); } /** * Sets the size of the cache to use (a prime number) * * @param value the size of the cache */ public void setCacheSize(int value) { if (value >= 0) { m_cacheSize = value; clean(); } else { System.out.println("Cache size cannot be smaller than 0 (provided: " + value + ")!"); } } /** * Gets the size of the cache * * @return the cache size */ public int getCacheSize() { return m_cacheSize; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String cacheSizeTipText() { return "The size of the cache (a prime number)."; } /** * sets the size of the internal cache for intermediate results. 
Memory * consumption is about 16x this amount in bytes. Only use when lambda pruning * is switched off. * * @param value the size of the internal cache */ public void setInternalCacheSize(int value) { if (value >= 0) { m_internalCacheSize = value; clean(); } else { System.out.println("Cache size cannot be smaller than 0 (provided: " + value + ")!"); } } /** * Gets the size of the internal cache * * @return the cache size */ public int getInternalCacheSize() { return m_internalCacheSize; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String internalCacheSizeTipText() { return "The size of the internal cache (a prime number)."; } /** * Sets the length of the subsequence. * * @param value the length */ public void setSubsequenceLength(int value) { m_subsequenceLength = value; } /** * Returns the length of the subsequence * * @return the length */ public int getSubsequenceLength() { return m_subsequenceLength; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String subsequenceLengthTipText() { return "The subsequence length."; } /** * Sets the maximum length of the subsequence. * * @param value the maximum length */ public void setMaxSubsequenceLength(int value) { m_maxSubsequenceLength = value; } /** * Returns the maximum length of the subsequence * * @return the maximum length */ public int getMaxSubsequenceLength() { return m_maxSubsequenceLength; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String maxSubsequenceLengthTipText() { return "The maximum subsequence length (theta in the paper)"; } /** * Sets the lambda constant used in the string kernel * * @param value the lambda value to use */ public void setLambda(double value) { m_lambda = value; } /** * Gets the lambda constant used in the string kernel * * @return the current lambda constant */ public double getLambda() { return m_lambda; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String lambdaTipText() { return "Penalizes non-continuous subsequence matches, from (0,1)"; } /** * Sets whether to use normalization. Each time this value is changed, the * kernel cache is cleared. * * @param value whether to use normalization */ public void setUseNormalization(boolean value) { if (value != m_normalize) { clean(); } m_normalize = value; } /** * Returns whether normalization is used. * * @return true if normalization is used */ public boolean getUseNormalization() { return m_normalize; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String useNormalizationTipText() { return "Whether to use normalization."; } /** * Computes the result of the kernel function for two instances. If id1 == -1, * eval use inst1 instead of an instance in the dataset. 
* * @param id1 the index of the first instance in the dataset * @param id2 the index of the second instance in the dataset * @param inst1 the instance corresponding to id1 (used if id1 == -1) * @return the result of the kernel function * @throws Exception if something goes wrong */ @Override public double eval(int id1, int id2, Instance inst1) throws Exception { if (m_Debug && id1 > -1 && id2 > -1) { System.err.println("\nEvaluation of string kernel for"); System.err.println(m_data.instance(id1).stringValue(m_strAttr)); System.err.println("and"); System.err.println(m_data.instance(id2).stringValue(m_strAttr)); } // the normalized kernel returns 1 for comparison of // two identical strings if (id1 == id2 && m_normalize) { return 1.0; } double result = 0; long key = -1; int location = -1; // we can only cache if we know the indexes if ((id1 >= 0) && (m_keys != null)) { if (id1 > id2) { key = (long) id1 * m_numInsts + id2; } else { key = (long) id2 * m_numInsts + id1; } if (key < 0) { throw new Exception("Cache overflow detected!"); } location = (int) (key % m_keys.length); if (m_keys[location] == (key + 1)) { if (m_Debug) { System.err.println("result (cached): " + m_storage[location]); } return m_storage[location]; } } m_kernelEvals++; long start = System.currentTimeMillis(); Instance inst2 = m_data.instance(id2); char[] s1 = inst1.stringValue(m_strAttr).toCharArray(); char[] s2 = inst2.stringValue(m_strAttr).toCharArray(); // prevent the kernel from returning NaN if (s1.length == 0 || s2.length == 0) { return 0; } if (m_normalize) { result = normalizedKernel(s1, s2); } else { result = unnormalizedKernel(s1, s2); } if (m_Debug) { long duration = System.currentTimeMillis() - start; System.err.println("result: " + result); System.err.println("evaluation time:" + duration + "\n"); } // store result in cache if (key != -1) { m_storage[location] = result; m_keys[location] = (key + 1); } return result; } /** * Frees the memory used by the kernel. (Useful with kernels which use cache.) * This function is called when the training is done. i.e. after that, eval * will be called with id1 == -1. */ @Override public void clean() { m_storage = null; m_keys = null; } /** * Returns the number of kernel evaluation performed. * * @return the number of kernel evaluation performed. */ @Override public int numEvals() { return m_kernelEvals; } /** * Returns the number of dot product cache hits. * * @return the number of dot product cache hits, or -1 if not supported by * this kernel. */ @Override public int numCacheHits() { // TODO: implement! return -1; } /** * evaluates the normalized kernel between s and t. See [1] for details about * the normalized SSK. * * @param s first input string * @param t second input string * @return a double indicating their distance, or similarity */ public double normalizedKernel(char[] s, char[] t) { double k1 = unnormalizedKernel(s, s); double k2 = unnormalizedKernel(t, t); double normTerm = Math.sqrt(k1 * k2); return unnormalizedKernel(s, t) / normTerm; } /** * evaluates the unnormalized kernel between s and t. See [1] for details * about the unnormalized SSK. 
* * @param s first input string * @param t second input string * @return a double indicating their distance, or similarity */ public double unnormalizedKernel(char[] s, char[] t) { if (t.length > s.length) { // swap because the algorithm is faster if s is // the longer string char[] buf = s; s = t; t = buf; } if (m_PruningMethod == PRUNING_NONE) { m_multX = (s.length + 1) * (t.length + 1); m_multY = (t.length + 1); m_multZ = 1; maxCache = m_internalCacheSize; if (maxCache == 0) { maxCache = (m_subsequenceLength + 1) * m_multX; } else if ((m_subsequenceLength + 1) * m_multX < maxCache) { maxCache = (m_subsequenceLength + 1) * m_multX; } m_useRecursionCache = true; cachekhK = new int[maxCache]; cachekh2K = new int[maxCache]; cachekh = new double[maxCache]; cachekh2 = new double[maxCache]; } else if (m_PruningMethod == PRUNING_LAMBDA) { maxCache = 0; m_useRecursionCache = false; } double res; if (m_PruningMethod == PRUNING_LAMBDA) { res = kernelLP(m_subsequenceLength, s, s.length - 1, t, t.length - 1, m_maxSubsequenceLength); } else { res = kernel(m_subsequenceLength, s, s.length - 1, t, t.length - 1); } cachekh = null; cachekhK = null; cachekh2 = null; cachekh2K = null; return res; } /** * Recursion-ending function that is called at the end of each recursion * branch. * * @param n * @return */ protected double getReturnValue(int n) { if (n == 0) { return 1; } else { return 0; } } /** * the kernel function (Kn). This function performs the outer loop * character-wise over the first input string s. For each character * encountered, a recursion branch is started that identifies all subsequences * in t starting with that character. <br> * See [1] for details but note that this code is optimized and may be hard to * recognize. * * @param n the current length of the matching subsequence * @param s first string, as a char array * @param t second string, as a char array * @param endIndexS the portion of s currently regarded is s[1:endIndexS] * @param endIndexT the portion of t currently regarded is t[1:endIndexT] * @return a double indicating the distance or similarity between s and t, * according to and depending on the initial value for n. */ protected double kernel(int n, char[] s, int endIndexS, char[] t, int endIndexT) { // normal recursion ending case if (Math.min(endIndexS + 1, endIndexT + 1) < n) { return getReturnValue(n); } // accumulate all recursion results in one: double result = 0; // the tail-recursive function defined in [1] is turned into a // loop here, preventing stack overflows. // skim s from back to front for (int iS = endIndexS; iS > n - 2; iS--) { double buf = 0; // let the current character in s be x char x = s[iS]; // iterate over all occurrences of x in t for (int j = 0; j <= endIndexT; j++) { if (t[j] == x) { // this is a match for the current character, hence // 1. use previous chars in both strings (iS-1, j-1) // 2. decrement the remainingMatchLength (n-1) // and start a recursion branch for these parameters buf += kernelHelper(n - 1, s, iS - 1, t, j - 1); } } // ok, all occurrences of x in t have been found // multiply the result with lambda^2 // (one lambda for x, and the other for all matches of x in t) result += buf * m_powersOflambda[2]; } return result; } /** * The kernel helper function, called K' in [1] and [2]. 
* * @param n the current length of the matching subsequence * @param s first string, as a char array * @param t second string, as a char array * @param endIndexS the portion of s currently regarded is s[1:endIndexS] * @param endIndexT the portion of t currently regarded is t[1:endIndexT] * @return a partial result for K */ protected double kernelHelper(int n, char[] s, int endIndexS, char[] t, int endIndexT) { // recursion ends if the current subsequence has maximal length, // which is the case here if (n <= 0) { return getReturnValue(n); } // recursion ends, too, if the current subsequence is shorter than // maximal length, but there is no chance that it will reach maximal length. // in this case, normally 0 is returned, but the EXPERIMENTAL // minSubsequenceLength feature allows shorter subsequence matches // also to contribute if (Math.min(endIndexS + 1, endIndexT + 1) < n) { return getReturnValue(n); } int adr = 0; if (m_useRecursionCache) { adr = m_multX * n + m_multY * endIndexS + m_multZ * endIndexT; if (cachekhK[adr % maxCache] == adr + 1) { return cachekh[adr % maxCache]; } } // the tail-recursive function defined in [1] is turned into a // loop here, preventing stack overflows. // loop over s, nearly from the start (skip the first n-1 characters) // and only up until endIndexS, and recursively apply K''. Thus, every // character between n-1 and endIndexS in s is counted once as // being part of the subsequence match and once just as a gap. // In both cases lambda is multiplied with the result. double result = 0; /* * for (int iS = n-1; iS <= endIndexS;iS++) { result *= m_lambda; result += * kernelHelper2(n,s,iS, t, endIndexT); } if (m_useRecursionCache) { * cachekhK[adr % maxCache]=adr+1; cachekh[adr % maxCache]=result; } return * result; */ /* ^^^ again, above code segment does not store some intermediate results... */ result = m_lambda * kernelHelper(n, s, endIndexS - 1, t, endIndexT) + kernelHelper2(n, s, endIndexS, t, endIndexT); if (m_useRecursionCache) { cachekhK[adr % maxCache] = adr + 1; cachekh[adr % maxCache] = result; } return result; } /** * helper function for the evaluation of the kernel K'' see section 'Efficient * Computation of SSK' in [1] * * @param n the current length of the matching subsequence * @param s first string, as a char array * @param t second string, as a char array * @param endIndexS the portion of s currently regarded is s[1:endIndexS] * @param endIndexT the portion of t currently regarded is t[1:endIndexT] * @return a partial result for K' */ protected double kernelHelper2(int n, char[] s, int endIndexS, char[] t, int endIndexT) { // recursion ends if one of the indices in both strings is <0 if (endIndexS < 0 || endIndexT < 0) { return getReturnValue(n); } int adr = 0; if (m_useRecursionCache) { adr = m_multX * n + m_multY * endIndexS + m_multZ * endIndexT; if (cachekh2K[adr % maxCache] == adr + 1) { return cachekh2[adr % maxCache]; } } // spot the last character in s, we'll need it char x = s[endIndexS]; // recurse if the last characters of s and t, x (and y) are identical. // which is an easy case: just add up two recursions, // 1. one that counts x and y as a part of the subsequence match // -> n, endIndexS and endIndexT are decremented for next recursion level // -> lambda^2 is multiplied with the result to account for the length // of 2 that has been added to the length of the subsequence match // by accepting x and y. // 2. 
one that counts y as a gap in the match // -> only endIndexT is decremented for next recursion level // -> lambda is multiplied with the result to account for the length // of 1 that has been added to the length of the subsequence match // by omitting y. if (x == t[endIndexT]) { double ret = m_lambda * (kernelHelper2(n, s, endIndexS, t, endIndexT - 1) + m_lambda * kernelHelper(n - 1, s, endIndexS - 1, t, endIndexT - 1)); if (m_useRecursionCache) { cachekh2K[adr % maxCache] = adr + 1; cachekh2[adr % maxCache] = ret; } return ret; } else { double ret = m_lambda * kernelHelper2(n, s, endIndexS, t, endIndexT - 1); if (m_useRecursionCache) { cachekh2K[adr % maxCache] = adr + 1; cachekh2[adr % maxCache] = ret; } return ret; } // look for x in t from back to front. // this is actually an optimization from [1] that spares unneccessary // recursions iff // x is actually found in t, but not at the last position. /* * int i; int threshold = n>0?n-1:0; for (i=endIndexT-1; i >= threshold;i--) * { if (x == t[i]) { double ret=getPowerOfLambda(endIndexT-i) * * kernelHelper2(n,s,endIndexS, t, i); if (m_useRecursionCache) { * cachekh2K[adr % maxCache]=adr+1; cachekh2[adr % maxCache]=ret; } return * ret; } } */ // end the recursion if x is not found in t. /* * double ret = getReturnValue(n); if (m_useRecursionCache) { cachekh2K[adr * % maxCache]=adr+1; cachekh2[adr % maxCache]=ret; } return ret; */ } /** * the kernel function K explained in [1] using lambda pruning, explained in * [2]. An additional parameter is introduced, which denotes the maximum * length of a subsequence match. This allows for the control of how relaxed * the subsequence matches are. <br> * * @param n the current length of the matching subsequence * @param s first string, as a char array * @param t second string, as a char array * @param endIndexS the portion of s currently regarded is s[1:endIndexS] * @param endIndexT the portion of t currently regarded is t[1:endIndexT] * @param remainingMatchLength actually the initial value for * maxLambdaExponent * @return a double indicating the distance or similarity between s and t, * according to and depending on the initial value for n. 
*/ protected double kernelLP(int n, char[] s, int endIndexS, char[] t, int endIndexT, int remainingMatchLength) { // see code docs in kernel() if (Math.min(endIndexS + 1, endIndexT + 1) < n) { return getReturnValue(n); } // lambda pruning check // stops recursion if the match is so long that the resulting // power of lambda is smaller than minLambda // if lambda pruning is not used, the remainingMatchLength is < 0 // and this check never stops the recursion if (remainingMatchLength == 0) { return getReturnValue(n); } double result = 0; // see code docs in kernel() for (int iS = endIndexS; iS > n - 2; iS--) { double buf = 0; char x = s[iS]; for (int j = 0; j <= endIndexT; j++) { if (t[j] == x) { // both t[j] and x are considered part of the subsequence match, hence // subtract 2 from the remainingMatchLength buf += kernelHelperLP(n - 1, s, iS - 1, t, j - 1, remainingMatchLength - 2); } } result += buf * m_powersOflambda[2]; } return result; } /** * helper function for the evaluation of the kernel (K'n) using lambda pruning * * @param n the current length of the matching subsequence * @param s first string, as a char array * @param t second string, as a char array * @param endIndexS the portion of s currently regarded is s[1:endIndexS] * @param endIndexT the portion of t currently regarded is t[1:endIndexT] * @param remainingMatchLength the number of characters that may still be used * for matching (i.e. gaps + matches in both strings) * @return a partial result for K */ protected double kernelHelperLP(int n, char[] s, int endIndexS, char[] t, int endIndexT, int remainingMatchLength) { // see code docs in kernelHelper() if (n == 0) { return getReturnValue(n); } // see code docs in kernelHelper() if (Math.min(endIndexS + 1, endIndexT + 1) < n) { ; return getReturnValue(n); } // lambda pruning check // stops recursion if the match is so long that the resulting // power of lambda is smaller than minLambda // if lambda pruning is not used, the remainingMatchLength is < 0 // and this check never stops the recursion if (remainingMatchLength < 2 * n) { return getReturnValue(n); } int adr = 0; if (m_useRecursionCache) { adr = m_multX * n + m_multY * endIndexS + m_multZ * endIndexT + m_multZZ * remainingMatchLength; if (cachekh2K[adr % maxCache] == adr + 1) { return cachekh2[adr % maxCache]; } } int rml = 0; // counts the remaining match length double result = 0; // see code docs in kernelHelper() // difference to implementation in kernelHelper: // *)choose different starting point, which is found counting // the maximal remaining match length from endIndexS. // *)keep track of the remaining match length, rml, which is // incremented each loop for (int iS = (endIndexS - remainingMatchLength); iS <= endIndexS; iS++) { result *= m_lambda; result += kernelHelper2LP(n, s, iS, t, endIndexT, rml++); } if (m_useRecursionCache && endIndexS >= 0 && endIndexT >= 0 && n >= 0) { cachekhK[adr % maxCache] = adr + 1; cachekh[adr % maxCache] = result; } return result; } /** * helper function for the evaluation of the kernel (K''n) using lambda * pruning * * @param n the current length of the matching subsequence * @param s first string, as a char array * @param t second string, as a char array * @param endIndexS the portion of s currently regarded is s[1:endIndexS] * @param endIndexT the portion of t currently regarded is t[1:endIndexT] * @param remainingMatchLength the number of characters that may still be used * for matching (i.e. 
gaps + matches in both strings) * @return a partial result for K' */ protected double kernelHelper2LP(int n, char[] s, int endIndexS, char[] t, int endIndexT, int remainingMatchLength) { // lambda pruning check // stops recursion if the match is so long that the resulting // power of lambda is smaller than minLambda // if lambda pruning is not used, the remainingMatchLength is < 0 // and this check never stops the recursion // if (remainingMatchLength <= 0) return 0; if (remainingMatchLength < 2 * n) { return getReturnValue(n); } // see code docs in kernelHelper2() if (endIndexS < 0 || endIndexT < 0) { return getReturnValue(n); } int adr = 0; if (m_useRecursionCache) { adr = m_multX * n + m_multY * endIndexS + m_multZ * endIndexT + m_multZZ * remainingMatchLength; if (cachekh2K[adr % maxCache] == adr + 1) { return cachekh2[adr % maxCache]; } } char x = s[endIndexS]; if (x == t[endIndexT]) { double ret = m_lambda * (kernelHelper2LP(n, s, endIndexS, t, endIndexT - 1, remainingMatchLength - 1) + m_lambda * kernelHelperLP(n - 1, s, endIndexS - 1, t, endIndexT - 1, remainingMatchLength - 2)); if (m_useRecursionCache && endIndexS >= 0 && endIndexT >= 0 && n >= 0) { cachekh2K[adr % maxCache] = adr + 1; cachekh2[adr % maxCache] = ret; } return ret; } // see code docs in kernelHelper() // differences to implementation in kernelHelper(): // *) choose a different ending point for the loop // based on the remaining match length int i; int minIndex = endIndexT - remainingMatchLength; if (minIndex < 0) { minIndex = 0; } for (i = endIndexT; i >= minIndex; i--) { if (x == t[i]) { int skipLength = endIndexT - i; double ret = getPowerOfLambda(skipLength) * kernelHelper2LP(n, s, endIndexS, t, i, remainingMatchLength - skipLength); if (m_useRecursionCache && endIndexS >= 0 && endIndexT >= 0 && n >= 0) { cachekh2K[adr % maxCache] = adr + 1; cachekh2[adr % maxCache] = ret; } return ret; } } double ret = getReturnValue(n); if (m_useRecursionCache && endIndexS >= 0 && endIndexT >= 0 && n >= 0) { cachekh2K[adr % maxCache] = adr + 1; cachekh2[adr % maxCache] = ret; } return ret; } /** * precalculates small powers of lambda to speed up the kernel evaluation * * @return the powers */ private double[] calculatePowersOfLambda() { double[] powers = new double[MAX_POWER_OF_LAMBDA + 1]; powers[0] = 1.0; double val = 1.0; for (int i = 1; i <= MAX_POWER_OF_LAMBDA; i++) { val *= m_lambda; powers[i] = val; } return powers; } /** * retrieves a power of lambda from the lambda cache or calculates it directly * * @param exponent the exponent to calculate * @return the exponent-th power of lambda */ private double getPowerOfLambda(int exponent) { if (exponent > MAX_POWER_OF_LAMBDA) { return Math.pow(m_lambda, exponent); } if (exponent < 0) { throw new IllegalArgumentException( "only positive powers of lambda may be computed"); } return m_powersOflambda[exponent]; } /** * initializes variables etc. * * @param data the data to use */ @Override protected void initVars(Instances data) { super.initVars(data); m_kernelEvals = 0; // take the first string attribute m_strAttr = -1; for (int i = 0; i < data.numAttributes(); i++) { if (i == data.classIndex()) { continue; } if (data.attribute(i).type() == Attribute.STRING) { m_strAttr = i; break; } } m_numInsts = m_data.numInstances(); m_storage = new double[m_cacheSize]; m_keys = new long[m_cacheSize]; m_powersOflambda = calculatePowersOfLambda(); } /** * Returns the Capabilities of this kernel. 
* * @return the capabilities of this object * @see Capabilities */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); result.enable(Capability.STRING_ATTRIBUTES); result.enableAllClasses(); result.enable(Capability.MISSING_CLASS_VALUES); result.enable(Capability.NO_CLASS); return result; } /** * builds the kernel with the given data. * * @param data the data to base the kernel on * @throws Exception if something goes wrong, e.g., the data does not consist * of one string attribute and the class */ @Override public void buildKernel(Instances data) throws Exception { super.buildKernel(data); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
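/*
 * Editor's note: the class above is heavily optimized (loop transformations,
 * recursion caches, lambda pruning), which obscures the underlying recursion.
 * Below is a minimal, self-contained sketch of the plain subsequence string
 * kernel (SSK) recursion from Lodhi et al. [1], for illustration only. The
 * class and method names (NaiveSSK, kPrime) are hypothetical and are not part
 * of interruptible-weka or WEKA.
 */
public class NaiveSSK {

    /** Gap penalty lambda, must lie in (0,1); cf. setLambda(double) above. */
    private final double lambda;

    public NaiveSSK(double lambda) {
        this.lambda = lambda;
    }

    /** K_n(s, t): the unnormalized SSK for subsequences of length n. */
    public double kernel(int n, String s, String t) {
        if (Math.min(s.length(), t.length()) < n) {
            return 0.0;
        }
        String sHead = s.substring(0, s.length() - 1);
        char x = s.charAt(s.length() - 1);
        double sum = 0.0;
        // every occurrence of the last character of s in t closes a match;
        // lambda^2 accounts for the two matched characters themselves
        for (int j = 0; j < t.length(); j++) {
            if (t.charAt(j) == x) {
                sum += kPrime(n - 1, sHead, t.substring(0, j)) * lambda * lambda;
            }
        }
        return kernel(n, sHead, t) + sum;
    }

    /** K'_i(s, t): partial matches that may still grow to full length. */
    private double kPrime(int i, String s, String t) {
        if (i == 0) {
            return 1.0; // the empty subsequence matches everywhere
        }
        if (Math.min(s.length(), t.length()) < i) {
            return 0.0;
        }
        String sHead = s.substring(0, s.length() - 1);
        char x = s.charAt(s.length() - 1);
        double sum = 0.0;
        // lambda^(|t| - j + 1) penalizes the gap from 0-based position j to the
        // end of t (the paper's 1-based exponent is |t| - j + 2)
        for (int j = 0; j < t.length(); j++) {
            if (t.charAt(j) == x) {
                sum += kPrime(i - 1, sHead, t.substring(0, j))
                        * Math.pow(lambda, t.length() - j + 1);
            }
        }
        return lambda * kPrime(i, sHead, t) + sum;
    }

    /** Normalized variant, mirroring normalizedKernel(char[], char[]) above. */
    public double normalized(int n, String s, String t) {
        return kernel(n, s, t) / Math.sqrt(kernel(n, s, s) * kernel(n, t, t));
    }

    public static void main(String[] args) {
        NaiveSSK ssk = new NaiveSSK(0.5);
        System.out.println(ssk.kernel(2, "cat", "cart"));     // unnormalized K_2
        System.out.println(ssk.normalized(2, "cat", "cart")); // lies in [0, 1]
    }
}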
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/lazy/IBk.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * IBk.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.lazy; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.UpdateableClassifier; import weka.classifiers.rules.ZeroR; import weka.core.AdditionalMeasureProducer; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.core.neighboursearch.LinearNNSearch; import weka.core.neighboursearch.NearestNeighbourSearch; /** * <!-- globalinfo-start --> K-nearest neighbours classifier. Can select appropriate value of K based on cross-validation. Can also do distance weighting.<br/> * <br/> * For more information, see<br/> * <br/> * D. Aha, D. Kibler (1991). Instance-based learning algorithms. Machine Learning. 6:37-66. * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;article{Aha1991, * author = {D. Aha and D. Kibler}, * journal = {Machine Learning}, * pages = {37-66}, * title = {Instance-based learning algorithms}, * volume = {6}, * year = {1991} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -I * Weight neighbours by the inverse of their distance * (use when k &gt; 1) * </pre> * * <pre> * -F * Weight neighbours by 1 - their distance * (use when k &gt; 1) * </pre> * * <pre> * -K &lt;number of neighbors&gt; * Number of nearest neighbours (k) used in classification. * (Default = 1) * </pre> * * <pre> * -E * Minimise mean squared error rather than mean absolute * error when using -X option with numeric prediction. * </pre> * * <pre> * -W &lt;window size&gt; * Maximum number of training instances maintained. * Training instances are dropped FIFO. (Default = no window) * </pre> * * <pre> * -X * Select the number of nearest neighbours between 1 * and the k value specified using hold-one-out evaluation * on the training data (use when k &gt; 1) * </pre> * * <pre> * -A * The nearest neighbour search algorithm to use (default: weka.core.neighboursearch.LinearNNSearch). 
* </pre> * * <!-- options-end --> * * @author Stuart Inglis (singlis@cs.waikato.ac.nz) * @author Len Trigg (trigg@cs.waikato.ac.nz) * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision$ */ public class IBk extends AbstractClassifier implements OptionHandler, UpdateableClassifier, WeightedInstancesHandler, TechnicalInformationHandler, AdditionalMeasureProducer { /** for serialization. */ static final long serialVersionUID = -3080186098777067172L; /** The training instances used for classification. */ protected Instances m_Train; /** The number of class values (or 1 if predicting numeric). */ protected int m_NumClasses; /** The class attribute type. */ protected int m_ClassType; /** The number of neighbours to use for classification (currently). */ protected int m_kNN; /** * The value of kNN provided by the user. This may differ from m_kNN if cross-validation is being used. */ protected int m_kNNUpper; /** * Whether the value of k selected by cross validation has been invalidated by a change in the training instances. */ protected boolean m_kNNValid; /** * The maximum number of training instances allowed. When this limit is reached, old training instances are removed, so the training data is "windowed". Set to 0 for unlimited numbers of instances. */ protected int m_WindowSize; /** Whether the neighbours should be distance-weighted. */ protected int m_DistanceWeighting; /** Whether to select k by cross validation. */ protected boolean m_CrossValidate; /** * Whether to minimise mean squared error rather than mean absolute error when cross-validating on numeric prediction tasks. */ protected boolean m_MeanSquared; /** Default ZeroR model to use when there are no training instances */ protected ZeroR m_defaultModel; /** no weighting. */ public static final int WEIGHT_NONE = 1; /** weight by 1/distance. */ public static final int WEIGHT_INVERSE = 2; /** weight by 1-distance. */ public static final int WEIGHT_SIMILARITY = 4; /** possible instance weighting methods. */ public static final Tag[] TAGS_WEIGHTING = { new Tag(WEIGHT_NONE, "No distance weighting"), new Tag(WEIGHT_INVERSE, "Weight by 1/distance"), new Tag(WEIGHT_SIMILARITY, "Weight by 1-distance") }; /** for nearest-neighbor search. */ protected NearestNeighbourSearch m_NNSearch = new LinearNNSearch(); /** The number of attributes that contribute to a prediction. */ protected double m_NumAttributesUsed; /** * IBk classifier. Simple instance-based learner that uses the class of the nearest k training instances for the class of the test instances. * * @param k * the number of nearest neighbors to use for prediction */ public IBk(final int k) { this.init(); this.setKNN(k); } /** * IB1 classifier. Instance-based learner. Predicts the class of the single nearest training instance for each test instance. */ public IBk() { this.init(); } /** * Returns a string describing classifier. * * @return a description suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "K-nearest neighbours classifier. Can " + "select appropriate value of K based on cross-validation. Can also do " + "distance weighting.\n\n" + "For more information, see\n\n" + this.getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed information about the technical background of this class, e.g., paper reference or book this class is based on.
* * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "D. Aha and D. Kibler"); result.setValue(Field.YEAR, "1991"); result.setValue(Field.TITLE, "Instance-based learning algorithms"); result.setValue(Field.JOURNAL, "Machine Learning"); result.setValue(Field.VOLUME, "6"); result.setValue(Field.PAGES, "37-66"); return result; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String KNNTipText() { return "The number of neighbours to use."; } /** * Set the number of neighbours the learner is to use. * * @param k * the number of neighbours. */ public void setKNN(final int k) { this.m_kNN = k; this.m_kNNUpper = k; this.m_kNNValid = false; } /** * Gets the number of neighbours the learner will use. * * @return the number of neighbours. */ public int getKNN() { return this.m_kNN; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String windowSizeTipText() { return "Gets the maximum number of instances allowed in the training " + "pool. The addition of new instances above this value will result " + "in old instances being removed. A value of 0 signifies no limit " + "to the number of training instances."; } /** * Gets the maximum number of instances allowed in the training pool. The addition of new instances above this value will result in old instances being removed. A value of 0 signifies no limit to the number of training instances. * * @return Value of WindowSize. */ public int getWindowSize() { return this.m_WindowSize; } /** * Sets the maximum number of instances allowed in the training pool. The addition of new instances above this value will result in old instances being removed. A value of 0 signifies no limit to the number of training instances. * * @param newWindowSize * Value to assign to WindowSize. */ public void setWindowSize(final int newWindowSize) { this.m_WindowSize = newWindowSize; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String distanceWeightingTipText() { return "Gets the distance weighting method used."; } /** * Gets the distance weighting method used. Will be one of WEIGHT_NONE, WEIGHT_INVERSE, or WEIGHT_SIMILARITY * * @return the distance weighting method used. */ public SelectedTag getDistanceWeighting() { return new SelectedTag(this.m_DistanceWeighting, TAGS_WEIGHTING); } /** * Sets the distance weighting method used. Values other than WEIGHT_NONE, WEIGHT_INVERSE, or WEIGHT_SIMILARITY will be ignored. * * @param newMethod * the distance weighting method to use */ public void setDistanceWeighting(final SelectedTag newMethod) { if (newMethod.getTags() == TAGS_WEIGHTING) { this.m_DistanceWeighting = newMethod.getSelectedTag().getID(); } } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String meanSquaredTipText() { return "Whether the mean squared error is used rather than mean " + "absolute error when doing cross-validation for regression problems."; } /** * Gets whether the mean squared error is used rather than mean absolute error when doing cross-validation. 
* * @return true if so. */ public boolean getMeanSquared() { return this.m_MeanSquared; } /** * Sets whether the mean squared error is used rather than mean absolute error when doing cross-validation. * * @param newMeanSquared * true if so. */ public void setMeanSquared(final boolean newMeanSquared) { this.m_MeanSquared = newMeanSquared; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String crossValidateTipText() { return "Whether hold-one-out cross-validation will be used to " + "select the best k value between 1 and the value specified as " + "the KNN parameter."; } /** * Gets whether hold-one-out cross-validation will be used to select the best k value. * * @return true if cross-validation will be used. */ public boolean getCrossValidate() { return this.m_CrossValidate; } /** * Sets whether hold-one-out cross-validation will be used to select the best k value. * * @param newCrossValidate * true if cross-validation should be used. */ public void setCrossValidate(final boolean newCrossValidate) { this.m_CrossValidate = newCrossValidate; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String nearestNeighbourSearchAlgorithmTipText() { return "The nearest neighbour search algorithm to use " + "(Default: weka.core.neighboursearch.LinearNNSearch)."; } /** * Returns the current nearestNeighbourSearch algorithm in use. * * @return the NearestNeighbourSearch algorithm currently in use. */ public NearestNeighbourSearch getNearestNeighbourSearchAlgorithm() { return this.m_NNSearch; } /** * Sets the nearestNeighbourSearch algorithm to be used for finding nearest neighbour(s). * * @param nearestNeighbourSearchAlgorithm * - The NearestNeighbourSearch class. */ public void setNearestNeighbourSearchAlgorithm(final NearestNeighbourSearch nearestNeighbourSearchAlgorithm) { this.m_NNSearch = nearestNeighbourSearchAlgorithm; } /** * Get the number of training instances the classifier is currently using. * * @return the number of training instances the classifier is currently using */ public int getNumTraining() { return this.m_Train.numInstances(); } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(0); return result; } /** * Generates the classifier. * * @param instances * set of instances serving as training data * @throws Exception * if the classifier has not been generated successfully */ @Override public void buildClassifier(Instances instances) throws Exception { // can classifier handle the data? 
this.getCapabilities().testWithFail(instances); // remove instances with missing class instances = new Instances(instances); instances.deleteWithMissingClass(); this.m_NumClasses = instances.numClasses(); this.m_ClassType = instances.classAttribute().type(); this.m_Train = new Instances(instances, 0, instances.numInstances()); // Throw away initial instances until within the specified window size if ((this.m_WindowSize > 0) && (instances.numInstances() > this.m_WindowSize)) { this.m_Train = new Instances(this.m_Train, this.m_Train.numInstances() - this.m_WindowSize, this.m_WindowSize); } this.m_NumAttributesUsed = 0.0; for (int i = 0; i < this.m_Train.numAttributes(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if ((i != this.m_Train.classIndex()) && (this.m_Train.attribute(i).isNominal() || this.m_Train.attribute(i).isNumeric())) { this.m_NumAttributesUsed += 1.0; } } this.m_NNSearch.setInstances(this.m_Train); // Invalidate any currently cross-validation selected k this.m_kNNValid = false; this.m_defaultModel = new ZeroR(); this.m_defaultModel.buildClassifier(instances); } /** * Adds the supplied instance to the training set. * * @param instance * the instance to add * @throws Exception * if instance could not be incorporated successfully */ @Override public void updateClassifier(final Instance instance) throws Exception { if (this.m_Train.equalHeaders(instance.dataset()) == false) { throw new Exception("Incompatible instance types\n" + this.m_Train.equalHeadersMsg(instance.dataset())); } if (instance.classIsMissing()) { return; } this.m_Train.add(instance); this.m_NNSearch.update(instance); this.m_kNNValid = false; if ((this.m_WindowSize > 0) && (this.m_Train.numInstances() > this.m_WindowSize)) { boolean deletedInstance = false; while (this.m_Train.numInstances() > this.m_WindowSize) { this.m_Train.delete(0); deletedInstance = true; } // rebuild datastructure KDTree currently can't delete if (deletedInstance == true) { this.m_NNSearch.setInstances(this.m_Train); } } } /** * Calculates the class membership probabilities for the given test instance. 
* * @param instance * the instance to be classified * @return predicted class probability distribution * @throws Exception * if an error occurred during the prediction */ @Override public double[] distributionForInstance(final Instance instance) throws Exception { if (this.m_Train.numInstances() == 0) { // throw new Exception("No training instances!"); return this.m_defaultModel.distributionForInstance(instance); } if ((this.m_WindowSize > 0) && (this.m_Train.numInstances() > this.m_WindowSize)) { this.m_kNNValid = false; boolean deletedInstance = false; while (this.m_Train.numInstances() > this.m_WindowSize) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } this.m_Train.delete(0); deletedInstance = true; } // rebuild datastructure KDTree currently can't delete if (deletedInstance == true) { this.m_NNSearch.setInstances(this.m_Train); } } // Select k by cross validation if (!this.m_kNNValid && (this.m_CrossValidate) && (this.m_kNNUpper >= 1)) { this.crossValidate(); } this.m_NNSearch.addInstanceInfo(instance); Instances neighbours = this.m_NNSearch.kNearestNeighbours(instance, this.m_kNN); double[] distances = this.m_NNSearch.getDistances(); double[] distribution = this.makeDistribution(neighbours, distances); return distribution; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<>(7); newVector.addElement(new Option("\tWeight neighbours by the inverse of their distance\n" + "\t(use when k > 1)", "I", 0, "-I")); newVector.addElement(new Option("\tWeight neighbours by 1 - their distance\n" + "\t(use when k > 1)", "F", 0, "-F")); newVector.addElement(new Option("\tNumber of nearest neighbours (k) used in classification.\n" + "\t(Default = 1)", "K", 1, "-K <number of neighbors>")); newVector.addElement(new Option("\tMinimise mean squared error rather than mean absolute\n" + "\terror when using -X option with numeric prediction.", "E", 0, "-E")); newVector.addElement(new Option("\tMaximum number of training instances maintained.\n" + "\tTraining instances are dropped FIFO. (Default = no window)", "W", 1, "-W <window size>")); newVector.addElement(new Option("\tSelect the number of nearest neighbours between 1\n" + "\tand the k value specified using hold-one-out evaluation\n" + "\ton the training data (use when k > 1)", "X", 0, "-X")); newVector.addElement(new Option("\tThe nearest neighbour search algorithm to use " + "(default: weka.core.neighboursearch.LinearNNSearch).\n", "A", 0, "-A")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -I * Weight neighbours by the inverse of their distance * (use when k &gt; 1) * </pre> * * <pre> * -F * Weight neighbours by 1 - their distance * (use when k &gt; 1) * </pre> * * <pre> * -K &lt;number of neighbors&gt; * Number of nearest neighbours (k) used in classification. * (Default = 1) * </pre> * * <pre> * -E * Minimise mean squared error rather than mean absolute * error when using -X option with numeric prediction. * </pre> * * <pre> * -W &lt;window size&gt; * Maximum number of training instances maintained. * Training instances are dropped FIFO.
(Default = no window) * </pre> * * <pre> * -X * Select the number of nearest neighbours between 1 * and the k value specified using hold-one-out evaluation * on the training data (use when k &gt; 1) * </pre> * * <pre> * -A * The nearest neighbour search algorithm to use (default: weka.core.neighboursearch.LinearNNSearch). * </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String knnString = Utils.getOption('K', options); if (knnString.length() != 0) { this.setKNN(Integer.parseInt(knnString)); } else { this.setKNN(1); } String windowString = Utils.getOption('W', options); if (windowString.length() != 0) { this.setWindowSize(Integer.parseInt(windowString)); } else { this.setWindowSize(0); } if (Utils.getFlag('I', options)) { this.setDistanceWeighting(new SelectedTag(WEIGHT_INVERSE, TAGS_WEIGHTING)); } else if (Utils.getFlag('F', options)) { this.setDistanceWeighting(new SelectedTag(WEIGHT_SIMILARITY, TAGS_WEIGHTING)); } else { this.setDistanceWeighting(new SelectedTag(WEIGHT_NONE, TAGS_WEIGHTING)); } this.setCrossValidate(Utils.getFlag('X', options)); this.setMeanSquared(Utils.getFlag('E', options)); String nnSearchClass = Utils.getOption('A', options); if (nnSearchClass.length() != 0) { String nnSearchClassSpec[] = Utils.splitOptions(nnSearchClass); if (nnSearchClassSpec.length == 0) { throw new Exception("Invalid NearestNeighbourSearch algorithm " + "specification string."); } String className = nnSearchClassSpec[0]; nnSearchClassSpec[0] = ""; this.setNearestNeighbourSearchAlgorithm((NearestNeighbourSearch) Utils.forName(NearestNeighbourSearch.class, className, nnSearchClassSpec)); } else { this.setNearestNeighbourSearchAlgorithm(new LinearNNSearch()); } super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of IBk. * * @return an array of strings suitable for passing to setOptions() */ @Override public String[] getOptions() { Vector<String> options = new Vector<>(); options.add("-K"); options.add("" + this.getKNN()); options.add("-W"); options.add("" + this.m_WindowSize); if (this.getCrossValidate()) { options.add("-X"); } if (this.getMeanSquared()) { options.add("-E"); } if (this.m_DistanceWeighting == WEIGHT_INVERSE) { options.add("-I"); } else if (this.m_DistanceWeighting == WEIGHT_SIMILARITY) { options.add("-F"); } options.add("-A"); options.add(this.m_NNSearch.getClass().getName() + " " + Utils.joinOptions(this.m_NNSearch.getOptions())); Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * Returns an enumeration of the additional measure names produced by the neighbour search algorithm, plus the chosen K in case cross-validation is enabled. * * @return an enumeration of the measure names */ @Override public Enumeration<String> enumerateMeasures() { if (this.m_CrossValidate) { Enumeration<String> enm = this.m_NNSearch.enumerateMeasures(); Vector<String> measures = new Vector<>(); while (enm.hasMoreElements()) { measures.add(enm.nextElement()); } measures.add("measureKNN"); return measures.elements(); } else { return this.m_NNSearch.enumerateMeasures(); } } /** * Returns the value of the named measure from the neighbour search algorithm, plus the chosen K in case cross-validation is enabled. 
* * @param additionalMeasureName * the name of the measure to query for its value * @return the value of the named measure * @throws IllegalArgumentException * if the named measure is not supported */ @Override public double getMeasure(final String additionalMeasureName) { if (additionalMeasureName.equals("measureKNN")) { return this.m_kNN; } else { return this.m_NNSearch.getMeasure(additionalMeasureName); } } /** * Returns a description of this classifier. * * @return a description of this classifier as a string. */ @Override public String toString() { if (this.m_Train == null) { return "IBk: No model built yet."; } if (this.m_Train.numInstances() == 0) { return "Warning: no training instances - ZeroR model used."; } if (!this.m_kNNValid && this.m_CrossValidate) { try { this.crossValidate(); } catch (InterruptedException e) { e.printStackTrace(); } } String result = "IB1 instance-based classifier\n" + "using " + this.m_kNN; switch (this.m_DistanceWeighting) { case WEIGHT_INVERSE: result += " inverse-distance-weighted"; break; case WEIGHT_SIMILARITY: result += " similarity-weighted"; break; } result += " nearest neighbour(s) for classification\n"; if (this.m_WindowSize != 0) { result += "using a maximum of " + this.m_WindowSize + " (windowed) training instances\n"; } return result; } /** * Initialise scheme variables. */ protected void init() { this.setKNN(1); this.m_WindowSize = 0; this.m_DistanceWeighting = WEIGHT_NONE; this.m_CrossValidate = false; this.m_MeanSquared = false; } /** * Turn the list of nearest neighbors into a probability distribution. * * @param neighbours * the list of nearest neighboring instances * @param distances * the distances of the neighbors * @return the probability distribution * @throws Exception * if computation goes wrong or has no class attribute */ protected double[] makeDistribution(final Instances neighbours, final double[] distances) throws Exception { double total = 0, weight; double[] distribution = new double[this.m_NumClasses]; // Set up a correction to the estimator if (this.m_ClassType == Attribute.NOMINAL) { for (int i = 0; i < this.m_NumClasses; i++) { distribution[i] = 1.0 / Math.max(1, this.m_Train.numInstances()); } total = (double) this.m_NumClasses / Math.max(1, this.m_Train.numInstances()); } for (int i = 0; i < neighbours.numInstances(); i++) { // Collect class counts Instance current = neighbours.instance(i); distances[i] = distances[i] * distances[i]; distances[i] = Math.sqrt(distances[i] / this.m_NumAttributesUsed); switch (this.m_DistanceWeighting) { case WEIGHT_INVERSE: weight = 1.0 / (distances[i] + 0.001); // to avoid div by zero break; case WEIGHT_SIMILARITY: weight = 1.0 - distances[i]; break; default: // WEIGHT_NONE: weight = 1.0; break; } weight *= current.weight(); try { switch (this.m_ClassType) { case Attribute.NOMINAL: distribution[(int) current.classValue()] += weight; break; case Attribute.NUMERIC: distribution[0] += current.classValue() * weight; break; } } catch (Exception ex) { throw new Error("Data has no class attribute!"); } total += weight; } // Normalise distribution if (total > 0) { Utils.normalize(distribution, total); } return distribution; } /** * Select the best value for k by hold-one-out cross-validation. If the class attribute is nominal, classification error is minimised. 
If the class attribute is numeric, mean absolute error is minimised * * @throws InterruptedException */ protected void crossValidate() throws InterruptedException { try { if (this.m_NNSearch instanceof weka.core.neighboursearch.CoverTree) { throw new Exception("CoverTree doesn't support hold-one-out " + "cross-validation. Use some other NN " + "method."); } double[] performanceStats = new double[this.m_kNNUpper]; double[] performanceStatsSq = new double[this.m_kNNUpper]; for (int i = 0; i < this.m_kNNUpper; i++) { performanceStats[i] = 0; performanceStatsSq[i] = 0; } this.m_kNN = this.m_kNNUpper; Instance instance; Instances neighbours; double[] origDistances, convertedDistances; for (int i = 0; i < this.m_Train.numInstances(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (this.m_Debug && (i % 50 == 0)) { System.err.print("Cross validating " + i + "/" + this.m_Train.numInstances() + "\r"); } instance = this.m_Train.instance(i); neighbours = this.m_NNSearch.kNearestNeighbours(instance, this.m_kNN); origDistances = this.m_NNSearch.getDistances(); for (int j = this.m_kNNUpper - 1; j >= 0; j--) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } // Update the performance stats convertedDistances = new double[origDistances.length]; System.arraycopy(origDistances, 0, convertedDistances, 0, origDistances.length); double[] distribution = this.makeDistribution(neighbours, convertedDistances); double thisPrediction = Utils.maxIndex(distribution); if (this.m_Train.classAttribute().isNumeric()) { thisPrediction = distribution[0]; double err = thisPrediction - instance.classValue(); performanceStatsSq[j] += err * err; // Squared error performanceStats[j] += Math.abs(err); // Absolute error } else { if (thisPrediction != instance.classValue()) { performanceStats[j]++; // Classification error } } if (j >= 1) { neighbours = this.pruneToK(neighbours, convertedDistances, j); } } } // Display the results of the cross-validation for (int i = 0; i < this.m_kNNUpper; i++) { if (this.m_Debug) { System.err.print("Hold-one-out performance of " + (i + 1) + " neighbors "); } if (this.m_Train.classAttribute().isNumeric()) { if (this.m_Debug) { if (this.m_MeanSquared) { System.err.println("(RMSE) = " + Math.sqrt(performanceStatsSq[i] / this.m_Train.numInstances())); } else { System.err.println("(MAE) = " + performanceStats[i] / this.m_Train.numInstances()); } } } else { if (this.m_Debug) { System.err.println("(%ERR) = " + 100.0 * performanceStats[i] / this.m_Train.numInstances()); } } } // Check through the performance stats and select the best // k value (or the lowest k if more than one best) double[] searchStats = performanceStats; if (this.m_Train.classAttribute().isNumeric() && this.m_MeanSquared) { searchStats = performanceStatsSq; } double bestPerformance = Double.NaN; int bestK = 1; for (int i = 0; i < this.m_kNNUpper; i++) { if (Double.isNaN(bestPerformance) || (bestPerformance > searchStats[i])) { bestPerformance = searchStats[i]; bestK = i + 1; } } this.m_kNN = bestK; if (this.m_Debug) { System.err.println("Selected k = " + bestK); } this.m_kNNValid = true; } catch (InterruptedException e) { throw e; } catch (Exception ex) { throw new Error("Couldn't optimize by cross-validation: " + ex.getMessage()); } } /** * Prunes the list to contain the k nearest neighbors. 
If there are multiple neighbors at the k'th distance, all will be kept. * * @param neighbours * the neighbour instances. * @param distances * the distances of the neighbours from target instance. * @param k * the number of neighbors to keep. * @return the pruned neighbours. */ public Instances pruneToK(Instances neighbours, final double[] distances, int k) { if (neighbours == null || distances == null || neighbours.numInstances() == 0) { return null; } if (k < 1) { k = 1; } int currentK = 0; double currentDist; for (int i = 0; i < neighbours.numInstances(); i++) { currentK++; currentDist = distances[i]; if (currentK > k && currentDist != distances[i - 1]) { currentK--; neighbours = new Instances(neighbours, 0, currentK); break; } } return neighbours; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. * * @param argv * should contain command line options (see setOptions) */ public static void main(final String[] argv) { runClassifier(new IBk(), argv); } }
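/*
 * Editor's note: a minimal usage sketch for IBk, to make the interplay of
 * setKNN, cross-validated k selection (-X) and distance weighting (-I)
 * concrete. The ARFF path and the class IBkUsageSketch are placeholders, not
 * part of interruptible-weka; note that when cross-validation is enabled, the
 * best k is only chosen lazily, at the first call to distributionForInstance.
 */
import weka.classifiers.lazy.IBk;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.SelectedTag;
import weka.core.converters.ConverterUtils.DataSource;

public class IBkUsageSketch {
    public static void main(String[] args) throws Exception {
        // placeholder path; any ARFF file with a nominal class works
        Instances data = DataSource.read("/path/to/dataset.arff");
        data.setClassIndex(data.numAttributes() - 1);

        IBk ibk = new IBk(10);      // k = 10 acts as the upper bound for -X
        ibk.setCrossValidate(true); // hold-one-out selection of k in [1, 10]
        ibk.setDistanceWeighting(new SelectedTag(IBk.WEIGHT_INVERSE, IBk.TAGS_WEIGHTING));
        ibk.buildClassifier(data);

        // the first prediction triggers crossValidate() and fixes k
        Instance first = data.instance(0);
        double[] dist = ibk.distributionForInstance(first);
        System.out.println("selected k = " + (int) ibk.getMeasure("measureKNN"));
        System.out.println("p(class = 0 | x) = " + dist[0]);
    }
}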
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/lazy/KStar.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * KStar.java * Copyright (C) 1995-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.lazy; import java.util.Collections; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.UpdateableClassifier; import weka.classifiers.lazy.kstar.KStarCache; import weka.classifiers.lazy.kstar.KStarConstants; import weka.classifiers.lazy.kstar.KStarNominalAttribute; import weka.classifiers.lazy.kstar.KStarNumericAttribute; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; /** * <!-- globalinfo-start --> K* is an instance-based classifier, that is the class of a test instance is based upon the class of those training instances similar to it, as determined by some similarity function. It differs from other * instance-based learners in that it uses an entropy-based distance function.<br/> * <br/> * For more information on K*, see<br/> * <br/> * John G. Cleary, Leonard E. Trigg: K*: An Instance-based Learner Using an Entropic Distance Measure. In: 12th International Conference on Machine Learning, 108-114, 1995. * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;inproceedings{Cleary1995, * author = {John G. Cleary and Leonard E. Trigg}, * booktitle = {12th International Conference on Machine Learning}, * pages = {108-114}, * title = {K*: An Instance-based Learner Using an Entropic Distance Measure}, * year = {1995} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -B &lt;num&gt; * Manual blend setting (default 20%) * </pre> * * <pre> * -E * Enable entropic auto-blend setting (symbolic class only) * </pre> * * <pre> * -M &lt;char&gt; * Specify the missing value treatment mode (default a) * Valid options are: a(verage), d(elete), m(axdiff), n(ormal) * </pre> * * <!-- options-end --> * * @author Len Trigg (len@reeltwo.com) * @author Abdelaziz Mahoui (am14@cs.waikato.ac.nz) - Java port * @version $Revision$ */ public class KStar extends AbstractClassifier implements KStarConstants, UpdateableClassifier, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 332458330800479083L; /** The training instances used for classification. 
*/ protected Instances m_Train; /** The number of instances in the dataset */ protected int m_NumInstances; /** The number of class values */ protected int m_NumClasses; /** The number of attributes */ protected int m_NumAttributes; /** The class attribute type */ protected int m_ClassType; /** Table of random class value columns */ protected int[][] m_RandClassCols; /** Flag turning on and off the computation of random class columns */ protected int m_ComputeRandomCols = ON; /** Flag turning on and off the initialisation of config variables */ protected int m_InitFlag = ON; /** * A custom data structure for caching distinct attribute values and their scale factor or stop parameter. */ protected KStarCache[] m_Cache; /** missing value treatment */ protected int m_MissingMode = M_AVERAGE; /** B_SPHERE = use specified blend, B_ENTROPY = entropic blend setting */ protected int m_BlendMethod = B_SPHERE; /** default sphere of influence blend setting */ protected int m_GlobalBlend = 20; /** Define possible missing value handling methods */ public static final Tag[] TAGS_MISSING = { new Tag(M_DELETE, "Ignore the instances with missing values"), new Tag(M_MAXDIFF, "Treat missing values as maximally different"), new Tag(M_NORMAL, "Normalize over the attributes"), new Tag(M_AVERAGE, "Average column entropy curves") }; /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "K* is an instance-based classifier, that is the class of a test " + "instance is based upon the class of those training instances " + "similar to it, as determined by some similarity function. It differs " + "from other instance-based learners in that it uses an entropy-based " + "distance function.\n\n" + "For more information on K*, see\n\n" + this.getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed information about the technical background of this class, e.g., paper reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "John G. Cleary and Leonard E. Trigg"); result.setValue(Field.TITLE, "K*: An Instance-based Learner Using an Entropic Distance Measure"); result.setValue(Field.BOOKTITLE, "12th International Conference on Machine Learning"); result.setValue(Field.YEAR, "1995"); result.setValue(Field.PAGES, "108-114"); return result; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(0); return result; } /** * Generates the classifier.
* * @param instances * set of instances serving as training data * @throws Exception * if the classifier has not been generated successfully */ @Override public void buildClassifier(Instances instances) throws Exception { // can classifier handle the data? this.getCapabilities().testWithFail(instances); // remove instances with missing class instances = new Instances(instances); instances.deleteWithMissingClass(); this.m_Train = new Instances(instances, 0, instances.numInstances()); // initializes class attributes ** java-speaking! :-) ** this.init_m_Attributes(); } /** * Adds the supplied instance to the training set * * @param instance * the instance to add * @throws Exception * if instance could not be incorporated successfully */ @Override public void updateClassifier(final Instance instance) throws Exception { if (this.m_Train.equalHeaders(instance.dataset()) == false) { throw new Exception("Incompatible instance types\n" + this.m_Train.equalHeadersMsg(instance.dataset())); } if (instance.classIsMissing()) { return; } this.m_Train.add(instance); // update relevant attributes ... this.update_m_Attributes(); } /** * Calculates the class membership probabilities for the given test instance. * * @param instance * the instance to be classified * @return predicted class probability distribution * @throws Exception * if an error occurred during the prediction */ @Override public double[] distributionForInstance(final Instance instance) throws Exception { double transProb = 0.0, temp = 0.0; double[] classProbability = new double[this.m_NumClasses]; double[] predictedValue = new double[1]; // initialization ... for (int i = 0; i < classProbability.length; i++) { classProbability[i] = 0.0; } predictedValue[0] = 0.0; if (this.m_InitFlag == ON) { // need to compute them only once and will be used for all instances. // We are doing this because the evaluation module controls the calls. if (this.m_BlendMethod == B_ENTROPY) { this.generateRandomClassColomns(); } this.m_Cache = new KStarCache[this.m_NumAttributes]; for (int i = 0; i < this.m_NumAttributes; i++) { this.m_Cache[i] = new KStarCache(); } this.m_InitFlag = OFF; } // init done. Instance trainInstance; Enumeration<Instance> enu = this.m_Train.enumerateInstances(); while (enu.hasMoreElements()) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } trainInstance = enu.nextElement(); transProb = this.instanceTransformationProbability(instance, trainInstance); switch (this.m_ClassType) { case Attribute.NOMINAL: classProbability[(int) trainInstance.classValue()] += transProb; break; case Attribute.NUMERIC: predictedValue[0] += transProb * trainInstance.classValue(); temp += transProb; break; } } if (this.m_ClassType == Attribute.NOMINAL) { double sum = Utils.sum(classProbability); if (sum <= 0.0) { for (int i = 0; i < classProbability.length; i++) { classProbability[i] = (double) 1 / (double) this.m_NumClasses; } } else { Utils.normalize(classProbability, sum); } return classProbability; } else { predictedValue[0] = (temp != 0) ? predictedValue[0] / temp : 0.0; return predictedValue; } } /** * Calculate the probability of the first instance transforming into the second instance: the probability is the product of the transformation probabilities of the attributes normalized over the number of instances used.
* * @param first * the test instance * @param second * the train instance * @return transformation probability value * @throws Exception */ private double instanceTransformationProbability(final Instance first, final Instance second) throws Exception { double transProb = 1.0; int numMissAttr = 0; for (int i = 0; i < this.m_NumAttributes; i++) { // XXX interrupt weka if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } if (i == this.m_Train.classIndex()) { continue; // ignore class attribute } if (first.isMissing(i)) { // test instance attribute value is missing numMissAttr++; continue; } transProb *= this.attrTransProb(first, second, i); // normalize for missing values if (numMissAttr != this.m_NumAttributes) { transProb = Math.pow(transProb, (double) this.m_NumAttributes / (this.m_NumAttributes - numMissAttr)); } else { // weird case! transProb = 0.0; } } // normalize for the train dataset return transProb / this.m_NumInstances; } /** * Calculates the transformation probability of the indexed test attribute to the indexed train attribute. * * @param first * the test instance. * @param second * the train instance. * @param col * the index of the attribute in the instance. * @return the value of the transformation probability. * @throws Exception */ private double attrTransProb(final Instance first, final Instance second, final int col) throws Exception { double transProb = 0.0; KStarNominalAttribute ksNominalAttr; KStarNumericAttribute ksNumericAttr; switch (this.m_Train.attribute(col).type()) { case Attribute.NOMINAL: ksNominalAttr = new KStarNominalAttribute(first, second, col, this.m_Train, this.m_RandClassCols, this.m_Cache[col]); ksNominalAttr.setOptions(this.m_MissingMode, this.m_BlendMethod, this.m_GlobalBlend); transProb = ksNominalAttr.transProb(); ksNominalAttr = null; break; case Attribute.NUMERIC: ksNumericAttr = new KStarNumericAttribute(first, second, col, this.m_Train, this.m_RandClassCols, this.m_Cache[col]); ksNumericAttr.setOptions(this.m_MissingMode, this.m_BlendMethod, this.m_GlobalBlend); transProb = ksNumericAttr.transProb(); ksNumericAttr = null; break; } return transProb; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String missingModeTipText() { return "Determines how missing attribute values are treated."; } /** * Gets the method to use for handling missing values. Will be one of M_NORMAL, M_AVERAGE, M_MAXDIFF or M_DELETE. * * @return the method used for handling missing values. */ public SelectedTag getMissingMode() { return new SelectedTag(this.m_MissingMode, TAGS_MISSING); } /** * Sets the method to use for handling missing values. Values other than M_NORMAL, M_AVERAGE, M_MAXDIFF and M_DELETE will be ignored. * * @param newMode * the method to use for handling missing values. */ public void setMissingMode(final SelectedTag newMode) { if (newMode.getTags() == TAGS_MISSING) { this.m_MissingMode = newMode.getSelectedTag().getID(); } } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options.
*/ @Override public Enumeration<Option> listOptions() { Vector<Option> optVector = new Vector<>(3); optVector.addElement(new Option("\tManual blend setting (default 20%)\n", "B", 1, "-B <num>")); optVector.addElement(new Option("\tEnable entropic auto-blend setting (symbolic class only)\n", "E", 0, "-E")); optVector.addElement(new Option("\tSpecify the missing value treatment mode (default a)\n" + "\tValid options are: a(verage), d(elete), m(axdiff), n(ormal)\n", "M", 1, "-M <char>")); optVector.addAll(Collections.list(super.listOptions())); return optVector.elements(); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String globalBlendTipText() { return "The parameter for global blending. Values are restricted to [0,100]."; } /** * Set the global blend parameter * * @param b * the value for global blending */ public void setGlobalBlend(final int b) { this.m_GlobalBlend = b; if (this.m_GlobalBlend > 100) { this.m_GlobalBlend = 100; } if (this.m_GlobalBlend < 0) { this.m_GlobalBlend = 0; } } /** * Get the value of the global blend parameter * * @return the value of the global blend parameter */ public int getGlobalBlend() { return this.m_GlobalBlend; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String entropicAutoBlendTipText() { return "Whether entropy-based blending is to be used."; } /** * Set whether entropic blending is to be used. * * @param e * true if entropic blending is to be used */ public void setEntropicAutoBlend(final boolean e) { if (e) { this.m_BlendMethod = B_ENTROPY; } else { this.m_BlendMethod = B_SPHERE; } } /** * Get whether entropic blending is being used * * @return true if entropic blending is used */ public boolean getEntropicAutoBlend() { if (this.m_BlendMethod == B_ENTROPY) { return true; } return false; } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -B &lt;num&gt; * Manual blend setting (default 20%) * </pre> * * <pre> * -E * Enable entropic auto-blend setting (symbolic class only) * </pre> * * <pre> * -M &lt;char&gt; * Specify the missing value treatment mode (default a) * Valid options are: a(verage), d(elete), m(axdiff), n(ormal) * </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String blendStr = Utils.getOption('B', options); if (blendStr.length() != 0) { this.setGlobalBlend(Integer.parseInt(blendStr)); } this.setEntropicAutoBlend(Utils.getFlag('E', options)); String missingModeStr = Utils.getOption('M', options); if (missingModeStr.length() != 0) { switch (missingModeStr.charAt(0)) { case 'a': this.setMissingMode(new SelectedTag(M_AVERAGE, TAGS_MISSING)); break; case 'd': this.setMissingMode(new SelectedTag(M_DELETE, TAGS_MISSING)); break; case 'm': this.setMissingMode(new SelectedTag(M_MAXDIFF, TAGS_MISSING)); break; case 'n': this.setMissingMode(new SelectedTag(M_NORMAL, TAGS_MISSING)); break; default: this.setMissingMode(new SelectedTag(M_AVERAGE, TAGS_MISSING)); } } super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of K*.
* * @return an array of strings suitable for passing to setOptions() */ @Override public String[] getOptions() { // -B <num> -E -M <char> Vector<String> options = new Vector<>(); options.add("-B"); options.add("" + this.m_GlobalBlend); if (this.getEntropicAutoBlend()) { options.add("-E"); } options.add("-M"); if (this.m_MissingMode == M_AVERAGE) { options.add("" + "a"); } else if (this.m_MissingMode == M_DELETE) { options.add("" + "d"); } else if (this.m_MissingMode == M_MAXDIFF) { options.add("" + "m"); } else if (this.m_MissingMode == M_NORMAL) { options.add("" + "n"); } Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * Returns a description of this classifier. * * @return a description of this classifier as a string. */ @Override public String toString() { StringBuffer st = new StringBuffer(); st.append("KStar Beta Version (0.1b).\n" + "Copyright (c) 1995-97 by Len Trigg (trigg@cs.waikato.ac.nz).\n" + "Java port to Weka by Abdelaziz Mahoui " + "(am14@cs.waikato.ac.nz).\n\nKStar options : "); String[] ops = this.getOptions(); for (int i = 0; i < ops.length; i++) { st.append(ops[i] + ' '); } return st.toString(); } /** * Main method for testing this class. * * @param argv * should contain command line options (see setOptions) */ public static void main(final String[] argv) { runClassifier(new KStar(), argv); } /** * Initializes the m_Attributes of the class. */ private void init_m_Attributes() { try { this.m_NumInstances = this.m_Train.numInstances(); this.m_NumClasses = this.m_Train.numClasses(); this.m_NumAttributes = this.m_Train.numAttributes(); this.m_ClassType = this.m_Train.classAttribute().type(); this.m_InitFlag = ON; } catch (Exception e) { e.printStackTrace(); } } /** * Updates the m_Attributes of the class. */ private void update_m_Attributes() { this.m_NumInstances = this.m_Train.numInstances(); this.m_InitFlag = ON; } /** * Note: for Nominal Class Only! Generates a set of random versions of the class column. */ private void generateRandomClassColomns() { Random generator = new Random(42); // Random generator = new Random(); this.m_RandClassCols = new int[NUM_RAND_COLS + 1][]; int[] classvals = this.classValues(); for (int i = 0; i < NUM_RAND_COLS; i++) { // generate a randomized version of the class column this.m_RandClassCols[i] = this.randomize(classvals, generator); } // original column is preserved in column NUM_RAND_COLS this.m_RandClassCols[NUM_RAND_COLS] = classvals; } /** * Note: for Nominal Class Only! Returns an array of the class values * * @return an array of class values */ private int[] classValues() { int[] classval = new int[this.m_NumInstances]; for (int i = 0; i < this.m_NumInstances; i++) { try { classval[i] = (int) this.m_Train.instance(i).classValue(); } catch (Exception ex) { ex.printStackTrace(); } } return classval; } /** * Returns a copy of the array with its elements randomly redistributed. * * @param array * the array to randomize. * @param generator * the random number generator to use * @return a copy of the array with its elements randomly redistributed. */ private int[] randomize(final int[] array, final Random generator) { int index; int temp; int[] newArray = new int[array.length]; System.arraycopy(array, 0, newArray, 0, array.length); for (int j = newArray.length - 1; j > 0; j--) { index = (int) (generator.nextDouble() * j); temp = newArray[j]; newArray[j] = newArray[index]; newArray[index] = temp; } return newArray; } /** * Returns the revision string.
* * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } // class end
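A minimal usage sketch for the class above; the file name iris.arff is a placeholder, and any ARFF file with a nominal class attribute would do.

import weka.classifiers.lazy.KStar;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class KStarSketch {
  public static void main(String[] args) throws Exception {
    Instances data = DataSource.read("iris.arff"); // placeholder path
    data.setClassIndex(data.numAttributes() - 1);
    KStar ks = new KStar();
    ks.setGlobalBlend(40);         // same effect as -B 40
    ks.setEntropicAutoBlend(true); // same effect as -E (nominal class only)
    ks.buildClassifier(data);      // lazy learner: this just stores the data
    double[] dist = ks.distributionForInstance(data.instance(0));
    for (double p : dist) {
      System.out.printf("%.3f ", p);
    }
  }
}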
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/lazy/LWL.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * LWL.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.lazy; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.Classifier; import weka.classifiers.SingleClassifierEnhancer; import weka.classifiers.UpdateableClassifier; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.core.neighboursearch.LinearNNSearch; import weka.core.neighboursearch.NearestNeighbourSearch; /** * <!-- globalinfo-start --> Locally weighted learning. Uses an instance-based algorithm to assign * instance weights which are then used by a specified WeightedInstancesHandler.<br/> * Can do classification (e.g. using naive Bayes) or regression (e.g. using linear regression).<br/> * <br/> * For more info, see<br/> * <br/> * Eibe Frank, Mark Hall, Bernhard Pfahringer: Locally Weighted Naive Bayes. In: 19th Conference in * Uncertainty in Artificial Intelligence, 249-256, 2003.<br/> * <br/> * C. Atkeson, A. Moore, S. Schaal (1996). Locally weighted learning. AI Review.. * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;inproceedings{Frank2003, * author = {Eibe Frank and Mark Hall and Bernhard Pfahringer}, * booktitle = {19th Conference in Uncertainty in Artificial Intelligence}, * pages = {249-256}, * publisher = {Morgan Kaufmann}, * title = {Locally Weighted Naive Bayes}, * year = {2003} * } * * &#64;article{Atkeson1996, * author = {C. Atkeson and A. Moore and S. Schaal}, * journal = {AI Review}, * title = {Locally weighted learning}, * year = {1996} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -A * The nearest neighbour search algorithm to use (default: weka.core.neighboursearch.LinearNNSearch). * </pre> * * <pre> * -K &lt;number of neighbours&gt; * Set the number of neighbours used to set the kernel bandwidth. * (default all) * </pre> * * <pre> * -U &lt;number of weighting method&gt; * Set the weighting kernel shape to use. 0=Linear, 1=Epanechnikov, * 2=Tricube, 3=Inverse, 4=Gaussian. * (default 0 = Linear) * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -W * Full name of base classifier. 
* (default: weka.classifiers.trees.DecisionStump) * </pre> * * <pre> * * Options specific to classifier weka.classifiers.trees.DecisionStump: * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <!-- options-end --> * * @author Len Trigg (trigg@cs.waikato.ac.nz) * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Ashraf M. Kibriya (amk14[at-the-rate]cs[dot]waikato[dot]ac[dot]nz) * @version $Revision$ */ public class LWL extends SingleClassifierEnhancer implements UpdateableClassifier, WeightedInstancesHandler, TechnicalInformationHandler { /** for serialization. */ static final long serialVersionUID = 1979797405383665815L; /** The training instances used for classification. */ protected Instances m_Train; /** The number of neighbours used to select the kernel bandwidth. */ protected int m_kNN = -1; /** The weighting kernel method currently selected. */ protected int m_WeightKernel = LINEAR; /** True if m_kNN should be set to all instances. */ protected boolean m_UseAllK = true; /** * The nearest neighbour search algorithm to use. (Default: * weka.core.neighboursearch.LinearNNSearch) */ protected NearestNeighbourSearch m_NNSearch = new LinearNNSearch(); /** The available kernel weighting methods. */ public static final int LINEAR = 0; public static final int EPANECHNIKOV = 1; public static final int TRICUBE = 2; public static final int INVERSE = 3; public static final int GAUSS = 4; public static final int CONSTANT = 5; /** a ZeroR model in case no model can be built from the data. */ protected Classifier m_ZeroR; /** * Returns a string describing classifier. * * @return a description suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "Locally weighted learning. Uses an instance-based algorithm to " + "assign instance weights which are then used by a specified " + "WeightedInstancesHandler.\n" + "Can do classification (e.g. using naive Bayes) or regression " + "(e.g. using linear regression).\n\n" + "For more info, see\n\n" + this.getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed information about the * technical background of this class, e.g., paper reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Eibe Frank and Mark Hall and Bernhard Pfahringer"); result.setValue(Field.YEAR, "2003"); result.setValue(Field.TITLE, "Locally Weighted Naive Bayes"); result.setValue(Field.BOOKTITLE, "19th Conference in Uncertainty in Artificial Intelligence"); result.setValue(Field.PAGES, "249-256"); result.setValue(Field.PUBLISHER, "Morgan Kaufmann"); additional = result.add(Type.ARTICLE); additional.setValue(Field.AUTHOR, "C. Atkeson and A. Moore and S. Schaal"); additional.setValue(Field.YEAR, "1996"); additional.setValue(Field.TITLE, "Locally weighted learning"); additional.setValue(Field.JOURNAL, "AI Review"); return result; } /** * Constructor. */ public LWL() { this.m_Classifier = new weka.classifiers.trees.DecisionStump(); } /** * String describing default classifier. 
* * @return the default classifier classname */ @Override protected String defaultClassifierString() { return "weka.classifiers.trees.DecisionStump"; } /** * Returns an enumeration of the additional measure names produced by the neighbour search * algorithm. * * @return an enumeration of the measure names */ public Enumeration<String> enumerateMeasures() { return this.m_NNSearch.enumerateMeasures(); } /** * Returns the value of the named measure from the neighbour search algorithm. * * @param additionalMeasureName * the name of the measure to query for its value * @return the value of the named measure * @throws IllegalArgumentException * if the named measure is not supported */ public double getMeasure(final String additionalMeasureName) { return this.m_NNSearch.getMeasure(additionalMeasureName); } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<>(3); newVector.addElement(new Option("\tThe nearest neighbour search " + "algorithm to use " + "(default: weka.core.neighboursearch.LinearNNSearch).\n", "A", 0, "-A")); newVector.addElement(new Option("\tSet the number of neighbours used to set" + " the kernel bandwidth.\n" + "\t(default all)", "K", 1, "-K <number of neighbours>")); newVector.addElement(new Option("\tSet the weighting kernel shape to use." + " 0=Linear, 1=Epanechnikov,\n" + "\t2=Tricube, 3=Inverse, 4=Gaussian.\n" + "\t(default 0 = Linear)", "U", 1, "-U <number of weighting method>")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -A * The nearest neighbour search algorithm to use (default: weka.core.neighboursearch.LinearNNSearch). * </pre> * * <pre> * -K &lt;number of neighbours&gt; * Set the number of neighbours used to set the kernel bandwidth. * (default all) * </pre> * * <pre> * -U &lt;number of weighting method&gt; * Set the weighting kernel shape to use. 0=Linear, 1=Epanechnikov, * 2=Tricube, 3=Inverse, 4=Gaussian. * (default 0 = Linear) * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -W * Full name of base classifier. 
* (default: weka.classifiers.trees.DecisionStump) * </pre> * * <pre> * * Options specific to classifier weka.classifiers.trees.DecisionStump: * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String knnString = Utils.getOption('K', options); if (knnString.length() != 0) { this.setKNN(Integer.parseInt(knnString)); } else { this.setKNN(-1); } String weightString = Utils.getOption('U', options); if (weightString.length() != 0) { this.setWeightingKernel(Integer.parseInt(weightString)); } else { this.setWeightingKernel(LINEAR); } String nnSearchClass = Utils.getOption('A', options); if (nnSearchClass.length() != 0) { String nnSearchClassSpec[] = Utils.splitOptions(nnSearchClass); if (nnSearchClassSpec.length == 0) { throw new Exception("Invalid NearestNeighbourSearch algorithm " + "specification string."); } String className = nnSearchClassSpec[0]; nnSearchClassSpec[0] = ""; this.setNearestNeighbourSearchAlgorithm((NearestNeighbourSearch) Utils.forName(NearestNeighbourSearch.class, className, nnSearchClassSpec)); } else { this.setNearestNeighbourSearchAlgorithm(new LinearNNSearch()); } super.setOptions(options); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<>(); options.add("-U"); options.add("" + this.getWeightingKernel()); if ((this.getKNN() == 0) && this.m_UseAllK) { options.add("-K"); options.add("-1"); } else { options.add("-K"); options.add("" + this.getKNN()); } options.add("-A"); options.add(this.m_NNSearch.getClass().getName() + " " + Utils.joinOptions(this.m_NNSearch.getOptions())); Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String KNNTipText() { return "How many neighbours are used to determine the width of the " + "weighting function (<= 0 means all neighbours)."; } /** * Sets the number of neighbours used for kernel bandwidth setting. The bandwidth is taken as the * distance to the kth neighbour. * * @param knn * the number of neighbours included inside the kernel bandwidth, or 0 to specify using all * neighbours. */ public void setKNN(final int knn) { this.m_kNN = knn; if (knn <= 0) { this.m_kNN = 0; this.m_UseAllK = true; } else { this.m_UseAllK = false; } } /** * Gets the number of neighbours used for kernel bandwidth setting. The bandwidth is taken as the * distance to the kth neighbour. * * @return the number of neighbours included inside the kernel bandwidth, or 0 for all neighbours */ public int getKNN() { return this.m_kNN; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String weightingKernelTipText() { return "Determines weighting function. [0 = Linear, 1 = Epanechnikov, " + "2 = Tricube, 3 = Inverse, 4 = Gaussian and 5 = Constant. " + "(default 0 = Linear)]."; } /** * Sets the kernel weighting method to use. Must be one of LINEAR, EPANECHNIKOV, TRICUBE, INVERSE, * GAUSS or CONSTANT, other values are ignored.
* * @param kernel * the new kernel method to use. Must be one of LINEAR, EPANECHNIKOV, TRICUBE, INVERSE, * GAUSS or CONSTANT. */ public void setWeightingKernel(final int kernel) { if ((kernel != LINEAR) && (kernel != EPANECHNIKOV) && (kernel != TRICUBE) && (kernel != INVERSE) && (kernel != GAUSS) && (kernel != CONSTANT)) { return; } this.m_WeightKernel = kernel; } /** * Gets the kernel weighting method to use. * * @return the new kernel method to use. Will be one of LINEAR, EPANECHNIKOV, TRICUBE, INVERSE, * GAUSS or CONSTANT. */ public int getWeightingKernel() { return this.m_WeightKernel; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String nearestNeighbourSearchAlgorithmTipText() { return "The nearest neighbour search algorithm to use (Default: LinearNN)."; } /** * Returns the current nearestNeighbourSearch algorithm in use. * * @return the NearestNeighbourSearch algorithm currently in use. */ public NearestNeighbourSearch getNearestNeighbourSearchAlgorithm() { return this.m_NNSearch; } /** * Sets the nearestNeighbourSearch algorithm to be used for finding nearest neighbour(s). * * @param nearestNeighbourSearchAlgorithm * - The NearestNeighbourSearch class. */ public void setNearestNeighbourSearchAlgorithm(final NearestNeighbourSearch nearestNeighbourSearchAlgorithm) { this.m_NNSearch = nearestNeighbourSearchAlgorithm; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result; if (this.m_Classifier != null) { result = this.m_Classifier.getCapabilities(); } else { result = super.getCapabilities(); } result.setMinimumNumberInstances(0); // set dependencies for (Capability cap : Capability.values()) { result.enableDependency(cap); } return result; } /** * Generates the classifier. * * @param instances * set of instances serving as training data * @throws Exception * if the classifier has not been generated successfully */ @Override public void buildClassifier(Instances instances) throws Exception { if (!(this.m_Classifier instanceof WeightedInstancesHandler)) { throw new IllegalArgumentException("Classifier must be a " + "WeightedInstancesHandler!"); } // can classifier handle the data? this.getCapabilities().testWithFail(instances); // remove instances with missing class instances = new Instances(instances); instances.deleteWithMissingClass(); // only class? -> build ZeroR model if (instances.numAttributes() == 1) { System.err.println("Cannot build model (only class attribute present in data!), " + "using ZeroR model instead!"); this.m_ZeroR = new weka.classifiers.rules.ZeroR(); this.m_ZeroR.buildClassifier(instances); return; } else { this.m_ZeroR = null; } this.m_Train = new Instances(instances, 0, instances.numInstances()); this.m_NNSearch.setInstances(this.m_Train); } /** * Adds the supplied instance to the training set. 
* * @param instance * the instance to add * @throws Exception * if instance could not be incorporated successfully */ @Override public void updateClassifier(final Instance instance) throws Exception { if (this.m_Train == null) { throw new Exception("No training instance structure set!"); } else if (this.m_Train.equalHeaders(instance.dataset()) == false) { throw new Exception("Incompatible instance types\n" + this.m_Train.equalHeadersMsg(instance.dataset())); } if (!instance.classIsMissing()) { this.m_NNSearch.update(instance); this.m_Train.add(instance); } } /** * Calculates the class membership probabilities for the given test instance. * * @param instance * the instance to be classified * @return predicted class probability distribution * @throws Exception * if distribution can't be computed successfully */ @Override public double[] distributionForInstance(final Instance instance) throws Exception { // default model? if (this.m_ZeroR != null) { return this.m_ZeroR.distributionForInstance(instance); } if (this.m_Train.numInstances() == 0) { throw new Exception("No training instances!"); } this.m_NNSearch.addInstanceInfo(instance); int k = this.m_Train.numInstances(); if ((!this.m_UseAllK && (this.m_kNN < k)) /* * && !(m_WeightKernel==INVERSE || m_WeightKernel==GAUSS) */ ) { k = this.m_kNN; } Instances neighbours = this.m_NNSearch.kNearestNeighbours(instance, k); double distances[] = this.m_NNSearch.getDistances(); if (this.m_Debug) { System.out.println("Test Instance: " + instance); System.out.println("For " + k + " kept " + neighbours.numInstances() + " out of " + this.m_Train.numInstances() + " instances."); } // If LinearNN has skipped so many that fewer than k neighbours remain. if (k > distances.length) { k = distances.length; } if (this.m_Debug) { System.out.println("Instance Distances"); for (int i = 0; i < distances.length; i++) { System.out.println("" + distances[i]); } } // Determine the bandwidth double bandwidth = distances[k - 1]; // Check for bandwidth zero if (bandwidth <= 0) { // if the kth distance is zero then give all instances the same weight for (int i = 0; i < distances.length; i++) { distances[i] = 1; } } else { // Rescale the distances by the bandwidth for (int i = 0; i < distances.length; i++) { distances[i] = distances[i] / bandwidth; } } // Pass the distances through a weighting kernel for (int i = 0; i < distances.length; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } switch (this.m_WeightKernel) { case LINEAR: distances[i] = 1.0001 - distances[i]; break; case EPANECHNIKOV: distances[i] = 3 / 4D * (1.0001 - distances[i] * distances[i]); break; case TRICUBE: distances[i] = Math.pow((1.0001 - Math.pow(distances[i], 3)), 3); break; case CONSTANT: // System.err.println("using constant kernel"); distances[i] = 1; break; case INVERSE: distances[i] = 1.0 / (1.0 + distances[i]); break; case GAUSS: distances[i] = Math.exp(-distances[i] * distances[i]); break; } } if (this.m_Debug) { System.out.println("Instance Weights"); for (int i = 0; i < distances.length; i++) { System.out.println("" + distances[i]); } } // Set the weights on the training data double sumOfWeights = 0, newSumOfWeights = 0; for (int i = 0; i < distances.length; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } double weight = distances[i]; Instance inst = neighbours.instance(i); sumOfWeights += inst.weight();
newSumOfWeights += inst.weight() * weight; inst.setWeight(inst.weight() * weight); // weightedTrain.add(newInst); } // Rescale weights for (int i = 0; i < neighbours.numInstances(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } Instance inst = neighbours.instance(i); inst.setWeight(inst.weight() * sumOfWeights / newSumOfWeights); } // Create a weighted classifier this.m_Classifier.buildClassifier(neighbours); if (this.m_Debug) { System.out.println("Classifying test instance: " + instance); System.out.println("Built base classifier:\n" + this.m_Classifier.toString()); } // Return the classifier's predictions return this.m_Classifier.distributionForInstance(instance); } /** * Returns a description of this classifier. * * @return a description of this classifier as a string. */ @Override public String toString() { // only ZeroR model? if (this.m_ZeroR != null) { StringBuffer buf = new StringBuffer(); buf.append(this.getClass().getName().replaceAll(".*\\.", "") + "\n"); buf.append(this.getClass().getName().replaceAll(".*\\.", "").replaceAll(".", "=") + "\n\n"); buf.append("Warning: No model could be built, hence ZeroR model is used:\n\n"); buf.append(this.m_ZeroR.toString()); return buf.toString(); } if (this.m_Train == null) { return "Locally weighted learning: No model built yet."; } String result = "Locally weighted learning\n" + "===========================\n"; result += "Using classifier: " + this.m_Classifier.getClass().getName() + "\n"; switch (this.m_WeightKernel) { case LINEAR: result += "Using linear weighting kernels\n"; break; case EPANECHNIKOV: result += "Using epanechnikov weighting kernels\n"; break; case TRICUBE: result += "Using tricube weighting kernels\n"; break; case INVERSE: result += "Using inverse-distance weighting kernels\n"; break; case GAUSS: result += "Using gaussian weighting kernels\n"; break; case CONSTANT: result += "Using constant weighting kernels\n"; break; } result += "Using " + (this.m_UseAllK ? "all" : "" + this.m_kNN) + " neighbours"; return result; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. * * @param argv * the options */ public static void main(final String[] argv) { runClassifier(new LWL(), argv); } }
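A minimal usage sketch for LWL (iris.arff is again a placeholder path). The base classifier must be a WeightedInstancesHandler, which buildClassifier enforces; NaiveBayes, the combination studied in the Frank, Hall and Pfahringer paper cited above, is one such choice.

import weka.classifiers.bayes.NaiveBayes;
import weka.classifiers.lazy.LWL;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class LWLSketch {
  public static void main(String[] args) throws Exception {
    Instances data = DataSource.read("iris.arff"); // placeholder path
    data.setClassIndex(data.numAttributes() - 1);
    LWL lwl = new LWL();
    lwl.setClassifier(new NaiveBayes()); // must handle instance weights
    lwl.setKNN(50);                      // bandwidth = distance to 50th neighbour
    lwl.setWeightingKernel(LWL.GAUSS);   // same effect as -U 4
    lwl.buildClassifier(data);           // lazy: the local model is built per query
    System.out.println(lwl.distributionForInstance(data.instance(0))[0]);
  }
}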
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/lazy
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/lazy/kstar/KStarCache.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /** * KStarCache.java * Copyright (C) 1995-2012 University of Waikato * Java port to Weka by Abdelaziz Mahoui (am14@cs.waikato.ac.nz). * */ package weka.classifiers.lazy.kstar; import java.io.Serializable; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * A class representing the caching system used to keep track of each attribute * value and its corresponding scale factor or stop parameter. * * @author Len Trigg (len@reeltwo.com) * @author Abdelaziz Mahoui (am14@cs.waikato.ac.nz) * @version $Revision$ */ public class KStarCache implements Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = -7693632394267140678L; /** * cache table */ CacheTable m_Cache = new CacheTable(); /** * Stores the specified values in the cache table for easy retrieval. * * @param key attribute value used as key to look up the cache table. * @param value cache parameter: attribute scale/stop parameter. * @param pmiss cache parameter: transformation probability to attribute with * missing value. */ public void store(double key, double value, double pmiss) { if (!m_Cache.containsKey(key)) { m_Cache.insert(key, value, pmiss); } } /** * Checks if the specified key maps with an entry in the cache table * * @param key the key to map with an entry in the hashtable. */ public boolean containsKey(double key) { if (m_Cache.containsKey(key)) { return true; } return false; } /** * Returns the values in the cache mapped by the specified key * * @param key the key used to retrieve the table entry. */ public TableEntry getCacheValues(double key) { if (m_Cache.containsKey(key)) { return m_Cache.getEntry(key); } return null; } /** * A custom hashtable class to support the caching system. * */ public class CacheTable implements Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = -8086106452588253423L; /** The hash table data. */ private TableEntry[] m_Table; /** The total number of entries in the hash table. */ private int m_Count; /** Rehashes the table when count exceeds this threshold. */ private int m_Threshold; /** The load factor for the hashtable. */ private final float m_LoadFactor; /** Accuracy value for equality */ private final double EPSILON = 1.0E-5; /** * Constructs a new hashtable with the specified capacity and load factor. */ public CacheTable(int size, float loadFactor) { m_Table = new TableEntry[size]; m_LoadFactor = loadFactor; m_Threshold = (int) (size * loadFactor); m_Count = 0; } /** * Constructs a new hashtable with a default capacity and load factor. */ public CacheTable() { this(101, 0.75f); } /** * Tests if the specified double is a key in this hashtable.
*/ public boolean containsKey(double key) { TableEntry[] table = m_Table; int hash = hashCode(key); int index = (hash & 0x7FFFFFFF) % table.length; for (TableEntry e = table[index]; e != null; e = e.next) { if ((e.hash == hash) && (Math.abs(e.key - key) < EPSILON)) { return true; } } return false; } /** * Inserts a new entry in the hashtable using the specified key. If the key * already exists in the hashtable, do nothing. */ public void insert(double key, double value, double pmiss) { // Makes sure the key is not already in the hashtable. TableEntry e, ne; TableEntry[] table = m_Table; int hash = hashCode(key); int index = (hash & 0x7FFFFFFF) % table.length; // start looking along the chain for (e = table[index]; e != null; e = e.next) { if ((e.hash == hash) && (Math.abs(e.key - key) < EPSILON)) { return; } } // At this point, key is not in table. // Creates a new entry. ne = new TableEntry(hash, key, value, pmiss, table[index]); // Put entry at the head of the chain. table[index] = ne; m_Count++; // Rehash the table if the threshold is exceeded if (m_Count >= m_Threshold) { rehash(); } } /** * Returns the table entry to which the specified key is mapped in this * hashtable. * * @return a table entry. */ public TableEntry getEntry(double key) { TableEntry[] table = m_Table; int hash = hashCode(key); int index = (hash & 0x7FFFFFFF) % table.length; for (TableEntry e = table[index]; e != null; e = e.next) { if ((e.hash == hash) && (Math.abs(e.key - key) < EPSILON)) { return e; } } return null; } /** * Returns the number of keys in this hashtable. * * @return the number of keys in this hashtable. */ public int size() { return m_Count; } /** * Tests if this hashtable maps no keys to values. * * @return true if this hashtable maps no keys to values. */ public boolean isEmpty() { return m_Count == 0; } /** * Clears this hashtable so that it contains no keys. */ public void clear() { TableEntry table[] = m_Table; for (int index = table.length; --index >= 0;) { table[index] = null; } m_Count = 0; } /** * Rehashes the contents of the hashtable into a hashtable with a larger * capacity. This method is called automatically when the number of keys in * the hashtable exceeds this hashtable's capacity and load factor. */ private void rehash() { int oldCapacity = m_Table.length; TableEntry[] oldTable = m_Table; int newCapacity = oldCapacity * 2 + 1; TableEntry[] newTable = new TableEntry[newCapacity]; m_Threshold = (int) (newCapacity * m_LoadFactor); m_Table = newTable; TableEntry e, old; for (int i = oldCapacity; i-- > 0;) { for (old = oldTable[i]; old != null;) { e = old; old = old.next; int index = (e.hash & 0x7FFFFFFF) % newCapacity; e.next = newTable[index]; newTable[index] = e; } } } /** * Returns the hash code of the specified double. * * @return the hash code of the specified double. */ private int hashCode(double key) { long bits = Double.doubleToLongBits(key); return (int) (bits ^ (bits >> 32)); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } // CacheTable /** * Hashtable collision list.
*/ public class TableEntry implements Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = 4057602386766259138L; /** attribute value hash code */ public int hash; /** attribute value */ public double key; /** scale factor or stop parameter */ public double value; /** transformation probability to missing value */ public double pmiss; /** next table entry (separate chaining) */ public TableEntry next = null; /** Constructor */ public TableEntry(int hash, double key, double value, double pmiss, TableEntry next) { this.hash = hash; this.key = key; this.value = value; this.pmiss = pmiss; this.next = next; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } // TableEntry /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } // Cache
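A minimal round-trip sketch for the cache; the key and parameter values are arbitrary. Note that a lookup hashes the exact double bits, so in practice only the exact stored key is found; the EPSILON comparison merely disambiguates hash collisions.

import weka.classifiers.lazy.kstar.KStarCache;

public class KStarCacheSketch {
  public static void main(String[] args) {
    KStarCache cache = new KStarCache();
    // key = attribute value, value = stop/scale parameter, pmiss = missing-value probability
    cache.store(2.5, 0.7, 0.1);
    System.out.println(cache.containsKey(2.5)); // true
    KStarCache.TableEntry e = cache.getCacheValues(2.5);
    System.out.println(e.value + " / " + e.pmiss); // 0.7 / 0.1
  }
}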
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/lazy
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/lazy/kstar/KStarConstants.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /** * KStarConstants.java * Copyright (C) 1995-2012 University of Waikato * Java port to Weka by Abdelaziz Mahoui (am14@cs.waikato.ac.nz). * */ package weka.classifiers.lazy.kstar; /** * @author Len Trigg (len@reeltwo.com) * @author Abdelaziz Mahoui (am14@cs.waikato.ac.nz) * @version $Revision 1.0 $ */ public interface KStarConstants { /** Some useful constants */ int ON = 1; int OFF = 0; int NUM_RAND_COLS = 5; double FLOOR = 0.0; double FLOOR1 = 0.1; double INITIAL_STEP = 0.05; double LOG2 = 0.693147181; double EPSILON = 1.0e-5; /** How close the root finder for the numeric and nominal attributes has to get */ int ROOT_FINDER_MAX_ITER = 40; double ROOT_FINDER_ACCURACY = 0.01; /** Blend setting modes */ int B_SPHERE = 1; /* Use sphere of influence */ int B_ENTROPY = 2; /* Use entropic blend setting */ /** Missing value handling mode */ /* Ignore the instance with the missing value */ int M_DELETE = 1; /* Treat missing values as maximally different */ int M_MAXDIFF = 2; /* Normalize over the attributes */ int M_NORMAL = 3; /* Average column entropy curves */ int M_AVERAGE = 4; }
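A short sketch of how these constants are typically consumed, here selecting the maximally-different missing-value treatment on KStar via a SelectedTag (equivalent to -M m on the command line):

import weka.classifiers.lazy.KStar;
import weka.classifiers.lazy.kstar.KStarConstants;
import weka.core.SelectedTag;

public class MissingModeSketch {
  public static void main(String[] args) {
    KStar ks = new KStar();
    // Treat missing values as maximally different.
    ks.setMissingMode(new SelectedTag(KStarConstants.M_MAXDIFF, KStar.TAGS_MISSING));
    System.out.println(ks.getMissingMode().getSelectedTag().getReadable());
  }
}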
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/lazy
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/lazy/kstar/KStarNominalAttribute.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /** * KStarNominalAttribute.java * Copyright (C) 1995-2012 University of Waikato * Java port to Weka by Abdelaziz Mahoui (am14@cs.waikato.ac.nz). * */ package weka.classifiers.lazy.kstar; import weka.core.Attribute; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * A custom class which provides the environment for computing the transformation probability of a specified test instance nominal attribute to a specified train instance nominal attribute. * * @author Len Trigg (len@reeltwo.com) * @author Abdelaziz Mahoui (am14@cs.waikato.ac.nz) * @version $Revision 1.0 $ */ public class KStarNominalAttribute implements KStarConstants, RevisionHandler { /** The training instances used for classification. */ protected Instances m_TrainSet; /** The test instance */ protected Instance m_Test; /** The train instance */ protected Instance m_Train; /** The index of the nominal attribute in the test and train instances */ protected int m_AttrIndex; /** The stop parameter */ protected double m_Stop = 1.0; /** * Probability of test attribute transforming into train attribute with missing value */ protected double m_MissingProb = 1.0; /** * Average probability of test attribute transforming into train attribute */ protected double m_AverageProb = 1.0; /** * Smallest probability of test attribute transforming into train attribute */ protected double m_SmallestProb = 1.0; /** Number of train instances with no missing attribute values */ protected int m_TotalCount; /** Distribution of the attribute value in the train dataset */ protected int[] m_Distribution; /** * Set of columns: each column representing a randomised version of the train dataset class column */ protected int[][] m_RandClassCols; /** * A cache for storing attribute values and their corresponding stop parameters */ protected KStarCache m_Cache; // KStar Global settings /** The number of instances in the dataset */ protected int m_NumInstances; /** The number of class values */ protected int m_NumClasses; /** The number of attributes */ protected int m_NumAttributes; /** The class attribute type */ protected int m_ClassType; /** missing value treatment */ protected int m_MissingMode = M_AVERAGE; /** B_SPHERE = use specified blend, B_ENTROPY = entropic blend setting */ protected int m_BlendMethod = B_SPHERE; /** default sphere of influence blend setting */ protected int m_BlendFactor = 20; /** * Constructor */ public KStarNominalAttribute(final Instance test, final Instance train, final int attrIndex, final Instances trainSet, final int[][] randClassCol, final KStarCache cache) { this.m_Test = test; this.m_Train = train; this.m_AttrIndex = attrIndex; this.m_TrainSet = trainSet; this.m_RandClassCols = randClassCol; this.m_Cache = cache; this.init(); } /** * Initializes the m_Attributes of the class.
*/ private void init() { try { this.m_NumInstances = this.m_TrainSet.numInstances(); this.m_NumClasses = this.m_TrainSet.numClasses(); this.m_NumAttributes = this.m_TrainSet.numAttributes(); this.m_ClassType = this.m_TrainSet.classAttribute().type(); } catch (Exception e) { e.printStackTrace(); } } /** * Calculates the probability of the indexed nominal attribute of the test instance transforming into the indexed nominal attribute of the training instance. * * @return the value of the transformation probability. * @throws Exception */ public double transProb() throws Exception { double transProb = 0.0; // check if the attribute value has been encountered before // in which case it should be in the nominal cache if (this.m_Cache.containsKey(this.m_Test.value(this.m_AttrIndex))) { KStarCache.TableEntry te = this.m_Cache.getCacheValues(this.m_Test.value(this.m_AttrIndex)); this.m_Stop = te.value; this.m_MissingProb = te.pmiss; } else { this.generateAttrDistribution(); // we have to compute the parameters if (this.m_BlendMethod == B_ENTROPY) { this.m_Stop = this.stopProbUsingEntropy(); } else { // default is B_SPHERE this.m_Stop = this.stopProbUsingBlend(); } // store the values in cache this.m_Cache.store(this.m_Test.value(this.m_AttrIndex), this.m_Stop, this.m_MissingProb); } // we've got our m_Stop, then what? if (this.m_Train.isMissing(this.m_AttrIndex)) { transProb = this.m_MissingProb; } else { try { transProb = (1.0 - this.m_Stop) / this.m_Test.attribute(this.m_AttrIndex).numValues(); if ((int) this.m_Test.value(this.m_AttrIndex) == (int) this.m_Train.value(this.m_AttrIndex)) { transProb += this.m_Stop; } } catch (Exception e) { e.printStackTrace(); } } return transProb; } /** * Calculates the "stop parameter" for this attribute using the entropy method: the value is computed using a root finder algorithm. The method takes advantage of the calculation to compute the smallest and average transformation * probabilities once the stop factor is obtained. It also sets the transformation probability to an attribute with a missing value. * * @return the value of the stop parameter. * @throws Exception * */ private double stopProbUsingEntropy() throws Exception { String debug = "(KStarNominalAttribute.stopProbUsingEntropy)"; if (this.m_ClassType != Attribute.NOMINAL) { System.err.println("Error: " + debug + " attribute class must be nominal!"); throw new Exception(); } int itcount = 0; double stopProb; double lower, upper, pstop; double bestminprob = 0.0, bestpsum = 0.0; double bestdiff = 0.0, bestpstop = 0.0; double currentdiff, lastdiff, stepsize, delta; KStarWrapper botvals = new KStarWrapper(); KStarWrapper upvals = new KStarWrapper(); KStarWrapper vals = new KStarWrapper(); // Initial values for root finder lower = 0.0 + ROOT_FINDER_ACCURACY / 2.0; upper = 1.0 - ROOT_FINDER_ACCURACY / 2.0; // Find (approx) entropy ranges this.calculateEntropy(upper, upvals); this.calculateEntropy(lower, botvals); if (upvals.avgProb == 0) { // When there are no training instances with the test value: // doesn't matter what exact value we use for pstop, just acts as // a constant scale factor in this case.
this.calculateEntropy(lower, vals); } else { // Optimise the scale factor if ((upvals.randEntropy - upvals.actEntropy < botvals.randEntropy - botvals.actEntropy) && (botvals.randEntropy - botvals.actEntropy > FLOOR)) { bestpstop = pstop = lower; stepsize = INITIAL_STEP; bestminprob = botvals.minProb; bestpsum = botvals.avgProb; } else { bestpstop = pstop = upper; stepsize = -INITIAL_STEP; bestminprob = upvals.minProb; bestpsum = upvals.avgProb; } bestdiff = currentdiff = FLOOR; itcount = 0; /* Enter the root finder */ while (true) { itcount++; lastdiff = currentdiff; pstop += stepsize; if (pstop <= lower) { pstop = lower; currentdiff = 0.0; delta = -1.0; } else if (pstop >= upper) { pstop = upper; currentdiff = 0.0; delta = -1.0; } else { this.calculateEntropy(pstop, vals); currentdiff = vals.randEntropy - vals.actEntropy; if (currentdiff < FLOOR) { currentdiff = FLOOR; if ((Math.abs(stepsize) < INITIAL_STEP) && (bestdiff == FLOOR)) { bestpstop = lower; bestminprob = botvals.minProb; bestpsum = botvals.avgProb; break; } } delta = currentdiff - lastdiff; } if (currentdiff > bestdiff) { bestdiff = currentdiff; bestpstop = pstop; bestminprob = vals.minProb; bestpsum = vals.avgProb; } if (delta < 0) { if (Math.abs(stepsize) < ROOT_FINDER_ACCURACY) { break; } else { stepsize /= -2.0; } } if (itcount > ROOT_FINDER_MAX_ITER) { break; } } } this.m_SmallestProb = bestminprob; this.m_AverageProb = bestpsum; // Set the probability of transforming to a missing value switch (this.m_MissingMode) { case M_DELETE: this.m_MissingProb = 0.0; break; case M_NORMAL: this.m_MissingProb = 1.0; break; case M_MAXDIFF: this.m_MissingProb = this.m_SmallestProb; break; case M_AVERAGE: this.m_MissingProb = this.m_AverageProb; break; } if (Math.abs(bestpsum - this.m_TotalCount) < EPSILON) { // No difference in the values stopProb = 1.0; } else { stopProb = bestpstop; } return stopProb; } /** * Calculates the entropy of the actual class prediction and the entropy for random class prediction. It also calculates the smallest and average transformation probabilities. * * @param stop * the stop parameter * @param params * the object wrapper for the parameters: actual entropy, random entropy, average probability and smallest probability. * @return the values are returned in the object "params". * */ private void calculateEntropy(final double stop, final KStarWrapper params) { int i, j, k; Instance train; double actent = 0.0, randent = 0.0; double pstar, tprob, psum = 0.0, minprob = 1.0; double actClassProb, randClassProb; double[][] pseudoClassProb = new double[NUM_RAND_COLS + 1][this.m_NumClasses]; // init ... 
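// Layout of pseudoClassProb: rows 0 .. NUM_RAND_COLS-1 accumulate
// transformation-probability mass under the randomized class columns in
// m_RandClassCols, while row NUM_RAND_COLS uses the original class labels.
// The gap between the averaged random-column entropy and the actual-column
// entropy is the quantity the stop-parameter search tries to maximise.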
for (j = 0; j <= NUM_RAND_COLS; j++) { for (i = 0; i < this.m_NumClasses; i++) { pseudoClassProb[j][i] = 0.0; } } for (i = 0; i < this.m_NumInstances; i++) { train = this.m_TrainSet.instance(i); if (!train.isMissing(this.m_AttrIndex)) { pstar = this.PStar(this.m_Test, train, this.m_AttrIndex, stop); tprob = pstar / this.m_TotalCount; if (pstar < minprob) { minprob = pstar; } psum += tprob; // filter instances with same class value for (k = 0; k <= NUM_RAND_COLS; k++) { // instance i is assigned a random class value in column k; // column k = NUM_RAND_COLS contains the original mapping: // instance -> class value pseudoClassProb[k][this.m_RandClassCols[k][i]] += tprob; } } } // compute the actual entropy using the class probs // with the original class value mapping (column NUM_RAND_COLS) for (j = this.m_NumClasses - 1; j >= 0; j--) { actClassProb = pseudoClassProb[NUM_RAND_COLS][j] / psum; if (actClassProb > 0) { actent -= actClassProb * Math.log(actClassProb) / LOG2; } } // compute a random entropy using the pseudo class probs // excluding the column NUM_RAND_COLS for (k = 0; k < NUM_RAND_COLS; k++) { for (i = this.m_NumClasses - 1; i >= 0; i--) { randClassProb = pseudoClassProb[k][i] / psum; if (randClassProb > 0) { randent -= randClassProb * Math.log(randClassProb) / LOG2; } } } randent /= NUM_RAND_COLS; // return the results params.actEntropy = actent; params.randEntropy = randent; params.avgProb = psum; params.minProb = minprob; } /** * Calculates the "stop parameter" for this attribute using the blend method: the value is computed using a root finder algorithm. The method takes advantage of this calculation to compute the smallest and average transformation * probabilities once the stop factor is obtained. It also sets the transformation probability to an attribute with a missing value. * * @return the value of the stop parameter. * */ private double stopProbUsingBlend() { int itcount = 0; double stopProb, aimfor; double lower, upper, tstop; KStarWrapper botvals = new KStarWrapper(); KStarWrapper upvals = new KStarWrapper(); KStarWrapper vals = new KStarWrapper(); int testvalue = (int) this.m_Test.value(this.m_AttrIndex); aimfor = (this.m_TotalCount - this.m_Distribution[testvalue]) * (double) this.m_BlendFactor / 100.0 + this.m_Distribution[testvalue]; // Initial values for root finder tstop = 1.0 - this.m_BlendFactor / 100.0; lower = 0.0 + ROOT_FINDER_ACCURACY / 2.0; upper = 1.0 - ROOT_FINDER_ACCURACY / 2.0; // Find out function border values this.calculateSphereSize(testvalue, lower, botvals); botvals.sphere -= aimfor; this.calculateSphereSize(testvalue, upper, upvals); upvals.sphere -= aimfor; if (upvals.avgProb == 0) { // When there are no training instances with the test value: // doesn't matter what exact value we use for tstop, just acts as // a constant scale factor in this case.
this.calculateSphereSize(testvalue, tstop, vals); } else if (upvals.sphere > 0) { // Can't include aimfor instances, going for min possible tstop = upper; vals.avgProb = upvals.avgProb; } else { // Enter the root finder for (;;) { itcount++; this.calculateSphereSize(testvalue, tstop, vals); vals.sphere -= aimfor; if (Math.abs(vals.sphere) <= ROOT_FINDER_ACCURACY || itcount >= ROOT_FINDER_MAX_ITER) { break; } if (vals.sphere > 0.0) { lower = tstop; tstop = (upper + lower) / 2.0; } else { upper = tstop; tstop = (upper + lower) / 2.0; } } } this.m_SmallestProb = vals.minProb; this.m_AverageProb = vals.avgProb; // Set the probability of transforming to a missing value switch (this.m_MissingMode) { case M_DELETE: this.m_MissingProb = 0.0; break; case M_NORMAL: this.m_MissingProb = 1.0; break; case M_MAXDIFF: this.m_MissingProb = this.m_SmallestProb; break; case M_AVERAGE: this.m_MissingProb = this.m_AverageProb; break; } if (Math.abs(vals.avgProb - this.m_TotalCount) < EPSILON) { // No difference in the values stopProb = 1.0; } else { stopProb = tstop; } return stopProb; } /** * Calculates the size of the "sphere of influence" defined as: sphere = sum(P)^2/sum(P^2), where P(i|j) = (1-stop)*P(i) + ((i==j)?stop:0). This method takes advantage of the calculation to compute the values of the "smallest" and "average" * transformation probabilities when using the specified stop parameter. * * @param testvalue * the value of the test instance * @param stop * the stop parameter * @param params * a wrapper of the parameters to be computed: "sphere" the sphere size "avgprob" the average transformation probability "minProb" the smallest transformation probability * @return the values are returned in "params" object. * */ private void calculateSphereSize(final int testvalue, final double stop, final KStarWrapper params) { int i, thiscount; double tprob, tval = 0.0, t1 = 0.0; double sphere, minprob = 1.0, transprob = 0.0; for (i = 0; i < this.m_Distribution.length; i++) { thiscount = this.m_Distribution[i]; if (thiscount != 0) { if (testvalue == i) { tprob = (stop + (1 - stop) / this.m_Distribution.length) / this.m_TotalCount; tval += tprob * thiscount; t1 += tprob * tprob * thiscount; } else { tprob = ((1 - stop) / this.m_Distribution.length) / this.m_TotalCount; tval += tprob * thiscount; t1 += tprob * tprob * thiscount; } if (minprob > tprob * this.m_TotalCount) { minprob = tprob * this.m_TotalCount; } } } transprob = tval; sphere = (t1 == 0) ? 0 : ((tval * tval) / t1); // return the values params.sphere = sphere; params.avgProb = transprob; params.minProb = minprob; } /** * Calculates the nominal probability function defined as: P(i|j) = (1-stop) * P(i) + ((i==j) ? stop : 0) In this case, it calculates the transformation probability of the indexed test attribute to the indexed train attribute. * * @param test * the test instance * @param train * the train instance * @param col * the attribute index * @param stop * the stop parameter * @return the value of the transformation probability. * */ private double PStar(final Instance test, final Instance train, final int col, final double stop) { double pstar; int numvalues = 0; try { numvalues = test.attribute(col).numValues(); } catch (Exception ex) { ex.printStackTrace(); } if ((int) test.value(col) == (int) train.value(col)) { pstar = stop + (1 - stop) / numvalues; } else { pstar = (1 - stop) / numvalues; } return pstar; } /** * Calculates the distribution, in the dataset, of the indexed nominal attribute values.
It also counts the actual number of training instances that contributed (those with non-missing values) to calculate the distribution. */ private void generateAttrDistribution() throws InterruptedException { this.m_Distribution = new int[this.m_TrainSet.attribute(this.m_AttrIndex).numValues()]; int i; Instance train; for (i = 0; i < this.m_NumInstances; i++) { // XXX interrupt weka if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } train = this.m_TrainSet.instance(i); if (!train.isMissing(this.m_AttrIndex)) { this.m_TotalCount++; this.m_Distribution[(int) train.value(this.m_AttrIndex)]++; } } } /** * Sets the options. * */ public void setOptions(final int missingmode, final int blendmethod, final int blendfactor) { this.m_MissingMode = missingmode; this.m_BlendMethod = blendmethod; this.m_BlendFactor = blendfactor; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } // class
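Editor's note: a minimal, self-contained sketch of the nominal transformation probability computed by transProb()/PStar() above. The class name and the stop/value settings in main() are hypothetical, chosen only to illustrate the formula P*(i|j) = stop*[i==j] + (1-stop)/k for an attribute with k values; this is not part of the Weka API.

// Illustrative sketch (hypothetical values): for an attribute with k possible
// values and stop parameter s, transforming value j into value i has
// probability (1 - s)/k, plus an extra s when i == j.
public class NominalPStarSketch {
    static double pStar(int testValue, int trainValue, int numValues, double stop) {
        double p = (1.0 - stop) / numValues;
        if (testValue == trainValue) {
            p += stop;
        }
        return p;
    }

    public static void main(String[] args) {
        int k = 3;          // hypothetical attribute with 3 values
        double stop = 0.8;  // hypothetical stop parameter
        double sum = 0.0;
        for (int i = 0; i < k; i++) {
            double p = pStar(i, 0, k, stop);
            System.out.printf("P*(%d|0) = %.4f%n", i, p);
            sum += p;
        }
        // (1 - s)/k over all k values plus s once: always sums to 1
        System.out.printf("sum = %.4f%n", sum);
    }
}

Because the k probabilities sum to one by construction, caching only the stop parameter per attribute value (as transProb() does via KStarCache) is enough to recover the full transformation distribution.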
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/lazy
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/lazy/kstar/KStarNumericAttribute.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /** * KStarNumericAttribute.java * Copyright (C) 1995-2012 University of Waikato * Java port to Weka by Abdelaziz Mahoui (am14@cs.waikato.ac.nz). * */ package weka.classifiers.lazy.kstar; import weka.core.Attribute; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * A custom class which provides the environment for computing the * transformation probability of a specified test instance numeric attribute to * a specified train instance numeric attribute. * * @author Len Trigg (len@reeltwo.com) * @author Abdelaziz Mahoui (am14@cs.waikato.ac.nz) * @version $Revision 1.0 $ */ public class KStarNumericAttribute implements KStarConstants, RevisionHandler { /** The training instances used for classification. */ protected Instances m_TrainSet; /** The test instance */ protected Instance m_Test; /** The train instance */ protected Instance m_Train; /** The index of the attribute in the test and train instances */ protected int m_AttrIndex; /** The scale parameter */ protected double m_Scale = 1.0; /** * Probability of test attribute transforming into train attribute with * missing value */ protected double m_MissingProb = 1.0; /** * Average probability of test attribute transforming into train attribute */ protected double m_AverageProb = 1.0; /** * Smallest probability of test attribute transforming into train attribute */ protected double m_SmallestProb = 1.0; /** * The set of distances from the test attribute to the set of train * attributes */ protected double[] m_Distances; /** * Set of columns: each column representing a randomised version of the train * dataset class column */ protected int[][] m_RandClassCols; /** The number of train instances with no missing attribute values */ protected int m_ActualCount = 0; /** * A cache for storing attribute values and their corresponding scale * parameters */ protected KStarCache m_Cache; /** The number of instances in the dataset */ protected int m_NumInstances; /** The number of class values */ protected int m_NumClasses; /** The number of attributes */ protected int m_NumAttributes; /** The class attribute type */ protected int m_ClassType; /** missing value treatment */ protected int m_MissingMode = M_AVERAGE; /** 0 = use specified blend, 1 = entropic blend setting */ protected int m_BlendMethod = B_SPHERE; /** default sphere of influence blend setting */ protected int m_BlendFactor = 20; /** * Constructor */ public KStarNumericAttribute(final Instance test, final Instance train, final int attrIndex, final Instances trainSet, final int[][] randClassCols, final KStarCache cache) { this.m_Test = test; this.m_Train = train; this.m_AttrIndex = attrIndex; this.m_TrainSet = trainSet; this.m_RandClassCols = randClassCols; this.m_Cache = cache; this.init(); } /** * Initializes the m_Attributes of the class.
*/ private void init() { try { this.m_NumInstances = this.m_TrainSet.numInstances(); this.m_NumClasses = this.m_TrainSet.numClasses(); this.m_NumAttributes = this.m_TrainSet.numAttributes(); this.m_ClassType = this.m_TrainSet.classAttribute().type(); } catch (Exception e) { e.printStackTrace(); } } /** * Calculates the transformation probability of the attribute indexed * "m_AttrIndex" in test instance "m_Test" to the same attribute in the train * instance "m_Train". * * @return the probability value * @throws Exception */ public double transProb() throws Exception { double transProb, distance; // check if the attribute value has been encountered before // in which case it should be in the numeric cache if (this.m_Cache.containsKey(this.m_Test.value(this.m_AttrIndex))) { KStarCache.TableEntry te = this.m_Cache.getCacheValues(this.m_Test.value(this.m_AttrIndex)); this.m_Scale = te.value; this.m_MissingProb = te.pmiss; } else { if (this.m_BlendMethod == B_ENTROPY) { this.m_Scale = this.scaleFactorUsingEntropy(); } else { // default is B_SPHERE this.m_Scale = this.scaleFactorUsingBlend(); } this.m_Cache.store(this.m_Test.value(this.m_AttrIndex), this.m_Scale, this.m_MissingProb); } // compute the transformation probability using the scale factor if (this.m_Train.isMissing(this.m_AttrIndex)) { transProb = this.m_MissingProb; } else { distance = Math.abs(this.m_Test.value(this.m_AttrIndex) - this.m_Train.value(this.m_AttrIndex)); transProb = this.PStar(distance, this.m_Scale); } return transProb; } /** * Calculates the scale factor for the attribute indexed "m_AttrIndex" in test * instance "m_Test" using a global blending factor (default value is 20%). * * @return the scale factor value */ private double scaleFactorUsingBlend() { int i, j, lowestcount = 0; double lowest = -1.0, nextlowest = -1.0; double root, broot, up, bot; double aimfor, min_val = 9e300, scale = 1.0; double avgprob = 0.0, minprob = 0.0, min_pos = 0.0; KStarWrapper botvals = new KStarWrapper(); KStarWrapper upvals = new KStarWrapper(); KStarWrapper vals = new KStarWrapper(); this.m_Distances = new double[this.m_NumInstances]; for (j = 0; j < this.m_NumInstances; j++) { if (this.m_TrainSet.instance(j).isMissing(this.m_AttrIndex)) { // mark the train instance with a missing value by setting // the distance to -1.0 this.m_Distances[j] = -1.0; } else { this.m_Distances[j] = Math.abs(this.m_TrainSet.instance(j).value(this.m_AttrIndex) - this.m_Test.value(this.m_AttrIndex)); if ((this.m_Distances[j] + 1e-5) < nextlowest || nextlowest == -1.0) { if ((this.m_Distances[j] + 1e-5) < lowest || lowest == -1.0) { nextlowest = lowest; lowest = this.m_Distances[j]; lowestcount = 1; } else if (Math.abs(this.m_Distances[j] - lowest) < 1e-5) { // record the number of training instances (number n0) at // the smallest distance from the test instance lowestcount++; } else { nextlowest = this.m_Distances[j]; } } // records the actual number of instances with no missing value this.m_ActualCount++; } } if (nextlowest == -1 || lowest == -1) { // Data values are all the same scale = 1.0; this.m_SmallestProb = this.m_AverageProb = 1.0; return scale; } else { // starting point for root root = 1.0 / (nextlowest - lowest); i = 0; // given the expression: n0 <= E(scale) <= N // E(scale) = (N - n0) * b + n0 with blending factor: 0 <= b <= 1 // aimfor = (N - n0) * b + n0 aimfor = (this.m_ActualCount - lowestcount) * (double) this.m_BlendFactor / 100.0 + lowestcount; if (this.m_BlendFactor == 0) { aimfor += 1.0; } // root is bracketed in interval [bot,up] bot = 0.0 + ROOT_FINDER_ACCURACY / 2.0; up = root * 16; //
This is bodgy // E(bot) this.calculateSphereSize(bot, botvals); botvals.sphere -= aimfor; // E(up) this.calculateSphereSize(up, upvals); upvals.sphere -= aimfor; if (botvals.sphere < 0) { // Couldn't include that many // instances - going for max possible min_pos = bot; avgprob = botvals.avgProb; minprob = botvals.minProb; } else if (upvals.sphere > 0) { // Couldn't include that few, // going for min possible min_pos = up; avgprob = upvals.avgProb; minprob = upvals.minProb; } else { // Root finding Algorithm starts here ! for (;;) { this.calculateSphereSize(root, vals); vals.sphere -= aimfor; if (Math.abs(vals.sphere) < min_val) { min_val = Math.abs(vals.sphere); min_pos = root; avgprob = vals.avgProb; minprob = vals.minProb; } if (Math.abs(vals.sphere) <= ROOT_FINDER_ACCURACY) { break; // converged to a solution, done! } if (vals.sphere > 0.0) { broot = (root + up) / 2.0; bot = root; root = broot; } else { broot = (root + bot) / 2.0; up = root; root = broot; } i++; if (i > ROOT_FINDER_MAX_ITER) { // System.err.println("Warning: "+debug+" // ROOT_FINDER_MAX_ITER exceeded"); root = min_pos; break; } } } this.m_SmallestProb = minprob; this.m_AverageProb = avgprob; // Set the probability of transforming to a missing value switch (this.m_MissingMode) { case M_DELETE: this.m_MissingProb = 0.0; break; case M_NORMAL: this.m_MissingProb = 1.0; break; case M_MAXDIFF: this.m_MissingProb = this.m_SmallestProb; break; case M_AVERAGE: this.m_MissingProb = this.m_AverageProb; break; } // set the scale factor value scale = min_pos; return scale; } } /** * Calculates the size of the "sphere of influence" defined as: sphere = * sum(P)^2/sum(P^2) where P(i) = root*exp(-2*i*root). Since there are n * different training instances we multiply P(i) by 1/n. */ private void calculateSphereSize(final double scale, final KStarWrapper params) { int i; double sphereSize, minprob = 1.0; double pstar; // P*(b|a) double pstarSum = 0.0; // sum(P*) double pstarSquareSum = 0.0; // sum(P*^2) double inc; for (i = 0; i < this.m_NumInstances; i++) { if (this.m_Distances[i] < 0) { // instance with missing value continue; } else { pstar = this.PStar(this.m_Distances[i], scale); if (minprob > pstar) { minprob = pstar; } inc = pstar / this.m_ActualCount; pstarSum += inc; pstarSquareSum += inc * inc; } } sphereSize = (pstarSquareSum == 0 ? 0 : pstarSum * pstarSum / pstarSquareSum); // return the values params.sphere = sphereSize; params.avgProb = pstarSum; params.minProb = minprob; } /** * Calculates the scale factor using entropy. 
* * @return the scale factor value * @throws Exception */ private double scaleFactorUsingEntropy() throws Exception { String debug = "(KStarNumericAttribute.scaleFactorUsingEntropy)"; if (this.m_ClassType != Attribute.NOMINAL) { System.err.println("Error: " + debug + " attribute class must be nominal!"); throw new Exception(); } int j, itcount; double lowest = -1.0, nextlowest = -1.0; double root, up, bot, stepsize, delta; double randscale; double bestdiff, bestroot, currentdiff, lastdiff; double bestpsum, bestminprob, scale = 1.0; KStarWrapper botvals = new KStarWrapper(); KStarWrapper upvals = new KStarWrapper(); KStarWrapper vals = new KStarWrapper(); this.m_Distances = new double[this.m_NumInstances]; for (j = 0; j < this.m_NumInstances; j++) { if (this.m_TrainSet.instance(j).isMissing(this.m_AttrIndex)) { // mark the train instance with a missing value by setting // the distance to -1.0 this.m_Distances[j] = -1.0; } else { this.m_Distances[j] = Math.abs(this.m_TrainSet.instance(j).value(this.m_AttrIndex) - this.m_Test.value(this.m_AttrIndex)); if ((this.m_Distances[j] + 1e-5) < nextlowest || nextlowest == -1.0) { if ((this.m_Distances[j] + 1e-5) < lowest || lowest == -1.0) { nextlowest = lowest; lowest = this.m_Distances[j]; } else if (Math.abs(this.m_Distances[j] - lowest) < 1e-5) { } else { nextlowest = this.m_Distances[j]; } } // records the actual number of instances with no missing value this.m_ActualCount++; } } // for if (nextlowest == -1 || lowest == -1) { // Data values are all the same scale = 1.0; this.m_SmallestProb = this.m_AverageProb = 1.0; return scale; } else { // starting point for root root = 1.0 / (nextlowest - lowest); // root is bracketed in interval [bot,up] bot = 0.0 + ROOT_FINDER_ACCURACY / 2; up = root * 8; // This is bodgy // Find (approx) entropy ranges this.calculateEntropy(up, upvals); this.calculateEntropy(bot, botvals); randscale = botvals.randEntropy - upvals.randEntropy; // Optimise the scale factor bestroot = root = bot; bestdiff = currentdiff = FLOOR1; bestpsum = botvals.avgProb; bestminprob = botvals.minProb; stepsize = (up - bot) / 20.0; itcount = 0; // Root finding algorithm starts here! 
while (true) { itcount++; lastdiff = currentdiff; root += Math.log(root + 1.0) * stepsize; if (root <= bot) { root = bot; currentdiff = 0.0; delta = -1.0; } else if (root >= up) { root = up; currentdiff = 0.0; delta = -1.0; } else { this.calculateEntropy(root, vals); // Normalise entropies vals.randEntropy = (vals.randEntropy - upvals.randEntropy) / randscale; vals.actEntropy = (vals.actEntropy - upvals.actEntropy) / randscale; currentdiff = vals.randEntropy - vals.actEntropy; if (currentdiff < FLOOR1) { currentdiff = FLOOR1; if (stepsize < 0) { // If we've hit the end and turned around we can't // have found any peaks bestdiff = currentdiff; bestroot = bot; bestpsum = botvals.avgProb; bestminprob = botvals.minProb; break; } } delta = currentdiff - lastdiff; } if (currentdiff > bestdiff) { bestdiff = currentdiff; bestroot = root; bestminprob = vals.minProb; bestpsum = vals.avgProb; } if (delta < 0) { if (Math.abs(stepsize) < ROOT_FINDER_ACCURACY) { break; } else { stepsize /= -4.0; } } if (itcount > ROOT_FINDER_MAX_ITER) { // System.err.println("Warning: "+debug+" ROOT_FINDER_MAX_ITER // exceeded"); break; } } // while this.m_SmallestProb = bestminprob; this.m_AverageProb = bestpsum; // Set the probability of transforming to a missing value switch (this.m_MissingMode) { case M_DELETE: this.m_MissingProb = 0.0; break; case M_NORMAL: this.m_MissingProb = 1.0; break; case M_MAXDIFF: this.m_MissingProb = this.m_SmallestProb; break; case M_AVERAGE: this.m_MissingProb = this.m_AverageProb; break; } // set scale factor scale = bestroot; } // else return scale; } /** * Calculates several parameters aside from the entropy: for a specified scale * factor, calculates the actual entropy, a random entropy using a randomized * set of class value columns, and records the average and smallest * probabilities (for use in missing value case).
*/ private void calculateEntropy(final double scale, final KStarWrapper params) { int i, j, k; double actent = 0.0, randent = 0.0; double pstar, tprob, avgprob = 0.0, minprob = 1.0; double actClassProb, randClassProb; double[][] pseudoClassProbs = new double[NUM_RAND_COLS + 1][this.m_NumClasses]; // init for (j = 0; j <= NUM_RAND_COLS; j++) { for (i = 0; i < this.m_NumClasses; i++) { pseudoClassProbs[j][i] = 0.0; } } for (i = 0; i < this.m_NumInstances; i++) { if (this.m_Distances[i] < 0) { // train instance has missing value continue; } else { pstar = this.PStar(this.m_Distances[i], scale); tprob = pstar / this.m_ActualCount; avgprob += tprob; if (pstar < minprob) { minprob = pstar; } // filter instances with same class value for (k = 0; k <= NUM_RAND_COLS; k++) { // instance i is assigned a random class value in column k; // column k = NUM_RAND_COLS contains the original mapping: // instance -> class value pseudoClassProbs[k][this.m_RandClassCols[k][i]] += tprob; } } } // compute the actual entropy using the class probabilities // with the original class value mapping (column NUM_RAND_COLS) for (j = this.m_NumClasses - 1; j >= 0; j--) { actClassProb = pseudoClassProbs[NUM_RAND_COLS][j] / avgprob; if (actClassProb > 0) { actent -= actClassProb * Math.log(actClassProb) / LOG2; } } // compute a random entropy using the pseudo class probs // excluding the column NUM_RAND_COLS for (k = 0; k < NUM_RAND_COLS; k++) { for (i = this.m_NumClasses - 1; i >= 0; i--) { randClassProb = pseudoClassProbs[k][i] / avgprob; if (randClassProb > 0) { randent -= randClassProb * Math.log(randClassProb) / LOG2; } } } randent /= NUM_RAND_COLS; // return the values params.actEntropy = actent; params.randEntropy = randent; params.avgProb = avgprob; params.minProb = minprob; } /** * Calculates the value of P for a given value x using the expression: P(x) = * scale * exp( -2.0 * x * scale ) * * @param x input value * @param scale the scale factor * @return output of the function P(x) */ private double PStar(final double x, final double scale) { return scale * Math.exp(-2.0 * x * scale); } /** * Set options. * * @param missingmode the missing value treatment to use * @param blendmethod the blending method to use * @param blendfactor the level of blending to use */ public void setOptions(final int missingmode, final int blendmethod, final int blendfactor) { this.m_MissingMode = missingmode; this.m_BlendMethod = blendmethod; this.m_BlendFactor = blendfactor; } /** * Set the missing value mode. * * @param mode the type of missing value treatment to use */ public void setMissingMode(final int mode) { this.m_MissingMode = mode; } /** * Set the blending method * * @param method the blending method to use */ public void setBlendMethod(final int method) { this.m_BlendMethod = method; } /** * Set the blending factor * * @param factor the level of blending to use */ public void setBlendFactor(final int factor) { this.m_BlendFactor = factor; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } // class
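Editor's note: a minimal sketch of the numeric kernel P(x) = scale * exp(-2 * x * scale) used by PStar() above, and of the "sphere of influence" sum(P)^2 / sum(P^2) that the root finders drive towards the blend target. The class name, distances, and scale value are hypothetical, for illustration only.

// Illustrative sketch (hypothetical distances and scale).
public class NumericSphereSketch {
    static double pStar(double x, double scale) {
        return scale * Math.exp(-2.0 * x * scale);
    }

    public static void main(String[] args) {
        double[] distances = {0.0, 0.1, 0.4, 1.5, 3.0}; // hypothetical
        double scale = 2.0;                              // hypothetical
        double sum = 0.0, sumSq = 0.0;
        for (double d : distances) {
            double p = pStar(d, scale) / distances.length; // normalise by n,
            sum += p;                                      // as in calculateSphereSize()
            sumSq += p * p;
        }
        // Large scale -> sphere shrinks towards 1 (only the nearest instance
        // matters); scale -> 0 -> sphere approaches the full count n.
        System.out.printf("sphere size = %.4f%n", (sum * sum) / sumSq);
    }
}

The sphere size is scale-free in the normalisation constant, which is why the comment in stopProbUsingBlend()/scaleFactorUsingBlend() can treat the degenerate no-match case as "just a constant scale factor".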
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/lazy
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/lazy/kstar/KStarWrapper.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /** * KStarWrapper.java * Copyright (C) 1995-2012 University of Waikato * Java port to Weka by Abdelaziz Mahoui (am14@cs.waikato.ac.nz). * */ package weka.classifiers.lazy.kstar; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * @author Len Trigg (len@reeltwo.com) * @author Abdelaziz Mahoui (am14@cs.waikato.ac.nz) * @version $Revision 1.0 $ */ public class KStarWrapper implements RevisionHandler { /** used/reused to hold the sphere size */ public double sphere = 0.0; /** used/reused to hold the actual entropy */ public double actEntropy = 0.0; /** used/reused to hold the random entropy */ public double randEntropy = 0.0; /** used/reused to hold the average transformation probability */ public double avgProb = 0.0; /** used/reused to hold the smallest transformation probability */ public double minProb = 0.0; /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
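Editor's note: KStarWrapper is a plain mutable holder that the entropy/sphere routines reuse as an out-parameter, so four values can be returned per call without allocations. A minimal sketch of that calling convention follows; fillParams() is a hypothetical stand-in for calculateEntropy()/calculateSphereSize(), and its body is placeholder arithmetic.

// Illustrative sketch of the out-parameter pattern (hypothetical method).
import weka.classifiers.lazy.kstar.KStarWrapper;

public class WrapperUsageSketch {
    static void fillParams(double scale, KStarWrapper params) {
        params.sphere = 1.0 / scale; // placeholder computation
        params.avgProb = 0.5;
        params.minProb = 0.1;
    }

    public static void main(String[] args) {
        KStarWrapper vals = new KStarWrapper();
        fillParams(2.0, vals); // caller reads the results off the wrapper
        System.out.println("sphere = " + vals.sphere);
    }
}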
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/meta/AdaBoostM1.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * AdaBoostM1.java * Copyright (C) 1999-2014 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import java.util.Collections; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.Classifier; import weka.classifiers.Evaluation; import weka.classifiers.IterativeClassifier; import weka.classifiers.RandomizableIteratedSingleClassifierEnhancer; import weka.classifiers.Sourcable; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.Randomizable; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; /** * <!-- globalinfo-start --> Class for boosting a nominal class classifier using the Adaboost M1 * method. Only nominal class problems can be tackled. Often dramatically improves performance, but * sometimes overfits.<br/> * <br/> * For more information, see<br/> * <br/> * Yoav Freund, Robert E. Schapire: Experiments with a new boosting algorithm. In: Thirteenth * International Conference on Machine Learning, San Francisco, 148-156, 1996. * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;inproceedings{Freund1996, * address = {San Francisco}, * author = {Yoav Freund and Robert E. Schapire}, * booktitle = {Thirteenth International Conference on Machine Learning}, * pages = {148-156}, * publisher = {Morgan Kaufmann}, * title = {Experiments with a new boosting algorithm}, * year = {1996} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -P &lt;num&gt; * Percentage of weight mass to base training on. * (default 100, reduce to around 90 to speed up) * </pre> * * <pre> * -Q * Use resampling for boosting. * </pre> * * <pre> * -S &lt;num&gt; * Random number seed. * (default 1) * </pre> * * <pre> * -I &lt;num&gt; * Number of iterations. * (default 10) * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -W * Full name of base classifier. * (default: weka.classifiers.trees.DecisionStump) * </pre> * * <pre> * Options specific to classifier weka.classifiers.trees.DecisionStump: * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <!-- options-end --> * * Options after -- are passed to the designated classifier.
* <p> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Len Trigg (trigg@cs.waikato.ac.nz) * @version $Revision$ */ public class AdaBoostM1 extends RandomizableIteratedSingleClassifierEnhancer implements WeightedInstancesHandler, Sourcable, TechnicalInformationHandler, IterativeClassifier { /** for serialization */ static final long serialVersionUID = -1178107808933117974L; /** Max num iterations tried to find classifier with non-zero error. */ private static int MAX_NUM_RESAMPLING_ITERATIONS = 10; /** Array for storing the weights for the votes. */ protected double[] m_Betas; /** The number of successfully generated base classifiers. */ protected int m_NumIterationsPerformed; /** Weight Threshold. The percentage of weight mass used in training */ protected int m_WeightThreshold = 100; /** Use boosting with resampling? */ protected boolean m_UseResampling; /** The number of classes */ protected int m_NumClasses; /** a ZeroR model in case no model can be built from the data */ protected Classifier m_ZeroR; /** The (weighted) training data */ protected Instances m_TrainingData; /** Random number generator to be used for resampling */ protected Random m_RandomInstance; /** * Constructor. */ public AdaBoostM1() { this.m_Classifier = new weka.classifiers.trees.DecisionStump(); } /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for boosting a nominal class classifier using the Adaboost " + "M1 method. Only nominal class problems can be tackled. Often " + "dramatically improves performance, but sometimes overfits.\n\n" + "For more information, see\n\n" + this.getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed information about the * technical background of this class, e.g., paper reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Yoav Freund and Robert E. Schapire"); result.setValue(Field.TITLE, "Experiments with a new boosting algorithm"); result.setValue(Field.BOOKTITLE, "Thirteenth International Conference on Machine Learning"); result.setValue(Field.YEAR, "1996"); result.setValue(Field.PAGES, "148-156"); result.setValue(Field.PUBLISHER, "Morgan Kaufmann"); result.setValue(Field.ADDRESS, "San Francisco"); return result; } /** * String describing default classifier.
* * @return the default classifier classname */ @Override protected String defaultClassifierString() { return "weka.classifiers.trees.DecisionStump"; } /** * Select only instances with weights that contribute to the specified quantile of the weight * distribution * * @param data * the input instances * @param quantile * the specified quantile, e.g., 0.9 to select 90% of the weight mass * @return the selected instances * @throws InterruptedException */ protected Instances selectWeightQuantile(final Instances data, final double quantile) throws InterruptedException { int numInstances = data.numInstances(); Instances trainData = new Instances(data, numInstances); double[] weights = new double[numInstances]; double sumOfWeights = 0; for (int i = 0; i < numInstances; i++) { weights[i] = data.instance(i).weight(); sumOfWeights += weights[i]; } double weightMassToSelect = sumOfWeights * quantile; int[] sortedIndices = Utils.sort(weights); // Select the instances sumOfWeights = 0; for (int i = numInstances - 1; i >= 0; i--) { Instance instance = (Instance) data.instance(sortedIndices[i]).copy(); trainData.add(instance); sumOfWeights += weights[sortedIndices[i]]; if ((sumOfWeights > weightMassToSelect) && (i > 0) && (weights[sortedIndices[i]] != weights[sortedIndices[i - 1]])) { break; } } if (this.m_Debug) { System.err.println("Selected " + trainData.numInstances() + " out of " + numInstances); } return trainData; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<>(); newVector.addElement(new Option("\tPercentage of weight mass to base training on.\n" + "\t(default 100, reduce to around 90 to speed up)", "P", 1, "-P <num>")); newVector.addElement(new Option("\tUse resampling for boosting.", "Q", 0, "-Q")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -P &lt;num&gt; * Percentage of weight mass to base training on. * (default 100, reduce to around 90 to speed up) * </pre> * * <pre> * -Q * Use resampling for boosting. * </pre> * * <pre> * -S &lt;num&gt; * Random number seed. * (default 1) * </pre> * * <pre> * -I &lt;num&gt; * Number of iterations. * (default 10) * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -W * Full name of base classifier. * (default: weka.classifiers.trees.DecisionStump) * </pre> * * <pre> * Options specific to classifier weka.classifiers.trees.DecisionStump: * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <!-- options-end --> * * Options after -- are passed to the designated classifier. * <p> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String thresholdString = Utils.getOption('P', options); if (thresholdString.length() != 0) { this.setWeightThreshold(Integer.parseInt(thresholdString)); } else { this.setWeightThreshold(100); } this.setUseResampling(Utils.getFlag('Q', options)); super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the Classifier.
* * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> result = new Vector<>(); if (this.getUseResampling()) { result.add("-Q"); } result.add("-P"); result.add("" + this.getWeightThreshold()); Collections.addAll(result, super.getOptions()); return result.toArray(new String[result.size()]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String weightThresholdTipText() { return "Weight threshold for weight pruning."; } /** * Set weight threshold * * @param threshold * the percentage of weight mass used for training */ public void setWeightThreshold(final int threshold) { this.m_WeightThreshold = threshold; } /** * Get the degree of weight thresholding * * @return the percentage of weight mass used for training */ public int getWeightThreshold() { return this.m_WeightThreshold; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String useResamplingTipText() { return "Whether resampling is used instead of reweighting."; } /** * Set resampling mode * * @param r * true if resampling should be done */ public void setUseResampling(final boolean r) { this.m_UseResampling = r; } /** * Get whether resampling is turned on * * @return true if resampling output is on */ public boolean getUseResampling() { return this.m_UseResampling; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); // class result.disableAllClasses(); result.disableAllClassDependencies(); if (super.getCapabilities().handles(Capability.NOMINAL_CLASS)) { result.enable(Capability.NOMINAL_CLASS); } if (super.getCapabilities().handles(Capability.BINARY_CLASS)) { result.enable(Capability.BINARY_CLASS); } return result; } /** * Method used to build the classifier. */ @Override public void buildClassifier(final Instances data) throws Exception { // Initialize classifier this.initializeClassifier(data); // Perform boosting iterations while (this.next()) { } // Clean up this.done(); } /** * Initialize the classifier. * * @param data * the training data to be used for generating the boosted classifier. * @throws Exception * if the classifier could not be built successfully */ @Override public void initializeClassifier(Instances data) throws Exception { super.buildClassifier(data); // can classifier handle the data? this.getCapabilities().testWithFail(data); // remove instances with missing class data = new Instances(data); data.deleteWithMissingClass(); this.m_ZeroR = new weka.classifiers.rules.ZeroR(); this.m_ZeroR.buildClassifier(data); this.m_NumClasses = data.numClasses(); this.m_Betas = new double[this.m_Classifiers.length]; this.m_NumIterationsPerformed = 0; this.m_TrainingData = new Instances(data); this.m_RandomInstance = new Random(this.m_Seed); if ((this.m_UseResampling) || (!(this.m_Classifier instanceof WeightedInstancesHandler))) { // Normalize weights so that they sum to one and can be used as sampling probabilities double sumProbs = this.m_TrainingData.sumOfWeights(); for (int i = 0; i < this.m_TrainingData.numInstances(); i++) { this.m_TrainingData.instance(i).setWeight(this.m_TrainingData.instance(i).weight() / sumProbs); } } } /** * Perform the next boosting iteration.
* * @throws Exception * if an unforeseen problem occurs */ @Override public boolean next() throws Exception { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } // Have we reached the maximum? if (this.m_NumIterationsPerformed >= this.m_NumIterations) { return false; } // only class? -> just use ZeroR model if (this.m_TrainingData.numAttributes() == 1) { return false; } if (this.m_Debug) { System.err.println("Training classifier " + (this.m_NumIterationsPerformed + 1)); } // Select instances to train the classifier on Instances trainData = null; if (this.m_WeightThreshold < 100) { trainData = this.selectWeightQuantile(this.m_TrainingData, (double) this.m_WeightThreshold / 100); } else { trainData = new Instances(this.m_TrainingData); } double epsilon = 0; if ((this.m_UseResampling) || (!(this.m_Classifier instanceof WeightedInstancesHandler))) { // Resample int resamplingIterations = 0; double[] weights = new double[trainData.numInstances()]; for (int i = 0; i < weights.length; i++) { weights[i] = trainData.instance(i).weight(); } do { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } Instances sample = trainData.resampleWithWeights(this.m_RandomInstance, weights); // Build and evaluate classifier this.m_Classifiers[this.m_NumIterationsPerformed].buildClassifier(sample); Evaluation evaluation = new Evaluation(this.m_TrainingData); evaluation.evaluateModel(this.m_Classifiers[this.m_NumIterationsPerformed], this.m_TrainingData); epsilon = evaluation.errorRate(); resamplingIterations++; } while (Utils.eq(epsilon, 0) && (resamplingIterations < MAX_NUM_RESAMPLING_ITERATIONS)); } else { // Build the classifier if (this.m_Classifiers[this.m_NumIterationsPerformed] instanceof Randomizable) { ((Randomizable) this.m_Classifiers[this.m_NumIterationsPerformed]).setSeed(this.m_RandomInstance.nextInt()); } this.m_Classifiers[this.m_NumIterationsPerformed].buildClassifier(trainData); // Evaluate the classifier Evaluation evaluation = new Evaluation(this.m_TrainingData); // Does this need to be a copy evaluation.evaluateModel(this.m_Classifiers[this.m_NumIterationsPerformed], this.m_TrainingData); epsilon = evaluation.errorRate(); } // Stop if error too big or 0 if (Utils.grOrEq(epsilon, 0.5) || Utils.eq(epsilon, 0)) { if (this.m_NumIterationsPerformed == 0) { this.m_NumIterationsPerformed = 1; // If we're the first we have to use it } return false; } // Determine the weight to assign to this model double reweight = (1 - epsilon) / epsilon; this.m_Betas[this.m_NumIterationsPerformed] = Math.log(reweight); if (this.m_Debug) { System.err.println("\terror rate = " + epsilon + " beta = " + this.m_Betas[this.m_NumIterationsPerformed]); } // Update instance weights this.setWeights(this.m_TrainingData, reweight); // Model has been built successfully this.m_NumIterationsPerformed++; return true; } /** * Clean up after boosting. */ @Override public void done() { this.m_TrainingData = null; // Can discard ZeroR model if we don't need it anymore if (this.m_NumIterationsPerformed > 0) { this.m_ZeroR = null; } } /** * Sets the weights for the next iteration. 
* * @param training * the training instances * @param reweight * the reweighting factor * @throws Exception * if something goes wrong */ protected void setWeights(final Instances training, final double reweight) throws Exception { double oldSumOfWeights, newSumOfWeights; oldSumOfWeights = training.sumOfWeights(); Enumeration<Instance> enu = training.enumerateInstances(); while (enu.hasMoreElements()) { Instance instance = enu.nextElement(); if (!Utils.eq(this.m_Classifiers[this.m_NumIterationsPerformed].classifyInstance(instance), instance.classValue())) { instance.setWeight(instance.weight() * reweight); } } // Renormalize weights newSumOfWeights = training.sumOfWeights(); enu = training.enumerateInstances(); while (enu.hasMoreElements()) { Instance instance = enu.nextElement(); instance.setWeight(instance.weight() * oldSumOfWeights / newSumOfWeights); } } /** * Calculates the class membership probabilities for the given test instance. * * @param instance * the instance to be classified * @return predicted class probability distribution * @throws Exception * if instance could not be classified successfully */ @Override public double[] distributionForInstance(final Instance instance) throws Exception { // default model? if (this.m_NumIterationsPerformed == 0) { return this.m_ZeroR.distributionForInstance(instance); } double[] sums = new double[instance.numClasses()]; if (this.m_NumIterationsPerformed == 1) { return this.m_Classifiers[0].distributionForInstance(instance); } else { for (int i = 0; i < this.m_NumIterationsPerformed; i++) { sums[(int) this.m_Classifiers[i].classifyInstance(instance)] += this.m_Betas[i]; } return Utils.logs2probs(sums); } } /** * Returns the boosted model as Java source code. * * @param className * the classname of the generated class * @return the tree as Java source code * @throws Exception * if something goes wrong */ @Override public String toSource(final String className) throws Exception { if (this.m_NumIterationsPerformed == 0) { throw new Exception("No model built yet"); } if (!(this.m_Classifiers[0] instanceof Sourcable)) { throw new Exception("Base learner " + this.m_Classifier.getClass().getName() + " is not Sourcable"); } StringBuffer text = new StringBuffer("class "); text.append(className).append(" {\n\n"); text.append(" public static double classify(Object[] i) {\n"); if (this.m_NumIterationsPerformed == 1) { text.append(" return " + className + "_0.classify(i);\n"); } else { text.append(" double [] sums = new double [" + this.m_NumClasses + "];\n"); for (int i = 0; i < this.m_NumIterationsPerformed; i++) { text.append(" sums[(int) " + className + '_' + i + ".classify(i)] += " + this.m_Betas[i] + ";\n"); } text.append(" double maxV = sums[0];\n" + " int maxI = 0;\n" + " for (int j = 1; j < " + this.m_NumClasses + "; j++) {\n" + " if (sums[j] > maxV) { maxV = sums[j]; maxI = j; }\n" + " }\n return (double) maxI;\n"); } text.append(" }\n}\n"); for (int i = 0; i < this.m_Classifiers.length; i++) { text.append(((Sourcable) this.m_Classifiers[i]).toSource(className + '_' + i)); } return text.toString(); } /** * Returns description of the boosted classifier. * * @return description of the boosted classifier as a string */ @Override public String toString() { // only ZeroR model?
if (this.m_NumIterationsPerformed == 0) { StringBuffer buf = new StringBuffer(); if (this.m_ZeroR == null) { buf.append("AdaBoostM1: No model built yet.\n"); } else { buf.append(this.getClass().getName().replaceAll(".*\\.", "") + "\n"); buf.append(this.getClass().getName().replaceAll(".*\\.", "").replaceAll(".", "=") + "\n\n"); buf.append("Warning: No model could be built, hence ZeroR model is used:\n\n"); buf.append(this.m_ZeroR.toString()); } return buf.toString(); } StringBuffer text = new StringBuffer(); if (this.m_NumIterationsPerformed == 1) { text.append("AdaBoostM1: No boosting possible, one classifier used!\n"); text.append(this.m_Classifiers[0].toString() + "\n"); } else { text.append("AdaBoostM1: Base classifiers and their weights: \n\n"); for (int i = 0; i < this.m_NumIterationsPerformed; i++) { text.append(this.m_Classifiers[i].toString() + "\n\n"); text.append("Weight: " + Utils.roundDouble(this.m_Betas[i], 2) + "\n\n"); } text.append("Number of performed Iterations: " + this.m_NumIterationsPerformed + "\n"); } return text.toString(); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. * * @param argv * the options */ public static void main(final String[] argv) { runClassifier(new AdaBoostM1(), argv); } }
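Editor's note: a minimal sketch of the AdaBoost.M1 weight update performed by next() and setWeights() above, isolated from the Weka plumbing. The class name, the weights array, and the error rate are hypothetical, chosen only to make the arithmetic concrete.

// Illustrative sketch (hypothetical values): misclassified instances are
// up-weighted by reweight = (1 - epsilon)/epsilon, all weights are then
// renormalised to the old total, and the model's vote is beta = log(reweight).
public class BoostWeightSketch {
    public static void main(String[] args) {
        double[] weights = {0.25, 0.25, 0.25, 0.25};
        boolean[] misclassified = {true, false, false, false};
        double epsilon = 0.25;                        // hypothetical error rate
        double reweight = (1.0 - epsilon) / epsilon;  // = 3.0
        double beta = Math.log(reweight);             // this model's vote
        double oldSum = 0.0, newSum = 0.0;
        for (double w : weights) { oldSum += w; }
        for (int i = 0; i < weights.length; i++) {
            if (misclassified[i]) { weights[i] *= reweight; }
            newSum += weights[i];
        }
        for (int i = 0; i < weights.length; i++) {    // renormalise
            weights[i] *= oldSum / newSum;
            System.out.printf("w[%d] = %.4f%n", i, weights[i]);
        }
        System.out.printf("beta = %.4f%n", beta);
    }
}

Note how the hard misclassified instance ends up with weight 0.5 (half the total mass), which is exactly why next() stops when epsilon reaches 0.5 or 0: beyond those bounds the update degenerates.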
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/meta/AdditiveRegression.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * AdditiveRegression.java * Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import java.util.ArrayList; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.IteratedSingleClassifierEnhancer; import weka.classifiers.IterativeClassifier; import weka.core.AdditionalMeasureProducer; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.UnassignedClassException; import weka.core.Utils; import weka.core.WeightedInstancesHandler; /** * <!-- globalinfo-start --> Meta classifier that enhances the performance of a regression base * classifier. Each iteration fits a model to the residuals left by the classifier on the previous * iteration. Prediction is accomplished by adding the predictions of each classifier. Reducing the * shrinkage (learning rate) parameter helps prevent overfitting and has a smoothing effect but * increases the learning time.<br/> * <br/> * For more information see:<br/> * <br/> * J.H. Friedman (1999). Stochastic Gradient Boosting. * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;techreport{Friedman1999, * author = {J.H. Friedman}, * institution = {Stanford University}, * title = {Stochastic Gradient Boosting}, * year = {1999}, * PS = {http://www-stat.stanford.edu/\~jhf/ftp/stobst.ps} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -S * Specify shrinkage rate. (default = 1.0, i.e. no shrinkage) * </pre> * * <pre> * -I &lt;num&gt; * Number of iterations. * (default 10) * </pre> * * <pre> * -A * Minimize absolute error instead of squared error (assumes that base learner minimizes absolute error). * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -W * Full name of base classifier.
* (default: weka.classifiers.trees.DecisionStump) * </pre> * * <pre> * * Options specific to classifier weka.classifiers.trees.DecisionStump: * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <!-- options-end --> * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision$ */ public class AdditiveRegression extends IteratedSingleClassifierEnhancer implements OptionHandler, AdditionalMeasureProducer, WeightedInstancesHandler, TechnicalInformationHandler, IterativeClassifier { /** for serialization */ static final long serialVersionUID = -2368937577670527151L; /** * ArrayList for storing the generated base classifiers. Note: we are hiding the variable from * IteratedSingleClassifierEnhancer */ protected ArrayList<Classifier> m_Classifiers; /** Shrinkage (Learning rate). Default = no shrinkage. */ protected double m_shrinkage = 1.0; /** The mean or median */ protected double m_InitialPrediction; /** whether we have suitable data or not (if only mean/mode is used) */ protected boolean m_SuitableData = true; /** The working data */ protected Instances m_Data; /** The sum of (absolute or squared) residuals. */ protected double m_Error; /** The improvement in the sum of (absolute or squared) residuals. */ protected double m_Diff; /** Whether to minimise absolute error instead of squared error. */ protected boolean m_MinimizeAbsoluteError = false; /** * Returns a string describing this attribute evaluator * * @return a description of the evaluator suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return " Meta classifier that enhances the performance of a regression " + "base classifier. Each iteration fits a model to the residuals left " + "by the classifier on the previous iteration. Prediction is " + "accomplished by adding the predictions of each classifier. " + "Reducing the shrinkage (learning rate) parameter helps prevent " + "overfitting and has a smoothing effect but increases the learning " + "time.\n\n" + "For more information see:\n\n" + this.getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed information about the * technical background of this class, e.g., paper reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.TECHREPORT); result.setValue(Field.AUTHOR, "J.H. Friedman"); result.setValue(Field.YEAR, "1999"); result.setValue(Field.TITLE, "Stochastic Gradient Boosting"); result.setValue(Field.INSTITUTION, "Stanford University"); result.setValue(Field.PS, "http://www-stat.stanford.edu/~jhf/ftp/stobst.ps"); return result; } /** * Default constructor specifying DecisionStump as the classifier */ public AdditiveRegression() { this(new weka.classifiers.trees.DecisionStump()); } /** * Constructor which takes base classifier as argument. * * @param classifier * the base classifier to use */ public AdditiveRegression(final Classifier classifier) { this.m_Classifier = classifier; } /** * String describing default classifier. * * @return the default classifier classname */ @Override protected String defaultClassifierString() { return "weka.classifiers.trees.DecisionStump"; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options.
*/ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<>(2); newVector.addElement(new Option("\tSpecify shrinkage rate. (default = 1.0, i.e., no shrinkage)", "S", 1, "-S")); newVector.addElement(new Option("\tMinimize absolute error instead of squared error (assumes that base learner minimizes absolute error).", "A", 0, "-A")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -S * Specify shrinkage rate. (default = 1.0, i.e. no shrinkage) * </pre> * * <pre> * -I &lt;num&gt; * Number of iterations. * (default 10) * </pre> * * <pre> * -A * Minimize absolute error instead of squared error (assumes that base learner minimizes absolute error). * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -W * Full name of base classifier. * (default: weka.classifiers.trees.DecisionStump) * </pre> * * <pre> * * Options specific to classifier weka.classifiers.trees.DecisionStump: * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String optionString = Utils.getOption('S', options); if (optionString.length() != 0) { Double temp = Double.valueOf(optionString); this.setShrinkage(temp.doubleValue()); } this.setMinimizeAbsoluteError(Utils.getFlag('A', options)); super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the Classifier. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<>(); options.add("-S"); options.add("" + this.getShrinkage()); if (this.getMinimizeAbsoluteError()) { options.add("-A"); } Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String shrinkageTipText() { return "Shrinkage rate. Smaller values help prevent overfitting and " + "have a smoothing effect (but increase learning time). " + "Default = 1.0, i.e. no shrinkage."; } /** * Set the shrinkage parameter * * @param l * the shrinkage rate. */ public void setShrinkage(final double l) { this.m_shrinkage = l; } /** * Get the shrinkage rate. * * @return the value of the learning rate */ public double getShrinkage() { return this.m_shrinkage; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String minimizeAbsoluteErrorTipText() { return "Minimize absolute error instead of squared error (assumes that the base learner minimizes absolute error)"; } /** * Sets whether absolute error is to be minimized. * * @param f * true if absolute error is to be minimized. */ public void setMinimizeAbsoluteError(final boolean f) { this.m_MinimizeAbsoluteError = f; } /** * Gets whether absolute error is to be minimized.
* * @return true if absolute error is to be minimized */ public boolean getMinimizeAbsoluteError() { return this.m_MinimizeAbsoluteError; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); // class result.disableAllClasses(); result.disableAllClassDependencies(); result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); return result; } /** * Method used to build the classifier. */ @Override public void buildClassifier(final Instances data) throws Exception { // Initialize classifier this.initializeClassifier(data); // For the given number of iterations while (this.next()) { } // Clean up this.done(); } /** * Initialize classifier. * * @param data * the training data * @throws Exception * if the classifier could not be initialized successfully */ @Override public void initializeClassifier(final Instances data) throws Exception { // can classifier handle the data? this.getCapabilities().testWithFail(data); // remove instances with missing class this.m_Data = new Instances(data); this.m_Data.deleteWithMissingClass(); // Compute the initial prediction (median or mean, depending on the loss) if (this.getMinimizeAbsoluteError()) { this.m_InitialPrediction = this.m_Data.kthSmallestValue(this.m_Data.classIndex(), this.m_Data.numInstances() / 2); } else { this.m_InitialPrediction = this.m_Data.meanOrMode(this.m_Data.classIndex()); } // only class? -> use only ZeroR model if (this.m_Data.numAttributes() == 1) { System.err.println("Cannot build non-trivial model (only class attribute present in data!)."); this.m_SuitableData = false; return; } else { this.m_SuitableData = true; } // Initialize list of classifiers and data this.m_Classifiers = new ArrayList<>(this.m_NumIterations); this.m_Data = this.residualReplace(this.m_Data, this.m_InitialPrediction); // Calculate error this.m_Error = 0; this.m_Diff = Double.MAX_VALUE; for (int i = 0; i < this.m_Data.numInstances(); i++) { if (this.getMinimizeAbsoluteError()) { this.m_Error += this.m_Data.instance(i).weight() * Math.abs(this.m_Data.instance(i).classValue()); } else { this.m_Error += this.m_Data.instance(i).weight() * this.m_Data.instance(i).classValue() * this.m_Data.instance(i).classValue(); } } if (this.m_Debug) { if (this.getMinimizeAbsoluteError()) { System.err.println("Sum of absolute residuals (predicting the median) : " + this.m_Error); } else { System.err.println("Sum of squared residuals (predicting the mean) : " + this.m_Error); } } } /** * Perform another iteration.
*/ @Override public boolean next() throws Exception { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if ((!this.m_SuitableData) || (this.m_Classifiers.size() >= this.m_NumIterations) || (this.m_Diff <= Utils.SMALL)) { return false; } // Build the classifier this.m_Classifiers.add(AbstractClassifier.makeCopy(this.m_Classifier)); this.m_Classifiers.get(this.m_Classifiers.size() - 1).buildClassifier(this.m_Data); this.m_Data = this.residualReplace(this.m_Data, this.m_Classifiers.get(this.m_Classifiers.size() - 1)); double sum = 0; for (int i = 0; i < this.m_Data.numInstances(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (this.getMinimizeAbsoluteError()) { sum += this.m_Data.instance(i).weight() * Math.abs(this.m_Data.instance(i).classValue()); } else { sum += this.m_Data.instance(i).weight() * this.m_Data.instance(i).classValue() * this.m_Data.instance(i).classValue(); } } if (this.m_Debug) { if (this.getMinimizeAbsoluteError()) { System.err.println("Sum of absolute residuals: " + sum); } else { System.err.println("Sum of squared residuals: " + sum); } } this.m_Diff = this.m_Error - sum; this.m_Error = sum; return true; } /** * Clean up. */ @Override public void done() { this.m_Data = null; } /** * Classify an instance. * * @param inst * the instance to predict * @return a prediction for the instance * @throws Exception * if an error occurs */ @Override public double classifyInstance(final Instance inst) throws Exception { double prediction = this.m_InitialPrediction; // default model? if (!this.m_SuitableData) { return prediction; } for (Classifier classifier : this.m_Classifiers) { double toAdd = classifier.classifyInstance(inst); if (Utils.isMissingValue(toAdd)) { throw new UnassignedClassException("AdditiveRegression: base learner predicted missing value."); } prediction += (toAdd * this.getShrinkage()); } return prediction; } /** * Replace the class values of the instances from the current iteration with residuals after * predicting with the supplied classifier. * * @param data * the instances to predict * @param c * the classifier to use * @return a new set of instances with class values replaced by residuals * @throws Exception * if something goes wrong */ private Instances residualReplace(final Instances data, final Classifier c) throws Exception { Instances newInst = new Instances(data); for (int i = 0; i < newInst.numInstances(); i++) { double pred = c.classifyInstance(newInst.instance(i)); if (Utils.isMissingValue(pred)) { throw new UnassignedClassException("AdditiveRegression: base learner predicted missing value."); } newInst.instance(i).setClassValue(newInst.instance(i).classValue() - (pred * this.getShrinkage())); } return newInst; } /** * Replace the class values of the instances from the current iteration with residuals after * predicting the given constant. 
* * @param data * the instances to predict * @param c * the constant to use * @return a new set of instances with class values replaced by residuals * @throws Exception * if something goes wrong */ private Instances residualReplace(final Instances data, final double c) throws Exception { Instances newInst = new Instances(data); for (int i = 0; i < newInst.numInstances(); i++) { newInst.instance(i).setClassValue(newInst.instance(i).classValue() - c); } return newInst; } /** * Returns an enumeration of the additional measure names * * @return an enumeration of the measure names */ @Override public Enumeration<String> enumerateMeasures() { Vector<String> newVector = new Vector<>(1); newVector.addElement("measureNumIterations"); return newVector.elements(); } /** * Returns the value of the named measure * * @param additionalMeasureName * the name of the measure to query for its value * @return the value of the named measure * @throws IllegalArgumentException * if the named measure is not supported */ @Override public double getMeasure(final String additionalMeasureName) { if (additionalMeasureName.compareToIgnoreCase("measureNumIterations") == 0) { return this.measureNumIterations(); } else { throw new IllegalArgumentException(additionalMeasureName + " not supported (AdditiveRegression)"); } } /** * return the number of iterations (base classifiers) completed * * @return the number of iterations (same as number of base classifier models) */ public double measureNumIterations() { return this.m_Classifiers.size(); } /** * Returns textual description of the classifier. * * @return a description of the classifier as a string */ @Override public String toString() { StringBuffer text = new StringBuffer(); if (this.m_SuitableData && this.m_Classifiers == null) { return "Classifier hasn't been built yet!"; } // only ZeroR model? if (!this.m_SuitableData) { StringBuffer buf = new StringBuffer(); buf.append(this.getClass().getName().replaceAll(".*\\.", "") + "\n"); buf.append(this.getClass().getName().replaceAll(".*\\.", "").replaceAll(".", "=") + "\n\n"); buf.append("Warning: Non-trivial model could not be built, initial prediction is: "); buf.append(this.m_InitialPrediction); return buf.toString(); } text.append("Additive Regression\n\n"); text.append("Initial prediction: " + this.m_InitialPrediction + "\n\n"); text.append("Base classifier " + this.getClassifier().getClass().getName() + "\n\n"); text.append("" + this.m_Classifiers.size() + " models generated.\n"); for (int i = 0; i < this.m_Classifiers.size(); i++) { text.append("\nModel number " + i + "\n\n" + this.m_Classifiers.get(i) + "\n"); } return text.toString(); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. * * @param argv * should contain the following arguments: -t training file [-T test file] [-c class index] */ public static void main(final String[] argv) { runClassifier(new AdditiveRegression(), argv); } }
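// --------------------------------------------------------------------------
// A minimal usage sketch for the AdditiveRegression class above; this example
// is not part of the original source. The dataset path "data.arff" and the
// class name AdditiveRegressionExample are hypothetical; the setters used are
// the ones defined or inherited by AdditiveRegression, and DataSource is the
// standard weka.core.converters.ConverterUtils.DataSource loader.
import weka.classifiers.meta.AdditiveRegression;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class AdditiveRegressionExample {
  public static void main(String[] args) throws Exception {
    // Load a dataset with a numeric class attribute (assumed to be last here).
    Instances data = DataSource.read("data.arff");
    data.setClassIndex(data.numAttributes() - 1);

    AdditiveRegression ar = new AdditiveRegression();
    ar.setNumIterations(50);            // number of boosting iterations (-I)
    ar.setShrinkage(0.5);               // learning rate (-S); < 1.0 smooths the fit
    ar.setMinimizeAbsoluteError(false); // squared error is the default loss
    ar.buildClassifier(data);

    // A prediction is the initial prediction plus the shrunken sum of the
    // base models' predictions, exactly as classifyInstance() implements it.
    Instance first = data.instance(0);
    System.out.println("Prediction: " + ar.classifyInstance(first));
  }
}
// --------------------------------------------------------------------------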
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/meta/AttributeSelectedClassifier.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * AttributeSelectedClassifier.java * Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import java.util.Collections; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.attributeSelection.ASEvaluation; import weka.attributeSelection.ASSearch; import weka.attributeSelection.AttributeSelection; import weka.classifiers.SingleClassifierEnhancer; import weka.core.AdditionalMeasureProducer; import weka.core.BatchPredictor; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Drawable; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.core.WekaException; /** * <!-- globalinfo-start --> Dimensionality of training and test data is reduced by attribute * selection before being passed on to a classifier. * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -E &lt;attribute evaluator specification&gt; * Full class name of attribute evaluator, followed * by its options. * eg: "weka.attributeSelection.CfsSubsetEval -L" * (default weka.attributeSelection.CfsSubsetEval) * </pre> * * <pre> * -S &lt;search method specification&gt; * Full class name of search method, followed * by its options. * eg: "weka.attributeSelection.BestFirst -D 1" * (default weka.attributeSelection.BestFirst) * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -W * Full name of base classifier. * (default: weka.classifiers.trees.J48) * </pre> * * <pre> * * Options specific to classifier weka.classifiers.trees.J48: * </pre> * * <pre> * -U * Use unpruned tree. * </pre> * * <pre> * -C &lt;pruning confidence&gt; * Set confidence threshold for pruning. * (default 0.25) * </pre> * * <pre> * -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf. * (default 2) * </pre> * * <pre> * -R * Use reduced error pruning. * </pre> * * <pre> * -N &lt;number of folds&gt; * Set number of folds for reduced error * pruning. One fold is used as pruning set. * (default 3) * </pre> * * <pre> * -B * Use binary splits only. * </pre> * * <pre> * -S * Don't perform subtree raising. * </pre> * * <pre> * -L * Do not clean up after the tree has been built. * </pre> * * <pre> * -A * Laplace smoothing for predicted probabilities. * </pre> * * <pre> * -Q &lt;seed&gt; * Seed for random data shuffling (default 1). 
* </pre> * * <!-- options-end --> * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision$ */ public class AttributeSelectedClassifier extends SingleClassifierEnhancer implements OptionHandler, Drawable, AdditionalMeasureProducer, WeightedInstancesHandler { /** for serialization */ static final long serialVersionUID = -1151805453487947577L; /** The attribute selection object */ protected AttributeSelection m_AttributeSelection = null; /** The attribute evaluator to use */ protected ASEvaluation m_Evaluator = new weka.attributeSelection.CfsSubsetEval(); /** The search method to use */ protected ASSearch m_Search = new weka.attributeSelection.BestFirst(); /** The header of the dimensionally reduced data */ protected Instances m_ReducedHeader; /** The number of class values in the training data (1 if class is numeric) */ protected int m_numClasses; /** The number of attributes selected by the attribute selection phase */ protected double m_numAttributesSelected; /** The time taken to select attributes in milliseconds */ protected double m_selectionTime; /** The time taken to select attributes AND build the classifier, in milliseconds */ protected double m_totalTime; /** * String describing default classifier. * * @return the default classifier classname */ @Override protected String defaultClassifierString() { return "weka.classifiers.trees.J48"; } /** * Default constructor. */ public AttributeSelectedClassifier() { this.m_Classifier = new weka.classifiers.trees.J48(); } /** * Returns a string describing this classifier * * @return a description of the classifier suitable for displaying in the explorer/experimenter * gui */ public String globalInfo() { return "Dimensionality of training and test data is reduced by " + "attribute selection before being passed on to a classifier."; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<>(2); newVector.addElement(new Option("\tFull class name of attribute evaluator, followed\n" + "\tby its options.\n" + "\teg: \"weka.attributeSelection.CfsSubsetEval -L\"\n" + "\t(default weka.attributeSelection.CfsSubsetEval)", "E", 1, "-E <attribute evaluator specification>")); newVector.addElement(new Option("\tFull class name of search method, followed\n" + "\tby its options.\n" + "\teg: \"weka.attributeSelection.BestFirst -D 1\"\n" + "\t(default weka.attributeSelection.BestFirst)", "S", 1, "-S <search method specification>")); newVector.addAll(Collections.list(super.listOptions())); if (this.getEvaluator() instanceof OptionHandler) { newVector.addElement(new Option("", "", 0, "\nOptions specific to attribute evaluator " + this.getEvaluator().getClass().getName() + ":")); newVector.addAll(Collections.list(((OptionHandler) this.getEvaluator()).listOptions())); } if (this.getSearch() instanceof OptionHandler) { newVector.addElement(new Option("", "", 0, "\nOptions specific to search method " + this.getSearch().getClass().getName() + ":")); newVector.addAll(Collections.list(((OptionHandler) this.getSearch()).listOptions())); } return newVector.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -E &lt;attribute evaluator specification&gt; * Full class name of attribute evaluator, followed * by its options.
* eg: "weka.attributeSelection.CfsSubsetEval -L" * (default weka.attributeSelection.CfsSubsetEval) * </pre> * * <pre> * -S &lt;search method specification&gt; * Full class name of search method, followed * by its options. * eg: "weka.attributeSelection.BestFirst -D 1" * (default weka.attributeSelection.BestFirst) * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -W * Full name of base classifier. * (default: weka.classifiers.trees.J48) * </pre> * * <pre> * * Options specific to classifier weka.classifiers.trees.J48: * </pre> * * <pre> * -U * Use unpruned tree. * </pre> * * <pre> * -C &lt;pruning confidence&gt; * Set confidence threshold for pruning. * (default 0.25) * </pre> * * <pre> * -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf. * (default 2) * </pre> * * <pre> * -R * Use reduced error pruning. * </pre> * * <pre> * -N &lt;number of folds&gt; * Set number of folds for reduced error * pruning. One fold is used as pruning set. * (default 3) * </pre> * * <pre> * -B * Use binary splits only. * </pre> * * <pre> * -S * Don't perform subtree raising. * </pre> * * <pre> * -L * Do not clean up after the tree has been built. * </pre> * * <pre> * -A * Laplace smoothing for predicted probabilities. * </pre> * * <pre> * -Q &lt;seed&gt; * Seed for random data shuffling (default 1). * </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { // same for attribute evaluator String evaluatorString = Utils.getOption('E', options); if (evaluatorString.length() == 0) { evaluatorString = weka.attributeSelection.CfsSubsetEval.class.getName(); } String[] evaluatorSpec = Utils.splitOptions(evaluatorString); if (evaluatorSpec.length == 0) { throw new Exception("Invalid attribute evaluator specification string"); } String evaluatorName = evaluatorSpec[0]; evaluatorSpec[0] = ""; this.setEvaluator(ASEvaluation.forName(evaluatorName, evaluatorSpec)); // same for search method String searchString = Utils.getOption('S', options); if (searchString.length() == 0) { searchString = weka.attributeSelection.BestFirst.class.getName(); } String[] searchSpec = Utils.splitOptions(searchString); if (searchSpec.length == 0) { throw new Exception("Invalid search specification string"); } String searchName = searchSpec[0]; searchSpec[0] = ""; this.setSearch(ASSearch.forName(searchName, searchSpec)); super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the Classifier. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<>(); // same attribute evaluator options.add("-E"); options.add("" + this.getEvaluatorSpec()); // same for search options.add("-S"); options.add("" + this.getSearchSpec()); Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String evaluatorTipText() { return "Set the attribute evaluator to use. This evaluator is used " + "during the attribute selection phase before the classifier is " + "invoked."; } /** * Sets the attribute evaluator * * @param evaluator * the evaluator with all options set. 
*/ public void setEvaluator(final ASEvaluation evaluator) { this.m_Evaluator = evaluator; } /** * Gets the attribute evaluator used * * @return the attribute evaluator */ public ASEvaluation getEvaluator() { return this.m_Evaluator; } /** * Gets the evaluator specification string, which contains the class name of the attribute evaluator * and any options to it * * @return the evaluator string. */ protected String getEvaluatorSpec() { ASEvaluation e = this.getEvaluator(); if (e instanceof OptionHandler) { return e.getClass().getName() + " " + Utils.joinOptions(((OptionHandler) e).getOptions()); } return e.getClass().getName(); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String searchTipText() { return "Set the search method. This search method is used " + "during the attribute selection phase before the classifier is " + "invoked."; } /** * Sets the search method * * @param search * the search method with all options set. */ public void setSearch(final ASSearch search) { this.m_Search = search; } /** * Gets the search method used * * @return the search method */ public ASSearch getSearch() { return this.m_Search; } /** * Gets the search specification string, which contains the class name of the search method and any * options to it * * @return the search string. */ protected String getSearchSpec() { ASSearch s = this.getSearch(); if (s instanceof OptionHandler) { return s.getClass().getName() + " " + Utils.joinOptions(((OptionHandler) s).getOptions()); } return s.getClass().getName(); } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result; if (this.getEvaluator() == null) { result = super.getCapabilities(); } else { result = this.getEvaluator().getCapabilities(); } // set dependencies for (Capability cap : Capability.values()) { result.enableDependency(cap); } return result; } /** * Build the classifier on the dimensionally reduced data. * * @param data * the training data * @throws Exception * if the classifier could not be built successfully */ @Override public void buildClassifier(final Instances data) throws Exception { if (this.m_Classifier == null) { throw new Exception("No base classifier has been set!"); } if (this.m_Evaluator == null) { throw new Exception("No attribute evaluator has been set!"); } if (this.m_Search == null) { throw new Exception("No search method has been set!"); } // can classifier handle the data? 
this.getCapabilities().testWithFail(data); // get fresh Instances object Instances newData = new Instances(data); if (newData.numInstances() == 0) { this.m_Classifier.buildClassifier(newData); return; } if (newData.classAttribute().isNominal()) { this.m_numClasses = newData.classAttribute().numValues(); } else { this.m_numClasses = 1; } Instances resampledData = null; // check to see if training data has all equal weights double weight = newData.instance(0).weight(); boolean ok = false; for (int i = 1; i < newData.numInstances(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (newData.instance(i).weight() != weight) { ok = true; break; } } if (ok) { if (!(this.m_Evaluator instanceof WeightedInstancesHandler) || !(this.m_Classifier instanceof WeightedInstancesHandler)) { Random r = new Random(1); for (int i = 0; i < 10; i++) { r.nextDouble(); } resampledData = newData.resampleWithWeights(r); } } else { // all equal weights in the training data so just use as is resampledData = newData; } this.m_AttributeSelection = new AttributeSelection(); this.m_AttributeSelection.setEvaluator(this.m_Evaluator); this.m_AttributeSelection.setSearch(this.m_Search); long start = System.currentTimeMillis(); this.m_AttributeSelection.SelectAttributes((this.m_Evaluator instanceof WeightedInstancesHandler) ? newData : resampledData); long end = System.currentTimeMillis(); if (this.m_Classifier instanceof WeightedInstancesHandler) { newData = this.m_AttributeSelection.reduceDimensionality(newData); this.m_Classifier.buildClassifier(newData); } else { resampledData = this.m_AttributeSelection.reduceDimensionality(resampledData); this.m_Classifier.buildClassifier(resampledData); } long end2 = System.currentTimeMillis(); this.m_numAttributesSelected = this.m_AttributeSelection.numberAttributesSelected(); this.m_ReducedHeader = new Instances((this.m_Classifier instanceof WeightedInstancesHandler) ? newData : resampledData, 0); this.m_selectionTime = end - start; this.m_totalTime = end2 - start; } /** * Classifies a given instance after attribute selection * * @param instance * the instance to be classified * @return the class distribution * @throws Exception * if instance could not be classified successfully */ @Override public double[] distributionForInstance(final Instance instance) throws Exception { Instance newInstance; if (this.m_AttributeSelection == null) { // throw new Exception("AttributeSelectedClassifier: No model built yet!"); newInstance = instance; } else { newInstance = this.m_AttributeSelection.reduceDimensionality(instance); } return this.m_Classifier.distributionForInstance(newInstance); } /** * Tool tip text for this property * * @return the tool tip for this property */ @Override public String batchSizeTipText() { return "Batch size to use if base learner is a BatchPredictor"; } /** * Set the batch size to use. Gets passed through to the base learner if it implements * BatchPredictor. Otherwise it is just ignored. * * @param size * the batch size to use */ @Override public void setBatchSize(final String size) { if (this.getClassifier() instanceof BatchPredictor) { ((BatchPredictor) this.getClassifier()).setBatchSize(size); } else { super.setBatchSize(size); } } /** * Gets the preferred batch size from the base learner if it implements BatchPredictor. Returns 1 as * the preferred batch size otherwise. 
* * @return the batch size to use */ @Override public String getBatchSize() { if (this.getClassifier() instanceof BatchPredictor) { return ((BatchPredictor) this.getClassifier()).getBatchSize(); } else { return super.getBatchSize(); } } /** * Batch scoring method. Calls the appropriate method for the base learner if it implements * BatchPredictor. Otherwise it simply calls the distributionForInstance() method repeatedly. * * @param insts * the instances to get predictions for * @return an array of probability distributions, one for each instance * @throws Exception * if a problem occurs */ @Override public double[][] distributionsForInstances(final Instances insts) throws Exception { if (this.getClassifier() instanceof BatchPredictor) { Instances newInstances; if (this.m_AttributeSelection == null) { // throw new Exception("AttributeSelectedClassifier: No model built yet!"); newInstances = insts; } else { newInstances = this.m_AttributeSelection.reduceDimensionality(insts); } if (newInstances.numInstances() != insts.numInstances()) { throw new WekaException("AttributeSelectedClassifier: dimensionality reduction has returned a different number of instances than expected."); } return ((BatchPredictor) this.getClassifier()).distributionsForInstances(newInstances); } else { double[][] result = new double[insts.numInstances()][insts.numClasses()]; for (int i = 0; i < insts.numInstances(); i++) { result[i] = this.distributionForInstance(insts.instance(i)); } return result; } } /** * Returns true if the base classifier implements BatchPredictor and is able to generate batch * predictions efficiently * * @return true if the base classifier can generate batch predictions efficiently */ @Override public boolean implementsMoreEfficientBatchPrediction() { if (!(this.getClassifier() instanceof BatchPredictor)) { return super.implementsMoreEfficientBatchPrediction(); } return ((BatchPredictor) this.getClassifier()).implementsMoreEfficientBatchPrediction(); } /** * Returns the type of graph this classifier represents. * * @return the type of graph */ @Override public int graphType() { if (this.m_Classifier instanceof Drawable) { return ((Drawable) this.m_Classifier).graphType(); } else { return Drawable.NOT_DRAWABLE; } } /** * Returns graph describing the classifier (if possible).
* * @return the graph of the classifier in dotty format * @throws Exception * if the classifier cannot be graphed */ @Override public String graph() throws Exception { if (this.m_Classifier instanceof Drawable) { return ((Drawable) this.m_Classifier).graph(); } else { throw new Exception("Classifier: " + this.getClassifierSpec() + " cannot be graphed"); } } /** * Output a representation of this classifier * * @return a representation of this classifier */ @Override public String toString() { if (this.m_AttributeSelection == null) { return "AttributeSelectedClassifier: No attribute selection possible.\n\n" + this.m_Classifier.toString(); } StringBuffer result = new StringBuffer(); result.append("AttributeSelectedClassifier:\n\n"); result.append(this.m_AttributeSelection.toResultsString()); result.append("\n\nHeader of reduced data:\n" + this.m_ReducedHeader.toString()); result.append("\n\nClassifier Model\n" + this.m_Classifier.toString()); return result.toString(); } /** * Additional measure --- number of attributes selected * * @return the number of attributes selected */ public double measureNumAttributesSelected() { return this.m_numAttributesSelected; } /** * Additional measure --- time taken (milliseconds) to select the attributes * * @return the time taken to select attributes */ public double measureSelectionTime() { return this.m_selectionTime; } /** * Additional measure --- time taken (milliseconds) to select attributes and build the classifier * * @return the total time (select attributes + build classifier) */ public double measureTime() { return this.m_totalTime; } /** * Returns an enumeration of the additional measure names * * @return an enumeration of the measure names */ @Override public Enumeration<String> enumerateMeasures() { Vector<String> newVector = new Vector<>(3); newVector.addElement("measureNumAttributesSelected"); newVector.addElement("measureSelectionTime"); newVector.addElement("measureTime"); if (this.m_Classifier instanceof AdditionalMeasureProducer) { newVector.addAll(Collections.list(((AdditionalMeasureProducer) this.m_Classifier).enumerateMeasures())); } return newVector.elements(); } /** * Returns the value of the named measure * * @param additionalMeasureName * the name of the measure to query for its value * @return the value of the named measure * @throws IllegalArgumentException * if the named measure is not supported */ @Override public double getMeasure(final String additionalMeasureName) { if (additionalMeasureName.compareToIgnoreCase("measureNumAttributesSelected") == 0) { return this.measureNumAttributesSelected(); } else if (additionalMeasureName.compareToIgnoreCase("measureSelectionTime") == 0) { return this.measureSelectionTime(); } else if (additionalMeasureName.compareToIgnoreCase("measureTime") == 0) { return this.measureTime(); } else if (this.m_Classifier instanceof AdditionalMeasureProducer) { return ((AdditionalMeasureProducer) this.m_Classifier).getMeasure(additionalMeasureName); } else { throw new IllegalArgumentException(additionalMeasureName + " not supported (AttributeSelectedClassifier)"); } } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. * * @param argv * should contain the following arguments: -t training file [-T test file] [-c class index] */ public static void main(final String[] argv) { runClassifier(new AttributeSelectedClassifier(), argv); } }
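// --------------------------------------------------------------------------
// A minimal configuration sketch for the AttributeSelectedClassifier above;
// this example is not part of the original source. "data.arff" and the class
// name AttributeSelectedClassifierExample are hypothetical; CfsSubsetEval,
// BestFirst and J48 are the defaults named in the source.
import weka.attributeSelection.BestFirst;
import weka.attributeSelection.CfsSubsetEval;
import weka.classifiers.meta.AttributeSelectedClassifier;
import weka.classifiers.trees.J48;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class AttributeSelectedClassifierExample {
  public static void main(String[] args) throws Exception {
    Instances data = DataSource.read("data.arff");
    data.setClassIndex(data.numAttributes() - 1);

    AttributeSelectedClassifier asc = new AttributeSelectedClassifier();
    asc.setEvaluator(new CfsSubsetEval()); // scores candidate attribute subsets
    asc.setSearch(new BestFirst());        // searches the space of subsets
    asc.setClassifier(new J48());          // model built on the reduced data
    asc.buildClassifier(data);

    // The additional measures report what the attribute selection phase did.
    System.out.println("Attributes selected: " + asc.measureNumAttributesSelected());
    System.out.println("Selection time (ms): " + asc.measureSelectionTime());
  }
}
// --------------------------------------------------------------------------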
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/meta/Bagging.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Bagging.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Enumeration; import java.util.List; import java.util.Random; import java.util.Vector; import weka.classifiers.Classifier; import weka.classifiers.RandomizableParallelIteratedSingleClassifierEnhancer; import weka.classifiers.evaluation.Evaluation; import weka.core.AdditionalMeasureProducer; import weka.core.Aggregateable; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.PartitionGenerator; import weka.core.Randomizable; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; /** * <!-- globalinfo-start --> Class for bagging a classifier to reduce variance. Can do * classification and regression depending on the base learner. <br/> * <br/> * For more information, see<br/> * <br/> * Leo Breiman (1996). Bagging predictors. Machine Learning. 24(2):123-140. * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;article{Breiman1996, * author = {Leo Breiman}, * journal = {Machine Learning}, * number = {2}, * pages = {123-140}, * title = {Bagging predictors}, * volume = {24}, * year = {1996} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -P * Size of each bag, as a percentage of the * training set size. (default 100) * </pre> * * <pre> * -O * Calculate the out of bag error. * </pre> * * <pre> * -print * Print the individual classifiers in the output * </pre> * * <pre> * -store-out-of-bag-predictions * Whether to store out of bag predictions in internal evaluation object. * </pre> * * <pre> * -output-out-of-bag-complexity-statistics * Whether to output complexity-based statistics when out-of-bag evaluation is performed. * </pre> * * <pre> * -represent-copies-using-weights * Represent copies of instances using weights rather than explicitly. * </pre> * * <pre> * -S &lt;num&gt; * Random number seed. * (default 1) * </pre> * * <pre> * -num-slots &lt;num&gt; * Number of execution slots. * (default 1 - i.e. no parallelism) * </pre> * * <pre> * -I &lt;num&gt; * Number of iterations. * (default 10) * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -W * Full name of base classifier. 
* (default: weka.classifiers.trees.REPTree) * </pre> * * <pre> * * Options specific to classifier weka.classifiers.trees.REPTree: * </pre> * * <pre> * -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf (default 2). * </pre> * * <pre> * -V &lt;minimum variance for split&gt; * Set minimum numeric class variance proportion * of train variance for split (default 1e-3). * </pre> * * <pre> * -N &lt;number of folds&gt; * Number of folds for reduced error pruning (default 3). * </pre> * * <pre> * -S &lt;seed&gt; * Seed for random data shuffling (default 1). * </pre> * * <pre> * -P * No pruning. * </pre> * * <pre> * -L * Maximum tree depth (default -1, no maximum) * </pre> * * <pre> * -I * Initial class value count (default 0) * </pre> * * <pre> * -R * Spread initial count over all class values (i.e. don't use 1 per value) * </pre> * * <!-- options-end --> * * Options after -- are passed to the designated classifier. * <p> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Len Trigg (len@reeltwo.com) * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @version $Revision$ */ public class Bagging extends RandomizableParallelIteratedSingleClassifierEnhancer implements WeightedInstancesHandler, AdditionalMeasureProducer, TechnicalInformationHandler, PartitionGenerator, Aggregateable<Bagging> { /** for serialization */ static final long serialVersionUID = -115879962237199703L; /** The size of each bag sample, as a percentage of the training size */ protected int m_BagSizePercent = 100; /** Whether to calculate the out of bag error */ protected boolean m_CalcOutOfBag = false; /** Whether to represent copies of instances using weights rather than explicitly */ protected boolean m_RepresentUsingWeights = false; /** The evaluation object holding the out of bag error, etc. */ protected Evaluation m_OutOfBagEvaluationObject = null; /** Whether to store the out of bag predictions in the evaluation object. */ private boolean m_StoreOutOfBagPredictions = false; /** Whether to output complexity-based statistics when OOB-evaluation is performed. */ private boolean m_OutputOutOfBagComplexityStatistics; /** Whether class is numeric. */ private boolean m_Numeric = false; /** Whether to print individual ensemble members in output. */ private boolean m_printClassifiers; /** Random number generator */ protected Random m_random; /** Used to indicate whether an instance is in a bag or not */ protected boolean[][] m_inBag; /** Reference to the training data */ protected Instances m_data; /** * Constructor. */ public Bagging() { this.m_Classifier = new weka.classifiers.trees.REPTree(); } /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for bagging a classifier to reduce variance. Can do classification " + "and regression depending on the base learner. \n\n" + "For more information, see\n\n" + this.getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed information about the * technical background of this class, e.g., paper reference or book this class is based on. 
* * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "Leo Breiman"); result.setValue(Field.YEAR, "1996"); result.setValue(Field.TITLE, "Bagging predictors"); result.setValue(Field.JOURNAL, "Machine Learning"); result.setValue(Field.VOLUME, "24"); result.setValue(Field.NUMBER, "2"); result.setValue(Field.PAGES, "123-140"); return result; } /** * String describing default classifier. * * @return the default classifier classname */ @Override protected String defaultClassifierString() { return "weka.classifiers.trees.REPTree"; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<>(4); newVector.addElement(new Option("\tSize of each bag, as a percentage of the\n" + "\ttraining set size. (default 100)", "P", 1, "-P")); newVector.addElement(new Option("\tCalculate the out of bag error.", "O", 0, "-O")); newVector.addElement(new Option("\tWhether to store out of bag predictions in internal evaluation object.", "store-out-of-bag-predictions", 0, "-store-out-of-bag-predictions")); newVector.addElement(new Option("\tWhether to output complexity-based statistics when out-of-bag evaluation is performed.", "output-out-of-bag-complexity-statistics", 0, "-output-out-of-bag-complexity-statistics")); newVector.addElement(new Option("\tRepresent copies of instances using weights rather than explicitly.", "represent-copies-using-weights", 0, "-represent-copies-using-weights")); newVector.addElement(new Option("\tPrint the individual classifiers in the output", "print", 0, "-print")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -P * Size of each bag, as a percentage of the * training set size. (default 100) * </pre> * * <pre> * -O * Calculate the out of bag error. * </pre> * * <pre> * -print * Print the individual classifiers in the output * </pre> * * <pre> * -store-out-of-bag-predictions * Whether to store out of bag predictions in internal evaluation object. * </pre> * * <pre> * -output-out-of-bag-complexity-statistics * Whether to output complexity-based statistics when out-of-bag evaluation is performed. * </pre> * * <pre> * -represent-copies-using-weights * Represent copies of instances using weights rather than explicitly. * </pre> * * <pre> * -S &lt;num&gt; * Random number seed. * (default 1) * </pre> * * <pre> * -num-slots &lt;num&gt; * Number of execution slots. * (default 1 - i.e. no parallelism) * </pre> * * <pre> * -I &lt;num&gt; * Number of iterations. * (default 10) * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -W * Full name of base classifier. * (default: weka.classifiers.trees.REPTree) * </pre> * * <pre> * * Options specific to classifier weka.classifiers.trees.REPTree: * </pre> * * <pre> * -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf (default 2). * </pre> * * <pre> * -V &lt;minimum variance for split&gt; * Set minimum numeric class variance proportion * of train variance for split (default 1e-3). 
* </pre> * * <pre> * -N &lt;number of folds&gt; * Number of folds for reduced error pruning (default 3). * </pre> * * <pre> * -S &lt;seed&gt; * Seed for random data shuffling (default 1). * </pre> * * <pre> * -P * No pruning. * </pre> * * <pre> * -L * Maximum tree depth (default -1, no maximum) * </pre> * * <pre> * -I * Initial class value count (default 0) * </pre> * * <pre> * -R * Spread initial count over all class values (i.e. don't use 1 per value) * </pre> * * <!-- options-end --> * * Options after -- are passed to the designated classifier. * <p> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String bagSize = Utils.getOption('P', options); if (bagSize.length() != 0) { this.setBagSizePercent(Integer.parseInt(bagSize)); } else { this.setBagSizePercent(100); } this.setCalcOutOfBag(Utils.getFlag('O', options)); this.setStoreOutOfBagPredictions(Utils.getFlag("store-out-of-bag-predictions", options)); this.setOutputOutOfBagComplexityStatistics(Utils.getFlag("output-out-of-bag-complexity-statistics", options)); this.setRepresentCopiesUsingWeights(Utils.getFlag("represent-copies-using-weights", options)); this.setPrintClassifiers(Utils.getFlag("print", options)); super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the Classifier. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<>(); options.add("-P"); options.add("" + this.getBagSizePercent()); if (this.getCalcOutOfBag()) { options.add("-O"); } if (this.getStoreOutOfBagPredictions()) { options.add("-store-out-of-bag-predictions"); } if (this.getOutputOutOfBagComplexityStatistics()) { options.add("-output-out-of-bag-complexity-statistics"); } if (this.getRepresentCopiesUsingWeights()) { options.add("-represent-copies-using-weights"); } if (this.getPrintClassifiers()) { options.add("-print"); } Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String bagSizePercentTipText() { return "Size of each bag, as a percentage of the training set size."; } /** * Gets the size of each bag, as a percentage of the training set size. * * @return the bag size, as a percentage. */ public int getBagSizePercent() { return this.m_BagSizePercent; } /** * Sets the size of each bag, as a percentage of the training set size. * * @param newBagSizePercent * the bag size, as a percentage. */ public void setBagSizePercent(final int newBagSizePercent) { this.m_BagSizePercent = newBagSizePercent; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String representCopiesUsingWeightsTipText() { return "Whether to represent copies of instances using weights rather than explicitly."; } /** * Set whether copies of instances are represented using weights rather than explicitly. * * @param representUsingWeights * whether to represent copies using weights */ public void setRepresentCopiesUsingWeights(final boolean representUsingWeights) { this.m_RepresentUsingWeights = representUsingWeights; } /** * Get whether copies of instances are represented using weights rather than explicitly. 
* * @return whether copies of instances are represented using weights rather than explicitly */ public boolean getRepresentCopiesUsingWeights() { return this.m_RepresentUsingWeights; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String storeOutOfBagPredictionsTipText() { return "Whether to store the out-of-bag predictions."; } /** * Set whether the out of bag predictions are stored. * * @param storeOutOfBag * whether the out of bag predictions are stored */ public void setStoreOutOfBagPredictions(final boolean storeOutOfBag) { this.m_StoreOutOfBagPredictions = storeOutOfBag; } /** * Get whether the out of bag predictions are stored. * * @return whether the out of bag predictions are stored */ public boolean getStoreOutOfBagPredictions() { return this.m_StoreOutOfBagPredictions; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String calcOutOfBagTipText() { return "Whether the out-of-bag error is calculated."; } /** * Set whether the out of bag error is calculated. * * @param calcOutOfBag * whether to calculate the out of bag error */ public void setCalcOutOfBag(final boolean calcOutOfBag) { this.m_CalcOutOfBag = calcOutOfBag; } /** * Get whether the out of bag error is calculated. * * @return whether the out of bag error is calculated */ public boolean getCalcOutOfBag() { return this.m_CalcOutOfBag; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String outputOutOfBagComplexityStatisticsTipText() { return "Whether to output complexity-based statistics when out-of-bag evaluation is performed."; } /** * Gets whether complexity statistics are output when OOB estimation is performed. * * @return whether statistics are calculated */ public boolean getOutputOutOfBagComplexityStatistics() { return this.m_OutputOutOfBagComplexityStatistics; } /** * Sets whether complexity statistics are output when OOB estimation is performed. * * @param b * whether statistics are calculated */ public void setOutputOutOfBagComplexityStatistics(final boolean b) { this.m_OutputOutOfBagComplexityStatistics = b; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String printClassifiersTipText() { return "Print the individual classifiers in the output"; } /** * Set whether to print the individual ensemble classifiers in the output * * @param print * true if the individual classifiers are to be printed */ public void setPrintClassifiers(final boolean print) { this.m_printClassifiers = print; } /** * Get whether to print the individual ensemble classifiers in the output * * @return true if the individual classifiers are to be printed */ public boolean getPrintClassifiers() { return this.m_printClassifiers; } /** * Gets the out of bag error that was calculated as the classifier was built. Returns error rate in * classification case and mean absolute error in regression case. 
* * @return the out of bag error; -1 if the out-of-bag error has not been estimated */ public double measureOutOfBagError() { if (this.m_OutOfBagEvaluationObject == null) { return -1; } if (this.m_Numeric) { return this.m_OutOfBagEvaluationObject.meanAbsoluteError(); } else { return this.m_OutOfBagEvaluationObject.errorRate(); } } /** * Returns an enumeration of the additional measure names. * * @return an enumeration of the measure names */ @Override public Enumeration<String> enumerateMeasures() { Vector<String> newVector = new Vector<>(1); newVector.addElement("measureOutOfBagError"); return newVector.elements(); } /** * Returns the value of the named measure. * * @param additionalMeasureName * the name of the measure to query for its value * @return the value of the named measure * @throws IllegalArgumentException * if the named measure is not supported */ @Override public double getMeasure(final String additionalMeasureName) { if (additionalMeasureName.equalsIgnoreCase("measureOutOfBagError")) { return this.measureOutOfBagError(); } else { throw new IllegalArgumentException(additionalMeasureName + " not supported (Bagging)"); } } /** * Returns a training set for a particular iteration. * * @param iteration * the number of the iteration for the requested training set. * @return the training set for the supplied iteration number * @throws Exception * if something goes wrong when generating a training set. */ @Override protected synchronized Instances getTrainingSet(final int iteration) throws Exception { int bagSize = (int) (this.m_data.numInstances() * (this.m_BagSizePercent / 100.0)); Instances bagData = null; Random r = new Random(this.m_Seed + iteration); // create the in-bag dataset if (this.m_CalcOutOfBag) { this.m_inBag[iteration] = new boolean[this.m_data.numInstances()]; bagData = this.m_data.resampleWithWeights(r, this.m_inBag[iteration], this.getRepresentCopiesUsingWeights()); } else { if (bagSize < this.m_data.numInstances()) { bagData = this.m_data.resampleWithWeights(r, false); // Need to turn off representation using weights in this case. bagData.randomize(r); Instances newBagData = new Instances(bagData, 0, bagSize); bagData = newBagData; } else { bagData = this.m_data.resampleWithWeights(r, this.getRepresentCopiesUsingWeights()); } } return bagData; } /** * Returns the out-of-bag evaluation object. * * @return the out-of-bag evaluation object; null if out-of-bag error hasn't been calculated */ public Evaluation getOutOfBagEvaluationObject() { return this.m_OutOfBagEvaluationObject; } /** * Bagging method. * * @param data * the training data to be used for generating the bagged classifier. * @throws Exception * if the classifier could not be built successfully */ @Override public void buildClassifier(final Instances data) throws Exception { // can classifier handle the data? this.getCapabilities().testWithFail(data); // Has user asked to represent copies using weights?
if (this.getRepresentCopiesUsingWeights() && !(this.m_Classifier instanceof WeightedInstancesHandler)) { throw new IllegalArgumentException("Cannot represent copies using weights when " + "base learner in bagging does not implement " + "WeightedInstancesHandler."); } // get fresh Instances object this.m_data = new Instances(data); super.buildClassifier(this.m_data); if (this.m_CalcOutOfBag && (this.m_BagSizePercent != 100)) { throw new IllegalArgumentException("Bag size needs to be 100% if " + "out-of-bag error is to be calculated!"); } this.m_random = new Random(this.m_Seed); this.m_inBag = null; if (this.m_CalcOutOfBag) { this.m_inBag = new boolean[this.m_Classifiers.length][]; } for (int j = 0; j < this.m_Classifiers.length; j++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (this.m_Classifier instanceof Randomizable) { ((Randomizable) this.m_Classifiers[j]).setSeed(this.m_random.nextInt()); } } this.m_Numeric = this.m_data.classAttribute().isNumeric(); this.buildClassifiers(); // calc OOB error? if (this.getCalcOutOfBag()) { this.m_OutOfBagEvaluationObject = new Evaluation(this.m_data); for (int i = 0; i < this.m_data.numInstances(); i++) { double[] votes; if (this.m_Numeric) { votes = new double[1]; } else { votes = new double[this.m_data.numClasses()]; } // determine predictions for instance int voteCount = 0; for (int j = 0; j < this.m_Classifiers.length; j++) { if (this.m_inBag[j][i]) { continue; } if (this.m_Numeric) { double pred = this.m_Classifiers[j].classifyInstance(this.m_data.instance(i)); if (!Utils.isMissingValue(pred)) { votes[0] += pred; voteCount++; } } else { voteCount++; double[] newProbs = this.m_Classifiers[j].distributionForInstance(this.m_data.instance(i)); // sum the probability estimates for (int k = 0; k < newProbs.length; k++) { votes[k] += newProbs[k]; } } } // "vote" if (this.m_Numeric) { if (voteCount > 0) { votes[0] /= voteCount; this.m_OutOfBagEvaluationObject.evaluationForSingleInstance(votes, this.m_data.instance(i), this.getStoreOutOfBagPredictions()); } } else { double sum = Utils.sum(votes); if (sum > 0) { Utils.normalize(votes, sum); this.m_OutOfBagEvaluationObject.evaluationForSingleInstance(votes, this.m_data.instance(i), this.getStoreOutOfBagPredictions()); } } } } else { this.m_OutOfBagEvaluationObject = null; } // save memory this.m_inBag = null; this.m_data = new Instances(this.m_data, 0); } /** * Calculates the class membership probabilities for the given test instance. 
* * @param instance * the instance to be classified * @return predicted class probability distribution * @throws Exception * if distribution can't be computed successfully */ @Override public double[] distributionForInstance(final Instance instance) throws Exception { double[] sums = new double[instance.numClasses()], newProbs; double numPreds = 0; for (int i = 0; i < this.m_NumIterations; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (this.m_Numeric) { double pred = this.m_Classifiers[i].classifyInstance(instance); if (!Utils.isMissingValue(pred)) { sums[0] += pred; numPreds++; } } else { newProbs = this.m_Classifiers[i].distributionForInstance(instance); for (int j = 0; j < newProbs.length; j++) { sums[j] += newProbs[j]; } } } if (this.m_Numeric) { if (numPreds == 0) { sums[0] = Utils.missingValue(); } else { sums[0] /= numPreds; } return sums; } else if (Utils.eq(Utils.sum(sums), 0)) { return sums; } else { Utils.normalize(sums); return sums; } } /** * Returns description of the bagged classifier. * * @return description of the bagged classifier as a string */ @Override public String toString() { if (this.m_Classifiers == null) { return "Bagging: No model built yet."; } StringBuffer text = new StringBuffer(); text.append("Bagging with " + this.getNumIterations() + " iterations and base learner\n\n" + this.getClassifierSpec()); if (this.getPrintClassifiers()) { text.append("All the base classifiers: \n\n"); for (int i = 0; i < this.m_Classifiers.length; i++) { text.append(this.m_Classifiers[i].toString() + "\n\n"); } } if (this.m_CalcOutOfBag) { text.append(this.m_OutOfBagEvaluationObject.toSummaryString("\n\n*** Out-of-bag estimates ***\n", this.getOutputOutOfBagComplexityStatistics())); } return text.toString(); } /** * Builds the classifier to generate a partition. */ @Override public void generatePartition(final Instances data) throws Exception { if (this.m_Classifier instanceof PartitionGenerator) { this.buildClassifier(data); } else { throw new Exception("Classifier: " + this.getClassifierSpec() + " cannot generate a partition"); } } /** * Computes an array that indicates leaf membership */ @Override public double[] getMembershipValues(final Instance inst) throws Exception { if (this.m_Classifier instanceof PartitionGenerator) { ArrayList<double[]> al = new ArrayList<>(); int size = 0; for (int i = 0; i < this.m_Classifiers.length; i++) { double[] r = ((PartitionGenerator) this.m_Classifiers[i]).getMembershipValues(inst); size += r.length; al.add(r); } double[] values = new double[size]; int pos = 0; for (double[] v : al) { System.arraycopy(v, 0, values, pos, v.length); pos += v.length; } return values; } else { throw new Exception("Classifier: " + this.getClassifierSpec() + " cannot generate a partition"); } } /** * Returns the number of elements in the partition. */ @Override public int numElements() throws Exception { if (this.m_Classifier instanceof PartitionGenerator) { int size = 0; for (int i = 0; i < this.m_Classifiers.length; i++) { size += ((PartitionGenerator) this.m_Classifiers[i]).numElements(); } return size; } else { throw new Exception("Classifier: " + this.getClassifierSpec() + " cannot generate a partition"); } } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class.
* * @param argv * the options */ public static void main(final String[] argv) { runClassifier(new Bagging(), argv); } protected List<Classifier> m_classifiersCache; /** * Aggregate an object with this one * * @param toAggregate * the object to aggregate * @return the result of aggregation * @throws Exception * if the supplied object can't be aggregated for some reason */ @Override public Bagging aggregate(final Bagging toAggregate) throws Exception { if (!this.m_Classifier.getClass().isAssignableFrom(toAggregate.m_Classifier.getClass())) { throw new Exception("Can't aggregate because base classifiers differ"); } if (this.m_classifiersCache == null) { this.m_classifiersCache = new ArrayList<>(); this.m_classifiersCache.addAll(Arrays.asList(this.m_Classifiers)); } this.m_classifiersCache.addAll(Arrays.asList(toAggregate.m_Classifiers)); return this; } /** * Call to complete the aggregation process. Allows implementers to do any final processing based on * how many objects were aggregated. * * @throws Exception * if the aggregation can't be finalized for some reason */ @Override public void finalizeAggregation() throws Exception { this.m_Classifiers = this.m_classifiersCache.toArray(new Classifier[0]); // zero-length array avoids a spurious null element if the cache is empty this.m_NumIterations = this.m_Classifiers.length; this.m_classifiersCache = null; } }
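// --------------------------------------------------------------------------
// A minimal out-of-bag evaluation sketch for the Bagging class above; this
// example is not part of the original source. "data.arff" and the class name
// BaggingOutOfBagExample are hypothetical. Note the constraint enforced in
// buildClassifier(): the bag size must stay at 100% when the out-of-bag error
// is requested.
import weka.classifiers.meta.Bagging;
import weka.classifiers.trees.REPTree;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class BaggingOutOfBagExample {
  public static void main(String[] args) throws Exception {
    Instances data = DataSource.read("data.arff");
    data.setClassIndex(data.numAttributes() - 1);

    Bagging bagger = new Bagging();
    bagger.setClassifier(new REPTree()); // the default base learner
    bagger.setNumIterations(10);         // number of bagged models (-I)
    bagger.setBagSizePercent(100);       // required for OOB estimation
    bagger.setCalcOutOfBag(true);        // collect out-of-bag votes (-O)
    bagger.buildClassifier(data);

    // Error rate for nominal classes, mean absolute error for numeric ones,
    // as documented on measureOutOfBagError().
    System.out.println("Out-of-bag error: " + bagger.measureOutOfBagError());
  }
}
// --------------------------------------------------------------------------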
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/meta/CVParameterSelection.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * CVParameterSelection.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import java.io.Serializable; import java.util.Collections; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.Evaluation; import weka.classifiers.RandomizableSingleClassifierEnhancer; import weka.core.Capabilities; import weka.core.Drawable; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Summarizable; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; /** <!-- globalinfo-start --> * Class for performing parameter selection by cross-validation for any classifier.<br/> * <br/> * For more information, see:<br/> * <br/> * R. Kohavi (1995). Wrappers for Performance Enhancement and Oblivious Decision Graphs. Department of Computer Science, Stanford University. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;phdthesis{Kohavi1995, * address = {Department of Computer Science, Stanford University}, * author = {R. Kohavi}, * school = {Stanford University}, * title = {Wrappers for Performance Enhancement and Oblivious Decision Graphs}, * year = {1995} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -X &lt;number of folds&gt; * Number of folds used for cross validation (default 10).</pre> * * <pre> -P &lt;classifier parameter&gt; * Classifier parameter options. * eg: "N 1 5 10" Sets an optimisation parameter for the * classifier with name -N, with lower bound 1, upper bound * 5, and 10 optimisation steps. The upper bound may be the * character 'A' or 'I' to substitute the number of * attributes or instances in the training data, * respectively. This parameter may be supplied more than * once to optimise over several classifier options * simultaneously.</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.rules.ZeroR)</pre> * * <pre> * Options specific to classifier weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * Options after -- are passed to the designated sub-classifier. 
<p> * * @author Len Trigg (trigg@cs.waikato.ac.nz) * @version $Revision$ */ public class CVParameterSelection extends RandomizableSingleClassifierEnhancer implements Drawable, Summarizable, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -6529603380876641265L; /** * A data structure to hold values associated with a single * cross-validation search parameter */ protected class CVParameter implements Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = -4668812017709421953L; /** Char used to identify the option of interest */ private String m_ParamChar; /** Lower bound for the CV search */ private double m_Lower; /** Upper bound for the CV search */ private double m_Upper; /** Number of steps during the search */ private double m_Steps; /** The parameter value with the best performance */ private double m_ParamValue; /** True if the parameter should be added at the end of the argument list */ private boolean m_AddAtEnd; /** True if the parameter should be rounded to an integer */ private boolean m_RoundParam; /** * Constructs a CVParameter. * * @param param the parameter definition * @throws Exception if construction of CVParameter fails */ public CVParameter(String param) throws Exception { String[] parts = param.split(" "); if (parts.length < 4 || parts.length > 5) { throw new Exception("CVParameter " + param + ": four or five components expected!"); } try { Double.parseDouble(parts[0]); throw new Exception("CVParameter " + param + ": Character parameter identifier expected"); } catch (NumberFormatException n) { m_ParamChar = parts[0]; } try { m_Lower = Double.parseDouble(parts[1]); } catch (NumberFormatException n) { throw new Exception("CVParameter " + param + ": Numeric lower bound expected"); } if (parts[2].equals("A")) { m_Upper = m_Lower - 1; /* sentinel value: upper bound = number of attributes */ } else if (parts[2].equals("I")) { m_Upper = m_Lower - 2; /* sentinel value: upper bound = training fold size */ } else { try { m_Upper = Double.parseDouble(parts[2]); if (m_Upper < m_Lower) { throw new Exception("CVParameter " + param + ": Upper bound is less than lower bound"); } } catch (NumberFormatException n) { throw new Exception("CVParameter " + param + ": Upper bound must be numeric, or 'A' or 'I'"); } } try { m_Steps = Double.parseDouble(parts[3]); } catch (NumberFormatException n) { throw new Exception("CVParameter " + param + ": Numeric number of steps expected"); } if (parts.length == 5 && parts[4].equals("R")) { m_RoundParam = true; } } /** * Returns a CVParameter as a string. * * @return the CVParameter as a string */ public String toString() { String result = m_ParamChar + " " + m_Lower + " "; switch ((int)(m_Lower - m_Upper + 0.5)) { case 1: result += "A"; break; case 2: result += "I"; break; default: result += m_Upper; break; } result += " " + m_Steps; if (m_RoundParam) { result += " R"; } return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } } /** * The base classifier options (not including those being set * by cross-validation) */ protected String [] m_ClassifierOptions; /** The set of all classifier options as determined by cross-validation */ protected String [] m_BestClassifierOptions; /** The set of all options at initialization time, so that getOptions can return this.
*/ protected String [] m_InitOptions; /** The cross-validated performance of the best options */ protected double m_BestPerformance; /** The set of parameters to cross-validate over */ protected Vector<CVParameter> m_CVParams = new Vector<CVParameter>(); /** The number of attributes in the data */ protected int m_NumAttributes; /** The number of instances in a training fold */ protected int m_TrainFoldSize; /** The number of folds used in cross-validation */ protected int m_NumFolds = 10; /** * Create the options array to pass to the classifier. The parameter * values and positions are taken from m_ClassifierOptions and * m_CVParams. * * @return the options array */ protected String [] createOptions() { String [] options = new String [m_ClassifierOptions.length + 2 * m_CVParams.size()]; int start = 0, end = options.length; // Add the cross-validation parameters and their values for (int i = 0; i < m_CVParams.size(); i++) { CVParameter cvParam = (CVParameter)m_CVParams.elementAt(i); double paramValue = cvParam.m_ParamValue; if (cvParam.m_RoundParam) { // paramValue = (double)((int) (paramValue + 0.5)); paramValue = Math.rint(paramValue); } boolean isInt = ((paramValue - (int)paramValue) == 0); if (cvParam.m_AddAtEnd) { options[--end] = "" + ((cvParam.m_RoundParam || isInt) ? Utils.doubleToString(paramValue,4) : cvParam.m_ParamValue); //Utils.doubleToString(paramValue,4); options[--end] = "-" + cvParam.m_ParamChar; } else { options[start++] = "-" + cvParam.m_ParamChar; options[start++] = "" + ((cvParam.m_RoundParam || isInt) ? Utils.doubleToString(paramValue,4) : cvParam.m_ParamValue); //+ Utils.doubleToString(paramValue,4); } } // Add the static parameters System.arraycopy(m_ClassifierOptions, 0, options, start, m_ClassifierOptions.length); return options; } /** * Finds the best parameter combination. (recursive for each parameter * being optimised). * * @param depth the index of the parameter to be optimised at this level * @param trainData the data the search is based on * @param random a random number generator * @throws Exception if an error occurs */ protected void findParamsByCrossValidation(int depth, Instances trainData, Random random) throws Exception { if (depth < m_CVParams.size()) { CVParameter cvParam = (CVParameter) m_CVParams.elementAt(depth); double upper; switch ((int) (cvParam.m_Lower - cvParam.m_Upper + 0.5)) { case 1: upper = m_NumAttributes; break; case 2: upper = m_TrainFoldSize; break; default: upper = cvParam.m_Upper; break; } double increment = (upper - cvParam.m_Lower) / (cvParam.m_Steps - 1); for (cvParam.m_ParamValue = cvParam.m_Lower; cvParam.m_ParamValue <= upper; cvParam.m_ParamValue += increment) { findParamsByCrossValidation(depth + 1, trainData, random); } } else { Evaluation evaluation = new Evaluation(trainData); // Work with a copy of the base classifier in case the base classifier does not initialize itself properly Classifier copiedClassifier = AbstractClassifier.makeCopy(m_Classifier); // Set the classifier options String[] options = createOptions(); if (m_Debug) { System.err.print("Setting options for " + copiedClassifier.getClass().getName() + ":"); for (int i = 0; i < options.length; i++) { System.err.print(" " + options[i]); } System.err.println(""); } ((OptionHandler) copiedClassifier).setOptions(options); for (int j = 0; j < m_NumFolds; j++) { // We want to randomize the data the same way for every // learning scheme. 
Instances train = trainData.trainCV(m_NumFolds, j, new Random(1)); Instances test = trainData.testCV(m_NumFolds, j); copiedClassifier.buildClassifier(train); evaluation.setPriors(train); evaluation.evaluateModel(copiedClassifier, test); } double error = evaluation.errorRate(); if (m_Debug) { System.err.println("Cross-validated error rate: " + Utils.doubleToString(error, 6, 4)); } if ((m_BestPerformance == -99) || (error < m_BestPerformance)) { m_BestPerformance = error; m_BestClassifierOptions = createOptions(); } } } /** * Returns a string describing this classifier * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for performing parameter selection by cross-validation " + "for any classifier.\n\n" + "For more information, see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.PHDTHESIS); result.setValue(Field.AUTHOR, "R. Kohavi"); result.setValue(Field.YEAR, "1995"); result.setValue(Field.TITLE, "Wrappers for Performance Enhancement and Oblivious Decision Graphs"); result.setValue(Field.SCHOOL, "Stanford University"); result.setValue(Field.ADDRESS, "Department of Computer Science, Stanford University"); return result; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<Option>(2); newVector.addElement(new Option( "\tNumber of folds used for cross validation (default 10).", "X", 1, "-X <number of folds>")); newVector.addElement(new Option( "\tClassifier parameter options.\n" + "\teg: \"N 1 5 10\" Sets an optimisation parameter for the\n" + "\tclassifier with name -N, with lower bound 1, upper bound\n" + "\t5, and 10 optimisation steps. The upper bound may be the\n" + "\tcharacter 'A' or 'I' to substitute the number of\n" + "\tattributes or instances in the training data,\n" + "\trespectively. This parameter may be supplied more than\n" + "\tonce to optimise over several classifier options\n" + "\tsimultaneously.", "P", 1, "-P <classifier parameter>")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -X &lt;number of folds&gt; * Number of folds used for cross validation (default 10).</pre> * * <pre> -P &lt;classifier parameter&gt; * Classifier parameter options. * eg: "N 1 5 10" Sets an optimisation parameter for the * classifier with name -N, with lower bound 1, upper bound * 5, and 10 optimisation steps. The upper bound may be the * character 'A' or 'I' to substitute the number of * attributes or instances in the training data, * respectively. This parameter may be supplied more than * once to optimise over several classifier options * simultaneously.</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. 
* (default: weka.classifiers.rules.ZeroR)</pre> * * <pre> * Options specific to classifier weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * Options after -- are passed to the designated sub-classifier. <p> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String foldsString = Utils.getOption('X', options); if (foldsString.length() != 0) { setNumFolds(Integer.parseInt(foldsString)); } else { setNumFolds(10); } String cvParam; m_CVParams = new Vector<CVParameter>(); do { cvParam = Utils.getOption('P', options); if (cvParam.length() != 0) { addCVParameter(cvParam); } } while (cvParam.length() != 0); super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the Classifier. * * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { Vector<String> options = new Vector<String>(); for (int i = 0; i < m_CVParams.size(); i++) { options.add("-P"); options.add("" + getCVParameter(i)); } options.add("-X"); options.add("" + getNumFolds()); Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * Returns (a copy of) the best options found for the classifier. * * @return the best options */ public String[] getBestClassifierOptions() { return (String[]) m_BestClassifierOptions.clone(); } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.setMinimumNumberInstances(m_NumFolds); return result; } /** * Generates the classifier. * * @param instances set of instances serving as training data * @throws Exception if the classifier has not been generated successfully */ public void buildClassifier(Instances instances) throws Exception { // can classifier handle the data? getCapabilities().testWithFail(instances); // remove instances with missing class Instances trainData = new Instances(instances); trainData.deleteWithMissingClass(); Instances trainDataCopy = new Instances(trainData); // Just in case base classifier is sensitive to order of data. if (!(m_Classifier instanceof OptionHandler)) { throw new IllegalArgumentException("Base classifier should be OptionHandler."); } m_InitOptions = ((OptionHandler)m_Classifier).getOptions(); m_BestPerformance = -99; m_NumAttributes = trainData.numAttributes(); Random random = new Random(m_Seed); trainData.randomize(random); m_TrainFoldSize = trainData.trainCV(m_NumFolds, 0).numInstances(); // Check whether there are any parameters to optimize if (m_CVParams.size() == 0) { m_Classifier.buildClassifier(trainDataCopy); m_BestClassifierOptions = m_InitOptions; return; } if (trainData.classAttribute().isNominal()) { trainData.stratify(m_NumFolds); } m_BestClassifierOptions = null; // Set up m_ClassifierOptions -- take getOptions() and remove // those being optimised. 
m_ClassifierOptions = ((OptionHandler)m_Classifier).getOptions(); for (int i = 0; i < m_CVParams.size(); i++) { Utils.getOption(((CVParameter)m_CVParams.elementAt(i)).m_ParamChar, m_ClassifierOptions); } findParamsByCrossValidation(0, trainData, random); String [] options = (String [])m_BestClassifierOptions.clone(); ((OptionHandler)m_Classifier).setOptions(options); m_Classifier.buildClassifier(trainDataCopy); } /** * Predicts the class distribution for the given test instance. * * @param instance the instance to be classified * @return the predicted class distribution * @throws Exception if an error occurred during the prediction */ public double[] distributionForInstance(Instance instance) throws Exception { return m_Classifier.distributionForInstance(instance); } /** * Adds a scheme parameter to the list of parameters to be set * by cross-validation * * @param cvParam the string representation of a scheme parameter. The * format is: <br> * param_char lower_bound upper_bound number_of_steps <br> * eg to search a parameter -P from 1 to 10 by increments of 1: <br> * P 1 10 10 <br> * @throws Exception if the parameter specifier is of the wrong format */ public void addCVParameter(String cvParam) throws Exception { CVParameter newCV = new CVParameter(cvParam); m_CVParams.addElement(newCV); } /** * Gets the scheme parameter with the given index. * * @param index the index for the parameter * @return the scheme parameter */ public String getCVParameter(int index) { if (m_CVParams.size() <= index) { return ""; } return ((CVParameter)m_CVParams.elementAt(index)).toString(); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String CVParametersTipText() { return "Sets the scheme parameters which are to be set "+ "by cross-validation.\n"+ "The format for each string should be:\n"+ "param_char lower_bound upper_bound number_of_steps\n"+ "eg to search a parameter -P from 1 to 10 by increments of 1:\n"+ " \"P 1 10 10\" "; } /** * Get method for CVParameters. * * @return the CVParameters */ public Object[] getCVParameters() { Object[] CVParams = m_CVParams.toArray(); String params[] = new String[CVParams.length]; for(int i=0; i<CVParams.length; i++) params[i] = CVParams[i].toString(); return params; } /** * Set method for CVParameters. * * @param params the CVParameters to use * @throws Exception if the setting of the CVParameters fails */ public void setCVParameters(Object[] params) throws Exception { Vector<CVParameter> backup = m_CVParams; m_CVParams = new Vector<CVParameter>(); for(int i=0; i<params.length; i++) { try{ addCVParameter((String)params[i]); } catch(Exception ex) { m_CVParams = backup; throw ex; } } } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String numFoldsTipText() { return "The number of folds used for cross-validation."; } /** * Gets the number of folds for the cross-validation. * * @return the number of folds for the cross-validation */ public int getNumFolds() { return m_NumFolds; } /** * Sets the number of folds for the cross-validation.
* * @param numFolds the number of folds for the cross-validation * @throws Exception if the parameter is illegal */ public void setNumFolds(int numFolds) throws Exception { if (numFolds < 2) { throw new IllegalArgumentException("CVParameterSelection: Number of cross-validation " + "folds must be at least 2."); } m_NumFolds = numFolds; } /** * Returns the type of graph this classifier * represents. * * @return the type of graph this classifier represents */ public int graphType() { if (m_Classifier instanceof Drawable) return ((Drawable)m_Classifier).graphType(); else return Drawable.NOT_DRAWABLE; } /** * Returns graph describing the classifier (if possible). * * @return the graph of the classifier in dotty format * @throws Exception if the classifier cannot be graphed */ public String graph() throws Exception { if (m_Classifier instanceof Drawable) return ((Drawable)m_Classifier).graph(); else throw new Exception("Classifier: " + m_Classifier.getClass().getName() + " " + Utils.joinOptions(m_BestClassifierOptions) + " cannot be graphed"); } /** * Returns description of the cross-validated classifier. * * @return description of the cross-validated classifier as a string */ public String toString() { if (m_InitOptions == null) return "CVParameterSelection: No model built yet."; String result = "Cross-validated Parameter selection.\n" + "Classifier: " + m_Classifier.getClass().getName() + "\n"; try { for (int i = 0; i < m_CVParams.size(); i++) { CVParameter cvParam = (CVParameter)m_CVParams.elementAt(i); result += "Cross-validation Parameter: '-" + cvParam.m_ParamChar + "'" + " ranged from " + cvParam.m_Lower + " to "; switch ((int)(cvParam.m_Lower - cvParam.m_Upper + 0.5)) { case 1: result += m_NumAttributes; break; case 2: result += m_TrainFoldSize; break; default: result += cvParam.m_Upper; break; } result += " with " + cvParam.m_Steps + " steps\n"; } } catch (Exception ex) { result += ex.getMessage(); } result += "Classifier Options: " + Utils.joinOptions(m_BestClassifierOptions) + "\n\n" + m_Classifier.toString(); return result; } /** * A concise description of the model. * * @return a concise description of the model */ public String toSummaryString() { String result = "Selected values: " + Utils.joinOptions(m_BestClassifierOptions); return result + '\n'; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. * * @param argv the options */ public static void main(String [] argv) { runClassifier(new CVParameterSelection(), argv); } }
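/*
 * Editor's note: a minimal, hypothetical sketch showing how the parameter strings
 * accepted by addCVParameter() above are used in practice. Here J48's pruning
 * confidence -C is searched from 0.1 to 0.5 in 5 steps; the dataset path
 * "data/labor.arff" and the class name are assumptions. All methods called are
 * defined in the class above or inherited from its WEKA superclasses.
 */
import weka.classifiers.meta.CVParameterSelection;
import weka.classifiers.trees.J48;
import weka.core.Instances;
import weka.core.Utils;
import weka.core.converters.ConverterUtils.DataSource;

public class CVParameterSelectionExample {

  public static void main(String[] args) throws Exception {
    Instances data = DataSource.read("data/labor.arff");
    data.setClassIndex(data.numAttributes() - 1);

    CVParameterSelection ps = new CVParameterSelection();
    ps.setClassifier(new J48());
    ps.setNumFolds(10);
    // "C 0.1 0.5 5": option -C, lower bound 0.1, upper bound 0.5, 5 steps.
    ps.addCVParameter("C 0.1 0.5 5");

    // Runs the cross-validated grid search, then rebuilds the base
    // classifier on all the data with the winning options.
    ps.buildClassifier(data);
    System.out.println("Best options: "
        + Utils.joinOptions(ps.getBestClassifierOptions()));
  }
}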
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/meta/ClassificationViaRegression.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * ClassificationViaRegression.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.SingleClassifierEnhancer; import weka.core.BatchPredictor; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.filters.Filter; import weka.filters.unsupervised.attribute.MakeIndicator; /** * <!-- globalinfo-start --> Class for doing classification using regression methods. Class is * binarized and one regression model is built for each class value. For more information, see, for * example<br/> * <br/> * E. Frank, Y. Wang, S. Inglis, G. Holmes, I.H. Witten (1998). Using model trees for * classification. Machine Learning. 32(1):63-76. * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;article{Frank1998, * author = {E. Frank and Y. Wang and S. Inglis and G. Holmes and I.H. Witten}, * journal = {Machine Learning}, * number = {1}, * pages = {63-76}, * title = {Using model trees for classification}, * volume = {32}, * year = {1998} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -W * Full name of base classifier. * (default: weka.classifiers.trees.M5P) * </pre> * * <pre> * * Options specific to classifier weka.classifiers.trees.M5P: * </pre> * * <pre> * -N * Use unpruned tree/rules * </pre> * * <pre> * -U * Use unsmoothed predictions * </pre> * * <pre> * -R * Build regression tree/rule rather than a model tree/rule * </pre> * * <pre> * -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf * (default 4) * </pre> * * <pre> * -L * Save instances at the nodes in * the tree (for visualization purposes) * </pre> * * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Len Trigg (trigg@cs.waikato.ac.nz) * @version $Revision$ */ public class ClassificationViaRegression extends SingleClassifierEnhancer implements TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 4500023123618669859L; /** The classifiers. (One for each class.) */ private Classifier[] m_Classifiers; /** The filters used to transform the class. */ private MakeIndicator[] m_ClassFilters; /** * Default constructor. 
*/ public ClassificationViaRegression() { this.m_Classifier = new weka.classifiers.trees.M5P(); } /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for doing classification using regression methods. Class is " + "binarized and one regression model is built for each class value. For more " + "information, see, for example\n\n" + this.getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed information about the * technical background of this class, e.g., paper reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "E. Frank and Y. Wang and S. Inglis and G. Holmes and I.H. Witten"); result.setValue(Field.YEAR, "1998"); result.setValue(Field.TITLE, "Using model trees for classification"); result.setValue(Field.JOURNAL, "Machine Learning"); result.setValue(Field.VOLUME, "32"); result.setValue(Field.NUMBER, "1"); result.setValue(Field.PAGES, "63-76"); return result; } /** * String describing default classifier. * * @return the default classifier classname */ @Override protected String defaultClassifierString() { return "weka.classifiers.trees.M5P"; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); // class result.disableAllClasses(); result.disableAllClassDependencies(); result.enable(Capability.NOMINAL_CLASS); return result; } /** * Builds the classifiers. * * @param insts * the training data. * @throws Exception * if a classifier can't be built */ @Override public void buildClassifier(Instances insts) throws Exception { Instances newInsts; // can classifier handle the data? this.getCapabilities().testWithFail(insts); // remove instances with missing class insts = new Instances(insts); insts.deleteWithMissingClass(); this.m_Classifiers = AbstractClassifier.makeCopies(this.m_Classifier, insts.numClasses()); this.m_ClassFilters = new MakeIndicator[insts.numClasses()]; for (int i = 0; i < insts.numClasses(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } this.m_ClassFilters[i] = new MakeIndicator(); this.m_ClassFilters[i].setAttributeIndex("" + (insts.classIndex() + 1)); this.m_ClassFilters[i].setValueIndex(i); this.m_ClassFilters[i].setNumeric(true); this.m_ClassFilters[i].setInputFormat(insts); newInsts = Filter.useFilter(insts, this.m_ClassFilters[i]); this.m_Classifiers[i].buildClassifier(newInsts); } } /** * Returns the distribution for an instance. 
* * @param inst * the instance to get the distribution for * @return the computed distribution * @throws Exception * if the distribution can't be computed successfully */ @Override public double[] distributionForInstance(final Instance inst) throws Exception { double[] probs = new double[inst.numClasses()]; Instance newInst; double sum = 0; for (int i = 0; i < inst.numClasses(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } this.m_ClassFilters[i].input(inst); this.m_ClassFilters[i].batchFinished(); newInst = this.m_ClassFilters[i].output(); probs[i] = this.m_Classifiers[i].classifyInstance(newInst); if (Utils.isMissingValue(probs[i])) { return new double[inst.numClasses()]; // Leave instance unclassified } if (probs[i] > 1) { probs[i] = 1; } if (probs[i] < 0) { probs[i] = 0; } sum += probs[i]; } if (sum != 0) { Utils.normalize(probs, sum); } return probs; } /** * Return whether this classifier configuration yields more efficient batch prediction * * @return the base classifier's flag indicating whether it can do batch prediction efficiently */ @Override public boolean implementsMoreEfficientBatchPrediction() { if (!(this.m_Classifier instanceof BatchPredictor)) { return false; } else { return ((BatchPredictor) this.m_Classifier).implementsMoreEfficientBatchPrediction(); } } /** * Returns predictions for a whole set of instances. * * @param insts * the instances to make predictions for * @return the 2D array with results */ @Override public double[][] distributionsForInstances(final Instances insts) throws Exception { double[][] probs; if (this.m_Classifier instanceof BatchPredictor) { probs = new double[insts.numInstances()][insts.numClasses()]; for (int i = 0; i < insts.numClasses(); i++) { double[][] p = ((BatchPredictor) this.m_Classifiers[i]).distributionsForInstances(Filter.useFilter(insts, this.m_ClassFilters[i])); for (int j = 0; j < p.length; j++) { if (p[j][0] > 1) { p[j][0] = 1; } if (p[j][0] < 0) { p[j][0] = 0; } probs[j][i] = p[j][0]; } } for (int i = 0; i < probs.length; i++) { Utils.normalize(probs[i]); } return probs; } else { return super.distributionsForInstances(insts); } } /** * Prints the classifiers. * * @return a string representation of the classifier */ @Override public String toString() { if (this.m_Classifiers == null) { return "Classification via Regression: No model built yet."; } StringBuffer text = new StringBuffer(); text.append("Classification via Regression\n\n"); for (int i = 0; i < this.m_Classifiers.length; i++) { text.append("Classifier for class with index " + i + ":\n\n"); text.append(this.m_Classifiers[i].toString() + "\n\n"); } return text.toString(); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. * * @param argv * the options for the learner */ public static void main(final String[] argv) { runClassifier(new ClassificationViaRegression(), argv); } }
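/*
 * Editor's note: a brief, hypothetical sketch of the one-regressor-per-class
 * scheme implemented above. M5P is the default base learner, but any regression
 * scheme can be substituted. The dataset path "data/iris.arff" and the class
 * name are assumptions.
 */
import weka.classifiers.meta.ClassificationViaRegression;
import weka.classifiers.trees.M5P;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class ClassificationViaRegressionExample {

  public static void main(String[] args) throws Exception {
    Instances data = DataSource.read("data/iris.arff");
    data.setClassIndex(data.numAttributes() - 1);

    ClassificationViaRegression cvr = new ClassificationViaRegression();
    cvr.setClassifier(new M5P()); // one M5P model tree is built per class value
    cvr.buildClassifier(data);

    // Predicted class distribution for the first instance: the per-class
    // regression outputs, clipped to [0,1] and normalised to sum to one.
    Instance first = data.instance(0);
    double[] dist = cvr.distributionForInstance(first);
    for (int i = 0; i < dist.length; i++) {
      System.out.println(data.classAttribute().value(i) + ": " + dist[i]);
    }
  }
}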
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/meta/CostSensitiveClassifier.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * CostSensitiveClassifier.java * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import java.io.BufferedReader; import java.io.File; import java.io.FileReader; import java.io.StringReader; import java.io.StringWriter; import java.util.Collections; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.Classifier; import weka.classifiers.CostMatrix; import weka.classifiers.RandomizableSingleClassifierEnhancer; import weka.core.*; import weka.core.Capabilities.Capability; /** <!-- globalinfo-start --> * A metaclassifier that makes its base classifier cost-sensitive. Two methods can be used to introduce cost-sensitivity: reweighting training instances according to the total cost assigned to each class; or predicting the class with minimum expected misclassification cost (rather than the most likely class). Performance can often be improved by using a Bagged classifier to improve the probability estimates of the base classifier. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -M * Minimize expected misclassification cost. Default is to * reweight training instances according to costs per class</pre> * * <pre> -C &lt;cost file name&gt; * File name of a cost matrix to use. If this is not supplied, * a cost matrix will be loaded on demand. The name of the * on-demand file is the relation name of the training data * plus ".cost", and the path to the on-demand file is * specified with the -N option.</pre> * * <pre> -N &lt;directory&gt; * Name of a directory to search for cost files when loading * costs on demand (default current directory).</pre> * * <pre> -cost-matrix &lt;matrix&gt; * The cost matrix in Matlab single line format.</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. 
* (default: weka.classifiers.rules.ZeroR)</pre> * * <pre> * Options specific to classifier weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * Options after -- are passed to the designated classifier.<p> * * @author Len Trigg (len@reeltwo.com) * @version $Revision$ */ public class CostSensitiveClassifier extends RandomizableSingleClassifierEnhancer implements OptionHandler, Drawable, BatchPredictor { /** for serialization */ static final long serialVersionUID = -110658209263002404L; /** load cost matrix on demand */ public static final int MATRIX_ON_DEMAND = 1; /** use explicit cost matrix */ public static final int MATRIX_SUPPLIED = 2; /** Specify possible sources of the cost matrix */ public static final Tag [] TAGS_MATRIX_SOURCE = { new Tag(MATRIX_ON_DEMAND, "Load cost matrix on demand"), new Tag(MATRIX_SUPPLIED, "Use explicit cost matrix") }; /** Indicates the current cost matrix source */ protected int m_MatrixSource = MATRIX_ON_DEMAND; /** * The directory used when loading cost files on demand, null indicates * current directory */ protected File m_OnDemandDirectory = new File(System.getProperty("user.dir")); /** The name of the cost file, for command line options */ protected String m_CostFile; /** The cost matrix */ protected CostMatrix m_CostMatrix = new CostMatrix(1); /** * True if the costs should be used by selecting the minimum expected * cost (false means weight training data by the costs) */ protected boolean m_MinimizeExpectedCost; /** * String describing default classifier. * * @return the default classifier classname */ protected String defaultClassifierString() { return "weka.classifiers.rules.ZeroR"; } /** * Default constructor. */ public CostSensitiveClassifier() { m_Classifier = new weka.classifiers.rules.ZeroR(); } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<Option>(4); newVector.addElement(new Option( "\tMinimize expected misclassification cost. Default is to\n" +"\treweight training instances according to costs per class", "M", 0, "-M")); newVector.addElement(new Option( "\tFile name of a cost matrix to use. If this is not supplied,\n" +"\ta cost matrix will be loaded on demand. The name of the\n" +"\ton-demand file is the relation name of the training data\n" +"\tplus \".cost\", and the path to the on-demand file is\n" +"\tspecified with the -N option.", "C", 1, "-C <cost file name>")); newVector.addElement(new Option( "\tName of a directory to search for cost files when loading\n" +"\tcosts on demand (default current directory).", "N", 1, "-N <directory>")); newVector.addElement(new Option( "\tThe cost matrix in Matlab single line format.", "cost-matrix", 1, "-cost-matrix <matrix>")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -M * Minimize expected misclassification cost. Default is to * reweight training instances according to costs per class</pre> * * <pre> -C &lt;cost file name&gt; * File name of a cost matrix to use. If this is not supplied, * a cost matrix will be loaded on demand. 
The name of the * on-demand file is the relation name of the training data * plus ".cost", and the path to the on-demand file is * specified with the -N option.</pre> * * <pre> -N &lt;directory&gt; * Name of a directory to search for cost files when loading * costs on demand (default current directory).</pre> * * <pre> -cost-matrix &lt;matrix&gt; * The cost matrix in Matlab single line format.</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.rules.ZeroR)</pre> * * <pre> * Options specific to classifier weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * Options after -- are passed to the designated classifier.<p> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { setMinimizeExpectedCost(Utils.getFlag('M', options)); String costFile = Utils.getOption('C', options); if (costFile.length() != 0) { try { setCostMatrix(new CostMatrix(new BufferedReader( new FileReader(costFile)))); } catch (Exception ex) { // now flag as possible old format cost matrix. Delay cost matrix // loading until buildClassifier is called setCostMatrix(null); } setCostMatrixSource(new SelectedTag(MATRIX_SUPPLIED, TAGS_MATRIX_SOURCE)); m_CostFile = costFile; } else { setCostMatrixSource(new SelectedTag(MATRIX_ON_DEMAND, TAGS_MATRIX_SOURCE)); } String demandDir = Utils.getOption('N', options); if (demandDir.length() != 0) { setOnDemandDirectory(new File(demandDir)); } String cost_matrix = Utils.getOption("cost-matrix", options); if (cost_matrix.length() != 0) { StringWriter writer = new StringWriter(); CostMatrix.parseMatlab(cost_matrix).write(writer); setCostMatrix(new CostMatrix(new StringReader(writer.toString()))); setCostMatrixSource(new SelectedTag(MATRIX_SUPPLIED, TAGS_MATRIX_SOURCE)); } super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the Classifier. * * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { Vector<String> options = new Vector<String>(); if (m_MatrixSource == MATRIX_SUPPLIED) { if (m_CostFile != null) { options.add("-C"); options.add("" + m_CostFile); } else { options.add("-cost-matrix"); options.add(getCostMatrix().toMatlab()); } } else { options.add("-N"); options.add("" + getOnDemandDirectory()); } if (getMinimizeExpectedCost()) { options.add("-M"); } Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "A metaclassifier that makes its base classifier cost-sensitive. " + "Two methods can be used to introduce cost-sensitivity: reweighting " + "training instances according to the total cost assigned to each " + "class; or predicting the class with minimum expected " + "misclassification cost (rather than the most likely class). 
" + "Performance can often be " + "improved by using a Bagged classifier to improve the probability " + "estimates of the base classifier."; } /** * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String costMatrixSourceTipText() { return "Sets where to get the cost matrix. The two options are" + "to use the supplied explicit cost matrix (the setting of the " + "costMatrix property), or to load a cost matrix from a file when " + "required (this file will be loaded from the directory set by the " + "onDemandDirectory property and will be named relation_name" + CostMatrix.FILE_EXTENSION + ")."; } /** * Gets the source location method of the cost matrix. Will be one of * MATRIX_ON_DEMAND or MATRIX_SUPPLIED. * * @return the cost matrix source. */ public SelectedTag getCostMatrixSource() { return new SelectedTag(m_MatrixSource, TAGS_MATRIX_SOURCE); } /** * Sets the source location of the cost matrix. Values other than * MATRIX_ON_DEMAND or MATRIX_SUPPLIED will be ignored. * * @param newMethod the cost matrix location method. */ public void setCostMatrixSource(SelectedTag newMethod) { if (newMethod.getTags() == TAGS_MATRIX_SOURCE) { m_MatrixSource = newMethod.getSelectedTag().getID(); } } /** * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String onDemandDirectoryTipText() { return "Sets the directory where cost files are loaded from. This option " + "is used when the costMatrixSource is set to \"On Demand\"."; } /** * Returns the directory that will be searched for cost files when * loading on demand. * * @return The cost file search directory. */ public File getOnDemandDirectory() { return m_OnDemandDirectory; } /** * Sets the directory that will be searched for cost files when * loading on demand. * * @param newDir The cost file search directory. */ public void setOnDemandDirectory(File newDir) { if (newDir.isDirectory()) { m_OnDemandDirectory = newDir; } else { m_OnDemandDirectory = new File(newDir.getParent()); } m_MatrixSource = MATRIX_ON_DEMAND; } /** * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String minimizeExpectedCostTipText() { return "Sets whether the minimum expected cost criteria will be used. If " + "this is false, the training data will be reweighted according to the " + "costs assigned to each class. If true, the minimum expected cost " + "criteria will be used."; } /** * Gets the value of MinimizeExpectedCost. * * @return Value of MinimizeExpectedCost. */ public boolean getMinimizeExpectedCost() { return m_MinimizeExpectedCost; } /** * Set the value of MinimizeExpectedCost. * * @param newMinimizeExpectedCost Value to assign to MinimizeExpectedCost. */ public void setMinimizeExpectedCost(boolean newMinimizeExpectedCost) { m_MinimizeExpectedCost = newMinimizeExpectedCost; } /** * Gets the classifier specification string, which contains the class name of * the classifier and any options to the classifier * * @return the classifier string. */ protected String getClassifierSpec() { Classifier c = getClassifier(); if (c instanceof OptionHandler) { return c.getClass().getName() + " " + Utils.joinOptions(((OptionHandler)c).getOptions()); } return c.getClass().getName(); } /** * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String costMatrixTipText() { return "Sets the cost matrix explicitly. 
This matrix is used if the " + "costMatrixSource property is set to \"Supplied\"."; } /** * Gets the misclassification cost matrix. * * @return the cost matrix */ public CostMatrix getCostMatrix() { return m_CostMatrix; } /** * Sets the misclassification cost matrix. * * @param newCostMatrix the cost matrix */ public void setCostMatrix(CostMatrix newCostMatrix) { m_CostMatrix = newCostMatrix; m_MatrixSource = MATRIX_SUPPLIED; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); // class result.disableAllClasses(); result.disableAllClassDependencies(); result.enable(Capability.NOMINAL_CLASS); return result; } /** * Builds the model of the base learner. * * @param data the training data * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances data) throws Exception { // can classifier handle the data? getCapabilities().testWithFail(data); // remove instances with missing class data = new Instances(data); data.deleteWithMissingClass(); if (m_Classifier == null) { throw new Exception("No base classifier has been set!"); } if (m_MatrixSource == MATRIX_ON_DEMAND) { String costName = data.relationName() + CostMatrix.FILE_EXTENSION; File costFile = new File(getOnDemandDirectory(), costName); if (!costFile.exists()) { throw new Exception("On-demand cost file doesn't exist: " + costFile); } setCostMatrix(new CostMatrix(new BufferedReader( new FileReader(costFile)))); } else if (m_CostMatrix == null) { // try loading an old format cost file m_CostMatrix = new CostMatrix(data.numClasses()); m_CostMatrix.readOldFormat(new BufferedReader( new FileReader(m_CostFile))); } if (!m_MinimizeExpectedCost) { Random random = null; if (!(m_Classifier instanceof WeightedInstancesHandler)) { random = new Random(m_Seed); } data = m_CostMatrix.applyCostMatrix(data, random); } m_Classifier.buildClassifier(data); } /** * Returns class probabilities. When minimum expected cost approach is chosen, * returns probability one for class with the minimum expected misclassification * cost. Otherwise it returns the probability distribution returned by * the base classifier. * * @param instance the instance to be classified * @return the computed distribution for the given instance * @throws Exception if instance could not be classified * successfully */ public double[] distributionForInstance(Instance instance) throws Exception { if (!m_MinimizeExpectedCost) { return m_Classifier.distributionForInstance(instance); } else { return convertDistribution(m_Classifier.distributionForInstance(instance), instance); } } /** * Convert distribution using minimum expected cost approach. The incoming * array is modified and returned! * * @param pred the predicted distribution * @param instance the instance * @return the modified distribution */ protected double[] convertDistribution(double[] pred, Instance instance) throws Exception { double [] costs = m_CostMatrix.expectedCosts(pred, instance); // This is probably not ideal int classIndex = Utils.minIndex(costs); for (int i = 0; i < pred.length; i++) { if (i == classIndex) { pred[i] = 1.0; } else { pred[i] = 0.0; } } return pred; } /** * Batch scoring method. Calls the appropriate method for the base learner if * it implements BatchPredictor. Otherwise it simply calls the * distributionForInstance() method repeatedly. 
* * @param insts the instances to get predictions for * @return an array of probability distributions, one for each instance * @throws Exception if a problem occurs */ public double[][] distributionsForInstances(Instances insts) throws Exception { if (getClassifier() instanceof BatchPredictor) { double[][] dists = ((BatchPredictor) getClassifier()).distributionsForInstances(insts); if (!m_MinimizeExpectedCost) { return dists; } else { for (int i = 0; i < dists.length; i++) { dists[i] = convertDistribution(dists[i], insts.instance(i)); } return dists; } } else { double[][] result = new double[insts.numInstances()][insts.numClasses()]; for (int i = 0; i < insts.numInstances(); i++) { result[i] = distributionForInstance(insts.instance(i)); } return result; } } /** * Tool tip text for this property * * @return the tool tip for this property */ public String batchSizeTipText() { return "Batch size to use if base learner is a BatchPredictor"; } /** * Set the batch size to use. Gets passed through to the base learner if it * implements BatchPredictor. Otherwise it is just ignored. * * @param size the batch size to use */ public void setBatchSize(String size) { if (getClassifier() instanceof BatchPredictor) { ((BatchPredictor) getClassifier()).setBatchSize(size); } else { super.setBatchSize(size); } } /** * Gets the preferred batch size from the base learner if it implements * BatchPredictor. Returns 1 as the preferred batch size otherwise. * * @return the batch size to use */ public String getBatchSize() { if (getClassifier() instanceof BatchPredictor) { return ((BatchPredictor) getClassifier()).getBatchSize(); } else { return super.getBatchSize(); } } /** * Returns true if the base classifier implements BatchPredictor and is able * to generate batch predictions efficiently * * @return true if the base classifier can generate batch predictions * efficiently */ public boolean implementsMoreEfficientBatchPrediction() { if (!(getClassifier() instanceof BatchPredictor)) { return false; } return ((BatchPredictor) getClassifier()) .implementsMoreEfficientBatchPrediction(); } /** * Returns the type of graph this classifier * represents. * * @return the type of graph this classifier represents */ public int graphType() { if (m_Classifier instanceof Drawable) return ((Drawable)m_Classifier).graphType(); else return Drawable.NOT_DRAWABLE; } /** * Returns graph describing the classifier (if possible). * * @return the graph of the classifier in dotty format * @throws Exception if the classifier cannot be graphed */ public String graph() throws Exception { if (m_Classifier instanceof Drawable) return ((Drawable)m_Classifier).graph(); else throw new Exception("Classifier: " + getClassifierSpec() + " cannot be graphed"); } /** * Output a representation of this classifier * * @return a string representation of the classifier */ public String toString() { if (m_Classifier == null) { return "CostSensitiveClassifier: No model built yet."; } String result = "CostSensitiveClassifier using "; if (m_MinimizeExpectedCost) { result += "minimized expected misclassification cost\n"; } else { result += "reweighted training instances\n"; } result += "\n" + getClassifierSpec() + "\n\nClassifier Model\n" + m_Classifier.toString() + "\n\nCost Matrix\n" + m_CostMatrix.toString(); return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class.
* * @param argv should contain the following arguments: * -t training file [-T test file] [-c class index] */ public static void main(String [] argv) { runClassifier(new CostSensitiveClassifier(), argv); } }
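/*
 * Editor's note: a minimal, hypothetical sketch for the class above, using the
 * Matlab single-line matrix format that setOptions() parses via
 * CostMatrix.parseMatlab(). The 2x2 matrix makes misclassifying true class 1
 * (row 1, column 0) five times as costly as the reverse error. The dataset path
 * "data/credit-g.arff" and the class name are assumptions, and the example
 * presumes a two-class problem.
 */
import weka.classifiers.CostMatrix;
import weka.classifiers.meta.CostSensitiveClassifier;
import weka.classifiers.trees.J48;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class CostSensitiveExample {

  public static void main(String[] args) throws Exception {
    Instances data = DataSource.read("data/credit-g.arff");
    data.setClassIndex(data.numAttributes() - 1);

    CostSensitiveClassifier csc = new CostSensitiveClassifier();
    csc.setClassifier(new J48());
    // Rows are true classes, columns are predicted classes; the diagonal
    // (correct predictions) carries zero cost.
    csc.setCostMatrix(CostMatrix.parseMatlab("[0.0 1.0; 5.0 0.0]"));
    // true: predict the class with minimum expected cost;
    // false (the default): reweight the training instances instead.
    csc.setMinimizeExpectedCost(true);

    csc.buildClassifier(data);
    System.out.println(csc);
  }
}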
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/meta/FilteredClassifier.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * FilteredClassifier.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import weka.classifiers.IterativeClassifier; import weka.classifiers.RandomizableSingleClassifierEnhancer; import weka.classifiers.SingleClassifierEnhancer; import weka.core.*; import weka.core.Capabilities.Capability; import weka.filters.Filter; import weka.filters.supervised.attribute.AttributeSelection; import weka.filters.unsupervised.attribute.Reorder; import java.util.*; /** * <!-- globalinfo-start --> Class for running an arbitrary classifier on data * that has been passed through an arbitrary filter. Like the classifier, the * structure of the filter is based exclusively on the training data and test * instances will be processed by the filter without changing their structure. * If unequal instance weights or attribute weights are present, and the filter * or the classifier are unable to deal with them, the instances and/or attributes * are resampled with replacement based on the weights before they are passed * to the filter or the classifier (as appropriate). * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -F &lt;filter specification&gt; * Full class name of filter to use, followed * by filter options. * default: "weka.filters.supervised.attribute.Discretize -R first-last -precision 6" * </pre> * * <pre> * -W &lt;classifier name&gt; * Full name of base classifier. * (default: weka.classifiers.trees.J48) * </pre> * * <pre> -S num * The random number seed to be used (default 1). </pre> * * -doNotCheckForModifiedClassAttribute <br> * If this is set, the classifier will not check whether the filter modifies the class attribute (use with caution). * <p> * * -output-debug-info <br> * If set, classifier is run in debug mode and may output additional info to * the console. * <p> * * -do-not-check-capabilities <br> * If set, classifier capabilities are not checked before classifier is built * (use with caution). * <p> * * -num-decimal-places <br> * The number of decimal places for the output of numbers in the model. * <p> * * -batch-size <br> * The desired batch size for batch prediction. * <p> * * <pre> * Options specific to classifier weka.classifiers.trees.J48: * </pre> * * <pre> * -U * Use unpruned tree. * </pre> * * <pre> * -C &lt;pruning confidence&gt; * Set confidence threshold for pruning. * (default 0.25) * </pre> * * <pre> * -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf. * (default 2) * </pre> * * <pre> * -R * Use reduced error pruning. * </pre> * * <pre> * -N &lt;number of folds&gt; * Set number of folds for reduced error * pruning. One fold is used as pruning set. * (default 3) * </pre> * * <pre> * -B * Use binary splits only. * </pre> * * <pre> * -S * Don't perform subtree raising. 
* </pre> * * <pre> * -L * Do not clean up after the tree has been built. * </pre> * * <pre> * -A * Laplace smoothing for predicted probabilities. * </pre> * * <pre> * -Q &lt;seed&gt; * Seed for random data shuffling (default 1). * </pre> * * <!-- options-end --> * * @author Len Trigg (trigg@cs.waikato.ac.nz) * @version $Revision$ */ public class FilteredClassifier extends RandomizableSingleClassifierEnhancer implements Drawable, PartitionGenerator, IterativeClassifier, BatchPredictor, WeightedInstancesHandler, WeightedAttributesHandler { /** for serialization */ static final long serialVersionUID = -4523450618538717400L; /** The filter */ protected Filter m_Filter = new AttributeSelection(); /** The instance structure of the filtered instances */ protected Instances m_FilteredInstances; /** Flag that can be set to true if class attribute is not to be checked for modifications by the filter. */ protected boolean m_DoNotCheckForModifiedClassAttribute = false; /** If the attributes are resampled, we store the filter for this */ protected Reorder m_ReorderOriginal; protected Reorder m_ReorderFiltered; /** * Returns a string describing this classifier * * @return a description of the classifier suitable for displaying in the * explorer/experimenter gui */ public String globalInfo() { return "Class for running an arbitrary classifier on data that has been passed " + "through an arbitrary filter. Like the classifier, the structure of the filter " + "is based exclusively on the training data and test instances will be processed " + "by the filter without changing their structure.\n\n" + "If unequal instance weights or attribute weights are present, and the filter " + "or the classifier are unable to deal with them, the instances and/or attributes " + "are resampled with replacement based on the weights before they are passed " + "to the filter or the classifier (as appropriate)."; } /** * String describing default classifier. * * @return the default classifier classname */ protected String defaultClassifierString() { return "weka.classifiers.trees.J48"; } /** * String describing default filter. */ protected String defaultFilterString() { return "weka.filters.supervised.attribute.Discretize -R first-last -precision 6"; } /** * Default constructor. */ public FilteredClassifier() { m_Classifier = new weka.classifiers.trees.J48(); m_Filter = new weka.filters.supervised.attribute.Discretize(); } /** * Returns the type of graph this classifier represents. * * @return the graph type of this classifier */ public int graphType() { if (m_Classifier instanceof Drawable) return ((Drawable) m_Classifier).graphType(); else return Drawable.NOT_DRAWABLE; } /** * Returns graph describing the classifier (if possible). * * @return the graph of the classifier in dotty format * @throws Exception if the classifier cannot be graphed */ public String graph() throws Exception { if (m_Classifier instanceof Drawable) return ((Drawable) m_Classifier).graph(); else throw new Exception( "Classifier: " + getClassifierSpec() + " cannot be graphed"); } /** * Builds the classifier to generate a partition. (If the base classifier * supports this.) */ public void generatePartition(Instances data) throws Exception { if (m_Classifier instanceof PartitionGenerator) buildClassifier(data); else throw new Exception( "Classifier: " + getClassifierSpec() + " cannot generate a partition"); } /** * Computes an array that has a value for each element in the partition. (If * the base classifier supports this.) 
*/ public double[] getMembershipValues(Instance inst) throws Exception { if (m_Classifier instanceof PartitionGenerator) { if (m_ReorderOriginal != null) { m_ReorderOriginal.input(inst); inst = m_ReorderOriginal.output(); } Instance newInstance = filterInstance(inst); if (newInstance == null) { double[] unclassified = new double[numElements()]; for (int i = 0; i < unclassified.length; i++) { unclassified[i] = Utils.missingValue(); } return unclassified; } else { if (m_ReorderFiltered != null) { m_ReorderFiltered.input(newInstance); newInstance = m_ReorderFiltered.output(); } return ((PartitionGenerator) m_Classifier).getMembershipValues(newInstance); } } else throw new Exception( "Classifier: " + getClassifierSpec() + " cannot generate a partition"); } /** * Returns the number of elements in the partition. (If the base classifier * supports this.) */ public int numElements() throws Exception { if (m_Classifier instanceof PartitionGenerator) return ((PartitionGenerator) m_Classifier).numElements(); else throw new Exception( "Classifier: " + getClassifierSpec() + " cannot generate a partition"); } /** * Initializes an iterative classifier. (If the base classifier supports * this.) * * @param data the instances to be used in induction * @exception Exception if the model cannot be initialized */ public void initializeClassifier(Instances data) throws Exception { if (m_Classifier == null) { throw new Exception("No base classifier has been set!"); } getCapabilities().testWithFail(data); if (m_Classifier instanceof IterativeClassifier) { Random r = (data.numInstances() > 0) ? data.getRandomNumberGenerator(getSeed()) : new Random(getSeed()); data = setUp(data, r); if (!data.allInstanceWeightsIdentical() && !(m_Classifier instanceof WeightedInstancesHandler)) { data = data.resampleWithWeights(r); // The filter may have assigned weights. } if (!data.allAttributeWeightsIdentical() && !(m_Classifier instanceof WeightedAttributesHandler)) { data = resampleAttributes(data, false, r); } // can classifier handle the data? getClassifier().getCapabilities().testWithFail(data); if (m_Classifier instanceof Randomizable) { ((Randomizable)m_Classifier).setSeed(r.nextInt()); } ((IterativeClassifier) m_Classifier).initializeClassifier(data); } else { throw new Exception("Classifier: " + getClassifierSpec() + " is not an IterativeClassifier"); } } /** * Performs one iteration. (If the base classifier supports this.) * * @return false if no further iterations could be performed, true otherwise * @exception Exception if this iteration fails for unexpected reasons */ public boolean next() throws Exception { if (m_Classifier instanceof IterativeClassifier) return ((IterativeClassifier) m_Classifier).next(); else throw new Exception("Classifier: " + getClassifierSpec() + " is not an IterativeClassifier"); } /** * Signal end of iterating, useful for any house-keeping/cleanup (If the base * classifier supports this.) * * @exception Exception if cleanup fails */ public void done() throws Exception { if (m_Classifier instanceof IterativeClassifier) ((IterativeClassifier) m_Classifier).done(); else throw new Exception("Classifier: " + getClassifierSpec() + " is not an IterativeClassifier"); } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<Option>(1); newVector.addElement(new Option( "\tFull class name of filter to use, followed\n" + "\tby filter options.\n" + "\tdefault: \"" + defaultFilterString() + "\"", "F", 1, "-F <filter specification>")); newVector.addElement(new Option( "\tIf set, classifier will not check whether the filter modifies the class (use with caution).", "doNotCheckForModifiedClassAttribute", 0, "-doNotCheckForModifiedClassAttribute")); newVector.addAll(Collections.list(super.listOptions())); if (getFilter() instanceof OptionHandler) { newVector.addElement(new Option("", "", 0, "\nOptions specific to filter " + getFilter().getClass().getName() + ":")); newVector .addAll(Collections.list(((OptionHandler) getFilter()).listOptions())); } return newVector.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -F &lt;filter specification&gt; * Full class name of filter to use, followed * by filter options. * default: "weka.filters.supervised.attribute.Discretize -R first-last -precision 6" * </pre> * * <pre> * -W &lt;classifier name&gt; * Full name of base classifier. * (default: weka.classifiers.trees.J48) * </pre> * * <pre> -S num * The random number seed to be used. </pre> * * -doNotCheckForModifiedClassAttribute <br> * If this is set, the classifier will not check whether the filter modifies the class attribute (use with caution). * <p> * * -output-debug-info <br> * If set, classifier is run in debug mode and may output additional info to * the console. * <p> * * -do-not-check-capabilities <br> * If set, classifier capabilities are not checked before classifier is built * (use with caution). * <p> * * -num-decimal-places <br> * The number of decimal places for the output of numbers in the model. * <p> * * -batch-size <br> * The desired batch size for batch prediction. * <p> * * <pre> * Options specific to classifier weka.classifiers.trees.J48: * </pre> * * <pre> * -U * Use unpruned tree. * </pre> * * <pre> * -C &lt;pruning confidence&gt; * Set confidence threshold for pruning. * (default 0.25) * </pre> * * <pre> * -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf. * (default 2) * </pre> * * <pre> * -R * Use reduced error pruning. * </pre> * * <pre> * -N &lt;number of folds&gt; * Set number of folds for reduced error * pruning. One fold is used as pruning set. * (default 3) * </pre> * * <pre> * -B * Use binary splits only. * </pre> * * <pre> * -S * Don't perform subtree raising. * </pre> * * <pre> * -L * Do not clean up after the tree has been built. * </pre> * * <pre> * -A * Laplace smoothing for predicted probabilities. * </pre> * * <pre> * -Q &lt;seed&gt; * Seed for random data shuffling (default 1).
* </pre> * * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String filterString = Utils.getOption('F', options); if (filterString.length() <= 0) { filterString = defaultFilterString(); } String[] filterSpec = Utils.splitOptions(filterString); if (filterSpec.length == 0) { throw new IllegalArgumentException("Invalid filter specification string"); } String filterName = filterSpec[0]; filterSpec[0] = ""; setFilter((Filter) Utils.forName(Filter.class, filterName, filterSpec)); setDoNotCheckForModifiedClassAttribute(Utils.getFlag("doNotCheckForModifiedClassAttribute", options)); super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String doNotCheckForModifiedClassAttributeTipText() { return "Turns off check for modified class attribute - use with caution."; } /** * Returns true if the classifier does not check whether the class attribute has been modified by the filter. */ public boolean getDoNotCheckForModifiedClassAttribute() { return m_DoNotCheckForModifiedClassAttribute; } /** * Use this method to specify whether the classifier checks whether the class attribute has been modified by the filter. */ public void setDoNotCheckForModifiedClassAttribute(boolean flag) { m_DoNotCheckForModifiedClassAttribute = flag; } /** * Gets the current settings of the Classifier. * * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { Vector<String> options = new Vector<String>(); options.add("-F"); options.add("" + getFilterSpec()); if (getDoNotCheckForModifiedClassAttribute()) { options.add("-doNotCheckForModifiedClassAttribute"); } Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String filterTipText() { return "The filter to be used."; } /** * Sets the filter * * @param filter the filter with all options set. */ public void setFilter(Filter filter) { m_Filter = filter; } /** * Gets the filter used. * * @return the filter */ public Filter getFilter() { return m_Filter; } /** * Gets the filter specification string, which contains the class name of the * filter and any options to the filter * * @return the filter string. */ protected String getFilterSpec() { Filter c = getFilter(); if (c instanceof OptionHandler) { return c.getClass().getName() + " " + Utils.joinOptions(((OptionHandler) c).getOptions()); } return c.getClass().getName(); } /** * Returns default capabilities of the classifier.
* * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result; if (getFilter() == null) result = super.getCapabilities(); else { result = getFilter().getCapabilities(); // By default, check that classifier can handle the class attribute if (!getDoNotCheckForModifiedClassAttribute()) { Capabilities classes = super.getCapabilities().getClassCapabilities(); Iterator<Capability> iter = classes.capabilities(); result.disableAllClasses(); while (iter.hasNext()) { result.enable(iter.next()); } } } // the filtered classifier always needs a class result.disable(Capability.NO_CLASS); // set dependencies for (Capability cap : Capability.values()) result.enableDependency(cap); result.setOwner(this); return result; } /** * Sets up the filter and runs checks. * * @return filtered data */ protected Instances setUp(Instances data, Random random) throws Exception { m_ReorderOriginal = null; m_ReorderFiltered = null; if (data.allInstanceWeightsIdentical() || (m_Filter instanceof WeightedInstancesHandler)) { data = new Instances(data); } else { data = data.resampleWithWeights(random); } if (!data.allAttributeWeightsIdentical() && !(m_Filter instanceof WeightedAttributesHandler)) { data = resampleAttributes(data, true, random); } /* * String fname = m_Filter.getClass().getName(); fname = * fname.substring(fname.lastIndexOf('.') + 1); util.Timer t = * util.Timer.getTimer("FilteredClassifier::" + fname); t.start(); */ Attribute classAttribute = (Attribute)data.classAttribute().copy(); if (m_Filter instanceof Randomizable) { ((Randomizable)m_Filter).setSeed(random.nextInt()); } m_Filter.setInputFormat(data); // filter capabilities are checked here data = Filter.useFilter(data, m_Filter); if ((!classAttribute.equals(data.classAttribute())) && (!m_DoNotCheckForModifiedClassAttribute)) { throw new IllegalArgumentException("Cannot proceed: " + getFilterSpec() + " has modified the class attribute!"); } // t.stop(); m_FilteredInstances = data.stringFreeStructure(); return data; } /** * Resamples set of attributes. * * @param data the data to be filtered * @param original whether the original or the filtered data are to be resampled * @param random the random number generator to use */ protected Instances resampleAttributes(Instances data, boolean original, Random random) throws Exception { // Need to sample attributes so that weights are represented by sample int nAtt = (data.classIndex() >= 0) ? 
data.numAttributes() - 1: data.numAttributes(); int index = 0; double[] attributeWeights = new double[nAtt]; for (int i = 0; i < data.numAttributes(); i++) { if (i != data.classIndex()) { attributeWeights[index++] = data.attribute(i).weight(); } } int[] frequencies = Utils.takeSample(attributeWeights, random); // Make list of attribute indices based on frequencies in sample ArrayList<Integer> al = new ArrayList<Integer>(); index = 0; for (int j = 0; j < data.numAttributes(); j++) { if (j == data.classIndex()) { al.add(j); } else { for (int i = 0; i < frequencies[index]; i++) { al.add(j); } index++; } } // Filter data if (original) { m_ReorderOriginal = new Reorder(); m_ReorderOriginal.setAttributeIndicesArray(al.stream().mapToInt(j -> j).toArray()); m_ReorderOriginal.setAllAttributeWeightsToOne(true); m_ReorderOriginal.setInputFormat(data); return Filter.useFilter(data, m_ReorderOriginal); } else { m_ReorderFiltered = new Reorder(); m_ReorderFiltered.setAttributeIndicesArray(al.stream().mapToInt(j -> j).toArray()); m_ReorderFiltered.setAllAttributeWeightsToOne(true); m_ReorderFiltered.setInputFormat(data); return Filter.useFilter(data, m_ReorderFiltered); } } /** * Build the classifier on the filtered data. * * @param data the training data * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances data) throws Exception { if (m_Classifier == null) { throw new Exception("No base classifier has been set!"); } getCapabilities().testWithFail(data); Random r = (data.numInstances() > 0) ? data.getRandomNumberGenerator(getSeed()) : new Random(getSeed()); data = setUp(data, r); if (!data.allInstanceWeightsIdentical() && !(m_Classifier instanceof WeightedInstancesHandler)) { data = data.resampleWithWeights(r); // The filter may have assigned weights. } if (!data.allAttributeWeightsIdentical() && !(m_Classifier instanceof WeightedAttributesHandler)) { data = resampleAttributes(data, false, r); } // can classifier handle the data? getClassifier().getCapabilities().testWithFail(data); if (m_Classifier instanceof Randomizable) { ((Randomizable)m_Classifier).setSeed(r.nextInt()); } m_Classifier.buildClassifier(data); } /** * Filters the instance so that it can subsequently be classified. */ protected Instance filterInstance(Instance instance) throws Exception { /* * System.err.println("FilteredClassifier:: " + * m_Filter.getClass().getName() + " in: " + instance); */ if (m_Filter.numPendingOutput() > 0) { throw new Exception("Filter output queue not empty!"); } /* * String fname = m_Filter.getClass().getName(); fname = * fname.substring(fname.lastIndexOf('.') + 1); util.Timer t = * util.Timer.getTimer("FilteredClassifier::" + fname); t.start(); */ if (!m_Filter.input(instance)) { if (!m_Filter.mayRemoveInstanceAfterFirstBatchDone()) { throw new Exception( "Filter didn't make the test instance" + " immediately available!"); } else { m_Filter.batchFinished(); return null; } } m_Filter.batchFinished(); return m_Filter.output(); // t.stop(); /* * System.err.println("FilteredClassifier:: " + * m_Filter.getClass().getName() + " out: " + newInstance); */ } /** * Classifies a given instance after filtering. 
* * @param instance the instance to be classified * @return the class distribution for the given instance * @throws Exception if instance could not be classified successfully */ public double[] distributionForInstance(Instance instance) throws Exception { if (m_ReorderOriginal != null) { m_ReorderOriginal.input(instance); instance = m_ReorderOriginal.output(); } Instance newInstance = filterInstance(instance); if (newInstance == null) { // filter has consumed the instance (e.g. RemoveWithValues // may do this). We will indicate no prediction for this // instance double[] unclassified = null; if (instance.classAttribute().isNumeric()) { unclassified = new double[1]; unclassified[0] = Utils.missingValue(); } else { // all zeros unclassified = new double[instance.classAttribute().numValues()]; } return unclassified; } else { if (m_ReorderFiltered != null) { m_ReorderFiltered.input(newInstance); newInstance = m_ReorderFiltered.output(); } return m_Classifier.distributionForInstance(newInstance); } } /** * Tool tip text for this property * * @return the tool tip for this property */ public String batchSizeTipText() { return "Batch size to use if base learner is a BatchPredictor"; } /** * Set the batch size to use. Gets passed through to the base learner if it * implements BatchPredictor. Otherwise it is just ignored. * * @param size the batch size to use */ public void setBatchSize(String size) { if (getClassifier() instanceof BatchPredictor) { ((BatchPredictor) getClassifier()).setBatchSize(size); } else { super.setBatchSize(size); } } /** * Gets the preferred batch size from the base learner if it implements * BatchPredictor. Returns 1 as the preferred batch size otherwise. * * @return the batch size to use */ public String getBatchSize() { if (getClassifier() instanceof BatchPredictor) { return ((BatchPredictor) getClassifier()).getBatchSize(); } else { return super.getBatchSize(); } } /** * Batch scoring method. Calls the appropriate method for the base learner if * it implements BatchPredictor. Otherwise it simply calls the * distributionForInstance() method repeatedly. 
* * @param insts the instances to get predictions for * @return an array of probability distributions, one for each instance * @throws Exception if a problem occurs */ public double[][] distributionsForInstances(Instances insts) throws Exception { if (getClassifier() instanceof BatchPredictor) { if (m_ReorderOriginal != null) { insts = Filter.useFilter(insts, m_ReorderOriginal); } Instances filteredInsts = Filter.useFilter(insts, m_Filter); if (filteredInsts.numInstances() != insts.numInstances()) { throw new WekaException( "FilteredClassifier: filter has returned more/less instances than required."); } if (m_ReorderFiltered != null) { filteredInsts = Filter.useFilter(filteredInsts, m_ReorderFiltered); } return ((BatchPredictor) getClassifier()).distributionsForInstances(filteredInsts); } else { double[][] result = new double[insts.numInstances()][insts.numClasses()]; for (int i = 0; i < insts.numInstances(); i++) { result[i] = distributionForInstance(insts.instance(i)); } return result; } } /** * Returns true if the base classifier implements BatchPredictor and is able * to generate batch predictions efficiently * * @return true if the base classifier can generate batch predictions * efficiently */ public boolean implementsMoreEfficientBatchPrediction() { if (!(getClassifier() instanceof BatchPredictor)) { return super.implementsMoreEfficientBatchPrediction(); } return ((BatchPredictor) getClassifier()) .implementsMoreEfficientBatchPrediction(); } /** * Output a representation of this classifier * * @return a representation of this classifier */ public String toString() { if (m_FilteredInstances == null) { return "FilteredClassifier: No model built yet."; } String result = "FilteredClassifier using " + getClassifierSpec() + " on data filtered through " + getFilterSpec() + "\n\nFiltered Header\n" + m_FilteredInstances.toString() + "\n\nClassifier Model\n" + m_Classifier.toString(); return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. * * @param argv should contain the following arguments: -t training file [-T * test file] [-c class index] */ public static void main(String[] argv) { runClassifier(new FilteredClassifier(), argv); } }
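// ---------------------------------------------------------------------------
// Usage sketch (added for illustration; not part of the original Weka source).
// It shows the intended wiring of FilteredClassifier: the filter is trained on
// the training data only, and test instances are pushed through the same
// trained filter before reaching the base classifier. The dataset path
// "/path/to/data.arff" is a placeholder assumption; fully-qualified class
// names are used so the file needs no additional imports.
class FilteredClassifierUsageExample {
  public static void main(String[] args) throws Exception {
    // Load a dataset and declare the last attribute as the class.
    weka.core.Instances data = new weka.core.converters.ConverterUtils.DataSource(
      "/path/to/data.arff").getDataSet();
    data.setClassIndex(data.numAttributes() - 1);
    // Discretize numeric attributes, then train J48 on the filtered data.
    FilteredClassifier fc = new FilteredClassifier();
    fc.setFilter(new weka.filters.supervised.attribute.Discretize());
    fc.setClassifier(new weka.classifiers.trees.J48());
    fc.buildClassifier(data);
    // Predictions route each instance through filterInstance() internally.
    System.out.println(fc);
    System.out.println("Prediction for first instance: "
      + fc.classifyInstance(data.instance(0)));
  }
}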
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/meta/IterativeClassifierOptimizer.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * IterativeClassifierOptimizer.java * Copyright (C) 2014 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import java.util.Collections; import java.util.Enumeration; import java.util.HashSet; import java.util.List; import java.util.Random; import java.util.Set; import java.util.Vector; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.IterativeClassifier; import weka.classifiers.RandomizableClassifier; import weka.classifiers.evaluation.Evaluation; import weka.classifiers.evaluation.EvaluationMetricHelper; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.Utils; import weka.core.AdditionalMeasureProducer; /** * Chooses the best number of iterations for an IterativeClassifier such as * LogitBoost using cross-validation or a percentage split evaluation. * <!-- globalinfo-start --> * Optimizes the number of iterations of the given iterative classifier using cross-validation or a percentage * split evaluation. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -A * If set, average estimate is used rather than one estimate from pooled predictions. * </pre> * * <pre> -L &lt;num&gt; * The number of iterations to look ahead for to find a better optimum. * (default 50)</pre> * * <pre> -P &lt;int&gt; * The size of the thread pool, for example, the number of cores in the CPU. * (default 1)</pre> * * <pre> -E &lt;int&gt; * The number of threads to use, which should be &gt;= size of thread pool. * (default 1)</pre> * * <pre> -I &lt;num&gt; * Step size for the evaluation, if evaluation is time consuming. * (default 1)</pre> * * <pre> -F &lt;num&gt; * Number of folds for cross-validation. * (default 10)</pre> * * <pre> -R &lt;num&gt; * Number of runs for cross-validation. * (default 1)</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.meta.LogitBoost)</pre> * * <pre> -metric &lt;name&gt; * Evaluation metric to optimise (default rmse). Available metrics: * correct,incorrect,kappa,total cost,average cost,kb relative,kb information, * correlation,complexity 0,complexity scheme,complexity improvement, * mae,rmse,rae,rrse,coverage,region size,tp rate,fp rate,precision,recall, * f-measure,mcc,roc area,prc area</pre> * * <pre> -class-value-index &lt;0-based index&gt; * Class value index to optimise. Ignored for all but information-retrieval * type metrics (such as roc area). 
If unspecified (or a negative value is supplied), * and an information-retrieval metric is specified, then the class-weighted average * metric is used. (default -1)</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution).</pre> * * <pre> * Options specific to classifier weka.classifiers.meta.LogitBoost: * </pre> * * <pre> -Q * Use resampling instead of reweighting for boosting.</pre> * * <pre> -P &lt;percent&gt; * Percentage of weight mass to base training on. * (default 100, reduce to around 90 speed up)</pre> * * <pre> -L &lt;num&gt; * Threshold on the improvement of the likelihood. * (default -Double.MAX_VALUE)</pre> * * <pre> -H &lt;num&gt; * Shrinkage parameter. * (default 1)</pre> * * <pre> -Z &lt;num&gt; * Z max threshold for responses. * (default 3)</pre> * * <pre> -O &lt;int&gt; * The size of the thread pool, for example, the number of cores in the CPU. (default 1)</pre> * * <pre> -E &lt;int&gt; * The number of threads to use for batch prediction, which should be &gt;= size of thread pool. * (default 1)</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -I &lt;num&gt; * Number of iterations. * (default 10)</pre> * * <pre> -percentage &lt;num&gt; * The percentage of data to be used for training (if 0, k-fold cross-validation is used). * (default 0)</pre> * * <pre> -order * Whether to preserve order when a percentage split evaluation is performed. * </pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.trees.DecisionStump)</pre> * * <pre> -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution).</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.DecisionStump: * </pre> * * <pre> -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution).</pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 10141 $ */ public class IterativeClassifierOptimizer extends RandomizableClassifier implements AdditionalMeasureProducer { /** for serialization */ private static final long serialVersionUID = -3665485256313525864L; /** The base classifier to use */ protected IterativeClassifier m_IterativeClassifier = new LogitBoost(); /** The number of folds for the cross-validation. */ protected int m_NumFolds = 10; /** The number of runs for the cross-validation. */ protected int m_NumRuns = 1; /** The step size determining when evaluations happen. */ protected int m_StepSize = 1; /** Whether to use average. */ protected boolean m_UseAverage = false; /** The number of iterations to look ahead for to find a better optimum.
*/ protected int m_lookAheadIterations = 50; public static Tag[] TAGS_EVAL; static { List<String> evalNames = EvaluationMetricHelper.getAllMetricNames(); TAGS_EVAL = new Tag[evalNames.size()]; for (int i = 0; i < evalNames.size(); i++) { TAGS_EVAL[i] = new Tag(i, evalNames.get(i), evalNames.get(i), false); } } /** The evaluation metric to use */ protected String m_evalMetric = "rmse"; /** * The class value index to use with information retrieval type metrics. < 0 * indicates to use the class weighted average version of the metric. */ protected int m_classValueIndex = -1; /** * The thresholds to be used for classification, if the metric implements * ThresholdProducingMetric. */ protected double[] m_thresholds = null; /** The best value found for the criterion to be optimized. */ protected double m_bestResult = Double.MAX_VALUE; /** The best number of iterations identified. */ protected int m_bestNumIts; /** The number of threads to use for parallel building of classifiers. */ protected int m_numThreads = 1; /** The size of the thread pool. */ protected int m_poolSize = 1; /** The percentage of data to be used for training (if 0, k-fold cross-validation is used). */ protected double m_splitPercentage = 0.0; /** Whether to preserve order when a percentage split evaluation is performed. */ protected boolean m_preserveOrderInPercentageSplitEvaluation = false; /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter * gui */ public String globalInfo() { return "Optimizes the number of iterations of the given iterative classifier using cross-validation " + "or a percentage split evaluation."; } /** * String describing default classifier. */ protected String defaultIterativeClassifierString() { return "weka.classifiers.meta.LogitBoost"; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String useAverageTipText() { return "If true, average estimates are used instead of one estimate from pooled predictions."; } /** * Get the value of UseAverage. * * @return Value of UseAverage. */ public boolean getUseAverage() { return m_UseAverage; } /** * Set the value of UseAverage. * * @param newUseAverage Value to assign to UseAverage. */ public void setUseAverage(boolean newUseAverage) { m_UseAverage = newUseAverage; } /** * @return a string to describe the option */ public String numThreadsTipText() { return "The number of threads to use, which should be >= size of thread pool."; } /** * Gets the number of threads. */ public int getNumThreads() { return m_numThreads; } /** * Sets the number of threads. */ public void setNumThreads(int nT) { m_numThreads = nT; } /** * @return a string to describe the option */ public String poolSizeTipText() { return "The size of the thread pool, for example, the number of cores in the CPU."; } /** * Gets the size of the thread pool. */ public int getPoolSize() { return m_poolSize; } /** * Sets the size of the thread pool. */ public void setPoolSize(int nT) { m_poolSize = nT; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String stepSizeTipText() { return "Step size for the evaluation, if evaluation is time consuming."; } /** * Get the value of StepSize. * * @return Value of StepSize. */ public int getStepSize() { return m_StepSize; } /** * Set the value of StepSize.
* * @param newStepSize Value to assign to StepSize. */ public void setStepSize(int newStepSize) { m_StepSize = newStepSize; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String numRunsTipText() { return "Number of runs for cross-validation."; } /** * Get the value of NumRuns. * * @return Value of NumRuns. */ public int getNumRuns() { return m_NumRuns; } /** * Set the value of NumRuns. * * @param newNumRuns Value to assign to NumRuns. */ public void setNumRuns(int newNumRuns) { m_NumRuns = newNumRuns; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String numFoldsTipText() { return "Number of folds for cross-validation."; } /** * Get the value of NumFolds. * * @return Value of NumFolds. */ public int getNumFolds() { return m_NumFolds; } /** * Set the value of NumFolds. * * @param newNumFolds Value to assign to NumFolds. */ public void setNumFolds(int newNumFolds) { m_NumFolds = newNumFolds; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String lookAheadIterationsTipText() { return "The number of iterations to look ahead for to find a better optimum."; } /** * Get the value of LookAheadIterations. * * @return Value of LookAheadIterations. */ public int getLookAheadIterations() { return m_lookAheadIterations; } /** * Set the value of LookAheadIterations. * * @param newLookAheadIterations Value to assign to LookAheadIterations. */ public void setLookAheadIterations(int newLookAheadIterations) { m_lookAheadIterations = newLookAheadIterations; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String splitPercentageTipText() { return "The percentage of data to be used for training (if 0, k-fold cross-validation is used)."; } /** * Get the value of SplitPercentage. * * @return Value of SplitPercentage. */ public double getSplitPercentage() { return m_splitPercentage; } /** * Set the value of SplitPercentage. * * @param newSplitPercentage Value to assign to SplitPercentage. */ public void setSplitPercentage(double newSplitPercentage) { m_splitPercentage = newSplitPercentage; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String preserveOrderInPercentageSplitEvaluationTipText() { return "Whether to preserve order when a percentage split evaluation is performed."; } /** * Get the value of PreserveOrderInPercentageSplitEvaluation. * * @return Value of PreserveOrderInPercentageSplitEvaluation. */ public boolean getPreserveOrderInPercentageSplitEvaluation() { return m_preserveOrderInPercentageSplitEvaluation; } /** * Set the value of PreserveOrderInPercentageSplitEvaluation. * * @param newPreserveOrderInPercentageSplitEvaluation Value to assign to PreserveOrderInPercentageSplitEvaluation. */ public void setPreserveOrderInPercentageSplitEvaluation(boolean newPreserveOrderInPercentageSplitEvaluation) { m_preserveOrderInPercentageSplitEvaluation = newPreserveOrderInPercentageSplitEvaluation; } /** * Builds the classifier. 
*/ @Override public void buildClassifier(Instances data) throws Exception { if (m_IterativeClassifier == null) { throw new Exception("A base classifier has not been specified!"); } // Can classifier handle the data? getCapabilities().testWithFail(data); // Need to shuffle the data Random randomInstance = new Random(m_Seed); // Save reference to original data Instances origData = data; // Remove instances with missing class data = new Instances(data); data.deleteWithMissingClass(); if (data.numInstances() < m_NumFolds) { System.err.println("WARNING: reducing number of folds to number of instances in " + "IterativeClassifierOptimizer"); m_NumFolds = data.numInstances(); } // Local variables holding the actual number of folds and runs int numFolds = m_NumFolds; int numRuns = m_NumRuns; if (getSplitPercentage() != 0) { if ((getSplitPercentage() < 0) || (getSplitPercentage() > 100)) { throw new IllegalArgumentException("Split percentage in IterativeClassifierOptimizer not in [0,100]"); } numFolds = 1; numRuns = 1; } // Initialize datasets and classifiers Instances[][] trainingSets = new Instances[numRuns][numFolds]; Instances[][] testSets = new Instances[numRuns][numFolds]; final IterativeClassifier[][] classifiers = new IterativeClassifier[numRuns][numFolds]; if (getSplitPercentage() == 0) { for (int j = 0; j < numRuns; j++) { data.randomize(randomInstance); if (data.classAttribute().isNominal()) { data.stratify(numFolds); } for (int i = 0; i < numFolds; i++) { trainingSets[j][i] = data.trainCV(numFolds, i, randomInstance); testSets[j][i] = data.testCV(numFolds, i); classifiers[j][i] = (IterativeClassifier) AbstractClassifier.makeCopy(m_IterativeClassifier); classifiers[j][i].initializeClassifier(trainingSets[j][i]); } } } else { if (!getPreserveOrderInPercentageSplitEvaluation()) { data.randomize(randomInstance); } int trainSize = (int) Math.round(data.numInstances() * getSplitPercentage() / 100); int testSize = data.numInstances() - trainSize; trainingSets[0][0] = new Instances(data, 0, trainSize); testSets[0][0] = new Instances(data, trainSize, testSize); classifiers[0][0] = (IterativeClassifier) AbstractClassifier.makeCopy(m_IterativeClassifier); classifiers[0][0].initializeClassifier(trainingSets[0][0]); } // The thread pool to be used for parallel execution. ExecutorService pool = Executors.newFixedThreadPool(m_poolSize); // Perform evaluation Evaluation eval = new Evaluation(data); EvaluationMetricHelper helper = new EvaluationMetricHelper(eval); boolean maximise = helper.metricIsMaximisable(m_evalMetric); if (maximise) { m_bestResult = -Double.MAX_VALUE; // smallest possible value, so any result counts as an improvement } else { m_bestResult = Double.MAX_VALUE; } m_thresholds = null; int numIts = 0; m_bestNumIts = 0; int numberOfIterationsSinceMinimum = -1; while (true) { // Should we perform an evaluation? if (numIts % m_StepSize == 0) { double result = 0; double[] tempThresholds = null; // Shall we use the average score obtained from the folds or not? if (!m_UseAverage) { eval = new Evaluation(data); helper.setEvaluation(eval); for (int r = 0; r < numRuns; r++) { for (int i = 0; i < numFolds; i++) { eval.evaluateModel(classifiers[r][i], testSets[r][i]); } } result = getClassValueIndex() >= 0 ?
helper.getNamedMetric(m_evalMetric, getClassValueIndex()) : helper.getNamedMetric(m_evalMetric); tempThresholds = helper.getNamedMetricThresholds(m_evalMetric); } else { // Using average score for (int r = 0; r < numRuns; r++) { for (int i = 0; i < numFolds; i++) { eval = new Evaluation(trainingSets[r][i]); helper.setEvaluation(eval); eval.evaluateModel(classifiers[r][i], testSets[r][i]); result += getClassValueIndex() >= 0 ? helper.getNamedMetric(m_evalMetric, getClassValueIndex()) : helper.getNamedMetric(m_evalMetric); double[] thresholds = helper.getNamedMetricThresholds(m_evalMetric); // Add thresholds (if applicable) so that we can compute average thresholds later if (thresholds != null) { if (tempThresholds == null) { tempThresholds = new double[data.numClasses()]; } for (int j = 0; j < thresholds.length; j++) { tempThresholds[j] += thresholds[j]; } } } } result /= (double)(numFolds * numRuns); // Compute average thresholds if applicable if (tempThresholds != null) { for (int j = 0; j < tempThresholds.length; j++) { tempThresholds[j] /= (double) (numRuns * numFolds); } } } if (m_Debug) { System.err.println("Iteration: " + numIts + " " + "Measure: " + result); if (tempThresholds != null) { System.err.print("Thresholds:"); for (int j = 0; j < tempThresholds.length; j++) { System.err.print(" " + tempThresholds[j]); } System.err.println(); } } double delta = maximise ? m_bestResult - result : result - m_bestResult; // Is there an improvement? if (delta < 0) { m_bestResult = result; m_bestNumIts = numIts; m_thresholds = tempThresholds; numberOfIterationsSinceMinimum = -1; } } numberOfIterationsSinceMinimum++; numIts++; if (numberOfIterationsSinceMinimum >= m_lookAheadIterations) { break; } // Set up result set, and chunk size int numberOfInvocations = numRuns * numFolds; final int N = numFolds; final int chunksize = numberOfInvocations / m_numThreads; Set<Future<Boolean>> results = new HashSet<Future<Boolean>>(); final int nIts = numIts; // For each thread for (int j = 0; j < m_numThreads; j++) { // Determine batch to be processed final int lo = j * chunksize; final int hi = (j < m_numThreads - 1) ? (lo + chunksize) : numberOfInvocations; // Create and submit new job Future<Boolean> futureT = pool.submit(new Callable<Boolean>() { @Override public Boolean call() throws Exception { for (int k = lo; k < hi; k++) { if (!classifiers[k / N][k % N].next()) { if (m_Debug) { System.err.println("Classifier failed to iterate for run " + ((k / N) + 1) + " and fold " + ((k % N) + 1) + " when performing iteration " + nIts + "."); } return false; } } return true; } }); results.add(futureT); } // Check that all classifiers succeeded try { boolean failure = false; for (Future<Boolean> futureT : results) { if (!futureT.get()) { failure = true; break; // Break out if one classifier fails to iterate } } if (failure) { break; } } catch (Exception e) { System.out.println("Classifiers could not be generated."); e.printStackTrace(); } } trainingSets = null; testSets = null; data = null; // Build classifier based on identified number of iterations m_IterativeClassifier.initializeClassifier(origData); int i = 0; while (i++ < m_bestNumIts && m_IterativeClassifier.next()) { } ; m_IterativeClassifier.done(); // Shut down thread pool pool.shutdown(); } /** * Returns the class distribution for an instance. */ @Override public double[] distributionForInstance(Instance inst) throws Exception { // Does the metric produce thresholds that need to be applied? 
if (m_thresholds != null) { double[] dist = m_IterativeClassifier.distributionForInstance(inst); double[] newDist = new double[dist.length]; for (int i = 0; i < dist.length; i++) { if (dist[i] >= m_thresholds[i]) { newDist[i] = 1.0; } } Utils.normalize(newDist); // Could have multiple 1.0 entries return newDist; } else { return m_IterativeClassifier.distributionForInstance(inst); } } /** * Returns a string describing the classifier. */ @Override public String toString() { if (m_IterativeClassifier == null) { return "No classifier built yet."; } else { StringBuffer sb = new StringBuffer(); sb.append("Best value found: " + m_bestResult + "\n"); sb.append("Best number of iterations found: " + m_bestNumIts + "\n\n"); if (m_thresholds != null) { sb.append("Thresholds found: "); for (int i = 0; i < m_thresholds.length; i++) { sb.append(m_thresholds[i] + " "); } } sb.append("\n\n"); sb.append(m_IterativeClassifier.toString()); return sb.toString(); } } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<Option>(7); newVector.addElement(new Option("\tIf set, average estimate is used rather " + "than one estimate from pooled predictions.", "A", 0, "-A")); newVector.addElement(new Option("\t" + lookAheadIterationsTipText() + "\n" + "\t(default 50)", "L", 1, "-L <num>")); newVector.addElement(new Option( "\t" + poolSizeTipText() + "\n\t(default 1)", "P", 1, "-P <int>")); newVector.addElement(new Option("\t" + numThreadsTipText() + "\n" + "\t(default 1)", "E", 1, "-E <int>")); newVector.addElement(new Option("\t" + stepSizeTipText() + "\n" + "\t(default 1)", "I", 1, "-I <num>")); newVector.addElement(new Option("\tNumber of folds for cross-validation.\n" + "\t(default 10)", "F", 1, "-F <num>")); newVector.addElement(new Option("\tNumber of runs for cross-validation.\n" + "\t(default 1)", "R", 1, "-R <num>")); newVector.addElement(new Option("\tThe percentage of data to be used for training (if 0, k-fold " + "cross-validation is used)\n" + "\t(default 0)", "percentage", 1, "-percentage <num>")); newVector.addElement(new Option("\tWhether to preserve order when a percentage split evaluation " + "is performed.", "order", 0, "-order")); newVector .addElement(new Option("\tFull name of base classifier.\n" + "\t(default: " + defaultIterativeClassifierString() + ")", "W", 1, "-W")); List<String> metrics = EvaluationMetricHelper.getAllMetricNames(); StringBuilder b = new StringBuilder(); int length = 0; for (String m : metrics) { b.append(m.toLowerCase()).append(","); length += m.length(); if (length >= 60) { b.append("\n\t"); length = 0; } } newVector.addElement(new Option( "\tEvaluation metric to optimise (default rmse). Available metrics:\n\t" + b.substring(0, b.length() - 1), "metric", 1, "-metric <name>")); newVector .addElement(new Option( "\tClass value index to optimise. Ignored for all but information-retrieval\n\t" + "type metrics (such as roc area). If unspecified (or a negative value is supplied),\n\t" + "and an information-retrieval metric is specified, then the class-weighted average\n\t" + "metric is used. 
(default -1)", "class-value-index", 1, "-class-value-index <0-based index>")); newVector.addAll(Collections.list(super.listOptions())); newVector.addElement(new Option("", "", 0, "\nOptions specific to classifier " + m_IterativeClassifier.getClass().getName() + ":")); newVector.addAll(Collections.list(((OptionHandler) m_IterativeClassifier) .listOptions())); return newVector.elements(); } /** * Parses a given list of options. * Options after -- are passed to the designated classifier. * * @param options the list of options as an array of strings * @exception Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { super.setOptions(options); setUseAverage(Utils.getFlag('A', options)); String lookAheadIterations = Utils.getOption('L', options); if (lookAheadIterations.length() != 0) { setLookAheadIterations(Integer.parseInt(lookAheadIterations)); } else { setLookAheadIterations(50); } String PoolSize = Utils.getOption('P', options); if (PoolSize.length() != 0) { setPoolSize(Integer.parseInt(PoolSize)); } else { setPoolSize(1); } String NumThreads = Utils.getOption('E', options); if (NumThreads.length() != 0) { setNumThreads(Integer.parseInt(NumThreads)); } else { setNumThreads(1); } String stepSize = Utils.getOption('I', options); if (stepSize.length() != 0) { setStepSize(Integer.parseInt(stepSize)); } else { setStepSize(1); } String numFolds = Utils.getOption('F', options); if (numFolds.length() != 0) { setNumFolds(Integer.parseInt(numFolds)); } else { setNumFolds(10); } String numRuns = Utils.getOption('R', options); if (numRuns.length() != 0) { setNumRuns(Integer.parseInt(numRuns)); } else { setNumRuns(1); } String splitPercentage = Utils.getOption("percentage", options); if (splitPercentage.length() != 0) { setSplitPercentage(Double.parseDouble(splitPercentage)); } else { setSplitPercentage(0.0); } setPreserveOrderInPercentageSplitEvaluation(Utils.getFlag("order", options)); String evalMetric = Utils.getOption("metric", options); if (evalMetric.length() > 0) { boolean found = false; for (int i = 0; i < TAGS_EVAL.length; i++) { if (TAGS_EVAL[i].getIDStr().equalsIgnoreCase(evalMetric)) { setEvaluationMetric(new SelectedTag(i, TAGS_EVAL)); found = true; break; } } if (!found) { throw new Exception("Unknown evaluation metric: " + evalMetric); } } String classValIndex = Utils.getOption("class-value-index", options); if (classValIndex.length() > 0) { setClassValueIndex(Integer.parseInt(classValIndex)); } else { setClassValueIndex(-1); } String classifierName = Utils.getOption('W', options); if (classifierName.length() > 0) { setIterativeClassifier(getIterativeClassifier(classifierName, Utils.partitionOptions(options))); } else { setIterativeClassifier(getIterativeClassifier( defaultIterativeClassifierString(), Utils.partitionOptions(options))); } } /** * Get classifier for string. * * @return a classifier * @throws Exception if a problem occurs */ protected IterativeClassifier getIterativeClassifier(String name, String[] options) throws Exception { Classifier c = AbstractClassifier.forName(name, options); if (c instanceof IterativeClassifier) { return (IterativeClassifier) c; } else { throw new IllegalArgumentException(name + " is not an IterativeClassifier."); } } /** * Gets the current settings of the Classifier. 
* * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<String>(); if (getUseAverage()) { options.add("-A"); } options.add("-W"); options.add(getIterativeClassifier().getClass().getName()); options.add("-L"); options.add("" + getLookAheadIterations()); options.add("-P"); options.add("" + getPoolSize()); options.add("-E"); options.add("" + getNumThreads()); options.add("-I"); options.add("" + getStepSize()); options.add("-F"); options.add("" + getNumFolds()); options.add("-R"); options.add("" + getNumRuns()); options.add("-percentage"); options.add("" + getSplitPercentage()); if (getPreserveOrderInPercentageSplitEvaluation()) { options.add("-order"); } options.add("-metric"); options.add(getEvaluationMetric().getSelectedTag().getIDStr()); if (getClassValueIndex() >= 0) { options.add("-class-value-index"); options.add("" + getClassValueIndex()); } Collections.addAll(options, super.getOptions()); String[] classifierOptions = ((OptionHandler) m_IterativeClassifier).getOptions(); if (classifierOptions.length > 0) { options.add("--"); Collections.addAll(options, classifierOptions); } return options.toArray(new String[0]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String evaluationMetricTipText() { return "The evaluation metric to use"; } /** * Set the evaluation metric to use * * @param metric the metric to use */ public void setEvaluationMetric(SelectedTag metric) { if (metric.getTags() == TAGS_EVAL) { m_evalMetric = metric.getSelectedTag().getIDStr(); } } /** * Get the evaluation metric to use * * @return the evaluation metric to use */ public SelectedTag getEvaluationMetric() { for (int i = 0; i < TAGS_EVAL.length; i++) { if (TAGS_EVAL[i].getIDStr().equalsIgnoreCase(m_evalMetric)) { return new SelectedTag(i, TAGS_EVAL); } } // if we get here then it could be because a plugin // metric is no longer available. Default to rmse return new SelectedTag(12, TAGS_EVAL); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String classValueIndexTipText() { return "The class value index to use with information retrieval type metrics. A value < 0" + " indicates to use the class weighted average version of the metric."; } /** * Set the class value index to use * * @param i the class value index to use */ public void setClassValueIndex(int i) { m_classValueIndex = i; } /** * Get the class value index to use * * @return the class value index to use */ public int getClassValueIndex() { return m_classValueIndex; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String iterativeClassifierTipText() { return "The iterative classifier to be optimized."; } /** * Returns default capabilities of the base classifier. * * @return the capabilities of the base classifier */ @Override public Capabilities getCapabilities() { Capabilities result; if (getIterativeClassifier() != null) { result = getIterativeClassifier().getCapabilities(); } else { result = new Capabilities(this); result.disableAll(); } // set dependencies for (Capability cap : Capability.values()) { result.enableDependency(cap); } result.setOwner(this); return result; } /** * Set the base learner. * * @param newIterativeClassifier the classifier to use. 
*/ public void setIterativeClassifier(IterativeClassifier newIterativeClassifier) { m_IterativeClassifier = newIterativeClassifier; } /** * Get the classifier used as the base learner. * * @return the classifier used as the base learner */ public IterativeClassifier getIterativeClassifier() { return m_IterativeClassifier; } /** * Gets the classifier specification string, which contains the class name of * the classifier and any options to the classifier * * @return the classifier string */ protected String getIterativeClassifierSpec() { IterativeClassifier c = getIterativeClassifier(); return c.getClass().getName() + " " + Utils.joinOptions(((OptionHandler) c).getOptions()); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision: 10649 $"); } /** * Returns the best number of iterations * * @return the best number of iterations */ public double measureBestNumIts() { return m_bestNumIts; } /** * Returns the measure for the best model * * @return the measure for the best model */ public double measureBestVal() { return m_bestResult; } /** * Returns an enumeration of the additional measure names * * @return an enumeration of the measure names */ @Override public Enumeration<String> enumerateMeasures() { Vector<String> newVector = new Vector<String>(2); newVector.addElement("measureBestNumIts"); newVector.addElement("measureBestVal"); return newVector.elements(); } /** * Returns the value of the named measure * * @param additionalMeasureName the name of the measure to query for its value * @return the value of the named measure * @throws IllegalArgumentException if the named measure is not supported */ @Override public double getMeasure(String additionalMeasureName) { if (additionalMeasureName.compareToIgnoreCase("measureBestNumIts") == 0) { return measureBestNumIts(); } else if (additionalMeasureName.compareToIgnoreCase("measureBestVal") == 0) { return measureBestVal(); } else { throw new IllegalArgumentException(additionalMeasureName + " not supported (IterativeClassifierOptimizer)"); } } /** * Main method for testing this class. * * @param argv the options */ public static void main(String[] argv) { runClassifier(new IterativeClassifierOptimizer(), argv); } }
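// ---------------------------------------------------------------------------
// Usage sketch (added for illustration; not part of the original Weka source).
// It exercises the search implemented in buildClassifier() above: each
// candidate iteration count is scored by cross-validation, and the search
// stops once no better optimum is seen within the look-ahead window. The
// dataset path "/path/to/data.arff" is a placeholder assumption;
// fully-qualified names avoid the need for additional imports.
class IterativeClassifierOptimizerUsageExample {
  public static void main(String[] args) throws Exception {
    weka.core.Instances data = new weka.core.converters.ConverterUtils.DataSource(
      "/path/to/data.arff").getDataSet();
    data.setClassIndex(data.numAttributes() - 1);
    IterativeClassifierOptimizer opt = new IterativeClassifierOptimizer();
    opt.setIterativeClassifier(new LogitBoost()); // the default base learner
    opt.setNumFolds(10);            // 10-fold cross-validation per evaluation
    opt.setLookAheadIterations(50); // keep going 50 iterations past the best
    opt.buildClassifier(data);      // optimises the metric (rmse by default)
    System.out.println("Best number of iterations: " + opt.measureBestNumIts());
    System.out.println("Best metric value: " + opt.measureBestVal());
  }
}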
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/meta/LogitBoost.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * LogitBoost.java * Copyright (C) 1999-2014 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import java.util.ArrayList; import java.util.Collections; import java.util.Enumeration; import java.util.HashSet; import java.util.Random; import java.util.Set; import java.util.Vector; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.IterativeClassifier; import weka.classifiers.RandomizableIteratedSingleClassifierEnhancer; import weka.classifiers.Sourcable; import weka.classifiers.rules.ZeroR; import weka.core.Attribute; import weka.core.BatchPredictor; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.UnassignedClassException; import weka.core.Utils; import weka.core.WeightedInstancesHandler; /** * <!-- globalinfo-start --> Class for performing additive logistic regression. <br/> * This class performs classification using a regression scheme as the base learner, and can handle * multi-class problems. For more information, see<br/> * <br/> * J. Friedman, T. Hastie, R. Tibshirani (1998). Additive Logistic Regression: a Statistical View of * Boosting. Stanford University.<br/> * <br/> * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;techreport{Friedman1998, * address = {Stanford University}, * author = {J. Friedman and T. Hastie and R. Tibshirani}, * title = {Additive Logistic Regression: a Statistical View of Boosting}, * year = {1998}, * PS = {http://www-stat.stanford.edu/\~jhf/ftp/boost.ps} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -Q * Use resampling instead of reweighting for boosting. * </pre> * * <pre> * -use-estimated-priors * Use estimated priors rather than uniform ones. * </pre> * * <pre> * -P &lt;percent&gt; * Percentage of weight mass to base training on. * (default 100, reduce to around 90 speed up) * </pre> * * <pre> * -L &lt;num&gt; * Threshold on the improvement of the likelihood. * (default -Double.MAX_VALUE) * </pre> * * <pre> * -H &lt;num&gt; * Shrinkage parameter. * (default 1) * </pre> * * <pre> * -Z &lt;num&gt; * Z max threshold for responses. * (default 3) * </pre> * * <pre> * -O &lt;int&gt; * The size of the thread pool, for example, the number of cores in the CPU. 
(default 1) * </pre> * * <pre> * -E &lt;int&gt; * The number of threads to use for batch prediction, which should be &gt;= size of thread pool. * (default 1) * </pre> * * <pre> * -S &lt;num&gt; * Random number seed. * (default 1) * </pre> * * <pre> * -I &lt;num&gt; * Number of iterations. * (default 10) * </pre> * * <pre> * -W * Full name of base classifier. * (default: weka.classifiers.trees.DecisionStump) * </pre> * * <pre> * -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution). * </pre> * * <pre> * Options specific to classifier weka.classifiers.trees.DecisionStump: * </pre> * * <pre> * -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution). * </pre> * * <!-- options-end --> * * Options after -- are passed to the designated learner. * <p> * * @author Len Trigg (trigg@cs.waikato.ac.nz) * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision$ */ public class LogitBoost extends RandomizableIteratedSingleClassifierEnhancer implements Sourcable, WeightedInstancesHandler, TechnicalInformationHandler, IterativeClassifier, BatchPredictor { /** for serialization */ static final long serialVersionUID = -1105660358715833753L; /** * ArrayList for storing the generated base classifiers. Note: we are hiding the variable from * IteratedSingleClassifierEnhancer */ protected ArrayList<Classifier[]> m_Classifiers; /** The number of classes */ protected int m_NumClasses; /** The number of successfully generated base classifiers. */ protected int m_NumGenerated; /** Weight thresholding. The percentage of weight mass used in training */ protected int m_WeightThreshold = 100; /** A threshold for responses (Friedman suggests between 2 and 4) */ protected static final double DEFAULT_Z_MAX = 3; /** Dummy dataset with a numeric class */ protected Instances m_NumericClassData; /** The actual class attribute (for getting class names) */ protected Attribute m_ClassAttribute; /** Use boosting with reweighting? */ protected boolean m_UseResampling; /** The threshold on the improvement of the likelihood */ protected double m_Precision = -Double.MAX_VALUE; /** The value of the shrinkage parameter */ protected double m_Shrinkage = 1; /** Whether to start with class priors estimated from the training data */ protected boolean m_UseEstimatedPriors = false; /** The random number generator used */ protected Random m_RandomInstance = null; /** * The value by which the actual target value for the true class is offset. */ protected double m_Offset = 0.0; /** A ZeroR model in case no model can be built from the data */ protected Classifier m_ZeroR; /** The initial F scores (0 by default) */ protected double[] m_InitialFs; /** The Z max value to use */ protected double m_zMax = DEFAULT_Z_MAX; /** The y values used during the training process. */ protected double[][] m_trainYs; /** The F scores used during the training process. */ protected double[][] m_trainFs; /** The probabilities used during the training process. */ protected double[][] m_probs; /** The current loglikelihood. */ protected double m_logLikelihood; /** The total weight of the data. */ protected double m_sumOfWeights; /** The training data. 
*/ protected Instances m_data; /** The number of threads to use at prediction time in batch prediction. */ protected int m_numThreads = 1; /** The size of the thread pool. */ protected int m_poolSize = 1; /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for performing additive logistic regression. \n" + "This class performs classification using a regression scheme as the " + "base learner, and can handle multi-class problems. For more " + "information, see\n\n" + this.getTechnicalInformation().toString(); } /** * Constructor. */ public LogitBoost() { this.m_Classifier = new weka.classifiers.trees.DecisionStump(); } /** * Returns an instance of a TechnicalInformation object, containing detailed information about the * technical background of this class, e.g., paper reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.TECHREPORT); result.setValue(Field.AUTHOR, "J. Friedman and T. Hastie and R. Tibshirani"); result.setValue(Field.YEAR, "1998"); result.setValue(Field.TITLE, "Additive Logistic Regression: a Statistical View of Boosting"); result.setValue(Field.ADDRESS, "Stanford University"); result.setValue(Field.PS, "http://www-stat.stanford.edu/~jhf/ftp/boost.ps"); return result; } /** * String describing default classifier. * * @return the default classifier classname */ @Override protected String defaultClassifierString() { return "weka.classifiers.trees.DecisionStump"; } /** * Select only instances with weights that contribute to the specified quantile of the weight * distribution * * @param data * the input instances * @param quantile * the specified quantile eg 0.9 to select 90% of the weight mass * @return the selected instances * @throws InterruptedException */ protected Instances selectWeightQuantile(final Instances data, final double quantile) throws InterruptedException { int numInstances = data.numInstances(); Instances trainData = new Instances(data, numInstances); double[] weights = new double[numInstances]; double sumOfWeights = 0; for (int i = 0; i < numInstances; i++) { weights[i] = data.instance(i).weight(); sumOfWeights += weights[i]; } double weightMassToSelect = sumOfWeights * quantile; int[] sortedIndices = Utils.sort(weights); // Select the instances sumOfWeights = 0; for (int i = numInstances - 1; i >= 0; i--) { Instance instance = (Instance) data.instance(sortedIndices[i]).copy(); trainData.add(instance); sumOfWeights += weights[sortedIndices[i]]; if ((sumOfWeights > weightMassToSelect) && (i > 0) && (weights[sortedIndices[i]] != weights[sortedIndices[i - 1]])) { break; } } if (this.m_Debug) { System.err.println("Selected " + trainData.numInstances() + " out of " + numInstances); } return trainData; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<>(5); newVector.addElement(new Option("\tUse resampling instead of reweighting for boosting.", "Q", 0, "-Q")); newVector.addElement(new Option("\tUse estimated priors rather than uniform ones.", "use-estimated-priors", 0, "-use-estimated-priors")); newVector.addElement(new Option("\tPercentage of weight mass to base training on.\n" + "\t(default 100, reduce to around 90 to speed up)", "P", 1, "-P <percent>")); newVector.addElement(new Option("\tThreshold on the improvement of the likelihood.\n" + "\t(default -Double.MAX_VALUE)", "L", 1, "-L <num>")); newVector.addElement(new Option("\tShrinkage parameter.\n" + "\t(default 1)", "H", 1, "-H <num>")); newVector.addElement(new Option("\tZ max threshold for responses." + "\n\t(default 3)", "Z", 1, "-Z <num>")); newVector.addElement(new Option("\t" + this.poolSizeTipText() + " (default 1)", "O", 1, "-O <int>")); newVector.addElement(new Option("\t" + this.numThreadsTipText() + "\n" + "\t(default 1)", "E", 1, "-E <int>")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -Q * Use resampling instead of reweighting for boosting. * </pre> * * <pre> * -use-estimated-priors * Use estimated priors rather than uniform ones. * </pre> * * <pre> * -P &lt;percent&gt; * Percentage of weight mass to base training on. * (default 100, reduce to around 90 to speed up) * </pre> * * <pre> * -L &lt;num&gt; * Threshold on the improvement of the likelihood. * (default -Double.MAX_VALUE) * </pre> * * <pre> * -H &lt;num&gt; * Shrinkage parameter. * (default 1) * </pre> * * <pre> * -Z &lt;num&gt; * Z max threshold for responses. * (default 3) * </pre> * * <pre> * -O &lt;int&gt; * The size of the thread pool, for example, the number of cores in the CPU. (default 1) * </pre> * * <pre> * -E &lt;int&gt; * The number of threads to use for batch prediction, which should be &gt;= size of thread pool. * (default 1) * </pre> * * <pre> * -S &lt;num&gt; * Random number seed. * (default 1) * </pre> * * <pre> * -I &lt;num&gt; * Number of iterations. * (default 10) * </pre> * * <pre> * -W * Full name of base classifier. * (default: weka.classifiers.trees.DecisionStump) * </pre> * * <pre> * -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution). * </pre> * * <pre> * Options specific to classifier weka.classifiers.trees.DecisionStump: * </pre> * * <pre> * -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution). * </pre> * * <!-- options-end --> * * Options after -- are passed to the designated learner. 
* <p> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String thresholdString = Utils.getOption('P', options); if (thresholdString.length() != 0) { this.setWeightThreshold(Integer.parseInt(thresholdString)); } else { this.setWeightThreshold(100); } String precisionString = Utils.getOption('L', options); if (precisionString.length() != 0) { this.setLikelihoodThreshold(Double.parseDouble(precisionString)); } else { this.setLikelihoodThreshold(-Double.MAX_VALUE); } String shrinkageString = Utils.getOption('H', options); if (shrinkageString.length() != 0) { this.setShrinkage(Double.parseDouble(shrinkageString)); } else { this.setShrinkage(1.0); } String zString = Utils.getOption('Z', options); if (zString.length() > 0) { this.setZMax(Double.parseDouble(zString)); } this.setUseResampling(Utils.getFlag('Q', options)); if (this.m_UseResampling && (thresholdString.length() != 0)) { throw new Exception("Weight pruning with resampling not allowed."); } this.setUseEstimatedPriors(Utils.getFlag("use-estimated-priors", options)); String poolSizeString = Utils.getOption('O', options); if (poolSizeString.length() != 0) { this.setPoolSize(Integer.parseInt(poolSizeString)); } else { this.setPoolSize(1); } String numThreadsString = Utils.getOption('E', options); if (numThreadsString.length() != 0) { this.setNumThreads(Integer.parseInt(numThreadsString)); } else { this.setNumThreads(1); } super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the Classifier. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<>(); if (this.getUseResampling()) { options.add("-Q"); } else { options.add("-P"); options.add("" + this.getWeightThreshold()); } if (this.getUseEstimatedPriors()) { options.add("-use-estimated-priors"); } options.add("-L"); options.add("" + this.getLikelihoodThreshold()); options.add("-H"); options.add("" + this.getShrinkage()); options.add("-Z"); options.add("" + this.getZMax()); options.add("-O"); options.add("" + this.getPoolSize()); options.add("-E"); options.add("" + this.getNumThreads()); Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String ZMaxTipText() { return "Z max threshold for responses"; } /** * Set the Z max threshold on the responses * * @param zMax * the threshold to use */ public void setZMax(final double zMax) { this.m_zMax = zMax; } /** * Get the Z max threshold on the responses * * @return the threshold to use */ public double getZMax() { return this.m_zMax; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String shrinkageTipText() { return "Shrinkage parameter (use small value like 0.1 to reduce " + "overfitting)."; } /** * Get the value of Shrinkage. * * @return Value of Shrinkage. */ public double getShrinkage() { return this.m_Shrinkage; } /** * Set the value of Shrinkage. * * @param newShrinkage * Value to assign to Shrinkage. 
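* A value below 1 (for example 0.1) damps each boosting update and can reduce overfitting.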
*/ public void setShrinkage(final double newShrinkage) { this.m_Shrinkage = newShrinkage; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String likelihoodThresholdTipText() { return "Threshold on improvement in likelihood."; } /** * Get the value of Precision. * * @return Value of Precision. */ public double getLikelihoodThreshold() { return this.m_Precision; } /** * Set the value of Precision. * * @param newPrecision * Value to assign to Precision. */ public void setLikelihoodThreshold(final double newPrecision) { this.m_Precision = newPrecision; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String useResamplingTipText() { return "Whether resampling is used instead of reweighting."; } /** * Set resampling mode * * @param r * true if resampling should be done */ public void setUseResampling(final boolean r) { this.m_UseResampling = r; } /** * Get whether resampling is turned on * * @return true if resampling is turned on */ public boolean getUseResampling() { return this.m_UseResampling; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String useEstimatedPriorsTipText() { return "Whether estimated priors are used rather than uniform ones."; } /** * Set whether to start from class priors estimated from the training data * * @param r * true if estimated priors should be used */ public void setUseEstimatedPriors(final boolean r) { this.m_UseEstimatedPriors = r; } /** * Get whether class priors estimated from the training data are used rather than uniform ones * * @return true if estimated priors are used */ public boolean getUseEstimatedPriors() { return this.m_UseEstimatedPriors; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String weightThresholdTipText() { return "Weight threshold for weight pruning (reduce to 90 " + "for speeding up learning process)."; } /** * Set weight thresholding * * @param threshold * the percentage of weight mass used for training */ public void setWeightThreshold(final int threshold) { this.m_WeightThreshold = threshold; } /** * Get the degree of weight thresholding * * @return the percentage of weight mass used for training */ public int getWeightThreshold() { return this.m_WeightThreshold; } /** * @return a string to describe the option */ public String numThreadsTipText() { return "The number of threads to use for batch prediction, which should be >= size of thread pool."; } /** * Gets the number of threads to use for batch prediction. */ public int getNumThreads() { return this.m_numThreads; } /** * Sets the number of threads to use for batch prediction. */ public void setNumThreads(final int nT) { this.m_numThreads = nT; } /** * @return a string to describe the option */ public String poolSizeTipText() { return "The size of the thread pool, for example, the number of cores in the CPU."; } /** * Gets the size of the thread pool. */ public int getPoolSize() { return this.m_poolSize; } /** * Sets the size of the thread pool. */ public void setPoolSize(final int nT) { this.m_poolSize = nT; } /** * Returns default capabilities of the classifier. 
* * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); // class result.disableAllClasses(); result.disableAllClassDependencies(); result.enable(Capability.NOMINAL_CLASS); return result; } /** * Method used to build the classifier. */ @Override public void buildClassifier(final Instances data) throws Exception { // Initialize classifier this.initializeClassifier(data); // For the given number of iterations while (this.next()) { } // Clean up this.done(); } /** * Builds the boosted classifier * * @param data * the data to train the classifier with * @throws Exception * if building fails, e.g., can't handle data */ @Override public void initializeClassifier(final Instances data) throws Exception { this.m_RandomInstance = new Random(this.m_Seed); int classIndex = data.classIndex(); if (this.m_Classifier == null) { throw new Exception("A base classifier has not been specified!"); } if (!(this.m_Classifier instanceof WeightedInstancesHandler) && !this.m_UseResampling) { this.m_UseResampling = true; } // can classifier handle the data? this.getCapabilities().testWithFail(data); if (this.m_Debug) { System.err.println("Creating copy of the training data"); } // remove instances with missing class this.m_data = new Instances(data); this.m_data.deleteWithMissingClass(); // only class? -> build ZeroR model if ((this.m_data.numAttributes() == 1) || (this.m_data.numInstances() == 0)) { System.err.println("Cannot build model (only class attribute present in data!), " + "using ZeroR model instead!"); this.m_ZeroR = new ZeroR(); this.m_ZeroR.buildClassifier(this.m_data); return; } // Set up initial probabilities and Fs int numInstances = this.m_data.numInstances(); this.m_NumClasses = this.m_data.numClasses(); this.m_ClassAttribute = this.m_data.classAttribute(); this.m_probs = new double[numInstances][this.m_NumClasses]; this.m_InitialFs = new double[this.m_NumClasses]; this.m_trainFs = new double[numInstances][this.m_NumClasses]; if (!this.m_UseEstimatedPriors) { // Default behaviour: equal probabilities for all classes initially for (int i = 0; i < numInstances; i++) { for (int j = 0; j < this.m_NumClasses; j++) { this.m_probs[i][j] = 1.0 / this.m_NumClasses; } } } else { // If requested, used priors estimated from the training set initially this.m_ZeroR = new ZeroR(); this.m_ZeroR.buildClassifier(this.m_data); for (int i = 0; i < numInstances; i++) { this.m_probs[i] = this.m_ZeroR.distributionForInstance(this.m_data.instance(i)); } double avg = 0; for (int j = 0; j < this.m_NumClasses; j++) { avg += Math.log(this.m_probs[0][j]); } avg /= this.m_NumClasses; for (int j = 0; j < this.m_NumClasses; j++) { this.m_InitialFs[j] = Math.log(this.m_probs[0][j]) - avg; } for (int i = 0; i < numInstances; i++) { for (int j = 0; j < this.m_NumClasses; j++) { this.m_trainFs[i][j] = this.m_InitialFs[j]; } } this.m_ZeroR = null; } // Create the base classifiers if (this.m_Debug) { System.err.println("Creating base classifiers"); } this.m_Classifiers = new ArrayList<>(); // Build classifier on all the data this.m_trainYs = new double[numInstances][this.m_NumClasses]; for (int j = 0; j < this.m_NumClasses; j++) { for (int i = 0, k = 0; i < numInstances; i++, k++) { this.m_trainYs[i][j] = (this.m_data.instance(k).classValue() == j) ? 
1.0 - this.m_Offset : 0.0 + (this.m_Offset / this.m_NumClasses); } } // Make class numeric this.m_data.setClassIndex(-1); this.m_data.deleteAttributeAt(classIndex); this.m_data.insertAttributeAt(new Attribute("'pseudo class'"), classIndex); this.m_data.setClassIndex(classIndex); this.m_NumericClassData = new Instances(this.m_data, 0); // Perform iterations this.m_sumOfWeights = this.m_data.sumOfWeights(); this.m_logLikelihood = this.negativeLogLikelihood(this.m_trainYs, this.m_probs, this.m_data, this.m_sumOfWeights); if (this.m_Debug) { System.err.println("Avg. negative log-likelihood: " + this.m_logLikelihood); } this.m_NumGenerated = 0; } /** * Perform another iteration of boosting. */ @Override public boolean next() throws Exception { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (this.m_NumGenerated >= this.m_NumIterations) { return false; } // Do we only have a ZeroR model if (this.m_ZeroR != null) { return false; } double previousLoglikelihood = this.m_logLikelihood; this.performIteration(this.m_trainYs, this.m_trainFs, this.m_probs, this.m_data, this.m_sumOfWeights); this.m_logLikelihood = this.negativeLogLikelihood(this.m_trainYs, this.m_probs, this.m_data, this.m_sumOfWeights); if (this.m_Debug) { System.err.println("Avg. negative log-likelihood: " + this.m_logLikelihood); } if (Math.abs(previousLoglikelihood - this.m_logLikelihood) < this.m_Precision) { return false; } return true; } /** * Clean up after boosting. */ @Override public void done() { this.m_trainYs = this.m_trainFs = this.m_probs = null; this.m_data = null; } /** * Computes negative loglikelihood given class values and estimated probabilities. * * @param trainYs * class values * @param probs * estimated probabilities * @param data * the data * @param sumOfWeights * the sum of weights * @return the computed negative loglikelihood */ private double negativeLogLikelihood(final double[][] trainYs, final double[][] probs, final Instances data, final double sumOfWeights) { double logLikelihood = 0; for (int i = 0; i < trainYs.length; i++) { for (int j = 0; j < this.m_NumClasses; j++) { if (trainYs[i][j] == 1.0 - this.m_Offset) { logLikelihood -= data.instance(i).weight() * Math.log(probs[i][j]); } } } return logLikelihood / sumOfWeights; } /** * Performs one boosting iteration. 
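* For each class j, a working response z and weight w are derived from the current probability estimates following Friedman, Hastie and Tibshirani (1998), a copy of the data is relabelled and reweighted accordingly, a base regression model is fit to it, and the F scores are then updated with the shrunken, centered predictions.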
* * @param trainYs * class values * @param trainFs * F scores * @param probs * probabilities * @param data * the data to run the iteration on * @param origSumOfWeights * the original sum of weights * @throws Exception * in case base classifiers run into problems */ private void performIteration(final double[][] trainYs, final double[][] trainFs, final double[][] probs, final Instances data, final double origSumOfWeights) throws Exception { if (this.m_Debug) { System.err.println("Training classifier " + (this.m_NumGenerated + 1)); } // Make space for classifiers Classifier[] classifiers = new Classifier[this.m_NumClasses]; // Build the new models for (int j = 0; j < this.m_NumClasses; j++) { if (this.m_Debug) { System.err.println("\t...for class " + (j + 1) + " (" + this.m_ClassAttribute.name() + "=" + this.m_ClassAttribute.value(j) + ")"); } // Make copy because we want to save the weights Instances boostData = new Instances(data); // Set instance pseudoclass and weights for (int i = 0; i < probs.length; i++) { // Compute response and weight double p = probs[i][j]; double z, actual = trainYs[i][j]; if (actual == 1 - this.m_Offset) { z = 1.0 / p; if (z > this.m_zMax) { // threshold z = this.m_zMax; } } else { z = -1.0 / (1.0 - p); if (z < -this.m_zMax) { // threshold z = -this.m_zMax; } } double w = (actual - p) / z; // Set values for instance Instance current = boostData.instance(i); current.setValue(boostData.classIndex(), z); current.setWeight(current.weight() * w); } // Scale the weights (helps with some base learners) double sumOfWeights = boostData.sumOfWeights(); double scalingFactor = origSumOfWeights / sumOfWeights; for (int i = 0; i < probs.length; i++) { Instance current = boostData.instance(i); current.setWeight(current.weight() * scalingFactor); } // Select instances to train the classifier on Instances trainData = boostData; if (this.m_WeightThreshold < 100) { trainData = this.selectWeightQuantile(boostData, (double) this.m_WeightThreshold / 100); } else { if (this.m_UseResampling) { double[] weights = new double[boostData.numInstances()]; for (int kk = 0; kk < weights.length; kk++) { weights[kk] = boostData.instance(kk).weight(); } trainData = boostData.resampleWithWeights(this.m_RandomInstance, weights); } } // Build the classifier classifiers[j] = AbstractClassifier.makeCopy(this.m_Classifier); classifiers[j].buildClassifier(trainData); if (this.m_NumClasses == 2) { break; // Don't actually need to build the other model in the two-class // case } } this.m_Classifiers.add(classifiers); // Evaluate / increment trainFs from the classifier for (int i = 0; i < trainFs.length; i++) { double[] pred = new double[this.m_NumClasses]; double predSum = 0; for (int j = 0; j < this.m_NumClasses; j++) { double tempPred = this.m_Shrinkage * classifiers[j].classifyInstance(data.instance(i)); if (Utils.isMissingValue(tempPred)) { throw new UnassignedClassException("LogitBoost: base learner predicted missing value."); } pred[j] = tempPred; if (this.m_NumClasses == 2) { pred[1] = -tempPred; // Can treat 2 classes as special case break; } predSum += pred[j]; } predSum /= this.m_NumClasses; for (int j = 0; j < this.m_NumClasses; j++) { trainFs[i][j] += (pred[j] - predSum) * (this.m_NumClasses - 1) / this.m_NumClasses; } } this.m_NumGenerated = this.m_Classifiers.size(); // Compute the current probability estimates for (int i = 0; i < trainYs.length; i++) { probs[i] = this.probs(trainFs[i]); } } /** * Returns the array of classifiers that have been built. 
* * @return the built classifiers */ public Classifier[][] classifiers() { return this.m_Classifiers.toArray(new Classifier[0][0]); } /** * Computes probabilities from F scores * * @param Fs * the F scores * @return the computed probabilities */ private double[] probs(final double[] Fs) { double maxF = -Double.MAX_VALUE; for (int i = 0; i < Fs.length; i++) { if (Fs[i] > maxF) { maxF = Fs[i]; } } double sum = 0; double[] probs = new double[Fs.length]; for (int i = 0; i < Fs.length; i++) { probs[i] = Math.exp(Fs[i] - maxF); sum += probs[i]; } Utils.normalize(probs, sum); return probs; } /** * Performs efficient batch prediction * * @return true, as LogitBoost can perform efficient batch prediction */ @Override public boolean implementsMoreEfficientBatchPrediction() { return true; } /** * Calculates the class membership probabilities for the given test instance. * * @param inst * the instance to be classified * @return predicted class probability distribution * @throws Exception * if instance could not be classified successfully */ @Override public double[] distributionForInstance(final Instance inst) throws Exception { // default model? if (this.m_ZeroR != null) { return this.m_ZeroR.distributionForInstance(inst); } Instance instance = (Instance) inst.copy(); instance.setDataset(this.m_NumericClassData); return this.processInstance(instance); } /** * Applies models to an instance to get class probabilities. */ protected double[] processInstance(final Instance instance) throws Exception { double[] Fs = new double[this.m_NumClasses]; double[] pred = new double[this.m_NumClasses]; if (this.m_InitialFs != null) { for (int i = 0; i < this.m_NumClasses; i++) { Fs[i] = this.m_InitialFs[i]; } } for (int i = 0; i < this.m_NumGenerated; i++) { double predSum = 0; for (int j = 0; j < this.m_NumClasses; j++) { double tempPred = this.m_Shrinkage * this.m_Classifiers.get(i)[j].classifyInstance(instance); if (Utils.isMissingValue(tempPred)) { throw new UnassignedClassException("LogitBoost: base learner predicted missing value."); } pred[j] = tempPred; if (this.m_NumClasses == 2) { pred[1] = -tempPred; // Can treat 2 classes as special case break; } predSum += pred[j]; } predSum /= this.m_NumClasses; for (int j = 0; j < this.m_NumClasses; j++) { Fs[j] += (pred[j] - predSum) * (this.m_NumClasses - 1) / this.m_NumClasses; } } return this.probs(Fs); } /** * Calculates the class membership probabilities for the given test instances. Uses multi-threading * if requested. * * @param insts * the instances to be classified * @return predicted class probability distributions * @throws Exception * if instances could not be classified successfully */ @Override public double[][] distributionsForInstances(final Instances insts) throws Exception { // default model? 
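// (if only a ZeroR fallback model was built, delegate every batch prediction to it)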
if (this.m_ZeroR != null) { double[][] preds = new double[insts.numInstances()][]; for (int i = 0; i < preds.length; i++) { preds[i] = this.m_ZeroR.distributionForInstance(insts.instance(i)); } return preds; } final Instances numericClassInsts = new Instances(this.m_NumericClassData); for (int i = 0; i < insts.numInstances(); i++) { numericClassInsts.add(insts.instance(i)); } // Start thread pool ExecutorService pool = Executors.newFixedThreadPool(this.m_poolSize); // Set up result set, and chunk size final int chunksize = numericClassInsts.numInstances() / this.m_numThreads; Set<Future<Void>> results = new HashSet<>(); double[][] preds = new double[insts.numInstances()][]; // For each thread for (int j = 0; j < this.m_numThreads; j++) { // Determine batch to be processed final int lo = j * chunksize; final int hi = (j < this.m_numThreads - 1) ? (lo + chunksize) : numericClassInsts.numInstances(); // Create and submit new job for each batch of instances Future<Void> futureT = pool.submit(new Callable<Void>() { @Override public Void call() throws Exception { for (int i = lo; i < hi; i++) { preds[i] = LogitBoost.this.processInstance(numericClassInsts.instance(i)); } return null; } }); results.add(futureT); } // Incorporate predictions try { for (Future<Void> futureT : results) { futureT.get(); } } catch (Exception e) { System.out.println("Predictions could not be generated."); e.printStackTrace(); } pool.shutdown(); return preds; } /** * Returns the boosted model as Java source code. * * @param className * the classname in the generated code * @return the tree as Java source code * @throws Exception * if something goes wrong */ @Override public String toSource(final String className) throws Exception { if (this.m_NumGenerated == 0) { throw new Exception("No model built yet"); } if (!(this.m_Classifiers.get(0)[0] instanceof Sourcable)) { throw new Exception("Base learner " + this.m_Classifier.getClass().getName() + " is not Sourcable"); } StringBuffer text = new StringBuffer("class "); text.append(className).append(" {\n\n"); text.append(" private static double RtoP(double []R, int j) {\n" + " double Rcenter = 0;\n" + " for (int i = 0; i < R.length; i++) {\n" + " Rcenter += R[i];\n" + " }\n" + " Rcenter /= R.length;\n" + " double Rsum = 0;\n" + " for (int i = 0; i < R.length; i++) {\n" + " Rsum += Math.exp(R[i] - Rcenter);\n" + " }\n" + " return Math.exp(R[j]) / Rsum;\n" + " }\n\n"); text.append(" public static double classify(Object[] i) {\n" + " double [] d = distribution(i);\n" + " double maxV = d[0];\n" + " int maxI = 0;\n" + " for (int j = 1; j < " + this.m_NumClasses + "; j++) {\n" + " if (d[j] > maxV) { maxV = d[j]; maxI = j; }\n" + " }\n return (double) maxI;\n }\n\n"); text.append(" public static double [] distribution(Object [] i) {\n"); text.append(" double [] Fs = new double [" + this.m_NumClasses + "];\n"); text.append(" double [] Fi = new double [" + this.m_NumClasses + "];\n"); if (this.m_InitialFs != null) { for (int j = 0; j < this.m_NumClasses; j++) { text.append(" Fs[" + j + "] = " + this.m_InitialFs[j] + ";\n"); } } text.append(" double Fsum;\n"); for (int i = 0; i < this.m_NumGenerated; i++) { text.append(" Fsum = 0;\n"); for (int j = 0; j < this.m_NumClasses; j++) { text.append(" Fi[" + j + "] = " + this.m_Shrinkage + " * " + className + '_' + j + '_' + i + ".classify(i); Fsum += Fi[" + j + "];\n"); if (this.m_NumClasses == 2) { text.append(" Fi[1] = -Fi[0];\n"); // 2-class case is special break; } } text.append(" Fsum /= " + this.m_NumClasses + ";\n"); text.append(" for 
(int j = 0; j < " + this.m_NumClasses + "; j++) {"); text.append(" Fs[j] += (Fi[j] - Fsum) * " + (this.m_NumClasses - 1) + " / " + this.m_NumClasses + "; }\n"); } text.append(" double [] dist = new double [" + this.m_NumClasses + "];\n" + " for (int j = 0; j < " + this.m_NumClasses + "; j++) {\n" + " dist[j] = RtoP(Fs, j);\n" + " }\n return dist;\n"); text.append(" }\n}\n"); for (int i = 0; i < this.m_Classifiers.get(0).length; i++) { for (int j = 0; j < this.m_Classifiers.size(); j++) { text.append(((Sourcable) this.m_Classifiers.get(j)[i]).toSource(className + '_' + i + '_' + j)); } if (this.m_NumClasses == 2) { break; // Only need one classifier per iteration in this case } } return text.toString(); } /** * Returns description of the boosted classifier. * * @return description of the boosted classifier as a string */ @Override public String toString() { // only ZeroR model? if (this.m_ZeroR != null) { StringBuffer buf = new StringBuffer(); buf.append(this.getClass().getName().replaceAll(".*\\.", "") + "\n"); buf.append(this.getClass().getName().replaceAll(".*\\.", "").replaceAll(".", "=") + "\n\n"); buf.append("Warning: No model could be built, hence ZeroR model is used:\n\n"); buf.append(this.m_ZeroR.toString()); return buf.toString(); } StringBuffer text = new StringBuffer(); if ((this.m_InitialFs != null) && this.getUseEstimatedPriors()) { text.append("Initial Fs: \n"); for (int j = 0; j < this.m_NumClasses; j++) { text.append("\n\tClass " + (j + 1) + " (" + this.m_ClassAttribute.name() + "=" + this.m_ClassAttribute.value(j) + "): " + Utils.doubleToString(this.m_InitialFs[j], this.getNumDecimalPlaces()) + "\n"); } text.append("\n"); } if (this.m_NumGenerated == 0) { text.append("LogitBoost: No model built yet."); // text.append(m_Classifiers[0].toString()+"\n"); } else { text.append("LogitBoost: Base classifiers and their weights: \n"); for (int i = 0; i < this.m_NumGenerated; i++) { text.append("\nIteration " + (i + 1)); for (int j = 0; j < this.m_NumClasses; j++) { text.append("\n\tClass " + (j + 1) + " (" + this.m_ClassAttribute.name() + "=" + this.m_ClassAttribute.value(j) + ")\n\n" + this.m_Classifiers.get(i)[j].toString() + "\n"); if (this.m_NumClasses == 2) { text.append("Two-class case: second classifier predicts " + "additive inverse of first classifier and " + "is not explicitly computed.\n\n"); break; } } } text.append("Number of performed iterations: " + this.m_NumGenerated + "\n"); } return text.toString(); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. * * @param argv * the options */ public static void main(final String[] argv) { runClassifier(new LogitBoost(), argv); } }
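/*
 * Editor's note: a minimal usage sketch, not part of the original source. The file name
 * "some-data.arff" and the parameter values are illustrative assumptions; the calls themselves
 * (ConverterUtils.DataSource, setNumIterations, setShrinkage, distributionForInstance) are part
 * of the standard Weka API used by this class.
 */
class LogitBoostUsageSketch {

  public static void main(String[] args) throws Exception {
    // Load a dataset and declare the last attribute as the nominal class.
    weka.core.Instances data =
        new weka.core.converters.ConverterUtils.DataSource("some-data.arff").getDataSet();
    data.setClassIndex(data.numAttributes() - 1);

    // Configure the booster: 50 iterations (-I 50) with shrinkage 0.1 (-H 0.1).
    LogitBoost booster = new LogitBoost();
    booster.setNumIterations(50);
    booster.setShrinkage(0.1);
    booster.buildClassifier(data);

    // Class-probability estimates for the first training instance.
    double[] dist = booster.distributionForInstance(data.instance(0));
    System.out.println(java.util.Arrays.toString(dist));
  }
}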
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/meta/MultiClassClassifier.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * MultiClassClassifier.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import java.io.Serializable; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.RandomizableSingleClassifierEnhancer; import weka.classifiers.rules.ZeroR; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.Range; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.Utils; import weka.filters.Filter; import weka.filters.unsupervised.attribute.MakeIndicator; import weka.filters.unsupervised.instance.RemoveWithValues; /** * <!-- globalinfo-start --> A metaclassifier for handling multi-class datasets with 2-class * classifiers. This classifier is also capable of applying error correcting output codes for * increased accuracy. * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -M &lt;num&gt; * Sets the method to use. Valid values are 0 (1-against-all), * 1 (random codes), 2 (exhaustive code), and 3 (1-against-1). (default 0) * </pre> * * <pre> * -R &lt;num&gt; * Sets the multiplier when using random codes. (default 2.0) * </pre> * * <pre> * -P * Use pairwise coupling (only has an effect for 1-against1) * </pre> * * <pre> * -L * Use log loss decoding for random and exhaustive codes. * </pre> * * <pre> * -S &lt;num&gt; * Random number seed. * (default 1) * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -W * Full name of base classifier. * (default: weka.classifiers.functions.Logistic) * </pre> * * <pre> * * Options specific to classifier weka.classifiers.functions.Logistic: * </pre> * * <pre> * -D * Turn on debugging output. * </pre> * * <pre> * -R &lt;ridge&gt; * Set the ridge in the log-likelihood. * </pre> * * <pre> * -M &lt;number&gt; * Set the maximum number of iterations (default -1, until convergence). * </pre> * * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Len Trigg (len@reeltwo.com) * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @version $Revision$ */ public class MultiClassClassifier extends RandomizableSingleClassifierEnhancer implements OptionHandler { /** for serialization */ static final long serialVersionUID = -3879602011542849141L; /** The classifiers. 
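* One model per generated two-class subproblem; an entry may stay null when its 1-against-1 pair had no training instances.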
*/ protected Classifier[] m_Classifiers; /** Use pairwise coupling with 1-vs-1 */ protected boolean m_pairwiseCoupling = false; /** Needed for pairwise coupling */ protected double[] m_SumOfWeights; /** The filters used to transform the class. */ protected Filter[] m_ClassFilters; /** ZeroR classifier for when all base classifier return zero probability. */ private ZeroR m_ZeroR; /** Internal copy of the class attribute for output purposes */ protected Attribute m_ClassAttribute; /** A transformed dataset header used by the 1-against-1 method */ protected Instances m_TwoClassDataset; /** * The multiplier when generating random codes. Will generate numClasses * m_RandomWidthFactor codes */ private double m_RandomWidthFactor = 2.0; /** True if log loss decoding is to be used for random and exhaustive codes. */ protected boolean m_logLossDecoding = false; /** The multiclass method to use */ protected int m_Method = METHOD_1_AGAINST_ALL; /** 1-against-all */ public static final int METHOD_1_AGAINST_ALL = 0; /** random correction code */ public static final int METHOD_ERROR_RANDOM = 1; /** exhaustive correction code */ public static final int METHOD_ERROR_EXHAUSTIVE = 2; /** 1-against-1 */ public static final int METHOD_1_AGAINST_1 = 3; /** The error correction modes */ public static final Tag[] TAGS_METHOD = { new Tag(METHOD_1_AGAINST_ALL, "1-against-all"), new Tag(METHOD_ERROR_RANDOM, "Random correction code"), new Tag(METHOD_ERROR_EXHAUSTIVE, "Exhaustive correction code"), new Tag(METHOD_1_AGAINST_1, "1-against-1") }; /** * Constructor. */ public MultiClassClassifier() { this.m_Classifier = new weka.classifiers.functions.Logistic(); } /** * String describing default classifier. * * @return the default classifier classname */ @Override protected String defaultClassifierString() { return "weka.classifiers.functions.Logistic"; } /** * Interface for the code constructors */ private abstract class Code implements Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = 418095077487120846L; /** * Subclasses must allocate and fill these. First dimension is number of codes. Second dimension is * number of classes. */ protected boolean[][] m_Codebits; /** * Returns the number of codes. * * @return the number of codes */ public int size() { return this.m_Codebits.length; } /** * Returns the indices of the values set to true for this code, using 1-based indexing (for input to * Range). * * @param which * the index * @return the 1-based indices */ public String getIndices(final int which) { StringBuffer sb = new StringBuffer(); for (int i = 0; i < this.m_Codebits[which].length; i++) { if (this.m_Codebits[which][i]) { if (sb.length() != 0) { sb.append(','); } sb.append(i + 1); } } return sb.toString(); } /** * Returns a human-readable representation of the codes. * * @return a string representation of the codes */ @Override public String toString() { StringBuffer sb = new StringBuffer(); for (int i = 0; i < this.m_Codebits[0].length; i++) { for (int j = 0; j < this.m_Codebits.length; j++) { sb.append(this.m_Codebits[j][i] ? " 1" : " 0"); } sb.append('\n'); } return sb.toString(); } /** * Returns the revision string. 
* * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } /** * Constructs a code with no error correction */ private class StandardCode extends Code { /** for serialization */ static final long serialVersionUID = 3707829689461467358L; /** * constructor * * @param numClasses * the number of classes */ public StandardCode(final int numClasses) { this.m_Codebits = new boolean[numClasses][numClasses]; for (int i = 0; i < numClasses; i++) { this.m_Codebits[i][i] = true; } // System.err.println("Code:\n" + this); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } /** * Constructs a random code assignment */ private class RandomCode extends Code { /** for serialization */ static final long serialVersionUID = 4413410540703926563L; /** random number generator */ Random r = null; /** * constructor * * @param numClasses * the number of classes * @param numCodes * the number of codes * @param data * the data to use */ public RandomCode(final int numClasses, int numCodes, final Instances data) { this.r = data.getRandomNumberGenerator(MultiClassClassifier.this.m_Seed); numCodes = Math.max(2, numCodes); // Need at least two classes this.m_Codebits = new boolean[numCodes][numClasses]; int i = 0; do { this.randomize(); // System.err.println(this); } while (!this.good() && (i++ < 100)); // System.err.println("Code:\n" + this); } private boolean good() { boolean[] ninClass = new boolean[this.m_Codebits[0].length]; boolean[] ainClass = new boolean[this.m_Codebits[0].length]; for (int i = 0; i < ainClass.length; i++) { ainClass[i] = true; } for (int i = 0; i < this.m_Codebits.length; i++) { boolean ninCode = false; boolean ainCode = true; for (int j = 0; j < this.m_Codebits[i].length; j++) { boolean current = this.m_Codebits[i][j]; ninCode = ninCode || current; ainCode = ainCode && current; ninClass[j] = ninClass[j] || current; ainClass[j] = ainClass[j] && current; } if (!ninCode || ainCode) { return false; } } for (int j = 0; j < ninClass.length; j++) { if (!ninClass[j] || ainClass[j]) { return false; } } return true; } /** * randomizes */ private void randomize() { for (int i = 0; i < this.m_Codebits.length; i++) { for (int j = 0; j < this.m_Codebits[i].length; j++) { double temp = this.r.nextDouble(); this.m_Codebits[i][j] = (temp < 0.5) ? false : true; } } } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } /* * TODO: Constructs codes as per: Bose, R.C., Ray Chaudhuri (1960), On a class of error-correcting * binary group codes, Information and Control, 3, 68-79. Hocquenghem, A. (1959) Codes corecteurs * d'erreurs, Chiffres, 2, 147-156. 
*/ // private class BCHCode extends Code {...} /** Constructs an exhaustive code assignment */ private class ExhaustiveCode extends Code { /** for serialization */ static final long serialVersionUID = 8090991039670804047L; /** * constructor * * @param numClasses * the number of classes */ public ExhaustiveCode(final int numClasses) { int width = (int) Math.pow(2, numClasses - 1) - 1; this.m_Codebits = new boolean[width][numClasses]; for (int j = 0; j < width; j++) { this.m_Codebits[j][0] = true; } for (int i = 1; i < numClasses; i++) { int skip = (int) Math.pow(2, numClasses - (i + 1)); for (int j = 0; j < width; j++) { this.m_Codebits[j][i] = ((j / skip) % 2 != 0); } } // System.err.println("Code:\n" + this); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); // class result.disableAllClasses(); result.disableAllClassDependencies(); result.enable(Capability.NOMINAL_CLASS); return result; } /** * Builds the classifiers. * * @param insts * the training data. * @throws Exception * if a classifier can't be built */ @Override public void buildClassifier(Instances insts) throws Exception { Instances newInsts; // can classifier handle the data? this.getCapabilities().testWithFail(insts); // zero training instances - could be incremental boolean zeroTrainingInstances = insts.numInstances() == 0; // remove instances with missing class insts = new Instances(insts); insts.deleteWithMissingClass(); if (this.m_Classifier == null) { throw new Exception("No base classifier has been set!"); } this.m_ZeroR = new ZeroR(); this.m_ZeroR.buildClassifier(insts); this.m_TwoClassDataset = null; int numClassifiers = insts.numClasses(); if (numClassifiers <= 2) { this.m_Classifiers = AbstractClassifier.makeCopies(this.m_Classifier, 1); this.m_Classifiers[0].buildClassifier(insts); this.m_ClassFilters = null; } else if (this.m_Method == METHOD_1_AGAINST_1) { // generate fastvector of pairs ArrayList<int[]> pairs = new ArrayList<>(); for (int i = 0; i < insts.numClasses(); i++) { for (int j = 0; j < insts.numClasses(); j++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (j <= i) { continue; } int[] pair = new int[2]; pair[0] = i; pair[1] = j; pairs.add(pair); } } numClassifiers = pairs.size(); this.m_Classifiers = AbstractClassifier.makeCopies(this.m_Classifier, numClassifiers); this.m_ClassFilters = new Filter[numClassifiers]; this.m_SumOfWeights = new double[numClassifiers]; // generate the classifiers for (int i = 0; i < numClassifiers; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } RemoveWithValues classFilter = new RemoveWithValues(); classFilter.setAttributeIndex("" + (insts.classIndex() + 1)); classFilter.setModifyHeader(true); classFilter.setInvertSelection(true); classFilter.setNominalIndicesArr(pairs.get(i)); Instances tempInstances = new Instances(insts, 0); tempInstances.setClassIndex(-1); classFilter.setInputFormat(tempInstances); newInsts = Filter.useFilter(insts, classFilter); if (newInsts.numInstances() > 0 || zeroTrainingInstances) { newInsts.setClassIndex(insts.classIndex()); 
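// Fit the i-th pairwise model on the filtered two-class subset, keeping its filter and weight mass (the weight mass is needed later for pairwise coupling).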
this.m_Classifiers[i].buildClassifier(newInsts); this.m_ClassFilters[i] = classFilter; this.m_SumOfWeights[i] = newInsts.sumOfWeights(); } else { this.m_Classifiers[i] = null; this.m_ClassFilters[i] = null; } } // construct a two-class header version of the dataset this.m_TwoClassDataset = new Instances(insts, 0); int classIndex = this.m_TwoClassDataset.classIndex(); this.m_TwoClassDataset.setClassIndex(-1); ArrayList<String> classLabels = new ArrayList<>(); classLabels.add("class0"); classLabels.add("class1"); this.m_TwoClassDataset.replaceAttributeAt(new Attribute("class", classLabels), classIndex); this.m_TwoClassDataset.setClassIndex(classIndex); } else { // use error correcting code style methods Code code = null; switch (this.m_Method) { case METHOD_ERROR_EXHAUSTIVE: code = new ExhaustiveCode(numClassifiers); break; case METHOD_ERROR_RANDOM: code = new RandomCode(numClassifiers, (int) (numClassifiers * this.m_RandomWidthFactor), insts); break; case METHOD_1_AGAINST_ALL: code = new StandardCode(numClassifiers); break; default: throw new Exception("Unrecognized correction code type"); } numClassifiers = code.size(); this.m_Classifiers = AbstractClassifier.makeCopies(this.m_Classifier, numClassifiers); this.m_ClassFilters = new MakeIndicator[numClassifiers]; for (int i = 0; i < this.m_Classifiers.length; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } this.m_ClassFilters[i] = new MakeIndicator(); MakeIndicator classFilter = (MakeIndicator) this.m_ClassFilters[i]; classFilter.setAttributeIndex("" + (insts.classIndex() + 1)); classFilter.setValueIndices(code.getIndices(i)); classFilter.setNumeric(false); classFilter.setInputFormat(insts); newInsts = Filter.useFilter(insts, this.m_ClassFilters[i]); if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } this.m_Classifiers[i].buildClassifier(newInsts); } } this.m_ClassAttribute = insts.classAttribute(); } /** * Returns the individual predictions of the base classifiers for an instance. Used by * StackedMultiClassClassifier. Returns the probability for the second "class" predicted by each * base classifier. * * @param inst * the instance to get the prediction for * @return the individual predictions * @throws Exception * if the predictions can't be computed successfully */ public double[] individualPredictions(final Instance inst) throws Exception { double[] result = null; if (this.m_Classifiers.length == 1) { result = new double[1]; result[0] = this.m_Classifiers[0].distributionForInstance(inst)[1]; } else { result = new double[this.m_ClassFilters.length]; for (int i = 0; i < this.m_ClassFilters.length; i++) { if (this.m_Classifiers[i] != null) { if (this.m_Method == METHOD_1_AGAINST_1) { Instance tempInst = (Instance) inst.copy(); tempInst.setDataset(this.m_TwoClassDataset); result[i] = this.m_Classifiers[i].distributionForInstance(tempInst)[1]; } else { this.m_ClassFilters[i].input(inst); this.m_ClassFilters[i].batchFinished(); result[i] = this.m_Classifiers[i].distributionForInstance(this.m_ClassFilters[i].output())[1]; } } } } return result; } /** * Returns the distribution for an instance. 
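* For 1-against-1 the pairwise outputs are either counted as votes or combined via pairwiseCoupling(); for 1-against-all and the code-based methods the per-model outputs are decoded into class probabilities (optionally using log loss).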
* * @param inst * the instance to get the distribution for * @return the distribution * @throws Exception * if the distribution can't be computed successfully */ @Override public double[] distributionForInstance(final Instance inst) throws Exception { if (this.m_Classifiers.length == 1) { return this.m_Classifiers[0].distributionForInstance(inst); } double[] probs = new double[inst.numClasses()]; if (this.m_Method == METHOD_1_AGAINST_1) { double[][] r = new double[inst.numClasses()][inst.numClasses()]; double[][] n = new double[inst.numClasses()][inst.numClasses()]; for (int i = 0; i < this.m_ClassFilters.length; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (this.m_Classifiers[i] != null) { Instance tempInst = (Instance) inst.copy(); tempInst.setDataset(this.m_TwoClassDataset); double[] current = this.m_Classifiers[i].distributionForInstance(tempInst); Range range = new Range(((RemoveWithValues) this.m_ClassFilters[i]).getNominalIndices()); range.setUpper(this.m_ClassAttribute.numValues()); int[] pair = range.getSelection(); if (this.m_pairwiseCoupling && inst.numClasses() > 2) { r[pair[0]][pair[1]] = current[0]; n[pair[0]][pair[1]] = this.m_SumOfWeights[i]; } else { if (current[0] > current[1]) { probs[pair[0]] += 1.0; } else { probs[pair[1]] += 1.0; } } } } if (this.m_pairwiseCoupling && inst.numClasses() > 2) { return pairwiseCoupling(n, r); } } else if (this.m_Method == METHOD_1_AGAINST_ALL) { for (int i = 0; i < this.m_ClassFilters.length; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } this.m_ClassFilters[i].input(inst); this.m_ClassFilters[i].batchFinished(); probs[i] = this.m_Classifiers[i].distributionForInstance(this.m_ClassFilters[i].output())[1]; } } else { if (this.getLogLossDecoding()) { Arrays.fill(probs, 1.0); for (int i = 0; i < this.m_ClassFilters.length; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } this.m_ClassFilters[i].input(inst); this.m_ClassFilters[i].batchFinished(); double[] current = this.m_Classifiers[i].distributionForInstance(this.m_ClassFilters[i].output()); for (int j = 0; j < this.m_ClassAttribute.numValues(); j++) { if (((MakeIndicator) this.m_ClassFilters[i]).getValueRange().isInRange(j)) { probs[j] += Math.log(Utils.SMALL + (1.0 - 2 * Utils.SMALL) * current[1]); } else { probs[j] += Math.log(Utils.SMALL + (1.0 - 2 * Utils.SMALL) * current[0]); } } } probs = Utils.logs2probs(probs); } else { // Use old-style decoding for (int i = 0; i < this.m_ClassFilters.length; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } this.m_ClassFilters[i].input(inst); this.m_ClassFilters[i].batchFinished(); double[] current = this.m_Classifiers[i].distributionForInstance(this.m_ClassFilters[i].output()); for (int j = 0; j < this.m_ClassAttribute.numValues(); j++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (((MakeIndicator) this.m_ClassFilters[i]).getValueRange().isInRange(j)) { probs[j] += current[1]; } else { probs[j] += current[0]; } } } } } if (Utils.gr(Utils.sum(probs), 0)) { Utils.normalize(probs); return probs; } else { return this.m_ZeroR.distributionForInstance(inst); } } /** * Prints the classifiers. 
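* For the 1-against-1 method the class pair handled by each model is listed; for the code-based methods the indicator value range of each model is shown.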
* * @return a string representation of the classifier */ @Override public String toString() { if (this.m_Classifiers == null) { return "MultiClassClassifier: No model built yet."; } StringBuffer text = new StringBuffer(); text.append("MultiClassClassifier\n\n"); for (int i = 0; i < this.m_Classifiers.length; i++) { text.append("Classifier ").append(i + 1); if (this.m_Classifiers[i] != null) { if ((this.m_ClassFilters != null) && (this.m_ClassFilters[i] != null)) { if (this.m_ClassFilters[i] instanceof RemoveWithValues) { Range range = new Range(((RemoveWithValues) this.m_ClassFilters[i]).getNominalIndices()); range.setUpper(this.m_ClassAttribute.numValues()); int[] pair = range.getSelection(); text.append(", " + (pair[0] + 1) + " vs " + (pair[1] + 1)); } else if (this.m_ClassFilters[i] instanceof MakeIndicator) { text.append(", using indicator values: "); text.append(((MakeIndicator) this.m_ClassFilters[i]).getValueRange()); } } text.append('\n'); text.append(this.m_Classifiers[i].toString() + "\n\n"); } else { text.append(" Skipped (no training examples)\n"); } } return text.toString(); } /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ @Override public Enumeration<Option> listOptions() { Vector<Option> vec = new Vector<>(3); vec.addElement(new Option("\tSets the method to use. Valid values are 0 (1-against-all),\n" + "\t1 (random codes), 2 (exhaustive code), and 3 (1-against-1). (default 0)\n", "M", 1, "-M <num>")); vec.addElement(new Option("\tSets the multiplier when using random codes. (default 2.0)", "R", 1, "-R <num>")); vec.addElement(new Option("\tUse pairwise coupling (only has an effect for 1-against-1)", "P", 0, "-P")); vec.addElement(new Option("\tUse log loss decoding for random and exhaustive codes", "L", 0, "-L")); vec.addAll(Collections.list(super.listOptions())); return vec.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -M &lt;num&gt; * Sets the method to use. Valid values are 0 (1-against-all), * 1 (random codes), 2 (exhaustive code), and 3 (1-against-1). (default 0) * </pre> * * <pre> * -R &lt;num&gt; * Sets the multiplier when using random codes. (default 2.0) * </pre> * * <pre> * -P * Use pairwise coupling (only has an effect for 1-against-1) * </pre> * * <pre> * -L * Use log loss decoding for random and exhaustive codes. * </pre> * * <pre> * -S &lt;num&gt; * Random number seed. * (default 1) * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -W * Full name of base classifier. * (default: weka.classifiers.functions.Logistic) * </pre> * * <pre> * * Options specific to classifier weka.classifiers.functions.Logistic: * </pre> * * <pre> * -D * Turn on debugging output. * </pre> * * <pre> * -R &lt;ridge&gt; * Set the ridge in the log-likelihood. * </pre> * * <pre> * -M &lt;number&gt; * Set the maximum number of iterations (default -1, until convergence). 
* </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String errorString = Utils.getOption('M', options); if (errorString.length() != 0) { this.setMethod(new SelectedTag(Integer.parseInt(errorString), TAGS_METHOD)); } else { this.setMethod(new SelectedTag(METHOD_1_AGAINST_ALL, TAGS_METHOD)); } String rfactorString = Utils.getOption('R', options); if (rfactorString.length() != 0) { this.setRandomWidthFactor((new Double(rfactorString)).doubleValue()); } else { this.setRandomWidthFactor(2.0); } this.setUsePairwiseCoupling(Utils.getFlag('P', options)); this.setLogLossDecoding(Utils.getFlag('L', options)); super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the Classifier. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<>(); options.add("-M"); options.add("" + this.m_Method); if (this.getUsePairwiseCoupling()) { options.add("-P"); } if (this.getLogLossDecoding()) { options.add("-L"); } options.add("-R"); options.add("" + this.m_RandomWidthFactor); Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * @return a description of the classifier suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "A metaclassifier for handling multi-class datasets with 2-class " + "classifiers. This classifier is also capable of " + "applying error correcting output codes for increased accuracy."; } /** * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String logLossDecodingTipText() { return "Use log loss decoding for random or exhaustive codes."; } /** * Whether log loss decoding is used for random or exhaustive codes. * * @return true if log loss is used */ public boolean getLogLossDecoding() { return this.m_logLossDecoding; } /** * Sets whether log loss decoding is used for random or exhaustive codes. * * @param newlogLossDecoding * true if log loss is to be used */ public void setLogLossDecoding(final boolean newlogLossDecoding) { this.m_logLossDecoding = newlogLossDecoding; } /** * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String randomWidthFactorTipText() { return "Sets the width multiplier when using random codes. The number " + "of codes generated will be thus number multiplied by the number of " + "classes."; } /** * Gets the multiplier when generating random codes. Will generate numClasses * m_RandomWidthFactor * codes. * * @return the width multiplier */ public double getRandomWidthFactor() { return this.m_RandomWidthFactor; } /** * Sets the multiplier when generating random codes. Will generate numClasses * m_RandomWidthFactor * codes. * * @param newRandomWidthFactor * the new width multiplier */ public void setRandomWidthFactor(final double newRandomWidthFactor) { this.m_RandomWidthFactor = newRandomWidthFactor; } /** * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String methodTipText() { return "Sets the method to use for transforming the multi-class problem into " + "several 2-class ones."; } /** * Gets the method used. Will be one of METHOD_1_AGAINST_ALL, METHOD_ERROR_RANDOM, * METHOD_ERROR_EXHAUSTIVE, or METHOD_1_AGAINST_1. 
* * @return the current method. */ public SelectedTag getMethod() { return new SelectedTag(this.m_Method, TAGS_METHOD); } /** * Sets the method used. Will be one of METHOD_1_AGAINST_ALL, METHOD_ERROR_RANDOM, * METHOD_ERROR_EXHAUSTIVE, or METHOD_1_AGAINST_1. * * @param newMethod * the new method. */ public void setMethod(final SelectedTag newMethod) { if (newMethod.getTags() == TAGS_METHOD) { this.m_Method = newMethod.getSelectedTag().getID(); } } /** * Set whether to use pairwise coupling with 1-vs-1 classification to improve probability estimates. * * @param p * true if pairwise coupling is to be used */ public void setUsePairwiseCoupling(final boolean p) { this.m_pairwiseCoupling = p; } /** * Gets whether to use pairwise coupling with 1-vs-1 classification to improve probability * estimates. * * @return true if pairwise coupling is to be used */ public boolean getUsePairwiseCoupling() { return this.m_pairwiseCoupling; } /** * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String usePairwiseCouplingTipText() { return "Use pairwise coupling (only has an effect for 1-against-1)."; } /** * Implements pairwise coupling. * * @param n * the sum of weights used to train each model * @param r * the probability estimate from each model * @return the coupled estimates */ public static double[] pairwiseCoupling(final double[][] n, final double[][] r) { // Initialize p and u array double[] p = new double[r.length]; for (int i = 0; i < p.length; i++) { p[i] = 1.0 / p.length; } double[][] u = new double[r.length][r.length]; for (int i = 0; i < r.length; i++) { for (int j = i + 1; j < r.length; j++) { u[i][j] = 0.5; } } // firstSum doesn't change double[] firstSum = new double[p.length]; for (int i = 0; i < p.length; i++) { for (int j = i + 1; j < p.length; j++) { firstSum[i] += n[i][j] * r[i][j]; firstSum[j] += n[i][j] * (1 - r[i][j]); } } // Iterate until convergence boolean changed; do { changed = false; double[] secondSum = new double[p.length]; for (int i = 0; i < p.length; i++) { for (int j = i + 1; j < p.length; j++) { secondSum[i] += n[i][j] * u[i][j]; secondSum[j] += n[i][j] * (1 - u[i][j]); } } for (int i = 0; i < p.length; i++) { if ((firstSum[i] == 0) || (secondSum[i] == 0)) { if (p[i] > 0) { changed = true; } p[i] = 0; } else { double factor = firstSum[i] / secondSum[i]; double pOld = p[i]; p[i] *= factor; if (Math.abs(pOld - p[i]) > 1.0e-3) { changed = true; } } } Utils.normalize(p); for (int i = 0; i < r.length; i++) { for (int j = i + 1; j < r.length; j++) { u[i][j] = p[i] / (p[i] + p[j]); } } } while (changed); return p; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. * * @param argv * the options */ public static void main(final String[] argv) { runClassifier(new MultiClassClassifier(), argv); } }
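/*
 * A minimal sketch of calling the pairwiseCoupling utility above directly,
 * assuming a three-class problem; the matrices below are toy values chosen
 * purely for illustration.
 *
 *   // r[i][j] holds the pairwise estimate P(class i | class i or class j),
 *   // n[i][j] the sum of instance weights behind that pairwise model.
 *   double[][] r = { { 0, 0.7, 0.6 }, { 0, 0, 0.4 }, { 0, 0, 0 } };
 *   double[][] n = { { 0, 100, 100 }, { 0, 0, 100 }, { 0, 0, 0 } };
 *   double[] p = MultiClassClassifier.pairwiseCoupling(n, r);
 *   // p is the coupled class distribution; the iteration normalizes it to sum to 1.
 */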
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/meta/MultiClassClassifierUpdateable.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * MultiClassClassifierUpdateable.java * Copyright (C) 2011-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import weka.classifiers.UpdateableClassifier; import weka.core.Instance; import weka.core.Instances; import weka.core.OptionHandler; import weka.core.Range; import weka.core.RevisionUtils; import weka.core.Utils; import weka.filters.unsupervised.instance.RemoveWithValues; /** <!-- globalinfo-start --> * A metaclassifier for handling multi-class datasets with 2-class classifiers. This classifier is also capable of applying error correcting output codes for increased accuracy. The base classifier must be an updateable classifier. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -M &lt;num&gt; * Sets the method to use. Valid values are 0 (1-against-all), * 1 (random codes), 2 (exhaustive code), and 3 (1-against-1). (default 0) * </pre> * * <pre> -R &lt;num&gt; * Sets the multiplier when using random codes. (default 2.0)</pre> * * <pre> -P * Use pairwise coupling (only has an effect for 1-against-1)</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.functions.SGD)</pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Len Trigg (len@reeltwo.com) * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * * @version $Revision$ */ public class MultiClassClassifierUpdateable extends MultiClassClassifier implements OptionHandler, UpdateableClassifier { /** For serialization */ private static final long serialVersionUID = -1619685269774366430L; /** * Constructor */ public MultiClassClassifierUpdateable() { m_Classifier = new weka.classifiers.functions.SGD(); } /** * @return a description of the classifier suitable for displaying in the * explorer/experimenter gui */ @Override public String globalInfo() { return "A metaclassifier for handling multi-class datasets with 2-class " + "classifiers. This classifier is also capable of " + "applying error correcting output codes for increased accuracy. " + "The base classifier must be an updateable classifier."; } @Override public void buildClassifier(Instances insts) throws Exception { if (m_Classifier == null) { throw new Exception("No base classifier has been set!"); } if (!(m_Classifier instanceof UpdateableClassifier)) { throw new Exception("Base classifier must be updateable!"); } super.buildClassifier(insts); } /** * Updates the classifier with the given instance. * * @param instance the new training instance to include in the model * @exception Exception if the instance could not be incorporated in the * model. 
*/ @Override public void updateClassifier(Instance instance) throws Exception { if (!instance.classIsMissing()) { if (m_Classifiers.length == 1) { ((UpdateableClassifier) m_Classifiers[0]).updateClassifier(instance); return; } for (int i = 0; i < m_Classifiers.length; i++) { if (m_Classifiers[i] != null) { m_ClassFilters[i].input(instance); Instance converted = m_ClassFilters[i].output(); if (converted != null) { converted.dataset().setClassIndex(m_ClassAttribute.index()); ((UpdateableClassifier) m_Classifiers[i]) .updateClassifier(converted); if (m_Method == METHOD_1_AGAINST_1) { m_SumOfWeights[i] += converted.weight(); } } } } } } /** * Returns the distribution for an instance. * * @param inst the instance to get the distribution for * @return the distribution * @throws Exception if the distribution can't be computed successfully */ @Override public double[] distributionForInstance(Instance inst) throws Exception { if (m_Classifiers.length == 1) { return m_Classifiers[0].distributionForInstance(inst); } double[] probs = new double[inst.numClasses()]; if (m_Method == METHOD_1_AGAINST_1) { double[][] r = new double[inst.numClasses()][inst.numClasses()]; double[][] n = new double[inst.numClasses()][inst.numClasses()]; for (int i = 0; i < m_ClassFilters.length; i++) { if (m_Classifiers[i] != null && m_SumOfWeights[i] > 0) { Instance tempInst = (Instance) inst.copy(); tempInst.setDataset(m_TwoClassDataset); double[] current = m_Classifiers[i].distributionForInstance(tempInst); Range range = new Range( ((RemoveWithValues) m_ClassFilters[i]).getNominalIndices()); range.setUpper(m_ClassAttribute.numValues()); int[] pair = range.getSelection(); if (m_pairwiseCoupling && inst.numClasses() > 2) { r[pair[0]][pair[1]] = current[0]; n[pair[0]][pair[1]] = m_SumOfWeights[i]; } else { if (current[0] > current[1]) { probs[pair[0]] += 1.0; } else { probs[pair[1]] += 1.0; } } } } if (m_pairwiseCoupling && inst.numClasses() > 2) { try { return pairwiseCoupling(n, r); } catch (IllegalArgumentException ex) { } } if (Utils.gr(Utils.sum(probs), 0)) { Utils.normalize(probs); } return probs; } else { probs = super.distributionForInstance(inst); } /* * if (probs.length == 1) { // ZeroR made the prediction return new * double[m_ClassAttribute.numValues()]; } */ return probs; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. * * @param argv the options */ public static void main(String[] argv) { runClassifier(new MultiClassClassifierUpdateable(), argv); } }
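/*
 * A minimal incremental-training sketch, assuming data.arff is a local
 * nominal-class ARFF file (the file name is only a placeholder) and that
 * the default SGD base learner is kept:
 *
 *   Instances data = weka.core.converters.ConverterUtils.DataSource.read("data.arff");
 *   data.setClassIndex(data.numAttributes() - 1);
 *   MultiClassClassifierUpdateable mcc = new MultiClassClassifierUpdateable();
 *   // buildClassifier sets up one binary model per subproblem ...
 *   mcc.buildClassifier(new Instances(data, 0, 10));
 *   // ... and updateClassifier then feeds the remaining instances one at a time.
 *   for (int i = 10; i < data.numInstances(); i++) {
 *     mcc.updateClassifier(data.instance(i));
 *   }
 */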
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/meta/MultiScheme.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * MultiScheme.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import java.util.Collections; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.Classifier; import weka.classifiers.Evaluation; import weka.classifiers.RandomizableMultipleClassifiersCombiner; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.Utils; /** <!-- globalinfo-start --> * Class for selecting a classifier from among several using cross validation on the training data or the performance on the training data. Performance is measured based on percent correct (classification) or mean-squared error (regression). * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -X &lt;number of folds&gt; * Use cross validation for model selection using the * given number of folds. (default 0, is to * use training error)</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -B &lt;classifier specification&gt; * Full class name of classifier to include, followed * by scheme options. May be specified multiple times. * (default: "weka.classifiers.rules.ZeroR")</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @author Len Trigg (trigg@cs.waikato.ac.nz) * @version $Revision$ */ public class MultiScheme extends RandomizableMultipleClassifiersCombiner { /** for serialization */ static final long serialVersionUID = 5710744346128957520L; /** The classifier that had the best performance on training data. */ protected Classifier m_Classifier; /** The index into the vector for the selected scheme */ protected int m_ClassifierIndex; /** * Number of folds to use for cross validation (0 means use training * error for selection) */ protected int m_NumXValFolds; /** * Returns a string describing classifier * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for selecting a classifier from among several using cross " + "validation on the training data or the performance on the " + "training data. Performance is measured based on percent correct " + "(classification) or mean-squared error (regression)."; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<Option>(1); newVector.addElement(new Option( "\tUse cross validation for model selection using the\n" + "\tgiven number of folds. 
(default 0, is to\n" + "\tuse training error)", "X", 1, "-X <number of folds>")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -X &lt;number of folds&gt; * Use cross validation for model selection using the * given number of folds. (default 0, is to * use training error)</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -B &lt;classifier specification&gt; * Full class name of classifier to include, followed * by scheme options. May be specified multiple times. * (default: "weka.classifiers.rules.ZeroR")</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String numFoldsString = Utils.getOption('X', options); if (numFoldsString.length() != 0) { setNumFolds(Integer.parseInt(numFoldsString)); } else { setNumFolds(0); } super.setOptions(options); } /** * Gets the current settings of the Classifier. * * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] superOptions = super.getOptions(); String [] options = new String [superOptions.length + 2]; int current = 0; options[current++] = "-X"; options[current++] = "" + getNumFolds(); System.arraycopy(superOptions, 0, options, current, superOptions.length); return options; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String classifiersTipText() { return "The classifiers to be chosen from."; } /** * Sets the list of possible classifers to choose from. * * @param classifiers an array of classifiers with all options set. */ public void setClassifiers(Classifier [] classifiers) { m_Classifiers = classifiers; } /** * Gets the list of possible classifers to choose from. * * @return the array of Classifiers */ public Classifier [] getClassifiers() { return m_Classifiers; } /** * Gets a single classifier from the set of available classifiers. * * @param index the index of the classifier wanted * @return the Classifier */ public Classifier getClassifier(int index) { return m_Classifiers[index]; } /** * Gets the classifier specification string, which contains the class name of * the classifier and any options to the classifier * * @param index the index of the classifier string to retrieve, starting from * 0. * @return the classifier string, or the empty string if no classifier * has been assigned (or the index given is out of range). */ protected String getClassifierSpec(int index) { if (m_Classifiers.length < index) { return ""; } Classifier c = getClassifier(index); if (c instanceof OptionHandler) { return c.getClass().getName() + " " + Utils.joinOptions(((OptionHandler)c).getOptions()); } return c.getClass().getName(); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String seedTipText() { return "The seed used for randomizing the data " + "for cross-validation."; } /** * Sets the seed for random number generation. * * @param seed the random number seed */ public void setSeed(int seed) { m_Seed = seed;; } /** * Gets the random number seed. 
* * @return the random number seed */ public int getSeed() { return m_Seed; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String numFoldsTipText() { return "The number of folds used for cross-validation (if 0, " + "performance on training data will be used)."; } /** * Gets the number of folds for cross-validation. A number less * than 2 specifies using training error rather than cross-validation. * * @return the number of folds for cross-validation */ public int getNumFolds() { return m_NumXValFolds; } /** * Sets the number of folds for cross-validation. A number less * than 2 specifies using training error rather than cross-validation. * * @param numFolds the number of folds for cross-validation */ public void setNumFolds(int numFolds) { m_NumXValFolds = numFolds; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String debugTipText() { return "Whether debug information is output to console."; } /** * Set debugging mode * * @param debug true if debug output should be printed */ public void setDebug(boolean debug) { m_Debug = debug; } /** * Get whether debugging is turned on * * @return true if debugging output is on */ public boolean getDebug() { return m_Debug; } /** * Get the index of the classifier that was determined as best during * cross-validation. * * @return the index in the classifier array */ public int getBestClassifierIndex() { return m_ClassifierIndex; } /** * buildClassifier selects a classifier from the set of classifiers * by minimising error on the training data. * * @param data the training data to be used for generating the * selected classifier. * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances data) throws Exception { if (m_Classifiers.length == 0) { throw new Exception("No base classifiers have been set!"); } // can classifier handle the data? getCapabilities().testWithFail(data); // remove instances with missing class Instances newData = new Instances(data); newData.deleteWithMissingClass(); Random random = new Random(m_Seed); newData.randomize(random); if (newData.classAttribute().isNominal() && (m_NumXValFolds > 1)) { newData.stratify(m_NumXValFolds); } Instances train = newData; // train on all data by default Instances test = newData; // test on training data by default Classifier bestClassifier = null; int bestIndex = -1; double bestPerformance = Double.NaN; int numClassifiers = m_Classifiers.length; for (int i = 0; i < numClassifiers; i++) { Classifier currentClassifier = getClassifier(i); Evaluation evaluation; if (m_NumXValFolds > 1) { evaluation = new Evaluation(newData); for (int j = 0; j < m_NumXValFolds; j++) { // We want to randomize the data the same way for every // learning scheme. 
train = newData.trainCV(m_NumXValFolds, j, new Random (1)); test = newData.testCV(m_NumXValFolds, j); currentClassifier.buildClassifier(train); evaluation.setPriors(train); evaluation.evaluateModel(currentClassifier, test); } } else { currentClassifier.buildClassifier(train); evaluation = new Evaluation(train); evaluation.evaluateModel(currentClassifier, test); } double error = evaluation.errorRate(); if (m_Debug) { System.err.println("Error rate: " + Utils.doubleToString(error, 6, 4) + " for classifier " + currentClassifier.getClass().getName()); } if ((i == 0) || (error < bestPerformance)) { bestClassifier = currentClassifier; bestPerformance = error; bestIndex = i; } } m_ClassifierIndex = bestIndex; if (m_NumXValFolds > 1) { bestClassifier.buildClassifier(newData); } m_Classifier = bestClassifier; } /** * Returns class probabilities. * * @param instance the instance to be classified * @return the distribution for the instance * @throws Exception if instance could not be classified * successfully */ public double[] distributionForInstance(Instance instance) throws Exception { return m_Classifier.distributionForInstance(instance); } /** * Output a representation of this classifier * @return a string representation of the classifier */ public String toString() { if (m_Classifier == null) { return "MultiScheme: No model built yet."; } String result = "MultiScheme selection using"; if (m_NumXValFolds > 1) { result += " cross validation error"; } else { result += " error on training data"; } result += " from the following:\n"; for (int i = 0; i < m_Classifiers.length; i++) { result += '\t' + getClassifierSpec(i) + '\n'; } result += "Selected scheme: " + getClassifierSpec(m_ClassifierIndex) + "\n\n" + m_Classifier.toString(); return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. * * @param argv should contain the following arguments: * -t training file [-T test file] [-c class index] */ public static void main(String [] argv) { runClassifier(new MultiScheme(), argv); } }
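/*
 * A minimal selection sketch, assuming train is an Instances object with
 * its class index already set; the candidate schemes below are arbitrary
 * picks for illustration:
 *
 *   MultiScheme ms = new MultiScheme();
 *   ms.setClassifiers(new Classifier[] {
 *       new weka.classifiers.rules.ZeroR(),
 *       new weka.classifiers.trees.J48(),
 *       new weka.classifiers.functions.Logistic() });
 *   ms.setNumFolds(5); // 5-fold cross-validation; 0 would select on training error
 *   ms.setSeed(42);
 *   ms.buildClassifier(train);
 *   System.out.println(ms); // reports which scheme was selected
 */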
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/meta/RandomCommittee.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RandomCommittee.java * Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import java.util.ArrayList; import java.util.Random; import weka.classifiers.AbstractClassifier; import weka.classifiers.RandomizableParallelIteratedSingleClassifierEnhancer; import weka.core.Instance; import weka.core.Instances; import weka.core.PartitionGenerator; import weka.core.Randomizable; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.WeightedInstancesHandler; /** * <!-- globalinfo-start --> Class for building an ensemble of randomizable base classifiers. Each * base classifier is built using a different random number seed (but based on the same data). The * final prediction is a straight average of the predictions generated by the individual base * classifiers. * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -S &lt;num&gt; * Random number seed. * (default 1) * </pre> * * <pre> * -I &lt;num&gt; * Number of iterations. * (default 10) * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -W * Full name of base classifier. * (default: weka.classifiers.trees.RandomTree) * </pre> * * <pre> * * Options specific to classifier weka.classifiers.trees.RandomTree: * </pre> * * <pre> * -K &lt;number of attributes&gt; * Number of attributes to randomly investigate * (&lt;1 = int(log(#attributes)+1)). * </pre> * * <pre> * -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf. * </pre> * * <pre> * -S &lt;num&gt; * Seed for random number generator. * (default 1) * </pre> * * <pre> * -depth &lt;num&gt; * The maximum depth of the tree, 0 for unlimited. * (default 0) * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <!-- options-end --> * * Options after -- are passed to the designated classifier. * <p> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision$ */ public class RandomCommittee extends RandomizableParallelIteratedSingleClassifierEnhancer implements WeightedInstancesHandler, PartitionGenerator { /** for serialization */ static final long serialVersionUID = -9204394360557300093L; /** training data */ protected Instances m_data; /** * Constructor. */ public RandomCommittee() { this.m_Classifier = new weka.classifiers.trees.RandomTree(); } /** * String describing default classifier. * * @return the default classifier classname */ @Override protected String defaultClassifierString() { return "weka.classifiers.trees.RandomTree"; } /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for building an ensemble of randomizable base classifiers. 
Each " + "base classifier is built using a different random number seed (but based " + "on the same data). The final prediction is a straight average of the " + "predictions generated by the individual base classifiers."; } /** * Builds the committee of randomizable classifiers. * * @param data * the training data to be used for generating the committee. * @exception Exception * if the classifier could not be built successfully */ @Override public void buildClassifier(final Instances data) throws Exception { // can classifier handle the data? this.getCapabilities().testWithFail(data); // get fresh instances this.m_data = new Instances(data); super.buildClassifier(this.m_data); if (!(this.m_Classifier instanceof Randomizable)) { throw new IllegalArgumentException("Base learner must implement Randomizable!"); } this.m_Classifiers = AbstractClassifier.makeCopies(this.m_Classifier, this.m_NumIterations); Random random = this.m_data.getRandomNumberGenerator(this.m_Seed); // Resample data based on weights if base learner can't handle weights if (!(this.m_Classifier instanceof WeightedInstancesHandler)) { this.m_data = this.m_data.resampleWithWeights(random); } for (int j = 0; j < this.m_Classifiers.length; j++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } // Set the random number seed for the current classifier. ((Randomizable) this.m_Classifiers[j]).setSeed(random.nextInt()); // The actual building is deferred to buildClassifiers() below. // m_Classifiers[j].buildClassifier(m_data); } this.buildClassifiers(); // save memory this.m_data = null; } /** * Returns a training set for a particular iteration. * * @param iteration * the number of the iteration for the requested training set. * @return the training set for the supplied iteration number * @throws Exception * if something goes wrong when generating a training set. */ @Override protected synchronized Instances getTrainingSet(final int iteration) throws Exception { // we don't manipulate the training data in any way. return this.m_data; } /** * Calculates the class membership probabilities for the given test instance. * * @param instance * the instance to be classified * @return predicted class probability distribution * @exception Exception * if distribution can't be computed successfully */ @Override public double[] distributionForInstance(final Instance instance) throws Exception { double[] sums = new double[instance.numClasses()], newProbs; double numPreds = 0; for (int i = 0; i < this.m_NumIterations; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (instance.classAttribute().isNumeric() == true) { double pred = this.m_Classifiers[i].classifyInstance(instance); if (!Utils.isMissingValue(pred)) { sums[0] += pred; numPreds++; } } else { newProbs = this.m_Classifiers[i].distributionForInstance(instance); for (int j = 0; j < newProbs.length; j++) { sums[j] += newProbs[j]; } } } if (instance.classAttribute().isNumeric() == true) { if (numPreds == 0) { sums[0] = Utils.missingValue(); } else { sums[0] /= numPreds; } return sums; } else if (Utils.eq(Utils.sum(sums), 0)) { return sums; } else { Utils.normalize(sums); return sums; } } /** * Returns description of the committee. 
* * @return description of the committee as a string */ @Override public String toString() { if (this.m_Classifiers == null) { return "RandomCommittee: No model built yet."; } StringBuffer text = new StringBuffer(); text.append("All the base classifiers: \n\n"); for (int i = 0; i < this.m_Classifiers.length; i++) { text.append(this.m_Classifiers[i].toString() + "\n\n"); } return text.toString(); } /** * Builds the classifier to generate a partition. */ @Override public void generatePartition(final Instances data) throws Exception { if (this.m_Classifier instanceof PartitionGenerator) { this.buildClassifier(data); } else { throw new Exception("Classifier: " + this.getClassifierSpec() + " cannot generate a partition"); } } /** * Computes an array that indicates leaf membership */ @Override public double[] getMembershipValues(final Instance inst) throws Exception { if (this.m_Classifier instanceof PartitionGenerator) { ArrayList<double[]> al = new ArrayList<>(); int size = 0; for (int i = 0; i < this.m_Classifiers.length; i++) { double[] r = ((PartitionGenerator) this.m_Classifiers[i]).getMembershipValues(inst); size += r.length; al.add(r); } double[] values = new double[size]; int pos = 0; for (double[] v : al) { System.arraycopy(v, 0, values, pos, v.length); pos += v.length; } return values; } else { throw new Exception("Classifier: " + this.getClassifierSpec() + " cannot generate a partition"); } } /** * Returns the number of elements in the partition. */ @Override public int numElements() throws Exception { if (this.m_Classifier instanceof PartitionGenerator) { int size = 0; for (int i = 0; i < this.m_Classifiers.length; i++) { size += ((PartitionGenerator) this.m_Classifiers[i]).numElements(); } return size; } else { throw new Exception("Classifier: " + this.getClassifierSpec() + " cannot generate a partition"); } } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. * * @param argv * the options */ public static void main(final String[] argv) { runClassifier(new RandomCommittee(), argv); } }
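/*
 * A minimal committee sketch, assuming train is an Instances object with
 * the class index set; RandomTree is the default base learner, and the
 * parameter values below are arbitrary illustrations:
 *
 *   RandomCommittee rc = new RandomCommittee();
 *   rc.setNumIterations(25); // 25 committee members instead of the default 10
 *   rc.setSeed(1);           // drives the per-member random number seeds
 *   rc.buildClassifier(train);
 *   double[] dist = rc.distributionForInstance(train.instance(0));
 */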
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/meta/RandomSubSpace.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RandomSubSpace.java * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import java.util.Arrays; import java.util.Collections; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.Classifier; import weka.classifiers.RandomizableParallelIteratedSingleClassifierEnhancer; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.filters.unsupervised.attribute.Remove; /** * <!-- globalinfo-start --> This method constructs a decision tree based classifier that maintains * highest accuracy on training data and improves on generalization accuracy as it grows in * complexity. The classifier consists of multiple trees constructed systematically by * pseudorandomly selecting subsets of components of the feature vector, that is, trees constructed * in randomly chosen subspaces.<br/> * <br/> * For more information, see<br/> * <br/> * Tin Kam Ho (1998). The Random Subspace Method for Constructing Decision Forests. IEEE * Transactions on Pattern Analysis and Machine Intelligence. 20(8):832-844. URL * http://citeseer.ist.psu.edu/ho98random.html. * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;article{Ho1998, * author = {Tin Kam Ho}, * journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, * number = {8}, * pages = {832-844}, * title = {The Random Subspace Method for Constructing Decision Forests}, * volume = {20}, * year = {1998}, * ISSN = {0162-8828}, * URL = {http://citeseer.ist.psu.edu/ho98random.html} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -P * Size of each subspace: * &lt; 1: percentage of the number of attributes * &gt;=1: absolute number of attributes * </pre> * * <pre> * -S &lt;num&gt; * Random number seed. * (default 1) * </pre> * * <pre> * -I &lt;num&gt; * Number of iterations. * (default 10) * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -W * Full name of base classifier. * (default: weka.classifiers.trees.REPTree) * </pre> * * <pre> * * Options specific to classifier weka.classifiers.trees.REPTree: * </pre> * * <pre> * -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf (default 2). * </pre> * * <pre> * -V &lt;minimum variance for split&gt; * Set minimum numeric class variance proportion * of train variance for split (default 1e-3). 
* </pre> * * <pre> * -N &lt;number of folds&gt; * Number of folds for reduced error pruning (default 3). * </pre> * * <pre> * -S &lt;seed&gt; * Seed for random data shuffling (default 1). * </pre> * * <pre> * -P * No pruning. * </pre> * * <pre> * -L * Maximum tree depth (default -1, no maximum) * </pre> * * <!-- options-end --> * * Options after -- are passed to the designated classifier. * <p> * * @author Bernhard Pfahringer (bernhard@cs.waikato.ac.nz) * @author Peter Reutemann (fracpete@cs.waikato.ac.nz) * @version $Revision$ */ public class RandomSubSpace extends RandomizableParallelIteratedSingleClassifierEnhancer implements WeightedInstancesHandler, TechnicalInformationHandler { /** for serialization */ private static final long serialVersionUID = 1278172513912424947L; /** The size of each bag sample, as a percentage of the training size */ protected double m_SubSpaceSize = 0.5; /** a ZeroR model in case no model can be built from the data */ protected Classifier m_ZeroR; /** Training data */ protected Instances m_data; /** * Constructor. */ public RandomSubSpace() { super(); this.m_Classifier = new weka.classifiers.trees.REPTree(); } /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "This method constructs a decision tree based classifier that " + "maintains highest accuracy on training data and improves on " + "generalization accuracy as it grows in complexity. The classifier " + "consists of multiple trees constructed systematically by " + "pseudorandomly selecting subsets of components of the feature vector, " + "that is, trees constructed in randomly chosen subspaces.\n\n" + "For more information, see\n\n" + this.getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed information about the * technical background of this class, e.g., paper reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "Tin Kam Ho"); result.setValue(Field.YEAR, "1998"); result.setValue(Field.TITLE, "The Random Subspace Method for Constructing Decision Forests"); result.setValue(Field.JOURNAL, "IEEE Transactions on Pattern Analysis and Machine Intelligence"); result.setValue(Field.VOLUME, "20"); result.setValue(Field.NUMBER, "8"); result.setValue(Field.PAGES, "832-844"); result.setValue(Field.URL, "http://citeseer.ist.psu.edu/ho98random.html"); result.setValue(Field.ISSN, "0162-8828"); return result; } /** * String describing default classifier. * * @return the default classifier classname */ @Override protected String defaultClassifierString() { return "weka.classifiers.trees.REPTree"; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> result = new Vector<>(); result.addElement(new Option("\tSize of each subspace:\n" + "\t\t< 1: percentage of the number of attributes\n" + "\t\t>=1: absolute number of attributes\n", "P", 1, "-P")); result.addAll(Collections.list(super.listOptions())); return result.elements(); } /** * Parses a given list of options. 
* <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -P * Size of each subspace: * &lt; 1: percentage of the number of attributes * &gt;=1: absolute number of attributes * </pre> * * <pre> * -S &lt;num&gt; * Random number seed. * (default 1) * </pre> * * <pre> * -I &lt;num&gt; * Number of iterations. * (default 10) * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -W * Full name of base classifier. * (default: weka.classifiers.trees.REPTree) * </pre> * * <pre> * * Options specific to classifier weka.classifiers.trees.REPTree: * </pre> * * <pre> * -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf (default 2). * </pre> * * <pre> * -V &lt;minimum variance for split&gt; * Set minimum numeric class variance proportion * of train variance for split (default 1e-3). * </pre> * * <pre> * -N &lt;number of folds&gt; * Number of folds for reduced error pruning (default 3). * </pre> * * <pre> * -S &lt;seed&gt; * Seed for random data shuffling (default 1). * </pre> * * <pre> * -P * No pruning. * </pre> * * <pre> * -L * Maximum tree depth (default -1, no maximum) * </pre> * * <!-- options-end --> * * Options after -- are passed to the designated classifier. * <p> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String tmpStr; tmpStr = Utils.getOption('P', options); if (tmpStr.length() != 0) { this.setSubSpaceSize(Double.parseDouble(tmpStr)); } else { this.setSubSpaceSize(0.5); } super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the Classifier. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> result = new Vector<>(); result.add("-P"); result.add("" + this.getSubSpaceSize()); Collections.addAll(result, super.getOptions()); return result.toArray(new String[result.size()]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String subSpaceSizeTipText() { return "Size of each subSpace: if less than 1 as a percentage of the " + "number of attributes, otherwise the absolute number of attributes."; } /** * Gets the size of each subSpace: if less than 1, the fraction of the number of * attributes, otherwise the absolute number of attributes. * * @return the subSpace size. */ public double getSubSpaceSize() { return this.m_SubSpaceSize; } /** * Sets the size of each subSpace: if less than 1, the fraction of the number of * attributes, otherwise the absolute number of attributes. * * @param value * the subSpace size. */ public void setSubSpaceSize(final double value) { this.m_SubSpaceSize = value; } /** * calculates the number of attributes * * @param total * the available number of attributes * @param fraction * the fraction - if less than 1 it represents the percentage, otherwise the absolute * number of attributes * @return the number of attributes to use */ protected int numberOfAttributes(final int total, final double fraction) { int k = (int) Math.round((fraction < 1.0) ? total * fraction : fraction); if (k > total) { k = total; } if (k < 1) { k = 1; } return k; } /** * generates an index string describing a random subspace, suitable for the Remove filter. 
* * @param indices * the attribute indices * @param subSpaceSize * the size of the subspace * @param classIndex * the class index * @param random * the random number generator * @return the generated string describing the subspace */ protected String randomSubSpace(final Integer[] indices, final int subSpaceSize, final int classIndex, final Random random) { Collections.shuffle(Arrays.asList(indices), random); StringBuffer sb = new StringBuffer(""); for (int i = 0; i < subSpaceSize; i++) { sb.append(indices[i] + ","); } sb.append(classIndex); if (this.getDebug()) { System.out.println("subSPACE = " + sb); } return sb.toString(); } /** * builds the classifier. * * @param data * the training data to be used for generating the classifier. * @throws Exception * if the classifier could not be built successfully */ @Override public void buildClassifier(final Instances data) throws Exception { // can classifier handle the data? this.getCapabilities().testWithFail(data); // get fresh Instances object this.m_data = new Instances(data); // only class? -> build ZeroR model if (this.m_data.numAttributes() == 1) { System.err.println("Cannot build model (only class attribute present in data!), " + "using ZeroR model instead!"); this.m_ZeroR = new weka.classifiers.rules.ZeroR(); this.m_ZeroR.buildClassifier(this.m_data); return; } else { this.m_ZeroR = null; } super.buildClassifier(data); Integer[] indices = new Integer[data.numAttributes() - 1]; int classIndex = data.classIndex(); int offset = 0; for (int i = 0; i < indices.length + 1; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (i != classIndex) { indices[offset++] = i + 1; } } int subSpaceSize = this.numberOfAttributes(indices.length, this.getSubSpaceSize()); Random random = data.getRandomNumberGenerator(this.m_Seed); for (int j = 0; j < this.m_Classifiers.length; j++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } FilteredClassifier fc = new FilteredClassifier(); fc.setSeed(random.nextInt()); fc.setClassifier(this.m_Classifiers[j]); this.m_Classifiers[j] = fc; Remove rm = new Remove(); rm.setOptions(new String[] { "-V", "-R", this.randomSubSpace(indices, subSpaceSize, classIndex + 1, random) }); fc.setFilter(rm); } this.buildClassifiers(); // save memory this.m_data = null; } /** * Returns a training set for a particular iteration. * * @param iteration * the number of the iteration for the requested training set. * @return the training set for the supplied iteration number * @throws Exception * if something goes wrong when generating a training set. */ @Override protected synchronized Instances getTrainingSet(final int iteration) throws Exception { // We don't manipulate the training data in any way. The FilteredClassifiers // take care of generating the sub-spaces. return this.m_data; } /** * Calculates the class membership probabilities for the given test instance. * * @param instance * the instance to be classified * @return predicted class probability distribution * @throws Exception * if distribution can't be computed successfully */ @Override public double[] distributionForInstance(final Instance instance) throws Exception { // default model? 
if (this.m_ZeroR != null) { return this.m_ZeroR.distributionForInstance(instance); } double[] sums = new double[instance.numClasses()], newProbs; double numPreds = 0; for (int i = 0; i < this.m_NumIterations; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (instance.classAttribute().isNumeric() == true) { double pred = this.m_Classifiers[i].classifyInstance(instance); if (!Utils.isMissingValue(pred)) { sums[0] += pred; numPreds++; } } else { newProbs = this.m_Classifiers[i].distributionForInstance(instance); for (int j = 0; j < newProbs.length; j++) { sums[j] += newProbs[j]; } } } if (instance.classAttribute().isNumeric() == true) { if (numPreds == 0) { sums[0] = Utils.missingValue(); } else { sums[0] /= numPreds; } return sums; } else if (Utils.eq(Utils.sum(sums), 0)) { return sums; } else { Utils.normalize(sums); return sums; } } /** * Returns description of the bagged classifier. * * @return description of the bagged classifier as a string */ @Override public String toString() { // only ZeroR model? if (this.m_ZeroR != null) { StringBuffer buf = new StringBuffer(); buf.append(this.getClass().getName().replaceAll(".*\\.", "") + "\n"); buf.append(this.getClass().getName().replaceAll(".*\\.", "").replaceAll(".", "=") + "\n\n"); buf.append("Warning: No model could be built, hence ZeroR model is used:\n\n"); buf.append(this.m_ZeroR.toString()); return buf.toString(); } if (this.m_Classifiers == null) { return "RandomSubSpace: No model built yet."; } StringBuffer text = new StringBuffer(); text.append("All the base classifiers: \n\n"); for (int i = 0; i < this.m_Classifiers.length; i++) { text.append(this.m_Classifiers[i].toString() + "\n\n"); } return text.toString(); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. * * @param args * the options */ public static void main(final String[] args) { runClassifier(new RandomSubSpace(), args); } }
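/*
 * A minimal subspace-ensemble sketch, assuming train is an Instances object
 * with the class index set; REPTree is the default base learner, and the
 * parameter values below are arbitrary illustrations:
 *
 *   RandomSubSpace rss = new RandomSubSpace();
 *   rss.setSubSpaceSize(0.5);  // each member sees roughly half the attributes
 *   rss.setNumIterations(10);  // number of ensemble members
 *   rss.setSeed(1);
 *   rss.buildClassifier(train);
 *   double[] dist = rss.distributionForInstance(train.instance(0));
 */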
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/meta/RandomizableFilteredClassifier.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RandomizableFilteredClassifier.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import weka.core.*; /** <!-- globalinfo-start --> * Class for running an arbitrary classifier on data that has been passed through an arbitrary filter. Like the classifier, the structure of the filter is based exclusively on the training data and test instances will be processed by the filter without changing their structure. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -F &lt;filter specification&gt; * Full class name of filter to use, followed * by filter options. * default: "weka.filters.unsupervised.attribute.RandomProjection -N 10 -D Sparse1"</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.lazy.IBk)</pre> * * <pre> -S num * Set the random number seed (default 1). </pre> * * <pre> * Options specific to classifier weka.classifiers.lazy.IBk: * </pre> * * <pre> -I * Weight neighbours by the inverse of their distance * (use when k &gt; 1)</pre> * * <pre> -F * Weight neighbours by 1 - their distance * (use when k &gt; 1)</pre> * * <pre> -K &lt;number of neighbors&gt; * Number of nearest neighbours (k) used in classification. * (Default = 1)</pre> * * <pre> -E * Minimise mean squared error rather than mean absolute * error when using -X option with numeric prediction.</pre> * * <pre> -W &lt;window size&gt; * Maximum number of training instances maintained. * Training instances are dropped FIFO. (Default = no window)</pre> * * <pre> -X * Select the number of nearest neighbours between 1 * and the k value specified using hold-one-out evaluation * on the training data (use when k &gt; 1)</pre> * * <pre> -A * The nearest neighbour search algorithm to use (default: weka.core.neighboursearch.LinearNNSearch). * </pre> * <!-- options-end --> * * @author Eibe Frank * @version $Revision: 9117 $ */ public class RandomizableFilteredClassifier extends FilteredClassifier { /** for serialization */ static final long serialVersionUID = -4523466618555717333L; /** * Returns a string describing this classifier * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "A simple variant of the FilteredClassifier that instantiates the model with a randomizable filter, " + "more specifically, RandomProjection, and IBk as the base classifier. Other than this, and checking " + "that at least one of the two base schemes implements the Randomizable interface, it implements " + "exactly the same functionality as FilteredClassifier, which (now) also implements Randomizable."; } /** * String describing default classifier. 
* * @return the default classifier classname */ protected String defaultClassifierString() { return "weka.classifiers.lazy.IBk"; } /** * String describing default filter. */ protected String defaultFilterString() { return "weka.filters.unsupervised.attribute.RandomProjection -N 10 -D Sparse1"; } /** * Default constructor. */ public RandomizableFilteredClassifier() { m_Classifier = new weka.classifiers.lazy.IBk(); m_Filter = new weka.filters.unsupervised.attribute.RandomProjection(); } /** * Initializes an iterative classifier. (If the base classifier supports * this.) * * @param data the instances to be used in induction * @exception Exception if the model cannot be initialized */ public void initializeClassifier(Instances data) throws Exception { if (!(m_Classifier instanceof Randomizable) && !(m_Filter instanceof Randomizable)) { throw new Exception("Either the classifier or the filter must implement the Randomizable interface."); } super.initializeClassifier(data); } /** * Build the classifier on the filtered data. * * @param data the training data * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances data) throws Exception { if (!(m_Classifier instanceof Randomizable) && !(m_Filter instanceof Randomizable)) { throw new Exception("Either the classifier or the filter must implement the Randomizable interface."); } super.buildClassifier(data); } /** * Output a representation of this classifier * * @return a representation of this classifier */ public String toString() { if (m_FilteredInstances == null) { return "RandomizableFilteredClassifier: No model built yet."; } String result = "RandomizableFilteredClassifier using " + getClassifierSpec() + " on data filtered through " + getFilterSpec() + "\n\nFiltered Header\n" + m_FilteredInstances.toString() + "\n\nClassifier Model\n" + m_Classifier.toString(); return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 9117 $"); } /** * Main method for testing this class. * * @param argv should contain the following arguments: * -t training file [-T test file] [-c class index] */ public static void main(String [] argv) { runClassifier(new RandomizableFilteredClassifier(), argv); } }
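/*
 * A minimal configuration sketch, assuming train is an Instances object with
 * the class index set; the projection size and neighbour count below are
 * arbitrary illustrations:
 *
 *   RandomizableFilteredClassifier rfc = new RandomizableFilteredClassifier();
 *   weka.filters.unsupervised.attribute.RandomProjection rp =
 *       new weka.filters.unsupervised.attribute.RandomProjection();
 *   rp.setNumberOfAttributes(10); // project onto 10 random dimensions
 *   rfc.setFilter(rp);
 *   rfc.setClassifier(new weka.classifiers.lazy.IBk(3)); // 3-nearest neighbours
 *   rfc.buildClassifier(train);
 */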
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/meta/RegressionByDiscretization.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RegressionByDiscretization.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import java.util.ArrayList; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.ConditionalDensityEstimator; import weka.classifiers.IntervalEstimator; import weka.classifiers.SingleClassifierEnhancer; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SerializedObject; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.Utils; import weka.estimators.UnivariateDensityEstimator; import weka.estimators.UnivariateEqualFrequencyHistogramEstimator; import weka.estimators.UnivariateIntervalEstimator; import weka.estimators.UnivariateQuantileEstimator; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Discretize; /** * <!-- globalinfo-start --> A regression scheme that employs any classifier on a copy of the data * that has the class attribute (equal-width) discretized. The predicted value is the expected value * of the mean class value for each discretized interval (based on the predicted probabilities for * each interval). * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -B &lt;int&gt; * Number of bins for equal-width discretization * (default 10). * </pre> * * <pre> * -E * Whether to delete empty bins after discretization * (default false). * </pre> * * <pre> * -F * Use equal-frequency instead of equal-width discretization. * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -W * Full name of base classifier. * (default: weka.classifiers.trees.J48) * </pre> * * <pre> * * Options specific to classifier weka.classifiers.trees.J48: * </pre> * * <pre> * -U * Use unpruned tree. * </pre> * * <pre> * -C &lt;pruning confidence&gt; * Set confidence threshold for pruning. * (default 0.25) * </pre> * * <pre> * -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf. * (default 2) * </pre> * * <pre> * -R * Use reduced error pruning. * </pre> * * <pre> * -N &lt;number of folds&gt; * Set number of folds for reduced error * pruning. One fold is used as pruning set. * (default 3) * </pre> * * <pre> * -B * Use binary splits only. * </pre> * * <pre> * -S * Don't perform subtree raising. * </pre> * * <pre> * -L * Do not clean up after the tree has been built. * </pre> * * <pre> * -A * Laplace smoothing for predicted probabilities. * </pre> * * <pre> * -Q &lt;seed&gt; * Seed for random data shuffling (default 1). 
* </pre> * * <!-- options-end --> * * @author Len Trigg (trigg@cs.waikato.ac.nz) * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision$ */ public class RegressionByDiscretization extends SingleClassifierEnhancer implements IntervalEstimator, ConditionalDensityEstimator { /** for serialization */ static final long serialVersionUID = 5066426153134050378L; /** The discretization filter. */ protected Discretize m_Discretizer = new Discretize(); /** The number of discretization intervals. */ protected int m_NumBins = 10; /** The mean values for each Discretized class interval. */ protected double[] m_ClassMeans; /** The class counts for each Discretized class interval. */ protected int[] m_ClassCounts; /** Whether to delete empty intervals. */ protected boolean m_DeleteEmptyBins; /** Mapping to convert indices in case empty bins are deleted. */ protected int[] m_OldIndexToNewIndex; /** Header of discretized data. */ protected Instances m_DiscretizedHeader = null; /** Use equal-frequency binning */ protected boolean m_UseEqualFrequency = false; /** Whether to minimize absolute error, rather than squared error. */ protected boolean m_MinimizeAbsoluteError = false; /** Which estimator to use (default: histogram) */ protected UnivariateDensityEstimator m_Estimator = new UnivariateEqualFrequencyHistogramEstimator(); /** The original target values in the training data */ protected double[] m_OriginalTargetValues = null; /** The converted target values in the training data */ protected int[] m_NewTargetValues = null; /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "A regression scheme that employs any " + "classifier on a copy of the data that has the class attribute " + "discretized. The predicted value is the expected value of the " + "mean class value for each discretized interval (based on the " + "predicted probabilities for each interval). This class now " + "also supports conditional density estimation by building " + "a univariate density estimator from the target values in " + "the training data, weighted by the class probabilities. \n\n" + "For more information on this process, see\n\n" + this.getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed information about the * technical background of this class, e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Eibe Frank and Remco R. Bouckaert"); result.setValue(Field.TITLE, "Conditional Density Estimation with Class Probability Estimators"); result.setValue(Field.BOOKTITLE, "First Asian Conference on Machine Learning"); result.setValue(Field.YEAR, "2009"); result.setValue(Field.PAGES, "65-81"); result.setValue(Field.PUBLISHER, "Springer Verlag"); result.setValue(Field.ADDRESS, "Berlin"); return result; } /** * String describing default classifier. * * @return the default classifier classname */ @Override protected String defaultClassifierString() { return "weka.classifiers.trees.J48"; } /** * Default constructor. */ public RegressionByDiscretization() { this.m_Classifier = new weka.classifiers.trees.J48(); } /** * Returns default capabilities of the classifier. 
* * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); // class result.disableAllClasses(); result.disableAllClassDependencies(); result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); result.setMinimumNumberInstances(2); return result; } /** * Generates the classifier. * * @param instances * set of instances serving as training data * @throws Exception * if the classifier has not been generated successfully */ @Override public void buildClassifier(Instances instances) throws Exception { // can classifier handle the data? this.getCapabilities().testWithFail(instances); // remove instances with missing class instances = new Instances(instances); instances.deleteWithMissingClass(); // Discretize the training data this.m_Discretizer.setIgnoreClass(true); this.m_Discretizer.setAttributeIndices("" + (instances.classIndex() + 1)); this.m_Discretizer.setBins(this.getNumBins()); this.m_Discretizer.setUseEqualFrequency(this.getUseEqualFrequency()); this.m_Discretizer.setInputFormat(instances); Instances newTrain = Filter.useFilter(instances, this.m_Discretizer); // Should empty bins be deleted? this.m_OldIndexToNewIndex = null; if (this.m_DeleteEmptyBins) { // Figure out which classes are empty after discretization int numNonEmptyClasses = 0; boolean[] notEmptyClass = new boolean[newTrain.numClasses()]; for (int i = 0; i < newTrain.numInstances(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (!notEmptyClass[(int) newTrain.instance(i).classValue()]) { numNonEmptyClasses++; notEmptyClass[(int) newTrain.instance(i).classValue()] = true; } } // Compute new list of non-empty classes and mapping of indices ArrayList<String> newClassVals = new ArrayList<>(numNonEmptyClasses); this.m_OldIndexToNewIndex = new int[newTrain.numClasses()]; for (int i = 0; i < newTrain.numClasses(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (notEmptyClass[i]) { this.m_OldIndexToNewIndex[i] = newClassVals.size(); newClassVals.add(newTrain.classAttribute().value(i)); } } // Compute new header information Attribute newClass = new Attribute(newTrain.classAttribute().name(), newClassVals); ArrayList<Attribute> newAttributes = new ArrayList<>(newTrain.numAttributes()); for (int i = 0; i < newTrain.numAttributes(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (i != newTrain.classIndex()) { newAttributes.add((Attribute) newTrain.attribute(i).copy()); } else { newAttributes.add(newClass); } } // Create new header and modify instances Instances newTrainTransformed = new Instances(newTrain.relationName(), newAttributes, newTrain.numInstances()); newTrainTransformed.setClassIndex(newTrain.classIndex()); for (int i = 0; i < newTrain.numInstances(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } Instance inst = newTrain.instance(i); newTrainTransformed.add(inst); newTrainTransformed.lastInstance().setClassValue(this.m_OldIndexToNewIndex[(int) inst.classValue()]); } newTrain = newTrainTransformed; } // Store target values, in case a prediction interval or computation of median is required this.m_OriginalTargetValues = new 
double[instances.numInstances()]; this.m_NewTargetValues = new int[instances.numInstances()]; for (int i = 0; i < this.m_OriginalTargetValues.length; i++) { this.m_OriginalTargetValues[i] = instances.instance(i).classValue(); this.m_NewTargetValues[i] = (int) newTrain.instance(i).classValue(); } this.m_DiscretizedHeader = new Instances(newTrain, 0); int numClasses = newTrain.numClasses(); // Calculate the mean value for each bin of the new class attribute this.m_ClassMeans = new double[numClasses]; this.m_ClassCounts = new int[numClasses]; for (int i = 0; i < instances.numInstances(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } Instance inst = newTrain.instance(i); if (!inst.classIsMissing()) { int classVal = (int) inst.classValue(); this.m_ClassCounts[classVal]++; this.m_ClassMeans[classVal] += instances.instance(i).classValue(); } } for (int i = 0; i < numClasses; i++) { if (this.m_ClassCounts[i] > 0) { this.m_ClassMeans[i] /= this.m_ClassCounts[i]; } } if (this.m_Debug) { System.out.println("Bin Means"); System.out.println("=========="); for (int i = 0; i < this.m_ClassMeans.length; i++) { System.out.println(this.m_ClassMeans[i]); } System.out.println(); } // Train the sub-classifier this.m_Classifier.buildClassifier(newTrain); } /** * Get density estimator for given instance. * * @param instance * the instance * @param print * currently unused * @return the univariate density estimator * @exception Exception * if the estimator can't be computed */ protected UnivariateDensityEstimator getDensityEstimator(final Instance instance, final boolean print) throws Exception { // Initialize estimator UnivariateDensityEstimator e = (UnivariateDensityEstimator) new SerializedObject(this.m_Estimator).getObject(); if (e instanceof UnivariateEqualFrequencyHistogramEstimator) { // Set the number of bins appropriately ((UnivariateEqualFrequencyHistogramEstimator) e).setNumBins(this.getNumBins()); // Initialize boundaries of equal frequency estimator for (int i = 0; i < this.m_OriginalTargetValues.length; i++) { e.addValue(this.m_OriginalTargetValues[i], 1.0); } // Construct estimator, then initialize statistics, so that only boundaries will be kept ((UnivariateEqualFrequencyHistogramEstimator) e).initializeStatistics(); // Now that boundaries have been determined, we only need to update the bin weights ((UnivariateEqualFrequencyHistogramEstimator) e).setUpdateWeightsOnly(true); } // Make sure structure of class attribute correct this.m_Discretizer.input(instance); this.m_Discretizer.batchFinished(); Instance newInstance = this.m_Discretizer.output();// (Instance)instance.copy(); if (this.m_OldIndexToNewIndex != null) { newInstance.setClassValue(this.m_OldIndexToNewIndex[(int) newInstance.classValue()]); } newInstance.setDataset(this.m_DiscretizedHeader); double[] probs = this.m_Classifier.distributionForInstance(newInstance); // Add values to estimator for (int i = 0; i < this.m_OriginalTargetValues.length; i++) { e.addValue(this.m_OriginalTargetValues[i], probs[this.m_NewTargetValues[i]] * this.m_OriginalTargetValues.length / this.m_ClassCounts[this.m_NewTargetValues[i]]); } // Return estimator return e; } /** * Returns an N * 2 array, where N is the number of prediction intervals. In each row, the first * element contains the lower boundary of the corresponding prediction interval and the second * element the upper boundary. * * @param instance * the instance to make the prediction for. 
* @param confidenceLevel * the percentage of cases that the interval should cover. * @return an array of prediction intervals * @exception Exception * if the intervals can't be computed */ @Override public double[][] predictIntervals(final Instance instance, final double confidenceLevel) throws Exception { // Get density estimator UnivariateIntervalEstimator e = (UnivariateIntervalEstimator) this.getDensityEstimator(instance, false); // Return intervals return e.predictIntervals(confidenceLevel); } /** * Returns natural logarithm of density estimate for given value based on given instance. * * @param instance * the instance to make the prediction for. * @param value * the value to make the prediction for. * @return the natural logarithm of the density estimate * @exception Exception * if the density estimate can't be computed */ @Override public double logDensity(final Instance instance, final double value) throws Exception { // Get density estimator UnivariateDensityEstimator e = this.getDensityEstimator(instance, true); // Return estimate return e.logDensity(value); } /** * Returns a predicted class for the test instance. * * @param instance * the instance to be classified * @return predicted class value * @throws Exception * if the prediction couldn't be made */ @Override public double classifyInstance(final Instance instance) throws Exception { // Make sure structure of class attribute correct this.m_Discretizer.input(instance); this.m_Discretizer.batchFinished(); Instance newInstance = this.m_Discretizer.output();// (Instance)instance.copy(); if (this.m_OldIndexToNewIndex != null) { newInstance.setClassValue(this.m_OldIndexToNewIndex[(int) newInstance.classValue()]); } newInstance.setDataset(this.m_DiscretizedHeader); double[] probs = this.m_Classifier.distributionForInstance(newInstance); if (!this.m_MinimizeAbsoluteError) { // Compute actual prediction double prediction = 0, probSum = 0; for (int j = 0; j < probs.length; j++) { prediction += probs[j] * this.m_ClassMeans[j]; probSum += probs[j]; } return prediction / probSum; } else { // Get density estimator UnivariateQuantileEstimator e = (UnivariateQuantileEstimator) this.getDensityEstimator(instance, true); // Return estimate return e.predictQuantile(0.5); } } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<>(5); newVector.addElement(new Option("\tNumber of bins for equal-width discretization\n" + "\t(default 10).\n", "B", 1, "-B <int>")); newVector.addElement(new Option("\tWhether to delete empty bins after discretization\n" + "\t(default false).\n", "E", 0, "-E")); newVector.addElement(new Option("\tWhether to minimize absolute error, rather than squared error.\n" + "\t(default false).\n", "A", 0, "-A")); newVector.addElement(new Option("\tUse equal-frequency instead of equal-width discretization.", "F", 0, "-F")); newVector.addElement(new Option("\tThe density estimator to use (including parameters).", "K", 1, "-K <estimator name and parameters>")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. 
* <p/> * * <!-- options-start --> <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String binsString = Utils.getOption('B', options); if (binsString.length() != 0) { this.setNumBins(Integer.parseInt(binsString)); } else { this.setNumBins(10); } this.setDeleteEmptyBins(Utils.getFlag('E', options)); this.setUseEqualFrequency(Utils.getFlag('F', options)); this.setMinimizeAbsoluteError(Utils.getFlag('A', options)); String tmpStr = Utils.getOption('K', options); String[] tmpOptions = Utils.splitOptions(tmpStr); if (tmpOptions.length != 0) { tmpStr = tmpOptions[0]; tmpOptions[0] = ""; this.setEstimator(((UnivariateDensityEstimator) Utils.forName(UnivariateDensityEstimator.class, tmpStr, tmpOptions))); } super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the Classifier. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<>(); options.add("-B"); options.add("" + this.getNumBins()); if (this.getDeleteEmptyBins()) { options.add("-E"); } if (this.getUseEqualFrequency()) { options.add("-F"); } if (this.getMinimizeAbsoluteError()) { options.add("-A"); } options.add("-K"); if (this.getEstimator() instanceof OptionHandler) { options.add("" + this.getEstimator().getClass().getName() + " " + Utils.joinOptions(((OptionHandler) this.getEstimator()).getOptions())); } else { options.add("" + this.getEstimator().getClass().getName()); } Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String numBinsTipText() { return "Number of bins for discretization."; } /** * Gets the number of bins numeric attributes will be divided into * * @return the number of bins. */ public int getNumBins() { return this.m_NumBins; } /** * Sets the number of bins to divide each selected numeric attribute into * * @param numBins * the number of bins */ public void setNumBins(final int numBins) { this.m_NumBins = numBins; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String deleteEmptyBinsTipText() { return "Whether to delete empty bins after discretization."; } /** * Gets whether empty bins are deleted. * * @return true if empty bins get deleted. */ public boolean getDeleteEmptyBins() { return this.m_DeleteEmptyBins; } /** * Sets whether to delete empty bins. * * @param b * if true, empty bins will be deleted */ public void setDeleteEmptyBins(final boolean b) { this.m_DeleteEmptyBins = b; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String minimizeAbsoluteErrorTipText() { return "Whether to minimize absolute error."; } /** * Gets whether to min. abs. error * * @return true if abs. err. is to be minimized */ public boolean getMinimizeAbsoluteError() { return this.m_MinimizeAbsoluteError; } /** * Sets whether to min. abs. error. * * @param b * if true, abs. err. 
is minimized */ public void setMinimizeAbsoluteError(final boolean b) { this.m_MinimizeAbsoluteError = b; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String useEqualFrequencyTipText() { return "If set to true, equal-frequency binning will be used instead of" + " equal-width binning."; } /** * Get the value of UseEqualFrequency. * * @return Value of UseEqualFrequency. */ public boolean getUseEqualFrequency() { return this.m_UseEqualFrequency; } /** * Set the value of UseEqualFrequency. * * @param newUseEqualFrequency * Value to assign to UseEqualFrequency. */ public void setUseEqualFrequency(final boolean newUseEqualFrequency) { this.m_UseEqualFrequency = newUseEqualFrequency; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String estimatorTipText() { return "The density estimator to use."; } /** * Get the estimator * * @return the estimator */ public UnivariateDensityEstimator getEstimator() { return this.m_Estimator; } /** * Set the estimator * * @param estimator * the estimator to use */ public void setEstimator(final UnivariateDensityEstimator estimator) { this.m_Estimator = estimator; } /** * Returns a description of the classifier. * * @return a description of the classifier as a string. */ @Override public String toString() { StringBuffer text = new StringBuffer(); text.append("Regression by discretization"); if (this.m_ClassMeans == null) { text.append(": No model built yet."); } else { text.append("\n\nClass attribute discretized into " + this.m_ClassMeans.length + " values\n"); text.append("\nClassifier spec: " + this.getClassifierSpec() + "\n"); text.append(this.m_Classifier.toString()); } return text.toString(); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. * * @param argv * the options */ public static void main(final String[] argv) { runClassifier(new RegressionByDiscretization(), argv); } }
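// A minimal usage sketch (not part of the original source): builds the scheme on a
// dataset with a numeric class and prints a point prediction together with a 95%
// prediction interval. The ARFF path below is a placeholder.
class RegressionByDiscretizationUsageSketch {
    public static void main(final String[] args) throws Exception {
        // Load a dataset whose class attribute is numeric (file name is hypothetical).
        Instances data = new weka.core.converters.ConverterUtils.DataSource("cpu.arff").getDataSet();
        data.setClassIndex(data.numAttributes() - 1); // assume the class is the last attribute
        RegressionByDiscretization rbd = new RegressionByDiscretization();
        rbd.setNumBins(20);             // -B 20
        rbd.setUseEqualFrequency(true); // -F
        rbd.buildClassifier(data);
        // Point prediction (probability-weighted mean of the bin means) ...
        System.out.println("Prediction: " + rbd.classifyInstance(data.instance(0)));
        // ... plus a 95% prediction interval from the conditional density estimator.
        double[][] intervals = rbd.predictIntervals(data.instance(0), 0.95);
        System.out.println("95% interval: [" + intervals[0][0] + ", " + intervals[0][1] + "]");
    }
}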
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/meta/Stacking.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Stacking.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import java.util.ArrayList; import java.util.Collections; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.RandomizableParallelMultipleClassifiersCombiner; import weka.classifiers.rules.ZeroR; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.CommandlineRunnable; import weka.core.DenseInstance; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; /** * <!-- globalinfo-start --> Combines several classifiers using the stacking method. Can do * classification or regression.<br/> * <br/> * For more information, see<br/> * <br/> * David H. Wolpert (1992). Stacked generalization. Neural Networks. 5:241-259. * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;article{Wolpert1992, * author = {David H. Wolpert}, * journal = {Neural Networks}, * pages = {241-259}, * publisher = {Pergamon Press}, * title = {Stacked generalization}, * volume = {5}, * year = {1992} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -M &lt;scheme specification&gt; * Full name of meta classifier, followed by options. * (default: "weka.classifiers.rules.ZeroR") * </pre> * * <pre> * -X &lt;number of folds&gt; * Sets the number of cross-validation folds. * </pre> * * <pre> * -S &lt;num&gt; * Random number seed. * (default 1) * </pre> * * <pre> * -B &lt;classifier specification&gt; * Full class name of classifier to include, followed * by scheme options. May be specified multiple times. 
* (default: "weka.classifiers.rules.ZeroR") * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision$ */ public class Stacking extends RandomizableParallelMultipleClassifiersCombiner implements TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 5134738557155845452L; /** The meta classifier */ protected Classifier m_MetaClassifier = new ZeroR(); /** Format for meta data */ protected Instances m_MetaFormat = null; /** Format for base data */ protected Instances m_BaseFormat = null; /** Set the number of folds for the cross-validation */ protected int m_NumFolds = 10; /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "Combines several classifiers using the stacking method. " + "Can do classification or regression.\n\n" + "For more information, see\n\n" + this.getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed information about the * technical background of this class, e.g., paper reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "David H. Wolpert"); result.setValue(Field.YEAR, "1992"); result.setValue(Field.TITLE, "Stacked generalization"); result.setValue(Field.JOURNAL, "Neural Networks"); result.setValue(Field.VOLUME, "5"); result.setValue(Field.PAGES, "241-259"); result.setValue(Field.PUBLISHER, "Pergamon Press"); return result; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<>(2); newVector.addElement(new Option(this.metaOption(), "M", 0, "-M <scheme specification>")); newVector.addElement(new Option("\tSets the number of cross-validation folds.", "X", 1, "-X <number of folds>")); newVector.addAll(Collections.list(super.listOptions())); if (this.getMetaClassifier() instanceof OptionHandler) { newVector.addElement(new Option("", "", 0, "\nOptions specific to meta classifier " + this.getMetaClassifier().getClass().getName() + ":")); newVector.addAll(Collections.list(((OptionHandler) this.getMetaClassifier()).listOptions())); } return newVector.elements(); } /** * String describing option for setting meta classifier * * @return the string describing the option */ protected String metaOption() { return "\tFull name of meta classifier, followed by options.\n" + "\t(default: \"weka.classifiers.rules.ZeroR\")"; } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -M &lt;scheme specification&gt; * Full name of meta classifier, followed by options. * (default: "weka.classifiers.rules.ZeroR") * </pre> * * <pre> * -X &lt;number of folds&gt; * Sets the number of cross-validation folds. * </pre> * * <pre> * -S &lt;num&gt; * Random number seed. * (default 1) * </pre> * * <pre> * -B &lt;classifier specification&gt; * Full class name of classifier to include, followed * by scheme options. May be specified multiple times. 
* (default: "weka.classifiers.rules.ZeroR") * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String numFoldsString = Utils.getOption('X', options); if (numFoldsString.length() != 0) { this.setNumFolds(Integer.parseInt(numFoldsString)); } else { this.setNumFolds(10); } this.processMetaOptions(options); super.setOptions(options); } /** * Process options setting meta classifier. * * @param options * the options to parse * @throws Exception * if the parsing fails */ protected void processMetaOptions(final String[] options) throws Exception { String classifierString = Utils.getOption('M', options); String[] classifierSpec = Utils.splitOptions(classifierString); String classifierName; if (classifierSpec.length == 0) { classifierName = "weka.classifiers.rules.ZeroR"; } else { classifierName = classifierSpec[0]; classifierSpec[0] = ""; } this.setMetaClassifier(AbstractClassifier.forName(classifierName, classifierSpec)); } /** * Gets the current settings of the Classifier. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { String[] superOptions = super.getOptions(); String[] options = new String[superOptions.length + 4]; int current = 0; options[current++] = "-X"; options[current++] = "" + this.getNumFolds(); options[current++] = "-M"; options[current++] = this.getMetaClassifier().getClass().getName() + " " + Utils.joinOptions(((OptionHandler) this.getMetaClassifier()).getOptions()); System.arraycopy(superOptions, 0, options, current, superOptions.length); return options; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String numFoldsTipText() { return "The number of folds used for cross-validation."; } /** * Gets the number of folds for the cross-validation. * * @return the number of folds for the cross-validation */ public int getNumFolds() { return this.m_NumFolds; } /** * Sets the number of folds for the cross-validation. * * @param numFolds * the number of folds for the cross-validation * @throws Exception * if parameter illegal */ public void setNumFolds(final int numFolds) throws Exception { if (numFolds < 0) { throw new IllegalArgumentException("Stacking: Number of cross-validation " + "folds must be positive."); } this.m_NumFolds = numFolds; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String metaClassifierTipText() { return "The meta classifiers to be used."; } /** * Adds meta classifier * * @param classifier * the classifier with all options set. */ public void setMetaClassifier(final Classifier classifier) { this.m_MetaClassifier = classifier; } /** * Gets the meta classifier. * * @return the meta classifier */ public Classifier getMetaClassifier() { return this.m_MetaClassifier; } /** * Returns combined capabilities of the base classifiers, i.e., the capabilities all of them have in * common. 
* * @return the capabilities of the base classifiers */ @Override public Capabilities getCapabilities() { Capabilities result; result = super.getCapabilities(); result.setMinimumNumberInstances(this.getNumFolds()); return result; } /** * Buildclassifier selects a classifier from the set of classifiers by minimising error on the * training data. * * @param data * the training data to be used for generating the boosted classifier. * @throws Exception * if the classifier could not be built successfully */ @Override public void buildClassifier(final Instances data) throws Exception { if (this.m_MetaClassifier == null) { throw new IllegalArgumentException("No meta classifier has been set"); } // can classifier handle the data? this.getCapabilities().testWithFail(data); // remove instances with missing class Instances newData = new Instances(data); this.m_BaseFormat = new Instances(data, 0); newData.deleteWithMissingClass(); Random random = new Random(this.m_Seed); newData.randomize(random); if (newData.classAttribute().isNominal()) { newData.stratify(this.m_NumFolds); } // Create meta level this.generateMetaLevel(newData, random); // restart the executor pool because at the end of processing // a set of classifiers it gets shutdown to prevent the program // executing as a server super.buildClassifier(newData); // Rebuild all the base classifiers on the full training data this.buildClassifiers(newData); } /** * Generates the meta data * * @param newData * the data to work on * @param random * the random number generator to use for cross-validation * @throws Exception * if generation fails */ protected void generateMetaLevel(final Instances newData, final Random random) throws Exception { Instances metaData = this.metaFormat(newData); this.m_MetaFormat = new Instances(metaData, 0); for (int j = 0; j < this.m_NumFolds; j++) { Instances train = newData.trainCV(this.m_NumFolds, j, random); // start the executor pool (if necessary) // has to be done after each set of classifiers as the // executor pool gets shut down in order to prevent the // program executing as a server (and not returning to // the command prompt when run from the command line super.buildClassifier(train); // construct the actual classifiers this.buildClassifiers(train); // Classify test instances and add to meta data Instances test = newData.testCV(this.m_NumFolds, j); for (int i = 0; i < test.numInstances(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } metaData.add(this.metaInstance(test.instance(i))); } } this.m_MetaClassifier.buildClassifier(metaData); } /** * Returns class probabilities. 
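* The test instance is first mapped to its level-1 representation (the base classifiers' predictions, as produced by metaInstance(Instance)), and the meta classifier's distribution for that level-1 instance is returned.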
* * @param instance * the instance to be classified * @return the distribution * @throws Exception * if instance could not be classified successfully */ @Override public double[] distributionForInstance(final Instance instance) throws Exception { return this.m_MetaClassifier.distributionForInstance(this.metaInstance(instance)); } /** * Output a representation of this classifier * * @return a string representation of the classifier */ @Override public String toString() { if (this.m_Classifiers.length == 0) { return "Stacking: No base schemes entered."; } if (this.m_MetaClassifier == null) { return "Stacking: No meta scheme selected."; } if (this.m_MetaFormat == null) { return "Stacking: No model built yet."; } String result = "Stacking\n\nBase classifiers\n\n"; for (int i = 0; i < this.m_Classifiers.length; i++) { result += this.getClassifier(i).toString() + "\n\n"; } result += "\n\nMeta classifier\n\n"; result += this.m_MetaClassifier.toString(); return result; } /** * Makes the format for the level-1 data. * * @param instances * the level-0 format * @return the format for the meta data * @throws Exception * if the format generation fails */ protected Instances metaFormat(final Instances instances) throws Exception { ArrayList<Attribute> attributes = new ArrayList<>(); Instances metaFormat; for (int k = 0; k < this.m_Classifiers.length; k++) { Classifier classifier = this.getClassifier(k); String name = classifier.getClass().getName() + "-" + (k + 1); if (this.m_BaseFormat.classAttribute().isNumeric()) { attributes.add(new Attribute(name)); } else { for (int j = 0; j < this.m_BaseFormat.classAttribute().numValues(); j++) { attributes.add(new Attribute(name + ":" + this.m_BaseFormat.classAttribute().value(j))); } } } attributes.add((Attribute) this.m_BaseFormat.classAttribute().copy()); metaFormat = new Instances("Meta format", attributes, 0); metaFormat.setClassIndex(metaFormat.numAttributes() - 1); return metaFormat; } /** * Makes a level-1 instance from the given instance. * * @param instance * the instance to be transformed * @return the level-1 instance * @throws Exception * if the instance generation fails */ protected Instance metaInstance(final Instance instance) throws Exception { double[] values = new double[this.m_MetaFormat.numAttributes()]; Instance metaInstance; int i = 0; for (int k = 0; k < this.m_Classifiers.length; k++) { Classifier classifier = this.getClassifier(k); if (this.m_BaseFormat.classAttribute().isNumeric()) { values[i++] = classifier.classifyInstance(instance); } else { double[] dist = classifier.distributionForInstance(instance); for (int j = 0; j < dist.length; j++) { values[i++] = dist[j]; } } } values[i] = instance.classValue(); metaInstance = new DenseInstance(1, values); metaInstance.setDataset(this.m_MetaFormat); return metaInstance; } @Override public void preExecution() throws Exception { super.preExecution(); if (this.getMetaClassifier() instanceof CommandlineRunnable) { ((CommandlineRunnable) this.getMetaClassifier()).preExecution(); } } @Override public void postExecution() throws Exception { super.postExecution(); if (this.getMetaClassifier() instanceof CommandlineRunnable) { ((CommandlineRunnable) this.getMetaClassifier()).postExecution(); } } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. 
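* <p> * An illustrative command line (the dataset path is a placeholder): * <code>java weka.classifiers.meta.Stacking -t iris.arff -X 10 -M weka.classifiers.functions.Logistic -B weka.classifiers.trees.J48 -B weka.classifiers.bayes.NaiveBayes</code>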
* * @param argv * should contain the following arguments: -t training file [-T test file] [-c class index] */ public static void main(final String[] argv) { runClassifier(new Stacking(), argv); } }
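// A minimal usage sketch (not part of the original source): stacks two level-0
// learners under logistic regression as the level-1 model. The ARFF path below
// is a placeholder; any dataset with a nominal class will do.
class StackingUsageSketch {
    public static void main(final String[] args) throws Exception {
        Instances data = new weka.core.converters.ConverterUtils.DataSource("iris.arff").getDataSet();
        data.setClassIndex(data.numAttributes() - 1); // assume the class is the last attribute
        Stacking stacker = new Stacking();
        // Level-0 (base) classifiers, equivalent to repeated -B options.
        stacker.setClassifiers(new Classifier[] { new weka.classifiers.trees.J48(), new weka.classifiers.bayes.NaiveBayes() });
        stacker.setMetaClassifier(new weka.classifiers.functions.Logistic()); // -M
        stacker.setNumFolds(10); // -X 10: internal CV that generates the level-1 training data
        stacker.buildClassifier(data);
        System.out.println(java.util.Arrays.toString(stacker.distributionForInstance(data.instance(0))));
    }
}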
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/meta/Vote.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Vote.java * Copyright (C) 2000-2012 University of Waikato * */ package weka.classifiers.meta; import java.io.BufferedInputStream; import java.io.File; import java.io.FileInputStream; import java.io.ObjectInputStream; import java.util.ArrayList; import java.util.Collections; import java.util.Enumeration; import java.util.List; import java.util.Vector; import weka.classifiers.Classifier; import weka.classifiers.RandomizableMultipleClassifiersCombiner; import weka.classifiers.misc.InputMappedClassifier; import weka.core.Aggregateable; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Environment; import weka.core.EnvironmentHandler; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; /** * <!-- globalinfo-start --> Class for combining classifiers. Different combinations of probability * estimates for classification are available.<br/> * <br/> * For more information see:<br/> * <br/> * Ludmila I. Kuncheva (2004). Combining Pattern Classifiers: Methods and Algorithms. John Wiley and * Sons, Inc..<br/> * <br/> * J. Kittler, M. Hatef, Robert P.W. Duin, J. Matas (1998). On combining classifiers. IEEE * Transactions on Pattern Analysis and Machine Intelligence. 20(3):226-239. * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -P &lt;path to serialized classifier&gt; * Full path to serialized classifier to include. * May be specified multiple times to include * multiple serialized classifiers. Note: it does * not make sense to use pre-built classifiers in * a cross-validation. * </pre> * * <pre> * -R &lt;AVG|PROD|MAJ|MIN|MAX|MED&gt; * The combination rule to use * (default: AVG) * </pre> * * <pre> * -print * Print the individual models in the output * </pre> * * <pre> * -S &lt;num&gt; * Random number seed. * (default 1) * </pre> * * <pre> * -B &lt;classifier specification&gt; * Full class name of classifier to include, followed * by scheme options. May be specified multiple times. * (default: "weka.classifiers.rules.ZeroR") * </pre> * * <pre> * -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution). 
* </pre> * * <pre> * Options specific to classifier weka.classifiers.rules.ZeroR: * </pre> * * <pre> * -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution). * </pre> * * <!-- options-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;book{Kuncheva2004, * author = {Ludmila I. Kuncheva}, * publisher = {John Wiley and Sons, Inc.}, * title = {Combining Pattern Classifiers: Methods and Algorithms}, * year = {2004} * } * * &#64;article{Kittler1998, * author = {J. Kittler and M. Hatef and Robert P.W. Duin and J. Matas}, * journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, * number = {3}, * pages = {226-239}, * title = {On combining classifiers}, * volume = {20}, * year = {1998} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * @author Alexander K. Seewald (alex@seewald.at) * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Roberto Perdisci (roberto.perdisci@gmail.com) * @version $Revision$ */ public class Vote extends RandomizableMultipleClassifiersCombiner implements TechnicalInformationHandler, EnvironmentHandler, Aggregateable<Classifier> { /** for serialization */ static final long serialVersionUID = -637891196294399624L; /** combination rule: Average of Probabilities */ public static final int AVERAGE_RULE = 1; /** combination rule: Product of Probabilities (only nominal classes) */ public static final int PRODUCT_RULE = 2; /** combination rule: Majority Voting (only nominal classes) */ public static final int MAJORITY_VOTING_RULE = 3; /** combination rule: Minimum Probability */ public static final int MIN_RULE = 4; /** combination rule: Maximum Probability */ public static final int MAX_RULE = 5; /** combination rule: Median Probability (only numeric class) */ public static final int MEDIAN_RULE = 6; /** combination rules */ public static final Tag[] TAGS_RULES = { new Tag(AVERAGE_RULE, "AVG", "Average of Probabilities"), new Tag(PRODUCT_RULE, "PROD", "Product of Probabilities"), new Tag(MAJORITY_VOTING_RULE, "MAJ", "Majority Voting"), new Tag(MIN_RULE, "MIN", "Minimum Probability"), new Tag(MAX_RULE, "MAX", "Maximum Probability"), new Tag(MEDIAN_RULE, "MED", "Median") }; /** Combination Rule variable */ protected int m_CombinationRule = AVERAGE_RULE; /** List of file paths to serialized models to load */ protected List<String> m_classifiersToLoad = new ArrayList<>(); /** List of de-serialized pre-built classifiers to include in the ensemble */ protected List<Classifier> m_preBuiltClassifiers = new ArrayList<>(); /** Environment variables */ protected transient Environment m_env = Environment.getSystemWide(); /** Structure of the training data */ protected Instances m_structure; /** Print the individual models in the output */ protected boolean m_dontPrintModels; /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for combining classifiers. Different combinations of " + "probability estimates for classification are available.\n\n" + "For more information see:\n\n" + this.getTechnicalInformation().toString(); } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ @Override public Enumeration<Option> listOptions() { Vector<Option> result = new Vector<>(); result.addElement(new Option("\tFull path to serialized classifier to include.\n" + "\tMay be specified multiple times to include\n" + "\tmultiple serialized classifiers. Note: it does\n" + "\tnot make sense to use pre-built classifiers in\n" + "\ta cross-validation.", "P", 1, "-P <path to serialized " + "classifier>")); result.addElement(new Option("\tThe combination rule to use\n" + "\t(default: AVG)", "R", 1, "-R " + Tag.toOptionList(TAGS_RULES))); result.addElement(new Option("\tSuppress the printing of the individual models in the output", "do-not-print", 0, "-do-not-print")); result.addAll(Collections.list(super.listOptions())); return result.elements(); } /** * Gets the current settings of Vote. * * @return an array of strings suitable for passing to setOptions() */ @Override public String[] getOptions() { int i; Vector<String> result = new Vector<>(); String[] options; options = super.getOptions(); for (i = 0; i < options.length; i++) { result.add(options[i]); } result.add("-R"); result.add("" + this.getCombinationRule()); for (i = 0; i < this.m_classifiersToLoad.size(); i++) { result.add("-P"); result.add(this.m_classifiersToLoad.get(i)); } if (this.m_dontPrintModels) { result.add("-do-not-print"); } return result.toArray(new String[result.size()]); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -P &lt;path to serialized classifier&gt; * Full path to serialized classifier to include. * May be specified multiple times to include * multiple serialized classifiers. Note: it does * not make sense to use pre-built classifiers in * a cross-validation. * </pre> * * <pre> * -R &lt;AVG|PROD|MAJ|MIN|MAX|MED&gt; * The combination rule to use * (default: AVG) * </pre> * * <pre> * -print * Print the individual models in the output * </pre> * * <pre> * -S &lt;num&gt; * Random number seed. * (default 1) * </pre> * * <pre> * -B &lt;classifier specification&gt; * Full class name of classifier to include, followed * by scheme options. May be specified multiple times. * (default: "weka.classifiers.rules.ZeroR") * </pre> * * <pre> * -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution). * </pre> * * <pre> * Options specific to classifier weka.classifiers.rules.ZeroR: * </pre> * * <pre> * -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution). 
* </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String tmpStr; tmpStr = Utils.getOption('R', options); if (tmpStr.length() != 0) { this.setCombinationRule(new SelectedTag(tmpStr, TAGS_RULES)); } else { this.setCombinationRule(new SelectedTag(AVERAGE_RULE, TAGS_RULES)); } this.m_classifiersToLoad.clear(); while (true) { String loadString = Utils.getOption('P', options); if (loadString.length() == 0) { break; } this.m_classifiersToLoad.add(loadString); } this.setDoNotPrintModels(Utils.getFlag("do-not-print", options)); super.setOptions(options); } /** * Returns an instance of a TechnicalInformation object, containing detailed information about the * technical background of this class, e.g., paper reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.BOOK); result.setValue(Field.AUTHOR, "Ludmila I. Kuncheva"); result.setValue(Field.TITLE, "Combining Pattern Classifiers: Methods and Algorithms"); result.setValue(Field.YEAR, "2004"); result.setValue(Field.PUBLISHER, "John Wiley and Sons, Inc."); additional = result.add(Type.ARTICLE); additional.setValue(Field.AUTHOR, "J. Kittler and M. Hatef and Robert P.W. Duin and J. Matas"); additional.setValue(Field.YEAR, "1998"); additional.setValue(Field.TITLE, "On combining classifiers"); additional.setValue(Field.JOURNAL, "IEEE Transactions on Pattern Analysis and Machine Intelligence"); additional.setValue(Field.VOLUME, "20"); additional.setValue(Field.NUMBER, "3"); additional.setValue(Field.PAGES, "226-239"); return result; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); if (this.m_preBuiltClassifiers.size() == 0 && this.m_classifiersToLoad.size() > 0) { try { this.loadClassifiers(null); } catch (Exception e) { e.printStackTrace(); } } if (this.m_preBuiltClassifiers.size() > 0) { if (this.m_Classifiers.length == 0) { result = (Capabilities) this.m_preBuiltClassifiers.get(0).getCapabilities().clone(); } for (int i = 1; i < this.m_preBuiltClassifiers.size(); i++) { result.and(this.m_preBuiltClassifiers.get(i).getCapabilities()); } for (Capability cap : Capability.values()) { result.enableDependency(cap); } } // class if ((this.m_CombinationRule == PRODUCT_RULE) || (this.m_CombinationRule == MAJORITY_VOTING_RULE)) { result.disableAllClasses(); result.disableAllClassDependencies(); result.enable(Capability.NOMINAL_CLASS); result.enableDependency(Capability.NOMINAL_CLASS); } else if (this.m_CombinationRule == MEDIAN_RULE) { result.disableAllClasses(); result.disableAllClassDependencies(); result.enable(Capability.NUMERIC_CLASS); result.enableDependency(Capability.NUMERIC_CLASS); } return result; } /** * Buildclassifier selects a classifier from the set of classifiers by minimising error on the * training data. * * @param data * the training data to be used for generating the boosted classifier. 
* @throws Exception * if the classifier could not be built successfully */ @Override public void buildClassifier(final Instances data) throws Exception { // remove instances with missing class Instances newData = new Instances(data); newData.deleteWithMissingClass(); this.m_structure = new Instances(newData, 0); if (this.m_classifiersToLoad.size() > 0) { this.m_preBuiltClassifiers.clear(); this.loadClassifiers(data); if (this.m_Classifiers.length == 1 && this.m_Classifiers[0] instanceof weka.classifiers.rules.ZeroR) { // remove the single ZeroR this.m_Classifiers = new Classifier[0]; } } // can classifier handle the data? this.getCapabilities().testWithFail(data); for (int i = 0; i < this.m_Classifiers.length; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } this.getClassifier(i).buildClassifier(newData); } } /** * Load serialized models to include in the ensemble * * @param data * training instances (used in a header compatibility check with each of the loaded models) * * @throws Exception * if there is a problem de-serializing a model */ private void loadClassifiers(final Instances data) throws Exception { for (String path : this.m_classifiersToLoad) { if (Environment.containsEnvVariables(path)) { try { path = this.m_env.substitute(path); } catch (Exception ex) { } } File toLoad = new File(path); if (!toLoad.isFile()) { throw new Exception("\"" + path + "\" does not seem to be a valid file!"); } ObjectInputStream is = new ObjectInputStream(new BufferedInputStream(new FileInputStream(toLoad))); Object c = is.readObject(); if (!(c instanceof Classifier)) { is.close(); throw new Exception("\"" + path + "\" does not contain a classifier!"); } Object header = null; header = is.readObject(); if ((header instanceof Instances) && !(c instanceof InputMappedClassifier)) { if (data != null && !data.equalHeaders((Instances) header)) { is.close(); throw new Exception("\"" + path + "\" was trained with data that is " + "of a different structure than the incoming training data"); } } if (header == null) { System.out.println("[Vote] warning: no header instances for \"" + path + "\""); } is.close(); this.addPreBuiltClassifier((Classifier) c); } } /** * Add a prebuilt classifier to the list for use in the ensemble * * @param c * a prebuilt Classifier to add. */ public void addPreBuiltClassifier(final Classifier c) { this.m_preBuiltClassifiers.add(c); } /** * Remove a prebuilt classifier from the list to use in the ensemble * * @param c * the classifier to remove */ public void removePreBuiltClassifier(final Classifier c) { this.m_preBuiltClassifiers.remove(c); } /** * Classifies the given test instance. 
* * @param instance * the instance to be classified * @return the predicted most likely class for the instance or Utils.missingValue() if no prediction * is made * @throws Exception * if an error occurred during the prediction */ @Override public double classifyInstance(final Instance instance) throws Exception { double result; double[] dist; int index; switch (this.m_CombinationRule) { case AVERAGE_RULE: case PRODUCT_RULE: case MAJORITY_VOTING_RULE: case MIN_RULE: case MAX_RULE: dist = this.distributionForInstance(instance); if (instance.classAttribute().isNominal()) { index = Utils.maxIndex(dist); if (dist[index] == 0) { result = Utils.missingValue(); } else { result = index; } } else if (instance.classAttribute().isNumeric()) { result = dist[0]; } else { result = Utils.missingValue(); } break; case MEDIAN_RULE: result = this.classifyInstanceMedian(instance); break; default: throw new IllegalStateException("Unknown combination rule '" + this.m_CombinationRule + "'!"); } return result; } /** * Classifies the given test instance, returning the median from all classifiers. Can assume that * class is numeric. * * @param instance * the instance to be classified * @return the predicted most likely class for the instance or Utils.missingValue() if no prediction * is made * @throws Exception * if an error occurred during the prediction */ protected double classifyInstanceMedian(final Instance instance) throws Exception { double[] results = new double[this.m_Classifiers.length + this.m_preBuiltClassifiers.size()]; int numResults = 0; for (Classifier m_Classifier : this.m_Classifiers) { double pred = m_Classifier.classifyInstance(instance); if (!Utils.isMissingValue(pred)) { results[numResults++] = pred; } } for (int i = 0; i < this.m_preBuiltClassifiers.size(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } double pred = this.m_preBuiltClassifiers.get(i).classifyInstance(instance); if (!Utils.isMissingValue(pred)) { results[numResults++] = pred; } } if (numResults == 0) { return Utils.missingValue(); } else if (numResults == 1) { return results[0]; } else { double[] actualResults = new double[numResults]; System.arraycopy(results, 0, actualResults, 0, numResults); return Utils.kthSmallestValue(actualResults, actualResults.length / 2); } } /** * Classifies a given instance using the selected combination rule. 
* * @param instance * the instance to be classified * @return the distribution * @throws Exception * if instance could not be classified successfully */ @Override public double[] distributionForInstance(final Instance instance) throws Exception { double[] result = new double[instance.numClasses()]; switch (this.m_CombinationRule) { case AVERAGE_RULE: result = this.distributionForInstanceAverage(instance); break; case PRODUCT_RULE: result = this.distributionForInstanceProduct(instance); break; case MAJORITY_VOTING_RULE: result = this.distributionForInstanceMajorityVoting(instance); break; case MIN_RULE: result = this.distributionForInstanceMin(instance); break; case MAX_RULE: result = this.distributionForInstanceMax(instance); break; case MEDIAN_RULE: result[0] = this.classifyInstance(instance); break; default: throw new IllegalStateException("Unknown combination rule '" + this.m_CombinationRule + "'!"); } if (!instance.classAttribute().isNumeric() && (Utils.sum(result) > 0)) { Utils.normalize(result); } return result; } /** * Classifies a given instance using the Average of Probabilities combination rule. * * @param instance * the instance to be classified * @return the distribution * @throws Exception * if instance could not be classified successfully */ protected double[] distributionForInstanceAverage(final Instance instance) throws Exception { double[] probs = new double[instance.numClasses()]; double numPredictions = 0; for (int i = 0; i < this.m_Classifiers.length; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } double[] dist = this.getClassifier(i).distributionForInstance(instance); if (!instance.classAttribute().isNumeric() || !Utils.isMissingValue(dist[0])) { for (int j = 0; j < dist.length; j++) { probs[j] += dist[j]; } numPredictions++; } } for (int i = 0; i < this.m_preBuiltClassifiers.size(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } double[] dist = this.m_preBuiltClassifiers.get(i).distributionForInstance(instance); if (!instance.classAttribute().isNumeric() || !Utils.isMissingValue(dist[0])) { for (int j = 0; j < dist.length; j++) { probs[j] += dist[j]; } numPredictions++; } } if (instance.classAttribute().isNumeric()) { if (numPredictions == 0) { probs[0] = Utils.missingValue(); } else { for (int j = 0; j < probs.length; j++) { probs[j] /= numPredictions; } } } else { // Should normalize "probability" distribution if (Utils.sum(probs) > 0) { Utils.normalize(probs); } } return probs; } /** * Classifies a given instance using the Product of Probabilities combination rule. Can assume that * class is nominal. 
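* <p> * For example, with two base distributions [0.6, 0.4] and [0.5, 0.5], the element-wise products are [0.30, 0.20], which normalize to [0.6, 0.4].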
* * @param instance * the instance to be classified * @return the distribution * @throws Exception * if instance could not be classified successfully */ protected double[] distributionForInstanceProduct(final Instance instance) throws Exception { double[] probs = new double[instance.numClasses()]; for (int i = 0; i < probs.length; i++) { probs[i] = 1.0; } int numPredictions = 0; for (int i = 0; i < this.m_Classifiers.length; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } double[] dist = this.getClassifier(i).distributionForInstance(instance); if (Utils.sum(dist) > 0) { for (int j = 0; j < dist.length; j++) { probs[j] *= dist[j]; } numPredictions++; } } for (int i = 0; i < this.m_preBuiltClassifiers.size(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } double[] dist = this.m_preBuiltClassifiers.get(i).distributionForInstance(instance); if (Utils.sum(dist) > 0) { for (int j = 0; j < dist.length; j++) { probs[j] *= dist[j]; } numPredictions++; } } // No predictions? if (numPredictions == 0) { return new double[instance.numClasses()]; } // Should normalize to get "probabilities" if (Utils.sum(probs) > 0) { Utils.normalize(probs); } return probs; } /** * Classifies a given instance using the Majority Voting combination rule. Can assume that class is * nominal. * * @param instance * the instance to be classified * @return the distribution * @throws Exception * if instance could not be classified successfully */ protected double[] distributionForInstanceMajorityVoting(final Instance instance) throws Exception { double[] probs = new double[instance.classAttribute().numValues()]; double[] votes = new double[probs.length]; for (int i = 0; i < this.m_Classifiers.length; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } probs = this.getClassifier(i).distributionForInstance(instance); int maxIndex = 0; for (int j = 0; j < probs.length; j++) { if (probs[j] > probs[maxIndex]) { maxIndex = j; } } // Consider the cases when multiple classes happen to have the same // probability if (probs[maxIndex] > 0) { for (int j = 0; j < probs.length; j++) { if (probs[j] == probs[maxIndex]) { votes[j]++; } } } } for (int i = 0; i < this.m_preBuiltClassifiers.size(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } probs = this.m_preBuiltClassifiers.get(i).distributionForInstance(instance); int maxIndex = 0; for (int j = 0; j < probs.length; j++) { if (probs[j] > probs[maxIndex]) { maxIndex = j; } } // Consider the cases when multiple classes happen to have the same // probability if (probs[maxIndex] > 0) { for (int j = 0; j < probs.length; j++) { if (probs[j] == probs[maxIndex]) { votes[j]++; } } } } int tmpMajorityIndex = 0; for (int k = 1; k < votes.length; k++) { if (votes[k] > votes[tmpMajorityIndex]) { tmpMajorityIndex = k; } } // No votes received if (votes[tmpMajorityIndex] == 0) { return new double[instance.numClasses()]; } // Consider the cases when multiple classes receive the same amount of votes Vector<Integer> majorityIndexes = new Vector<>(); for (int k = 0; k < votes.length; k++) { if (votes[k] == votes[tmpMajorityIndex]) { majorityIndexes.add(k); } } int majorityIndex = tmpMajorityIndex; if (majorityIndexes.size() > 1) { // resolve ties by 
looking at the predicted distribution double[] distPreds = this.distributionForInstanceAverage(instance); majorityIndex = Utils.maxIndex(distPreds); // Resolve the ties according to a uniform random distribution // majorityIndex = majorityIndexes.get(m_Random.nextInt(majorityIndexes.size())); } // set probs to 0 probs = new double[probs.length]; probs[majorityIndex] = 1; // the class that have been voted the most // receives 1 return probs; } /** * Classifies a given instance using the Maximum Probability combination rule. * * @param instance * the instance to be classified * @return the distribution * @throws Exception * if instance could not be classified successfully */ protected double[] distributionForInstanceMax(final Instance instance) throws Exception { double[] probs = new double[instance.numClasses()]; double numPredictions = 0; for (int i = 0; i < this.m_Classifiers.length; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } double[] dist = this.getClassifier(i).distributionForInstance(instance); if (!instance.classAttribute().isNumeric() || !Utils.isMissingValue(dist[0])) { for (int j = 0; j < dist.length; j++) { if ((probs[j] < dist[j]) || (numPredictions == 0)) { probs[j] = dist[j]; } } numPredictions++; } } for (int i = 0; i < this.m_preBuiltClassifiers.size(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } double[] dist = this.m_preBuiltClassifiers.get(i).distributionForInstance(instance); if (!instance.classAttribute().isNumeric() || !Utils.isMissingValue(dist[0])) { for (int j = 0; j < dist.length; j++) { if ((probs[j] < dist[j]) || (numPredictions == 0)) { probs[j] = dist[j]; } } numPredictions++; } } if (instance.classAttribute().isNumeric()) { if (numPredictions == 0) { probs[0] = Utils.missingValue(); } } else { // Should normalize "probability" distribution if (Utils.sum(probs) > 0) { Utils.normalize(probs); } } return probs; } /** * Classifies a given instance using the Minimum Probability combination rule. 
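* <p> * For example, with base distributions [0.6, 0.4] and [0.5, 0.5], the element-wise minima are [0.5, 0.4], which normalize to approximately [0.556, 0.444].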
* * @param instance * the instance to be classified * @return the distribution * @throws Exception * if instance could not be classified successfully */ protected double[] distributionForInstanceMin(final Instance instance) throws Exception { double[] probs = new double[instance.numClasses()]; double numPredictions = 0; for (int i = 0; i < this.m_Classifiers.length; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } double[] dist = this.getClassifier(i).distributionForInstance(instance); if (!instance.classAttribute().isNumeric() || !Utils.isMissingValue(dist[0])) { for (int j = 0; j < dist.length; j++) { if ((probs[j] > dist[j]) || (numPredictions == 0)) { probs[j] = dist[j]; } } numPredictions++; } } for (int i = 0; i < this.m_preBuiltClassifiers.size(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } double[] dist = this.m_preBuiltClassifiers.get(i).distributionForInstance(instance); if (!instance.classAttribute().isNumeric() || !Utils.isMissingValue(dist[0])) { for (int j = 0; j < dist.length; j++) { if ((probs[j] > dist[j]) || (numPredictions == 0)) { probs[j] = dist[j]; } } numPredictions++; } } if (instance.classAttribute().isNumeric()) { if (numPredictions == 0) { probs[0] = Utils.missingValue(); } } else { // Should normalize "probability" distribution if (Utils.sum(probs) > 0) { Utils.normalize(probs); } } return probs; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String combinationRuleTipText() { return "The combination rule used."; } /** * Gets the combination rule used * * @return the combination rule used */ public SelectedTag getCombinationRule() { return new SelectedTag(this.m_CombinationRule, TAGS_RULES); } /** * Sets the combination rule to use. Selections whose tag set is not TAGS_RULES are ignored. * * @param newRule * the combination rule method to use */ public void setCombinationRule(final SelectedTag newRule) { if (newRule.getTags() == TAGS_RULES) { this.m_CombinationRule = newRule.getSelectedTag().getID(); } } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String preBuiltClassifiersTipText() { return "The pre-built serialized classifiers to include. Multiple " + "serialized classifiers can be included alongside those " + "that are built from scratch when this classifier runs. 
" + "Note that it does not make sense to include pre-built " + "classifiers in a cross-validation since they are static " + "and their models do not change from fold to fold."; } /** * Set the paths to pre-built serialized classifiers to load and include in the ensemble * * @param preBuilt * an array of File paths to serialized models */ public void setPreBuiltClassifiers(final File[] preBuilt) { this.m_classifiersToLoad.clear(); if (preBuilt != null && preBuilt.length > 0) { for (File element : preBuilt) { String path = element.toString(); this.m_classifiersToLoad.add(path); } } } /** * Get the paths to pre-built serialized classifiers to load and include in the ensemble * * @return an array of File paths to serialized models */ public File[] getPreBuiltClassifiers() { File[] result = new File[this.m_classifiersToLoad.size()]; for (int i = 0; i < this.m_classifiersToLoad.size(); i++) { result[i] = new File(this.m_classifiersToLoad.get(i)); } return result; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String doNotPrintModelsTipText() { return "Do not print the individual trees in the output"; } /** * Set whether to print the individual ensemble models in the output * * @param print * true if the individual models are to be printed */ public void setDoNotPrintModels(final boolean print) { this.m_dontPrintModels = print; } /** * Get whether to print the individual ensemble models in the output * * @return true if the individual models are to be printed */ public boolean getDoNotPrintModels() { return this.m_dontPrintModels; } /** * Output a representation of this classifier * * @return a string representation of the classifier */ @Override public String toString() { if (this.m_Classifiers == null) { return "Vote: No model built yet."; } String result = "Vote combines"; result += " the probability distributions of these base learners:\n"; for (int i = 0; i < this.m_Classifiers.length; i++) { result += '\t' + this.getClassifierSpec(i) + '\n'; } for (Classifier c : this.m_preBuiltClassifiers) { result += "\t" + c.getClass().getName() + Utils.joinOptions(((OptionHandler) c).getOptions()) + "\n"; } result += "using the '"; switch (this.m_CombinationRule) { case AVERAGE_RULE: result += "Average"; break; case PRODUCT_RULE: result += "Product"; break; case MAJORITY_VOTING_RULE: result += "Majority Voting"; break; case MIN_RULE: result += "Minimum"; break; case MAX_RULE: result += "Maximum"; break; case MEDIAN_RULE: result += "Median"; break; default: throw new IllegalStateException("Unknown combination rule '" + this.m_CombinationRule + "'!"); } result += "' combination rule \n"; StringBuilder resultBuilder = null; if (!this.m_dontPrintModels) { resultBuilder = new StringBuilder(); resultBuilder.append(result).append("\nAll the models:\n\n"); for (Classifier c : this.m_Classifiers) { resultBuilder.append(c).append("\n"); } for (Classifier c : this.m_preBuiltClassifiers) { resultBuilder.append(c).append("\n"); } } return resultBuilder == null ? result : resultBuilder.toString(); } /** * Returns the revision string. 
* * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Set environment variable values to substitute in the paths of serialized models to load * * @param env * the environment variables to use */ @Override public void setEnvironment(final Environment env) { this.m_env = env; } /** * Aggregate an object with this one * * @param toAggregate * the object to aggregate * @return the result of aggregation * @throws Exception * if the supplied object can't be aggregated for some reason */ @Override public Classifier aggregate(final Classifier toAggregate) throws Exception { if (this.m_structure == null && this.m_Classifiers.length == 1 && (this.m_Classifiers[0] instanceof weka.classifiers.rules.ZeroR)) { // remove the single untrained ZeroR this.setClassifiers(new Classifier[0]); } // Can't do any training data compatibility checks unfortunately this.addPreBuiltClassifier(toAggregate); return this; } /** * Call to complete the aggregation process. Allows implementers to do any final processing based on * how many objects were aggregated. * * @throws Exception * if the aggregation can't be finalized for some reason */ @Override public void finalizeAggregation() throws Exception { // nothing to do } /** * Main method for testing this class. * * @param argv * should contain the following arguments: -t training file [-T test file] [-c class index] */ public static void main(final String[] argv) { runClassifier(new Vote(), argv); } }
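A minimal usage sketch for the combination rules implemented above, assuming the standard weka.classifiers.meta.Vote API; the base learners (J48, NaiveBayes, IBk) and the dataset variable are illustrative, and the data is assumed to have a nominal class with the class index set:

import weka.classifiers.Classifier;
import weka.classifiers.meta.Vote;
import weka.core.Instances;
import weka.core.SelectedTag;

public class VoteExample {
  public static double[] voteOnFirst(Instances data) throws Exception {
    Vote vote = new Vote();
    // Three heterogeneous base learners; any Classifier[] works here.
    vote.setClassifiers(new Classifier[] {
        new weka.classifiers.trees.J48(),
        new weka.classifiers.bayes.NaiveBayes(),
        new weka.classifiers.lazy.IBk(3) });
    // Majority voting, as in distributionForInstanceMajorityVoting above;
    // ties fall back to the averaged distribution.
    vote.setCombinationRule(new SelectedTag(Vote.MAJORITY_VOTING_RULE, Vote.TAGS_RULES));
    vote.buildClassifier(data);
    // Under majority voting this is a one-hot "distribution".
    return vote.distributionForInstance(data.instance(0));
  }
}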
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/meta/WeightedInstancesHandlerWrapper.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /** * WeightedInstancesHandlerWrapper.java * Copyright (C) 2015 University of Waikato, Hamilton, NZ */ package weka.classifiers.meta; import weka.classifiers.RandomizableSingleClassifierEnhancer; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.ResampleUtils; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import java.util.ArrayList; import java.util.Collections; import java.util.Enumeration; import java.util.List; import java.util.Random; import java.util.Vector; /** <!-- globalinfo-start --> * Generic wrapper around any classifier to enable weighted instances support.<br> * Uses resampling with weights if the base classifier is not implementing the weka.core.WeightedInstancesHandler interface and there are instance weights other than 1.0 present. By default, the training data is passed through to the base classifier if it can handle instance weights. However, it is possible to force the use of resampling with weights as well. * <br><br> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p> * * <pre> -force-resample-with-weights * Forces resampling of weights, regardless of whether * base classifier handles instance weights</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.rules.ZeroR)</pre> * * <pre> -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution).</pre> * * <pre> -num-decimal-places * The number of decimal places for the output of numbers in the model (default 2).</pre> * * <pre> * Options specific to classifier weka.classifiers.rules.ZeroR: * </pre> * * <pre> -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution).</pre> * * <pre> -num-decimal-places * The number of decimal places for the output of numbers in the model (default 2).</pre> * <!-- options-end --> * * @author FracPete (fracpete at waikato dot ac dot nz) * @version $Revision$ */ public class WeightedInstancesHandlerWrapper extends RandomizableSingleClassifierEnhancer implements WeightedInstancesHandler { private static final long serialVersionUID = 2980789213434466135L; /** command-line option for resampling with weights. */ public static final String FORCE_RESAMPLE_WITH_WEIGHTS = "force-resample-with-weights"; /** whether to force resampling with weights. 
*/ protected boolean m_ForceResampleWithWeights = false; /** * Returns a string describing classifier * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Generic wrapper around any classifier to enable weighted instances support.\n" + "Uses resampling with weights if the base classifier is not implementing " + "the " + WeightedInstancesHandler.class.getName() + " interface and there " + "are instance weights other than 1.0 present. By default, " + "the training data is passed through to the base classifier if it can handle " + "instance weights. However, it is possible to force the use of resampling " + "with weights as well."; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration<Option> listOptions() { Vector<Option> result; result = new Vector<Option>(); result.addElement(new Option( "\tForces resampling of weights, regardless of whether\n" + "\tbase classifier handles instance weights", FORCE_RESAMPLE_WITH_WEIGHTS, 0, "-" + FORCE_RESAMPLE_WITH_WEIGHTS)); result.addAll(Collections.list(super.listOptions())); return result.elements(); } /** * Parses a given list of options. * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { setForceResampleWithWeights(Utils.getFlag(FORCE_RESAMPLE_WITH_WEIGHTS, options)); super.setOptions(options); } /** * Gets the current settings of the Classifier. * * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { List<String> result; result = new ArrayList<String>(); if (getForceResampleWithWeights()) result.add("-" + FORCE_RESAMPLE_WITH_WEIGHTS); Collections.addAll(result, super.getOptions()); return result.toArray(new String[result.size()]); } /** * Gets whether resampling with weights is forced, regardless of whether * the base classifier can handle instance weights. * * @return true if resampling with weights is forced. */ public boolean getForceResampleWithWeights() { return m_ForceResampleWithWeights; } /** * Sets whether resampling with weights is forced, regardless of whether * the base classifier can handle instance weights. * * @param value true to force resampling with weights. */ public void setForceResampleWithWeights(boolean value) { m_ForceResampleWithWeights = value; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String forceResampleWithWeightsTipText() { return "If enabled, forces the data to be resampled with weights, regardless " + "of whether the base classifier can handle instance weights."; } /** * builds the classifier. * * @param data the training data to be used for generating the * classifier. * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances data) throws Exception { // can classifier handle the data? getCapabilities().testWithFail(data); boolean resample = getForceResampleWithWeights() || (!(m_Classifier instanceof WeightedInstancesHandler) && ResampleUtils.hasInstanceWeights(data)); if (resample) { if (getDebug()) System.err.println(getClass().getName() + ": resampling training data"); data = data.resampleWithWeights(new Random(m_Seed)); } m_Classifier.buildClassifier(data); } /** * Calculates the class membership probabilities for the given test * instance. 
* * @param instance the instance to be classified * @return predicted class probability distribution * @throws Exception if distribution can't be computed successfully */ public double[] distributionForInstance(Instance instance) throws Exception { return m_Classifier.distributionForInstance(instance); } /** * Classifies the given test instance. * * @param instance the instance to be classified * @return the predicted most likely class for the instance or * Utils.missingValue() if no prediction is made * @throws Exception if an error occurred during the prediction */ @Override public double classifyInstance(Instance instance) throws Exception { return m_Classifier.classifyInstance(instance); } /** * Returns a string description of the model. * * @return the model */ public String toString() { StringBuilder result; result = new StringBuilder(); result.append(getClass().getSimpleName()).append("\n"); result.append(getClass().getSimpleName().replaceAll(".", "=")).append("\n\n"); result.append("Force resample with weights: " + getForceResampleWithWeights() + "\n"); result.append("Base classifier:\n"); result.append("- command-line: " + Utils.toCommandLine(m_Classifier) + "\n"); result.append("- handles instance weights: " + (m_Classifier instanceof WeightedInstancesHandler) + "\n\n"); result.append(m_Classifier.toString()); return result.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. * * @param args the options */ public static void main(String[] args) { runClassifier(new WeightedInstancesHandlerWrapper(), args); } }
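A short sketch of the wrapper in use, under the assumption that the training data carries non-uniform instance weights; the base learner (SMO) is illustrative, and forcing resampling means the behaviour is the same whether or not it handles weights itself:

import weka.classifiers.functions.SMO;
import weka.classifiers.meta.WeightedInstancesHandlerWrapper;
import weka.core.Instances;

public class WrapperExample {
  public static WeightedInstancesHandlerWrapper build(Instances weightedData) throws Exception {
    WeightedInstancesHandlerWrapper wrapper = new WeightedInstancesHandlerWrapper();
    wrapper.setClassifier(new SMO());
    wrapper.setSeed(42);                       // seed for resampleWithWeights
    wrapper.setForceResampleWithWeights(true); // resample even if the base learner handles weights
    wrapper.buildClassifier(weightedData);     // resamples, then trains the base learner
    return wrapper;
  }
}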
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/misc/InputMappedClassifier.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * InputMappedClassifier.java * Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.misc; import java.io.Serializable; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.Classifier; import weka.classifiers.SingleClassifierEnhancer; import weka.core.AdditionalMeasureProducer; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.DenseInstance; import weka.core.Drawable; import weka.core.Environment; import weka.core.EnvironmentHandler; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SerializationHelper; import weka.core.Utils; import weka.core.WeightedInstancesHandler; /** * <!-- globalinfo-start --> Wrapper classifier that addresses incompatible training and test data by building a mapping between the training data that a classifier has been built with and the incoming test instances' structure. Model * attributes that are not found in the incoming instances receive missing values, so do incoming nominal attribute values that the classifier has not seen before. A new classifier can be trained or an existing one loaded from a file. * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -I * Ignore case when matching attribute names and nominal values. * </pre> * * <pre> * -M * Suppress the output of the mapping report. * </pre> * * <pre> * -trim * Trim white space from either end of names before matching. * </pre> * * <pre> * -L &lt;path to model to load&gt; * Path to a model to load. If set, this model * will be used for prediction and any base classifier * specification will be ignored. Environment variables * may be used in the path (e.g. ${HOME}/myModel.model) * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -W * Full name of base classifier. 
* (default: weka.classifiers.rules.ZeroR) * </pre> * * <pre> * Options specific to classifier weka.classifiers.rules.ZeroR: * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <!-- options-end --> * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision$ * */ public class InputMappedClassifier extends SingleClassifierEnhancer implements Serializable, OptionHandler, Drawable, WeightedInstancesHandler, AdditionalMeasureProducer, EnvironmentHandler { /** For serialization */ private static final long serialVersionUID = 4901630631723287761L; /** The path to the serialized model to use (if any) */ protected String m_modelPath = ""; /** The header of the last known set of incoming test instances */ protected transient Instances m_inputHeader; /** The instances structure used to train the classifier with */ protected Instances m_modelHeader; /** Handle any environment variables used in the model path */ protected transient Environment m_env; /** Map from model attributes to incoming attributes */ protected transient int[] m_attributeMap; protected transient int[] m_attributeStatus; /** * For each model attribute, map from incoming nominal values to model nominal values */ protected transient int[][] m_nominalValueMap; /** Trim white space from both ends of attribute names and nominal values? */ protected boolean m_trim = true; /** Ignore case when matching attribute names and nominal values? */ protected boolean m_ignoreCase = true; /** Don't output the mapping report if set to true */ protected boolean m_suppressMappingReport = false; /** * If true, then a call to buildClassifier() will not overwrite any test structure that has been recorded with the current training structure. This is useful for getting a correct mapping report output in toString() after * buildClassifier has been called and before any test instance has been seen. Test structure and mapping will get reset if a test instance is received whose structure does not match the recorded test structure. */ protected boolean m_initialTestStructureKnown = false; /** Holds values for instances constructed for prediction */ protected double[] m_vals; /** * Returns a string describing this classifier * * @return a description of the classifier suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "Wrapper classifier that addresses incompatible training and test " + "data by building a mapping between the training data that " + "a classifier has been built with and the incoming test instances' " + "structure. Model attributes that are not found in the incoming " + "instances receive missing values, so do incoming nominal attribute " + "values that the classifier has not seen before. A new classifier " + "can be trained or an existing one loaded from a file."; } /** * Set the environment variables to use * * @param env * the environment variables to use */ @Override public void setEnvironment(final Environment env) { this.m_env = env; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String ignoreCaseForNamesTipText() { return "Ignore case when matching attribute names and nominal values."; } /** * Set whether to ignore case when matching attribute names and nominal values. 
* * @param ignore * true if case is to be ignored */ public void setIgnoreCaseForNames(final boolean ignore) { this.m_ignoreCase = ignore; } /** * Get whether to ignore case when matching attribute names and nominal values. * * @return true if case is to be ignored. */ public boolean getIgnoreCaseForNames() { return this.m_ignoreCase; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String trimTipText() { return "Trim white space from each end of attribute names and " + "nominal values before matching."; } /** * Set whether to trim white space from each end of names before matching. * * @param trim * true to trim white space. */ public void setTrim(final boolean trim) { this.m_trim = trim; } /** * Get whether to trim white space from each end of names before matching. * * @return true if white space is to be trimmed. */ public boolean getTrim() { return this.m_trim; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String suppressMappingReportTipText() { return "Don't output a report of model-to-input mappings."; } /** * Set whether to suppress output of the model-to-input mapping report. * * @param suppress * true to suppress this output. */ public void setSuppressMappingReport(final boolean suppress) { this.m_suppressMappingReport = suppress; } /** * Get whether to suppress output of the model-to-input mapping report. * * @return true if this output is to be suppressed. */ public boolean getSuppressMappingReport() { return this.m_suppressMappingReport; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String modelPathTipText() { return "Set the path from which to load a model. " + "Loading occurs when the first test instance " + "is received. Environment variables can be used in the " + "supplied path."; } /** * Set the path from which to load a model. Loading occurs when the first test instance is received or getModelHeader() is called programmatically. Environment variables can be used in the supplied path - e.g. ${HOME}/myModel.model. * * @param modelPath * the path to the model to load. * @throws Exception * if a problem occurs during loading. */ public void setModelPath(final String modelPath) throws Exception { if (this.m_env == null) { this.m_env = Environment.getSystemWide(); } this.m_modelPath = modelPath; // loadModel(modelPath); } /** * Get the path used for loading a model. * * @return the path used for loading a model. */ public String getModelPath() { return this.m_modelPath; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disable(Capability.RELATIONAL_ATTRIBUTES); return result; } /** * Returns an enumeration describing the available options. * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -I * Ignore case when matching attribute names and nominal values. * </pre> * * <pre> * -M * Suppress the output of the mapping report. * </pre> * * <pre> * -trim * Trim white space from either end of names before matching. * </pre> * * <pre> * -L &lt;path to model to load&gt; * Path to a model to load. 
If set, this model * will be used for prediction and any base classifier * specification will be ignored. Environment variables * may be used in the path (e.g. ${HOME}/myModel.model) * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -W * Full name of base classifier. * (default: weka.classifiers.rules.ZeroR) * </pre> * * <pre> * Options specific to classifier weka.classifiers.rules.ZeroR: * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <!-- options-end --> * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<Option>(4); newVector.addElement(new Option("\tIgnore case when matching attribute " + "names and nominal values.", "I", 0, "-I")); newVector.addElement(new Option("\tSuppress the output of the mapping report.", "M", 0, "-M")); newVector.addElement(new Option("\tTrim white space from either end of names " + "before matching.", "trim", 0, "-trim")); newVector.addElement(new Option("\tPath to a model to load. If set, this model" + "\n\twill be used for prediction and any base classifier" + "\n\tspecification will be ignored. Environment variables" + "\n\tmay be used in the path (e.g. ${HOME}/myModel.model)", "L", 1, "-L <path to model to load>")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -I * Ignore case when matching attribute names and nominal values. * </pre> * * <pre> * -M * Suppress the output of the mapping report. * </pre> * * <pre> * -trim * Trim white space from either end of names before matching. * </pre> * * <pre> * -L &lt;path to model to load&gt; * Path to a model to load. If set, this model * will be used for prediction and any base classifier * specification will be ignored. Environment variables * may be used in the path (e.g. ${HOME}/myModel.model) * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -W * Full name of base classifier. * (default: weka.classifiers.rules.ZeroR) * </pre> * * <pre> * Options specific to classifier weka.classifiers.rules.ZeroR: * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <!-- options-end --> * * Options after -- are passed to the designated classifier. * <p> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { this.setIgnoreCaseForNames(Utils.getFlag('I', options)); this.setSuppressMappingReport(Utils.getFlag('M', options)); this.setTrim(Utils.getFlag("trim", options)); String modelPath = Utils.getOption('L', options); if (modelPath.length() > 0) { this.setModelPath(modelPath); } super.setOptions(options); } /** * Gets the current settings of the Classifier. 
* * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { String[] superOptions = super.getOptions(); String[] options = new String[superOptions.length + 5]; int current = 0; if (this.getIgnoreCaseForNames()) { options[current++] = "-I"; } if (this.getSuppressMappingReport()) { options[current++] = "-M"; } if (this.getTrim()) { options[current++] = "-trim"; } if (this.getModelPath() != null && this.getModelPath().length() > 0) { options[current++] = "-L"; options[current++] = this.getModelPath(); } System.arraycopy(superOptions, 0, options, current, superOptions.length); current += superOptions.length; while (current < options.length) { options[current++] = ""; } return options; } /** * Set the test structure (if known in advance) that we are likely to see. If set, then a call to buildClassifier() will not overwrite any test structure that has been recorded with the current training structure. This is useful for * getting a correct mapping report output in toString() after buildClassifier has been called and before any test instance has been seen. Test structure and mapping will get reset if a test instance is received whose structure does not * match the recorded test structure. * * @param testStructure * the structure of the test instances that we are likely to see (if known in advance) */ public void setTestStructure(final Instances testStructure) { this.m_inputHeader = testStructure; this.m_initialTestStructureKnown = true; } /** * Set the structure of the data used to create the model. This method is useful for clients who have an existing in-memory model that they'd like to wrap in the InputMappedClassifier * * @param modelHeader * the structure of the data used to build the wrapped model */ public void setModelHeader(final Instances modelHeader) { this.m_modelHeader = modelHeader; } private void loadModel(String modelPath) throws Exception { if (modelPath != null && modelPath.length() > 0) { try { if (this.m_env == null) { this.m_env = Environment.getSystemWide(); } modelPath = this.m_env.substitute(modelPath); } catch (Exception ex) { // ignore any problems } try { Object[] modelAndHeader = SerializationHelper.readAll(modelPath); if (modelAndHeader.length != 2) { throw new Exception("[InputMappedClassifier] serialized model file " + "does not seem to contain both a model and " + "the instances header used in training it!"); } else { this.setClassifier((Classifier) modelAndHeader[0]); this.m_modelHeader = (Instances) modelAndHeader[1]; } } catch (Exception ex) { ex.printStackTrace(); } } } /** * Build the classifier * * @param data * the training data to be used for generating the bagged classifier. * @throws Exception * if the classifier could not be built successfully */ @Override public void buildClassifier(final Instances data) throws Exception { if (!this.m_initialTestStructureKnown) { this.m_inputHeader = new Instances(data, 0); } this.m_attributeMap = null; if (this.m_modelPath != null && this.m_modelPath.length() > 0) { return; // Don't build a classifier if a path has been specified } // can classifier handle the data? 
this.getCapabilities().testWithFail(data); this.m_Classifier.buildClassifier(data); // m_loadedClassifier = m_Classifier; this.m_modelHeader = new Instances(data, 0); } private boolean stringMatch(String one, String two) { if (this.m_trim) { one = one.trim(); two = two.trim(); } if (this.m_ignoreCase) { return one.equalsIgnoreCase(two); } else { return one.equals(two); } } /** * Helper method to pad/truncate strings * * @param s * String to modify * @param pad * character to pad with * @param len * length of final string * @return final String */ private String getFixedLengthString(final String s, final char pad, final int len) { String padded = null; if (len <= 0) { return s; } // truncate? if (s.length() >= len) { return s.substring(0, len); } else { char[] buf = new char[len - s.length()]; for (int j = 0; j < len - s.length(); j++) { buf[j] = pad; } padded = s + new String(buf); } return padded; } private StringBuffer createMappingReport() { StringBuffer result = new StringBuffer(); result.append("Attribute mappings:\n\n"); int maxLength = 0; for (int i = 0; i < this.m_modelHeader.numAttributes(); i++) { if (this.m_modelHeader.attribute(i).name().length() > maxLength) { maxLength = this.m_modelHeader.attribute(i).name().length(); } } maxLength += 12; int minLength = 16; String headerS = "Model attributes"; String sep = "----------------"; if (maxLength < minLength) { maxLength = minLength; } headerS = this.getFixedLengthString(headerS, ' ', maxLength); sep = this.getFixedLengthString(sep, '-', maxLength); sep += "\t ----------------\n"; headerS += "\t Incoming attributes\n"; result.append(headerS); result.append(sep); for (int i = 0; i < this.m_modelHeader.numAttributes(); i++) { Attribute temp = this.m_modelHeader.attribute(i); String attName = "(" + ((temp.isNumeric()) ? "numeric)" : "nominal)") + " " + temp.name(); attName = this.getFixedLengthString(attName, ' ', maxLength); attName += "\t--> "; result.append(attName); String inAttNum = ""; if (this.m_attributeStatus[i] == NO_MATCH) { inAttNum += "- "; result.append(inAttNum + "missing (no match)\n"); } else if (this.m_attributeStatus[i] == TYPE_MISMATCH) { inAttNum += (this.m_attributeMap[i] + 1) + " "; result.append(inAttNum + "missing (type mis-match)\n"); } else { Attribute inAtt = this.m_inputHeader.attribute(this.m_attributeMap[i]); String inName = "" + (this.m_attributeMap[i] + 1) + " (" + ((inAtt.isNumeric()) ? 
"numeric)" : "nominal)") + " " + inAtt.name(); result.append(inName + "\n"); } } return result; } protected static final int NO_MATCH = -1; protected static final int TYPE_MISMATCH = -2; protected static final int OK = -3; private boolean regenerateMapping() throws Exception { this.loadModel(this.m_modelPath); // load a model (if specified) if (this.m_modelHeader == null) { return false; } this.m_attributeMap = new int[this.m_modelHeader.numAttributes()]; this.m_attributeStatus = new int[this.m_modelHeader.numAttributes()]; this.m_nominalValueMap = new int[this.m_modelHeader.numAttributes()][]; for (int i = 0; i < this.m_modelHeader.numAttributes(); i++) { String modelAttName = this.m_modelHeader.attribute(i).name(); this.m_attributeStatus[i] = NO_MATCH; for (int j = 0; j < this.m_inputHeader.numAttributes(); j++) { String incomingAttName = this.m_inputHeader.attribute(j).name(); if (this.stringMatch(modelAttName, incomingAttName)) { this.m_attributeMap[i] = j; this.m_attributeStatus[i] = OK; Attribute modelAtt = this.m_modelHeader.attribute(i); Attribute incomingAtt = this.m_inputHeader.attribute(j); // check types if (modelAtt.type() != incomingAtt.type()) { this.m_attributeStatus[i] = TYPE_MISMATCH; break; } // now check nominal values (number, names...) if (modelAtt.numValues() != incomingAtt.numValues()) { System.out.println("[InputMappedClassifier] Warning: incoming nominal " + "attribute " + incomingAttName + " does not have the same " + "number of values as model attribute " + modelAttName); } if (modelAtt.isNominal() && incomingAtt.isNominal()) { int[] valuesMap = new int[incomingAtt.numValues()]; for (int k = 0; k < incomingAtt.numValues(); k++) { String incomingNomValue = incomingAtt.value(k); int indexInModel = modelAtt.indexOfValue(incomingNomValue); if (indexInModel < 0) { valuesMap[k] = NO_MATCH; } else { valuesMap[k] = indexInModel; } } this.m_nominalValueMap[i] = valuesMap; } } } } return true; } /** * Return the instance structure that the encapsulated model was built with. If the classifier will be built from scratch by InputMappedClassifier then this method just returns the default structure that is passed in as argument. * * @param defaultH * the default instances structure * @return the instances structure used to create the encapsulated model * @throws Exception * if a problem occurs */ public Instances getModelHeader(final Instances defaultH) throws Exception { this.loadModel(this.m_modelPath); // If the model header is null, then we must be going to build from // scratch in buildClassifier. Therefore, just return the supplied default, // since this has to match what we will build with Instances toReturn = (this.m_modelHeader == null) ? defaultH : this.m_modelHeader; return new Instances(toReturn, 0); } // get the mapped class index (i.e. 
the index in the incoming data of // the attribute that the model uses as the class) public int getMappedClassIndex() throws Exception { if (this.m_modelHeader == null) { throw new Exception("[InputMappedClassifier] No model available!"); } if (this.m_attributeMap[this.m_modelHeader.classIndex()] == NO_MATCH) { return -1; } return this.m_attributeMap[this.m_modelHeader.classIndex()]; } public synchronized Instance constructMappedInstance(final Instance incoming) throws Exception { boolean regenerateMapping = false; if (this.m_inputHeader == null) { this.m_inputHeader = incoming.dataset(); regenerateMapping = true; this.m_initialTestStructureKnown = false; } else if (!this.m_inputHeader.equalHeaders(incoming.dataset())) { this.m_inputHeader = incoming.dataset(); regenerateMapping = true; this.m_initialTestStructureKnown = false; } else if (this.m_attributeMap == null) { regenerateMapping = true; this.m_initialTestStructureKnown = false; } if (regenerateMapping) { this.regenerateMapping(); this.m_vals = null; if (!this.m_suppressMappingReport) { StringBuffer result = this.createMappingReport(); System.out.println(result.toString()); } } this.m_vals = new double[this.m_modelHeader.numAttributes()]; for (int i = 0; i < this.m_modelHeader.numAttributes(); i++) { if (this.m_attributeStatus[i] == OK) { Attribute modelAtt = this.m_modelHeader.attribute(i); this.m_inputHeader.attribute(this.m_attributeMap[i]); if (Utils.isMissingValue(incoming.value(this.m_attributeMap[i]))) { this.m_vals[i] = Utils.missingValue(); continue; } if (modelAtt.isNumeric()) { this.m_vals[i] = incoming.value(this.m_attributeMap[i]); } else if (modelAtt.isNominal()) { int mapVal = this.m_nominalValueMap[i][(int) incoming.value(this.m_attributeMap[i])]; if (mapVal == NO_MATCH) { this.m_vals[i] = Utils.missingValue(); } else { this.m_vals[i] = mapVal; } } } else { this.m_vals[i] = Utils.missingValue(); } } Instance newInst = new DenseInstance(incoming.weight(), this.m_vals); newInst.setDataset(this.m_modelHeader); return newInst; } @Override public double classifyInstance(final Instance inst) throws Exception { Instance converted = this.constructMappedInstance(inst); return this.m_Classifier.classifyInstance(converted); } @Override public double[] distributionForInstance(final Instance inst) throws Exception { Instance converted = this.constructMappedInstance(inst); return this.m_Classifier.distributionForInstance(converted); } @Override public String toString() { StringBuffer buff = new StringBuffer(); buff.append("InputMappedClassifier:\n\n"); try { this.loadModel(this.m_modelPath); } catch (Exception ex) { return "[InputMappedClassifier] Problem loading model."; } if (this.m_modelPath != null && this.m_modelPath.length() > 0) { buff.append("Model sourced from: " + this.m_modelPath + "\n\n"); } /* * if (m_loadedClassifier != null) { buff.append(m_loadedClassifier); } else * { */ buff.append(this.m_Classifier); // } if (!this.m_suppressMappingReport && this.m_inputHeader != null) { try { this.regenerateMapping(); } catch (Exception ex) { ex.printStackTrace(); return "[InputMappedClassifier] Problem loading model."; } if (this.m_attributeMap != null) { buff.append("\n" + this.createMappingReport().toString()); } } return buff.toString(); } /** * Returns the type of graph this classifier represents. 
* * @return the type of graph */ @Override public int graphType() { if (this.m_Classifier instanceof Drawable) { return ((Drawable) this.m_Classifier).graphType(); } else { return Drawable.NOT_DRAWABLE; } } /** * Returns an enumeration of the additional measure names * * @return an enumeration of the measure names */ @Override public Enumeration<String> enumerateMeasures() { Vector<String> newVector = new Vector<String>(); if (this.m_Classifier instanceof AdditionalMeasureProducer) { Enumeration<String> en = ((AdditionalMeasureProducer) this.m_Classifier).enumerateMeasures(); while (en.hasMoreElements()) { String mname = en.nextElement(); newVector.addElement(mname); } } return newVector.elements(); } /** * Returns the value of the named measure * * @param additionalMeasureName * the name of the measure to query for its value * @return the value of the named measure * @throws IllegalArgumentException * if the named measure is not supported */ @Override public double getMeasure(final String additionalMeasureName) { if (this.m_Classifier instanceof AdditionalMeasureProducer) { return ((AdditionalMeasureProducer) this.m_Classifier).getMeasure(additionalMeasureName); } else { throw new IllegalArgumentException(additionalMeasureName + " not supported (InputMappedClassifier)"); } } /** * Returns graph describing the classifier (if possible). * * @return the graph of the classifier in dotty format * @throws Exception * if the classifier cannot be graphed */ @Override public String graph() throws Exception { if (this.m_Classifier != null && this.m_Classifier instanceof Drawable) { return ((Drawable) this.m_Classifier).graph(); } else { throw new Exception("Classifier: " + this.getClassifierSpec() + " cannot be graphed"); } } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. * * @param argv * should contain the following arguments: -t training file [-T test file] [-c class index] */ public static void main(final String[] argv) { runClassifier(new InputMappedClassifier(), argv); } }
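A sketch of the mapping workflow above, assuming a test instance whose header matches the training header only after trimming and case-folding of names; the base classifier and variable names are illustrative:

import weka.classifiers.misc.InputMappedClassifier;
import weka.core.Instance;
import weka.core.Instances;

public class MappingExample {
  public static double score(Instances train, Instance testInst) throws Exception {
    InputMappedClassifier imc = new InputMappedClassifier();
    imc.setClassifier(new weka.classifiers.trees.J48());
    imc.setIgnoreCaseForNames(true); // match "Age" against "age"
    imc.setTrim(true);               // ignore stray whitespace around names
    imc.buildClassifier(train);
    // The attribute map is built lazily from the header of the first test
    // instance (testInst must belong to a dataset); unmatched model
    // attributes simply become missing values.
    return imc.classifyInstance(testInst);
  }
}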
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/misc/InputMappedClassifierBeanInfo.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * InputMappedClassifierBeanInfo.java * Copyright 2010-2012 University of Waikato */ package weka.classifiers.misc; import java.beans.PropertyDescriptor; import java.beans.SimpleBeanInfo; import java.util.ArrayList; /** * Bean info class for the InputMappedClassifier. Ensures that the * FileEnvironmentField class is used as the custom property editor * in the GOE for the modelPath property. * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision$ * */ public class InputMappedClassifierBeanInfo extends SimpleBeanInfo { /** * Get an array of PropertyDescriptors for the InputMappedClassifier's * public properties. * * @return an array of PropertyDescriptors */ public PropertyDescriptor[] getPropertyDescriptors() { try { PropertyDescriptor p1; ArrayList<PropertyDescriptor> pds = new ArrayList<PropertyDescriptor>(); p1 = new PropertyDescriptor("modelPath", InputMappedClassifier.class); p1.setPropertyEditorClass(weka.gui.beans.FileEnvironmentField.class); pds.add(p1); pds.add(new PropertyDescriptor("ignoreCaseForNames", InputMappedClassifier.class)); pds.add(new PropertyDescriptor("suppressMappingReport", InputMappedClassifier.class)); pds.add(new PropertyDescriptor("trim", InputMappedClassifier.class)); pds.add(new PropertyDescriptor("classifier", InputMappedClassifier.class)); // this one is only really needed for XMLSerialization pds.add(new PropertyDescriptor("options", InputMappedClassifier.class)); return pds.toArray(new PropertyDescriptor[1]); } catch (Exception ex) { ex.printStackTrace(); } return null; } }
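For context, a small sketch of how the JavaBeans machinery consumes this class: java.beans.Introspector locates InputMappedClassifierBeanInfo by naming convention in the same package and uses its descriptors, including the custom editor registered for modelPath. The printout below is purely illustrative:

import java.beans.BeanInfo;
import java.beans.Introspector;
import java.beans.PropertyDescriptor;

public class BeanInfoCheck {
  public static void main(String[] args) throws Exception {
    BeanInfo info = Introspector.getBeanInfo(weka.classifiers.misc.InputMappedClassifier.class);
    for (PropertyDescriptor pd : info.getPropertyDescriptors()) {
      // getPropertyEditorClass() is non-null only where a custom editor was set
      System.out.println(pd.getName() + " -> " + pd.getPropertyEditorClass());
    }
  }
}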
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/misc/SerializedClassifier.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * SerializedClassifier.java * Copyright (C) 2007-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.misc; import java.io.File; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.SerializationHelper; import weka.core.Utils; /** * <!-- globalinfo-start --> A wrapper around a serialized classifier model. * This classifier loads a serialized model and uses it to make predictions.<br/> * <br/> * Warning: since the serialized model doesn't get changed, cross-validation * cannot be used with this classifier. * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -model &lt;filename&gt; * The file containing the serialized model. * (required) * </pre> * * <!-- options-end --> * * @author fracpete (fracpete at waikato dot ac dot nz) * @version $Revision$ */ public class SerializedClassifier extends AbstractClassifier { /** for serialization */ private static final long serialVersionUID = 4599593909947628642L; /** the serialized classifier model used for making predictions */ protected transient Classifier m_Model = null; /** the file where the serialized model is stored */ protected File m_ModelFile = new File(System.getProperty("user.dir")); /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter * gui */ public String globalInfo() { return "A wrapper around a serialized classifier model. This classifier loads " + "a serialized model and uses it to make predictions.\n\n" + "Warning: since the serialized model doesn't get changed, cross-validation " + "cannot be used with this classifier."; } /** * Gets an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> result = new Vector<Option>(); result.addElement(new Option( "\tThe file containing the serialized model.\n" + "\t(required)", "model", 1, "-model <filename>")); result.addAll(Collections.list(super.listOptions())); return result.elements(); } /** * returns the options of the current setup * * @return the current options */ @Override public String[] getOptions() { Vector<String> result = new Vector<String>(); result.add("-model"); result.add("" + getModelFile()); Collections.addAll(result, super.getOptions()); return result.toArray(new String[result.size()]); } /** * Parses the options for this object. 
* <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -model &lt;filename&gt; * The file containing the serialized model. * (required) * </pre> * * <!-- options-end --> * * @param options the options to use * @throws Exception if setting of options fails */ @Override public void setOptions(String[] options) throws Exception { String tmpStr; super.setOptions(options); tmpStr = Utils.getOption("model", options); if (tmpStr.length() != 0) { setModelFile(new File(tmpStr)); } else { setModelFile(new File(System.getProperty("user.dir"))); } } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String modelFileTipText() { return "The serialized classifier model to use for predictions."; } /** * Gets the file containing the serialized model. * * @return the file. */ public File getModelFile() { return m_ModelFile; } /** * Sets the file containing the serialized model. * * @param value the file. */ public void setModelFile(File value) { m_ModelFile = value; if (value.exists() && value.isFile()) { try { initModel(); } catch (Exception e) { throw new IllegalArgumentException("Cannot load model from file '" + value + "': " + e); } } } /** * Sets the fully built model to use, if one doesn't want to load a model from * a file or already deserialized a model from somewhere else. * * @param value the built model * @see #getCurrentModel() */ public void setModel(Classifier value) { m_Model = value; } /** * Gets the currently loaded model (can be null). Call buildClassifier method * to load model from file. * * @return the current model * @see #setModel(Classifier) */ public Classifier getCurrentModel() { return m_Model; } /** * loads the serialized model if necessary, throws an Exception if the * deserialization fails. * * @throws Exception if deserialization fails */ protected void initModel() throws Exception { if (m_Model == null) { m_Model = (Classifier) SerializationHelper.read(m_ModelFile .getAbsolutePath()); } } /** * Returns default capabilities of the base classifier. * * @return the capabilities of the base classifier */ @Override public Capabilities getCapabilities() { Capabilities result; // init model if necessary if (m_ModelFile != null && m_ModelFile.exists() && m_ModelFile.isFile()) { try { initModel(); } catch (Exception e) { System.err.println(e); } } if (m_Model != null) { result = m_Model.getCapabilities(); } else { result = new Capabilities(this); result.disableAll(); } // set dependencies for (Capability cap : Capability.values()) { result.enableDependency(cap); } result.setOwner(this); return result; } /** * Calculates the class membership probabilities for the given test instance. * * @param instance the instance to be classified * @return predicted class probability distribution * @throws Exception if distribution can't be computed successfully */ @Override public double[] distributionForInstance(Instance instance) throws Exception { double[] result; // init model if necessary initModel(); result = m_Model.distributionForInstance(instance); return result; } /** * loads only the serialized classifier * * @param data the training instances * @throws Exception if something goes wrong */ @Override public void buildClassifier(Instances data) throws Exception { // init model if necessary initModel(); // can classifier handle the data? 
getCapabilities().testWithFail(data); } /** * Returns a string representation of the classifier * * @return the string representation of the classifier */ @Override public String toString() { StringBuffer result; if (m_Model == null) { result = new StringBuffer("No model loaded yet."); } else { result = new StringBuffer(); result.append("SerializedClassifier\n"); result.append("====================\n\n"); result.append("File: " + getModelFile() + "\n\n"); result.append(m_Model.toString()); } return result.toString(); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Runs the classifier with the given options * * @param args the commandline options */ public static void main(String[] args) { runClassifier(new SerializedClassifier(), args); } }
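A round-trip sketch: train and serialize a model, then score through SerializedClassifier. The file path and base learner are illustrative; SerializationHelper.write stores the single object that initModel() later reads back:

import java.io.File;
import weka.classifiers.misc.SerializedClassifier;
import weka.core.Instances;
import weka.core.SerializationHelper;

public class SerializedExample {
  public static double[] score(Instances train, Instances test) throws Exception {
    weka.classifiers.trees.J48 tree = new weka.classifiers.trees.J48();
    tree.buildClassifier(train);
    SerializationHelper.write("/tmp/j48.model", tree); // one serialized object

    SerializedClassifier sc = new SerializedClassifier();
    sc.setModelFile(new File("/tmp/j48.model"));       // eagerly calls initModel()
    return sc.distributionForInstance(test.instance(0));
  }
}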
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/pmml
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/pmml/consumer/GeneralRegression.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * GeneralRegression.java * Copyright (C) 2008-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.pmml.consumer; import java.io.Serializable; import java.util.ArrayList; import org.w3c.dom.Element; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import weka.core.Attribute; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.pmml.MiningSchema; import weka.core.pmml.PMMLUtils; import weka.core.pmml.TargetMetaInfo; /** * Class implementing import of PMML General Regression model. Can be * used as a Weka classifier for prediction (buildClassifier() * raises an Exception). * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision$ */ public class GeneralRegression extends PMMLClassifier implements Serializable { /** * For serialization */ private static final long serialVersionUID = 2583880411828388959L; /** * Enumerated type for the model type. */ enum ModelType { // same type of model REGRESSION ("regression"), GENERALLINEAR ("generalLinear"), MULTINOMIALLOGISTIC ("multinomialLogistic"), ORDINALMULTINOMIAL ("ordinalMultinomial"), GENERALIZEDLINEAR ("generalizedLinear"); private final String m_stringVal; ModelType(String name) { m_stringVal = name; } public String toString() { return m_stringVal; } } // the model type protected ModelType m_modelType = ModelType.REGRESSION; // the model name (if defined) protected String m_modelName; // the algorithm name (if defined) protected String m_algorithmName; // the function type (regression or classification) protected int m_functionType = Regression.RegressionTable.REGRESSION; /** * Enumerated type for the cumulative link function * (ordinal multinomial model type only). */ enum CumulativeLinkFunction { NONE ("none") { double eval(double value, double offset) { return Double.NaN; // no evaluation defined in this case! } }, LOGIT ("logit") { double eval(double value, double offset) { return 1.0 / (1.0 + Math.exp(-(value + offset))); } }, PROBIT ("probit") { double eval(double value, double offset) { return weka.core.matrix.Maths.pnorm(value + offset); } }, CLOGLOG ("cloglog") { double eval(double value, double offset) { return 1.0 - Math.exp(-Math.exp(value + offset)); } }, LOGLOG ("loglog") { double eval(double value, double offset) { return Math.exp(-Math.exp(-(value + offset))); } }, CAUCHIT ("cauchit") { double eval(double value, double offset) { return 0.5 + (1.0 / Math.PI) * Math.atan(value + offset); } }; /** * Evaluation function. 
* * @param value the raw response value * @param offset the offset to add to the raw value * @return the result of the link function */ abstract double eval(double value, double offset); private final String m_stringVal; /** * Constructor * * @param name textual name for this enum */ CumulativeLinkFunction(String name) { m_stringVal = name; } /* (non-Javadoc) * @see java.lang.Enum#toString() */ public String toString() { return m_stringVal; } } // cumulative link function (ordinal multinomial only) protected CumulativeLinkFunction m_cumulativeLinkFunction = CumulativeLinkFunction.NONE; /** * Enumerated type for the link function (general linear and * generalized linear model types only). */ enum LinkFunction { NONE ("none") { double eval(double value, double offset, double trials, double distParam, double linkParam) { return Double.NaN; // no evaluation defined in this case! } }, CLOGLOG ("cloglog") { double eval(double value, double offset, double trials, double distParam, double linkParam) { return (1.0 - Math.exp(-Math.exp(value + offset))) * trials; } }, IDENTITY ("identity") { double eval(double value, double offset, double trials, double distParam, double linkParam) { return (value + offset) * trials; } }, LOG ("log") { double eval(double value, double offset, double trials, double distParam, double linkParam) { return Math.exp(value + offset) * trials; } }, LOGC ("logc") { double eval(double value, double offset, double trials, double distParam, double linkParam) { return (1.0 - Math.exp(value + offset)) * trials; } }, LOGIT ("logit") { double eval(double value, double offset, double trials, double distParam, double linkParam) { return (1.0 / (1.0 + Math.exp(-(value + offset)))) * trials; } }, LOGLOG ("loglog") { double eval(double value, double offset, double trials, double distParam, double linkParam) { return Math.exp(-Math.exp(-(value + offset))) * trials; } }, NEGBIN ("negbin") { double eval(double value, double offset, double trials, double distParam, double linkParam) { return (1.0 / (distParam * (Math.exp(-(value + offset)) - 1.0))) * trials; } }, ODDSPOWER ("oddspower") { double eval(double value, double offset, double trials, double distParam, double linkParam) { return (linkParam < 0.0 || linkParam > 0.0) ? (1.0 / (1.0 + Math.pow(1.0 + linkParam * (value + offset), (-1.0 / linkParam)))) * trials : (1.0 / (1.0 + Math.exp(-(value + offset)))) * trials; } }, POWER ("power") { double eval(double value, double offset, double trials, double distParam, double linkParam) { return (linkParam < 0.0 || linkParam > 0.0) ? Math.pow(value + offset, (1.0 / linkParam)) * trials : Math.exp(value + offset) * trials; } }, PROBIT ("probit") { double eval(double value, double offset, double trials, double distParam, double linkParam) { return weka.core.matrix.Maths.pnorm(value + offset) * trials; } }; /** * Evaluation function. * * @param value the raw response value * @param offset the offset to add to the raw value * @param trials the trials value to multiply the result by * @param distParam the distribution parameter (negbin only) * @param linkParam the link parameter (power and oddspower only) * @return the result of the link function */ abstract double eval(double value, double offset, double trials, double distParam, double linkParam); private final String m_stringVal; /** * Constructor. 
* * @param name the textual name of this link function */ LinkFunction(String name) { m_stringVal = name; } /* (non-Javadoc) * @see java.lang.Enum#toString() */ public String toString() { return m_stringVal; } } // link function (generalLinear model type only) protected LinkFunction m_linkFunction = LinkFunction.NONE; protected double m_linkParameter = Double.NaN; protected String m_trialsVariable; protected double m_trialsValue = Double.NaN; /** * Enumerated type for the distribution (general linear * and generalized linear model types only). */ enum Distribution { NONE ("none"), NORMAL ("normal"), BINOMIAL ("binomial"), GAMMA ("gamma"), INVGAUSSIAN ("igauss"), NEGBINOMIAL ("negbin"), POISSON ("poisson"); private final String m_stringVal; Distribution(String name) { m_stringVal = name; } /* (non-Javadoc) * @see java.lang.Enum#toString() */ public String toString() { return m_stringVal; } } // generalLinear and generalizedLinear model type only protected Distribution m_distribution = Distribution.NORMAL; // ancillary parameter value for the negative binomial distribution protected double m_distParameter = Double.NaN; // if present, this variable is used during scoring generalizedLinear/generalLinear or // ordinalMultinomial models protected String m_offsetVariable; // if present, this variable is used during scoring generalizedLinear/generalLinear or // ordinalMultinomial models. It works like a user-specified intercept. // At most, only one of offsetVariable or offsetValue may be specified. protected double m_offsetValue = Double.NaN; /** * Small inner class to hold the name of a parameter plus * its optional descriptive label */ static class Parameter implements Serializable { // ESCA-JAVA0096: /** For serialization */ // CHECK ME WITH serialver private static final long serialVersionUID = 6502780192411755341L; protected String m_name = null; protected String m_label = null; } // List of model parameters protected ArrayList<Parameter> m_parameterList = new ArrayList<Parameter>(); /** * Small inner class to hold the name of a factor or covariate, * plus the index of the attribute it corresponds to in the * mining schema. */ static class Predictor implements Serializable { /** For serialization */ // CHECK ME WITH serialver private static final long serialVersionUID = 6502780192411755341L; protected String m_name = null; protected int m_miningSchemaIndex = -1; public String toString() { return m_name; } } // FactorList protected ArrayList<Predictor> m_factorList = new ArrayList<Predictor>(); // CovariateList protected ArrayList<Predictor> m_covariateList = new ArrayList<Predictor>(); /** * Small inner class to hold details on a predictor-to-parameter * correlation. */ static class PPCell implements Serializable { /** For serialization */ // CHECK ME WITH serialver private static final long serialVersionUID = 6502780192411755341L; protected String m_predictorName = null; protected String m_parameterName = null; // either the exponent of a numeric attribute or the index of // a discrete value protected double m_value = 0; // optional. The default is for all target categories to // share the same PPMatrix. // TO-DO: implement multiple PPMatrixes protected String m_targetCategory = null; } // PPMatrix (predictor-to-parameter matrix) // rows = parameters, columns = predictors (attributes) protected PPCell[][] m_ppMatrix; /** * Small inner class to hold a single entry in the * ParamMatrix (parameter matrix). 
*/ static class PCell implements Serializable { /** For serialization */ // CHECK ME WITH serialver private static final long serialVersionUID = 6502780192411755341L; // may be null for numeric target. May also be null if this coefficient // applies to all target categories. protected String m_targetCategory = null; protected String m_parameterName = null; // coefficient protected double m_beta = 0.0; // optional degrees of freedom protected int m_df = -1; } // ParamMatrix. rows = target categories (only one if target is numeric), // columns = parameters (in order that they occur in the parameter list). protected PCell[][] m_paramMatrix; /** * Constructs a GeneralRegression classifier. * * @param model the Element that holds the model definition * @param dataDictionary the data dictionary as a set of Instances * @param miningSchema the mining schema * @throws Exception if there is a problem constructing the general regression * object from the PMML. */ public GeneralRegression(Element model, Instances dataDictionary, MiningSchema miningSchema) throws Exception { super(dataDictionary, miningSchema); // get the model type String mType = model.getAttribute("modelType"); boolean found = false; for (ModelType m : ModelType.values()) { if (m.toString().equals(mType)) { m_modelType = m; found = true; break; } } if (!found) { throw new Exception("[GeneralRegression] unknown model type: " + mType); } if (m_modelType == ModelType.ORDINALMULTINOMIAL) { // get the cumulative link function String cLink = model.getAttribute("cumulativeLink"); found = false; for (CumulativeLinkFunction c : CumulativeLinkFunction.values()) { if (c.toString().equals(cLink)) { m_cumulativeLinkFunction = c; found = true; break; } } if (!found) { throw new Exception("[GeneralRegression] unknown cumulative link function " + cLink); } } else if (m_modelType == ModelType.GENERALIZEDLINEAR || m_modelType == ModelType.GENERALLINEAR) { // get the link function String link = model.getAttribute("linkFunction"); found = false; for (LinkFunction l : LinkFunction.values()) { if (l.toString().equals(link)) { m_linkFunction = l; found = true; break; } } if (!found) { throw new Exception("[GeneralRegression] unknown link function " + link); } // get the link parameter String linkP = model.getAttribute("linkParameter"); if (linkP != null && linkP.length() > 0) { try { m_linkParameter = Double.parseDouble(linkP); } catch (IllegalArgumentException ex) { throw new Exception("[GeneralRegression] unable to parse the link parameter"); } } // get the trials variable String trials = model.getAttribute("trialsVariable"); if (trials != null && trials.length() > 0) { m_trialsVariable = trials; } // get the trials value String trialsV = model.getAttribute("trialsValue"); if (trialsV != null && trialsV.length() > 0) { try { m_trialsValue = Double.parseDouble(trialsV); } catch (IllegalArgumentException ex) { throw new Exception("[GeneralRegression] unable to parse the trials value"); } } } String mName = model.getAttribute("modelName"); if (mName != null && mName.length() > 0) { m_modelName = mName; } String fName = model.getAttribute("functionName"); if (fName.equals("classification")) { m_functionType = Regression.RegressionTable.CLASSIFICATION; } String algName = model.getAttribute("algorithmName"); if (algName != null && algName.length() > 0) { m_algorithmName = algName; } String distribution = model.getAttribute("distribution"); if (distribution != null && distribution.length() > 0) { found = false; for (Distribution d : Distribution.values()) { if
(d.toString().equals(distribution)) { m_distribution = d; found = true; break; } } if (!found) { throw new Exception("[GeneralRegression] unknown distribution type " + distribution); } } String distP = model.getAttribute("distParameter"); if (distP != null && distP.length() > 0) { try { m_distParameter = Double.parseDouble(distP); } catch (IllegalArgumentException ex) { throw new Exception("[GeneralRegression] unable to parse the distribution parameter"); } } String offsetV = model.getAttribute("offsetVariable"); if (offsetV != null && offsetV.length() > 0) { m_offsetVariable = offsetV; } String offsetVal = model.getAttribute("offsetValue"); if (offsetVal != null && offsetVal.length() > 0) { try { m_offsetValue = Double.parseDouble(offsetVal); } catch (IllegalArgumentException ex) { throw new Exception("[GeneralRegression] unable to parse the offset value"); } } // get the parameter list readParameterList(model); // get the factors and covariates readFactorsAndCovariates(model, "FactorList"); readFactorsAndCovariates(model, "CovariateList"); // read the PPMatrix readPPMatrix(model); // read the parameter estimates readParamMatrix(model); } /** * Read the list of parameters. * * @param model the Element that contains the model * @throws Exception if there is some problem with extracting the * parameters. */ protected void readParameterList(Element model) throws Exception { NodeList paramL = model.getElementsByTagName("ParameterList"); // should be just one parameter list if (paramL.getLength() == 1) { Node paramN = paramL.item(0); if (paramN.getNodeType() == Node.ELEMENT_NODE) { NodeList parameterList = ((Element)paramN).getElementsByTagName("Parameter"); for (int i = 0; i < parameterList.getLength(); i++) { Node parameter = parameterList.item(i); if (parameter.getNodeType() == Node.ELEMENT_NODE) { Parameter p = new Parameter(); p.m_name = ((Element)parameter).getAttribute("name"); String label = ((Element)parameter).getAttribute("label"); if (label != null && label.length() > 0) { p.m_label = label; } m_parameterList.add(p); } } } } else { throw new Exception("[GeneralRegression] more than one parameter list!"); } } /** * Read the lists of factors and covariates. 
* * @param model the Element that contains the model * @param factorOrCovariate holds the String "FactorList" or * "CovariateList" * @throws Exception if there is a factor or covariate listed * that isn't in the mining schema */ protected void readFactorsAndCovariates(Element model, String factorOrCovariate) throws Exception { Instances miningSchemaI = m_miningSchema.getFieldsAsInstances(); NodeList factorL = model.getElementsByTagName(factorOrCovariate); if (factorL.getLength() == 1) { // should be 0 or 1 FactorList element Node factor = factorL.item(0); if (factor.getNodeType() == Node.ELEMENT_NODE) { NodeList predL = ((Element)factor).getElementsByTagName("Predictor"); for (int i = 0; i < predL.getLength(); i++) { Node pred = predL.item(i); if (pred.getNodeType() == Node.ELEMENT_NODE) { Predictor p = new Predictor(); p.m_name = ((Element)pred).getAttribute("name"); // find the index of this predictor in the mining schema boolean found = false; for (int j = 0; j < miningSchemaI.numAttributes(); j++) { if (miningSchemaI.attribute(j).name().equals(p.m_name)) { found = true; p.m_miningSchemaIndex = j; break; } } if (found) { if (factorOrCovariate.equals("FactorList")) { m_factorList.add(p); } else { m_covariateList.add(p); } } else { throw new Exception("[GeneralRegression] reading factors and covariates - " + "unable to find predictor " + p.m_name + " in the mining schema"); } } } } } else if (factorL.getLength() > 1){ throw new Exception("[GeneralRegression] more than one " + factorOrCovariate + "! "); } } /** * Read the PPMatrix from the xml. Does not handle multiple PPMatrixes yet. * * @param model the Element that contains the model * @throws Exception if there is a problem parsing cell values. */ protected void readPPMatrix(Element model) throws Exception { Instances miningSchemaI = m_miningSchema.getFieldsAsInstances(); NodeList matrixL = model.getElementsByTagName("PPMatrix"); // should be exactly one PPMatrix if (matrixL.getLength() == 1) { // allocate space for the matrix // column that corresponds to the class will be empty (and will be missed out // when printing the model). 
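// m_ppMatrix layout: rows are indexed by parameter, columns by mining-schema attribute.
// For a covariate the cell value is the exponent applied to the raw attribute value; for a
// factor it is the index of the discrete value to match (or, when a numeric field is treated
// as a factor, the raw value itself). See incomingParamVector() for how these cells are used.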
m_ppMatrix = new PPCell[m_parameterList.size()][miningSchemaI.numAttributes()]; Node ppM = matrixL.item(0); if (ppM.getNodeType() == Node.ELEMENT_NODE) { NodeList cellL = ((Element)ppM).getElementsByTagName("PPCell"); for (int i = 0; i < cellL.getLength(); i++) { Node cell = cellL.item(i); if (cell.getNodeType() == Node.ELEMENT_NODE) { String predictorName = ((Element)cell).getAttribute("predictorName"); String parameterName = ((Element)cell).getAttribute("parameterName"); String value = ((Element)cell).getAttribute("value"); double expOrIndex = -1; int predictorIndex = -1; int parameterIndex = -1; for (int j = 0; j < m_parameterList.size(); j++) { if (m_parameterList.get(j).m_name.equals(parameterName)) { parameterIndex = j; break; } } if (parameterIndex == -1) { throw new Exception("[GeneralRegression] unable to find parameter name " + parameterName + " in parameter list"); } Predictor p = getCovariate(predictorName); if (p != null) { try { expOrIndex = Double.parseDouble(value); predictorIndex = p.m_miningSchemaIndex; } catch (IllegalArgumentException ex) { throw new Exception("[GeneralRegression] unable to parse PPCell value: " + value); } } else { // try as a factor p = getFactor(predictorName); if (p != null) { // An example pmml file from DMG seems to suggest that it // is possible for a continuous variable in the mining schema // to be treated as a factor, so we have to check for this if (miningSchemaI.attribute(p.m_miningSchemaIndex).isNumeric()) { // parse this value as a double. It will be treated as a value // to match rather than an exponent since we are dealing with // a factor here try { expOrIndex = Double.parseDouble(value); } catch (IllegalArgumentException ex) { throw new Exception("[GeneralRegression] unable to parse PPCell value: " + value); } } else { // it is a nominal attribute in the mining schema so find // the index that corresponds to this value Attribute att = miningSchemaI.attribute(p.m_miningSchemaIndex); expOrIndex = att.indexOfValue(value); if (expOrIndex == -1) { throw new Exception("[GeneralRegression] unable to find PPCell value " + value + " in mining schema attribute " + att.name()); } } } else { throw new Exception("[GeneralRegression] can't find predictor " + predictorName + " in either the factors list " + "or the covariates list"); } predictorIndex = p.m_miningSchemaIndex; } // fill in cell value PPCell ppc = new PPCell(); ppc.m_predictorName = predictorName; ppc.m_parameterName = parameterName; ppc.m_value = expOrIndex; // TO-DO: ppc.m_targetCategory (when handling for multiple PPMatrixes is implemented) m_ppMatrix[parameterIndex][predictorIndex] = ppc; } } } } else { throw new Exception("[GeneralRegression] more than one PPMatrix!"); } } private Predictor getCovariate(String predictorName) { for (int i = 0; i < m_covariateList.size(); i++) { if (predictorName.equals(m_covariateList.get(i).m_name)) { return m_covariateList.get(i); } } return null; } private Predictor getFactor(String predictorName) { for (int i = 0; i < m_factorList.size(); i++) { if (predictorName.equals(m_factorList.get(i).m_name)) { return m_factorList.get(i); } } return null; } /** * Read the parameter matrix from the xml.
* * @param model Element that holds the model * @throws Exception if a problem is encountered during extraction of * the parameter matrix */ private void readParamMatrix(Element model) throws Exception { Instances miningSchemaI = m_miningSchema.getFieldsAsInstances(); Attribute classAtt = miningSchemaI.classAttribute(); // used when function type is classification but class attribute is numeric // in the mining schema. We will assume that there is a Target specified in // the pmml that defines the legal values for this class. ArrayList<String> targetVals = null; NodeList matrixL = model.getElementsByTagName("ParamMatrix"); if (matrixL.getLength() != 1) { throw new Exception("[GeneralRegression] more than one ParamMatrix!"); } Element matrix = (Element)matrixL.item(0); // check for the case where the class in the mining schema is numeric, // but this attribute is treated as discrete if (m_functionType == Regression.RegressionTable.CLASSIFICATION && classAtt.isNumeric()) { // try and convert the class attribute to nominal. For this to succeed // there has to be a Target element defined in the PMML. if (!m_miningSchema.hasTargetMetaData()) { throw new Exception("[GeneralRegression] function type is classification and " + "class attribute in mining schema is numeric, however, " + "there is no Target element " + "specifying legal discrete values for the target!"); } if (m_miningSchema.getTargetMetaData().getOptype() != TargetMetaInfo.Optype.CATEGORICAL) { throw new Exception("[GeneralRegression] function type is classification and " + "class attribute in mining schema is numeric, however " + "Target element in PMML does not have optype categorical!"); } // OK now get legal values targetVals = m_miningSchema.getTargetMetaData().getValues(); if (targetVals.size() == 0) { throw new Exception("[GeneralRegression] function type is classification and " + "class attribute in mining schema is numeric, however " + "Target element in PMML does not have any discrete values " + "defined!"); } // Finally, convert the class in the mining schema to nominal m_miningSchema.convertNumericAttToNominal(miningSchemaI.classIndex(), targetVals); } // allocate space for the matrix m_paramMatrix = new PCell[(classAtt.isNumeric()) ? 
1 : classAtt.numValues()][m_parameterList.size()]; NodeList pcellL = matrix.getElementsByTagName("PCell"); for (int i = 0; i < pcellL.getLength(); i++) { // indicates that this beta applies to all target categories // or target is numeric int targetCategoryIndex = -1; int parameterIndex = -1; Node pcell = pcellL.item(i); if (pcell.getNodeType() == Node.ELEMENT_NODE) { String paramName = ((Element)pcell).getAttribute("parameterName"); String targetCatName = ((Element)pcell).getAttribute("targetCategory"); String coefficient = ((Element)pcell).getAttribute("beta"); String df = ((Element)pcell).getAttribute("df"); for (int j = 0; j < m_parameterList.size(); j++) { if (m_parameterList.get(j).m_name.equals(paramName)) { parameterIndex = j; // use the label if defined if (m_parameterList.get(j).m_label != null) { paramName = m_parameterList.get(j).m_label; } break; } } if (parameterIndex == -1) { throw new Exception("[GeneralRegression] unable to find parameter name " + paramName + " in parameter list"); } if (targetCatName != null && targetCatName.length() > 0) { if (classAtt.isNominal() || classAtt.isString()) { targetCategoryIndex = classAtt.indexOfValue(targetCatName); } else { throw new Exception("[GeneralRegression] found a PCell with a named " + "target category: " + targetCatName + " but class attribute is numeric in " + "mining schema"); } } PCell p = new PCell(); if (targetCategoryIndex != -1) { p.m_targetCategory = targetCatName; } p.m_parameterName = paramName; try { p.m_beta = Double.parseDouble(coefficient); } catch (IllegalArgumentException ex) { throw new Exception("[GeneralRegression] unable to parse beta value " + coefficient + " as a double from PCell"); } if (df != null && df.length() > 0) { try { p.m_df = Integer.parseInt(df); } catch (IllegalArgumentException ex) { throw new Exception("[GeneralRegression] unable to parse df value " + df + " as an int from PCell"); } } if (targetCategoryIndex != -1) { m_paramMatrix[targetCategoryIndex][parameterIndex] = p; } else { // this PCell applies to all target categories (covers numeric class, in // which case there will be only one row in the matrix anyway) for (int j = 0; j < m_paramMatrix.length; j++) { m_paramMatrix[j][parameterIndex] = p; } } } } } /** * Return a textual description of this general regression.
* * @return a description of this general regression */ public String toString() { StringBuffer temp = new StringBuffer(); temp.append("PMML version " + getPMMLVersion()); if (!getCreatorApplication().equals("?")) { temp.append("\nApplication: " + getCreatorApplication()); } temp.append("\nPMML Model: " + m_modelType); temp.append("\n\n"); temp.append(m_miningSchema); if (m_factorList.size() > 0) { temp.append("Factors:\n"); for (Predictor p : m_factorList) { temp.append("\t" + p + "\n"); } } temp.append("\n"); if (m_covariateList.size() > 0) { temp.append("Covariates:\n"); for (Predictor p : m_covariateList) { temp.append("\t" + p + "\n"); } } temp.append("\n"); printPPMatrix(temp); temp.append("\n"); printParameterMatrix(temp); // do the link function stuff temp.append("\n"); if (m_linkFunction != LinkFunction.NONE) { temp.append("Link function: " + m_linkFunction); if (m_offsetVariable != null) { temp.append("\n\tOffset variable " + m_offsetVariable); } else if (!Double.isNaN(m_offsetValue)) { temp.append("\n\tOffset value " + m_offsetValue); } if (m_trialsVariable != null) { temp.append("\n\tTrials variable " + m_trialsVariable); } else if (!Double.isNaN(m_trialsValue)) { temp.append("\n\tTrials value " + m_trialsValue); } if (m_distribution != Distribution.NONE) { temp.append("\nDistribution: " + m_distribution); } if (m_linkFunction == LinkFunction.NEGBIN && m_distribution == Distribution.NEGBINOMIAL && !Double.isNaN(m_distParameter)) { temp.append("\n\tDistribution parameter " + m_distParameter); } if (m_linkFunction == LinkFunction.POWER || m_linkFunction == LinkFunction.ODDSPOWER) { if (!Double.isNaN(m_linkParameter)) { temp.append("\n\nLink parameter " + m_linkParameter); } } } if (m_cumulativeLinkFunction != CumulativeLinkFunction.NONE) { temp.append("Cumulative link function: " + m_cumulativeLinkFunction); if (m_offsetVariable != null) { temp.append("\n\tOffset variable " + m_offsetVariable); } else if (!Double.isNaN(m_offsetValue)) { temp.append("\n\tOffset value " + m_offsetValue); } } temp.append("\n"); return temp.toString(); } /** * Format and print the PPMatrix to the supplied StringBuffer. * * @param buff the StringBuffer to append to */ protected void printPPMatrix(StringBuffer buff) { Instances miningSchemaI = m_miningSchema.getFieldsAsInstances(); int maxAttWidth = 0; for (int i = 0; i < miningSchemaI.numAttributes(); i++) { Attribute a = miningSchemaI.attribute(i); if (a.name().length() > maxAttWidth) { maxAttWidth = a.name().length(); } } // check the width of the values for (int i = 0; i < m_parameterList.size(); i++) { for (int j = 0; j < miningSchemaI.numAttributes(); j++) { if (m_ppMatrix[i][j] != null) { double width = Math.log(Math.abs(m_ppMatrix[i][j].m_value)) / Math.log(10.0); if (width < 0) { width = 1; } // decimal + # decimal places + 1 width += 2.0; if ((int)width > maxAttWidth) { maxAttWidth = (int)width; } if (miningSchemaI.attribute(j).isNominal() || miningSchemaI.attribute(j).isString()) { // check the width of this value String val = miningSchemaI.attribute(j).value((int)m_ppMatrix[i][j].m_value) + " "; if (val.length() > maxAttWidth) { maxAttWidth = val.length(); } } } } } // get the max parameter width int maxParamWidth = "Parameter ".length(); for (Parameter p : m_parameterList) { String temp = (p.m_label != null) ? 
p.m_label + " " : p.m_name + " "; if (temp.length() > maxParamWidth) { maxParamWidth = temp.length(); } } buff.append("Predictor-to-Parameter matrix:\n"); buff.append(PMMLUtils.pad("Predictor", " ", (maxParamWidth + (maxAttWidth * 2 + 2)) - "Predictor".length(), true)); buff.append("\n" + PMMLUtils.pad("Parameter", " ", maxParamWidth - "Parameter".length(), false)); // attribute names for (int i = 0; i < miningSchemaI.numAttributes(); i++) { if (i != miningSchemaI.classIndex()) { String attName = miningSchemaI.attribute(i).name(); buff.append(PMMLUtils.pad(attName, " ", maxAttWidth + 1 - attName.length(), true)); } } buff.append("\n"); for (int i = 0; i < m_parameterList.size(); i++) { Parameter param = m_parameterList.get(i); String paramS = (param.m_label != null) ? param.m_label : param.m_name; buff.append(PMMLUtils.pad(paramS, " ", maxParamWidth - paramS.length(), false)); for (int j = 0; j < miningSchemaI.numAttributes(); j++) { if (j != miningSchemaI.classIndex()) { PPCell p = m_ppMatrix[i][j]; String val = " "; if (p != null) { if (miningSchemaI.attribute(j).isNominal() || miningSchemaI.attribute(j).isString()) { val = miningSchemaI.attribute(j).value((int)p.m_value); } else { val = "" + Utils.doubleToString(p.m_value, maxAttWidth, 4).trim(); } } buff.append(PMMLUtils.pad(val, " ", maxAttWidth + 1 - val.length(), true)); } } buff.append("\n"); } } /** * Format and print the parameter matrix to the supplied StringBuffer. * * @param buff the StringBuffer to append to */ protected void printParameterMatrix(StringBuffer buff) { Instances miningSchemaI = m_miningSchema.getFieldsAsInstances(); // get the maximum class value width (nominal) int maxClassWidth = miningSchemaI.classAttribute().name().length(); if (miningSchemaI.classAttribute().isNominal() || miningSchemaI.classAttribute().isString()) { for (int i = 0; i < miningSchemaI.classAttribute().numValues(); i++) { if (miningSchemaI.classAttribute().value(i).length() > maxClassWidth) { maxClassWidth = miningSchemaI.classAttribute().value(i).length(); } } } // get the maximum parameter name/label width int maxParamWidth = 0; for (int i = 0; i < m_parameterList.size(); i++) { Parameter p = m_parameterList.get(i); String val = (p.m_label != null) ? p.m_label + " " : p.m_name + " "; if (val.length() > maxParamWidth) { maxParamWidth = val.length(); } } // get the max beta value width int maxBetaWidth = "Coeff.".length(); for (int i = 0; i < m_paramMatrix.length; i++) { for (int j = 0; j < m_parameterList.size(); j++) { PCell p = m_paramMatrix[i][j]; if (p != null) { double width = Math.log(Math.abs(p.m_beta)) / Math.log(10); if (width < 0) { width = 1; } // decimal + # decimal places + 1 width += 7.0; if ((int)width > maxBetaWidth) { maxBetaWidth = (int)width; } } } } buff.append("Parameter estimates:\n"); buff.append(PMMLUtils.pad(miningSchemaI.classAttribute().name(), " ", maxClassWidth + maxParamWidth + 2 - miningSchemaI.classAttribute().name().length(), false)); buff.append(PMMLUtils.pad("Coeff.", " ", maxBetaWidth + 1 - "Coeff.".length(), true)); buff.append(PMMLUtils.pad("df", " ", maxBetaWidth - "df".length(), true)); buff.append("\n"); for (int i = 0; i < m_paramMatrix.length; i++) { // scan for non-null entry for this class value boolean ok = false; for (int j = 0; j < m_parameterList.size(); j++) { if (m_paramMatrix[i][j] != null) { ok = true; } } if (!ok) { continue; } // first the class value (if nominal) String cVal = (miningSchemaI.classAttribute().isNominal() || miningSchemaI.classAttribute().isString()) ? 
miningSchemaI.classAttribute().value(i) : " "; buff.append(PMMLUtils.pad(cVal, " ", maxClassWidth - cVal.length(), false)); buff.append("\n"); for (int j = 0; j < m_parameterList.size(); j++) { PCell p = m_paramMatrix[i][j]; if (p != null) { String label = p.m_parameterName; buff.append(PMMLUtils.pad(label, " ", maxClassWidth + maxParamWidth + 2 - label.length(), true)); String betaS = Utils.doubleToString(p.m_beta, maxBetaWidth, 4).trim(); buff.append(PMMLUtils.pad(betaS, " ", maxBetaWidth + 1 - betaS.length(), true)); String dfS = Utils.doubleToString(p.m_df, maxBetaWidth, 4).trim(); buff.append(PMMLUtils.pad(dfS, " ", maxBetaWidth - dfS.length(), true)); buff.append("\n"); } } } } /** * Construct the incoming parameter vector based on the values * in the incoming test instance. * * @param incomingInst the values of the incoming test instance * @return the populated parameter vector ready to be multiplied against * the vector of coefficients. * @throws Exception if there is some problem whilst constructing the * parameter vector */ private double[] incomingParamVector(double[] incomingInst) throws Exception { Instances miningSchemaI = m_miningSchema.getFieldsAsInstances(); double[] incomingPV = new double[m_parameterList.size()]; for (int i = 0; i < m_parameterList.size(); i++) { // // default is that this row represents the intercept. // this will be the case if there are all null entries in this row incomingPV[i] = 1.0; // loop over the attributes (predictors) for (int j = 0; j < miningSchemaI.numAttributes(); j++) { PPCell cellEntry = m_ppMatrix[i][j]; Predictor p = null; if (cellEntry != null) { if ((p = getFactor(cellEntry.m_predictorName)) != null) { if ((int)incomingInst[p.m_miningSchemaIndex] == (int)cellEntry.m_value) { incomingPV[i] *= 1.0; // we have a match } else { incomingPV[i] *= 0.0; } } else if ((p = getCovariate(cellEntry.m_predictorName)) != null) { incomingPV[i] *= Math.pow(incomingInst[p.m_miningSchemaIndex], cellEntry.m_value); } else { throw new Exception("[GeneralRegression] can't find predictor " + cellEntry.m_predictorName + " in either the list of factors or covariates"); } } } } return incomingPV; } /** * Classifies the given test instance. The instance has to belong to a * dataset when it's being classified. * * @param inst the instance to be classified * @return the predicted most likely class for the instance or * Utils.missingValue() if no prediction is made * @exception Exception if an error occurred during the prediction */ public double[] distributionForInstance(Instance inst) throws Exception { if (!m_initialized) { mapToMiningSchema(inst.dataset()); } double[] preds = null; if (m_miningSchema.getFieldsAsInstances().classAttribute().isNumeric()) { preds = new double[1]; } else { preds = new double[m_miningSchema.getFieldsAsInstances().classAttribute().numValues()]; } // create an array of doubles that holds values from the incoming // instance; in order of the fields in the mining schema. We will // also handle missing values and outliers here. double[] incoming = m_fieldsMap.instanceToSchema(inst, m_miningSchema); // In this implementation we will default to information in the Target element (default // value for numeric prediction and prior probabilities for classification). If there is // no Target element defined, then an Exception is thrown. 
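// Missing-value policy for scoring: any NaN among the non-class fields marks the instance
// as incomplete. If Target meta data is present we fall back to the prior probabilities
// (classification) or the default value (regression); otherwise a warning is logged and an
// uninformative prediction (zero probabilities or NaN) is returned rather than a guess.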
boolean hasMissing = false; for (int i = 0; i < incoming.length; i++) { if (i != m_miningSchema.getFieldsAsInstances().classIndex() && Double.isNaN(incoming[i])) { hasMissing = true; break; } } if (hasMissing) { if (!m_miningSchema.hasTargetMetaData()) { String message = "[GeneralRegression] WARNING: Instance to predict has missing value(s) but " + "there is no missing value handling meta data and no " + "prior probabilities/default value to fall back to. No " + "prediction will be made (" + ((m_miningSchema.getFieldsAsInstances().classAttribute().isNominal() || m_miningSchema.getFieldsAsInstances().classAttribute().isString()) ? "zero probabilities output)." : "NaN output)."); if (m_log == null) { System.err.println(message); } else { m_log.logMessage(message); } if (m_miningSchema.getFieldsAsInstances().classAttribute().isNumeric()) { preds[0] = Utils.missingValue(); } return preds; } else { // use prior probabilities/default value TargetMetaInfo targetData = m_miningSchema.getTargetMetaData(); if (m_miningSchema.getFieldsAsInstances().classAttribute().isNumeric()) { preds[0] = targetData.getDefaultValue(); } else { Instances miningSchemaI = m_miningSchema.getFieldsAsInstances(); for (int i = 0; i < miningSchemaI.classAttribute().numValues(); i++) { preds[i] = targetData.getPriorProbability(miningSchemaI.classAttribute().value(i)); } } return preds; } } else { // construct input parameter vector here double[] inputParamVector = incomingParamVector(incoming); computeResponses(incoming, inputParamVector, preds); } return preds; } /** * Compute the responses for the function given the parameter values corresponding * to the current incoming instance. * * @param incomingInst raw incoming instance values (after missing value * replacement and outlier treatment) * @param incomingParamVector incoming instance values mapped to parameters * @param responses will contain the responses computed by the function * @throws Exception if something goes wrong */ private void computeResponses(double[] incomingInst, double[] incomingParamVector, double[] responses) throws Exception { for (int i = 0; i < responses.length; i++) { for (int j = 0; j < m_parameterList.size(); j++) { // a row of the parameter matrix should have all non-null entries // except for the last class (in the case of classification) which // should have just an intercept of 0. Need to handle the case where // no intercept has been defined in the pmml file for the last class PCell p = m_paramMatrix[i][j]; if (p == null) { responses[i] += 0.0 * incomingParamVector[j]; } else { responses[i] += incomingParamVector[j] * p.m_beta; } } } switch(m_modelType) { case MULTINOMIALLOGISTIC: computeProbabilitiesMultinomialLogistic(responses); break; case REGRESSION: // nothing to be done break; case GENERALLINEAR: case GENERALIZEDLINEAR: if (m_linkFunction != LinkFunction.NONE) { computeResponseGeneralizedLinear(incomingInst, responses); } else { throw new Exception("[GeneralRegression] no link function specified!"); } break; case ORDINALMULTINOMIAL: if (m_cumulativeLinkFunction != CumulativeLinkFunction.NONE) { computeResponseOrdinalMultinomial(incomingInst, responses); } else { throw new Exception("[GeneralRegression] no cumulative link function specified!"); } break; default: throw new Exception("[GeneralRegression] unknown model type"); } } /** * Computes probabilities for the multinomial logistic model type. * * @param responses will hold the responses computed by the function.
*/ private static void computeProbabilitiesMultinomialLogistic(double[] responses) { double[] r = responses.clone(); for (int j = 0; j < r.length; j++) { double sum = 0; boolean overflow = false; for (int k = 0; k < r.length; k++) { if (r[k] - r[j] > 700) { overflow = true; break; } sum += Math.exp(r[k] - r[j]); } if (overflow) { responses[j] = 0.0; } else { responses[j] = 1.0 / sum; } } } /** * Computes responses for the general linear and generalized linear model * types. * * @param incomingInst the raw incoming instance values (after missing value * replacement and outlier treatment etc). * @param responses will hold the responses computed by the function * @throws Exception if a problem occurs. */ private void computeResponseGeneralizedLinear(double[] incomingInst, double[] responses) throws Exception { double[] r = responses.clone(); double offset = 0; if (m_offsetVariable != null) { Attribute offsetAtt = m_miningSchema.getFieldsAsInstances().attribute(m_offsetVariable); if (offsetAtt == null) { throw new Exception("[GeneralRegression] unable to find offset variable " + m_offsetVariable + " in the mining schema!"); } offset = incomingInst[offsetAtt.index()]; } else if (!Double.isNaN(m_offsetValue)) { offset = m_offsetValue; } double trials = 1; if (m_trialsVariable != null) { Attribute trialsAtt = m_miningSchema.getFieldsAsInstances().attribute(m_trialsVariable); if (trialsAtt == null) { throw new Exception("[GeneralRegression] unable to find trials variable " + m_trialsVariable + " in the mining schema!"); } trials = incomingInst[trialsAtt.index()]; } else if (!Double.isNaN(m_trialsValue)) { trials = m_trialsValue; } double distParam = 0; if (m_linkFunction == LinkFunction.NEGBIN && m_distribution == Distribution.NEGBINOMIAL) { if (Double.isNaN(m_distParameter)) { throw new Exception("[GeneralRegression] no distribution parameter defined!"); } distParam = m_distParameter; } double linkParam = 0; if (m_linkFunction == LinkFunction.POWER || m_linkFunction == LinkFunction.ODDSPOWER) { if (Double.isNaN(m_linkParameter)) { throw new Exception("[GeneralRegression] no link parameter defined!"); } linkParam = m_linkParameter; } for (int i = 0; i < r.length; i++) { responses[i] = m_linkFunction.eval(r[i], offset, trials, distParam, linkParam); } } /** * Computes responses for the ordinal multinomial model type. * * @param incomingInst the raw incoming instance values (after missing value * replacement and outlier treatment etc). * @param responses will hold the responses computed by the function * @throws Exception if a problem occurs. */ private void computeResponseOrdinalMultinomial(double[] incomingInst, double[] responses) throws Exception { double[] r = responses.clone(); double offset = 0; if (m_offsetVariable != null) { Attribute offsetAtt = m_miningSchema.getFieldsAsInstances().attribute(m_offsetVariable); if (offsetAtt == null) { throw new Exception("[GeneralRegression] unable to find offset variable " + m_offsetVariable + " in the mining schema!"); } offset = incomingInst[offsetAtt.index()]; } else if (!Double.isNaN(m_offsetValue)) { offset = m_offsetValue; } for (int i = 0; i < r.length; i++) { if (i == 0) { responses[i] = m_cumulativeLinkFunction.eval(r[i], offset); } else if (i == (r.length - 1)) { responses[i] = 1.0 - responses[i - 1]; } else { responses[i] = m_cumulativeLinkFunction.eval(r[i], offset) - responses[i - 1]; } } } /* (non-Javadoc) * @see weka.core.RevisionHandler#getRevision() */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
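The overflow guard inside computeProbabilitiesMultinomialLogistic above is easy to miss in the flattened source, so here is a self-contained sketch of the same computation. The class and method names (SoftmaxSketch, softmax) are illustrative only and are not part of the Weka sources: the probability for class j is exp(r_j)/sum_k exp(r_k), computed as 1/sum_k exp(r_k - r_j) so that no large positive value is ever exponentiated.

public final class SoftmaxSketch {
  // Overflow-safe softmax: mirrors the loop structure used by
  // GeneralRegression.computeProbabilitiesMultinomialLogistic().
  public static double[] softmax(double[] responses) {
    double[] probs = new double[responses.length];
    for (int j = 0; j < responses.length; j++) {
      double sum = 0;
      boolean overflow = false;
      for (int k = 0; k < responses.length; k++) {
        // if some r_k exceeds r_j by more than ~700, exp() would overflow a
        // double; the true probability of class j is then effectively zero
        if (responses[k] - responses[j] > 700) { overflow = true; break; }
        sum += Math.exp(responses[k] - responses[j]);
      }
      probs[j] = overflow ? 0.0 : 1.0 / sum;
    }
    return probs;
  }

  public static void main(String[] args) {
    // toy responses; the printed probabilities sum to 1
    for (double p : softmax(new double[] {2.0, 1.0, 0.1})) {
      System.out.println(p);
    }
  }
}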
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/pmml
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/pmml/consumer/NeuralNetwork.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * NeuralNetwork.java * Copyright (C) 2008-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.pmml.consumer; import java.io.Serializable; import java.util.ArrayList; import java.util.HashMap; import org.w3c.dom.Element; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import weka.core.Attribute; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.pmml.DerivedFieldMetaInfo; import weka.core.pmml.FieldMetaInfo; import weka.core.pmml.MiningSchema; import weka.core.pmml.NormContinuous; import weka.core.pmml.TargetMetaInfo; /** * Class implementing import of PMML Neural Network model. Can be used as a Weka * classifier for prediction (buildClassifier() raises an Exception). * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision$ */ public class NeuralNetwork extends PMMLClassifier { /** * For serialization */ private static final long serialVersionUID = -4545904813133921249L; /** * Small inner class for a NeuralInput (essentially just * wraps a DerivedField and adds an ID) */ static class NeuralInput implements Serializable { /** * For serialization */ private static final long serialVersionUID = -1902233762824835563L; /** Field that this input refers to */ private DerivedFieldMetaInfo m_field; /** ID string */ private String m_ID = null; private String getID() { return m_ID; } protected NeuralInput(Element input, MiningSchema miningSchema) throws Exception { m_ID = input.getAttribute("id"); NodeList fL = input.getElementsByTagName("DerivedField"); if (fL.getLength() != 1) { throw new Exception("[NeuralInput] expecting just one derived field!"); } Element dF = (Element)fL.item(0); Instances allFields = miningSchema.getFieldsAsInstances(); ArrayList<Attribute> fieldDefs = new ArrayList<Attribute>(); for (int i = 0; i < allFields.numAttributes(); i++) { fieldDefs.add(allFields.attribute(i)); } m_field = new DerivedFieldMetaInfo(dF, fieldDefs, miningSchema.getTransformationDictionary()); } protected double getValue(double[] incoming) throws Exception { return m_field.getDerivedValue(incoming); } public String toString() { StringBuffer temp = new StringBuffer(); temp.append("Neural input (" + getID() + ")\n"); temp.append(m_field); return temp.toString(); } } /** * Inner class representing a layer in the network.
*/ class NeuralLayer implements Serializable { /** * For serialization */ private static final long serialVersionUID = -8386042001675763922L; /** The number of neurons in this layer */ private int m_numNeurons = 0; /** Activation function (if defined, overrides one in NeuralNetwork) */ private ActivationFunction m_layerActivationFunction = null; /** Threshold (if defined overrides one in NeuralNetwork) */ private double m_layerThreshold = Double.NaN; /** Width (if defined overrides one in NeuralNetwork) */ private double m_layerWidth = Double.NaN; /** Altitude (if defined overrides one in NeuralNetwork) */ private double m_layerAltitude = Double.NaN; /** Normalization (if defined overrides one in NeuralNetwork) */ private Normalization m_layerNormalization = null; /** The neurons at this hidden layer */ private Neuron[] m_layerNeurons = null; /** Stores the output of this layer (for given inputs) */ private HashMap<String, Double> m_layerOutput = new HashMap<String, Double>(); protected NeuralLayer(Element layerE) { String activationFunction = layerE.getAttribute("activationFunction"); if (activationFunction != null && activationFunction.length() > 0) { for (ActivationFunction a : ActivationFunction.values()) { if (a.toString().equals(activationFunction)) { m_layerActivationFunction = a; break; } } } else { // use the network-level activation function m_layerActivationFunction = m_activationFunction; } String threshold = layerE.getAttribute("threshold"); if (threshold != null && threshold.length() > 0) { m_layerThreshold = Double.parseDouble(threshold); } else { // use network-level threshold m_layerThreshold = m_threshold; } String width = layerE.getAttribute("width"); if (width != null && width.length() > 0) { m_layerWidth = Double.parseDouble(width); } else { // use network-level width m_layerWidth = m_width; } String altitude = layerE.getAttribute("altitude"); if (altitude != null && altitude.length() > 0) { m_layerAltitude = Double.parseDouble(altitude); } else { // use network-level altitude m_layerAltitude = m_altitude; } String normMethod = layerE.getAttribute("normalizationMethod"); if (normMethod != null && normMethod.length() > 0) { for (Normalization n : Normalization.values()) { if (n.toString().equals(normMethod)) { m_layerNormalization = n; break; } } } else { // use network-level normalization method m_layerNormalization = m_normalizationMethod; } NodeList neuronL = layerE.getElementsByTagName("Neuron"); m_numNeurons = neuronL.getLength(); m_layerNeurons = new Neuron[m_numNeurons]; for (int i = 0; i < neuronL.getLength(); i++) { Node neuronN = neuronL.item(i); if (neuronN.getNodeType() == Node.ELEMENT_NODE) { m_layerNeurons[i] = new Neuron((Element)neuronN, this); } } } protected ActivationFunction getActivationFunction() { return m_layerActivationFunction; } protected double getThreshold() { return m_layerThreshold; } protected double getWidth() { return m_layerWidth; } protected double getAltitude() { return m_layerAltitude; } protected Normalization getNormalization() { return m_layerNormalization; } /** * Compute the output values for this layer. 
* * @param incoming the incoming values * @return the output values for this layer * @throws Exception if there is a problem computing the outputs */ protected HashMap<String, Double> computeOutput(HashMap<String, Double> incoming) throws Exception { m_layerOutput.clear(); double normSum = 0; for (int i = 0; i < m_layerNeurons.length; i++) { double neuronOut = m_layerNeurons[i].getValue(incoming); String neuronID = m_layerNeurons[i].getID(); if (m_layerNormalization == Normalization.SOFTMAX) { normSum += Math.exp(neuronOut); } else if (m_layerNormalization == Normalization.SIMPLEMAX) { normSum += neuronOut; } //System.err.println("Inserting ID " + neuronID + " " + neuronOut); m_layerOutput.put(neuronID, neuronOut); } // apply the normalization (if necessary) if (m_layerNormalization != Normalization.NONE) { for (int i = 0; i < m_layerNeurons.length; i++) { double val = m_layerOutput.get(m_layerNeurons[i].getID()); // System.err.println("Normalizing ID " + m_layerNeurons[i].getID() + " " + val); if (m_layerNormalization == Normalization.SOFTMAX) { val = Math.exp(val) / normSum; } else { val = (val / normSum); } m_layerOutput.put(m_layerNeurons[i].getID(), val); } } return m_layerOutput; } public String toString() { StringBuffer temp = new StringBuffer(); temp.append("activation: " + getActivationFunction() + "\n"); if (!Double.isNaN(getThreshold())) { temp.append("threshold: " + getThreshold() + "\n"); } if (!Double.isNaN(getWidth())) { temp.append("width: " + getWidth() + "\n"); } if (!Double.isNaN(getAltitude())) { temp.append("altitude: " + getAltitude() + "\n"); } temp.append("normalization: " + m_layerNormalization + "\n"); for (int i = 0; i < m_numNeurons; i++) { temp.append(m_layerNeurons[i] + "\n"); } return temp.toString(); } } /** * Inner class encapsulating a Neuron */ static class Neuron implements Serializable { /** * For serialization */ private static final long serialVersionUID = -3817434025682603443L; /** ID string */ private String m_ID = null; /** The layer we belong to (for accessing activation function, threshold etc.) 
*/ private NeuralLayer m_layer; /** The bias */ private double m_bias = 0.0; /** The width (if defined overrides the one in NeuralLayer or NeuralNetwork) */ private double m_neuronWidth = Double.NaN; /** The altitude (if defined overrides the one in NeuralLayer or NeuralNetwork) */ private double m_neuronAltitude = Double.NaN; /** The IDs of the neurons/neural inputs that we are connected to */ private String[] m_connectionIDs = null; /** The weights corresponding to the connections */ private double[] m_weights = null; protected Neuron(Element neuronE, NeuralLayer layer) { m_layer = layer; m_ID = neuronE.getAttribute("id"); String bias = neuronE.getAttribute("bias"); if (bias != null && bias.length() > 0) { m_bias = Double.parseDouble(bias); } String width = neuronE.getAttribute("width"); if (width != null && width.length() > 0) { m_neuronWidth = Double.parseDouble(width); } String altitude = neuronE.getAttribute("altitude"); if (altitude != null && altitude.length() > 0) { m_neuronAltitude = Double.parseDouble(altitude); } // get the connection details NodeList conL = neuronE.getElementsByTagName("Con"); m_connectionIDs = new String[conL.getLength()]; m_weights = new double[conL.getLength()]; for (int i = 0; i < conL.getLength(); i++) { Node conN = conL.item(i); if (conN.getNodeType() == Node.ELEMENT_NODE) { Element conE = (Element)conN; m_connectionIDs[i] = conE.getAttribute("from"); String weight = conE.getAttribute("weight"); m_weights[i] = Double.parseDouble(weight); } } } protected String getID() { return m_ID; } /** * Compute the output of this Neuron. * * @param incoming a Map of input values. The keys are the IDs * of incoming connections (either neural inputs or neurons) and * the values are the output values of the neural input/neuron in * question. * * @return the output of this neuron * @throws Exception if any of our incoming connection IDs cannot be * located in the Map */ protected double getValue(HashMap<String, Double> incoming) throws Exception { double z = 0; double result = Double.NaN; double width = (Double.isNaN(m_neuronWidth)) ? m_layer.getWidth() : m_neuronWidth; z = m_bias; for (int i = 0; i < m_connectionIDs.length; i++) { Double inVal = incoming.get(m_connectionIDs[i]); if (inVal == null) { throw new Exception("[Neuron] unable to find connection " + m_connectionIDs[i] + " in input Map!"); } if (m_layer.getActivationFunction() != ActivationFunction.RADIALBASIS) { // multiply with weight double inV = inVal.doubleValue() * m_weights[i]; z += inV; } else { // Euclidean distance to the center (stored in m_weights) double inV = Math.pow((inVal.doubleValue() - m_weights[i]), 2.0); z += inV; } } // apply the width if necessary if (m_layer.getActivationFunction() == ActivationFunction.RADIALBASIS) { z /= (2.0 * (width * width)); } double threshold = m_layer.getThreshold(); double altitude = (Double.isNaN(m_neuronAltitude)) ? m_layer.getAltitude() : m_neuronAltitude; double fanIn = m_connectionIDs.length; result = m_layer.getActivationFunction().eval(z, threshold, altitude, fanIn); return result; } public String toString() { StringBuffer temp = new StringBuffer(); temp.append("Neuron (" + m_ID + ") [bias:" + m_bias); if (!Double.isNaN(m_neuronWidth)) { temp.append(" width:" + m_neuronWidth); } if (!Double.isNaN(m_neuronAltitude)) { temp.append(" altitude:" + m_neuronAltitude); } temp.append("]\n"); temp.append(" con. 
(ID:weight): "); for (int i = 0; i < m_connectionIDs.length; i++) { temp.append(m_connectionIDs[i] + ":" + Utils.doubleToString(m_weights[i], 2)); if ((i + 1) % 10 == 0 || i == m_connectionIDs.length - 1) { temp.append("\n "); } else { temp.append(", "); } } return temp.toString(); } } static class NeuralOutputs implements Serializable { /** * For serialization */ private static final long serialVersionUID = -233611113950482952L; /** The neurons we are mapping */ private String[] m_outputNeurons = null; /** * In the case of a nominal class, the index of the value * being predicted by each output neuron */ private int[] m_categoricalIndexes = null; /** The class attribute we are mapping to */ private Attribute m_classAttribute = null; /** Used when the class is numeric */ private NormContinuous m_regressionMapping = null; protected NeuralOutputs(Element outputs, MiningSchema miningSchema) throws Exception { m_classAttribute = miningSchema.getMiningSchemaAsInstances().classAttribute(); int vals = (m_classAttribute.isNumeric()) ? 1 : m_classAttribute.numValues(); m_outputNeurons = new String[vals]; m_categoricalIndexes = new int[vals]; NodeList outputL = outputs.getElementsByTagName("NeuralOutput"); if (outputL.getLength() != m_outputNeurons.length) { throw new Exception("[NeuralOutputs] the number of neural outputs does not match " + "the number expected!"); } for (int i = 0; i < outputL.getLength(); i++) { Node outputN = outputL.item(i); if (outputN.getNodeType() == Node.ELEMENT_NODE) { Element outputE = (Element)outputN; // get the ID for this output neuron m_outputNeurons[i] = outputE.getAttribute("outputNeuron"); if (m_classAttribute.isNumeric()) { // get the single norm continuous NodeList contL = outputE.getElementsByTagName("NormContinuous"); if (contL.getLength() != 1) { throw new Exception("[NeuralOutputs] Should be exactly one norm continuous element " + "for numeric class!"); } Node normContNode = contL.item(0); String attName = ((Element)normContNode).getAttribute("field"); Attribute dummyTargetDef = new Attribute(attName); ArrayList<Attribute> dummyFieldDefs = new ArrayList<Attribute>(); dummyFieldDefs.add(dummyTargetDef); m_regressionMapping = new NormContinuous((Element)normContNode, FieldMetaInfo.Optype.CONTINUOUS, dummyFieldDefs); break; } else { // we just need to grab the categorical value (out of the NormDiscrete element) // that this output neuron is associated with NodeList discL = outputE.getElementsByTagName("NormDiscrete"); if (discL.getLength() != 1) { throw new Exception("[NeuralOutputs] Should be only one norm discrete element " + "per derived field/neural output for a nominal class!"); } Node normDiscNode = discL.item(0); String attValue = ((Element)normDiscNode).getAttribute("value"); int index = m_classAttribute.indexOfValue(attValue); if (index < 0) { throw new Exception("[NeuralOutputs] Can't find specified target value " + attValue + " in class attribute " + m_classAttribute.name()); } m_categoricalIndexes[i] = index; } } } } /** * Compute the output. Either a probability distribution or a single * value (regression). 
* * @param incoming the values from the last hidden layer * @param preds the array to fill with predicted values * @throws Exception if there is a problem computing the output */ protected void getOutput(HashMap<String, Double> incoming, double[] preds) throws Exception { if (preds.length != m_outputNeurons.length) { throw new Exception("[NeuralOutputs] Incorrect number of predictions requested: " + preds.length + " requested, " + m_outputNeurons.length + " expected"); } for (int i = 0; i < m_outputNeurons.length; i++) { Double neuronOut = incoming.get(m_outputNeurons[i]); if (neuronOut == null) { throw new Exception("[NeuralOutputs] Unable to find output neuron " + m_outputNeurons[i] + " in the incoming HashMap!!"); } if (m_classAttribute.isNumeric()) { // will be only one output neuron anyway preds[0] = neuronOut.doubleValue(); preds[0] = m_regressionMapping.getResultInverse(preds); } else { // clip at zero // preds[m_categoricalIndexes[i]] = (neuronOut < 0) ? 0.0 : neuronOut; preds[m_categoricalIndexes[i]] = neuronOut; } } if (m_classAttribute.isNominal()) { // check for negative values and adjust double min = preds[Utils.minIndex(preds)]; if (min < 0) { for (int i = 0; i < preds.length; i++) { preds[i] -= min; } } // do a simplemax normalization Utils.normalize(preds); } } public String toString() { StringBuffer temp = new StringBuffer(); for (int i = 0; i < m_outputNeurons.length; i++) { temp.append("Output neuron (" + m_outputNeurons[i] + ")\n"); temp.append("mapping:\n"); if (m_classAttribute.isNumeric()) { temp.append(m_regressionMapping + "\n"); } else { temp.append(m_classAttribute.name() + " = " + m_classAttribute.value(m_categoricalIndexes[i]) + "\n"); } } return temp.toString(); } } /** * Enumerated type for the mining function */ enum MiningFunction { CLASSIFICATION, REGRESSION; } /** The mining function */ protected MiningFunction m_functionType = MiningFunction.CLASSIFICATION; /** * Enumerated type for the activation function.
*/ enum ActivationFunction { THRESHOLD("threshold") { double eval(double z, double threshold, double altitude, double fanIn) { if (z > threshold) { return 1.0; } return 0.0; } }, LOGISTIC("logistic") { double eval(double z, double threshold, double altitude, double fanIn) { return 1.0 / (1.0 + Math.exp(-z)); } }, TANH("tanh") { double eval(double z, double threshold, double altitude, double fanIn) { double a = Math.exp( z ); double b = Math.exp( -z ); return ((a-b)/(a+b)); //return (1.0 - Math.exp(-2.0 * z)) / (1.0 + Math.exp(-2.0 * z)); } }, IDENTITY("identity") { double eval(double z, double threshold, double altitude, double fanIn) { return z; } }, EXPONENTIAL("exponential") { double eval(double z, double threshold, double altitude, double fanIn) { return Math.exp(z); } }, RECIPROCAL("reciprocal") { double eval(double z, double threshold, double altitude, double fanIn) { return 1.0 / z; } }, SQUARE("square") { double eval(double z, double threshold, double altitude, double fanIn) { return z * z; } }, GAUSS("gauss") { double eval(double z, double threshold, double altitude, double fanIn) { return Math.exp(-(z * z)); } }, SINE("sine") { double eval(double z, double threshold, double altitude, double fanIn) { return Math.sin(z); } }, COSINE("cosine") { double eval(double z, double threshold, double altitude, double fanIn) { return Math.cos(z); } }, ELLICOT("ellicot") { double eval(double z, double threshold, double altitude, double fanIn) { return z / (1.0 + Math.abs(z)); } }, ARCTAN("arctan") { double eval(double z, double threshold, double altitude, double fanIn) { return 2.0 * Math.atan(z) / Math.PI; } }, RADIALBASIS("radialBasis") { double eval(double z, double threshold, double altitude, double fanIn) { return Math.exp(fanIn * Math.log(altitude) - z); } }; abstract double eval(double z, double threshold, double altitude, double fanIn); private final String m_stringVal; ActivationFunction(String name) { m_stringVal = name; } public String toString() { return m_stringVal; } } /** The activation function to use */ protected ActivationFunction m_activationFunction = ActivationFunction.ARCTAN; /** * Enumerated type for the normalization method */ enum Normalization { NONE ("none"), SIMPLEMAX ("simplemax"), SOFTMAX ("softmax"); private final String m_stringVal; Normalization(String name) { m_stringVal = name; } public String toString() { return m_stringVal; } } /** The normalization method */ protected Normalization m_normalizationMethod = Normalization.NONE; /** Threshold activation */ protected double m_threshold = 0.0; // default = 0 /** Width for radial basis */ protected double m_width = Double.NaN; // no default /** Altitude for radial basis */ protected double m_altitude = 1.0; // default = 1 /** The number of inputs to the network */ protected int m_numberOfInputs = 0; /** Number of hidden layers in the network */ protected int m_numberOfLayers = 0; /** The inputs to the network */ protected NeuralInput[] m_inputs = null; /** A map for storing network input values (computed from an incoming instance) */ protected HashMap<String, Double> m_inputMap = new HashMap<String, Double>(); /** The hidden layers in the network */ protected NeuralLayer[] m_layers = null; /** The outputs of the network */ protected NeuralOutputs m_outputs = null; public NeuralNetwork(Element model, Instances dataDictionary, MiningSchema miningSchema) throws Exception { super(dataDictionary, miningSchema); String fn = model.getAttribute("functionName"); if (fn.equals("regression")) { m_functionType = 
MiningFunction.REGRESSION; } String act = model.getAttribute("activationFunction"); if (act == null || act.length() == 0) { throw new Exception("[NeuralNetwork] no activation function defined"); } // get the activation function for (ActivationFunction a : ActivationFunction.values()) { if (a.toString().equals(act)) { m_activationFunction = a; break; } } // get the normalization method (if specified) String norm = model.getAttribute("normalizationMethod"); if (norm != null && norm.length() > 0) { for (Normalization n : Normalization.values()) { if (n.toString().equals(norm)) { m_normalizationMethod = n; break; } } } String thresh = model.getAttribute("threshold"); if (thresh != null && thresh.length() > 0) { m_threshold = Double.parseDouble(thresh); } String width = model.getAttribute("width"); if (width != null && width.length() > 0) { m_width = Double.parseDouble(width); } String alt = model.getAttribute("altitude"); if (alt != null && alt.length() > 0) { m_altitude = Double.parseDouble(alt); } // get all the inputs NodeList inputL = model.getElementsByTagName("NeuralInput"); m_numberOfInputs = inputL.getLength(); m_inputs = new NeuralInput[m_numberOfInputs]; for (int i = 0; i < m_numberOfInputs; i++) { Node inputN = inputL.item(i); if (inputN.getNodeType() == Node.ELEMENT_NODE) { NeuralInput nI = new NeuralInput((Element)inputN, m_miningSchema); m_inputs[i] = nI; } } // get the layers NodeList layerL = model.getElementsByTagName("NeuralLayer"); m_numberOfLayers = layerL.getLength(); m_layers = new NeuralLayer[m_numberOfLayers]; for (int i = 0; i < m_numberOfLayers; i++) { Node layerN = layerL.item(i); if (layerN.getNodeType() == Node.ELEMENT_NODE) { NeuralLayer nL = new NeuralLayer((Element)layerN); m_layers[i] = nL; } } // get the outputs NodeList outputL = model.getElementsByTagName("NeuralOutputs"); if (outputL.getLength() != 1) { throw new Exception("[NeuralNetwork] Should be just one NeuralOutputs element defined!"); } m_outputs = new NeuralOutputs((Element)outputL.item(0), m_miningSchema); } /* (non-Javadoc) * @see weka.core.RevisionHandler#getRevision() */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Classifies the given test instance. The instance has to belong to a * dataset when it's being classified. * * @param inst the instance to be classified * @return the predicted most likely class for the instance or * Utils.missingValue() if no prediction is made * @exception Exception if an error occurred during the prediction */ public double[] distributionForInstance(Instance inst) throws Exception { if (!m_initialized) { mapToMiningSchema(inst.dataset()); } double[] preds = null; if (m_miningSchema.getFieldsAsInstances().classAttribute().isNumeric()) { preds = new double[1]; } else { preds = new double[m_miningSchema.getFieldsAsInstances().classAttribute().numValues()]; } double[] incoming = m_fieldsMap.instanceToSchema(inst, m_miningSchema); boolean hasMissing = false; for (int i = 0; i < incoming.length; i++) { if (i != m_miningSchema.getFieldsAsInstances().classIndex() && Double.isNaN(incoming[i])) { hasMissing = true; //System.err.println("Missing value for att : " + m_miningSchema.getFieldsAsInstances().attribute(i).name()); break; } } if (hasMissing) { if (!m_miningSchema.hasTargetMetaData()) { String message = "[NeuralNetwork] WARNING: Instance to predict has missing value(s) but " + "there is no missing value handling meta data and no " + "prior probabilities/default value to fall back to. 
No " + "prediction will be made (" + ((m_miningSchema.getFieldsAsInstances().classAttribute().isNominal() || m_miningSchema.getFieldsAsInstances().classAttribute().isString()) ? "zero probabilities output)." : "NaN output)."); if (m_log == null) { System.err.println(message); } else { m_log.logMessage(message); } if (m_miningSchema.getFieldsAsInstances().classAttribute().isNumeric()) { preds[0] = Utils.missingValue(); } return preds; } else { // use prior probablilities/default value TargetMetaInfo targetData = m_miningSchema.getTargetMetaData(); if (m_miningSchema.getFieldsAsInstances().classAttribute().isNumeric()) { preds[0] = targetData.getDefaultValue(); } else { Instances miningSchemaI = m_miningSchema.getFieldsAsInstances(); for (int i = 0; i < miningSchemaI.classAttribute().numValues(); i++) { preds[i] = targetData.getPriorProbability(miningSchemaI.classAttribute().value(i)); } } return preds; } } else { // construct the input to the network for this instance m_inputMap.clear(); for (int i = 0; i < m_inputs.length; i++) { double networkInVal = m_inputs[i].getValue(incoming); String ID = m_inputs[i].getID(); m_inputMap.put(ID, networkInVal); } // now compute the output of each layer HashMap<String, Double> layerOut = m_layers[0].computeOutput(m_inputMap); for (int i = 1; i < m_layers.length; i++) { layerOut = m_layers[i].computeOutput(layerOut); } // now do the output m_outputs.getOuput(layerOut, preds); } return preds; } public String toString() { StringBuffer temp = new StringBuffer(); temp.append("PMML version " + getPMMLVersion()); if (!getCreatorApplication().equals("?")) { temp.append("\nApplication: " + getCreatorApplication()); } temp.append("\nPMML Model: Neural network"); temp.append("\n\n"); temp.append(m_miningSchema); temp.append("Inputs:\n"); for (int i = 0; i < m_inputs.length; i++) { temp.append(m_inputs[i] + "\n"); } for (int i = 0; i < m_layers.length; i++) { temp.append("Layer: " + (i+1) + "\n"); temp.append(m_layers[i] + "\n"); } temp.append("Outputs:\n"); temp.append(m_outputs); return temp.toString(); } }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/pmml
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/pmml/consumer/PMMLClassifier.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * PMMLClassifier.java * Copyright (C) 2008-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.pmml.consumer; import java.io.Serializable; import org.w3c.dom.Document; import org.w3c.dom.Element; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import weka.classifiers.AbstractClassifier; import weka.core.Instances; import weka.core.pmml.MappingInfo; import weka.core.pmml.MiningSchema; import weka.core.pmml.PMMLModel; import weka.gui.Logger; /** * Abstract base class for all PMML classifiers. * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision$ */ public abstract class PMMLClassifier extends AbstractClassifier implements Serializable, PMMLModel { /** For serialization */ private static final long serialVersionUID = -5371600590320702971L; /** PMML version */ protected String m_pmmlVersion = "?"; /** Creator application */ protected String m_creatorApplication = "?"; /** Logger */ protected Logger m_log = null; /** The data dictionary */ protected Instances m_dataDictionary; /** The fields and meta data used by the model */ protected MiningSchema m_miningSchema; /** The mapping between mining schema fields and incoming instance attributes */ protected transient MappingInfo m_fieldsMap; /** Has the classifier been initialized (i.e. have we established a mapping between the mining schema and the incoming instances)? */ protected transient boolean m_initialized = false; /** * Constructor. * * @param dataDictionary the data dictionary * @param miningSchema the mining schema */ PMMLClassifier(Instances dataDictionary, MiningSchema miningSchema) { m_dataDictionary = dataDictionary; m_miningSchema = miningSchema; } /** * Set the version of PMML used for this model. * * @param doc the Document encapsulating the pmml */ public void setPMMLVersion(Document doc) { NodeList tempL = doc.getElementsByTagName("PMML"); Node pmml = tempL.item(0); if (pmml.getNodeType() == Node.ELEMENT_NODE) { String version = ((Element)pmml).getAttribute("version"); if (version.length() > 0) { m_pmmlVersion = version; } } } /** * Set the name of the application (if specified) that created this * model * * @param doc the Document encapsulating the pmml */ public void setCreatorApplication(Document doc) { NodeList tempL = doc.getElementsByTagName("Header"); Node header = tempL.item(0); if (header.getNodeType() == Node.ELEMENT_NODE) { NodeList appL = ((Element)header).getElementsByTagName("Application"); if (appL.getLength() > 0) { Node app = appL.item(0); if (app.getNodeType() == Node.ELEMENT_NODE) { String appName = ((Element)app).getAttribute("name"); if (appName != null && appName.length() > 0) { String version = ((Element)app).getAttribute("version"); if (version != null && version.length() > 0) { appName += " v. " + version; } m_creatorApplication = appName; } } } } } /** * Get the data dictionary. 
* * @return the data dictionary */ public Instances getDataDictionary() { return m_dataDictionary; } /** * Get the mining schema for this model. * * @return the mining schema */ public MiningSchema getMiningSchema() { return m_miningSchema; } /** * Get the PMML version used for this model. * * @return the PMML version */ public String getPMMLVersion() { return m_pmmlVersion; } /** * Get the name of the application that created this model * * @return the name of the creating application or null * if not specified in the pmml. */ public String getCreatorApplication() { return m_creatorApplication; } /** * Set a logger to use. * * @param log the logger to use */ public void setLog(Logger log) { m_log = log; } /** * Get the logger. * * @return the logger (or null if none is being used) */ public Logger getLog() { return m_log; } /** * Throw an exception - PMML models are pre-built. * * @param data the Instances to learn from * @throws Exception if something goes wrong */ public void buildClassifier(Instances data) throws Exception { throw new Exception("[PMMLClassifier] PMML models are pre-built " + "and static!"); } /** * Signal that a scoring run has been completed. Resets * the initialized state to false so that a subsequent * scoring run will trigger the mapping of the mining * schema to incoming instances. If not called after a * scoring run, then the classifier will assume that * the current mapping is still valid. */ public void done() { m_initialized = false; m_fieldsMap = null; } /** * Map mining schema to incoming instances. * * @param dataSet the structure of the incoming Instances * @throws Exception if something goes wrong */ public void mapToMiningSchema(Instances dataSet) throws Exception { if (m_fieldsMap == null) { // PMMLUtils.mapToMiningSchema(dataSet, m_miningSchema); m_fieldsMap = new MappingInfo(dataSet, m_miningSchema, m_log); m_initialized = true; } } /** * Get a textual description of the mapping between mining schema * fields and incoming data fields. * * @return a description of the fields mapping as a String or null if * no mapping has been constructed yet. */ public String getFieldsMappingString() { if (!m_initialized) { return null; } return m_fieldsMap.getFieldsMappingString(); } }
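Every concrete model in this package repeats the same initialization contract against this base class: test m_initialized, call mapToMiningSchema(), then translate the incoming instance through m_fieldsMap. The following minimal subclass is hypothetical (ConstantModel and its trivial prediction are invented purely for illustration) and sits in the same package because the constructor is package-private:

package weka.classifiers.pmml.consumer;

import weka.core.Instance;
import weka.core.Instances;
import weka.core.pmml.MiningSchema;

// Hypothetical skeleton, not a real Weka model: shows the common scoring lifecycle.
public class ConstantModel extends PMMLClassifier {
  private static final long serialVersionUID = 1L;

  ConstantModel(Instances dataDictionary, MiningSchema miningSchema) {
    super(dataDictionary, miningSchema);
  }

  public double[] distributionForInstance(Instance inst) throws Exception {
    if (!m_initialized) {
      mapToMiningSchema(inst.dataset()); // lazily map mining schema fields to incoming attributes
    }
    // values arrive in mining schema order; missing value treatments are applied inside
    double[] incoming = m_fieldsMap.instanceToSchema(inst, m_miningSchema);
    double[] preds = new double[1];
    preds[0] = incoming.length > 0 ? incoming[0] : 0.0; // trivial stand-in "prediction"
    return preds;
  }
}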
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/pmml
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/pmml/consumer/Regression.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Regression.java * Copyright (C) 2008-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.pmml.consumer; import java.io.Serializable; import java.util.ArrayList; import org.w3c.dom.Element; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import weka.core.Attribute; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.pmml.MiningSchema; import weka.core.pmml.TargetMetaInfo; /** * Class implementing import of PMML Regression model. Can be * used as a Weka classifier for prediction only (buildClassifier() * raises an Exception). * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision$ */ public class Regression extends PMMLClassifier implements Serializable { /** For serialization */ private static final long serialVersionUID = -5551125528409488634L; /** * Inner class for encapsulating a regression table */ static class RegressionTable implements Serializable { /** For serialization */ private static final long serialVersionUID = -5259866093996338995L; /** * Abstract inner base class for different predictor types. */ abstract static class Predictor implements Serializable { /** For serialization */ private static final long serialVersionUID = 7043831847273383618L; /** Name of this predictor */ protected String m_name; /** * Index of the attribute in the mining schema that corresponds to this * predictor */ protected int m_miningSchemaAttIndex = -1; /** Coefficient for this predictor */ protected double m_coefficient = 1.0; /** * Constructs a new Predictor. * * @param predictor the <code>Element</code> encapsulating this predictor * @param miningSchema the mining schema as an Instances object * @throws Exception if there is a problem constructing this Predictor */ protected Predictor(Element predictor, Instances miningSchema) throws Exception { m_name = predictor.getAttribute("name"); for (int i = 0; i < miningSchema.numAttributes(); i++) { Attribute temp = miningSchema.attribute(i); if (temp.name().equals(m_name)) { m_miningSchemaAttIndex = i; } } if (m_miningSchemaAttIndex == -1) { throw new Exception("[Predictor] unable to find matching attribute for " + "predictor " + m_name); } String coeff = predictor.getAttribute("coefficient"); if (coeff.length() > 0) { m_coefficient = Double.parseDouble(coeff); } } /** * Returns a textual description of this predictor applicable * to all subclasses. */ public String toString() { return Utils.doubleToString(m_coefficient, 12, 4) + " * "; } /** * Abstract add method. Adds this predictor into the sum for the * current prediction. * * @param preds the prediction computed so far. 
For regression, it is a * single element array; for classification it is a multi-element array * @param input the input instance's values */ public abstract void add(double[] preds, double[] input); } /** * Inner class for a numeric predictor */ protected class NumericPredictor extends Predictor { /** * For serialization */ private static final long serialVersionUID = -4335075205696648273L; /** The exponent*/ protected double m_exponent = 1.0; /** * Constructs a NumericPredictor. * * @param predictor the <code>Element</code> holding the predictor * @param miningSchema the mining schema as an Instances object * @throws Exception if something goes wrong while constructing this * predictor */ protected NumericPredictor(Element predictor, Instances miningSchema) throws Exception { super(predictor, miningSchema); String exponent = predictor.getAttribute("exponent"); if (exponent.length() > 0) { m_exponent = Double.parseDouble(exponent); } } /** * Return a textual description of this predictor. */ public String toString() { String output = super.toString(); output += m_name; if (m_exponent > 1.0 || m_exponent < 1.0) { output += "^" + Utils.doubleToString(m_exponent, 4); } return output; } /** * Adds this predictor into the sum for the * current prediction. * * @param preds the prediction computed so far. For regression, it is a * single element array; for classification it is a multi-element array * @param input the input instance's values */ public void add(double[] preds, double[] input) { if (m_targetCategory == -1) { preds[0] += m_coefficient * Math.pow(input[m_miningSchemaAttIndex], m_exponent); } else { preds[m_targetCategory] += m_coefficient * Math.pow(input[m_miningSchemaAttIndex], m_exponent); } } } /** * Inner class encapsulating a categorical predictor. */ protected class CategoricalPredictor extends Predictor { /**For serialization */ private static final long serialVersionUID = 3077920125549906819L; /** The attribute value for this predictor */ protected String m_valueName; /** The index of the attribute value for this predictor */ protected int m_valueIndex = -1; /** * Constructs a CategoricalPredictor. * * @param predictor the <code>Element</code> containing the predictor * @param miningSchema the mining schema as an Instances object * @throws Exception if something goes wrong while constructing * this predictor */ protected CategoricalPredictor(Element predictor, Instances miningSchema) throws Exception { super(predictor, miningSchema); String valName = predictor.getAttribute("value"); if (valName.length() == 0) { throw new Exception("[CategoricalPredictor] attribute value not specified!"); } m_valueName = valName; Attribute att = miningSchema.attribute(m_miningSchemaAttIndex); if (att.isString()) { // means that there were no Value elements defined in the // data dictionary (and hence the mining schema). // We add our value here. att.addStringValue(m_valueName); } m_valueIndex = att.indexOfValue(m_valueName); /* for (int i = 0; i < att.numValues(); i++) { if (att.value(i).equals(m_valueName)) { m_valueIndex = i; } }*/ if (m_valueIndex == -1) { throw new Exception("[CategoricalPredictor] unable to find value " + m_valueName + " in mining schema attribute " + att.name()); } } /** * Return a textual description of this predictor. */ public String toString() { String output = super.toString(); output += m_name + "=" + m_valueName; return output; } /** * Adds this predictor into the sum for the * current prediction. * * @param preds the prediction computed so far. 
For regression, it is a * single element array; for classification it is a multi-element array * @param input the input instance's values */ public void add(double[] preds, double[] input) { // if the value is equal to the one in the input then add the coefficient if (m_valueIndex == (int)input[m_miningSchemaAttIndex]) { if (m_targetCategory == -1) { preds[0] += m_coefficient; } else { preds[m_targetCategory] += m_coefficient; } } } } /** * Inner class to handle PredictorTerms. */ protected class PredictorTerm implements Serializable { /** For serialization */ private static final long serialVersionUID = 5493100145890252757L; /** The coefficient for this predictor term */ protected double m_coefficient = 1.0; /** the indexes of the terms to be multiplied */ protected int[] m_indexes; /** The names of the terms (attributes) to be multiplied */ protected String[] m_fieldNames; /** * Construct a new PredictorTerm. * * @param predictorTerm the <code>Element</code> describing the predictor term * @param miningSchema the mining schema as an Instances object * @throws Exception if something goes wrong while constructing this * predictor term */ protected PredictorTerm(Element predictorTerm, Instances miningSchema) throws Exception { String coeff = predictorTerm.getAttribute("coefficient"); if (coeff != null && coeff.length() > 0) { try { m_coefficient = Double.parseDouble(coeff); } catch (IllegalArgumentException ex) { throw new Exception("[PredictorTerm] unable to parse coefficient"); } } NodeList fields = predictorTerm.getElementsByTagName("FieldRef"); if (fields.getLength() > 0) { m_indexes = new int[fields.getLength()]; m_fieldNames = new String[fields.getLength()]; for (int i = 0; i < fields.getLength(); i++) { Node fieldRef = fields.item(i); if (fieldRef.getNodeType() == Node.ELEMENT_NODE) { String fieldName = ((Element)fieldRef).getAttribute("field"); if (fieldName != null && fieldName.length() > 0) { boolean found = false; // look for this field in the mining schema for (int j = 0; j < miningSchema.numAttributes(); j++) { if (miningSchema.attribute(j).name().equals(fieldName)) { // all referenced fields MUST be numeric if (!miningSchema.attribute(j).isNumeric()) { throw new Exception("[PredictorTerm] field is not continuous: " + fieldName); } found = true; m_indexes[i] = j; m_fieldNames[i] = fieldName; break; } } if (!found) { throw new Exception("[PredictorTerm] Unable to find field " + fieldName + " in mining schema!"); } } } } } } /** * Return a textual description of this predictor term. */ public String toString() { StringBuffer result = new StringBuffer(); result.append("(" + Utils.doubleToString(m_coefficient, 12, 4)); for (int i = 0; i < m_fieldNames.length; i++) { result.append(" * " + m_fieldNames[i]); } result.append(")"); return result.toString(); } /** * Adds this predictor term into the sum for the * current prediction. * * @param preds the prediction computed so far. 
For regression, it is a * single element array; for classification it is a multi-element array * @param input the input instance's values */ public void add(double[] preds, double[] input) { int indx = 0; if (m_targetCategory != -1) { indx = m_targetCategory; } double result = m_coefficient; for (int i = 0; i < m_indexes.length; i++) { result *= input[m_indexes[i]]; } preds[indx] += result; } } /** Constant for regression model type */ public static final int REGRESSION = 0; /** Constant for classification model type */ public static final int CLASSIFICATION = 1; /** The type of function - regression or classification */ protected int m_functionType = REGRESSION; /** The mining schema */ protected MiningSchema m_miningSchema; /** The intercept */ protected double m_intercept = 0.0; /** classification only */ protected int m_targetCategory = -1; /** Numeric and categorical predictors */ protected ArrayList<Predictor> m_predictors = new ArrayList<Predictor>(); /** Interaction terms */ protected ArrayList<PredictorTerm> m_predictorTerms = new ArrayList<PredictorTerm>(); /** * Return a textual description of this RegressionTable. */ public String toString() { Instances miningSchema = m_miningSchema.getFieldsAsInstances(); StringBuffer temp = new StringBuffer(); temp.append("Regression table:\n"); temp.append(miningSchema.classAttribute().name()); if (m_functionType == CLASSIFICATION) { temp.append("=" + miningSchema. classAttribute().value(m_targetCategory)); } temp.append(" =\n\n"); // do the predictors for (int i = 0; i < m_predictors.size(); i++) { temp.append(m_predictors.get(i).toString() + " +\n"); } // do the predictor terms for (int i = 0; i < m_predictorTerms.size(); i++) { temp.append(m_predictorTerms.get(i).toString() + " +\n"); } temp.append(Utils.doubleToString(m_intercept, 12, 4)); temp.append("\n\n"); return temp.toString(); } /** * Construct a regression table from an <code>Element</code> * * @param table the table to encapsulate * @param functionType the type of function * (regression or classification) * to use * @param mSchema the mining schema * @throws Exception if there is a problem while constructing * this regression table */ protected RegressionTable(Element table, int functionType, MiningSchema mSchema) throws Exception { m_miningSchema = mSchema; m_functionType = functionType; Instances miningSchema = m_miningSchema.getFieldsAsInstances(); // get the intercept String intercept = table.getAttribute("intercept"); if (intercept.length() > 0) { m_intercept = Double.parseDouble(intercept); } // get the target category (if classification) if (m_functionType == CLASSIFICATION) { // target category MUST be defined String targetCat = table.getAttribute("targetCategory"); if (targetCat.length() > 0) { Attribute classA = miningSchema.classAttribute(); for (int i = 0; i < classA.numValues(); i++) { if (classA.value(i).equals(targetCat)) { m_targetCategory = i; } } } if (m_targetCategory == -1) { throw new Exception("[RegressionTable] No target categories defined for classification"); } } // read all the numeric predictors NodeList numericPs = table.getElementsByTagName("NumericPredictor"); for (int i = 0; i < numericPs.getLength(); i++) { Node nP = numericPs.item(i); if (nP.getNodeType() == Node.ELEMENT_NODE) { NumericPredictor numP = new NumericPredictor((Element)nP, miningSchema); m_predictors.add(numP); } } // read all the categorical predictors NodeList categoricalPs = table.getElementsByTagName("CategoricalPredictor"); for (int i = 0; i < categoricalPs.getLength(); i++) { 
Node cP = categoricalPs.item(i); if (cP.getNodeType() == Node.ELEMENT_NODE) { CategoricalPredictor catP = new CategoricalPredictor((Element)cP, miningSchema); m_predictors.add(catP); } } // read all the PredictorTerms NodeList predictorTerms = table.getElementsByTagName("PredictorTerm"); for (int i = 0; i < predictorTerms.getLength(); i++) { Node pT = predictorTerms.item(i); PredictorTerm predT = new PredictorTerm((Element)pT, miningSchema); m_predictorTerms.add(predT); } } public void predict(double[] preds, double[] input) { if (m_targetCategory == -1) { preds[0] = m_intercept; } else { preds[m_targetCategory] = m_intercept; } // add the predictors for (int i = 0; i < m_predictors.size(); i++) { Predictor p = m_predictors.get(i); p.add(preds, input); } // add the PredictorTerms for (int i = 0; i < m_predictorTerms.size(); i++) { PredictorTerm pt = m_predictorTerms.get(i); pt.add(preds, input); } } } /** Description of the algorithm */ protected String m_algorithmName; /** The regression tables for this regression */ protected RegressionTable[] m_regressionTables; /** * Enum for the normalization methods. */ enum Normalization { NONE, SIMPLEMAX, SOFTMAX, LOGIT, PROBIT, CLOGLOG, EXP, LOGLOG, CAUCHIT} /** The normalization to use */ protected Normalization m_normalizationMethod = Normalization.NONE; /** * Constructs a new PMML Regression. * * @param model the <code>Element</code> containing the regression model * @param dataDictionary the data dictionary as an Instances object * @param miningSchema the mining schema * @throws Exception if there is a problem constructing this Regression */ public Regression(Element model, Instances dataDictionary, MiningSchema miningSchema) throws Exception { super(dataDictionary, miningSchema); int functionType = RegressionTable.REGRESSION; // determine function name first String fName = model.getAttribute("functionName"); if (fName.equals("regression")) { functionType = RegressionTable.REGRESSION; } else if (fName.equals("classification")) { functionType = RegressionTable.CLASSIFICATION; } else { throw new Exception("[PMML Regression] Function name not defined in pmml!"); } // do we have an algorithm name? String algName = model.getAttribute("algorithmName"); if (algName != null && algName.length() > 0) { m_algorithmName = algName; } // determine normalization method (if any) m_normalizationMethod = determineNormalization(model); setUpRegressionTables(model, functionType); // convert any string attributes in the mining schema //miningSchema.convertStringAttsToNominal(); } /** * Create all the RegressionTables for this model. 
* * @param model the <code>Element</code> holding this regression model * @param functionType the type of function (regression or * classification) * @throws Exception if there is a problem setting up the regression * tables */ private void setUpRegressionTables(Element model, int functionType) throws Exception { NodeList tableList = model.getElementsByTagName("RegressionTable"); if (tableList.getLength() == 0) { throw new Exception("[Regression] no regression tables defined!"); } m_regressionTables = new RegressionTable[tableList.getLength()]; for (int i = 0; i < tableList.getLength(); i++) { Node table = tableList.item(i); if (table.getNodeType() == Node.ELEMENT_NODE) { RegressionTable tempRTable = new RegressionTable((Element)table, functionType, m_miningSchema); m_regressionTables[i] = tempRTable; } } } /** * Return the type of normalization used for this regression * * @param model the <code>Element</code> holding the model * @return the normalization used in this regression */ private static Normalization determineNormalization(Element model) { Normalization normMethod = Normalization.NONE; String normName = model.getAttribute("normalizationMethod"); if (normName.equals("simplemax")) { normMethod = Normalization.SIMPLEMAX; } else if (normName.equals("softmax")) { normMethod = Normalization.SOFTMAX; } else if (normName.equals("logit")) { normMethod = Normalization.LOGIT; } else if (normName.equals("probit")) { normMethod = Normalization.PROBIT; } else if (normName.equals("cloglog")) { normMethod = Normalization.CLOGLOG; } else if (normName.equals("exp")) { normMethod = Normalization.EXP; } else if (normName.equals("loglog")) { normMethod = Normalization.LOGLOG; } else if (normName.equals("cauchit")) { normMethod = Normalization.CAUCHIT; } return normMethod; } /** * Return a textual description of this Regression model. */ public String toString() { StringBuffer temp = new StringBuffer(); temp.append("PMML version " + getPMMLVersion()); if (!getCreatorApplication().equals("?")) { temp.append("\nApplication: " + getCreatorApplication()); } if (m_algorithmName != null) { temp.append("\nPMML Model: " + m_algorithmName); } temp.append("\n\n"); temp.append(m_miningSchema); for (RegressionTable table : m_regressionTables) { temp.append(table); } if (m_normalizationMethod != Normalization.NONE) { temp.append("Normalization: " + m_normalizationMethod); } temp.append("\n"); return temp.toString(); } /** * Classifies the given test instance. The instance has to belong to a * dataset when it's being classified. * * @param inst the instance to be classified * @return the predicted most likely class for the instance or * Utils.missingValue() if no prediction is made * @exception Exception if an error occurred during the prediction */ public double[] distributionForInstance(Instance inst) throws Exception { if (!m_initialized) { mapToMiningSchema(inst.dataset()); } double[] preds = null; if (m_miningSchema.getFieldsAsInstances().classAttribute().isNumeric()) { preds = new double[1]; } else { preds = new double[m_miningSchema.getFieldsAsInstances().classAttribute().numValues()]; } // create an array of doubles that holds values from the incoming // instance; in order of the fields in the mining schema. We will // also handle missing values and outliers here. // System.err.println(inst); double[] incoming = m_fieldsMap.instanceToSchema(inst, m_miningSchema); // scan for missing values. 
If there are still missing values after instanceToSchema(), // then missing value handling has been deferred to the PMML scheme. The specification // (Regression PMML 3.2) seems to contradict itself with regard to classification and categorical // variables. In one place it states that if a categorical variable is missing then // variable_name=value is 0 for any value. Further down in the document it states: "if // one or more of the y_j cannot be evaluated because the value in one of the referenced // fields is missing, then the following formulas (for computing p_j) do not apply. In // that case the predictions are defined by the priorProbability values in the Target // element". // In this implementation we will default to information in the Target element (default // value for numeric prediction and prior probabilities for classification). If there is // no Target element defined, then an Exception is thrown. boolean hasMissing = false; for (int i = 0; i < incoming.length; i++) { if (i != m_miningSchema.getFieldsAsInstances().classIndex() && Utils.isMissingValue(incoming[i])) { hasMissing = true; break; } } if (hasMissing) { if (!m_miningSchema.hasTargetMetaData()) { String message = "[Regression] WARNING: Instance to predict has missing value(s) but " + "there is no missing value handling meta data and no " + "prior probabilities/default value to fall back to. No " + "prediction will be made (" + ((m_miningSchema.getFieldsAsInstances().classAttribute().isNominal() || m_miningSchema.getFieldsAsInstances().classAttribute().isString()) ? "zero probabilities output)." : "NaN output)."); if (m_log == null) { System.err.println(message); } else { m_log.logMessage(message); } if (m_miningSchema.getFieldsAsInstances().classAttribute().isNumeric()) { preds[0] = Utils.missingValue(); } return preds; } else { // use prior probabilities/default value TargetMetaInfo targetData = m_miningSchema.getTargetMetaData(); if (m_miningSchema.getFieldsAsInstances().classAttribute().isNumeric()) { preds[0] = targetData.getDefaultValue(); } else { Instances miningSchemaI = m_miningSchema.getFieldsAsInstances(); for (int i = 0; i < miningSchemaI.classAttribute().numValues(); i++) { preds[i] = targetData.getPriorProbability(miningSchemaI.classAttribute().value(i)); } } return preds; } } else { // loop through the RegressionTables for (int i = 0; i < m_regressionTables.length; i++) { m_regressionTables[i].predict(preds, incoming); } // Now apply the normalization switch (m_normalizationMethod) { case NONE: // nothing to be done break; case SIMPLEMAX: Utils.normalize(preds); break; case SOFTMAX: for (int i = 0; i < preds.length; i++) { preds[i] = Math.exp(preds[i]); } if (preds.length == 1) { // hack for those models that do binary logistic regression as // a numeric prediction model preds[0] = preds[0] / (preds[0] + 1.0); } else { Utils.normalize(preds); } break; case LOGIT: for (int i = 0; i < preds.length; i++) { preds[i] = 1.0 / (1.0 + Math.exp(-preds[i])); } Utils.normalize(preds); break; case PROBIT: for (int i = 0; i < preds.length; i++) { preds[i] = weka.core.matrix.Maths.pnorm(preds[i]); } Utils.normalize(preds); break; case CLOGLOG: // note this is supposed to be illegal for regression for (int i = 0; i < preds.length; i++) { preds[i] = 1.0 - Math.exp(-Math.exp(-preds[i])); } Utils.normalize(preds); break; case EXP: for (int i = 0; i < preds.length; i++) { preds[i] = Math.exp(preds[i]); } Utils.normalize(preds); break; case LOGLOG: // note this is supposed to be illegal for regression for (int i = 0; 
i < preds.length; i++) { preds[i] = Math.exp(-Math.exp(-preds[i])); } Utils.normalize(preds); break; case CAUCHIT: for (int i = 0; i < preds.length; i++) { preds[i] = 0.5 + (1.0 / Math.PI) * Math.atan(preds[i]); } Utils.normalize(preds); break; default: throw new Exception("[Regression] unknown normalization method"); } // If there is a Target defined, and this is a numeric prediction problem, // then apply any min, max, rescaling etc. if (m_miningSchema.getFieldsAsInstances().classAttribute().isNumeric() && m_miningSchema.hasTargetMetaData()) { TargetMetaInfo targetData = m_miningSchema.getTargetMetaData(); preds[0] = targetData.applyMinMaxRescaleCast(preds[0]); } } return preds; } /* (non-Javadoc) * @see weka.core.RevisionHandler#getRevision() */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
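Of the normalization branches in distributionForInstance() above, SOFTMAX carries the one non-obvious special case: a single-element prediction array means the model expressed binary logistic regression as a numeric output, and exp(z)/(exp(z)+1) recovers the usual sigmoid 1/(1+exp(-z)). A self-contained sketch of just that step (plain Java, no Weka dependency; the inline sum stands in for Utils.normalize()):

// Standalone sketch of the SOFTMAX normalization branch.
public class SoftmaxSketch {
  static void softmax(double[] preds) {
    for (int i = 0; i < preds.length; i++) {
      preds[i] = Math.exp(preds[i]);
    }
    if (preds.length == 1) {
      // binary logistic regression expressed as a single numeric output:
      // exp(z) / (exp(z) + 1) == 1 / (1 + exp(-z))
      preds[0] = preds[0] / (preds[0] + 1.0);
    } else {
      double sum = 0;
      for (double p : preds) {
        sum += p;
      }
      for (int i = 0; i < preds.length; i++) {
        preds[i] /= sum; // equivalent of Utils.normalize(preds)
      }
    }
  }

  public static void main(String[] args) {
    double[] scores = {0.2, 1.5, -0.7}; // example raw regression table outputs
    softmax(scores);
    for (double p : scores) {
      System.out.println(p);
    }
  }
}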
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/pmml
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/pmml/consumer/RuleSetModel.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RuleSetModel.java * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.pmml.consumer; import java.io.Serializable; import java.util.ArrayList; import org.w3c.dom.Element; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import weka.classifiers.pmml.consumer.TreeModel.MiningFunction; import weka.core.Attribute; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.pmml.MiningSchema; /** * Class implementing import of PMML RuleSetModel. Can be used as a Weka * classifier for prediction only (buildClassifier() raises an Exception). * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision$ */ public class RuleSetModel extends PMMLClassifier { /** For serialization */ private static final long serialVersionUID = 1993161168811020547L; /** * Abstract inner base class for Rules */ static abstract class Rule implements Serializable { /** For serialization */ private static final long serialVersionUID = 6236231263477446102L; /** The predicate for this rule */ protected TreeModel.Predicate m_predicate; public Rule(Element ruleE, MiningSchema miningSchema) throws Exception { // Set up the predicate m_predicate = TreeModel.Predicate.getPredicate(ruleE, miningSchema); } /** * Collect the rule(s) that fire for the supplied incoming instance * * @param input a vector of independent and derived independent variables * @param ruleCollection the array list to add any firing rules into */ public abstract void fires(double[] input, ArrayList<SimpleRule> ruleCollection); /** * Get a textual description of this Rule * * @param prefix prefix string (typically some number of spaces) to prepend * @param indent the number of additional spaces to add to the prefix * @return a description of this Rule as a String */ public abstract String toString(String prefix, int indent); } /** * Inner class for representing simple rules */ static class SimpleRule extends Rule { /** For serialization */ private static final long serialVersionUID = -2612893679476049682L; /** The ID for the rule (optional) */ protected String m_ID; /** The predicted value when the rule fires (required) */ protected String m_scoreString; /** * The predicted value as a number (regression) or index (classification) * when the rule fires (required) */ protected double m_score = Utils.missingValue(); /** The number of training/test instances on which the rule fired (optional) */ protected double m_recordCount = Utils.missingValue(); /** * The number of training/test instances on which the rule fired and the * prediction was correct (optional) */ protected double m_nbCorrect = Utils.missingValue(); /** The confidence of the rule (optional) */ protected double m_confidence = Utils.missingValue(); /** The score distributions for this rule (if any) */ protected 
ArrayList<TreeModel.ScoreDistribution> m_scoreDistributions = new ArrayList<TreeModel.ScoreDistribution>(); /** * The relative importance of the rule. May or may not be equal to the * confidence (optional). */ protected double m_weight = Utils.missingValue(); public String toString(String prefix, int indent) { StringBuffer temp = new StringBuffer(); for (int i = 0; i < indent; i++) { prefix += " "; } temp.append(prefix + "Simple rule: " + m_predicate + "\n"); temp.append(prefix + " => " + m_scoreString + "\n"); if (!Utils.isMissingValue(m_recordCount)) { temp.append(prefix + " recordCount: " + m_recordCount + "\n"); } if (!Utils.isMissingValue(m_nbCorrect)) { temp.append(prefix + " nbCorrect: " + m_nbCorrect + "\n"); } if (!Utils.isMissingValue(m_confidence)) { temp.append(prefix + " confidence: " + m_confidence + "\n"); } if (!Utils.isMissingValue(m_weight)) { temp.append(prefix + " weight: " + m_weight + "\n"); } return temp.toString(); } public String toString() { return toString("", 0); } /** * Constructor for a simple rule * * @param ruleE the XML element holding the simple rule * @param miningSchema the mining schema to use * @throws Exception if something goes wrong */ public SimpleRule(Element ruleE, MiningSchema miningSchema) throws Exception { super(ruleE, miningSchema); String id = ruleE.getAttribute("id"); if (id != null && id.length() > 0) { m_ID = id; } m_scoreString = ruleE.getAttribute("score"); Attribute classAtt = miningSchema.getFieldsAsInstances().classAttribute(); if (classAtt.isNumeric()) { m_score = Double.parseDouble(m_scoreString); } else { if (classAtt.indexOfValue(m_scoreString) < 0) { throw new Exception("[SimpleRule] class value " + m_scoreString + " does not exist in class attribute " + classAtt.name()); } m_score = classAtt.indexOfValue(m_scoreString); } String recordCount = ruleE.getAttribute("recordCount"); if (recordCount != null && recordCount.length() > 0) { m_recordCount = Double.parseDouble(recordCount); } String nbCorrect = ruleE.getAttribute("nbCorrect"); if (nbCorrect != null && nbCorrect.length() > 0) { m_nbCorrect = Double.parseDouble(nbCorrect); } String confidence = ruleE.getAttribute("confidence"); if (confidence != null && confidence.length() > 0) { m_confidence = Double.parseDouble(confidence); } String weight = ruleE.getAttribute("weight"); if (weight != null && weight.length() > 0) { m_weight = Double.parseDouble(weight); } // get the ScoreDistributions (if any) if (miningSchema.getFieldsAsInstances().classAttribute().isNominal()) { // see if we have any ScoreDistribution entries NodeList scoreChildren = ruleE.getChildNodes(); for (int i = 0; i < scoreChildren.getLength(); i++) { Node child = scoreChildren.item(i); if (child.getNodeType() == Node.ELEMENT_NODE) { String tagName = ((Element)child).getTagName(); if (tagName.equals("ScoreDistribution")) { TreeModel.ScoreDistribution newDist = new TreeModel.ScoreDistribution((Element)child, miningSchema, m_recordCount); m_scoreDistributions.add(newDist); } } } // check that we have as many score distribution elements as there // are class labels in the data if (m_scoreDistributions.size() > 0 && m_scoreDistributions.size() != miningSchema.getFieldsAsInstances().classAttribute().numValues()) { throw new Exception("[SimpleRule] Number of score distribution elements is " + "different from the number of class labels!"); } //backfit the confidence values (if necessary) if (Utils.isMissingValue(m_recordCount)) { double baseCount = 0; for (TreeModel.ScoreDistribution s : m_scoreDistributions) { 
baseCount += s.getRecordCount(); } for (TreeModel.ScoreDistribution s : m_scoreDistributions) { s.deriveConfidenceValue(baseCount); } } } } /** * Collect the rule(s) that fire for the supplied incoming instance * * @param input a vector of independent and derived independent variables * @param ruleCollection the array list to add any firing rules into */ public void fires(double[] input, ArrayList<SimpleRule> ruleCollection) { if (m_predicate.evaluate(input) == TreeModel.Predicate.Eval.TRUE) { ruleCollection.add(this); } } /** * Score the incoming instance * * @param instance a vector containing the incoming independent and * derived independent variables * @param classAtt the class attribute * @return a probability distribution over the class labels or * the predicted value (in element zero of the array if the class is numeric) * @throws Exception if something goes wrong */ public double[] score(double[] instance, Attribute classAtt) throws Exception { double[] preds; if (classAtt.isNumeric()) { preds = new double[1]; preds[0] = m_score; } else { preds = new double[classAtt.numValues()]; if (m_scoreDistributions.size() > 0) { for (TreeModel.ScoreDistribution s : m_scoreDistributions) { preds[s.getClassLabelIndex()] = s.getConfidence(); } } else if (!Utils.isMissingValue(m_confidence)) { preds[classAtt.indexOfValue(m_scoreString)] = m_confidence; } else { preds[classAtt.indexOfValue(m_scoreString)] = 1.0; } } return preds; } /** * Get the weight of the rule * * @return the weight of the rule */ public double getWeight() { return m_weight; } /** * Get the ID of the rule * * @return the ID of the rule */ public String getID() { return m_ID; } /** * Get the predicted value of this rule (either a number * for regression problems or an index of a class label for * classification problems) * * @return the predicted value of this rule */ public double getScore() { return m_score; } } /** * Inner class representing a compound rule */ static class CompoundRule extends Rule { /** For serialization */ private static final long serialVersionUID = -2853658811459970718L; /** The child rules of this compound rule */ ArrayList<Rule> m_childRules = new ArrayList<Rule>(); public String toString(String prefix, int indent) { StringBuffer temp = new StringBuffer(); for (int i = 0; i < indent; i++) { prefix += " "; } temp.append(prefix + "Compound rule: " + m_predicate + "\n"); for (Rule r : m_childRules) { temp.append(r.toString(prefix, indent + 1)); } return temp.toString(); } public String toString() { return toString("", 0); } /** * Constructor. 
* * @param ruleE XML node holding the rule * @param miningSchema the mining schema to use * @throws Exception if something goes wrong */ public CompoundRule(Element ruleE, MiningSchema miningSchema) throws Exception { // get the Predicate super(ruleE, miningSchema); // get the nested rules NodeList ruleChildren = ruleE.getChildNodes(); for (int i = 0; i < ruleChildren.getLength(); i++) { Node child = ruleChildren.item(i); if (child.getNodeType() == Node.ELEMENT_NODE) { String tagName = ((Element)child).getTagName(); if (tagName.equals("SimpleRule")) { Rule childRule = new SimpleRule(((Element)child), miningSchema); m_childRules.add(childRule); } else if (tagName.equals("CompoundRule")) { Rule childRule = new CompoundRule(((Element)child), miningSchema); m_childRules.add(childRule); } } } } /** * Collect the rule(s) that fire for the supplied incoming instance * * @param input a vector of independent and derived independent variables * @param ruleCollection the array list to add any firing rules into */ public void fires(double[] input, ArrayList<SimpleRule> ruleCollection) { // evaluate our predicate first if (m_predicate.evaluate(input) == TreeModel.Predicate.Eval.TRUE) { // now check the child rules for (Rule r : m_childRules) { r.fires(input, ruleCollection); } } } } /** * Inner class representing a set of rules */ static class RuleSet implements Serializable { /** For serialization */ private static final long serialVersionUID = -8718126887943074376L; enum RuleSelectionMethod { WEIGHTEDSUM("weightedSum"), WEIGHTEDMAX("weightedMax"), FIRSTHIT("firstHit"); private final String m_stringVal; RuleSelectionMethod(String name) { m_stringVal = name; } public String toString() { return m_stringVal; } } /** * The number of training/test cases to which the ruleset was * applied to generate support and confidence measures for individual * rules (optional) */ private double m_recordCount = Utils.missingValue(); /** * The number of training/test cases for which the default * score is correct (optional) */ private double m_nbCorrect = Utils.missingValue(); /** * The default value to predict when no rule in the * ruleset fires (as a String; optional) * */ private String m_defaultScore; /** * The default value to predict (either a real value or an * index) * */ private double m_defaultPrediction = Utils.missingValue(); /** * The default distribution to predict when no rule in the * ruleset fires (nominal class only, optional) */ private ArrayList<TreeModel.ScoreDistribution> m_scoreDistributions = new ArrayList<TreeModel.ScoreDistribution>(); /** * The default confidence value to return along with a score * when no rules in the set fire (optional) */ private double m_defaultConfidence = Utils.missingValue(); /** The active rule selection method */ private RuleSelectionMethod m_currentMethod; /** The selection of rule selection methods allowed */ private ArrayList<RuleSelectionMethod> m_availableRuleSelectionMethods = new ArrayList<RuleSelectionMethod>(); /** The rules contained in the rule set */ private ArrayList<Rule> m_rules = new ArrayList<Rule>(); /* (non-Javadoc) * @see java.lang.Object#toString() */ public String toString() { StringBuffer temp = new StringBuffer(); temp.append("Rule selection method: " + m_currentMethod + "\n"); if (m_defaultScore != null) { temp.append("Default prediction: " + m_defaultScore + "\n"); if (!Utils.isMissingValue(m_recordCount)) { temp.append(" recordCount: " + m_recordCount + "\n"); } if (!Utils.isMissingValue(m_nbCorrect)) { temp.append(" nbCorrect: " + 
m_nbCorrect + "\n"); } if (!Utils.isMissingValue(m_defaultConfidence)) { temp.append(" defaultConfidence: " + m_defaultConfidence + "\n"); } temp.append("\n"); } for (Rule r : m_rules) { temp.append(r + "\n"); } return temp.toString(); } /** * Constructor for a RuleSet. * * @param ruleSetNode the XML node holding the RuleSet * @param miningSchema the mining schema to use * @throws Exception if something goes wrong */ public RuleSet(Element ruleSetNode, MiningSchema miningSchema) throws Exception { String recordCount = ruleSetNode.getAttribute("recordCount"); if (recordCount != null && recordCount.length() > 0) { m_recordCount = Double.parseDouble(recordCount); } String nbCorrect = ruleSetNode.getAttribute("nbCorrect"); if (nbCorrect != null & nbCorrect.length() > 0) { m_nbCorrect = Double.parseDouble(nbCorrect); } String defaultScore = ruleSetNode.getAttribute("defaultScore"); if (defaultScore != null && defaultScore.length() > 0) { m_defaultScore = defaultScore; Attribute classAtt = miningSchema.getFieldsAsInstances().classAttribute(); if (classAtt == null) { throw new Exception("[RuleSet] class attribute not set!"); } if (classAtt.isNumeric()) { m_defaultPrediction = Double.parseDouble(defaultScore); } else { if (classAtt.indexOfValue(defaultScore) < 0) { throw new Exception("[RuleSet] class value " + defaultScore + " not found!"); } m_defaultPrediction = classAtt.indexOfValue(defaultScore); } } String defaultConfidence = ruleSetNode.getAttribute("defaultConfidence"); if (defaultConfidence != null && defaultConfidence.length() > 0) { m_defaultConfidence = Double.parseDouble(defaultConfidence); } // get the rule selection methods NodeList selectionNL = ruleSetNode.getElementsByTagName("RuleSelectionMethod"); for (int i = 0; i < selectionNL.getLength(); i++) { Node selectN = selectionNL.item(i); if (selectN.getNodeType() == Node.ELEMENT_NODE) { Element sN = (Element)selectN; String criterion = sN.getAttribute("criterion"); for (RuleSelectionMethod m : RuleSelectionMethod.values()) { if (m.toString().equals(criterion)) { m_availableRuleSelectionMethods.add(m); if (i == 0) { // set the default (first specified one) m_currentMethod = m; } } } } } if (miningSchema.getFieldsAsInstances().classAttribute().isNominal()) { // see if we have any ScoreDistribution entries NodeList scoreChildren = ruleSetNode.getChildNodes(); for (int i = 0; i < scoreChildren.getLength(); i++) { Node child = scoreChildren.item(i); if (child.getNodeType() == Node.ELEMENT_NODE) { String tagName = ((Element)child).getTagName(); if (tagName.equals("ScoreDistribution")) { TreeModel.ScoreDistribution newDist = new TreeModel.ScoreDistribution((Element)child, miningSchema, m_recordCount); m_scoreDistributions.add(newDist); } } } //backfit the confidence values (if necessary) if (Utils.isMissingValue(m_recordCount)) { double baseCount = 0; for (TreeModel.ScoreDistribution s : m_scoreDistributions) { baseCount += s.getRecordCount(); } for (TreeModel.ScoreDistribution s : m_scoreDistributions) { s.deriveConfidenceValue(baseCount); } } } // Get the rules in this rule set NodeList ruleChildren = ruleSetNode.getChildNodes(); for (int i = 0; i < ruleChildren.getLength(); i++) { Node child = ruleChildren.item(i); if (child.getNodeType() == Node.ELEMENT_NODE) { String tagName = ((Element)child).getTagName(); if (tagName.equals("SimpleRule")) { Rule tempRule = new SimpleRule(((Element)child), miningSchema); m_rules.add(tempRule); } else if (tagName.equals("CompoundRule")) { Rule tempRule = new CompoundRule(((Element)child), 
miningSchema); m_rules.add(tempRule); } } } } /** * Score an incoming instance by collecting all rules that fire. * * @param instance a vector of incoming attribute and derived field values * @param classAtt the class attribute * @return a predicted probability distribution * @throws Exception if something goes wrong */ protected double[] score(double[] instance, Attribute classAtt) throws Exception { double[] preds = null; if (classAtt.isNumeric()) { preds = new double[1]; } else { preds = new double[classAtt.numValues()]; } // holds the rules that fire for this test case ArrayList<SimpleRule> firingRules = new ArrayList<SimpleRule>(); for (Rule r : m_rules) { r.fires(instance, firingRules); } if (firingRules.size() > 0) { if (m_currentMethod == RuleSelectionMethod.FIRSTHIT) { preds = firingRules.get(0).score(instance, classAtt); } else if (m_currentMethod == RuleSelectionMethod.WEIGHTEDMAX) { double wMax = Double.NEGATIVE_INFINITY; SimpleRule best = null; for (SimpleRule s : firingRules) { if (Utils.isMissingValue(s.getWeight())) { throw new Exception("[RuleSet] Scoring criterion is WEIGHTEDMAX, but " + "rule " + s.getID() + " does not have a weight defined!"); } if (s.getWeight() > wMax) { wMax = s.getWeight(); best = s; } } if (best == null) { throw new Exception("[RuleSet] Unable to determine the best rule under " + "the WEIGHTEDMAX criterion!"); } preds = best.score(instance, classAtt); } else if (m_currentMethod == RuleSelectionMethod.WEIGHTEDSUM) { double sumOfWeights = 0; for (SimpleRule s : firingRules) { if (Utils.isMissingValue(s.getWeight())) { throw new Exception("[RuleSet] Scoring criterion is WEIGHTEDSUM, but " + "rule " + s.getID() + " does not have a weight defined!"); } if (classAtt.isNumeric()) { sumOfWeights += s.getWeight(); preds[0] += (s.getScore() * s.getWeight()); } else { preds[(int)s.getScore()] += s.getWeight(); } } if (classAtt.isNumeric()) { if (sumOfWeights == 0) { throw new Exception("[RuleSet] Sum of weights is zero!"); } preds[0] /= sumOfWeights; } else { // array gets normalized in the distributionForInstance() method } } } else { // default prediction if (classAtt.isNumeric()) { preds[0] = m_defaultPrediction; } else { if (m_scoreDistributions.size() > 0) { for (TreeModel.ScoreDistribution s : m_scoreDistributions) { preds[s.getClassLabelIndex()] = s.getConfidence(); } } else if (!Utils.isMissingValue(m_defaultConfidence)) { preds[(int)m_defaultPrediction] = m_defaultConfidence; } else { preds[(int)m_defaultPrediction] = 1.0; } } } return preds; } } /** The mining function */ protected MiningFunction m_functionType = MiningFunction.CLASSIFICATION; /** The model name (if defined) */ protected String m_modelName; /** The algorithm name (if defined) */ protected String m_algorithmName; /** The set of rules */ protected RuleSet m_ruleSet; /** * Constructor for a RuleSetModel * * @param model the XML element encapsulating the RuleSetModel * @param dataDictionary the data dictionary to use * @param miningSchema the mining schema to use * @throws Exception if something goes wrong */ public RuleSetModel(Element model, Instances dataDictionary, MiningSchema miningSchema) throws Exception { super(dataDictionary, miningSchema); if (!getPMMLVersion().equals("3.2")) { // TODO: might have to throw an exception and only support 3.2 } String fn = model.getAttribute("functionName"); if (fn.equals("regression")) { m_functionType = MiningFunction.REGRESSION; } String modelName = model.getAttribute("modelName"); if (modelName != null && modelName.length() > 0) { m_modelName 
= modelName; } String algoName = model.getAttribute("algorithmName"); if (algoName != null && algoName.length() > 0) { m_algorithmName = algoName; } NodeList ruleset = model.getElementsByTagName("RuleSet"); if (ruleset.getLength() == 1) { Node ruleSetNode = ruleset.item(0); if (ruleSetNode.getNodeType() == Node.ELEMENT_NODE) { m_ruleSet = new RuleSet((Element)ruleSetNode, miningSchema); } } else { throw new Exception ("[RuleSetModel] Should only have a single RuleSet!"); } } /** * Classifies the given test instance. The instance has to belong to a * dataset when it's being classified. * * @param inst the instance to be classified * @return the predicted most likely class for the instance or * Utils.missingValue() if no prediction is made * @exception Exception if an error occurred during the prediction */ public double[] distributionForInstance(Instance inst) throws Exception { if (!m_initialized) { mapToMiningSchema(inst.dataset()); } double[] preds = null; if (m_miningSchema.getFieldsAsInstances().classAttribute().isNumeric()) { preds = new double[1]; } else { preds = new double[m_miningSchema.getFieldsAsInstances().classAttribute().numValues()]; } double[] incoming = m_fieldsMap.instanceToSchema(inst, m_miningSchema); preds = m_ruleSet.score(incoming, m_miningSchema.getFieldsAsInstances().classAttribute()); if (m_miningSchema.getFieldsAsInstances().classAttribute().isNominal()) { Utils.normalize(preds); } return preds; } /** * Return a textual description of this model. * * @return a textual description of this model */ public String toString() { StringBuffer temp = new StringBuffer(); temp.append("PMML version " + getPMMLVersion()); if (!getCreatorApplication().equals("?")) { temp.append("\nApplication: " + getCreatorApplication()); } temp.append("\nPMML Model: RuleSetModel"); temp.append("\n\n"); temp.append(m_miningSchema); if (m_algorithmName != null) { temp.append("\nAlgorithm: " + m_algorithmName + "\n"); } temp.append(m_ruleSet); return temp.toString(); } /** * Get the revision string for this class * * @return the revision string */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
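Under the weightedSum criterion in RuleSet.score() above, a numeric target reduces to a weighted average of the scores of the firing rules. A standalone sketch of that combination, with hypothetical score and weight arrays standing in for the collected SimpleRule objects:

// Standalone sketch of the WEIGHTEDSUM combination for a numeric class.
public class WeightedSumSketch {
  public static void main(String[] args) {
    // hypothetical scores and weights of the rules that fired for one instance
    double[] scores  = {4.0, 6.0, 5.0};
    double[] weights = {1.0, 2.0, 1.0};

    double sumOfWeights = 0;
    double prediction = 0;
    for (int i = 0; i < scores.length; i++) {
      sumOfWeights += weights[i];
      prediction += scores[i] * weights[i];
    }
    if (sumOfWeights == 0) {
      throw new IllegalStateException("Sum of weights is zero!"); // mirrors the RuleSet check
    }
    prediction /= sumOfWeights;
    System.out.println("weightedSum prediction: " + prediction); // (4 + 12 + 5) / 4 = 5.25
  }
}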
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/pmml
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/pmml/consumer/SupportVectorMachineModel.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * SupportVectorMachineModel.java * Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.pmml.consumer; import java.io.Serializable; import java.util.ArrayList; import java.util.List; import org.w3c.dom.Element; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import weka.classifiers.pmml.consumer.NeuralNetwork.MiningFunction; import weka.core.Attribute; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.pmml.MiningSchema; import weka.core.pmml.TargetMetaInfo; import weka.core.pmml.VectorDictionary; import weka.core.pmml.VectorInstance; import weka.gui.Logger; /** * Implements a PMML SupportVectorMachineModel * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision$ */ public class SupportVectorMachineModel extends PMMLClassifier implements Serializable { /** For serialization */ private static final long serialVersionUID = 6225095165118374296L; /** * Abstract base class for kernels */ static abstract class Kernel implements Serializable { /** The log object to use */ protected Logger m_log = null; protected Kernel(Logger log) { m_log = log; } /** For serialization */ private static final long serialVersionUID = -6696443459968934767L; /** * Compute the result of the kernel evaluation on the supplied vectors * * @param x the first vector instance * @param y the second vector instance * @return the result of the kernel evaluation * @throws Exception if something goes wrong */ public abstract double evaluate(VectorInstance x, VectorInstance y) throws Exception; /** * Compute the result of the kernel evaluation on the supplied vectors * * @param x the first vector instance * @param y the second vector (as an array of values) * @return the result of the kernel evaluation * @throws Exception if something goes wrong */ public abstract double evaluate(VectorInstance x, double[] y) throws Exception; /** * Factory method returning a new Kernel constructed from * the supplied XML element * * @param svmMachineModelElement the XML element containing the kernel * @param log the logging object to use * @return a new Kernel * @throws Exception if something goes wrong */ public static Kernel getKernel(Element svmMachineModelElement, Logger log) throws Exception { NodeList kList = svmMachineModelElement.getElementsByTagName("LinearKernelType"); if (kList.getLength() > 0) { return new LinearKernel(log); } kList = svmMachineModelElement.getElementsByTagName("PolynomialKernelType"); if (kList.getLength() > 0) { return new PolynomialKernel((Element)kList.item(0), log); } kList = svmMachineModelElement.getElementsByTagName("RadialBasisKernelType"); if (kList.getLength() > 0) { return new RadialBasisKernel((Element)kList.item(0), log); } kList = svmMachineModelElement.getElementsByTagName("SigmoidKernelType"); if (kList.getLength() > 0) { 
return new SigmoidKernel((Element)kList.item(0), log); } throw new Exception("[Kernel] Can't find a kernel that I recognize!"); } } /** * Subclass of Kernel implementing a simple linear (dot product) kernel */ static class LinearKernel extends Kernel implements Serializable { public LinearKernel(Logger log) { super(log); } public LinearKernel() { super(null); } /** For serialization */ private static final long serialVersionUID = 8991716708484953837L; /** * Compute the result of the kernel evaluation on the supplied vectors * * @param x the first vector instance * @param y the second vector instance * @return the result of the kernel evaluation * @throws Exception if something goes wrong */ public double evaluate(VectorInstance x, VectorInstance y) throws Exception { return x.dotProduct(y); } /** * Compute the result of the kernel evaluation on the supplied vectors * * @param x the first vector instance * @param y the second vector (as an array of values) * @return the result of the kernel evaluation * @throws Exception if something goes wrong */ public double evaluate(VectorInstance x, double[] y) throws Exception { return x.dotProduct(y); } /** * Return a textual description of this kernel * * @return a string describing this kernel */ public String toString() { return "Linear kernel: K(x,y) = <x,y>"; } } /** * Subclass of Kernel implementing a polynomial kernel */ static class PolynomialKernel extends Kernel implements Serializable { /** For serialization */ private static final long serialVersionUID = -616176630397865281L; protected double m_gamma = 1; protected double m_coef0 = 1; protected double m_degree = 1; public PolynomialKernel(Element polyNode) { this(polyNode, null); } public PolynomialKernel(Element polyNode, Logger log) { super(log); String gammaString = polyNode.getAttribute("gamma"); if (gammaString != null && gammaString.length() > 0) { try { m_gamma = Double.parseDouble(gammaString); } catch (NumberFormatException e) { String message = "[PolynomialKernel] : WARNING, can't parse " + "gamma attribute. Using default value of 1."; if (m_log == null) { System.err.println(message); } else { m_log.logMessage(message); } } } String coefString = polyNode.getAttribute("coef0"); if (coefString != null && coefString.length() > 0) { try { m_coef0 = Double.parseDouble(coefString); } catch (NumberFormatException e) { String message = "[PolynomialKernel] : WARNING, can't parse " + "coef0 attribute. Using default value of 1."; if (m_log == null) { System.err.println(message); } else { m_log.logMessage(message); } } } String degreeString = polyNode.getAttribute("degree"); if (degreeString != null && degreeString.length() > 0) { try { m_degree = Double.parseDouble(degreeString); } catch (NumberFormatException e) { String message = "[PolynomialKernel] : WARNING, can't parse " + "degree attribute. 
Using default value of 1."; if (m_log == null) { System.err.println(message); } else { m_log.logMessage(message); } } } } /** * Compute the result of the kernel evaluation on the supplied vectors * * @param x the first vector instance * @param y the second vector instance * @return the result of the kernel evaluation * @throws Exception if something goes wrong */ public double evaluate(VectorInstance x, VectorInstance y) throws Exception { double dotProd = x.dotProduct(y); return Math.pow(m_gamma * dotProd + m_coef0, m_degree); } /** * Compute the result of the kernel evaluation on the supplied vectors * * @param x the first vector instance * @param y the second vector (as an array of values) * @return the result of the kernel evaluation * @throws Exception if something goes wrong */ public double evaluate(VectorInstance x, double[] y) throws Exception { double dotProd = x.dotProduct(y); return Math.pow(m_gamma * dotProd + m_coef0, m_degree); } /** * Return a textual description of this kernel * * @return a string describing this kernel */ public String toString() { return "Polynomial kernel: K(x,y) = (" + m_gamma + " * <x,y> + " + m_coef0 +")^" + m_degree; } } /** * Subclass of Kernel implementing a radial basis function kernel */ static class RadialBasisKernel extends Kernel implements Serializable { /** For serialization */ private static final long serialVersionUID = -3834238621822239042L; protected double m_gamma = 1; public RadialBasisKernel(Element radialElement) { this(radialElement, null); } public RadialBasisKernel(Element radialElement, Logger log) { super(log); String gammaString = radialElement.getAttribute("gamma"); if (gammaString != null && gammaString.length() > 0) { try { m_gamma = Double.parseDouble(gammaString); } catch (NumberFormatException e) { String message = "[RadialBasisKernel] : WARNING, can't parse " + "gamma attribute. 
Using default value of 1."; if (m_log == null) { System.err.println(message); } else { m_log.logMessage(message); } } } } /** * Compute the result of the kernel evaluation on the supplied vectors * * @param x the first vector instance * @param y the second vector instance * @return the result of the kernel evaluation * @throws Exception if something goes wrong */ public double evaluate(VectorInstance x, VectorInstance y) throws Exception { VectorInstance diff = x.subtract(y); double result = -m_gamma * diff.dotProduct(diff); return Math.exp(result); } /** * Compute the result of the kernel evaluation on the supplied vectors * * @param x the first vector instance * @param y the second vector (as an array of values) * @return the result of the kernel evaluation * @throws Exception if something goes wrong */ public double evaluate(VectorInstance x, double[] y) throws Exception { VectorInstance diff = x.subtract(y); // System.err.println("diff: " + diff.getValues()); double result = -m_gamma * diff.dotProduct(diff); // System.err.println("Result: " + result); return Math.exp(result); } /** * Return a textual description of this kernel * * @return a string describing this kernel */ public String toString() { return "Radial kernel: K(x,y) = exp(-" + m_gamma + " * ||x - y||^2)"; } } /** * Subclass of Kernel implementing a sigmoid function */ static class SigmoidKernel extends Kernel implements Serializable { /** For serialization */ private static final long serialVersionUID = 8713475894705750117L; protected double m_gamma = 1; protected double m_coef0 = 1; public SigmoidKernel(Element sigElement) { this(sigElement, null); } public SigmoidKernel(Element sigElement, Logger log) { super(log); String gammaString = sigElement.getAttribute("gamma"); if (gammaString != null && gammaString.length() > 0) { try { m_gamma = Double.parseDouble(gammaString); } catch (NumberFormatException e) { String message = "[SigmoidKernel] : WARNING, can't parse " + "gamma attribute. Using default value of 1."; if (m_log == null) { System.err.println(message); } else { m_log.logMessage(message); } } } String coefString = sigElement.getAttribute("coef0"); if (coefString != null && coefString.length() > 0) { try { m_coef0 = Double.parseDouble(coefString); } catch (NumberFormatException e) { String message = "[SigmoidKernel] : WARNING, can't parse " + "coef0 attribute. 
Using default value of 1."; if (m_log == null) { System.err.println(message); } else { m_log.logMessage(message); } } } } /** * Compute the result of the kernel evaluation on the supplied vectors * * @param x the first vector instance * @param y the second vector instance * @return the result of the kernel evaluation * @throws Exception if something goes wrong */ public double evaluate(VectorInstance x, VectorInstance y) throws Exception { double dotProd = x.dotProduct(y); double z = m_gamma * dotProd + m_coef0; double a = Math.exp(z); double b = Math.exp(-z); return ((a - b) / (a + b)); } /** * Compute the result of the kernel evaluation on the supplied vectors * * @param x the first vector instance * @param y the second vector (as an array of values) * @return the result of the kernel evaluation * @throws Exception if something goes wrong */ public double evaluate(VectorInstance x, double[] y) throws Exception { double dotProd = x.dotProduct(y); double z = m_gamma * dotProd + m_coef0; double a = Math.exp(z); double b = Math.exp(-z); return ((a - b) / (a + b)); } /** * Return a textual description of this kernel * * @return a string describing this kernel */ public String toString() { return "Sigmoid kernel: K(x,y) = tanh(" + m_gamma + " * <x,y> + " + m_coef0 +")"; } } /** * Inner class implementing a single binary (classification) SVM */ static class SupportVectorMachine implements Serializable { /** For serialization */ private static final long serialVersionUID = -7650496802836815608L; /** The target label for classification problems */ protected String m_targetCategory; /** The index of the global alternate target label for classification */ protected int m_globalAlternateTargetCategoryIndex = -1; /** The index of the target label for classification problems */ protected int m_targetCategoryIndex = -1; /** PMML 4.0 - index of the alternate category for one-vs-one */ protected int m_localAlternateTargetCategoryIndex = -1; /** PMML 4.0 - local threshold (overrides the global one, if set) */ protected double m_localThreshold = Double.MAX_VALUE; // default - not set /** The mining schema */ protected MiningSchema m_miningSchema; /** The log object to use (if supplied) */ protected Logger m_log; /** * True if this is a linear machine expressed in terms of the original * attributes */ protected boolean m_coeffsOnly = false; /** The support vectors used by this machine (if not linear with coeffs only */ protected List<VectorInstance> m_supportVectors = new ArrayList<VectorInstance>(); /** The constant term - b */ protected double m_intercept = 0; /** The coefficients for the vectors */ protected double[] m_coefficients; /** * Computes the prediction from this support vector machine. For classification, * it fills in the appropriate element in the preds array with either a 0 or a 1. * If the output of the machine is < 0, then a 1 is entered into the array * element corresponding to this machine's targetCategory, otherwise a 0 is entered. * Note that this is different to the scoring procedure in the 3.2 spec (see the comments * in the source code of this method for more information about this). 
 * * @param input the test instance to predict * @param kernel the kernel to use * @param vecDict the vector dictionary (null if the machine is linear and expressed * in terms of attribute weights) * @param preds the prediction array to fill in * @param cMethod the classification method to use (for classification problems) * @param globalThreshold the global threshold (used if there is no local threshold * for this machine) * @throws Exception if something goes wrong */ public void distributionForInstance(double[] input, Kernel kernel, VectorDictionary vecDict, double[] preds, classificationMethod cMethod, double globalThreshold) throws Exception { int targetIndex = 0; if (!m_coeffsOnly) { // get an array that only holds the values that are referenced // by the support vectors input = vecDict.incomingInstanceToVectorFieldVals(input); } if (m_miningSchema.getFieldsAsInstances().classAttribute().isNominal()) { targetIndex = m_targetCategoryIndex; } double result = 0; for (int i = 0; i < m_coefficients.length; i++) { // System.err.println("X : " + m_supportVectors.get(i).getValues()); double val = 0; if (!m_coeffsOnly) { val = kernel.evaluate(m_supportVectors.get(i), input); } else { val = input[i]; } val *= m_coefficients[i]; // System.err.println("val " + val); result += val; } result += m_intercept; /* if (result < 0 && m_miningSchema.getFieldsAsInstances().classAttribute().isNominal()) { System.err.println("[SupportVectorMachine] result (" + result + ") is less than zero" + " for a nominal class value!"); result = 0; } else if (result > 1 && m_miningSchema.getFieldsAsInstances().classAttribute().isNominal()) { System.err.println("[SupportVectorMachine] result (" + result + ") is greater than one" + " for a nominal class value!"); result = 1; } System.err.println("result " + result); */ // TODO revisit this when I actually find out what is going on with // the Zementis model (the only easily available SVM model out there // at this time). if (cMethod == classificationMethod.NONE || m_miningSchema.getFieldsAsInstances().classAttribute().isNumeric()) { // ---------------------------------------------------------------------- // The PMML 3.2 spec states that the output of the machine should lie // between 0 and 1 for a binary classification case with 1 corresponding // to the machine's targetCategory and 0 corresponding to the // alternateBinaryTargetCategory (implying a threshold of 0.5). This seems kind of // non-standard, and indeed, in the 4.0 spec, it has changed to output < threshold // corresponding to the machine's targetCategory and >= threshold corresponding // to the alternateBinaryTargetCategory. What has been implemented here is // the latter with a default threshold of 0 (since the 3.2 spec doesn't have // a way to specify a threshold). The example SVM PMML model from Zementis, // which is PMML version 3.2, produces output between -1 and 1 (give or take). // Implementing the 3.2 scoring mechanism as described in the spec (and truncating // output at 0 and 1) results in all the predicted labels getting flipped on the data // used to construct the Zementis model! // // April 2010 - the Zementis guys have emailed me to say that their model // has been prepared for PMML 4.0, even though it states it is 3.2 in the // XML.
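 // // Worked example of the convention implemented here (editor's illustration): // with the default threshold of 0, a machine output of -0.3 votes for this // machine's targetCategory and an output of +0.3 votes for the alternate // category. A literal 3.2 reading would truncate -0.3 to 0 and count it the // opposite way, which is exactly how the flipped labels described above arise.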
// ---------------------------------------------------------------------- if (m_miningSchema.getFieldsAsInstances().classAttribute().isNominal()) { if (result < 0) { preds[targetIndex] = 1; } else { preds[targetIndex] = 0; } } else { preds[targetIndex] = result; } } else { // PMML 4.0 if (cMethod == classificationMethod.ONE_AGAINST_ALL) { // smallest value output by a machine is the predicted class preds[targetIndex] = result; } else { // one-vs-one double threshold = (m_localThreshold < Double.MAX_VALUE) ? m_localThreshold : globalThreshold; // vote if (result < threshold) { preds[targetIndex]++; } else { int altCat = (m_localAlternateTargetCategoryIndex != -1) ? m_localAlternateTargetCategoryIndex : m_globalAlternateTargetCategoryIndex; preds[altCat]++; } } } // preds[targetIndex] = result; } /** * Constructs a new SupportVectorMachine from the supplied XML element * * @param machineElement the XML element containing the SVM * @param miningSchema the mining schema for the PMML model * @param dictionary the VectorDictionary from which to look up the support * vectors used by this machine (may be null if the machine is linear and * expressed in terms of attribute weights) * @param svmRep the representation of this SVM (uses support vectors or is linear * an uses attribute weights) * @param altCategoryInd the index of the global alternateBinaryTarget (if classification) * @param log the log object to use * @throws Exception if something goes wrong */ public SupportVectorMachine(Element machineElement, MiningSchema miningSchema, VectorDictionary dictionary, SVM_representation svmRep, int altCategoryInd, Logger log) throws Exception { m_miningSchema = miningSchema; m_log = log; String targetCat = machineElement.getAttribute("targetCategory"); if (targetCat != null && targetCat.length() > 0) { m_targetCategory = targetCat; Attribute classAtt = m_miningSchema.getFieldsAsInstances().classAttribute(); if (classAtt.isNominal()) { int index = classAtt.indexOfValue(m_targetCategory); if (index < 0) { throw new Exception("[SupportVectorMachine] : can't find target category: " + m_targetCategory + " in the class attribute!"); } m_targetCategoryIndex = index; // now check for the PMML 4.0 alternateTargetCategory String altTargetCat = machineElement.getAttribute("alternateTargetCategory"); if (altTargetCat != null && altTargetCat.length() > 0) { index = classAtt.indexOfValue(altTargetCat); if (index < 0) { throw new Exception("[SupportVectorMachine] : can't find alternate target category: " + altTargetCat + " in the class attribute!"); } m_localAlternateTargetCategoryIndex = index; } else { // set the global one m_globalAlternateTargetCategoryIndex = altCategoryInd; } } else { throw new Exception("[SupportVectorMachine] : target category supplied " + "but class attribute is numeric!"); } } else { if (m_miningSchema.getFieldsAsInstances().classAttribute().isNominal()) { m_targetCategoryIndex = (altCategoryInd == 0) ? 
1 : 0; m_globalAlternateTargetCategoryIndex = altCategoryInd; System.err.println("Setting target index for machine to " + m_targetCategoryIndex); } } if (svmRep == SVM_representation.SUPPORT_VECTORS) { // get the vectors NodeList vectorsL = machineElement.getElementsByTagName("SupportVectors"); if (vectorsL.getLength() > 0) { Element vectors = (Element)vectorsL.item(0); NodeList allTheVectorsL = vectors.getElementsByTagName("SupportVector"); for (int i = 0; i < allTheVectorsL.getLength(); i++) { Node vec = allTheVectorsL.item(i); String vecId = ((Element)vec).getAttribute("vectorId"); VectorInstance suppV = dictionary.getVector(vecId); if (suppV == null) { throw new Exception("[SupportVectorMachine] : can't find " + "vector with ID: " + vecId + " in the " + "vector dictionary!"); } m_supportVectors.add(suppV); } } } else { m_coeffsOnly = true; } // get the coefficients NodeList coefficientsL = machineElement.getElementsByTagName("Coefficients"); // should be just one list of coefficients if (coefficientsL.getLength() != 1) { throw new Exception("[SupportVectorMachine] Should be just one list of " + "coefficients per binary SVM!"); } Element cL = (Element)coefficientsL.item(0); String intercept = cL.getAttribute("absoluteValue"); if (intercept != null && intercept.length() > 0) { m_intercept = Double.parseDouble(intercept); } // now get the individual coefficient elements NodeList coeffL = cL.getElementsByTagName("Coefficient"); if (coeffL.getLength() == 0) { throw new Exception("[SupportVectorMachine] No coefficients defined!"); } m_coefficients = new double[coeffL.getLength()]; for (int i = 0; i < coeffL.getLength(); i++) { Element coeff = ((Element) coeffL.item(i)); String val = coeff.getAttribute("value"); m_coefficients[i] = Double.parseDouble(val); } } /** * Get a textual description of this binary SVM * * @return a description of this SVM as a string. */ public String toString() { StringBuffer temp = new StringBuffer(); temp.append("Binary SVM"); if (m_miningSchema.getFieldsAsInstances().classAttribute().isNominal()) { temp.append(" (target category = " + m_targetCategory + ")"); if (m_localAlternateTargetCategoryIndex != -1) { temp.append("\n (alternate category = " + m_miningSchema.getFieldsAsInstances().classAttribute(). 
value(m_localAlternateTargetCategoryIndex) + ")"); } } temp.append("\n\n"); for (int i = 0; i < m_supportVectors.size(); i++) { temp.append("\n" + m_coefficients[i] + " * [" + m_supportVectors.get(i).getValues() + " * X]"); } if (m_intercept >= 0) { temp.append("\n +" + m_intercept); } else { temp.append("\n " + m_intercept); } return temp.toString(); } } static enum SVM_representation { SUPPORT_VECTORS, COEFFICIENTS; // for the inputs if machine is linear and expressed in terms of the attributes } static enum classificationMethod { NONE, // PMML 3.x ONE_AGAINST_ALL, // PMML 4.0 default ONE_AGAINST_ONE; } /** The mining function **/ protected MiningFunction m_functionType = MiningFunction.CLASSIFICATION; /** The classification method (PMML 4.0) */ protected classificationMethod m_classificationMethod = classificationMethod.NONE; // PMML 3.x (only handles binary problems) /** The model name (if defined) */ protected String m_modelName; /** The algorithm name (if defined) */ protected String m_algorithmName; /** The dictionary of support vectors */ protected VectorDictionary m_vectorDictionary; /** The kernel function to use */ protected Kernel m_kernel; /** The individual binary SVMs */ protected List<SupportVectorMachine> m_machines = new ArrayList<SupportVectorMachine>(); /** The other class index (in the case of a single binary SVM - PMML 3.2). */ protected int m_alternateBinaryTargetCategory = -1; /** Do we have support vectors, or just attribute coefficients for a linear machine? */ protected SVM_representation m_svmRepresentation = SVM_representation.SUPPORT_VECTORS; /** PMML 4.0 threshold value */ protected double m_threshold = 0; /** * Construct a new SupportVectorMachineModel encapsulating the information provided * in the PMML document. * * @param model the SVM element from the PMML document * @param dataDictionary the data dictionary * @param miningSchema the mining schema * @throws Exception if the model can't be constructed from the PMML */ public SupportVectorMachineModel(Element model, Instances dataDictionary, MiningSchema miningSchema) throws Exception { super(dataDictionary, miningSchema); if (!getPMMLVersion().equals("3.2")) { //TODO might have to throw an exception and only support 3.2 } String fn = model.getAttribute("functionName"); if (fn.equals("regression")) { m_functionType = MiningFunction.REGRESSION; } String modelName = model.getAttribute("modelName"); if (modelName != null && modelName.length() > 0) { m_modelName = modelName; } String algoName = model.getAttribute("algorithmName"); if (algoName != null && algoName.length() > 0) { m_algorithmName = algoName; } String svmRep = model.getAttribute("svmRepresentation"); if (svmRep != null && svmRep.length() > 0) { if (svmRep.equals("Coefficients")) { m_svmRepresentation = SVM_representation.COEFFICIENTS; } } String altTargetCat = model.getAttribute("alternateBinaryTargetCategory"); if (altTargetCat != null && altTargetCat.length() > 0) { int altTargetInd = m_miningSchema.getFieldsAsInstances().classAttribute().indexOfValue(altTargetCat); if (altTargetInd < 0) { throw new Exception("[SupportVectorMachineModel] can't find alternate " + "target value " + altTargetCat); } m_alternateBinaryTargetCategory = altTargetInd; } // PMML 4.0 String thresholdS = model.getAttribute("threshold"); if (thresholdS != null && thresholdS.length() > 0) { m_threshold = Double.parseDouble(thresholdS); } // PMML 4.0 if (getPMMLVersion().startsWith("4.")) { m_classificationMethod = classificationMethod.ONE_AGAINST_ALL; // default for PMML 
4.0 } String classificationMethodS = model.getAttribute("classificationMethod"); if (classificationMethodS != null && classificationMethodS.length()> 0) { if (classificationMethodS.equals("OneAgainstOne")) { m_classificationMethod = classificationMethod.ONE_AGAINST_ONE; } } if (m_svmRepresentation == SVM_representation.SUPPORT_VECTORS) { m_vectorDictionary = VectorDictionary.getVectorDictionary(model, miningSchema); } m_kernel = Kernel.getKernel(model, m_log); if (m_svmRepresentation == SVM_representation.COEFFICIENTS && !(m_kernel instanceof LinearKernel)) { throw new Exception("[SupportVectorMachineModel] representation is " + "coefficients, but kernel is not linear!"); } // Get the individual machines NodeList machineL = model.getElementsByTagName("SupportVectorMachine"); if (machineL.getLength() == 0) { throw new Exception("[SupportVectorMachineModel] No binary SVMs" + " defined in model file!"); } for (int i = 0; i < machineL.getLength(); i++) { Node machine = machineL.item(i); SupportVectorMachine newMach = new SupportVectorMachine((Element)machine, m_miningSchema, m_vectorDictionary, m_svmRepresentation, m_alternateBinaryTargetCategory, m_log); m_machines.add(newMach); } } /** * Classifies the given test instance. The instance has to belong to a * dataset when it's being classified. * * @param inst the instance to be classified * @return the predicted most likely class for the instance or * Utils.missingValue() if no prediction is made * @exception Exception if an error occurred during the prediction */ public double[] distributionForInstance(Instance inst) throws Exception { if (!m_initialized) { mapToMiningSchema(inst.dataset()); } double[] preds = null; if (m_miningSchema.getFieldsAsInstances().classAttribute().isNumeric()) { preds = new double[1]; } else { preds = new double[m_miningSchema.getFieldsAsInstances().classAttribute().numValues()]; for (int i = 0; i < preds.length; i++) { preds[i] = -1; // mark all entries as not calculated to begin with } } double[] incoming = m_fieldsMap.instanceToSchema(inst, m_miningSchema); boolean hasMissing = false; for (int i = 0; i < incoming.length; i++) { if (i != m_miningSchema.getFieldsAsInstances().classIndex() && Double.isNaN(incoming[i])) { hasMissing = true; //System.err.println("Missing value for att : " + m_miningSchema.getFieldsAsInstances().attribute(i).name()); break; } } if (hasMissing) { if (!m_miningSchema.hasTargetMetaData()) { String message = "[SupportVectorMachineModel] WARNING: Instance to predict has missing value(s) but " + "there is no missing value handling meta data and no " + "prior probabilities/default value to fall back to. No " + "prediction will be made (" + ((m_miningSchema.getFieldsAsInstances().classAttribute().isNominal() || m_miningSchema.getFieldsAsInstances().classAttribute().isString()) ? "zero probabilities output)." 
: "NaN output)."); if (m_log == null) { System.err.println(message); } else { m_log.logMessage(message); } if (m_miningSchema.getFieldsAsInstances().classAttribute().isNumeric()) { preds[0] = Utils.missingValue(); } return preds; } else { // use prior probablilities/default value TargetMetaInfo targetData = m_miningSchema.getTargetMetaData(); if (m_miningSchema.getFieldsAsInstances().classAttribute().isNumeric()) { preds[0] = targetData.getDefaultValue(); } else { Instances miningSchemaI = m_miningSchema.getFieldsAsInstances(); for (int i = 0; i < miningSchemaI.classAttribute().numValues(); i++) { preds[i] = targetData.getPriorProbability(miningSchemaI.classAttribute().value(i)); } } return preds; } } else { for (SupportVectorMachine m : m_machines) { m.distributionForInstance(incoming, m_kernel, m_vectorDictionary, preds, m_classificationMethod, m_threshold); } } if (m_classificationMethod != classificationMethod.NONE && m_miningSchema.getFieldsAsInstances().classAttribute().isNominal()) { // PMML 4.0 if (m_classificationMethod == classificationMethod.ONE_AGAINST_ALL) { // find the minimum value int minI = Utils.minIndex(preds); preds = new double[preds.length]; preds[minI] = 1.0; } else { // nothing to do for one-against-one - just normalize the // votes } } if (m_machines.size() == preds.length - 1) { double total = 0; int unset = -1; for (int i = 0; i < preds.length; i++) { if (preds[i] != -1) { total += preds[i]; } else { unset = i; } } if (total > 1.0) { throw new Exception("[SupportVectorMachineModel] total of probabilities" + " is greater than 1!"); } preds[unset] = 1.0 - total; } if (preds.length > 1) { Utils.normalize(preds); } return preds; } public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Get a textual description of this SupportVectorMachineModel * * @return a description of this SupportVectorMachineModel as a string */ public String toString() { StringBuffer temp = new StringBuffer(); temp.append("PMML version " + getPMMLVersion()); if (!getCreatorApplication().equals("?")) { temp.append("\nApplication: " + getCreatorApplication()); } temp.append("\nPMML Model: Support Vector Machine Model"); temp.append("\n\n"); temp.append(m_miningSchema); temp.append("Kernel: \n\t"); temp.append(m_kernel); temp.append("\n"); if (m_classificationMethod != classificationMethod.NONE) { temp.append("Multi-class classifcation using "); if (m_classificationMethod == classificationMethod.ONE_AGAINST_ALL) { temp.append("one-against-all"); } else { temp.append("one-against-one"); } temp.append("\n\n"); } for (SupportVectorMachine v : m_machines) { temp.append("\n" + v); } return temp.toString(); } }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/pmml
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/pmml/consumer/TreeModel.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * TreeModel.java * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.pmml.consumer; import java.io.Serializable; import java.util.ArrayList; import org.w3c.dom.Element; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import weka.core.Attribute; import weka.core.Drawable; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.pmml.Array; import weka.core.pmml.MiningSchema; /** * Class implementing import of PMML TreeModel. Can be used as a Weka classifier * for prediction (buildClassifier() raises an Exception). * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision$ */ public class TreeModel extends PMMLClassifier implements Drawable { /** * For serialization */ private static final long serialVersionUID = -2065158088298753129L; /** * Inner class representing the ScoreDistribution element */ static class ScoreDistribution implements Serializable { /** * For serialization */ private static final long serialVersionUID = -123506262094299933L; /** The class label for this distribution element */ private final String m_classLabel; /** The index of the class label */ private int m_classLabelIndex = -1; /** The count for this label */ private final double m_recordCount; /** The optional confidence value */ private double m_confidence = Utils.missingValue(); /** * Construct a ScoreDistribution entry * * @param scoreE the node containing the distribution * @param miningSchema the mining schema * @param baseCount the number of records at the node that owns this * distribution entry * @throws Exception if something goes wrong */ protected ScoreDistribution(Element scoreE, MiningSchema miningSchema, double baseCount) throws Exception { // get the label m_classLabel = scoreE.getAttribute("value"); Attribute classAtt = miningSchema.getFieldsAsInstances().classAttribute(); if (classAtt == null || classAtt.indexOfValue(m_classLabel) < 0) { throw new Exception( "[ScoreDistribution] class attribute not set or class value " + m_classLabel + " not found!"); } m_classLabelIndex = classAtt.indexOfValue(m_classLabel); // get the frequency String recordC = scoreE.getAttribute("recordCount"); m_recordCount = Double.parseDouble(recordC); // get the optional confidence String confidence = scoreE.getAttribute("confidence"); if (confidence != null && confidence.length() > 0) { m_confidence = Double.parseDouble(confidence); } else if (!Utils.isMissingValue(baseCount) && baseCount > 0) { m_confidence = m_recordCount / baseCount; } } /** * Backfit confidence value (does nothing if the confidence value is already * set).
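 * For example (editor's illustration of the division above): if this entry's * recordCount is 30 and the supplied baseCount is 40, the derived confidence * is 30/40 = 0.75.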
 * * @param baseCount the total number of records (supplied either explicitly * from the node that owns this distribution entry or most likely * computed from summing the recordCounts of all the distribution * entries in the distribution that owns this entry). */ void deriveConfidenceValue(double baseCount) { if (Utils.isMissingValue(m_confidence) && !Utils.isMissingValue(baseCount) && baseCount > 0) { m_confidence = m_recordCount / baseCount; } } String getClassLabel() { return m_classLabel; } int getClassLabelIndex() { return m_classLabelIndex; } double getRecordCount() { return m_recordCount; } double getConfidence() { return m_confidence; } @Override public String toString() { return m_classLabel + ": " + m_recordCount + " (" + Utils.doubleToString(m_confidence, 2) + ") "; } } /** * Base class for Predicates */ static abstract class Predicate implements Serializable { /** * For serialization */ private static final long serialVersionUID = 1035344165452733887L; enum Eval { TRUE, FALSE, UNKNOWN; } /** * Evaluate this predicate. * * @param input the input vector of attribute and derived field values. * * @return the evaluation status of this predicate. */ abstract Eval evaluate(double[] input); protected String toString(int level, boolean cr) { return toString(level); } protected String toString(int level) { StringBuffer text = new StringBuffer(); for (int j = 0; j < level; j++) { text.append("| "); } return text.append(toString()).toString(); } static Eval booleanToEval(boolean missing, boolean result) { if (missing) { return Eval.UNKNOWN; } else if (result) { return Eval.TRUE; } else { return Eval.FALSE; } } /** * Factory method to return the appropriate predicate for a given node in * the tree. * * @param nodeE the XML node encapsulating the tree node. * @param miningSchema the mining schema in use * @return a Predicate * @throws Exception if something goes wrong.
*/ static Predicate getPredicate(Element nodeE, MiningSchema miningSchema) throws Exception { Predicate result = null; NodeList children = nodeE.getChildNodes(); for (int i = 0; i < children.getLength(); i++) { Node child = children.item(i); if (child.getNodeType() == Node.ELEMENT_NODE) { String tagName = ((Element) child).getTagName(); if (tagName.equals("True")) { result = new True(); break; } else if (tagName.equals("False")) { result = new False(); break; } else if (tagName.equals("SimplePredicate")) { result = new SimplePredicate((Element) child, miningSchema); break; } else if (tagName.equals("CompoundPredicate")) { result = new CompoundPredicate((Element) child, miningSchema); break; } else if (tagName.equals("SimpleSetPredicate")) { result = new SimpleSetPredicate((Element) child, miningSchema); break; } } } if (result == null) { throw new Exception( "[Predicate] unknown or missing predicate type in node"); } return result; } } /** * Simple True Predicate */ static class True extends Predicate { /** * For serialization */ private static final long serialVersionUID = 1817942234610531627L; @Override public Predicate.Eval evaluate(double[] input) { return Predicate.Eval.TRUE; } @Override public String toString() { return "True: "; } } /** * Simple False Predicate */ static class False extends Predicate { /** * For serialization */ private static final long serialVersionUID = -3647261386442860365L; @Override public Predicate.Eval evaluate(double[] input) { return Predicate.Eval.FALSE; } @Override public String toString() { return "False: "; } } /** * Class representing the SimplePredicate */ static class SimplePredicate extends Predicate { /** * For serialization */ private static final long serialVersionUID = -6156684285069327400L; enum Operator { EQUAL("equal") { @Override Predicate.Eval evaluate(double[] input, double value, int fieldIndex) { return Predicate.booleanToEval( Utils.isMissingValue(input[fieldIndex]), weka.core.Utils.eq(input[fieldIndex], value)); } @Override String shortName() { return "=="; } }, NOTEQUAL("notEqual") { @Override Predicate.Eval evaluate(double[] input, double value, int fieldIndex) { return Predicate.booleanToEval( Utils.isMissingValue(input[fieldIndex]), (input[fieldIndex] != value)); } @Override String shortName() { return "!="; } }, LESSTHAN("lessThan") { @Override Predicate.Eval evaluate(double[] input, double value, int fieldIndex) { return Predicate.booleanToEval( Utils.isMissingValue(input[fieldIndex]), (input[fieldIndex] < value)); } @Override String shortName() { return "<"; } }, LESSOREQUAL("lessOrEqual") { @Override Predicate.Eval evaluate(double[] input, double value, int fieldIndex) { return Predicate.booleanToEval( Utils.isMissingValue(input[fieldIndex]), (input[fieldIndex] <= value)); } @Override String shortName() { return "<="; } }, GREATERTHAN("greaterThan") { @Override Predicate.Eval evaluate(double[] input, double value, int fieldIndex) { return Predicate.booleanToEval( Utils.isMissingValue(input[fieldIndex]), (input[fieldIndex] > value)); } @Override String shortName() { return ">"; } }, GREATEROREQUAL("greaterOrEqual") { @Override Predicate.Eval evaluate(double[] input, double value, int fieldIndex) { return Predicate.booleanToEval( Utils.isMissingValue(input[fieldIndex]), (input[fieldIndex] >= value)); } @Override String shortName() { return ">="; } }, ISMISSING("isMissing") { @Override Predicate.Eval evaluate(double[] input, double value, int fieldIndex) { return Predicate.booleanToEval(false, 
Utils.isMissingValue(input[fieldIndex])); } @Override String shortName() { return toString(); } }, ISNOTMISSING("isNotMissing") { @Override Predicate.Eval evaluate(double[] input, double value, int fieldIndex) { return Predicate.booleanToEval(false, !Utils.isMissingValue(input[fieldIndex])); } @Override String shortName() { return toString(); } }; abstract Predicate.Eval evaluate(double[] input, double value, int fieldIndex); abstract String shortName(); private final String m_stringVal; Operator(String name) { m_stringVal = name; } @Override public String toString() { return m_stringVal; } } /** the field that we are comparing against */ int m_fieldIndex = -1; /** the name of the field */ String m_fieldName; /** true if the field is nominal */ boolean m_isNominal; /** the value as a string (if nominal) */ String m_nominalValue; /** * the value to compare against (if nominal it holds the index of the value) */ double m_value; /** the operator to use */ Operator m_operator; public SimplePredicate(Element simpleP, MiningSchema miningSchema) throws Exception { Instances totalStructure = miningSchema.getFieldsAsInstances(); // get the field name and set up the index String fieldS = simpleP.getAttribute("field"); Attribute att = totalStructure.attribute(fieldS); if (att == null) { throw new Exception("[SimplePredicate] unable to find field " + fieldS + " in the incoming instance structure!"); } // find the index int index = -1; for (int i = 0; i < totalStructure.numAttributes(); i++) { if (totalStructure.attribute(i).name().equals(fieldS)) { index = i; m_fieldName = totalStructure.attribute(i).name(); break; } } m_fieldIndex = index; if (att.isNominal()) { m_isNominal = true; } // get the operator String oppS = simpleP.getAttribute("operator"); for (Operator o : Operator.values()) { if (o.toString().equals(oppS)) { m_operator = o; break; } } if (m_operator != Operator.ISMISSING && m_operator != Operator.ISNOTMISSING) { String valueS = simpleP.getAttribute("value"); if (att.isNumeric()) { m_value = Double.parseDouble(valueS); } else { m_nominalValue = valueS; m_value = att.indexOfValue(valueS); if (m_value < 0) { throw new Exception("[SimplePredicate] can't find value " + valueS + " in nominal " + "attribute " + att.name()); } } } } @Override public Predicate.Eval evaluate(double[] input) { return m_operator.evaluate(input, m_value, m_fieldIndex); } @Override public String toString() { StringBuffer temp = new StringBuffer(); temp.append(m_fieldName + " " + m_operator.shortName()); if (m_operator != Operator.ISMISSING && m_operator != Operator.ISNOTMISSING) { temp.append(" " + ((m_isNominal) ? 
m_nominalValue : "" + m_value)); } return temp.toString(); } } /** * Class representing the CompoundPredicate */ static class CompoundPredicate extends Predicate { /** * For serialization */ private static final long serialVersionUID = -3332091529764559077L; enum BooleanOperator { OR("or") { @Override Predicate.Eval evaluate(ArrayList<Predicate> constituents, double[] input) { Predicate.Eval currentStatus = Predicate.Eval.FALSE; for (Predicate p : constituents) { Predicate.Eval temp = p.evaluate(input); if (temp == Predicate.Eval.TRUE) { currentStatus = temp; break; } else if (temp == Predicate.Eval.UNKNOWN) { currentStatus = temp; } } return currentStatus; } }, AND("and") { @Override Predicate.Eval evaluate(ArrayList<Predicate> constituents, double[] input) { Predicate.Eval currentStatus = Predicate.Eval.TRUE; for (Predicate p : constituents) { Predicate.Eval temp = p.evaluate(input); if (temp == Predicate.Eval.FALSE) { currentStatus = temp; break; } else if (temp == Predicate.Eval.UNKNOWN) { currentStatus = temp; } } return currentStatus; } }, XOR("xor") { @Override Predicate.Eval evaluate(ArrayList<Predicate> constituents, double[] input) { Predicate.Eval currentStatus = constituents.get(0).evaluate(input); if (currentStatus != Predicate.Eval.UNKNOWN) { for (int i = 1; i < constituents.size(); i++) { Predicate.Eval temp = constituents.get(i).evaluate(input); if (temp == Predicate.Eval.UNKNOWN) { currentStatus = temp; break; } else { if (currentStatus != temp) { currentStatus = Predicate.Eval.TRUE; } else { currentStatus = Predicate.Eval.FALSE; } } } } return currentStatus; } }, SURROGATE("surrogate") { @Override Predicate.Eval evaluate(ArrayList<Predicate> constituents, double[] input) { Predicate.Eval currentStatus = constituents.get(0).evaluate(input); int i = 1; while (currentStatus == Predicate.Eval.UNKNOWN) { currentStatus = constituents.get(i).evaluate(input); } // return false if all our surrogates evaluate to unknown. 
if (currentStatus == Predicate.Eval.UNKNOWN) { currentStatus = Predicate.Eval.FALSE; } return currentStatus; } }; abstract Predicate.Eval evaluate(ArrayList<Predicate> constituents, double[] input); private final String m_stringVal; BooleanOperator(String name) { m_stringVal = name; } @Override public String toString() { return m_stringVal; } } /** the constituent Predicates */ ArrayList<Predicate> m_components = new ArrayList<Predicate>(); /** the boolean operator */ BooleanOperator m_booleanOperator; public CompoundPredicate(Element compoundP, MiningSchema miningSchema) throws Exception { // Instances totalStructure = miningSchema.getFieldsAsInstances(); String booleanOpp = compoundP.getAttribute("booleanOperator"); for (BooleanOperator b : BooleanOperator.values()) { if (b.toString().equals(booleanOpp)) { m_booleanOperator = b; } } // now get all the encapsulated operators NodeList children = compoundP.getChildNodes(); for (int i = 0; i < children.getLength(); i++) { Node child = children.item(i); if (child.getNodeType() == Node.ELEMENT_NODE) { String tagName = ((Element) child).getTagName(); if (tagName.equals("True")) { m_components.add(new True()); } else if (tagName.equals("False")) { m_components.add(new False()); } else if (tagName.equals("SimplePredicate")) { m_components .add(new SimplePredicate((Element) child, miningSchema)); } else if (tagName.equals("CompoundPredicate")) { m_components.add(new CompoundPredicate((Element) child, miningSchema)); } else { m_components.add(new SimpleSetPredicate((Element) child, miningSchema)); } } } } @Override public Predicate.Eval evaluate(double[] input) { return m_booleanOperator.evaluate(m_components, input); } @Override public String toString() { return toString(0, false); } @Override public String toString(int level, boolean cr) { StringBuffer text = new StringBuffer(); for (int j = 0; j < level; j++) { text.append("| "); } text.append("Compound [" + m_booleanOperator.toString() + "]"); if (cr) { text.append("\\n"); } else { text.append("\n"); } for (int i = 0; i < m_components.size(); i++) { text.append(m_components.get(i).toString(level, cr).replace(":", "")); if (i != m_components.size() - 1) { if (cr) { text.append("\\n"); } else { text.append("\n"); } } } return text.toString(); } } /** * Class representing the SimpleSetPredicate */ static class SimpleSetPredicate extends Predicate { /** * For serialization */ private static final long serialVersionUID = -2711995401345708486L; enum BooleanOperator { IS_IN("isIn") { @Override Predicate.Eval evaluate(double[] input, int fieldIndex, Array set, Attribute nominalLookup) { if (set.getType() == Array.ArrayType.STRING) { String value = ""; if (!Utils.isMissingValue(input[fieldIndex])) { value = nominalLookup.value((int) input[fieldIndex]); } return Predicate.booleanToEval( Utils.isMissingValue(input[fieldIndex]), set.contains(value)); } else if (set.getType() == Array.ArrayType.NUM || set.getType() == Array.ArrayType.REAL) { return Predicate.booleanToEval( Utils.isMissingValue(input[fieldIndex]), set.contains(input[fieldIndex])); } return Predicate.booleanToEval( Utils.isMissingValue(input[fieldIndex]), set.contains((int) input[fieldIndex])); } }, IS_NOT_IN("isNotIn") { @Override Predicate.Eval evaluate(double[] input, int fieldIndex, Array set, Attribute nominalLookup) { Predicate.Eval result = IS_IN.evaluate(input, fieldIndex, set, nominalLookup); if (result == Predicate.Eval.FALSE) { result = Predicate.Eval.TRUE; } else if (result == Predicate.Eval.TRUE) { result = Predicate.Eval.FALSE; 
} return result; } }; abstract Predicate.Eval evaluate(double[] input, int fieldIndex, Array set, Attribute nominalLookup); private final String m_stringVal; BooleanOperator(String name) { m_stringVal = name; } @Override public String toString() { return m_stringVal; } } /** the field to reference */ int m_fieldIndex = -1; /** the name of the field */ String m_fieldName; /** is the referenced field nominal? */ boolean m_isNominal = false; /** the attribute to lookup nominal values from */ Attribute m_nominalLookup; /** the boolean operator */ BooleanOperator m_operator = BooleanOperator.IS_IN; /** the array holding the set of values */ Array m_set; public SimpleSetPredicate(Element setP, MiningSchema miningSchema) throws Exception { Instances totalStructure = miningSchema.getFieldsAsInstances(); // get the field name and set up the index String fieldS = setP.getAttribute("field"); Attribute att = totalStructure.attribute(fieldS); if (att == null) { throw new Exception("[SimplePredicate] unable to find field " + fieldS + " in the incoming instance structure!"); } // find the index int index = -1; for (int i = 0; i < totalStructure.numAttributes(); i++) { if (totalStructure.attribute(i).name().equals(fieldS)) { index = i; m_fieldName = totalStructure.attribute(i).name(); break; } } m_fieldIndex = index; if (att.isNominal()) { m_isNominal = true; m_nominalLookup = att; } // need to scan the children looking for an array type NodeList children = setP.getChildNodes(); for (int i = 0; i < children.getLength(); i++) { Node child = children.item(i); if (child.getNodeType() == Node.ELEMENT_NODE) { if (Array.isArray((Element) child)) { // found the array m_set = Array.create((Element) child); break; } } } if (m_set == null) { throw new Exception("[SimpleSetPredictate] couldn't find an " + "array containing the set values!"); } // check array type against field type if (m_set.getType() == Array.ArrayType.STRING && !m_isNominal) { throw new Exception("[SimpleSetPredicate] referenced field " + totalStructure.attribute(m_fieldIndex).name() + " is numeric but array type is string!"); } else if (m_set.getType() != Array.ArrayType.STRING && m_isNominal) { throw new Exception("[SimpleSetPredicate] referenced field " + totalStructure.attribute(m_fieldIndex).name() + " is nominal but array type is numeric!"); } } @Override public Predicate.Eval evaluate(double[] input) { return m_operator.evaluate(input, m_fieldIndex, m_set, m_nominalLookup); } @Override public String toString() { StringBuffer temp = new StringBuffer(); temp.append(m_fieldName + " " + m_operator.toString() + " "); temp.append(m_set.toString()); return temp.toString(); } } /** * Class for handling a Node in the tree */ class TreeNode implements Serializable { // TODO: perhaps implement a class called Statistics that contains // Partitions? 
/** * For serialization */ private static final long serialVersionUID = 3011062274167063699L; /** ID for this node */ private String m_ID = "" + this.hashCode(); /** The score as a string */ private String m_scoreString; /** The index of this predicted value (if class is nominal) */ private int m_scoreIndex = -1; /** The score as a number (if target is numeric) */ private double m_scoreNumeric = Utils.missingValue(); /** The record count at this node (if defined) */ private double m_recordCount = Utils.missingValue(); /** The ID of the default child (if applicable) */ private String m_defaultChildID; /** Holds the node of the default child (if defined) */ private TreeNode m_defaultChild; /** The distribution for labels (classification) */ private final ArrayList<ScoreDistribution> m_scoreDistributions = new ArrayList<ScoreDistribution>(); /** The predicate for this node */ private final Predicate m_predicate; /** The children of this node */ private final ArrayList<TreeNode> m_childNodes = new ArrayList<TreeNode>(); protected TreeNode(Element nodeE, MiningSchema miningSchema) throws Exception { Attribute classAtt = miningSchema.getFieldsAsInstances().classAttribute(); // get the ID String id = nodeE.getAttribute("id"); if (id != null && id.length() > 0) { m_ID = id; } // get the score for this node String scoreS = nodeE.getAttribute("score"); if (scoreS != null && scoreS.length() > 0) { m_scoreString = scoreS; // try to parse as a number in case we // are part of a regression tree if (classAtt.isNumeric()) { try { m_scoreNumeric = Double.parseDouble(scoreS); } catch (NumberFormatException ex) { throw new Exception( "[TreeNode] class is numeric but unable to parse score " + m_scoreString + " as a number!"); } } else { // store the index of this class value m_scoreIndex = classAtt.indexOfValue(m_scoreString); if (m_scoreIndex < 0) { throw new Exception( "[TreeNode] can't find match for predicted value " + m_scoreString + " in class attribute!"); } } } // get the record count if defined String recordC = nodeE.getAttribute("recordCount"); if (recordC != null && recordC.length() > 0) { m_recordCount = Double.parseDouble(recordC); } // get the default child (if applicable) String defaultC = nodeE.getAttribute("defaultChild"); if (defaultC != null && defaultC.length() > 0) { m_defaultChildID = defaultC; } // TODO: Embedded model (once we support model composition) // Now get the ScoreDistributions (if any and mining function // is classification) at this level if (m_functionType == MiningFunction.CLASSIFICATION) { getScoreDistributions(nodeE, miningSchema); } // Now get the Predicate m_predicate = Predicate.getPredicate(nodeE, miningSchema); // Now get the child Node(s) getChildNodes(nodeE, miningSchema); // If we have a default child specified, find it now if (m_defaultChildID != null) { for (TreeNode t : m_childNodes) { if (t.getID().equals(m_defaultChildID)) { m_defaultChild = t; break; } } } } private void getChildNodes(Element nodeE, MiningSchema miningSchema) throws Exception { NodeList children = nodeE.getChildNodes(); for (int i = 0; i < children.getLength(); i++) { Node child = children.item(i); if (child.getNodeType() == Node.ELEMENT_NODE) { String tagName = ((Element) child).getTagName(); if (tagName.equals("Node")) { TreeNode tempN = new TreeNode((Element) child, miningSchema); m_childNodes.add(tempN); } } } } private void getScoreDistributions(Element nodeE, MiningSchema miningSchema) throws Exception { NodeList scoreChildren = nodeE.getChildNodes(); for (int i = 0; i < 
scoreChildren.getLength(); i++) { Node child = scoreChildren.item(i); if (child.getNodeType() == Node.ELEMENT_NODE) { String tagName = ((Element) child).getTagName(); if (tagName.equals("ScoreDistribution")) { ScoreDistribution newDist = new ScoreDistribution((Element) child, miningSchema, m_recordCount); m_scoreDistributions.add(newDist); } } } // backfit the confidence values if (Utils.isMissingValue(m_recordCount)) { double baseCount = 0; for (ScoreDistribution s : m_scoreDistributions) { baseCount += s.getRecordCount(); } for (ScoreDistribution s : m_scoreDistributions) { s.deriveConfidenceValue(baseCount); } } } /** * Get the score value as a string. * * @return the score value as a String. */ protected String getScore() { return m_scoreString; } /** * Get the score value as a number (regression trees only). * * @return the score as a number */ protected double getScoreNumeric() { return m_scoreNumeric; } /** * Get the ID of this node. * * @return the ID of this node. */ protected String getID() { return m_ID; } /** * Get the Predicate at this node. * * @return the predicate at this node. */ protected Predicate getPredicate() { return m_predicate; } /** * Get the record count at this node. * * @return the record count at this node. */ protected double getRecordCount() { return m_recordCount; } protected void dumpGraph(StringBuffer text) throws Exception { text.append("N" + m_ID + " "); if (m_scoreString != null) { text.append("[label=\"score=" + m_scoreString); } if (m_scoreDistributions.size() > 0 && m_childNodes.size() == 0) { text.append("\\n"); for (ScoreDistribution s : m_scoreDistributions) { text.append(s + "\\n"); } } text.append("\""); if (m_childNodes.size() == 0) { text.append(" shape=box style=filled"); } text.append("]\n"); for (TreeNode c : m_childNodes) { text.append("N" + m_ID + "->" + "N" + c.getID()); text.append(" [label=\"" + c.getPredicate().toString(0, true)); text.append("\"]\n"); c.dumpGraph(text); } } @Override public String toString() { StringBuffer text = new StringBuffer(); // print out the root dumpTree(0, text); return text.toString(); } protected void dumpTree(int level, StringBuffer text) { if (m_childNodes.size() > 0) { for (int i = 0; i < m_childNodes.size(); i++) { text.append("\n"); /* * for (int j = 0; j < level; j++) { text.append("| "); } */ // output the predicate for this child node TreeNode child = m_childNodes.get(i); text.append(child.getPredicate().toString(level, false)); // process recursively child.dumpTree(level + 1, text); } } else { // leaf text.append(": "); if (!Utils.isMissingValue(m_scoreNumeric)) { text.append(m_scoreNumeric); } else { text.append(m_scoreString + " "); if (m_scoreDistributions.size() > 0) { text.append("["); for (ScoreDistribution s : m_scoreDistributions) { text.append(s); } text.append("]"); } else { text.append(m_scoreString); } } } } /** * Score an incoming instance. Invokes a missing value handling strategy. * * @param instance a vector of incoming attribute and derived field values. * @param classAtt the class attribute * @return a predicted probability distribution. * @throws Exception if something goes wrong. */ protected double[] score(double[] instance, Attribute classAtt) throws Exception { double[] preds = null; if (classAtt.isNumeric()) { preds = new double[1]; } else { preds = new double[classAtt.numValues()]; } // leaf? 
if (m_childNodes.size() == 0) { doLeaf(classAtt, preds); } else { // process the children switch (TreeModel.this.m_missingValueStrategy) { case NONE: preds = missingValueStrategyNone(instance, classAtt); break; case LASTPREDICTION: preds = missingValueStrategyLastPrediction(instance, classAtt); break; case DEFAULTCHILD: preds = missingValueStrategyDefaultChild(instance, classAtt); break; default: throw new Exception("[TreeModel] not implemented!"); } } return preds; } /** * Compute the predictions for a leaf. * * @param classAtt the class attribute * @param preds an array to hold the predicted probabilities. * @throws Exception if something goes wrong. */ protected void doLeaf(Attribute classAtt, double[] preds) throws Exception { if (classAtt.isNumeric()) { preds[0] = m_scoreNumeric; } else { if (m_scoreDistributions.size() == 0) { preds[m_scoreIndex] = 1.0; } else { // collect confidences from the score distributions for (ScoreDistribution s : m_scoreDistributions) { preds[s.getClassLabelIndex()] = s.getConfidence(); } } } } /** * Evaluate on the basis of the no true child strategy. * * @param classAtt the class attribute. * @param preds an array to hold the predicted probabilities. * @throws Exception if something goes wrong. */ protected void doNoTrueChild(Attribute classAtt, double[] preds) throws Exception { if (TreeModel.this.m_noTrueChildStrategy == NoTrueChildStrategy.RETURNNULLPREDICTION) { for (int i = 0; i < classAtt.numValues(); i++) { preds[i] = Utils.missingValue(); } } else { // return the predictions at this node doLeaf(classAtt, preds); } } /** * Compute predictions and optionally invoke the weighted confidence missing * value handling strategy. * * @param instance the incoming vector of attribute and derived field * values. * @param classAtt the class attribute. * @return the predicted probability distribution. * @throws Exception if something goes wrong. 
*/ protected double[] missingValueStrategyWeightedConfidence( double[] instance, Attribute classAtt) throws Exception { if (classAtt.isNumeric()) { throw new Exception( "[TreeNode] missing value strategy weighted confidence, " + "but class is numeric!"); } double[] preds = null; TreeNode trueNode = null; boolean strategyInvoked = false; int nodeCount = 0; // look at the evaluation of the child predicates for (TreeNode c : m_childNodes) { if (c.getPredicate().evaluate(instance) == Predicate.Eval.TRUE) { // note the first child to evaluate to true if (trueNode == null) { trueNode = c; } nodeCount++; } else if (c.getPredicate().evaluate(instance) == Predicate.Eval.UNKNOWN) { strategyInvoked = true; nodeCount++; } } if (strategyInvoked) { // we expect to combine nodeCount distributions double[][] dists = new double[nodeCount][]; double[] weights = new double[nodeCount]; // collect the distributions and weights int count = 0; for (TreeNode c : m_childNodes) { if (c.getPredicate().evaluate(instance) == Predicate.Eval.TRUE || c.getPredicate().evaluate(instance) == Predicate.Eval.UNKNOWN) { weights[count] = c.getRecordCount(); if (Utils.isMissingValue(weights[count])) { throw new Exception( "[TreeNode] weighted confidence missing value " + "strategy invoked, but no record count defined for node " + c.getID()); } dists[count++] = c.score(instance, classAtt); } } // do the combination preds = new double[classAtt.numValues()]; for (int i = 0; i < classAtt.numValues(); i++) { for (int j = 0; j < nodeCount; j++) { preds[i] += ((weights[j] / m_recordCount) * dists[j][i]); } } } else { if (trueNode != null) { preds = trueNode.score(instance, classAtt); } else { doNoTrueChild(classAtt, preds); } } return preds; } protected double[] freqCountsForAggNodesStrategy(double[] instance, Attribute classAtt) throws Exception { double[] counts = new double[classAtt.numValues()]; if (m_childNodes.size() > 0) { // collect the counts for (TreeNode c : m_childNodes) { if (c.getPredicate().evaluate(instance) == Predicate.Eval.TRUE || c.getPredicate().evaluate(instance) == Predicate.Eval.UNKNOWN) { double[] temp = c.freqCountsForAggNodesStrategy(instance, classAtt); for (int i = 0; i < classAtt.numValues(); i++) { counts[i] += temp[i]; } } } } else { // process the score distributions if (m_scoreDistributions.size() == 0) { throw new Exception( "[TreeModel] missing value strategy aggregate nodes:" + " no score distributions at leaf " + m_ID); } for (ScoreDistribution s : m_scoreDistributions) { counts[s.getClassLabelIndex()] = s.getRecordCount(); } } return counts; } /** * Compute predictions and optionally invoke the aggregate nodes missing * value handling strategy. * * @param instance the incoming vector of attribute and derived field * values. * @param classAtt the class attribute. * @return the predicted probability distribution. * @throws Exception if something goes wrong. 
*/ protected double[] missingValueStrategyAggregateNodes(double[] instance, Attribute classAtt) throws Exception { if (classAtt.isNumeric()) { throw new Exception( "[TreeNode] missing value strategy aggregate nodes, " + "but class is numeric!"); } double[] preds = null; TreeNode trueNode = null; boolean strategyInvoked = false; // look at the evaluation of the child predicates for (TreeNode c : m_childNodes) { if (c.getPredicate().evaluate(instance) == Predicate.Eval.TRUE) { // note the first child to evaluate to true if (trueNode == null) { trueNode = c; } } else if (c.getPredicate().evaluate(instance) == Predicate.Eval.UNKNOWN) { strategyInvoked = true; } } if (strategyInvoked) { double[] aggregatedCounts = freqCountsForAggNodesStrategy(instance, classAtt); // normalize Utils.normalize(aggregatedCounts); preds = aggregatedCounts; } else { if (trueNode != null) { preds = trueNode.score(instance, classAtt); } else { doNoTrueChild(classAtt, preds); } } return preds; } /** * Compute predictions and optionally invoke the default child missing value * handling strategy. * * @param instance the incoming vector of attribute and derived field * values. * @param classAtt the class attribute. * @return the predicted probability distribution. * @throws Exception if something goes wrong. */ protected double[] missingValueStrategyDefaultChild(double[] instance, Attribute classAtt) throws Exception { double[] preds = null; boolean strategyInvoked = false; // look for a child whose predicate evaluates to TRUE for (TreeNode c : m_childNodes) { if (c.getPredicate().evaluate(instance) == Predicate.Eval.TRUE) { preds = c.score(instance, classAtt); break; } else if (c.getPredicate().evaluate(instance) == Predicate.Eval.UNKNOWN) { strategyInvoked = true; } } // no true child found if (preds == null) { if (!strategyInvoked) { doNoTrueChild(classAtt, preds); } else { // do the strategy // NOTE: we don't actually implement the missing value penalty since // we always return a full probability distribution. if (m_defaultChild != null) { preds = m_defaultChild.score(instance, classAtt); } else { throw new Exception( "[TreeNode] missing value strategy is defaultChild, but " + "no default child has been specified in node " + m_ID); } } } return preds; } /** * Compute predictions and optionally invoke the last prediction missing * value handling strategy. * * @param instance the incoming vector of attribute and derived field * values. * @param classAtt the class attribute. * @return the predicted probability distribution. * @throws Exception if something goes wrong. */ protected double[] missingValueStrategyLastPrediction(double[] instance, Attribute classAtt) throws Exception { double[] preds = null; boolean strategyInvoked = false; // look for a child whose predicate evaluates to TRUE for (TreeNode c : m_childNodes) { if (c.getPredicate().evaluate(instance) == Predicate.Eval.TRUE) { preds = c.score(instance, classAtt); break; } else if (c.getPredicate().evaluate(instance) == Predicate.Eval.UNKNOWN) { strategyInvoked = true; } } // no true child found if (preds == null) { preds = new double[classAtt.numValues()]; if (!strategyInvoked) { // no true child doNoTrueChild(classAtt, preds); } else { // do the strategy doLeaf(classAtt, preds); } } return preds; } /** * Compute predictions and optionally invoke the null prediction missing * value handling strategy. * * @param instance the incoming vector of attribute and derived field * values. * @param classAtt the class attribute. 
* @return the predicted probability distribution. * @throws Exception if something goes wrong. */ protected double[] missingValueStrategyNullPrediction(double[] instance, Attribute classAtt) throws Exception { double[] preds = null; boolean strategyInvoked = false; // look for a child whose predicate evaluates to TRUE for (TreeNode c : m_childNodes) { if (c.getPredicate().evaluate(instance) == Predicate.Eval.TRUE) { preds = c.score(instance, classAtt); break; } else if (c.getPredicate().evaluate(instance) == Predicate.Eval.UNKNOWN) { strategyInvoked = true; } } // no true child found if (preds == null) { preds = new double[classAtt.numValues()]; if (!strategyInvoked) { doNoTrueChild(classAtt, preds); } else { // do the strategy for (int i = 0; i < classAtt.numValues(); i++) { preds[i] = Utils.missingValue(); } } } return preds; } /** * Compute predictions and optionally invoke the "none" missing value * handling strategy (invokes no true child). * * @param instance the incoming vector of attribute and derived field * values. * @param classAtt the class attribute. * @return the predicted probability distribution. * @throws Exception if something goes wrong. */ protected double[] missingValueStrategyNone(double[] instance, Attribute classAtt) throws Exception { double[] preds = null; // look for a child whose predicate evaluates to TRUE for (TreeNode c : m_childNodes) { if (c.getPredicate().evaluate(instance) == Predicate.Eval.TRUE) { preds = c.score(instance, classAtt); break; } } if (preds == null) { preds = new double[classAtt.numValues()]; // no true child strategy doNoTrueChild(classAtt, preds); } return preds; } } /** * Enumerated type for the mining function */ enum MiningFunction { CLASSIFICATION, REGRESSION; } enum MissingValueStrategy { LASTPREDICTION("lastPrediction"), NULLPREDICTION("nullPrediction"), DEFAULTCHILD( "defaultChild"), WEIGHTEDCONFIDENCE("weightedConfidence"), AGGREGATENODES( "aggregateNodes"), NONE("none"); private final String m_stringVal; MissingValueStrategy(String name) { m_stringVal = name; } @Override public String toString() { return m_stringVal; } } enum NoTrueChildStrategy { RETURNNULLPREDICTION("returnNullPrediction"), RETURNLASTPREDICTION( "returnLastPrediction"); private final String m_stringVal; NoTrueChildStrategy(String name) { m_stringVal = name; } @Override public String toString() { return m_stringVal; } } enum SplitCharacteristic { BINARYSPLIT("binarySplit"), MULTISPLIT("multiSplit"); private final String m_stringVal; SplitCharacteristic(String name) { m_stringVal = name; } @Override public String toString() { return m_stringVal; } } /** The mining function */ protected MiningFunction m_functionType = MiningFunction.CLASSIFICATION; /** The missing value strategy */ protected MissingValueStrategy m_missingValueStrategy = MissingValueStrategy.NONE; /** * The missing value penalty (if defined). We don't actually make use of this * since we always return full probability distributions. 
*/ protected double m_missingValuePenalty = Utils.missingValue(); /** The no true child strategy to use */ protected NoTrueChildStrategy m_noTrueChildStrategy = NoTrueChildStrategy.RETURNNULLPREDICTION; /** The splitting type */ protected SplitCharacteristic m_splitCharacteristic = SplitCharacteristic.MULTISPLIT; /** The root of the tree */ protected TreeNode m_root; public TreeModel(Element model, Instances dataDictionary, MiningSchema miningSchema) throws Exception { super(dataDictionary, miningSchema); if (!getPMMLVersion().equals("3.2")) { // TODO: might have to throw an exception and only support 3.2 } String fn = model.getAttribute("functionName"); if (fn.equals("regression")) { m_functionType = MiningFunction.REGRESSION; } // get the missing value strategy (if any) String missingVS = model.getAttribute("missingValueStrategy"); if (missingVS != null && missingVS.length() > 0) { for (MissingValueStrategy m : MissingValueStrategy.values()) { if (m.toString().equals(missingVS)) { m_missingValueStrategy = m; break; } } } // get the missing value penalty (if any) String missingP = model.getAttribute("missingValuePenalty"); if (missingP != null && missingP.length() > 0) { // try to parse as a number try { m_missingValuePenalty = Double.parseDouble(missingP); } catch (NumberFormatException ex) { System.err.println("[TreeModel] WARNING: " + "couldn't parse supplied missingValuePenalty as a number"); } } String splitC = model.getAttribute("splitCharacteristic"); if (splitC != null && splitC.length() > 0) { for (SplitCharacteristic s : SplitCharacteristic.values()) { if (s.toString().equals(splitC)) { m_splitCharacteristic = s; break; } } } // find the root node of the tree NodeList children = model.getChildNodes(); for (int i = 0; i < children.getLength(); i++) { Node child = children.item(i); if (child.getNodeType() == Node.ELEMENT_NODE) { String tagName = ((Element) child).getTagName(); if (tagName.equals("Node")) { m_root = new TreeNode((Element) child, miningSchema); break; } } } } /** * Classifies the given test instance. The instance has to belong to a dataset * when it's being classified. 
* * @param inst the instance to be classified * @return the predicted class probability distribution, or an array of * Utils.missingValue() entries if no prediction is made * @exception Exception if an error occurred during the prediction */ @Override public double[] distributionForInstance(Instance inst) throws Exception { if (!m_initialized) { mapToMiningSchema(inst.dataset()); } double[] preds = null; if (m_miningSchema.getFieldsAsInstances().classAttribute().isNumeric()) { preds = new double[1]; } else { preds = new double[m_miningSchema.getFieldsAsInstances().classAttribute() .numValues()]; } double[] incoming = m_fieldsMap.instanceToSchema(inst, m_miningSchema); preds = m_root.score(incoming, m_miningSchema.getFieldsAsInstances() .classAttribute()); return preds; } @Override public String toString() { StringBuffer temp = new StringBuffer(); temp.append("PMML version " + getPMMLVersion()); if (!getCreatorApplication().equals("?")) { temp.append("\nApplication: " + getCreatorApplication()); } temp.append("\nPMML Model: TreeModel"); temp.append("\n\n"); temp.append(m_miningSchema); temp.append("Split-type: " + m_splitCharacteristic + "\n"); temp.append("No true child strategy: " + m_noTrueChildStrategy + "\n"); temp.append("Missing value strategy: " + m_missingValueStrategy + "\n"); temp.append(m_root.toString()); return temp.toString(); } @Override public String graph() throws Exception { StringBuffer text = new StringBuffer(); text.append("digraph PMMTree {\n"); m_root.dumpGraph(text); text.append("}\n"); return text.toString(); } @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } @Override public int graphType() { return Drawable.TREE; } }
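The missing-value strategies above differ mainly in how they combine or fall back on child distributions. As a rough, self-contained sketch of the weightedConfidence combination (assumed names such as childDists, not the Weka API), the operation reduces to a record-count-weighted average of the child class distributions:

// Minimal sketch of the weightedConfidence combination, assuming each
// participating child contributes its class distribution weighted by its
// share of the parent's record count. All names here are illustrative.
public final class WeightedConfidenceSketch {

  public static double[] combine(double[][] childDists, double[] childRecordCounts,
      double parentRecordCount) {
    int numClasses = childDists[0].length;
    double[] preds = new double[numClasses];
    for (int j = 0; j < childDists.length; j++) {
      double weight = childRecordCounts[j] / parentRecordCount;
      for (int i = 0; i < numClasses; i++) {
        preds[i] += weight * childDists[j][i];
      }
    }
    return preds;
  }

  public static void main(String[] args) {
    double[][] dists = { { 0.8, 0.2 }, { 0.3, 0.7 } };
    double[] counts = { 60.0, 40.0 };
    // prints approximately [0.6, 0.4]
    System.out.println(java.util.Arrays.toString(combine(dists, counts, 100.0)));
  }
}

This also makes clear why the strategy above throws when a participating node lacks a record count: without it, that node's weight in the average is undefined.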
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/pmml
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/pmml/producer/AbstractPMMLProducerHelper.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * AbstractPMMLProducerHelper.java * Copyright (C) 2014 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.pmml.producer; import weka.core.Attribute; import weka.core.Instances; import weka.core.Version; import weka.core.pmml.jaxbbindings.Application; import weka.core.pmml.jaxbbindings.DataDictionary; import weka.core.pmml.jaxbbindings.DataField; import weka.core.pmml.jaxbbindings.Header; import weka.core.pmml.jaxbbindings.OPTYPE; import weka.core.pmml.jaxbbindings.PMML; import weka.core.pmml.jaxbbindings.Value; /** * Abstract base class for PMMLProducer helper classes to extend. * * @author David Persons * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision: $ */ public abstract class AbstractPMMLProducerHelper { /** PMML version that the jaxbbindings were created from */ public static final String PMML_VERSION = "4.1"; /** * Initializes a PMML object with header information. * * @return an initialized PMML object */ public static PMML initPMML() { PMML pmml = new PMML(); pmml.setVersion(PMML_VERSION); Header header = new Header(); header.setCopyright("WEKA"); header.setApplication(new Application("WEKA", Version.VERSION)); pmml.setHeader(header); return pmml; } /** * Adds a data dictionary to the supplied PMML object. * * @param trainHeader the training data header - i.e. the header of the data * that enters the buildClassifier() method of the model in question * @param pmml the PMML object to add the data dictionary to */ public static void addDataDictionary(Instances trainHeader, PMML pmml) { DataDictionary dictionary = new DataDictionary(); for (int i = 0; i < trainHeader.numAttributes(); i++) { String name = trainHeader.attribute(i).name(); OPTYPE optype = getOPTYPE(trainHeader.attribute(i).type()); DataField field = new DataField(name, optype); if (trainHeader.attribute(i).isNominal()) { for (int j = 0; j < trainHeader.attribute(i).numValues(); j++) { Value val = new Value(trainHeader.attribute(i).value(j)); field.addValue(val); } } dictionary.addDataField(field); } pmml.setDataDictionary(dictionary); } /** * Returns an OPTYPE for a weka attribute type. Note that PMML only supports * categorical, continuous and ordinal types. * * @param wekaType the type of the weka attribute * @return the PMML type */ public static OPTYPE getOPTYPE(int wekaType) { switch (wekaType) { case Attribute.NUMERIC: case Attribute.DATE: return OPTYPE.CONTINUOUS; default: return OPTYPE.CATEGORICAL; } } /** * Extracts the original attribute name and value from the name of a binary * indicator attribute created by unsupervised NominalToBinary. Handles the * case where one or more equals signs might be present in the original * attribute name. * * @param train the original, unfiltered training header * @param derived the derived attribute from which to extract the original * name and value from the name created by NominalToBinary. 
* @return a two-element array holding the original attribute name (element 0) * and the attribute value (element 1) */ public static String[] getNameAndValueFromUnsupervisedNominalToBinaryDerivedAttribute( Instances train, Attribute derived) { String[] nameAndVal = new String[2]; // need to try and locate the equals sign that separates the attribute name // from the value boolean success = false; String derivedName = derived.name(); int currentEqualsIndex = derivedName.indexOf('='); String leftSide = derivedName.substring(0, currentEqualsIndex); String rightSide = derivedName.substring(currentEqualsIndex + 1, derivedName.length()); while (!success) { if (train.attribute(leftSide) != null) { nameAndVal[0] = leftSide; nameAndVal[1] = rightSide; success = true; } else { // try the next equals sign... leftSide += ("=" + rightSide.substring(0, rightSide.indexOf('='))); rightSide = rightSide.substring(rightSide.indexOf('=') + 1, rightSide.length()); } } return nameAndVal; } }
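The trickiest part of this helper is recovering the original attribute name from a NominalToBinary-derived name of the form name=value, since the original name may itself contain equals signs. A hypothetical standalone version of the same left-to-right scan is sketched below; a Set stands in for looking names up in the training header, and, unlike the loop above (which relies on NominalToBinary guaranteeing a match), it terminates with null when nothing matches:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

// Standalone sketch of splitting "name=value" when the name may contain '='.
public final class DerivedNameSplitSketch {

  static String[] splitDerivedName(String derivedName, Set<String> knownAttributeNames) {
    int idx = derivedName.indexOf('=');
    while (idx >= 0) {
      String left = derivedName.substring(0, idx);
      if (knownAttributeNames.contains(left)) {
        // left side matches a real attribute: everything after '=' is the value
        return new String[] { left, derivedName.substring(idx + 1) };
      }
      idx = derivedName.indexOf('=', idx + 1); // try the next '='
    }
    return null; // no matching original attribute
  }

  public static void main(String[] args) {
    Set<String> names = new HashSet<>(Arrays.asList("a=b"));
    // "a=b=red" splits into name "a=b" and value "red"
    System.out.println(Arrays.toString(splitDerivedName("a=b=red", names)));
  }
}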
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/pmml
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/pmml/producer/LogisticProducerHelper.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * LogisticProducerHelper.java * Copyright (C) 2014 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.pmml.producer; import java.io.StringWriter; import java.math.BigInteger; import javax.xml.bind.JAXBContext; import javax.xml.bind.JAXBException; import javax.xml.bind.Marshaller; import weka.core.Attribute; import weka.core.Instances; import weka.core.pmml.jaxbbindings.DATATYPE; import weka.core.pmml.jaxbbindings.DerivedField; import weka.core.pmml.jaxbbindings.FIELDUSAGETYPE; import weka.core.pmml.jaxbbindings.LocalTransformations; import weka.core.pmml.jaxbbindings.MININGFUNCTION; import weka.core.pmml.jaxbbindings.MISSINGVALUETREATMENTMETHOD; import weka.core.pmml.jaxbbindings.MiningField; import weka.core.pmml.jaxbbindings.MiningSchema; import weka.core.pmml.jaxbbindings.NormDiscrete; import weka.core.pmml.jaxbbindings.NumericPredictor; import weka.core.pmml.jaxbbindings.OPTYPE; import weka.core.pmml.jaxbbindings.Output; import weka.core.pmml.jaxbbindings.OutputField; import weka.core.pmml.jaxbbindings.PMML; import weka.core.pmml.jaxbbindings.REGRESSIONNORMALIZATIONMETHOD; import weka.core.pmml.jaxbbindings.RegressionModel; import weka.core.pmml.jaxbbindings.RegressionTable; import weka.core.pmml.jaxbbindings.TransformationDictionary; /** * Helper class for producing PMML for a Logistic classifier. Not designed to be * used directly - you should call toPMML() on a trained Logistic classifier. 
* * @author David Persons * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision: $ */ public class LogisticProducerHelper extends AbstractPMMLProducerHelper { /** * Produce the PMML for a Logistic classifier * * @param train the training data used to build the Logistic model * @param structureAfterFiltering the structure of the training data after * filtering * @param par the parameters of the function(s) * @param numClasses the number of classes in the data * @return the PMML for the classifier */ public static String toPMML(Instances train, Instances structureAfterFiltering, double[][] par, int numClasses) { PMML pmml = initPMML(); addDataDictionary(train, pmml); String currentAttrName = null; TransformationDictionary transformDict = null; LocalTransformations localTransforms = null; MiningSchema schema = new MiningSchema(); for (int i = 0; i < structureAfterFiltering.numAttributes(); i++) { Attribute attr = structureAfterFiltering.attribute(i); Attribute originalAttr = train.attribute(attr.name()); if (i == structureAfterFiltering.classIndex()) { schema.addMiningFields(new MiningField(attr.name(), FIELDUSAGETYPE.PREDICTED)); } if (originalAttr == null) { // this must be a derived one if (localTransforms == null) { localTransforms = new LocalTransformations(); } if (transformDict == null) { transformDict = new TransformationDictionary(); } String[] nameAndValue = getNameAndValueFromUnsupervisedNominalToBinaryDerivedAttribute( train, attr); if (!nameAndValue[0].equals(currentAttrName)) { currentAttrName = nameAndValue[0]; if (i != structureAfterFiltering.classIndex()) { // add a mining field int mode = (int) train.meanOrMode(train.attribute(nameAndValue[0])); schema.addMiningFields(new MiningField(nameAndValue[0], FIELDUSAGETYPE.ACTIVE, MISSINGVALUETREATMENTMETHOD.AS_MODE, train .attribute(nameAndValue[0]).value(mode))); } } DerivedField derivedfield = new DerivedField(attr.name(), DATATYPE.DOUBLE, OPTYPE.CONTINUOUS); NormDiscrete normDiscrete = new NormDiscrete(nameAndValue[0], nameAndValue[1]); derivedfield.setNormDiscrete(normDiscrete); transformDict.addDerivedField(derivedfield); } else { // its either already numeric or was a binary nominal one if (i != structureAfterFiltering.classIndex()) { if (originalAttr.isNumeric()) { String mean = "" + train.meanOrMode(originalAttr); schema .addMiningFields(new MiningField(originalAttr.name(), FIELDUSAGETYPE.ACTIVE, MISSINGVALUETREATMENTMETHOD.AS_MEAN, mean)); } else { int mode = (int) train.meanOrMode(originalAttr); schema.addMiningFields(new MiningField(originalAttr.name(), FIELDUSAGETYPE.ACTIVE, MISSINGVALUETREATMENTMETHOD.AS_MODE, originalAttr.value(mode))); } } } } RegressionModel model = new RegressionModel(); if (transformDict != null) { pmml.setTransformationDictionary(transformDict); } model.addContent(schema); model.setFunctionName(MININGFUNCTION.CLASSIFICATION); model.setAlgorithmName("logisticRegression"); model.setModelType("logisticRegression"); model.setNormalizationMethod(REGRESSIONNORMALIZATIONMETHOD.SOFTMAX); Output output = new Output(); Attribute classAttribute = structureAfterFiltering.classAttribute(); for (int i = 0; i < classAttribute.numValues(); i++) { OutputField outputField = new OutputField(); outputField.setName(classAttribute.name()); outputField.setValue(classAttribute.value(i)); output.addOutputField(outputField); } model.addContent(output); for (int i = 0; i < numClasses - 1; i++) { RegressionTable table = new RegressionTable(structureAfterFiltering .classAttribute().value(i)); // 
coefficients int j = 1; for (int k = 0; k < structureAfterFiltering.numAttributes(); k++) { if (k != structureAfterFiltering.classIndex()) { Attribute attr = structureAfterFiltering.attribute(k); table.addNumericPredictor(new NumericPredictor(attr.name(), BigInteger.valueOf(1), par[j][i])); j++; } } table.setIntercept(par[0][i]); model.addContent(table); } pmml.addAssociationModelOrBaselineModelOrClusteringModes(model); try { StringWriter sw = new StringWriter(); JAXBContext jc = JAXBContext.newInstance(PMML.class); Marshaller marshaller = jc.createMarshaller(); marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true); marshaller.marshal(pmml, sw); return sw.toString(); } catch (JAXBException e) { e.printStackTrace(); } return ""; } }
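Note how the producer emits k-1 RegressionTables together with SOFTMAX normalization: this mirrors Logistic's multinomial parameterization, in which the omitted last class carries an implicit linear score of zero. A minimal sketch of turning such tables into a class distribution (illustrative names, not the jaxbbindings API; a real PMML consumer works from the marshalled document instead):

// Sketch: k-1 linear scores from the regression tables, an implicit 0 score
// for the last class, then softmax normalization. coeffs[j][a] plays the role
// of the NumericPredictor coefficients and intercepts[j] of the intercepts.
public final class SoftmaxScoreSketch {

  static double[] score(double[][] coeffs, double[] intercepts, double[] x) {
    int k = coeffs.length + 1;       // k-1 tables describe k classes
    double[] scores = new double[k]; // last entry stays 0.0
    for (int j = 0; j < k - 1; j++) {
      scores[j] = intercepts[j];
      for (int a = 0; a < x.length; a++) {
        scores[j] += coeffs[j][a] * x[a];
      }
    }
    double sum = 0.0;
    double[] probs = new double[k];
    for (int j = 0; j < k; j++) {
      probs[j] = Math.exp(scores[j]);
      sum += probs[j];
    }
    for (int j = 0; j < k; j++) {
      probs[j] /= sum;
    }
    return probs;
  }
}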
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/rules/DecisionTable.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * DecisionTable.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.rules; import java.util.Arrays; import java.util.BitSet; import java.util.Collections; import java.util.Enumeration; import java.util.Hashtable; import java.util.Random; import java.util.Vector; import weka.attributeSelection.ASEvaluation; import weka.attributeSelection.ASSearch; import weka.attributeSelection.BestFirst; import weka.attributeSelection.SubsetEvaluator; import weka.classifiers.AbstractClassifier; import weka.classifiers.Evaluation; import weka.classifiers.lazy.IBk; import weka.core.AdditionalMeasureProducer; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Remove; /** * <!-- globalinfo-start --> Class for building and using a simple decision table majority classifier.<br/> * <br/> * For more information see: <br/> * <br/> * Ron Kohavi: The Power of Decision Tables. In: 8th European Conference on Machine Learning, 174-189, 1995. * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;inproceedings{Kohavi1995, * author = {Ron Kohavi}, * booktitle = {8th European Conference on Machine Learning}, * pages = {174-189}, * publisher = {Springer}, * title = {The Power of Decision Tables}, * year = {1995} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -S &lt;search method specification&gt; * Full class name of search method, followed * by its options. * eg: "weka.attributeSelection.BestFirst -D 1" * (default weka.attributeSelection.BestFirst) * </pre> * * <pre> * -X &lt;number of folds&gt; * Use cross validation to evaluate features. * Use number of folds = 1 for leave one out CV. * (Default = leave one out CV) * </pre> * * <pre> * -E &lt;acc | rmse | mae | auc&gt; * Performance evaluation measure to use for selecting attributes. * (Default = accuracy for discrete class and rmse for numeric class) * </pre> * * <pre> * -I * Use nearest neighbour instead of global table majority. * </pre> * * <pre> * -R * Display decision table rules. * </pre> * * <pre> * Options specific to search method weka.attributeSelection.BestFirst: * </pre> * * <pre> * -P &lt;start set&gt; * Specify a starting set of attributes. * Eg. 1,3,5-7. 
* </pre> * * <pre> * -D &lt;0 = backward | 1 = forward | 2 = bi-directional&gt; * Direction of search. (default = 1). * </pre> * * <pre> * -N &lt;num&gt; * Number of non-improving nodes to * consider before terminating search. * </pre> * * <pre> * -S &lt;num&gt; * Size of lookup cache for evaluated subsets. * Expressed as a multiple of the number of * attributes in the data set. (default = 1) * </pre> * * <!-- options-end --> * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision$ */ public class DecisionTable extends AbstractClassifier implements OptionHandler, WeightedInstancesHandler, AdditionalMeasureProducer, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 2888557078165701326L; /** The hashtable used to hold training instances */ protected Hashtable<DecisionTableHashKey, double[]> m_entries; /** The class priors to use when there is no match in the table */ protected double[] m_classPriorCounts; protected double[] m_classPriors; /** Holds the final feature set */ protected int[] m_decisionFeatures; /** Discretization filter */ protected Filter m_disTransform; /** Filter used to remove columns discarded by feature selection */ protected Remove m_delTransform; /** IB1 used to classify non matching instances rather than majority class */ protected IBk m_ibk; /** Holds the original training instances */ protected Instances m_theInstances; /** Holds the final feature selected set of instances */ protected Instances m_dtInstances; /** The number of attributes in the dataset */ protected int m_numAttributes; /** The number of instances in the dataset */ private int m_numInstances; /** Class is nominal */ protected boolean m_classIsNominal; /** Use the IBk classifier rather than majority class */ protected boolean m_useIBk; /** Display Rules */ protected boolean m_displayRules; /** Number of folds for cross validating feature sets */ private int m_CVFolds; /** Random numbers for use in cross validation */ private Random m_rr; /** Holds the majority class */ protected double m_majority; /** The search method to use */ protected ASSearch m_search = new BestFirst(); /** Our own internal evaluator */ protected ASEvaluation m_evaluator; /** The evaluation object used to evaluate subsets */ protected Evaluation m_evaluation; /** default is accuracy for discrete class and RMSE for numeric class */ public static final int EVAL_DEFAULT = 1; public static final int EVAL_ACCURACY = 2; public static final int EVAL_RMSE = 3; public static final int EVAL_MAE = 4; public static final int EVAL_AUC = 5; public static final Tag[] TAGS_EVALUATION = { new Tag(EVAL_DEFAULT, "Default: accuracy (discrete class); RMSE (numeric class)"), new Tag(EVAL_ACCURACY, "Accuracy (discrete class only"), new Tag(EVAL_RMSE, "RMSE (of the class probabilities for discrete class)"), new Tag(EVAL_MAE, "MAE (of the class probabilities for discrete class)"), new Tag(EVAL_AUC, "AUC (area under the ROC curve - discrete class only)") }; protected int m_evaluationMeasure = EVAL_DEFAULT; /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for building and using a simple decision table majority " + "classifier.\n\n" + "For more information see: \n\n" + this.getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed information about the technical background of this class, e.g., paper reference or book 
this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Ron Kohavi"); result.setValue(Field.TITLE, "The Power of Decision Tables"); result.setValue(Field.BOOKTITLE, "8th European Conference on Machine Learning"); result.setValue(Field.YEAR, "1995"); result.setValue(Field.PAGES, "174-189"); result.setValue(Field.PUBLISHER, "Springer"); return result; } /** * Inserts an instance into the hash table * * @param inst * instance to be inserted * @param instA * to create the hash key from * @throws Exception * if the instance can't be inserted */ private void insertIntoTable(final Instance inst, final double[] instA) throws Exception { double[] tempClassDist2; double[] newDist; DecisionTableHashKey thekey; if (instA != null) { thekey = new DecisionTableHashKey(instA); } else { thekey = new DecisionTableHashKey(inst, inst.numAttributes(), false); } // see if this one is already in the table tempClassDist2 = this.m_entries.get(thekey); if (tempClassDist2 == null) { if (this.m_classIsNominal) { newDist = new double[this.m_theInstances.classAttribute().numValues()]; // Leplace estimation for (int i = 0; i < this.m_theInstances.classAttribute().numValues(); i++) { newDist[i] = 1.0; } newDist[(int) inst.classValue()] = inst.weight(); // add to the table this.m_entries.put(thekey, newDist); } else { newDist = new double[2]; newDist[0] = inst.classValue() * inst.weight(); newDist[1] = inst.weight(); // add to the table this.m_entries.put(thekey, newDist); } } else { // update the distribution for this instance if (this.m_classIsNominal) { tempClassDist2[(int) inst.classValue()] += inst.weight(); // update the table this.m_entries.put(thekey, tempClassDist2); } else { tempClassDist2[0] += (inst.classValue() * inst.weight()); tempClassDist2[1] += inst.weight(); // update the table this.m_entries.put(thekey, tempClassDist2); } } } /** * Classifies an instance for internal leave one out cross validation of feature sets * * @param instance * instance to be "left out" and classified * @param instA * feature values of the selected features for the instance * @return the classification of the instance * @throws Exception * if something goes wrong */ protected double evaluateInstanceLeaveOneOut(final Instance instance, final double[] instA) throws Exception { // System.err.println("---------------- superclass leave-one-out ------------"); DecisionTableHashKey thekey; double[] tempDist; double[] normDist; thekey = new DecisionTableHashKey(instA); if (this.m_classIsNominal) { // if this one is not in the table if ((tempDist = this.m_entries.get(thekey)) == null) { throw new Error("This should never happen!"); } else { normDist = new double[tempDist.length]; System.arraycopy(tempDist, 0, normDist, 0, tempDist.length); normDist[(int) instance.classValue()] -= instance.weight(); // update the table // first check to see if the class counts are all zero now boolean ok = false; for (double element : normDist) { if (Utils.gr(element, 1.0)) { ok = true; break; } } // downdate the class prior counts this.m_classPriorCounts[(int) instance.classValue()] -= instance.weight(); double[] classPriors = this.m_classPriorCounts.clone(); Utils.normalize(classPriors); if (!ok) { // majority class normDist = classPriors; } this.m_classPriorCounts[(int) instance.classValue()] += instance.weight(); // if (ok) { 
Utils.normalize(normDist); if (this.m_evaluationMeasure == EVAL_AUC) { this.m_evaluation.evaluateModelOnceAndRecordPrediction(normDist, instance); } else { this.m_evaluation.evaluateModelOnce(normDist, instance); } return Utils.maxIndex(normDist); /* * } else { normDist = new double [normDist.length]; normDist[(int)m_majority] = 1.0; if * (m_evaluationMeasure == EVAL_AUC) { m_evaluation.evaluateModelOnceAndRecordPrediction(normDist, * instance); } else { m_evaluation.evaluateModelOnce(normDist, instance); } return m_majority; } */ } // return Utils.maxIndex(tempDist); } else { // see if this one is already in the table if ((tempDist = this.m_entries.get(thekey)) != null) { normDist = new double[tempDist.length]; System.arraycopy(tempDist, 0, normDist, 0, tempDist.length); normDist[0] -= (instance.classValue() * instance.weight()); normDist[1] -= instance.weight(); if (Utils.eq(normDist[1], 0.0)) { double[] temp = new double[1]; temp[0] = this.m_majority; this.m_evaluation.evaluateModelOnce(temp, instance); return this.m_majority; } else { double[] temp = new double[1]; temp[0] = normDist[0] / normDist[1]; this.m_evaluation.evaluateModelOnce(temp, instance); return temp[0]; } } else { throw new Error("This should never happen!"); } } // shouldn't get here // return 0.0; } /** * Calculates the accuracy on a test fold for internal cross validation of feature sets * * @param fold * set of instances to be "left out" and classified * @param fs * currently selected feature set * @return the accuracy for the fold * @throws Exception * if something goes wrong */ protected double evaluateFoldCV(final Instances fold, final int[] fs) throws Exception { int i; int numFold = fold.numInstances(); int numCl = this.m_theInstances.classAttribute().numValues(); double[][] class_distribs = new double[numFold][numCl]; double[] instA = new double[fs.length]; double[] normDist; DecisionTableHashKey thekey; double acc = 0.0; int classI = this.m_theInstances.classIndex(); Instance inst; if (this.m_classIsNominal) { normDist = new double[numCl]; } else { normDist = new double[2]; } // first *remove* instances for (i = 0; i < numFold; i++) { inst = fold.instance(i); for (int j = 0; j < fs.length; j++) { if (fs[j] == classI) { instA[j] = Double.MAX_VALUE; // missing for the class } else if (inst.isMissing(fs[j])) { instA[j] = Double.MAX_VALUE; } else { instA[j] = inst.value(fs[j]); } } thekey = new DecisionTableHashKey(instA); if ((class_distribs[i] = this.m_entries.get(thekey)) == null) { throw new Error("This should never happen!"); } else { if (this.m_classIsNominal) { class_distribs[i][(int) inst.classValue()] -= inst.weight(); } else { class_distribs[i][0] -= (inst.classValue() * inst.weight()); class_distribs[i][1] -= inst.weight(); } } this.m_classPriorCounts[(int) inst.classValue()] -= inst.weight(); } double[] classPriors = this.m_classPriorCounts.clone(); Utils.normalize(classPriors); // now classify instances for (i = 0; i < numFold; i++) { inst = fold.instance(i); System.arraycopy(class_distribs[i], 0, normDist, 0, normDist.length); if (this.m_classIsNominal) { boolean ok = false; for (double element : normDist) { if (Utils.gr(element, 1.0)) { ok = true; break; } } if (!ok) { // majority class normDist = classPriors.clone(); } // if (ok) { Utils.normalize(normDist); if (this.m_evaluationMeasure == EVAL_AUC) { this.m_evaluation.evaluateModelOnceAndRecordPrediction(normDist, inst); } else { this.m_evaluation.evaluateModelOnce(normDist, inst); } /* * } else { normDist[(int)m_majority] = 1.0; if 
(m_evaluationMeasure == EVAL_AUC) { * m_evaluation.evaluateModelOnceAndRecordPrediction(normDist, inst); } else { * m_evaluation.evaluateModelOnce(normDist, inst); } } */ } else { if (Utils.eq(normDist[1], 0.0)) { double[] temp = new double[1]; temp[0] = this.m_majority; this.m_evaluation.evaluateModelOnce(temp, inst); } else { double[] temp = new double[1]; temp[0] = normDist[0] / normDist[1]; this.m_evaluation.evaluateModelOnce(temp, inst); } } } // now re-insert instances for (i = 0; i < numFold; i++) { inst = fold.instance(i); this.m_classPriorCounts[(int) inst.classValue()] += inst.weight(); if (this.m_classIsNominal) { class_distribs[i][(int) inst.classValue()] += inst.weight(); } else { class_distribs[i][0] += (inst.classValue() * inst.weight()); class_distribs[i][1] += inst.weight(); } } return acc; } /** * Evaluates a feature subset by cross validation * * @param feature_set * the subset to be evaluated * @param num_atts * the number of attributes in the subset * @return the estimated accuracy * @throws Exception * if subset can't be evaluated */ protected double estimatePerformance(final BitSet feature_set, final int num_atts) throws Exception { this.m_evaluation = new Evaluation(this.m_theInstances); int i; int[] fs = new int[num_atts]; double[] instA = new double[num_atts]; int classI = this.m_theInstances.classIndex(); int index = 0; for (i = 0; i < this.m_numAttributes; i++) { if (feature_set.get(i)) { fs[index++] = i; } } // create new hash table this.m_entries = new Hashtable<>((int) (this.m_theInstances.numInstances() * 1.5)); // insert instances into the hash table for (i = 0; i < this.m_numInstances; i++) { Instance inst = this.m_theInstances.instance(i); for (int j = 0; j < fs.length; j++) { if (fs[j] == classI) { instA[j] = Double.MAX_VALUE; // missing for the class } else if (inst.isMissing(fs[j])) { instA[j] = Double.MAX_VALUE; } else { instA[j] = inst.value(fs[j]); } } this.insertIntoTable(inst, instA); } if (this.m_CVFolds == 1) { // calculate leave one out error for (i = 0; i < this.m_numInstances; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } Instance inst = this.m_theInstances.instance(i); for (int j = 0; j < fs.length; j++) { if (fs[j] == classI) { instA[j] = Double.MAX_VALUE; // missing for the class } else if (inst.isMissing(fs[j])) { instA[j] = Double.MAX_VALUE; } else { instA[j] = inst.value(fs[j]); } } this.evaluateInstanceLeaveOneOut(inst, instA); } } else { this.m_theInstances.randomize(this.m_rr); this.m_theInstances.stratify(this.m_CVFolds); // calculate 10 fold cross validation error for (i = 0; i < this.m_CVFolds; i++) { Instances insts = this.m_theInstances.testCV(this.m_CVFolds, i); this.evaluateFoldCV(insts, fs); } } switch (this.m_evaluationMeasure) { case EVAL_DEFAULT: if (this.m_classIsNominal) { return this.m_evaluation.pctCorrect(); } return -this.m_evaluation.rootMeanSquaredError(); case EVAL_ACCURACY: return this.m_evaluation.pctCorrect(); case EVAL_RMSE: return -this.m_evaluation.rootMeanSquaredError(); case EVAL_MAE: return -this.m_evaluation.meanAbsoluteError(); case EVAL_AUC: double[] classPriors = this.m_evaluation.getClassPriors(); Utils.normalize(classPriors); double weightedAUC = 0; for (i = 0; i < this.m_theInstances.classAttribute().numValues(); i++) { double tempAUC = this.m_evaluation.areaUnderROC(i); if (!Utils.isMissingValue(tempAUC)) { weightedAUC += (classPriors[i] * tempAUC); } else { // System.err.println("Undefined AUC!!"); } } 
return weightedAUC; } // shouldn't get here return 0.0; } /** * Resets the options. */ protected void resetOptions() { this.m_entries = null; this.m_decisionFeatures = null; this.m_useIBk = false; this.m_CVFolds = 1; this.m_displayRules = false; this.m_evaluationMeasure = EVAL_DEFAULT; } /** * Constructor for a DecisionTable */ public DecisionTable() { this.resetOptions(); } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<>(6); newVector.addElement(new Option("\tFull class name of search method, followed\n" + "\tby its options.\n" + "\teg: \"weka.attributeSelection.BestFirst -D 1\"\n" + "\t(default weka.attributeSelection.BestFirst)", "S", 1, "-S <search method specification>")); newVector.addElement(new Option("\tUse cross validation to evaluate features.\n" + "\tUse number of folds = 1 for leave one out CV.\n" + "\t(Default = leave one out CV)", "X", 1, "-X <number of folds>")); newVector.addElement(new Option("\tPerformance evaluation measure to use for selecting attributes.\n" + "\t(Default = accuracy for discrete class and rmse for numeric class)", "E", 1, "-E <acc | rmse | mae | auc>")); newVector.addElement(new Option("\tUse nearest neighbour instead of global table majority.", "I", 0, "-I")); newVector.addElement(new Option("\tDisplay decision table rules.\n", "R", 0, "-R")); newVector.addAll(Collections.list(super.listOptions())); newVector.addElement(new Option("", "", 0, "\nOptions specific to search method " + this.m_search.getClass().getName() + ":")); newVector.addAll(Collections.list(((OptionHandler) this.m_search).listOptions())); return newVector.elements(); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String crossValTipText() { return "Sets the number of folds for cross validation (1 = leave one out)."; } /** * Sets the number of folds for cross validation (1 = leave one out) * * @param folds * the number of folds */ public void setCrossVal(final int folds) { this.m_CVFolds = folds; } /** * Gets the number of folds for cross validation * * @return the number of cross validation folds */ public int getCrossVal() { return this.m_CVFolds; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String useIBkTipText() { return "Sets whether IBk should be used instead of the majority class."; } /** * Sets whether IBk should be used instead of the majority class * * @param ibk * true if IBk is to be used */ public void setUseIBk(final boolean ibk) { this.m_useIBk = ibk; } /** * Gets whether IBk is being used instead of the majority class * * @return true if IBk is being used */ public boolean getUseIBk() { return this.m_useIBk; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String displayRulesTipText() { return "Sets whether rules are to be printed."; } /** * Sets whether rules are to be printed * * @param rules * true if rules are to be printed */ public void setDisplayRules(final boolean rules) { this.m_displayRules = rules; } /** * Gets whether rules are being printed * * @return true if rules are being printed */ public boolean getDisplayRules() { return this.m_displayRules; } /** * Returns the tip text for 
this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String searchTipText() { return "The search method used to find good attribute combinations for the " + "decision table."; } /** * Sets the search method to use * * @param search */ public void setSearch(final ASSearch search) { this.m_search = search; } /** * Gets the current search method * * @return the search method used */ public ASSearch getSearch() { return this.m_search; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String evaluationMeasureTipText() { return "The measure used to evaluate the performance of attribute combinations " + "used in the decision table."; } /** * Gets the currently set performance evaluation measure used for selecting attributes for the decision table * * @return the performance evaluation measure */ public SelectedTag getEvaluationMeasure() { return new SelectedTag(this.m_evaluationMeasure, TAGS_EVALUATION); } /** * Sets the performance evaluation measure to use for selecting attributes for the decision table * * @param newMethod * the new performance evaluation metric to use */ public void setEvaluationMeasure(final SelectedTag newMethod) { if (newMethod.getTags() == TAGS_EVALUATION) { this.m_evaluationMeasure = newMethod.getSelectedTag().getID(); } } /** * Parses the options for this object. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -S &lt;search method specification&gt; * Full class name of search method, followed * by its options. * eg: "weka.attributeSelection.BestFirst -D 1" * (default weka.attributeSelection.BestFirst) * </pre> * * <pre> * -X &lt;number of folds&gt; * Use cross validation to evaluate features. * Use number of folds = 1 for leave one out CV. * (Default = leave one out CV) * </pre> * * <pre> * -E &lt;acc | rmse | mae | auc&gt; * Performance evaluation measure to use for selecting attributes. * (Default = accuracy for discrete class and rmse for numeric class) * </pre> * * <pre> * -I * Use nearest neighbour instead of global table majority. * </pre> * * <pre> * -R * Display decision table rules. * </pre> * * <pre> * Options specific to search method weka.attributeSelection.BestFirst: * </pre> * * <pre> * -P &lt;start set&gt; * Specify a starting set of attributes. * Eg. 1,3,5-7. * </pre> * * <pre> * -D &lt;0 = backward | 1 = forward | 2 = bi-directional&gt; * Direction of search. (default = 1). * </pre> * * <pre> * -N &lt;num&gt; * Number of non-improving nodes to * consider before terminating search. * </pre> * * <pre> * -S &lt;num&gt; * Size of lookup cache for evaluated subsets. * Expressed as a multiple of the number of * attributes in the data set. 
(default = 1) * </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String optionString; this.resetOptions(); super.setOptions(options); optionString = Utils.getOption('X', options); if (optionString.length() != 0) { this.m_CVFolds = Integer.parseInt(optionString); } this.m_useIBk = Utils.getFlag('I', options); this.m_displayRules = Utils.getFlag('R', options); optionString = Utils.getOption('E', options); if (optionString.length() != 0) { if (optionString.equals("acc")) { this.setEvaluationMeasure(new SelectedTag(EVAL_ACCURACY, TAGS_EVALUATION)); } else if (optionString.equals("rmse")) { this.setEvaluationMeasure(new SelectedTag(EVAL_RMSE, TAGS_EVALUATION)); } else if (optionString.equals("mae")) { this.setEvaluationMeasure(new SelectedTag(EVAL_MAE, TAGS_EVALUATION)); } else if (optionString.equals("auc")) { this.setEvaluationMeasure(new SelectedTag(EVAL_AUC, TAGS_EVALUATION)); } else { throw new IllegalArgumentException("Invalid evaluation measure"); } } String searchString = Utils.getOption('S', options); if (searchString.length() == 0) { searchString = weka.attributeSelection.BestFirst.class.getName(); } String[] searchSpec = Utils.splitOptions(searchString); if (searchSpec.length == 0) { throw new IllegalArgumentException("Invalid search specification string"); } String searchName = searchSpec[0]; searchSpec[0] = ""; this.setSearch(ASSearch.forName(searchName, searchSpec)); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<>(); options.add("-X"); options.add("" + this.m_CVFolds); if (this.m_evaluationMeasure != EVAL_DEFAULT) { options.add("-E"); switch (this.m_evaluationMeasure) { case EVAL_ACCURACY: options.add("acc"); break; case EVAL_RMSE: options.add("rmse"); break; case EVAL_MAE: options.add("mae"); break; case EVAL_AUC: options.add("auc"); break; } } if (this.m_useIBk) { options.add("-I"); } if (this.m_displayRules) { options.add("-R"); } options.add("-S"); options.add("" + this.getSearchSpec()); Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * Gets the search specification string, which contains the class name of the search method and any options to it * * @return the search string. */ protected String getSearchSpec() { ASSearch s = this.getSearch(); if (s instanceof OptionHandler) { return s.getClass().getName() + " " + Utils.joinOptions(((OptionHandler) s).getOptions()); } return s.getClass().getName(); } /** * Returns default capabilities of the classifier. 
* * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); if (this.m_evaluationMeasure != EVAL_ACCURACY && this.m_evaluationMeasure != EVAL_AUC) { result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); } result.enable(Capability.MISSING_CLASS_VALUES); return result; } private class DummySubsetEvaluator extends ASEvaluation implements SubsetEvaluator { /** for serialization */ private static final long serialVersionUID = 3927442457704974150L; @Override public void buildEvaluator(final Instances data) throws Exception { } @Override public double evaluateSubset(final BitSet subset) throws Exception { int fc = 0; for (int jj = 0; jj < DecisionTable.this.m_numAttributes; jj++) { if (subset.get(jj)) { fc++; } } return DecisionTable.this.estimatePerformance(subset, fc); } } /** * Sets up a dummy subset evaluator that basically just delegates evaluation to the estimatePerformance method in DecisionTable */ protected void setUpEvaluator() throws Exception { this.m_evaluator = new DummySubsetEvaluator(); } protected boolean m_saveMemory = true; /** * Generates the classifier. * * @param data * set of instances serving as training data * @throws Exception * if the classifier has not been generated successfully */ @Override public void buildClassifier(final Instances data) throws Exception { // can classifier handle the data? this.getCapabilities().testWithFail(data); // remove instances with missing class this.m_theInstances = new Instances(data); this.m_theInstances.deleteWithMissingClass(); this.m_rr = new Random(1); if (this.m_theInstances.classAttribute().isNominal()) {// Set up class priors this.m_classPriorCounts = new double[data.classAttribute().numValues()]; Arrays.fill(this.m_classPriorCounts, 1.0); for (int i = 0; i < data.numInstances(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } Instance curr = data.instance(i); this.m_classPriorCounts[(int) curr.classValue()] += curr.weight(); } this.m_classPriors = this.m_classPriorCounts.clone(); Utils.normalize(this.m_classPriors); } this.setUpEvaluator(); if (this.m_theInstances.classAttribute().isNumeric()) { this.m_disTransform = new weka.filters.unsupervised.attribute.Discretize(); this.m_classIsNominal = false; // use binned discretisation if the class is numeric ((weka.filters.unsupervised.attribute.Discretize) this.m_disTransform).setBins(10); ((weka.filters.unsupervised.attribute.Discretize) this.m_disTransform).setInvertSelection(true); // Discretize all attributes EXCEPT the class String rangeList = ""; rangeList += (this.m_theInstances.classIndex() + 1); ((weka.filters.unsupervised.attribute.Discretize) this.m_disTransform).setAttributeIndices(rangeList); } else { this.m_disTransform = new weka.filters.supervised.attribute.Discretize(); ((weka.filters.supervised.attribute.Discretize) this.m_disTransform).setUseBetterEncoding(true); this.m_classIsNominal = true; } this.m_disTransform.setInputFormat(this.m_theInstances); this.m_theInstances = Filter.useFilter(this.m_theInstances, this.m_disTransform); this.m_numAttributes = this.m_theInstances.numAttributes(); 
this.m_numInstances = this.m_theInstances.numInstances(); this.m_majority = this.m_theInstances.meanOrMode(this.m_theInstances.classAttribute()); // Perform the search int[] selected = this.m_search.search(this.m_evaluator, this.m_theInstances); this.m_decisionFeatures = new int[selected.length + 1]; System.arraycopy(selected, 0, this.m_decisionFeatures, 0, selected.length); this.m_decisionFeatures[this.m_decisionFeatures.length - 1] = this.m_theInstances.classIndex(); // reduce instances to selected features this.m_delTransform = new Remove(); this.m_delTransform.setInvertSelection(true); // set features to keep this.m_delTransform.setAttributeIndicesArray(this.m_decisionFeatures); this.m_delTransform.setInputFormat(this.m_theInstances); this.m_dtInstances = Filter.useFilter(this.m_theInstances, this.m_delTransform); // reset the number of attributes this.m_numAttributes = this.m_dtInstances.numAttributes(); // create hash table this.m_entries = new Hashtable<>((int) (this.m_dtInstances.numInstances() * 1.5)); // insert instances into the hash table for (int i = 0; i < this.m_numInstances; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } Instance inst = this.m_dtInstances.instance(i); this.insertIntoTable(inst, null); } // Replace the global table majority with nearest neighbour? if (this.m_useIBk) { this.m_ibk = new IBk(); this.m_ibk.buildClassifier(this.m_dtInstances); } // Save memory if (this.m_saveMemory) { this.m_theInstances = new Instances(this.m_theInstances, 0); this.m_dtInstances = new Instances(this.m_dtInstances, 0); } this.m_evaluation = null; } /** * Calculates the class membership probabilities for the given test instance. * * @param instance * the instance to be classified * @return predicted class probability distribution * @throws Exception * if distribution can't be computed */ @Override public double[] distributionForInstance(Instance instance) throws Exception { DecisionTableHashKey thekey; double[] tempDist; double[] normDist; this.m_disTransform.input(instance); this.m_disTransform.batchFinished(); instance = this.m_disTransform.output(); this.m_delTransform.input(instance); this.m_delTransform.batchFinished(); instance = this.m_delTransform.output(); thekey = new DecisionTableHashKey(instance, instance.numAttributes(), false); // if this one is not in the table if ((tempDist = this.m_entries.get(thekey)) == null) { if (this.m_useIBk) { tempDist = this.m_ibk.distributionForInstance(instance); } else { if (!this.m_classIsNominal) { tempDist = new double[1]; tempDist[0] = this.m_majority; } else { tempDist = this.m_classPriors.clone(); /* * tempDist = new double [m_theInstances.classAttribute().numValues()]; tempDist[(int)m_majority] = * 1.0; */ } } } else { if (!this.m_classIsNominal) { normDist = new double[1]; normDist[0] = (tempDist[0] / tempDist[1]); tempDist = normDist; } else { // normalise distribution normDist = new double[tempDist.length]; System.arraycopy(tempDist, 0, normDist, 0, tempDist.length); Utils.normalize(normDist); tempDist = normDist; } } return tempDist; } /** * Returns a string description of the features selected * * @return a string of features */ public String printFeatures() { int i; String s = ""; for (i = 0; i < this.m_decisionFeatures.length; i++) { if (i == 0) { s = "" + (this.m_decisionFeatures[i] + 1); } else { s += "," + (this.m_decisionFeatures[i] + 1); } } return s; } /** * Returns the number of rules * * @return the number of rules */ 
public double measureNumRules() { return this.m_entries.size(); } /** * Returns an enumeration of the additional measure names * * @return an enumeration of the measure names */ @Override public Enumeration<String> enumerateMeasures() { Vector<String> newVector = new Vector<>(1); newVector.addElement("measureNumRules"); return newVector.elements(); } /** * Returns the value of the named measure * * @param additionalMeasureName * the name of the measure to query for its value * @return the value of the named measure * @throws IllegalArgumentException * if the named measure is not supported */ @Override public double getMeasure(final String additionalMeasureName) { if (additionalMeasureName.compareToIgnoreCase("measureNumRules") == 0) { return this.measureNumRules(); } else { throw new IllegalArgumentException(additionalMeasureName + " not supported (DecisionTable)"); } } /** * Returns a description of the classifier. * * @return a description of the classifier as a string. */ @Override public String toString() { if (this.m_entries == null) { return "Decision Table: No model built yet."; } else { StringBuffer text = new StringBuffer(); text.append("Decision Table:" + "\n\nNumber of training instances: " + this.m_numInstances + "\nNumber of Rules : " + this.m_entries.size() + "\n"); if (this.m_useIBk) { text.append("Non matches covered by IB1.\n"); } else { text.append("Non matches covered by Majority class.\n"); } text.append(this.m_search.toString()); /* * text.append("Best first search for feature set,\nterminated after "+ * m_maxStale+" non improving subsets.\n"); */ text.append("Evaluation (for feature selection): CV "); if (this.m_CVFolds > 1) { text.append("(" + this.m_CVFolds + " fold) "); } else { text.append("(leave one out) "); } text.append("\nFeature set: " + this.printFeatures()); if (this.m_displayRules) { // find out the max column width int maxColWidth = 0; for (int i = 0; i < this.m_dtInstances.numAttributes(); i++) { if (this.m_dtInstances.attribute(i).name().length() > maxColWidth) { maxColWidth = this.m_dtInstances.attribute(i).name().length(); } if (this.m_classIsNominal || (i != this.m_dtInstances.classIndex())) { Enumeration<Object> e = this.m_dtInstances.attribute(i).enumerateValues(); while (e.hasMoreElements()) { String ss = (String) e.nextElement(); if (ss.length() > maxColWidth) { maxColWidth = ss.length(); } } } } text.append("\n\nRules:\n"); StringBuffer tm = new StringBuffer(); for (int i = 0; i < this.m_dtInstances.numAttributes(); i++) { if (this.m_dtInstances.classIndex() != i) { int d = maxColWidth - this.m_dtInstances.attribute(i).name().length(); tm.append(this.m_dtInstances.attribute(i).name()); for (int j = 0; j < d + 1; j++) { tm.append(" "); } } } tm.append(this.m_dtInstances.attribute(this.m_dtInstances.classIndex()).name() + " "); for (int i = 0; i < tm.length() + 10; i++) { text.append("="); } text.append("\n"); text.append(tm); text.append("\n"); for (int i = 0; i < tm.length() + 10; i++) { text.append("="); } text.append("\n"); Enumeration<DecisionTableHashKey> e = this.m_entries.keys(); while (e.hasMoreElements()) { DecisionTableHashKey tt = e.nextElement(); text.append(tt.toString(this.m_dtInstances, maxColWidth)); double[] ClassDist = this.m_entries.get(tt); if (this.m_classIsNominal) { int m = Utils.maxIndex(ClassDist); try { text.append(this.m_dtInstances.classAttribute().value(m) + "\n"); } catch (Exception ee) { System.out.println(ee.getMessage()); } } else { text.append((ClassDist[0] / ClassDist[1]) + "\n"); } } for (int i = 0; i < 
tm.length() + 10; i++) { text.append("="); } text.append("\n"); text.append("\n"); } return text.toString(); } } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. * * @param argv * the command-line options */ public static void main(final String[] argv) { runClassifier(new DecisionTable(), argv); } }
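The file above resolves a query by pushing the test instance through the stored discretization and attribute-removal filters, wrapping it in a DecisionTableHashKey, and looking it up in the rule table; on a miss it falls back to IBk (when the m_useIBk flag is set) or to the majority class / class priors. A minimal usage sketch against the public WEKA API follows; the ARFF path iris.arff is a placeholder, and the setUseIBk setter is assumed to match the m_useIBk flag above, so verify the exact name against this interruptible-weka build.

import java.util.Arrays;

import weka.classifiers.rules.DecisionTable;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class DecisionTableDemo {

  public static void main(String[] args) throws Exception {
    // Placeholder dataset path; any nominal-class ARFF file works.
    Instances data = DataSource.read("iris.arff");
    data.setClassIndex(data.numAttributes() - 1);

    DecisionTable dt = new DecisionTable();
    // Cover table misses with IBk instead of the majority class,
    // mirroring the m_useIBk branch in distributionForInstance above.
    dt.setUseIBk(true);
    dt.buildClassifier(data);

    // Query the table for the first training instance.
    Instance first = data.instance(0);
    double[] dist = dt.distributionForInstance(first);
    System.out.println("Rules in table: " + dt.measureNumRules());
    System.out.println("Selected features: " + dt.printFeatures());
    System.out.println("Distribution: " + Arrays.toString(dist));
  }
}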
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/rules/DecisionTableHashKey.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * DecisionTableHashKey.java * Copyright (C) 2007-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.rules; import java.io.Serializable; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * Class providing hash table keys for DecisionTable */ public class DecisionTableHashKey implements Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = 5674163500154964602L; /** Array of attribute values for an instance */ private double [] attributes; /** True for an index if the corresponding attribute value is missing. */ private boolean [] missing; /** The key */ private int key; /** * Constructor for a hashKey * * @param t an instance from which to generate a key * @param numAtts the number of attributes * @param ignoreClass if true treat the class as a normal attribute * @throws Exception if something goes wrong */ public DecisionTableHashKey(Instance t, int numAtts, boolean ignoreClass) throws Exception { int i; int cindex = t.classIndex(); key = -999; attributes = new double [numAtts]; missing = new boolean [numAtts]; for (i=0;i<numAtts;i++) { if (i == cindex && !ignoreClass) { missing[i] = true; } else { if ((missing[i] = t.isMissing(i)) == false) { attributes[i] = t.value(i); } } } } /** * Convert a hash entry to a string * * @param t the set of instances * @param maxColWidth width to make the fields * @return string representation of the hash entry */ public String toString(Instances t, int maxColWidth) { int i; int cindex = t.classIndex(); StringBuffer text = new StringBuffer(); for (i=0;i<attributes.length;i++) { if (i != cindex) { if (missing[i]) { text.append("?"); for (int j=0;j<maxColWidth;j++) { text.append(" "); } } else { String ss = t.attribute(i).value((int)attributes[i]); StringBuffer sb = new StringBuffer(ss); for (int j=0;j < (maxColWidth-ss.length()+1); j++) { sb.append(" "); } text.append(sb); } } } return text.toString(); } /** * Constructor for a hashKey * * @param t an array of feature values */ public DecisionTableHashKey(double [] t) { int i; int l = t.length; key = -999; attributes = new double [l]; missing = new boolean [l]; for (i=0;i<l;i++) { if (t[i] == Double.MAX_VALUE) { missing[i] = true; } else { missing[i] = false; attributes[i] = t[i]; } } } /** * Calculates a hash code * * @return the hash code as an integer */ public int hashCode() { int hv = 0; if (key != -999) return key; for (int i=0;i<attributes.length;i++) { if (missing[i]) { hv += (i*13); } else { hv += (i * 5 * (attributes[i]+1)); } } if (key == -999) { key = hv; } return hv; } /** * Tests if two instances are equal * * @param b a key to compare with * @return true if both objects are equal */ public boolean equals(Object b) { if ((b == null) || !(b.getClass().equals(this.getClass()))) { return false; } boolean ok = true; boolean l; 
if (b instanceof DecisionTableHashKey) { DecisionTableHashKey n = (DecisionTableHashKey)b; for (int i=0;i<attributes.length;i++) { l = n.missing[i]; if (missing[i] || l) { if ((missing[i] && !l) || (!missing[i] && l)) { ok = false; break; } } else { if (attributes[i] != n.attributes[i]) { ok = false; break; } } } } else { return false; } return ok; } /** * Prints the hash code */ public void print_hash_code() { System.out.println("Hash val: "+hashCode()); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
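DecisionTableHashKey is what makes the table lookup work: attribute values are copied into a double[], missing slots are flagged (the double[] constructor treats Double.MAX_VALUE as the missing marker), the class slot is always flagged unless ignoreClass is set, and the hash is computed once and cached in key (sentinel -999). A small sketch, using only the public double[] constructor shown above, of how identical feature vectors hash and compare equal:

import weka.classifiers.rules.DecisionTableHashKey;

public class HashKeyDemo {

  public static void main(String[] args) {
    // Double.MAX_VALUE marks a missing value in this constructor.
    double[] a = { 1.0, 0.0, Double.MAX_VALUE };
    double[] b = { 1.0, 0.0, Double.MAX_VALUE };
    double[] c = { 1.0, 2.0, Double.MAX_VALUE };

    DecisionTableHashKey ka = new DecisionTableHashKey(a);
    DecisionTableHashKey kb = new DecisionTableHashKey(b);
    DecisionTableHashKey kc = new DecisionTableHashKey(c);

    // Same values (including the missing flag) => same bucket, equal keys.
    System.out.println(ka.hashCode() == kb.hashCode()); // true
    System.out.println(ka.equals(kb));                  // true
    // A single differing attribute value breaks equality.
    System.out.println(ka.equals(kc));                  // false
  }
}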
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/rules/JRip.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * JRip.java * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.rules; import java.io.Serializable; import java.util.ArrayList; import java.util.Collections; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.core.AdditionalMeasureProducer; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Copyable; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.filters.Filter; import weka.filters.supervised.attribute.ClassOrder; /** * <!-- globalinfo-start --> This class implements a propositional rule learner, Repeated Incremental Pruning to Produce Error Reduction (RIPPER), which was proposed by William W. Cohen as an optimized version of IREP. <br/> * <br/> * The algorithm is briefly described as follows: <br/> * <br/> * Initialize RS = {}, and for each class from the less prevalent one to the more frequent one, DO: <br/> * <br/> * 1. Building stage:<br/> * Repeat 1.1 and 1.2 until the descrition length (DL) of the ruleset and examples is 64 bits greater than the smallest DL met so far, or there are no positive examples, or the error rate &gt;= 50%. <br/> * <br/> * 1.1. Grow phase:<br/> * Grow one rule by greedily adding antecedents (or conditions) to the rule until the rule is perfect (i.e. 100% accurate). The procedure tries every possible value of each attribute and selects the condition with highest information gain: * p(log(p/t)-log(P/T)).<br/> * <br/> * 1.2. Prune phase:<br/> * Incrementally prune each rule and allow the pruning of any final sequences of the antecedents;The pruning metric is (p-n)/(p+n) -- but it's actually 2p/(p+n) -1, so in this implementation we simply use p/(p+n) (actually (p+1)/(p+n+2), * thus if p+n is 0, it's 0.5).<br/> * <br/> * 2. Optimization stage:<br/> * after generating the initial ruleset {Ri}, generate and prune two variants of each rule Ri from randomized data using procedure 1.1 and 1.2. But one variant is generated from an empty rule while the other is generated by greedily adding * antecedents to the original rule. Moreover, the pruning metric used here is (TP+TN)/(P+N).Then the smallest possible DL for each variant and the original rule is computed. 
The variant with the minimal DL is selected as the final * representative of Ri in the ruleset.After all the rules in {Ri} have been examined and if there are still residual positives, more rules are generated based on the residual positives using Building Stage again. <br/> * 3. Delete the rules from the ruleset that would increase the DL of the whole ruleset if it were in it. and add resultant ruleset to RS. <br/> * ENDDO<br/> * <br/> * Note that there seem to be 2 bugs in the original ripper program that would affect the ruleset size and accuracy slightly. This implementation avoids these bugs and thus is a little bit different from Cohen's original implementation. * Even after fixing the bugs, since the order of classes with the same frequency is not defined in ripper, there still seems to be some trivial difference between this implementation and the original ripper, especially for audiology data * in UCI repository, where there are lots of classes of few instances.<br/> * <br/> * Details please see:<br/> * <br/> * William W. Cohen: Fast Effective Rule Induction. In: Twelfth International Conference on Machine Learning, 115-123, 1995.<br/> * <br/> * PS. We have compared this implementation with the original ripper implementation in aspects of accuracy, ruleset size and running time on both artificial data "ab+bcd+defg" and UCI datasets. In all these aspects it seems to be quite * comparable to the original ripper implementation. However, we didn't consider memory consumption optimization in this implementation.<br/> * <br/> * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;inproceedings{Cohen1995, * author = {William W. Cohen}, * booktitle = {Twelfth International Conference on Machine Learning}, * pages = {115-123}, * publisher = {Morgan Kaufmann}, * title = {Fast Effective Rule Induction}, * year = {1995} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -F &lt;number of folds&gt; * Set number of folds for REP * One fold is used as pruning set. * (default 3) * </pre> * * <pre> * -N &lt;min. weights&gt; * Set the minimal weights of instances * within a split. * (default 2.0) * </pre> * * <pre> * -O &lt;number of runs&gt; * Set the number of runs of * optimizations. 
(Default: 2) * </pre> * * <pre> * -D * Set whether turn on the * debug mode (Default: false) * </pre> * * <pre> * -S &lt;seed&gt; * The seed of randomization * (Default: 1) * </pre> * * <pre> * -E * Whether NOT check the error rate&gt;=0.5 * in stopping criteria (default: check) * </pre> * * <pre> * -P * Whether NOT use pruning * (default: use pruning) * </pre> * * <!-- options-end --> * * @author Xin Xu (xx5@cs.waikato.ac.nz) * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision$ */ public class JRip extends AbstractClassifier implements AdditionalMeasureProducer, WeightedInstancesHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -6589312996832147161L; /** The limit of description length surplus in ruleset generation */ private static double MAX_DL_SURPLUS = 64.0; /** The class attribute of the data */ private Attribute m_Class; /** The ruleset */ private ArrayList<Rule> m_Ruleset; /** The predicted class distribution */ private ArrayList<double[]> m_Distributions; /** Runs of optimizations */ private int m_Optimizations = 2; /** Random object used in this class */ private Random m_Random = null; /** # of all the possible conditions in a rule */ private double m_Total = 0; /** The seed to perform randomization */ private long m_Seed = 1; /** The number of folds to split data into Grow and Prune for IREP */ private int m_Folds = 3; /** The minimal number of instance weights within a split */ private double m_MinNo = 2.0; /** Whether in a debug mode */ private boolean m_Debug = false; /** Whether check the error rate >= 0.5 in stopping criteria */ private boolean m_CheckErr = true; /** Whether use pruning, i.e. the data is clean or not */ private boolean m_UsePruning = true; /** The filter used to randomize the class order */ private Filter m_Filter = null; /** The RuleStats for the ruleset of each class value */ private ArrayList<RuleStats> m_RulesetStats; /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "This class implements a propositional rule learner, Repeated Incremental " + "Pruning to Produce Error Reduction (RIPPER), which was proposed by William " + "W. Cohen as an optimized version of IREP. \n\n" + "The algorithm is briefly described as follows: \n\n" + "Initialize RS = {}, and for each class from the less prevalent one to " + "the more frequent one, DO: \n\n" + "1. Building stage:\nRepeat 1.1 and 1.2 until the descrition length (DL) " + "of the ruleset and examples is 64 bits greater than the smallest DL " + "met so far, or there are no positive examples, or the error rate >= 50%. " + "\n\n" + "1.1. Grow phase:\n" + "Grow one rule by greedily adding antecedents (or conditions) to " + "the rule until the rule is perfect (i.e. 100% accurate). The " + "procedure tries every possible value of each attribute and selects " + "the condition with highest information gain: p(log(p/t)-log(P/T))." + "\n\n" + "1.2. Prune phase:\n" + "Incrementally prune each rule and allow the pruning of any " + "final sequences of the antecedents;" + "The pruning metric is (p-n)/(p+n) -- but it's actually " + "2p/(p+n) -1, so in this implementation we simply use p/(p+n) " + "(actually (p+1)/(p+n+2), thus if p+n is 0, it's 0.5).\n\n" + "2. Optimization stage:\n after generating the initial ruleset {Ri}, " + "generate and prune two variants of each rule Ri from randomized data " + "using procedure 1.1 and 1.2. 
But one variant is generated from an " + "empty rule while the other is generated by greedily adding antecedents " + "to the original rule. Moreover, the pruning metric used here is " + "(TP+TN)/(P+N)." + "Then the smallest possible DL for each variant and the original rule " + "is computed. The variant with the minimal DL is selected as the final " + "representative of Ri in the ruleset." + "After all the rules in {Ri} have been examined and if there are still " + "residual positives, more rules are generated based on the residual " + "positives using Building Stage again. \n" + "3. Delete the rules from the ruleset that would increase the DL of the " + "whole ruleset if it were in it. and add resultant ruleset to RS. \n" + "ENDDO\n\n" + "Note that there seem to be 2 bugs in the original ripper program that would " + "affect the ruleset size and accuracy slightly. This implementation avoids " + "these bugs and thus is a little bit different from Cohen's original " + "implementation. Even after fixing the bugs, since the order of classes with " + "the same frequency is not defined in ripper, there still seems to be " + "some trivial difference between this implementation and the original ripper, " + "especially for audiology data in UCI repository, where there are lots of " + "classes of few instances.\n\n" + "Details please see:\n\n" + this.getTechnicalInformation().toString() + "\n\n" + "PS. We have compared this implementation with the original ripper " + "implementation in aspects of accuracy, ruleset size and running time " + "on both artificial data \"ab+bcd+defg\" and UCI datasets. In all these " + "aspects it seems to be quite comparable to the original ripper " + "implementation. However, we didn't consider memory consumption " + "optimization in this implementation.\n\n"; } /** * Returns an instance of a TechnicalInformation object, containing detailed information about the technical background of this class, e.g., paper reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "William W. Cohen"); result.setValue(Field.TITLE, "Fast Effective Rule Induction"); result.setValue(Field.BOOKTITLE, "Twelfth International Conference on Machine Learning"); result.setValue(Field.YEAR, "1995"); result.setValue(Field.PAGES, "115-123"); result.setValue(Field.PUBLISHER, "Morgan Kaufmann"); return result; } /** * Returns an enumeration describing the available options Valid options are: * <p> * * -F number <br> * The number of folds for reduced error pruning. One fold is used as the pruning set. (Default: 3) * <p> * * -N number <br> * The minimal weights of instances within a split. (Default: 2) * <p> * * -O number <br> * Set the number of runs of optimizations. (Default: 2) * <p> * * -D <br> * Whether turn on the debug mode * * -S number <br> * The seed of randomization used in Ripper.(Default: 1) * <p> * * -E <br> * Whether NOT check the error rate >= 0.5 in stopping criteria. (default: check) * <p> * * -P <br> * Whether NOT use pruning. 
(default: use pruning) * <p> * * @return an enumeration of all the available options */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<>(7); newVector.add(new Option("\tSet number of folds for REP\n" + "\tOne fold is used as pruning set.\n" + "\t(default 3)", "F", 1, "-F <number of folds>")); newVector.add(new Option("\tSet the minimal weights of instances\n" + "\twithin a split.\n" + "\t(default 2.0)", "N", 1, "-N <min. weights>")); newVector.add(new Option("\tSet the number of runs of\n" + "\toptimizations. (Default: 2)", "O", 1, "-O <number of runs>")); newVector.add(new Option("\tSet whether turn on the\n" + "\tdebug mode (Default: false)", "D", 0, "-D")); newVector.add(new Option("\tThe seed of randomization\n" + "\t(Default: 1)", "S", 1, "-S <seed>")); newVector.add(new Option("\tWhether NOT check the error rate>=0.5\n" + "\tin stopping criteria " + "\t(default: check)", "E", 0, "-E")); newVector.add(new Option("\tWhether NOT use pruning\n" + "\t(default: use pruning)", "P", 0, "-P")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -F &lt;number of folds&gt; * Set number of folds for REP * One fold is used as pruning set. * (default 3) * </pre> * * <pre> * -N &lt;min. weights&gt; * Set the minimal weights of instances * within a split. * (default 2.0) * </pre> * * <pre> * -O &lt;number of runs&gt; * Set the number of runs of * optimizations. (Default: 2) * </pre> * * <pre> * -D * Set whether turn on the * debug mode (Default: false) * </pre> * * <pre> * -S &lt;seed&gt; * The seed of randomization * (Default: 1) * </pre> * * <pre> * -E * Whether NOT check the error rate&gt;=0.5 * in stopping criteria (default: check) * </pre> * * <pre> * -P * Whether NOT use pruning * (default: use pruning) * </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String numFoldsString = Utils.getOption('F', options); if (numFoldsString.length() != 0) { this.m_Folds = Integer.parseInt(numFoldsString); } else { this.m_Folds = 3; } String minNoString = Utils.getOption('N', options); if (minNoString.length() != 0) { this.m_MinNo = Double.parseDouble(minNoString); } else { this.m_MinNo = 2.0; } String seedString = Utils.getOption('S', options); if (seedString.length() != 0) { this.m_Seed = Long.parseLong(seedString); } else { this.m_Seed = 1; } String runString = Utils.getOption('O', options); if (runString.length() != 0) { this.m_Optimizations = Integer.parseInt(runString); } else { this.m_Optimizations = 2; } this.m_Debug = Utils.getFlag('D', options); this.m_CheckErr = !Utils.getFlag('E', options); this.m_UsePruning = !Utils.getFlag('P', options); super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the Classifier. 
* * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<>(); options.add("-F"); options.add("" + this.m_Folds); options.add("-N"); options.add("" + this.m_MinNo); options.add("-O"); options.add("" + this.m_Optimizations); options.add("-S"); options.add("" + this.m_Seed); if (this.m_Debug) { options.add("-D"); } if (!this.m_CheckErr) { options.add("-E"); } if (!this.m_UsePruning) { options.add("-P"); } Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * Returns an enumeration of the additional measure names * * @return an enumeration of the measure names */ @Override public Enumeration<String> enumerateMeasures() { Vector<String> newVector = new Vector<>(1); newVector.add("measureNumRules"); return newVector.elements(); } /** * Returns the value of the named measure * * @param additionalMeasureName * the name of the measure to query for its value * @return the value of the named measure * @throws IllegalArgumentException * if the named measure is not supported */ @Override public double getMeasure(final String additionalMeasureName) { if (additionalMeasureName.compareToIgnoreCase("measureNumRules") == 0) { return this.m_Ruleset.size(); } else { throw new IllegalArgumentException(additionalMeasureName + " not supported (RIPPER)"); } } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String foldsTipText() { return "Determines the amount of data used for pruning. One fold is used for " + "pruning, the rest for growing the rules."; } /** * Sets the number of folds to use * * @param fold * the number of folds */ public void setFolds(final int fold) { this.m_Folds = fold; } /** * Gets the number of folds * * @return the number of folds */ public int getFolds() { return this.m_Folds; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String minNoTipText() { return "The minimum total weight of the instances in a rule."; } /** * Sets the minimum total weight of the instances in a rule * * @param m * the minimum total weight of the instances in a rule */ public void setMinNo(final double m) { this.m_MinNo = m; } /** * Gets the minimum total weight of the instances in a rule * * @return the minimum total weight of the instances in a rule */ public double getMinNo() { return this.m_MinNo; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String seedTipText() { return "The seed used for randomizing the data."; } /** * Sets the seed value to use in randomizing the data * * @param s * the new seed value */ public void setSeed(final long s) { this.m_Seed = s; } /** * Gets the current seed value to use in randomizing the data * * @return the seed value */ public long getSeed() { return this.m_Seed; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String optimizationsTipText() { return "The number of optimization runs."; } /** * Sets the number of optimization runs * * @param run * the number of optimization runs */ public void setOptimizations(final int run) { this.m_Optimizations = run; } /** * Gets the the number of optimization runs * * @return the number of 
optimization runs */ public int getOptimizations() { return this.m_Optimizations; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ @Override public String debugTipText() { return "Whether debug information is output to the console."; } /** * Sets whether debug information is output to the console * * @param d * whether debug information is output to the console */ @Override public void setDebug(final boolean d) { this.m_Debug = d; } /** * Gets whether debug information is output to the console * * @return whether debug information is output to the console */ @Override public boolean getDebug() { return this.m_Debug; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String checkErrorRateTipText() { return "Whether check for error rate >= 1/2 is included" + " in stopping criterion."; } /** * Sets whether to check for error rate is in stopping criterion * * @param d * whether to check for error rate is in stopping criterion */ public void setCheckErrorRate(final boolean d) { this.m_CheckErr = d; } /** * Gets whether to check for error rate is in stopping criterion * * @return true if checking for error rate is in stopping criterion */ public boolean getCheckErrorRate() { return this.m_CheckErr; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String usePruningTipText() { return "Whether pruning is performed."; } /** * Sets whether pruning is performed * * @param d * Whether pruning is performed */ public void setUsePruning(final boolean d) { this.m_UsePruning = d; } /** * Gets whether pruning is performed * * @return true if pruning is performed */ public boolean getUsePruning() { return this.m_UsePruning; } /** * Get the ruleset generated by Ripper * * @return the ruleset */ public ArrayList<Rule> getRuleset() { return this.m_Ruleset; } /** * Get the statistics of the ruleset in the given position * * @param pos * the position of the stats, assuming correct * @return the statistics of the ruleset in the given position */ public RuleStats getRuleStats(final int pos) { return this.m_RulesetStats.get(pos); } /** * The single antecedent in the rule, which is composed of an attribute and the corresponding value. There are two inherited classes, namely NumericAntd and NominalAntd in which the attributes are numeric and nominal respectively. */ public abstract class Antd implements WeightedInstancesHandler, Copyable, Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = -8929754772994154334L; /** The attribute of the antecedent */ protected Attribute att; /** * The attribute value of the antecedent. 
For numeric attribute, value is either 0(1st bag) or 1(2nd bag) */ protected double value; /** * The maximum infoGain achieved by this antecedent test in the growing data */ protected double maxInfoGain; /** The accurate rate of this antecedent test on the growing data */ protected double accuRate; /** The coverage of this antecedent in the growing data */ protected double cover; /** The accurate data for this antecedent in the growing data */ protected double accu; /** * Constructor */ public Antd(final Attribute a) { this.att = a; this.value = Double.NaN; this.maxInfoGain = 0; this.accuRate = Double.NaN; this.cover = Double.NaN; this.accu = Double.NaN; } /* The abstract members for inheritance */ public abstract Instances[] splitData(Instances data, double defAcRt, double cla) throws InterruptedException; public abstract boolean covers(Instance inst); @Override public abstract String toString(); /** * Implements Copyable * * @return a copy of this object */ @Override public abstract Object copy(); /* Get functions of this antecedent */ public Attribute getAttr() { return this.att; } public double getAttrValue() { return this.value; } public double getMaxInfoGain() { return this.maxInfoGain; } public double getAccuRate() { return this.accuRate; } public double getAccu() { return this.accu; } public double getCover() { return this.cover; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } /** * The antecedent with numeric attribute */ public class NumericAntd extends Antd { /** for serialization */ static final long serialVersionUID = 5699457269983735442L; /** The split point for this numeric antecedent */ private double splitPoint; /** * Constructor */ public NumericAntd(final Attribute a) { super(a); this.splitPoint = Double.NaN; } /** * Get split point of this numeric antecedent * * @return the split point of this numeric antecedent */ public double getSplitPoint() { return this.splitPoint; } /** * Implements Copyable * * @return a copy of this object */ @Override public Object copy() { NumericAntd na = new NumericAntd(this.getAttr()); na.value = this.value; na.splitPoint = this.splitPoint; return na; } /** * Implements the splitData function. This procedure is to split the data into two bags according to the information gain of the numeric attribute value The maximum infoGain is also calculated. 
 * * @param insts * the data to be split * @param defAcRt * the default accuracy rate for data * @param cl * the class label to be predicted * @return the array of data after split * @throws InterruptedException */ @Override public Instances[] splitData(final Instances insts, final double defAcRt, final double cl) throws InterruptedException { Instances data = insts; int total = data.numInstances();// Total number of instances without // missing value for att int split = 1; // Current split position int prev = 0; // Previous split position int finalSplit = split; // Final split position this.maxInfoGain = 0; this.value = 0; double fstCover = 0, sndCover = 0, fstAccu = 0, sndAccu = 0; data.sort(this.att); // Find the last instance without missing value for (int x = 0; x < data.numInstances(); x++) { if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } Instance inst = data.instance(x); if (inst.isMissing(this.att)) { total = x; break; } sndCover += inst.weight(); if (Utils.eq(inst.classValue(), cl)) { sndAccu += inst.weight(); } } if (total == 0) { return null; // Data all missing for the attribute } this.splitPoint = data.instance(total - 1).value(this.att); for (; split <= total; split++) { if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } if ((split == total) || (data.instance(split).value(this.att) > // Can't // split // within data.instance(prev).value(this.att))) { // same value for (int y = prev; y < split; y++) { Instance inst = data.instance(y); fstCover += inst.weight(); if (Utils.eq(data.instance(y).classValue(), cl)) { fstAccu += inst.weight(); // First bag positive# ++ } } double fstAccuRate = (fstAccu + 1.0) / (fstCover + 1.0), sndAccuRate = (sndAccu + 1.0) / (sndCover + 1.0); /* Which bag has higher information gain? */ boolean isFirst; double fstInfoGain, sndInfoGain; double accRate, infoGain, coverage, accurate; fstInfoGain = // Utils.eq(defAcRt, 1.0) ? // fstAccu/(double)numConds : fstAccu * (Utils.log2(fstAccuRate) - Utils.log2(defAcRt)); sndInfoGain = // Utils.eq(defAcRt, 1.0) ? // sndAccu/(double)numConds : sndAccu * (Utils.log2(sndAccuRate) - Utils.log2(defAcRt)); if (fstInfoGain > sndInfoGain) { isFirst = true; infoGain = fstInfoGain; accRate = fstAccuRate; accurate = fstAccu; coverage = fstCover; } else { isFirst = false; infoGain = sndInfoGain; accRate = sndAccuRate; accurate = sndAccu; coverage = sndCover; } /* Check whether this is the max infoGain so far */ if (infoGain > this.maxInfoGain) { this.splitPoint = data.instance(prev).value(this.att); this.value = (isFirst) ? 0 : 1; this.accuRate = accRate; this.accu = accurate; this.cover = coverage; this.maxInfoGain = infoGain; finalSplit = (isFirst) ? 
split : prev; } for (int y = prev; y < split; y++) { Instance inst = data.instance(y); sndCover -= inst.weight(); if (Utils.eq(data.instance(y).classValue(), cl)) { sndAccu -= inst.weight(); // Second bag positive# -- } } prev = split; } } /* Split the data */ Instances[] splitData = new Instances[2]; splitData[0] = new Instances(data, 0, finalSplit); splitData[1] = new Instances(data, finalSplit, total - finalSplit); return splitData; } /** * Whether the instance is covered by this antecedent * * @param inst * the instance in question * @return the boolean value indicating whether the instance is covered by this antecedent */ @Override public boolean covers(final Instance inst) { boolean isCover = true; if (!inst.isMissing(this.att)) { if ((int) this.value == 0) { // First bag if (inst.value(this.att) > this.splitPoint) { isCover = false; } } else if (inst.value(this.att) < this.splitPoint) { isCover = false; } } else { isCover = false; } return isCover; } /** * Prints this antecedent * * @return a textual description of this antecedent */ @Override public String toString() { String symbol = ((int) this.value == 0) ? " <= " : " >= "; return (this.att.name() + symbol + Utils.doubleToString(this.splitPoint, 6)); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } /** * The antecedent with nominal attribute */ public class NominalAntd extends Antd { /** for serialization */ static final long serialVersionUID = -9102297038837585135L; /* * The parameters of infoGain calculated for each attribute value in the growing data */ private final double[] accurate; private final double[] coverage; /** * Constructor */ public NominalAntd(final Attribute a) { super(a); int bag = this.att.numValues(); this.accurate = new double[bag]; this.coverage = new double[bag]; } /** * Implements Copyable * * @return a copy of this object */ @Override public Object copy() { Antd antec = new NominalAntd(this.getAttr()); antec.value = this.value; return antec; } /** * Implements the splitData function. This procedure is to split the data into bags according to the nominal attribute value The infoGain for each bag is also calculated. * * @param data * the data to be split * @param defAcRt * the default accuracy rate for data * @param cl * the class label to be predicted * @return the array of data after split */ @Override public Instances[] splitData(final Instances data, final double defAcRt, final double cl) { int bag = this.att.numValues(); Instances[] splitData = new Instances[bag]; for (int x = 0; x < bag; x++) { splitData[x] = new Instances(data, data.numInstances()); this.accurate[x] = 0; this.coverage[x] = 0; } for (int x = 0; x < data.numInstances(); x++) { Instance inst = data.instance(x); if (!inst.isMissing(this.att)) { int v = (int) inst.value(this.att); splitData[v].add(inst); this.coverage[v] += inst.weight(); if ((int) inst.classValue() == (int) cl) { this.accurate[v] += inst.weight(); } } } for (int x = 0; x < bag; x++) { double t = this.coverage[x] + 1.0; double p = this.accurate[x] + 1.0; double infoGain = // Utils.eq(defAcRt, 1.0) ? 
// accurate[x]/(double)numConds : this.accurate[x] * (Utils.log2(p / t) - Utils.log2(defAcRt)); if (infoGain > this.maxInfoGain) { this.maxInfoGain = infoGain; this.cover = this.coverage[x]; this.accu = this.accurate[x]; this.accuRate = p / t; this.value = x; } } return splitData; } /** * Whether the instance is covered by this antecedent * * @param inst * the instance in question * @return the boolean value indicating whether the instance is covered by this antecedent */ @Override public boolean covers(final Instance inst) { boolean isCover = false; if (!inst.isMissing(this.att)) { if ((int) inst.value(this.att) == (int) this.value) { isCover = true; } } return isCover; } /** * Prints this antecedent * * @return a textual description of this antecedent */ @Override public String toString() { return (this.att.name() + " = " + this.att.value((int) this.value)); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } /** * This class implements a single rule that predicts a specified class. * * A rule consists of antecedents "AND"ed together and the consequent (class value) for the classification. In this class, the Information Gain (p*[log(p/t) - log(P/T)]) is used to select an antecedent and Reduced Error Pruning (REP) * with the metric of accuracy rate p/(p+n) or (TP+TN)/(P+N) is used to prune the rule. */ public class RipperRule extends Rule { /** for serialization */ static final long serialVersionUID = -2410020717305262952L; /** The internal representation of the class label to be predicted */ private double m_Consequent = -1; /** The vector of antecedents of this rule */ protected ArrayList<Antd> m_Antds = null; /** Constructor */ public RipperRule() { this.m_Antds = new ArrayList<>(); } /** * Removes redundant tests in the rule. * * @param data * an instance object that contains the appropriate header information for the attributes. 
*/ public void cleanUp(final Instances data) { double[] mins = new double[data.numAttributes()]; double[] maxs = new double[data.numAttributes()]; for (int i = 0; i < data.numAttributes(); i++) { mins[i] = Double.MAX_VALUE; maxs[i] = -Double.MAX_VALUE; } for (int i = this.m_Antds.size() - 1; i >= 0; i--) { Attribute att = this.m_Antds.get(i).getAttr(); if (att.isNumeric()) { double splitPoint = ((NumericAntd) this.m_Antds.get(i)).getSplitPoint(); if (this.m_Antds.get(i).getAttrValue() == 0) { if (splitPoint < mins[att.index()]) { mins[att.index()] = splitPoint; } else { this.m_Antds.remove(i); } } else { if (splitPoint > maxs[att.index()]) { maxs[att.index()] = splitPoint; } else { this.m_Antds.remove(i); } } } } } /** * Sets the internal representation of the class label to be predicted * * @param cl * the internal representation of the class label to be predicted */ public void setConsequent(final double cl) { this.m_Consequent = cl; } /** * Gets the internal representation of the class label to be predicted * * @return the internal representation of the class label to be predicted */ @Override public double getConsequent() { return this.m_Consequent; } /** * Get a shallow copy of this rule * * @return the copy */ @Override public Object copy() { RipperRule copy = new RipperRule(); copy.setConsequent(this.getConsequent()); copy.m_Antds = new ArrayList<>(this.m_Antds.size()); for (Antd a : this.m_Antds) { copy.m_Antds.add((Antd) a.copy()); } return copy; } /** * Whether the instance covered by this rule * * @param datum * the instance in question * @return the boolean value indicating whether the instance is covered by this rule */ @Override public boolean covers(final Instance datum) { boolean isCover = true; for (int i = 0; i < this.m_Antds.size(); i++) { Antd antd = this.m_Antds.get(i); if (!antd.covers(datum)) { isCover = false; break; } } return isCover; } /** * Whether this rule has antecedents, i.e. 
whether it is a default rule * * @return the boolean value indicating whether the rule has antecedents */ @Override public boolean hasAntds() { if (this.m_Antds == null) { return false; } else { return (this.m_Antds.size() > 0); } } /** * Return the antecedents * * @return the vector of antecedents */ public ArrayList<Antd> getAntds() { return this.m_Antds; } /** * the number of antecedents of the rule * * @return the size of this rule */ @Override public double size() { return this.m_Antds.size(); } /** * Private function to compute default number of accurate instances in the specified data for the consequent of the rule * * @param data * the data in question * @return the default accuracy number */ private double computeDefAccu(final Instances data) { double defAccu = 0; for (int i = 0; i < data.numInstances(); i++) { Instance inst = data.instance(i); if ((int) inst.classValue() == (int) this.m_Consequent) { defAccu += inst.weight(); } } return defAccu; } /** * Build one rule using the growing data * * @param data * the growing data used to build the rule * @throws Exception * if the consequent is not set yet */ @Override public void grow(final Instances data) throws Exception { if (this.m_Consequent == -1) { throw new Exception(" Consequent not set yet."); } Instances growData = data; double sumOfWeights = growData.sumOfWeights(); if (!Utils.gr(sumOfWeights, 0.0)) { return; } /* Compute the default accurate rate of the growing data */ double defAccu = this.computeDefAccu(growData); double defAcRt = (defAccu + 1.0) / (sumOfWeights + 1.0); /* Keep the record of which attributes have already been used */ boolean[] used = new boolean[growData.numAttributes()]; for (int k = 0; k < used.length; k++) { used[k] = false; } int numUnused = used.length; // If there are already antecedents existing for (int j = 0; j < this.m_Antds.size(); j++) { Antd antdj = this.m_Antds.get(j); if (!antdj.getAttr().isNumeric()) { used[antdj.getAttr().index()] = true; numUnused--; } } double maxInfoGain; while (Utils.gr(growData.numInstances(), 0.0) && (numUnused > 0) && Utils.sm(defAcRt, 1.0)) { // We require that infoGain be positive /* * if(numAntds == originalSize) maxInfoGain = 0.0; // At least one condition allowed else * maxInfoGain = Utils.eq(defAcRt, 1.0) ? defAccu/(double)numAntds : 0.0; */ maxInfoGain = 0.0; /* Build a list of antecedents */ Antd oneAntd = null; Instances coverData = null; Enumeration<Attribute> enumAttr = growData.enumerateAttributes(); /* Build one condition based on all attributes not used yet */ while (enumAttr.hasMoreElements()) { Attribute att = (enumAttr.nextElement()); if (JRip.this.m_Debug) { System.err.println("\nOne condition: size = " + growData.sumOfWeights()); } Antd antd = null; if (att.isNumeric()) { antd = new NumericAntd(att); } else { antd = new NominalAntd(att); } if (!used[att.index()]) { /* * Compute the best information gain for each attribute, it's stored in the antecedent formed by * this attribute. This procedure returns the data covered by the antecedent */ Instances coveredData = this.computeInfoGain(growData, defAcRt, antd); if (coveredData != null) { double infoGain = antd.getMaxInfoGain(); if (JRip.this.m_Debug) { System.err.println("Test of \'" + antd.toString() + "\': infoGain = " + infoGain + " | Accuracy = " + antd.getAccuRate() + "=" + antd.getAccu() + "/" + antd.getCover() + " def. 
accuracy: " + defAcRt); } if (infoGain > maxInfoGain) { oneAntd = antd; coverData = coveredData; maxInfoGain = infoGain; } } } } if (oneAntd == null) { break; // Cannot find antds } if (Utils.sm(oneAntd.getAccu(), JRip.this.m_MinNo)) { break;// Too low coverage } // Numeric attributes can be used more than once if (!oneAntd.getAttr().isNumeric()) { used[oneAntd.getAttr().index()] = true; numUnused--; } this.m_Antds.add(oneAntd); growData = coverData;// Grow data size is shrinking defAcRt = oneAntd.getAccuRate(); } } /** * Compute the best information gain for the specified antecedent * * @param instances * the data based on which the infoGain is computed * @param defAcRt * the default accuracy rate of data * @param antd * the specific antecedent * @return the data covered by the antecedent * @throws InterruptedException */ private Instances computeInfoGain(final Instances instances, final double defAcRt, final Antd antd) throws InterruptedException { Instances data = instances; /* * Split the data into bags. The information gain of each bag is also calculated in this procedure */ Instances[] splitData = antd.splitData(data, defAcRt, this.m_Consequent); /* Get the bag of data to be used for next antecedents */ if (splitData != null) { return splitData[(int) antd.getAttrValue()]; } else { return null; } } /** * Prune all the possible final sequences of the rule using the pruning data. The measure used to prune the rule is based on flag given. * * @param pruneData * the pruning data used to prune the rule * @param useWhole * flag to indicate whether use the error rate of the whole pruning data instead of the data covered */ public void prune(final Instances pruneData, final boolean useWhole) { Instances data = pruneData; double total = data.sumOfWeights(); if (!Utils.gr(total, 0.0)) { return; } /* The default accurate # and rate on pruning data */ double defAccu = this.computeDefAccu(data); if (JRip.this.m_Debug) { System.err.println("Pruning with " + defAccu + " positive data out of " + total + " instances"); } int size = this.m_Antds.size(); if (size == 0) { return; // Default rule before pruning } double[] worthRt = new double[size]; double[] coverage = new double[size]; double[] worthValue = new double[size]; for (int w = 0; w < size; w++) { worthRt[w] = coverage[w] = worthValue[w] = 0.0; } /* Calculate accuracy parameters for all the antecedents in this rule */ double tn = 0.0; // True negative if useWhole for (int x = 0; x < size; x++) { Antd antd = this.m_Antds.get(x); Instances newData = data; data = new Instances(newData, 0); // Make data empty for (int y = 0; y < newData.numInstances(); y++) { Instance ins = newData.instance(y); if (antd.covers(ins)) { // Covered by this antecedent coverage[x] += ins.weight(); data.add(ins); // Add to data for further pruning if ((int) ins.classValue() == (int) this.m_Consequent) { worthValue[x] += ins.weight(); } } else if (useWhole) { // Not covered if ((int) ins.classValue() != (int) this.m_Consequent) { tn += ins.weight(); } } } if (useWhole) { worthValue[x] += tn; worthRt[x] = worthValue[x] / total; } else { worthRt[x] = (worthValue[x] + 1.0) / (coverage[x] + 2.0); } } double maxValue = (defAccu + 1.0) / (total + 2.0); int maxIndex = -1; for (int i = 0; i < worthValue.length; i++) { if (JRip.this.m_Debug) { double denom = useWhole ? total : coverage[i]; System.err.println(i + "(useAccuray? 
" + !useWhole + "): " + worthRt[i] + "=" + worthValue[i] + "/" + denom); } if (worthRt[i] > maxValue) { // Prefer to the maxValue = worthRt[i]; // shorter rule maxIndex = i; } } /* Prune the antecedents according to the accuracy parameters */ for (int z = size - 1; z > maxIndex; z--) { this.m_Antds.remove(z); } } /** * Prints this rule * * @param classAttr * the class attribute in the data * @return a textual description of this rule */ public String toString(final Attribute classAttr) { StringBuffer text = new StringBuffer(); if (this.m_Antds.size() > 0) { for (int j = 0; j < (this.m_Antds.size() - 1); j++) { text.append("(" + (this.m_Antds.get(j)).toString() + ") and "); } text.append("(" + (this.m_Antds.get(this.m_Antds.size() - 1)).toString() + ")"); } text.append(" => " + classAttr.name() + "=" + classAttr.value((int) this.m_Consequent)); return text.toString(); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(this.m_Folds); return result; } /** * Builds Ripper in the order of class frequencies. For each class it's built in two stages: building and optimization * * @param instances * the training data * @throws Exception * if classifier can't be built successfully */ @Override public void buildClassifier(Instances instances) throws Exception { // can classifier handle the data? 
this.getCapabilities().testWithFail(instances); // remove instances with missing class instances = new Instances(instances); instances.deleteWithMissingClass(); this.m_Random = instances.getRandomNumberGenerator(this.m_Seed); this.m_Total = RuleStats.numAllConditions(instances); if (this.m_Debug) { System.err.println("Number of all possible conditions = " + this.m_Total); } Instances data = null; this.m_Filter = new ClassOrder(); ((ClassOrder) this.m_Filter).setSeed(this.m_Random.nextInt()); ((ClassOrder) this.m_Filter).setClassOrder(ClassOrder.FREQ_ASCEND); this.m_Filter.setInputFormat(instances); data = Filter.useFilter(instances, this.m_Filter); if (data == null) { throw new Exception(" Unable to randomize the class orders."); } this.m_Class = data.classAttribute(); this.m_Ruleset = new ArrayList<>(); this.m_RulesetStats = new ArrayList<>(); this.m_Distributions = new ArrayList<>(); // Sort by classes frequency double[] orderedClasses = ((ClassOrder) this.m_Filter).getClassCounts(); if (this.m_Debug) { System.err.println("Sorted classes:"); for (int x = 0; x < this.m_Class.numValues(); x++) { System.err.println(x + ": " + this.m_Class.value(x) + " has " + orderedClasses[x] + " instances."); } } // Iterate from less prevalent class to more frequent one oneClass: for (int y = 0; y < data.numClasses() - 1; y++) { // For each // class double classIndex = y; if (this.m_Debug) { int ci = (int) classIndex; System.err.println("\n\nClass " + this.m_Class.value(ci) + "(" + ci + "): " + orderedClasses[y] + "instances\n" + "=====================================\n"); } if (Utils.eq(orderedClasses[y], 0.0)) { continue oneClass; } // The expected FP/err is the proportion of the class double all = 0; for (int i = y; i < orderedClasses.length; i++) { all += orderedClasses[i]; } double expFPRate = orderedClasses[y] / all; double classYWeights = 0, totalWeights = 0; for (int j = 0; j < data.numInstances(); j++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } Instance datum = data.instance(j); totalWeights += datum.weight(); if ((int) datum.classValue() == y) { classYWeights += datum.weight(); } } // DL of default rule, no theory DL, only data DL double defDL; if (classYWeights > 0) { defDL = RuleStats.dataDL(expFPRate, 0.0, totalWeights, 0.0, classYWeights); } else { continue oneClass; // Subsumed by previous rules } if (Double.isNaN(defDL) || Double.isInfinite(defDL)) { throw new Exception("Should never happen: " + "defDL NaN or infinite!"); } if (this.m_Debug) { System.err.println("The default DL = " + defDL); } data = this.rulesetForOneClass(expFPRate, data, classIndex, defDL); } // Remove redundant numeric tests from the rules for (Rule rule : this.m_Ruleset) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } ((RipperRule) rule).cleanUp(data); } // Set the default rule RipperRule defRule = new RipperRule(); defRule.setConsequent(data.numClasses() - 1); this.m_Ruleset.add(defRule); RuleStats defRuleStat = new RuleStats(); defRuleStat.setData(data); defRuleStat.setNumAllConds(this.m_Total); defRuleStat.addAndUpdate(defRule); this.m_RulesetStats.add(defRuleStat); for (int z = 0; z < this.m_RulesetStats.size(); z++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } RuleStats oneClass = this.m_RulesetStats.get(z); for (int xyz = 0; xyz < 
oneClass.getRulesetSize(); xyz++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } double[] classDist = oneClass.getDistributions(xyz); Utils.normalize(classDist); if (classDist != null) { this.m_Distributions.add(((ClassOrder) this.m_Filter).distributionsByOriginalIndex(classDist)); } } } // free up memory for (int i = 0; i < this.m_RulesetStats.size(); i++) { // XXX interrupt weka if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } (this.m_RulesetStats.get(i)).cleanUp(); } } /** * Classify the test instance with the rule learner and provide the class distributions * * @param datum * the instance to be classified * @return the distribution */ @Override public double[] distributionForInstance(final Instance datum) { try { for (int i = 0; i < this.m_Ruleset.size(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } Rule rule = this.m_Ruleset.get(i); if (rule.covers(datum)) { return this.m_Distributions.get(i); } } } catch (Exception e) { System.err.println(e.getMessage()); e.printStackTrace(); } System.err.println("Should never happen!"); return new double[datum.classAttribute().numValues()]; } /** * Build a ruleset for the given class according to the given data * * @param expFPRate * the expected FP/(FP+FN) used in DL calculation * @param data * the given data * @param classIndex * the given class index * @param defDL * the default DL in the data * @throws Exception * if the ruleset can be built properly */ protected Instances rulesetForOneClass(final double expFPRate, final Instances data, final double classIndex, final double defDL) throws Exception { Instances newData = data, growData, pruneData; boolean stop = false; ArrayList<Rule> ruleset = new ArrayList<>(); double dl = defDL, minDL = defDL; RuleStats rstats = null; double[] rst; // Check whether data have positive examples boolean defHasPositive = true; // No longer used boolean hasPositive = defHasPositive; /********************** Building stage ***********************/ if (this.m_Debug) { System.err.println("\n*** Building stage ***"); } while ((!stop) && hasPositive) { // Generate new rules until if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } // stopping criteria met RipperRule oneRule; if (this.m_UsePruning) { /* Split data into Grow and Prune */ // We should have stratified the data, but ripper seems // to have a bug that makes it not to do so. In order // to simulate it more precisely, we do the same thing. 
// newData.randomize(m_Random); newData = RuleStats.stratify(newData, this.m_Folds, this.m_Random); Instances[] part = RuleStats.partition(newData, this.m_Folds); growData = part[0]; pruneData = part[1]; // growData=newData.trainCV(m_Folds, m_Folds-1); // pruneData=newData.testCV(m_Folds, m_Folds-1); oneRule = new RipperRule(); oneRule.setConsequent(classIndex); // Must set first if (this.m_Debug) { System.err.println("\nGrowing a rule ..."); } oneRule.grow(growData); // Build the rule if (this.m_Debug) { System.err.println("One rule found before pruning:" + oneRule.toString(this.m_Class)); } if (this.m_Debug) { System.err.println("\nPruning the rule ..."); } oneRule.prune(pruneData, false); // Prune the rule if (this.m_Debug) { System.err.println("One rule found after pruning:" + oneRule.toString(this.m_Class)); } } else { oneRule = new RipperRule(); oneRule.setConsequent(classIndex); // Must set first if (this.m_Debug) { System.err.println("\nNo pruning: growing a rule ..."); } oneRule.grow(newData); // Build the rule if (this.m_Debug) { System.err.println("No pruning: one rule found:\n" + oneRule.toString(this.m_Class)); } } // Compute the DL of this ruleset if (rstats == null) { // First rule rstats = new RuleStats(); rstats.setNumAllConds(this.m_Total); rstats.setData(newData); } rstats.addAndUpdate(oneRule); int last = rstats.getRuleset().size() - 1; // Index of last rule dl += rstats.relativeDL(last, expFPRate, this.m_CheckErr); if (Double.isNaN(dl) || Double.isInfinite(dl)) { throw new Exception("Should never happen: dl in " + "building stage NaN or infinite!"); } if (this.m_Debug) { System.err.println("Before optimization(" + last + "): the dl = " + dl + " | best: " + minDL); } if (dl < minDL) { minDL = dl; // The best dl so far } rst = rstats.getSimpleStats(last); if (this.m_Debug) { System.err.println("The rule covers: " + rst[0] + " | pos = " + rst[2] + " | neg = " + rst[4] + "\nThe rule doesn't cover: " + rst[1] + " | pos = " + rst[5]); } stop = this.checkStop(rst, minDL, dl); if (!stop) { ruleset.add(oneRule); // Accepted newData = rstats.getFiltered(last)[1];// Data not covered hasPositive = Utils.gr(rst[5], 0.0); // Positives remaining? if (this.m_Debug) { System.err.println("One rule added: has positive? " + hasPositive); } } else { if (this.m_Debug) { System.err.println("Quit rule"); } rstats.removeLast(); // Remove last to be re-used } } // while !stop /******************** Optimization stage *******************/ RuleStats finalRulesetStat = null; if (this.m_UsePruning) { for (int z = 0; z < this.m_Optimizations; z++) { if (this.m_Debug) { System.err.println("\n*** Optimization: run #" + z + " ***"); } newData = data; finalRulesetStat = new RuleStats(); finalRulesetStat.setData(newData); finalRulesetStat.setNumAllConds(this.m_Total); int position = 0; stop = false; boolean isResidual = false; hasPositive = defHasPositive; dl = minDL = defDL; oneRule: while (!stop && hasPositive) { isResidual = (position >= ruleset.size()); // Cover residual positive // examples // Re-do shuffling and stratification // newData.randomize(m_Random); newData = RuleStats.stratify(newData, this.m_Folds, this.m_Random); Instances[] part = RuleStats.partition(newData, this.m_Folds); growData = part[0]; pruneData = part[1]; // growData=newData.trainCV(m_Folds, m_Folds-1); // pruneData=newData.testCV(m_Folds, m_Folds-1); RipperRule finalRule; if (this.m_Debug) { System.err.println("\nRule #" + position + "| isResidual?" 
+ isResidual + "| data size: " + newData.sumOfWeights()); } if (isResidual) { RipperRule newRule = new RipperRule(); newRule.setConsequent(classIndex); if (this.m_Debug) { System.err.println("\nGrowing and pruning" + " a new rule ..."); } newRule.grow(growData); newRule.prune(pruneData, false); finalRule = newRule; if (this.m_Debug) { System.err.println("\nNew rule found: " + newRule.toString(this.m_Class)); } } else { RipperRule oldRule = (RipperRule) ruleset.get(position); boolean covers = false; // Test coverage of the next old rule for (int i = 0; i < newData.numInstances(); i++) { if (oldRule.covers(newData.instance(i))) { covers = true; break; } } if (!covers) {// Null coverage, no variants can be generated finalRulesetStat.addAndUpdate(oldRule); position++; continue oneRule; } // 2 variants if (this.m_Debug) { System.err.println("\nGrowing and pruning" + " Replace ..."); } RipperRule replace = new RipperRule(); replace.setConsequent(classIndex); replace.grow(growData); // Remove the pruning data covered by the following // rules, then simply compute the error rate of the // current rule to prune it. According to Ripper, // it's equivalent to computing the error of the // whole ruleset -- is it true? pruneData = RuleStats.rmCoveredBySuccessives(pruneData, ruleset, position); replace.prune(pruneData, true); if (this.m_Debug) { System.err.println("\nGrowing and pruning" + " Revision ..."); } RipperRule revision = (RipperRule) oldRule.copy(); // For revision, restrict the grow data to the instances covered by the old rule Instances newGrowData = new Instances(growData, 0); for (int b = 0; b < growData.numInstances(); b++) { Instance inst = growData.instance(b); if (revision.covers(inst)) { newGrowData.add(inst); } } revision.grow(newGrowData); revision.prune(pruneData, true); double[][] prevRuleStats = new double[position][6]; for (int c = 0; c < position; c++) { prevRuleStats[c] = finalRulesetStat.getSimpleStats(c); } // Now compare the relative DL of variants ArrayList<Rule> tempRules = new ArrayList<>(ruleset.size()); for (Rule r : ruleset) { tempRules.add((Rule) r.copy()); } tempRules.set(position, replace); RuleStats repStat = new RuleStats(data, tempRules); repStat.setNumAllConds(this.m_Total); repStat.countData(position, newData, prevRuleStats); // repStat.countData(); rst = repStat.getSimpleStats(position); if (this.m_Debug) { System.err.println("Replace rule covers: " + rst[0] + " | pos = " + rst[2] + " | neg = " + rst[4] + "\nThe rule doesn't cover: " + rst[1] + " | pos = " + rst[5]); } double repDL = repStat.relativeDL(position, expFPRate, this.m_CheckErr); if (this.m_Debug) { System.err.println("\nReplace: " + replace.toString(this.m_Class) + " |dl = " + repDL); } if (Double.isNaN(repDL) || Double.isInfinite(repDL)) { throw new Exception("Should never happen: repDL " + "in optmz. stage NaN or " + "infinite!"); } tempRules.set(position, revision); RuleStats revStat = new RuleStats(data, tempRules); revStat.setNumAllConds(this.m_Total); revStat.countData(position, newData, prevRuleStats); // revStat.countData(); double revDL = revStat.relativeDL(position, expFPRate, this.m_CheckErr); if (this.m_Debug) { System.err.println("Revision: " + revision.toString(this.m_Class) + " |dl = " + revDL); } if (Double.isNaN(revDL) || Double.isInfinite(revDL)) { throw new Exception("Should never happen: revDL " + "in optmz. 
stage NaN or " + "infinite!"); } rstats = new RuleStats(data, ruleset); rstats.setNumAllConds(this.m_Total); rstats.countData(position, newData, prevRuleStats); // rstats.countData(); double oldDL = rstats.relativeDL(position, expFPRate, this.m_CheckErr); if (Double.isNaN(oldDL) || Double.isInfinite(oldDL)) { throw new Exception("Should never happen: oldDL " + "in optmz. stage NaN or " + "infinite!"); } if (this.m_Debug) { System.err.println("Old rule: " + oldRule.toString(this.m_Class) + " |dl = " + oldDL); } if (this.m_Debug) { System.err.println("\nrepDL: " + repDL + "\nrevDL: " + revDL + "\noldDL: " + oldDL); } if ((oldDL <= revDL) && (oldDL <= repDL)) { finalRule = oldRule; // Old the best } else if (revDL <= repDL) { finalRule = revision; // Revision the best } else { finalRule = replace; // Replace the best } } finalRulesetStat.addAndUpdate(finalRule); rst = finalRulesetStat.getSimpleStats(position); if (isResidual) { dl += finalRulesetStat.relativeDL(position, expFPRate, this.m_CheckErr); if (this.m_Debug) { System.err.println("After optimization: the dl = " + dl + " | best: " + minDL); } if (dl < minDL) { minDL = dl; // The best dl so far } stop = this.checkStop(rst, minDL, dl); if (!stop) { ruleset.add(finalRule); // Accepted } else { finalRulesetStat.removeLast(); // Remove last to be re-used position--; } } else { ruleset.set(position, finalRule); // Accepted } if (this.m_Debug) { System.err.println("The rule covers: " + rst[0] + " | pos = " + rst[2] + " | neg = " + rst[4] + "\nThe rule doesn't cover: " + rst[1] + " | pos = " + rst[5]); System.err.println("\nRuleset so far: "); for (int x = 0; x < ruleset.size(); x++) { System.err.println(x + ": " + ((RipperRule) ruleset.get(x)).toString(this.m_Class)); } System.err.println(); } // Data not covered if (finalRulesetStat.getRulesetSize() > 0) { newData = finalRulesetStat.getFiltered(position)[1]; } hasPositive = Utils.gr(rst[5], 0.0); // Positives remaining? 
position++; } // while !stop && hasPositive if (ruleset.size() > (position + 1)) { // Hasn't gone through yet for (int k = position + 1; k < ruleset.size(); k++) { finalRulesetStat.addAndUpdate(ruleset.get(k)); } } if (this.m_Debug) { System.err.println("\nDeleting rules to decrease" + " DL of the whole ruleset ..."); } finalRulesetStat.reduceDL(expFPRate, this.m_CheckErr); if (this.m_Debug) { int del = ruleset.size() - finalRulesetStat.getRulesetSize(); System.err.println(del + " rules are deleted" + " after DL reduction procedure"); } ruleset = finalRulesetStat.getRuleset(); rstats = finalRulesetStat; } // For each run of optimization } // if pruning is used // Concatenate the ruleset for this class to the whole ruleset if (this.m_Debug) { System.err.println("\nFinal ruleset: "); for (int x = 0; x < ruleset.size(); x++) { System.err.println(x + ": " + ((RipperRule) ruleset.get(x)).toString(this.m_Class)); } System.err.println(); } this.m_Ruleset.addAll(ruleset); this.m_RulesetStats.add(rstats); if (ruleset.size() > 0) { return rstats.getFiltered(ruleset.size() - 1)[1]; // Data not covered } else { return data; } } /** * Check whether the stopping criterion is met * * @param rst * the statistic of the ruleset * @param minDL * the min description length so far * @param dl * the current description length of the ruleset * @return true if the stopping criterion is met, false otherwise */ private boolean checkStop(final double[] rst, final double minDL, final double dl) { if (dl > minDL + MAX_DL_SURPLUS) { if (this.m_Debug) { System.err.println("DL too large: " + dl + " | " + minDL); } return true; } else if (!Utils.gr(rst[2], 0.0)) {// Covered positives if (this.m_Debug) { System.err.println("Too few positives."); } return true; } else if ((rst[4] / rst[0]) >= 0.5) {// Err rate if (this.m_CheckErr) { if (this.m_Debug) { System.err.println("Error too large: " + rst[4] + "/" + rst[0]); } return true; } else { return false; } } else { // No stopping condition met if (this.m_Debug) { System.err.println("Continue."); } return false; } } /** * Prints all the rules of the rule learner. * * @return a textual description of the classifier */ @Override public String toString() { if (this.m_Ruleset == null) { return "JRIP: No model built yet."; } StringBuffer sb = new StringBuffer("JRIP rules:\n" + "===========\n\n"); for (int j = 0; j < this.m_RulesetStats.size(); j++) { RuleStats rs = this.m_RulesetStats.get(j); ArrayList<Rule> rules = rs.getRuleset(); for (int k = 0; k < rules.size(); k++) { double[] simStats = rs.getSimpleStats(k); sb.append(((RipperRule) rules.get(k)).toString(this.m_Class) + " (" + simStats[0] + "/" + simStats[4] + ")\n"); } } if (this.m_Debug) { System.err.println("Inside m_Ruleset"); for (int i = 0; i < this.m_Ruleset.size(); i++) { System.err.println(((RipperRule) this.m_Ruleset.get(i)).toString(this.m_Class)); } } sb.append("\nNumber of Rules : " + this.m_Ruleset.size() + "\n"); return sb.toString(); } public Attribute getM_Class() { return this.m_Class; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method. * * @param args * the options for the classifier */ public static void main(final String[] args) { runClassifier(new JRip(), args); } }
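The XXX interruption checks woven through JRip above only take effect when training runs on a thread that some controller can interrupt. The following standalone sketch (not part of this source tree) shows one plausible way to drive them; the ARFF file name "data.arff" and the five-second budget are invented for illustration.

// Hypothetical driver for the interruption hooks; file name and timeout are made up.
import weka.classifiers.rules.JRip;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class InterruptibleJRipDemo {
  public static void main(String[] args) throws Exception {
    Instances data = new DataSource("data.arff").getDataSet(); // assumed input file
    data.setClassIndex(data.numAttributes() - 1);
    final JRip rip = new JRip();
    Thread worker = new Thread(() -> {
      try {
        rip.buildClassifier(data); // polls Thread.interrupted() at the XXX checkpoints
        System.out.println(rip);
      } catch (Exception e) {
        // this fork surfaces cancellation as an InterruptedException
        System.err.println("Training aborted: " + e.getMessage());
      }
    });
    worker.start();
    worker.join(5000); // allow five seconds of training
    if (worker.isAlive()) {
      worker.interrupt(); // makes the next Thread.interrupted() check fire
      worker.join();
    }
  }
}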
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/rules/M5Rules.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * M5Rules.java * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.rules; import weka.classifiers.trees.m5.M5Base; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; /** <!-- globalinfo-start --> * Generates a decision list for regression problems using separate-and-conquer. In each iteration it builds a model tree using M5 and makes the "best" leaf into a rule.<br/> * <br/> * For more information see:<br/> * <br/> * Geoffrey Holmes, Mark Hall, Eibe Frank: Generating Rule Sets from Model Trees. In: Twelfth Australian Joint Conference on Artificial Intelligence, 1-12, 1999.<br/> * <br/> * Ross J. Quinlan: Learning with Continuous Classes. In: 5th Australian Joint Conference on Artificial Intelligence, Singapore, 343-348, 1992.<br/> * <br/> * Y. Wang, I. H. Witten: Induction of model trees for predicting continuous classes. In: Poster papers of the 9th European Conference on Machine Learning, 1997. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Holmes1999, * author = {Geoffrey Holmes and Mark Hall and Eibe Frank}, * booktitle = {Twelfth Australian Joint Conference on Artificial Intelligence}, * pages = {1-12}, * publisher = {Springer}, * title = {Generating Rule Sets from Model Trees}, * year = {1999} * } * * &#64;inproceedings{Quinlan1992, * address = {Singapore}, * author = {Ross J. Quinlan}, * booktitle = {5th Australian Joint Conference on Artificial Intelligence}, * pages = {343-348}, * publisher = {World Scientific}, * title = {Learning with Continuous Classes}, * year = {1992} * } * * &#64;inproceedings{Wang1997, * author = {Y. Wang and I. H. 
Witten}, * booktitle = {Poster papers of the 9th European Conference on Machine Learning}, * publisher = {Springer}, * title = {Induction of model trees for predicting continuous classes}, * year = {1997} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -N * Use unpruned tree/rules</pre> * * <pre> -U * Use unsmoothed predictions</pre> * * <pre> -R * Build regression tree/rule rather than a model tree/rule</pre> * * <pre> -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf * (default 4)</pre> * <!-- options-end --> * * @author <a href="mailto:mhall@cs.waikato.ac.nz">Mark Hall</a> * @version $Revision$ */ public class M5Rules extends M5Base implements TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -1746114858746563180L; /** * Returns a string describing classifier * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Generates a decision list for regression problems using " + "separate-and-conquer. In each iteration it builds a " + "model tree using M5 and makes the \"best\" " + "leaf into a rule.\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } /** * Constructor */ public M5Rules() { super(); setGenerateRules(true); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Geoffrey Holmes and Mark Hall and Eibe Frank"); result.setValue(Field.TITLE, "Generating Rule Sets from Model Trees"); result.setValue(Field.BOOKTITLE, "Twelfth Australian Joint Conference on Artificial Intelligence"); result.setValue(Field.YEAR, "1999"); result.setValue(Field.PAGES, "1-12"); result.setValue(Field.PUBLISHER, "Springer"); result.add(super.getTechnicalInformation()); return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method by which this class can be tested * * @param args an array of options */ public static void main(String[] args) { runClassifier(new M5Rules(), args); } }
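As a usage note, the -R and -M options documented in the javadoc above can also be passed programmatically. A minimal hedged sketch (not part of M5Rules.java; "cpu.arff" is a placeholder for any regression dataset with a numeric class):

// Hypothetical usage of M5Rules; the dataset name is a stand-in.
import weka.classifiers.rules.M5Rules;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class M5RulesDemo {
  public static void main(String[] args) throws Exception {
    Instances data = new DataSource("cpu.arff").getDataSet(); // placeholder regression data
    data.setClassIndex(data.numAttributes() - 1); // numeric class attribute
    M5Rules learner = new M5Rules();
    learner.setOptions(new String[] { "-R", "-M", "8" }); // regression rules, min 8 instances per leaf
    learner.buildClassifier(data);
    System.out.println(learner); // prints the generated decision list
  }
}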
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/rules/OneR.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * OneR.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.rules; import java.io.Serializable; import java.util.Collections; import java.util.Enumeration; import java.util.LinkedList; import java.util.ListIterator; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.Sourcable; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WekaException; /** * <!-- globalinfo-start --> Class for building and using a 1R classifier; in other words, uses the minimum-error attribute for prediction, discretizing numeric attributes. For more information, see:<br/> * <br/> * R.C. Holte (1993). Very simple classification rules perform well on most commonly used datasets. Machine Learning. 11:63-91. * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;article{Holte1993, * author = {R.C. Holte}, * journal = {Machine Learning}, * pages = {63-91}, * title = {Very simple classification rules perform well on most commonly used datasets}, * volume = {11}, * year = {1993} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -B &lt;minimum bucket size&gt; * The minimum number of objects in a bucket (default: 6). * </pre> * * <!-- options-end --> * * @author Ian H. Witten (ihw@cs.waikato.ac.nz) * @version $Revision$ */ public class OneR extends AbstractClassifier implements TechnicalInformationHandler, Sourcable { /** for serialization */ static final long serialVersionUID = -3459427003147861443L; /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for building and using a 1R classifier; in other words, uses " + "the minimum-error attribute for prediction, discretizing numeric " + "attributes. For more information, see:\n\n" + this.getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed information about the technical background of this class, e.g., paper reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "R.C. 
Holte"); result.setValue(Field.YEAR, "1993"); result.setValue(Field.TITLE, "Very simple classification rules perform well on most commonly used datasets"); result.setValue(Field.JOURNAL, "Machine Learning"); result.setValue(Field.VOLUME, "11"); result.setValue(Field.PAGES, "63-91"); return result; } /** * Class for storing a 1R rule. */ private class OneRRule implements Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = 2252814630957092281L; /** The class attribute. */ private final Attribute m_class; /** The number of instances used for building the rule. */ private final int m_numInst; /** Attribute to test */ private final Attribute m_attr; /** Training set examples this rule gets right */ private int m_correct; /** Predicted class for each value of attr */ private final int[] m_classifications; /** Predicted class for missing values */ private int m_missingValueClass = -1; /** Breakpoints (numeric attributes only) */ private double[] m_breakpoints; /** * Constructor for nominal attribute. * * @param data * the data to work with * @param attribute * the attribute to use * @throws Exception * if something goes wrong */ public OneRRule(final Instances data, final Attribute attribute) throws Exception { this.m_class = data.classAttribute(); this.m_numInst = data.numInstances(); this.m_attr = attribute; this.m_correct = 0; this.m_classifications = new int[this.m_attr.numValues()]; } /** * Constructor for numeric attribute. * * @param data * the data to work with * @param attribute * the attribute to use * @param nBreaks * the number of branches * @throws Exception * if something goes wrong */ public OneRRule(final Instances data, final Attribute attribute, final int nBreaks) throws Exception { this.m_class = data.classAttribute(); this.m_numInst = data.numInstances(); this.m_attr = attribute; this.m_correct = 0; this.m_classifications = new int[nBreaks]; this.m_breakpoints = new double[nBreaks - 1]; // last breakpoint is infinity } /** * Returns a description of the rule. * * @return a string representation of the rule */ @Override public String toString() { try { StringBuffer text = new StringBuffer(); text.append(this.m_attr.name() + ":\n"); for (int v = 0; v < this.m_classifications.length; v++) { text.append("\t"); if (this.m_attr.isNominal()) { text.append(this.m_attr.value(v)); } else if (v < this.m_breakpoints.length) { text.append("< " + this.m_breakpoints[v]); } else if (v > 0) { text.append(">= " + this.m_breakpoints[v - 1]); } else { text.append("not ?"); } text.append("\t-> " + this.m_class.value(this.m_classifications[v]) + "\n"); } if (this.m_missingValueClass != -1) { text.append("\t?\t-> " + this.m_class.value(this.m_missingValueClass) + "\n"); } text.append("(" + this.m_correct + "/" + this.m_numInst + " instances correct)\n"); return text.toString(); } catch (Exception e) { return "Can't print OneR classifier!"; } } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } /** A 1-R rule */ private OneRRule m_rule; /** The minimum bucket size */ private int m_minBucketSize = 6; /** a ZeroR model in case no model can be built from the data */ private Classifier m_ZeroR; /** * Classifies a given instance. * * @param inst * the instance to be classified * @return the classification of the instance */ @Override public double classifyInstance(final Instance inst) throws Exception { // default model? 
if (this.m_ZeroR != null) { return this.m_ZeroR.classifyInstance(inst); } int v = 0; if (inst.isMissing(this.m_rule.m_attr)) { if (this.m_rule.m_missingValueClass != -1) { return this.m_rule.m_missingValueClass; } else { return 0; // missing values occur in test but not training set } } if (this.m_rule.m_attr.isNominal()) { v = (int) inst.value(this.m_rule.m_attr); } else { while (v < this.m_rule.m_breakpoints.length && inst.value(this.m_rule.m_attr) >= this.m_rule.m_breakpoints[v]) { v++; } } return this.m_rule.m_classifications[v]; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Generates the classifier. * * @param instances * the instances to be used for building the classifier * @throws Exception * if the classifier can't be built successfully */ @Override public void buildClassifier(final Instances instances) throws Exception { boolean noRule = true; // can classifier handle the data? this.getCapabilities().testWithFail(instances); // remove instances with missing class Instances data = new Instances(instances); data.deleteWithMissingClass(); // only class? -> build ZeroR model if (data.numAttributes() == 1) { System.err.println("Cannot build model (only class attribute present in data!), " + "using ZeroR model instead!"); this.m_ZeroR = new weka.classifiers.rules.ZeroR(); this.m_ZeroR.buildClassifier(data); return; } else { this.m_ZeroR = null; } // for each attribute ... Enumeration<Attribute> enu = instances.enumerateAttributes(); while (enu.hasMoreElements()) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } try { OneRRule r = this.newRule(enu.nextElement(), data); // if this attribute is the best so far, replace the rule if (noRule || r.m_correct > this.m_rule.m_correct) { this.m_rule = r; } noRule = false; } catch (Exception ex) { if (ex instanceof InterruptedException) { throw ex; } } } if (noRule) { throw new WekaException("No attributes found to work with!"); } } /** * Create a rule branching on this attribute. * * @param attr * the attribute to branch on * @param data * the data to be used for creating the rule * @return the generated rule * @throws Exception * if the rule can't be built successfully */ public OneRRule newRule(final Attribute attr, final Instances data) throws Exception { OneRRule r; // ... create array to hold the missing value counts int[] missingValueCounts = new int[data.classAttribute().numValues()]; if (attr.isNominal()) { r = this.newNominalRule(attr, data, missingValueCounts); } else { r = this.newNumericRule(attr, data, missingValueCounts); } r.m_missingValueClass = Utils.maxIndex(missingValueCounts); if (missingValueCounts[r.m_missingValueClass] == 0) { r.m_missingValueClass = -1; // signal for no missing value class } else { r.m_correct += missingValueCounts[r.m_missingValueClass]; } return r; } /** * Create a rule branching on this nominal attribute. 
* * @param attr * the attribute to branch on * @param data * the data to be used for creating the rule * @param missingValueCounts * to be filled in * @return the generated rule * @throws Exception * if the rule can't be built successfully */ public OneRRule newNominalRule(final Attribute attr, final Instances data, final int[] missingValueCounts) throws Exception { // ... create arrays to hold the counts int[][] counts = new int[attr.numValues()][data.classAttribute().numValues()]; // ... calculate the counts Enumeration<Instance> enu = data.enumerateInstances(); while (enu.hasMoreElements()) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } Instance i = enu.nextElement(); if (i.isMissing(attr)) { missingValueCounts[(int) i.classValue()]++; } else { counts[(int) i.value(attr)][(int) i.classValue()]++; } } OneRRule r = new OneRRule(data, attr); // create a new rule for (int value = 0; value < attr.numValues(); value++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } int best = Utils.maxIndex(counts[value]); r.m_classifications[value] = best; r.m_correct += counts[value][best]; } return r; } /** * Create a rule branching on this numeric attribute * * @param attr * the attribute to branch on * @param data * the data to be used for creating the rule * @param missingValueCounts * to be filled in * @return the generated rule * @throws Exception * if the rule can't be built successfully */ public OneRRule newNumericRule(final Attribute attr, Instances data, final int[] missingValueCounts) throws Exception { // make a copy before sorting so that ties are treated consistently // and aren't affected by sorting performed for any numeric // attributes processed before this one data = new Instances(data); int lastInstance = data.numInstances(); // missing values get sorted to the end of the instances data.sort(attr); while (lastInstance > 0 && data.instance(lastInstance - 1).isMissing(attr)) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } lastInstance--; missingValueCounts[(int) data.instance(lastInstance).classValue()]++; } if (lastInstance == 0) { throw new Exception("Only missing values in the training data!"); } // gather class distributions for all values double lastValue = 0; LinkedList<int[]> distributions = new LinkedList<>(); LinkedList<Double> values = new LinkedList<>(); int[] distribution = null; for (int i = 0; i < lastInstance; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } // new value? 
if ((i == 0) || (data.instance(i).value(attr) > lastValue)) { if (i != 0) { values.add((lastValue + data.instance(i).value(attr)) / 2.0); } lastValue = data.instance(i).value(attr); distribution = new int[data.numClasses()]; distributions.add(distribution); } distribution[(int) data.instance(i).classValue()]++; } values.add(Double.MAX_VALUE); // create iterator to go through list ListIterator<int[]> it = distributions.listIterator(); ListIterator<Double> itVals = values.listIterator(); int[] oldDist = null; while (it.hasNext()) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } // grab next trivial bucket and iterate to next value as well int[] newDist = it.next(); itVals.next(); // should we merge the two buckets? if ((oldDist != null) && // classes the same? ((Utils.maxIndex(newDist) == Utils.maxIndex(oldDist)) || // bucket not large enough? (oldDist[Utils.maxIndex(oldDist)] < this.m_minBucketSize))) { // add counts for (int j = 0; j < oldDist.length; j++) { newDist[j] += oldDist[j]; } // remove distribution it.previous(); // element just visited it.previous(); // previous element we want to remove it.remove(); it.next(); // back to element just visited // remove value itVals.previous(); // element just visited itVals.previous(); // previous element we want to remove itVals.remove(); itVals.next(); // back to element just visited } // make progress oldDist = newDist; } // last scan: merge adjacent intervals with the same class and calculate correct classifications int numCorrect = 0; it = distributions.listIterator(); itVals = values.listIterator(); oldDist = null; while (it.hasNext()) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } // grab next trivial bucket and iterate to next value as well int[] newDist = it.next(); itVals.next(); // number of correct classifications does not change by merging numCorrect += newDist[Utils.maxIndex(newDist)]; // should we merge the two buckets? if ((oldDist != null) && // classes the same? (Utils.maxIndex(newDist) == Utils.maxIndex(oldDist))) { // add counts for (int j = 0; j < oldDist.length; j++) { newDist[j] += oldDist[j]; } // remove distribution it.previous(); // element just visited it.previous(); // previous element we want to remove it.remove(); it.next(); // back to element just visited // remove value itVals.previous(); // element just visited itVals.previous(); // previous element we want to remove itVals.remove(); itVals.next(); // back to element just visited } // make progress oldDist = newDist; } OneRRule r = new OneRRule(data, attr, distributions.size()); // new rule with one branch per merged interval r.m_correct = numCorrect; it = distributions.listIterator(); itVals = values.listIterator(); int v = 0; while (it.hasNext()) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } r.m_classifications[v] = Utils.maxIndex(it.next()); double splitPoint = itVals.next(); if (itVals.hasNext()) { r.m_breakpoints[v] = splitPoint; } v++; } return r; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ @Override public Enumeration<Option> listOptions() { String string = "\tThe minimum number of objects in a bucket (default: 6)."; Vector<Option> newVector = new Vector<>(1); newVector.addElement(new Option(string, "B", 1, "-B <minimum bucket size>")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -B &lt;minimum bucket size&gt; * The minimum number of objects in a bucket (default: 6). * </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String bucketSizeString = Utils.getOption('B', options); if (bucketSizeString.length() != 0) { this.m_minBucketSize = Integer.parseInt(bucketSizeString); } else { this.m_minBucketSize = 6; } super.setOptions(options); } /** * Gets the current settings of the OneR classifier. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<>(1); options.add("-B"); options.add("" + this.m_minBucketSize); Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * Returns a string that describes the classifier as source. The classifier will be contained in a class with the given name (there may be auxiliary classes), and will contain a method with the signature: * * <pre> * <code> * public static double classify(Object[] i); * </code> * </pre> * * where the array <code>i</code> contains elements that are either Double, String, with missing values represented as null. The generated code is public domain and comes with no warranty. * * @param className * the name that should be given to the source class. 
* @return the object source described by a string * @throws Exception * if the source can't be computed */ @Override public String toSource(final String className) throws Exception { StringBuffer result; int i; result = new StringBuffer(); if (this.m_ZeroR != null) { result.append(((ZeroR) this.m_ZeroR).toSource(className)); } else { result.append("class " + className + " {\n"); result.append(" public static double classify(Object[] i) {\n"); result.append(" // chosen attribute: " + this.m_rule.m_attr.name() + " (" + this.m_rule.m_attr.index() + ")\n"); result.append("\n"); // missing values result.append(" // missing value?\n"); result.append(" if (i[" + this.m_rule.m_attr.index() + "] == null)\n"); if (this.m_rule.m_missingValueClass != -1) { // mirror classifyInstance: emit the class predicted for missing values result.append(" return " + this.m_rule.m_missingValueClass + ";\n"); } else { result.append(" return 0;\n"); } result.append("\n"); // actual prediction result.append(" // prediction\n"); result.append(" double v = 0;\n"); result.append(" double[] classifications = new double[]{" + Utils.arrayToString(this.m_rule.m_classifications) + "};"); result.append(" // "); for (i = 0; i < this.m_rule.m_classifications.length; i++) { if (i > 0) { result.append(", "); } result.append(this.m_rule.m_class.value(this.m_rule.m_classifications[i])); } result.append("\n"); if (this.m_rule.m_attr.isNominal()) { for (i = 0; i < this.m_rule.m_attr.numValues(); i++) { result.append(" "); if (i > 0) { result.append("else "); } result.append("if (((String) i[" + this.m_rule.m_attr.index() + "]).equals(\"" + this.m_rule.m_attr.value(i) + "\"))\n"); result.append(" v = " + i + "; // " + this.m_rule.m_class.value(this.m_rule.m_classifications[i]) + "\n"); } } else { result.append(" double[] breakpoints = new double[]{" + Utils.arrayToString(this.m_rule.m_breakpoints) + "};\n"); result.append(" while (v < breakpoints.length && \n"); result.append(" ((Double) i[" + this.m_rule.m_attr.index() + "]) >= breakpoints[(int) v]) {\n"); result.append(" v++;\n"); result.append(" }\n"); } result.append(" return classifications[(int) v];\n"); result.append(" }\n"); result.append("}\n"); } return result.toString(); } /** * Returns a description of the classifier * * @return a string representation of the classifier */ @Override public String toString() { // only ZeroR model? if (this.m_ZeroR != null) { StringBuffer buf = new StringBuffer(); buf.append(this.getClass().getName().replaceAll(".*\\.", "") + "\n"); buf.append(this.getClass().getName().replaceAll(".*\\.", "").replaceAll(".", "=") + "\n\n"); buf.append("Warning: No model could be built, hence ZeroR model is used:\n\n"); buf.append(this.m_ZeroR.toString()); return buf.toString(); } if (this.m_rule == null) { return "OneR: No model built yet."; } return this.m_rule.toString(); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String minBucketSizeTipText() { return "The minimum bucket size used for discretizing numeric " + "attributes."; } /** * Get the value of minBucketSize. * * @return Value of minBucketSize. */ public int getMinBucketSize() { return this.m_minBucketSize; } /** * Set the value of minBucketSize. * * @param v * Value to assign to minBucketSize. */ public void setMinBucketSize(final int v) { this.m_minBucketSize = v; } /** * Returns the revision string. 
* * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class * * @param argv * the commandline options */ public static void main(final String[] argv) { runClassifier(new OneR(), argv); } }
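A brief usage note: the minimum bucket size and the source-code generation hook above can be exercised as follows. This is a hedged, hypothetical sketch (not part of OneR.java); "weather.arff" and the class name "OneRSrc" are placeholders.

// Hypothetical usage of OneR; dataset name and generated class name are stand-ins.
import weka.classifiers.rules.OneR;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class OneRDemo {
  public static void main(String[] args) throws Exception {
    Instances data = new DataSource("weather.arff").getDataSet();
    data.setClassIndex(data.numAttributes() - 1);
    OneR oner = new OneR();
    oner.setMinBucketSize(10); // same effect as -B 10 when discretizing numeric attributes
    oner.buildClassifier(data);
    System.out.println(oner);                     // the single chosen rule
    System.out.println(oner.toSource("OneRSrc")); // generated Java, as described above
  }
}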
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/rules/PART.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * PART.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.rules; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.rules.part.MakeDecList; import weka.classifiers.trees.j48.BinC45ModelSelection; import weka.classifiers.trees.j48.C45ModelSelection; import weka.classifiers.trees.j48.ModelSelection; import weka.core.AdditionalMeasureProducer; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.Summarizable; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.core.Capabilities.Capability; /** * <!-- globalinfo-start --> Class for generating a PART decision list. Uses * separate-and-conquer. Builds a partial C4.5 decision tree in each iteration * and makes the "best" leaf into a rule.<br/> * <br/> * For more information, see:<br/> * <br/> * Eibe Frank, Ian H. Witten: Generating Accurate Rule Sets Without Global * Optimization. In: Fifteenth International Conference on Machine Learning, * 144-151, 1998. * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;inproceedings{Frank1998, * author = {Eibe Frank and Ian H. Witten}, * booktitle = {Fifteenth International Conference on Machine Learning}, * editor = {J. Shavlik}, * pages = {144-151}, * publisher = {Morgan Kaufmann}, * title = {Generating Accurate Rule Sets Without Global Optimization}, * year = {1998}, * PS = {http://www.cs.waikato.ac.nz/\~eibe/pubs/ML98-57.ps.gz} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -C &lt;pruning confidence&gt; * Set confidence threshold for pruning. * (default 0.25) * </pre> * * <pre> * -M &lt;minimum number of objects&gt; * Set minimum number of objects per leaf. * (default 2) * </pre> * * <pre> * -R * Use reduced error pruning. * </pre> * * <pre> * -N &lt;number of folds&gt; * Set number of folds for reduced error * pruning. One fold is used as pruning set. * (default 3) * </pre> * * <pre> * -B * Use binary splits only. * </pre> * * <pre> * -U * Generate unpruned decision list. * </pre> * * <pre> * -J * Do not use MDL correction for info gain on numeric attributes. * </pre> * * <pre> * -Q &lt;seed&gt; * Seed for random data shuffling (default 1). * </pre> * * <pre> * -doNotMakeSplitPointActualValue * Do not make split point actual value. 
* </pre> * * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision$ */ public class PART extends AbstractClassifier implements OptionHandler, WeightedInstancesHandler, Summarizable, AdditionalMeasureProducer, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 8121455039782598361L; /** The decision list */ private MakeDecList m_root; /** Confidence level */ private float m_CF = 0.25f; /** Minimum number of objects */ private int m_minNumObj = 2; /** Use MDL correction? */ private boolean m_useMDLcorrection = true; /** Use reduced error pruning? */ private boolean m_reducedErrorPruning = false; /** Number of folds for reduced error pruning. */ private int m_numFolds = 3; /** Binary splits on nominal attributes? */ private boolean m_binarySplits = false; /** Generate unpruned list? */ private boolean m_unpruned = false; /** The seed for random number generation. */ private int m_Seed = 1; /** Do not relocate split point to actual data value */ private boolean m_doNotMakeSplitPointActualValue; /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter * gui */ public String globalInfo() { return "Class for generating a PART decision list. Uses " + "separate-and-conquer. Builds a partial C4.5 decision tree " + "in each iteration and makes the \"best\" leaf into a rule.\n\n" + "For more information, see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed * information about the technical background of this class, e.g., paper * reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Eibe Frank and Ian H. Witten"); result.setValue(Field.TITLE, "Generating Accurate Rule Sets Without Global Optimization"); result.setValue(Field.BOOKTITLE, "Fifteenth International Conference on Machine Learning"); result.setValue(Field.EDITOR, "J. Shavlik"); result.setValue(Field.YEAR, "1998"); result.setValue(Field.PAGES, "144-151"); result.setValue(Field.PUBLISHER, "Morgan Kaufmann"); result.setValue(Field.PS, "http://www.cs.waikato.ac.nz/~eibe/pubs/ML98-57.ps.gz"); return result; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result; result = new Capabilities(this); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(0); return result; } /** * Generates the classifier. * * @param instances the data to train with * @throws Exception if classifier can't be built successfully */ @Override public void buildClassifier(Instances instances) throws Exception { // can classifier handle the data? 
getCapabilities().testWithFail(instances); // remove instances with missing class instances = new Instances(instances); instances.deleteWithMissingClass(); ModelSelection modSelection; if (m_binarySplits) { modSelection = new BinC45ModelSelection(m_minNumObj, instances, m_useMDLcorrection, m_doNotMakeSplitPointActualValue); } else { modSelection = new C45ModelSelection(m_minNumObj, instances, m_useMDLcorrection, m_doNotMakeSplitPointActualValue); } if (m_unpruned) { m_root = new MakeDecList(modSelection, m_minNumObj); } else if (m_reducedErrorPruning) { m_root = new MakeDecList(modSelection, m_numFolds, m_minNumObj, m_Seed); } else { m_root = new MakeDecList(modSelection, m_CF, m_minNumObj); } m_root.buildClassifier(instances); if (m_binarySplits) { ((BinC45ModelSelection) modSelection).cleanup(); } else { ((C45ModelSelection) modSelection).cleanup(); } } /** * Classifies an instance. * * @param instance the instance to classify * @return the classification * @throws Exception if instance can't be classified successfully */ @Override public double classifyInstance(Instance instance) throws Exception { return m_root.classifyInstance(instance); } /** * Returns class probabilities for an instance. * * @param instance the instance to get the distribution for * @return the class probabilities * @throws Exception if the distribution can't be computed successfully */ @Override public final double[] distributionForInstance(Instance instance) throws Exception { return m_root.distributionForInstance(instance); } /** * Returns an enumeration describing the available options. * * Valid options are: * <p> * * -C confidence <br> * Set confidence threshold for pruning. (Default: 0.25) * <p> * * -M number <br> * Set minimum number of instances per leaf. (Default: 2) * <p> * * -R <br> * Use reduced error pruning. * <p> * * -N number <br> * Set number of folds for reduced error pruning. One fold is used as the * pruning set. (Default: 3) * <p> * * -B <br> * Use binary splits for nominal attributes. * <p> * * -U <br> * Generate unpruned decision list. * <p> * * -Q <br> * The seed for reduced-error pruning. * <p> * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<Option>(8); newVector.addElement(new Option("\tSet confidence threshold for pruning.\n" + "\t(default 0.25)", "C", 1, "-C <pruning confidence>")); newVector.addElement(new Option( "\tSet minimum number of objects per leaf.\n" + "\t(default 2)", "M", 1, "-M <minimum number of objects>")); newVector.addElement(new Option("\tUse reduced error pruning.", "R", 0, "-R")); newVector.addElement(new Option("\tSet number of folds for reduced error\n" + "\tpruning. One fold is used as pruning set.\n" + "\t(default 3)", "N", 1, "-N <number of folds>")); newVector.addElement(new Option("\tUse binary splits only.", "B", 0, "-B")); newVector.addElement(new Option("\tGenerate unpruned decision list.", "U", 0, "-U")); newVector.addElement(new Option( "\tDo not use MDL correction for info gain on numeric attributes.", "J", 0, "-J")); newVector.addElement(new Option( "\tSeed for random data shuffling (default 1).", "Q", 1, "-Q <seed>")); newVector.addElement(new Option("\tDo not make split point actual value.", "-doNotMakeSplitPointActualValue", 0, "-doNotMakeSplitPointActualValue")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. 
* <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -C &lt;pruning confidence&gt; * Set confidence threshold for pruning. * (default 0.25) * </pre> * * <pre> * -M &lt;minimum number of objects&gt; * Set minimum number of objects per leaf. * (default 2) * </pre> * * <pre> * -R * Use reduced error pruning. * </pre> * * <pre> * -N &lt;number of folds&gt; * Set number of folds for reduced error * pruning. One fold is used as pruning set. * (default 3) * </pre> * * <pre> * -B * Use binary splits only. * </pre> * * <pre> * -U * Generate unpruned decision list. * </pre> * * <pre> * -J * Do not use MDL correction for info gain on numeric attributes. * </pre> * * <pre> * -Q &lt;seed&gt; * Seed for random data shuffling (default 1). * </pre> * * <pre> * -doNotMakeSplitPointActualValue * Do not make split point actual value. * </pre> * * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { // Pruning options m_unpruned = Utils.getFlag('U', options); m_reducedErrorPruning = Utils.getFlag('R', options); m_binarySplits = Utils.getFlag('B', options); m_useMDLcorrection = !Utils.getFlag('J', options); m_doNotMakeSplitPointActualValue = Utils.getFlag( "doNotMakeSplitPointActualValue", options); String confidenceString = Utils.getOption('C', options); if (confidenceString.length() != 0) { if (m_reducedErrorPruning) { throw new Exception("Setting CF doesn't make sense " + "for reduced error pruning."); } else { m_CF = Float.parseFloat(confidenceString); // avoids the deprecated Float constructor if ((m_CF <= 0) || (m_CF >= 1)) { throw new Exception( "CF has to be greater than zero and smaller than one!"); } } } else { m_CF = 0.25f; } String numFoldsString = Utils.getOption('N', options); if (numFoldsString.length() != 0) { if (!m_reducedErrorPruning) { throw new Exception("Setting the number of folds" + " only makes sense for" + " reduced error pruning."); } else { m_numFolds = Integer.parseInt(numFoldsString); } } else { m_numFolds = 3; } // Other options String minNumString = Utils.getOption('M', options); if (minNumString.length() != 0) { m_minNumObj = Integer.parseInt(minNumString); } else { m_minNumObj = 2; } String seedString = Utils.getOption('Q', options); if (seedString.length() != 0) { m_Seed = Integer.parseInt(seedString); } else { m_Seed = 1; } super.setOptions(options); } /** * Gets the current settings of the Classifier. 
* * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<String>(13); if (m_unpruned) { options.add("-U"); } if (m_reducedErrorPruning) { options.add("-R"); } if (m_binarySplits) { options.add("-B"); } options.add("-M"); options.add("" + m_minNumObj); if (!m_reducedErrorPruning) { options.add("-C"); options.add("" + m_CF); } if (m_reducedErrorPruning) { options.add("-N"); options.add("" + m_numFolds); } options.add("-Q"); options.add("" + m_Seed); if (!m_useMDLcorrection) { options.add("-J"); } if (m_doNotMakeSplitPointActualValue) { options.add("-doNotMakeSplitPointActualValue"); } Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * Returns a description of the classifier * * @return a string representation of the classifier */ @Override public String toString() { if (m_root == null) { return "No classifier built"; } return "PART decision list\n------------------\n\n" + m_root.toString(); } /** * Returns a super-concise version of the model * * @return a concise version of the model */ @Override public String toSummaryString() { return "Number of rules: " + m_root.numRules() + "\n"; } /** * Return the number of rules. * * @return the number of rules */ public double measureNumRules() { return m_root.numRules(); } /** * Returns an enumeration of the additional measure names * * @return an enumeration of the measure names */ @Override public Enumeration<String> enumerateMeasures() { Vector<String> newVector = new Vector<String>(1); newVector.addElement("measureNumRules"); return newVector.elements(); } /** * Returns the value of the named measure * * @param additionalMeasureName the name of the measure to query for its value * @return the value of the named measure * @throws IllegalArgumentException if the named measure is not supported */ @Override public double getMeasure(String additionalMeasureName) { if (additionalMeasureName.compareToIgnoreCase("measureNumRules") == 0) { return measureNumRules(); } else { throw new IllegalArgumentException(additionalMeasureName + " not supported (PART)"); } } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String confidenceFactorTipText() { return "The confidence factor used for pruning (smaller values incur " + "more pruning)."; } /** * Get the value of CF. * * @return Value of CF. */ public float getConfidenceFactor() { return m_CF; } /** * Set the value of CF. * * @param v Value to assign to CF. */ public void setConfidenceFactor(float v) { m_CF = v; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String minNumObjTipText() { return "The minimum number of instances per rule."; } /** * Get the value of minNumObj. * * @return Value of minNumObj. */ public int getMinNumObj() { return m_minNumObj; } /** * Set the value of minNumObj. * * @param v Value to assign to minNumObj. */ public void setMinNumObj(int v) { m_minNumObj = v; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String reducedErrorPruningTipText() { return "Whether reduced-error pruning is used instead of C4.5 pruning."; } /** * Get the value of reducedErrorPruning. * * @return Value of reducedErrorPruning. 
*/ public boolean getReducedErrorPruning() { return m_reducedErrorPruning; } /** * Set the value of reducedErrorPruning. * * @param v Value to assign to reducedErrorPruning. */ public void setReducedErrorPruning(boolean v) { m_reducedErrorPruning = v; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String unprunedTipText() { return "Whether pruning is turned off (if true, an unpruned decision list is generated)."; } /** * Get the value of unpruned. * * @return Value of unpruned. */ public boolean getUnpruned() { return m_unpruned; } /** * Set the value of unpruned. * * @param newunpruned Value to assign to unpruned. */ public void setUnpruned(boolean newunpruned) { m_unpruned = newunpruned; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String useMDLcorrectionTipText() { return "Whether MDL correction is used when finding splits on numeric attributes."; } /** * Get the value of useMDLcorrection. * * @return Value of useMDLcorrection. */ public boolean getUseMDLcorrection() { return m_useMDLcorrection; } /** * Set the value of useMDLcorrection. * * @param newuseMDLcorrection Value to assign to useMDLcorrection. */ public void setUseMDLcorrection(boolean newuseMDLcorrection) { m_useMDLcorrection = newuseMDLcorrection; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String numFoldsTipText() { return "Determines the amount of data used for reduced-error pruning. " + "One fold is used for pruning, the rest for growing the rules."; } /** * Get the value of numFolds. * * @return Value of numFolds. */ public int getNumFolds() { return m_numFolds; } /** * Set the value of numFolds. * * @param v Value to assign to numFolds. */ public void setNumFolds(int v) { m_numFolds = v; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String seedTipText() { return "The seed used for randomizing the data " + "when reduced-error pruning is used."; } /** * Get the value of Seed. * * @return Value of Seed. */ public int getSeed() { return m_Seed; } /** * Set the value of Seed. * * @param newSeed Value to assign to Seed. */ public void setSeed(int newSeed) { m_Seed = newSeed; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String binarySplitsTipText() { return "Whether to use binary splits on nominal attributes when " + "building the partial trees."; } /** * Get the value of binarySplits. * * @return Value of binarySplits. */ public boolean getBinarySplits() { return m_binarySplits; } /** * Set the value of binarySplits. * * @param v Value to assign to binarySplits. */ public void setBinarySplits(boolean v) { m_binarySplits = v; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String doNotMakeSplitPointActualValueTipText() { return "If true, the split point is not relocated to an actual data value." + " This can yield substantial speed-ups for large datasets with numeric attributes."; } /** * Gets the value of doNotMakeSplitPointActualValue. 
* * @return the value */ public boolean getDoNotMakeSplitPointActualValue() { return m_doNotMakeSplitPointActualValue; } /** * Sets the value of doNotMakeSplitPointActualValue. * * @param m_doNotMakeSplitPointActualValue the value to set */ public void setDoNotMakeSplitPointActualValue( boolean m_doNotMakeSplitPointActualValue) { this.m_doNotMakeSplitPointActualValue = m_doNotMakeSplitPointActualValue; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. * * @param argv command line options */ public static void main(String[] argv) { runClassifier(new PART(), argv); } }
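The property setters above can replace the command-line flags entirely. A hedged usage sketch (not part of PART.java; "soybean.arff" is a placeholder dataset name):

// Hypothetical usage of PART; the dataset name is a stand-in.
import weka.classifiers.rules.PART;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class PARTDemo {
  public static void main(String[] args) throws Exception {
    Instances data = new DataSource("soybean.arff").getDataSet();
    data.setClassIndex(data.numAttributes() - 1);
    PART part = new PART();
    part.setConfidenceFactor(0.1f); // more aggressive C4.5-style pruning (equivalent to -C 0.1)
    part.setMinNumObj(5);           // at least five instances per rule (equivalent to -M 5)
    part.buildClassifier(data);
    System.out.println(part.toSummaryString());
    System.out.println(part.getMeasure("measureNumRules") + " rules");
  }
}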
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/rules/Rule.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Rule.java * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.rules; import java.io.Serializable; import weka.core.Copyable; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.WeightedInstancesHandler; /** * Abstract class for a generic rule * * @author Xin Xu (xx5@cs.waikato.ac.nz) * @version $Revision$ */ public abstract class Rule implements WeightedInstancesHandler, Copyable, Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = 8815687740470471229L; /** * Get a shallow copy of this rule (the default implementation simply returns the rule itself) * * @return the copy */ public Object copy() { return this; } /** * Whether the instance is covered by this rule * * @param datum the instance in question * @return the boolean value indicating whether the instance * is covered by this rule */ public abstract boolean covers(Instance datum); /** * Build this rule * * @param data the data used to build the rule * @exception Exception if rule cannot be built */ public abstract void grow(Instances data) throws Exception; /** * Whether this rule has antecedents, i.e. whether it is more than a default rule * * @return the boolean value indicating whether the rule has antecedents */ public abstract boolean hasAntds(); /** * Get the consequent of this rule, i.e. the predicted class * * @return the consequent */ public abstract double getConsequent(); /** * The size of the rule, e.g., the number of antecedents in the case * of a conjunctive rule * * @return the size of the rule */ public abstract double size(); }
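To make the abstract contract above concrete, here is a hedged sketch of a minimal subclass (not part of the library): a default rule with no antecedents that always fires and predicts the weighted majority class. The class name is invented for illustration.

// Hypothetical minimal Rule implementation; not part of interruptible-weka.
import weka.classifiers.rules.Rule;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Utils;

public class MajorityClassRule extends Rule {
  private static final long serialVersionUID = 1L;
  private double m_consequent = -1; // class index predicted by this rule

  @Override
  public boolean covers(Instance datum) {
    return true; // no antecedents, so every instance is covered
  }

  @Override
  public void grow(Instances data) throws Exception {
    double[] counts = new double[data.numClasses()];
    for (int i = 0; i < data.numInstances(); i++) {
      counts[(int) data.instance(i).classValue()] += data.instance(i).weight();
    }
    m_consequent = Utils.maxIndex(counts); // weighted majority class
  }

  @Override
  public boolean hasAntds() {
    return false; // this is exactly a default rule
  }

  @Override
  public double getConsequent() {
    return m_consequent;
  }

  @Override
  public double size() {
    return 0; // zero antecedents
  }

  @Override
  public String getRevision() {
    return "demo"; // RevisionHandler requires an implementation
  }
}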
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/rules/RuleStats.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RuleStats.java * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.rules; import java.io.Serializable; import java.util.ArrayList; import java.util.Enumeration; import java.util.Random; import weka.core.Attribute; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Utils; /** * This class implements the statistics functions used in the propositional rule learner, from simpler ones, such as counting true/false positives/negatives and filtering data based on the ruleset, to more sophisticated ones such as MDL * calculation and rule variant generation for each rule in the ruleset. * <p> * * The statistics functions listed above need the specific data and the specific ruleset, which are supplied when an object of this class is instantiated. * <p> * * @author Xin Xu (xx5@cs.waikato.ac.nz) * @version $Revision$ */ public class RuleStats implements Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = -5708153367675298624L; /** The data on which the stats calculation is based */ private Instances m_Data; /** The specific ruleset in question */ private ArrayList<Rule> m_Ruleset; /** The simple stats of each rule */ private ArrayList<double[]> m_SimpleStats; /** The set of instances filtered by the ruleset */ private ArrayList<Instances[]> m_Filtered; /** * The total number of possible conditions that could appear in a rule */ private double m_Total; /** The redundancy factor in theory description length */ private static double REDUNDANCY_FACTOR = 0.5; /** The theory weight in the MDL calculation */ private double MDL_THEORY_WEIGHT = 1.0; /** The class distributions predicted by each rule */ private ArrayList<double[]> m_Distributions; /** Default constructor */ public RuleStats() { this.m_Data = null; this.m_Ruleset = null; this.m_SimpleStats = null; this.m_Filtered = null; this.m_Distributions = null; this.m_Total = -1; } /** * Constructor that provides ruleset and data * * @param data * the data * @param rules * the ruleset */ public RuleStats(final Instances data, final ArrayList<Rule> rules) { this(); this.m_Data = data; this.m_Ruleset = rules; } /** * Frees up memory after the classifier has been built.
*/ public void cleanUp() { this.m_Data = null; this.m_Filtered = null; } /** * Set the number of all conditions that could appear in a rule in this RuleStats object. If the given number is smaller than 0 (typically -1), it is calculated from the stored data * * @param total * the set number * @throws InterruptedException */ public void setNumAllConds(final double total) throws InterruptedException { if (total < 0) { this.m_Total = numAllConditions(this.m_Data); } else { this.m_Total = total; } } /** * Set the data of the stats, overwriting the old one if any * * @param data * the data to be set */ public void setData(final Instances data) { this.m_Data = data; } /** * Get the data of the stats * * @return the data */ public Instances getData() { return this.m_Data; } /** * Set the ruleset of the stats, overwriting the old one if any * * @param rules * the set of rules to be set */ public void setRuleset(final ArrayList<Rule> rules) { this.m_Ruleset = rules; } /** * Get the ruleset of the stats * * @return the set of rules */ public ArrayList<Rule> getRuleset() { return this.m_Ruleset; } /** * Get the size of the ruleset in the stats * * @return the size of the ruleset */ public int getRulesetSize() { return this.m_Ruleset.size(); } /** * Get the simple stats of one rule, including 6 parameters: 0: coverage; 1: uncoverage; 2: true positives; 3: true negatives; 4: false positives; 5: false negatives * * @param index * the index of the rule * @return the stats */ public double[] getSimpleStats(final int index) { if ((this.m_SimpleStats != null) && (index < this.m_SimpleStats.size())) { return this.m_SimpleStats.get(index); } return null; } /** * Get the data after filtering the given rule * * @param index * the index of the rule * @return the data covered and uncovered by the rule */ public Instances[] getFiltered(final int index) { if ((this.m_Filtered != null) && (index < this.m_Filtered.size())) { return this.m_Filtered.get(index); } return null; } /** * Get the class distribution predicted by the rule in the given position * * @param index * the position index of the rule * @return the class distributions */ public double[] getDistributions(final int index) { if ((this.m_Distributions != null) && (index < this.m_Distributions.size())) { return this.m_Distributions.get(index); } return null; } /** * Set the weight of theory in the MDL calculation * * @param weight * the weight to be set */ public void setMDLTheoryWeight(final double weight) { this.MDL_THEORY_WEIGHT = weight; } /** * Compute the number of all possible conditions that could appear in a rule for the given data. For nominal attributes, it's the number of values that could appear; for numeric attributes, it's the number of values * 2, i.e. <= and >= are * counted as different possible conditions.
* * @param data * the given data * @return number of all conditions of the data * @throws InterruptedException */ public static double numAllConditions(final Instances data) throws InterruptedException { double total = 0; Enumeration<Attribute> attEnum = data.enumerateAttributes(); while (attEnum.hasMoreElements()) { Attribute att = attEnum.nextElement(); if (att.isNominal()) { total += att.numValues(); } else { total += 2.0 * data.numDistinctValues(att); } } return total; } /** * Filter the data according to the ruleset and compute the basic stats: coverage/uncoverage, true/false positive/negatives of each rule */ public void countData() { if ((this.m_Filtered != null) || (this.m_Ruleset == null) || (this.m_Data == null)) { return; } int size = this.m_Ruleset.size(); this.m_Filtered = new ArrayList<Instances[]>(size); this.m_SimpleStats = new ArrayList<double[]>(size); this.m_Distributions = new ArrayList<double[]>(size); Instances data = new Instances(this.m_Data); for (int i = 0; i < size; i++) { double[] stats = new double[6]; // 6 statistics parameters double[] classCounts = new double[this.m_Data.classAttribute().numValues()]; Instances[] filtered = this.computeSimpleStats(i, data, stats, classCounts); this.m_Filtered.add(filtered); this.m_SimpleStats.add(stats); this.m_Distributions.add(classCounts); data = filtered[1]; // Data not covered } } /** * Count data from the position index in the ruleset assuming that given data are not covered by the rules in position 0...(index-1), and the statistics of these rules are provided.<br> * This procedure is typically useful when a temporary object of RuleStats is constructed in order to efficiently calculate the relative DL of rule in position index, thus all other stuff is not needed. * * @param index * the given position * @param uncovered * the data not covered by rules before index * @param prevRuleStats * the provided stats of previous rules */ public void countData(final int index, final Instances uncovered, final double[][] prevRuleStats) { if ((this.m_Filtered != null) || (this.m_Ruleset == null)) { return; } int size = this.m_Ruleset.size(); this.m_Filtered = new ArrayList<Instances[]>(size); this.m_SimpleStats = new ArrayList<double[]>(size); Instances[] data = new Instances[2]; data[1] = uncovered; for (int i = 0; i < index; i++) { this.m_SimpleStats.add(prevRuleStats[i]); if (i + 1 == index) { this.m_Filtered.add(data); } else { this.m_Filtered.add(new Instances[0]); // Stuff sth. 
} } for (int j = index; j < size; j++) { double[] stats = new double[6]; // 6 statistics parameters Instances[] filtered = this.computeSimpleStats(j, data[1], stats, null); this.m_Filtered.add(filtered); this.m_SimpleStats.add(stats); data = filtered; // Data not covered } } /** * Find all the instances in the dataset covered/not covered by the rule at the given index; the corresponding simple statistics and predicted class distributions are stored in the given double arrays, which can be obtained by * getSimpleStats() and getDistributions().<br> * * @param index * the given index, assuming correct * @param insts * the dataset to be covered by the rule * @param stats * the given double array to hold stats, side-effected * @param dist * the given array to hold class distributions, side-effected; if null, the distribution is not computed * @return the instances covered and not covered by the rule */ private Instances[] computeSimpleStats(final int index, final Instances insts, final double[] stats, final double[] dist) { Rule rule = this.m_Ruleset.get(index); Instances[] data = new Instances[2]; data[0] = new Instances(insts, insts.numInstances()); data[1] = new Instances(insts, insts.numInstances()); for (int i = 0; i < insts.numInstances(); i++) { Instance datum = insts.instance(i); double weight = datum.weight(); if (rule.covers(datum)) { data[0].add(datum); // Covered by this rule stats[0] += weight; // Coverage if ((int) datum.classValue() == (int) rule.getConsequent()) { stats[2] += weight; // True positives } else { stats[4] += weight; // False positives } if (dist != null) { dist[(int) datum.classValue()] += weight; } } else { data[1].add(datum); // Not covered by this rule stats[1] += weight; if ((int) datum.classValue() != (int) rule.getConsequent()) { stats[3] += weight; // True negatives } else { stats[5] += weight; // False negatives } } } return data; } /** * Add a rule to the ruleset and update the stats * * @param lastRule * the rule to be added */ public void addAndUpdate(final Rule lastRule) { if (this.m_Ruleset == null) { this.m_Ruleset = new ArrayList<Rule>(); } this.m_Ruleset.add(lastRule); Instances data = (this.m_Filtered == null) ? this.m_Data : (this.m_Filtered.get(this.m_Filtered.size() - 1))[1]; double[] stats = new double[6]; double[] classCounts = new double[this.m_Data.classAttribute().numValues()]; Instances[] filtered = this.computeSimpleStats(this.m_Ruleset.size() - 1, data, stats, classCounts); if (this.m_Filtered == null) { this.m_Filtered = new ArrayList<Instances[]>(); } this.m_Filtered.add(filtered); if (this.m_SimpleStats == null) { this.m_SimpleStats = new ArrayList<double[]>(); } this.m_SimpleStats.add(stats); if (this.m_Distributions == null) { this.m_Distributions = new ArrayList<double[]>(); } this.m_Distributions.add(classCounts); } /** * Subset description length: <br> * S(t,k,p) = -k*log2(p)-(t-k)*log2(1-p) * * For details see Quinlan: "MDL and categorical theories (Continued)", ML95 * * @param t * the number of elements in a known set * @param k * the number of elements in a subset * @param p * the expected proportion of subset known by recipient * @return the subset description length */ public static double subsetDL(final double t, final double k, final double p) { double rt = Utils.gr(p, 0.0) ? (-k * Utils.log2(p)) : 0.0; rt -= (t - k) * Utils.log2(1 - p); return rt; } /** * The description length of the theory for a given rule.
Computed as:<br> * 0.5* [||k||+ S(t, k, k/t)]<br> * where k is the number of antecedents of the rule; t is the total possible antecedents that could appear in a rule; ||k|| is the universal prior for k, log2*(k), and S(t,k,p) = -k*log2(p)-(t-k)*log2(1-p) is the subset encoding length. * <p> * * For details see Quinlan: "MDL and categorical theories (Continued)", ML95 * * @param index * the index of the given rule (assuming correct) * @return the theory DL, weighted if weight != 1.0 */ public double theoryDL(final int index) { double k = this.m_Ruleset.get(index).size(); if (k == 0) { return 0.0; } double tdl = Utils.log2(k); if (k > 1) { tdl += 2.0 * Utils.log2(tdl); // of log2 star } tdl += subsetDL(this.m_Total, k, k / this.m_Total); return this.MDL_THEORY_WEIGHT * REDUNDANCY_FACTOR * tdl; } /** * The description length of data given the parameters of the data based on the ruleset. * <p> * For details see Quinlan: "MDL and categorical theories (Continued)", ML95 * <p> * * @param expFPOverErr * expected FP/(FP+FN) * @param cover * coverage * @param uncover * uncoverage * @param fp * False Positives * @param fn * False Negatives * @return the description length */ public static double dataDL(final double expFPOverErr, final double cover, final double uncover, final double fp, final double fn) { double totalBits = Utils.log2(cover + uncover + 1.0); // how many data? double coverBits, uncoverBits; // What's the error? double expErr; // Expected FP or FN if (Utils.gr(cover, uncover)) { expErr = expFPOverErr * (fp + fn); coverBits = subsetDL(cover, fp, expErr / cover); uncoverBits = Utils.gr(uncover, 0.0) ? subsetDL(uncover, fn, fn / uncover) : 0.0; } else { expErr = (1.0 - expFPOverErr) * (fp + fn); coverBits = Utils.gr(cover, 0.0) ? subsetDL(cover, fp, fp / cover) : 0.0; uncoverBits = subsetDL(uncover, fn, expErr / uncover); } /* * System.err.println("!!!cover: " + cover + "|uncover" + uncover + * "|coverBits: "+coverBits+"|uncBits: "+ uncoverBits+ * "|FPRate: "+expFPOverErr + "|expErr: "+expErr+ * "|fp: "+fp+"|fn: "+fn+"|total: "+totalBits); */ return (totalBits + coverBits + uncoverBits); } /** * Calculate the potential to decrease the DL of the ruleset, i.e. the possible DL that could be saved by deleting the rule whose index and simple statistics are given. If there is no potential (i.e. the potential is negative and the error rate is < 0.5), it * returns NaN. * <p> * * This procedure is copied from the original RIPPER implementation and is quite bizarre, because it does not recursively update the following rules' stats when testing each rule, which means it assumes that after deletion * no data are covered by the following rules (or it regards the deleted rule as the last rule). Reasonable assumption?
* <p> * * @param index * the index of the rule in m_Ruleset to be deleted * @param expFPOverErr * expected FP/(FP+FN) * @param rulesetStat * the simple statistics of the ruleset, updated if the rule should be deleted * @param ruleStat * the simple statistics of the rule to be deleted * @param checkErr * whether check if error rate >= 0.5 * @return the potential DL that could be decreased */ public double potential(final int index, final double expFPOverErr, final double[] rulesetStat, final double[] ruleStat, final boolean checkErr) { // Restore the stats if deleted double pcov = rulesetStat[0] - ruleStat[0]; double puncov = rulesetStat[1] + ruleStat[0]; double pfp = rulesetStat[4] - ruleStat[4]; double pfn = rulesetStat[5] + ruleStat[2]; double dataDLWith = dataDL(expFPOverErr, rulesetStat[0], rulesetStat[1], rulesetStat[4], rulesetStat[5]); double theoryDLWith = this.theoryDL(index); double dataDLWithout = dataDL(expFPOverErr, pcov, puncov, pfp, pfn); double potential = dataDLWith + theoryDLWith - dataDLWithout; double err = ruleStat[4] / ruleStat[0]; boolean overErr = Utils.grOrEq(err, 0.5); if (!checkErr) { overErr = false; } if (Utils.grOrEq(potential, 0.0) || overErr) { // If deleted, update ruleset stats. Other stats do not matter rulesetStat[0] = pcov; rulesetStat[1] = puncov; rulesetStat[4] = pfp; rulesetStat[5] = pfn; return potential; } else { return Double.NaN; } } /** * Compute the minimal data description length of the ruleset if the rule in the given position is deleted.<br> * The min_data_DL_if_deleted = data_DL_if_deleted - potential * * @param index * the index of the rule in question * @param expFPRate * expected FP/(FP+FN), used in dataDL calculation * @param checkErr * whether check if error rate >= 0.5 * @return the minDataDL */ public double minDataDLIfDeleted(final int index, final double expFPRate, final boolean checkErr) { double[] rulesetStat = new double[6]; // Stats of ruleset if deleted int more = this.m_Ruleset.size() - 1 - index; // How many rules after? ArrayList<double[]> indexPlus = new ArrayList<double[]>(more); // Their // stats // 0...(index-1) are OK for (int j = 0; j < index; j++) { // Covered stats are cumulative rulesetStat[0] += this.m_SimpleStats.get(j)[0]; rulesetStat[2] += this.m_SimpleStats.get(j)[2]; rulesetStat[4] += this.m_SimpleStats.get(j)[4]; } // Recount data from index+1 Instances data = (index == 0) ? 
this.m_Data : this.m_Filtered.get(index - 1)[1]; for (int j = (index + 1); j < this.m_Ruleset.size(); j++) { double[] stats = new double[6]; Instances[] split = this.computeSimpleStats(j, data, stats, null); indexPlus.add(stats); rulesetStat[0] += stats[0]; rulesetStat[2] += stats[2]; rulesetStat[4] += stats[4]; data = split[1]; } // Uncovered stats are those of the last rule if (more > 0) { rulesetStat[1] = indexPlus.get(indexPlus.size() - 1)[1]; rulesetStat[3] = indexPlus.get(indexPlus.size() - 1)[3]; rulesetStat[5] = indexPlus.get(indexPlus.size() - 1)[5]; } else if (index > 0) { rulesetStat[1] = this.m_SimpleStats.get(index - 1)[1]; rulesetStat[3] = this.m_SimpleStats.get(index - 1)[3]; rulesetStat[5] = this.m_SimpleStats.get(index - 1)[5]; } else { // Null coverage rulesetStat[1] = this.m_SimpleStats.get(0)[0] + this.m_SimpleStats.get(0)[1]; rulesetStat[3] = this.m_SimpleStats.get(0)[3] + this.m_SimpleStats.get(0)[4]; rulesetStat[5] = this.m_SimpleStats.get(0)[2] + this.m_SimpleStats.get(0)[5]; } // Potential double potential = 0; for (int k = index + 1; k < this.m_Ruleset.size(); k++) { double[] ruleStat = indexPlus.get(k - index - 1); double ifDeleted = this.potential(k, expFPRate, rulesetStat, ruleStat, checkErr); if (!Double.isNaN(ifDeleted)) { potential += ifDeleted; } } // Data DL of the ruleset without the rule // Note that ruleset stats has already been updated to reflect // deletion if any potential double dataDLWithout = dataDL(expFPRate, rulesetStat[0], rulesetStat[1], rulesetStat[4], rulesetStat[5]); // Why subtract potential again? To reflect change of theory DL?? return (dataDLWithout - potential); } /** * Compute the minimal data description length of the ruleset if the rule in the given position is NOT deleted.<br> * The min_data_DL_if_n_deleted = data_DL_if_n_deleted - potential * * @param index * the index of the rule in question * @param expFPRate * expected FP/(FP+FN), used in dataDL calculation * @param checkErr * whether check if error rate >= 0.5 * @return the minDataDL */ public double minDataDLIfExists(final int index, final double expFPRate, final boolean checkErr) { double[] rulesetStat = new double[6]; // Stats of ruleset if rule exists for (int j = 0; j < this.m_SimpleStats.size(); j++) { // Covered stats are cumulative rulesetStat[0] += this.m_SimpleStats.get(j)[0]; rulesetStat[2] += this.m_SimpleStats.get(j)[2]; rulesetStat[4] += this.m_SimpleStats.get(j)[4]; if (j == this.m_SimpleStats.size() - 1) { // Last rule rulesetStat[1] = this.m_SimpleStats.get(j)[1]; rulesetStat[3] = this.m_SimpleStats.get(j)[3]; rulesetStat[5] = this.m_SimpleStats.get(j)[5]; } } // Potential double potential = 0; for (int k = index + 1; k < this.m_SimpleStats.size(); k++) { double[] ruleStat = this.getSimpleStats(k); double ifDeleted = this.potential(k, expFPRate, rulesetStat, ruleStat, checkErr); if (!Double.isNaN(ifDeleted)) { potential += ifDeleted; } } // Data DL of the ruleset without the rule // Note that ruleset stats has already been updated to reflect deletion // if any potential double dataDLWith = dataDL(expFPRate, rulesetStat[0], rulesetStat[1], rulesetStat[4], rulesetStat[5]); return (dataDLWith - potential); } /** * The description length (DL) of the ruleset relative to if the rule in the given position is deleted, which is obtained by: <br> * MDL if the rule exists - MDL if the rule does not exist <br> * Note the minimal possible DL of the ruleset is calculated(i.e. some other rules may also be deleted) instead of the DL of the current ruleset. 
* <p> * * @param index * the given position of the rule in question (assuming correct) * @param expFPRate * expected FP/(FP+FN), used in the dataDL calculation * @param checkErr * whether to check if the error rate >= 0.5 * @return the relative DL */ public double relativeDL(final int index, final double expFPRate, final boolean checkErr) { return (this.minDataDLIfExists(index, expFPRate, checkErr) + this.theoryDL(index) - this.minDataDLIfDeleted(index, expFPRate, checkErr)); } /** * Try to reduce the DL of the ruleset by testing the removal of the rules one by one in reverse order, updating all the stats * * @param expFPRate * expected FP/(FP+FN), used in the dataDL calculation * @param checkErr * whether to check if the error rate >= 0.5 */ public void reduceDL(final double expFPRate, final boolean checkErr) { boolean needUpdate = false; double[] rulesetStat = new double[6]; for (int j = 0; j < this.m_SimpleStats.size(); j++) { // Covered stats are cumulative rulesetStat[0] += this.m_SimpleStats.get(j)[0]; rulesetStat[2] += this.m_SimpleStats.get(j)[2]; rulesetStat[4] += this.m_SimpleStats.get(j)[4]; if (j == this.m_SimpleStats.size() - 1) { // Last rule rulesetStat[1] = this.m_SimpleStats.get(j)[1]; rulesetStat[3] = this.m_SimpleStats.get(j)[3]; rulesetStat[5] = this.m_SimpleStats.get(j)[5]; } } // Potential for (int k = this.m_SimpleStats.size() - 1; k >= 0; k--) { double[] ruleStat = this.m_SimpleStats.get(k); // rulesetStat updated double ifDeleted = this.potential(k, expFPRate, rulesetStat, ruleStat, checkErr); if (!Double.isNaN(ifDeleted)) { /* * System.err.println("!!!deleted ("+k+"): save "+ifDeleted * +" | "+rulesetStat[0] +" | "+rulesetStat[1] +" | "+rulesetStat[4] * +" | "+rulesetStat[5]); */ if (k == (this.m_SimpleStats.size() - 1)) { this.removeLast(); } else { this.m_Ruleset.remove(k); needUpdate = true; } } } if (needUpdate) { this.m_Filtered = null; this.m_SimpleStats = null; this.countData(); } } /** * Remove the last rule in the ruleset as well as its stats. This is useful when the last rule was added for testing purposes and the test failed */ public void removeLast() { int last = this.m_Ruleset.size() - 1; this.m_Ruleset.remove(last); this.m_Filtered.remove(last); this.m_SimpleStats.remove(last); if (this.m_Distributions != null) { this.m_Distributions.remove(last); } } /** * Static utility function to count the data covered by the rules after the given index in the given rules, and then remove them. It returns the data not covered by the successive rules. * * @param data * the data to be processed * @param rules * the ruleset * @param index * the given index * @return the data after processing */ public static Instances rmCoveredBySuccessives(final Instances data, final ArrayList<Rule> rules, final int index) { Instances rt = new Instances(data, 0); for (int i = 0; i < data.numInstances(); i++) { Instance datum = data.instance(i); boolean covered = false; for (int j = index + 1; j < rules.size(); j++) { Rule rule = rules.get(j); if (rule.covers(datum)) { covered = true; break; } } if (!covered) { rt.add(datum); } } return rt; } /** * Stratify the given data into the given number of bags based on the class values. It differs from <code>Instances.stratify(int fold)</code> in that, before stratification, it sorts the instances according to the class order in the * header. It assumes no missing class values.
* * @param data * the given data * @param folds * the given number of folds * @param rand * the random object used to randomize the instances * @return the stratified instances * @throws InterruptedException */ public static final Instances stratify(final Instances data, final int folds, final Random rand) throws InterruptedException { if (!data.classAttribute().isNominal()) { return data; } Instances result = new Instances(data, 0); Instances[] bagsByClasses = new Instances[data.numClasses()]; for (int i = 0; i < bagsByClasses.length; i++) { bagsByClasses[i] = new Instances(data, 0); } // Sort by class for (int j = 0; j < data.numInstances(); j++) { Instance datum = data.instance(j); bagsByClasses[(int) datum.classValue()].add(datum); } // Randomize each class for (Instances bagsByClasse : bagsByClasses) { if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } bagsByClasse.randomize(rand); } for (int k = 0; k < folds; k++) { int offset = k, bag = 0; oneFold: while (true) { while (offset >= bagsByClasses[bag].numInstances()) { offset -= bagsByClasses[bag].numInstances(); if (++bag >= bagsByClasses.length) { break oneFold; } } result.add(bagsByClasses[bag].instance(offset)); offset += folds; } } return result; } /** * Compute the combined DL of the ruleset in this class, i.e. theory DL and data DL. Note this procedure computes the combined DL according to the current status of the ruleset in this class * * @param expFPRate * expected FP/(FP+FN), used in the dataDL calculation * @param predicted * the default classification if the ruleset covers no data * @return the combined DL */ public double combinedDL(final double expFPRate, final double predicted) { double rt = 0; if (this.getRulesetSize() > 0) { double[] stats = this.m_SimpleStats.get(this.m_SimpleStats.size() - 1); for (int j = this.getRulesetSize() - 2; j >= 0; j--) { stats[0] += this.getSimpleStats(j)[0]; stats[2] += this.getSimpleStats(j)[2]; stats[4] += this.getSimpleStats(j)[4]; } rt += dataDL(expFPRate, stats[0], stats[1], stats[4], stats[5]); // Data // DL } else { // Null coverage ruleset double fn = 0.0; for (int j = 0; j < this.m_Data.numInstances(); j++) { if ((int) this.m_Data.instance(j).classValue() == (int) predicted) { fn += this.m_Data.instance(j).weight(); } } rt += dataDL(expFPRate, 0.0, this.m_Data.sumOfWeights(), 0.0, fn); } for (int i = 0; i < this.getRulesetSize(); i++) { rt += this.theoryDL(i); } return rt; } /** * Partition the data into two sets, the first of which has (numFolds-1)/numFolds of the data and the second 1/numFolds of the data * * * @param data * the given data * @param numFolds * the given number of folds * @return the partitioned instances */ public static final Instances[] partition(final Instances data, final int numFolds) { Instances[] rt = new Instances[2]; int splits = data.numInstances() * (numFolds - 1) / numFolds; rt[0] = new Instances(data, 0, splits); rt[1] = new Instances(data, splits, data.numInstances() - splits); return rt; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
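/*
 * An illustrative snippet (not from the original source) exercising the
 * public static MDL helpers defined above. The numbers are arbitrary: with
 * t = 100 possible conditions and a rule of k = 4 antecedents, subsetDL()
 * gives the bits needed to transmit which conditions were chosen, and
 * dataDL() gives the bits needed to transmit the exceptions (errors).
 */
import weka.classifiers.rules.RuleStats;

public class RuleStatsMDLSketch {
  public static void main(String[] args) {
    double t = 100, k = 4;
    // S(t, k, k/t) = -k*log2(k/t) - (t-k)*log2(1 - k/t)
    System.out.println("subset DL = " + RuleStats.subsetDL(t, k, k / t) + " bits");

    // A ruleset covering 60 of 100 instances with 5 false positives and
    // 3 false negatives, assuming FP makes up half of the expected errors.
    System.out.println("data DL = " + RuleStats.dataDL(0.5, 60, 40, 5, 3) + " bits");
  }
}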
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/rules/ZeroR.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * ZeroR.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.rules; import weka.classifiers.AbstractClassifier; import weka.classifiers.Sourcable; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.WeightedInstancesHandler; /** * <!-- globalinfo-start --> Class for building and using a 0-R classifier. Predicts the mean (for a numeric class) or the mode (for a nominal class). * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision$ */ public class ZeroR extends AbstractClassifier implements WeightedInstancesHandler, Sourcable { /** for serialization */ static final long serialVersionUID = 48055541465867954L; /** The class value 0R predicts. */ private double m_ClassValue; /** The number of instances in each class (null if class numeric). */ private double[] m_Counts; /** The class attribute. */ private Attribute m_Class; /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for building and using a 0-R classifier. Predicts the mean " + "(for a numeric class) or the mode (for a nominal class)."; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.STRING_ATTRIBUTES); result.enable(Capability.RELATIONAL_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(0); return result; } /** * Generates the classifier. * * @param instances * set of instances serving as training data * @throws Exception * if the classifier has not been generated successfully */ @Override public void buildClassifier(final Instances instances) throws Exception { // can classifier handle the data? 
this.getCapabilities().testWithFail(instances); double sumOfWeights = 0; this.m_Class = instances.classAttribute(); this.m_ClassValue = 0; switch (instances.classAttribute().type()) { case Attribute.NUMERIC: this.m_Counts = null; break; case Attribute.NOMINAL: this.m_Counts = new double[instances.numClasses()]; for (int i = 0; i < this.m_Counts.length; i++) { this.m_Counts[i] = 1; } sumOfWeights = instances.numClasses(); break; } for (Instance instance : instances) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } double classValue = instance.classValue(); if (!Utils.isMissingValue(classValue)) { if (instances.classAttribute().isNominal()) { this.m_Counts[(int) classValue] += instance.weight(); } else { this.m_ClassValue += instance.weight() * classValue; } sumOfWeights += instance.weight(); } } if (instances.classAttribute().isNumeric()) { if (Utils.gr(sumOfWeights, 0)) { this.m_ClassValue /= sumOfWeights; } } else { this.m_ClassValue = Utils.maxIndex(this.m_Counts); Utils.normalize(this.m_Counts, sumOfWeights); } } /** * Classifies a given instance. * * @param instance * the instance to be classified * @return index of the predicted class */ @Override public double classifyInstance(final Instance instance) { return this.m_ClassValue; } /** * Calculates the class membership probabilities for the given test instance. * * @param instance * the instance to be classified * @return predicted class probability distribution * @throws Exception * if class is numeric */ @Override public double[] distributionForInstance(final Instance instance) throws Exception { if (this.m_Counts == null) { double[] result = new double[1]; result[0] = this.m_ClassValue; return result; } else { return this.m_Counts.clone(); } } /** * Returns a string that describes the classifier as source. The classifier will be contained in a class with the given name (there may be auxiliary classes), and will contain a method with the signature: * * <pre> * <code> * public static double classify(Object[] i); * </code> * </pre> * * where the array <code>i</code> contains elements that are either Double or String, with missing values represented as null. The generated code is public domain and comes with no warranty. * * @param className * the name that should be given to the source class. * @return the classifier source code as a string * @throws Exception * if the source can't be computed */ @Override public String toSource(final String className) throws Exception { StringBuffer result; result = new StringBuffer(); result.append("class " + className + " {\n"); result.append(" public static double classify(Object[] i) {\n"); if (this.m_Counts != null) { result.append(" // always predicts label '" + this.m_Class.value((int) this.m_ClassValue) + "'\n"); } result.append(" return " + this.m_ClassValue + ";\n"); result.append(" }\n"); result.append("}\n"); return result.toString(); } /** * Returns a description of the classifier. * * @return a description of the classifier as a string. */ @Override public String toString() { if (this.m_Class == null) { return "ZeroR: No model built yet."; } if (this.m_Counts == null) { return "ZeroR predicts class value: " + this.m_ClassValue; } else { return "ZeroR predicts class value: " + this.m_Class.value((int) this.m_ClassValue); } } /** * Returns the revision string.
* * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. * * @param argv * the options */ public static void main(final String[] argv) { runClassifier(new ZeroR(), argv); } }
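/*
 * A minimal sketch (not part of the original source) of ZeroR as a baseline.
 * The dataset file name is a placeholder; the calls mirror the methods
 * defined above: buildClassifier() stores the prior, toString() reports the
 * predicted value, and toSource() emits a trivial standalone classifier.
 */
import weka.classifiers.rules.ZeroR;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class ZeroRSketch {
  public static void main(String[] args) throws Exception {
    Instances data = DataSource.read("iris.arff"); // placeholder dataset
    data.setClassIndex(data.numAttributes() - 1);

    ZeroR zr = new ZeroR();
    zr.buildClassifier(data);
    System.out.println(zr);                        // e.g. "ZeroR predicts class value: ..."
    System.out.println(zr.toSource("ZeroRModel")); // generated Java source for the model
  }
}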
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/rules
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/rules/part/C45PruneableDecList.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * C45PruneableDecList.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.rules.part; import weka.classifiers.trees.j48.Distribution; import weka.classifiers.trees.j48.ModelSelection; import weka.classifiers.trees.j48.NoSplit; import weka.classifiers.trees.j48.Stats; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.Utils; /** * Class for handling a partial tree structure pruned using C4.5's pruning heuristic. * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision$ */ public class C45PruneableDecList extends ClassifierDecList { /** for serialization */ private static final long serialVersionUID = -2757684345218324559L; /** CF */ private double CF = 0.25; /** * Constructor for pruneable tree structure. Stores reference to associated training data at each node. * * @param toSelectLocModel * selection method for local splitting model * @param cf * the confidence factor for pruning * @param minNum * the minimum number of objects in a leaf * @exception Exception * if something goes wrong */ public C45PruneableDecList(final ModelSelection toSelectLocModel, final double cf, final int minNum) throws Exception { super(toSelectLocModel, minNum); this.CF = cf; } /** * Builds the partial tree without hold out set. 
* * @exception Exception * if something goes wrong */ @Override public void buildDecList(Instances data, final boolean leaf) throws Exception { Instances[] localInstances; int ind; int i, j; double sumOfWeights; NoSplit noSplit; this.m_train = null; this.m_test = null; this.m_isLeaf = false; this.m_isEmpty = false; this.m_sons = null; this.indeX = 0; sumOfWeights = data.sumOfWeights(); noSplit = new NoSplit(new Distribution(data)); if (leaf) { this.m_localModel = noSplit; } else { this.m_localModel = this.m_toSelectModel.selectModel(data); } if (this.m_localModel.numSubsets() > 1) { localInstances = this.m_localModel.split(data); data = null; this.m_sons = new ClassifierDecList[this.m_localModel.numSubsets()]; i = 0; do { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } i++; ind = this.chooseIndex(); if (ind == -1) { for (j = 0; j < this.m_sons.length; j++) { if (this.m_sons[j] == null) { this.m_sons[j] = this.getNewDecList(localInstances[j], true); } } if (i < 2) { this.m_localModel = noSplit; this.m_isLeaf = true; this.m_sons = null; if (Utils.eq(sumOfWeights, 0)) { this.m_isEmpty = true; } return; } ind = 0; break; } else { this.m_sons[ind] = this.getNewDecList(localInstances[ind], false); } } while ((i < this.m_sons.length) && (this.m_sons[ind].m_isLeaf)); // Check if all successors are leaves for (j = 0; j < this.m_sons.length; j++) { if ((this.m_sons[j] == null) || (!this.m_sons[j].m_isLeaf)) { break; } } if (j == this.m_sons.length) { this.pruneEnd(); if (!this.m_isLeaf) { this.indeX = this.chooseLastIndex(); } } else { this.indeX = this.chooseLastIndex(); } } else { this.m_isLeaf = true; if (Utils.eq(sumOfWeights, 0)) { this.m_isEmpty = true; } } } /** * Returns a newly created tree. * * @exception Exception * if something goes wrong */ @Override protected ClassifierDecList getNewDecList(final Instances data, final boolean leaf) throws Exception { C45PruneableDecList newDecList = new C45PruneableDecList(this.m_toSelectModel, this.CF, this.m_minNumObj); newDecList.buildDecList(data, leaf); return newDecList; } /** * Prunes the end of the rule. */ protected void pruneEnd() { double errorsLeaf, errorsTree; errorsTree = this.getEstimatedErrorsForTree(); errorsLeaf = this.getEstimatedErrorsForLeaf(); if (Utils.smOrEq(errorsLeaf, errorsTree + 0.1)) { // +0.1 as in C4.5 this.m_isLeaf = true; this.m_sons = null; this.m_localModel = new NoSplit(this.localModel().distribution()); } } /** * Computes estimated errors for tree. */ private double getEstimatedErrorsForTree() { if (this.m_isLeaf) { return this.getEstimatedErrorsForLeaf(); } else { double error = 0; for (int i = 0; i < this.m_sons.length; i++) { if (!Utils.eq(this.son(i).localModel().distribution().total(), 0)) { error += ((C45PruneableDecList) this.son(i)).getEstimatedErrorsForTree(); } } return error; } } /** * Computes estimated errors for leaf. */ public double getEstimatedErrorsForLeaf() { double errors = this.localModel().distribution().numIncorrect(); return errors + Stats.addErrs(this.localModel().distribution().total(), errors, (float) this.CF); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
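/*
 * A small sketch (not part of the original source) of the C4.5-style error
 * estimate used by pruneEnd() and getEstimatedErrorsForLeaf() above:
 * observed leaf errors are inflated by Stats.addErrs() according to the
 * confidence factor CF. The counts below are arbitrary illustration values.
 */
import weka.classifiers.trees.j48.Stats;

public class LeafErrorEstimateSketch {
  public static void main(String[] args) {
    double total = 40;  // instances reaching the leaf
    double errors = 6;  // observed misclassifications at the leaf
    float cf = 0.25f;   // default confidence factor, as in the CF field above
    double estimated = errors + Stats.addErrs(total, errors, cf);
    // pruneEnd() replaces the subtree by a leaf when the leaf estimate is
    // within +0.1 of the subtree estimate, as in C4.5.
    System.out.println("estimated leaf errors = " + estimated);
  }
}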
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/rules
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/rules/part/ClassifierDecList.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * ClassifierDecList.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.rules.part; import java.io.Serializable; import weka.classifiers.trees.j48.ClassifierSplitModel; import weka.classifiers.trees.j48.Distribution; import weka.classifiers.trees.j48.EntropySplitCrit; import weka.classifiers.trees.j48.ModelSelection; import weka.classifiers.trees.j48.NoSplit; import weka.core.ContingencyTables; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Utils; /** * Class for handling a rule (partial tree) for a decision list. * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision$ */ public class ClassifierDecList implements Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = 7284358349711992497L; /** Minimum number of objects */ protected int m_minNumObj; /** To compute the entropy. */ protected static EntropySplitCrit m_splitCrit = new EntropySplitCrit(); /** The model selection method. */ protected ModelSelection m_toSelectModel; /** Local model at node. */ protected ClassifierSplitModel m_localModel; /** References to sons. */ protected ClassifierDecList[] m_sons; /** True if node is leaf. */ protected boolean m_isLeaf; /** True if node is empty. */ protected boolean m_isEmpty; /** The training instances. */ protected Instances m_train; /** The pruning instances. */ protected Distribution m_test; /** Which son to expand? */ protected int indeX; /** * Constructor - just calls constructor of class DecList. */ public ClassifierDecList(final ModelSelection toSelectLocModel, final int minNum) { this.m_toSelectModel = toSelectLocModel; this.m_minNumObj = minNum; } /** * Method for building a pruned partial tree. * * @exception Exception * if something goes wrong */ public void buildRule(final Instances data) throws Exception { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } this.buildDecList(data, false); this.cleanup(new Instances(data, 0)); } /** * Builds the partial tree without hold out set. 
* * @exception Exception * if something goes wrong */ public void buildDecList(Instances data, final boolean leaf) throws Exception { Instances[] localInstances; int ind; int i, j; double sumOfWeights; NoSplit noSplit; this.m_train = null; this.m_test = null; this.m_isLeaf = false; this.m_isEmpty = false; this.m_sons = null; this.indeX = 0; sumOfWeights = data.sumOfWeights(); noSplit = new NoSplit(new Distribution(data)); if (leaf) { this.m_localModel = noSplit; } else { this.m_localModel = this.m_toSelectModel.selectModel(data); } if (this.m_localModel.numSubsets() > 1) { localInstances = this.m_localModel.split(data); data = null; this.m_sons = new ClassifierDecList[this.m_localModel.numSubsets()]; i = 0; do { i++; ind = this.chooseIndex(); if (ind == -1) { for (j = 0; j < this.m_sons.length; j++) { if (this.m_sons[j] == null) { this.m_sons[j] = this.getNewDecList(localInstances[j], true); } } if (i < 2) { this.m_localModel = noSplit; this.m_isLeaf = true; this.m_sons = null; if (Utils.eq(sumOfWeights, 0)) { this.m_isEmpty = true; } return; } ind = 0; break; } else { this.m_sons[ind] = this.getNewDecList(localInstances[ind], false); } } while ((i < this.m_sons.length) && (this.m_sons[ind].m_isLeaf)); // Choose rule this.indeX = this.chooseLastIndex(); } else { this.m_isLeaf = true; if (Utils.eq(sumOfWeights, 0)) { this.m_isEmpty = true; } } } /** * Classifies an instance. * * @exception Exception * if something goes wrong */ public double classifyInstance(final Instance instance) throws Exception { double maxProb = -1; double currentProb; int maxIndex = 0; int j; for (j = 0; j < instance.numClasses(); j++) { currentProb = this.getProbs(j, instance, 1); if (Utils.gr(currentProb, maxProb)) { maxIndex = j; maxProb = currentProb; } } if (Utils.eq(maxProb, 0)) { return -1.0; } else { return maxIndex; } } /** * Returns class probabilities for a weighted instance. * * @exception Exception * if something goes wrong */ public final double[] distributionForInstance(final Instance instance) throws Exception { double[] doubles = new double[instance.numClasses()]; for (int i = 0; i < doubles.length; i++) { doubles[i] = this.getProbs(i, instance, 1); } return doubles; } /** * Returns the weight a rule assigns to an instance. * * @exception Exception * if something goes wrong */ public double weight(final Instance instance) throws Exception { int subset; if (this.m_isLeaf) { return 1; } subset = this.m_localModel.whichSubset(instance); if (subset == -1) { return (this.m_localModel.weights(instance))[this.indeX] * this.m_sons[this.indeX].weight(instance); } if (subset == this.indeX) { return this.m_sons[this.indeX].weight(instance); } return 0; } /** * Cleanup in order to save memory. * * @throws InterruptedException */ public final void cleanup(final Instances justHeaderInfo) throws InterruptedException { this.m_train = justHeaderInfo; this.m_test = null; if (!this.m_isLeaf) { for (ClassifierDecList m_son : this.m_sons) { // XXX interrupt weka if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } if (m_son != null) { m_son.cleanup(justHeaderInfo); } } } } /** * Prints rules. */ @Override public String toString() { try { StringBuffer text; text = new StringBuffer(); if (this.m_isLeaf) { text.append(": "); text.append(this.m_localModel.dumpLabel(0, this.m_train) + "\n"); } else { this.dumpDecList(text); // dumpTree(0,text); } return text.toString(); } catch (Exception e) { return "Can't print rule."; } } /** * Returns a newly created tree. 
* * @exception Exception * if something goes wrong */ protected ClassifierDecList getNewDecList(final Instances train, final boolean leaf) throws Exception { ClassifierDecList newDecList = new ClassifierDecList(this.m_toSelectModel, this.m_minNumObj); newDecList.buildDecList(train, leaf); return newDecList; } /** * Method for choosing a subset to expand. */ public final int chooseIndex() { int minIndex = -1; double estimated, min = Double.MAX_VALUE; int i, j; for (i = 0; i < this.m_sons.length; i++) { if (this.son(i) == null) { if (Utils.sm(this.localModel().distribution().perBag(i), this.m_minNumObj)) { estimated = Double.MAX_VALUE; } else { estimated = 0; for (j = 0; j < this.localModel().distribution().numClasses(); j++) { estimated -= m_splitCrit.lnFunc(this.localModel().distribution().perClassPerBag(i, j)); } estimated += m_splitCrit.lnFunc(this.localModel().distribution().perBag(i)); estimated /= (this.localModel().distribution().perBag(i) * ContingencyTables.log2); } if (Utils.smOrEq(estimated, 0)) { return i; } if (Utils.sm(estimated, min)) { min = estimated; minIndex = i; } } } return minIndex; } /** * Choose last index (ie. choose rule). */ public final int chooseLastIndex() { int minIndex = 0; double estimated, min = Double.MAX_VALUE; if (!this.m_isLeaf) { for (int i = 0; i < this.m_sons.length; i++) { if (this.son(i) != null) { if (Utils.grOrEq(this.localModel().distribution().perBag(i), this.m_minNumObj)) { estimated = this.son(i).getSizeOfBranch(); if (Utils.sm(estimated, min)) { min = estimated; minIndex = i; } } } } } return minIndex; } /** * Returns the number of instances covered by a branch */ protected double getSizeOfBranch() { if (this.m_isLeaf) { return -this.localModel().distribution().total(); } else { return this.son(this.indeX).getSizeOfBranch(); } } /** * Help method for printing tree structure. */ private void dumpDecList(final StringBuffer text) throws Exception { text.append(this.m_localModel.leftSide(this.m_train)); text.append(this.m_localModel.rightSide(this.indeX, this.m_train)); if (this.m_sons[this.indeX].m_isLeaf) { text.append(": "); text.append(this.m_localModel.dumpLabel(this.indeX, this.m_train) + "\n"); } else { text.append(" AND\n"); this.m_sons[this.indeX].dumpDecList(text); } } /** * Help method for computing class probabilities of a given instance. * * @exception Exception * Exception if something goes wrong */ private double getProbs(final int classIndex, final Instance instance, final double weight) throws Exception { if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } double[] weights; int treeIndex; if (this.m_isLeaf) { return weight * this.localModel().classProb(classIndex, instance, -1); } else { treeIndex = this.localModel().whichSubset(instance); if (treeIndex == -1) { weights = this.localModel().weights(instance); return this.son(this.indeX).getProbs(classIndex, instance, weights[this.indeX] * weight); } else { if (treeIndex == this.indeX) { return this.son(this.indeX).getProbs(classIndex, instance, weight); } else { return 0; } } } } /** * Method just exists to make program easier to read. */ protected ClassifierSplitModel localModel() { return this.m_localModel; } /** * Method just exists to make program easier to read. */ protected ClassifierDecList son(final int index) { return this.m_sons[index]; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
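/*
 * A standalone sketch (not part of the original source) of the per-bag
 * quantity computed by chooseIndex() above: the entropy, in bits per
 * instance, of the class counts in one unexpanded bag, built from the same
 * x*ln(x) terms as m_splitCrit.lnFunc(). The counts are arbitrary
 * illustration values.
 */
public class BagEntropySketch {
  // x * ln(x), with the convention 0 * ln(0) = 0
  private static double lnFunc(double x) {
    return x <= 0 ? 0 : x * Math.log(x);
  }

  public static void main(String[] args) {
    double[] perClassPerBag = {12, 3, 5}; // class counts in the bag
    double perBag = 20;                   // total weight in the bag
    double estimated = 0;
    for (double count : perClassPerBag) {
      estimated -= lnFunc(count);
    }
    estimated += lnFunc(perBag);
    estimated /= perBag * Math.log(2); // normalize to bits per instance
    // chooseIndex() expands the bag with the smallest such value first.
    System.out.println("bag entropy = " + estimated + " bits/instance");
  }
}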
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/rules
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/rules/part/MakeDecList.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * MakeDecList.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.rules.part; import java.io.Serializable; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.trees.j48.ModelSelection; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Utils; /** * Class for handling a decision list. * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision$ */ public class MakeDecList implements Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = -1427481323245079123L; /** Vector storing the rules. */ private Vector<ClassifierDecList> theRules; /** The confidence for C45-type pruning. */ private double CF = 0.25f; /** Minimum number of objects */ private final int minNumObj; /** The model selection method. */ private final ModelSelection toSelectModeL; /** * How many subsets of equal size? One used for pruning, the rest for training. */ private int numSetS = 3; /** Use reduced error pruning? */ private boolean reducedErrorPruning = false; /** Generated unpruned list? */ private boolean unpruned = false; /** The seed for random number generation. */ private int m_seed = 1; /** * Constructor for unpruned dec list. */ public MakeDecList(final ModelSelection toSelectLocModel, final int minNum) { this.toSelectModeL = toSelectLocModel; this.reducedErrorPruning = false; this.unpruned = true; this.minNumObj = minNum; } /** * Constructor for dec list pruned using C4.5 pruning. */ public MakeDecList(final ModelSelection toSelectLocModel, final double cf, final int minNum) { this.toSelectModeL = toSelectLocModel; this.CF = cf; this.reducedErrorPruning = false; this.unpruned = false; this.minNumObj = minNum; } /** * Constructor for dec list pruned using hold-out pruning. */ public MakeDecList(final ModelSelection toSelectLocModel, final int num, final int minNum, final int seed) { this.toSelectModeL = toSelectLocModel; this.numSetS = num; this.reducedErrorPruning = true; this.unpruned = false; this.minNumObj = minNum; this.m_seed = seed; } /** * Builds dec list. 
* * @exception Exception * if dec list can't be built successfully */ public void buildClassifier(final Instances data) throws Exception { ClassifierDecList currentRule; double currentWeight; Instances oldGrowData, newGrowData, oldPruneData, newPruneData; this.theRules = new Vector<ClassifierDecList>(); if ((this.reducedErrorPruning) && !(this.unpruned)) { Random random = new Random(this.m_seed); data.randomize(random); data.stratify(this.numSetS); oldGrowData = data.trainCV(this.numSetS, this.numSetS - 1, random); oldPruneData = data.testCV(this.numSetS, this.numSetS - 1); } else { oldGrowData = data; oldPruneData = null; } while (Utils.gr(oldGrowData.numInstances(), 0)) { // XXX interrupt weka if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } // Create rule if (this.unpruned) { currentRule = new ClassifierDecList(this.toSelectModeL, this.minNumObj); currentRule.buildRule(oldGrowData); } else if (this.reducedErrorPruning) { currentRule = new PruneableDecList(this.toSelectModeL, this.minNumObj); ((PruneableDecList) currentRule).buildRule(oldGrowData, oldPruneData); } else { currentRule = new C45PruneableDecList(this.toSelectModeL, this.CF, this.minNumObj); ((C45PruneableDecList) currentRule).buildRule(oldGrowData); } // Remove instances from growing data newGrowData = new Instances(oldGrowData, oldGrowData.numInstances()); Enumeration<Instance> enu = oldGrowData.enumerateInstances(); while (enu.hasMoreElements()) { // XXX interrupt weka if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } Instance instance = enu.nextElement(); currentWeight = currentRule.weight(instance); if (Utils.sm(currentWeight, 1)) { instance.setWeight(instance.weight() * (1 - currentWeight)); newGrowData.add(instance); } } newGrowData.compactify(); oldGrowData = newGrowData; // Remove instances from pruning data if ((this.reducedErrorPruning) && !(this.unpruned)) { newPruneData = new Instances(oldPruneData, oldPruneData.numInstances()); enu = oldPruneData.enumerateInstances(); while (enu.hasMoreElements()) { Instance instance = enu.nextElement(); currentWeight = currentRule.weight(instance); if (Utils.sm(currentWeight, 1)) { instance.setWeight(instance.weight() * (1 - currentWeight)); newPruneData.add(instance); } } newPruneData.compactify(); oldPruneData = newPruneData; } this.theRules.addElement(currentRule); } } /** * Outputs the classifier into a string. */ @Override public String toString() { StringBuffer text = new StringBuffer(); for (int i = 0; i < this.theRules.size(); i++) { text.append(this.theRules.elementAt(i) + "\n"); } text.append("Number of Rules : \t" + this.theRules.size() + "\n"); return text.toString(); } /** * Classifies an instance. * * @exception Exception * if instance can't be classified */ public double classifyInstance(final Instance instance) throws Exception { double maxProb = -1; double[] sumProbs; int maxIndex = 0; sumProbs = this.distributionForInstance(instance); for (int j = 0; j < sumProbs.length; j++) { if (Utils.gr(sumProbs[j], maxProb)) { maxIndex = j; maxProb = sumProbs[j]; } } return maxIndex; } /** * Returns the class distribution for an instance. * * @exception Exception * if distribution can't be computed */ public double[] distributionForInstance(final Instance instance) throws Exception { double[] currentProbs = null; double[] sumProbs; double currentWeight, weight = 1; int i, j; // Get probabilities. 
sumProbs = new double[instance.numClasses()]; i = 0; while ((Utils.gr(weight, 0)) && (i < this.theRules.size())) { currentWeight = this.theRules.elementAt(i).weight(instance); if (Utils.gr(currentWeight, 0)) { currentProbs = this.theRules.elementAt(i).distributionForInstance(instance); for (j = 0; j < sumProbs.length; j++) { sumProbs[j] += weight * currentProbs[j]; } weight = weight * (1 - currentWeight); } i++; } return sumProbs; } /** * Outputs the number of rules in the classifier. */ public int numRules() { return this.theRules.size(); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
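/*
 * A standalone sketch (not part of the original source) of the probability
 * combination in distributionForInstance() above. Each rule's distribution
 * is assumed to sum to the fraction of the instance it covers (its weight);
 * the running 'weight' variable tracks the probability mass not yet claimed
 * by earlier rules. All numbers are arbitrary illustration values.
 */
public class DecListCombineSketch {
  public static void main(String[] args) {
    // Rule 1 covers the instance with weight 0.6, rule 2 fully covers it.
    double[][] ruleDists = {{0.54, 0.06}, {0.2, 0.8}}; // each sums to its rule's weight
    double[] ruleWeights = {0.6, 1.0};
    double[] sumProbs = new double[2];
    double weight = 1.0; // unclaimed probability mass
    for (int i = 0; i < ruleDists.length && weight > 0; i++) {
      if (ruleWeights[i] > 0) {
        for (int j = 0; j < sumProbs.length; j++) {
          sumProbs[j] += weight * ruleDists[i][j];
        }
        weight *= (1 - ruleWeights[i]); // later rules get what is left
      }
    }
    // Prints [0.62, 0.38]: 0.54 + 0.4*0.2 and 0.06 + 0.4*0.8.
    System.out.println(java.util.Arrays.toString(sumProbs));
  }
}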
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/rules
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/rules/part/PruneableDecList.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * PruneableDecList.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.rules.part; import weka.classifiers.trees.j48.Distribution; import weka.classifiers.trees.j48.ModelSelection; import weka.classifiers.trees.j48.NoSplit; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.Utils; /** * Class for handling a partial tree structure that can be pruned using a pruning set. * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision$ */ public class PruneableDecList extends ClassifierDecList { /** for serialization */ private static final long serialVersionUID = -7228103346297172921L; /** * Constructor for pruneable partial tree structure. * * @param toSelectLocModel * selection method for local splitting model * @param minNum * minimum number of objects in leaf */ public PruneableDecList(final ModelSelection toSelectLocModel, final int minNum) { super(toSelectLocModel, minNum); } /** * Method for building a pruned partial tree. * * @throws Exception * if tree can't be built successfully */ public void buildRule(final Instances train, final Instances test) throws Exception { this.buildDecList(train, test, false); this.cleanup(new Instances(train, 0)); } /** * Builds the partial tree with hold out set * * @throws Exception * if something goes wrong */ public void buildDecList(Instances train, Instances test, final boolean leaf) throws Exception { Instances[] localTrain, localTest; int ind; int i, j; double sumOfWeights; NoSplit noSplit; this.m_train = null; this.m_isLeaf = false; this.m_isEmpty = false; this.m_sons = null; this.indeX = 0; sumOfWeights = train.sumOfWeights(); noSplit = new NoSplit(new Distribution(train)); if (leaf) { this.m_localModel = noSplit; } else { this.m_localModel = this.m_toSelectModel.selectModel(train, test); } this.m_test = new Distribution(test, this.m_localModel); if (this.m_localModel.numSubsets() > 1) { localTrain = this.m_localModel.split(train); localTest = this.m_localModel.split(test); train = null; test = null; this.m_sons = new ClassifierDecList[this.m_localModel.numSubsets()]; i = 0; do { // XXX interrupt weka if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } i++; ind = this.chooseIndex(); if (ind == -1) { for (j = 0; j < this.m_sons.length; j++) { if (this.m_sons[j] == null) { this.m_sons[j] = this.getNewDecList(localTrain[j], localTest[j], true); } } if (i < 2) { this.m_localModel = noSplit; this.m_isLeaf = true; this.m_sons = null; if (Utils.eq(sumOfWeights, 0)) { this.m_isEmpty = true; } return; } ind = 0; break; } else { this.m_sons[ind] = this.getNewDecList(localTrain[ind], localTest[ind], false); } } while ((i < this.m_sons.length) && (this.m_sons[ind].m_isLeaf)); // Check if all successors are leaves for (j = 0; j < this.m_sons.length; j++) { if ((this.m_sons[j] == null) || (!this.m_sons[j].m_isLeaf)) { 
break; } } if (j == this.m_sons.length) { this.pruneEnd(); if (!this.m_isLeaf) { this.indeX = this.chooseLastIndex(); } } else { this.indeX = this.chooseLastIndex(); } } else { this.m_isLeaf = true; if (Utils.eq(sumOfWeights, 0)) { this.m_isEmpty = true; } } } /** * Returns a newly created tree. * * @param train * train data * @param test * test data * @param leaf * @throws Exception * if something goes wrong */ protected ClassifierDecList getNewDecList(final Instances train, final Instances test, final boolean leaf) throws Exception { PruneableDecList newDecList = new PruneableDecList(this.m_toSelectModel, this.m_minNumObj); newDecList.buildDecList(train, test, leaf); return newDecList; } /** * Prunes the end of the rule. */ protected void pruneEnd() throws Exception { double errorsLeaf, errorsTree; errorsTree = this.errorsForTree(); errorsLeaf = this.errorsForLeaf(); if (Utils.smOrEq(errorsLeaf, errorsTree)) { this.m_isLeaf = true; this.m_sons = null; this.m_localModel = new NoSplit(this.localModel().distribution()); } } /** * Computes error estimate for tree. */ private double errorsForTree() throws Exception { if (this.m_isLeaf) { return this.errorsForLeaf(); } else { double error = 0; for (int i = 0; i < this.m_sons.length; i++) { // XXX interrupt weka if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } if (Utils.eq(this.son(i).localModel().distribution().total(), 0)) { error += this.m_test.perBag(i) - this.m_test.perClassPerBag(i, this.localModel().distribution().maxClass()); } else { error += ((PruneableDecList) this.son(i)).errorsForTree(); } } return error; } } /** * Computes estimated errors for leaf. */ private double errorsForLeaf() throws Exception { return this.m_test.total() - this.m_test.perClass(this.localModel().distribution().maxClass()); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
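/* Worked illustration of pruneEnd() above (numbers hypothetical): errorsForLeaf() is the hold-out weight at the node minus the weight of its majority class, so 10 held-out instances with 7 in the majority class give 10 - 7 = 3 errors; if the subtree's branches together misclassify 4 held-out instances, 3 <= 4 holds and the subtree is collapsed into a leaf. */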
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/DecisionStump.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * DecisionStump.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.Sourcable; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.ContingencyTables; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.WeightedInstancesHandler; /** * <!-- globalinfo-start --> Class for building and using a decision stump. Usually used in * conjunction with a boosting algorithm. Does regression (based on mean-squared error) or * classification (based on entropy). Missing is treated as a separate value. * <p/> * <!-- globalinfo-end --> * * Typical usage: * <p> * <code>java weka.classifiers.meta.LogitBoost -I 100 -W weka.classifiers.trees.DecisionStump * -t training_data </code> * <p> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision$ */ public class DecisionStump extends AbstractClassifier implements WeightedInstancesHandler, Sourcable { /** for serialization */ static final long serialVersionUID = 1618384535950391L; /** The attribute used for classification. */ protected int m_AttIndex; /** The split point (index respectively). */ protected double m_SplitPoint; /** The distribution of class values or the means in each subset. */ protected double[][] m_Distribution; /** The instances used for training. */ protected Instances m_Instances; /** a ZeroR model in case no model can be built from the data */ protected Classifier m_ZeroR; /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for building and using a decision stump. Usually used in " + "conjunction with a boosting algorithm. Does regression (based on " + "mean-squared error) or classification (based on entropy). Missing " + "is treated as a separate value."; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Generates the classifier. 
* * @param instances * set of instances serving as training data * @throws Exception * if the classifier has not been generated successfully */ @Override public void buildClassifier(Instances instances) throws Exception { double bestVal = Double.MAX_VALUE, currVal; double bestPoint = -Double.MAX_VALUE; int bestAtt = -1, numClasses; // can classifier handle the data? this.getCapabilities().testWithFail(instances); // remove instances with missing class instances = new Instances(instances); instances.deleteWithMissingClass(); // only class? -> build ZeroR model if (instances.numAttributes() == 1) { System.err.println("Cannot build model (only class attribute present in data!), " + "using ZeroR model instead!"); this.m_ZeroR = new weka.classifiers.rules.ZeroR(); this.m_ZeroR.buildClassifier(instances); return; } else { this.m_ZeroR = null; } double[][] bestDist = new double[3][instances.numClasses()]; this.m_Instances = new Instances(instances); if (this.m_Instances.classAttribute().isNominal()) { numClasses = this.m_Instances.numClasses(); } else { numClasses = 1; } // For each attribute boolean first = true; for (int i = 0; i < this.m_Instances.numAttributes(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (i != this.m_Instances.classIndex()) { // Reserve space for distribution. this.m_Distribution = new double[3][numClasses]; // Compute value of criterion for best split on attribute if (this.m_Instances.attribute(i).isNominal()) { currVal = this.findSplitNominal(i); } else { currVal = this.findSplitNumeric(i); } if ((first) || (currVal < bestVal)) { bestVal = currVal; bestAtt = i; bestPoint = this.m_SplitPoint; for (int j = 0; j < 3; j++) { System.arraycopy(this.m_Distribution[j], 0, bestDist[j], 0, numClasses); } } // First attribute has been investigated first = false; } } // Set attribute, split point and distribution. this.m_AttIndex = bestAtt; this.m_SplitPoint = bestPoint; this.m_Distribution = bestDist; if (this.m_Instances.classAttribute().isNominal()) { for (int i = 0; i < this.m_Distribution.length; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } double sumCounts = Utils.sum(this.m_Distribution[i]); if (sumCounts == 0) { // This means there were only missing attribute values System.arraycopy(this.m_Distribution[2], 0, this.m_Distribution[i], 0, this.m_Distribution[2].length); Utils.normalize(this.m_Distribution[i]); } else { Utils.normalize(this.m_Distribution[i], sumCounts); } } } // Save memory this.m_Instances = new Instances(this.m_Instances, 0); } /** * Calculates the class membership probabilities for the given test instance. * * @param instance * the instance to be classified * @return predicted class probability distribution * @throws Exception * if distribution can't be computed */ @Override public double[] distributionForInstance(final Instance instance) throws Exception { // default model? if (this.m_ZeroR != null) { return this.m_ZeroR.distributionForInstance(instance); } return this.m_Distribution[this.whichSubset(instance)]; } /** * Returns the decision tree as Java source code. 
* * @param className * the classname of the generated code * @return the tree as Java source code * @throws Exception * if something goes wrong */ @Override public String toSource(final String className) throws Exception { StringBuffer text = new StringBuffer("class "); Attribute c = this.m_Instances.classAttribute(); text.append(className).append(" {\n" + " public static double classify(Object[] i) {\n"); text.append(" /* " + this.m_Instances.attribute(this.m_AttIndex).name() + " */\n"); text.append(" if (i[").append(this.m_AttIndex); text.append("] == null) { return "); text.append(this.sourceClass(c, this.m_Distribution[2])).append(";"); if (this.m_Instances.attribute(this.m_AttIndex).isNominal()) { text.append(" } else if (((String)i[").append(this.m_AttIndex); text.append("]).equals(\""); text.append(this.m_Instances.attribute(this.m_AttIndex).value((int) this.m_SplitPoint)); text.append("\")"); } else { text.append(" } else if (((Double)i[").append(this.m_AttIndex); text.append("]).doubleValue() <= ").append(this.m_SplitPoint); } text.append(") { return "); text.append(this.sourceClass(c, this.m_Distribution[0])).append(";"); text.append(" } else { return "); text.append(this.sourceClass(c, this.m_Distribution[1])).append(";"); text.append(" }\n }\n}\n"); return text.toString(); } /** * Returns the value as string out of the given distribution * * @param c * the attribute to get the value for * @param dist * the distribution to extract the value * @return the value */ protected String sourceClass(final Attribute c, final double[] dist) { if (c.isNominal()) { return Integer.toString(Utils.maxIndex(dist)); } else { return Double.toString(dist[0]); } } /** * Returns a description of the classifier. * * @return a description of the classifier as a string. */ @Override public String toString() { // only ZeroR model? 
if (this.m_ZeroR != null) { StringBuffer buf = new StringBuffer(); buf.append(this.getClass().getName().replaceAll(".*\\.", "") + "\n"); buf.append(this.getClass().getName().replaceAll(".*\\.", "").replaceAll(".", "=") + "\n\n"); buf.append("Warning: No model could be built, hence ZeroR model is used:\n\n"); buf.append(this.m_ZeroR.toString()); return buf.toString(); } if (this.m_Instances == null) { return "Decision Stump: No model built yet."; } try { StringBuffer text = new StringBuffer(); text.append("Decision Stump\n\n"); text.append("Classifications\n\n"); Attribute att = this.m_Instances.attribute(this.m_AttIndex); if (att.isNominal()) { text.append(att.name() + " = " + att.value((int) this.m_SplitPoint) + " : "); text.append(this.printClass(this.m_Distribution[0])); text.append(att.name() + " != " + att.value((int) this.m_SplitPoint) + " : "); text.append(this.printClass(this.m_Distribution[1])); } else { text.append(att.name() + " <= " + this.m_SplitPoint + " : "); text.append(this.printClass(this.m_Distribution[0])); text.append(att.name() + " > " + this.m_SplitPoint + " : "); text.append(this.printClass(this.m_Distribution[1])); } text.append(att.name() + " is missing : "); text.append(this.printClass(this.m_Distribution[2])); if (this.m_Instances.classAttribute().isNominal()) { text.append("\nClass distributions\n\n"); if (att.isNominal()) { text.append(att.name() + " = " + att.value((int) this.m_SplitPoint) + "\n"); text.append(this.printDist(this.m_Distribution[0])); text.append(att.name() + " != " + att.value((int) this.m_SplitPoint) + "\n"); text.append(this.printDist(this.m_Distribution[1])); } else { text.append(att.name() + " <= " + this.m_SplitPoint + "\n"); text.append(this.printDist(this.m_Distribution[0])); text.append(att.name() + " > " + this.m_SplitPoint + "\n"); text.append(this.printDist(this.m_Distribution[1])); } text.append(att.name() + " is missing\n"); text.append(this.printDist(this.m_Distribution[2])); } return text.toString(); } catch (Exception e) { return "Can't print decision stump classifier!"; } } /** * Prints a class distribution. * * @param dist * the class distribution to print * @return the distribution as a string * @throws Exception * if distribution can't be printed */ protected String printDist(final double[] dist) throws Exception { StringBuffer text = new StringBuffer(); if (this.m_Instances.classAttribute().isNominal()) { for (int i = 0; i < this.m_Instances.numClasses(); i++) { text.append(this.m_Instances.classAttribute().value(i) + "\t"); } text.append("\n"); for (int i = 0; i < this.m_Instances.numClasses(); i++) { text.append(dist[i] + "\t"); } text.append("\n"); } return text.toString(); } /** * Prints a classification. * * @param dist * the class distribution * @return the classification as a string * @throws Exception * if the classification can't be printed */ protected String printClass(final double[] dist) throws Exception { StringBuffer text = new StringBuffer(); if (this.m_Instances.classAttribute().isNominal()) { text.append(this.m_Instances.classAttribute().value(Utils.maxIndex(dist))); } else { text.append(dist[0]); } return text.toString() + "\n"; } /** * Finds best split for nominal attribute and returns value. 
* * @param index * attribute index * @return value of criterion for the best split * @throws Exception * if something goes wrong */ protected double findSplitNominal(final int index) throws Exception { if (this.m_Instances.classAttribute().isNominal()) { return this.findSplitNominalNominal(index); } else { return this.findSplitNominalNumeric(index); } } /** * Finds best split for nominal attribute and nominal class and returns value. * * @param index * attribute index * @return value of criterion for the best split * @throws Exception * if something goes wrong */ protected double findSplitNominalNominal(final int index) throws Exception { double bestVal = Double.MAX_VALUE, currVal; double[][] counts = new double[this.m_Instances.attribute(index).numValues() + 1][this.m_Instances.numClasses()]; double[] sumCounts = new double[this.m_Instances.numClasses()]; double[][] bestDist = new double[3][this.m_Instances.numClasses()]; int numMissing = 0; // Compute counts for all the values for (int i = 0; i < this.m_Instances.numInstances(); i++) { Instance inst = this.m_Instances.instance(i); if (inst.isMissing(index)) { numMissing++; counts[this.m_Instances.attribute(index).numValues()][(int) inst.classValue()] += inst.weight(); } else { counts[(int) inst.value(index)][(int) inst.classValue()] += inst.weight(); } } // Compute sum of counts for (int i = 0; i < this.m_Instances.attribute(index).numValues(); i++) { for (int j = 0; j < this.m_Instances.numClasses(); j++) { sumCounts[j] += counts[i][j]; } } // Make split counts for each possible split and evaluate System.arraycopy(counts[this.m_Instances.attribute(index).numValues()], 0, this.m_Distribution[2], 0, this.m_Instances.numClasses()); for (int i = 0; i < this.m_Instances.attribute(index).numValues(); i++) { for (int j = 0; j < this.m_Instances.numClasses(); j++) { this.m_Distribution[0][j] = counts[i][j]; this.m_Distribution[1][j] = sumCounts[j] - counts[i][j]; } currVal = ContingencyTables.entropyConditionedOnRows(this.m_Distribution); if (currVal < bestVal) { bestVal = currVal; this.m_SplitPoint = i; for (int j = 0; j < 3; j++) { System.arraycopy(this.m_Distribution[j], 0, bestDist[j], 0, this.m_Instances.numClasses()); } } } // No missing values in training data. if (numMissing == 0) { System.arraycopy(sumCounts, 0, bestDist[2], 0, this.m_Instances.numClasses()); } this.m_Distribution = bestDist; return bestVal; } /** * Finds best split for nominal attribute and numeric class and returns value. 
* * @param index * attribute index * @return value of criterion for the best split * @throws Exception * if something goes wrong */ protected double findSplitNominalNumeric(final int index) throws Exception { double bestVal = Double.MAX_VALUE, currVal; double[] sumsSquaresPerValue = new double[this.m_Instances.attribute(index).numValues()], sumsPerValue = new double[this.m_Instances.attribute(index).numValues()], weightsPerValue = new double[this.m_Instances.attribute(index).numValues()]; double totalSumSquaresW = 0, totalSumW = 0, totalSumOfWeightsW = 0, totalSumOfWeights = 0, totalSum = 0; double[] sumsSquares = new double[3], sumOfWeights = new double[3]; double[][] bestDist = new double[3][1]; // Compute counts for all the values for (int i = 0; i < this.m_Instances.numInstances(); i++) { Instance inst = this.m_Instances.instance(i); if (inst.isMissing(index)) { this.m_Distribution[2][0] += inst.classValue() * inst.weight(); sumsSquares[2] += inst.classValue() * inst.classValue() * inst.weight(); sumOfWeights[2] += inst.weight(); } else { weightsPerValue[(int) inst.value(index)] += inst.weight(); sumsPerValue[(int) inst.value(index)] += inst.classValue() * inst.weight(); sumsSquaresPerValue[(int) inst.value(index)] += inst.classValue() * inst.classValue() * inst.weight(); } totalSumOfWeights += inst.weight(); totalSum += inst.classValue() * inst.weight(); } // Check if the total weight is zero if (totalSumOfWeights <= 0) { return bestVal; } // Compute sum of counts without missing ones for (int i = 0; i < this.m_Instances.attribute(index).numValues(); i++) { totalSumOfWeightsW += weightsPerValue[i]; totalSumSquaresW += sumsSquaresPerValue[i]; totalSumW += sumsPerValue[i]; } // Make split counts for each possible split and evaluate for (int i = 0; i < this.m_Instances.attribute(index).numValues(); i++) { this.m_Distribution[0][0] = sumsPerValue[i]; sumsSquares[0] = sumsSquaresPerValue[i]; sumOfWeights[0] = weightsPerValue[i]; this.m_Distribution[1][0] = totalSumW - sumsPerValue[i]; sumsSquares[1] = totalSumSquaresW - sumsSquaresPerValue[i]; sumOfWeights[1] = totalSumOfWeightsW - weightsPerValue[i]; currVal = this.variance(this.m_Distribution, sumsSquares, sumOfWeights); if (currVal < bestVal) { bestVal = currVal; this.m_SplitPoint = i; for (int j = 0; j < 3; j++) { if (sumOfWeights[j] > 0) { bestDist[j][0] = this.m_Distribution[j][0] / sumOfWeights[j]; } else { bestDist[j][0] = totalSum / totalSumOfWeights; } } } } this.m_Distribution = bestDist; return bestVal; } /** * Finds best split for numeric attribute and returns value. * * @param index * attribute index * @return value of criterion for the best split * @throws Exception * if something goes wrong */ protected double findSplitNumeric(final int index) throws Exception { if (this.m_Instances.classAttribute().isNominal()) { return this.findSplitNumericNominal(index); } else { return this.findSplitNumericNumeric(index); } } /** * Finds best split for numeric attribute and nominal class and returns value. 
* * @param index * attribute index * @return value of criterion for the best split * @throws Exception * if something goes wrong */ protected double findSplitNumericNominal(final int index) throws Exception { double bestVal = Double.MAX_VALUE, currVal, currCutPoint; int numMissing = 0; double[] sum = new double[this.m_Instances.numClasses()]; double[][] bestDist = new double[3][this.m_Instances.numClasses()]; // Compute counts for all the values for (int i = 0; i < this.m_Instances.numInstances(); i++) { Instance inst = this.m_Instances.instance(i); if (!inst.isMissing(index)) { this.m_Distribution[1][(int) inst.classValue()] += inst.weight(); } else { this.m_Distribution[2][(int) inst.classValue()] += inst.weight(); numMissing++; } } System.arraycopy(this.m_Distribution[1], 0, sum, 0, this.m_Instances.numClasses()); // Save current distribution as best distribution for (int j = 0; j < 3; j++) { System.arraycopy(this.m_Distribution[j], 0, bestDist[j], 0, this.m_Instances.numClasses()); } // Sort instances this.m_Instances.sort(index); // Make split counts for each possible split and evaluate for (int i = 0; i < this.m_Instances.numInstances() - (numMissing + 1); i++) { if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } Instance inst = this.m_Instances.instance(i); Instance instPlusOne = this.m_Instances.instance(i + 1); this.m_Distribution[0][(int) inst.classValue()] += inst.weight(); this.m_Distribution[1][(int) inst.classValue()] -= inst.weight(); if (inst.value(index) < instPlusOne.value(index)) { currCutPoint = (inst.value(index) + instPlusOne.value(index)) / 2.0; currVal = ContingencyTables.entropyConditionedOnRows(this.m_Distribution); if (currVal < bestVal) { this.m_SplitPoint = currCutPoint; bestVal = currVal; for (int j = 0; j < 3; j++) { System.arraycopy(this.m_Distribution[j], 0, bestDist[j], 0, this.m_Instances.numClasses()); } } } } // No missing values in training data. if (numMissing == 0) { System.arraycopy(sum, 0, bestDist[2], 0, this.m_Instances.numClasses()); } this.m_Distribution = bestDist; return bestVal; } /** * Finds best split for numeric attribute and numeric class and returns value. 
* * @param index * attribute index * @return value of criterion for the best split * @throws Exception * if something goes wrong */ protected double findSplitNumericNumeric(final int index) throws Exception { double bestVal = Double.MAX_VALUE, currVal, currCutPoint; int numMissing = 0; double[] sumsSquares = new double[3], sumOfWeights = new double[3]; double[][] bestDist = new double[3][1]; double totalSum = 0, totalSumOfWeights = 0; // Compute counts for all the values for (int i = 0; i < this.m_Instances.numInstances(); i++) { Instance inst = this.m_Instances.instance(i); if (!inst.isMissing(index)) { this.m_Distribution[1][0] += inst.classValue() * inst.weight(); sumsSquares[1] += inst.classValue() * inst.classValue() * inst.weight(); sumOfWeights[1] += inst.weight(); } else { this.m_Distribution[2][0] += inst.classValue() * inst.weight(); sumsSquares[2] += inst.classValue() * inst.classValue() * inst.weight(); sumOfWeights[2] += inst.weight(); numMissing++; } totalSumOfWeights += inst.weight(); totalSum += inst.classValue() * inst.weight(); } // Check if the total weight is zero if (totalSumOfWeights <= 0) { return bestVal; } // Sort instances this.m_Instances.sort(index); // Make split counts for each possible split and evaluate for (int i = 0; i < this.m_Instances.numInstances() - (numMissing + 1); i++) { Instance inst = this.m_Instances.instance(i); Instance instPlusOne = this.m_Instances.instance(i + 1); this.m_Distribution[0][0] += inst.classValue() * inst.weight(); sumsSquares[0] += inst.classValue() * inst.classValue() * inst.weight(); sumOfWeights[0] += inst.weight(); this.m_Distribution[1][0] -= inst.classValue() * inst.weight(); sumsSquares[1] -= inst.classValue() * inst.classValue() * inst.weight(); sumOfWeights[1] -= inst.weight(); if (inst.value(index) < instPlusOne.value(index)) { currCutPoint = (inst.value(index) + instPlusOne.value(index)) / 2.0; currVal = this.variance(this.m_Distribution, sumsSquares, sumOfWeights); if (currVal < bestVal) { this.m_SplitPoint = currCutPoint; bestVal = currVal; for (int j = 0; j < 3; j++) { if (sumOfWeights[j] > 0) { bestDist[j][0] = this.m_Distribution[j][0] / sumOfWeights[j]; } else { bestDist[j][0] = totalSum / totalSumOfWeights; } } } } } this.m_Distribution = bestDist; return bestVal; } /** * Computes variance for subsets. * * @param s * @param sS * @param sumOfWeights * @return the variance */ protected double variance(final double[][] s, final double[] sS, final double[] sumOfWeights) { double var = 0; for (int i = 0; i < s.length; i++) { if (sumOfWeights[i] > 0) { var += sS[i] - ((s[i][0] * s[i][0]) / sumOfWeights[i]); } } return var; } /** * Returns the subset an instance falls into. * * @param instance * the instance to check * @return the subset the instance falls into * @throws Exception * if something goes wrong */ protected int whichSubset(final Instance instance) throws Exception { if (instance.isMissing(this.m_AttIndex)) { return 2; } else if (instance.attribute(this.m_AttIndex).isNominal()) { if ((int) instance.value(this.m_AttIndex) == this.m_SplitPoint) { return 0; } else { return 1; } } else { if (instance.value(this.m_AttIndex) <= this.m_SplitPoint) { return 0; } else { return 1; } } } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. * * @param argv * the options */ public static void main(final String[] argv) { runClassifier(new DecisionStump(), argv); } }
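/* Minimal usage sketch (not part of the original source; the file name "weather.arff" and a class attribute in last position are assumptions for illustration): trains the stump above on an ARFF file and prints the chosen split and subset distributions. */ class DecisionStumpDemo { public static void main(final String[] args) throws Exception { weka.core.Instances data = new weka.core.Instances(new java.io.BufferedReader(new java.io.FileReader("weather.arff"))); data.setClassIndex(data.numAttributes() - 1); DecisionStump stump = new DecisionStump(); stump.buildClassifier(data); System.out.println(stump); System.out.println(java.util.Arrays.toString(stump.distributionForInstance(data.instance(0)))); } }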
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/HoeffdingTree.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * HoeffdingTree.java * Copyright (C) 2013 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees; import java.io.Serializable; import java.util.ArrayList; import java.util.Collections; import java.util.Enumeration; import java.util.List; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.UpdateableClassifier; import weka.classifiers.trees.ht.ActiveHNode; import weka.classifiers.trees.ht.GiniSplitMetric; import weka.classifiers.trees.ht.HNode; import weka.classifiers.trees.ht.InactiveHNode; import weka.classifiers.trees.ht.InfoGainSplitMetric; import weka.classifiers.trees.ht.LeafNode; import weka.classifiers.trees.ht.LearningNode; import weka.classifiers.trees.ht.NBNode; import weka.classifiers.trees.ht.NBNodeAdaptive; import weka.classifiers.trees.ht.SplitCandidate; import weka.classifiers.trees.ht.SplitMetric; import weka.classifiers.trees.ht.SplitNode; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Drawable; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; /** * <!-- globalinfo-start --> A Hoeffding tree (VFDT) is an incremental, anytime decision tree * induction algorithm that is capable of learning from massive data streams, assuming that the * distribution generating examples does not change over time. Hoeffding trees exploit the fact that * a small sample can often be enough to choose an optimal splitting attribute. This idea is * supported mathematically by the Hoeffding bound, which quantifies the number of observations (in * our case, examples) needed to estimate some statistics within a prescribed precision (in our * case, the goodness of an attribute).<br/> * <br/> * A theoretically appealing feature of Hoeffding Trees not shared by other incremental decision tree * learners is that it has sound guarantees of performance. Using the Hoeffding bound one can show * that its output is asymptotically nearly identical to that of a non-incremental learner using * infinitely many examples. For more information see: <br/> * <br/> * Geoff Hulten, Laurie Spencer, Pedro Domingos: Mining time-changing data streams. In: ACM SIGKDD * Intl. Conf. on Knowledge Discovery and Data Mining, 97-106, 2001. * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;inproceedings{Hulten2001, * author = {Geoff Hulten and Laurie Spencer and Pedro Domingos}, * booktitle = {ACM SIGKDD Intl. 
Conf. on Knowledge Discovery and Data Mining}, * pages = {97-106}, * publisher = {ACM Press}, * title = {Mining time-changing data streams}, * year = {2001} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -L * The leaf prediction strategy to use. 0 = majority class, 1 = naive Bayes, 2 = naive Bayes adaptive. * (default = 0) * </pre> * * <pre> * -S * The splitting criterion to use. 0 = Gini, 1 = Info gain * (default = 0) * </pre> * * <pre> * -E * The allowable error in a split decision - values closer to zero will take longer to decide * (default = 1e-7) * </pre> * * <pre> * -H * Threshold below which a split will be forced to break ties * (default = 0.05) * </pre> * * <pre> * -M * Minimum fraction of weight required down at least two branches for info gain splitting * (default = 0.01) * </pre> * * <pre> * -G * Grace period - the number of instances a leaf should observe between split attempts * (default = 200) * </pre> * * <pre> * -N * The number of instances (weight) a leaf should observe before allowing naive Bayes to make predictions (NB or NB adaptive only) * (default = 0) * </pre> * * <pre> * -P * Print leaf models when using naive Bayes at the leaves. * </pre> * * <!-- options-end --> * * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision$ */ public class HoeffdingTree extends AbstractClassifier implements UpdateableClassifier, WeightedInstancesHandler, OptionHandler, RevisionHandler, TechnicalInformationHandler, Drawable, Serializable { /** * For serialization */ private static final long serialVersionUID = 7117521775722396251L; protected Instances m_header; protected HNode m_root; /** The number of instances a leaf should observe between split attempts */ protected double m_gracePeriod = 200; /** * The allowable error in a split decision. 
Values closer to zero will take longer to decide */ protected double m_splitConfidence = 0.0000001; /** Threshold below which a split will be forced to break ties */ protected double m_hoeffdingTieThreshold = 0.05; /** * The minimum fraction of weight required down at least two branches for info gain splitting */ protected double m_minFracWeightForTwoBranchesGain = 0.01; /** The splitting metric to use */ protected int m_selectedSplitMetric = INFO_GAIN_SPLIT; protected SplitMetric m_splitMetric = new InfoGainSplitMetric(this.m_minFracWeightForTwoBranchesGain); /** The leaf prediction strategy to use */ protected int m_leafStrategy = LEAF_NB_ADAPTIVE; /** * The number of instances (total weight) a leaf should observe before allowing naive Bayes to make * predictions */ protected double m_nbThreshold = 0; protected int m_activeLeafCount; protected int m_inactiveLeafCount; protected int m_decisionNodeCount; public static final int GINI_SPLIT = 0; public static final int INFO_GAIN_SPLIT = 1; public static final Tag[] TAGS_SELECTION = { new Tag(GINI_SPLIT, "Gini split"), new Tag(INFO_GAIN_SPLIT, "Info gain split") }; public static final int LEAF_MAJ_CLASS = 0; public static final int LEAF_NB = 1; public static final int LEAF_NB_ADAPTIVE = 2; public static final Tag[] TAGS_SELECTION2 = { new Tag(LEAF_MAJ_CLASS, "Majority class"), new Tag(LEAF_NB, "Naive Bayes"), new Tag(LEAF_NB_ADAPTIVE, "Naive Bayes adaptive") }; /** * Print out leaf models in the case of naive Bayes or naive Bayes adaptive leaves */ protected boolean m_printLeafModels; /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "A Hoeffding tree (VFDT) is an incremental, anytime decision tree induction algorithm" + " that is capable of learning from massive data streams, assuming that the" + " distribution generating examples does not change over time. Hoeffding trees" + " exploit the fact that a small sample can often be enough to choose an optimal" + " splitting attribute. This idea is supported mathematically by the Hoeffding" + " bound, which quantifies the number of observations (in our case, examples)" + " needed to estimate some statistics within a prescribed precision (in our" + " case, the goodness of an attribute).\n\nA theoretically appealing feature " + "of Hoeffding Trees not shared by other incremental decision tree learners is that " + "it has sound guarantees of performance. Using the Hoeffding bound one can show that " + "its output is asymptotically nearly identical to that of a non-incremental learner " + "using infinitely many examples. For more information see: \n\n" + this.getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed information about the * technical background of this class, e.g., paper reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Geoff Hulten and Laurie Spencer and Pedro Domingos"); result.setValue(Field.TITLE, "Mining time-changing data streams"); result.setValue(Field.BOOKTITLE, "ACM SIGKDD Intl. Conf. 
on Knowledge Discovery and Data Mining"); result.setValue(Field.YEAR, "2001"); result.setValue(Field.PAGES, "97-106"); result.setValue(Field.PUBLISHER, "ACM Press"); return result; } protected void reset() { this.m_root = null; this.m_activeLeafCount = 0; this.m_inactiveLeafCount = 0; this.m_decisionNodeCount = 0; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); result.setMinimumNumberInstances(0); return result; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<>(); newVector.add(new Option("\tThe leaf prediction strategy to use. 0 = " + "majority class, 1 = naive Bayes, 2 = naive Bayes adaptive.\n\t" + "(default = 2)", "L", 1, "-L")); newVector.add(new Option("\tThe splitting criterion to use. 0 = " + "Gini, 1 = Info gain\n\t" + "(default = 1)", "S", 1, "-S")); newVector.add(new Option("\tThe allowable error in a split decision " + "- values closer to zero will take longer to decide\n\t" + "(default = 1e-7)", "E", 1, "-E")); newVector.add(new Option("\tThreshold below which a split will be forced to " + "break ties\n\t(default = 0.05)", "H", 1, "-H")); newVector.add(new Option("\tMinimum fraction of weight required down at least two " + "branches for info gain splitting\n\t(default = 0.01)", "M", 1, "-M")); newVector.add(new Option("\tGrace period - the number of instances " + "a leaf should observe between split attempts\n\t" + "(default = 200)", "G", 1, "-G")); newVector.add(new Option("\tThe number of instances (weight) a leaf " + "should observe before allowing naive Bayes to make " + "predictions (NB or NB adaptive only)\n\t(default = 0)", "N", 1, "-N")); newVector.add(new Option("\tPrint leaf models when using naive Bayes " + "at the leaves.", "P", 0, "-P")); return newVector.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -L * The leaf prediction strategy to use. 0 = majority class, 1 = naive Bayes, 2 = naive Bayes adaptive. * (default = 0) * </pre> * * <pre> * -S * The splitting criterion to use. 0 = Gini, 1 = Info gain * (default = 0) * </pre> * * <pre> * -E * The allowable error in a split decision - values closer to zero will take longer to decide * (default = 1e-7) * </pre> * * <pre> * -H * Threshold below which a split will be forced to break ties * (default = 0.05) * </pre> * * <pre> * -M * Minimum fraction of weight required down at least two branches for info gain splitting * (default = 0.01) * </pre> * * <pre> * -G * Grace period - the number of instances a leaf should observe between split attempts * (default = 200) * </pre> * * <pre> * -N * The number of instances (weight) a leaf should observe before allowing naive Bayes to make predictions (NB or NB adaptive only) * (default = 0) * </pre> * * <pre> * -P * Print leaf models when using naive Bayes at the leaves. 
* </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { this.reset(); super.setOptions(options); String opt = Utils.getOption('L', options); if (opt.length() > 0) { this.setLeafPredictionStrategy(new SelectedTag(Integer.parseInt(opt), TAGS_SELECTION2)); } opt = Utils.getOption('S', options); if (opt.length() > 0) { this.setSplitCriterion(new SelectedTag(Integer.parseInt(opt), TAGS_SELECTION)); } opt = Utils.getOption('E', options); if (opt.length() > 0) { this.setSplitConfidence(Double.parseDouble(opt)); } opt = Utils.getOption('H', options); if (opt.length() > 0) { this.setHoeffdingTieThreshold(Double.parseDouble(opt)); } opt = Utils.getOption('M', options); if (opt.length() > 0) { this.setMinimumFractionOfWeightInfoGain(Double.parseDouble(opt)); } opt = Utils.getOption('G', options); if (opt.length() > 0) { this.setGracePeriod(Double.parseDouble(opt)); } opt = Utils.getOption('N', options); if (opt.length() > 0) { this.setNaiveBayesPredictionThreshold(Double.parseDouble(opt)); } this.m_printLeafModels = Utils.getFlag('P', options); } /** * Gets the current settings of the Classifier. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { ArrayList<String> options = new ArrayList<>(); options.add("-L"); options.add("" + this.getLeafPredictionStrategy().getSelectedTag().getID()); options.add("-S"); options.add("" + this.getSplitCriterion().getSelectedTag().getID()); options.add("-E"); options.add("" + this.getSplitConfidence()); options.add("-H"); options.add("" + this.getHoeffdingTieThreshold()); options.add("-M"); options.add("" + this.getMinimumFractionOfWeightInfoGain()); options.add("-G"); options.add("" + this.getGracePeriod()); options.add("-N"); options.add("" + this.getNaiveBayesPredictionThreshold()); if (this.m_printLeafModels) { options.add("-P"); } return options.toArray(new String[1]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String printLeafModelsTipText() { return "Print leaf models (naive bayes leaves only)"; } public void setPrintLeafModels(final boolean p) { this.m_printLeafModels = p; } public boolean getPrintLeafModels() { return this.m_printLeafModels; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String minimumFractionOfWeightInfoGainTipText() { return "Minimum fraction of weight required down at least two branches " + "for info gain splitting."; } /** * Set the minimum fraction of weight required down at least two branches for info gain splitting * * @param m * the minimum fraction of weight */ public void setMinimumFractionOfWeightInfoGain(final double m) { this.m_minFracWeightForTwoBranchesGain = m; } /** * Get the minimum fraction of weight required down at least two branches for info gain splitting * * @return the minimum fraction of weight */ public double getMinimumFractionOfWeightInfoGain() { return this.m_minFracWeightForTwoBranchesGain; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String gracePeriodTipText() { return "Number of instances (or total weight of instances) a leaf " + "should observe between split 
attempts."; } /** * Set the number of instances (or total weight of instances) a leaf should observe between split * attempts * * @param grace * the grace period */ public void setGracePeriod(final double grace) { this.m_gracePeriod = grace; } /** * Get the number of instances (or total weight of instances) a leaf should observe between split * attempts * * @return the grace period */ public double getGracePeriod() { return this.m_gracePeriod; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String hoeffdingTieThresholdTipText() { return "Theshold below which a split will be forced to break ties."; } /** * Set the threshold below which a split will be forced to break ties * * @param ht * the threshold */ public void setHoeffdingTieThreshold(final double ht) { this.m_hoeffdingTieThreshold = ht; } /** * Get the threshold below which a split will be forced to break ties * * @return the threshold */ public double getHoeffdingTieThreshold() { return this.m_hoeffdingTieThreshold; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String splitConfidenceTipText() { return "The allowable error in a split decision. Values closer to zero " + "will take longer to decide."; } /** * Set the allowable error in a split decision. Values closer to zero will take longer to decide. * * @param sc * the split confidence */ public void setSplitConfidence(final double sc) { this.m_splitConfidence = sc; } /** * Get the allowable error in a split decision. Values closer to zero will take longer to decide. * * @return the split confidence */ public double getSplitConfidence() { return this.m_splitConfidence; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String splitCriterionTipText() { return "The splitting criterion to use"; } /** * Set the split criterion to use (either Gini or info gain). * * @param crit * the criterion to use */ public void setSplitCriterion(final SelectedTag crit) { if (crit.getTags() == TAGS_SELECTION) { this.m_selectedSplitMetric = crit.getSelectedTag().getID(); } } /** * Get the split criterion to use (either Gini or info gain). 
* * @return the criterion to use */ public SelectedTag getSplitCriterion() { return new SelectedTag(this.m_selectedSplitMetric, TAGS_SELECTION); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String leafPredictionStrategyTipText() { return "The leaf prediction strategy to use"; } /** * Set the leaf prediction strategy to use (majority class, naive Bayes or naive Bayes adaptive) * * @param strat * the strategy to use */ public void setLeafPredictionStrategy(final SelectedTag strat) { if (strat.getTags() == TAGS_SELECTION2) { this.m_leafStrategy = strat.getSelectedTag().getID(); } } /** * Get the leaf prediction strategy to use (majority class, naive Bayes or naive Bayes adaptive) * * @return the strategy to use */ public SelectedTag getLeafPredictionStrategy() { return new SelectedTag(this.m_leafStrategy, TAGS_SELECTION2); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String naiveBayesPredictionThresholdTipText() { return "The number of instances (weight) a leaf should observe " + "before allowing naive Bayes (adaptive) to make predictions"; } /** * Set the number of instances (weight) a leaf should observe before allowing naive Bayes to make * predictions * * @param n * the number/weight of instances */ public void setNaiveBayesPredictionThreshold(final double n) { this.m_nbThreshold = n; } /** * Get the number of instances (weight) a leaf should observe before allowing naive Bayes to make * predictions * * @return the number/weight of instances */ public double getNaiveBayesPredictionThreshold() { return this.m_nbThreshold; } protected static double computeHoeffdingBound(final double max, final double confidence, final double weight) { return Math.sqrt(((max * max) * Math.log(1.0 / confidence)) / (2.0 * weight)); } /** * Builds the classifier. * * @param data * the data to train with * @throws Exception * if classifier can't be built successfully */ @Override public void buildClassifier(Instances data) throws Exception { this.reset(); // can classifier handle the data? this.getCapabilities().testWithFail(data); this.m_header = new Instances(data, 0); if (this.m_selectedSplitMetric == GINI_SPLIT) { this.m_splitMetric = new GiniSplitMetric(); } else { this.m_splitMetric = new InfoGainSplitMetric(this.m_minFracWeightForTwoBranchesGain); } data = new Instances(data); data.deleteWithMissingClass(); for (int i = 0; i < data.numInstances(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } this.updateClassifier(data.instance(i)); } } /** * Updates the classifier with the given instance. * * @param inst * the new training instance to include in the model * @exception Exception * if the instance could not be incorporated in the model. 
*/ @Override public void updateClassifier(final Instance inst) throws Exception { if (inst.classIsMissing()) { return; } if (this.m_root == null) { this.m_root = this.newLearningNode(); } LeafNode l = this.m_root.leafForInstance(inst, null, null); HNode actualNode = l.m_theNode; if (actualNode == null) { actualNode = new ActiveHNode(); l.m_parentNode.setChild(l.m_parentBranch, actualNode); } if (actualNode instanceof LearningNode) { actualNode.updateNode(inst); if (/* m_growthAllowed && */actualNode instanceof ActiveHNode) { double totalWeight = actualNode.totalWeight(); if (totalWeight - ((ActiveHNode) actualNode).m_weightSeenAtLastSplitEval > this.m_gracePeriod) { // try a split this.trySplit((ActiveHNode) actualNode, l.m_parentNode, l.m_parentBranch); ((ActiveHNode) actualNode).m_weightSeenAtLastSplitEval = totalWeight; } } } } /** * Returns class probabilities for an instance. * * @param inst * the instance to compute the distribution for * @return the class probabilities * @throws Exception * if distribution can't be computed successfully */ @Override public double[] distributionForInstance(final Instance inst) throws Exception { Attribute classAtt = inst.classAttribute(); double[] pred = new double[classAtt.numValues()]; if (this.m_root != null) { LeafNode l = this.m_root.leafForInstance(inst, null, null); HNode actualNode = l.m_theNode; if (actualNode == null) { actualNode = l.m_parentNode; } pred = actualNode.getDistribution(inst, classAtt); } else { // all class values equally likely for (int i = 0; i < classAtt.numValues(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } pred[i] = 1; } Utils.normalize(pred); } // Utils.normalize(pred); return pred; } /** * Deactivate (prevent growth) from the supplied node * * @param toDeactivate * the node to deactivate * @param parent * the node's parent * @param parentBranch * the branch leading to the node */ protected void deactivateNode(final ActiveHNode toDeactivate, final SplitNode parent, final String parentBranch) { HNode leaf = new InactiveHNode(toDeactivate.m_classDistribution); if (parent == null) { this.m_root = leaf; } else { parent.setChild(parentBranch, leaf); } this.m_activeLeafCount--; this.m_inactiveLeafCount++; } /** * Activate (allow growth) the supplied node * * @param toActivate * the node to activate * @param parent * the node's parent * @param parentBranch * the branch leading to the node */ protected void activateNode(final InactiveHNode toActivate, final SplitNode parent, final String parentBranch) { HNode leaf = new ActiveHNode(); leaf.m_classDistribution = toActivate.m_classDistribution; if (parent == null) { this.m_root = leaf; } else { parent.setChild(parentBranch, leaf); } this.m_activeLeafCount++; this.m_inactiveLeafCount--; } /** * Try a split from the supplied node * * @param node * the node to split * @param parent * the parent of the node * @param parentBranch * the branch leading to the node * @throws Exception * if a problem occurs */ protected void trySplit(final ActiveHNode node, final SplitNode parent, final String parentBranch) throws Exception { // non-pure? 
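/* The decision below applies computeHoeffdingBound() above: with metric range R, confidence delta (m_splitConfidence) and observed weight n, eps = sqrt(R*R * ln(1/delta) / (2*n)); a split is made when the merit gap between the best and second-best candidate splits exceeds eps, or when eps itself has shrunk below m_hoeffdingTieThreshold (treated as a tie). */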
if (node.numEntriesInClassDistribution() > 1) { List<SplitCandidate> bestSplits = node.getPossibleSplits(this.m_splitMetric); Collections.sort(bestSplits); boolean doSplit = false; if (bestSplits.size() < 2) { doSplit = bestSplits.size() > 0; } else { // compute the Hoeffding bound double metricMax = this.m_splitMetric.getMetricRange(node.m_classDistribution); double hoeffdingBound = computeHoeffdingBound(metricMax, this.m_splitConfidence, node.totalWeight()); SplitCandidate best = bestSplits.get(bestSplits.size() - 1); SplitCandidate secondBest = bestSplits.get(bestSplits.size() - 2); if (best.m_splitMerit - secondBest.m_splitMerit > hoeffdingBound || hoeffdingBound < this.m_hoeffdingTieThreshold) { doSplit = true; } // TODO - remove poor attributes stuff? } if (doSplit) { SplitCandidate best = bestSplits.get(bestSplits.size() - 1); if (best.m_splitTest == null) { // preprune this.deactivateNode(node, parent, parentBranch); } else { SplitNode newSplit = new SplitNode(node.m_classDistribution, best.m_splitTest); for (int i = 0; i < best.numSplits(); i++) { ActiveHNode newChild = this.newLearningNode(); newChild.m_classDistribution = best.m_postSplitClassDistributions.get(i); newChild.m_weightSeenAtLastSplitEval = newChild.totalWeight(); String branchName = ""; if (this.m_header.attribute(best.m_splitTest.splitAttributes().get(0)).isNumeric()) { branchName = i == 0 ? "left" : "right"; } else { Attribute splitAtt = this.m_header.attribute(best.m_splitTest.splitAttributes().get(0)); branchName = splitAtt.value(i); } newSplit.setChild(branchName, newChild); } this.m_activeLeafCount--; this.m_decisionNodeCount++; this.m_activeLeafCount += best.numSplits(); if (parent == null) { this.m_root = newSplit; } else { parent.setChild(parentBranch, newSplit); } } } } } /** * Create a new learning node (either majority class, naive Bayes or naive Bayes adaptive) * * @return a new learning node * @throws Exception * if a problem occurs */ protected ActiveHNode newLearningNode() throws Exception { ActiveHNode newChild; if (this.m_leafStrategy == LEAF_MAJ_CLASS) { newChild = new ActiveHNode(); } else if (this.m_leafStrategy == LEAF_NB) { newChild = new NBNode(this.m_header, this.m_nbThreshold); } else { newChild = new NBNodeAdaptive(this.m_header, this.m_nbThreshold); } return newChild; } /** * Returns a textual description of the model * * @return a String describing the model */ @Override public String toString() { if (this.m_root == null) { return "No model built yet!"; } return this.m_root.toString(this.m_printLeafModels); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } public static void main(final String[] args) { runClassifier(new HoeffdingTree(), args); } @Override public int graphType() { return Drawable.TREE; } @Override public String graph() throws Exception { if (this.m_root == null) { throw new Exception("No model built yet!"); } this.m_root.installNodeNums(0); StringBuffer buff = new StringBuffer(); buff.append("digraph HoeffdingTree {\n"); this.m_root.graphTree(buff); buff.append("}\n"); return buff.toString(); } }
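/* Minimal incremental-usage sketch (not part of the original source; the file name "stream.arff" is an assumption): the tree is initialised on an empty header and then fed one instance at a time through updateClassifier(), matching its single-pass, anytime design. */ class HoeffdingTreeDemo { public static void main(final String[] args) throws Exception { weka.core.Instances data = new weka.core.Instances(new java.io.BufferedReader(new java.io.FileReader("stream.arff"))); data.setClassIndex(data.numAttributes() - 1); HoeffdingTree ht = new HoeffdingTree(); ht.buildClassifier(new weka.core.Instances(data, 0)); for (int i = 0; i < data.numInstances(); i++) { ht.updateClassifier(data.instance(i)); } System.out.println(ht); } }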
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/J48.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * J48.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.Sourcable; import weka.classifiers.trees.j48.BinC45ModelSelection; import weka.classifiers.trees.j48.C45ModelSelection; import weka.classifiers.trees.j48.C45PruneableClassifierTree; import weka.classifiers.trees.j48.ClassifierTree; import weka.classifiers.trees.j48.ModelSelection; import weka.classifiers.trees.j48.PruneableClassifierTree; import weka.core.AdditionalMeasureProducer; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Drawable; import weka.core.Instance; import weka.core.Instances; import weka.core.Matchable; import weka.core.Option; import weka.core.OptionHandler; import weka.core.PartitionGenerator; import weka.core.RevisionUtils; import weka.core.Summarizable; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; /** * <!-- globalinfo-start --> Class for generating a pruned or unpruned C4.5 decision tree. For more * information, see<br/> * <br/> * Ross Quinlan (1993). C4.5: Programs for Machine Learning. Morgan Kaufmann Publishers, San Mateo, * CA. * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;book{Quinlan1993, * address = {San Mateo, CA}, * author = {Ross Quinlan}, * publisher = {Morgan Kaufmann Publishers}, * title = {C4.5: Programs for Machine Learning}, * year = {1993} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -U * Use unpruned tree. * </pre> * * <pre> * -O * Do not collapse tree. * </pre> * * <pre> * -C &lt;pruning confidence&gt; * Set confidence threshold for pruning. * (default 0.25) * </pre> * * <pre> * -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf. * (default 2) * </pre> * * <pre> * -R * Use reduced error pruning. * </pre> * * <pre> * -N &lt;number of folds&gt; * Set number of folds for reduced error * pruning. One fold is used as pruning set. * (default 3) * </pre> * * <pre> * -B * Use binary splits only. * </pre> * * <pre> * -S * Don't perform subtree raising. * </pre> * * <pre> * -L * Do not clean up after the tree has been built. * </pre> * * <pre> * -A * Laplace smoothing for predicted probabilities. * </pre> * * <pre> * -J * Do not use MDL correction for info gain on numeric attributes. * </pre> * * <pre> * -Q &lt;seed&gt; * Seed for random data shuffling (default 1). * </pre> * * <pre> * -doNotMakeSplitPointActualValue * Do not make split point actual value. 
* </pre> * * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision$ */ public class J48 extends AbstractClassifier implements OptionHandler, Drawable, Matchable, Sourcable, WeightedInstancesHandler, Summarizable, AdditionalMeasureProducer, TechnicalInformationHandler, PartitionGenerator { /** for serialization */ static final long serialVersionUID = -217733168393644444L; /** The decision tree */ protected ClassifierTree m_root; /** Unpruned tree? */ protected boolean m_unpruned = false; /** Collapse tree? */ protected boolean m_collapseTree = true; /** Confidence level */ protected float m_CF = 0.25f; /** Minimum number of instances */ protected int m_minNumObj = 2; /** Use MDL correction? */ protected boolean m_useMDLcorrection = true; /** * Determines whether probabilities are smoothed using Laplace correction when predictions are * generated */ protected boolean m_useLaplace = false; /** Use reduced error pruning? */ protected boolean m_reducedErrorPruning = false; /** Number of folds for reduced error pruning. */ protected int m_numFolds = 3; /** Binary splits on nominal attributes? */ protected boolean m_binarySplits = false; /** Subtree raising to be performed? */ protected boolean m_subtreeRaising = true; /** Cleanup after the tree has been built. */ protected boolean m_noCleanup = false; /** Random number seed for reduced-error pruning. */ protected int m_Seed = 1; /** Do not relocate split point to actual data value */ protected boolean m_doNotMakeSplitPointActualValue; /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for generating a pruned or unpruned C4.5 decision tree. For more " + "information, see\n\n" + this.getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed information about the * technical background of this class, e.g., paper reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.BOOK); result.setValue(Field.AUTHOR, "Ross Quinlan"); result.setValue(Field.YEAR, "1993"); result.setValue(Field.TITLE, "C4.5: Programs for Machine Learning"); result.setValue(Field.PUBLISHER, "Morgan Kaufmann Publishers"); result.setValue(Field.ADDRESS, "San Mateo, CA"); return result; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result; result = new Capabilities(this); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(0); return result; } /** * Generates the classifier. 
* * @param instances * the data to train the classifier with * @throws Exception * if classifier can't be built successfully */ @Override public void buildClassifier(final Instances instances) throws Exception { this.getCapabilities().testWithFail(instances); ModelSelection modSelection; if (this.m_binarySplits) { modSelection = new BinC45ModelSelection(this.m_minNumObj, instances, this.m_useMDLcorrection, this.m_doNotMakeSplitPointActualValue); } else { modSelection = new C45ModelSelection(this.m_minNumObj, instances, this.m_useMDLcorrection, this.m_doNotMakeSplitPointActualValue); } if (!this.m_reducedErrorPruning) { this.m_root = new C45PruneableClassifierTree(modSelection, !this.m_unpruned, this.m_CF, this.m_subtreeRaising, !this.m_noCleanup, this.m_collapseTree); } else { this.m_root = new PruneableClassifierTree(modSelection, !this.m_unpruned, this.m_numFolds, !this.m_noCleanup, this.m_Seed); } this.m_root.buildClassifier(instances); if (this.m_binarySplits) { ((BinC45ModelSelection) modSelection).cleanup(); } else { ((C45ModelSelection) modSelection).cleanup(); } } /** * Classifies an instance. * * @param instance * the instance to classify * @return the classification for the instance * @throws Exception * if instance can't be classified successfully */ @Override public double classifyInstance(final Instance instance) throws Exception { return this.m_root.classifyInstance(instance); } /** * Returns class probabilities for an instance. * * @param instance * the instance to calculate the class probabilities for * @return the class probabilities * @throws Exception * if distribution can't be computed successfully */ @Override public final double[] distributionForInstance(final Instance instance) throws Exception { return this.m_root.distributionForInstance(instance, this.m_useLaplace); } /** * Returns the type of graph this classifier represents. * * @return Drawable.TREE */ @Override public int graphType() { return Drawable.TREE; } /** * Returns graph describing the tree. * * @return the graph describing the tree * @throws Exception * if graph can't be computed */ @Override public String graph() throws Exception { return this.m_root.graph(); } /** * Returns tree in prefix order. * * @return the tree in prefix order * @throws Exception * if something goes wrong */ @Override public String prefix() throws Exception { return this.m_root.prefix(); } /** * Returns tree as an if-then statement. * * @param className * the name of the Java class * @return the tree as a Java if-then type statement * @throws Exception * if something goes wrong */ @Override public String toSource(final String className) throws Exception { StringBuffer[] source = this.m_root.toSource(className); return "class " + className + " {\n\n" + " public static double classify(Object[] i)\n" + " throws Exception {\n\n" + " double p = Double.NaN;\n" + source[0] // Assignment // code + " return p;\n" + " }\n" + source[1] // Support code + "}\n"; } /** * Returns an enumeration describing the available options. * * Valid options are: * <p> * * -U <br> * Use unpruned tree. * <p> * * -C confidence <br> * Set confidence threshold for pruning. (Default: 0.25) * <p> * * -M number <br> * Set minimum number of instances per leaf. (Default: 2) * <p> * * -R <br> * Use reduced error pruning. No subtree raising is performed. * <p> * * -N number <br> * Set number of folds for reduced error pruning. One fold is used as the pruning set. (Default: 3) * <p> * * -B <br> * Use binary splits for nominal attributes. 
* <p> * * -S <br> * Don't perform subtree raising. * <p> * * -L <br> * Do not clean up after the tree has been built. * * -A <br> * If set, Laplace smoothing is used for predicted probabilites. * <p> * * -Q <br> * The seed for reduced-error pruning. * <p> * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<>(13); newVector.addElement(new Option("\tUse unpruned tree.", "U", 0, "-U")); newVector.addElement(new Option("\tDo not collapse tree.", "O", 0, "-O")); newVector.addElement(new Option("\tSet confidence threshold for pruning.\n" + "\t(default 0.25)", "C", 1, "-C <pruning confidence>")); newVector.addElement(new Option("\tSet minimum number of instances per leaf.\n" + "\t(default 2)", "M", 1, "-M <minimum number of instances>")); newVector.addElement(new Option("\tUse reduced error pruning.", "R", 0, "-R")); newVector .addElement(new Option("\tSet number of folds for reduced error\n" + "\tpruning. One fold is used as pruning set.\n" + "\t(default 3)", "N", 1, "-N <number of folds>")); newVector.addElement(new Option("\tUse binary splits only.", "B", 0, "-B")); newVector.addElement(new Option("\tDo not perform subtree raising.", "S", 0, "-S")); newVector.addElement(new Option("\tDo not clean up after the tree has been built.", "L", 0, "-L")); newVector.addElement(new Option("\tLaplace smoothing for predicted probabilities.", "A", 0, "-A")); newVector.addElement(new Option("\tDo not use MDL correction for info gain on numeric attributes.", "J", 0, "-J")); newVector.addElement(new Option("\tSeed for random data shuffling (default 1).", "Q", 1, "-Q <seed>")); newVector.addElement(new Option("\tDo not make split point actual value.", "-doNotMakeSplitPointActualValue", 0, "-doNotMakeSplitPointActualValue")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -U * Use unpruned tree. * </pre> * * <pre> * -O * Do not collapse tree. * </pre> * * <pre> * -C &lt;pruning confidence&gt; * Set confidence threshold for pruning. * (default 0.25) * </pre> * * <pre> * -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf. * (default 2) * </pre> * * <pre> * -R * Use reduced error pruning. * </pre> * * <pre> * -N &lt;number of folds&gt; * Set number of folds for reduced error * pruning. One fold is used as pruning set. * (default 3) * </pre> * * <pre> * -B * Use binary splits only. * </pre> * * <pre> * -S * Don't perform subtree raising. * </pre> * * <pre> * -L * Do not clean up after the tree has been built. * </pre> * * <pre> * -A * Laplace smoothing for predicted probabilities. * </pre> * * <pre> * -J * Do not use MDL correction for info gain on numeric attributes. * </pre> * * <pre> * -Q &lt;seed&gt; * Seed for random data shuffling (default 1). * </pre> * * <pre> * -doNotMakeSplitPointActualValue * Do not make split point actual value. 
* </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { // Other options String minNumString = Utils.getOption('M', options); if (minNumString.length() != 0) { this.m_minNumObj = Integer.parseInt(minNumString); } else { this.m_minNumObj = 2; } this.m_binarySplits = Utils.getFlag('B', options); this.m_useLaplace = Utils.getFlag('A', options); this.m_useMDLcorrection = !Utils.getFlag('J', options); // Pruning options this.m_unpruned = Utils.getFlag('U', options); this.m_collapseTree = !Utils.getFlag('O', options); this.m_subtreeRaising = !Utils.getFlag('S', options); this.m_noCleanup = Utils.getFlag('L', options); this.m_doNotMakeSplitPointActualValue = Utils.getFlag("doNotMakeSplitPointActualValue", options); if ((this.m_unpruned) && (!this.m_subtreeRaising)) { throw new Exception("Subtree raising doesn't need to be unset for unpruned tree!"); } this.m_reducedErrorPruning = Utils.getFlag('R', options); if ((this.m_unpruned) && (this.m_reducedErrorPruning)) { throw new Exception("Unpruned tree and reduced error pruning can't be selected " + "simultaneously!"); } String confidenceString = Utils.getOption('C', options); if (confidenceString.length() != 0) { if (this.m_reducedErrorPruning) { throw new Exception("Setting the confidence doesn't make sense " + "for reduced error pruning."); } else if (this.m_unpruned) { throw new Exception("Doesn't make sense to change confidence for unpruned " + "tree!"); } else { this.m_CF = (new Float(confidenceString)).floatValue(); if ((this.m_CF <= 0) || (this.m_CF >= 1)) { throw new Exception("Confidence has to be greater than zero and smaller " + "than one!"); } } } else { this.m_CF = 0.25f; } String numFoldsString = Utils.getOption('N', options); if (numFoldsString.length() != 0) { if (!this.m_reducedErrorPruning) { throw new Exception("Setting the number of folds" + " doesn't make sense if" + " reduced error pruning is not selected."); } else { this.m_numFolds = Integer.parseInt(numFoldsString); } } else { this.m_numFolds = 3; } String seedString = Utils.getOption('Q', options); if (seedString.length() != 0) { this.m_Seed = Integer.parseInt(seedString); } else { this.m_Seed = 1; } super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the Classifier. 
* * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<>(); if (this.m_noCleanup) { options.add("-L"); } if (!this.m_collapseTree) { options.add("-O"); } if (this.m_unpruned) { options.add("-U"); } else { if (!this.m_subtreeRaising) { options.add("-S"); } if (this.m_reducedErrorPruning) { options.add("-R"); options.add("-N"); options.add("" + this.m_numFolds); options.add("-Q"); options.add("" + this.m_Seed); } else { options.add("-C"); options.add("" + this.m_CF); } } if (this.m_binarySplits) { options.add("-B"); } options.add("-M"); options.add("" + this.m_minNumObj); if (this.m_useLaplace) { options.add("-A"); } if (!this.m_useMDLcorrection) { options.add("-J"); } if (this.m_doNotMakeSplitPointActualValue) { options.add("-doNotMakeSplitPointActualValue"); } Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String seedTipText() { return "The seed used for randomizing the data " + "when reduced-error pruning is used."; } /** * Get the value of Seed. * * @return Value of Seed. */ public int getSeed() { return this.m_Seed; } /** * Set the value of Seed. * * @param newSeed * Value to assign to Seed. */ public void setSeed(final int newSeed) { this.m_Seed = newSeed; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String useLaplaceTipText() { return "Whether counts at leaves are smoothed based on Laplace."; } /** * Get the value of useLaplace. * * @return Value of useLaplace. */ public boolean getUseLaplace() { return this.m_useLaplace; } /** * Set the value of useLaplace. * * @param newuseLaplace * Value to assign to useLaplace. */ public void setUseLaplace(final boolean newuseLaplace) { this.m_useLaplace = newuseLaplace; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String useMDLcorrectionTipText() { return "Whether MDL correction is used when finding splits on numeric attributes."; } /** * Get the value of useMDLcorrection. * * @return Value of useMDLcorrection. */ public boolean getUseMDLcorrection() { return this.m_useMDLcorrection; } /** * Set the value of useMDLcorrection. * * @param newuseMDLcorrection * Value to assign to useMDLcorrection. */ public void setUseMDLcorrection(final boolean newuseMDLcorrection) { this.m_useMDLcorrection = newuseMDLcorrection; } /** * Returns a description of the classifier. 
* * @return a description of the classifier */ @Override public String toString() { if (this.m_root == null) { return "No classifier built"; } if (this.m_unpruned) { return "J48 unpruned tree\n------------------\n" + this.m_root.toString(); } else { return "J48 pruned tree\n------------------\n" + this.m_root.toString(); } } /** * Returns a superconcise version of the model * * @return a summary of the model */ @Override public String toSummaryString() { return "Number of leaves: " + this.m_root.numLeaves() + "\n" + "Size of the tree: " + this.m_root.numNodes() + "\n"; } /** * Returns the size of the tree * * @return the size of the tree */ public double measureTreeSize() { return this.m_root.numNodes(); } /** * Returns the number of leaves * * @return the number of leaves */ public double measureNumLeaves() { return this.m_root.numLeaves(); } /** * Returns the number of rules (same as number of leaves) * * @return the number of rules */ public double measureNumRules() { return this.m_root.numLeaves(); } /** * Returns an enumeration of the additional measure names * * @return an enumeration of the measure names */ @Override public Enumeration<String> enumerateMeasures() { Vector<String> newVector = new Vector<>(3); newVector.addElement("measureTreeSize"); newVector.addElement("measureNumLeaves"); newVector.addElement("measureNumRules"); return newVector.elements(); } /** * Returns the value of the named measure * * @param additionalMeasureName * the name of the measure to query for its value * @return the value of the named measure * @throws IllegalArgumentException * if the named measure is not supported */ @Override public double getMeasure(final String additionalMeasureName) { if (additionalMeasureName.compareToIgnoreCase("measureNumRules") == 0) { return this.measureNumRules(); } else if (additionalMeasureName.compareToIgnoreCase("measureTreeSize") == 0) { return this.measureTreeSize(); } else if (additionalMeasureName.compareToIgnoreCase("measureNumLeaves") == 0) { return this.measureNumLeaves(); } else { throw new IllegalArgumentException(additionalMeasureName + " not supported (j48)"); } } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String unprunedTipText() { return "Whether pruning is performed."; } /** * Get the value of unpruned. * * @return Value of unpruned. */ public boolean getUnpruned() { return this.m_unpruned; } /** * Set the value of unpruned. Turns reduced-error pruning off if set. * * @param v * Value to assign to unpruned. */ public void setUnpruned(final boolean v) { if (v) { this.m_reducedErrorPruning = false; } this.m_unpruned = v; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String collapseTreeTipText() { return "Whether parts are removed that do not reduce training error."; } /** * Get the value of collapseTree. * * @return Value of collapseTree. */ public boolean getCollapseTree() { return this.m_collapseTree; } /** * Set the value of collapseTree. * * @param v * Value to assign to collapseTree. 
*/ public void setCollapseTree(final boolean v) { this.m_collapseTree = v; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String confidenceFactorTipText() { return "The confidence factor used for pruning (smaller values incur " + "more pruning)."; } /** * Get the value of CF. * * @return Value of CF. */ public float getConfidenceFactor() { return this.m_CF; } /** * Set the value of CF. * * @param v * Value to assign to CF. */ public void setConfidenceFactor(final float v) { this.m_CF = v; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String minNumObjTipText() { return "The minimum number of instances per leaf."; } /** * Get the value of minNumObj. * * @return Value of minNumObj. */ public int getMinNumObj() { return this.m_minNumObj; } /** * Set the value of minNumObj. * * @param v * Value to assign to minNumObj. */ public void setMinNumObj(final int v) { this.m_minNumObj = v; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String reducedErrorPruningTipText() { return "Whether reduced-error pruning is used instead of C.4.5 pruning."; } /** * Get the value of reducedErrorPruning. * * @return Value of reducedErrorPruning. */ public boolean getReducedErrorPruning() { return this.m_reducedErrorPruning; } /** * Set the value of reducedErrorPruning. Turns unpruned trees off if set. * * @param v * Value to assign to reducedErrorPruning. */ public void setReducedErrorPruning(final boolean v) { if (v) { this.m_unpruned = false; } this.m_reducedErrorPruning = v; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String numFoldsTipText() { return "Determines the amount of data used for reduced-error pruning. " + " One fold is used for pruning, the rest for growing the tree."; } /** * Get the value of numFolds. * * @return Value of numFolds. */ public int getNumFolds() { return this.m_numFolds; } /** * Set the value of numFolds. * * @param v * Value to assign to numFolds. */ public void setNumFolds(final int v) { this.m_numFolds = v; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String binarySplitsTipText() { return "Whether to use binary splits on nominal attributes when " + "building the trees."; } /** * Get the value of binarySplits. * * @return Value of binarySplits. */ public boolean getBinarySplits() { return this.m_binarySplits; } /** * Set the value of binarySplits. * * @param v * Value to assign to binarySplits. */ public void setBinarySplits(final boolean v) { this.m_binarySplits = v; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String subtreeRaisingTipText() { return "Whether to consider the subtree raising operation when pruning."; } /** * Get the value of subtreeRaising. * * @return Value of subtreeRaising. */ public boolean getSubtreeRaising() { return this.m_subtreeRaising; } /** * Set the value of subtreeRaising. * * @param v * Value to assign to subtreeRaising. 
*/ public void setSubtreeRaising(final boolean v) { this.m_subtreeRaising = v; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String saveInstanceDataTipText() { return "Whether to save the training data for visualization."; } /** * Check whether instance data is to be saved. * * @return true if instance data is saved */ public boolean getSaveInstanceData() { return this.m_noCleanup; } /** * Set whether instance data is to be saved. * * @param v * true if instance data is to be saved */ public void setSaveInstanceData(final boolean v) { this.m_noCleanup = v; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String doNotMakeSplitPointActualValueTipText() { return "If true, the split point is not relocated to an actual data value." + " This can yield substantial speed-ups for large datasets with numeric attributes."; } /** * Gets the value of doNotMakeSplitPointActualValue. * * @return the value */ public boolean getDoNotMakeSplitPointActualValue() { return this.m_doNotMakeSplitPointActualValue; } /** * Sets the value of doNotMakeSplitPointActualValue. * * @param m_doNotMakeSplitPointActualValue * the value to set */ public void setDoNotMakeSplitPointActualValue(final boolean m_doNotMakeSplitPointActualValue) { this.m_doNotMakeSplitPointActualValue = m_doNotMakeSplitPointActualValue; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Builds the classifier to generate a partition. */ @Override public void generatePartition(final Instances data) throws Exception { this.buildClassifier(data); } /** * Computes an array that indicates node membership. */ @Override public double[] getMembershipValues(final Instance inst) throws Exception { return this.m_root.getMembershipValues(inst); } /** * Returns the number of elements in the partition. */ @Override public int numElements() throws Exception { return this.m_root.numNodes(); } /** * Main method for testing this class * * @param argv * the commandline options */ public static void main(final String[] argv) { runClassifier(new J48(), argv); } }
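The J48 options documented above map one-to-one onto the bean properties (setConfidenceFactor, setMinNumObj, and so on). As a minimal usage sketch — the ARFF path and the last-attribute-is-class convention are assumptions for illustration, not part of this source — a tree can be trained and queried like this:

import weka.classifiers.trees.J48;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class J48Example {
  public static void main(String[] args) throws Exception {
    // Load a dataset; the path is a placeholder for any nominal-class ARFF file.
    Instances data = DataSource.read("data/weather.nominal.arff");
    data.setClassIndex(data.numAttributes() - 1); // assume class is the last attribute

    J48 tree = new J48();
    tree.setConfidenceFactor(0.25f); // -C 0.25, the default C4.5 pruning confidence
    tree.setMinNumObj(2);            // -M 2, minimum instances per leaf

    tree.buildClassifier(data);
    System.out.println(tree);                   // pruned tree rendered as text
    System.out.println(tree.toSummaryString()); // number of leaves and tree size

    // Predict the class of the first training instance.
    Instance first = data.instance(0);
    double label = tree.classifyInstance(first);
    System.out.println("predicted: " + data.classAttribute().value((int) label));
  }
}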
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/LMT.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * LMT.java * Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.trees.j48.C45ModelSelection; import weka.classifiers.trees.j48.ModelSelection; import weka.classifiers.trees.lmt.LMTNode; import weka.classifiers.trees.lmt.ResidualModelSelection; import weka.core.AdditionalMeasureProducer; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Drawable; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.filters.Filter; import weka.filters.supervised.attribute.NominalToBinary; import weka.filters.unsupervised.attribute.ReplaceMissingValues; /** * <!-- globalinfo-start --> Classifier for building 'logistic model trees', * which are classification trees with logistic regression functions at the * leaves. The algorithm can deal with binary and multi-class target variables, * numeric and nominal attributes and missing values.<br/> * <br/> * For more information see: <br/> * <br/> * Niels Landwehr, Mark Hall, Eibe Frank (2005). Logistic Model Trees. Machine * Learning. 95(1-2):161-205.<br/> * <br/> * Marc Sumner, Eibe Frank, Mark Hall: Speeding up Logistic Model Tree * Induction. In: 9th European Conference on Principles and Practice of * Knowledge Discovery in Databases, 675-683, 2005. * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;article{Landwehr2005, * author = {Niels Landwehr and Mark Hall and Eibe Frank}, * journal = {Machine Learning}, * number = {1-2}, * pages = {161-205}, * title = {Logistic Model Trees}, * volume = {95}, * year = {2005} * } * * &#64;inproceedings{Sumner2005, * author = {Marc Sumner and Eibe Frank and Mark Hall}, * booktitle = {9th European Conference on Principles and Practice of Knowledge Discovery in Databases}, * pages = {675-683}, * publisher = {Springer}, * title = {Speeding up Logistic Model Tree Induction}, * year = {2005} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -B * Binary splits (convert nominal attributes to binary ones) * </pre> * * <pre> * -R * Split on residuals instead of class values * </pre> * * <pre> * -C * Use cross-validation for boosting at all nodes (i.e., disable heuristic) * </pre> * * <pre> * -P * Use error on probabilities instead of misclassification error for stopping criterion of LogitBoost. 
* </pre> * * <pre> * -I &lt;numIterations&gt; * Set fixed number of iterations for LogitBoost (instead of using cross-validation) * </pre> * * <pre> * -M &lt;numInstances&gt; * Set minimum number of instances at which a node can be split (default 15) * </pre> * * <pre> * -W &lt;beta&gt; * Set beta for weight trimming for LogitBoost. Set to 0 (default) for no weight trimming. * </pre> * * <pre> * -A * The AIC is used to choose the best iteration. * </pre> * * <pre> * -doNotMakeSplitPointActualValue * Do not make split point actual value. * </pre> * * <!-- options-end --> * * @author Niels Landwehr * @author Marc Sumner * @version $Revision$ */ public class LMT extends AbstractClassifier implements OptionHandler, AdditionalMeasureProducer, Drawable, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -1113212459618104943L; /** Filter to replace missing values */ protected ReplaceMissingValues m_replaceMissing; /** Filter to replace nominal attributes */ protected NominalToBinary m_nominalToBinary; /** root of the logistic model tree */ protected LMTNode m_tree; /** * use heuristic that determines the number of LogitBoost iterations only once * in the beginning? */ protected boolean m_fastRegression; /** convert nominal attributes to binary ? */ protected boolean m_convertNominal; /** split on residuals? */ protected boolean m_splitOnResiduals; /** * use error on probabilties instead of misclassification for stopping * criterion of LogitBoost? */ protected boolean m_errorOnProbabilities; /** minimum number of instances at which a node is considered for splitting */ protected int m_minNumInstances; /** if non-zero, use fixed number of iterations for LogitBoost */ protected int m_numBoostingIterations; /** * Threshold for trimming weights. Instances with a weight lower than this (as * a percentage of total weights) are not included in the regression fit. **/ protected double m_weightTrimBeta; /** If true, the AIC is used to choose the best LogitBoost iteration */ private boolean m_useAIC = false; /** Do not relocate split point to actual data value */ private boolean m_doNotMakeSplitPointActualValue; /** * Creates an instance of LMT with standard options */ public LMT() { m_fastRegression = true; m_numBoostingIterations = -1; m_minNumInstances = 15; m_weightTrimBeta = 0; m_useAIC = false; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Builds the classifier. * * @param data the data to train with * @throws Exception if classifier can't be built successfully */ @Override public void buildClassifier(Instances data) throws Exception { // can classifier handle the data? 
getCapabilities().testWithFail(data); // remove instances with missing class Instances filteredData = new Instances(data); filteredData.deleteWithMissingClass(); // replace missing values m_replaceMissing = new ReplaceMissingValues(); m_replaceMissing.setInputFormat(filteredData); filteredData = Filter.useFilter(filteredData, m_replaceMissing); // possibly convert nominal attributes globally m_nominalToBinary = new NominalToBinary(); m_nominalToBinary.setInputFormat(filteredData); if (m_convertNominal) { filteredData = Filter.useFilter(filteredData, m_nominalToBinary); } int minNumInstances = 2; // create ModelSelection object, either for splits on the residuals or for // splits on the class value ModelSelection modSelection; if (m_splitOnResiduals) { modSelection = new ResidualModelSelection(minNumInstances); } else { modSelection = new C45ModelSelection(minNumInstances, filteredData, true, m_doNotMakeSplitPointActualValue); } // create tree root m_tree = new LMTNode(modSelection, m_numBoostingIterations, m_fastRegression, m_errorOnProbabilities, m_minNumInstances, m_weightTrimBeta, m_useAIC, m_nominalToBinary, m_numDecimalPlaces); // build tree m_tree.buildClassifier(filteredData); if (modSelection instanceof C45ModelSelection) { ((C45ModelSelection) modSelection).cleanup(); } } /** * Returns class probabilities for an instance. * * @param instance the instance to compute the distribution for * @return the class probabilities * @throws Exception if distribution can't be computed successfully */ @Override public double[] distributionForInstance(Instance instance) throws Exception { // replace missing values m_replaceMissing.input(instance); instance = m_replaceMissing.output(); // possibly convert nominal attributes if (m_convertNominal) { m_nominalToBinary.input(instance); instance = m_nominalToBinary.output(); } return m_tree.distributionForInstance(instance); } /** * Classifies an instance. * * @param instance the instance to classify * @return the classification * @throws Exception if instance can't be classified successfully */ @Override public double classifyInstance(Instance instance) throws Exception { double maxProb = -1; int maxIndex = 0; // classify by maximum probability double[] probs = distributionForInstance(instance); for (int j = 0; j < instance.numClasses(); j++) { if (Utils.gr(probs[j], maxProb)) { maxIndex = j; maxProb = probs[j]; } } return maxIndex; } /** * Returns a description of the classifier. * * @return a string representation of the classifier */ @Override public String toString() { if (m_tree != null) { return "Logistic model tree \n------------------\n" + m_tree.toString(); } else { return "No tree build"; } } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<Option>(9); newVector.addElement(new Option( "\tBinary splits (convert nominal attributes to binary ones)", "B", 0, "-B")); newVector.addElement(new Option( "\tSplit on residuals instead of class values", "R", 0, "-R")); newVector .addElement(new Option( "\tUse cross-validation for boosting at all nodes (i.e., disable heuristic)", "C", 0, "-C")); newVector.addElement(new Option( "\tUse error on probabilities instead of misclassification error " + "for stopping criterion of LogitBoost.", "P", 0, "-P")); newVector.addElement(new Option( "\tSet fixed number of iterations for LogitBoost (instead of using " + "cross-validation)", "I", 1, "-I <numIterations>")); newVector .addElement(new Option( "\tSet minimum number of instances at which a node can be split (default 15)", "M", 1, "-M <numInstances>")); newVector .addElement(new Option( "\tSet beta for weight trimming for LogitBoost. Set to 0 (default) for no weight trimming.", "W", 1, "-W <beta>")); newVector.addElement(new Option( "\tThe AIC is used to choose the best iteration.", "A", 0, "-A")); newVector.addElement(new Option("\tDo not make split point actual value.", "-doNotMakeSplitPointActualValue", 0, "-doNotMakeSplitPointActualValue")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -B * Binary splits (convert nominal attributes to binary ones) * </pre> * * <pre> * -R * Split on residuals instead of class values * </pre> * * <pre> * -C * Use cross-validation for boosting at all nodes (i.e., disable heuristic) * </pre> * * <pre> * -P * Use error on probabilities instead of misclassification error for stopping criterion of LogitBoost. * </pre> * * <pre> * -I &lt;numIterations&gt; * Set fixed number of iterations for LogitBoost (instead of using cross-validation) * </pre> * * <pre> * -M &lt;numInstances&gt; * Set minimum number of instances at which a node can be split (default 15) * </pre> * * <pre> * -W &lt;beta&gt; * Set beta for weight trimming for LogitBoost. Set to 0 (default) for no weight trimming. * </pre> * * <pre> * -A * The AIC is used to choose the best iteration. * </pre> * * <pre> * -doNotMakeSplitPointActualValue * Do not make split point actual value. * </pre> * * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { setConvertNominal(Utils.getFlag('B', options)); setSplitOnResiduals(Utils.getFlag('R', options)); setFastRegression(!Utils.getFlag('C', options)); setErrorOnProbabilities(Utils.getFlag('P', options)); String optionString = Utils.getOption('I', options); if (optionString.length() != 0) { setNumBoostingIterations((new Integer(optionString)).intValue()); } optionString = Utils.getOption('M', options); if (optionString.length() != 0) { setMinNumInstances((new Integer(optionString)).intValue()); } optionString = Utils.getOption('W', options); if (optionString.length() != 0) { setWeightTrimBeta((new Double(optionString)).doubleValue()); } setUseAIC(Utils.getFlag('A', options)); m_doNotMakeSplitPointActualValue = Utils.getFlag( "doNotMakeSplitPointActualValue", options); super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the Classifier. 
* * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<String>(); if (getConvertNominal()) { options.add("-B"); } if (getSplitOnResiduals()) { options.add("-R"); } if (!getFastRegression()) { options.add("-C"); } if (getErrorOnProbabilities()) { options.add("-P"); } options.add("-I"); options.add("" + getNumBoostingIterations()); options.add("-M"); options.add("" + getMinNumInstances()); options.add("-W"); options.add("" + getWeightTrimBeta()); if (getUseAIC()) { options.add("-A"); } if (m_doNotMakeSplitPointActualValue) { options.add("-doNotMakeSplitPointActualValue"); } Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * Get the value of weightTrimBeta. */ public double getWeightTrimBeta() { return m_weightTrimBeta; } /** * Get the value of useAIC. * * @return Value of useAIC. */ public boolean getUseAIC() { return m_useAIC; } /** * Set the value of weightTrimBeta. */ public void setWeightTrimBeta(double n) { m_weightTrimBeta = n; } /** * Set the value of useAIC. * * @param c Value to assign to useAIC. */ public void setUseAIC(boolean c) { m_useAIC = c; } /** * Get the value of convertNominal. * * @return Value of convertNominal. */ public boolean getConvertNominal() { return m_convertNominal; } /** * Get the value of splitOnResiduals. * * @return Value of splitOnResiduals. */ public boolean getSplitOnResiduals() { return m_splitOnResiduals; } /** * Get the value of fastRegression. * * @return Value of fastRegression. */ public boolean getFastRegression() { return m_fastRegression; } /** * Get the value of errorOnProbabilities. * * @return Value of errorOnProbabilities. */ public boolean getErrorOnProbabilities() { return m_errorOnProbabilities; } /** * Get the value of numBoostingIterations. * * @return Value of numBoostingIterations. */ public int getNumBoostingIterations() { return m_numBoostingIterations; } /** * Get the value of minNumInstances. * * @return Value of minNumInstances. */ public int getMinNumInstances() { return m_minNumInstances; } /** * Set the value of convertNominal. * * @param c Value to assign to convertNominal. */ public void setConvertNominal(boolean c) { m_convertNominal = c; } /** * Set the value of splitOnResiduals. * * @param c Value to assign to splitOnResiduals. */ public void setSplitOnResiduals(boolean c) { m_splitOnResiduals = c; } /** * Set the value of fastRegression. * * @param c Value to assign to fastRegression. */ public void setFastRegression(boolean c) { m_fastRegression = c; } /** * Set the value of errorOnProbabilities. * * @param c Value to assign to errorOnProbabilities. */ public void setErrorOnProbabilities(boolean c) { m_errorOnProbabilities = c; } /** * Set the value of numBoostingIterations. * * @param c Value to assign to numBoostingIterations. */ public void setNumBoostingIterations(int c) { m_numBoostingIterations = c; } /** * Set the value of minNumInstances. * * @param c Value to assign to minNumInstances. */ public void setMinNumInstances(int c) { m_minNumInstances = c; } /** * Returns the type of graph this classifier represents. * * @return Drawable.TREE */ @Override public int graphType() { return Drawable.TREE; } /** * Returns graph describing the tree. 
* * @return the graph describing the tree * @throws Exception if graph can't be computed */ @Override public String graph() throws Exception { return m_tree.graph(); } /** * Returns the size of the tree * * @return the size of the tree */ public int measureTreeSize() { return m_tree.numNodes(); } /** * Returns the number of leaves in the tree * * @return the number of leaves in the tree */ public int measureNumLeaves() { return m_tree.numLeaves(); } /** * Returns an enumeration of the additional measure names * * @return an enumeration of the measure names */ @Override public Enumeration<String> enumerateMeasures() { Vector<String> newVector = new Vector<String>(2); newVector.addElement("measureTreeSize"); newVector.addElement("measureNumLeaves"); return newVector.elements(); } /** * Returns the value of the named measure * * @param additionalMeasureName the name of the measure to query for its value * @return the value of the named measure * @throws IllegalArgumentException if the named measure is not supported */ @Override public double getMeasure(String additionalMeasureName) { if (additionalMeasureName.compareToIgnoreCase("measureTreeSize") == 0) { return measureTreeSize(); } else if (additionalMeasureName.compareToIgnoreCase("measureNumLeaves") == 0) { return measureNumLeaves(); } else { throw new IllegalArgumentException(additionalMeasureName + " not supported (LMT)"); } } /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter * gui */ public String globalInfo() { return "Classifier for building 'logistic model trees', which are classification trees with " + "logistic regression functions at the leaves. The algorithm can deal with binary and multi-class " + "target variables, numeric and nominal attributes and missing values.\n\n" + "For more information see: \n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed * information about the technical background of this class, e.g., paper * reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.ARTICLE); result .setValue(Field.AUTHOR, "Niels Landwehr and Mark Hall and Eibe Frank"); result.setValue(Field.TITLE, "Logistic Model Trees"); result.setValue(Field.JOURNAL, "Machine Learning"); result.setValue(Field.YEAR, "2005"); result.setValue(Field.VOLUME, "95"); result.setValue(Field.PAGES, "161-205"); result.setValue(Field.NUMBER, "1-2"); additional = result.add(Type.INPROCEEDINGS); additional.setValue(Field.AUTHOR, "Marc Sumner and Eibe Frank and Mark Hall"); additional.setValue(Field.TITLE, "Speeding up Logistic Model Tree Induction"); additional .setValue( Field.BOOKTITLE, "9th European Conference on Principles and Practice of Knowledge Discovery in Databases"); additional.setValue(Field.YEAR, "2005"); additional.setValue(Field.PAGES, "675-683"); additional.setValue(Field.PUBLISHER, "Springer"); return result; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String convertNominalTipText() { return "Convert all nominal attributes to binary ones before building the tree. 
" + "This means that all splits in the final tree will be binary."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String splitOnResidualsTipText() { return "Set splitting criterion based on the residuals of LogitBoost. " + "There are two possible splitting criteria for LMT: the default is to use the C4.5 " + "splitting criterion that uses information gain on the class variable. The other splitting " + "criterion tries to improve the purity in the residuals produces when fitting the logistic " + "regression functions. The choice of the splitting criterion does not usually affect classification " + "accuracy much, but can produce different trees."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String fastRegressionTipText() { return "Use heuristic that avoids cross-validating the number of Logit-Boost iterations at every node. " + "When fitting the logistic regression functions at a node, LMT has to determine the number of LogitBoost " + "iterations to run. Originally, this number was cross-validated at every node in the tree. " + "To save time, this heuristic cross-validates the number only once and then uses that number at every " + "node in the tree. Usually this does not decrease accuracy but improves runtime considerably."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String errorOnProbabilitiesTipText() { return "Minimize error on probabilities instead of misclassification error when cross-validating the number " + "of LogitBoost iterations. When set, the number of LogitBoost iterations is chosen that minimizes " + "the root mean squared error instead of the misclassification error."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String numBoostingIterationsTipText() { return "Set a fixed number of iterations for LogitBoost. If >= 0, this sets a fixed number of LogitBoost " + "iterations that is used everywhere in the tree. If < 0, the number is cross-validated."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String minNumInstancesTipText() { return "Set the minimum number of instances at which a node is considered for splitting. " + "The default value is 15."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String weightTrimBetaTipText() { return "Set the beta value used for weight trimming in LogitBoost. " + "Only instances carrying (1 - beta)% of the weight from previous iteration " + "are used in the next iteration. Set to 0 for no weight trimming. " + "The default value is 0."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String useAICTipText() { return "The AIC is used to determine when to stop LogitBoost iterations. 
" + "The default is not to use AIC."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String doNotMakeSplitPointActualValueTipText() { return "If true, the split point is not relocated to an actual data value." + " This can yield substantial speed-ups for large datasets with numeric attributes."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String numDecimalPlacesTipText() { return "The number of decimal places to be used for the output of coefficients."; } /** * Gets the value of doNotMakeSplitPointActualValue. * * @return the value */ public boolean getDoNotMakeSplitPointActualValue() { return m_doNotMakeSplitPointActualValue; } /** * Sets the value of doNotMakeSplitPointActualValue. * * @param m_doNotMakeSplitPointActualValue the value to set */ public void setDoNotMakeSplitPointActualValue( boolean m_doNotMakeSplitPointActualValue) { this.m_doNotMakeSplitPointActualValue = m_doNotMakeSplitPointActualValue; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class * * @param argv the commandline options */ public static void main(String[] argv) { runClassifier(new LMT(), argv); } }
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/M5P.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * M5P.java * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.trees.m5.M5Base; import weka.classifiers.trees.m5.Rule; import weka.core.Drawable; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.Utils; /** * <!-- globalinfo-start --> M5Base. Implements base routines for generating M5 * Model trees and rules<br/> * The original algorithm M5 was invented by R. Quinlan and Yong Wang made * improvements.<br/> * <br/> * For more information see:<br/> * <br/> * Ross J. Quinlan: Learning with Continuous Classes. In: 5th Australian Joint * Conference on Artificial Intelligence, Singapore, 343-348, 1992.<br/> * <br/> * Y. Wang, I. H. Witten: Induction of model trees for predicting continuous * classes. In: Poster papers of the 9th European Conference on Machine * Learning, 1997. * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;inproceedings{Quinlan1992, * address = {Singapore}, * author = {Ross J. Quinlan}, * booktitle = {5th Australian Joint Conference on Artificial Intelligence}, * pages = {343-348}, * publisher = {World Scientific}, * title = {Learning with Continuous Classes}, * year = {1992} * } * * &#64;inproceedings{Wang1997, * author = {Y. Wang and I. H. Witten}, * booktitle = {Poster papers of the 9th European Conference on Machine Learning}, * publisher = {Springer}, * title = {Induction of model trees for predicting continuous classes}, * year = {1997} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -N * Use unpruned tree/rules * </pre> * * <pre> * -U * Use unsmoothed predictions * </pre> * * <pre> * -R * Build regression tree/rule rather than a model tree/rule * </pre> * * <pre> * -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf * (default 4) * </pre> * * <pre> * -L * Save instances at the nodes in * the tree (for visualization purposes) * </pre> * * <!-- options-end --> * * @author <a href="mailto:mhall@cs.waikato.ac.nz">Mark Hall</a> * @version $Revision$ */ public class M5P extends M5Base implements Drawable { /** for serialization */ static final long serialVersionUID = -6118439039768244417L; /** * returns information about the classifier * * @return a description suitable for displaying in the explorer/experimenter * gui */ public String globalInfo() { return "Implements the M5' model tree algorithm. " + "The original M5 algorithm was developed by R. Quinlan. Yong Wang " + "made improvements leading to M5'.\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } /** * Creates a new <code>M5P</code> instance. */ public M5P() { super(); setGenerateRules(false); } /** * Returns the type of graph this classifier represents. 
* * @return Drawable.TREE */ @Override public int graphType() { return Drawable.TREE; } /** * Return a dot style String describing the tree. * * @return a <code>String</code> value * @throws Exception if an error occurs */ @Override public String graph() throws Exception { StringBuffer text = new StringBuffer(); text.append("digraph M5Tree {\n"); Rule temp = m_ruleSet.get(0); temp.topOfTree().graph(text); text.append("}\n"); return text.toString(); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String saveInstancesTipText() { return "Whether to save instance data at each node in the tree for " + "visualization purposes."; } /** * Set whether to save instance data at each node in the tree for * visualization purposes * * @param save a <code>boolean</code> value */ public void setSaveInstances(boolean save) { m_saveInstances = save; } /** * Get whether instance data is being save. * * @return a <code>boolean</code> value */ public boolean getSaveInstances() { return m_saveInstances; } /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ @Override public Enumeration<Option> listOptions() { Enumeration<Option> superOpts = super.listOptions(); Vector<Option> newVector = new Vector<Option>(); while (superOpts.hasMoreElements()) { newVector.addElement(superOpts.nextElement()); } newVector.addElement(new Option("\tSave instances at the nodes in\n" + "\tthe tree (for visualization purposes)", "L", 0, "-L")); return newVector.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -N * Use unpruned tree/rules * </pre> * * <pre> * -U * Use unsmoothed predictions * </pre> * * <pre> * -R * Build regression tree/rule rather than a model tree/rule * </pre> * * <pre> * -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf * (default 4) * </pre> * * <pre> * -L * Save instances at the nodes in * the tree (for visualization purposes) * </pre> * * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { setSaveInstances(Utils.getFlag('L', options)); super.setOptions(options); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { String[] superOpts = super.getOptions(); String[] options = new String[superOpts.length + 1]; int current = superOpts.length; for (int i = 0; i < current; i++) { options[i] = superOpts[i]; } if (getSaveInstances()) { options[current++] = "-L"; } while (current < options.length) { options[current++] = ""; } return options; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method by which this class can be tested * * @param args an array of options */ public static void main(String[] args) { runClassifier(new M5P(), args); } }
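M5P inherits its training and option plumbing from M5Base, so only the tree-specific pieces appear in this file. A minimal sketch of fitting a model tree to a numeric-class dataset, using the -M option documented above (the ARFF path is a placeholder assumption; add "-R" to build a plain regression tree instead of a model tree):

import weka.classifiers.trees.M5P;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class M5PExample {
  public static void main(String[] args) throws Exception {
    Instances data = DataSource.read("data/cpu.arff"); // placeholder numeric-class dataset
    data.setClassIndex(data.numAttributes() - 1);

    M5P m5p = new M5P();
    m5p.setOptions(new String[] { "-M", "4" }); // -M 4: minimum instances per leaf (default)

    m5p.buildClassifier(data);
    System.out.println(m5p);         // model tree with linear models at the leaves
    System.out.println(m5p.graph()); // dot-format graph, as produced by graph() above

    // Numeric prediction for the first training instance.
    double prediction = m5p.classifyInstance(data.instance(0));
    System.out.println("predicted value: " + prediction);
  }
}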
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/REPTree.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * REPTree.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees; import java.io.Serializable; import java.util.Collections; import java.util.Enumeration; import java.util.LinkedList; import java.util.Queue; import java.util.Random; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.Sourcable; import weka.classifiers.rules.ZeroR; import weka.core.AdditionalMeasureProducer; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.ContingencyTables; import weka.core.Drawable; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.PartitionGenerator; import weka.core.Randomizable; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.WeightedInstancesHandler; /** * <!-- globalinfo-start --> Fast decision tree learner. Builds a decision/regression tree using * information gain/variance and prunes it using reduced-error pruning (with backfitting). Only * sorts values for numeric attributes once. Missing values are dealt with by splitting the * corresponding instances into pieces (i.e. as in C4.5). * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf (default 2). * </pre> * * <pre> * -V &lt;minimum variance for split&gt; * Set minimum numeric class variance proportion * of train variance for split (default 1e-3). * </pre> * * <pre> * -N &lt;number of folds&gt; * Number of folds for reduced error pruning (default 3). * </pre> * * <pre> * -S &lt;seed&gt; * Seed for random data shuffling (default 1). * </pre> * * <pre> * -P * No pruning. * </pre> * * <pre> * -L * Maximum tree depth (default -1, no maximum) * </pre> * * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision$ */ public class REPTree extends AbstractClassifier implements OptionHandler, WeightedInstancesHandler, Drawable, AdditionalMeasureProducer, Sourcable, PartitionGenerator, Randomizable { /** for serialization */ static final long serialVersionUID = -9216785998198681299L; /** ZeroR model that is used if no attributes are present. */ protected ZeroR m_zeroR; /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "Fast decision tree learner. Builds a decision/regression tree using " + "information gain/variance and prunes it using reduced-error pruning " + "(with backfitting). Only sorts values for numeric attributes " + "once. Missing values are dealt with by splitting the corresponding " + "instances into pieces (i.e. 
as in C4.5)."; } /** An inner class for building and storing the tree structure */ protected class Tree implements Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = -1635481717888437935L; /** The header information (for printing the tree). */ protected Instances m_Info = null; /** The subtrees of this tree. */ protected Tree[] m_Successors; /** The attribute to split on. */ protected int m_Attribute = -1; /** The split point. */ protected double m_SplitPoint = Double.NaN; /** The proportions of training instances going down each branch. */ protected double[] m_Prop = null; /** * Class probabilities from the training data in the nominal case. Holds the mean in the numeric * case. */ protected double[] m_ClassProbs = null; /** * The (unnormalized) class distribution in the nominal case. Holds the sum of squared errors and * the weight in the numeric case. */ protected double[] m_Distribution = null; /** * Class distribution of hold-out set at node in the nominal case. Straight sum of weights plus sum * of weighted targets in the numeric case (i.e. array has only two elements). */ protected double[] m_HoldOutDist = null; /** * The hold-out error of the node. The number of miss-classified instances in the nominal case, the * sum of squared errors in the numeric case. */ protected double m_HoldOutError = 0; /** * Computes class distribution of an instance using the tree. * * @param instance * the instance to compute the distribution for * @return the distribution * @throws Exception * if computation fails */ protected double[] distributionForInstance(final Instance instance) throws Exception { double[] returnedDist = null; if (this.m_Attribute > -1) { // Node is not a leaf if (instance.isMissing(this.m_Attribute)) { // Value is missing returnedDist = new double[this.m_Info.numClasses()]; // Split instance up for (int i = 0; i < this.m_Successors.length; i++) { double[] help = this.m_Successors[i].distributionForInstance(instance); if (help != null) { for (int j = 0; j < help.length; j++) { returnedDist[j] += this.m_Prop[i] * help[j]; } } } } else if (this.m_Info.attribute(this.m_Attribute).isNominal()) { // For nominal attributes returnedDist = this.m_Successors[(int) instance.value(this.m_Attribute)].distributionForInstance(instance); } else { // For numeric attributes if (instance.value(this.m_Attribute) < this.m_SplitPoint) { returnedDist = this.m_Successors[0].distributionForInstance(instance); } else { returnedDist = this.m_Successors[1].distributionForInstance(instance); } } } if ((this.m_Attribute == -1) || (returnedDist == null)) { // Node is a leaf or successor is empty if (this.m_ClassProbs == null) { return this.m_ClassProbs; } return this.m_ClassProbs.clone(); } else { return returnedDist; } } /** * Returns a string containing java source code equivalent to the test made at this node. The * instance being tested is called "i". This routine assumes to be called in the order of branching, * enabling us to set the >= condition test (the last one) of a numeric splitpoint to just "true" * (because being there in the flow implies that the previous less-than test failed). 
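 * <p/>
 * For illustration only (hypothetical attribute index and split point): a numeric split on
 * attribute 3 at 2.5 would yield "((Double)i[3]).doubleValue() &lt; 2.5" for index 0,
 * "true" for the second branch, and the missing-value test "i[3] == null" for index -1.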
* * @param index * index of the value tested * @return a value of type 'String' */ public final String sourceExpression(final int index) { StringBuffer expr = null; if (index < 0) { return "i[" + this.m_Attribute + "] == null"; } if (this.m_Info.attribute(this.m_Attribute).isNominal()) { expr = new StringBuffer("i["); expr.append(this.m_Attribute).append("]"); expr.append(".equals(\"").append(this.m_Info.attribute(this.m_Attribute).value(index)).append("\")"); } else { expr = new StringBuffer(""); if (index == 0) { expr.append("((Double)i[").append(this.m_Attribute).append("]).doubleValue() < ").append(this.m_SplitPoint); } else { expr.append("true"); } } return expr.toString(); } /** * Returns source code for the tree as if-then statements. The class is assigned to variable "p", * and assumes the tested instance is named "i". The results are returned as two stringbuffers: a * section of code for assignment of the class, and a section of code containing support code (eg: * other support methods). * <p/> * TODO: If the outputted source code encounters a missing value for the evaluated attribute, it * stops branching and uses the class distribution of the current node to decide the return value. * This is unlike the behaviour of distributionForInstance(). * * @param className * the classname that this static classifier has * @param parent * parent node of the current node * @return an array containing two stringbuffers, the first string containing assignment code, and * the second containing source for support code. * @throws Exception * if something goes wrong */ public StringBuffer[] toSource(final String className, final Tree parent) throws Exception { StringBuffer[] result = new StringBuffer[2]; double[] currentProbs; if (this.m_ClassProbs == null) { currentProbs = parent.m_ClassProbs; } else { currentProbs = this.m_ClassProbs; } long printID = nextID(); // Is this a leaf? if (this.m_Attribute == -1) { result[0] = new StringBuffer(" p = "); if (this.m_Info.classAttribute().isNumeric()) { result[0].append(currentProbs[0]); } else { result[0].append(Utils.maxIndex(currentProbs)); } result[0].append(";\n"); result[1] = new StringBuffer(""); } else { StringBuffer text = new StringBuffer(""); StringBuffer atEnd = new StringBuffer(""); text.append(" static double N").append(Integer.toHexString(this.hashCode()) + printID).append("(Object []i) {\n").append(" double p = Double.NaN;\n"); text.append(" /* " + this.m_Info.attribute(this.m_Attribute).name() + " */\n"); // Missing attribute? text.append(" if (" + this.sourceExpression(-1) + ") {\n").append(" p = "); if (this.m_Info.classAttribute().isNumeric()) { text.append(currentProbs[0] + ";\n"); } else { text.append(Utils.maxIndex(currentProbs) + ";\n"); } text.append(" } "); // Branching of the tree for (int i = 0; i < this.m_Successors.length; i++) { text.append("else if (" + this.sourceExpression(i) + ") {\n"); // Is the successor a leaf? 
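// Leaf successors are inlined below as direct assignments to p; non-leaf successors
// recurse into toSource(), and the extra static methods they generate are collected in
// atEnd and appended to the support-code buffer.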
if (this.m_Successors[i].m_Attribute == -1) { double[] successorProbs = this.m_Successors[i].m_ClassProbs; if (successorProbs == null) { successorProbs = this.m_ClassProbs; } text.append(" p = "); if (this.m_Info.classAttribute().isNumeric()) { text.append(successorProbs[0] + ";\n"); } else { text.append(Utils.maxIndex(successorProbs) + ";\n"); } } else { StringBuffer[] sub = this.m_Successors[i].toSource(className, this); text.append("" + sub[0]); atEnd.append("" + sub[1]); } text.append(" } "); if (i == this.m_Successors.length - 1) { text.append("\n"); } } text.append(" return p;\n }\n"); result[0] = new StringBuffer(" p = " + className + ".N"); result[0].append(Integer.toHexString(this.hashCode()) + printID).append("(i);\n"); result[1] = text.append("" + atEnd); } return result; } /** * Outputs one node for graph. * * @param text * the buffer to append the output to * @param num * the current node id * @param parent * the parent of the nodes * @return the next node id * @throws Exception * if something goes wrong */ protected int toGraph(final StringBuffer text, int num, final Tree parent) throws Exception { num++; if (this.m_Attribute == -1) { text.append("N" + Integer.toHexString(Tree.this.hashCode()) + " [label=\"" + num + Utils.backQuoteChars(this.leafString(parent)) + "\"" + "shape=box]\n"); } else { text.append("N" + Integer.toHexString(Tree.this.hashCode()) + " [label=\"" + num + ": " + Utils.backQuoteChars(this.m_Info.attribute(this.m_Attribute).name()) + "\"]\n"); for (int i = 0; i < this.m_Successors.length; i++) { text.append("N" + Integer.toHexString(Tree.this.hashCode()) + "->" + "N" + Integer.toHexString(this.m_Successors[i].hashCode()) + " [label=\""); if (this.m_Info.attribute(this.m_Attribute).isNumeric()) { if (i == 0) { text.append(" < " + Utils.doubleToString(this.m_SplitPoint, REPTree.this.getNumDecimalPlaces())); } else { text.append(" >= " + Utils.doubleToString(this.m_SplitPoint, REPTree.this.getNumDecimalPlaces())); } } else { text.append(" = " + Utils.backQuoteChars(this.m_Info.attribute(this.m_Attribute).value(i))); } text.append("\"]\n"); num = this.m_Successors[i].toGraph(text, num, this); } } return num; } /** * Outputs description of a leaf node. 
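 * <p/>
 * Nominal case: " : predictedClass (totalWeight/misclassifiedWeight) [holdOutWeight/holdOutErrorWeight]".
 * Numeric case: the class mean followed by (weight/averageSquaredError) pairs for the
 * training and hold-out data.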
* * @param parent * the parent of the node * @return the description of the node * @throws Exception * if generation fails */ protected String leafString(final Tree parent) throws Exception { if (this.m_Info.classAttribute().isNumeric()) { double classMean; if (this.m_ClassProbs == null) { classMean = parent.m_ClassProbs[0]; } else { classMean = this.m_ClassProbs[0]; } StringBuffer buffer = new StringBuffer(); buffer.append(" : " + Utils.doubleToString(classMean, REPTree.this.getNumDecimalPlaces())); double avgError = 0; if (this.m_Distribution[1] > 0) { avgError = this.m_Distribution[0] / this.m_Distribution[1]; } buffer.append(" (" + Utils.doubleToString(this.m_Distribution[1], REPTree.this.getNumDecimalPlaces()) + "/" + Utils.doubleToString(avgError, REPTree.this.getNumDecimalPlaces()) + ")"); avgError = 0; if (this.m_HoldOutDist[0] > 0) { avgError = this.m_HoldOutError / this.m_HoldOutDist[0]; } buffer.append(" [" + Utils.doubleToString(this.m_HoldOutDist[0], REPTree.this.getNumDecimalPlaces()) + "/" + Utils.doubleToString(avgError, REPTree.this.getNumDecimalPlaces()) + "]"); return buffer.toString(); } else { int maxIndex; if (this.m_ClassProbs == null) { maxIndex = Utils.maxIndex(parent.m_ClassProbs); } else { maxIndex = Utils.maxIndex(this.m_ClassProbs); } return " : " + this.m_Info.classAttribute().value(maxIndex) + " (" + Utils.doubleToString(Utils.sum(this.m_Distribution), REPTree.this.getNumDecimalPlaces()) + "/" + Utils.doubleToString((Utils.sum(this.m_Distribution) - this.m_Distribution[maxIndex]), REPTree.this.getNumDecimalPlaces()) + ")" + " [" + Utils.doubleToString(Utils.sum(this.m_HoldOutDist), REPTree.this.getNumDecimalPlaces()) + "/" + Utils.doubleToString((Utils.sum(this.m_HoldOutDist) - this.m_HoldOutDist[maxIndex]), REPTree.this.getNumDecimalPlaces()) + "]"; } } /** * Recursively outputs the tree. * * @param level * the current level * @param parent * the current parent * @return the generated substree */ protected String toString(final int level, final Tree parent) { try { StringBuffer text = new StringBuffer(); if (this.m_Attribute == -1) { // Output leaf info return this.leafString(parent); } else if (this.m_Info.attribute(this.m_Attribute).isNominal()) { // For nominal attributes for (int i = 0; i < this.m_Successors.length; i++) { text.append("\n"); for (int j = 0; j < level; j++) { text.append("| "); } text.append(this.m_Info.attribute(this.m_Attribute).name() + " = " + this.m_Info.attribute(this.m_Attribute).value(i)); text.append(this.m_Successors[i].toString(level + 1, this)); } } else { // For numeric attributes text.append("\n"); for (int j = 0; j < level; j++) { text.append("| "); } text.append(this.m_Info.attribute(this.m_Attribute).name() + " < " + Utils.doubleToString(this.m_SplitPoint, REPTree.this.getNumDecimalPlaces())); text.append(this.m_Successors[0].toString(level + 1, this)); text.append("\n"); for (int j = 0; j < level; j++) { text.append("| "); } text.append(this.m_Info.attribute(this.m_Attribute).name() + " >= " + Utils.doubleToString(this.m_SplitPoint, REPTree.this.getNumDecimalPlaces())); text.append(this.m_Successors[1].toString(level + 1, this)); } return text.toString(); } catch (Exception e) { e.printStackTrace(); return "Decision tree: tree can't be printed"; } } /** * Recursively generates a tree. 
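 * <p/>
 * A node is turned into a leaf when the total weight falls below twice the minimum leaf
 * weight, the node is pure (nominal class), the variance proportion drops below the
 * threshold (numeric class), or the maximum depth is reached; otherwise the attribute with
 * the highest gain (or variance reduction) is selected and one successor is built per branch.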
* * @param sortedIndices * the sorted indices of the instances * @param weights * the weights of the instances * @param data * the data to work with * @param totalWeight * @param classProbs * the class probabilities * @param header * the header of the data * @param minNum * the minimum number of instances in a leaf * @param minVariance * @param depth * the current depth of the tree * @param maxDepth * the maximum allowed depth of the tree * @throws Exception * if generation fails */ protected void buildTree(final int[][][] sortedIndices, final double[][][] weights, final Instances data, final double totalWeight, final double[] classProbs, final Instances header, final double minNum, final double minVariance, final int depth, final int maxDepth) throws Exception { // Store structure of dataset, set minimum number of instances // and make space for potential info from pruning data this.m_Info = header; if (data.classAttribute().isNumeric()) { this.m_HoldOutDist = new double[2]; } else { this.m_HoldOutDist = new double[data.numClasses()]; } // Make leaf if there are no training instances int helpIndex = 0; if (data.classIndex() == 0) { helpIndex = 1; } if (sortedIndices[0][helpIndex].length == 0) { if (data.classAttribute().isNumeric()) { this.m_Distribution = new double[2]; } else { this.m_Distribution = new double[data.numClasses()]; } this.m_ClassProbs = null; sortedIndices[0] = null; weights[0] = null; return; } double priorVar = 0; if (data.classAttribute().isNumeric()) { // Compute prior variance double totalSum = 0, totalSumSquared = 0, totalSumOfWeights = 0; for (int i = 0; i < sortedIndices[0][helpIndex].length; i++) { Instance inst = data.instance(sortedIndices[0][helpIndex][i]); totalSum += inst.classValue() * weights[0][helpIndex][i]; totalSumSquared += inst.classValue() * inst.classValue() * weights[0][helpIndex][i]; totalSumOfWeights += weights[0][helpIndex][i]; } priorVar = this.singleVariance(totalSum, totalSumSquared, totalSumOfWeights); } // Check if node doesn't contain enough instances, is pure // or the maximum tree depth is reached this.m_ClassProbs = new double[classProbs.length]; System.arraycopy(classProbs, 0, this.m_ClassProbs, 0, classProbs.length); if ((totalWeight < (2 * minNum)) || // Nominal case (data.classAttribute().isNominal() && Utils.eq(this.m_ClassProbs[Utils.maxIndex(this.m_ClassProbs)], Utils.sum(this.m_ClassProbs))) || // Numeric case (data.classAttribute().isNumeric() && ((priorVar / totalWeight) < minVariance)) || // Check tree depth ((REPTree.this.m_MaxDepth >= 0) && (depth >= maxDepth))) { // Make leaf this.m_Attribute = -1; if (data.classAttribute().isNominal()) { // Nominal case this.m_Distribution = new double[this.m_ClassProbs.length]; for (int i = 0; i < this.m_ClassProbs.length; i++) { this.m_Distribution[i] = this.m_ClassProbs[i]; } this.doSmoothing(); Utils.normalize(this.m_ClassProbs); } else { // Numeric case this.m_Distribution = new double[2]; this.m_Distribution[0] = priorVar; this.m_Distribution[1] = totalWeight; } sortedIndices[0] = null; weights[0] = null; return; } // Compute class distributions and value of splitting // criterion for each attribute double[] vals = new double[data.numAttributes()]; double[][][] dists = new double[data.numAttributes()][0][0]; double[][] props = new double[data.numAttributes()][0]; double[][] totalSubsetWeights = new double[data.numAttributes()][0]; double[] splits = new double[data.numAttributes()]; if (data.classAttribute().isNominal()) { // Nominal case for (int i = 0; i < data.numAttributes(); 
i++) { if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } if (i != data.classIndex()) { splits[i] = this.distribution(props, dists, i, sortedIndices[0][i], weights[0][i], totalSubsetWeights, data); vals[i] = this.gain(dists[i], this.priorVal(dists[i])); } } } else { // Numeric case for (int i = 0; i < data.numAttributes(); i++) { if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } if (i != data.classIndex()) { splits[i] = this.numericDistribution(props, dists, i, sortedIndices[0][i], weights[0][i], totalSubsetWeights, data, vals); } } } // Find best attribute this.m_Attribute = Utils.maxIndex(vals); int numAttVals = dists[this.m_Attribute].length; // Check if there are at least two subsets with // required minimum number of instances int count = 0; for (int i = 0; i < numAttVals; i++) { if (totalSubsetWeights[this.m_Attribute][i] >= minNum) { count++; } if (count > 1) { break; } } // Any useful split found? if (Utils.gr(vals[this.m_Attribute], 0) && (count > 1)) { // Set split point, proportions, and temp arrays this.m_SplitPoint = splits[this.m_Attribute]; this.m_Prop = props[this.m_Attribute]; double[][] attSubsetDists = dists[this.m_Attribute]; double[] attTotalSubsetWeights = totalSubsetWeights[this.m_Attribute]; // Release some memory before proceeding further vals = null; dists = null; props = null; totalSubsetWeights = null; splits = null; // Split data int[][][][] subsetIndices = new int[numAttVals][1][data.numAttributes()][0]; double[][][][] subsetWeights = new double[numAttVals][1][data.numAttributes()][0]; this.splitData(subsetIndices, subsetWeights, this.m_Attribute, this.m_SplitPoint, sortedIndices[0], weights[0], data); // Release memory sortedIndices[0] = null; weights[0] = null; // Build successors this.m_Successors = new Tree[numAttVals]; for (int i = 0; i < numAttVals; i++) { this.m_Successors[i] = new Tree(); this.m_Successors[i].buildTree(subsetIndices[i], subsetWeights[i], data, attTotalSubsetWeights[i], attSubsetDists[i], header, minNum, minVariance, depth + 1, maxDepth); // Release as much memory as we can attSubsetDists[i] = null; } } else { // Make leaf this.m_Attribute = -1; sortedIndices[0] = null; weights[0] = null; } // Normalize class counts if (data.classAttribute().isNominal()) { this.m_Distribution = new double[this.m_ClassProbs.length]; for (int i = 0; i < this.m_ClassProbs.length; i++) { this.m_Distribution[i] = this.m_ClassProbs[i]; } this.doSmoothing(); Utils.normalize(this.m_ClassProbs); } else { this.m_Distribution = new double[2]; this.m_Distribution[0] = priorVar; this.m_Distribution[1] = totalWeight; } } /** * Smoothes class probabilities stored at node. */ protected void doSmoothing() { double val = REPTree.this.m_InitialCount; if (REPTree.this.m_SpreadInitialCount) { val /= this.m_ClassProbs.length; } for (int i = 0; i < this.m_ClassProbs.length; i++) { this.m_ClassProbs[i] += val; } } /** * Computes size of the tree. * * @return the number of nodes */ protected int numNodes() { if (this.m_Attribute == -1) { return 1; } else { int size = 1; for (Tree m_Successor : this.m_Successors) { size += m_Successor.numNodes(); } return size; } } /** * Splits instances into subsets. 
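 * <p/>
 * Instances with a missing value for the split attribute are sent down every branch whose
 * proportion m_Prop[k] is non-zero, with their weight scaled by that proportion; all other
 * instances go to exactly one branch.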
* * @param subsetIndices * the sorted indices in the subset * @param subsetWeights * the weights of the subset * @param att * the attribute index * @param splitPoint * the split point for numeric attributes * @param sortedIndices * the sorted indices of the whole set * @param weights * the weights of the whole set * @param data * the data to work with * @throws Exception * if something goes wrong */ protected void splitData(final int[][][][] subsetIndices, final double[][][][] subsetWeights, final int att, final double splitPoint, final int[][] sortedIndices, final double[][] weights, final Instances data) throws Exception { int j; int[] num; // For each attribute for (int i = 0; i < data.numAttributes(); i++) { if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } if (i != data.classIndex()) { if (data.attribute(att).isNominal()) { // For nominal attributes num = new int[data.attribute(att).numValues()]; for (int k = 0; k < num.length; k++) { subsetIndices[k][0][i] = new int[sortedIndices[i].length]; subsetWeights[k][0][i] = new double[sortedIndices[i].length]; } for (j = 0; j < sortedIndices[i].length; j++) { Instance inst = data.instance(sortedIndices[i][j]); if (inst.isMissing(att)) { // Split instance up for (int k = 0; k < num.length; k++) { if (this.m_Prop[k] > 0) { subsetIndices[k][0][i][num[k]] = sortedIndices[i][j]; subsetWeights[k][0][i][num[k]] = this.m_Prop[k] * weights[i][j]; num[k]++; } } } else { int subset = (int) inst.value(att); subsetIndices[subset][0][i][num[subset]] = sortedIndices[i][j]; subsetWeights[subset][0][i][num[subset]] = weights[i][j]; num[subset]++; } } } else { // For numeric attributes num = new int[2]; for (int k = 0; k < 2; k++) { subsetIndices[k][0][i] = new int[sortedIndices[i].length]; subsetWeights[k][0][i] = new double[weights[i].length]; } for (j = 0; j < sortedIndices[i].length; j++) { if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } Instance inst = data.instance(sortedIndices[i][j]); if (inst.isMissing(att)) { // Split instance up for (int k = 0; k < num.length; k++) { if (this.m_Prop[k] > 0) { subsetIndices[k][0][i][num[k]] = sortedIndices[i][j]; subsetWeights[k][0][i][num[k]] = this.m_Prop[k] * weights[i][j]; num[k]++; } } } else { int subset = (inst.value(att) < splitPoint) ? 0 : 1; subsetIndices[subset][0][i][num[subset]] = sortedIndices[i][j]; subsetWeights[subset][0][i][num[subset]] = weights[i][j]; num[subset]++; } } } // Trim arrays for (int k = 0; k < num.length; k++) { int[] copy = new int[num[k]]; System.arraycopy(subsetIndices[k][0][i], 0, copy, 0, num[k]); subsetIndices[k][0][i] = copy; double[] copyWeights = new double[num[k]]; System.arraycopy(subsetWeights[k][0][i], 0, copyWeights, 0, num[k]); subsetWeights[k][0][i] = copyWeights; } } } } /** * Computes class distribution for an attribute. 
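 * <p/>
 * For a numeric attribute, every midpoint between consecutive distinct values (in sorted
 * order) is evaluated as a candidate split and the one with the highest information gain
 * is kept; instances with missing values are skipped during the search and afterwards
 * distributed across the subsets according to the subset proportions.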
* * @param props * @param dists * @param att * the attribute index * @param sortedIndices * the sorted indices of the instances * @param weights * the weights of the instances * @param subsetWeights * the weights of the subset * @param data * the data to work with * @return the split point * @throws Exception * if computation fails */ protected double distribution(final double[][] props, final double[][][] dists, final int att, final int[] sortedIndices, final double[] weights, final double[][] subsetWeights, final Instances data) throws Exception { double splitPoint = Double.NaN; Attribute attribute = data.attribute(att); double[][] dist = null; int i; if (attribute.isNominal()) { // For nominal attributes dist = new double[attribute.numValues()][data.numClasses()]; for (i = 0; i < sortedIndices.length; i++) { Instance inst = data.instance(sortedIndices[i]); if (inst.isMissing(att)) { break; } dist[(int) inst.value(att)][(int) inst.classValue()] += weights[i]; } } else { // For numeric attributes double[][] currDist = new double[2][data.numClasses()]; dist = new double[2][data.numClasses()]; // Move all instances into second subset for (int j = 0; j < sortedIndices.length; j++) { if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } Instance inst = data.instance(sortedIndices[j]); if (inst.isMissing(att)) { break; } currDist[1][(int) inst.classValue()] += weights[j]; } double priorVal = this.priorVal(currDist); System.arraycopy(currDist[1], 0, dist[1], 0, dist[1].length); // Try all possible split points double currSplit = data.instance(sortedIndices[0]).value(att); double currVal, bestVal = -Double.MAX_VALUE; for (i = 0; i < sortedIndices.length; i++) { Instance inst = data.instance(sortedIndices[i]); if (inst.isMissing(att)) { break; } if (inst.value(att) > currSplit) { currVal = this.gain(currDist, priorVal); if (currVal > bestVal) { bestVal = currVal; splitPoint = (inst.value(att) + currSplit) / 2.0; // Check for numeric precision problems if (splitPoint <= currSplit) { splitPoint = inst.value(att); } for (int j = 0; j < currDist.length; j++) { System.arraycopy(currDist[j], 0, dist[j], 0, dist[j].length); } } } currSplit = inst.value(att); currDist[0][(int) inst.classValue()] += weights[i]; currDist[1][(int) inst.classValue()] -= weights[i]; } } // Compute weights props[att] = new double[dist.length]; for (int k = 0; k < props[att].length; k++) { props[att][k] = Utils.sum(dist[k]); } if (!(Utils.sum(props[att]) > 0)) { for (int k = 0; k < props[att].length; k++) { props[att][k] = 1.0 / props[att].length; } } else { Utils.normalize(props[att]); } // Distribute counts while (i < sortedIndices.length) { Instance inst = data.instance(sortedIndices[i]); for (int j = 0; j < dist.length; j++) { dist[j][(int) inst.classValue()] += props[att][j] * weights[i]; } i++; } // Compute subset weights subsetWeights[att] = new double[dist.length]; for (int j = 0; j < dist.length; j++) { subsetWeights[att][j] += Utils.sum(dist[j]); } // Return distribution and split point dists[att] = dist; return splitPoint; } /** * Computes class distribution for an attribute. 
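 * <p/>
 * Regression counterpart of distribution(): split quality is the variance reduction
 * priorVar - sum_j(sumSquared[j] - sums[j]*sums[j]/sumOfWeights[j]), maintained
 * incrementally from per-subset sums, sums of squares and weights as the split point
 * sweeps over the sorted values.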
* * @param props * @param dists * @param att * the attribute index * @param sortedIndices * the sorted indices of the instances * @param weights * the weights of the instances * @param subsetWeights * the weights of the subset * @param data * the data to work with * @param vals * @return the split point * @throws Exception * if computation fails */ protected double numericDistribution(final double[][] props, final double[][][] dists, final int att, final int[] sortedIndices, final double[] weights, final double[][] subsetWeights, final Instances data, final double[] vals) throws Exception { double splitPoint = Double.NaN; Attribute attribute = data.attribute(att); double[][] dist = null; double[] sums = null; double[] sumSquared = null; double[] sumOfWeights = null; double totalSum = 0, totalSumSquared = 0, totalSumOfWeights = 0; int i; if (attribute.isNominal()) { // For nominal attributes sums = new double[attribute.numValues()]; sumSquared = new double[attribute.numValues()]; sumOfWeights = new double[attribute.numValues()]; int attVal; for (i = 0; i < sortedIndices.length; i++) { Instance inst = data.instance(sortedIndices[i]); if (inst.isMissing(att)) { break; } attVal = (int) inst.value(att); sums[attVal] += inst.classValue() * weights[i]; sumSquared[attVal] += inst.classValue() * inst.classValue() * weights[i]; sumOfWeights[attVal] += weights[i]; } totalSum = Utils.sum(sums); totalSumSquared = Utils.sum(sumSquared); totalSumOfWeights = Utils.sum(sumOfWeights); } else { // For numeric attributes sums = new double[2]; sumSquared = new double[2]; sumOfWeights = new double[2]; double[] currSums = new double[2]; double[] currSumSquared = new double[2]; double[] currSumOfWeights = new double[2]; // Move all instances into second subset for (int j = 0; j < sortedIndices.length; j++) { Instance inst = data.instance(sortedIndices[j]); if (inst.isMissing(att)) { break; } currSums[1] += inst.classValue() * weights[j]; currSumSquared[1] += inst.classValue() * inst.classValue() * weights[j]; currSumOfWeights[1] += weights[j]; } totalSum = currSums[1]; totalSumSquared = currSumSquared[1]; totalSumOfWeights = currSumOfWeights[1]; sums[1] = currSums[1]; sumSquared[1] = currSumSquared[1]; sumOfWeights[1] = currSumOfWeights[1]; // Try all possible split points double currSplit = data.instance(sortedIndices[0]).value(att); double currVal, bestVal = Double.MAX_VALUE; for (i = 0; i < sortedIndices.length; i++) { Instance inst = data.instance(sortedIndices[i]); if (inst.isMissing(att)) { break; } if (inst.value(att) > currSplit) { currVal = this.variance(currSums, currSumSquared, currSumOfWeights); if (currVal < bestVal) { bestVal = currVal; splitPoint = (inst.value(att) + currSplit) / 2.0; // Check for numeric precision problems if (splitPoint <= currSplit) { splitPoint = inst.value(att); } for (int j = 0; j < 2; j++) { sums[j] = currSums[j]; sumSquared[j] = currSumSquared[j]; sumOfWeights[j] = currSumOfWeights[j]; } } } currSplit = inst.value(att); double classVal = inst.classValue() * weights[i]; double classValSquared = inst.classValue() * classVal; currSums[0] += classVal; currSumSquared[0] += classValSquared; currSumOfWeights[0] += weights[i]; currSums[1] -= classVal; currSumSquared[1] -= classValSquared; currSumOfWeights[1] -= weights[i]; } } // Compute weights props[att] = new double[sums.length]; for (int k = 0; k < props[att].length; k++) { props[att][k] = sumOfWeights[k]; } if (!(Utils.sum(props[att]) > 0)) { for (int k = 0; k < props[att].length; k++) { props[att][k] = 1.0 / 
props[att].length; } } else { Utils.normalize(props[att]); } // Distribute counts for missing values while (i < sortedIndices.length) { Instance inst = data.instance(sortedIndices[i]); for (int j = 0; j < sums.length; j++) { sums[j] += props[att][j] * inst.classValue() * weights[i]; sumSquared[j] += props[att][j] * inst.classValue() * inst.classValue() * weights[i]; sumOfWeights[j] += props[att][j] * weights[i]; } totalSum += inst.classValue() * weights[i]; totalSumSquared += inst.classValue() * inst.classValue() * weights[i]; totalSumOfWeights += weights[i]; i++; } // Compute final distribution dist = new double[sums.length][data.numClasses()]; for (int j = 0; j < sums.length; j++) { if (sumOfWeights[j] > 0) { dist[j][0] = sums[j] / sumOfWeights[j]; } else { dist[j][0] = totalSum / totalSumOfWeights; } } // Compute variance gain double priorVar = this.singleVariance(totalSum, totalSumSquared, totalSumOfWeights); double var = this.variance(sums, sumSquared, sumOfWeights); double gain = priorVar - var; // Return distribution and split point subsetWeights[att] = sumOfWeights; dists[att] = dist; vals[att] = gain; return splitPoint; } /** * Computes variance for subsets. * * @param s * @param sS * @param sumOfWeights * @return the variance */ protected double variance(final double[] s, final double[] sS, final double[] sumOfWeights) { double var = 0; for (int i = 0; i < s.length; i++) { if (sumOfWeights[i] > 0) { var += this.singleVariance(s[i], sS[i], sumOfWeights[i]); } } return var; } /** * Computes the variance for a single set * * @param s * @param sS * @param weight * the weight * @return the variance */ protected double singleVariance(final double s, final double sS, final double weight) { return sS - ((s * s) / weight); } /** * Computes value of splitting criterion before split. * * @param dist * @return the splitting criterion * @throws InterruptedException */ protected double priorVal(final double[][] dist) throws InterruptedException { return ContingencyTables.entropyOverColumns(dist); } /** * Computes value of splitting criterion after split. * * @param dist * @param priorVal * the splitting criterion * @return the gain after splitting */ protected double gain(final double[][] dist, final double priorVal) { return priorVal - ContingencyTables.entropyConditionedOnRows(dist); } /** * Prunes the tree using the hold-out data (bottom-up). * * @return the error * @throws Exception * if pruning fails for some reason */ protected double reducedErrorPrune() throws Exception { // Is node leaf ? if (this.m_Attribute == -1) { return this.m_HoldOutError; } // Prune all sub trees double errorTree = 0; for (Tree m_Successor : this.m_Successors) { errorTree += m_Successor.reducedErrorPrune(); } // Replace sub tree with leaf if error doesn't get worse if (errorTree >= this.m_HoldOutError) { this.m_Attribute = -1; this.m_Successors = null; return this.m_HoldOutError; } else { return errorTree; } } /** * Inserts hold-out set into tree. * * @param data * the data to insert * @throws Exception * if something goes wrong */ protected void insertHoldOutSet(final Instances data) throws Exception { for (int i = 0; i < data.numInstances(); i++) { this.insertHoldOutInstance(data.instance(i), data.instance(i).weight(), this); } } /** * Inserts an instance from the hold-out set into the tree. 
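 * <p/>
 * Every node on the instance's path updates its hold-out statistics: in the nominal case
 * the instance weight is added to the error whenever the node's majority class differs
 * from the true class; in the numeric case the weighted squared deviation from the node's
 * mean is accumulated. Missing split values again spread the weight across the branches.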
* * @param inst * the instance to insert * @param weight * the weight of the instance * @param parent * the parent of the node * @throws Exception * if insertion fails */ protected void insertHoldOutInstance(final Instance inst, final double weight, final Tree parent) throws Exception { // Insert instance into hold-out class distribution if (inst.classAttribute().isNominal()) { // Nominal case this.m_HoldOutDist[(int) inst.classValue()] += weight; int predictedClass = 0; if (this.m_ClassProbs == null) { predictedClass = Utils.maxIndex(parent.m_ClassProbs); } else { predictedClass = Utils.maxIndex(this.m_ClassProbs); } if (predictedClass != (int) inst.classValue()) { this.m_HoldOutError += weight; } } else { // Numeric case this.m_HoldOutDist[0] += weight; this.m_HoldOutDist[1] += weight * inst.classValue(); double diff = 0; if (this.m_ClassProbs == null) { diff = parent.m_ClassProbs[0] - inst.classValue(); } else { diff = this.m_ClassProbs[0] - inst.classValue(); } this.m_HoldOutError += diff * diff * weight; } // The process is recursive if (this.m_Attribute != -1) { // If node is not a leaf if (inst.isMissing(this.m_Attribute)) { // Distribute instance for (int i = 0; i < this.m_Successors.length; i++) { if (this.m_Prop[i] > 0) { this.m_Successors[i].insertHoldOutInstance(inst, weight * this.m_Prop[i], this); } } } else { if (this.m_Info.attribute(this.m_Attribute).isNominal()) { // Treat nominal attributes this.m_Successors[(int) inst.value(this.m_Attribute)].insertHoldOutInstance(inst, weight, this); } else { // Treat numeric attributes if (inst.value(this.m_Attribute) < this.m_SplitPoint) { this.m_Successors[0].insertHoldOutInstance(inst, weight, this); } else { this.m_Successors[1].insertHoldOutInstance(inst, weight, this); } } } } } /** * Backfits data from holdout set. * * @throws Exception * if insertion fails */ protected void backfitHoldOutSet() throws Exception { // Insert instance into hold-out class distribution if (this.m_Info.classAttribute().isNominal()) { // Nominal case if (this.m_ClassProbs == null) { this.m_ClassProbs = new double[this.m_Info.numClasses()]; } System.arraycopy(this.m_Distribution, 0, this.m_ClassProbs, 0, this.m_Info.numClasses()); for (int i = 0; i < this.m_HoldOutDist.length; i++) { this.m_ClassProbs[i] += this.m_HoldOutDist[i]; } if (Utils.sum(this.m_ClassProbs) > 0) { this.doSmoothing(); Utils.normalize(this.m_ClassProbs); } else { this.m_ClassProbs = null; } } else { // Numeric case double sumOfWeightsTrainAndHoldout = this.m_Distribution[1] + this.m_HoldOutDist[0]; if (sumOfWeightsTrainAndHoldout <= 0) { return; } if (this.m_ClassProbs == null) { this.m_ClassProbs = new double[1]; } else { this.m_ClassProbs[0] *= this.m_Distribution[1]; } this.m_ClassProbs[0] += this.m_HoldOutDist[1]; this.m_ClassProbs[0] /= sumOfWeightsTrainAndHoldout; } // The process is recursive if (this.m_Attribute != -1) { for (Tree m_Successor : this.m_Successors) { m_Successor.backfitHoldOutSet(); } } } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } /** The Tree object */ protected Tree m_Tree = null; /** Number of folds for reduced error pruning. */ protected int m_NumFolds = 3; /** Seed for random data shuffling. */ protected int m_Seed = 1; /** Don't prune */ protected boolean m_NoPruning = false; /** The minimum number of instances per leaf. */ protected double m_MinNum = 2; /** * The minimum proportion of the total variance (over all the data) required for split. 
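 * Corresponds to the -V option; with the default of 1e-3, a node is only split while its
 * class variance is still at least 0.1% of the variance on the full training set.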
*/ protected double m_MinVarianceProp = 1e-3; /** Upper bound on the tree depth */ protected int m_MaxDepth = -1; /** The initial class count */ protected double m_InitialCount = 0; /** Whether to spread initial count across all values */ protected boolean m_SpreadInitialCount = false; /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String noPruningTipText() { return "Whether pruning is performed."; } /** * Get the value of NoPruning. * * @return Value of NoPruning. */ public boolean getNoPruning() { return this.m_NoPruning; } /** * Set the value of NoPruning. * * @param newNoPruning * Value to assign to NoPruning. */ public void setNoPruning(final boolean newNoPruning) { this.m_NoPruning = newNoPruning; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String minNumTipText() { return "The minimum total weight of the instances in a leaf."; } /** * Get the value of MinNum. * * @return Value of MinNum. */ public double getMinNum() { return this.m_MinNum; } /** * Set the value of MinNum. * * @param newMinNum * Value to assign to MinNum. */ public void setMinNum(final double newMinNum) { this.m_MinNum = newMinNum; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String minVariancePropTipText() { return "The minimum proportion of the variance on all the data " + "that needs to be present at a node in order for splitting to " + "be performed in regression trees."; } /** * Get the value of MinVarianceProp. * * @return Value of MinVarianceProp. */ public double getMinVarianceProp() { return this.m_MinVarianceProp; } /** * Set the value of MinVarianceProp. * * @param newMinVarianceProp * Value to assign to MinVarianceProp. */ public void setMinVarianceProp(final double newMinVarianceProp) { this.m_MinVarianceProp = newMinVarianceProp; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String seedTipText() { return "The seed used for randomizing the data."; } /** * Get the value of Seed. * * @return Value of Seed. */ @Override public int getSeed() { return this.m_Seed; } /** * Set the value of Seed. * * @param newSeed * Value to assign to Seed. */ @Override public void setSeed(final int newSeed) { this.m_Seed = newSeed; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String numFoldsTipText() { return "Determines the amount of data used for pruning. One fold is used for " + "pruning, the rest for growing the rules."; } /** * Get the value of NumFolds. * * @return Value of NumFolds. */ public int getNumFolds() { return this.m_NumFolds; } /** * Set the value of NumFolds. * * @param newNumFolds * Value to assign to NumFolds. */ public void setNumFolds(final int newNumFolds) { this.m_NumFolds = newNumFolds; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String maxDepthTipText() { return "The maximum tree depth (-1 for no restriction)."; } /** * Get the value of MaxDepth. * * @return Value of MaxDepth. */ public int getMaxDepth() { return this.m_MaxDepth; } /** * Set the value of MaxDepth. 
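 * (A value of -1 means the tree depth is not restricted.)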
* * @param newMaxDepth * Value to assign to MaxDepth. */ public void setMaxDepth(final int newMaxDepth) { this.m_MaxDepth = newMaxDepth; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String initialCountTipText() { return "Initial class value count."; } /** * Get the value of InitialCount. * * @return Value of InitialCount. */ public double getInitialCount() { return this.m_InitialCount; } /** * Set the value of InitialCount. * * @param newInitialCount * Value to assign to InitialCount. */ public void setInitialCount(final double newInitialCount) { this.m_InitialCount = newInitialCount; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String spreadInitialCountTipText() { return "Spread initial count across all values instead of using the count per value."; } /** * Get the value of SpreadInitialCount. * * @return Value of SpreadInitialCount. */ public boolean getSpreadInitialCount() { return this.m_SpreadInitialCount; } /** * Set the value of SpreadInitialCount. * * @param newSpreadInitialCount * Value to assign to SpreadInitialCount. */ public void setSpreadInitialCount(final boolean newSpreadInitialCount) { this.m_SpreadInitialCount = newSpreadInitialCount; } /** * Lists the command-line options for this classifier. * * @return an enumeration over all commandline options */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<>(8); newVector.addElement(new Option("\tSet minimum number of instances per leaf " + "(default 2).", "M", 1, "-M <minimum number of instances>")); newVector.addElement(new Option("\tSet minimum numeric class variance proportion\n" + "\tof train variance for split (default 1e-3).", "V", 1, "-V <minimum variance for split>")); newVector.addElement(new Option("\tNumber of folds for reduced error pruning " + "(default 3).", "N", 1, "-N <number of folds>")); newVector.addElement(new Option("\tSeed for random data shuffling (default 1).", "S", 1, "-S <seed>")); newVector.addElement(new Option("\tNo pruning.", "P", 0, "-P")); newVector.addElement(new Option("\tMaximum tree depth (default -1, no maximum)", "L", 1, "-L")); newVector.addElement(new Option("\tInitial class value count (default 0)", "I", 1, "-I")); newVector.addElement(new Option("\tSpread initial count over all class values (i.e." + " don't use 1 per value)", "R", 0, "-R")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Gets options from this classifier. * * @return the options for the current setup */ @Override public String[] getOptions() { Vector<String> options = new Vector<>(); options.add("-M"); options.add("" + (int) this.getMinNum()); options.add("-V"); options.add("" + this.getMinVarianceProp()); options.add("-N"); options.add("" + this.getNumFolds()); options.add("-S"); options.add("" + this.getSeed()); options.add("-L"); options.add("" + this.getMaxDepth()); if (this.getNoPruning()) { options.add("-P"); } options.add("-I"); options.add("" + this.getInitialCount()); if (this.getSpreadInitialCount()) { options.add("-R"); } Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * Parses a given list of options. 
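 * <p/>
 * A minimal usage sketch (the option values are illustrative, not recommendations):
 * <pre>
 * REPTree tree = new REPTree();
 * tree.setOptions(weka.core.Utils.splitOptions("-M 5 -N 5 -S 42 -L 10"));
 * </pre>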
* <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf (default 2). * </pre> * * <pre> * -V &lt;minimum variance for split&gt; * Set minimum numeric class variance proportion * of train variance for split (default 1e-3). * </pre> * * <pre> * -N &lt;number of folds&gt; * Number of folds for reduced error pruning (default 3). * </pre> * * <pre> * -S &lt;seed&gt; * Seed for random data shuffling (default 1). * </pre> * * <pre> * -P * No pruning. * </pre> * * <pre> * -L * Maximum tree depth (default -1, no maximum) * </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String minNumString = Utils.getOption('M', options); if (minNumString.length() != 0) { this.m_MinNum = Integer.parseInt(minNumString); } else { this.m_MinNum = 2; } String minVarString = Utils.getOption('V', options); if (minVarString.length() != 0) { this.m_MinVarianceProp = Double.parseDouble(minVarString); } else { this.m_MinVarianceProp = 1e-3; } String numFoldsString = Utils.getOption('N', options); if (numFoldsString.length() != 0) { this.m_NumFolds = Integer.parseInt(numFoldsString); } else { this.m_NumFolds = 3; } String seedString = Utils.getOption('S', options); if (seedString.length() != 0) { this.m_Seed = Integer.parseInt(seedString); } else { this.m_Seed = 1; } this.m_NoPruning = Utils.getFlag('P', options); String depthString = Utils.getOption('L', options); if (depthString.length() != 0) { this.m_MaxDepth = Integer.parseInt(depthString); } else { this.m_MaxDepth = -1; } String initialCountString = Utils.getOption('I', options); if (initialCountString.length() != 0) { this.m_InitialCount = Double.parseDouble(initialCountString); } else { this.m_InitialCount = 0; } this.m_SpreadInitialCount = Utils.getFlag('R', options); super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Computes size of the tree. * * @return the number of nodes */ public int numNodes() { return this.m_Tree.numNodes(); } /** * Returns an enumeration of the additional measure names. * * @return an enumeration of the measure names */ @Override public Enumeration<String> enumerateMeasures() { Vector<String> newVector = new Vector<>(1); newVector.addElement("measureTreeSize"); return newVector.elements(); } /** * Returns the value of the named measure. * * @param additionalMeasureName * the name of the measure to query for its value * @return the value of the named measure * @throws IllegalArgumentException * if the named measure is not supported */ @Override public double getMeasure(final String additionalMeasureName) { if (additionalMeasureName.equalsIgnoreCase("measureTreeSize")) { return this.numNodes(); } else { throw new IllegalArgumentException(additionalMeasureName + " not supported (REPTree)"); } } /** * Returns default capabilities of the classifier. 
* * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Builds classifier. * * @param data * the data to train with * @throws Exception * if building fails */ @Override public void buildClassifier(Instances data) throws Exception { // can classifier handle the data? this.getCapabilities().testWithFail(data); // remove instances with missing class data = new Instances(data); data.deleteWithMissingClass(); Random random = new Random(this.m_Seed); this.m_zeroR = null; if (data.numAttributes() == 1) { this.m_zeroR = new ZeroR(); this.m_zeroR.buildClassifier(data); return; } // Randomize and stratify data.randomize(random); if (data.classAttribute().isNominal()) { data.stratify(this.m_NumFolds); } // Split data into training and pruning set Instances train = null; Instances prune = null; if (!this.m_NoPruning) { train = data.trainCV(this.m_NumFolds, 0, random); prune = data.testCV(this.m_NumFolds, 0); } else { train = data; } // Create array of sorted indices and weights int[][][] sortedIndices = new int[1][train.numAttributes()][0]; double[][][] weights = new double[1][train.numAttributes()][0]; double[] vals = new double[train.numInstances()]; for (int j = 0; j < train.numAttributes(); j++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (j != train.classIndex()) { weights[0][j] = new double[train.numInstances()]; if (train.attribute(j).isNominal()) { // Handling nominal attributes. Putting indices of // instances with missing values at the end. 
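// Two passes over the training data: the first records every instance with a non-missing
// value for attribute j, the second appends the missing-value instances, leaving them in
// a contiguous block at the end of the index array.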
sortedIndices[0][j] = new int[train.numInstances()]; int count = 0; for (int i = 0; i < train.numInstances(); i++) { Instance inst = train.instance(i); if (!inst.isMissing(j)) { sortedIndices[0][j][count] = i; weights[0][j][count] = inst.weight(); count++; } } for (int i = 0; i < train.numInstances(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } Instance inst = train.instance(i); if (inst.isMissing(j)) { sortedIndices[0][j][count] = i; weights[0][j][count] = inst.weight(); count++; } } } else { // Sorted indices are computed for numeric attributes for (int i = 0; i < train.numInstances(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } Instance inst = train.instance(i); vals[i] = inst.value(j); } sortedIndices[0][j] = Utils.sort(vals); for (int i = 0; i < train.numInstances(); i++) { weights[0][j][i] = train.instance(sortedIndices[0][j][i]).weight(); } } } } // Compute initial class counts double[] classProbs = new double[train.numClasses()]; double totalWeight = 0, totalSumSquared = 0; for (int i = 0; i < train.numInstances(); i++) { Instance inst = train.instance(i); if (data.classAttribute().isNominal()) { classProbs[(int) inst.classValue()] += inst.weight(); totalWeight += inst.weight(); } else { classProbs[0] += inst.classValue() * inst.weight(); totalSumSquared += inst.classValue() * inst.classValue() * inst.weight(); totalWeight += inst.weight(); } } this.m_Tree = new Tree(); double trainVariance = 0; if (data.classAttribute().isNumeric()) { trainVariance = this.m_Tree.singleVariance(classProbs[0], totalSumSquared, totalWeight) / totalWeight; classProbs[0] /= totalWeight; } // Build tree this.m_Tree.buildTree(sortedIndices, weights, train, totalWeight, classProbs, new Instances(train, 0), this.m_MinNum, this.m_MinVarianceProp * trainVariance, 0, this.m_MaxDepth); // Insert pruning data and perform reduced error pruning if (!this.m_NoPruning) { this.m_Tree.insertHoldOutSet(prune); this.m_Tree.reducedErrorPrune(); this.m_Tree.backfitHoldOutSet(); } } /** * Computes class distribution of an instance using the tree. * * @param instance * the instance to compute the distribution for * @return the computed class probabilities * @throws Exception * if computation fails */ @Override public double[] distributionForInstance(final Instance instance) throws Exception { if (this.m_zeroR != null) { return this.m_zeroR.distributionForInstance(instance); } else { return this.m_Tree.distributionForInstance(instance); } } /** * For getting a unique ID when outputting the tree source (hashcode isn't guaranteed unique) */ private static long PRINTED_NODES = 0; /** * Gets the next unique node ID. * * @return the next unique node ID. */ protected static long nextID() { return PRINTED_NODES++; } /** * resets the counter for the nodes */ protected static void resetID() { PRINTED_NODES = 0; } /** * Returns the tree as if-then statements. 
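 * <p/>
 * A minimal sketch of obtaining the generated source (assumes a trained model; the class
 * name "REPTreeModel" is arbitrary):
 * <pre>
 * REPTree tree = new REPTree();
 * tree.buildClassifier(data);
 * String javaSource = tree.toSource("REPTreeModel");
 * </pre>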
* * @param className * the name for the generated class * @return the tree as a Java if-then type statement * @throws Exception * if something goes wrong */ @Override public String toSource(final String className) throws Exception { if (this.m_Tree == null) { throw new Exception("REPTree: No model built yet."); } StringBuffer[] source = this.m_Tree.toSource(className, this.m_Tree); return "class " + className + " {\n\n" + " public static double classify(Object [] i)\n" + " throws Exception {\n\n" + " double p = Double.NaN;\n" + source[0] // Assignment // code + " return p;\n" + " }\n" + source[1] // Support code + "}\n"; } /** * Returns the type of graph this classifier represents. * * @return Drawable.TREE */ @Override public int graphType() { return Drawable.TREE; } /** * Outputs the decision tree as a graph * * @return the tree as a graph * @throws Exception * if generation fails */ @Override public String graph() throws Exception { if (this.m_Tree == null) { throw new Exception("REPTree: No model built yet."); } StringBuffer resultBuff = new StringBuffer(); this.m_Tree.toGraph(resultBuff, 0, null); String result = "digraph Tree {\n" + "edge [style=bold]\n" + resultBuff.toString() + "\n}\n"; return result; } /** * Outputs the decision tree. * * @return a string representation of the classifier */ @Override public String toString() { if (this.m_zeroR != null) { return "No attributes other than class. Using ZeroR.\n\n" + this.m_zeroR.toString(); } if ((this.m_Tree == null)) { return "REPTree: No model built yet."; } return "\nREPTree\n============\n" + this.m_Tree.toString(0, null) + "\n" + "\nSize of the tree : " + this.numNodes(); } /** * Builds the classifier to generate a partition. */ @Override public void generatePartition(final Instances data) throws Exception { this.buildClassifier(data); } /** * Computes array that indicates node membership. Array locations are allocated based on * breadth-first exploration of the tree. */ @Override public double[] getMembershipValues(final Instance instance) throws Exception { if (this.m_zeroR != null) { double[] m = new double[1]; m[0] = instance.weight(); return m; } else { // Set up array for membership values double[] a = new double[this.numElements()]; // Initialize queues Queue<Double> queueOfWeights = new LinkedList<>(); Queue<Tree> queueOfNodes = new LinkedList<>(); queueOfWeights.add(instance.weight()); queueOfNodes.add(this.m_Tree); int index = 0; // While the queue is not empty while (!queueOfNodes.isEmpty()) { a[index++] = queueOfWeights.poll(); Tree node = queueOfNodes.poll(); // Is node a leaf? if (node.m_Attribute <= -1) { continue; } // Compute weight distribution double[] weights = new double[node.m_Successors.length]; if (instance.isMissing(node.m_Attribute)) { System.arraycopy(node.m_Prop, 0, weights, 0, node.m_Prop.length); } else if (node.m_Info.attribute(node.m_Attribute).isNominal()) { weights[(int) instance.value(node.m_Attribute)] = 1.0; } else { if (instance.value(node.m_Attribute) < node.m_SplitPoint) { weights[0] = 1.0; } else { weights[1] = 1.0; } } for (int i = 0; i < node.m_Successors.length; i++) { queueOfNodes.add(node.m_Successors[i]); queueOfWeights.add(a[index - 1] * weights[i]); } } return a; } } /** * Returns the number of elements in the partition. */ @Override public int numElements() throws Exception { if (this.m_zeroR != null) { return 1; } return this.numNodes(); } /** * Returns the revision string. 
* * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for this class. * * @param argv * the command-line options */ public static void main(final String[] argv) { runClassifier(new REPTree(), argv); } }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/RandomForest.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RandomForest.java * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees; import java.util.Collections; import java.util.Enumeration; import java.util.List; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.meta.Bagging; import weka.core.Capabilities; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.Utils; import weka.core.WekaException; import weka.gui.ProgrammaticProperty; /** * <!-- globalinfo-start --> Class for constructing a forest of random trees.<br> * <br> * For more information see: <br> * <br> * Leo Breiman (2001). Random Forests. Machine Learning. 45(1):5-32. <br> * <br> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;article{Breiman2001, * author = {Leo Breiman}, * journal = {Machine Learning}, * number = {1}, * pages = {5-32}, * title = {Random Forests}, * volume = {45}, * year = {2001} * } * </pre> * * <br> * <br> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p> * * <pre> * -P * Size of each bag, as a percentage of the * training set size. (default 100) * </pre> * * <pre> * -O * Calculate the out of bag error. * </pre> * * <pre> * -store-out-of-bag-predictions * Whether to store out of bag predictions in internal evaluation object. * </pre> * * <pre> * -output-out-of-bag-complexity-statistics * Whether to output complexity-based statistics when out-of-bag evaluation is performed. * </pre> * * <pre> * -print * Print the individual classifiers in the output * </pre> * * <pre> * -attribute-importance * Compute and output attribute importance (mean impurity decrease method) * </pre> * * <pre> * -I &lt;num&gt; * Number of iterations. * (current value 100) * </pre> * * <pre> * -num-slots &lt;num&gt; * Number of execution slots. * (default 1 - i.e. no parallelism) * (use 0 to auto-detect number of cores) * </pre> * * <pre> * -K &lt;number of attributes&gt; * Number of attributes to randomly investigate. (default 0) * (&lt;1 = int(log_2(#predictors)+1)). * </pre> * * <pre> * -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf. * (default 1) * </pre> * * <pre> * -V &lt;minimum variance for split&gt; * Set minimum numeric class variance proportion * of train variance for split (default 1e-3). * </pre> * * <pre> * -S &lt;num&gt; * Seed for random number generator. * (default 1) * </pre> * * <pre> * -depth &lt;num&gt; * The maximum depth of the tree, 0 for unlimited. * (default 0) * </pre> * * <pre> * -N &lt;num&gt; * Number of folds for backfitting (default 0, no backfitting). * </pre> * * <pre> * -U * Allow unclassified instances. 
* </pre> * * <pre> * -B * Break ties randomly when several attributes look equally good. * </pre> * * <pre> * -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution). * </pre> * * <pre> * -num-decimal-places * The number of decimal places for the output of numbers in the model (default 2). * </pre> * * <pre> * -batch-size * The desired batch size for batch prediction (default 100). * </pre> * * <!-- options-end --> * * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @version $Revision$ */ public class RandomForest extends Bagging { /** for serialization */ static final long serialVersionUID = 1116839470751428698L; /** True to compute attribute importance */ protected boolean m_computeAttributeImportance; /** * The default number of iterations to perform. */ @Override protected int defaultNumberOfIterations() { return 100; } /** * Constructor that sets base classifier for bagging to RandomTree and default * number of iterations to 100. */ public RandomForest() { RandomTree rTree = new RandomTree(); rTree.setDoNotCheckCapabilities(true); super.setClassifier(rTree); super.setRepresentCopiesUsingWeights(true); this.setNumIterations(this.defaultNumberOfIterations()); } /** * Returns default capabilities of the base classifier. * * @return the capabilities of the base classifier */ @Override public Capabilities getCapabilities() { // Cannot use the main RandomTree object because capabilities checking has // been turned off // for that object. return (new RandomTree()).getCapabilities(); } /** * String describing default classifier. * * @return the default classifier classname */ @Override protected String defaultClassifierString() { return "weka.classifiers.trees.RandomTree"; } /** * String describing default classifier options. * * @return the default classifier options */ @Override protected String[] defaultClassifierOptions() { String[] args = { "-do-not-check-capabilities" }; return args; } /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter * gui */ @Override public String globalInfo() { return "Class for constructing a forest of random trees.\n\n" + "For more information see: \n\n" + this.getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed * information about the technical background of this class, e.g., paper * reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "Leo Breiman"); result.setValue(Field.YEAR, "2001"); result.setValue(Field.TITLE, "Random Forests"); result.setValue(Field.JOURNAL, "Machine Learning"); result.setValue(Field.VOLUME, "45"); result.setValue(Field.NUMBER, "1"); result.setValue(Field.PAGES, "5-32"); return result; } /** * This method only accepts RandomTree arguments. * * @param newClassifier the RandomTree to use.
* @throws IllegalArgumentException if the argument is not a RandomTree */ @Override @ProgrammaticProperty public void setClassifier(final Classifier newClassifier) { if (!(newClassifier instanceof RandomTree)) { throw new IllegalArgumentException("RandomForest: Argument of setClassifier() must be a RandomTree."); } super.setClassifier(newClassifier); } /** * This method only accepts true as its argument. * * @param representUsingWeights must be set to true. * @throws IllegalArgumentException if the argument is not true */ @Override @ProgrammaticProperty public void setRepresentCopiesUsingWeights(final boolean representUsingWeights) { if (!representUsingWeights) { throw new IllegalArgumentException("RandomForest: Argument of setRepresentCopiesUsingWeights() must be true."); } super.setRepresentCopiesUsingWeights(representUsingWeights); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String numFeaturesTipText() { return ((RandomTree) this.getClassifier()).KValueTipText(); } /** * Get the number of features used in random selection. * * @return Value of numFeatures. */ public int getNumFeatures() { return ((RandomTree) this.getClassifier()).getKValue(); } /** * Set the number of features to use in random selection. * * @param newNumFeatures Value to assign to numFeatures. */ public void setNumFeatures(final int newNumFeatures) { ((RandomTree) this.getClassifier()).setKValue(newNumFeatures); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String computeAttributeImportanceTipText() { return "Compute attribute importance via mean impurity decrease"; } /** * Set whether to compute and output attribute importance scores * * @param computeAttributeImportance true to compute attribute importance * scores */ public void setComputeAttributeImportance(final boolean computeAttributeImportance) { this.m_computeAttributeImportance = computeAttributeImportance; ((RandomTree) this.m_Classifier).setComputeImpurityDecreases(computeAttributeImportance); } /** * Get whether to compute and output attribute importance scores * * @return true if computing attribute importance scores */ public boolean getComputeAttributeImportance() { return this.m_computeAttributeImportance; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String maxDepthTipText() { return ((RandomTree) this.getClassifier()).maxDepthTipText(); } /** * Get the maximum depth of the tree, 0 for unlimited. * * @return the maximum depth. */ public int getMaxDepth() { return ((RandomTree) this.getClassifier()).getMaxDepth(); } /** * Set the maximum depth of the tree, 0 for unlimited. * * @param value the maximum depth. */ public void setMaxDepth(final int value) { ((RandomTree) this.getClassifier()).setMaxDepth(value); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String breakTiesRandomlyTipText() { return ((RandomTree) this.getClassifier()).breakTiesRandomlyTipText(); } /** * Get whether to break ties randomly. * * @return true if ties are to be broken randomly. */ public boolean getBreakTiesRandomly() { return ((RandomTree) this.getClassifier()).getBreakTiesRandomly(); } /** * Set whether to break ties randomly.
* * @param newBreakTiesRandomly true if ties are to be broken randomly */ public void setBreakTiesRandomly(final boolean newBreakTiesRandomly) { ((RandomTree) this.getClassifier()).setBreakTiesRandomly(newBreakTiesRandomly); } /** * Set debugging mode. * * @param debug true if debug output should be printed */ @Override public void setDebug(final boolean debug) { super.setDebug(debug); ((RandomTree) this.getClassifier()).setDebug(debug); } /** * Set the number of decimal places. */ @Override public void setNumDecimalPlaces(final int num) { super.setNumDecimalPlaces(num); ((RandomTree) this.getClassifier()).setNumDecimalPlaces(num); } /** * Set the preferred batch size for batch prediction. * * @param size the batch size to use */ @Override public void setBatchSize(final String size) { super.setBatchSize(size); ((RandomTree) this.getClassifier()).setBatchSize(size); } /** * Sets the seed for the random number generator. * * @param s the seed to be used */ @Override public void setSeed(final int s) { super.setSeed(s); ((RandomTree) this.getClassifier()).setSeed(s); } /** * Returns description of the bagged classifier. * * @return description of the bagged classifier as a string */ @Override public String toString() { if (this.m_Classifiers == null) { return "RandomForest: No model built yet."; } StringBuilder buffer = new StringBuilder("RandomForest\n\n"); buffer.append(super.toString()); if (this.getComputeAttributeImportance()) { try { double[] nodeCounts = new double[this.m_data.numAttributes()]; double[] impurityScores = this.computeAverageImpurityDecreasePerAttribute(nodeCounts); int[] sortedIndices = Utils.sort(impurityScores); buffer.append("\n\nAttribute importance based on average impurity decrease " + "(and number of nodes using that attribute)\n\n"); for (int i = sortedIndices.length - 1; i >= 0; i--) { int index = sortedIndices[i]; if (index != this.m_data.classIndex()) { buffer.append(Utils.doubleToString(impurityScores[index], 10, this.getNumDecimalPlaces())).append(" (").append(Utils.doubleToString(nodeCounts[index], 6, 0)).append(") ").append(this.m_data.attribute(index).name()) .append("\n"); } } } catch (WekaException ex) { // ignore } catch (InterruptedException e) { // restore the interrupt flag rather than swallowing the interrupt Thread.currentThread().interrupt(); } } return buffer.toString(); } /** * Computes the average impurity decrease per attribute over the trees. * * @param nodeCounts an optional array that, if non-null, will hold the count * of the number of nodes at which each attribute was used for * splitting * @return the average impurity decrease per attribute over the trees * @throws WekaException if the model has not been built or attribute * importance has not been computed */ public double[] computeAverageImpurityDecreasePerAttribute(double[] nodeCounts) throws WekaException { if (this.m_Classifiers == null) { throw new WekaException("Classifier has not been built yet!"); } if (!this.getComputeAttributeImportance()) { throw new WekaException("Stats for attribute importance have not " + "been collected!"); } double[] impurityDecreases = new double[this.m_data.numAttributes()]; if (nodeCounts == null) { nodeCounts = new double[this.m_data.numAttributes()]; } for (Classifier c : this.m_Classifiers) { double[][] forClassifier = ((RandomTree) c).getImpurityDecreases(); for (int i = 0; i < this.m_data.numAttributes(); i++) { impurityDecreases[i] += forClassifier[i][0]; nodeCounts[i] += forClassifier[i][1]; } } for (int i = 0; i < this.m_data.numAttributes(); i++) { if (nodeCounts[i] > 0) { impurityDecreases[i] /= nodeCounts[i]; } } return impurityDecreases; } /** * Returns an enumeration describing the available
options. * * @return an enumeration of all the available options */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<Option>(); newVector.addElement(new Option("\tSize of each bag, as a percentage of the\n" + "\ttraining set size. (default 100)", "P", 1, "-P")); newVector.addElement(new Option("\tCalculate the out of bag error.", "O", 0, "-O")); newVector.addElement(new Option("\tWhether to store out of bag predictions in internal evaluation object.", "store-out-of-bag-predictions", 0, "-store-out-of-bag-predictions")); newVector.addElement(new Option("\tWhether to output complexity-based statistics when out-of-bag evaluation is performed.", "output-out-of-bag-complexity-statistics", 0, "-output-out-of-bag-complexity-statistics")); newVector.addElement(new Option("\tPrint the individual classifiers in the output", "print", 0, "-print")); newVector.addElement(new Option("\tCompute and output attribute importance (mean impurity decrease " + "method)", "attribute-importance", 0, "-attribute-importance")); newVector.addElement(new Option("\tNumber of iterations.\n" + "\t(current value " + this.getNumIterations() + ")", "I", 1, "-I <num>")); newVector.addElement(new Option("\tNumber of execution slots.\n" + "\t(default 1 - i.e. no parallelism)\n" + "\t(use 0 to auto-detect number of cores)", "num-slots", 1, "-num-slots <num>")); // Add base classifier options List<Option> list = Collections.list(((OptionHandler) this.getClassifier()).listOptions()); newVector.addAll(list); return newVector.elements(); } /** * Gets the current settings of the forest. * * @return an array of strings suitable for passing to setOptions() */ @Override public String[] getOptions() { Vector<String> result = new Vector<String>(); result.add("-P"); result.add("" + this.getBagSizePercent()); if (this.getCalcOutOfBag()) { result.add("-O"); } if (this.getStoreOutOfBagPredictions()) { result.add("-store-out-of-bag-predictions"); } if (this.getOutputOutOfBagComplexityStatistics()) { result.add("-output-out-of-bag-complexity-statistics"); } if (this.getPrintClassifiers()) { result.add("-print"); } if (this.getComputeAttributeImportance()) { result.add("-attribute-importance"); } result.add("-I"); result.add("" + this.getNumIterations()); result.add("-num-slots"); result.add("" + this.getNumExecutionSlots()); if (this.getDoNotCheckCapabilities()) { result.add("-do-not-check-capabilities"); } // Add base classifier options Vector<String> classifierOptions = new Vector<String>(); Collections.addAll(classifierOptions, ((OptionHandler) this.getClassifier()).getOptions()); Option.deleteFlagString(classifierOptions, "-do-not-check-capabilities"); result.addAll(classifierOptions); return result.toArray(new String[result.size()]); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p> * * <pre> * -P * Size of each bag, as a percentage of the * training set size. (default 100) * </pre> * * <pre> * -O * Calculate the out of bag error. * </pre> * * <pre> * -store-out-of-bag-predictions * Whether to store out of bag predictions in internal evaluation object. * </pre> * * <pre> * -output-out-of-bag-complexity-statistics * Whether to output complexity-based statistics when out-of-bag evaluation is performed. 
* </pre> * * <pre> * -print * Print the individual classifiers in the output * </pre> * * <pre> * -attribute-importance * Compute and output attribute importance (mean impurity decrease method) * </pre> * * <pre> * -I &lt;num&gt; * Number of iterations. * (current value 100) * </pre> * * <pre> * -num-slots &lt;num&gt; * Number of execution slots. * (default 1 - i.e. no parallelism) * (use 0 to auto-detect number of cores) * </pre> * * <pre> * -K &lt;number of attributes&gt; * Number of attributes to randomly investigate. (default 0) * (&lt;1 = int(log_2(#predictors)+1)). * </pre> * * <pre> * -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf. * (default 1) * </pre> * * <pre> * -V &lt;minimum variance for split&gt; * Set minimum numeric class variance proportion * of train variance for split (default 1e-3). * </pre> * * <pre> * -S &lt;num&gt; * Seed for random number generator. * (default 1) * </pre> * * <pre> * -depth &lt;num&gt; * The maximum depth of the tree, 0 for unlimited. * (default 0) * </pre> * * <pre> * -N &lt;num&gt; * Number of folds for backfitting (default 0, no backfitting). * </pre> * * <pre> * -U * Allow unclassified instances. * </pre> * * <pre> * -B * Break ties randomly when several attributes look equally good. * </pre> * * <pre> * -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution). * </pre> * * <pre> * -num-decimal-places * The number of decimal places for the output of numbers in the model (default 2). * </pre> * * <pre> * -batch-size * The desired batch size for batch prediction (default 100). * </pre> * * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String bagSize = Utils.getOption('P', options); if (bagSize.length() != 0) { this.setBagSizePercent(Integer.parseInt(bagSize)); } else { this.setBagSizePercent(100); } this.setCalcOutOfBag(Utils.getFlag('O', options)); this.setStoreOutOfBagPredictions(Utils.getFlag("store-out-of-bag-predictions", options)); this.setOutputOutOfBagComplexityStatistics(Utils.getFlag("output-out-of-bag-complexity-statistics", options)); this.setPrintClassifiers(Utils.getFlag("print", options)); this.setComputeAttributeImportance(Utils.getFlag("attribute-importance", options)); String iterations = Utils.getOption('I', options); if (iterations.length() != 0) { this.setNumIterations(Integer.parseInt(iterations)); } else { this.setNumIterations(this.defaultNumberOfIterations()); } String numSlots = Utils.getOption("num-slots", options); if (numSlots.length() != 0) { this.setNumExecutionSlots(Integer.parseInt(numSlots)); } else { this.setNumExecutionSlots(1); } RandomTree classifier = ((RandomTree) AbstractClassifier.forName(this.defaultClassifierString(), options)); classifier.setComputeImpurityDecreases(this.m_computeAttributeImportance); this.setDoNotCheckCapabilities(classifier.getDoNotCheckCapabilities()); this.setSeed(classifier.getSeed()); this.setDebug(classifier.getDebug()); this.setNumDecimalPlaces(classifier.getNumDecimalPlaces()); this.setBatchSize(classifier.getBatchSize()); classifier.setDoNotCheckCapabilities(true); // Set base classifier and options this.setClassifier(classifier); Utils.checkForRemainingOptions(options); } /** * 
Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for this class. * * @param argv the options */ public static void main(final String[] argv) { runClassifier(new RandomForest(), argv); } }
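A minimal usage sketch for the RandomForest class above, assuming a hypothetical ARFF file "weather.arff" whose last attribute is the class: enabling attribute importance before buildClassifier() makes toString() append the mean-impurity-decrease ranking. import weka.classifiers.trees.RandomForest; import weka.core.Instances; import weka.core.converters.ConverterUtils.DataSource; public class RandomForestUsageSketch { public static void main(String[] args) throws Exception { // Load a dataset; "weather.arff" is a hypothetical path used for illustration Instances data = DataSource.read("weather.arff"); data.setClassIndex(data.numAttributes() - 1); // class is the last attribute RandomForest forest = new RandomForest(); // base classifier is a RandomTree forest.setNumIterations(100); // number of trees (the default) forest.setNumFeatures(0); // 0 => int(log_2(#predictors) + 1) attributes per split forest.setComputeAttributeImportance(true); // collect impurity decreases while building forest.buildClassifier(data); System.out.println(forest); // model text now includes the attribute-importance ranking } }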
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/RandomTree.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RandomTree.java * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees; import java.io.Serializable; import java.util.Collections; import java.util.Enumeration; import java.util.LinkedList; import java.util.Queue; import java.util.Random; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.ContingencyTables; import weka.core.Drawable; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.PartitionGenerator; import weka.core.Randomizable; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.gui.ProgrammaticProperty; /** * <!-- globalinfo-start --> Class for constructing a tree that considers K randomly chosen * attributes at each node. Performs no pruning. Also has an option to allow estimation of class * probabilities (or target mean in the regression case) based on a hold-out set (backfitting). <br> * <br> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p> * * <pre> * -K &lt;number of attributes&gt; * Number of attributes to randomly investigate. (default 0) * (&lt;1 = int(log_2(#predictors)+1)). * </pre> * * <pre> * -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf. * (default 1) * </pre> * * <pre> * -V &lt;minimum variance for split&gt; * Set minimum numeric class variance proportion * of train variance for split (default 1e-3). * </pre> * * <pre> * -S &lt;num&gt; * Seed for random number generator. * (default 1) * </pre> * * <pre> * -depth &lt;num&gt; * The maximum depth of the tree, 0 for unlimited. * (default 0) * </pre> * * <pre> * -N &lt;num&gt; * Number of folds for backfitting (default 0, no backfitting). * </pre> * * <pre> * -U * Allow unclassified instances. * </pre> * * <pre> * -B * Break ties randomly when several attributes look equally good. * </pre> * * <pre> * -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution). * </pre> * * <pre> * -num-decimal-places * The number of decimal places for the output of numbers in the model (default 2). 
* </pre> * * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @version $Revision$ */ public class RandomTree extends AbstractClassifier implements OptionHandler, WeightedInstancesHandler, Randomizable, Drawable, PartitionGenerator { /** for serialization */ private static final long serialVersionUID = -9051119597407396024L; /** The Tree object */ protected Tree m_Tree = null; /** The header information. */ protected Instances m_Info = null; /** Minimum number of instances for leaf. */ protected double m_MinNum = 1.0; /** The number of attributes considered for a split. */ protected int m_KValue = 0; /** The random seed to use. */ protected int m_randomSeed = 1; /** The maximum depth of the tree (0 = unlimited) */ protected int m_MaxDepth = 0; /** Determines how much data is used for backfitting */ protected int m_NumFolds = 0; /** Whether unclassified instances are allowed */ protected boolean m_AllowUnclassifiedInstances = false; /** Whether to break ties randomly. */ protected boolean m_BreakTiesRandomly = false; /** a ZeroR model in case no model can be built from the data */ protected Classifier m_zeroR; /** * The minimum proportion of the total variance (over all the data) required for split. */ protected double m_MinVarianceProp = 1e-3; /** Whether to store the impurity decrease/gain sum */ protected boolean m_computeImpurityDecreases; /** * Indexed by attribute, each two-element array contains impurity decrease/gain sum in first element * and count in the second */ protected double[][] m_impurityDecreasees; /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for constructing a tree that considers K randomly" + " chosen attributes at each node. Performs no pruning. Also has" + " an option to allow estimation of class probabilities (or target mean " + "in the regression case) based on a hold-out set (backfitting)."; } /** * Get the array of impurity decrease/gain sums * * @return the array of impurity decrease/gain sums */ public double[][] getImpurityDecreases() { return this.m_impurityDecreasees; } /** * Set whether to compute/store impurity decreases for variable importance in RandomForest * * @param computeImpurityDecreases * true to compute and store impurity decrease values for splitting attributes */ @ProgrammaticProperty public void setComputeImpurityDecreases(final boolean computeImpurityDecreases) { this.m_computeImpurityDecreases = computeImpurityDecreases; } /** * Get whether to compute/store impurity decreases for variable importance in RandomForest * * @return true to compute and store impurity decrease values for splitting attributes */ public boolean getComputeImpurityDecreases() { return this.m_computeImpurityDecreases; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String minNumTipText() { return "The minimum total weight of the instances in a leaf."; } /** * Get the value of MinNum. * * @return Value of MinNum. */ public double getMinNum() { return this.m_MinNum; } /** * Set the value of MinNum. * * @param newMinNum * Value to assign to MinNum.
*/ public void setMinNum(final double newMinNum) { this.m_MinNum = newMinNum; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String minVariancePropTipText() { return "The minimum proportion of the variance on all the data " + "that needs to be present at a node in order for splitting to " + "be performed in regression trees."; } /** * Get the value of MinVarianceProp. * * @return Value of MinVarianceProp. */ public double getMinVarianceProp() { return this.m_MinVarianceProp; } /** * Set the value of MinVarianceProp. * * @param newMinVarianceProp * Value to assign to MinVarianceProp. */ public void setMinVarianceProp(final double newMinVarianceProp) { this.m_MinVarianceProp = newMinVarianceProp; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String KValueTipText() { return "Sets the number of randomly chosen attributes. If 0, int(log_2(#predictors) + 1) is used."; } /** * Get the value of K. * * @return Value of K. */ public int getKValue() { return this.m_KValue; } /** * Set the value of K. * * @param k * Value to assign to K. */ public void setKValue(final int k) { this.m_KValue = k; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String seedTipText() { return "The random number seed used for selecting attributes."; } /** * Set the seed for random number generation. * * @param seed * the seed */ @Override public void setSeed(final int seed) { this.m_randomSeed = seed; } /** * Gets the seed for the random number generation * * @return the seed for the random number generation */ @Override public int getSeed() { return this.m_randomSeed; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String maxDepthTipText() { return "The maximum depth of the tree, 0 for unlimited."; } /** * Get the maximum depth of the tree, 0 for unlimited. * * @return the maximum depth. */ public int getMaxDepth() { return this.m_MaxDepth; } /** * Set the maximum depth of the tree, 0 for unlimited. * * @param value * the maximum depth. */ public void setMaxDepth(final int value) { this.m_MaxDepth = value; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String numFoldsTipText() { return "Determines the amount of data used for backfitting. One fold is used for " + "backfitting, the rest for growing the tree. (Default: 0, no backfitting)"; } /** * Get the value of NumFolds. * * @return Value of NumFolds. */ public int getNumFolds() { return this.m_NumFolds; } /** * Set the value of NumFolds. * * @param newNumFolds * Value to assign to NumFolds. */ public void setNumFolds(final int newNumFolds) { this.m_NumFolds = newNumFolds; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String allowUnclassifiedInstancesTipText() { return "Whether to allow unclassified instances."; } /** * Gets whether tree is allowed to abstain from making a prediction. * * @return true if tree is allowed to abstain from making a prediction.
*/ public boolean getAllowUnclassifiedInstances() { return this.m_AllowUnclassifiedInstances; } /** * Set the value of AllowUnclassifiedInstances. * * @param newAllowUnclassifiedInstances * true if tree is allowed to abstain from making a prediction */ public void setAllowUnclassifiedInstances(final boolean newAllowUnclassifiedInstances) { this.m_AllowUnclassifiedInstances = newAllowUnclassifiedInstances; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String breakTiesRandomlyTipText() { return "Break ties randomly when several attributes look equally good."; } /** * Get whether to break ties randomly. * * @return true if ties are to be broken randomly. */ public boolean getBreakTiesRandomly() { return this.m_BreakTiesRandomly; } /** * Set whether to break ties randomly. * * @param newBreakTiesRandomly * true if ties are to be broken randomly */ public void setBreakTiesRandomly(final boolean newBreakTiesRandomly) { this.m_BreakTiesRandomly = newBreakTiesRandomly; } /** * Lists the command-line options for this classifier. * * @return an enumeration over all possible options */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<>(); newVector.addElement(new Option("\tNumber of attributes to randomly investigate.\t(default 0)\n" + "\t(<1 = int(log_2(#predictors)+1)).", "K", 1, "-K <number of attributes>")); newVector.addElement(new Option("\tSet minimum number of instances per leaf.\n\t(default 1)", "M", 1, "-M <minimum number of instances>")); newVector.addElement(new Option("\tSet minimum numeric class variance proportion\n" + "\tof train variance for split (default 1e-3).", "V", 1, "-V <minimum variance for split>")); newVector.addElement(new Option("\tSeed for random number generator.\n" + "\t(default 1)", "S", 1, "-S <num>")); newVector.addElement(new Option("\tThe maximum depth of the tree, 0 for unlimited.\n" + "\t(default 0)", "depth", 1, "-depth <num>")); newVector.addElement(new Option("\tNumber of folds for backfitting " + "(default 0, no backfitting).", "N", 1, "-N <num>")); newVector.addElement(new Option("\tAllow unclassified instances.", "U", 0, "-U")); newVector.addElement(new Option("\t" + this.breakTiesRandomlyTipText(), "B", 0, "-B")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Gets options from this classifier. * * @return the options for the current setup */ @Override public String[] getOptions() { Vector<String> result = new Vector<>(); result.add("-K"); result.add("" + this.getKValue()); result.add("-M"); result.add("" + this.getMinNum()); result.add("-V"); result.add("" + this.getMinVarianceProp()); result.add("-S"); result.add("" + this.getSeed()); if (this.getMaxDepth() > 0) { result.add("-depth"); result.add("" + this.getMaxDepth()); } if (this.getNumFolds() > 0) { result.add("-N"); result.add("" + this.getNumFolds()); } if (this.getAllowUnclassifiedInstances()) { result.add("-U"); } if (this.getBreakTiesRandomly()) { result.add("-B"); } Collections.addAll(result, super.getOptions()); return result.toArray(new String[result.size()]); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p> * * <pre> * -K &lt;number of attributes&gt; * Number of attributes to randomly investigate. (default 0) * (&lt;1 = int(log_2(#predictors)+1)). * </pre> * * <pre> * -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf. 
* (default 1) * </pre> * * <pre> * -V &lt;minimum variance for split&gt; * Set minimum numeric class variance proportion * of train variance for split (default 1e-3). * </pre> * * <pre> * -S &lt;num&gt; * Seed for random number generator. * (default 1) * </pre> * * <pre> * -depth &lt;num&gt; * The maximum depth of the tree, 0 for unlimited. * (default 0) * </pre> * * <pre> * -N &lt;num&gt; * Number of folds for backfitting (default 0, no backfitting). * </pre> * * <pre> * -U * Allow unclassified instances. * </pre> * * <pre> * -B * Break ties randomly when several attributes look equally good. * </pre> * * <pre> * -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution). * </pre> * * <pre> * -num-decimal-places * The number of decimal places for the output of numbers in the model (default 2). * </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String tmpStr; tmpStr = Utils.getOption('K', options); if (tmpStr.length() != 0) { this.m_KValue = Integer.parseInt(tmpStr); } else { this.m_KValue = 0; } tmpStr = Utils.getOption('M', options); if (tmpStr.length() != 0) { this.m_MinNum = Double.parseDouble(tmpStr); } else { this.m_MinNum = 1; } String minVarString = Utils.getOption('V', options); if (minVarString.length() != 0) { this.m_MinVarianceProp = Double.parseDouble(minVarString); } else { this.m_MinVarianceProp = 1e-3; } tmpStr = Utils.getOption('S', options); if (tmpStr.length() != 0) { this.setSeed(Integer.parseInt(tmpStr)); } else { this.setSeed(1); } tmpStr = Utils.getOption("depth", options); if (tmpStr.length() != 0) { this.setMaxDepth(Integer.parseInt(tmpStr)); } else { this.setMaxDepth(0); } String numFoldsString = Utils.getOption('N', options); if (numFoldsString.length() != 0) { this.m_NumFolds = Integer.parseInt(numFoldsString); } else { this.m_NumFolds = 0; } this.setAllowUnclassifiedInstances(Utils.getFlag('U', options)); this.setBreakTiesRandomly(Utils.getFlag('B', options)); super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Builds classifier. * * @param data * the data to train with * @throws Exception * if something goes wrong or the data doesn't fit */ @Override public void buildClassifier(Instances data) throws Exception { if (this.m_computeImpurityDecreases) { this.m_impurityDecreasees = new double[data.numAttributes()][2]; } // Make sure K value is in range if (this.m_KValue > data.numAttributes() - 1) { this.m_KValue = data.numAttributes() - 1; } if (this.m_KValue < 1) { this.m_KValue = (int) Utils.log2(data.numAttributes() - 1) + 1; } // can classifier handle the data? 
this.getCapabilities().testWithFail(data); // remove instances with missing class data = new Instances(data); data.deleteWithMissingClass(); // only class? -> build ZeroR model if (data.numAttributes() == 1) { System.err.println("Cannot build model (only class attribute present in data!), " + "using ZeroR model instead!"); this.m_zeroR = new weka.classifiers.rules.ZeroR(); this.m_zeroR.buildClassifier(data); return; } else { this.m_zeroR = null; } // Figure out appropriate datasets Instances train = null; Instances backfit = null; Random rand = data.getRandomNumberGenerator(this.m_randomSeed); if (this.m_NumFolds <= 0) { train = data; } else { data.randomize(rand); data.stratify(this.m_NumFolds); train = data.trainCV(this.m_NumFolds, 1, rand); backfit = data.testCV(this.m_NumFolds, 1); } // Create the attribute indices window int[] attIndicesWindow = new int[data.numAttributes() - 1]; int j = 0; for (int i = 0; i < attIndicesWindow.length; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (j == data.classIndex()) { j++; // do not include the class } attIndicesWindow[i] = j++; } double totalWeight = 0; double totalSumSquared = 0; // Compute initial class counts double[] classProbs = new double[train.numClasses()]; for (int i = 0; i < train.numInstances(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } Instance inst = train.instance(i); if (data.classAttribute().isNominal()) { classProbs[(int) inst.classValue()] += inst.weight(); totalWeight += inst.weight(); } else { classProbs[0] += inst.classValue() * inst.weight(); totalSumSquared += inst.classValue() * inst.classValue() * inst.weight(); totalWeight += inst.weight(); } } double trainVariance = 0; if (data.classAttribute().isNumeric()) { trainVariance = RandomTree.singleVariance(classProbs[0], totalSumSquared, totalWeight) / totalWeight; classProbs[0] /= totalWeight; } // Build tree this.m_Tree = new Tree(); this.m_Info = new Instances(data, 0); this.m_Tree.buildTree(train, classProbs, attIndicesWindow, totalWeight, rand, 0, this.m_MinVarianceProp * trainVariance); // Backfit if required if (backfit != null) { this.m_Tree.backfitData(backfit); } } /** * Computes class distribution of an instance using the tree. * * @param instance * the instance to compute the distribution for * @return the computed class probabilities * @throws Exception * if computation fails */ @Override public double[] distributionForInstance(final Instance instance) throws Exception { if (this.m_zeroR != null) { return this.m_zeroR.distributionForInstance(instance); } else { return this.m_Tree.distributionForInstance(instance); } } /** * Outputs the decision tree. * * @return a string representation of the classifier */ @Override public String toString() { // only ZeroR model? if (this.m_zeroR != null) { StringBuffer buf = new StringBuffer(); buf.append(this.getClass().getName().replaceAll(".*\\.", "") + "\n"); buf.append(this.getClass().getName().replaceAll(".*\\.", "").replaceAll(".", "=") + "\n\n"); buf.append("Warning: No model could be built, hence ZeroR model is used:\n\n"); buf.append(this.m_zeroR.toString()); return buf.toString(); } if (this.m_Tree == null) { return "RandomTree: no model has been built yet."; } else { return "\nRandomTree\n==========\n" + this.m_Tree.toString(0) + "\n" + "\nSize of the tree : " + this.m_Tree.numNodes() + (this.getMaxDepth() > 0 ? 
("\nMax depth of tree: " + this.getMaxDepth()) : ("")); } } /** * Returns graph describing the tree. * * @return the graph describing the tree * @throws Exception * if graph can't be computed */ @Override public String graph() throws Exception { if (this.m_Tree == null) { throw new Exception("RandomTree: No model built yet."); } StringBuffer resultBuff = new StringBuffer(); this.m_Tree.toGraph(resultBuff, 0, null); String result = "digraph RandomTree {\n" + "edge [style=bold]\n" + resultBuff.toString() + "\n}\n"; return result; } /** * Returns the type of graph this classifier represents. * * @return Drawable.TREE */ @Override public int graphType() { return Drawable.TREE; } /** * Builds the classifier to generate a partition. */ @Override public void generatePartition(final Instances data) throws Exception { this.buildClassifier(data); } /** * Computes array that indicates node membership. Array locations are allocated based on * breadth-first exploration of the tree. */ @Override public double[] getMembershipValues(final Instance instance) throws Exception { if (this.m_zeroR != null) { double[] m = new double[1]; m[0] = instance.weight(); return m; } else { // Set up array for membership values double[] a = new double[this.numElements()]; // Initialize queues Queue<Double> queueOfWeights = new LinkedList<>(); Queue<Tree> queueOfNodes = new LinkedList<>(); queueOfWeights.add(instance.weight()); queueOfNodes.add(this.m_Tree); int index = 0; // While the queue is not empty while (!queueOfNodes.isEmpty()) { a[index++] = queueOfWeights.poll(); Tree node = queueOfNodes.poll(); // Is node a leaf? if (node.m_Attribute <= -1) { continue; } // Compute weight distribution double[] weights = new double[node.m_Successors.length]; if (instance.isMissing(node.m_Attribute)) { System.arraycopy(node.m_Prop, 0, weights, 0, node.m_Prop.length); } else if (this.m_Info.attribute(node.m_Attribute).isNominal()) { weights[(int) instance.value(node.m_Attribute)] = 1.0; } else { if (instance.value(node.m_Attribute) < node.m_SplitPoint) { weights[0] = 1.0; } else { weights[1] = 1.0; } } for (int i = 0; i < node.m_Successors.length; i++) { queueOfNodes.add(node.m_Successors[i]); queueOfWeights.add(a[index - 1] * weights[i]); } } return a; } } /** * Returns the number of elements in the partition. */ @Override public int numElements() throws Exception { if (this.m_zeroR != null) { return 1; } return this.m_Tree.numNodes(); } /** * The inner class for dealing with the tree. */ public class Tree implements Serializable { /** For serialization */ private static final long serialVersionUID = 3549573538656522569L; /** The subtrees appended to this tree. */ protected Tree[] m_Successors; /** The attribute to split on. */ protected int m_Attribute = -1; /** The split point. */ protected double m_SplitPoint = Double.NaN; /** The proportions of training instances going down each branch. */ protected double[] m_Prop = null; /** * Class probabilities from the training data in the nominal case. Holds the mean in the numeric * case. */ protected double[] m_ClassDistribution = null; /** * Holds the sum of squared errors and the weight in the numeric case. */ protected double[] m_Distribution = null; /** * Backfits the given data into the tree. 
*/ public void backfitData(final Instances data) throws Exception { double totalWeight = 0; double totalSumSquared = 0; // Compute initial class counts double[] classProbs = new double[data.numClasses()]; for (int i = 0; i < data.numInstances(); i++) { Instance inst = data.instance(i); if (data.classAttribute().isNominal()) { classProbs[(int) inst.classValue()] += inst.weight(); totalWeight += inst.weight(); } else { classProbs[0] += inst.classValue() * inst.weight(); totalSumSquared += inst.classValue() * inst.classValue() * inst.weight(); totalWeight += inst.weight(); } } double trainVariance = 0; if (data.classAttribute().isNumeric()) { trainVariance = RandomTree.singleVariance(classProbs[0], totalSumSquared, totalWeight) / totalWeight; classProbs[0] /= totalWeight; } // Fit data into tree this.backfitData(data, classProbs, totalWeight); } /** * Computes class distribution of an instance using the decision tree. * * @param instance * the instance to compute the distribution for * @return the computed class distribution * @throws Exception * if computation fails */ public double[] distributionForInstance(final Instance instance) throws Exception { double[] returnedDist = null; if (this.m_Attribute > -1) { // Node is not a leaf if (instance.isMissing(this.m_Attribute)) { // Value is missing returnedDist = new double[RandomTree.this.m_Info.numClasses()]; // Split instance up for (int i = 0; i < this.m_Successors.length; i++) { double[] help = this.m_Successors[i].distributionForInstance(instance); if (help != null) { for (int j = 0; j < help.length; j++) { returnedDist[j] += this.m_Prop[i] * help[j]; } } } } else if (RandomTree.this.m_Info.attribute(this.m_Attribute).isNominal()) { // For nominal attributes returnedDist = this.m_Successors[(int) instance.value(this.m_Attribute)].distributionForInstance(instance); } else { // For numeric attributes if (instance.value(this.m_Attribute) < this.m_SplitPoint) { returnedDist = this.m_Successors[0].distributionForInstance(instance); } else { returnedDist = this.m_Successors[1].distributionForInstance(instance); } } } // Node is a leaf or successor is empty? if ((this.m_Attribute == -1) || (returnedDist == null)) { // Is node empty? if (this.m_ClassDistribution == null) { if (RandomTree.this.getAllowUnclassifiedInstances()) { double[] result = new double[RandomTree.this.m_Info.numClasses()]; if (RandomTree.this.m_Info.classAttribute().isNumeric()) { result[0] = Utils.missingValue(); } return result; } else { return null; } } // Else return normalized distribution double[] normalizedDistribution = this.m_ClassDistribution.clone(); if (RandomTree.this.m_Info.classAttribute().isNominal()) { Utils.normalize(normalizedDistribution); } return normalizedDistribution; } else { return returnedDist; } } /** * Outputs one node for graph. * * @param text * the buffer to append the output to * @param num * unique node id * @return the next node id * @throws Exception * if generation fails */ public int toGraph(final StringBuffer text, int num) throws Exception { int maxIndex = Utils.maxIndex(this.m_ClassDistribution); String classValue = RandomTree.this.m_Info.classAttribute().isNominal() ? 
RandomTree.this.m_Info.classAttribute().value(maxIndex) : Utils.doubleToString(this.m_ClassDistribution[0], RandomTree.this.getNumDecimalPlaces()); num++; if (this.m_Attribute == -1) { text.append("N" + Integer.toHexString(this.hashCode()) + " [label=\"" + num + ": " + classValue + "\"" + "shape=box]\n"); } else { text.append("N" + Integer.toHexString(this.hashCode()) + " [label=\"" + num + ": " + classValue + "\"]\n"); for (int i = 0; i < this.m_Successors.length; i++) { text.append("N" + Integer.toHexString(this.hashCode()) + "->" + "N" + Integer.toHexString(this.m_Successors[i].hashCode()) + " [label=\"" + RandomTree.this.m_Info.attribute(this.m_Attribute).name()); if (RandomTree.this.m_Info.attribute(this.m_Attribute).isNumeric()) { if (i == 0) { text.append(" < " + Utils.doubleToString(this.m_SplitPoint, RandomTree.this.getNumDecimalPlaces())); } else { text.append(" >= " + Utils.doubleToString(this.m_SplitPoint, RandomTree.this.getNumDecimalPlaces())); } } else { text.append(" = " + RandomTree.this.m_Info.attribute(this.m_Attribute).value(i)); } text.append("\"]\n"); num = this.m_Successors[i].toGraph(text, num); } } return num; } /** * Outputs a leaf. * * @return the leaf as string * @throws Exception * if generation fails */ protected String leafString() throws Exception { double sum = 0, maxCount = 0; int maxIndex = 0; double classMean = 0; double avgError = 0; if (this.m_ClassDistribution != null) { if (RandomTree.this.m_Info.classAttribute().isNominal()) { sum = Utils.sum(this.m_ClassDistribution); maxIndex = Utils.maxIndex(this.m_ClassDistribution); maxCount = this.m_ClassDistribution[maxIndex]; } else { classMean = this.m_ClassDistribution[0]; if (this.m_Distribution[1] > 0) { avgError = this.m_Distribution[0] / this.m_Distribution[1]; } } } if (RandomTree.this.m_Info.classAttribute().isNumeric()) { return " : " + Utils.doubleToString(classMean, RandomTree.this.getNumDecimalPlaces()) + " (" + Utils.doubleToString(this.m_Distribution[1], RandomTree.this.getNumDecimalPlaces()) + "/" + Utils.doubleToString(avgError, RandomTree.this.getNumDecimalPlaces()) + ")"; } return " : " + RandomTree.this.m_Info.classAttribute().value(maxIndex) + " (" + Utils.doubleToString(sum, RandomTree.this.getNumDecimalPlaces()) + "/" + Utils.doubleToString(sum - maxCount, RandomTree.this.getNumDecimalPlaces()) + ")"; } /** * Recursively outputs the tree. 
* * @param level * the current level of the tree * @return the generated subtree */ protected String toString(final int level) { try { StringBuffer text = new StringBuffer(); if (this.m_Attribute == -1) { // Output leaf info return this.leafString(); } else if (RandomTree.this.m_Info.attribute(this.m_Attribute).isNominal()) { // For nominal attributes for (int i = 0; i < this.m_Successors.length; i++) { text.append("\n"); for (int j = 0; j < level; j++) { text.append("| "); } text.append(RandomTree.this.m_Info.attribute(this.m_Attribute).name() + " = " + RandomTree.this.m_Info.attribute(this.m_Attribute).value(i)); text.append(this.m_Successors[i].toString(level + 1)); } } else { // For numeric attributes text.append("\n"); for (int j = 0; j < level; j++) { text.append("| "); } text.append(RandomTree.this.m_Info.attribute(this.m_Attribute).name() + " < " + Utils.doubleToString(this.m_SplitPoint, RandomTree.this.getNumDecimalPlaces())); text.append(this.m_Successors[0].toString(level + 1)); text.append("\n"); for (int j = 0; j < level; j++) { text.append("| "); } text.append(RandomTree.this.m_Info.attribute(this.m_Attribute).name() + " >= " + Utils.doubleToString(this.m_SplitPoint, RandomTree.this.getNumDecimalPlaces())); text.append(this.m_Successors[1].toString(level + 1)); } return text.toString(); } catch (Exception e) { e.printStackTrace(); return "RandomTree: tree can't be printed"; } } /** * Recursively backfits data into the tree. * * @param data * the data to work with * @param classProbs * the class distribution * @throws Exception * if generation fails */ protected void backfitData(final Instances data, final double[] classProbs, final double totalWeight) throws Exception { // Make leaf if there are no training instances if (data.numInstances() == 0) { this.m_Attribute = -1; this.m_ClassDistribution = null; if (data.classAttribute().isNumeric()) { this.m_Distribution = new double[2]; } this.m_Prop = null; return; } double priorVar = 0; if (data.classAttribute().isNumeric()) { // Compute prior variance double totalSum = 0, totalSumSquared = 0, totalSumOfWeights = 0; for (int i = 0; i < data.numInstances(); i++) { Instance inst = data.instance(i); totalSum += inst.classValue() * inst.weight(); totalSumSquared += inst.classValue() * inst.classValue() * inst.weight(); totalSumOfWeights += inst.weight(); } priorVar = RandomTree.singleVariance(totalSum, totalSumSquared, totalSumOfWeights); } // Check if node doesn't contain enough instances or is pure // or maximum depth reached this.m_ClassDistribution = classProbs.clone(); /* * if (Utils.sum(m_ClassDistribution) < 2 * m_MinNum || * Utils.eq(m_ClassDistribution[Utils.maxIndex(m_ClassDistribution)], Utils * .sum(m_ClassDistribution))) { * * // Make leaf m_Attribute = -1; m_Prop = null; return; } */ // Are we at an inner node if (this.m_Attribute > -1) { // Compute new weights for subsets based on backfit data this.m_Prop = new double[this.m_Successors.length]; for (int i = 0; i < data.numInstances(); i++) { Instance inst = data.instance(i); if (!inst.isMissing(this.m_Attribute)) { if (data.attribute(this.m_Attribute).isNominal()) { this.m_Prop[(int) inst.value(this.m_Attribute)] += inst.weight(); } else { this.m_Prop[(inst.value(this.m_Attribute) < this.m_SplitPoint) ? 
0 : 1] += inst.weight(); } } } // If we only have missing values we can make this node into a leaf if (Utils.sum(this.m_Prop) <= 0) { this.m_Attribute = -1; this.m_Prop = null; if (data.classAttribute().isNumeric()) { this.m_Distribution = new double[2]; this.m_Distribution[0] = priorVar; this.m_Distribution[1] = totalWeight; } return; } // Otherwise normalize the proportions Utils.normalize(this.m_Prop); // Split data Instances[] subsets = this.splitData(data); // Go through subsets for (int i = 0; i < subsets.length; i++) { // Compute distribution for current subset double[] dist = new double[data.numClasses()]; double sumOfWeights = 0; for (int j = 0; j < subsets[i].numInstances(); j++) { if (data.classAttribute().isNominal()) { dist[(int) subsets[i].instance(j).classValue()] += subsets[i].instance(j).weight(); } else { dist[0] += subsets[i].instance(j).classValue() * subsets[i].instance(j).weight(); sumOfWeights += subsets[i].instance(j).weight(); } } if (sumOfWeights > 0) { dist[0] /= sumOfWeights; } // Backfit subset this.m_Successors[i].backfitData(subsets[i], dist, totalWeight); } // If unclassified instances are allowed, we don't need to store the // class distribution if (RandomTree.this.getAllowUnclassifiedInstances()) { this.m_ClassDistribution = null; return; } for (int i = 0; i < subsets.length; i++) { if (this.m_Successors[i].m_ClassDistribution == null) { return; } } this.m_ClassDistribution = null; // If we have at least two non-empty successors, we should keep this tree /* * int nonEmptySuccessors = 0; for (int i = 0; i < subsets.length; i++) { if * (m_Successors[i].m_ClassDistribution != null) { nonEmptySuccessors++; if (nonEmptySuccessors > 1) * { return; } } } * * // Otherwise, this node is a leaf or should become a leaf m_Successors = null; m_Attribute = -1; * m_Prop = null; return; */ } } /** * Recursively generates a tree.
* * @param data * the data to work with * @param classProbs * the class distribution * @param attIndicesWindow * the attribute window to choose attributes from * @param random * random number generator for choosing random attributes * @param depth * the current depth * @throws Exception * if generation fails */ protected void buildTree(final Instances data, final double[] classProbs, final int[] attIndicesWindow, double totalWeight, final Random random, final int depth, final double minVariance) throws Exception { // Make leaf if there are no training instances if (data.numInstances() == 0) { this.m_Attribute = -1; this.m_ClassDistribution = null; this.m_Prop = null; if (data.classAttribute().isNumeric()) { this.m_Distribution = new double[2]; } return; } double priorVar = 0; if (data.classAttribute().isNumeric()) { // Compute prior variance double totalSum = 0, totalSumSquared = 0, totalSumOfWeights = 0; for (int i = 0; i < data.numInstances(); i++) { Instance inst = data.instance(i); totalSum += inst.classValue() * inst.weight(); totalSumSquared += inst.classValue() * inst.classValue() * inst.weight(); totalSumOfWeights += inst.weight(); } priorVar = RandomTree.singleVariance(totalSum, totalSumSquared, totalSumOfWeights); } // Check if node doesn't contain enough instances or is pure // or maximum depth reached if (data.classAttribute().isNominal()) { totalWeight = Utils.sum(classProbs); } // System.err.println("Total weight " + totalWeight); // double sum = Utils.sum(classProbs); if (totalWeight < 2 * RandomTree.this.m_MinNum || // Nominal case (data.classAttribute().isNominal() && Utils.eq(classProbs[Utils.maxIndex(classProbs)], Utils.sum(classProbs))) || // Numeric case (data.classAttribute().isNumeric() && priorVar / totalWeight < minVariance) || // check tree depth ((RandomTree.this.getMaxDepth() > 0) && (depth >= RandomTree.this.getMaxDepth()))) { // Make leaf this.m_Attribute = -1; this.m_ClassDistribution = classProbs.clone(); if (data.classAttribute().isNumeric()) { this.m_Distribution = new double[2]; this.m_Distribution[0] = priorVar; this.m_Distribution[1] = totalWeight; } this.m_Prop = null; return; } // Compute class distributions and value of splitting // criterion for each attribute double val = -Double.MAX_VALUE; double split = -Double.MAX_VALUE; double[][] bestDists = null; double[] bestProps = null; int bestIndex = 0; // Handles to get arrays out of distribution method double[][] props = new double[1][0]; double[][][] dists = new double[1][0][0]; double[][] totalSubsetWeights = new double[data.numAttributes()][0]; // Investigate K random attributes int attIndex = 0; int windowSize = attIndicesWindow.length; int k = RandomTree.this.m_KValue; boolean gainFound = false; double[] tempNumericVals = new double[data.numAttributes()]; while ((windowSize > 0) && (k-- > 0 || !gainFound)) { int chosenIndex = random.nextInt(windowSize); attIndex = attIndicesWindow[chosenIndex]; // shift chosen attIndex out of window attIndicesWindow[chosenIndex] = attIndicesWindow[windowSize - 1]; attIndicesWindow[windowSize - 1] = attIndex; windowSize--; double currSplit = data.classAttribute().isNominal() ? this.distribution(props, dists, attIndex, data) : this.numericDistribution(props, dists, attIndex, totalSubsetWeights, data, tempNumericVals); double currVal = data.classAttribute().isNominal() ? 
this.gain(dists[0], this.priorVal(dists[0])) : tempNumericVals[attIndex]; if (Utils.gr(currVal, 0)) { gainFound = true; } if ((currVal > val) || ((!RandomTree.this.getBreakTiesRandomly()) && (currVal == val) && (attIndex < bestIndex))) { val = currVal; bestIndex = attIndex; split = currSplit; bestProps = props[0]; bestDists = dists[0]; } } // Find best attribute this.m_Attribute = bestIndex; // Any useful split found? if (Utils.gr(val, 0)) { if (RandomTree.this.m_computeImpurityDecreases) { RandomTree.this.m_impurityDecreasees[this.m_Attribute][0] += val; RandomTree.this.m_impurityDecreasees[this.m_Attribute][1]++; } // Build subtrees this.m_SplitPoint = split; this.m_Prop = bestProps; Instances[] subsets = this.splitData(data); this.m_Successors = new Tree[bestDists.length]; double[] attTotalSubsetWeights = totalSubsetWeights[bestIndex]; for (int i = 0; i < bestDists.length; i++) { this.m_Successors[i] = new Tree(); this.m_Successors[i].buildTree(subsets[i], bestDists[i], attIndicesWindow, data.classAttribute().isNominal() ? 0 : attTotalSubsetWeights[i], random, depth + 1, minVariance); } // If all successors are non-empty, we don't need to store the class // distribution boolean emptySuccessor = false; for (int i = 0; i < subsets.length; i++) { if (this.m_Successors[i].m_ClassDistribution == null) { emptySuccessor = true; break; } } if (emptySuccessor) { this.m_ClassDistribution = classProbs.clone(); } } else { // Make leaf this.m_Attribute = -1; this.m_ClassDistribution = classProbs.clone(); if (data.classAttribute().isNumeric()) { this.m_Distribution = new double[2]; this.m_Distribution[0] = priorVar; this.m_Distribution[1] = totalWeight; } } } /** * Computes size of the tree. * * @return the number of nodes */ public int numNodes() { if (this.m_Attribute == -1) { return 1; } else { int size = 1; for (Tree m_Successor : this.m_Successors) { size += m_Successor.numNodes(); } return size; } } /** * Splits instances into subsets based on the given split. * * @param data * the data to work with * @return the subsets of instances * @throws Exception * if something goes wrong */ protected Instances[] splitData(final Instances data) throws Exception { // Allocate array of Instances objects Instances[] subsets = new Instances[this.m_Prop.length]; for (int i = 0; i < this.m_Prop.length; i++) { subsets[i] = new Instances(data, data.numInstances()); } // Go through the data for (int i = 0; i < data.numInstances(); i++) { // Get instance Instance inst = data.instance(i); // Does the instance have a missing value? if (inst.isMissing(this.m_Attribute)) { // Split instance up for (int k = 0; k < this.m_Prop.length; k++) { if (this.m_Prop[k] > 0) { Instance copy = (Instance) inst.copy(); copy.setWeight(this.m_Prop[k] * inst.weight()); subsets[k].add(copy); } } // Proceed to next instance continue; } // Do we have a nominal attribute? if (data.attribute(this.m_Attribute).isNominal()) { subsets[(int) inst.value(this.m_Attribute)].add(inst); // Proceed to next instance continue; } // Do we have a numeric attribute? if (data.attribute(this.m_Attribute).isNumeric()) { subsets[(inst.value(this.m_Attribute) < this.m_SplitPoint) ? 
0 : 1].add(inst); // Proceed to next instance continue; } // Else throw an exception throw new IllegalArgumentException("Unknown attribute type"); } // Save memory for (int i = 0; i < this.m_Prop.length; i++) { subsets[i].compactify(); } // Return the subsets return subsets; } /** * Computes numeric class distribution for an attribute * * @param props * @param dists * @param att * @param subsetWeights * @param data * @param vals * @return * @throws Exception * if a problem occurs */ protected double numericDistribution(final double[][] props, final double[][][] dists, final int att, final double[][] subsetWeights, final Instances data, final double[] vals) throws Exception { double splitPoint = Double.NaN; Attribute attribute = data.attribute(att); double[][] dist = null; double[] sums = null; double[] sumSquared = null; double[] sumOfWeights = null; double totalSum = 0, totalSumSquared = 0, totalSumOfWeights = 0; int indexOfFirstMissingValue = data.numInstances(); if (attribute.isNominal()) { sums = new double[attribute.numValues()]; sumSquared = new double[attribute.numValues()]; sumOfWeights = new double[attribute.numValues()]; int attVal; for (int i = 0; i < data.numInstances(); i++) { Instance inst = data.instance(i); if (inst.isMissing(att)) { // Skip missing values at this stage if (indexOfFirstMissingValue == data.numInstances()) { indexOfFirstMissingValue = i; } continue; } attVal = (int) inst.value(att); sums[attVal] += inst.classValue() * inst.weight(); sumSquared[attVal] += inst.classValue() * inst.classValue() * inst.weight(); sumOfWeights[attVal] += inst.weight(); } totalSum = Utils.sum(sums); totalSumSquared = Utils.sum(sumSquared); totalSumOfWeights = Utils.sum(sumOfWeights); } else { // For numeric attributes sums = new double[2]; sumSquared = new double[2]; sumOfWeights = new double[2]; double[] currSums = new double[2]; double[] currSumSquared = new double[2]; double[] currSumOfWeights = new double[2]; // Sort data data.sort(att); // Move all instances into second subset for (int j = 0; j < data.numInstances(); j++) { Instance inst = data.instance(j); if (inst.isMissing(att)) { // Can stop as soon as we hit a missing value indexOfFirstMissingValue = j; break; } currSums[1] += inst.classValue() * inst.weight(); currSumSquared[1] += inst.classValue() * inst.classValue() * inst.weight(); currSumOfWeights[1] += inst.weight(); } totalSum = currSums[1]; totalSumSquared = currSumSquared[1]; totalSumOfWeights = currSumOfWeights[1]; sums[1] = currSums[1]; sumSquared[1] = currSumSquared[1]; sumOfWeights[1] = currSumOfWeights[1]; // Try all possible split points double currSplit = data.instance(0).value(att); double currVal, bestVal = Double.MAX_VALUE; for (int i = 0; i < indexOfFirstMissingValue; i++) { Instance inst = data.instance(i); if (inst.value(att) > currSplit) { currVal = RandomTree.variance(currSums, currSumSquared, currSumOfWeights); if (currVal < bestVal) { bestVal = currVal; splitPoint = (inst.value(att) + currSplit) / 2.0; // Check for numeric precision problems if (splitPoint <= currSplit) { splitPoint = inst.value(att); } for (int j = 0; j < 2; j++) { sums[j] = currSums[j]; sumSquared[j] = currSumSquared[j]; sumOfWeights[j] = currSumOfWeights[j]; } } } currSplit = inst.value(att); double classVal = inst.classValue() * inst.weight(); double classValSquared = inst.classValue() * classVal; currSums[0] += classVal; currSumSquared[0] += classValSquared; currSumOfWeights[0] += inst.weight(); currSums[1] -= classVal; currSumSquared[1] -= classValSquared; 
currSumOfWeights[1] -= inst.weight(); } } // Compute weights props[0] = new double[sums.length]; for (int k = 0; k < props[0].length; k++) { props[0][k] = sumOfWeights[k]; } if (!(Utils.sum(props[0]) > 0)) { for (int k = 0; k < props[0].length; k++) { props[0][k] = 1.0 / props[0].length; } } else { Utils.normalize(props[0]); } // Distribute weights for instances with missing values for (int i = indexOfFirstMissingValue; i < data.numInstances(); i++) { Instance inst = data.instance(i); for (int j = 0; j < sums.length; j++) { sums[j] += props[0][j] * inst.classValue() * inst.weight(); sumSquared[j] += props[0][j] * inst.classValue() * inst.classValue() * inst.weight(); sumOfWeights[j] += props[0][j] * inst.weight(); } totalSum += inst.classValue() * inst.weight(); totalSumSquared += inst.classValue() * inst.classValue() * inst.weight(); totalSumOfWeights += inst.weight(); } // Compute final distribution dist = new double[sums.length][data.numClasses()]; for (int j = 0; j < sums.length; j++) { if (sumOfWeights[j] > 0) { dist[j][0] = sums[j] / sumOfWeights[j]; } else { dist[j][0] = totalSum / totalSumOfWeights; } } // Compute variance gain double priorVar = singleVariance(totalSum, totalSumSquared, totalSumOfWeights); double var = variance(sums, sumSquared, sumOfWeights); double gain = priorVar - var; // Return distribution and split point subsetWeights[att] = sumOfWeights; dists[0] = dist; vals[att] = gain; return splitPoint; } /** * Computes class distribution for an attribute. * * @param props * @param dists * @param att * the attribute index * @param data * the data to work with * @throws Exception * if something goes wrong */ protected double distribution(final double[][] props, final double[][][] dists, final int att, final Instances data) throws Exception { double splitPoint = Double.NaN; Attribute attribute = data.attribute(att); double[][] dist = null; int indexOfFirstMissingValue = data.numInstances(); if (attribute.isNominal()) { // For nominal attributes dist = new double[attribute.numValues()][data.numClasses()]; for (int i = 0; i < data.numInstances(); i++) { Instance inst = data.instance(i); if (inst.isMissing(att)) { // Skip missing values at this stage if (indexOfFirstMissingValue == data.numInstances()) { indexOfFirstMissingValue = i; } continue; } dist[(int) inst.value(att)][(int) inst.classValue()] += inst.weight(); } } else { // For numeric attributes double[][] currDist = new double[2][data.numClasses()]; dist = new double[2][data.numClasses()]; // Sort data data.sort(att); // Move all instances into second subset for (int j = 0; j < data.numInstances(); j++) { Instance inst = data.instance(j); if (inst.isMissing(att)) { // Can stop as soon as we hit a missing value indexOfFirstMissingValue = j; break; } currDist[1][(int) inst.classValue()] += inst.weight(); } // Value before splitting double priorVal = this.priorVal(currDist); // Save initial distribution for (int j = 0; j < currDist.length; j++) { System.arraycopy(currDist[j], 0, dist[j], 0, dist[j].length); } // Try all possible split points double currSplit = data.instance(0).value(att); double currVal, bestVal = -Double.MAX_VALUE; for (int i = 0; i < indexOfFirstMissingValue; i++) { Instance inst = data.instance(i); double attVal = inst.value(att); // Can we place a sensible split point here? if (attVal > currSplit) { // Compute gain for split point currVal = this.gain(currDist, priorVal); // Is the current split point the best point so far? 
if (currVal > bestVal) { // Store value of current point bestVal = currVal; // Save split point splitPoint = (attVal + currSplit) / 2.0; // Check for numeric precision problems if (splitPoint <= currSplit) { splitPoint = attVal; } // Save distribution for (int j = 0; j < currDist.length; j++) { System.arraycopy(currDist[j], 0, dist[j], 0, dist[j].length); } } // Update value currSplit = attVal; } // Shift over the weight int classVal = (int) inst.classValue(); currDist[0][classVal] += inst.weight(); currDist[1][classVal] -= inst.weight(); } } // Compute weights for subsets props[0] = new double[dist.length]; for (int k = 0; k < props[0].length; k++) { props[0][k] = Utils.sum(dist[k]); } if (Utils.eq(Utils.sum(props[0]), 0)) { for (int k = 0; k < props[0].length; k++) { props[0][k] = 1.0 / props[0].length; } } else { Utils.normalize(props[0]); } // Distribute weights for instances with missing values for (int i = indexOfFirstMissingValue; i < data.numInstances(); i++) { Instance inst = data.instance(i); if (attribute.isNominal()) { // Need to check if attribute value is missing if (inst.isMissing(att)) { for (int j = 0; j < dist.length; j++) { dist[j][(int) inst.classValue()] += props[0][j] * inst.weight(); } } } else { // Can be sure that value is missing, so no test required for (int j = 0; j < dist.length; j++) { dist[j][(int) inst.classValue()] += props[0][j] * inst.weight(); } } } // Return distribution and split point dists[0] = dist; return splitPoint; } /** * Computes value of splitting criterion before split. * * @param dist * the distributions * @return the splitting criterion * @throws InterruptedException */ protected double priorVal(final double[][] dist) throws InterruptedException { return ContingencyTables.entropyOverColumns(dist); } /** * Computes value of splitting criterion after split. * * @param dist * the distributions * @param priorVal * the splitting criterion * @return the gain after the split */ protected double gain(final double[][] dist, final double priorVal) { return priorVal - ContingencyTables.entropyConditionedOnRows(dist); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Outputs one node for graph. 
* * @param text * the buffer to append the output to * @param num * the current node id * @param parent * the parent of the nodes * @return the next node id * @throws Exception * if something goes wrong */ protected int toGraph(final StringBuffer text, int num, final Tree parent) throws Exception { num++; if (this.m_Attribute == -1) { text.append("N" + Integer.toHexString(Tree.this.hashCode()) + " [label=\"" + num + Utils.backQuoteChars(this.leafString()) + "\"" + " shape=box]\n"); } else { text.append("N" + Integer.toHexString(Tree.this.hashCode()) + " [label=\"" + num + ": " + Utils.backQuoteChars(RandomTree.this.m_Info.attribute(this.m_Attribute).name()) + "\"]\n"); for (int i = 0; i < this.m_Successors.length; i++) { text.append("N" + Integer.toHexString(Tree.this.hashCode()) + "->" + "N" + Integer.toHexString(this.m_Successors[i].hashCode()) + " [label=\""); if (RandomTree.this.m_Info.attribute(this.m_Attribute).isNumeric()) { if (i == 0) { text.append(" < " + Utils.doubleToString(this.m_SplitPoint, RandomTree.this.getNumDecimalPlaces())); } else { text.append(" >= " + Utils.doubleToString(this.m_SplitPoint, RandomTree.this.getNumDecimalPlaces())); } } else { text.append(" = " + Utils.backQuoteChars(RandomTree.this.m_Info.attribute(this.m_Attribute).value(i))); } text.append("\"]\n"); num = this.m_Successors[i].toGraph(text, num, this); } } return num; } /** * Get the successor subtrees of this tree. * * @return the subtrees */ public Tree[] getM_Successors() { return this.m_Successors; } /** * Get the attribute this tree splits by. * * @return the attribute index */ public int getM_Attribute() { return this.m_Attribute; } /** * Get the split point for the attribute (relevant only if it is numeric). If the attribute value is strictly smaller * than the split point, the relevant successor is the first successor, otherwise it is the second successor. * * @return the split point */ public double getM_SplitPoint() { return this.m_SplitPoint; } /** * Gets the class distribution for the last Instances object * * @return the array that captures the likelihood of each class */ public double[] getM_Classdistribution() { return this.m_ClassDistribution; } } /** * Computes variance for subsets. * * @param s * @param sS * @param sumOfWeights * @return the variance */ protected static double variance(final double[] s, final double[] sS, final double[] sumOfWeights) { double var = 0; for (int i = 0; i < s.length; i++) { if (sumOfWeights[i] > 0) { var += singleVariance(s[i], sS[i], sumOfWeights[i]); } } return var; } /** * Computes the variance for a single set * * @param s * @param sS * @param weight * the weight * @return the variance */ protected static double singleVariance(final double s, final double sS, final double weight) { return sS - ((s * s) / weight); } /** * Main method for this class. * * @param argv * the commandline parameters */ public static void main(final String[] argv) { runClassifier(new RandomTree(), argv); } public Tree getM_Tree() { return this.m_Tree; } }
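Aside: the numericDistribution method above never materialises per-subset instance lists; it scores candidate splits from three accumulators (sum, sum of squares, sum of weights) through singleVariance. A minimal sketch of the identity it relies on follows — the class name VarianceDemo and the sample values are invented for illustration:

public class VarianceDemo {
  public static void main(String[] args) {
    double[] classValues = { 2.0, 4.0, 6.0 }; // all with weight 1.0
    double s = 0, sS = 0, w = 0;
    for (double v : classValues) {
      s += v;      // weighted sum
      sS += v * v; // weighted sum of squares
      w += 1.0;    // sum of weights
    }
    // sS - (s * s) / w equals the weighted sum of squared deviations from
    // the mean: 56 - (12 * 12) / 3 = 8 = (2-4)^2 + (4-4)^2 + (6-4)^2
    System.out.println(sS - (s * s) / w);
  }
}

Because the identity needs only these three running totals, the tree can update split statistics in a single pass over the sorted attribute values.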
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/ht/ActiveHNode.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * ActiveHNode.java * Copyright (C) 2013 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees.ht; import java.io.Serializable; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import weka.core.Attribute; import weka.core.Instance; /** * Node that is "active" (i.e. growth can occur) in a Hoeffding tree * * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision$ */ public class ActiveHNode extends LeafNode implements LearningNode, Serializable { /** * For serialization */ private static final long serialVersionUID = 3284585939739561683L; /** The weight of instances seen at the last split evaluation */ public double m_weightSeenAtLastSplitEval = 0; /** Statistics for nominal or numeric attributes conditioned on the class */ protected Map<String, ConditionalSufficientStats> m_nodeStats = new HashMap<String, ConditionalSufficientStats>(); @Override public void updateNode(Instance inst) throws Exception { super.updateDistribution(inst); for (int i = 0; i < inst.numAttributes(); i++) { Attribute a = inst.attribute(i); if (i != inst.classIndex()) { ConditionalSufficientStats stats = m_nodeStats.get(a.name()); if (stats == null) { if (a.isNumeric()) { stats = new GaussianConditionalSufficientStats(); } else { stats = new NominalConditionalSufficientStats(); } m_nodeStats.put(a.name(), stats); } stats .update(inst.value(a), inst.classAttribute().value((int) inst.classValue()), inst.weight()); } } } /** * Returns a list of split candidates * * @param splitMetric the splitting metric to use * @return a list of split candidates */ public List<SplitCandidate> getPossibleSplits(SplitMetric splitMetric) { List<SplitCandidate> splits = new ArrayList<SplitCandidate>(); // null split List<Map<String, WeightMass>> nullDist = new ArrayList<Map<String, WeightMass>>(); nullDist.add(m_classDistribution); SplitCandidate nullSplit = new SplitCandidate(null, nullDist, splitMetric.evaluateSplit(m_classDistribution, nullDist)); splits.add(nullSplit); for (Map.Entry<String, ConditionalSufficientStats> e : m_nodeStats .entrySet()) { ConditionalSufficientStats stat = e.getValue(); SplitCandidate splitCandidate = stat.bestSplit(splitMetric, m_classDistribution, e.getKey()); if (splitCandidate != null) { splits.add(splitCandidate); } } return splits; } }
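A hedged sketch of how a caller might drive ActiveHNode: feed instances, then rank the returned candidates. SplitProbe is a hypothetical name, the 0.01 minimum branch fraction is just an example value, and the sketch assumes SplitCandidate exposes a publicly readable m_splitMerit field, as the comparisons elsewhere in this package suggest.

import java.util.List;

import weka.classifiers.trees.ht.ActiveHNode;
import weka.classifiers.trees.ht.InfoGainSplitMetric;
import weka.classifiers.trees.ht.SplitCandidate;
import weka.classifiers.trees.ht.SplitMetric;
import weka.core.Instances;

public class SplitProbe {
  public static SplitCandidate bestSplit(Instances data) throws Exception {
    ActiveHNode node = new ActiveHNode();
    for (int i = 0; i < data.numInstances(); i++) {
      node.updateNode(data.instance(i)); // updates class and per-attribute stats
    }
    SplitMetric metric = new InfoGainSplitMetric(0.01);
    // The first candidate is always the "null split" (do not split); a real
    // Hoeffding tree learner compares the best candidate against it, plus the
    // Hoeffding bound, before actually splitting the node.
    SplitCandidate best = null;
    for (SplitCandidate c : node.getPossibleSplits(metric)) {
      if (best == null || c.m_splitMerit > best.m_splitMerit) {
        best = c;
      }
    }
    return best;
  }
}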
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/ht/ConditionalSufficientStats.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * ConditionalSufficientStats.java * Copyright (C) 2013 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees.ht; import java.io.Serializable; import java.util.HashMap; import java.util.Map; /** * Records sufficient stats for an attribute * * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision$ */ public abstract class ConditionalSufficientStats implements Serializable { /** * For serialization */ private static final long serialVersionUID = 8724787722646808376L; /** Lookup by class value */ protected Map<String, Object> m_classLookup = new HashMap<String, Object>(); /** * Update this stat with the supplied attribute value and class value * * @param attVal the value of the attribute * @param classVal the class value * @param weight the weight of this observation */ public abstract void update(double attVal, String classVal, double weight); /** * Return the probability of an attribute value conditioned on a class value * * @param attVal the attribute value to compute the conditional probability * for * @param classVal the class value * @return the probability */ public abstract double probabilityOfAttValConditionedOnClass(double attVal, String classVal); /** * Return the best split * * @param splitMetric the split metric to use * @param preSplitDist the distribution of class values prior to splitting * @param attName the name of the attribute being considered for splitting * @return the best split for the attribute */ public abstract SplitCandidate bestSplit(SplitMetric splitMetric, Map<String, WeightMass> preSplitDist, String attName); }
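To make the contract concrete, here is an illustrative-only subclass (not part of Weka; all names are invented): it tracks a per-class weighted mean of the attribute and never proposes a split. Returning null from bestSplit is legitimate — ActiveHNode.getPossibleSplits above skips null candidates.

import java.util.Map;

import weka.classifiers.trees.ht.ConditionalSufficientStats;
import weka.classifiers.trees.ht.SplitCandidate;
import weka.classifiers.trees.ht.SplitMetric;
import weka.classifiers.trees.ht.WeightMass;

public class MeanOnlyStats extends ConditionalSufficientStats {

  private static final long serialVersionUID = 1L;

  private static class SumCount { double sum, weight; }

  @Override
  public void update(double attVal, String classVal, double weight) {
    // The protected m_classLookup map is keyed by class value; implementations
    // are free to store whatever per-class statistic they need in it.
    SumCount sc = (SumCount) m_classLookup.get(classVal);
    if (sc == null) {
      sc = new SumCount();
      m_classLookup.put(classVal, sc);
    }
    sc.sum += attVal * weight;
    sc.weight += weight;
  }

  @Override
  public double probabilityOfAttValConditionedOnClass(double attVal, String classVal) {
    return 0; // a real implementation returns a density or frequency here
  }

  @Override
  public SplitCandidate bestSplit(SplitMetric splitMetric,
      Map<String, WeightMass> preSplitDist, String attName) {
    return null; // null means "no split worth proposing for this attribute"
  }
}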
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/ht/GaussianConditionalSufficientStats.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * GaussianConditionalSufficientStats.java * Copyright (C) 2013 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees.ht; import java.io.Serializable; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.TreeSet; import weka.core.Utils; import weka.estimators.UnivariateNormalEstimator; /** * Maintains sufficient stats for a Gaussian distribution for a numeric * attribute * * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision$ */ public class GaussianConditionalSufficientStats extends ConditionalSufficientStats implements Serializable { /** * For serialization */ private static final long serialVersionUID = -1527915607201784762L; /** * Inner class that implements a Gaussian estimator * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) */ protected class GaussianEstimator extends UnivariateNormalEstimator implements Serializable { /** * For serialization */ private static final long serialVersionUID = 4756032800685001315L; public double getSumOfWeights() { return m_SumOfWeights; } public double probabilityDensity(double value) { updateMeanAndVariance(); if (m_SumOfWeights > 0) { double stdDev = Math.sqrt(m_Variance); if (stdDev > 0) { double diff = value - m_Mean; return (1.0 / (CONST * stdDev)) * Math.exp(-(diff * diff / (2.0 * m_Variance))); } return value == m_Mean ? 1.0 : 0.0; } return 0.0; } public double[] weightLessThanEqualAndGreaterThan(double value) { double stdDev = Math.sqrt(m_Variance); double equalW = probabilityDensity(value) * m_SumOfWeights; double lessW = (stdDev > 0) ? weka.core.Statistics .normalProbability((value - m_Mean) / stdDev) * m_SumOfWeights - equalW : (value < m_Mean) ? 
m_SumOfWeights - equalW : 0.0; double greaterW = m_SumOfWeights - equalW - lessW; return new double[] { lessW, equalW, greaterW }; } } protected Map<String, Double> m_minValObservedPerClass = new HashMap<String, Double>(); protected Map<String, Double> m_maxValObservedPerClass = new HashMap<String, Double>(); protected int m_numBins = 10; public void setNumBins(int b) { m_numBins = b; } public int getNumBins() { return m_numBins; } @Override public void update(double attVal, String classVal, double weight) { if (!Utils.isMissingValue(attVal)) { GaussianEstimator norm = (GaussianEstimator) m_classLookup.get(classVal); if (norm == null) { norm = new GaussianEstimator(); m_classLookup.put(classVal, norm); m_minValObservedPerClass.put(classVal, attVal); m_maxValObservedPerClass.put(classVal, attVal); } else { if (attVal < m_minValObservedPerClass.get(classVal)) { m_minValObservedPerClass.put(classVal, attVal); } if (attVal > m_maxValObservedPerClass.get(classVal)) { m_maxValObservedPerClass.put(classVal, attVal); } } norm.addValue(attVal, weight); } } @Override public double probabilityOfAttValConditionedOnClass(double attVal, String classVal) { GaussianEstimator norm = (GaussianEstimator) m_classLookup.get(classVal); if (norm == null) { return 0; } // return Utils.lo return norm.probabilityDensity(attVal); } protected TreeSet<Double> getSplitPointCandidates() { TreeSet<Double> splits = new TreeSet<Double>(); double min = Double.POSITIVE_INFINITY; double max = Double.NEGATIVE_INFINITY; for (String classVal : m_classLookup.keySet()) { if (m_minValObservedPerClass.containsKey(classVal)) { if (m_minValObservedPerClass.get(classVal) < min) { min = m_minValObservedPerClass.get(classVal); } if (m_maxValObservedPerClass.get(classVal) > max) { max = m_maxValObservedPerClass.get(classVal); } } } if (min < Double.POSITIVE_INFINITY) { double bin = max - min; bin /= (m_numBins + 1); for (int i = 0; i < m_numBins; i++) { double split = min + (bin * (i + 1)); if (split > min && split < max) { splits.add(split); } } } return splits; } protected List<Map<String, WeightMass>> classDistsAfterSplit(double splitVal) { Map<String, WeightMass> lhsDist = new HashMap<String, WeightMass>(); Map<String, WeightMass> rhsDist = new HashMap<String, WeightMass>(); for (Map.Entry<String, Object> e : m_classLookup.entrySet()) { String classVal = e.getKey(); GaussianEstimator attEst = (GaussianEstimator) e.getValue(); if (attEst != null) { if (splitVal < m_minValObservedPerClass.get(classVal)) { WeightMass mass = rhsDist.get(classVal); if (mass == null) { mass = new WeightMass(); rhsDist.put(classVal, mass); } mass.m_weight += attEst.getSumOfWeights(); } else if (splitVal > m_maxValObservedPerClass.get(classVal)) { WeightMass mass = lhsDist.get(classVal); if (mass == null) { mass = new WeightMass(); lhsDist.put(classVal, mass); } mass.m_weight += attEst.getSumOfWeights(); } else { double[] weights = attEst.weightLessThanEqualAndGreaterThan(splitVal); WeightMass mass = lhsDist.get(classVal); if (mass == null) { mass = new WeightMass(); lhsDist.put(classVal, mass); } mass.m_weight += weights[0] + weights[1]; // <= mass = rhsDist.get(classVal); if (mass == null) { mass = new WeightMass(); rhsDist.put(classVal, mass); } mass.m_weight += weights[2]; // > } } } List<Map<String, WeightMass>> dists = new ArrayList<Map<String, WeightMass>>(); dists.add(lhsDist); dists.add(rhsDist); return dists; } @Override public SplitCandidate bestSplit(SplitMetric splitMetric, Map<String, WeightMass> preSplitDist, String attName) { SplitCandidate 
best = null; TreeSet<Double> candidates = getSplitPointCandidates(); for (Double s : candidates) { List<Map<String, WeightMass>> postSplitDists = classDistsAfterSplit(s); double splitMerit = splitMetric.evaluateSplit(preSplitDist, postSplitDists); if (best == null || splitMerit > best.m_splitMerit) { Split split = new UnivariateNumericBinarySplit(attName, s); best = new SplitCandidate(split, postSplitDists, splitMerit); } } return best; } }
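A quick hedged check of the estimator above (GaussianStatsDemo and the sample values are invented): after five symmetric observations the fitted normal is centred at zero, and a class with no estimator gets density 0 by definition.

import weka.classifiers.trees.ht.GaussianConditionalSufficientStats;

public class GaussianStatsDemo {
  public static void main(String[] args) {
    GaussianConditionalSufficientStats stats = new GaussianConditionalSufficientStats();
    for (double v : new double[] { -2, -1, 0, 1, 2 }) {
      stats.update(v, "yes", 1.0); // fits a running normal per class value
    }
    // Density of the fitted normal at its mean (0.0) for class "yes"
    System.out.println(stats.probabilityOfAttValConditionedOnClass(0.0, "yes"));
    // An unseen class has no estimator, so the density falls back to 0
    System.out.println(stats.probabilityOfAttValConditionedOnClass(0.0, "no"));
  }
}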
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/ht/GiniSplitMetric.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * GiniSplitMetric.java * Copyright (C) 2013 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees.ht; import java.io.Serializable; import java.util.List; import java.util.Map; /** * Implements the gini splitting criterion * * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision$ */ public class GiniSplitMetric extends SplitMetric implements Serializable { /** * For serialization */ private static final long serialVersionUID = -2037586582742660298L; @Override public double evaluateSplit(Map<String, WeightMass> preDist, List<Map<String, WeightMass>> postDist) { double totalWeight = 0.0; double[] distWeights = new double[postDist.size()]; for (int i = 0; i < postDist.size(); i++) { distWeights[i] = SplitMetric.sum(postDist.get(i)); totalWeight += distWeights[i]; } double gini = 0; for (int i = 0; i < postDist.size(); i++) { gini += (distWeights[i] / totalWeight) * gini(postDist.get(i), distWeights[i]); } return 1.0 - gini; } /** * Return the gini metric computed from the supplied distribution * * @param dist the distribution to compute the gini metric from * @param sumOfWeights the sum of the distribution weights * @return the gini metric */ protected static double gini(Map<String, WeightMass> dist, double sumOfWeights) { double gini = 1.0; for (Map.Entry<String, WeightMass> e : dist.entrySet()) { double frac = e.getValue().m_weight / sumOfWeights; gini -= frac * frac; } return gini; } /** * Return the gini metric computed from the supplied distribution * * @param dist dist the distribution to compute the gini metric from * @return */ public static double gini(Map<String, WeightMass> dist) { return gini(dist, SplitMetric.sum(dist)); } @Override public double getMetricRange(Map<String, WeightMass> preDist) { return 1.0; } }
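A small arithmetic check of the static helper (GiniDemo is a made-up name, and the sketch assumes WeightMass.m_weight is publicly writable, as its use throughout this package suggests): a 6/4 two-class distribution has Gini impurity 1 - (0.6^2 + 0.4^2) = 0.48.

import java.util.HashMap;
import java.util.Map;

import weka.classifiers.trees.ht.GiniSplitMetric;
import weka.classifiers.trees.ht.WeightMass;

public class GiniDemo {
  public static void main(String[] args) {
    Map<String, WeightMass> dist = new HashMap<>();
    WeightMass yes = new WeightMass();
    yes.m_weight = 6.0;
    WeightMass no = new WeightMass();
    no.m_weight = 4.0;
    dist.put("yes", yes);
    dist.put("no", no);
    System.out.println(GiniSplitMetric.gini(dist)); // prints 0.48
  }
}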
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/ht/HNode.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * HNode.java * Copyright (C) 2013 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees.ht; import java.io.Serializable; import java.util.LinkedHashMap; import java.util.Map; import weka.core.Attribute; import weka.core.Instance; import weka.core.Utils; /** * Abstract base class for nodes in a Hoeffding tree * * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @revision $Revision$ */ public abstract class HNode implements Serializable { /** * For serialization */ private static final long serialVersionUID = 197233928177240264L; /** Class distribution at this node */ public Map<String, WeightMass> m_classDistribution = new LinkedHashMap<>(); /** Holds the leaf number (if this is a leaf) */ protected int m_leafNum; /** Holds the node number (for graphing purposes) */ protected int m_nodeNum; /** * Construct a new HNode */ public HNode() { } /** * Construct a new HNode with the supplied class distribution * * @param classDistrib */ public HNode(final Map<String, WeightMass> classDistrib) { this.m_classDistribution = classDistrib; } /** * Returns true if this is a leaf * * @return */ public boolean isLeaf() { return true; } /** * The size of the class distribution * * @return the number of entries in the class distribution */ public int numEntriesInClassDistribution() { return this.m_classDistribution.size(); } /** * Returns true if the class distribution is pure * * @return true if the class distribution is pure */ public boolean classDistributionIsPure() { int count = 0; for (Map.Entry<String, WeightMass> el : this.m_classDistribution.entrySet()) { if (el.getValue().m_weight > 0) { count++; if (count > 1) { break; } } } return (count < 2); } /** * Update the class frequency distribution with the supplied instance * * @param inst * the instance to update with */ public void updateDistribution(final Instance inst) { if (inst.classIsMissing()) { return; } String classVal = inst.stringValue(inst.classAttribute()); WeightMass m = this.m_classDistribution.get(classVal); if (m == null) { m = new WeightMass(); m.m_weight = 1.0; this.m_classDistribution.put(classVal, m); } m.m_weight += inst.weight(); } /** * Return a class probability distribution computed from the frequency counts at this node * * @param inst * the instance to get a prediction for * @param classAtt * the class attribute * @return a class probability distribution * @throws Exception * if a problem occurs */ public double[] getDistribution(final Instance inst, final Attribute classAtt) throws Exception { double[] dist = new double[classAtt.numValues()]; for (int i = 0; i < classAtt.numValues(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } WeightMass w = this.m_classDistribution.get(classAtt.value(i)); if (w != null) { dist[i] = 
w.m_weight; } else { dist[i] = 1.0; } } Utils.normalize(dist); return dist; } public int installNodeNums(int nodeNum) { nodeNum++; this.m_nodeNum = nodeNum; return nodeNum; } protected int dumpTree(final int depth, int leafCount, final StringBuffer buff) { double max = -1; String classVal = ""; for (Map.Entry<String, WeightMass> e : this.m_classDistribution.entrySet()) { if (e.getValue().m_weight > max) { max = e.getValue().m_weight; classVal = e.getKey(); } } buff.append(classVal + " (" + String.format("%-9.3f", max).trim() + ")"); leafCount++; this.m_leafNum = leafCount; return leafCount; } protected void printLeafModels(final StringBuffer buff) { } public void graphTree(final StringBuffer text) { double max = -1; String classVal = ""; for (Map.Entry<String, WeightMass> e : this.m_classDistribution.entrySet()) { if (e.getValue().m_weight > max) { max = e.getValue().m_weight; classVal = e.getKey(); } } text.append("N" + this.m_nodeNum + " [label=\"" + classVal + " (" + String.format("%-9.3f", max).trim() + ")\" shape=box style=filled]\n"); } /** * Print a textual description of the tree * * @param printLeaf * true if leaf models (NB, NB adaptive) should be output * @return a textual description of the tree */ public String toString(final boolean printLeaf) { this.installNodeNums(0); StringBuffer buff = new StringBuffer(); this.dumpTree(0, 0, buff); if (printLeaf) { buff.append("\n\n"); this.printLeafModels(buff); } return buff.toString(); } /** * Return the total weight of instances seen at this node * * @return the total weight of instances seen at this node */ public double totalWeight() { double tw = 0; for (Map.Entry<String, WeightMass> e : this.m_classDistribution.entrySet()) { tw += e.getValue().m_weight; } return tw; } /** * Return the leaf that the supplied instance ends up at * * @param inst * the instance to find the leaf for * @param parent * the parent node * @param parentBranch * the parent branch * @return the leaf that the supplied instance ends up at */ public LeafNode leafForInstance(final Instance inst, final SplitNode parent, final String parentBranch) { return new LeafNode(this, parent, parentBranch); } /** * Update the node with the supplied instance * * @param inst * the instance to update with * @throws Exception * if a problem occurs */ public abstract void updateNode(Instance inst) throws Exception; }
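Two details of the frequency bookkeeping above are easy to miss: updateDistribution seeds a newly observed class with weight 1.0 before adding the instance weight, and getDistribution backs off to a pseudo-count of 1.0 for classes never seen at the node before normalising. A hypothetical probe (names and data invented) using ActiveHNode, the concrete learning node from earlier in this listing:

import java.util.ArrayList;

import weka.classifiers.trees.ht.ActiveHNode;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;

public class DistributionProbe {
  public static void main(String[] args) throws Exception {
    ArrayList<String> labels = new ArrayList<>();
    labels.add("yes");
    labels.add("no");
    ArrayList<Attribute> atts = new ArrayList<>();
    atts.add(new Attribute("x"));
    atts.add(new Attribute("class", labels));
    Instances data = new Instances("demo", atts, 0);
    data.setClassIndex(1);

    ActiveHNode node = new ActiveHNode();
    Instance yes = new DenseInstance(1.0, new double[] { 0.5, 0.0 });
    yes.setDataset(data);
    node.updateNode(yes); // the node has only ever seen class "yes"

    // "yes" holds 1.0 (seed) + 1.0 (weight) = 2.0; unseen "no" falls back to
    // 1.0, so after normalising the result is roughly [2/3, 1/3], not [1, 0].
    double[] dist = node.getDistribution(yes, data.classAttribute());
    System.out.println(dist[0] + " " + dist[1]);
  }
}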
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/ht/InactiveHNode.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * InactiveHNode.java * Copyright (C) 2013 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees.ht; import java.io.Serializable; import java.util.Map; import weka.core.Instance; /** * Class implementing an inactive node (i.e. one that does not allow growth) * * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision$ */ public class InactiveHNode extends LeafNode implements LearningNode, Serializable { /** * For serialization */ private static final long serialVersionUID = -8747567733141700911L; /** * Constructor * * @param classDistrib the class distribution at this node */ public InactiveHNode(Map<String, WeightMass> classDistrib) { m_classDistribution = classDistrib; } @Override public void updateNode(Instance inst) { super.updateDistribution(inst); } }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/ht/InfoGainSplitMetric.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * InfoGainSplitMetric.java * Copyright (C) 2013 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees.ht; import java.io.Serializable; import java.util.List; import java.util.Map; import weka.core.ContingencyTables; import weka.core.Utils; /** * Implements the info gain splitting criterion * * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision$ */ public class InfoGainSplitMetric extends SplitMetric implements Serializable { /** * For serialization */ private static final long serialVersionUID = 2173840581308675428L; protected double m_minFracWeightForTwoBranches; public InfoGainSplitMetric(double minFracWeightForTwoBranches) { m_minFracWeightForTwoBranches = minFracWeightForTwoBranches; } @Override public double evaluateSplit(Map<String, WeightMass> preDist, List<Map<String, WeightMass>> postDist) { double[] pre = new double[preDist.size()]; int count = 0; for (Map.Entry<String, WeightMass> e : preDist.entrySet()) { pre[count++] = e.getValue().m_weight; } double preEntropy = ContingencyTables.entropy(pre); double[] distWeights = new double[postDist.size()]; double totalWeight = 0.0; for (int i = 0; i < postDist.size(); i++) { distWeights[i] = SplitMetric.sum(postDist.get(i)); totalWeight += distWeights[i]; } int fracCount = 0; for (double d : distWeights) { if (d / totalWeight > m_minFracWeightForTwoBranches) { fracCount++; } } if (fracCount < 2) { return Double.NEGATIVE_INFINITY; } double postEntropy = 0; for (int i = 0; i < postDist.size(); i++) { Map<String, WeightMass> d = postDist.get(i); double[] post = new double[d.size()]; count = 0; for (Map.Entry<String, WeightMass> e : d.entrySet()) { post[count++] = e.getValue().m_weight; } postEntropy += distWeights[i] * ContingencyTables.entropy(post); } if (totalWeight > 0) { postEntropy /= totalWeight; } return preEntropy - postEntropy; } @Override public double getMetricRange(Map<String, WeightMass> preDist) { int numClasses = preDist.size(); if (numClasses < 2) { numClasses = 2; } return Utils.log2(numClasses); } }
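A worked example of the metric (InfoGainDemo and the label names are invented): splitting a balanced 5/5 two-class distribution into two pure branches recovers the full prior entropy of 1 bit, and both branches easily clear the minimum weight fraction of 0.01 passed to the constructor.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import weka.classifiers.trees.ht.InfoGainSplitMetric;
import weka.classifiers.trees.ht.WeightMass;

public class InfoGainDemo {

  // Build a two-class distribution, omitting zero-weight entries
  static Map<String, WeightMass> dist(double yes, double no) {
    Map<String, WeightMass> d = new HashMap<>();
    if (yes > 0) {
      WeightMass y = new WeightMass();
      y.m_weight = yes;
      d.put("yes", y);
    }
    if (no > 0) {
      WeightMass n = new WeightMass();
      n.m_weight = no;
      d.put("no", n);
    }
    return d;
  }

  public static void main(String[] args) {
    Map<String, WeightMass> pre = dist(5, 5); // entropy = 1 bit
    List<Map<String, WeightMass>> post = new ArrayList<>();
    post.add(dist(5, 0)); // pure branch, entropy 0
    post.add(dist(0, 5)); // pure branch, entropy 0
    InfoGainSplitMetric m = new InfoGainSplitMetric(0.01);
    System.out.println(m.evaluateSplit(pre, post)); // prints 1.0
  }
}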
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/ht/LeafNode.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * LeafNode.java * Copyright (C) 2013 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees.ht; import java.io.Serializable; import weka.core.Instance; /** * Leaf node in a HoeffdingTree * * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision$ */ public class LeafNode extends HNode implements Serializable { /** * For serialization */ private static final long serialVersionUID = -3359429731894384404L; /** The actual node for this leaf */ public HNode m_theNode; /** Parent split node */ public SplitNode m_parentNode; /** Parent branch leading to this node */ public String m_parentBranch; /** * Construct an empty leaf node */ public LeafNode() { } /** * Construct a leaf node with the given actual node, parent and parent branch * * @param node the actual node at this leaf * @param parentNode the parent split node * @param parentBranch the branch leading to this node */ public LeafNode(HNode node, SplitNode parentNode, String parentBranch) { m_theNode = node; m_parentNode = parentNode; m_parentBranch = parentBranch; } @Override public void updateNode(Instance inst) throws Exception { if (m_theNode != null) { m_theNode.updateDistribution(inst); } else { super.updateDistribution(inst); } } }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/ht/LearningNode.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * LearningNode.java * Copyright (C) 2013 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees.ht; /** * Marker interface for a node that can be updated with incoming instances in a * HoeffdingTree. Implementations include ActiveHNode, NBNode and NBNodeAdaptive * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision$ */ public interface LearningNode { }