index
int64
repo_id
string
file_path
string
content
string
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/UnsupervisedSubsetEvaluator.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * UnsupervisedSubsetEvaluator.java
 * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.attributeSelection;

import weka.clusterers.Clusterer;

/**
 * Abstract unsupervised attribute subset evaluator. Concrete subclasses score
 * attribute subsets with the help of a clustering scheme (a {@link Clusterer})
 * instead of a class attribute.
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision$
 */
public abstract class UnsupervisedSubsetEvaluator extends ASEvaluation implements SubsetEvaluator {

  /** for serialization */
  static final long serialVersionUID = 627934376267488763L;

  /**
   * Return the number of clusters used by the subset evaluator
   *
   * @return the number of clusters used
   * @exception Exception if an error occurs
   */
  public abstract int getNumClusters() throws Exception;

  /**
   * Get the clusterer
   *
   * @return the clusterer
   */
  public abstract Clusterer getClusterer();

  /**
   * Set the clusterer to use
   *
   * @param d the clusterer to use
   */
  public abstract void setClusterer(Clusterer d);
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/WrapperSubsetEval.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * WrapperSubsetEval.java
 * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.attributeSelection;

import java.util.BitSet;
import java.util.Collections;
import java.util.Enumeration;
import java.util.List;
import java.util.Random;
import java.util.Vector;

import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.classifiers.evaluation.AbstractEvaluationMetric;
import weka.classifiers.evaluation.InformationRetrievalEvaluationMetric;
import weka.classifiers.rules.ZeroR;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.SelectedTag;
import weka.core.Tag;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.Remove;

/**
 * <!-- globalinfo-start --> WrapperSubsetEval:<br>
 * <br>
 * Evaluates attribute sets by using a learning scheme. Cross validation is
 * used to estimate the accuracy of the learning scheme for a set of
 * attributes.<br>
 * <br>
 * For more information see:<br>
 * <br>
 * <p>Ron Kohavi, George H. John (1997). Wrappers for feature subset selection.
 * Artificial Intelligence. 97(1-2):273-324.</p>
 * <!-- globalinfo-end -->
 *
 * <!-- technical-bibtex-start --> BibTeX:
 *
 * <pre>
 * &#64;article{Kohavi1997,
 *    author = {Ron Kohavi and George H. John},
 *    journal = {Artificial Intelligence},
 *    note = {Special issue on relevance},
 *    number = {1-2},
 *    pages = {273-324},
 *    title = {Wrappers for feature subset selection},
 *    volume = {97},
 *    year = {1997},
 *    ISSN = {0004-3702}
 * }
 * </pre>
 * <p>
 * <!-- technical-bibtex-end -->
 *
 * <!-- options-start -->
 * Valid options are:
 * </p>
 *
 * <pre>
 * -B &lt;base learner&gt;
 *  class name of base learner to use for accuracy estimation.
 *  Place any classifier options LAST on the command line
 *  following a "--". eg.:
 *   -B weka.classifiers.bayes.NaiveBayes ... -- -K
 *  (default: weka.classifiers.rules.ZeroR)
 * </pre>
 *
 * <pre>
 * -F &lt;num&gt;
 *  number of cross validation folds to use for estimating accuracy.
 *  (default=5)
 * </pre>
 *
 * <pre>
 * -R &lt;seed&gt;
 *  Seed for cross validation accuracy testimation.
 *  (default = 1)
 * </pre>
 *
 * <pre>
 * -T &lt;num&gt;
 *  threshold by which to execute another cross validation
 *  (standard deviation---expressed as a percentage of the mean).
 *  (default: 0.01 (1%))
 * </pre>
 *
 * <pre>
 * -E &lt;acc | rmse | mae | f-meas | auc | auprc&gt;
 *  Performance evaluation measure to use for selecting attributes.
 *  (Default = accuracy for discrete class and rmse for numeric class)
 * </pre>
 *
 * <pre>
 * -IRclass &lt;label | index&gt;
 *  Optional class value (label or 1-based index) to use in conjunction with
 *  IR statistics (f-meas, auc or auprc). Omitting this option will use
 *  the class-weighted average.
 * </pre>
 *
 * <pre>
 * Options specific to scheme weka.classifiers.rules.ZeroR:
 * </pre>
 *
 * <pre>
 * -D
 *  If set, classifier is run in debug mode and
 *  may output additional info to the console
 * </pre>
 * <!-- options-end -->
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision$
 */
public class WrapperSubsetEval extends ASEvaluation implements SubsetEvaluator, OptionHandler, TechnicalInformationHandler {

  /** for serialization */
  static final long serialVersionUID = -4573057658746728675L;

  /** training instances */
  private Instances m_trainInstances;

  /** class index */
  private int m_classIndex;

  /** number of attributes in the training data */
  private int m_numAttribs;

  /** holds an evaluation object */
  private Evaluation m_Evaluation;

  /** holds the base classifier object */
  private Classifier m_BaseClassifier;

  /** number of folds to use for cross validation */
  private int m_folds;

  /** random number seed */
  private int m_seed;

  /**
   * the threshold by which to do further cross validations when estimating the
   * accuracy of a subset
   */
  private double m_threshold;

  public static final int EVAL_DEFAULT = 1;
  public static final int EVAL_ACCURACY = 2;
  public static final int EVAL_RMSE = 3;
  public static final int EVAL_MAE = 4;
  public static final int EVAL_FMEASURE = 5;
  public static final int EVAL_AUC = 6;
  public static final int EVAL_AUPRC = 7;
  public static final int EVAL_CORRELATION = 8;
  public static final int EVAL_PLUGIN = 9;

  /**
   * Small subclass of Tag to store info about a plugin metric
   */
  protected static class PluginTag extends Tag {

    private static final long serialVersionUID = -6978438858413428382L;

    /** The metric object itself */
    protected AbstractEvaluationMetric m_metric;

    /** The particular statistic from the metric that this tag pertains to */
    protected String m_statisticName;

    /**
     * Constructor
     *
     * @param ID the numeric ID of this tag
     * @param metric the metric object
     * @param statisticName the particular statistic that this tag pertains to
     */
    public PluginTag(final int ID, final AbstractEvaluationMetric metric, final String statisticName) {
      super(ID, statisticName, statisticName);
      this.m_metric = metric;
      this.m_statisticName = statisticName;
    }

    /**
     * Get the name of the metric represented by this tag
     *
     * @return the name of the metric
     */
    public String getMetricName() {
      return this.m_metric.getMetricName();
    }

    /**
     * Get the name of the statistic that this tag pertains to
     *
     * @return the name of the statistic
     */
    public String getStatisticName() {
      return this.m_statisticName;
    }

    /**
     * Get the actual metric object
     *
     * @return the metric object
     */
    public AbstractEvaluationMetric getMetric() {
      return this.m_metric;
    }
  }

  /** Holds all tags for metrics */
  public static final Tag[] TAGS_EVALUATION;

  /**
   * If >= 0, and an IR metric is being used, then evaluate with respect to this
   * class value (0-based index)
   */
  protected int m_IRClassVal = -1;

  /** User supplied option for IR class value (either name or 1-based index) */
  protected String m_IRClassValS = "";

  /** Plugin metrics discovered at class-load time (may be null). */
  protected static List<AbstractEvaluationMetric> PLUGIN_METRICS = AbstractEvaluationMetric.getPluginMetrics();

  static {
    // Build the tag array: 8 built-in measures plus one tag per plugin statistic.
    int totalPluginCount = 0;
    if (PLUGIN_METRICS != null) {
      for (AbstractEvaluationMetric m : PLUGIN_METRICS) {
        totalPluginCount += m.getStatisticNames().size();
      }
    }

    TAGS_EVALUATION = new Tag[8 + totalPluginCount];
    TAGS_EVALUATION[0] = new Tag(EVAL_DEFAULT, "default", "Default: accuracy (discrete class); RMSE (numeric class)");
    TAGS_EVALUATION[1] = new Tag(EVAL_ACCURACY, "acc", "Accuracy (discrete class only)");
    TAGS_EVALUATION[2] = new Tag(EVAL_RMSE, "rmse", "RMSE (of the class probabilities for discrete class)");
    TAGS_EVALUATION[3] = new Tag(EVAL_MAE, "mae", "MAE (of the class probabilities for discrete class)");
    TAGS_EVALUATION[4] = new Tag(EVAL_FMEASURE, "f-meas", "F-measure (discrete class only)");
    TAGS_EVALUATION[5] = new Tag(EVAL_AUC, "auc", "AUC (area under the ROC curve - discrete class only)");
    TAGS_EVALUATION[6] = new Tag(EVAL_AUPRC, "auprc", "AUPRC (area under the precision-recall curve - discrete class only)");
    TAGS_EVALUATION[7] = new Tag(EVAL_CORRELATION, "corr-coeff", "Correlation coefficient - numeric class only");

    if (PLUGIN_METRICS != null) {
      int index = 8;
      for (AbstractEvaluationMetric m : PLUGIN_METRICS) {
        for (String stat : m.getStatisticNames()) {
          // NOTE(review): the post-increment means the tag ID is (slot + 2);
          // plugin IDs only need to be unique and >= EVAL_PLUGIN, which holds,
          // so the quirk is preserved for compatibility with serialized state.
          TAGS_EVALUATION[index++] = new PluginTag(index + 1, m, stat);
        }
      }
    }
  }

  /** The evaluation measure to use */
  protected Tag m_evaluationMeasure = TAGS_EVALUATION[0];

  /**
   * Returns a string describing this attribute evaluator
   *
   * @return a description of the evaluator suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String globalInfo() {
    return "WrapperSubsetEval:\n\n" + "Evaluates attribute sets by using a learning scheme. Cross " + "validation is used to estimate the accuracy of the learning "
        + "scheme for a set of attributes.\n\n" + "For more information see:\n\n" + this.getTechnicalInformation().toString();
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing detailed
   * information about the technical background of this class, e.g., paper
   * reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  @Override
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.ARTICLE);
    result.setValue(Field.AUTHOR, "Ron Kohavi and George H. John");
    result.setValue(Field.YEAR, "1997");
    result.setValue(Field.TITLE, "Wrappers for feature subset selection");
    result.setValue(Field.JOURNAL, "Artificial Intelligence");
    result.setValue(Field.VOLUME, "97");
    result.setValue(Field.NUMBER, "1-2");
    result.setValue(Field.PAGES, "273-324");
    result.setValue(Field.NOTE, "Special issue on relevance");
    result.setValue(Field.ISSN, "0004-3702");

    return result;
  }

  /**
   * Constructor. Calls restOptions to set default options
   **/
  public WrapperSubsetEval() {
    this.resetOptions();
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   **/
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> newVector = new Vector<Option>(4);
    newVector.addElement(new Option("\tclass name of base learner to use for \taccuracy estimation.\n" + "\tPlace any classifier options LAST on the command line\n"
        + "\tfollowing a \"--\". eg.:\n" + "\t\t-B weka.classifiers.bayes.NaiveBayes ... -- -K\n" + "\t(default: weka.classifiers.rules.ZeroR)", "B", 1, "-B <base learner>"));
    newVector.addElement(new Option("\tnumber of cross validation folds to use for estimating accuracy.\n" + "\t(default=5)", "F", 1, "-F <num>"));
    newVector.addElement(new Option("\tSeed for cross validation accuracy testimation.\n" + "\t(default = 1)", "R", 1, "-R <seed>"));
    newVector.addElement(new Option("\tthreshold by which to execute another cross validation\n" + "\t(standard deviation---expressed as a percentage of the mean).\n"
        + "\t(default: 0.01 (1%))", "T", 1, "-T <num>"));
    newVector.addElement(
        new Option("\tPerformance evaluation measure to use for selecting attributes.\n" + "\t(Default = default: accuracy for discrete class and rmse for " + "numeric class)",
            "E", 1, "-E " + Tag.toOptionList(TAGS_EVALUATION)));
    newVector.addElement(new Option("\tOptional class value (label or 1-based index) to use in conjunction with\n"
        + "\tIR statistics (f-meas, auc or auprc). Omitting this option will use\n" + "\tthe class-weighted average.", "IRclass", 1, "-IRclass <label | index>"));

    if ((this.m_BaseClassifier != null) && (this.m_BaseClassifier instanceof OptionHandler)) {
      newVector.addElement(new Option("", "", 0, "\nOptions specific to scheme " + this.m_BaseClassifier.getClass().getName() + ":"));
      newVector.addAll(Collections.list(((OptionHandler) this.m_BaseClassifier).listOptions()));
    }

    return newVector.elements();
  }

  /**
   * <p>Parses a given list of options. Valid options are -B (base learner plus
   * trailing scheme options after "--"), -F (number of cross-validation folds),
   * -R (random seed), -T (repeat-xval threshold), -E (evaluation measure) and
   * -IRclass (class value for IR metrics).
   * </p>
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  @Override
  public void setOptions(final String[] options) throws Exception {
    String optionString;
    this.resetOptions();
    optionString = Utils.getOption('B', options);

    if (optionString.length() == 0) {
      optionString = ZeroR.class.getName();
    }

    this.setClassifier(AbstractClassifier.forName(optionString, Utils.partitionOptions(options)));
    optionString = Utils.getOption('F', options);

    if (optionString.length() != 0) {
      this.setFolds(Integer.parseInt(optionString));
    }

    optionString = Utils.getOption('R', options);
    if (optionString.length() != 0) {
      this.setSeed(Integer.parseInt(optionString));
    }

    optionString = Utils.getOption('T', options);
    if (optionString.length() != 0) {
      Double temp;
      temp = Double.valueOf(optionString);
      this.setThreshold(temp.doubleValue());
    }

    optionString = Utils.getOption('E', options);
    if (optionString.length() != 0) {
      for (Tag t : TAGS_EVALUATION) {
        if (t.getIDStr().equalsIgnoreCase(optionString)) {
          this.setEvaluationMeasure(new SelectedTag(t.getIDStr(), TAGS_EVALUATION));
          break;
        }
      }
    }

    // Historically this option was read as "IRClass" even though listOptions()
    // and the class documentation advertise "-IRclass". Accept both spellings
    // so that documented usage works while existing callers stay compatible.
    optionString = Utils.getOption("IRClass", options);
    if (optionString.length() == 0) {
      optionString = Utils.getOption("IRclass", options);
    }
    if (optionString.length() > 0) {
      this.setIRClassValue(optionString);
    }
  }

  /**
   * Set the class value (label or index) to use with IR metric evaluation of
   * subsets. Leaving this unset will result in the class weighted average for
   * the IR metric being used.
   *
   * @param val the class label or 1-based index of the class label to use when
   *          evaluating subsets with an IR metric
   */
  public void setIRClassValue(final String val) {
    this.m_IRClassValS = val;
  }

  /**
   * Get the class value (label or index) to use with IR metric evaluation of
   * subsets. Leaving this unset will result in the class weighted average for
   * the IR metric being used.
   *
   * @return the class label or 1-based index of the class label to use when
   *         evaluating subsets with an IR metric
   */
  public String getIRClassValue() {
    return this.m_IRClassValS;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String IRClassValueTipText() {
    return "The class label, or 1-based index of the class label, to use " + "when evaluating subsets with an IR metric (such as f-measure "
        + "or AUC. Leaving this unset will result in the class frequency " + "weighted average of the metric being used.";
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String evaluationMeasureTipText() {
    return "The measure used to evaluate the performance of attribute combinations.";
  }

  /**
   * Gets the currently set performance evaluation measure used for selecting
   * attributes for the decision table
   *
   * @return the performance evaluation measure
   */
  public SelectedTag getEvaluationMeasure() {
    return new SelectedTag(this.m_evaluationMeasure.getIDStr(), TAGS_EVALUATION);
  }

  /**
   * Sets the performance evaluation measure to use for selecting attributes for
   * the decision table
   *
   * @param newMethod the new performance evaluation metric to use
   */
  public void setEvaluationMeasure(final SelectedTag newMethod) {
    if (newMethod.getTags() == TAGS_EVALUATION) {
      this.m_evaluationMeasure = newMethod.getSelectedTag();
    }
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String thresholdTipText() {
    return "Repeat xval if stdev of mean exceeds this value.";
  }

  /**
   * Set the value of the threshold for repeating cross validation
   *
   * @param t the value of the threshold
   */
  public void setThreshold(final double t) {
    this.m_threshold = t;
  }

  /**
   * Get the value of the threshold
   *
   * @return the threshold as a double
   */
  public double getThreshold() {
    return this.m_threshold;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String foldsTipText() {
    return "Number of xval folds to use when estimating subset accuracy.";
  }

  /**
   * Set the number of folds to use for accuracy estimation
   *
   * @param f the number of folds
   */
  public void setFolds(final int f) {
    this.m_folds = f;
  }

  /**
   * Get the number of folds used for accuracy estimation
   *
   * @return the number of folds
   */
  public int getFolds() {
    return this.m_folds;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String seedTipText() {
    return "Seed to use for randomly generating xval splits.";
  }

  /**
   * Set the seed to use for cross validation
   *
   * @param s the seed
   */
  public void setSeed(final int s) {
    this.m_seed = s;
  }

  /**
   * Get the random number seed used for cross validation
   *
   * @return the seed
   */
  public int getSeed() {
    return this.m_seed;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String classifierTipText() {
    return "Classifier to use for estimating the accuracy of subsets";
  }

  /**
   * Set the classifier to use for accuracy estimation
   *
   * @param newClassifier the Classifier to use.
   */
  public void setClassifier(final Classifier newClassifier) {
    this.m_BaseClassifier = newClassifier;
  }

  /**
   * Get the classifier used as the base learner.
   *
   * @return the classifier used as the classifier
   */
  public Classifier getClassifier() {
    return this.m_BaseClassifier;
  }

  /**
   * Gets the current settings of WrapperSubsetEval.
   *
   * @return an array of strings suitable for passing to setOptions()
   */
  @Override
  public String[] getOptions() {
    String[] classifierOptions = new String[0];

    if ((this.m_BaseClassifier != null) && (this.m_BaseClassifier instanceof OptionHandler)) {
      classifierOptions = ((OptionHandler) this.m_BaseClassifier).getOptions();
    }

    // 13 = -B <cls>, -F <n>, -T <t>, -R <s>, -E <id>, optional -IRClass <v>, "--"
    String[] options = new String[13 + classifierOptions.length];
    int current = 0;

    if (this.getClassifier() != null) {
      options[current++] = "-B";
      options[current++] = this.getClassifier().getClass().getName();
    }

    options[current++] = "-F";
    options[current++] = "" + this.getFolds();
    options[current++] = "-T";
    options[current++] = "" + this.getThreshold();
    options[current++] = "-R";
    options[current++] = "" + this.getSeed();
    options[current++] = "-E";
    options[current++] = this.m_evaluationMeasure.getIDStr();

    if (this.m_IRClassValS != null && this.m_IRClassValS.length() > 0) {
      options[current++] = "-IRClass";
      options[current++] = this.m_IRClassValS;
    }

    options[current++] = "--";
    System.arraycopy(classifierOptions, 0, options, current, classifierOptions.length);
    current += classifierOptions.length;

    while (current < options.length) {
      options[current++] = "";
    }
    return options;
  }

  /** Resets all options to their default values. */
  protected void resetOptions() {
    this.m_trainInstances = null;
    this.m_Evaluation = null;
    this.m_BaseClassifier = new ZeroR();
    this.m_folds = 5;
    this.m_seed = 1;
    this.m_threshold = 0.01;
  }

  /**
   * Returns the capabilities of this evaluator.
   *
   * @return the capabilities of this evaluator
   * @see Capabilities
   */
  @Override
  public Capabilities getCapabilities() {
    Capabilities result;

    if (this.getClassifier() == null) {
      result = super.getCapabilities();
      result.disableAll();
    } else {
      result = this.getClassifier().getCapabilities();
    }

    // set dependencies
    for (Capability cap : Capability.values()) {
      result.enableDependency(cap);
    }

    // adjustment for class based on selected evaluation metric
    result.disable(Capability.NUMERIC_CLASS);
    result.disable(Capability.DATE_CLASS);

    boolean pluginMetricNominalClass = false;
    if (this.m_evaluationMeasure.getID() >= EVAL_PLUGIN) {
      String metricName = ((PluginTag) this.m_evaluationMeasure).getMetricName();
      for (AbstractEvaluationMetric m : PLUGIN_METRICS) {
        if (m.getMetricName().equals(metricName)) {
          pluginMetricNominalClass = m.appliesToNominalClass();
          break;
        }
      }
    }

    // re-enable numeric/date class unless the chosen measure is nominal-only
    if (this.m_evaluationMeasure.getID() != EVAL_ACCURACY && this.m_evaluationMeasure.getID() != EVAL_FMEASURE && this.m_evaluationMeasure.getID() != EVAL_AUC
        && this.m_evaluationMeasure.getID() != EVAL_AUPRC && !pluginMetricNominalClass) {
      result.enable(Capability.NUMERIC_CLASS);
      result.enable(Capability.DATE_CLASS);
    }

    result.setMinimumNumberInstances(this.getFolds());

    return result;
  }

  /**
   * Generates a attribute evaluator. Has to initialize all fields of the
   * evaluator that are not being set via options.
   *
   * @param data set of instances serving as training data
   * @throws Exception if the evaluator has not been generated successfully
   */
  @Override
  public void buildEvaluator(final Instances data) throws Exception {
    // can evaluator handle data?
    this.getCapabilities().testWithFail(data);

    this.m_trainInstances = data;
    this.m_classIndex = this.m_trainInstances.classIndex();
    this.m_numAttribs = this.m_trainInstances.numAttributes();

    if (this.m_IRClassValS != null && this.m_IRClassValS.length() > 0) {
      // honour interruption requests (interruptible-weka fork behaviour)
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA");
      }
      // try to parse as a number first
      try {
        this.m_IRClassVal = Integer.parseInt(this.m_IRClassValS);
        // make zero-based
        this.m_IRClassVal--;
      } catch (NumberFormatException e) {
        // now try as a named class label
        this.m_IRClassVal = this.m_trainInstances.classAttribute().indexOfValue(this.m_IRClassValS);
      }
    }
  }

  /**
   * Evaluates a subset of attributes
   *
   * @param subset a bitset representing the attribute subset to be evaluated
   * @return the error rate
   * @throws Exception if the subset could not be evaluated
   */
  @Override
  public double evaluateSubset(final BitSet subset) throws Exception {
    double evalMetric = 0;
    double[] repError = new double[5];
    int numAttributes = 0;
    int i, j;
    Random Rnd = new Random(this.m_seed);
    Remove delTransform = new Remove();
    delTransform.setInvertSelection(true);
    // copy the instances
    Instances trainCopy = new Instances(this.m_trainInstances);

    // count attributes set in the BitSet
    for (i = 0; i < this.m_numAttribs; i++) {
      if (subset.get(i)) {
        numAttributes++;
      }
    }

    // set up an array of attribute indexes for the filter (+1 for the class)
    int[] featArray = new int[numAttributes + 1];

    for (i = 0, j = 0; i < this.m_numAttribs; i++) {
      if (subset.get(i)) {
        featArray[j++] = i;
      }
    }

    featArray[j] = this.m_classIndex;
    delTransform.setAttributeIndicesArray(featArray);
    delTransform.setInputFormat(trainCopy);
    trainCopy = Filter.useFilter(trainCopy, delTransform);

    AbstractEvaluationMetric pluginMetric = null;
    String statName = null;
    String metricName = null;

    // max of 5 repetitions of cross validation
    for (i = 0; i < 5; i++) {
      // honour interruption requests (interruptible-weka fork behaviour)
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA");
      }
      this.m_Evaluation = new Evaluation(trainCopy);
      this.m_Evaluation.crossValidateModel(this.m_BaseClassifier, trainCopy, this.m_folds, Rnd);

      switch (this.m_evaluationMeasure.getID()) {
      case EVAL_DEFAULT:
        repError[i] = this.m_Evaluation.errorRate();
        break;
      case EVAL_ACCURACY:
        repError[i] = this.m_Evaluation.errorRate();
        break;
      case EVAL_RMSE:
        repError[i] = this.m_Evaluation.rootMeanSquaredError();
        break;
      case EVAL_MAE:
        repError[i] = this.m_Evaluation.meanAbsoluteError();
        break;
      case EVAL_FMEASURE:
        if (this.m_IRClassVal < 0) {
          repError[i] = this.m_Evaluation.weightedFMeasure();
        } else {
          repError[i] = this.m_Evaluation.fMeasure(this.m_IRClassVal);
        }
        break;
      case EVAL_AUC:
        if (this.m_IRClassVal < 0) {
          repError[i] = this.m_Evaluation.weightedAreaUnderROC();
        } else {
          repError[i] = this.m_Evaluation.areaUnderROC(this.m_IRClassVal);
        }
        break;
      case EVAL_AUPRC:
        if (this.m_IRClassVal < 0) {
          repError[i] = this.m_Evaluation.weightedAreaUnderPRC();
        } else {
          repError[i] = this.m_Evaluation.areaUnderPRC(this.m_IRClassVal);
        }
        break;
      case EVAL_CORRELATION:
        repError[i] = this.m_Evaluation.correlationCoefficient();
        break;
      default:
        if (this.m_evaluationMeasure.getID() >= EVAL_PLUGIN) {
          metricName = ((PluginTag) this.m_evaluationMeasure).getMetricName();
          statName = ((PluginTag) this.m_evaluationMeasure).getStatisticName();
          pluginMetric = this.m_Evaluation.getPluginMetric(metricName);
          if (pluginMetric == null) {
            throw new Exception("Metric " + metricName + " does not seem to be " + "available");
          }
        }

        if (pluginMetric instanceof InformationRetrievalEvaluationMetric) {
          if (this.m_IRClassVal < 0) {
            repError[i] = ((InformationRetrievalEvaluationMetric) pluginMetric).getClassWeightedAverageStatistic(statName);
          } else {
            repError[i] = ((InformationRetrievalEvaluationMetric) pluginMetric).getStatistic(statName, this.m_IRClassVal);
          }
        } else {
          repError[i] = pluginMetric.getStatistic(statName);
        }
        break;
      }

      // check on the standard deviation
      if (!this.repeat(repError, i + 1)) {
        i++;
        break;
      }
    }

    for (j = 0; j < i; j++) {
      evalMetric += repError[j];
    }

    evalMetric /= i;
    this.m_Evaluation = null;

    // flip the sign of "smaller is better" measures so that larger is better
    switch (this.m_evaluationMeasure.getID()) {
    case EVAL_DEFAULT:
    case EVAL_ACCURACY:
    case EVAL_RMSE:
    case EVAL_MAE:
      if (this.m_trainInstances.classAttribute().isNominal() && (this.m_evaluationMeasure.getID() == EVAL_DEFAULT || this.m_evaluationMeasure.getID() == EVAL_ACCURACY)) {
        evalMetric = 1 - evalMetric;
      } else {
        evalMetric = -evalMetric; // maximize
      }
      break;
    default:
      if (pluginMetric != null && !pluginMetric.statisticIsMaximisable(statName)) {
        evalMetric = -evalMetric; // maximize
      }
    }

    return evalMetric;
  }

  /**
   * Returns a string describing the wrapper
   *
   * @return the description as a string
   */
  @Override
  public String toString() {
    StringBuffer text = new StringBuffer();

    if (this.m_trainInstances == null) {
      text.append("\tWrapper subset evaluator has not been built yet\n");
    } else {
      text.append("\tWrapper Subset Evaluator\n");
      text.append("\tLearning scheme: " + this.getClassifier().getClass().getName() + "\n");
      text.append("\tScheme options: ");
      String[] classifierOptions = new String[0];

      if (this.m_BaseClassifier instanceof OptionHandler) {
        classifierOptions = ((OptionHandler) this.m_BaseClassifier).getOptions();

        for (String classifierOption : classifierOptions) {
          text.append(classifierOption + " ");
        }
      }

      text.append("\n");
      String IRClassL = "";
      if (this.m_IRClassVal >= 0) {
        IRClassL = "(class value: " + this.m_trainInstances.classAttribute().value(this.m_IRClassVal) + ")";
      }
      switch (this.m_evaluationMeasure.getID()) {
      case EVAL_DEFAULT:
      case EVAL_ACCURACY:
        if (this.m_trainInstances.attribute(this.m_classIndex).isNumeric()) {
          text.append("\tSubset evaluation: RMSE\n");
        } else {
          text.append("\tSubset evaluation: classification accuracy\n");
        }
        break;
      case EVAL_RMSE:
        if (this.m_trainInstances.attribute(this.m_classIndex).isNumeric()) {
          text.append("\tSubset evaluation: RMSE\n");
        } else {
          text.append("\tSubset evaluation: RMSE (probability estimates)\n");
        }
        break;
      case EVAL_MAE:
        if (this.m_trainInstances.attribute(this.m_classIndex).isNumeric()) {
          text.append("\tSubset evaluation: MAE\n");
        } else {
          text.append("\tSubset evaluation: MAE (probability estimates)\n");
        }
        break;
      case EVAL_FMEASURE:
        text.append("\tSubset evaluation: F-measure " + (this.m_IRClassVal >= 0 ? IRClassL : "") + "\n");
        break;
      case EVAL_AUC:
        text.append("\tSubset evaluation: area under the ROC curve " + (this.m_IRClassVal >= 0 ? IRClassL : "") + "\n");
        break;
      case EVAL_AUPRC:
        text.append("\tSubset evaluation: area under the precision-recall curve " + (this.m_IRClassVal >= 0 ? IRClassL : "") + "\n");
        break;
      case EVAL_CORRELATION:
        text.append("\tSubset evaluation: correlation coefficient\n");
        break;
      default:
        text.append("\tSubset evaluation: " + this.m_evaluationMeasure.getReadable());
        if (((PluginTag) this.m_evaluationMeasure).getMetric() instanceof InformationRetrievalEvaluationMetric) {
          text.append(" " + (this.m_IRClassVal > 0 ? IRClassL : ""));
        }
        text.append("\n");
        break;
      }

      text.append("\tNumber of folds for accuracy estimation: " + this.m_folds + "\n");
    }
    return text.toString();
  }

  /**
   * decides whether to do another repeat of cross validation. If the standard
   * deviation of the cross validations is greater than threshold% of the mean
   * (default 1%) then another repeat is done.
   *
   * @param repError an array of cross validation results
   * @param entries the number of cross validations done so far
   * @return true if another cv is to be done
   */
  private boolean repeat(final double[] repError, final int entries) {
    int i;
    double mean = 0;
    double variance = 0;

    // setting a threshold less than zero allows for "manual" exploration
    // and prevents multiple xval for each subset
    if (this.m_threshold < 0) {
      return false;
    }

    if (entries == 1) {
      return true;
    }

    for (i = 0; i < entries; i++) {
      mean += repError[i];
    }

    mean /= entries;

    for (i = 0; i < entries; i++) {
      variance += ((repError[i] - mean) * (repError[i] - mean));
    }

    variance /= entries;

    if (variance > 0) {
      variance = Math.sqrt(variance);
    }

    // NOTE(review): if mean == 0 this divides by zero; the IEEE result
    // (Infinity/NaN) makes the comparison behave as "no repeat", which is the
    // long-standing upstream behaviour and is preserved here.
    if ((variance / mean) > this.m_threshold) {
      return true;
    }

    return false;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }

  @Override
  public void clean() {
    // drop the training data but keep the header so toString() etc. still work
    this.m_trainInstances = new Instances(this.m_trainInstances, 0);
  }

  /**
   * Main method for testing this class.
   *
   * @param args the options
   */
  public static void main(final String[] args) {
    runEvaluator(new WrapperSubsetEval(), args);
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/AbstractClassifier.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * AbstractClassifier.java
 * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import weka.core.Attribute;
import weka.core.BatchPredictor;
import weka.core.Capabilities;
import weka.core.CapabilitiesHandler;
import weka.core.CapabilitiesIgnorer;
import weka.core.CommandlineRunnable;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.SerializedObject;
import weka.core.Utils;

import java.io.Serializable;
import java.util.Enumeration;
import java.util.Vector;

/**
 * Abstract classifier. All schemes for numeric or nominal prediction in Weka
 * extend this class. Note that a classifier MUST either implement
 * distributionForInstance() or classifyInstance().
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @author Len Trigg (trigg@cs.waikato.ac.nz)
 * @version $Revision$
 */
public abstract class AbstractClassifier implements Classifier,
  BatchPredictor, Cloneable, Serializable, OptionHandler,
  CapabilitiesHandler, RevisionHandler, CapabilitiesIgnorer,
  CommandlineRunnable {

  /** for serialization */
  private static final long serialVersionUID = 6502780192411755341L;

  /** Whether the classifier is run in debug mode.
   */
  protected boolean m_Debug = false;

  /** Whether capabilities should not be checked before classifier is built. */
  protected boolean m_DoNotCheckCapabilities = false;

  /**
   * The number of decimal places used when printing numbers in the model.
   */
  // NOTE(review): public mutable static field — callers can change this
  // process-wide default at runtime
  public static int NUM_DECIMAL_PLACES_DEFAULT = 2;

  /** Per-instance decimal-place setting, initialised from the static default. */
  protected int m_numDecimalPlaces = NUM_DECIMAL_PLACES_DEFAULT;

  /** Default preferred batch size for batch predictions */
  public static String BATCH_SIZE_DEFAULT = "100";

  /** Per-instance preferred batch size, initialised from the static default. */
  protected String m_BatchSize = BATCH_SIZE_DEFAULT;

  /**
   * Creates a new instance of a classifier given it's class name and (optional)
   * arguments to pass to it's setOptions method. If the classifier implements
   * OptionHandler and the options parameter is non-null, the classifier will
   * have it's options set.
   *
   * @param classifierName the fully qualified class name of the classifier
   * @param options an array of options suitable for passing to setOptions. May
   *          be null.
   * @return the newly created classifier, ready for use.
   * @exception Exception if the classifier name is invalid, or the options
   *              supplied are not acceptable to the classifier
   */
  public static Classifier forName(String classifierName, String[] options)
    throws Exception {

    // Utils.forName performs the lookup, instantiation and option setting
    return ((AbstractClassifier) Utils.forName(Classifier.class,
      classifierName, options));
  }

  /**
   * Creates a deep copy of the given classifier using serialization.
   *
   * @param model the classifier to copy
   * @return a deep copy of the classifier
   * @exception Exception if an error occurs
   */
  public static Classifier makeCopy(Classifier model) throws Exception {

    return (Classifier) new SerializedObject(model).getObject();
  }

  /**
   * Creates a given number of deep copies of the given classifier using
   * serialization.
   *
   * @param model the classifier to copy
   * @param num the number of classifier copies to create.
   * @return an array of classifiers.
* @exception Exception if an error occurs */ public static Classifier[] makeCopies(Classifier model, int num) throws Exception { if (model == null) { throw new Exception("No model classifier set"); } Classifier[] classifiers = new Classifier[num]; SerializedObject so = new SerializedObject(model); for (int i = 0; i < classifiers.length; i++) { classifiers[i] = (Classifier) so.getObject(); } return classifiers; } /** * runs the classifier instance with the given options. * * @param classifier the classifier to run * @param options the commandline options */ public static void runClassifier(Classifier classifier, String[] options) { try { if (classifier instanceof CommandlineRunnable) { ((CommandlineRunnable)classifier).preExecution(); } System.out.println(Evaluation.evaluateModel(classifier, options)); } catch (Exception e) { if (((e.getMessage() != null) && (e.getMessage().indexOf("General options") == -1)) || (e.getMessage() == null)) { e.printStackTrace(); } else { System.err.println(e.getMessage()); } } if (classifier instanceof CommandlineRunnable) { try { ((CommandlineRunnable) classifier).postExecution(); } catch (Exception ex) { ex.printStackTrace(); } } } /** * Classifies the given test instance. The instance has to belong to a dataset * when it's being classified. Note that a classifier MUST implement either * this or distributionForInstance(). 
* * @param instance the instance to be classified * @return the predicted most likely class for the instance or * Utils.missingValue() if no prediction is made * @exception Exception if an error occurred during the prediction */ @Override public double classifyInstance(Instance instance) throws Exception { double[] dist = distributionForInstance(instance); if (dist == null) { throw new Exception("Null distribution predicted"); } switch (instance.classAttribute().type()) { case Attribute.NOMINAL: double max = 0; int maxIndex = 0; for (int i = 0; i < dist.length; i++) { if (dist[i] > max) { maxIndex = i; max = dist[i]; } } if (max > 0) { return maxIndex; } else { return Utils.missingValue(); } case Attribute.NUMERIC: case Attribute.DATE: return dist[0]; default: return Utils.missingValue(); } } /** * Predicts the class memberships for a given instance. If an instance is * unclassified, the returned array elements must be all zero. If the class is * numeric, the array must consist of only one element, which contains the * predicted value. Note that a classifier MUST implement either this or * classifyInstance(). * * @param instance the instance to be classified * @return an array containing the estimated membership probabilities of the * test instance in each class or the numeric prediction * @exception Exception if distribution could not be computed successfully */ @Override public double[] distributionForInstance(Instance instance) throws Exception { double[] dist = new double[instance.numClasses()]; switch (instance.classAttribute().type()) { case Attribute.NOMINAL: double classification = classifyInstance(instance); if (Utils.isMissingValue(classification)) { return dist; } else { dist[(int) classification] = 1.0; } return dist; case Attribute.NUMERIC: case Attribute.DATE: dist[0] = classifyInstance(instance); return dist; default: return dist; } } /** * Returns an enumeration describing the available options. 
*
   * @return an enumeration of all the available options.
   */
  @Override
  public Enumeration<Option> listOptions() {

    // start with options declared by superclasses in the hierarchy
    Vector<Option> newVector = Option
      .listOptionsForClassHierarchy(this.getClass(), AbstractClassifier.class);
    newVector.addElement(new Option(
      "\tIf set, classifier is run in debug mode and\n"
        + "\tmay output additional info to the console",
      "output-debug-info", 0, "-output-debug-info"));
    // NOTE(review): unlike the other entries, this Option's name includes a
    // leading dash ("-do-not-check-capabilities") — looks inconsistent with
    // the flag name parsed in setOptions(); confirm before relying on it
    newVector.addElement(new Option(
      "\tIf set, classifier capabilities are not checked before classifier is built\n"
        + "\t(use with caution).",
      "-do-not-check-capabilities", 0, "-do-not-check-capabilities"));
    newVector.addElement(new Option(
      "\tThe number of decimal places for the output of numbers in the model"
        + " (default " + m_numDecimalPlaces + ").",
      "num-decimal-places", 1, "-num-decimal-places"));
    newVector.addElement(new Option(
      "\tThe desired batch size for batch prediction " + " (default "
        + m_BatchSize + ").",
      "batch-size", 1, "-batch-size"));
    return newVector.elements();
  }

  /**
   * Gets the current settings of the Classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {

    Vector<String> options = new Vector<String>();
    // collect options contributed by subclasses in the hierarchy first
    for (String s : Option.getOptionsForHierarchy(this,
      AbstractClassifier.class)) {
      options.add(s);
    }

    if (getDebug()) {
      options.add("-output-debug-info");
    }
    if (getDoNotCheckCapabilities()) {
      options.add("-do-not-check-capabilities");
    }
    // only emit values that differ from the defaults
    if (getNumDecimalPlaces() != NUM_DECIMAL_PLACES_DEFAULT) {
      options.add("-num-decimal-places");
      options.add("" + getNumDecimalPlaces());
    }
    if (!(getBatchSize().equals(BATCH_SIZE_DEFAULT))) {
      options.add("-batch-size");
      options.add("" + getBatchSize());
    }
    return options.toArray(new String[0]);
  }

  /**
   * Parses a given list of options. Valid options are:
   * <p>
   *
   * -output-debug-info <br>
   * If set, classifier is run in debug mode and may output additional info to
   * the console.
* <p> * * -do-not-check-capabilities <br> * If set, classifier capabilities are not checked before classifier is built * (use with caution). * <p> * * -num-decimal-places <br> * The number of decimal places for the output of numbers in the model. * <p> * * -batch-size <br> * The desired batch size for batch prediction. * <p> * * @param options the list of options as an array of strings * @exception Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { Option.setOptionsForHierarchy(options, this, AbstractClassifier.class); setDebug(Utils.getFlag("output-debug-info", options)); setDoNotCheckCapabilities( Utils.getFlag("do-not-check-capabilities", options)); String optionString = Utils.getOption("num-decimal-places", options); if (optionString.length() != 0) { setNumDecimalPlaces((new Integer(optionString)).intValue()); } optionString = Utils.getOption("batch-size", options); if (optionString.length() != 0) { setBatchSize(optionString); } } /** * Get whether debugging is turned on. * * @return true if debugging output is on */ public boolean getDebug() { return m_Debug; } /** * Set debugging mode. * * @param debug true if debug output should be printed */ public void setDebug(boolean debug) { m_Debug = debug; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String debugTipText() { return "If set to true, classifier may output additional info to " + "the console."; } /** * Get whether capabilities checking is turned off. * * @return true if capabilities checking is turned off. */ @Override public boolean getDoNotCheckCapabilities() { return m_DoNotCheckCapabilities; } /** * Set whether not to check capabilities. * * @param doNotCheckCapabilities true if capabilities are not to be checked. 
*/
  @Override
  public void setDoNotCheckCapabilities(boolean doNotCheckCapabilities) {

    m_DoNotCheckCapabilities = doNotCheckCapabilities;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String doNotCheckCapabilitiesTipText() {
    return "If set, classifier capabilities are not checked before classifier is built"
      + " (Use with caution to reduce runtime).";
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String numDecimalPlacesTipText() {
    return "The number of decimal places to be used for the output of numbers in "
      + "the model.";
  }

  /**
   * Get the number of decimal places.
   *
   * @return the number of decimal places used when printing numbers in the
   *         model
   */
  public int getNumDecimalPlaces() {
    return m_numDecimalPlaces;
  }

  /**
   * Set the number of decimal places.
   *
   * @param num the number of decimal places to use when printing numbers in
   *          the model
   */
  public void setNumDecimalPlaces(int num) {
    m_numDecimalPlaces = num;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String batchSizeTipText() {
    return "The preferred number of instances to process if batch prediction is "
      + "being performed. More or fewer instances may be provided, but this gives "
      + "implementations a chance to specify a preferred batch size.";
  }

  /**
   * Set the preferred batch size for batch prediction.
   *
   * @param size the batch size to use
   */
  @Override
  public void setBatchSize(String size) {
    m_BatchSize = size;
  }

  /**
   * Get the preferred batch size for batch prediction.
   *
   * @return the preferred batch size
   */
  @Override
  public String getBatchSize() {
    return m_BatchSize;
  }

  /**
   * Return true if this classifier can generate batch predictions in an
   * efficient manner. Default implementation here returns false. Subclasses to
   * override as appropriate.
   *
   * @return true if this classifier can generate batch predictions in an
   *         efficient manner.
*/
  @Override
  public boolean implementsMoreEfficientBatchPrediction() {
    return false;
  }

  /**
   * Batch prediction method. This default implementation simply calls
   * distributionForInstance() for each instance in the batch. If subclasses can
   * produce batch predictions in a more efficient manner than this they should
   * override this method and also return true from
   * implementsMoreEfficientBatchPrediction()
   *
   * @param batch the instances to get predictions for
   * @return an array of probability distributions, one for each instance in the
   *         batch
   * @throws Exception if a problem occurs.
   */
  @Override
  public double[][] distributionsForInstances(Instances batch)
    throws Exception {
    double[][] batchPreds = new double[batch.numInstances()][];
    for (int i = 0; i < batch.numInstances(); i++) {
      batchPreds[i] = distributionForInstance(batch.instance(i));
    }

    return batchPreds;
  }

  /**
   * Returns the Capabilities of this classifier. Maximally permissive
   * capabilities are allowed by default. Derived classifiers should override
   * this method and first disable all capabilities and then enable just those
   * capabilities that make sense for the scheme.
   *
   * @return the capabilities of this object
   * @see Capabilities
   */
  @Override
  public Capabilities getCapabilities() {
    // everything enabled by default — subclasses are expected to restrict
    Capabilities result = new Capabilities(this);
    result.enableAll();

    return result;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }

  /**
   * Perform any setup stuff that might need to happen before commandline
   * execution. Subclasses should override if they need to do something here
   *
   * @throws Exception if a problem occurs during setup
   */
  @Override
  public void preExecution() throws Exception {
  }

  /**
   * Execute the supplied object.
   *
   * @param toRun the object to execute
   * @param options any options to pass to the object
   * @throws Exception if the object is not of the expected type.
*/
  @Override
  public void run(Object toRun, String[] options) throws Exception {
    // Only Classifier instances can be executed; anything else is a caller
    // error.
    if (toRun instanceof Classifier) {
      runClassifier((Classifier) toRun, options);
    } else {
      throw new IllegalArgumentException("Object to run is not a Classifier!");
    }
  }

  /**
   * Perform any teardown stuff that might need to happen after execution.
   * Subclasses should override if they need to do something here
   *
   * @throws Exception if a problem occurs during teardown
   */
  @Override
  public void postExecution() throws Exception {
    // intentionally empty — hook for subclasses
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/AggregateableEvaluation.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * AggregateableEvaluation.java
 * Copyright (C) 2011-2012 University of Waikato, Hamilton, New Zealand
 */

package weka.classifiers;

import weka.core.Instances;

/**
 * Subclass of Evaluation that provides a method for aggregating the results
 * stored in another Evaluation object. Delegates to the actual implementation
 * in weka.classifiers.evaluation.AggregateableEvaluation.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision$
 */
public class AggregateableEvaluation extends Evaluation {

  /** For serialization */
  private static final long serialVersionUID = 6850546230173753210L;

  /**
   * Constructs a new AggregateableEvaluation object
   *
   * @param data the Instances to use
   * @throws Exception if a problem occurs
   */
  public AggregateableEvaluation(Instances data) throws Exception {
    super(data);
    // replace the delegate created by the superclass with an aggregateable one
    m_delegate = new weka.classifiers.evaluation.AggregateableEvaluation(data);
  }

  /**
   * Constructs a new AggregateableEvaluation object
   *
   * @param data the Instances to use
   * @param costMatrix the cost matrix to use
   * @throws Exception if a problem occurs
   */
  public AggregateableEvaluation(Instances data, CostMatrix costMatrix)
    throws Exception {
    super(data, costMatrix);
    m_delegate =
      new weka.classifiers.evaluation.AggregateableEvaluation(data, costMatrix);
  }

  /**
   * Constructs a new AggregateableEvaluation object based on an Evaluation
   * object
   *
   * @param eval the Evaluation object to use
   * @throws Exception if a problem occurs
   */
  public AggregateableEvaluation(Evaluation eval) throws Exception {
    super(eval.getHeader());
    m_delegate = new weka.classifiers.evaluation.AggregateableEvaluation(
      eval.m_delegate);
  }

  /**
   * Adds the statistics encapsulated in the supplied Evaluation object into
   * this one. Does not perform any checks for compatibility between the
   * supplied Evaluation object and this one.
   *
   * @param evaluation the evaluation object to aggregate
   */
  public void aggregate(Evaluation evaluation) {
    ((weka.classifiers.evaluation.AggregateableEvaluation) m_delegate)
      .aggregate(evaluation.m_delegate);
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/BVDecompose.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * BVDecompose.java
 * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.Reader;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;

import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;

/**
 <!-- globalinfo-start -->
 * Class for performing a Bias-Variance decomposition on any classifier using the method specified in:<br/>
 * <br/>
 * Ron Kohavi, David H. Wolpert: Bias Plus Variance Decomposition for Zero-One Loss Functions. In: Machine Learning: Proceedings of the Thirteenth International Conference, 275-283, 1996.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;inproceedings{Kohavi1996,
 *    author = {Ron Kohavi and David H. Wolpert},
 *    booktitle = {Machine Learning: Proceedings of the Thirteenth International Conference},
 *    editor = {Lorenza Saitta},
 *    pages = {275-283},
 *    publisher = {Morgan Kaufmann},
 *    title = {Bias Plus Variance Decomposition for Zero-One Loss Functions},
 *    year = {1996},
 *    PS = {http://robotics.stanford.edu/\~ronnyk/biasVar.ps}
 * }
 * </pre>
 * <p/>
 <!-- technical-bibtex-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -c &lt;class index&gt;
 *  The index of the class attribute.
 *  (default last)</pre>
 *
 * <pre> -t &lt;name of arff file&gt;
 *  The name of the arff file used for the decomposition.</pre>
 *
 * <pre> -T &lt;training pool size&gt;
 *  The number of instances placed in the training pool.
 *  The remainder will be used for testing. (default 100)</pre>
 *
 * <pre> -s &lt;seed&gt;
 *  The random number seed used.</pre>
 *
 * <pre> -x &lt;num&gt;
 *  The number of training repetitions used.
 *  (default 50)</pre>
 *
 * <pre> -D
 *  Turn on debugging output.</pre>
 *
 * <pre> -W &lt;classifier class name&gt;
 *  Full class name of the learner used in the decomposition.
 *  eg: weka.classifiers.bayes.NaiveBayes</pre>
 *
 * <pre>
 * Options specific to learner weka.classifiers.rules.ZeroR:
 * </pre>
 *
 * <pre> -D
 *  If set, classifier is run in debug mode and
 *  may output additional info to the console</pre>
 *
 <!-- options-end -->
 *
 * Options after -- are passed to the designated sub-learner. <p>
 *
 * @author Len Trigg (trigg@cs.waikato.ac.nz)
 * @version $Revision$
 */
public class BVDecompose
  implements OptionHandler, TechnicalInformationHandler, RevisionHandler {

  /** Debugging mode, gives extra output if true */
  protected boolean m_Debug;

  /** An instantiated base classifier used for getting and testing options. */
  protected Classifier m_Classifier = new weka.classifiers.rules.ZeroR();

  /** The options to be passed to the base classifier.
*/
  protected String [] m_ClassifierOptions;

  /** The number of train iterations */
  protected int m_TrainIterations = 50;

  /** The name of the data file used for the decomposition */
  protected String m_DataFileName;

  /** The index of the class attribute */
  // 0-based internally; -1 means "use the last attribute"
  protected int m_ClassIndex = -1;

  /** The random number seed */
  protected int m_Seed = 1;

  /** The calculated bias (squared) */
  protected double m_Bias;

  /** The calculated variance */
  protected double m_Variance;

  /** The calculated sigma (squared) */
  protected double m_Sigma;

  /** The error rate */
  protected double m_Error;

  /** The number of instances used in the training pool */
  protected int m_TrainPoolSize = 100;

  /**
   * Returns a string describing this object
   * @return a description of the classifier suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return
        "Class for performing a Bias-Variance decomposition on any classifier "
      + "using the method specified in:\n\n"
      + getTechnicalInformation().toString();
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation 	result;

    result = new TechnicalInformation(Type.INPROCEEDINGS);
    result.setValue(Field.AUTHOR, "Ron Kohavi and David H. Wolpert");
    result.setValue(Field.YEAR, "1996");
    result.setValue(Field.TITLE, "Bias Plus Variance Decomposition for Zero-One Loss Functions");
    result.setValue(Field.BOOKTITLE, "Machine Learning: Proceedings of the Thirteenth International Conference");
    result.setValue(Field.PUBLISHER, "Morgan Kaufmann");
    result.setValue(Field.EDITOR, "Lorenza Saitta");
    result.setValue(Field.PAGES, "275-283");
    result.setValue(Field.PS, "http://robotics.stanford.edu/~ronnyk/biasVar.ps");

    return result;
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration<Option> listOptions() {

    Vector<Option> newVector = new Vector<Option>(7);

    newVector.addElement(new Option(
          "\tThe index of the class attribute.\n"+
          "\t(default last)",
          "c", 1, "-c <class index>"));
    newVector.addElement(new Option(
          "\tThe name of the arff file used for the decomposition.",
          "t", 1, "-t <name of arff file>"));
    newVector.addElement(new Option(
          "\tThe number of instances placed in the training pool.\n"
          + "\tThe remainder will be used for testing. (default 100)",
          "T", 1, "-T <training pool size>"));
    newVector.addElement(new Option(
          "\tThe random number seed used.",
          "s", 1, "-s <seed>"));
    newVector.addElement(new Option(
          "\tThe number of training repetitions used.\n"
          +"\t(default 50)",
          "x", 1, "-x <num>"));
    newVector.addElement(new Option(
          "\tTurn on debugging output.",
          "D", 0, "-D"));
    newVector.addElement(new Option(
          "\tFull class name of the learner used in the decomposition.\n"
          +"\teg: weka.classifiers.bayes.NaiveBayes",
          "W", 1, "-W <classifier class name>"));

    // append the options of the currently configured base classifier
    if ((m_Classifier != null) &&
        (m_Classifier instanceof OptionHandler)) {
      newVector.addElement(new Option(
            "",
            "", 0, "\nOptions specific to learner "
            + m_Classifier.getClass().getName()
            + ":"));
      newVector.addAll(Collections.list(((OptionHandler)m_Classifier).listOptions()));
    }
    return newVector.elements();
  }

  /**
   * Parses a given list of options.
<p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -c &lt;class index&gt;
   *  The index of the class attribute.
   *  (default last)</pre>
   *
   * <pre> -t &lt;name of arff file&gt;
   *  The name of the arff file used for the decomposition.</pre>
   *
   * <pre> -T &lt;training pool size&gt;
   *  The number of instances placed in the training pool.
   *  The remainder will be used for testing. (default 100)</pre>
   *
   * <pre> -s &lt;seed&gt;
   *  The random number seed used.</pre>
   *
   * <pre> -x &lt;num&gt;
   *  The number of training repetitions used.
   *  (default 50)</pre>
   *
   * <pre> -D
   *  Turn on debugging output.</pre>
   *
   * <pre> -W &lt;classifier class name&gt;
   *  Full class name of the learner used in the decomposition.
   *  eg: weka.classifiers.bayes.NaiveBayes</pre>
   *
   * <pre>
   * Options specific to learner weka.classifiers.rules.ZeroR:
   * </pre>
   *
   * <pre> -D
   *  If set, classifier is run in debug mode and
   *  may output additional info to the console</pre>
   *
   <!-- options-end -->
   *
   * Options after -- are passed to the designated sub-learner. <p>
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {

    setDebug(Utils.getFlag('D', options));

    String classIndex = Utils.getOption('c', options);
    if (classIndex.length() != 0) {
      // setClassIndex takes a 1-based index; 0 encodes "last" (it maps to the
      // internal m_ClassIndex = -1, which decompose() resolves to the last
      // attribute)
      if (classIndex.toLowerCase().equals("last")) {
        setClassIndex(0);
      } else if (classIndex.toLowerCase().equals("first")) {
        setClassIndex(1);
      } else {
        setClassIndex(Integer.parseInt(classIndex));
      }
    } else {
      setClassIndex(0);
    }

    String trainIterations = Utils.getOption('x', options);
    if (trainIterations.length() != 0) {
      setTrainIterations(Integer.parseInt(trainIterations));
    } else {
      setTrainIterations(50);
    }

    String trainPoolSize = Utils.getOption('T', options);
    if (trainPoolSize.length() != 0) {
      setTrainPoolSize(Integer.parseInt(trainPoolSize));
    } else {
      setTrainPoolSize(100);
    }

    String seedString = Utils.getOption('s', options);
    if (seedString.length() != 0) {
      setSeed(Integer.parseInt(seedString));
    } else {
      setSeed(1);
    }

    String dataFile = Utils.getOption('t', options);
    if (dataFile.length() == 0) {
      throw new Exception("An arff file must be specified"
          + " with the -t option.");
    }
    setDataFileName(dataFile);

    String classifierName = Utils.getOption('W', options);
    if (classifierName.length() == 0) {
      throw new Exception("A learner must be specified with the -W option.");
    }
    // remaining options (after --) are handed to the sub-learner
    setClassifier(AbstractClassifier.forName(classifierName,
          Utils.partitionOptions(options)));
  }

  /**
   * Gets the current settings of BVDecompose.
*
   * @return an array of strings suitable for passing to setOptions
   */
  public String [] getOptions() {

    String [] classifierOptions = new String [0];
    if ((m_Classifier != null) &&
        (m_Classifier instanceof OptionHandler)) {
      classifierOptions = ((OptionHandler)m_Classifier).getOptions();
    }
    // 14 = maximum number of slots this class itself can emit
    String [] options = new String [classifierOptions.length + 14];
    int current = 0;
    if (getDebug()) {
      options[current++] = "-D";
    }
    options[current++] = "-c";
    options[current++] = "" + getClassIndex();
    options[current++] = "-x";
    options[current++] = "" + getTrainIterations();
    options[current++] = "-T";
    options[current++] = "" + getTrainPoolSize();
    options[current++] = "-s";
    options[current++] = "" + getSeed();
    if (getDataFileName() != null) {
      options[current++] = "-t";
      options[current++] = "" + getDataFileName();
    }
    if (getClassifier() != null) {
      options[current++] = "-W";
      options[current++] = getClassifier().getClass().getName();
    }
    options[current++] = "--";
    System.arraycopy(classifierOptions, 0, options, current,
        classifierOptions.length);
    current += classifierOptions.length;
    // pad unused slots — presumably Weka option consumers tolerate trailing
    // empty strings; TODO confirm
    while (current < options.length) {
      options[current++] = "";
    }
    return options;
  }

  /**
   * Get the number of instances in the training pool.
   *
   * @return number of instances in the training pool.
   */
  public int getTrainPoolSize() {

    return m_TrainPoolSize;
  }

  /**
   * Set the number of instances in the training pool.
   *
   * @param numTrain number of instances in the training pool.
   */
  public void setTrainPoolSize(int numTrain) {

    m_TrainPoolSize = numTrain;
  }

  /**
   * Set the classifiers being analysed
   *
   * @param newClassifier the Classifier to use.
   */
  public void setClassifier(Classifier newClassifier) {

    m_Classifier = newClassifier;
  }

  /**
   * Gets the name of the classifier being analysed
   *
   * @return the classifier being analysed.
   */
  public Classifier getClassifier() {

    return m_Classifier;
  }

  /**
   * Sets debugging mode
   *
   * @param debug true if debug output should be printed
   */
  public void setDebug(boolean debug) {

    m_Debug = debug;
  }

  /**
   * Gets whether debugging is turned on
   *
   * @return true if debugging output is on
   */
  public boolean getDebug() {

    return m_Debug;
  }

  /**
   * Sets the random number seed
   *
   * @param seed the random number seed
   */
  public void setSeed(int seed) {

    m_Seed = seed;
  }

  /**
   * Gets the random number seed
   *
   * @return the random number seed
   */
  public int getSeed() {

    return m_Seed;
  }

  /**
   * Sets the maximum number of boost iterations
   *
   * @param trainIterations the number of boost iterations
   */
  public void setTrainIterations(int trainIterations) {

    m_TrainIterations = trainIterations;
  }

  /**
   * Gets the maximum number of boost iterations
   *
   * @return the maximum number of boost iterations
   */
  public int getTrainIterations() {

    return m_TrainIterations;
  }

  /**
   * Sets the name of the data file used for the decomposition
   *
   * @param dataFileName the data file to use
   */
  public void setDataFileName(String dataFileName) {

    m_DataFileName = dataFileName;
  }

  /**
   * Get the name of the data file used for the decomposition
   *
   * @return the name of the data file
   */
  public String getDataFileName() {

    return m_DataFileName;
  }

  /**
   * Get the index (starting from 1) of the attribute used as the class.
* @return the index of the class attribute
   */
  public int getClassIndex() {
    return m_ClassIndex + 1;
  }

  /**
   * Sets index of attribute to discretize on
   *
   * @param classIndex the index (starting from 1) of the class attribute
   */
  public void setClassIndex(int classIndex) {
    m_ClassIndex = classIndex - 1;
  }

  /**
   * Get the calculated bias squared
   *
   * @return the bias squared
   */
  public double getBias() {
    return m_Bias;
  }

  /**
   * Get the calculated variance
   *
   * @return the variance
   */
  public double getVariance() {
    return m_Variance;
  }

  /**
   * Get the calculated sigma squared
   *
   * @return the sigma squared
   */
  public double getSigma() {
    return m_Sigma;
  }

  /**
   * Get the calculated error rate
   *
   * @return the error rate
   */
  public double getError() {
    return m_Error;
  }

  /**
   * Carry out the bias-variance decomposition.
   *
   * Splits the data into a training pool of m_TrainPoolSize instances and a
   * test set holding the remainder. For m_TrainIterations rounds, half of the
   * re-shuffled pool trains a fresh copy of m_Classifier; its predictions on
   * the test set are tallied per class, and the tallies then yield the
   * bias / variance / sigma estimates and the overall error rate.
   *
   * @throws Exception if the decomposition couldn't be carried out
   */
  public void decompose() throws Exception {

    // FIX: the reader was previously never closed (resource leak);
    // try-with-resources releases the file handle even on a parse error.
    Instances data;
    try (Reader dataReader = new BufferedReader(new FileReader(m_DataFileName))) {
      data = new Instances(dataReader);
    }

    if (m_ClassIndex < 0) {
      data.setClassIndex(data.numAttributes() - 1);
    } else {
      data.setClassIndex(m_ClassIndex);
    }
    if (data.classAttribute().type() != Attribute.NOMINAL) {
      throw new Exception("Class attribute must be nominal");
    }
    int numClasses = data.numClasses();
    data.deleteWithMissingClass();
    if (data.checkForStringAttributes()) {
      throw new Exception("Can't handle string attributes!");
    }
    // Need a pool of m_TrainPoolSize plus at least that many test instances.
    if (data.numInstances() < 2 * m_TrainPoolSize) {
      throw new Exception("The dataset must contain at least "
          + (2 * m_TrainPoolSize) + " instances");
    }
    Random random = new Random(m_Seed);
    data.randomize(random);
    Instances trainPool = new Instances(data, 0, m_TrainPoolSize);
    Instances test = new Instances(data, m_TrainPoolSize,
        data.numInstances() - m_TrainPoolSize);
    int numTest = test.numInstances();
    // Per-test-instance tally of how often each class label was predicted.
    double [][] instanceProbs = new double [numTest][numClasses];

    m_Error = 0;
    for (int i = 0; i < m_TrainIterations; i++) {
      if (m_Debug) {
        System.err.println("Iteration " + (i + 1));
      }
      trainPool.randomize(random);
      Instances train = new Instances(trainPool, 0, m_TrainPoolSize / 2);

      Classifier current = AbstractClassifier.makeCopy(m_Classifier);
      current.buildClassifier(train);

      //// Evaluate the classifier on test, updating BVD stats
      for (int j = 0; j < numTest; j++) {
        int pred = (int) current.classifyInstance(test.instance(j));
        if (pred != test.instance(j).classValue()) {
          m_Error++;
        }
        instanceProbs[j][pred]++;
      }
    }
    m_Error /= (m_TrainIterations * numTest);

    // Average the BV over each instance in test.
    m_Bias = 0;
    m_Variance = 0;
    m_Sigma = 0;
    for (int i = 0; i < numTest; i++) {
      Instance current = test.instance(i);
      double [] predProbs = instanceProbs[i];
      double pActual, pPred;
      double bsum = 0, vsum = 0, ssum = 0;
      for (int j = 0; j < numClasses; j++) {
        pActual = (current.classValue() == j) ? 1 : 0; // Or via 1NN from test data?
        pPred = predProbs[j] / m_TrainIterations;
        // Correction term removes the sampling variance of pPred.
        bsum += (pActual - pPred) * (pActual - pPred)
            - pPred * (1 - pPred) / (m_TrainIterations - 1);
        vsum += pPred * pPred;
        ssum += pActual * pActual;
      }
      m_Bias += bsum;
      m_Variance += (1 - vsum);
      m_Sigma += (1 - ssum);
    }
    m_Bias /= (2 * numTest);
    m_Variance /= (2 * numTest);
    m_Sigma /= (2 * numTest);

    if (m_Debug) {
      System.err.println("Decomposition finished");
    }
  }

  /**
   * Returns description of the bias-variance decomposition results.
   *
   * @return the bias-variance decomposition results as a string
   */
  public String toString() {

    String result = "\nBias-Variance Decomposition\n";
    if (getClassifier() == null) {
      return "Invalid setup";
    }
    result += "\nClassifier : " + getClassifier().getClass().getName();
    if (getClassifier() instanceof OptionHandler) {
      // NOTE(review): options are appended without a separating space -
      // confirm the intended output format.
      result += Utils.joinOptions(((OptionHandler) m_Classifier).getOptions());
    }
    result += "\nData File : " + getDataFileName();
    result += "\nClass Index : ";
    if (getClassIndex() == 0) {
      result += "last";
    } else {
      result += getClassIndex();
    }
    result += "\nTraining Pool: " + getTrainPoolSize();
    result += "\nIterations : " + getTrainIterations();
    result += "\nSeed : " + getSeed();
    result += "\nError : " + Utils.doubleToString(getError(), 6, 4);
    result += "\nSigma^2 : " + Utils.doubleToString(getSigma(), 6, 4);
    result += "\nBias^2 : " + Utils.doubleToString(getBias(), 6, 4);
    result += "\nVariance : " + Utils.doubleToString(getVariance(), 6, 4);

    return result + "\n";
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }

  /**
   * Test method for this class
   *
   * @param args the command line arguments
   */
  public static void main(String [] args) {

    try {
      BVDecompose bvd = new BVDecompose();
      try {
        bvd.setOptions(args);
        Utils.checkForRemainingOptions(args);
      } catch (Exception ex) {
        String result = ex.getMessage() + "\nBVDecompose Options:\n\n";
        Enumeration<Option> enu = bvd.listOptions();
        while (enu.hasMoreElements()) {
          Option option = (Option) enu.nextElement();
          result += option.synopsis() + "\n" + option.description() + "\n";
        }
        throw new Exception(result);
      }
      bvd.decompose();
      System.out.println(bvd.toString());
    } catch (Exception ex) {
      System.err.println(ex.getMessage());
    }
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/BVDecomposeSegCVSub.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * BVDecomposeSegCVSub.java * Copyright (C) 2003 Paul Conilione * * Based on the class: BVDecompose.java by Len Trigg (1999) */ /* * DEDICATION * * Paul Conilione would like to express his deep gratitude and appreciation * to his Chinese Buddhist Taoist Master Sifu Chow Yuk Nen for the abilities * and insight that he has been taught, which have allowed him to program in * a clear and efficient manner. * * Master Sifu Chow Yuk Nen's Teachings are unique and precious. They are * applicable to any field of human endeavour. Through his unique and powerful * ability to skilfully apply Chinese Buddhist Teachings, people have achieved * success in; Computing, chemical engineering, business, accounting, philosophy * and more. 
* */ package weka.classifiers; import weka.core.Attribute; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import java.io.BufferedReader; import java.io.FileReader; import java.io.Reader; import java.util.Collections; import java.util.Enumeration; import java.util.Random; import java.util.Vector; /** <!-- globalinfo-start --> * This class performs Bias-Variance decomposion on any classifier using the sub-sampled cross-validation procedure as specified in (1).<br/> * The Kohavi and Wolpert definition of bias and variance is specified in (2).<br/> * The Webb definition of bias and variance is specified in (3).<br/> * <br/> * Geoffrey I. Webb, Paul Conilione (2002). Estimating bias and variance from data. School of Computer Science and Software Engineering, Victoria, Australia.<br/> * <br/> * Ron Kohavi, David H. Wolpert: Bias Plus Variance Decomposition for Zero-One Loss Functions. In: Machine Learning: Proceedings of the Thirteenth International Conference, 275-283, 1996.<br/> * <br/> * Geoffrey I. Webb (2000). MultiBoosting: A Technique for Combining Boosting and Wagging. Machine Learning. 40(2):159-196. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;misc{Webb2002, * address = {School of Computer Science and Software Engineering, Victoria, Australia}, * author = {Geoffrey I. Webb and Paul Conilione}, * institution = {Monash University}, * title = {Estimating bias and variance from data}, * year = {2002}, * PDF = {http://www.csse.monash.edu.au/\~webb/Files/WebbConilione04.pdf} * } * * &#64;inproceedings{Kohavi1996, * author = {Ron Kohavi and David H. 
Wolpert}, * booktitle = {Machine Learning: Proceedings of the Thirteenth International Conference}, * editor = {Lorenza Saitta}, * pages = {275-283}, * publisher = {Morgan Kaufmann}, * title = {Bias Plus Variance Decomposition for Zero-One Loss Functions}, * year = {1996}, * PS = {http://robotics.stanford.edu/\~ronnyk/biasVar.ps} * } * * &#64;article{Webb2000, * author = {Geoffrey I. Webb}, * journal = {Machine Learning}, * number = {2}, * pages = {159-196}, * title = {MultiBoosting: A Technique for Combining Boosting and Wagging}, * volume = {40}, * year = {2000} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -c &lt;class index&gt; * The index of the class attribute. * (default last)</pre> * * <pre> -D * Turn on debugging output.</pre> * * <pre> -l &lt;num&gt; * The number of times each instance is classified. * (default 10)</pre> * * <pre> -p &lt;proportion of objects in common&gt; * The average proportion of instances common between any two training sets</pre> * * <pre> -s &lt;seed&gt; * The random number seed used.</pre> * * <pre> -t &lt;name of arff file&gt; * The name of the arff file used for the decomposition.</pre> * * <pre> -T &lt;number of instances in training set&gt; * The number of instances in the training set.</pre> * * <pre> -W &lt;classifier class name&gt; * Full class name of the learner used in the decomposition. * eg: weka.classifiers.bayes.NaiveBayes</pre> * * <pre> * Options specific to learner weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * Options after -- are passed to the designated sub-learner. <p> * * @author Paul Conilione (paulc4321@yahoo.com.au) * @version $Revision$ */ public class BVDecomposeSegCVSub implements OptionHandler, TechnicalInformationHandler, RevisionHandler { /** Debugging mode, gives extra output if true. 
*/
  protected boolean m_Debug;

  /** An instantiated base classifier used for getting and testing options. */
  protected Classifier m_Classifier = new weka.classifiers.rules.ZeroR();

  /** The options to be passed to the base classifier. */
  protected String [] m_ClassifierOptions;

  /** The number of times an instance is classified*/
  protected int m_ClassifyIterations;

  /** The name of the data file used for the decomposition */
  protected String m_DataFileName;

  /** The index of the class attribute */
  protected int m_ClassIndex = -1;

  /** The random number seed */
  protected int m_Seed = 1;

  /** The calculated Kohavi & Wolpert bias (squared) */
  protected double m_KWBias;

  /** The calculated Kohavi & Wolpert variance */
  protected double m_KWVariance;

  /** The calculated Kohavi & Wolpert sigma */
  protected double m_KWSigma;

  /** The calculated Webb bias */
  protected double m_WBias;

  /** The calculated Webb variance */
  protected double m_WVariance;

  /** The error rate */
  protected double m_Error;

  /** The training set size */
  protected int m_TrainSize;

  /** Proportion of instances common between any two training sets. */
  protected double m_P;

  /**
   * Returns a string describing this object
   * @return a description of the classifier suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    // FIX: corrected the user-visible typo "decomposion" -> "decomposition".
    return "This class performs Bias-Variance decomposition on any classifier using the "
        + "sub-sampled cross-validation procedure as specified in (1).\n"
        + "The Kohavi and Wolpert definition of bias and variance is specified in (2).\n"
        + "The Webb definition of bias and variance is specified in (3).\n\n"
        + getTechnicalInformation().toString();
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
* @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;
    TechnicalInformation additional;

    // Primary reference: the tech report describing the sub-sampled
    // cross-validation estimation procedure.
    result = new TechnicalInformation(Type.MISC);
    result.setValue(Field.AUTHOR, "Geoffrey I. Webb and Paul Conilione");
    result.setValue(Field.YEAR, "2002");
    result.setValue(Field.TITLE, "Estimating bias and variance from data");
    result.setValue(Field.INSTITUTION, "Monash University");
    result.setValue(Field.ADDRESS, "School of Computer Science and Software Engineering, Victoria, Australia");
    result.setValue(Field.PDF, "http://www.csse.monash.edu.au/~webb/Files/WebbConilione04.pdf");

    // Secondary reference: Kohavi & Wolpert's bias/variance definition.
    additional = result.add(Type.INPROCEEDINGS);
    additional.setValue(Field.AUTHOR, "Ron Kohavi and David H. Wolpert");
    additional.setValue(Field.YEAR, "1996");
    additional.setValue(Field.TITLE, "Bias Plus Variance Decomposition for Zero-One Loss Functions");
    additional.setValue(Field.BOOKTITLE, "Machine Learning: Proceedings of the Thirteenth International Conference");
    additional.setValue(Field.PUBLISHER, "Morgan Kaufmann");
    additional.setValue(Field.EDITOR, "Lorenza Saitta");
    additional.setValue(Field.PAGES, "275-283");
    additional.setValue(Field.PS, "http://robotics.stanford.edu/~ronnyk/biasVar.ps");

    // Secondary reference: Webb's bias/variance definition.
    additional = result.add(Type.ARTICLE);
    additional.setValue(Field.AUTHOR, "Geoffrey I. Webb");
    additional.setValue(Field.YEAR, "2000");
    additional.setValue(Field.TITLE, "MultiBoosting: A Technique for Combining Boosting and Wagging");
    additional.setValue(Field.JOURNAL, "Machine Learning");
    additional.setValue(Field.VOLUME, "40");
    additional.setValue(Field.NUMBER, "2");
    additional.setValue(Field.PAGES, "159-196");

    return result;
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
*/
  public Enumeration<Option> listOptions() {

    Vector<Option> newVector = new Vector<Option>(8);

    newVector.addElement(new Option(
        "\tThe index of the class attribute.\n"+
        "\t(default last)",
        "c", 1, "-c <class index>"));
    newVector.addElement(new Option(
        "\tTurn on debugging output.",
        "D", 0, "-D"));
    newVector.addElement(new Option(
        "\tThe number of times each instance is classified.\n"
        +"\t(default 10)",
        "l", 1, "-l <num>"));
    newVector.addElement(new Option(
        "\tThe average proportion of instances common between any two training sets",
        "p", 1, "-p <proportion of objects in common>"));
    newVector.addElement(new Option(
        "\tThe random number seed used.",
        "s", 1, "-s <seed>"));
    newVector.addElement(new Option(
        "\tThe name of the arff file used for the decomposition.",
        "t", 1, "-t <name of arff file>"));
    newVector.addElement(new Option(
        "\tThe number of instances in the training set.",
        "T", 1, "-T <number of instances in training set>"));
    newVector.addElement(new Option(
        "\tFull class name of the learner used in the decomposition.\n"
        +"\teg: weka.classifiers.bayes.NaiveBayes",
        "W", 1, "-W <classifier class name>"));

    // Append the options of the wrapped base classifier, if it exposes any.
    if ((m_Classifier != null) &&
        (m_Classifier instanceof OptionHandler)) {
      newVector.addElement(new Option(
          "",
          "", 0, "\nOptions specific to learner "
          + m_Classifier.getClass().getName() + ":"));
      newVector.addAll(Collections.list(((OptionHandler)m_Classifier).listOptions()));
    }
    return newVector.elements();
  }

  /**
   * Sets the OptionHandler's options using the given list. All options
   * will be set (or reset) during this call (i.e. incremental setting
   * of options is not possible). <p/>
   *
   * <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -c &lt;class index&gt;
   * The index of the class attribute.
   * (default last)</pre>
   *
   * <pre> -D
   * Turn on debugging output.</pre>
   *
   * <pre> -l &lt;num&gt;
   * The number of times each instance is classified.
* (default 10)</pre> * * <pre> -p &lt;proportion of objects in common&gt; * The average proportion of instances common between any two training sets</pre> * * <pre> -s &lt;seed&gt; * The random number seed used.</pre> * * <pre> -t &lt;name of arff file&gt; * The name of the arff file used for the decomposition.</pre> * * <pre> -T &lt;number of instances in training set&gt; * The number of instances in the training set.</pre> * * <pre> -W &lt;classifier class name&gt; * Full class name of the learner used in the decomposition. * eg: weka.classifiers.bayes.NaiveBayes</pre> * * <pre> * Options specific to learner weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { setDebug(Utils.getFlag('D', options)); String classIndex = Utils.getOption('c', options); if (classIndex.length() != 0) { if (classIndex.toLowerCase().equals("last")) { setClassIndex(0); } else if (classIndex.toLowerCase().equals("first")) { setClassIndex(1); } else { setClassIndex(Integer.parseInt(classIndex)); } } else { setClassIndex(0); } String classifyIterations = Utils.getOption('l', options); if (classifyIterations.length() != 0) { setClassifyIterations(Integer.parseInt(classifyIterations)); } else { setClassifyIterations(10); } String prob = Utils.getOption('p', options); if (prob.length() != 0) { setP( Double.parseDouble(prob)); } else { setP(-1); } //throw new Exception("A proportion must be specified" + " with a -p option."); String seedString = Utils.getOption('s', options); if (seedString.length() != 0) { setSeed(Integer.parseInt(seedString)); } else { setSeed(1); } String dataFile = Utils.getOption('t', options); if (dataFile.length() != 0) { setDataFileName(dataFile); } else { throw new Exception("An 
arff file must be specified" + " with the -t option."); } String trainSize = Utils.getOption('T', options); if (trainSize.length() != 0) { setTrainSize(Integer.parseInt(trainSize)); } else { setTrainSize(-1); } //throw new Exception("A training set size must be specified" + " with a -T option."); String classifierName = Utils.getOption('W', options); if (classifierName.length() != 0) { setClassifier(AbstractClassifier.forName(classifierName, Utils.partitionOptions(options))); } else { throw new Exception("A learner must be specified with the -W option."); } } /** * Gets the current settings of the CheckClassifier. * * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] classifierOptions = new String [0]; if ((m_Classifier != null) && (m_Classifier instanceof OptionHandler)) { classifierOptions = ((OptionHandler)m_Classifier).getOptions(); } String [] options = new String [classifierOptions.length + 14]; int current = 0; if (getDebug()) { options[current++] = "-D"; } options[current++] = "-c"; options[current++] = "" + getClassIndex(); options[current++] = "-l"; options[current++] = "" + getClassifyIterations(); options[current++] = "-p"; options[current++] = "" + getP(); options[current++] = "-s"; options[current++] = "" + getSeed(); if (getDataFileName() != null) { options[current++] = "-t"; options[current++] = "" + getDataFileName(); } options[current++] = "-T"; options[current++] = "" + getTrainSize(); if (getClassifier() != null) { options[current++] = "-W"; options[current++] = getClassifier().getClass().getName(); } options[current++] = "--"; System.arraycopy(classifierOptions, 0, options, current, classifierOptions.length); current += classifierOptions.length; while (current < options.length) { options[current++] = ""; } return options; } /** * Set the classifiers being analysed * * @param newClassifier the Classifier to use. 
*/
  public void setClassifier(Classifier newClassifier) {
    m_Classifier = newClassifier;
  }

  /**
   * Gets the name of the classifier being analysed
   *
   * @return the classifier being analysed.
   */
  public Classifier getClassifier() {
    return m_Classifier;
  }

  /**
   * Sets debugging mode
   *
   * @param debug true if debug output should be printed
   */
  public void setDebug(boolean debug) {
    m_Debug = debug;
  }

  /**
   * Gets whether debugging is turned on
   *
   * @return true if debugging output is on
   */
  public boolean getDebug() {
    return m_Debug;
  }

  /**
   * Sets the random number seed
   *
   * @param seed the random number seed
   */
  public void setSeed(int seed) {
    m_Seed = seed;
  }

  /**
   * Gets the random number seed
   *
   * @return the random number seed
   */
  public int getSeed() {
    return m_Seed;
  }

  /**
   * Sets the number of times an instance is classified
   *
   * @param classifyIterations number of times an instance is classified
   */
  public void setClassifyIterations(int classifyIterations) {
    m_ClassifyIterations = classifyIterations;
  }

  /**
   * Gets the number of times an instance is classified
   *
   * @return the maximum number of times an instance is classified
   */
  public int getClassifyIterations() {
    return m_ClassifyIterations;
  }

  /**
   * Sets the name of the dataset file.
   *
   * @param dataFileName name of dataset file.
   */
  public void setDataFileName(String dataFileName) {
    m_DataFileName = dataFileName;
  }

  /**
   * Get the name of the data file used for the decomposition
   *
   * @return the name of the data file
   */
  public String getDataFileName() {
    return m_DataFileName;
  }

  /**
   * Get the index (starting from 1) of the attribute used as the class.
* @return the index of the class attribute
   */
  public int getClassIndex() {
    // Stored 0-based internally; exposed 1-based to callers.
    return m_ClassIndex + 1;
  }

  /**
   * Sets index of attribute to discretize on
   *
   * @param classIndex the index (starting from 1) of the class attribute
   */
  public void setClassIndex(int classIndex) {
    m_ClassIndex = classIndex - 1;
  }

  /**
   * Get the calculated bias squared according to the Kohavi and Wolpert definition
   *
   * @return the bias squared
   */
  public double getKWBias() {
    return m_KWBias;
  }

  /**
   * Get the calculated bias according to the Webb definition
   *
   * @return the bias
   */
  public double getWBias() {
    return m_WBias;
  }

  /**
   * Get the calculated variance according to the Kohavi and Wolpert definition
   *
   * @return the variance
   */
  public double getKWVariance() {
    return m_KWVariance;
  }

  /**
   * Get the calculated variance according to the Webb definition
   *
   * @return the variance according to Webb
   */
  public double getWVariance() {
    return m_WVariance;
  }

  /**
   * Get the calculated sigma according to the Kohavi and Wolpert definition
   *
   * @return the sigma
   */
  public double getKWSigma() {
    return m_KWSigma;
  }

  /**
   * Set the training size.
   *
   * @param size the size of the training set
   */
  public void setTrainSize(int size) {
    m_TrainSize = size;
  }

  /**
   * Get the training size
   *
   * @return the size of the training set
   */
  public int getTrainSize() {
    return m_TrainSize;
  }

  /**
   * Set the proportion of instances that are common between two training sets
   * used to train a classifier.
   *
   * @param proportion the proportion of instances that are common between training
   * sets.
   */
  public void setP(double proportion) {
    m_P = proportion;
  }

  /**
   * Get the proportion of instances that are common between two training sets.
   *
   * @return the proportion
   */
  public double getP() {
    return m_P;
  }

  /**
   * Get the calculated error rate
   *
   * @return the error rate
   */
  public double getError() {
    return m_Error;
  }

  /**
   * Carry out the bias-variance decomposition using the sub-sampled cross-validation method.
   *
   * @throws Exception if the decomposition couldn't be carried out
   */
  public void decompose() throws Exception {

    Reader dataReader;
    Instances data;
    int tps; // training pool size, size of segment E.
    int k; // number of folds in segment E.
    int q; // number of segments of size tps.

    // NOTE(review): this reader is never closed; it relies on GC to release the
    // file handle. A try-with-resources would be safer (code left unchanged here).
    dataReader = new BufferedReader(new FileReader(m_DataFileName)); // open file
    data = new Instances(dataReader); // wrap the ARFF stream in weka.core.Instances

    // class index < 0 means "use the last attribute as the class"
    if (m_ClassIndex < 0) {
      data.setClassIndex(data.numAttributes() - 1);
    } else {
      data.setClassIndex(m_ClassIndex);
    }
    if (data.classAttribute().type() != Attribute.NOMINAL) {
      throw new Exception("Class attribute must be nominal");
    }
    int numClasses = data.numClasses();
    data.deleteWithMissingClass();
    if ( data.checkForStringAttributes() ) {
      throw new Exception("Can't handle string attributes!");
    }

    // Dataset size must be greater than 2
    if ( data.numInstances() <= 2 ){
      throw new Exception("Dataset size must be greater than 2.");
    }

    // default training-set size: half of the data (rounded down)
    if ( m_TrainSize == -1 ){ // default value
      m_TrainSize = (int) Math.floor( (double) data.numInstances() / 2.0 );
    }else if ( m_TrainSize < 0 || m_TrainSize >= data.numInstances() - 1 ) { // Check if 0 < training Size < D - 1
      throw new Exception("Training set size of "+m_TrainSize+" is invalid.");
    }

    // default sampling proportion p = m / (|D| - 1); otherwise require
    // m/(|D|-1) <= p < 1.0 so that a training pool of size >= m+1 exists
    if ( m_P == -1 ){ // default value
      m_P = (double) m_TrainSize / ( (double)data.numInstances() - 1 );
    }else if ( m_P < ( m_TrainSize / ( (double)data.numInstances() - 1 ) ) || m_P >= 1.0 ) {
      throw new Exception("Proportion is not in range: "+ (m_TrainSize / ((double) data.numInstances() - 1 )) +" <= p < 1.0 ");
    }

    // training pool size: roundup tps from double to integer, tps = ceil(m/p + 1)
    tps = (int) Math.ceil( ((double)m_TrainSize / (double)m_P) + 1 );
    // number of folds: chosen so every "all folds but one" union still holds
    // at least m_TrainSize instances
    k = (int) Math.ceil( tps / (tps - (double) m_TrainSize));

    // number of folds cannot be more than the number of instances in the training pool
    if ( k > tps ) {
      // NOTE(review): the concatenated message lacks a space between the two
      // sentences; left as-is since doc-only edits must not touch string literals.
      throw new Exception("The required number of folds is too many."
        + "Change p or the size of the training set.");
    }

    // calculate the number of segments, round down.
    q = (int) Math.floor( (double) data.numInstances() / (double)tps );

    // confusion matrix: one row per instance (all instances get tested),
    // one column per class, counting how often each class was predicted.
    double [][] instanceProbs = new double [data.numInstances()][numClasses];
    // foldIndex[f] = { start offset within a segment, fold size }
    int [][] foldIndex = new int [ k ][ 2 ];
    Vector<int[]> segmentList = new Vector<int[]>(q + 1);

    // Set random seed
    Random random = new Random(m_Seed);
    data.randomize(random);

    // create index arrays for the q full segments plus one leftover segment
    int currentDataIndex = 0;
    for( int count = 1; count <= (q + 1); count++ ){
      if( count > q){
        // final segment: the instances left over after q segments of size tps
        int [] segmentIndex = new int [ (data.numInstances() - (q * tps)) ];
        for(int index = 0; index < segmentIndex.length; index++, currentDataIndex++){
          segmentIndex[index] = currentDataIndex;
        }
        segmentList.add(segmentIndex);
      } else {
        int [] segmentIndex = new int [ tps ];
        for(int index = 0; index < segmentIndex.length; index++, currentDataIndex++){
          segmentIndex[index] = currentDataIndex;
        }
        segmentList.add(segmentIndex);
      }
    }

    int remainder = tps % k; // remainder is used to determine when to shrink the fold size by 1.
    // foldSize = ROUNDUP( tps / k ) (round up, eg 3 -> 3, 3.3 -> 4)
    int foldSize = (int) Math.ceil( (double)tps /(double) k); // roundup fold size double to integer
    int index = 0;
    int currentIndex;

    // lay out fold boundaries: the first `remainder` folds keep the larger
    // (rounded-up) size, the rest are one smaller, so the sizes sum to tps
    for( int count = 0; count < k; count ++){
      if( remainder != 0 && count == remainder ){
        foldSize -= 1;
      }
      foldIndex[count][0] = index;
      foldIndex[count][1] = foldSize;
      index += foldSize;
    }

    for( int l = 0; l < m_ClassifyIterations; l++) {

      for(int i = 1; i <= q; i++){

        int [] currentSegment = (int[]) segmentList.get(i - 1);

        randomize(currentSegment, random);

        // CROSS FOLD VALIDATION for current Segment
        for( int j = 1; j <= k; j++){

          // training pool TP = union of all folds except fold j
          Instances TP = null;
          for(int foldNum = 1; foldNum <= k; foldNum++){
            if( foldNum != j){

              int startFoldIndex = foldIndex[ foldNum - 1 ][ 0 ]; //start index
              foldSize = foldIndex[ foldNum - 1 ][ 1 ]; //size
              int endFoldIndex = startFoldIndex + foldSize - 1;

              for(int currentFoldIndex = startFoldIndex; currentFoldIndex <= endFoldIndex; currentFoldIndex++){

                if( TP == null ){
                  TP = new Instances(data, currentSegment[ currentFoldIndex ], 1);
                }else{
                  TP.add( data.instance( currentSegment[ currentFoldIndex ] ) );
                }
              }
            }
          }

          TP.randomize(random);

          if( getTrainSize() > TP.numInstances() ){
            throw new Exception("The training set size of " + getTrainSize() + ", is greater than the training pool " + TP.numInstances() );
          }

          // subsample the first m_TrainSize instances of the shuffled pool
          Instances train = new Instances(TP, 0, m_TrainSize);

          Classifier current = AbstractClassifier.makeCopy(m_Classifier);
          current.buildClassifier(train); // create a classifier using the instances in train.

          // evaluate on the held-out fold j, accumulating prediction counts
          int currentTestIndex = foldIndex[ j - 1 ][ 0 ]; //start index
          int testFoldSize = foldIndex[ j - 1 ][ 1 ]; //size
          int endTestIndex = currentTestIndex + testFoldSize - 1;

          while( currentTestIndex <= endTestIndex ){

            Instance testInst = data.instance( currentSegment[currentTestIndex] );

            int pred = (int)current.classifyInstance( testInst );

            if(pred != testInst.classValue()) {
              m_Error++; // add 1 to mis-classifications.
            }
            instanceProbs[ currentSegment[ currentTestIndex ] ][ pred ]++;
            currentTestIndex++;
          }

          // the leftover (q+1)-th segment is never a training segment; test it
          // once per iteration, using the very first classifier built (i==1, j==1)
          if( i == 1 && j == 1){
            int[] segmentElast = (int[])segmentList.lastElement();
            for( currentIndex = 0; currentIndex < segmentElast.length; currentIndex++){
              Instance testInst = data.instance( segmentElast[currentIndex] );
              int pred = (int)current.classifyInstance( testInst );
              if(pred != testInst.classValue()) {
                m_Error++; // add 1 to mis-classifications.
              }
              instanceProbs[ segmentElast[ currentIndex ] ][ pred ]++;
            }
          }
        }
      }
    }

    // normalise the raw error count into an error rate
    m_Error /= (double)( m_ClassifyIterations * data.numInstances() );

    m_KWBias = 0.0;
    m_KWVariance = 0.0;
    m_KWSigma = 0.0;
    m_WBias = 0.0;
    m_WVariance = 0.0;

    // per-instance bias/variance accumulation under both definitions
    for (int i = 0; i < data.numInstances(); i++) {

      Instance current = data.instance( i );

      double [] predProbs = instanceProbs[ i ];
      double pActual, pPred;
      double bsum = 0, vsum = 0, ssum = 0;
      double wBSum = 0, wVSum = 0;

      Vector<Integer> centralTendencies = findCentralTendencies( predProbs );

      if( centralTendencies == null ){
        throw new Exception("Central tendency was null.");
      }

      // Kohavi-Wolpert terms: bias^2, variance and sigma^2 per class
      for (int j = 0; j < numClasses; j++) {
        pActual = (current.classValue() == j) ? 1 : 0;
        pPred = predProbs[j] / m_ClassifyIterations;
        bsum += (pActual - pPred) * (pActual - pPred) - pPred * (1 - pPred) / (m_ClassifyIterations - 1);
        vsum += pPred * pPred;
        ssum += pActual * pActual;
      }
      m_KWBias += bsum;
      m_KWVariance += (1 - vsum);
      m_KWSigma += (1 - ssum);

      // Webb terms, averaged over all tied central tendencies
      for( int count = 0; count < centralTendencies.size(); count++ ) {

        // NOTE(review): wB/wV are ints but accumulate doubles via compound
        // assignment, which silently truncates; predProbs holds whole counts
        // here, so no precision is lost in practice.
        int wB = 0, wV = 0;
        int centralTendency = ((Integer)centralTendencies.get(count)).intValue();

        // For a single instance xi, find the bias and variance.
        for (int j = 0; j < numClasses; j++) {
          // Webb definition: misclassifications agreeing with the central
          // tendency count as bias, all other misclassifications as variance
          if( j != (int)current.classValue() && j == centralTendency ) {
            wB += predProbs[j];
          }
          if( j != (int)current.classValue() && j != centralTendency ) {
            wV += predProbs[j];
          }
        }
        wBSum += (double) wB;
        wVSum += (double) wV;
      }

      // average over the tied central tendencies and normalise by the number
      // of classification iterations to obtain per-instance probabilities
      m_WBias += ( wBSum / ((double) ( centralTendencies.size() * m_ClassifyIterations )));
      m_WVariance += ( wVSum / ((double) ( centralTendencies.size() * m_ClassifyIterations )));
    }

    m_KWBias /= (2.0 * (double) data.numInstances());
    m_KWVariance /= (2.0 * (double) data.numInstances());
    m_KWSigma /= (2.0 * (double) data.numInstances());

    // bias = bias / number of data instances
    m_WBias /= (double) data.numInstances();
    // variance = variance / number of data instances.
    m_WVariance /= (double) data.numInstances();

    if (m_Debug) {
      System.err.println("Decomposition finished");
    }
  }

  /** Finds the central tendency, given the classifications for an instance.
   *
   * Where the central tendency is defined as the class that was most commonly
   * selected for a given instance.<p>
   *
   * For example, instance 'x' may be classified out of 3 classes y = {1, 2, 3},
   * so if x is classified 10 times, and is classified as follows, '1' = 2 times,
   * '2' = 5 times and '3' = 3 times. Then the central tendency is '2'. <p>
   *
   * However, it is important to note that this method returns a list of all
   * classes that have the highest number of classifications.
   *
   * In cases where there are several classes with the largest number of
   * classifications, then all of these classes are returned. For example if
   * 'x' is classified '1' = 4 times, '2' = 4 times and '3' = 2 times.
   * Then '1' and '2' are returned.<p>
   *
   * @param predProbs the array of classification counts for a single instance.
   *
   * @return a Vector containing Integer objects which store the class(es) which
   * are the central tendency, or null if every count in predProbs is zero.
   */
  public Vector<Integer> findCentralTendencies(double[] predProbs) {

    int centralTValue = 0;
    int currentValue = 0;
    // list of classes that have the greatest number of classifications.
    Vector<Integer> centralTClasses;

    centralTClasses = new Vector<Integer>();

    // Go through array, finding the central tendency.
    for( int i = 0; i < predProbs.length; i++) {
      currentValue = (int) predProbs[i];
      // if current value is greater than the central tendency value then
      // clear vector and add new class to vector array.
      if( currentValue > centralTValue) {
        centralTClasses.clear();
        centralTClasses.addElement( new Integer(i) );
        centralTValue = currentValue;
      } else if( currentValue != 0 && currentValue == centralTValue) {
        // tie with the current maximum: keep both classes
        centralTClasses.addElement( new Integer(i) );
      }
    }
    // return all classes that have the greatest number of classifications.
    if( centralTValue != 0){
      return centralTClasses;
    } else {
      return null;
    }
  }

  /**
   * Returns description of the bias-variance decomposition results, covering
   * the configured options plus both the Kohavi-Wolpert and Webb definitions.
   *
   * @return the bias-variance decomposition results as a string
   */
  public String toString() {

    String result = "\nBias-Variance Decomposition Segmentation, Cross Validation\n" + "with subsampling.\n";

    if (getClassifier() == null) {
      return "Invalid setup";
    }

    result += "\nClassifier : " + getClassifier().getClass().getName();
    if (getClassifier() instanceof OptionHandler) {
      result += Utils.joinOptions(((OptionHandler)m_Classifier).getOptions());
    }
    result += "\nData File : " + getDataFileName();
    result += "\nClass Index : ";
    // a stored class index of 0 is the "use last attribute" sentinel
    if (getClassIndex() == 0) {
      result += "last";
    } else {
      result += getClassIndex();
    }
    result += "\nIterations : " + getClassifyIterations();
    result += "\np : " + getP();
    result += "\nTraining Size : " + getTrainSize();
    result += "\nSeed : " + getSeed();
    result += "\n\nDefinition : " +"Kohavi and Wolpert";
    result += "\nError :" + Utils.doubleToString(getError(), 4);
    result += "\nBias^2 :" + Utils.doubleToString(getKWBias(), 4);
    result += "\nVariance :" + Utils.doubleToString(getKWVariance(), 4);
    result += "\nSigma^2 :" + Utils.doubleToString(getKWSigma(), 4);
    result += "\n\nDefinition : " +"Webb";
    result += "\nError :" + Utils.doubleToString(getError(), 4);
    result += "\nBias :" + Utils.doubleToString(getWBias(), 4);
    result += "\nVariance :" + Utils.doubleToString(getWVariance(), 4);

    return result;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }

  /**
   * Test method for this class: parses command-line options, runs the
   * decomposition and prints the results (or a usage message on bad options).
   *
   * @param args the command line arguments
   */
  public static void main(String [] args) {
    try {
      BVDecomposeSegCVSub bvd = new BVDecomposeSegCVSub();

      try {
        bvd.setOptions(args);
        Utils.checkForRemainingOptions(args);
      } catch (Exception ex) {
        // build a usage string listing every available option
        String result = ex.getMessage() + "\nBVDecompose Options:\n\n";
        Enumeration<Option> enu = bvd.listOptions();
        while (enu.hasMoreElements()) {
          Option option = (Option) enu.nextElement();
          result += option.synopsis() + "\n" + option.description() + "\n";
        }
        throw new Exception(result);
      }

      bvd.decompose();

      System.out.println(bvd.toString());

    } catch (Exception ex) {
      System.err.println(ex.getMessage());
    }
  }

  /**
   * Accepts an array of ints and randomises the values in the array, using the
   * supplied random number generator (in-place Fisher-Yates shuffle).
   *
   * @param index is the array of integers
   * @param random is the Random seed.
   */
  public final void randomize(int[] index, Random random) {
    for( int j = index.length - 1; j > 0; j-- ){
      int k = random.nextInt( j + 1 );
      int temp = index[j];
      index[j] = index[k];
      index[k] = temp;
    }
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/CheckClassifier.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * CheckClassifier.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers; import java.util.ArrayList; import java.util.Collections; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.core.Attribute; import weka.core.CheckScheme; import weka.core.Instance; import weka.core.Instances; import weka.core.MultiInstanceCapabilitiesHandler; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SerializationHelper; import weka.core.TestInstances; import weka.core.Utils; import weka.core.WeightedInstancesHandler; /** * Class for examining the capabilities and finding problems with classifiers. * If you implement a classifier using the WEKA.libraries, you should run the * checks on it to ensure robustness and correct operation. Passing all the * tests of this object does not mean bugs in the classifier don't exist, but * this will help find some common ones. 
* <p/> * * Typical usage: * <p/> * <code>java weka.classifiers.CheckClassifier -W classifier_name * classifier_options </code> * <p/> * * CheckClassifier reports on the following: * <ul> * <li>Classifier abilities * <ul> * <li>Possible command line options to the classifier</li> * <li>Whether the classifier can predict nominal, numeric, string, date or * relational class attributes. Warnings will be displayed if performance is * worse than ZeroR</li> * <li>Whether the classifier can be trained incrementally</li> * <li>Whether the classifier can handle numeric predictor attributes</li> * <li>Whether the classifier can handle nominal predictor attributes</li> * <li>Whether the classifier can handle string predictor attributes</li> * <li>Whether the classifier can handle date predictor attributes</li> * <li>Whether the classifier can handle relational predictor attributes</li> * <li>Whether the classifier can handle multi-instance data</li> * <li>Whether the classifier can handle missing predictor values</li> * <li>Whether the classifier can handle missing class values</li> * <li>Whether a nominal classifier only handles 2 class problems</li> * <li>Whether the classifier can handle instance weights</li> * </ul> * </li> * <li>Correct functioning * <ul> * <li>Correct initialisation during buildClassifier (i.e. 
no result changes * when buildClassifier called repeatedly)</li> * <li>Whether incremental training produces the same results as during * non-incremental training (which may or may not be OK)</li> * <li>Whether the classifier alters the data pased to it (number of instances, * instance order, instance weights, etc)</li> * <li>Whether the toString() method works correctly before the classifier has * been built.</li> * </ul> * </li> * <li>Degenerate cases * <ul> * <li>building classifier with zero training instances</li> * <li>all but one predictor attribute values missing</li> * <li>all predictor attribute values missing</li> * <li>all but one class values missing</li> * <li>all class values missing</li> * </ul> * </li> * </ul> * Running CheckClassifier with the debug option set will output the training * and test datasets for any failed tests. * <p/> * * The <code>weka.classifiers.AbstractClassifierTest</code> uses this class to * test all the classifiers. Any changes here, have to be checked in that * abstract test class, too. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -D * Turn on debugging output. * </pre> * * <pre> * -S * Silent mode - prints nothing to stdout. * </pre> * * <pre> * -N &lt;num&gt; * The number of instances in the datasets (default 20). * </pre> * * <pre> * -nominal &lt;num&gt; * The number of nominal attributes (default 2). * </pre> * * <pre> * -nominal-values &lt;num&gt; * The number of values for nominal attributes (default 1). * </pre> * * <pre> * -numeric &lt;num&gt; * The number of numeric attributes (default 1). * </pre> * * <pre> * -string &lt;num&gt; * The number of string attributes (default 1). * </pre> * * <pre> * -date &lt;num&gt; * The number of date attributes (default 1). * </pre> * * <pre> * -relational &lt;num&gt; * The number of relational attributes (default 1). * </pre> * * <pre> * -num-instances-relational &lt;num&gt; * The number of instances in relational/bag attributes (default 10). 
* </pre> * * <pre> * -words &lt;comma-separated-list&gt; * The words to use in string attributes. * </pre> * * <pre> * -word-separators &lt;chars&gt; * The word separators to use in string attributes. * </pre> * * <pre> * -W * Full name of the classifier analysed. * eg: weka.classifiers.bayes.NaiveBayes * (default weka.classifiers.rules.ZeroR) * </pre> * * <pre> * Options specific to classifier weka.classifiers.rules.ZeroR: * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <!-- options-end --> * * Options after -- are passed to the designated classifier. * <p/> * * @author Len Trigg (trigg@cs.waikato.ac.nz) * @author FracPete (fracpete at waikato dot ac dot nz) * @version $Revision$ * @see TestInstances */ public class CheckClassifier extends CheckScheme { /* * Note about test methods: - methods return array of booleans - first index: * success or not - second index: acceptable or not (e.g., Exception is OK) - * in case the performance is worse than that of ZeroR both indices are true * * FracPete (fracpete at waikato dot ac dot nz) */ /*** The classifier to be examined */ protected Classifier m_Classifier = new weka.classifiers.rules.ZeroR(); /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> result = new Vector<Option>();

    result.addAll(Collections.list(super.listOptions()));

    result.add(new Option("\tFull name of the classifier analysed.\n"
      + "\teg: weka.classifiers.bayes.NaiveBayes\n"
      + "\t(default weka.classifiers.rules.ZeroR)", "W", 1, "-W"));

    // append the chosen classifier's own options, when it exposes any
    if ((m_Classifier != null) && (m_Classifier instanceof OptionHandler)) {
      result.add(new Option("", "", 0, "\nOptions specific to classifier "
        + m_Classifier.getClass().getName() + ":"));
      result.addAll(Collections.list(((OptionHandler) m_Classifier)
        .listOptions()));
    }

    return result.elements();
  }

  /**
   * Parses a given list of options.
   *
   * <!-- options-start --> Valid options are:
   * <p/>
   *
   * <pre>
   * -D
   * Turn on debugging output.
   * </pre>
   *
   * <pre>
   * -S
   * Silent mode - prints nothing to stdout.
   * </pre>
   *
   * <pre>
   * -N &lt;num&gt;
   * The number of instances in the datasets (default 20).
   * </pre>
   *
   * <pre>
   * -nominal &lt;num&gt;
   * The number of nominal attributes (default 2).
   * </pre>
   *
   * <pre>
   * -nominal-values &lt;num&gt;
   * The number of values for nominal attributes (default 1).
   * </pre>
   *
   * <pre>
   * -numeric &lt;num&gt;
   * The number of numeric attributes (default 1).
   * </pre>
   *
   * <pre>
   * -string &lt;num&gt;
   * The number of string attributes (default 1).
   * </pre>
   *
   * <pre>
   * -date &lt;num&gt;
   * The number of date attributes (default 1).
   * </pre>
   *
   * <pre>
   * -relational &lt;num&gt;
   * The number of relational attributes (default 1).
   * </pre>
   *
   * <pre>
   * -num-instances-relational &lt;num&gt;
   * The number of instances in relational/bag attributes (default 10).
   * </pre>
   *
   * <pre>
   * -words &lt;comma-separated-list&gt;
   * The words to use in string attributes.
   * </pre>
   *
   * <pre>
   * -word-separators &lt;chars&gt;
   * The word separators to use in string attributes.
   * </pre>
   *
   * <pre>
   * -W
   * Full name of the classifier analysed.
   * eg: weka.classifiers.bayes.NaiveBayes
   * (default weka.classifiers.rules.ZeroR)
   * </pre>
   *
   * <pre>
   * Options specific to classifier weka.classifiers.rules.ZeroR:
   * </pre>
   *
   * <pre>
   * -D
   * If set, classifier is run in debug mode and
   * may output additional info to the console
   * </pre>
   *
   * <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  @Override
  public void setOptions(String[] options) throws Exception {
    String tmpStr;

    super.setOptions(options);

    // -W selects the classifier under test; default to ZeroR when absent
    tmpStr = Utils.getOption('W', options);
    if (tmpStr.length() == 0) {
      tmpStr = weka.classifiers.rules.ZeroR.class.getName();
    }
    setClassifier((Classifier) forName("weka.classifiers", Classifier.class,
      tmpStr, Utils.partitionOptions(options)));
  }

  /**
   * Gets the current settings of the CheckClassifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {
    Vector<String> result;
    String[] options;

    result = new Vector<String>();
    Collections.addAll(result, super.getOptions());

    if (getClassifier() != null) {
      result.add("-W");
      result.add(getClassifier().getClass().getName());
    }

    // everything after "--" is passed through to the classifier under test
    if ((m_Classifier != null) && (m_Classifier instanceof OptionHandler)) {
      options = ((OptionHandler) m_Classifier).getOptions();
      if (options.length > 0) {
        result.add("--");
        Collections.addAll(result, options);
      }
    }

    return result.toArray(new String[result.size()]);
  }

  /**
   * Begin the tests, reporting results to System.out: interface checks first,
   * then the per-class-type test battery for every class attribute type.
   */
  @Override
  public void doTests() {

    if (getClassifier() == null) {
      println("\n=== No classifier set ===");
      return;
    }
    println("\n=== Check on Classifier: " + getClassifier().getClass().getName()
      + " ===\n");

    // Start tests
    m_ClasspathProblems = false;
    println("--> Checking for interfaces");
    canTakeOptions();
    boolean updateableClassifier = updateableClassifier()[0];
    boolean weightedInstancesHandler = weightedInstancesHandler()[0];
    boolean multiInstanceHandler = multiInstanceHandler()[0];
    println("--> Classifier tests");
    declaresSerialVersionUID();
    testToString();
    testsPerClassType(Attribute.NOMINAL, updateableClassifier,
      weightedInstancesHandler, multiInstanceHandler);
    testsPerClassType(Attribute.NUMERIC, updateableClassifier,
      weightedInstancesHandler, multiInstanceHandler);
    testsPerClassType(Attribute.DATE, updateableClassifier,
      weightedInstancesHandler, multiInstanceHandler);
    testsPerClassType(Attribute.STRING, updateableClassifier,
      weightedInstancesHandler, multiInstanceHandler);
    testsPerClassType(Attribute.RELATIONAL, updateableClassifier,
      weightedInstancesHandler, multiInstanceHandler);
  }

  /**
   * Set the classifier to analyse.
   *
   * @param newClassifier the Classifier to use.
   */
  public void setClassifier(Classifier newClassifier) {
    m_Classifier = newClassifier;
  }

  /**
   * Get the classifier being analysed.
   *
   * @return the classifier being analysed
   */
  public Classifier getClassifier() {
    return m_Classifier;
  }

  /**
   * Run a battery of tests for a given class attribute type.
   *
   * @param classType true if the class attribute should be numeric
   * @param updateable true if the classifier is updateable
   * @param weighted true if the classifier says it handles weights
   * @param multiInstance true if the classifier is a multi-instance classifier
   */
  protected void testsPerClassType(int classType, boolean updateable,
    boolean weighted, boolean multiInstance) {

    // first establish which predictor attribute types can be predicted with
    boolean PNom = canPredict(true, false, false, false, false, multiInstance,
      classType)[0];
    boolean PNum = canPredict(false, true, false, false, false, multiInstance,
      classType)[0];
    boolean PStr = canPredict(false, false, true, false, false, multiInstance,
      classType)[0];
    boolean PDat = canPredict(false, false, false, true, false, multiInstance,
      classType)[0];
    boolean PRel;
    if (!multiInstance) {
      PRel = canPredict(false, false, false, false, true, multiInstance,
        classType)[0];
    } else {
      PRel = false;
    }

    // only run the remaining checks if at least one attribute type works
    if (PNom || PNum || PStr || PDat || PRel) {
      if (weighted) {
        instanceWeights(PNom, PNum, PStr, PDat, PRel, multiInstance, classType);
      }

      canHandleOnlyClass(PNom, PNum, PStr, PDat, PRel, classType);

      if (classType == Attribute.NOMINAL) {
        canHandleNClasses(PNom, PNum, PStr, PDat, PRel, multiInstance, 4);
      }

      if (!multiInstance) {
        canHandleClassAsNthAttribute(PNom, PNum, PStr, PDat, PRel,
          multiInstance, classType, 0);
        canHandleClassAsNthAttribute(PNom, PNum, PStr, PDat, PRel,
          multiInstance, classType, 1);
      }

      canHandleZeroTraining(PNom, PNum, PStr, PDat, PRel, multiInstance,
        classType);
      // probe missing values at 20% first; only escalate to 100% if 20% passed
      boolean handleMissingPredictors = canHandleMissing(PNom, PNum, PStr,
        PDat, PRel, multiInstance, classType, true, false, 20)[0];
      if (handleMissingPredictors) {
        canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance,
          classType, true, false, 100);
      }

      boolean handleMissingClass = canHandleMissing(PNom, PNum, PStr, PDat,
        PRel, multiInstance, classType, false, true, 20)[0];
      if (handleMissingClass) {
        canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance,
          classType, false, true, 100);
      }

      correctBuildInitialisation(PNom, PNum, PStr, PDat, PRel, multiInstance,
        classType);
      datasetIntegrity(PNom, PNum, PStr, PDat, PRel, multiInstance, classType,
        handleMissingPredictors, handleMissingClass);
      doesntUseTestClassVal(PNom, PNum, PStr, PDat, PRel, multiInstance,
        classType);
      if (updateable) {
        updatingEquality(PNom, PNum, PStr, PDat, PRel, multiInstance,
          classType);
      }
    }
  }

  /**
   * Checks whether the scheme's toString() method works even though the
   * classifier hasn't been built yet.
   *
   * @return index 0 is true if the toString() method works fine
   */
  protected boolean[] testToString() {
    boolean[] result = new boolean[2];

    print("toString...");

    try {
      // a fresh, unbuilt instance - not the configured m_Classifier
      // NOTE(review): Class.newInstance() is deprecated since Java 9;
      // getDeclaredConstructor().newInstance() is the modern equivalent.
      Classifier copy = m_Classifier.getClass().newInstance();
      copy.toString();
      result[0] = true;
      println("yes");
    } catch (Exception e) {
      result[0] = false;
      println("no");
      if (m_Debug) {
        println("\n=== Full report ===");
        e.printStackTrace();
        println("\n");
      }
    }

    return result;
  }

  /**
   * tests for a serialVersionUID. Fails in case the scheme doesn't declare a
   * UID.
* * @return index 0 is true if the scheme declares a UID */ protected boolean[] declaresSerialVersionUID() { boolean[] result = new boolean[2]; print("serialVersionUID..."); result[0] = !SerializationHelper.needsUID(m_Classifier.getClass()); if (result[0]) { println("yes"); } else { println("no"); } return result; } /** * Checks whether the scheme can take command line options. * * @return index 0 is true if the classifier can take options */ protected boolean[] canTakeOptions() { boolean[] result = new boolean[2]; print("options..."); if (m_Classifier instanceof OptionHandler) { println("yes"); if (m_Debug) { println("\n=== Full report ==="); Enumeration<Option> enu = ((OptionHandler) m_Classifier).listOptions(); while (enu.hasMoreElements()) { Option option = enu.nextElement(); print(option.synopsis() + "\n" + option.description() + "\n"); } println("\n"); } result[0] = true; } else { println("no"); result[0] = false; } return result; } /** * Checks whether the scheme can build models incrementally. * * @return index 0 is true if the classifier can train incrementally */ protected boolean[] updateableClassifier() { boolean[] result = new boolean[2]; print("updateable classifier..."); if (m_Classifier instanceof UpdateableClassifier) { println("yes"); result[0] = true; } else { println("no"); result[0] = false; } return result; } /** * Checks whether the scheme says it can handle instance weights. * * @return true if the classifier handles instance weights */ protected boolean[] weightedInstancesHandler() { boolean[] result = new boolean[2]; print("weighted instances classifier..."); if (m_Classifier instanceof WeightedInstancesHandler) { println("yes"); result[0] = true; } else { println("no"); result[0] = false; } return result; } /** * Checks whether the scheme handles multi-instance data. 
* * @return true if the classifier handles multi-instance data */ protected boolean[] multiInstanceHandler() { boolean[] result = new boolean[2]; print("multi-instance classifier..."); if (m_Classifier instanceof MultiInstanceCapabilitiesHandler) { println("yes"); result[0] = true; } else { println("no"); result[0] = false; } return result; } /** * Checks basic prediction of the scheme, for simple non-troublesome datasets. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NOMINAL, NUMERIC, etc.) * @return index 0 is true if the test was passed, index 1 is true if test was * acceptable */ protected boolean[] canPredict(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType) { print("basic predict"); printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); ArrayList<String> accepts = new ArrayList<String>(); accepts.add("unary"); accepts.add("binary"); accepts.add("nominal"); accepts.add("numeric"); accepts.add("string"); accepts.add("date"); accepts.add("relational"); accepts.add("multi-instance"); accepts.add("not in classpath"); int numTrain = getNumInstances(), numTest = getNumInstances(), numClasses = 2, missingLevel = 0; boolean predictorMissing = false, classMissing = false; return runBasicTest(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType, missingLevel, predictorMissing, classMissing, numTrain, numTest, numClasses, 
accepts); } /** * Checks whether the scheme can handle data that contains only the class * attribute. If a scheme cannot build a proper model with that data, it * should default back to a ZeroR model. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param classType the class type (NOMINAL, NUMERIC, etc.) * @return index 0 is true if the test was passed, index 1 is true if test was * acceptable */ protected boolean[] canHandleOnlyClass(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, int classType) { print("only class in data"); printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, false, classType); print("..."); ArrayList<String> accepts = new ArrayList<String>(); accepts.add("class"); accepts.add("zeror"); int numTrain = getNumInstances(), numTest = getNumInstances(), missingLevel = 0; boolean predictorMissing = false, classMissing = false; return runBasicTest(false, false, false, false, false, false, classType, missingLevel, predictorMissing, classMissing, numTrain, numTest, 2, accepts); } /** * Checks whether nominal schemes can handle more than two classes. If a * scheme is only designed for two-class problems it should throw an * appropriate exception for multi-class problems. 
* * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param numClasses the number of classes to test * @return index 0 is true if the test was passed, index 1 is true if test was * acceptable */ protected boolean[] canHandleNClasses(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int numClasses) { print("more than two class problems"); printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, Attribute.NOMINAL); print("..."); ArrayList<String> accepts = new ArrayList<String>(); accepts.add("number"); accepts.add("class"); int numTrain = getNumInstances(), numTest = getNumInstances(), missingLevel = 0; boolean predictorMissing = false, classMissing = false; return runBasicTest(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, Attribute.NOMINAL, missingLevel, predictorMissing, classMissing, numTrain, numTest, numClasses, accepts); } /** * Checks whether the scheme can handle class attributes as Nth attribute. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) 
* @param classIndex the index of the class attribute (0-based, -1 means last * attribute) * @return index 0 is true if the test was passed, index 1 is true if test was * acceptable * @see TestInstances#CLASS_IS_LAST */ protected boolean[] canHandleClassAsNthAttribute(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType, int classIndex) { if (classIndex == TestInstances.CLASS_IS_LAST) { print("class attribute as last attribute"); } else { print("class attribute as " + (classIndex + 1) + ". attribute"); } printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); ArrayList<String> accepts = new ArrayList<String>(); int numTrain = getNumInstances(), numTest = getNumInstances(), numClasses = 2, missingLevel = 0; boolean predictorMissing = false, classMissing = false; return runBasicTest(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType, classIndex, missingLevel, predictorMissing, classMissing, numTrain, numTest, numClasses, accepts); } /** * Checks whether the scheme can handle zero training instances. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) 
   * @return index 0 is true if the test was passed, index 1 is true if test was
   *         acceptable
   */
  protected boolean[] canHandleZeroTraining(boolean nominalPredictor,
    boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
    boolean relationalPredictor, boolean multiInstance, int classType) {

    print("handle zero training instances");
    printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
      datePredictor, relationalPredictor, multiInstance, classType);
    print("...");
    // An exception mentioning "train" or "value" is treated as an acceptable
    // refusal to build from an empty training set.
    ArrayList<String> accepts = new ArrayList<String>();
    accepts.add("train");
    accepts.add("value");
    int numTrain = 0, numTest = getNumInstances(), numClasses = 2,
      missingLevel = 0;
    boolean predictorMissing = false, classMissing = false;

    return runBasicTest(nominalPredictor, numericPredictor, stringPredictor,
      datePredictor, relationalPredictor, multiInstance, classType,
      missingLevel, predictorMissing, classMissing, numTrain, numTest,
      numClasses, accepts);
  }

  /**
   * Checks whether the scheme correctly initialises models when buildClassifier
   * is called. This test calls buildClassifier with one training dataset and
   * records performance on a test set. buildClassifier is then called on a
   * training set with different structure, and then again with the original
   * training set. The performance on the test set is compared with the original
   * results and any performance difference noted as incorrect build
   * initialisation.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
* @return index 0 is true if the test was passed, index 1 is true if the * scheme performs worse than ZeroR, but without error (index 0 is * false) */ protected boolean[] correctBuildInitialisation(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType) { boolean[] result = new boolean[2]; print("correct initialisation during buildClassifier"); printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); int numTrain = getNumInstances(), numTest = getNumInstances(), numClasses = 2, missingLevel = 0; boolean predictorMissing = false, classMissing = false; Instances train1 = null; Instances test1 = null; Instances train2 = null; Instances test2 = null; Classifier classifier = null; Evaluation evaluation1A = null; Evaluation evaluation1B = null; Evaluation evaluation2 = null; boolean built = false; int stage = 0; try { // Make two sets of train/test splits with different // numbers of attributes train1 = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal() : 0, numericPredictor ? getNumNumeric() : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? getNumRelational() : 0, numClasses, classType, multiInstance); train2 = makeTestDataset(84, numTrain, nominalPredictor ? getNumNominal() + 1 : 0, numericPredictor ? getNumNumeric() + 1 : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? getNumRelational() : 0, numClasses, classType, multiInstance); test1 = makeTestDataset(24, numTest, nominalPredictor ? getNumNominal() : 0, numericPredictor ? getNumNumeric() : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? getNumRelational() : 0, numClasses, classType, multiInstance); test2 = makeTestDataset(48, numTest, nominalPredictor ? 
getNumNominal() + 1 : 0, numericPredictor ? getNumNumeric() + 1 : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? getNumRelational() : 0, numClasses, classType, multiInstance); if (missingLevel > 0) { addMissing(train1, missingLevel, predictorMissing, classMissing); addMissing(test1, Math.min(missingLevel, 50), predictorMissing, classMissing); addMissing(train2, missingLevel, predictorMissing, classMissing); addMissing(test2, Math.min(missingLevel, 50), predictorMissing, classMissing); } classifier = AbstractClassifier.makeCopies(getClassifier(), 1)[0]; evaluation1A = new Evaluation(train1); evaluation1B = new Evaluation(train1); evaluation2 = new Evaluation(train2); } catch (Exception ex) { throw new Error("Error setting up for tests: " + ex.getMessage()); } try { stage = 0; classifier.buildClassifier(train1); built = true; if (!testWRTZeroR(classifier, evaluation1A, train1, test1)[0]) { throw new Exception("Scheme performs worse than ZeroR"); } stage = 1; built = false; classifier.buildClassifier(train2); built = true; if (!testWRTZeroR(classifier, evaluation2, train2, test2)[0]) { throw new Exception("Scheme performs worse than ZeroR"); } stage = 2; built = false; classifier.buildClassifier(train1); built = true; if (!testWRTZeroR(classifier, evaluation1B, train1, test1)[0]) { throw new Exception("Scheme performs worse than ZeroR"); } stage = 3; if (!evaluation1A.equals(evaluation1B)) { if (m_Debug) { println("\n=== Full report ===\n" + evaluation1A.toSummaryString("\nFirst buildClassifier()", true) + "\n\n"); println(evaluation1B.toSummaryString("\nSecond buildClassifier()", true) + "\n\n"); } throw new Exception("Results differ between buildClassifier calls"); } println("yes"); result[0] = true; } catch (Exception ex) { String msg = ex.getMessage().toLowerCase(); if (msg.indexOf("worse than zeror") >= 0) { println("warning: performs worse than ZeroR"); // result[0] = (stage < 1); // result[1] = (stage < 1); 
result[0] = true; result[1] = true; } else { println("no"); result[0] = false; } if (m_Debug) { println("\n=== Full Report ==="); print("Problem during"); if (built) { print(" testing"); } else { print(" training"); } switch (stage) { case 0: print(" of dataset 1"); break; case 1: print(" of dataset 2"); break; case 2: print(" of dataset 1 (2nd build)"); break; case 3: print(", comparing results from builds of dataset 1"); break; } println(": " + ex.getMessage() + "\n"); println("here are the datasets:\n"); println("=== Train1 Dataset ===\n" + train1.toString() + "\n"); println("=== Test1 Dataset ===\n" + test1.toString() + "\n\n"); println("=== Train2 Dataset ===\n" + train2.toString() + "\n"); println("=== Test2 Dataset ===\n" + test2.toString() + "\n\n"); } } return result; } /** * Checks basic missing value handling of the scheme. If the missing values * cause an exception to be thrown by the scheme, this will be recorded. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) 
   * @param predictorMissing true if the missing values may be in the predictors
   * @param classMissing true if the missing values may be in the class
   * @param missingLevel the percentage of missing values
   * @return index 0 is true if the test was passed, index 1 is true if test was
   *         acceptable
   */
  protected boolean[] canHandleMissing(boolean nominalPredictor,
    boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
    boolean relationalPredictor, boolean multiInstance, int classType,
    boolean predictorMissing, boolean classMissing, int missingLevel) {

    if (missingLevel == 100) {
      print("100% ");
    }
    print("missing");
    if (predictorMissing) {
      print(" predictor");
      if (classMissing) {
        print(" and");
      }
    }
    if (classMissing) {
      print(" class");
    }
    print(" values");
    printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
      datePredictor, relationalPredictor, multiInstance, classType);
    print("...");
    // Exceptions mentioning any of these words are an acceptable refusal to
    // handle missing data.
    ArrayList<String> accepts = new ArrayList<String>();
    accepts.add("missing");
    accepts.add("value");
    accepts.add("train");
    int numTrain = getNumInstances(), numTest = getNumInstances(),
      numClasses = 2;

    return runBasicTest(nominalPredictor, numericPredictor, stringPredictor,
      datePredictor, relationalPredictor, multiInstance, classType,
      missingLevel, predictorMissing, classMissing, numTrain, numTest,
      numClasses, accepts);
  }

  /**
   * Checks whether an updateable scheme produces the same model when trained
   * incrementally as when batch trained. The model itself cannot be compared,
   * so we compare the evaluation on test data for both models. It is possible
   * to get a false positive on this test (likelihood depends on the
   * classifier).
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @return index 0 is true if the test was passed
   */
  protected boolean[] updatingEquality(boolean nominalPredictor,
    boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
    boolean relationalPredictor, boolean multiInstance, int classType) {

    print("incremental training produces the same results"
      + " as batch training");
    printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
      datePredictor, relationalPredictor, multiInstance, classType);
    print("...");
    int numTrain = getNumInstances(), numTest = getNumInstances(),
      numClasses = 2, missingLevel = 0;
    boolean predictorMissing = false, classMissing = false;

    boolean[] result = new boolean[2];
    Instances train = null;
    Instances test = null;
    // classifiers[0] is batch-trained, classifiers[1] incrementally trained.
    Classifier[] classifiers = null;
    Evaluation evaluationB = null;
    Evaluation evaluationI = null;
    boolean built = false;
    try {
      train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal()
        : 0, numericPredictor ? getNumNumeric() : 0,
        stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0,
        relationalPredictor ? getNumRelational() : 0, numClasses, classType,
        multiInstance);
      test = makeTestDataset(24, numTest, nominalPredictor ? getNumNominal()
        : 0, numericPredictor ? getNumNumeric() : 0,
        stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0,
        relationalPredictor ? getNumRelational() : 0, numClasses, classType,
        multiInstance);
      if (missingLevel > 0) {
        addMissing(train, missingLevel, predictorMissing, classMissing);
        addMissing(test, Math.min(missingLevel, 50), predictorMissing,
          classMissing);
      }
      classifiers = AbstractClassifier.makeCopies(getClassifier(), 2);
      evaluationB = new Evaluation(train);
      evaluationI = new Evaluation(train);
      classifiers[0].buildClassifier(train);
      testWRTZeroR(classifiers[0], evaluationB, train, test);
    } catch (Exception ex) {
      throw new Error("Error setting up for tests: " + ex.getMessage());
    }
    try {
      // Initialise with an empty header, then feed instances one at a time.
      classifiers[1].buildClassifier(new Instances(train, 0));
      for (int i = 0; i < train.numInstances(); i++) {
        ((UpdateableClassifier) classifiers[1]).updateClassifier(train
          .instance(i));
      }
      built = true;
      testWRTZeroR(classifiers[1], evaluationI, train, test);
      if (!evaluationB.equals(evaluationI)) {
        println("no");
        result[0] = false;

        if (m_Debug) {
          println("\n=== Full Report ===");
          println("Results differ between batch and "
            + "incrementally built models.\n"
            + "Depending on the classifier, this may be OK");
          println("Here are the results:\n");
          println(evaluationB.toSummaryString("\nbatch built results\n", true));
          println(evaluationI.toSummaryString(
            "\nincrementally built results\n", true));
          println("Here are the datasets:\n");
          println("=== Train Dataset ===\n" + train.toString() + "\n");
          println("=== Test Dataset ===\n" + test.toString() + "\n\n");
        }
      } else {
        println("yes");
        result[0] = true;
      }
    } catch (Exception ex) {
      result[0] = false;

      print("Problem during");
      if (built) {
        print(" testing");
      } else {
        print(" training");
      }
      println(": " + ex.getMessage() + "\n");
    }

    return result;
  }

  /**
   * Checks whether the classifier erroneously uses the class value of test
   * instances (if provided). Runs the classifier with test instance class
   * values set to missing and compares with results when test instance class
   * values are left intact.
* * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) * @return index 0 is true if the test was passed */ protected boolean[] doesntUseTestClassVal(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType) { print("classifier ignores test instance class vals"); printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); int numTrain = 2 * getNumInstances(), numTest = getNumInstances(), numClasses = 2, missingLevel = 0; boolean predictorMissing = false, classMissing = false; boolean[] result = new boolean[2]; Instances train = null; Instances test = null; Classifier[] classifiers = null; boolean evalFail = false; try { train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal() + 1 : 0, numericPredictor ? getNumNumeric() + 1 : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? getNumRelational() : 0, numClasses, classType, multiInstance); test = makeTestDataset(24, numTest, nominalPredictor ? getNumNominal() + 1 : 0, numericPredictor ? getNumNumeric() + 1 : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? 
getNumRelational() : 0, numClasses, classType, multiInstance); if (missingLevel > 0) { addMissing(train, missingLevel, predictorMissing, classMissing); addMissing(test, Math.min(missingLevel, 50), predictorMissing, classMissing); } classifiers = AbstractClassifier.makeCopies(getClassifier(), 2); classifiers[0].buildClassifier(train); classifiers[1].buildClassifier(train); } catch (Exception ex) { throw new Error("Error setting up for tests: " + ex.getMessage()); } try { // Now set test values to missing when predicting for (int i = 0; i < test.numInstances(); i++) { Instance testInst = test.instance(i); Instance classMissingInst = (Instance) testInst.copy(); classMissingInst.setDataset(test); classMissingInst.setClassMissing(); double[] dist0 = classifiers[0].distributionForInstance(testInst); double[] dist1 = classifiers[1] .distributionForInstance(classMissingInst); for (int j = 0; j < dist0.length; j++) { // ignore, if both are NaNs if (Double.isNaN(dist0[j]) && Double.isNaN(dist1[j])) { if (getDebug()) { System.out.println("Both predictions are NaN!"); } continue; } // distribution different? if (dist0[j] != dist1[j]) { throw new Exception("Prediction different for instance " + (i + 1)); } } } println("yes"); result[0] = true; } catch (Exception ex) { println("no"); result[0] = false; if (m_Debug) { println("\n=== Full Report ==="); if (evalFail) { println("Results differ between non-missing and " + "missing test class values."); } else { print("Problem during testing"); println(": " + ex.getMessage() + "\n"); } println("Here are the datasets:\n"); println("=== Train Dataset ===\n" + train.toString() + "\n"); println("=== Train Weights ===\n"); for (int i = 0; i < train.numInstances(); i++) { println(" " + (i + 1) + " " + train.instance(i).weight()); } println("=== Test Dataset ===\n" + test.toString() + "\n\n"); println("(test weights all 1.0\n"); } } return result; } /** * Checks whether the classifier can handle instance weights. 
   * This test
   * compares the classifier performance on two datasets that are identical
   * except for the training weights. If the results change, then the classifier
   * must be using the weights. It may be possible to get a false positive from
   * this test if the weight changes aren't significant enough to induce a
   * change in classifier performance (but the weights are chosen to minimize
   * the likelihood of this).
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @return index 0 true if the test was passed
   */
  protected boolean[] instanceWeights(boolean nominalPredictor,
    boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
    boolean relationalPredictor, boolean multiInstance, int classType) {

    print("classifier uses instance weights");
    printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
      datePredictor, relationalPredictor, multiInstance, classType);
    print("...");
    int numTrain = 2 * getNumInstances(), numTest = getNumInstances(),
      numClasses = 2, missingLevel = 0;
    boolean predictorMissing = false, classMissing = false;

    boolean[] result = new boolean[2];
    Instances train = null;
    Instances test = null;
    // classifiers[0] trains on uniform weights, classifiers[1] on the
    // perturbed weights; identical evaluations imply weights are ignored.
    Classifier[] classifiers = null;
    Evaluation evaluationB = null;
    Evaluation evaluationI = null;
    boolean built = false;
    boolean evalFail = false;
    try {
      train = makeTestDataset(42, numTrain,
        nominalPredictor ? getNumNominal() + 1 : 0,
        numericPredictor ? getNumNumeric() + 1 : 0,
        stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0,
        relationalPredictor ? getNumRelational() : 0, numClasses, classType,
        multiInstance);
      test = makeTestDataset(24, numTest,
        nominalPredictor ? getNumNominal() + 1 : 0,
        numericPredictor ? getNumNumeric() + 1 : 0,
        stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0,
        relationalPredictor ? getNumRelational() : 0, numClasses, classType,
        multiInstance);
      if (missingLevel > 0) {
        addMissing(train, missingLevel, predictorMissing, classMissing);
        addMissing(test, Math.min(missingLevel, 50), predictorMissing,
          classMissing);
      }
      classifiers = AbstractClassifier.makeCopies(getClassifier(), 2);
      evaluationB = new Evaluation(train);
      evaluationI = new Evaluation(train);
      classifiers[0].buildClassifier(train);
      testWRTZeroR(classifiers[0], evaluationB, train, test);
    } catch (Exception ex) {
      throw new Error("Error setting up for tests: " + ex.getMessage());
    }
    try {

      // Now modify instance weights and re-built/test: zero all weights, then
      // give roughly half the instances a random weight in [1, 10].
      for (int i = 0; i < train.numInstances(); i++) {
        train.instance(i).setWeight(0);
      }
      Random random = new Random(1);
      for (int i = 0; i < train.numInstances() / 2; i++) {
        int inst = random.nextInt(train.numInstances());
        int weight = random.nextInt(10) + 1;
        train.instance(inst).setWeight(weight);
      }
      classifiers[1].buildClassifier(train);
      built = true;
      testWRTZeroR(classifiers[1], evaluationI, train, test);
      if (evaluationB.equals(evaluationI)) {
        evalFail = true;
        throw new Exception("evalFail");
      }

      println("yes");
      result[0] = true;
    } catch (Exception ex) {
      println("no");
      result[0] = false;

      if (m_Debug) {
        println("\n=== Full Report ===");

        if (evalFail) {
          println("Results don't differ between non-weighted and "
            + "weighted instance models.");
          println("Here are the results:\n");
          println(evaluationB.toSummaryString("\nboth methods\n", true));
        } else {
          print("Problem during");
          if (built) {
            print(" testing");
          } else {
            print(" training");
          }
          println(": " + ex.getMessage() + "\n");
        }
        println("Here are the datasets:\n");
        println("=== Train Dataset ===\n" + train.toString() + "\n");
        println("=== Train Weights ===\n");
        for (int i = 0; i < train.numInstances(); i++) {
          println(" " + (i + 1) + " " + train.instance(i).weight());
        }
        println("=== Test Dataset ===\n" + test.toString() + "\n\n");
        println("(test weights all 1.0\n");
      }
    }

    return result;
  }

  /**
   * Checks whether the scheme alters the training dataset during training. If
   * the scheme needs to modify the training data it should take a copy of the
   * training data. Currently checks for changes to header structure, number of
   * instances, order of instances, instance weights.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @param predictorMissing true if we know the classifier can handle (at
   *          least) moderate missing predictor values
   * @param classMissing true if we know the classifier can handle (at least)
   *          moderate missing class values
   * @return index 0 is true if the test was passed
   */
  protected boolean[] datasetIntegrity(boolean nominalPredictor,
    boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
    boolean relationalPredictor, boolean multiInstance, int classType,
    boolean predictorMissing, boolean classMissing) {

    print("classifier doesn't alter original datasets");
    printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
      datePredictor, relationalPredictor, multiInstance, classType);
    print("...");
    // 20% missing values are injected deliberately: missing-value handling is
    // a common reason for a scheme to modify its input data in place.
    int numTrain = getNumInstances(), numTest = getNumInstances(),
      numClasses = 2, missingLevel = 20;

    boolean[] result = new boolean[2];
    Instances train = null;
    Instances test = null;
    Classifier classifier = null;
    Evaluation evaluation = null;
    boolean built = false;
    try {
      train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal()
        : 0, numericPredictor ? getNumNumeric() : 0,
        stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0,
        relationalPredictor ? getNumRelational() : 0, numClasses, classType,
        multiInstance);
      test = makeTestDataset(24, numTest, nominalPredictor ? getNumNominal()
        : 0, numericPredictor ? getNumNumeric() : 0,
        stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0,
        relationalPredictor ? getNumRelational() : 0, numClasses, classType,
        multiInstance);
      if (missingLevel > 0) {
        addMissing(train, missingLevel, predictorMissing, classMissing);
        addMissing(test, Math.min(missingLevel, 50), predictorMissing,
          classMissing);
      }
      classifier = AbstractClassifier.makeCopies(getClassifier(), 1)[0];
      evaluation = new Evaluation(train);
    } catch (Exception ex) {
      throw new Error("Error setting up for tests: " + ex.getMessage());
    }
    try {
      // Train/test on copies; any difference against the originals means the
      // scheme mutated its input.
      Instances trainCopy = new Instances(train);
      Instances testCopy = new Instances(test);
      classifier.buildClassifier(trainCopy);
      compareDatasets(train, trainCopy);
      built = true;
      testWRTZeroR(classifier, evaluation, trainCopy, testCopy);
      compareDatasets(test, testCopy);

      println("yes");
      result[0] = true;
    } catch (Exception ex) {
      println("no");
      result[0] = false;

      if (m_Debug) {
        println("\n=== Full Report ===");
        print("Problem during");
        if (built) {
          print(" testing");
        } else {
          print(" training");
        }
        println(": " + ex.getMessage() + "\n");
        println("Here are the datasets:\n");
        println("=== Train Dataset ===\n" + train.toString() + "\n");
        println("=== Test Dataset ===\n" + test.toString() + "\n\n");
      }
    }

    return result;
  }

  /**
   * Runs a test on the datasets with the given characteristics.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @param missingLevel the percentage of missing values
   * @param predictorMissing true if the missing values may be in the predictors
   * @param classMissing true if the missing values may be in the class
   * @param numTrain the number of instances in the training set
   * @param numTest the number of instances in the test set
   * @param numClasses the number of classes
   * @param accepts the acceptable string in an exception
   * @return index 0 is true if the test was passed, index 1 is true if test was
   *         acceptable
   */
  protected boolean[] runBasicTest(boolean nominalPredictor,
    boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
    boolean relationalPredictor, boolean multiInstance, int classType,
    int missingLevel, boolean predictorMissing, boolean classMissing,
    int numTrain, int numTest, int numClasses, ArrayList<String> accepts) {

    // Convenience overload: class attribute in the default (last) position.
    return runBasicTest(nominalPredictor, numericPredictor, stringPredictor,
      datePredictor, relationalPredictor, multiInstance, classType,
      TestInstances.CLASS_IS_LAST, missingLevel, predictorMissing,
      classMissing, numTrain, numTest, numClasses, accepts);
  }

  /**
   * Runs a test on the datasets with the given characteristics.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @param classIndex the attribute index of the class
   * @param missingLevel the percentage of missing values
   * @param predictorMissing true if the missing values may be in the predictors
   * @param classMissing true if the missing values may be in the class
   * @param numTrain the number of instances in the training set
   * @param numTest the number of instances in the test set
   * @param numClasses the number of classes
   * @param accepts the acceptable string in an exception
   * @return index 0 is true if the test was passed, index 1 is true if test was
   *         acceptable
   */
  protected boolean[] runBasicTest(boolean nominalPredictor,
    boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
    boolean relationalPredictor, boolean multiInstance, int classType,
    int classIndex, int missingLevel, boolean predictorMissing,
    boolean classMissing, int numTrain, int numTest, int numClasses,
    ArrayList<String> accepts) {

    boolean[] result = new boolean[2];
    Instances train = null;
    Instances test = null;
    Classifier classifier = null;
    Evaluation evaluation = null;
    boolean built = false;
    try {
      train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal()
        : 0, numericPredictor ? getNumNumeric() : 0,
        stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0,
        relationalPredictor ? getNumRelational() : 0, numClasses, classType,
        classIndex, multiInstance);
      test = makeTestDataset(24, numTest, nominalPredictor ? getNumNominal()
        : 0, numericPredictor ? getNumNumeric() : 0,
        stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0,
        relationalPredictor ? getNumRelational() : 0, numClasses, classType,
        classIndex, multiInstance);
      if (missingLevel > 0) {
        addMissing(train, missingLevel, predictorMissing, classMissing);
        addMissing(test, Math.min(missingLevel, 50), predictorMissing,
          classMissing);
      }
      classifier = AbstractClassifier.makeCopies(getClassifier(), 1)[0];
      evaluation = new Evaluation(train);
    } catch (Exception ex) {
      ex.printStackTrace();
      throw new Error("Error setting up for tests: " + ex.getMessage());
    }
    try {
      classifier.buildClassifier(train);
      built = true;
      if (!testWRTZeroR(classifier, evaluation, train, test)[0]) {
        // Worse-than-ZeroR is an acceptable (warned) outcome, not a failure.
        result[0] = true;
        result[1] = true;
        throw new Exception("Scheme performs worse than ZeroR");
      }

      println("yes");
      result[0] = true;
    } catch (Exception ex) {
      boolean acceptable = false;
      String msg;
      // getMessage() may be null for exceptions thrown without a message.
      if (ex.getMessage() == null) {
        msg = "";
      } else {
        msg = ex.getMessage().toLowerCase();
      }
      if (msg.indexOf("not in classpath") > -1) {
        m_ClasspathProblems = true;
      }

      if (msg.indexOf("worse than zeror") >= 0) {
        println("warning: performs worse than ZeroR");
        result[0] = true;
        result[1] = true;
      } else {
        // A failure is "acceptable" if the message names one of the expected
        // reasons for refusing this kind of data.
        for (int i = 0; i < accepts.size(); i++) {
          if (msg.indexOf(accepts.get(i)) >= 0) {
            acceptable = true;
          }
        }

        println("no" + (acceptable ? " (OK error message)" : ""));
        result[1] = acceptable;
      }

      if (m_Debug) {
        println("\n=== Full Report ===");
        print("Problem during");
        if (built) {
          print(" testing");
        } else {
          print(" training");
        }
        println(": " + ex.getMessage() + "\n");
        if (!acceptable) {
          if (accepts.size() > 0) {
            print("Error message doesn't mention ");
            for (int i = 0; i < accepts.size(); i++) {
              if (i != 0) {
                print(" or ");
              }
              print('"' + accepts.get(i) + '"');
            }
          }
          println("here are the datasets:\n");
          println("=== Train Dataset ===\n" + train.toString() + "\n");
          println("=== Test Dataset ===\n" + test.toString() + "\n\n");
        }
      }
    }

    return result;
  }

  /**
   * Determine whether the scheme performs worse than ZeroR during testing
   *
   * @param classifier the pre-trained classifier
   * @param evaluation the classifier evaluation object
   * @param train the training data
   * @param test the test data
   * @return index 0 is true if the scheme performs better than ZeroR
   * @throws Exception if there was a problem during the scheme's testing
   */
  protected boolean[] testWRTZeroR(Classifier classifier,
    Evaluation evaluation, Instances train, Instances test) throws Exception {

    boolean[] result = new boolean[2];

    evaluation.evaluateModel(classifier, test);
    try {

      // Tested OK, compare with ZeroR
      Classifier zeroR = new weka.classifiers.rules.ZeroR();
      zeroR.buildClassifier(train);
      Evaluation zeroREval = new Evaluation(train);
      zeroREval.evaluateModel(zeroR, test);
      // Passes if the scheme's error rate is no worse than ZeroR's.
      result[0] = Utils.grOrEq(zeroREval.errorRate(), evaluation.errorRate());
    } catch (Exception ex) {
      throw new Error("Problem determining ZeroR performance: "
        + ex.getMessage());
    }

    return result;
  }

  /**
   * Make a simple set of instances, which can later be modified for use in
   * specific tests.
   *
   * @param seed the random number seed
   * @param numInstances the number of instances to generate
   * @param numNominal the number of nominal attributes
   * @param numNumeric the number of numeric attributes
   * @param numString the number of string attributes
   * @param numDate the number of date attributes
   * @param numRelational the number of relational attributes
   * @param numClasses the number of classes (if nominal class)
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @param multiInstance whether the dataset should be a multi-instance dataset
   * @return the test dataset
   * @throws Exception if the dataset couldn't be generated
   * @see #process(Instances)
   */
  protected Instances makeTestDataset(int seed, int numInstances,
    int numNominal, int numNumeric, int numString, int numDate,
    int numRelational, int numClasses, int classType, boolean multiInstance)
    throws Exception {

    // Delegates to the full version with the class as the last attribute.
    return makeTestDataset(seed, numInstances, numNominal, numNumeric,
      numString, numDate, numRelational, numClasses, classType,
      TestInstances.CLASS_IS_LAST, multiInstance);
  }

  /**
   * Make a simple set of instances with variable position of the class
   * attribute, which can later be modified for use in specific tests.
   *
   * @param seed the random number seed
   * @param numInstances the number of instances to generate
   * @param numNominal the number of nominal attributes
   * @param numNumeric the number of numeric attributes
   * @param numString the number of string attributes
   * @param numDate the number of date attributes
   * @param numRelational the number of relational attributes
   * @param numClasses the number of classes (if nominal class)
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
* @param classIndex the index of the class (0-based, -1 as last) * @param multiInstance whether the dataset should a multi-instance dataset * @return the test dataset * @throws Exception if the dataset couldn't be generated * @see TestInstances#CLASS_IS_LAST * @see #process(Instances) */ protected Instances makeTestDataset(int seed, int numInstances, int numNominal, int numNumeric, int numString, int numDate, int numRelational, int numClasses, int classType, int classIndex, boolean multiInstance) throws Exception { TestInstances dataset = new TestInstances(); dataset.setSeed(seed); dataset.setNumInstances(numInstances); dataset.setNumNominal(numNominal); dataset.setNumNumeric(numNumeric); dataset.setNumString(numString); dataset.setNumDate(numDate); dataset.setNumRelational(numRelational); dataset.setNumClasses(numClasses); dataset.setClassType(classType); dataset.setClassIndex(classIndex); dataset.setNumClasses(numClasses); dataset.setMultiInstance(multiInstance); dataset.setWords(getWords()); dataset.setWordSeparators(getWordSeparators()); return process(dataset.generate()); } /** * Print out a short summary string for the dataset characteristics * * @param nominalPredictor true if nominal predictor attributes are present * @param numericPredictor true if numeric predictor attributes are present * @param stringPredictor true if string predictor attributes are present * @param datePredictor true if date predictor attributes are present * @param relationalPredictor true if relational predictor attributes are * present * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) 
*/ protected void printAttributeSummary(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType) { String str = ""; if (numericPredictor) { str += " numeric"; } if (nominalPredictor) { if (str.length() > 0) { str += " &"; } str += " nominal"; } if (stringPredictor) { if (str.length() > 0) { str += " &"; } str += " string"; } if (datePredictor) { if (str.length() > 0) { str += " &"; } str += " date"; } if (relationalPredictor) { if (str.length() > 0) { str += " &"; } str += " relational"; } str += " predictors)"; switch (classType) { case Attribute.NUMERIC: str = " (numeric class," + str; break; case Attribute.NOMINAL: str = " (nominal class," + str; break; case Attribute.STRING: str = " (string class," + str; break; case Attribute.DATE: str = " (date class," + str; break; case Attribute.RELATIONAL: str = " (relational class," + str; break; } print(str); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Test method for this class * * @param args the commandline parameters */ public static void main(String[] args) { runCheck(new CheckClassifier(), args); } }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/CheckSource.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * CheckSource.java
 * Copyright (C) 2007-2012 University of Waikato, Hamilton, New Zealand
 */

package weka.classifiers;

import java.io.File;
import java.util.Enumeration;
import java.util.Vector;

import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.core.converters.ConverterUtils.DataSource;

/**
 * A simple class for checking the source generated from Classifiers
 * implementing the <code>weka.classifiers.Sourcable</code> interface.
 * It takes a classifier, the classname of the generated source
 * and the dataset the source was generated with as parameters and tests
 * the output of the built classifier against the output of the generated
 * source. Use option '-h' to display all available commandline options.
 *
 * <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -W &lt;classname and options&gt;
 *  The classifier (incl. options) that was used to generate
 *  the source code.</pre>
 *
 * <pre> -S &lt;classname&gt;
 *  The classname of the generated source code.</pre>
 *
 * <pre> -t &lt;file&gt;
 *  The training set with which the source code was generated.</pre>
 *
 * <pre> -c &lt;index&gt;
 *  The class index of the training set. 'first' and 'last' are
 *  valid indices.
 *  (default: last)</pre>
 * <!-- options-end -->
 *
 * Options after -- are passed to the designated classifier (specified with -W).
 *
 * @author fracpete (fracpete at waikato dot ac dot nz)
 * @version $Revision$
 * @see weka.classifiers.Sourcable
 */
public class CheckSource implements OptionHandler, RevisionHandler {

  /** the classifier used for generating the source code */
  protected Classifier m_Classifier = null;

  /** the generated source code (an instance of the generated class) */
  protected Classifier m_SourceCode = null;

  /** the dataset to use for testing */
  protected File m_Dataset = null;

  /** the class index (0-based; -1 means "last attribute") */
  protected int m_ClassIndex = -1;

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration<Option> listOptions() {
    Vector<Option> result = new Vector<Option>();

    result.addElement(new Option(
      "\tThe classifier (incl. options) that was used to generate\n"
        + "\tthe source code.", "W", 1, "-W <classname and options>"));

    result.addElement(new Option(
      "\tThe classname of the generated source code.", "S", 1,
      "-S <classname>"));

    result.addElement(new Option(
      "\tThe training set with which the source code was generated.", "t",
      1, "-t <file>"));

    result.addElement(new Option(
      "\tThe class index of the training set. 'first' and 'last' are\n"
        + "\tvalid indices.\n" + "\t(default: last)", "c", 1, "-c <index>"));

    return result.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   * <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -W &lt;classname and options&gt;
   *  The classifier (incl. options) that was used to generate
   *  the source code.</pre>
   *
   * <pre> -S &lt;classname&gt;
   *  The classname of the generated source code.</pre>
   *
   * <pre> -t &lt;file&gt;
   *  The training set with which the source code was generated.</pre>
   *
   * <pre> -c &lt;index&gt;
   *  The class index of the training set. 'first' and 'last' are
   *  valid indices.
   *  (default: last)</pre>
   * <!-- options-end -->
   *
   * Options after -- are passed to the designated classifier (specified with
   * -W).
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String tmpStr;
    String[] spec;
    String classname;

    // -W: classname + options of the classifier that produced the source
    tmpStr = Utils.getOption('W', options);
    if (tmpStr.length() > 0) {
      spec = Utils.splitOptions(tmpStr);
      if (spec.length == 0)
        throw new IllegalArgumentException("Invalid classifier specification string");
      classname = spec[0];
      // blank out the classname so the remainder is passed as options
      spec[0] = "";
      setClassifier((Classifier) Utils.forName(Classifier.class, classname, spec));
    } else {
      throw new Exception("No classifier (classname + options) provided!");
    }

    // -S: classname of the generated source code (no options allowed)
    tmpStr = Utils.getOption('S', options);
    if (tmpStr.length() > 0) {
      spec = Utils.splitOptions(tmpStr);
      if (spec.length != 1)
        throw new IllegalArgumentException("Invalid source code specification string");
      classname = spec[0];
      spec[0] = "";
      setSourceCode((Classifier) Utils.forName(Classifier.class, classname, spec));
    } else {
      throw new Exception("No source code (classname) provided!");
    }

    // -t: training set file
    tmpStr = Utils.getOption('t', options);
    if (tmpStr.length() != 0)
      setDataset(new File(tmpStr));
    else
      throw new Exception("No dataset provided!");

    // -c: class index ('first', 'last' or a 1-based number)
    tmpStr = Utils.getOption('c', options);
    if (tmpStr.length() != 0) {
      if (tmpStr.equals("first"))
        setClassIndex(0);
      else if (tmpStr.equals("last"))
        setClassIndex(-1);
      else
        setClassIndex(Integer.parseInt(tmpStr) - 1);
    } else {
      setClassIndex(-1);
    }
  }

  /**
   * Gets the current settings of the Classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    Vector<String> result;

    result = new Vector<String>();

    if (getClassifier() != null) {
      result.add("-W");
      result.add(getClassifier().getClass().getName() + " "
        + Utils.joinOptions(((OptionHandler) getClassifier()).getOptions()));
    }

    if (getSourceCode() != null) {
      result.add("-S");
      result.add(getSourceCode().getClass().getName());
    }

    if (getDataset() != null) {
      result.add("-t");
      result.add(m_Dataset.getAbsolutePath());
    }

    result.add("-c");
    if (getClassIndex() == -1)
      result.add("last");
    else if (getClassIndex() == 0)
      result.add("first");
    else
      result.add("" + (getClassIndex() + 1));

    return result.toArray(new String[result.size()]);
  }

  /**
   * Sets the classifier to use for the comparison.
   *
   * @param value the classifier to use
   */
  public void setClassifier(Classifier value) {
    m_Classifier = value;
  }

  /**
   * Gets the classifier being used for the tests, can be null.
   *
   * @return the currently set classifier
   */
  public Classifier getClassifier() {
    return m_Classifier;
  }

  /**
   * Sets the class to test.
   *
   * @param value the class to test
   */
  public void setSourceCode(Classifier value) {
    m_SourceCode = value;
  }

  /**
   * Gets the class to test.
   *
   * @return the currently set class, can be null.
   */
  public Classifier getSourceCode() {
    return m_SourceCode;
  }

  /**
   * Sets the dataset to use for testing.
   *
   * @param value the dataset to use.
   * @throws IllegalArgumentException if the file does not exist
   */
  public void setDataset(File value) {
    if (!value.exists())
      throw new IllegalArgumentException(
        "Dataset '" + value.getAbsolutePath() + "' does not exist!");
    else
      m_Dataset = value;
  }

  /**
   * Gets the dataset to use for testing, can be null.
   *
   * @return the dataset to use.
   */
  public File getDataset() {
    return m_Dataset;
  }

  /**
   * Sets the class index of the dataset.
   *
   * @param value the class index of the dataset (0-based, -1 for last).
   */
  public void setClassIndex(int value) {
    m_ClassIndex = value;
  }

  /**
   * Gets the class index of the dataset.
   *
   * @return the current class index.
   */
  public int getClassIndex() {
    return m_ClassIndex;
  }

  /**
   * Performs the comparison test: builds the classifier on the dataset and
   * compares its prediction on every instance against the prediction of the
   * generated source-code class.
   *
   * @return true if tests were successful (all predictions agree)
   * @throws Exception if tests fail
   */
  public boolean execute() throws Exception {
    boolean result;
    Classifier cls;
    Classifier code;
    int i;
    Instances data;
    DataSource source;
    boolean numeric;
    boolean different;
    double predClassifier;
    double predSource;

    result = true;

    // a few checks
    if (getClassifier() == null)
      throw new Exception("No classifier set!");
    if (getSourceCode() == null)
      throw new Exception("No source code set!");
    if (getDataset() == null)
      throw new Exception("No dataset set!");
    if (!getDataset().exists())
      throw new Exception(
        "Dataset '" + getDataset().getAbsolutePath() + "' does not exist!");

    // load data
    source = new DataSource(getDataset().getAbsolutePath());
    data = source.getDataSet();
    if (getClassIndex() == -1)
      data.setClassIndex(data.numAttributes() - 1);
    else
      data.setClassIndex(getClassIndex());
    numeric = data.classAttribute().isNumeric();

    // build classifier (on a copy, so the configured template is untouched)
    cls = AbstractClassifier.makeCopy(getClassifier());
    cls.buildClassifier(data);

    code = getSourceCode();

    // compare predictions
    for (i = 0; i < data.numInstances(); i++) {
      // perform predictions
      predClassifier = cls.classifyInstance(data.instance(i));
      predSource = code.classifyInstance(data.instance(i));

      // compare both results; two NaNs (missing predictions) count as equal
      if (Double.isNaN(predClassifier) && Double.isNaN(predSource)) {
        different = false;
      } else {
        if (numeric)
          different = !Utils.eq(predClassifier, predSource);
        else
          different = ((int) predClassifier != (int) predSource);
      }

      if (different) {
        result = false;
        if (numeric)
          System.out.println(
            (i+1) + ". instance (Classifier/Source code): "
              + predClassifier + " != " + predSource);
        else
          System.out.println(
            (i+1) + ". instance (Classifier/Source code): "
              + data.classAttribute().value((int) predClassifier)
              + " != " + data.classAttribute().value((int) predSource));
      }
    }

    return result;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }

  /**
   * Executes the tests, use "-h" to list the commandline options.
   *
   * @param args the commandline parameters
   * @throws Exception if something goes wrong
   */
  public static void main(String[] args) throws Exception{
    CheckSource check;
    StringBuffer text;
    Enumeration<Option> enm;

    check = new CheckSource();

    // print help if requested, otherwise parse options and run the test
    if (Utils.getFlag('h', args)) {
      text = new StringBuffer();
      text.append("\nHelp requested:\n\n");
      enm = check.listOptions();
      while (enm.hasMoreElements()) {
        Option option = (Option) enm.nextElement();
        text.append(option.synopsis() + "\n");
        text.append(option.description() + "\n");
      }
      System.out.println("\n" + text + "\n");
    } else {
      check.setOptions(args);
      if (check.execute())
        System.out.println("Tests OK!");
      else
        System.out.println("Tests failed!");
    }
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/Classifier.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Classifier.java
 * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import java.io.Serializable;

import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;

/**
 * Classifier interface. All schemes for numeric or nominal prediction in
 * Weka implement this interface. Note that a classifier MUST either implement
 * distributionForInstance() or classifyInstance().
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @author Len Trigg (trigg@cs.waikato.ac.nz)
 * @version $Revision$
 */
public interface Classifier extends Serializable {

  /**
   * Generates a classifier. Must initialize all fields of the classifier
   * that are not being set via options (ie. multiple calls of buildClassifier
   * must always lead to the same result). Must not change the dataset
   * in any way.
   *
   * @param data set of instances serving as training data
   * @exception Exception if the classifier has not been
   * generated successfully
   */
  public abstract void buildClassifier(Instances data) throws Exception;

  /**
   * Classifies the given test instance. The instance has to belong to a
   * dataset when it's being classified. Note that a classifier MUST
   * implement either this or distributionForInstance().
   *
   * @param instance the instance to be classified
   * @return the predicted most likely class for the instance or
   * Utils.missingValue() if no prediction is made
   * @exception Exception if an error occurred during the prediction
   */
  public double classifyInstance(Instance instance) throws Exception;

  /**
   * Predicts the class memberships for a given instance. If
   * an instance is unclassified, the returned array elements
   * must be all zero. If the class is numeric, the array
   * must consist of only one element, which contains the
   * predicted value. Note that a classifier MUST implement
   * either this or classifyInstance().
   *
   * @param instance the instance to be classified
   * @return an array containing the estimated membership
   * probabilities of the test instance in each class
   * or the numeric prediction
   * @exception Exception if distribution could not be
   * computed successfully
   */
  public double[] distributionForInstance(Instance instance) throws Exception;

  /**
   * Returns the Capabilities of this classifier. Maximally permissive
   * capabilities are allowed by default. Derived classifiers should
   * override this method and first disable all capabilities and then
   * enable just those capabilities that make sense for the scheme.
   *
   * @return the capabilities of this object
   * @see Capabilities
   */
  public Capabilities getCapabilities();
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/ConditionalDensityEstimator.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * ConditionalDensityEstimator.java
 * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import weka.core.Instance;

/**
 * Interface for numeric prediction schemes that can output conditional
 * density estimates.
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision$
 */
public interface ConditionalDensityEstimator {

  /**
   * Returns natural logarithm of density estimate for given value based on
   * given instance.
   *
   * @param instance the instance to make the prediction for.
   * @param value the value to make the prediction for.
   * @return the natural logarithm of the density estimate
   * @exception Exception if the density cannot be computed
   */
  public double logDensity(Instance instance, double value) throws Exception;
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/CostMatrix.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * CostMatrix.java
 * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import java.io.LineNumberReader;
import java.io.Reader;
import java.io.Serializable;
import java.io.StreamTokenizer;
import java.io.Writer;
import java.util.Random;
import java.util.StringTokenizer;

import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.core.expressionlanguage.common.IfElseMacro;
import weka.core.expressionlanguage.common.JavaMacro;
import weka.core.expressionlanguage.common.MacroDeclarationsCompositor;
import weka.core.expressionlanguage.common.MathFunctions;
import weka.core.expressionlanguage.common.Primitives.DoubleExpression;
import weka.core.expressionlanguage.core.Node;
import weka.core.expressionlanguage.parser.Parser;
import weka.core.expressionlanguage.weka.InstancesHelper;

/**
 * Class for storing and manipulating a misclassification cost matrix. The
 * element at position i,j in the matrix is the penalty for classifying an
 * instance of class j as class i. Cost values can be fixed or computed on a
 * per-instance basis (cost sensitive evaluation only) from the value of an
 * attribute or a mathematical expression involving attribute(s).<br>
 * <br>
 *
 * Values in an instance are accessed in an expression by prefixing their index
 * (starting at 1) with the character 'a'. E.g.<br>
 * <br>
 *
 * a1 &circ; 2 * a5 / log(a7 * 4.0) <br>
 *
 * Supported operators: +, -, *, /, ^, log, abs, cos, exp, sqrt, floor, ceil,
 * rint, tan, sin, (, ).
 *
 * @author Mark Hall
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @version $Revision$
 * @see weka.core.UnsupportedAttributeTypeException
 */
public class CostMatrix implements Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = -1973792250544554965L;

  /** the number of rows (= number of columns) */
  private int m_size;

  /**
   * [rows][columns]; each cell holds either a Double (fixed cost), a String
   * (unparsed expression) or an InstanceExpression (parsed expression)
   */
  protected Object[][] m_matrix;

  /** The default file extension for cost matrix files */
  public static String FILE_EXTENSION = ".cost";

  /**
   * Creates a default cost matrix of a particular size. All diagonal values
   * will be 0 and all non-diagonal values 1.
   *
   * @param numOfClasses the number of classes that the cost matrix holds.
   */
  public CostMatrix(int numOfClasses) {
    m_size = numOfClasses;
    initialize();
  }

  /**
   * Creates a cost matrix that is a copy of another.
   *
   * @param toCopy the matrix to copy.
   */
  public CostMatrix(CostMatrix toCopy) {
    this(toCopy.size());

    for (int i = 0; i < m_size; i++) {
      for (int j = 0; j < m_size; j++) {
        setCell(i, j, toCopy.getCell(i, j));
      }
    }
  }

  /**
   * Initializes the matrix: 0 on the diagonal, 1 everywhere else.
   */
  public void initialize() {
    m_matrix = new Object[m_size][m_size];

    for (int i = 0; i < m_size; i++) {
      for (int j = 0; j < m_size; j++) {
        setCell(i, j, i == j ? new Double(0.0) : new Double(1.0));
      }
    }
  }

  /**
   * The number of rows (and columns).
   *
   * @return the size of the matrix
   */
  public int size() {
    return m_size;
  }

  /**
   * Same as size.
   *
   * @return the number of columns
   */
  public int numColumns() {
    return size();
  }

  /**
   * Same as size.
   *
   * @return the number of rows
   */
  public int numRows() {
    return size();
  }

  /**
   * Parses any String cells into InstanceExpressions against the given
   * dataset.
   *
   * @param dataset the dataset the expressions refer to
   * @return true if the matrix contains any non-fixed (expression) cells
   * @throws Exception if an expression cannot be parsed
   */
  private boolean replaceStrings(Instances dataset) throws Exception {
    boolean nonDouble = false;

    for (int i = 0; i < m_size; i++) {
      for (int j = 0; j < m_size; j++) {
        if (getCell(i, j) instanceof String) {
          setCell(i, j, new InstanceExpression((String) getCell(i, j), dataset));
          nonDouble = true;
        } else if (getCell(i, j) instanceof InstanceExpression) {
          nonDouble = true;
        }
      }
    }

    return nonDouble;
  }

  /**
   * Applies the cost matrix to a set of instances. If a random number generator
   * is supplied the instances will be resampled, otherwise they will be
   * reweighted. Adapted from code once sitting in Instances.java
   *
   * @param data the instances to reweight.
   * @param random a random number generator for resampling, if null then
   *          instances are reweighted.
   * @return a new dataset reflecting the cost of misclassification.
   * @exception Exception if the data has no class or the matrix is
   *              inappropriate.
   */
  public Instances applyCostMatrix(Instances data, Random random)
    throws Exception {

    double sumOfWeightFactors = 0, sumOfMissClassWeights, sumOfWeights;
    double[] weightOfInstancesInClass, weightFactor, weightOfInstances;

    if (data.classIndex() < 0) {
      throw new Exception("Class index is not set!");
    }

    if (size() != data.numClasses()) {
      throw new Exception("Misclassification cost matrix has wrong format!");
    }

    // are there any non-fixed, per-instance costs defined in the matrix?
    if (replaceStrings(data)) {
      // could reweight in the two class case only
      if (data.classAttribute().numValues() > 2) {
        throw new Exception("Can't resample/reweight instances using "
          + "non-fixed cost values when there are more "
          + "than two classes!");
      } else {
        // Store new weights: each instance is scaled by the cost of
        // misclassifying it as the *other* class.
        weightOfInstances = new double[data.numInstances()];
        for (int i = 0; i < data.numInstances(); i++) {
          Instance inst = data.instance(i);
          int classValIndex = (int) inst.classValue();
          double factor = 1.0;
          Object element = (classValIndex == 0) ? getCell(classValIndex, 1)
            : getCell(classValIndex, 0);
          if (element instanceof Double) {
            factor = ((Double) element).doubleValue();
          } else {
            factor = ((InstanceExpression) element).evaluate(inst);
          }
          weightOfInstances[i] = inst.weight() * factor;
        }

        // Change instances weight or do resampling
        if (random != null) {
          return data.resampleWithWeights(random, weightOfInstances);
        } else {
          Instances instances = new Instances(data);
          for (int i = 0; i < data.numInstances(); i++) {
            instances.instance(i).setWeight(weightOfInstances[i]);
          }
          return instances;
        }
      }
    }

    weightFactor = new double[data.numClasses()];
    weightOfInstancesInClass = new double[data.numClasses()];
    for (int j = 0; j < data.numInstances(); j++) {
      weightOfInstancesInClass[(int) data.instance(j).classValue()] += data
        .instance(j).weight();
    }
    sumOfWeights = Utils.sum(weightOfInstancesInClass);

    // normalize the matrix if not already: a non-zero diagonal is shifted to
    // zero and the whole computation restarts on the normalized copy.
    for (int i = 0; i < m_size; i++) {
      if (!Utils.eq(((Double) getCell(i, i)).doubleValue(), 0)) {
        CostMatrix normMatrix = new CostMatrix(this);
        normMatrix.normalize();
        return normMatrix.applyCostMatrix(data, random);
      }
    }

    for (int i = 0; i < data.numClasses(); i++) {
      // Using Kai Ming Ting's formula for deriving weights for
      // the classes and Breiman's heuristic for multiclass
      // problems.
      sumOfMissClassWeights = 0;
      for (int j = 0; j < data.numClasses(); j++) {
        if (Utils.sm(((Double) getCell(i, j)).doubleValue(), 0)) {
          throw new Exception("Neg. weights in misclassification "
            + "cost matrix!");
        }
        sumOfMissClassWeights += ((Double) getCell(i, j)).doubleValue();
      }
      weightFactor[i] = sumOfMissClassWeights * sumOfWeights;
      sumOfWeightFactors += sumOfMissClassWeights * weightOfInstancesInClass[i];
    }
    for (int i = 0; i < data.numClasses(); i++) {
      weightFactor[i] /= sumOfWeightFactors;
    }

    // Store new weights
    weightOfInstances = new double[data.numInstances()];
    for (int i = 0; i < data.numInstances(); i++) {
      weightOfInstances[i] = data.instance(i).weight()
        * weightFactor[(int) data.instance(i).classValue()];
    }

    // Change instances weight or do resampling
    if (random != null) {
      return data.resampleWithWeights(random, weightOfInstances);
    } else {
      Instances instances = new Instances(data);
      for (int i = 0; i < data.numInstances(); i++) {
        instances.instance(i).setWeight(weightOfInstances[i]);
      }
      return instances;
    }
  }

  /**
   * Calculates the expected misclassification cost for each possible class
   * value, given class probability estimates.
   *
   * @param classProbs the class probability estimates.
   * @return the expected costs.
   * @exception Exception if the wrong number of class probabilities is
   *              supplied, or the matrix contains non-fixed costs.
   */
  public double[] expectedCosts(double[] classProbs) throws Exception {

    if (classProbs.length != m_size) {
      throw new Exception("Length of probability estimates don't "
        + "match cost matrix");
    }

    double[] costs = new double[m_size];

    for (int x = 0; x < m_size; x++) {
      for (int y = 0; y < m_size; y++) {
        Object element = getCell(y, x);
        if (!(element instanceof Double)) {
          throw new Exception("Can't use non-fixed costs in "
            + "computing expected costs.");
        }
        costs[x] += classProbs[y] * ((Double) element).doubleValue();
      }
    }

    return costs;
  }

  /**
   * Calculates the expected misclassification cost for each possible class
   * value, given class probability estimates.
   *
   * @param classProbs the class probability estimates.
   * @param inst the current instance for which the class probabilities apply.
   *          Is used for computing any non-fixed cost values.
   * @return the expected costs.
   * @exception Exception if something goes wrong
   */
  public double[] expectedCosts(double[] classProbs, Instance inst)
    throws Exception {

    if (classProbs.length != m_size) {
      throw new Exception("Length of probability estimates don't "
        + "match cost matrix");
    }

    // if all costs are fixed, delegate to the simpler overload
    if (!replaceStrings(inst.dataset())) {
      return expectedCosts(classProbs);
    }

    double[] costs = new double[m_size];

    for (int x = 0; x < m_size; x++) {
      for (int y = 0; y < m_size; y++) {
        Object element = getCell(y, x);
        double costVal;
        if (!(element instanceof Double)) {
          costVal = ((InstanceExpression) element).evaluate(inst);
        } else {
          costVal = ((Double) element).doubleValue();
        }
        costs[x] += classProbs[y] * costVal;
      }
    }

    return costs;
  }

  /**
   * Gets the maximum cost for a particular class value.
   *
   * @param classVal the class value.
   * @return the maximum cost.
   * @exception Exception if cost matrix contains non-fixed costs
   */
  public double getMaxCost(int classVal) throws Exception {

    double maxCost = Double.NEGATIVE_INFINITY;

    for (int i = 0; i < m_size; i++) {
      Object element = getCell(classVal, i);
      if (!(element instanceof Double)) {
        throw new Exception("Can't use non-fixed costs when "
          + "getting max cost.");
      }
      double cost = ((Double) element).doubleValue();
      if (cost > maxCost)
        maxCost = cost;
    }

    return maxCost;
  }

  /**
   * Gets the maximum cost for a particular class value.
   *
   * @param classVal the class value.
   * @param inst the instance used to evaluate any non-fixed cost cells.
   * @return the maximum cost.
   * @exception Exception if cost matrix contains non-fixed costs
   */
  public double getMaxCost(int classVal, Instance inst) throws Exception {

    // if all costs are fixed, delegate to the simpler overload
    if (!replaceStrings(inst.dataset())) {
      return getMaxCost(classVal);
    }

    double maxCost = Double.NEGATIVE_INFINITY;
    double cost;
    for (int i = 0; i < m_size; i++) {
      Object element = getCell(classVal, i);
      if (!(element instanceof Double)) {
        cost = ((InstanceExpression) element).evaluate(inst);
      } else {
        cost = ((Double) element).doubleValue();
      }
      if (cost > maxCost)
        maxCost = cost;
    }

    return maxCost;
  }

  /**
   * Normalizes the matrix so that the diagonal contains zeros (subtracts each
   * diagonal value from its column).
   *
   * NOTE(review): assumes all cells are fixed (Double) costs — a cell holding
   * an expression would raise a ClassCastException here.
   */
  public void normalize() {

    for (int y = 0; y < m_size; y++) {
      double diag = ((Double) getCell(y, y)).doubleValue();
      for (int x = 0; x < m_size; x++) {
        setCell(x, y, new Double(((Double) getCell(x, y)).doubleValue() - diag));
      }
    }
  }

  /**
   * Loads a cost matrix in the old format from a reader. Adapted from code once
   * sitting in Instances.java. Each non-comment line holds
   * "firstClassIndex secondClassIndex weight".
   *
   * @param reader the reader to get the values from.
   * @exception Exception if the matrix cannot be read correctly.
   */
  public void readOldFormat(Reader reader) throws Exception {

    StreamTokenizer tokenizer;
    int currentToken;
    double firstIndex, secondIndex, weight;

    tokenizer = new StreamTokenizer(reader);

    initialize();

    tokenizer.commentChar('%');
    tokenizer.eolIsSignificant(true);
    while (StreamTokenizer.TT_EOF != (currentToken = tokenizer.nextToken())) {

      // Skip empty lines
      if (currentToken == StreamTokenizer.TT_EOL) {
        continue;
      }

      // Get index of first class.
      if (currentToken != StreamTokenizer.TT_NUMBER) {
        throw new Exception("Only numbers and comments allowed "
          + "in cost file!");
      }
      firstIndex = tokenizer.nval;
      if (!Utils.eq((int) firstIndex, firstIndex)) {
        throw new Exception("First number in line has to be "
          + "index of a class!");
      }
      if ((int) firstIndex >= size()) {
        throw new Exception("Class index out of range!");
      }

      // Get index of second class.
      if (StreamTokenizer.TT_EOF == (currentToken = tokenizer.nextToken())) {
        throw new Exception("Premature end of file!");
      }
      if (currentToken == StreamTokenizer.TT_EOL) {
        throw new Exception("Premature end of line!");
      }
      if (currentToken != StreamTokenizer.TT_NUMBER) {
        throw new Exception("Only numbers and comments allowed "
          + "in cost file!");
      }
      secondIndex = tokenizer.nval;
      if (!Utils.eq((int) secondIndex, secondIndex)) {
        throw new Exception("Second number in line has to be "
          + "index of a class!");
      }
      if ((int) secondIndex >= size()) {
        throw new Exception("Class index out of range!");
      }
      if ((int) secondIndex == (int) firstIndex) {
        throw new Exception("Diagonal of cost matrix non-zero!");
      }

      // Get cost factor.
      if (StreamTokenizer.TT_EOF == (currentToken = tokenizer.nextToken())) {
        throw new Exception("Premature end of file!");
      }
      if (currentToken == StreamTokenizer.TT_EOL) {
        throw new Exception("Premature end of line!");
      }
      if (currentToken != StreamTokenizer.TT_NUMBER) {
        throw new Exception("Only numbers and comments allowed "
          + "in cost file!");
      }
      weight = tokenizer.nval;
      if (!Utils.gr(weight, 0)) {
        throw new Exception("Only positive weights allowed!");
      }
      setCell((int) firstIndex, (int) secondIndex, new Double(weight));
    }
  }

  /**
   * Reads a matrix from a reader. The first line in the file should contain the
   * number of rows and columns. Subsequent lines contain elements of the
   * matrix. (FracPete: taken from old weka.core.Matrix class)
   *
   * @param reader the reader containing the matrix
   * @throws Exception if an error occurs
   * @see #write(Writer)
   */
  public CostMatrix(Reader reader) throws Exception {
    LineNumberReader lnr = new LineNumberReader(reader);
    String line;
    // -1 until the header line (rows/cols) has been parsed
    int currentRow = -1;

    while ((line = lnr.readLine()) != null) {

      // Comments
      if (line.startsWith("%")) {
        continue;
      }

      StringTokenizer st = new StringTokenizer(line);
      // Ignore blank lines
      if (!st.hasMoreTokens()) {
        continue;
      }

      if (currentRow < 0) {
        // Header line: number of rows and columns
        int rows = Integer.parseInt(st.nextToken());
        if (!st.hasMoreTokens()) {
          throw new Exception("Line " + lnr.getLineNumber()
            + ": expected number of columns");
        }

        int cols = Integer.parseInt(st.nextToken());
        if (rows != cols) {
          throw new Exception("Trying to create a non-square cost "
            + "matrix");
        }

        m_size = rows;
        initialize();
        currentRow++;
        continue;

      } else {
        if (currentRow == m_size) {
          throw new Exception("Line " + lnr.getLineNumber()
            + ": too many rows provided");
        }

        for (int i = 0; i < m_size; i++) {
          if (!st.hasMoreTokens()) {
            throw new Exception("Line " + lnr.getLineNumber()
              + ": too few matrix elements provided");
          }

          String nextTok = st.nextToken();
          // try to parse as a double first; non-numeric tokens are stored
          // as expression strings and parsed later by replaceStrings()
          Double val = null;
          try {
            val = new Double(nextTok);
          } catch (Exception ex) {
            val = null;
          }

          if (val == null) {
            setCell(currentRow, i, nextTok);
          } else {
            setCell(currentRow, i, val);
          }
        }
        currentRow++;
      }
    }

    if (currentRow == -1) {
      throw new Exception("Line " + lnr.getLineNumber()
        + ": expected number of rows");
    } else if (currentRow != m_size) {
      throw new Exception("Line " + lnr.getLineNumber()
        + ": too few rows provided");
    }
  }

  /**
   * Writes out a matrix. The format can be read via the CostMatrix(Reader)
   * constructor. (FracPete: taken from old weka.core.Matrix class)
   *
   * @param w the output Writer
   * @throws Exception if an error occurs
   */
  public void write(Writer w) throws Exception {
    w.write("% Rows\tColumns\n");
    w.write("" + m_size + "\t" + m_size + "\n");
    w.write("% Matrix elements\n");
    for (int i = 0; i < m_size; i++) {
      for (int j = 0; j < m_size; j++) {
        w.write("" + getCell(i, j) + "\t");
      }
      w.write("\n");
    }
    w.flush();
  }

  /**
   * converts the Matrix into a single line Matlab string: matrix is enclosed by
   * parentheses, rows are separated by semicolon and single cells by blanks,
   * e.g., [1 2; 3 4].
   *
   * @return the matrix in Matlab single line format
   */
  public String toMatlab() {
    StringBuffer result;
    int i;
    int n;

    result = new StringBuffer();

    result.append("[");
    for (i = 0; i < m_size; i++) {
      if (i > 0) {
        result.append("; ");
      }
      for (n = 0; n < m_size; n++) {
        if (n > 0) {
          result.append(" ");
        }
        result.append(getCell(i, n));
      }
    }
    result.append("]");

    return result.toString();
  }

  /**
   * creates a matrix from the given Matlab string.
* * @param matlab the matrix in matlab format * @return the matrix represented by the given string * @see #toMatlab() */ public static CostMatrix parseMatlab(String matlab) throws Exception { StringTokenizer tokRow; StringTokenizer tokCol; int rows; int cols; CostMatrix result; String cells; // get content cells = matlab.substring(matlab.indexOf("[") + 1, matlab.indexOf("]")).trim(); // determine dimenions tokRow = new StringTokenizer(cells, ";"); rows = tokRow.countTokens(); tokCol = new StringTokenizer(tokRow.nextToken(), " "); cols = tokCol.countTokens(); // fill matrix result = new CostMatrix(rows); tokRow = new StringTokenizer(cells, ";"); rows = 0; while (tokRow.hasMoreTokens()) { tokCol = new StringTokenizer(tokRow.nextToken(), " "); cols = 0; while (tokCol.hasMoreTokens()) { // is it a number String current = tokCol.nextToken(); try { double val = Double.parseDouble(current); result.setCell(rows, cols, new Double(val)); } catch (NumberFormatException e) { // must be an expression result.setCell(rows, cols, current); } cols++; } rows++; } return result; } /** * Set the value of a particular cell in the matrix * * @param rowIndex the row * @param columnIndex the column * @param value the value to set */ public final void setCell(int rowIndex, int columnIndex, Object value) { m_matrix[rowIndex][columnIndex] = value; } /** * Return the contents of a particular cell. Note: this method returns the * Object stored at a particular cell. 
* * @param rowIndex the row * @param columnIndex the column * @return the value at the cell */ public final Object getCell(int rowIndex, int columnIndex) { return m_matrix[rowIndex][columnIndex]; } /** * Return the value of a cell as a double (for legacy code) * * @param rowIndex the row * @param columnIndex the column * @return the value at a particular cell as a double * @exception Exception if the value is not a double */ public final double getElement(int rowIndex, int columnIndex) throws Exception { if (!(m_matrix[rowIndex][columnIndex] instanceof Double)) { throw new Exception("Cost matrix contains non-fixed costs!"); } return ((Double) m_matrix[rowIndex][columnIndex]).doubleValue(); } /** * Return the value of a cell as a double. Computes the value for non-fixed * costs using the supplied Instance * * @param rowIndex the row * @param columnIndex the column * @return the value from a particular cell * @exception Exception if something goes wrong */ public final double getElement(int rowIndex, int columnIndex, Instance inst) throws Exception { if (m_matrix[rowIndex][columnIndex] instanceof Double) { return ((Double) m_matrix[rowIndex][columnIndex]).doubleValue(); } else if (m_matrix[rowIndex][columnIndex] instanceof String) { replaceStrings(inst.dataset()); } return ((InstanceExpression) m_matrix[rowIndex][columnIndex]) .evaluate(inst); } /** * Set the value of a cell as a double * * @param rowIndex the row * @param columnIndex the column * @param value the value (double) to set */ public final void setElement(int rowIndex, int columnIndex, double value) { m_matrix[rowIndex][columnIndex] = new Double(value); } /** * Converts a matrix to a string. (FracPete: taken from old weka.core.Matrix * class) * * @return the converted string */ @Override public String toString() { // Determine the width required for the maximum element, // and check for fractional display requirement. 
double maxval = 0; boolean fractional = false; Object element = null; int widthNumber = 0; int widthExpression = 0; for (int i = 0; i < size(); i++) { for (int j = 0; j < size(); j++) { element = getCell(i, j); if (element instanceof Double) { double current = ((Double) element).doubleValue(); if (current < 0) current *= -11; if (current > maxval) maxval = current; double fract = Math.abs(current - Math.rint(current)); if (!fractional && ((Math.log(fract) / Math.log(10)) >= -2)) { fractional = true; } } else { if (element.toString().length() > widthExpression) { widthExpression = element.toString().length(); } } } } if (maxval > 0) { widthNumber = (int) (Math.log(maxval) / Math.log(10) + (fractional ? 4 : 1)); } int width = (widthNumber > widthExpression) ? widthNumber : widthExpression; StringBuffer text = new StringBuffer(); for (int i = 0; i < size(); i++) { for (int j = 0; j < size(); j++) { element = getCell(i, j); if (element instanceof Double) { text.append(" ").append( Utils.doubleToString(((Double) element).doubleValue(), width, (fractional ? 2 : 0))); } else { int diff = width - element.toString().length(); if (diff > 0) { int left = diff % 2; left += diff / 2; String temp = Utils.padLeft(element.toString(), element.toString().length() + left); temp = Utils.padRight(temp, width); text.append(" ").append(temp); } else { text.append(" ").append(element.toString()); } } } text.append("\n"); } return text.toString(); } /** * Returns the revision string. 
* * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } private static class InstanceExpression { private final DoubleExpression m_compiledExpression; private final String m_expression; private final InstancesHelper m_instancesHelper; public InstanceExpression(String expression, Instances dataset) throws Exception { this.m_expression = expression; m_instancesHelper = new InstancesHelper(dataset); Node node = Parser.parse( // expression expression, // variables m_instancesHelper, // marcos new MacroDeclarationsCompositor(m_instancesHelper, new MathFunctions(), new IfElseMacro(), new JavaMacro())); if (!(node instanceof DoubleExpression)) throw new Exception("Expression must be of double type!"); m_compiledExpression = (DoubleExpression) node; } public double evaluate(Instance inst) { m_instancesHelper.setInstance(inst); return m_compiledExpression.evaluate(); } @Override public String toString() { return m_expression; } } }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/Evaluation.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Evaluation.java * Copyright (C) 2011-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers; import java.io.Serializable; import java.util.ArrayList; import java.util.List; import java.util.Random; import weka.classifiers.evaluation.AbstractEvaluationMetric; import weka.classifiers.evaluation.Prediction; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.Summarizable; /** * Class for evaluating machine learning models. Delegates to the actual * implementation in weka.classifiers.evaluation.Evaluation. * * <p/> * * ------------------------------------------------------------------- * <p/> * * General options when evaluating a learning scheme from the command-line: * <p/> * * -t filename <br/> * Name of the file with the training data. (required) * <p/> * * -T filename <br/> * Name of the file with the test data. If missing a cross-validation is * performed. * <p/> * * -c index <br/> * Index of the class attribute (1, 2, ...; default: last). * <p/> * * -x number <br/> * The number of folds for the cross-validation (default: 10). * <p/> * * -no-cv <br/> * No cross validation. If no test file is provided, no evaluation is done. * <p/> * * -split-percentage percentage <br/> * Sets the percentage for the train/test set split, e.g., 66. 
* <p/> * * -preserve-order <br/> * Preserves the order in the percentage split instead of randomizing the data * first with the seed value ('-s'). * <p/> * * -s seed <br/> * Random number seed for the cross-validation and percentage split (default: * 1). * <p/> * * -m filename <br/> * The name of a file containing a cost matrix. * <p/> * * -l filename <br/> * Loads classifier from the given file. In case the filename ends with ".xml", * a PMML file is loaded or, if that fails, options are loaded from XML. * <p/> * * -d filename <br/> * Saves classifier built from the training data into the given file. In case * the filename ends with ".xml" the options are saved XML, not the model. * <p/> * * -v <br/> * Outputs no statistics for the training data. * <p/> * * -o <br/> * Outputs statistics only, not the classifier. * <p/> * * -i <br/> * Outputs information-retrieval statistics per class. * <p/> * * -k <br/> * Outputs information-theoretic statistics. * <p/> * * -classifications * "weka.classifiers.evaluation.output.prediction.AbstractOutput + options" <br/> * Uses the specified class for generating the classification output. E.g.: * weka.classifiers.evaluation.output.prediction.PlainText or : * weka.classifiers.evaluation.output.prediction.CSV * * -p range <br/> * Outputs predictions for test instances (or the train instances if no test * instances provided and -no-cv is used), along with the attributes in the * specified range (and nothing else). Use '-p 0' if no attributes are desired. * <p/> * Deprecated: use "-classifications ..." instead. * <p/> * * -distribution <br/> * Outputs the distribution instead of only the prediction in conjunction with * the '-p' option (only nominal classes). * <p/> * Deprecated: use "-classifications ..." instead. * <p/> * * -no-predictions <br/> * Turns off the collection of predictions in order to conserve memory. * <p/> * * -r <br/> * Outputs cumulative margin distribution (and nothing else). 
* <p/> * * -g <br/> * Only for classifiers that implement "Graphable." Outputs the graph * representation of the classifier (and nothing else). * <p/> * * -xml filename | xml-string <br/> * Retrieves the options from the XML-data instead of the command line. * <p/> * * -threshold-file file <br/> * The file to save the threshold data to. The format is determined by the * extensions, e.g., '.arff' for ARFF format or '.csv' for CSV. * <p/> * * -threshold-label label <br/> * The class label to determine the threshold data for (default is the first * label) * <p/> * * ------------------------------------------------------------------- * <p/> * * Example usage as the main of a classifier (called FunkyClassifier): * <code> <pre> * public static void main(String [] args) { * runClassifier(new FunkyClassifier(), args); * } * </pre> </code> * <p/> * * ------------------------------------------------------------------ * <p/> * * Example usage from within an application: <code> <pre> * Instances trainInstances = ... instances got from somewhere * Instances testInstances = ... instances got from somewhere * Classifier scheme = ... 
scheme got from somewhere * * Evaluation evaluation = new Evaluation(trainInstances); * evaluation.evaluateModel(scheme, testInstances); * System.out.println(evaluation.toSummaryString()); * </pre> </code> * * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Len Trigg (trigg@cs.waikato.ac.nz) * @version $Revision$ */ public class Evaluation implements Serializable, Summarizable, RevisionHandler { /** For serialization */ private static final long serialVersionUID = -170766452472965668L; public static final String[] BUILT_IN_EVAL_METRICS = weka.classifiers.evaluation.Evaluation.BUILT_IN_EVAL_METRICS; /** The actual evaluation object that we delegate to */ protected weka.classifiers.evaluation.Evaluation m_delegate; /** * Utility method to get a list of the names of all built-in and plugin * evaluation metrics * * @return the complete list of available evaluation metrics */ public static List<String> getAllEvaluationMetricNames() { return weka.classifiers.evaluation.Evaluation.getAllEvaluationMetricNames(); } public Evaluation(Instances data) throws Exception { m_delegate = new weka.classifiers.evaluation.Evaluation(data); } public Evaluation(Instances data, CostMatrix costMatrix) throws Exception { m_delegate = new weka.classifiers.evaluation.Evaluation(data, costMatrix); } /** * Returns the header of the underlying dataset. 
* * @return the header information */ public Instances getHeader() { return m_delegate.getHeader(); } /** * Returns the list of plugin metrics in use (or null if there are none) * * @return the list of plugin metrics */ public List<AbstractEvaluationMetric> getPluginMetrics() { return m_delegate.getPluginMetrics(); } /** * Get the named plugin evaluation metric * * @param name the name of the metric (as returned by * AbstractEvaluationMetric.getName()) or the fully qualified class * name of the metric to find * * @return the metric or null if the metric is not in the list of plugin * metrics */ public AbstractEvaluationMetric getPluginMetric(String name) { return m_delegate.getPluginMetric(name); } /** * Set a list of the names of metrics to have appear in the output. The * default is to display all built in metrics and plugin metrics that haven't * been globally disabled. * * @param display a list of metric names to have appear in the output */ public void setMetricsToDisplay(List<String> display) { m_delegate.setMetricsToDisplay(display); } /** * Get a list of the names of metrics to have appear in the output The default * is to display all built in metrics and plugin metrics that haven't been * globally disabled. * * @return a list of metric names to have appear in the output */ public List<String> getMetricsToDisplay() { return m_delegate.getMetricsToDisplay(); } /** * Toggle the output of the metrics specified in the supplied list. * * @param metricsToToggle a list of metrics to toggle */ public void toggleEvalMetrics(List<String> metricsToToggle) { m_delegate.toggleEvalMetrics(metricsToToggle); } /** * Sets whether to discard predictions, ie, not storing them for future * reference via predictions() method in order to conserve memory. 
* * @param value true if to discard the predictions * @see #predictions() */ public void setDiscardPredictions(boolean value) { m_delegate.setDiscardPredictions(value); } /** * Returns whether predictions are not recorded at all, in order to conserve * memory. * * @return true if predictions are not recorded * @see #predictions() */ public boolean getDiscardPredictions() { return m_delegate.getDiscardPredictions(); } /** * Returns the area under ROC for those predictions that have been collected * in the evaluateClassifier(Classifier, Instances) method. Returns * Utils.missingValue() if the area is not available. * * @param classIndex the index of the class to consider as "positive" * @return the area under the ROC curve or not a number */ public double areaUnderROC(int classIndex) { return m_delegate.areaUnderROC(classIndex); } /** * Calculates the weighted (by class size) AUC. * * @return the weighted AUC. */ public double weightedAreaUnderROC() { return m_delegate.weightedAreaUnderROC(); } /** * Returns the area under precision-recall curve (AUPRC) for those predictions * that have been collected in the evaluateClassifier(Classifier, Instances) * method. Returns Utils.missingValue() if the area is not available. * * @param classIndex the index of the class to consider as "positive" * @return the area under the precision-recall curve or not a number */ public double areaUnderPRC(int classIndex) { return m_delegate.areaUnderPRC(classIndex); } /** * Calculates the weighted (by class size) AUPRC. * * @return the weighted AUPRC. */ public double weightedAreaUnderPRC() { return m_delegate.weightedAreaUnderPRC(); } /** * Returns a copy of the confusion matrix. * * @return a copy of the confusion matrix as a two-dimensional array */ public double[][] confusionMatrix() { return m_delegate.confusionMatrix(); } /** * Performs a (stratified if class is nominal) cross-validation for a * classifier on a set of instances. 
Now performs a deep copy of the * classifier before each call to buildClassifier() (just in case the * classifier is not initialized properly). * * @param classifier the classifier with any options set. * @param data the data on which the cross-validation is to be performed * @param numFolds the number of folds for the cross-validation * @param random random number generator for randomization * @throws Exception if a classifier could not be generated successfully or * the class is not defined */ public void crossValidateModel(Classifier classifier, Instances data, int numFolds, Random random) throws Exception { m_delegate.crossValidateModel(classifier, data, numFolds, random); } /** * Performs a (stratified if class is nominal) cross-validation for a * classifier on a set of instances. Now performs a deep copy of the * classifier before each call to buildClassifier() (just in case the * classifier is not initialized properly). * * @param classifier the classifier with any options set. * @param data the data on which the cross-validation is to be performed * @param numFolds the number of folds for the cross-validation * @param random random number generator for randomization * @param forPredictionsPrinting varargs parameter that, if supplied, is * expected to hold a * weka.classifiers.evaluation.output.prediction.AbstractOutput * object * @throws Exception if a classifier could not be generated successfully or * the class is not defined */ public void crossValidateModel(Classifier classifier, Instances data, int numFolds, Random random, Object... forPredictionsPrinting) throws Exception { m_delegate.crossValidateModel(classifier, data, numFolds, random, forPredictionsPrinting); } /** * Performs a (stratified if class is nominal) cross-validation for a * classifier on a set of instances. 
 *
   * @param classifierString a string naming the class of the classifier
   * @param data the data on which the cross-validation is to be performed
   * @param numFolds the number of folds for the cross-validation
   * @param options the options to the classifier. Any options accepted by the
   *          classifier will be removed from this array.
   * @param random the random number generator for randomizing the data
   * @throws Exception if a classifier could not be generated successfully or
   *           the class is not defined
   */
  public void crossValidateModel(String classifierString, Instances data,
    int numFolds, String[] options, Random random) throws Exception {
    m_delegate.crossValidateModel(classifierString, data, numFolds, options,
      random);
  }

  /**
   * Evaluates a classifier with the options given in an array of strings.
   * <p/>
   * See the class-level documentation of this class for the full description
   * of the valid command-line options (-t, -T, -c, -x, -no-cv,
   * -split-percentage, -preserve-order, -s, -m, -l, -d, -v, -o, -i, -k,
   * -classifications, -p, -distribution, -no-predictions, -r, -g, -xml,
   * -threshold-file, -threshold-label).
   * <p/>
   *
   * @param classifierString class of machine learning classifier as a string
   * @param options the array of string containing the options
   * @throws Exception if model could not be evaluated successfully
   * @return a string describing the results
   */
  public static String evaluateModel(String classifierString, String[] options)
    throws Exception {
    return weka.classifiers.evaluation.Evaluation.evaluateModel(
      classifierString, options);
  }

  /**
   * Evaluates a classifier with the options given in an array of strings.
   * <p/>
   * See the class-level documentation of this class for the full description
   * of the valid command-line options (-t, -T, -c, -x, -no-cv,
   * -split-percentage, -preserve-order, -s, -m, -l, -d, -v, -o, -i, -k,
   * -classifications, -p, -distribution, -no-predictions, -r, -g, -xml,
   * -threshold-file, -threshold-label).
   * <p/>
   *
   * @param classifier machine learning classifier
   * @param options the array of string containing the options
   * @throws Exception if model could not be evaluated successfully
   * @return a string describing the results
   */
  public static String evaluateModel(Classifier classifier, String[] options)
    throws Exception {
    return weka.classifiers.evaluation.Evaluation.evaluateModel(classifier,
      options);
  }

  /**
   * Evaluates the classifier on a given set of instances. Note that the data
   * must have exactly the same format (e.g. order of attributes) as the data
   * used to train the classifier! Otherwise the results will generally be
   * meaningless.
   *
   * @param classifier machine learning classifier
   * @param data set of test instances for evaluation
   * @param forPredictionsPrinting varargs parameter that, if supplied, is
   *          expected to hold a
   *          weka.classifiers.evaluation.output.prediction.AbstractOutput
   *          object
   * @return the predictions
   * @throws Exception if model could not be evaluated successfully
   */
  public double[] evaluateModel(Classifier classifier, Instances data,
    Object... forPredictionsPrinting) throws Exception {
    return m_delegate.evaluateModel(classifier, data, forPredictionsPrinting);
  }

  /**
   * Evaluates the supplied distribution on a single instance.
   *
   * @param dist the supplied distribution
   * @param instance the test instance to be classified
   * @param storePredictions whether to store predictions for nominal classifier
   * @return the prediction
   * @throws Exception if model could not be evaluated successfully
   */
  public double evaluationForSingleInstance(double[] dist, Instance instance,
    boolean storePredictions) throws Exception {
    return m_delegate.evaluationForSingleInstance(dist, instance,
      storePredictions);
  }

  /**
   * Evaluates the classifier on a single instance and records the prediction.
   * @param classifier machine learning classifier
   * @param instance the test instance to be classified
   * @return the prediction made by the classifier
   * @throws Exception if model could not be evaluated successfully or the data
   *           contains string attributes
   */
  public double evaluateModelOnceAndRecordPrediction(Classifier classifier,
    Instance instance) throws Exception {
    return m_delegate
      .evaluateModelOnceAndRecordPrediction(classifier, instance);
  }

  /**
   * Evaluates the classifier on a single instance.
   *
   * @param classifier machine learning classifier
   * @param instance the test instance to be classified
   * @return the prediction made by the classifier
   * @throws Exception if model could not be evaluated successfully or the data
   *           contains string attributes
   */
  public double evaluateModelOnce(Classifier classifier, Instance instance)
    throws Exception {
    return m_delegate.evaluateModelOnce(classifier, instance);
  }

  /**
   * Evaluates the supplied distribution on a single instance.
   *
   * @param dist the supplied distribution
   * @param instance the test instance to be classified
   * @return the prediction
   * @throws Exception if model could not be evaluated successfully
   */
  public double evaluateModelOnce(double[] dist, Instance instance)
    throws Exception {
    return m_delegate.evaluateModelOnce(dist, instance);
  }

  /**
   * Evaluates the supplied distribution on a single instance and records the
   * prediction.
   *
   * @param dist the supplied distribution
   * @param instance the test instance to be classified
   * @return the prediction
   * @throws Exception if model could not be evaluated successfully
   */
  public double evaluateModelOnceAndRecordPrediction(double[] dist,
    Instance instance) throws Exception {
    return m_delegate.evaluateModelOnceAndRecordPrediction(dist, instance);
  }

  /**
   * Evaluates the supplied prediction on a single instance.
   *
   * @param prediction the supplied prediction
   * @param instance the test instance to be classified
   * @throws Exception if model could not be evaluated successfully
   */
  public void evaluateModelOnce(double prediction, Instance instance)
    throws Exception {
    m_delegate.evaluateModelOnce(prediction, instance);
  }

  /**
   * Returns the predictions that have been collected.
   *
   * @return a reference to the list containing the predictions that have been
   *         collected, or null if no predictions have been collected
   */
  public ArrayList<Prediction> predictions() {
    return m_delegate.predictions();
  }

  /**
   * Wraps a static classifier in enough source to test using the weka class
   * libraries.
   *
   * @param classifier a Sourcable Classifier
   * @param className the name to give to the source code class
   * @return the source for a static classifier that can be tested with weka
   *         libraries
   * @throws Exception if code-generation fails
   */
  public static String wekaStaticWrapper(Sourcable classifier, String className)
    throws Exception {
    return weka.classifiers.evaluation.Evaluation.wekaStaticWrapper(classifier,
      className);
  }

  /**
   * Gets the number of test instances that had a known class value (actually
   * the sum of the weights of test instances with known class value).
   *
   * @return the number of test instances with known class
   */
  public final double numInstances() {
    return m_delegate.numInstances();
  }

  /**
   * Gets the coverage of the test cases by the predicted regions at the
   * confidence level specified when evaluation was performed.
   *
   * @return the coverage of the test cases by the predicted regions
   */
  public final double coverageOfTestCasesByPredictedRegions() {
    return m_delegate.coverageOfTestCasesByPredictedRegions();
  }

  /**
   * Gets the average size of the predicted regions, relative to the range of
   * the target in the training data, at the confidence level specified when
   * evaluation was performed.
   *
   * @return the average size of the predicted regions
   */
  public final double sizeOfPredictedRegions() {
    return m_delegate.sizeOfPredictedRegions();
  }

  /**
   * Gets the number of instances incorrectly classified (that is, for which an
   * incorrect prediction was made). (Actually the sum of the weights of these
   * instances.)
   *
   * @return the number of incorrectly classified instances
   */
  public final double incorrect() {
    return m_delegate.incorrect();
  }

  /**
   * Gets the percentage of instances incorrectly classified (that is, for
   * which an incorrect prediction was made).
   *
   * @return the percent of incorrectly classified instances (between 0 and 100)
   */
  public final double pctIncorrect() {
    return m_delegate.pctIncorrect();
  }

  /**
   * Gets the total cost, that is, the cost of each prediction times the weight
   * of the instance, summed over all instances.
   *
   * @return the total cost
   */
  public final double totalCost() {
    return m_delegate.totalCost();
  }

  /**
   * Gets the average cost, that is, total cost of misclassifications
   * (incorrect plus unclassified) over the total number of instances.
   *
   * @return the average cost
   */
  public final double avgCost() {
    return m_delegate.avgCost();
  }

  /**
   * Gets the number of instances correctly classified (that is, for which a
   * correct prediction was made). (Actually the sum of the weights of these
   * instances.)
   *
   * @return the number of correctly classified instances
   */
  public final double correct() {
    return m_delegate.correct();
  }

  /**
   * Gets the percentage of instances correctly classified (that is, for which
   * a correct prediction was made).
   *
   * @return the percent of correctly classified instances (between 0 and 100)
   */
  public final double pctCorrect() {
    return m_delegate.pctCorrect();
  }

  /**
   * Gets the number of instances not classified (that is, for which no
   * prediction was made by the classifier). (Actually the sum of the weights
   * of these instances.)
   *
   * @return the number of unclassified instances
   */
  public final double unclassified() {
    return m_delegate.unclassified();
  }

  /**
   * Gets the percentage of instances not classified (that is, for which no
   * prediction was made by the classifier).
   *
   * @return the percent of unclassified instances (between 0 and 100)
   */
  public final double pctUnclassified() {
    return m_delegate.pctUnclassified();
  }

  /**
   * Returns the estimated error rate or the root mean squared error (if the
   * class is numeric). If a cost matrix was given, this error rate gives the
   * average cost.
   *
   * @return the estimated error rate (between 0 and 1, or between 0 and
   *         maximum cost)
   */
  public final double errorRate() {
    return m_delegate.errorRate();
  }

  /**
   * Returns value of kappa statistic if class is nominal.
   *
   * @return the value of the kappa statistic
   */
  public final double kappa() {
    return m_delegate.kappa();
  }

  @Override
  public String getRevision() {
    return m_delegate.getRevision();
  }

  /**
   * Returns the correlation coefficient if the class is numeric.
   *
   * @return the correlation coefficient
   * @throws Exception if class is not numeric
   */
  public final double correlationCoefficient() throws Exception {
    return m_delegate.correlationCoefficient();
  }

  /**
   * Returns the mean absolute error. Refers to the error of the predicted
   * values for numeric classes, and the error of the predicted probability
   * distribution for nominal classes.
   *
   * @return the mean absolute error
   */
  public final double meanAbsoluteError() {
    return m_delegate.meanAbsoluteError();
  }

  /**
   * Returns the mean absolute error of the prior.
   *
   * @return the mean absolute error
   */
  public final double meanPriorAbsoluteError() {
    return m_delegate.meanPriorAbsoluteError();
  }

  /**
   * Returns the relative absolute error.
   *
   * @return the relative absolute error
   * @throws Exception if it can't be computed
   */
  public final double relativeAbsoluteError() throws Exception {
    return m_delegate.relativeAbsoluteError();
  }

  /**
   * Returns the root mean squared error.
   *
   * @return the root mean squared error
   */
  public final double rootMeanSquaredError() {
    return m_delegate.rootMeanSquaredError();
  }

  /**
   * Returns the root mean prior squared error.
   *
   * @return the root mean prior squared error
   */
  public final double rootMeanPriorSquaredError() {
    return m_delegate.rootMeanPriorSquaredError();
  }

  /**
   * Returns the root relative squared error if the class is numeric.
   *
   * @return the root relative squared error
   */
  public final double rootRelativeSquaredError() {
    return m_delegate.rootRelativeSquaredError();
  }

  /**
   * Calculate the entropy of the prior distribution.
   *
   * @return the entropy of the prior distribution
   * @throws Exception if the class is not nominal
   */
  public final double priorEntropy() throws Exception {
    return m_delegate.priorEntropy();
  }

  /**
   * Return the total Kononenko &amp; Bratko Information score in bits.
   *
   * @return the K&amp;B information score
   * @throws Exception if the class is not nominal
   */
  public final double KBInformation() throws Exception {
    return m_delegate.KBInformation();
  }

  /**
   * Return the Kononenko &amp; Bratko Information score in bits per instance.
   *
   * @return the K&amp;B information score
   * @throws Exception if the class is not nominal
   */
  public final double KBMeanInformation() throws Exception {
    return m_delegate.KBMeanInformation();
  }

  /**
   * Return the Kononenko &amp; Bratko Relative Information score.
   *
   * @return the K&amp;B relative information score
   * @throws Exception if the class is not nominal
   */
  public final double KBRelativeInformation() throws Exception {
    return m_delegate.KBRelativeInformation();
  }

  /**
   * Returns the total entropy for the null model.
   *
   * @return the total null model entropy
   */
  public final double SFPriorEntropy() {
    return m_delegate.SFPriorEntropy();
  }

  /**
   * Returns the entropy per instance for the null model.
   *
   * @return the null model entropy per instance
   */
  public final double SFMeanPriorEntropy() {
    return m_delegate.SFMeanPriorEntropy();
  }

  /**
   * Returns the total entropy for the scheme.
   *
   * @return the total scheme entropy
   */
  public final double SFSchemeEntropy() {
    return m_delegate.SFSchemeEntropy();
  }

  /**
   * Returns the entropy per instance for the scheme.
   *
   * @return the scheme entropy per instance
   */
  public final double SFMeanSchemeEntropy() {
    return m_delegate.SFMeanSchemeEntropy();
  }

  /**
   * Returns the total SF, which is the null model entropy minus the scheme
   * entropy.
   *
   * @return the total SF
   */
  public final double SFEntropyGain() {
    return m_delegate.SFEntropyGain();
  }

  /**
   * Returns the SF per instance, which is the null model entropy minus the
   * scheme entropy, per instance.
   *
   * @return the SF per instance
   */
  public final double SFMeanEntropyGain() {
    return m_delegate.SFMeanEntropyGain();
  }

  /**
   * Output the cumulative margin distribution as a string suitable for input
   * for gnuplot or similar package.
   *
   * @return the cumulative margin distribution
   * @throws Exception if the class attribute is nominal
   */
  public String toCumulativeMarginDistributionString() throws Exception {
    return m_delegate.toCumulativeMarginDistributionString();
  }

  /**
   * Calls toSummaryString() with no title and no complexity stats.
   *
   * @return a summary description of the classifier evaluation
   */
  @Override
  public String toSummaryString() {
    return m_delegate.toSummaryString();
  }

  /**
   * Calls toSummaryString() with a default title.
   *
   * @param printComplexityStatistics if true, complexity statistics are
   *          returned as well
   * @return the summary string
   */
  public String toSummaryString(boolean printComplexityStatistics) {
    return m_delegate.toSummaryString(printComplexityStatistics);
  }

  /**
   * Outputs the performance statistics in summary form. Lists number (and
   * percentage) of instances classified correctly, incorrectly and
   * unclassified. Outputs the total number of instances classified, and the
   * number of instances (if any) that had no class value provided.
   *
   * @param title the title for the statistics
   * @param printComplexityStatistics if true, complexity statistics are
   *          returned as well
   * @return the summary as a String
   */
  public String toSummaryString(String title, boolean printComplexityStatistics) {
    return m_delegate.toSummaryString(title, printComplexityStatistics);
  }

  /**
   * Calls toMatrixString() with a default title.
   *
   * @return the confusion matrix as a string
   * @throws Exception if the class is numeric
   */
  public String toMatrixString() throws Exception {
    return m_delegate.toMatrixString();
  }

  /**
   * Outputs the performance statistics as a classification confusion matrix.
   * For each class value, shows the distribution of predicted class values.
   *
   * @param title the title for the confusion matrix
   * @return the confusion matrix as a String
   * @throws Exception if the class is numeric
   */
  public String toMatrixString(String title) throws Exception {
    return m_delegate.toMatrixString(title);
  }

  /**
   * Generates a breakdown of the accuracy for each class (with default title),
   * incorporating various information-retrieval statistics, such as true/false
   * positive rate, precision/recall/F-Measure. Should be useful for ROC
   * curves, recall/precision curves.
   *
   * @return the statistics presented as a string
   * @throws Exception if class is not nominal
   */
  public String toClassDetailsString() throws Exception {
    return m_delegate.toClassDetailsString();
  }

  /**
   * Generates a breakdown of the accuracy for each class, incorporating
   * various information-retrieval statistics, such as true/false positive
   * rate, precision/recall/F-Measure. Should be useful for ROC curves,
   * recall/precision curves.
   *
   * @param title the title to prepend the stats string with
   * @return the statistics presented as a string
   * @throws Exception if class is not nominal
   */
  public String toClassDetailsString(String title) throws Exception {
    return m_delegate.toClassDetailsString(title);
  }

  /**
   * Calculate the number of true positives with respect to a particular class,
   * i.e. the weight of correctly classified positives.
   *
   * @param classIndex the index of the class to consider as "positive"
   * @return the number of true positives
   */
  public double numTruePositives(int classIndex) {
    return m_delegate.numTruePositives(classIndex);
  }

  /**
   * Calculate the true positive rate with respect to a particular class:
   * correctly classified positives / total positives.
   *
   * @param classIndex the index of the class to consider as "positive"
   * @return the true positive rate
   */
  public double truePositiveRate(int classIndex) {
    return m_delegate.truePositiveRate(classIndex);
  }

  /**
   * Calculates the weighted (by class size) true positive rate.
   *
   * @return the weighted true positive rate
   */
  public double weightedTruePositiveRate() {
    return m_delegate.weightedTruePositiveRate();
  }

  /**
   * Calculate the number of true negatives with respect to a particular class,
   * i.e. the weight of correctly classified negatives.
   *
   * @param classIndex the index of the class to consider as "positive"
   * @return the number of true negatives
   */
  public double numTrueNegatives(int classIndex) {
    return m_delegate.numTrueNegatives(classIndex);
  }

  /**
   * Calculate the true negative rate with respect to a particular class:
   * correctly classified negatives / total negatives.
   *
   * @param classIndex the index of the class to consider as "positive"
   * @return the true negative rate
   */
  public double trueNegativeRate(int classIndex) {
    return m_delegate.trueNegativeRate(classIndex);
  }

  /**
   * Calculates the weighted (by class size) true negative rate.
   *
   * @return the weighted true negative rate
   */
  public double weightedTrueNegativeRate() {
    return m_delegate.weightedTrueNegativeRate();
  }

  /**
   * Calculate number of false positives with respect to a particular class,
   * i.e. the weight of incorrectly classified negatives.
   *
   * @param classIndex the index of the class to consider as "positive"
   * @return the number of false positives
   */
  public double numFalsePositives(int classIndex) {
    return m_delegate.numFalsePositives(classIndex);
  }

  /**
   * Calculate the false positive rate with respect to a particular class:
   * incorrectly classified negatives / total negatives.
   *
   * @param classIndex the index of the class to consider as "positive"
   * @return the false positive rate
   */
  public double falsePositiveRate(int classIndex) {
    return m_delegate.falsePositiveRate(classIndex);
  }

  /**
   * Calculates the weighted (by class size) false positive rate.
   *
   * @return the weighted false positive rate
   */
  public double weightedFalsePositiveRate() {
    return m_delegate.weightedFalsePositiveRate();
  }

  /**
   * Calculate number of false negatives with respect to a particular class,
   * i.e. the weight of incorrectly classified positives.
   *
   * @param classIndex the index of the class to consider as "positive"
   * @return the number of false negatives
   */
  public double numFalseNegatives(int classIndex) {
    return m_delegate.numFalseNegatives(classIndex);
  }

  /**
   * Calculate the false negative rate with respect to a particular class:
   * incorrectly classified positives / total positives.
   *
   * @param classIndex the index of the class to consider as "positive"
   * @return the false negative rate
   */
  public double falseNegativeRate(int classIndex) {
    return m_delegate.falseNegativeRate(classIndex);
  }

  /**
   * Calculates the weighted (by class size) false negative rate.
   *
   * @return the weighted false negative rate
   */
  public double weightedFalseNegativeRate() {
    return m_delegate.weightedFalseNegativeRate();
  }

  /**
   * Calculates the Matthews correlation coefficient (sometimes called phi
   * coefficient) for the supplied class.
   *
   * @param classIndex the index of the class to compute the Matthews
   *          correlation coefficient for
   * @return the Matthews correlation coefficient
   */
  public double matthewsCorrelationCoefficient(int classIndex) {
    return m_delegate.matthewsCorrelationCoefficient(classIndex);
  }

  /**
   * Calculates the weighted (by class size) Matthews correlation coefficient.
   *
   * @return the weighted Matthews correlation coefficient
   */
  public double weightedMatthewsCorrelation() {
    return m_delegate.weightedMatthewsCorrelation();
  }

  /**
   * Calculate the recall with respect to a particular class: correctly
   * classified positives / total positives. (Which is also the same as the
   * truePositiveRate.)
   *
   * @param classIndex the index of the class to consider as "positive"
   * @return the recall
   */
  public double recall(int classIndex) {
    return m_delegate.recall(classIndex);
  }

  /**
   * Calculates the weighted (by class size) recall.
   *
   * @return the weighted recall
   */
  public double weightedRecall() {
    return m_delegate.weightedRecall();
  }

  /**
   * Calculate the precision with respect to a particular class: correctly
   * classified positives / total predicted as positive.
   *
   * @param classIndex the index of the class to consider as "positive"
   * @return the precision
   */
  public double precision(int classIndex) {
    return m_delegate.precision(classIndex);
  }

  /**
   * Calculates the weighted (by class size) precision.
   *
   * @return the weighted precision
   */
  public double weightedPrecision() {
    return m_delegate.weightedPrecision();
  }

  /**
   * Calculate the F-Measure with respect to a particular class:
   * 2 * recall * precision / (recall + precision).
   *
   * @param classIndex the index of the class to consider as "positive"
   * @return the F-Measure
   */
  public double fMeasure(int classIndex) {
    return m_delegate.fMeasure(classIndex);
  }

  /**
   * Calculates the macro weighted (by class size) average F-Measure.
   *
   * @return the weighted F-Measure
   */
  public double weightedFMeasure() {
    return m_delegate.weightedFMeasure();
  }

  /**
   * Unweighted macro-averaged F-measure. If some classes are not present in
   * the test set, they're just skipped (since recall is undefined there
   * anyway).
   *
   * @return unweighted macro-averaged F-measure
   */
  public double unweightedMacroFmeasure() {
    return m_delegate.unweightedMacroFmeasure();
  }

  /**
   * Unweighted micro-averaged F-measure. If some classes are not present in
   * the test set, they have no effect.
   *
   * Note: if the test set is *single-label*, then this is the same as
   * accuracy.
   *
   * @return unweighted micro-averaged F-measure
   */
  public double unweightedMicroFmeasure() {
    return m_delegate.unweightedMicroFmeasure();
  }

  /**
   * Sets the class prior probabilities.
   *
   * @param train the training instances used to determine the prior
   *          probabilities
   * @throws Exception if the class attribute of the instances is not set
   */
  public void setPriors(Instances train) throws Exception {
    m_delegate.setPriors(train);
  }

  /**
   * Get the current weighted class counts.
   *
   * @return the weighted class counts
   */
  public double[] getClassPriors() {
    return m_delegate.getClassPriors();
  }

  /**
   * Updates the class prior probabilities or the mean respectively (when
   * incrementally training).
   *
   * @param instance the new training instance seen
   * @throws Exception if the class of the instance is not set
   */
  public void updatePriors(Instance instance) throws Exception {
    m_delegate.updatePriors(instance);
  }

  /**
   * Disables the use of priors, e.g., in case of de-serialized schemes that
   * have no access to the original training set, but are evaluated on a test
   * set.
   */
  public void useNoPriors() {
    m_delegate.useNoPriors();
  }

  /**
   * Tests whether the current evaluation object is equal to another evaluation
   * object. Unwraps the delegate when comparing against another wrapper.
   *
   * NOTE(review): equals() is overridden here without a matching hashCode()
   * override — instances of this class should not be used as hash-based
   * collection keys; confirm whether a delegating hashCode() is needed.
   *
   * @param obj the object to compare against
   * @return true if the two objects are equal
   */
  @Override
  public boolean equals(Object obj) {
    if (obj instanceof weka.classifiers.Evaluation) {
      obj = ((weka.classifiers.Evaluation) obj).m_delegate;
    }
    return m_delegate.equals(obj);
  }

  /**
   * A test method for this class. Just extracts the first command line
   * argument as a classifier class name and calls evaluateModel.
   *
   * @param args an array of command line arguments, the first of which must be
   *          the class name of a classifier
   */
  public static void main(String[] args) {
    try {
      if (args.length == 0) {
        throw new Exception("The first argument must be the class name"
          + " of a classifier");
      }
      String classifier = args[0];
      args[0] = "";
      System.out.println(evaluateModel(classifier, args));
    } catch (Exception ex) {
      ex.printStackTrace();
      System.err.println(ex.getMessage());
    }
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/IntervalEstimator.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * IntervalEstimator.java
 * Copyright (C) 2005-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import weka.core.Instance;

/**
 * Interface for numeric prediction schemes that can output prediction
 * intervals (e.g. for regressors that model predictive uncertainty).
 *
 * @author Kurt Driessens (kurtd@cs.waikato.ac.nz)
 * @version $Revision$
 */
public interface IntervalEstimator {

  /**
   * Returns an N * 2 array, where N is the number of prediction intervals. In
   * each row, the first element contains the lower boundary of the
   * corresponding prediction interval and the second element the upper
   * boundary.
   *
   * @param inst the instance to make the prediction for
   * @param confidenceLevel the percentage of cases that the interval should
   *          cover
   * @return an array of prediction intervals
   * @exception Exception if the intervals can't be computed
   */
  double[][] predictIntervals(Instance inst, double confidenceLevel)
    throws Exception;
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/IteratedSingleClassifierEnhancer.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * IteratedSingleClassifierEnhancer.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;

import weka.core.Instances;
import weka.core.Option;
import weka.core.Utils;

/**
 * Abstract utility class for handling settings common to meta classifiers
 * that build an ensemble from a single base learner.
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision$
 */
public abstract class IteratedSingleClassifierEnhancer extends
  SingleClassifierEnhancer {

  /** for serialization */
  private static final long serialVersionUID = -6217979135443319724L;

  /** Array for storing the generated base classifiers. */
  protected Classifier[] m_Classifiers;

  /** The number of iterations. */
  protected int m_NumIterations = defaultNumberOfIterations();

  /**
   * The default number of iterations to perform.
   *
   * @return the default iteration count (10)
   */
  protected int defaultNumberOfIterations() {
    return 10;
  }

  /**
   * Provides access to the array of generated base classifiers.
   *
   * @return the internal classifier array (not a copy)
   */
  public Classifier[] getM_Classifiers() {
    return m_Classifiers;
  }

  /**
   * Stump method for building the classifiers: validates that a base
   * classifier is set and populates the classifier array with copies of it.
   *
   * @param data the training data to be used for generating the bagged
   *          classifier
   * @exception Exception if the classifier could not be built successfully
   */
  @Override
  public void buildClassifier(final Instances data) throws Exception {
    if (m_Classifiers == null && m_Classifier == null) {
      throw new Exception("A base classifier has not been specified!");
    }
    if (m_Classifier == null) {
      throw new Exception("A base classifier has not been specified!");
    }
    m_Classifiers =
      AbstractClassifier.makeCopies(m_Classifier, m_NumIterations);
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> result = new Vector<Option>(2);
    result.addElement(new Option("\tNumber of iterations.\n"
      + "\t(current value " + getNumIterations() + ")", "I", 1, "-I <num>"));
    result.addAll(Collections.list(super.listOptions()));
    return result.elements();
  }

  /**
   * Parses a given list of options. Valid options are:<p>
   *
   * -W classname <br>
   * Specify the full class name of the base learner.<p>
   *
   * -I num <br>
   * Set the number of iterations (default 10). <p>
   *
   * Options after -- are passed to the designated classifier.<p>
   *
   * @param options the list of options as an array of strings
   * @exception Exception if an option is not supported
   */
  @Override
  public void setOptions(final String[] options) throws Exception {
    String numIterationsString = Utils.getOption('I', options);
    // Fall back to the default iteration count when -I is absent.
    if (numIterationsString.length() == 0) {
      setNumIterations(defaultNumberOfIterations());
    } else {
      setNumIterations(Integer.parseInt(numIterationsString));
    }
    super.setOptions(options);
  }

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {
    Vector<String> result = new Vector<String>();
    result.add("-I");
    result.add("" + getNumIterations());
    Collections.addAll(result, super.getOptions());
    return result.toArray(new String[0]);
  }

  /**
   * Returns the tip text for this property.
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String numIterationsTipText() {
    return "The number of iterations to be performed.";
  }

  /**
   * Sets the number of bagging iterations.
   *
   * @param numIterations the number of iterations to perform
   */
  public void setNumIterations(final int numIterations) {
    m_NumIterations = numIterations;
  }

  /**
   * Gets the number of bagging iterations.
   *
   * @return the maximum number of bagging iterations
   */
  public int getNumIterations() {
    return m_NumIterations;
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/IterativeClassifier.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * IterativeClassifier.java
 * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import weka.core.Instances;

/**
 * Interface for classifiers that can induce models of growing complexity one
 * step at a time. The expected life cycle is: initializeClassifier(), then
 * next() until it returns false, then done().
 *
 * @author Gabi Schmidberger (gabi@cs.waikato.ac.nz)
 * @author Bernhard Pfahringer (bernhard@cs.waikato.ac.nz)
 * @version $Revision$
 */
public interface IterativeClassifier extends Classifier {

  /**
   * Initializes an iterative classifier.
   *
   * @param instances the instances to be used in induction
   * @exception Exception if the model cannot be initialized
   */
  void initializeClassifier(Instances instances) throws Exception;

  /**
   * Performs one iteration.
   *
   * @return false if no further iterations could be performed, true otherwise
   * @exception Exception if this iteration fails for unexpected reasons
   */
  boolean next() throws Exception;

  /**
   * Signal end of iterating, useful for any house-keeping/cleanup.
   *
   * @exception Exception if cleanup fails
   */
  void done() throws Exception;
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/MultipleClassifiersCombiner.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * MultipleClassifiersCombiner.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;

import weka.core.*;
import weka.core.Capabilities.Capability;

/**
 * Abstract utility class for handling settings common to meta classifiers
 * that build an ensemble from multiple classifiers.
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision$
 */
public abstract class MultipleClassifiersCombiner extends AbstractClassifier {

  /** for serialization */
  private static final long serialVersionUID = 2776436621129422119L;

  /** Array for storing the generated base classifiers. */
  protected Classifier[] m_Classifiers = {
    new weka.classifiers.rules.ZeroR()
  };

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options
   */
  public Enumeration<Option> listOptions() {

    Vector<Option> newVector = new Vector<Option>(1);

    newVector.addElement(new Option(
      "\tFull class name of classifier to include, followed\n"
      + "\tby scheme options. May be specified multiple times.\n"
      + "\t(default: \"weka.classifiers.rules.ZeroR\")",
      "B", 1, "-B <classifier specification>"));

    newVector.addAll(Collections.list(super.listOptions()));

    // Expose the options of each base classifier that handles options
    for (Classifier classifier : getClassifiers()) {
      if (classifier instanceof OptionHandler) {
        newVector.addElement(new Option(
          "", "", 0, "\nOptions specific to classifier "
            + classifier.getClass().getName() + ":"));
        newVector.addAll(Collections.list(((OptionHandler) classifier)
          .listOptions()));
      }
    }
    return newVector.elements();
  }

  /**
   * Parses a given list of options. Valid options are:<p>
   *
   * -B classifierstring <br>
   * Classifierstring should contain the full class name of a scheme
   * included for selection followed by options to the classifier
   * (required, option should be used once for each classifier).<p>
   *
   * @param options the list of options as an array of strings
   * @exception Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {

    // Iterate through the schemes; each -B contributes one classifier spec
    Vector<Classifier> classifiers = new Vector<Classifier>();
    while (true) {
      String classifierString = Utils.getOption('B', options);
      if (classifierString.length() == 0) {
        break;
      }
      String[] classifierSpec = Utils.splitOptions(classifierString);
      if (classifierSpec.length == 0) {
        throw new IllegalArgumentException(
          "Invalid classifier specification string");
      }
      String classifierName = classifierSpec[0];
      classifierSpec[0] = "";
      classifiers.addElement(AbstractClassifier.forName(classifierName,
        classifierSpec));
    }
    // No -B given: fall back to the documented default of a single ZeroR
    if (classifiers.size() == 0) {
      classifiers.addElement(new weka.classifiers.rules.ZeroR());
    }
    Classifier[] classifiersArray = new Classifier[classifiers.size()];
    for (int i = 0; i < classifiersArray.length; i++) {
      classifiersArray[i] = (Classifier) classifiers.elementAt(i);
    }
    setClassifiers(classifiersArray);

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the Classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {

    Vector<String> options = new Vector<String>();

    for (int i = 0; i < m_Classifiers.length; i++) {
      options.add("-B");
      options.add("" + getClassifierSpec(i));
    }

    Collections.addAll(options, super.getOptions());

    return options.toArray(new String[0]);
  }

  /**
   * Returns the tip text for this property.
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String classifiersTipText() {
    return "The base classifiers to be used.";
  }

  /**
   * Sets the list of possible classifiers to choose from.
   *
   * @param classifiers an array of classifiers with all options set.
   */
  public void setClassifiers(Classifier[] classifiers) {
    m_Classifiers = classifiers;
  }

  /**
   * Gets the list of possible classifiers to choose from.
   *
   * @return the array of Classifiers
   */
  public Classifier[] getClassifiers() {
    return m_Classifiers;
  }

  /**
   * Gets a single classifier from the set of available classifiers.
   *
   * @param index the index of the classifier wanted
   * @return the Classifier
   */
  public Classifier getClassifier(int index) {
    return m_Classifiers[index];
  }

  /**
   * Gets the classifier specification string, which contains the class name
   * of the classifier and any options to the classifier.
   *
   * @param index the index of the classifier string to retrieve, starting
   *          from 0.
   * @return the classifier string, or the empty string if no classifier has
   *         been assigned (or the index given is out of range).
   */
  protected String getClassifierSpec(int index) {
    // Bug fix: the original check (m_Classifiers.length < index) let
    // index == m_Classifiers.length (and any negative index) fall through to
    // getClassifier(index), throwing ArrayIndexOutOfBoundsException instead
    // of returning the documented empty string.
    if (index < 0 || index >= m_Classifiers.length) {
      return "";
    }
    Classifier c = getClassifier(index);
    // Guard the cast: Classifier does not itself guarantee OptionHandler
    if (c instanceof OptionHandler) {
      return c.getClass().getName() + " "
        + Utils.joinOptions(((OptionHandler) c).getOptions());
    }
    return c.getClass().getName();
  }

  /**
   * Returns combined capabilities of the base classifiers, i.e., the
   * capabilities all of them have in common.
   *
   * @return the capabilities of the base classifiers
   */
  public Capabilities getCapabilities() {
    Capabilities result;
    int i;

    if (getClassifiers().length == 0) {
      result = new Capabilities(this);
      result.disableAll();
    } else {
      // Start from the first classifier's capabilities and intersect the rest
      result = (Capabilities) getClassifier(0).getCapabilities().clone();
      for (i = 1; i < getClassifiers().length; i++) {
        result.and(getClassifier(i).getCapabilities());
      }
    }

    // set dependencies
    for (Capability cap : Capability.values()) {
      result.enableDependency(cap);
    }
    result.setOwner(this);

    return result;
  }

  /**
   * Propagates pre-execution notification to all command-line-runnable base
   * classifiers.
   *
   * @throws Exception if a base classifier's preExecution fails
   */
  @Override
  public void preExecution() throws Exception {
    for (Classifier classifier : getClassifiers()) {
      if (classifier instanceof CommandlineRunnable) {
        ((CommandlineRunnable) classifier).preExecution();
      }
    }
  }

  /**
   * Propagates post-execution notification to all command-line-runnable base
   * classifiers.
   *
   * @throws Exception if a base classifier's postExecution fails
   */
  @Override
  public void postExecution() throws Exception {
    for (Classifier classifier : getClassifiers()) {
      if (classifier instanceof CommandlineRunnable) {
        ((CommandlineRunnable) classifier).postExecution();
      }
    }
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/ParallelIteratedSingleClassifierEnhancer.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * ParallelIteratedSingleClassifierEnhancer.java
 * Copyright (C) 2009-2014 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;

import weka.core.Instances;
import weka.core.Option;
import weka.core.Utils;

/**
 * Abstract utility class for handling settings common to meta classifiers that build an ensemble in
 * parallel from a single base learner.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @author Bernhard Pfahringer (bernhard@waikato.ac.nz)
 * @version $Revision$
 */
public abstract class ParallelIteratedSingleClassifierEnhancer extends IteratedSingleClassifierEnhancer {

  /** For serialization */
  private static final long serialVersionUID = -5026378741833046436L;

  /** The number of threads to have executing at any one time (0 = auto-detect cores) */
  protected int m_numExecutionSlots = 1;

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  @Override
  public Enumeration<Option> listOptions() {

    Vector<Option> newVector = new Vector<>(2);

    newVector.addElement(new Option("\tNumber of execution slots.\n" + "\t(default 1 - i.e. no parallelism)\n" + "\t(use 0 to auto-detect number of cores)", "num-slots", 1,
        "-num-slots <num>"));

    newVector.addAll(Collections.list(super.listOptions()));

    return newVector.elements();
  }

  /**
   * Parses a given list of options. Valid options are:
   * <p>
   *
   * -num-slots num <br>
   * Set the number of execution slots to use (default 1 - i.e. no parallelism).
   * <p>
   *
   * Options after -- are passed to the designated classifier.
   * <p>
   *
   * @param options
   *          the list of options as an array of strings
   * @exception Exception
   *              if an option is not supported
   */
  @Override
  public void setOptions(final String[] options) throws Exception {

    String iterations = Utils.getOption("num-slots", options);
    if (iterations.length() != 0) {
      this.setNumExecutionSlots(Integer.parseInt(iterations));
    } else {
      this.setNumExecutionSlots(1);
    }

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {

    String[] superOptions = super.getOptions();
    String[] options = new String[superOptions.length + 2];

    int current = 0;
    options[current++] = "-num-slots";
    options[current++] = "" + this.getNumExecutionSlots();

    System.arraycopy(superOptions, 0, options, current, superOptions.length);

    return options;
  }

  /**
   * Set the number of execution slots (threads) to use for building the members of the ensemble.
   *
   * @param numSlots
   *          the number of slots to use.
   */
  public void setNumExecutionSlots(final int numSlots) {
    this.m_numExecutionSlots = numSlots;
  }

  /**
   * Get the number of execution slots (threads) to use for building the members of the ensemble.
   *
   * @return the number of slots to use
   */
  public int getNumExecutionSlots() {
    return this.m_numExecutionSlots;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the explorer/experimenter gui
   */
  public String numExecutionSlotsTipText() {
    return "The number of execution slots (threads) to use for " + "constructing the ensemble.";
  }

  /**
   * Stump method for building the classifiers
   *
   * @param data
   *          the training data to be used for generating the ensemble
   * @exception Exception
   *              if the classifier could not be built successfully
   */
  @Override
  public void buildClassifier(final Instances data) throws Exception {
    super.buildClassifier(data);

    if (this.m_numExecutionSlots < 0) {
      throw new Exception("Number of execution slots needs to be >= 0!");
    }
  }

  /**
   * Does the actual construction of the ensemble
   *
   * @throws Exception
   *           if something goes wrong during the training process
   */
  protected void buildClassifiers() throws Exception {

    if (this.m_numExecutionSlots != 1) {

      // 0 means auto-detect: use one slot per available core.
      int numCores = (this.m_numExecutionSlots == 0) ? Runtime.getRuntime().availableProcessors() : this.m_numExecutionSlots;
      ExecutorService executorPool = Executors.newFixedThreadPool(numCores);

      final CountDownLatch doneSignal = new CountDownLatch(this.m_Classifiers.length);
      final AtomicInteger numFailed = new AtomicInteger();

      // Bug fix: ensure the pool is always torn down, even when the
      // interrupt check below (or the await) throws - otherwise the worker
      // threads would leak.
      try {
        for (int i = 0; i < this.m_Classifiers.length; i++) {

          // XXX kill weka execution
          if (Thread.interrupted()) {
            throw new InterruptedException("Thread got interrupted, thus, kill WEKA.");
          }

          final Classifier currentClassifier = this.m_Classifiers[i];
          // MultiClassClassifier may produce occasional NULL classifiers ...
          if (currentClassifier == null) {
            // Bug fix: the latch was created with m_Classifiers.length counts,
            // so a skipped null slot must still count down or await() below
            // would block forever.
            doneSignal.countDown();
            continue;
          }
          final int iteration = i;

          if (this.m_Debug) {
            System.out.print("Training classifier (" + (i + 1) + ")");
          }
          Runnable newTask = new Runnable() {
            @Override
            public void run() {
              try {
                currentClassifier.buildClassifier(ParallelIteratedSingleClassifierEnhancer.this.getTrainingSet(iteration));
              } catch (Throwable ex) {
                ex.printStackTrace();
                numFailed.incrementAndGet();
                if (ParallelIteratedSingleClassifierEnhancer.this.m_Debug) {
                  System.err.println("Iteration " + iteration + " failed!");
                }
              } finally {
                doneSignal.countDown();
              }
            }
          };

          // launch this task
          executorPool.submit(newTask);
        }

        // wait for all tasks to finish
        doneSignal.await();
      } finally {
        executorPool.shutdownNow();
      }

      if (this.m_Debug && numFailed.intValue() > 0) {
        System.err.println("Problem building classifiers - some iterations failed.");
      }

    } else {
      // simple single-threaded execution
      for (int i = 0; i < this.m_Classifiers.length; i++) {
        // XXX kill weka execution
        if (Thread.interrupted()) {
          throw new InterruptedException("Killed WEKA!");
        }
        this.m_Classifiers[i].buildClassifier(this.getTrainingSet(i));
      }
    }
  }

  /**
   * Gets a training set for a particular iteration. Implementations need to be careful with thread
   * safety and should probably be synchronized to be on the safe side.
   *
   * @param iteration
   *          the number of the iteration for the requested training set
   * @return the training set for the supplied iteration number
   * @throws Exception
   *           if something goes wrong.
   */
  protected abstract Instances getTrainingSet(int iteration) throws Exception;
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/ParallelMultipleClassifiersCombiner.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * ParallelMultipleClassifiersCombiner.java
 * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import weka.core.Instances;
import weka.core.Option;
import weka.core.Utils;

/**
 * Abstract utility class for handling settings common to meta classifiers that build an ensemble in
 * parallel using multiple classifiers.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision$
 */
public abstract class ParallelMultipleClassifiersCombiner extends MultipleClassifiersCombiner {

  /** For serialization */
  private static final long serialVersionUID = 728109028953726626L;

  /** The number of threads to have executing at any one time */
  protected int m_numExecutionSlots = 1;

  /** Pool of threads to train models with */
  protected transient ThreadPoolExecutor m_executorPool;

  /** The number of classifiers completed so far */
  protected int m_completed;

  /**
   * The number of classifiers that experienced a failure of some sort during construction
   */
  protected int m_failed;

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  @Override
  public Enumeration<Option> listOptions() {

    Vector<Option> newVector = new Vector<>(1);

    newVector.addElement(new Option("\tNumber of execution slots.\n" + "\t(default 1 - i.e. no parallelism)", "num-slots", 1, "-num-slots <num>"));

    newVector.addAll(Collections.list(super.listOptions()));

    return newVector.elements();
  }

  /**
   * Parses a given list of options. Valid options are:
   * <p>
   *
   * -num-slots num <br>
   * Set the number of execution slots to use (default 1 - i.e. no parallelism).
   * <p>
   *
   * Options after -- are passed to the designated classifier.
   * <p>
   *
   * @param options
   *          the list of options as an array of strings
   * @exception Exception
   *              if an option is not supported
   */
  @Override
  public void setOptions(final String[] options) throws Exception {

    String iterations = Utils.getOption("num-slots", options);
    if (iterations.length() != 0) {
      this.setNumExecutionSlots(Integer.parseInt(iterations));
    } else {
      this.setNumExecutionSlots(1);
    }

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {

    Vector<String> options = new Vector<>();

    options.add("-num-slots");
    options.add("" + this.getNumExecutionSlots());

    Collections.addAll(options, super.getOptions());

    return options.toArray(new String[0]);
  }

  /**
   * Set the number of execution slots (threads) to use for building the members of the ensemble.
   *
   * @param numSlots
   *          the number of slots to use.
   */
  public void setNumExecutionSlots(final int numSlots) {
    this.m_numExecutionSlots = numSlots;
  }

  /**
   * Get the number of execution slots (threads) to use for building the members of the ensemble.
   *
   * @return the number of slots to use
   */
  public int getNumExecutionSlots() {
    return this.m_numExecutionSlots;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the explorer/experimenter gui
   */
  public String numExecutionSlotsTipText() {
    return "The number of execution slots (threads) to use for " + "constructing the ensemble.";
  }

  /**
   * Stump method for building the classifiers
   *
   * @param data
   *          the training data to be used for generating the ensemble
   * @exception Exception
   *              if the classifier could not be built successfully
   */
  @Override
  public void buildClassifier(final Instances data) throws Exception {

    if (this.m_numExecutionSlots < 1) {
      throw new Exception("Number of execution slots needs to be >= 1!");
    }

    if (this.m_numExecutionSlots > 1) {
      if (this.m_Debug) {
        System.out.println("Starting executor pool with " + this.m_numExecutionSlots + " slots...");
      }
      this.startExecutorPool();
    }
    this.m_completed = 0;
    this.m_failed = 0;
  }

  /**
   * Start the pool of execution threads, shutting down any previous pool first.
   */
  protected void startExecutorPool() {
    if (this.m_executorPool != null) {
      this.m_executorPool.shutdownNow();
    }

    this.m_executorPool = new ThreadPoolExecutor(this.m_numExecutionSlots, this.m_numExecutionSlots, 120, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>());
  }

  /**
   * Blocks the calling thread until all classifiers have finished (tf == true),
   * or wakes all waiters (tf == false).
   *
   * @param tf
   *          true to wait, false to notify
   */
  private synchronized void block(final boolean tf) {
    if (tf) {
      try {
        // Bug fix: guard wait() with a loop instead of a single if -
        // Object.wait() may return spuriously, so the completion condition
        // must be re-checked after every wakeup.
        while (this.m_numExecutionSlots > 1 && this.m_completed + this.m_failed < this.m_Classifiers.length) {
          this.wait();
        }
      } catch (InterruptedException ex) {
        // Bug fix: restore the interrupt status instead of swallowing it,
        // so callers can still observe the interruption.
        Thread.currentThread().interrupt();
      }
    } else {
      this.notifyAll();
    }
  }

  /**
   * Does the actual construction of the ensemble
   *
   * @param data
   *          the training data
   * @throws Exception
   *           if something goes wrong during the training process
   */
  protected synchronized void buildClassifiers(final Instances data) throws Exception {

    for (int i = 0; i < this.m_Classifiers.length; i++) {
      // XXX kill weka execution
      if (Thread.interrupted()) {
        throw new InterruptedException("Thread got interrupted, thus, kill WEKA.");
      }

      if (this.m_numExecutionSlots > 1) {
        final Classifier currentClassifier = this.m_Classifiers[i];
        final int iteration = i;
        Runnable newTask = new Runnable() {
          @Override
          public void run() {
            try {
              if (ParallelMultipleClassifiersCombiner.this.m_Debug) {
                System.out.println("Training classifier (" + (iteration + 1) + ")");
              }
              currentClassifier.buildClassifier(data);
              if (ParallelMultipleClassifiersCombiner.this.m_Debug) {
                System.out.println("Finished classifier (" + (iteration + 1) + ")");
              }
              ParallelMultipleClassifiersCombiner.this.completedClassifier(iteration, true);
            } catch (Exception ex) {
              ex.printStackTrace();
              ParallelMultipleClassifiersCombiner.this.completedClassifier(iteration, false);
            }
          }
        };

        // launch this task
        this.m_executorPool.execute(newTask);
      } else {
        this.m_Classifiers[i].buildClassifier(data);
      }
    }

    if (this.m_numExecutionSlots > 1 && this.m_completed + this.m_failed < this.m_Classifiers.length) {
      this.block(true);
    }
  }

  /**
   * Records the completion of the training of a single classifier. Unblocks if all classifiers have
   * been trained.
   *
   * @param iteration
   *          the iteration that has completed
   * @param success
   *          whether the classifier trained successfully
   */
  protected synchronized void completedClassifier(final int iteration, final boolean success) {
    if (!success) {
      this.m_failed++;
      if (this.m_Debug) {
        System.err.println("Iteration " + iteration + " failed!");
      }
    } else {
      this.m_completed++;
    }

    if (this.m_completed + this.m_failed == this.m_Classifiers.length) {
      if (this.m_failed > 0) {
        if (this.m_Debug) {
          System.err.println("Problem building classifiers - some iterations failed.");
        }
      }

      // have to shut the pool down or program executes as a server
      // and when running from the command line does not return to the
      // prompt
      this.m_executorPool.shutdown();
      this.block(false);
    }
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/RandomizableClassifier.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * RandomizableClassifier.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;

import weka.core.Option;
import weka.core.Randomizable;
import weka.core.Utils;

/**
 * Abstract utility class for handling settings common to randomizable
 * classifiers.
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision$
 */
public abstract class RandomizableClassifier
  extends AbstractClassifier
  implements Randomizable {

  /** for serialization */
  private static final long serialVersionUID = -8816375798262351903L;

  /** The random number seed. */
  protected int m_Seed = 1;

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration<Option> listOptions() {

    Vector<Option> result = new Vector<Option>();

    // the -S seed option handled by this class, followed by everything
    // the superclass offers
    result.add(new Option(
      "\tRandom number seed.\n" + "\t(default 1)",
      "S", 1, "-S <num>"));
    result.addAll(Collections.list(super.listOptions()));

    return result.elements();
  }

  /**
   * Parses a given list of options. Consumes the -S (random number seed)
   * option, defaulting to 1 when absent, and passes the remaining options
   * on to the superclass.
   *
   * @param options the list of options as an array of strings
   * @exception Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {

    String seedValue = Utils.getOption('S', options);
    setSeed(seedValue.isEmpty() ? 1 : Integer.parseInt(seedValue));

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String [] getOptions() {

    // emit "-S <seed>" first, then whatever the superclass reports
    String[] superOptions = super.getOptions();
    String[] result = new String[superOptions.length + 2];

    result[0] = "-S";
    result[1] = Integer.toString(getSeed());
    System.arraycopy(superOptions, 0, result, 2, superOptions.length);

    return result;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String seedTipText() {
    return "The random number seed to be used.";
  }

  /**
   * Set the seed for random number generation.
   *
   * @param seed the seed
   */
  public void setSeed(int seed) {
    m_Seed = seed;
  }

  /**
   * Gets the seed for the random number generations
   *
   * @return the seed for the random number generation
   */
  public int getSeed() {
    return m_Seed;
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/RandomizableIteratedSingleClassifierEnhancer.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * RandomizableIteratedSingleClassifierEnhancer.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;

import weka.core.Option;
import weka.core.Randomizable;
import weka.core.Utils;

/**
 * Abstract utility class for handling settings common to randomizable
 * meta classifiers that build an ensemble from a single base learner.
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision$
 */
public abstract class RandomizableIteratedSingleClassifierEnhancer
  extends IteratedSingleClassifierEnhancer
  implements Randomizable {

  /** for serialization */
  private static final long serialVersionUID = 5063351391524938557L;

  /** The random number seed. */
  protected int m_Seed = 1;

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration<Option> listOptions() {

    Vector<Option> result = new Vector<Option>();

    // the -S seed option handled by this class, followed by everything
    // the superclass offers
    result.add(new Option(
      "\tRandom number seed.\n" + "\t(default 1)",
      "S", 1, "-S <num>"));
    result.addAll(Collections.list(super.listOptions()));

    return result.elements();
  }

  /**
   * Parses a given list of options. Consumes the -S (random number seed)
   * option, defaulting to 1 when absent, and passes the remaining options
   * on to the superclass.
   *
   * @param options the list of options as an array of strings
   * @exception Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {

    String seedValue = Utils.getOption('S', options);
    setSeed(seedValue.isEmpty() ? 1 : Integer.parseInt(seedValue));

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String [] getOptions() {

    // emit "-S <seed>" first, then whatever the superclass reports
    String[] superOptions = super.getOptions();
    String[] result = new String[superOptions.length + 2];

    result[0] = "-S";
    result[1] = Integer.toString(getSeed());
    System.arraycopy(superOptions, 0, result, 2, superOptions.length);

    return result;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String seedTipText() {
    return "The random number seed to be used.";
  }

  /**
   * Set the seed for random number generation.
   *
   * @param seed the seed
   */
  public void setSeed(int seed) {
    m_Seed = seed;
  }

  /**
   * Gets the seed for the random number generations
   *
   * @return the seed for the random number generation
   */
  public int getSeed() {
    return m_Seed;
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/RandomizableMultipleClassifiersCombiner.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * RandomizableMultipleClassifiersCombiner.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;

import weka.core.Option;
import weka.core.Randomizable;
import weka.core.Utils;

/**
 * Abstract utility class for handling settings common to randomizable
 * meta classifiers that build an ensemble from multiple classifiers based
 * on a given random number seed.
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision$
 */
public abstract class RandomizableMultipleClassifiersCombiner
  extends MultipleClassifiersCombiner
  implements Randomizable {

  /** for serialization */
  private static final long serialVersionUID = 5057936555724785679L;

  /** The random number seed. */
  protected int m_Seed = 1;

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration<Option> listOptions() {

    Vector<Option> result = new Vector<Option>();

    // the -S seed option handled by this class, followed by everything
    // the superclass offers
    result.add(new Option(
      "\tRandom number seed.\n" + "\t(default 1)",
      "S", 1, "-S <num>"));
    result.addAll(Collections.list(super.listOptions()));

    return result.elements();
  }

  /**
   * Parses a given list of options. Consumes the -S (random number seed)
   * option, defaulting to 1 when absent, and passes the remaining options
   * on to the superclass.
   *
   * @param options the list of options as an array of strings
   * @exception Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {

    String seedValue = Utils.getOption('S', options);
    setSeed(seedValue.isEmpty() ? 1 : Integer.parseInt(seedValue));

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String [] getOptions() {

    // emit "-S <seed>" first, then whatever the superclass reports
    String[] superOptions = super.getOptions();
    String[] result = new String[superOptions.length + 2];

    result[0] = "-S";
    result[1] = Integer.toString(getSeed());
    System.arraycopy(superOptions, 0, result, 2, superOptions.length);

    return result;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String seedTipText() {
    return "The random number seed to be used.";
  }

  /**
   * Set the seed for random number generation.
   *
   * @param seed the seed
   */
  public void setSeed(int seed) {
    m_Seed = seed;
  }

  /**
   * Gets the seed for the random number generations
   *
   * @return the seed for the random number generation
   */
  public int getSeed() {
    return m_Seed;
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/RandomizableParallelIteratedSingleClassifierEnhancer.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * RandomizableParallelIteratedSingleClassifierEnhancer.java
 * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;

import weka.core.Option;
import weka.core.Randomizable;
import weka.core.Utils;

/**
 * Abstract utility class for handling settings common to randomizable
 * meta classifiers that build an ensemble in parallel from a single base
 * learner.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision$
 */
public abstract class RandomizableParallelIteratedSingleClassifierEnhancer
  extends ParallelIteratedSingleClassifierEnhancer
  implements Randomizable {

  /**
   * For serialization
   */
  private static final long serialVersionUID = 1298141000373615374L;

  /** The random number seed. */
  protected int m_Seed = 1;

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration<Option> listOptions() {

    Vector<Option> result = new Vector<Option>();

    // the -S seed option handled by this class, followed by everything
    // the superclass offers
    result.add(new Option(
      "\tRandom number seed.\n" + "\t(default 1)",
      "S", 1, "-S <num>"));
    result.addAll(Collections.list(super.listOptions()));

    return result.elements();
  }

  /**
   * Parses a given list of options. Consumes the -S (random number seed)
   * option, defaulting to 1 when absent, and passes the remaining options
   * on to the superclass.
   *
   * @param options the list of options as an array of strings
   * @exception Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {

    String seedValue = Utils.getOption('S', options);
    setSeed(seedValue.isEmpty() ? 1 : Integer.parseInt(seedValue));

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String [] getOptions() {

    // emit "-S <seed>" first, then whatever the superclass reports
    String[] superOptions = super.getOptions();
    String[] result = new String[superOptions.length + 2];

    result[0] = "-S";
    result[1] = Integer.toString(getSeed());
    System.arraycopy(superOptions, 0, result, 2, superOptions.length);

    return result;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String seedTipText() {
    return "The random number seed to be used.";
  }

  /**
   * Set the seed for random number generation.
   *
   * @param seed the seed
   */
  public void setSeed(int seed) {
    m_Seed = seed;
  }

  /**
   * Gets the seed for the random number generations
   *
   * @return the seed for the random number generation
   */
  public int getSeed() {
    return m_Seed;
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/RandomizableParallelMultipleClassifiersCombiner.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * RandomizableParallelMultipleClassifiersCombiner.java
 * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;

import weka.core.Option;
import weka.core.Randomizable;
import weka.core.Utils;

/**
 * Abstract utility class for handling settings common to
 * meta classifiers that build an ensemble in parallel using multiple
 * classifiers based on a given random number seed.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision$
 */
public abstract class RandomizableParallelMultipleClassifiersCombiner
  extends ParallelMultipleClassifiersCombiner
  implements Randomizable {

  /** For serialization */
  private static final long serialVersionUID = 8274061943448676943L;

  /** The random number seed. */
  protected int m_Seed = 1;

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration<Option> listOptions() {

    Vector<Option> result = new Vector<Option>();

    // the -S seed option handled by this class, followed by everything
    // the superclass offers
    result.add(new Option(
      "\tRandom number seed.\n" + "\t(default 1)",
      "S", 1, "-S <num>"));
    result.addAll(Collections.list(super.listOptions()));

    return result.elements();
  }

  /**
   * Parses a given list of options. Consumes the -S (random number seed)
   * option, defaulting to 1 when absent, and passes the remaining options
   * on to the superclass.
   *
   * @param options the list of options as an array of strings
   * @exception Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {

    String seedValue = Utils.getOption('S', options);
    setSeed(seedValue.isEmpty() ? 1 : Integer.parseInt(seedValue));

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String [] getOptions() {

    // emit "-S <seed>" first, then whatever the superclass reports
    String[] superOptions = super.getOptions();
    String[] result = new String[superOptions.length + 2];

    result[0] = "-S";
    result[1] = Integer.toString(getSeed());
    System.arraycopy(superOptions, 0, result, 2, superOptions.length);

    return result;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String seedTipText() {
    return "The random number seed to be used.";
  }

  /**
   * Set the seed for random number generation.
   *
   * @param seed the seed
   */
  public void setSeed(int seed) {
    m_Seed = seed;
  }

  /**
   * Gets the seed for the random number generations
   *
   * @return the seed for the random number generation
   */
  public int getSeed() {
    return m_Seed;
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/RandomizableSingleClassifierEnhancer.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * RandomizableSingleClassifierEnhancer.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;

import weka.core.Option;
import weka.core.Randomizable;
import weka.core.Utils;

/**
 * Abstract utility class for handling settings common to randomizable
 * meta classifiers that build an ensemble from a single base learner.
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision$
 */
public abstract class RandomizableSingleClassifierEnhancer
  extends SingleClassifierEnhancer implements Randomizable {

  /** for serialization */
  private static final long serialVersionUID = 558286687096157160L;

  /** The random number seed. */
  protected int m_Seed = 1;

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration<Option> listOptions() {

    // the seed option, followed by all options of the superclass
    Vector<Option> opts = new Vector<Option>(1);
    opts.addElement(new Option(
      "\tRandom number seed.\n"
      + "\t(default 1)",
      "S", 1, "-S <num>"));
    opts.addAll(Collections.list(super.listOptions()));

    return opts.elements();
  }

  /**
   * Parses a given list of options. Valid options are:<p>
   *
   * -W classname <br>
   * Specify the full class name of the base learner.<p>
   *
   * -S num <br>
   * Set the random number seed (default 1). <p>
   *
   * Options after -- are passed to the designated classifier.<p>
   *
   * @param options the list of options as an array of strings
   * @exception Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {

    String seedString = Utils.getOption('S', options);
    if (seedString.length() == 0) {
      // documented default when no -S is given
      setSeed(1);
    } else {
      setSeed(Integer.parseInt(seedString));
    }

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {

    Vector<String> opts = new Vector<String>();

    opts.add("-S");
    opts.add(Integer.toString(getSeed()));
    Collections.addAll(opts, super.getOptions());

    return opts.toArray(new String[0]);
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String seedTipText() {
    return "The random number seed to be used.";
  }

  /**
   * Set the seed for random number generation.
   *
   * @param seed the seed
   */
  public void setSeed(int seed) {
    m_Seed = seed;
  }

  /**
   * Gets the seed for the random number generations
   *
   * @return the seed for the random number generation
   */
  public int getSeed() {
    return m_Seed;
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/SingleClassifierEnhancer.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * SingleClassifierEnhancer.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;

import weka.classifiers.rules.ZeroR;
import weka.core.*;
import weka.core.Capabilities.Capability;

/**
 * Abstract utility class for handling settings common to meta
 * classifiers that use a single base learner.
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision$
 */
public abstract class SingleClassifierEnhancer extends AbstractClassifier {

  /** for serialization */
  private static final long serialVersionUID = -3665885256363525164L;

  /** The base classifier to use */
  protected Classifier m_Classifier = new ZeroR();

  /**
   * String describing default classifier.
   *
   * @return the class name of the default base classifier
   */
  protected String defaultClassifierString() {
    return "weka.classifiers.rules.ZeroR";
  }

  /**
   * String describing options for default classifier.
   *
   * @return the options used to configure the default base classifier
   */
  protected String[] defaultClassifierOptions() {
    return new String[0];
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration<Option> listOptions() {

    Vector<Option> newVector = new Vector<Option>(3);

    newVector.addElement(new Option(
      "\tFull name of base classifier.\n"
      + "\t(default: " + defaultClassifierString() +
      ((defaultClassifierOptions().length > 0)
       ? " with options " + Utils.joinOptions(defaultClassifierOptions()) + ")"
       : ")"),
      "W", 1, "-W <classifier name>"));

    newVector.addAll(Collections.list(super.listOptions()));

    // append the options of the currently configured base classifier
    newVector.addElement(new Option(
      "", "", 0, "\nOptions specific to classifier "
      + m_Classifier.getClass().getName() + ":"));
    newVector.addAll(Collections.list(((OptionHandler) m_Classifier).listOptions()));

    return newVector.elements();
  }

  /**
   * Parses a given list of options. Valid options are:<p>
   *
   * -W classname <br>
   * Specify the full class name of the base learner.<p>
   *
   * Options after -- are passed to the designated classifier.<p>
   *
   * @param options the list of options as an array of strings
   * @exception Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {

    super.setOptions(options);

    String classifierName = Utils.getOption('W', options);
    String[] classifierOptions = Utils.partitionOptions(options);

    if (classifierName.length() > 0) {
      // NOTE(review): the original implementation instantiated the classifier
      // twice here (once with null options, once with the partitioned ones);
      // the first instance was immediately discarded, so it has been removed.
      setClassifier(AbstractClassifier.forName(classifierName, classifierOptions));
    } else if (classifierOptions.length > 0) {
      setClassifier(AbstractClassifier.forName(defaultClassifierString(),
                                               classifierOptions));
    } else {
      setClassifier(AbstractClassifier.forName(defaultClassifierString(),
                                               defaultClassifierOptions()));
    }
  }

  /**
   * Gets the current settings of the Classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String [] getOptions() {

    Vector<String> options = new Vector<String>();

    options.add("-W");
    options.add(getClassifier().getClass().getName());

    Collections.addAll(options, super.getOptions());

    // base classifier options go after the "--" separator
    String[] classifierOptions = ((OptionHandler) m_Classifier).getOptions();
    if (classifierOptions.length > 0) {
      options.add("--");
      Collections.addAll(options, classifierOptions);
    }

    return options.toArray(new String[0]);
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String classifierTipText() {
    return "The base classifier to be used.";
  }

  /**
   * Returns default capabilities of the base classifier.
   *
   * @return the capabilities of the base classifier
   */
  public Capabilities getCapabilities() {
    Capabilities result;

    if (getClassifier() != null) {
      result = getClassifier().getCapabilities();
    } else {
      result = new Capabilities(this);
      result.disableAll();
    }

    // set dependencies
    for (Capability cap : Capability.values()) {
      result.enableDependency(cap);
    }

    result.setOwner(this);

    return result;
  }

  /**
   * Set the base learner.
   *
   * @param newClassifier the classifier to use.
   */
  public void setClassifier(Classifier newClassifier) {
    m_Classifier = newClassifier;
  }

  /**
   * Get the classifier used as the base learner.
   *
   * @return the classifier used as the base learner
   */
  public Classifier getClassifier() {
    return m_Classifier;
  }

  /**
   * Gets the classifier specification string, which contains the class name of
   * the classifier and any options to the classifier
   *
   * @return the classifier string
   */
  protected String getClassifierSpec() {
    Classifier c = getClassifier();
    return c.getClass().getName() + " "
      + Utils.joinOptions(((OptionHandler) c).getOptions());
  }

  @Override
  public void preExecution() throws Exception {
    // forward lifecycle hook to the base classifier if it supports it
    if (getClassifier() instanceof CommandlineRunnable) {
      ((CommandlineRunnable) getClassifier()).preExecution();
    }
  }

  @Override
  public void postExecution() throws Exception {
    // forward lifecycle hook to the base classifier if it supports it
    if (getClassifier() instanceof CommandlineRunnable) {
      ((CommandlineRunnable) getClassifier()).postExecution();
    }
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/Sourcable.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Sourcable.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers; /** * Interface for classifiers that can be converted to Java source. * * @author Len Trigg (trigg@cs.waikato.ac.nz) * @version $Revision$ */ public interface Sourcable { /** * Returns a string that describes the classifier as source. The * classifier will be contained in a class with the given name (there may * be auxiliary classes), * and will contain a method with the signature: * <pre><code> * public static double classify(Object [] i); * </code></pre> * where the array <code>i</code> contains elements that are either * Double, String, with missing values represented as null. The generated * code is public domain and comes with no warranty. * * @param className the name that should be given to the source class. * @return the object source described by a string * @throws Exception if the source can't be computed */ String toSource(String className) throws Exception; }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/UpdateableBatchProcessor.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * UpdateableBatchProcessor.java * Copyright (C) 2014 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers; /** * Updateable classifiers can implement this if they wish to be informed at the * end of the training stream. This could be useful for cleaning up temporary * data structures, pruning dictionaries etc. * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision$ */ public interface UpdateableBatchProcessor { /** * Signal that the training data is finished (for now). * * @throws Exception if a problem occurs */ public void batchFinished() throws Exception; }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/UpdateableClassifier.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * UpdateableClassifier.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers; import weka.core.Instance; /** * Interface to incremental classification models that can learn using * one instance at a time. * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision$ */ public interface UpdateableClassifier { /** * Updates a classifier using the given instance. * * @param instance the instance to included * @exception Exception if instance could not be incorporated * successfully */ void updateClassifier(Instance instance) throws Exception; }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/BayesNet.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * BayesNet.java * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.bayes.net.ADNode; import weka.classifiers.bayes.net.BIFReader; import weka.classifiers.bayes.net.ParentSet; import weka.classifiers.bayes.net.estimate.BayesNetEstimator; import weka.classifiers.bayes.net.estimate.DiscreteEstimatorBayes; import weka.classifiers.bayes.net.estimate.SimpleEstimator; import weka.classifiers.bayes.net.search.SearchAlgorithm; import weka.classifiers.bayes.net.search.local.K2; import weka.classifiers.bayes.net.search.local.LocalScoreSearchAlgorithm; import weka.classifiers.bayes.net.search.local.Scoreable; import weka.core.AdditionalMeasureProducer; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Drawable; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.estimators.Estimator; import weka.filters.Filter; import weka.filters.supervised.attribute.Discretize; import 
weka.filters.unsupervised.attribute.ReplaceMissingValues; /** * <!-- globalinfo-start --> Bayes Network learning using various search algorithms and quality measures.<br/> * Base class for a Bayes Network classifier. Provides datastructures (network structure, conditional probability distributions, etc.) and facilities common to Bayes Network learning algorithms like K2 and B.<br/> * <br/> * For more information see:<br/> * <br/> * http://sourceforge.net/projects/weka/files/documentation/WekaManual-3-7-0.pdf /download * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -D * Do not use ADTree data structure * </pre> * * <pre> * -B &lt;BIF file&gt; * BIF file to compare with * </pre> * * <pre> * -Q weka.classifiers.bayes.net.search.SearchAlgorithm * Search algorithm * </pre> * * <pre> * -E weka.classifiers.bayes.net.estimate.SimpleEstimator * Estimator algorithm * </pre> * * <!-- options-end --> * * @author Remco Bouckaert (rrb@xm.co.nz) * @version $Revision$ */ public class BayesNet extends AbstractClassifier implements OptionHandler, WeightedInstancesHandler, Drawable, AdditionalMeasureProducer { /** for serialization */ static final long serialVersionUID = 746037443258775954L; /** * The parent sets. */ protected ParentSet[] m_ParentSets; /** * The attribute estimators containing CPTs. 
*/ public Estimator[][] m_Distributions; /** filter used to quantize continuous variables, if any **/ protected Discretize m_DiscretizeFilter = null; /** attribute index of a non-nominal attribute */ int m_nNonDiscreteAttribute = -1; /** filter used to fill in missing values, if any **/ protected ReplaceMissingValues m_MissingValuesFilter = null; /** * The number of classes */ protected int m_NumClasses; /** * The dataset header for the purposes of printing out a semi-intelligible model */ public Instances m_Instances; /** * The number of instances the model was built from */ private int m_NumInstances; /** * Datastructure containing ADTree representation of the database. This may result in more efficient access to the data. */ ADNode m_ADTree; /** * Bayes network to compare the structure with. */ protected BIFReader m_otherBayesNet = null; /** * Use the experimental ADTree datastructure for calculating contingency tables */ boolean m_bUseADTree = false; /** * Search algorithm used for learning the structure of a network. */ SearchAlgorithm m_SearchAlgorithm = new K2(); /** * Search algorithm used for learning the structure of a network. */ BayesNetEstimator m_BayesNetEstimator = new SimpleEstimator(); /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(0); return result; } /** * Generates the classifier. 
* * @param instances * set of instances serving as training data * @throws Exception * if the classifier has not been generated successfully */ @Override public void buildClassifier(Instances instances) throws Exception { // can classifier handle the data? this.getCapabilities().testWithFail(instances); // remove instances with missing class instances = new Instances(instances); instances.deleteWithMissingClass(); // ensure we have a data set with discrete variables only and with no // missing values instances = this.normalizeDataSet(instances); // Copy the instances this.m_Instances = new Instances(instances); this.m_NumInstances = this.m_Instances.numInstances(); // sanity check: need more than 1 variable in datat set this.m_NumClasses = instances.numClasses(); // initialize ADTree if (this.m_bUseADTree) { this.m_ADTree = ADNode.makeADTree(instances); } // build the network structure this.initStructure(); // build the network structure this.buildStructure(); // build the set of CPTs this.estimateCPTs(); // Save space this.m_Instances = new Instances(this.m_Instances, 0); this.m_ADTree = null; } // buildClassifier /** * Returns the number of instances the model was built from. 
*/ public int getNumInstances() { return this.m_NumInstances; } /** * ensure that all variables are nominal and that there are no missing values * * @param instances * data set to check and quantize and/or fill in missing values * @return filtered instances * @throws Exception * if a filter (Discretize, ReplaceMissingValues) fails */ protected Instances normalizeDataSet(Instances instances) throws Exception { this.m_nNonDiscreteAttribute = -1; Enumeration<Attribute> enu = instances.enumerateAttributes(); while (enu.hasMoreElements()) { Attribute attribute = enu.nextElement(); if (attribute.type() != Attribute.NOMINAL) { this.m_nNonDiscreteAttribute = attribute.index(); } } if ((this.m_nNonDiscreteAttribute > -1) && (instances.attribute(this.m_nNonDiscreteAttribute).type() != Attribute.NOMINAL)) { this.m_DiscretizeFilter = new Discretize(); this.m_DiscretizeFilter.setInputFormat(instances); instances = Filter.useFilter(instances, this.m_DiscretizeFilter); } this.m_MissingValuesFilter = new ReplaceMissingValues(); this.m_MissingValuesFilter.setInputFormat(instances); instances = Filter.useFilter(instances, this.m_MissingValuesFilter); return instances; } // normalizeDataSet /** * ensure that all variables are nominal and that there are no missing values * * @param instance * instance to check and quantize and/or fill in missing values * @return filtered instance * @throws Exception * if a filter (Discretize, ReplaceMissingValues) fails */ protected Instance normalizeInstance(Instance instance) throws Exception { if ((this.m_nNonDiscreteAttribute > -1) && (instance.attribute(this.m_nNonDiscreteAttribute).type() != Attribute.NOMINAL)) { this.m_DiscretizeFilter.input(instance); instance = this.m_DiscretizeFilter.output(); } this.m_MissingValuesFilter.input(instance); instance = this.m_MissingValuesFilter.output(); return instance; } // normalizeInstance /** * Init structure initializes the structure to an empty graph or a Naive Bayes graph (depending on the -N flag). 
* * @throws Exception * in case of an error */ public void initStructure() throws Exception { // initialize topological ordering // m_nOrder = new int[m_Instances.numAttributes()]; // m_nOrder[0] = m_Instances.classIndex(); int nAttribute = 0; for (int iOrder = 1; iOrder < this.m_Instances.numAttributes(); iOrder++) { if (nAttribute == this.m_Instances.classIndex()) { nAttribute++; } // m_nOrder[iOrder] = nAttribute++; } // reserve memory this.m_ParentSets = new ParentSet[this.m_Instances.numAttributes()]; for (int iAttribute = 0; iAttribute < this.m_Instances.numAttributes(); iAttribute++) { this.m_ParentSets[iAttribute] = new ParentSet(this.m_Instances.numAttributes()); } } // initStructure /** * buildStructure determines the network structure/graph of the network. The default behavior is creating a network where all nodes have the first node as its parent (i.e., a BayesNet that behaves like a naive Bayes classifier). This * method can be overridden by derived classes to restrict the class of network structures that are acceptable. * * @throws Exception * in case of an error */ public void buildStructure() throws Exception { this.m_SearchAlgorithm.buildStructure(this, this.m_Instances); } // buildStructure /** * estimateCPTs estimates the conditional probability tables for the Bayes Net using the network structure. * * @throws Exception * in case of an error */ public void estimateCPTs() throws Exception { this.m_BayesNetEstimator.estimateCPTs(this); } // estimateCPTs /** * initializes the conditional probabilities * * @throws Exception * in case of an error */ public void initCPTs() throws Exception { this.m_BayesNetEstimator.initCPTs(this); } // estimateCPTs /** * Updates the classifier with the given instance. * * @param instance * the new training instance to include in the model * @throws Exception * if the instance could not be incorporated in the model. 
*/ public void updateClassifier(Instance instance) throws Exception { instance = this.normalizeInstance(instance); this.m_BayesNetEstimator.updateClassifier(this, instance); } // updateClassifier /** * Calculates the class membership probabilities for the given test instance. * * @param instance * the instance to be classified * @return predicted class probability distribution * @throws Exception * if there is a problem generating the prediction */ @Override public double[] distributionForInstance(Instance instance) throws Exception { instance = this.normalizeInstance(instance); return this.m_BayesNetEstimator.distributionForInstance(this, instance); } // distributionForInstance /** * Calculates the counts for Dirichlet distribution for the class membership probabilities for the given test instance. * * @param instance * the instance to be classified * @return counts for Dirichlet distribution for class probability * @throws Exception * if there is a problem generating the prediction */ public double[] countsForInstance(final Instance instance) throws Exception { double[] fCounts = new double[this.m_NumClasses]; for (int iClass = 0; iClass < this.m_NumClasses; iClass++) { fCounts[iClass] = 0.0; } for (int iClass = 0; iClass < this.m_NumClasses; iClass++) { double fCount = 0; for (int iAttribute = 0; iAttribute < this.m_Instances.numAttributes(); iAttribute++) { double iCPT = 0; for (int iParent = 0; iParent < this.m_ParentSets[iAttribute].getNrOfParents(); iParent++) { int nParent = this.m_ParentSets[iAttribute].getParent(iParent); if (nParent == this.m_Instances.classIndex()) { iCPT = iCPT * this.m_NumClasses + iClass; } else { iCPT = iCPT * this.m_Instances.attribute(nParent).numValues() + instance.value(nParent); } } if (iAttribute == this.m_Instances.classIndex()) { fCount += ((DiscreteEstimatorBayes) this.m_Distributions[iAttribute][(int) iCPT]).getCount(iClass); } else { fCount += ((DiscreteEstimatorBayes) this.m_Distributions[iAttribute][(int) 
iCPT]).getCount(instance.value(iAttribute)); } } fCounts[iClass] += fCount; } return fCounts; } // countsForInstance /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<Option>(4); newVector.addElement(new Option("\tDo not use ADTree data structure\n", "D", 0, "-D")); newVector.addElement(new Option("\tBIF file to compare with\n", "B", 1, "-B <BIF file>")); newVector.addElement(new Option("\tSearch algorithm\n", "Q", 1, "-Q weka.classifiers.bayes.net.search.SearchAlgorithm")); newVector.addElement(new Option("\tEstimator algorithm\n", "E", 1, "-E weka.classifiers.bayes.net.estimate.SimpleEstimator")); newVector.addAll(Collections.list(super.listOptions())); newVector.addElement(new Option("", "", 0, "\nOptions specific to search method " + this.getSearchAlgorithm().getClass().getName() + ":")); newVector.addAll(Collections.list(this.getSearchAlgorithm().listOptions())); newVector.addElement(new Option("", "", 0, "\nOptions specific to estimator method " + this.getEstimator().getClass().getName() + ":")); newVector.addAll(Collections.list(this.getEstimator().listOptions())); return newVector.elements(); } // listOptions /** * Parses a given list of options. 
* <p> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -D * Do not use ADTree data structure * </pre> * * <pre> * -B &lt;BIF file&gt; * BIF file to compare with * </pre> * * <pre> * -Q weka.classifiers.bayes.net.search.SearchAlgorithm * Search algorithm * </pre> * * <pre> * -E weka.classifiers.bayes.net.estimate.SimpleEstimator * Estimator algorithm * </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { super.setOptions(options); this.m_bUseADTree = !(Utils.getFlag('D', options)); String sBIFFile = Utils.getOption('B', options); if (sBIFFile != null && !sBIFFile.equals("")) { this.setBIFFile(sBIFFile); } String searchAlgorithmName = Utils.getOption('Q', options); if (searchAlgorithmName.length() != 0) { this.setSearchAlgorithm((SearchAlgorithm) Utils.forName(SearchAlgorithm.class, searchAlgorithmName, partitionOptions(options))); } else { this.setSearchAlgorithm(new K2()); } String estimatorName = Utils.getOption('E', options); if (estimatorName.length() != 0) { this.setEstimator((BayesNetEstimator) Utils.forName(BayesNetEstimator.class, estimatorName, Utils.partitionOptions(options))); } else { this.setEstimator(new SimpleEstimator()); } Utils.checkForRemainingOptions(options); } // setOptions /** * Returns the secondary set of options (if any) contained in the supplied options array. The secondary set is defined to be any options after the first "--" but before the "-E". These options are removed from the original options * array. 
* * @param options * the input array of options * @return the array of secondary options */ public static String[] partitionOptions(final String[] options) { for (int i = 0; i < options.length; i++) { if (options[i].equals("--")) { // ensure it follows by a -E option int j = i; while ((j < options.length) && !(options[j].equals("-E"))) { j++; } /* * if (j >= options.length) { return new String[0]; } */ options[i++] = ""; String[] result = new String[options.length - i]; j = i; while ((j < options.length) && !(options[j].equals("-E"))) { result[j - i] = options[j]; options[j] = ""; j++; } while (j < options.length) { result[j - i] = ""; j++; } return result; } } return new String[0]; } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<String>(); Collections.addAll(options, super.getOptions()); if (!this.m_bUseADTree) { options.add("-D"); } if (this.m_otherBayesNet != null) { options.add("-B"); options.add(this.m_otherBayesNet.getFileName()); } options.add("-Q"); options.add("" + this.getSearchAlgorithm().getClass().getName()); options.add("--"); Collections.addAll(options, this.getSearchAlgorithm().getOptions()); options.add("-E"); options.add("" + this.getEstimator().getClass().getName()); options.add("--"); Collections.addAll(options, this.getEstimator().getOptions()); return options.toArray(new String[0]); } // getOptions /** * Set the SearchAlgorithm used in searching for network structures. * * @param newSearchAlgorithm * the SearchAlgorithm to use. 
*/ public void setSearchAlgorithm(final SearchAlgorithm newSearchAlgorithm) { this.m_SearchAlgorithm = newSearchAlgorithm; } /** * Get the SearchAlgorithm used as the search algorithm * * @return the SearchAlgorithm used as the search algorithm */ public SearchAlgorithm getSearchAlgorithm() { return this.m_SearchAlgorithm; } /** * Set the Estimator Algorithm used in calculating the CPTs * * @param newBayesNetEstimator * the Estimator to use. */ public void setEstimator(final BayesNetEstimator newBayesNetEstimator) { this.m_BayesNetEstimator = newBayesNetEstimator; } /** * Get the BayesNetEstimator used for calculating the CPTs * * @return the BayesNetEstimator used. */ public BayesNetEstimator getEstimator() { return this.m_BayesNetEstimator; } /** * Set whether ADTree structure is used or not * * @param bUseADTree * true if an ADTree structure is used */ public void setUseADTree(final boolean bUseADTree) { this.m_bUseADTree = bUseADTree; } /** * Method declaration * * @return whether ADTree structure is used or not */ public boolean getUseADTree() { return this.m_bUseADTree; } /** * Set name of network in BIF file to compare with * * @param sBIFFile * the name of the BIF file */ public void setBIFFile(final String sBIFFile) { try { this.m_otherBayesNet = new BIFReader().processFile(sBIFFile); } catch (Throwable t) { this.m_otherBayesNet = null; } } /** * Get name of network in BIF file to compare with * * @return BIF file name */ public String getBIFFile() { if (this.m_otherBayesNet != null) { return this.m_otherBayesNet.getFileName(); } return ""; } /** * Returns a description of the classifier. * * @return a description of the classifier as a string. */ @Override public String toString() { StringBuffer text = new StringBuffer(); text.append("Bayes Network Classifier"); text.append("\n" + (this.m_bUseADTree ? 
"Using " : "not using ") + "ADTree"); if (this.m_Instances == null) { text.append(": No model built yet."); } else { // flatten BayesNet down to text text.append("\n#attributes="); text.append(this.m_Instances.numAttributes()); text.append(" #classindex="); text.append(this.m_Instances.classIndex()); text.append("\nNetwork structure (nodes followed by parents)\n"); for (int iAttribute = 0; iAttribute < this.m_Instances.numAttributes(); iAttribute++) { text.append(this.m_Instances.attribute(iAttribute).name() + "(" + this.m_Instances.attribute(iAttribute).numValues() + "): "); for (int iParent = 0; iParent < this.m_ParentSets[iAttribute].getNrOfParents(); iParent++) { text.append(this.m_Instances.attribute(this.m_ParentSets[iAttribute].getParent(iParent)).name() + " "); } text.append("\n"); // Description of distributions tends to be too much detail, so it is // commented out here // for (int iParent = 0; iParent < // m_ParentSets[iAttribute].GetCardinalityOfParents(); iParent++) { // text.append('(' + m_Distributions[iAttribute][iParent].toString() + // ')'); // } // text.append("\n"); } text.append("LogScore Bayes: " + this.measureBayesScore() + "\n"); text.append("LogScore BDeu: " + this.measureBDeuScore() + "\n"); text.append("LogScore MDL: " + this.measureMDLScore() + "\n"); text.append("LogScore ENTROPY: " + this.measureEntropyScore() + "\n"); text.append("LogScore AIC: " + this.measureAICScore() + "\n"); if (this.m_otherBayesNet != null) { text.append("Missing: " + this.m_otherBayesNet.missingArcs(this) + " Extra: " + this.m_otherBayesNet.extraArcs(this) + " Reversed: " + this.m_otherBayesNet.reversedArcs(this) + "\n"); text.append("Divergence: " + this.m_otherBayesNet.divergence(this) + "\n"); } } return text.toString(); } // toString /** * Returns the type of graph this classifier represents. * * @return Drawable.TREE */ @Override public int graphType() { return Drawable.BayesNet; } /** * Returns a BayesNet graph in XMLBIF ver 0.3 format. 
* * @return String representing this BayesNet in XMLBIF ver 0.3 * @throws Exception * in case BIF generation fails */ @Override public String graph() throws Exception { return this.toXMLBIF03(); } public String getBIFHeader() { StringBuffer text = new StringBuffer(); text.append("<?xml version=\"1.0\"?>\n"); text.append("<!-- DTD for the XMLBIF 0.3 format -->\n"); text.append("<!DOCTYPE BIF [\n"); text.append(" <!ELEMENT BIF ( NETWORK )*>\n"); text.append(" <!ATTLIST BIF VERSION CDATA #REQUIRED>\n"); text.append(" <!ELEMENT NETWORK ( NAME, ( PROPERTY | VARIABLE | DEFINITION )* )>\n"); text.append(" <!ELEMENT NAME (#PCDATA)>\n"); text.append(" <!ELEMENT VARIABLE ( NAME, ( OUTCOME | PROPERTY )* ) >\n"); text.append(" <!ATTLIST VARIABLE TYPE (nature|decision|utility) \"nature\">\n"); text.append(" <!ELEMENT OUTCOME (#PCDATA)>\n"); text.append(" <!ELEMENT DEFINITION ( FOR | GIVEN | TABLE | PROPERTY )* >\n"); text.append(" <!ELEMENT FOR (#PCDATA)>\n"); text.append(" <!ELEMENT GIVEN (#PCDATA)>\n"); text.append(" <!ELEMENT TABLE (#PCDATA)>\n"); text.append(" <!ELEMENT PROPERTY (#PCDATA)>\n"); text.append("]>\n"); return text.toString(); } // getBIFHeader /** * Returns a description of the classifier in XML BIF 0.3 format. See http://www-2.cs.cmu.edu/~fgcozman/Research/InterchangeFormat/ for details on XML BIF. * * @return an XML BIF 0.3 description of the classifier as a string. 
*/ public String toXMLBIF03() { if (this.m_Instances == null) { return ("<!--No model built yet-->"); } StringBuffer text = new StringBuffer(); text.append(this.getBIFHeader()); text.append("\n"); text.append("\n"); text.append("<BIF VERSION=\"0.3\">\n"); text.append("<NETWORK>\n"); text.append("<NAME>" + this.XMLNormalize(Utils.quote(this.m_Instances.relationName())) + "</NAME>\n"); for (int iAttribute = 0; iAttribute < this.m_Instances.numAttributes(); iAttribute++) { text.append("<VARIABLE TYPE=\"nature\">\n"); text.append("<NAME>" + this.XMLNormalize(Utils.quote(this.m_Instances.attribute(iAttribute).name())) + "</NAME>\n"); for (int iValue = 0; iValue < this.m_Instances.attribute(iAttribute).numValues(); iValue++) { text.append("<OUTCOME>" + this.XMLNormalize(Utils.quote(this.m_Instances.attribute(iAttribute).value(iValue))) + "</OUTCOME>\n"); } text.append("</VARIABLE>\n"); } for (int iAttribute = 0; iAttribute < this.m_Instances.numAttributes(); iAttribute++) { text.append("<DEFINITION>\n"); text.append("<FOR>" + this.XMLNormalize(Utils.quote(this.m_Instances.attribute(iAttribute).name())) + "</FOR>\n"); for (int iParent = 0; iParent < this.m_ParentSets[iAttribute].getNrOfParents(); iParent++) { text.append("<GIVEN>" + this.XMLNormalize(Utils.quote(this.m_Instances.attribute(this.m_ParentSets[iAttribute].getParent(iParent)).name())) + "</GIVEN>\n"); } text.append("<TABLE>\n"); for (int iParent = 0; iParent < this.m_ParentSets[iAttribute].getCardinalityOfParents(); iParent++) { for (int iValue = 0; iValue < this.m_Instances.attribute(iAttribute).numValues(); iValue++) { text.append(this.m_Distributions[iAttribute][iParent].getProbability(iValue)); text.append(' '); } text.append('\n'); } text.append("</TABLE>\n"); text.append("</DEFINITION>\n"); } text.append("</NETWORK>\n"); text.append("</BIF>\n"); return text.toString(); } // toXMLBIF03 /** * XMLNormalize converts the five standard XML entities in a string g.e. 
the string V&D's is returned as V&amp;D&apos;s * * @param sStr * string to normalize * @return normalized string */ protected String XMLNormalize(final String sStr) { StringBuffer sStr2 = new StringBuffer(); for (int iStr = 0; iStr < sStr.length(); iStr++) { char c = sStr.charAt(iStr); switch (c) { case '&': sStr2.append("&amp;"); break; case '\'': sStr2.append("&apos;"); break; case '\"': sStr2.append("&quot;"); break; case '<': sStr2.append("&lt;"); break; case '>': sStr2.append("&gt;"); break; default: sStr2.append(c); } } return sStr2.toString(); } // XMLNormalize /** * @return a string to describe the UseADTreeoption. */ public String useADTreeTipText() { return "When ADTree (the data structure for increasing speed on counts," + " not to be confused with the classifier under the same name) is used" + " learning time goes down typically. However, because ADTrees are memory" + " intensive, memory problems may occur. Switching this option off makes" + " the structure learning algorithms slower, and run with less memory." + " By default, ADTrees are used."; } /** * @return a string to describe the SearchAlgorithm. */ public String searchAlgorithmTipText() { return "Select method used for searching network structures."; } /** * This will return a string describing the BayesNetEstimator. * * @return The string. */ public String estimatorTipText() { return "Select Estimator algorithm for finding the conditional probability tables" + " of the Bayes Network."; } /** * @return a string to describe the BIFFile. */ public String BIFFileTipText() { return "Set the name of a file in BIF XML format. A Bayes network learned" + " from data can be compared with the Bayes network represented by the BIF file." + " Statistics calculated are o.a. the number of missing and extra arcs."; } /** * This will return a string describing the classifier. * * @return The string. 
*/ public String globalInfo() { return "Bayes Network learning using various search algorithms and " + "quality measures.\n" + "Base class for a Bayes Network classifier. Provides " + "datastructures (network structure, conditional probability " + "distributions, etc.) and facilities common to Bayes Network " + "learning algorithms like K2 and B.\n\n" + "For more information see:\n\n" + "http://www.cs.waikato.ac.nz/~remco/weka.pdf"; } /** * Main method for testing this class. * * @param argv * the options */ public static void main(final String[] argv) { runClassifier(new BayesNet(), argv); } // main /** * get name of the Bayes network * * @return name of the Bayes net */ public String getName() { return this.m_Instances.relationName(); } /** * get number of nodes in the Bayes network * * @return number of nodes */ public int getNrOfNodes() { return this.m_Instances.numAttributes(); } /** * get name of a node in the Bayes network * * @param iNode * index of the node * @return name of the specified node */ public String getNodeName(final int iNode) { return this.m_Instances.attribute(iNode).name(); } /** * get number of values a node can take * * @param iNode * index of the node * @return cardinality of the specified node */ public int getCardinality(final int iNode) { return this.m_Instances.attribute(iNode).numValues(); } /** * get name of a particular value of a node * * @param iNode * index of the node * @param iValue * index of the value * @return cardinality of the specified node */ public String getNodeValue(final int iNode, final int iValue) { return this.m_Instances.attribute(iNode).value(iValue); } /** * get number of parents of a node in the network structure * * @param iNode * index of the node * @return number of parents of the specified node */ public int getNrOfParents(final int iNode) { return this.m_ParentSets[iNode].getNrOfParents(); } /** * get node index of a parent of a node in the network structure * * @param iNode * index of the node * @param 
iParent * index of the parents, e.g., 0 is the first parent, 1 the second parent, etc. * @return node index of the iParent's parent of the specified node */ public int getParent(final int iNode, final int iParent) { return this.m_ParentSets[iNode].getParent(iParent); } /** * Get full set of parent sets. * * @return parent sets; */ public ParentSet[] getParentSets() { return this.m_ParentSets; } /** * Get full set of estimators. * * @return estimators; */ public Estimator[][] getDistributions() { return this.m_Distributions; } /** * get number of values the collection of parents of a node can take * * @param iNode * index of the node * @return cardinality of the parent set of the specified node */ public int getParentCardinality(final int iNode) { return this.m_ParentSets[iNode].getCardinalityOfParents(); } /** * get particular probability of the conditional probability distribtion of a node given its parents. * * @param iNode * index of the node * @param iParent * index of the parent set, 0 <= iParent <= getParentCardinality(iNode) * @param iValue * index of the value, 0 <= iValue <= getCardinality(iNode) * @return probability */ public double getProbability(final int iNode, final int iParent, final int iValue) { return this.m_Distributions[iNode][iParent].getProbability(iValue); } /** * get the parent set of a node * * @param iNode * index of the node * @return Parent set of the specified node. */ public ParentSet getParentSet(final int iNode) { return this.m_ParentSets[iNode]; } /** * get ADTree strucrture containing efficient representation of counts. * * @return ADTree strucrture */ public ADNode getADTree() { return this.m_ADTree; } // implementation of AdditionalMeasureProducer interface /** * Returns an enumeration of the measure names. Additional measures must follow the naming convention of starting with "measure", eg. 
double measureBlah() * * @return an enumeration of the measure names */ @Override public Enumeration<String> enumerateMeasures() { Vector<String> newVector = new Vector<String>(4); newVector.addElement("measureExtraArcs"); newVector.addElement("measureMissingArcs"); newVector.addElement("measureReversedArcs"); newVector.addElement("measureDivergence"); newVector.addElement("measureBayesScore"); newVector.addElement("measureBDeuScore"); newVector.addElement("measureMDLScore"); newVector.addElement("measureAICScore"); newVector.addElement("measureEntropyScore"); return newVector.elements(); } // enumerateMeasures public double measureExtraArcs() { if (this.m_otherBayesNet != null) { return this.m_otherBayesNet.extraArcs(this); } return 0; } // measureExtraArcs public double measureMissingArcs() { if (this.m_otherBayesNet != null) { return this.m_otherBayesNet.missingArcs(this); } return 0; } // measureMissingArcs public double measureReversedArcs() { if (this.m_otherBayesNet != null) { return this.m_otherBayesNet.reversedArcs(this); } return 0; } // measureReversedArcs public double measureDivergence() { if (this.m_otherBayesNet != null) { return this.m_otherBayesNet.divergence(this); } return 0; } // measureDivergence public double measureBayesScore() { try { LocalScoreSearchAlgorithm s = new LocalScoreSearchAlgorithm(this, this.m_Instances); return s.logScore(Scoreable.BAYES); } catch (ArithmeticException ex) { return Double.NaN; } } // measureBayesScore public double measureBDeuScore() { try { LocalScoreSearchAlgorithm s = new LocalScoreSearchAlgorithm(this, this.m_Instances); return s.logScore(Scoreable.BDeu); } catch (ArithmeticException ex) { return Double.NaN; } } // measureBDeuScore public double measureMDLScore() { try { LocalScoreSearchAlgorithm s = new LocalScoreSearchAlgorithm(this, this.m_Instances); return s.logScore(Scoreable.MDL); } catch (ArithmeticException ex) { return Double.NaN; } } // measureMDLScore public double measureAICScore() { try { 
LocalScoreSearchAlgorithm s = new LocalScoreSearchAlgorithm(this, this.m_Instances); return s.logScore(Scoreable.AIC); } catch (ArithmeticException ex) { return Double.NaN; } } // measureAICScore public double measureEntropyScore() { try { LocalScoreSearchAlgorithm s = new LocalScoreSearchAlgorithm(this, this.m_Instances); return s.logScore(Scoreable.ENTROPY); } catch (ArithmeticException ex) { return Double.NaN; } } // measureEntropyScore /** * Returns the value of the named measure * * @param measureName * the name of the measure to query for its value * @return the value of the named measure * @throws IllegalArgumentException * if the named measure is not supported */ @Override public double getMeasure(final String measureName) { if (measureName.equals("measureExtraArcs")) { return this.measureExtraArcs(); } if (measureName.equals("measureMissingArcs")) { return this.measureMissingArcs(); } if (measureName.equals("measureReversedArcs")) { return this.measureReversedArcs(); } if (measureName.equals("measureDivergence")) { return this.measureDivergence(); } if (measureName.equals("measureBayesScore")) { return this.measureBayesScore(); } if (measureName.equals("measureBDeuScore")) { return this.measureBDeuScore(); } if (measureName.equals("measureMDLScore")) { return this.measureMDLScore(); } if (measureName.equals("measureAICScore")) { return this.measureAICScore(); } if (measureName.equals("measureEntropyScore")) { return this.measureEntropyScore(); } return 0; } // getMeasure /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } // class BayesNet
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/NaiveBayes.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * NaiveBayes.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.core.Aggregateable; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedAttributesHandler; import weka.core.WeightedInstancesHandler; import weka.estimators.DiscreteEstimator; import weka.estimators.Estimator; import weka.estimators.KernelEstimator; import weka.estimators.NormalEstimator; /** * <!-- globalinfo-start --> Class for a Naive Bayes classifier using estimator classes. Numeric estimator precision values are chosen based on analysis of the training data. 
For this reason, the classifier is not an UpdateableClassifier * (which in typical usage are initialized with zero training instances) -- if you need the UpdateableClassifier functionality, use the NaiveBayesUpdateable classifier. The NaiveBayesUpdateable classifier will use a default precision of 0.1 * for numeric attributes when buildClassifier is called with zero training instances.<br/> * <br/> * For more information on Naive Bayes classifiers, see<br/> * <br/> * George H. John, Pat Langley: Estimating Continuous Distributions in Bayesian Classifiers. In: Eleventh Conference on Uncertainty in Artificial Intelligence, San Mateo, 338-345, 1995. * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;inproceedings{John1995, * address = {San Mateo}, * author = {George H. John and Pat Langley}, * booktitle = {Eleventh Conference on Uncertainty in Artificial Intelligence}, * pages = {338-345}, * publisher = {Morgan Kaufmann}, * title = {Estimating Continuous Distributions in Bayesian Classifiers}, * year = {1995} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -K * Use kernel density estimator rather than normal * distribution for numeric attributes * </pre> * * <pre> * -D * Use supervised discretization to process numeric attributes * </pre> * * <pre> * -O * Display model in old format (good when there are many classes) * </pre> * * <!-- options-end --> * * @author Len Trigg (trigg@cs.waikato.ac.nz) * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision$ */ public class NaiveBayes extends AbstractClassifier implements OptionHandler, WeightedInstancesHandler, WeightedAttributesHandler, TechnicalInformationHandler, Aggregateable<NaiveBayes> { /** for serialization */ static final long serialVersionUID = 5995231201785697655L; /** The attribute estimators. */ protected Estimator[][] m_Distributions; /** The class estimator. 
*/ protected Estimator m_ClassDistribution; /** * Whether to use kernel density estimator rather than normal distribution for numeric attributes */ protected boolean m_UseKernelEstimator = false; /** * Whether to use discretization than normal distribution for numeric attributes */ protected boolean m_UseDiscretization = false; /** The number of classes (or 1 for numeric class) */ protected int m_NumClasses; /** * The dataset header for the purposes of printing out a semi-intelligible model */ protected Instances m_Instances; /*** The precision parameter used for numeric attributes */ protected static final double DEFAULT_NUM_PRECISION = 0.01; /** * The discretization filter. */ protected weka.filters.supervised.attribute.Discretize m_Disc = null; protected boolean m_displayModelInOldFormat = false; /** * Returns a string describing this classifier * * @return a description of the classifier suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for a Naive Bayes classifier using estimator classes. Numeric" + " estimator precision values are chosen based on analysis of the " + " training data. For this reason, the classifier is not an" + " UpdateableClassifier (which in typical usage are initialized with zero" + " training instances) -- if you need the UpdateableClassifier functionality," + " use the NaiveBayesUpdateable classifier. The NaiveBayesUpdateable" + " classifier will use a default precision of 0.1 for numeric attributes" + " when buildClassifier is called with zero training instances.\n\n" + "For more information on Naive Bayes classifiers, see\n\n" + this.getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed information about the technical background of this class, e.g., paper reference or book this class is based on. 
* * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "George H. John and Pat Langley"); result.setValue(Field.TITLE, "Estimating Continuous Distributions in Bayesian Classifiers"); result.setValue(Field.BOOKTITLE, "Eleventh Conference on Uncertainty in Artificial Intelligence"); result.setValue(Field.YEAR, "1995"); result.setValue(Field.PAGES, "338-345"); result.setValue(Field.PUBLISHER, "Morgan Kaufmann"); result.setValue(Field.ADDRESS, "San Mateo"); return result; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(0); return result; } /** * Generates the classifier. * * @param instances * set of instances serving as training data * @exception Exception * if the classifier has not been generated successfully */ @Override public void buildClassifier(Instances instances) throws Exception { // can classifier handle the data? 
this.getCapabilities().testWithFail(instances); // remove instances with missing class instances = new Instances(instances); instances.deleteWithMissingClass(); this.m_NumClasses = instances.numClasses(); // Copy the instances this.m_Instances = new Instances(instances); // Discretize instances if required if (this.m_UseDiscretization) { this.m_Disc = new weka.filters.supervised.attribute.Discretize(); this.m_Disc.setInputFormat(this.m_Instances); this.m_Instances = weka.filters.Filter.useFilter(this.m_Instances, this.m_Disc); } else { this.m_Disc = null; } // Reserve space for the distributions this.m_Distributions = new Estimator[this.m_Instances.numAttributes() - 1][this.m_Instances.numClasses()]; this.m_ClassDistribution = new DiscreteEstimator(this.m_Instances.numClasses(), true); int attIndex = 0; Enumeration<Attribute> enu = this.m_Instances.enumerateAttributes(); while (enu.hasMoreElements()) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } Attribute attribute = enu.nextElement(); // If the attribute is numeric, determine the estimator // numeric precision from differences between adjacent values double numPrecision = DEFAULT_NUM_PRECISION; if (attribute.type() == Attribute.NUMERIC) { this.m_Instances.sort(attribute); if ((this.m_Instances.numInstances() > 0) && !this.m_Instances.instance(0).isMissing(attribute)) { double lastVal = this.m_Instances.instance(0).value(attribute); double currentVal, deltaSum = 0; int distinct = 0; for (int i = 1; i < this.m_Instances.numInstances(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } Instance currentInst = this.m_Instances.instance(i); if (currentInst.isMissing(attribute)) { break; } currentVal = currentInst.value(attribute); if (currentVal != lastVal) { deltaSum += currentVal - lastVal; lastVal = currentVal; distinct++; } } if (distinct > 
0) { numPrecision = deltaSum / distinct; } } } for (int j = 0; j < this.m_Instances.numClasses(); j++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } switch (attribute.type()) { case Attribute.NUMERIC: if (this.m_UseKernelEstimator) { this.m_Distributions[attIndex][j] = new KernelEstimator(numPrecision); } else { this.m_Distributions[attIndex][j] = new NormalEstimator(numPrecision); } break; case Attribute.NOMINAL: this.m_Distributions[attIndex][j] = new DiscreteEstimator(attribute.numValues(), true); break; default: throw new Exception("Attribute type unknown to NaiveBayes"); } } attIndex++; } // Compute counts Enumeration<Instance> enumInsts = this.m_Instances.enumerateInstances(); while (enumInsts.hasMoreElements()) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } Instance instance = enumInsts.nextElement(); this.updateClassifier(instance); } // Save space this.m_Instances = new Instances(this.m_Instances, 0); } /** * Updates the classifier with the given instance. * * @param instance * the new training instance to include in the model * @exception Exception * if the instance could not be incorporated in the model. 
*/ public void updateClassifier(final Instance instance) throws Exception { if (!instance.classIsMissing()) { Enumeration<Attribute> enumAtts = this.m_Instances.enumerateAttributes(); int attIndex = 0; while (enumAtts.hasMoreElements()) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } Attribute attribute = enumAtts.nextElement(); if (!instance.isMissing(attribute)) { this.m_Distributions[attIndex][(int) instance.classValue()].addValue(instance.value(attribute), instance.weight()); } attIndex++; } this.m_ClassDistribution.addValue(instance.classValue(), instance.weight()); } } /** * Calculates the class membership probabilities for the given test instance. * * @param instance * the instance to be classified * @return predicted class probability distribution * @exception Exception * if there is a problem generating the prediction */ @Override public double[] distributionForInstance(Instance instance) throws Exception { if (this.m_UseDiscretization) { this.m_Disc.input(instance); instance = this.m_Disc.output(); } double[] probs = new double[this.m_NumClasses]; for (int j = 0; j < this.m_NumClasses; j++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } probs[j] = this.m_ClassDistribution.getProbability(j); } Enumeration<Attribute> enumAtts = instance.enumerateAttributes(); int attIndex = 0; while (enumAtts.hasMoreElements()) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } Attribute attribute = enumAtts.nextElement(); if (!instance.isMissing(attribute)) { double temp, max = 0; for (int j = 0; j < this.m_NumClasses; j++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } temp = Math.max(1e-75, 
Math.pow(this.m_Distributions[attIndex][j].getProbability(instance.value(attribute)), this.m_Instances.attribute(attIndex).weight())); probs[j] *= temp; if (probs[j] > max) { max = probs[j]; } if (Double.isNaN(probs[j])) { throw new Exception("NaN returned from estimator for attribute " + attribute.name() + ":\n" + this.m_Distributions[attIndex][j].toString()); } } if ((max > 0) && (max < 1e-75)) { // Danger of probability underflow for (int j = 0; j < this.m_NumClasses; j++) { probs[j] *= 1e75; } } } attIndex++; } // Display probabilities Utils.normalize(probs); return probs; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<>(3); newVector.addElement(new Option("\tUse kernel density estimator rather than normal\n" + "\tdistribution for numeric attributes", "K", 0, "-K")); newVector.addElement(new Option("\tUse supervised discretization to process numeric attributes\n", "D", 0, "-D")); newVector.addElement(new Option("\tDisplay model in old format (good when there are " + "many classes)\n", "O", 0, "-O")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. 
* <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -K * Use kernel density estimator rather than normal * distribution for numeric attributes * </pre> * * <pre> * -D * Use supervised discretization to process numeric attributes * </pre> * * <pre> * -O * Display model in old format (good when there are many classes) * </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @exception Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { super.setOptions(options); boolean k = Utils.getFlag('K', options); boolean d = Utils.getFlag('D', options); if (k && d) { throw new IllegalArgumentException("Can't use both kernel density " + "estimation and discretization!"); } this.setUseSupervisedDiscretization(d); this.setUseKernelEstimator(k); this.setDisplayModelInOldFormat(Utils.getFlag('O', options)); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<>(); Collections.addAll(options, super.getOptions()); if (this.m_UseKernelEstimator) { options.add("-K"); } if (this.m_UseDiscretization) { options.add("-D"); } if (this.m_displayModelInOldFormat) { options.add("-O"); } return options.toArray(new String[0]); } /** * Returns a description of the classifier. * * @return a description of the classifier as a string. 
*/ @Override public String toString() { if (this.m_displayModelInOldFormat) { return this.toStringOriginal(); } StringBuffer temp = new StringBuffer(); temp.append("Naive Bayes Classifier"); if (this.m_Instances == null) { temp.append(": No model built yet."); } else { int maxWidth = 0; int maxAttWidth = 0; boolean containsKernel = false; // set up max widths // class values for (int i = 0; i < this.m_Instances.numClasses(); i++) { if (this.m_Instances.classAttribute().value(i).length() > maxWidth) { maxWidth = this.m_Instances.classAttribute().value(i).length(); } } // attributes for (int i = 0; i < this.m_Instances.numAttributes(); i++) { if (i != this.m_Instances.classIndex()) { Attribute a = this.m_Instances.attribute(i); if (a.name().length() > maxAttWidth) { maxAttWidth = this.m_Instances.attribute(i).name().length(); } if (a.isNominal()) { // check values for (int j = 0; j < a.numValues(); j++) { String val = a.value(j) + " "; if (val.length() > maxAttWidth) { maxAttWidth = val.length(); } } } } } for (Estimator[] m_Distribution : this.m_Distributions) { for (int j = 0; j < this.m_Instances.numClasses(); j++) { if (m_Distribution[0] instanceof NormalEstimator) { // check mean/precision dev against maxWidth NormalEstimator n = (NormalEstimator) m_Distribution[j]; double mean = Math.log(Math.abs(n.getMean())) / Math.log(10.0); double precision = Math.log(Math.abs(n.getPrecision())) / Math.log(10.0); double width = (mean > precision) ? 
mean : precision; if (width < 0) { width = 1; } // decimal + # decimal places + 1 width += 6.0; if ((int) width > maxWidth) { maxWidth = (int) width; } } else if (m_Distribution[0] instanceof KernelEstimator) { containsKernel = true; KernelEstimator ke = (KernelEstimator) m_Distribution[j]; int numK = ke.getNumKernels(); String temps = "K" + numK + ": mean (weight)"; if (maxAttWidth < temps.length()) { maxAttWidth = temps.length(); } // check means + weights against maxWidth if (ke.getNumKernels() > 0) { double[] means = ke.getMeans(); double[] weights = ke.getWeights(); for (int k = 0; k < ke.getNumKernels(); k++) { String m = Utils.doubleToString(means[k], maxWidth, 4).trim(); m += " (" + Utils.doubleToString(weights[k], maxWidth, 1).trim() + ")"; if (maxWidth < m.length()) { maxWidth = m.length(); } } } } else if (m_Distribution[0] instanceof DiscreteEstimator) { DiscreteEstimator d = (DiscreteEstimator) m_Distribution[j]; for (int k = 0; k < d.getNumSymbols(); k++) { String size = "" + d.getCount(k); if (size.length() > maxWidth) { maxWidth = size.length(); } } int sum = ("" + d.getSumOfCounts()).length(); if (sum > maxWidth) { maxWidth = sum; } } } } // Check width of class labels for (int i = 0; i < this.m_Instances.numClasses(); i++) { String cSize = this.m_Instances.classAttribute().value(i); if (cSize.length() > maxWidth) { maxWidth = cSize.length(); } } // Check width of class priors for (int i = 0; i < this.m_Instances.numClasses(); i++) { String priorP = Utils.doubleToString(((DiscreteEstimator) this.m_ClassDistribution).getProbability(i), maxWidth, 2).trim(); priorP = "(" + priorP + ")"; if (priorP.length() > maxWidth) { maxWidth = priorP.length(); } } if (maxAttWidth < "Attribute".length()) { maxAttWidth = "Attribute".length(); } if (maxAttWidth < " weight sum".length()) { maxAttWidth = " weight sum".length(); } if (containsKernel) { if (maxAttWidth < " [precision]".length()) { maxAttWidth = " [precision]".length(); } } maxAttWidth += 2; 
temp.append("\n\n"); temp.append(this.pad("Class", " ", (maxAttWidth + maxWidth + 1) - "Class".length(), true)); temp.append("\n"); temp.append(this.pad("Attribute", " ", maxAttWidth - "Attribute".length(), false)); // class labels for (int i = 0; i < this.m_Instances.numClasses(); i++) { String classL = this.m_Instances.classAttribute().value(i); temp.append(this.pad(classL, " ", maxWidth + 1 - classL.length(), true)); } temp.append("\n"); // class priors temp.append(this.pad("", " ", maxAttWidth, true)); for (int i = 0; i < this.m_Instances.numClasses(); i++) { String priorP = Utils.doubleToString(((DiscreteEstimator) this.m_ClassDistribution).getProbability(i), maxWidth, 2).trim(); priorP = "(" + priorP + ")"; temp.append(this.pad(priorP, " ", maxWidth + 1 - priorP.length(), true)); } temp.append("\n"); temp.append(this.pad("", "=", maxAttWidth + (maxWidth * this.m_Instances.numClasses()) + this.m_Instances.numClasses() + 1, true)); temp.append("\n"); // loop over the attributes int counter = 0; for (int i = 0; i < this.m_Instances.numAttributes(); i++) { if (i == this.m_Instances.classIndex()) { continue; } String attName = this.m_Instances.attribute(i).name(); temp.append(attName + "\n"); if (this.m_Distributions[counter][0] instanceof NormalEstimator) { String meanL = " mean"; temp.append(this.pad(meanL, " ", maxAttWidth + 1 - meanL.length(), false)); for (int j = 0; j < this.m_Instances.numClasses(); j++) { // means NormalEstimator n = (NormalEstimator) this.m_Distributions[counter][j]; String mean = Utils.doubleToString(n.getMean(), maxWidth, 4).trim(); temp.append(this.pad(mean, " ", maxWidth + 1 - mean.length(), true)); } temp.append("\n"); // now do std deviations String stdDevL = " std. 
dev."; temp.append(this.pad(stdDevL, " ", maxAttWidth + 1 - stdDevL.length(), false)); for (int j = 0; j < this.m_Instances.numClasses(); j++) { NormalEstimator n = (NormalEstimator) this.m_Distributions[counter][j]; String stdDev = Utils.doubleToString(n.getStdDev(), maxWidth, 4).trim(); temp.append(this.pad(stdDev, " ", maxWidth + 1 - stdDev.length(), true)); } temp.append("\n"); // now the weight sums String weightL = " weight sum"; temp.append(this.pad(weightL, " ", maxAttWidth + 1 - weightL.length(), false)); for (int j = 0; j < this.m_Instances.numClasses(); j++) { NormalEstimator n = (NormalEstimator) this.m_Distributions[counter][j]; String weight = Utils.doubleToString(n.getSumOfWeights(), maxWidth, 4).trim(); temp.append(this.pad(weight, " ", maxWidth + 1 - weight.length(), true)); } temp.append("\n"); // now the precisions String precisionL = " precision"; temp.append(this.pad(precisionL, " ", maxAttWidth + 1 - precisionL.length(), false)); for (int j = 0; j < this.m_Instances.numClasses(); j++) { NormalEstimator n = (NormalEstimator) this.m_Distributions[counter][j]; String precision = Utils.doubleToString(n.getPrecision(), maxWidth, 4).trim(); temp.append(this.pad(precision, " ", maxWidth + 1 - precision.length(), true)); } temp.append("\n\n"); } else if (this.m_Distributions[counter][0] instanceof DiscreteEstimator) { Attribute a = this.m_Instances.attribute(i); for (int j = 0; j < a.numValues(); j++) { String val = " " + a.value(j); temp.append(this.pad(val, " ", maxAttWidth + 1 - val.length(), false)); for (int k = 0; k < this.m_Instances.numClasses(); k++) { DiscreteEstimator d = (DiscreteEstimator) this.m_Distributions[counter][k]; String count = "" + d.getCount(j); temp.append(this.pad(count, " ", maxWidth + 1 - count.length(), true)); } temp.append("\n"); } // do the totals String total = " [total]"; temp.append(this.pad(total, " ", maxAttWidth + 1 - total.length(), false)); for (int k = 0; k < this.m_Instances.numClasses(); k++) { 
DiscreteEstimator d = (DiscreteEstimator) this.m_Distributions[counter][k]; String count = "" + d.getSumOfCounts(); temp.append(this.pad(count, " ", maxWidth + 1 - count.length(), true)); } temp.append("\n\n"); } else if (this.m_Distributions[counter][0] instanceof KernelEstimator) { String kL = " [# kernels]"; temp.append(this.pad(kL, " ", maxAttWidth + 1 - kL.length(), false)); for (int k = 0; k < this.m_Instances.numClasses(); k++) { KernelEstimator ke = (KernelEstimator) this.m_Distributions[counter][k]; String nk = "" + ke.getNumKernels(); temp.append(this.pad(nk, " ", maxWidth + 1 - nk.length(), true)); } temp.append("\n"); // do num kernels, std. devs and precisions String stdDevL = " [std. dev]"; temp.append(this.pad(stdDevL, " ", maxAttWidth + 1 - stdDevL.length(), false)); for (int k = 0; k < this.m_Instances.numClasses(); k++) { KernelEstimator ke = (KernelEstimator) this.m_Distributions[counter][k]; String stdD = Utils.doubleToString(ke.getStdDev(), maxWidth, 4).trim(); temp.append(this.pad(stdD, " ", maxWidth + 1 - stdD.length(), true)); } temp.append("\n"); String precL = " [precision]"; temp.append(this.pad(precL, " ", maxAttWidth + 1 - precL.length(), false)); for (int k = 0; k < this.m_Instances.numClasses(); k++) { KernelEstimator ke = (KernelEstimator) this.m_Distributions[counter][k]; String prec = Utils.doubleToString(ke.getPrecision(), maxWidth, 4).trim(); temp.append(this.pad(prec, " ", maxWidth + 1 - prec.length(), true)); } temp.append("\n"); // first determine max number of kernels accross the classes int maxK = 0; for (int k = 0; k < this.m_Instances.numClasses(); k++) { KernelEstimator ke = (KernelEstimator) this.m_Distributions[counter][k]; if (ke.getNumKernels() > maxK) { maxK = ke.getNumKernels(); } } for (int j = 0; j < maxK; j++) { // means first String meanL = " K" + (j + 1) + ": mean (weight)"; temp.append(this.pad(meanL, " ", maxAttWidth + 1 - meanL.length(), false)); for (int k = 0; k < this.m_Instances.numClasses(); k++) { 
KernelEstimator ke = (KernelEstimator) this.m_Distributions[counter][k]; double[] means = ke.getMeans(); double[] weights = ke.getWeights(); String m = "--"; if (ke.getNumKernels() == 0) { m = "" + 0; } else if (j < ke.getNumKernels()) { m = Utils.doubleToString(means[j], maxWidth, 4).trim(); m += " (" + Utils.doubleToString(weights[j], maxWidth, 1).trim() + ")"; } temp.append(this.pad(m, " ", maxWidth + 1 - m.length(), true)); } temp.append("\n"); } temp.append("\n"); } counter++; } } return temp.toString(); } /** * Returns a description of the classifier in the old format. * * @return a description of the classifier as a string. */ protected String toStringOriginal() { StringBuffer text = new StringBuffer(); text.append("Naive Bayes Classifier"); if (this.m_Instances == null) { text.append(": No model built yet."); } else { try { for (int i = 0; i < this.m_Distributions[0].length; i++) { text.append("\n\nClass " + this.m_Instances.classAttribute().value(i) + ": Prior probability = " + Utils.doubleToString(this.m_ClassDistribution.getProbability(i), 4, 2) + "\n\n"); Enumeration<Attribute> enumAtts = this.m_Instances.enumerateAttributes(); int attIndex = 0; while (enumAtts.hasMoreElements()) { Attribute attribute = enumAtts.nextElement(); if (attribute.weight() > 0) { text.append(attribute.name() + ": " + this.m_Distributions[attIndex][i]); } attIndex++; } } } catch (Exception ex) { text.append(ex.getMessage()); } } return text.toString(); } private String pad(final String source, final String padChar, final int length, final boolean leftPad) { StringBuffer temp = new StringBuffer(); if (leftPad) { for (int i = 0; i < length; i++) { temp.append(padChar); } temp.append(source); } else { temp.append(source); for (int i = 0; i < length; i++) { temp.append(padChar); } } return temp.toString(); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String 
useKernelEstimatorTipText() { return "Use a kernel estimator for numeric attributes rather than a " + "normal distribution."; } /** * Gets if kernel estimator is being used. * * @return Value of m_UseKernelEstimatory. */ public boolean getUseKernelEstimator() { return this.m_UseKernelEstimator; } /** * Sets if kernel estimator is to be used. * * @param v * Value to assign to m_UseKernelEstimatory. */ public void setUseKernelEstimator(final boolean v) { this.m_UseKernelEstimator = v; if (v) { this.setUseSupervisedDiscretization(false); } } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String useSupervisedDiscretizationTipText() { return "Use supervised discretization to convert numeric attributes to nominal " + "ones."; } /** * Get whether supervised discretization is to be used. * * @return true if supervised discretization is to be used. */ public boolean getUseSupervisedDiscretization() { return this.m_UseDiscretization; } /** * Set whether supervised discretization is to be used. * * @param newblah * true if supervised discretization is to be used. */ public void setUseSupervisedDiscretization(final boolean newblah) { this.m_UseDiscretization = newblah; if (newblah) { this.setUseKernelEstimator(false); } } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String displayModelInOldFormatTipText() { return "Use old format for model output. The old format is " + "better when there are many class values. The new format " + "is better when there are fewer classes and many attributes."; } /** * Set whether to display model output in the old, original format. 
* * @param d * true if model ouput is to be shown in the old format */ public void setDisplayModelInOldFormat(final boolean d) { this.m_displayModelInOldFormat = d; } /** * Get whether to display model output in the old, original format. * * @return true if model ouput is to be shown in the old format */ public boolean getDisplayModelInOldFormat() { return this.m_displayModelInOldFormat; } /** * Return the header that this classifier was trained with * * @return the header that this classifier was trained with */ public Instances getHeader() { return this.m_Instances; } /** * Get all the conditional estimators. * * @return all the conditional estimators. */ public Estimator[][] getConditionalEstimators() { return this.m_Distributions; } /** * Get the class estimator. * * @return the class estimator */ public Estimator getClassEstimator() { return this.m_ClassDistribution; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } @SuppressWarnings({ "rawtypes", "unchecked" }) @Override public NaiveBayes aggregate(final NaiveBayes toAggregate) throws Exception { // Highly unlikely that discretization intervals will match between the // two classifiers if (this.m_UseDiscretization || toAggregate.getUseSupervisedDiscretization()) { throw new Exception("Unable to aggregate when supervised discretization " + "has been turned on"); } if (!this.m_Instances.equalHeaders(toAggregate.m_Instances)) { throw new Exception("Can't aggregate - data headers don't match: " + this.m_Instances.equalHeadersMsg(toAggregate.m_Instances)); } ((Aggregateable) this.m_ClassDistribution).aggregate(toAggregate.m_ClassDistribution); // aggregate all conditional estimators for (int i = 0; i < this.m_Distributions.length; i++) { for (int j = 0; j < this.m_Distributions[i].length; j++) { ((Aggregateable) this.m_Distributions[i][j]).aggregate(toAggregate.m_Distributions[i][j]); } } return this; } @Override 
public void finalizeAggregation() throws Exception { // nothing to do } /** * Main method for testing this class. * * @param argv * the options */ public static void main(final String[] argv) { runClassifier(new NaiveBayes(), argv); } }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/NaiveBayesMultinomial.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * NaiveBayesMultinomial.java * Copyright (C) 2003-2017 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.bayes; import weka.classifiers.AbstractClassifier; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; /** * <!-- globalinfo-start --> Class for building and using a multinomial Naive Bayes classifier. For * more information see,<br/> * <br/> * Andrew Mccallum, Kamal Nigam: A Comparison of Event Models for Naive Bayes Text Classification. * In: AAAI-98 Workshop on 'Learning for Text Categorization', 1998.<br/> * <br/> * The core equation for this classifier:<br/> * <br/> * P[Ci|D] = (P[D|Ci] x P[Ci]) / P[D] (Bayes rule)<br/> * <br/> * where Ci is class i and D is a document. 
* <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;inproceedings{Mccallum1998, * author = {Andrew Mccallum and Kamal Nigam}, * booktitle = {AAAI-98 Workshop on 'Learning for Text Categorization'}, * title = {A Comparison of Event Models for Naive Bayes Text Classification}, * year = {1998} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * -output-debug-info <br> * If set, classifier is run in debug mode and may output additional info to the console. * <p> * * -do-not-check-capabilities <br> * If set, classifier capabilities are not checked before classifier is built (use with caution). * <p> * * -num-decimal-laces <br> * The number of decimal places for the output of numbers in the model. * <p> * * -batch-size <br> * The desired batch size for batch prediction. * <p> * * <!-- options-end --> * * @author Andrew Golightly (acg4@cs.waikato.ac.nz) * @author Bernhard Pfahringer (bernhard@cs.waikato.ac.nz) * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision$ */ public class NaiveBayesMultinomial extends AbstractClassifier implements WeightedInstancesHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 5932177440181257085L; /** * probability that a word (w) exists in a class (H) (i.e. Pr[w|H]) The matrix is in the this * format: probOfWordGivenClass[class][wordAttribute] NOTE: the values are actually the log of * Pr[w|H] */ protected double[][] m_probOfWordGivenClass; /** the probability of a class (i.e. Pr[H]). 
*/ protected double[] m_probOfClass; /** number of unique words */ protected int m_numAttributes; /** number of class values */ protected int m_numClasses; /** copy of header information for use in toString method */ protected Instances m_headerInfo; /** * Returns a string describing this classifier * * @return a description of the classifier suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for building and using a multinomial Naive Bayes classifier. " + "For more information see,\n\n" + this.getTechnicalInformation().toString() + "\n\n" + "The core equation for this classifier:\n\n" + "P[Ci|D] = (P[D|Ci] x P[Ci]) / P[D] (Bayes' rule)\n\n" + "where Ci is class i and D is a document."; } /** * Returns an instance of a TechnicalInformation object, containing detailed information about the * technical background of this class, e.g., paper reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Andrew Mccallum and Kamal Nigam"); result.setValue(Field.YEAR, "1998"); result.setValue(Field.TITLE, "A Comparison of Event Models for Naive Bayes Text Classification"); result.setValue(Field.BOOKTITLE, "AAAI-98 Workshop on 'Learning for Text Categorization'"); return result; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NUMERIC_ATTRIBUTES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Sets up the classifier before any actual instances are processed. 
*/ protected void initializeClassifier(final Instances instances) throws Exception { // can classifier handle the data? this.getCapabilities().testWithFail(instances); this.m_headerInfo = new Instances(instances, 0); this.m_numClasses = instances.numClasses(); this.m_numAttributes = instances.numAttributes(); this.m_probOfWordGivenClass = new double[this.m_numClasses][]; // Initialize the matrix of word counts for (int c = 0; c < this.m_numClasses; c++) { this.m_probOfWordGivenClass[c] = new double[this.m_numAttributes]; for (int att = 0; att < this.m_numAttributes; att++) { this.m_probOfWordGivenClass[c][att] = 1.0; } } // Initialize class counts this.m_probOfClass = new double[this.m_numClasses]; for (int i = 0; i < this.m_numClasses; i++) { this.m_probOfClass[i] = 1.0; } } /** * Generates the classifier. * * @param instances * set of instances serving as training data * @throws Exception * if the classifier has not been generated successfully */ @Override public void buildClassifier(final Instances instances) throws Exception { this.initializeClassifier(instances); // enumerate through the instances double[] wordsPerClass = new double[this.m_numClasses]; for (Instance instance : instances) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } double classValue = instance.value(instance.classIndex()); if (!Utils.isMissingValue(classValue)) { int classIndex = (int) classValue; this.m_probOfClass[classIndex] += instance.weight(); for (int a = 0; a < instance.numValues(); a++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (instance.index(a) != instance.classIndex()) { if (!instance.isMissingSparse(a)) { double numOccurrences = instance.valueSparse(a) * instance.weight(); if (numOccurrences < 0) { throw new Exception("Numeric attribute values must all be greater or equal to zero."); } 
wordsPerClass[classIndex] += numOccurrences; this.m_probOfWordGivenClass[classIndex][instance.index(a)] += numOccurrences; } } } } } /* * normalising probOfWordGivenClass values and saving each value as the log of each value */ for (int c = 0; c < this.m_numClasses; c++) { for (int v = 0; v < this.m_numAttributes; v++) { this.m_probOfWordGivenClass[c][v] = Math.log(this.m_probOfWordGivenClass[c][v]) - Math.log(wordsPerClass[c] + this.m_numAttributes - 1); } } // Normalize prior class probabilities Utils.normalize(this.m_probOfClass); } /** * Calculates the class membership probabilities for the given test instance. * * @param instance * the instance to be classified * @return predicted class probability distribution * @throws Exception * if there is a problem generating the prediction */ @Override public double[] distributionForInstance(final Instance instance) throws Exception { double[] probOfClassGivenDoc = new double[this.m_numClasses]; // calculate the array of log(Pr[D|C]) double[] logDocGivenClass = new double[this.m_numClasses]; for (int h = 0; h < this.m_numClasses; h++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } logDocGivenClass[h] = this.probOfDocGivenClass(instance, h); } double max = logDocGivenClass[Utils.maxIndex(logDocGivenClass)]; for (int i = 0; i < this.m_numClasses; i++) { probOfClassGivenDoc[i] = Math.exp(logDocGivenClass[i] - max) * this.m_probOfClass[i]; } Utils.normalize(probOfClassGivenDoc); return probOfClassGivenDoc; } /** * log(N!) + (sum for all the words i)(log(Pi^ni) - log(ni!)) * * where N is the total number of words Pi is the probability of obtaining word i ni is the number * of times the word at index i occurs in the document * * Actually, this method just computes (sum for all the words i)(log(Pi^ni) because the factorials * are irrelevant when posterior class probabilities are computed. 
* * @param inst * The instance to be classified * @param classIndex * The index of the class we are calculating the probability with respect to * * @return The log of the probability of the document occuring given the class */ protected double probOfDocGivenClass(final Instance inst, final int classIndex) { double answer = 0; for (int i = 0; i < inst.numValues(); i++) { if (inst.index(i) != inst.classIndex()) { answer += (inst.valueSparse(i) * this.m_probOfWordGivenClass[classIndex][inst.index(i)]); } } return answer; } /** * Returns a string representation of the classifier. * * @return a string representation of the classifier */ @Override public String toString() { StringBuffer result = new StringBuffer("The independent probability of a class\n--------------------------------------\n"); for (int c = 0; c < this.m_numClasses; c++) { result.append(this.m_headerInfo.classAttribute().value(c)).append("\t").append(Utils.doubleToString(this.m_probOfClass[c], this.getNumDecimalPlaces())).append("\n"); } result.append("\nThe probability of a word given the class\n-----------------------------------------\n\t"); for (int c = 0; c < this.m_numClasses; c++) { result.append(this.m_headerInfo.classAttribute().value(c)).append("\t"); } result.append("\n"); for (int w = 0; w < this.m_numAttributes; w++) { if (w != this.m_headerInfo.classIndex()) { result.append(this.m_headerInfo.attribute(w).name()).append("\t"); for (int c = 0; c < this.m_numClasses; c++) { result.append(Utils.doubleToString(Math.exp(this.m_probOfWordGivenClass[c][w]), this.getNumDecimalPlaces())).append("\t"); } result.append("\n"); } } return result.toString(); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. * * @param argv * the options */ public static void main(final String[] argv) { runClassifier(new NaiveBayesMultinomial(), argv); } }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/NaiveBayesMultinomialText.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * NaiveBayesMultinomialText.java * Copyright (C) 2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.bayes; import java.io.Serializable; import java.util.ArrayList; import java.util.Collections; import java.util.Enumeration; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Map; import java.util.Set; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.UpdateableBatchProcessor; import weka.classifiers.UpdateableClassifier; import weka.core.Aggregateable; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.core.stemmers.NullStemmer; import weka.core.stemmers.Stemmer; import weka.core.stopwords.Null; import weka.core.stopwords.StopwordsHandler; import weka.core.tokenizers.Tokenizer; import weka.core.tokenizers.WordTokenizer; /** * <!-- globalinfo-start --> Multinomial naive bayes for text data. Operates directly (and only) on String attributes. 
Other types of input attributes are accepted but ignored during training and classification
 * <p/>
 * <!-- globalinfo-end -->
 *
 * <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -W
 *  Use word frequencies instead of binary bag of words.</pre>
 *
 * <pre> -P &lt;# instances&gt;
 *  How often to prune the dictionary of low frequency words (default = 0, i.e. don't prune)</pre>
 *
 * <pre> -M &lt;double&gt;
 *  Minimum word frequency. Words with less than this frequence are ignored.
 *  If periodic pruning is turned on then this is also used to determine which
 *  words to remove from the dictionary (default = 3).</pre>
 *
 * <pre> -normalize
 *  Normalize document length (use in conjunction with -norm and -lnorm)</pre>
 *
 * <pre> -norm &lt;num&gt;
 *  Specify the norm that each instance must have (default 1.0)</pre>
 *
 * <pre> -lnorm &lt;num&gt;
 *  Specify L-norm to use (default 2.0)</pre>
 *
 * <pre> -lowercase
 *  Convert all tokens to lowercase before adding to the dictionary.</pre>
 *
 * <pre> -stopwords-handler
 *  The stopwords handler to use (default Null).</pre>
 *
 * <pre> -tokenizer &lt;spec&gt;
 *  The tokenizing algorithm (classname plus parameters) to use.
 *  (default: weka.core.tokenizers.WordTokenizer)</pre>
 *
 * <pre> -stemmer &lt;spec&gt;
 *  The stemming algorithm (classname plus parameters) to use.</pre>
 *
 * <pre> -output-debug-info
 *  If set, classifier is run in debug mode and
 *  may output additional info to the console</pre>
 *
 * <pre> -do-not-check-capabilities
 *  If set, classifier capabilities are not checked before classifier is built
 *  (use with caution).</pre>
 *
 * <!-- options-end -->
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @author Andrew Golightly (acg4@cs.waikato.ac.nz)
 * @author Bernhard Pfahringer (bernhard@cs.waikato.ac.nz)
 *
 */
public class NaiveBayesMultinomialText extends AbstractClassifier implements UpdateableClassifier, UpdateableBatchProcessor, WeightedInstancesHandler, Aggregateable<NaiveBayesMultinomialText> {

  /** For serialization */
  private static final long serialVersionUID = 2139025532014821394L;

  /**
   * Simple mutable holder for a (possibly fractional, because instances are
   * weighted) occurrence count. Using a mutable cell avoids re-inserting
   * boxed values into the dictionary maps on every increment.
   */
  private static class Count implements Serializable {

    /**
     * For serialization
     */
    private static final long serialVersionUID = 2104201532017340967L;

    public double m_count;

    public Count(final double c) {
      this.m_count = c;
    }
  }

  /** The header of the training data */
  protected Instances m_data;

  /** Per-class prior counts (initialised with a Laplace count of 1 per class). */
  protected double[] m_probOfClass;

  /**
   * Per-class total word mass, including the Laplace correction added when a
   * word first enters the dictionary. Used as the denominator of
   * P(word | class).
   */
  protected double[] m_wordsPerClass;

  /** Per-class dictionary: word -> accumulated (Laplace-corrected) count. */
  protected Map<Integer, LinkedHashMap<String, Count>> m_probOfWordGivenClass;

  /**
   * Holds the current document vector (LinkedHashMap is more efficient when iterating over EntrySet than HashMap)
   */
  protected transient LinkedHashMap<String, Count> m_inputVector;

  /** Stopword handler to use. */
  protected StopwordsHandler m_StopwordsHandler = new Null();

  /** The tokenizer to use */
  protected Tokenizer m_tokenizer = new WordTokenizer();

  /** Whether or not to convert all tokens to lowercase */
  protected boolean m_lowercaseTokens;

  /** The stemming algorithm. */
  protected Stemmer m_stemmer = new NullStemmer();

  /**
   * The number of training instances at which to periodically prune the
   * dictionary of min frequency words. Zero or negative means don't prune.
   */
  protected int m_periodicP = 0;

  /**
   * Only consider dictionary words (features) that occur at least this many
   * times.
   */
  protected double m_minWordP = 3;

  /** Use word frequencies rather than bag-of-words if true */
  protected boolean m_wordFrequencies = false;

  /** Normalize document length? */
  protected boolean m_normalize = false;

  /** The length that each document vector should have in the end */
  protected double m_norm = 1.0;

  /** The L-norm to use */
  protected double m_lnorm = 2.0;

  /** Laplace-like correction factor for zero frequency */
  protected double m_leplace = 1.0;

  /** Holds the current instance number (drives periodic pruning). */
  protected double m_t;

  /**
   * Returns a string describing classifier
   *
   * @return a description suitable for displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return "Multinomial naive bayes for text data. Operates " + "directly (and only) on String attributes. " + "Other types of input attributes are accepted but " + "ignored during training and classification";
  }

  /**
   * Returns default capabilities of the classifier.
   *
   * @return the capabilities of this classifier
   */
  @Override
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    // attributes (non-String attributes are accepted but ignored)
    result.enable(Capability.STRING_ATTRIBUTES);
    result.enable(Capability.NOMINAL_ATTRIBUTES);
    result.enable(Capability.DATE_ATTRIBUTES);
    result.enable(Capability.NUMERIC_ATTRIBUTES);
    result.enable(Capability.MISSING_VALUES);
    result.enable(Capability.MISSING_CLASS_VALUES);
    result.enable(Capability.NOMINAL_CLASS);

    // instances
    result.setMinimumNumberInstances(0);

    return result;
  }

  /**
   * Generates the classifier. Batch training is implemented as a reset
   * followed by incremental updates over every training instance.
   *
   * @param data
   *          set of instances serving as training data
   * @throws Exception
   *           if the classifier has not been generated successfully
   */
  @Override
  public void buildClassifier(Instances data) throws Exception {
    this.reset();

    // can classifier handle the data?
    this.getCapabilities().testWithFail(data);

    this.m_data = new Instances(data, 0);
    data = new Instances(data);

    this.m_wordsPerClass = new double[data.numClasses()];
    this.m_probOfClass = new double[data.numClasses()];
    this.m_probOfWordGivenClass = new HashMap<Integer, LinkedHashMap<String, Count>>();

    double laplace = 1.0;
    for (int i = 0; i < data.numClasses(); i++) {
      LinkedHashMap<String, Count> dict = new LinkedHashMap<String, Count>(10000 / data.numClasses());
      this.m_probOfWordGivenClass.put(i, dict);
      this.m_probOfClass[i] = laplace;

      // this needs to be updated for laplace correction every time we see a new
      // word (attribute)
      this.m_wordsPerClass[i] = 0;
    }

    for (int i = 0; i < data.numInstances(); i++) {
      this.updateClassifier(data.instance(i));
    }

    if (data.numInstances() > 0) {
      this.pruneDictionary(true);
    }
  }

  /**
   * Updates the classifier with the given instance.
   *
   * @param instance
   *          the new training instance to include in the model
   * @throws Exception
   *           if the instance could not be incorporated in the model.
   */
  @Override
  public void updateClassifier(final Instance instance) throws Exception {
    this.updateClassifier(instance, true);
  }

  /**
   * Updates the classifier with the given instance, optionally growing the
   * per-class dictionaries.
   *
   * @param instance
   *          the new training instance to include in the model
   * @param updateDictionary
   *          true to add newly seen words to the dictionaries
   * @throws Exception
   *           if the instance could not be incorporated in the model.
   */
  protected void updateClassifier(final Instance instance, final boolean updateDictionary) throws Exception {
    if (!instance.classIsMissing()) {
      int classIndex = (int) instance.classValue();
      this.m_probOfClass[classIndex] += instance.weight();
      this.tokenizeInstance(instance, updateDictionary);
      this.m_t++;
    }
  }

  /**
   * Calculates the class membership probabilities for the given test instance.
   * Works in log space and subtracts the maximum log-likelihood before
   * exponentiating, to avoid numeric underflow.
   *
   * @param instance
   *          the instance to be classified
   * @return predicted class probability distribution
   * @throws Exception
   *           if there is a problem generating the prediction
   */
  @Override
  public double[] distributionForInstance(final Instance instance) throws Exception {
    this.tokenizeInstance(instance, false);

    double[] probOfClassGivenDoc = new double[this.m_data.numClasses()];
    double[] logDocGivenClass = new double[this.m_data.numClasses()];

    for (int i = 0; i < this.m_data.numClasses(); i++) {
      logDocGivenClass[i] += Math.log(this.m_probOfClass[i]);
      LinkedHashMap<String, Count> dictForClass = this.m_probOfWordGivenClass.get(i);

      double allWords = 0;

      // for document normalization (if in use)
      double iNorm = 0;
      double fv = 0;

      if (this.m_normalize) {
        for (Map.Entry<String, Count> feature : this.m_inputVector.entrySet()) {
          String word = feature.getKey();
          Count c = feature.getValue();

          // check the word against all the dictionaries (all classes)
          boolean ok = false;
          for (int clss = 0; clss < this.m_data.numClasses(); clss++) {
            if (this.m_probOfWordGivenClass.get(clss).get(word) != null) {
              ok = true;
              break;
            }
          }

          // only normalize with respect to those words that we've seen during
          // training (i.e. dictionary over all classes)
          if (ok) {
            // word counts or bag-of-words?
            fv = (this.m_wordFrequencies) ? c.m_count : 1.0;
            iNorm += Math.pow(Math.abs(fv), this.m_lnorm);
          }
        }
        iNorm = Math.pow(iNorm, 1.0 / this.m_lnorm);
      }

      for (Map.Entry<String, Count> feature : this.m_inputVector.entrySet()) {
        String word = feature.getKey();
        Count dictCount = dictForClass.get(word);

        // check the word against all the dictionaries (all classes)
        boolean ok = false;
        for (int clss = 0; clss < this.m_data.numClasses(); clss++) {
          if (this.m_probOfWordGivenClass.get(clss).get(word) != null) {
            ok = true;
            break;
          }
        }

        // ignore words we haven't seen in the training data
        if (ok) {
          double freq = (this.m_wordFrequencies) ? feature.getValue().m_count : 1.0;
          // double freq = (feature.getValue().m_count / iNorm * m_norm);
          if (this.m_normalize) {
            freq *= (this.m_norm / iNorm);
          }
          allWords += freq;

          if (dictCount != null) {
            logDocGivenClass[i] += freq * Math.log(dictCount.m_count);
          } else {
            // Laplace for zero frequency
            logDocGivenClass[i] += freq * Math.log(this.m_leplace);
          }
        }
      }

      // normalize by the total word mass of the class
      if (this.m_wordsPerClass[i] > 0) {
        logDocGivenClass[i] -= allWords * Math.log(this.m_wordsPerClass[i]);
      }
    }

    double max = logDocGivenClass[Utils.maxIndex(logDocGivenClass)];
    for (int i = 0; i < this.m_data.numClasses(); i++) {
      probOfClassGivenDoc[i] = Math.exp(logDocGivenClass[i] - max);
    }

    Utils.normalize(probOfClassGivenDoc);

    return probOfClassGivenDoc;
  }

  /**
   * Tokenizes the String attributes of an instance into {@link #m_inputVector}
   * (applying lowercasing, stemming and stopword removal as configured) and,
   * if requested, folds the resulting document vector into the per-class
   * dictionaries and word totals.
   *
   * @param instance
   *          the instance to tokenize
   * @param updateDictionary
   *          true to update the model with the tokenized document (training);
   *          false to only populate {@link #m_inputVector} (prediction)
   */
  protected void tokenizeInstance(final Instance instance, final boolean updateDictionary) {
    if (this.m_inputVector == null) {
      this.m_inputVector = new LinkedHashMap<String, Count>();
    } else {
      this.m_inputVector.clear();
    }

    for (int i = 0; i < instance.numAttributes(); i++) {
      if (instance.attribute(i).isString() && !instance.isMissing(i)) {
        this.m_tokenizer.tokenize(instance.stringValue(i));

        while (this.m_tokenizer.hasMoreElements()) {
          String word = this.m_tokenizer.nextElement();
          if (this.m_lowercaseTokens) {
            word = word.toLowerCase();
          }

          word = this.m_stemmer.stem(word);

          if (this.m_StopwordsHandler.isStopword(word)) {
            continue;
          }

          Count docCount = this.m_inputVector.get(word);
          if (docCount == null) {
            this.m_inputVector.put(word, new Count(instance.weight()));
          } else {
            docCount.m_count += instance.weight();
          }
        }
      }
    }

    if (updateDictionary) {
      int classValue = (int) instance.classValue();
      LinkedHashMap<String, Count> dictForClass = this.m_probOfWordGivenClass.get(classValue);

      // document normalization
      double iNorm = 0;
      double fv = 0;

      if (this.m_normalize) {
        for (Count c : this.m_inputVector.values()) {
          // word counts or bag-of-words?
          fv = (this.m_wordFrequencies) ? c.m_count : 1.0;
          iNorm += Math.pow(Math.abs(fv), this.m_lnorm);
        }
        iNorm = Math.pow(iNorm, 1.0 / this.m_lnorm);
      }

      for (Map.Entry<String, Count> feature : this.m_inputVector.entrySet()) {
        String word = feature.getKey();
        double freq = (this.m_wordFrequencies) ? feature.getValue().m_count : 1.0;
        // double freq = (feature.getValue().m_count / iNorm * m_norm);
        if (this.m_normalize) {
          freq *= (this.m_norm / iNorm);
        }

        // a newly seen word gets a Laplace count in EVERY class dictionary, so
        // that the per-class word totals stay comparable
        for (int i = 0; i < this.m_data.numClasses(); i++) {
          LinkedHashMap<String, Count> dict = this.m_probOfWordGivenClass.get(i);
          if (dict.get(word) == null) {
            dict.put(word, new Count(this.m_leplace));
            this.m_wordsPerClass[i] += this.m_leplace;
          }
        }

        // guaranteed non-null: the loop above has just inserted the word into
        // every class dictionary (including this one) if it was missing
        Count dictCount = dictForClass.get(word);
        dictCount.m_count += freq;
        this.m_wordsPerClass[classValue] += freq;
      }

      this.pruneDictionary(false);
    }
  }

  /**
   * Removes words whose accumulated count is below {@link #m_minWordP} from
   * every class dictionary, keeping {@link #m_wordsPerClass} in sync.
   *
   * @param force
   *          true to prune regardless of the periodic pruning schedule
   */
  protected void pruneDictionary(final boolean force) {
    // only prune on the periodic schedule, unless forced
    if ((this.m_periodicP <= 0 || this.m_t % this.m_periodicP > 0) && !force) {
      return;
    }

    Set<Integer> classesSet = this.m_probOfWordGivenClass.keySet();
    for (Integer classIndex : classesSet) {
      LinkedHashMap<String, Count> dictForClass = this.m_probOfWordGivenClass.get(classIndex);
      Iterator<Map.Entry<String, Count>> entries = dictForClass.entrySet().iterator();
      while (entries.hasNext()) {
        Map.Entry<String, Count> entry = entries.next();
        if (entry.getValue().m_count < this.m_minWordP) {
          this.m_wordsPerClass[classIndex] -= entry.getValue().m_count;
          entries.remove();
        }
      }
    }
  }

  /**
   * Reset the classifier.
   */
  public void reset() {
    this.m_t = 1;
    this.m_wordsPerClass = null;
    this.m_probOfWordGivenClass = null;
    this.m_probOfClass = null;
  }

  /**
   * the stemming algorithm to use, null means no stemming at all (i.e., the
   * NullStemmer is used).
   *
   * @param value
   *          the configured stemming algorithm, or null
   * @see NullStemmer
   */
  public void setStemmer(final Stemmer value) {
    if (value != null) {
      this.m_stemmer = value;
    } else {
      this.m_stemmer = new NullStemmer();
    }
  }

  /**
   * Returns the current stemming algorithm, null if none is used.
   *
   * @return the current stemming algorithm, null if none set
   */
  public Stemmer getStemmer() {
    return this.m_stemmer;
  }

  /**
   * Returns the tip text for this property.
   *
   * @return tip text for this property suitable for displaying in the explorer/experimenter gui
   */
  public String stemmerTipText() {
    return "The stemming algorithm to use on the words.";
  }

  /**
   * the tokenizer algorithm to use.
   *
   * @param value
   *          the configured tokenizing algorithm
   */
  public void setTokenizer(final Tokenizer value) {
    this.m_tokenizer = value;
  }

  /**
   * Returns the current tokenizer algorithm.
   *
   * @return the current tokenizer algorithm
   */
  public Tokenizer getTokenizer() {
    return this.m_tokenizer;
  }

  /**
   * Returns the tip text for this property.
   *
   * @return tip text for this property suitable for displaying in the explorer/experimenter gui
   */
  public String tokenizerTipText() {
    return "The tokenizing algorithm to use on the strings.";
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the explorer/experimenter gui
   */
  public String useWordFrequenciesTipText() {
    return "Use word frequencies rather than binary " + "bag of words representation";
  }

  /**
   * Set whether to use word frequencies rather than binary bag of words
   * representation.
   *
   * @param u
   *          true if word frequencies are to be used.
   */
  public void setUseWordFrequencies(final boolean u) {
    this.m_wordFrequencies = u;
  }

  /**
   * Get whether to use word frequencies rather than binary bag of words
   * representation.
   *
   * @return true if word frequencies are to be used.
   */
  public boolean getUseWordFrequencies() {
    return this.m_wordFrequencies;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the explorer/experimenter gui
   */
  public String lowercaseTokensTipText() {
    return "Whether to convert all tokens to lowercase";
  }

  /**
   * Set whether to convert all tokens to lowercase
   *
   * @param l
   *          true if all tokens are to be converted to lowercase
   */
  public void setLowercaseTokens(final boolean l) {
    this.m_lowercaseTokens = l;
  }

  /**
   * Get whether to convert all tokens to lowercase
   *
   * @return true if all tokens are to be converted to lowercase
   */
  public boolean getLowercaseTokens() {
    return this.m_lowercaseTokens;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the explorer/experimenter gui
   */
  public String periodicPruningTipText() {
    return "How often (number of instances) to prune " + "the dictionary of low frequency terms. " + "0 means don't prune. Setting a positive " + "integer n means prune after every n instances";
  }

  /**
   * Set how often to prune the dictionary
   *
   * @param p
   *          how often to prune
   */
  public void setPeriodicPruning(final int p) {
    this.m_periodicP = p;
  }

  /**
   * Get how often to prune the dictionary
   *
   * @return how often to prune the dictionary
   */
  public int getPeriodicPruning() {
    return this.m_periodicP;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the explorer/experimenter gui
   */
  public String minWordFrequencyTipText() {
    return "Ignore any words that don't occur at least " + "min frequency times in the training data. If periodic " + "pruning is turned on, then the dictionary is pruned " + "according to this value";
  }

  /**
   * Set the minimum word frequency. Words that don't occur at least min freq
   * times are ignored when updating weights. If periodic pruning is turned on,
   * then min frequency is used when removing words from the dictionary.
   *
   * @param minFreq
   *          the minimum word frequency to use
   */
  public void setMinWordFrequency(final double minFreq) {
    this.m_minWordP = minFreq;
  }

  /**
   * Get the minimum word frequency. Words that don't occur at least min freq
   * times are ignored when updating weights. If periodic pruning is turned on,
   * then min frequency is used when removing words from the dictionary.
   *
   * @return the minimum word frequency to use
   */
  public double getMinWordFrequency() {
    return this.m_minWordP;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the explorer/experimenter gui
   */
  public String normalizeDocLengthTipText() {
    return "If true then document length is normalized according " + "to the settings for norm and lnorm";
  }

  /**
   * Set whether to normalize the length of each document
   *
   * @param norm
   *          true if document lengths is to be normalized
   */
  public void setNormalizeDocLength(final boolean norm) {
    this.m_normalize = norm;
  }

  /**
   * Get whether to normalize the length of each document
   *
   * @return true if document lengths is to be normalized
   */
  public boolean getNormalizeDocLength() {
    return this.m_normalize;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the explorer/experimenter gui
   */
  public String normTipText() {
    return "The norm of the instances after normalization.";
  }

  /**
   * Get the instance's Norm.
   *
   * @return the Norm
   */
  public double getNorm() {
    return this.m_norm;
  }

  /**
   * Set the norm of the instances
   *
   * @param newNorm
   *          the norm to wich the instances must be set
   */
  public void setNorm(final double newNorm) {
    this.m_norm = newNorm;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the explorer/experimenter gui
   */
  public String LNormTipText() {
    return "The LNorm to use for document length normalization.";
  }

  /**
   * Get the L Norm used.
   *
   * @return the L-norm used
   */
  public double getLNorm() {
    return this.m_lnorm;
  }

  /**
   * Set the L-norm to used
   *
   * @param newLNorm
   *          the L-norm
   */
  public void setLNorm(final double newLNorm) {
    this.m_lnorm = newLNorm;
  }

  /**
   * Sets the stopwords handler to use.
   *
   * @param value
   *          the stopwords handler, if null, Null is used
   */
  public void setStopwordsHandler(final StopwordsHandler value) {
    if (value != null) {
      this.m_StopwordsHandler = value;
    } else {
      this.m_StopwordsHandler = new Null();
    }
  }

  /**
   * Gets the stopwords handler.
   *
   * @return the stopwords handler
   */
  public StopwordsHandler getStopwordsHandler() {
    return this.m_StopwordsHandler;
  }

  /**
   * Returns the tip text for this property.
   *
   * @return tip text for this property suitable for displaying in the explorer/experimenter gui
   */
  public String stopwordsHandlerTipText() {
    return "The stopwords handler to use (Null means no stopwords are used).";
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  @Override
  public Enumeration<Option> listOptions() {

    Vector<Option> newVector = new Vector<Option>();

    newVector.add(new Option("\tUse word frequencies instead of " + "binary bag of words.", "W", 0, "-W"));
    newVector.add(new Option("\tHow often to prune the dictionary " + "of low frequency words (default = 0, i.e. don't prune)", "P", 1, "-P <# instances>"));
    newVector.add(new Option("\tMinimum word frequency. Words with less " + "than this frequence are ignored.\n\tIf periodic pruning " + "is turned on then this is also used to determine which\n\t" + "words to remove from the dictionary (default = 3).", "M", 1, "-M <double>"));
    newVector.addElement(new Option("\tNormalize document length (use in conjunction with -norm and " + "-lnorm)", "normalize", 0, "-normalize"));
    newVector.addElement(new Option("\tSpecify the norm that each instance must have (default 1.0)", "norm", 1, "-norm <num>"));
    newVector.addElement(new Option("\tSpecify L-norm to use (default 2.0)", "lnorm", 1, "-lnorm <num>"));
    newVector.addElement(new Option("\tConvert all tokens to lowercase " + "before adding to the dictionary.", "lowercase", 0, "-lowercase"));
    newVector.addElement(new Option("\tThe stopwords handler to use (default Null).", "-stopwords-handler", 1, "-stopwords-handler"));
    newVector.addElement(new Option("\tThe tokenizing algorihtm (classname plus parameters) to use.\n" + "\t(default: " + WordTokenizer.class.getName() + ")", "tokenizer", 1, "-tokenizer <spec>"));
    newVector.addElement(new Option("\tThe stemmering algorihtm (classname plus parameters) to use.", "stemmer", 1, "-stemmer <spec>"));

    newVector.addAll(Collections.list(super.listOptions()));

    return newVector.elements();
  }

  /**
   * Parses a given list of options. The recognised options are those listed in
   * the class-level documentation and by {@link #listOptions()}.
   *
   * @param options
   *          the list of options as an array of strings
   * @throws Exception
   *           if an option is not supported
   */
  @Override
  public void setOptions(final String[] options) throws Exception {
    this.reset();

    super.setOptions(options);

    this.setUseWordFrequencies(Utils.getFlag("W", options));

    String pruneFreqS = Utils.getOption("P", options);
    if (pruneFreqS.length() > 0) {
      this.setPeriodicPruning(Integer.parseInt(pruneFreqS));
    }
    String minFreq = Utils.getOption("M", options);
    if (minFreq.length() > 0) {
      this.setMinWordFrequency(Double.parseDouble(minFreq));
    }

    this.setNormalizeDocLength(Utils.getFlag("normalize", options));

    String normFreqS = Utils.getOption("norm", options);
    if (normFreqS.length() > 0) {
      this.setNorm(Double.parseDouble(normFreqS));
    }
    String lnormFreqS = Utils.getOption("lnorm", options);
    if (lnormFreqS.length() > 0) {
      this.setLNorm(Double.parseDouble(lnormFreqS));
    }

    this.setLowercaseTokens(Utils.getFlag("lowercase", options));

    String stemmerString = Utils.getOption("stemmer", options);
    if (stemmerString.length() == 0) {
      this.setStemmer(null);
    } else {
      String[] stemmerSpec = Utils.splitOptions(stemmerString);
      if (stemmerSpec.length == 0) {
        throw new Exception("Invalid stemmer specification string");
      }
      String stemmerName = stemmerSpec[0];
      stemmerSpec[0] = "";
      Stemmer stemmer = (Stemmer) Utils.forName(Class.forName("weka.core.stemmers.Stemmer"), stemmerName, stemmerSpec);
      this.setStemmer(stemmer);
    }

    String stopwordsHandlerString = Utils.getOption("stopwords-handler", options);
    if (stopwordsHandlerString.length() == 0) {
      this.setStopwordsHandler(null);
    } else {
      String[] stopwordsHandlerSpec = Utils.splitOptions(stopwordsHandlerString);
      if (stopwordsHandlerSpec.length == 0) {
        throw new Exception("Invalid StopwordsHandler specification string");
      }
      String stopwordsHandlerName = stopwordsHandlerSpec[0];
      stopwordsHandlerSpec[0] = "";
      StopwordsHandler stopwordsHandler = (StopwordsHandler) Utils.forName(Class.forName("weka.core.stopwords.StopwordsHandler"), stopwordsHandlerName, stopwordsHandlerSpec);
      this.setStopwordsHandler(stopwordsHandler);
    }

    String tokenizerString = Utils.getOption("tokenizer", options);
    if (tokenizerString.length() == 0) {
      this.setTokenizer(new WordTokenizer());
    } else {
      String[] tokenizerSpec = Utils.splitOptions(tokenizerString);
      if (tokenizerSpec.length == 0) {
        throw new Exception("Invalid tokenizer specification string");
      }
      String tokenizerName = tokenizerSpec[0];
      tokenizerSpec[0] = "";
      Tokenizer tokenizer = (Tokenizer) Utils.forName(Class.forName("weka.core.tokenizers.Tokenizer"), tokenizerName, tokenizerSpec);
      this.setTokenizer(tokenizer);
    }

    Utils.checkForRemainingOptions(options);
  }

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {
    ArrayList<String> options = new ArrayList<String>();

    if (this.getUseWordFrequencies()) {
      options.add("-W");
    }
    options.add("-P");
    options.add("" + this.getPeriodicPruning());
    options.add("-M");
    options.add("" + this.getMinWordFrequency());

    if (this.getNormalizeDocLength()) {
      options.add("-normalize");
    }
    options.add("-norm");
    options.add("" + this.getNorm());
    options.add("-lnorm");
    options.add("" + this.getLNorm());
    if (this.getLowercaseTokens()) {
      options.add("-lowercase");
    }

    if (this.getStopwordsHandler() != null) {
      options.add("-stopwords-handler");
      String spec = this.getStopwordsHandler().getClass().getName();
      if (this.getStopwordsHandler() instanceof OptionHandler) {
        spec += " " + Utils.joinOptions(((OptionHandler) this.getStopwordsHandler()).getOptions());
      }
      options.add(spec.trim());
    }

    options.add("-tokenizer");
    String spec = this.getTokenizer().getClass().getName();
    if (this.getTokenizer() instanceof OptionHandler) {
      spec += " " + Utils.joinOptions(((OptionHandler) this.getTokenizer()).getOptions());
    }
    options.add(spec.trim());

    if (this.getStemmer() != null) {
      options.add("-stemmer");
      spec = this.getStemmer().getClass().getName();
      if (this.getStemmer() instanceof OptionHandler) {
        spec += " " + Utils.joinOptions(((OptionHandler) this.getStemmer()).getOptions());
      }
      options.add(spec.trim());
    }

    Collections.addAll(options, super.getOptions());

    // the list always contains at least -P/-M/-norm/-lnorm/-tokenizer, so
    // toArray allocates a correctly sized array here
    return options.toArray(new String[1]);
  }

  /**
   * Returns a textual description of this classifier.
   *
   * @return a textual description of this classifier.
   */
  @Override
  public String toString() {
    if (this.m_probOfClass == null) {
      return "NaiveBayesMultinomialText: No model built yet.\n";
    }

    StringBuffer result = new StringBuffer();

    // build a master dictionary over all classes
    HashSet<String> master = new HashSet<String>();
    for (int i = 0; i < this.m_data.numClasses(); i++) {
      LinkedHashMap<String, Count> classDict = this.m_probOfWordGivenClass.get(i);
      for (String key : classDict.keySet()) {
        master.add(key);
      }
    }

    result.append("Dictionary size: " + master.size()).append("\n\n");

    result.append("The independent frequency of a class\n");
    result.append("--------------------------------------\n");

    for (int i = 0; i < this.m_data.numClasses(); i++) {
      result.append(this.m_data.classAttribute().value(i)).append("\t").append(Double.toString(this.m_probOfClass[i])).append("\n");
    }

    if (master.size() > 150000) {
      result.append("\nFrequency table ommitted due to size\n");
      return result.toString();
    }

    result.append("\nThe frequency of a word given the class\n");
    result.append("-----------------------------------------\n");

    for (int i = 0; i < this.m_data.numClasses(); i++) {
      result.append(Utils.padLeft(this.m_data.classAttribute().value(i), 11)).append("\t");
    }
    result.append("\n");

    Iterator<String> masterIter = master.iterator();
    while (masterIter.hasNext()) {
      String word = masterIter.next();

      for (int i = 0; i < this.m_data.numClasses(); i++) {
        LinkedHashMap<String, Count> classDict = this.m_probOfWordGivenClass.get(i);
        Count c = classDict.get(word);
        if (c == null) {
          result.append("<laplace=1>\t");
        } else {
          result.append(Utils.padLeft(Double.toString(c.m_count), 11)).append("\t");
        }
      }
      result.append(word);
      result.append("\n");
    }

    return result.toString();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }

  /** Number of models merged in so far; Integer.MIN_VALUE once finalized. */
  protected int m_numModels = 0;

  /**
   * Merges another model of this type into this one. Class attributes must
   * match; Laplace corrections already present in both models are compensated
   * for by subtracting 1 when summing counts.
   *
   * @param toAggregate
   *          the model to merge in
   * @return this, with the other model's counts folded in
   * @throws Exception
   *           if aggregation has been finalized, no model has been built yet,
   *           or the class attributes are incompatible
   */
  @Override
  public NaiveBayesMultinomialText aggregate(final NaiveBayesMultinomialText toAggregate) throws Exception {

    if (this.m_numModels == Integer.MIN_VALUE) {
      throw new Exception("Can't aggregate further - model has already been " + "aggregated and finalized");
    }

    if (this.m_probOfClass == null) {
      throw new Exception("No model built yet, can't aggregate");
    }

    // just check the class attribute for compatibility as we will be
    // merging dictionaries
    if (!this.m_data.classAttribute().equals(toAggregate.m_data.classAttribute())) {
      throw new Exception("Can't aggregate - class attribute in data headers " + "does not match: " + this.m_data.classAttribute().equalsMsg(toAggregate.m_data.classAttribute()));
    }

    for (int i = 0; i < this.m_probOfClass.length; i++) {
      // we already have a laplace correction, so -1
      this.m_probOfClass[i] += toAggregate.m_probOfClass[i] - 1;
      this.m_wordsPerClass[i] += toAggregate.m_wordsPerClass[i];
    }

    Map<Integer, LinkedHashMap<String, Count>> dicts = toAggregate.m_probOfWordGivenClass;
    Iterator<Map.Entry<Integer, LinkedHashMap<String, Count>>> perClass = dicts.entrySet().iterator();
    while (perClass.hasNext()) {
      Map.Entry<Integer, LinkedHashMap<String, Count>> currentClassDict = perClass.next();

      LinkedHashMap<String, Count> masterDict = this.m_probOfWordGivenClass.get(currentClassDict.getKey());

      if (masterDict == null) {
        // we haven't seen this class during our training
        masterDict = new LinkedHashMap<String, Count>();
        this.m_probOfWordGivenClass.put(currentClassDict.getKey(), masterDict);
      }

      // now process words seen for this class
      Iterator<Map.Entry<String, Count>> perClassEntries = currentClassDict.getValue().entrySet().iterator();
      while (perClassEntries.hasNext()) {
        Map.Entry<String, Count> entry = perClassEntries.next();

        Count masterCount = masterDict.get(entry.getKey());

        if (masterCount == null) {
          // we haven't seen this entry (or its been pruned)
          masterCount = new Count(entry.getValue().m_count);
          masterDict.put(entry.getKey(), masterCount);
        } else {
          // add up
          masterCount.m_count += entry.getValue().m_count - 1;
        }
      }
    }

    this.m_t += toAggregate.m_t;
    this.m_numModels++;

    return this;
  }

  /**
   * Finishes aggregation, forcing a final dictionary prune if the periodic
   * pruning schedule calls for one.
   *
   * @throws Exception
   *           if no models have been aggregated
   */
  @Override
  public void finalizeAggregation() throws Exception {
    if (this.m_numModels == 0) {
      throw new Exception("Unable to finalize aggregation - " + "haven't seen any models to aggregate");
    }

    if (this.m_periodicP > 0 && this.m_t > this.m_periodicP) {
      this.pruneDictionary(true);
      this.m_t = 0;
    }
  }

  /**
   * Signals the end of a batch of incremental updates: forces a dictionary
   * prune.
   *
   * @throws Exception
   *           if something goes wrong during pruning
   */
  @Override
  public void batchFinished() throws Exception {
    this.pruneDictionary(true);
  }

  /**
   * Main method for testing this class.
   *
   * @param args
   *          the options
   */
  public static void main(final String[] args) {
    runClassifier(new NaiveBayesMultinomialText(), args);
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/NaiveBayesMultinomialUpdateable.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * NaiveBayesMultinomialUpdateable.java * Copyright (C) 2003-2017 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.bayes; import weka.classifiers.UpdateableClassifier; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.Utils; /** <!-- globalinfo-start --> * Class for building and using an updateable multinomial Naive Bayes classifier. For more information see,<br/> * <br/> * Andrew Mccallum, Kamal Nigam: A Comparison of Event Models for Naive Bayes Text Classification. In: AAAI-98 Workshop on 'Learning for Text Categorization', 1998.<br/> * <br/> * The core equation for this classifier:<br/> * <br/> * P[Ci|D] = (P[D|Ci] x P[Ci]) / P[D] (Bayes rule)<br/> * <br/> * where Ci is class i and D is a document. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Mccallum1998, * author = {Andrew Mccallum and Kamal Nigam}, * booktitle = {AAAI-98 Workshop on 'Learning for Text Categorization'}, * title = {A Comparison of Event Models for Naive Bayes Text Classification}, * year = {1998} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * -output-debug-info <br> * If set, classifier is run in debug mode and may output additional info to * the console. 
* <p> * * -do-not-check-capabilities <br> * If set, classifier capabilities are not checked before classifier is built * (use with caution). * <p> * * -num-decimal-laces <br> * The number of decimal places for the output of numbers in the model. * <p> * * -batch-size <br> * The desired batch size for batch prediction. * <p> * <!-- options-end --> * * @author Andrew Golightly (acg4@cs.waikato.ac.nz) * @author Bernhard Pfahringer (bernhard@cs.waikato.ac.nz) * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision$ */ public class NaiveBayesMultinomialUpdateable extends NaiveBayesMultinomial implements UpdateableClassifier { /** for serialization */ static final long serialVersionUID = -7204398796974263186L; /** the number of words per class. */ protected double[] m_wordsPerClass; /** * Returns a string describing this classifier * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for building and using an updateable multinomial Naive Bayes classifier. " + "For more information see,\n\n" + getTechnicalInformation().toString() + "\n\n" + "The core equation for this classifier:\n\n" + "P[Ci|D] = (P[D|Ci] x P[Ci]) / P[D] (Bayes' rule)\n\n" + "where Ci is class i and D is a document."; } /** * Generates the classifier. * * @param instances set of instances serving as training data * @throws Exception if the classifier has not been generated successfully */ public void buildClassifier(Instances instances) throws Exception { initializeClassifier(instances); //enumerate through the instances m_wordsPerClass = new double[m_numClasses]; for (int i = 0; i < m_numClasses; i++) { m_wordsPerClass[i] = m_numAttributes - 1; } for (Instance instance : instances) { updateClassifier(instance); } } /** * Updates the classifier with information from one training instance. 
* * @param instance the instance to be incorporated * @throws Exception if the instance cannot be processed successfully. */ public void updateClassifier(Instance instance) throws Exception { double classValue = instance.value(instance.classIndex()); if (!Utils.isMissingValue(classValue)) { int classIndex = (int) classValue; m_probOfClass[classIndex] += instance.weight(); for (int a = 0; a < instance.numValues(); a++) { if (instance.index(a) != instance.classIndex()) { if (!instance.isMissingSparse(a)) { double numOccurrences = instance.valueSparse(a) * instance.weight(); if (numOccurrences < 0) throw new Exception("Numeric attribute values must all be greater or equal to zero."); m_wordsPerClass[classIndex] += numOccurrences; m_probOfWordGivenClass[classIndex][instance.index(a)] += numOccurrences; } } } } } /** * log(N!) + (sum for all the words i)(log(Pi^ni) - log(ni!)) * * where * N is the total number of words * Pi is the probability of obtaining word i * ni is the number of times the word at index i occurs in the document * * Actually, this method just computes (sum for all the words i)(log(Pi^ni) because the factorials are irrelevant * when posterior class probabilities are computed. * * @param inst The instance to be classified * @param classIndex The index of the class we are calculating the probability with respect to * * @return The log of the probability of the document occuring given the class */ protected double probOfDocGivenClass(Instance inst, int classIndex) { double answer = 0; for(int i = 0; i < inst.numValues(); i++) { if (inst.index(i) != inst.classIndex()) { answer += inst.valueSparse(i) * (Math.log(m_probOfWordGivenClass[classIndex][inst.index(i)]) - Math.log(m_wordsPerClass[classIndex])); } } return answer; } /** * Returns a string representation of the classifier. 
* * @return a string representation of the classifier */ public String toString() { StringBuffer result = new StringBuffer("The class counts (including Laplace correction)\n-----------------------------------------------\n"); for(int c = 0; c<m_numClasses; c++) result.append(m_headerInfo.classAttribute().value(c)).append("\t"). append(Utils.doubleToString(m_probOfClass[c], getNumDecimalPlaces())).append("\n"); result.append("\nThe probability of a word given the class\n-----------------------------------------\n\t"); for(int c = 0; c<m_numClasses; c++) result.append(m_headerInfo.classAttribute().value(c)).append("\t"); result.append("\n"); for(int w = 0; w<m_numAttributes; w++) { if (w != m_headerInfo.classIndex()) { result.append(m_headerInfo.attribute(w).name()).append("\t"); for(int c = 0; c<m_numClasses; c++) result.append(Utils.doubleToString(m_probOfWordGivenClass[c][w] / m_wordsPerClass[c], getNumDecimalPlaces())).append("\t"); result.append("\n"); } } return result.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. * * @param argv the options */ public static void main(String [] argv) { runClassifier(new NaiveBayesMultinomialUpdateable(), argv); } }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/NaiveBayesUpdateable.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * NaiveBayesUpdateable.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes; import weka.classifiers.UpdateableClassifier; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; /** <!-- globalinfo-start --> * Class for a Naive Bayes classifier using estimator classes. This is the updateable version of NaiveBayes.<br/> * This classifier will use a default precision of 0.1 for numeric attributes when buildClassifier is called with zero training instances.<br/> * <br/> * For more information on Naive Bayes classifiers, see<br/> * <br/> * George H. John, Pat Langley: Estimating Continuous Distributions in Bayesian Classifiers. In: Eleventh Conference on Uncertainty in Artificial Intelligence, San Mateo, 338-345, 1995. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{John1995, * address = {San Mateo}, * author = {George H. 
John and Pat Langley}, * booktitle = {Eleventh Conference on Uncertainty in Artificial Intelligence}, * pages = {338-345}, * publisher = {Morgan Kaufmann}, * title = {Estimating Continuous Distributions in Bayesian Classifiers}, * year = {1995} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -K * Use kernel density estimator rather than normal * distribution for numeric attributes</pre> * * <pre> -D * Use supervised discretization to process numeric attributes * </pre> * * <pre> -O * Display model in old format (good when there are many classes) * </pre> * <!-- options-end --> * * @author Len Trigg (trigg@cs.waikato.ac.nz) * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision$ */ public class NaiveBayesUpdateable extends NaiveBayes implements UpdateableClassifier { /** for serialization */ static final long serialVersionUID = -5354015843807192221L; /** * Returns a string describing this classifier * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for a Naive Bayes classifier using estimator classes. This is the " +"updateable version of NaiveBayes.\n" +"This classifier will use a default precision of 0.1 for numeric attributes " +"when buildClassifier is called with zero training instances.\n\n" +"For more information on Naive Bayes classifiers, see\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { return super.getTechnicalInformation(); } /** * Set whether supervised discretization is to be used. * * @param newblah true if supervised discretization is to be used. 
*/ public void setUseSupervisedDiscretization(boolean newblah) { if (newblah) { throw new IllegalArgumentException("Can't use discretization " + "in NaiveBayesUpdateable!"); } m_UseDiscretization = false; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. * * @param argv the options */ public static void main(String [] argv) { runClassifier(new NaiveBayesUpdateable(), argv); } }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/ADNode.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * ADNode.java
 * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net;

import java.io.FileReader;
import java.io.Serializable;
import java.util.ArrayList;

import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;

/**
 * The ADNode class implements the ADTree datastructure which increases the
 * speed with which sub-contingency tables can be constructed from a data set
 * in an Instances object. For details, see:
 * <p/>
 *
 * <!-- technical-plaintext-start --> Andrew W. Moore, Mary S. Lee (1998).
 * Cached Sufficient Statistics for Efficient Machine Learning with Large
 * Datasets. Journal of Artificial Intelligence Research. 8:67-91. <!--
 * technical-plaintext-end -->
 * <p/>
 *
 * <!-- technical-bibtex-start --> BibTeX:
 *
 * <pre>
 * &#64;article{Moore1998,
 *    author = {Andrew W. Moore and Mary S. Lee},
 *    journal = {Journal of Artificial Intelligence Research},
 *    pages = {67-91},
 *    title = {Cached Sufficient Statistics for Efficient Machine Learning with Large Datasets},
 *    volume = {8},
 *    year = {1998}
 * }
 * </pre>
 * <p/>
 * <!-- technical-bibtex-end -->
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision$
 */
public class ADNode implements Serializable, TechnicalInformationHandler,
  RevisionHandler {

  /** for serialization */
  static final long serialVersionUID = 397409728366910204L;

  /**
   * Record-count threshold below which a node stores raw instances instead of
   * expanding into VaryNodes. With 0 the tree is always fully expanded.
   */
  final static int MIN_RECORD_SIZE = 0;

  /** list of VaryNode children **/
  public VaryNode[] m_VaryNodes;

  /**
   * list of Instance children (either m_Instances or m_VaryNodes is
   * instantiated)
   **/
  public Instance[] m_Instances;

  /** count **/
  public int m_nCount;

  /** first node in VaryNode array **/
  public int m_nStartNode;

  /** Creates new ADNode */
  public ADNode() {
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing detailed
   * information about the technical background of this class, e.g., paper
   * reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  @Override
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result = new TechnicalInformation(Type.ARTICLE);
    result.setValue(Field.AUTHOR, "Andrew W. Moore and Mary S. Lee");
    result.setValue(Field.YEAR, "1998");
    result.setValue(Field.TITLE,
      "Cached Sufficient Statistics for Efficient Machine Learning with Large Datasets");
    result.setValue(Field.JOURNAL, "Journal of Artificial Intelligence Research");
    result.setValue(Field.VOLUME, "8");
    result.setValue(Field.PAGES, "67-91");
    return result;
  }

  /**
   * create sub tree
   *
   * @param iNode index of the lowest node in the tree
   * @param nRecords set of records in instances to be considered
   * @param instances data set
   * @return VaryNode representing part of an ADTree
   * @throws InterruptedException if the executing thread was interrupted
   *           (this fork of WEKA makes tree construction interruptible)
   **/
  public static VaryNode makeVaryNode(final int iNode,
    final ArrayList<Integer> nRecords, final Instances instances)
    throws InterruptedException {
    VaryNode _VaryNode = new VaryNode(iNode);
    int nValues = instances.attribute(iNode).numValues();

    // reserve memory and initialize one record bucket per attribute value
    @SuppressWarnings("unchecked")
    ArrayList<Integer>[] nChildRecords = new ArrayList[nValues];
    for (int iChild = 0; iChild < nValues; iChild++) {
      nChildRecords[iChild] = new ArrayList<Integer>();
    }

    // divide the records among children
    for (int iRecord = 0; iRecord < nRecords.size(); iRecord++) {
      if (Thread.interrupted()) { // XXX interrupt weka
        throw new InterruptedException("Killed WEKA!");
      }
      int iInstance = nRecords.get(iRecord).intValue();
      nChildRecords[(int) instances.instance(iInstance).value(iNode)]
        .add(iInstance);
    }

    // find most common value (MCV)
    int nCount = nChildRecords[0].size();
    int nMCV = 0;
    for (int iChild = 1; iChild < nValues; iChild++) {
      if (Thread.interrupted()) { // XXX interrupt weka
        throw new InterruptedException("Killed WEKA!");
      }
      if (nChildRecords[iChild].size() > nCount) {
        nCount = nChildRecords[iChild].size();
        nMCV = iChild;
      }
    }
    _VaryNode.m_nMCV = nMCV;

    // determine child nodes; the MCV branch and empty branches stay null,
    // their counts are recovered by subtraction (the ADTree trick)
    _VaryNode.m_ADNodes = new ADNode[nValues];
    for (int iChild = 0; iChild < nValues; iChild++) {
      if (Thread.interrupted()) { // XXX interrupt weka
        throw new InterruptedException("Killed WEKA!");
      }
      if (iChild == nMCV || nChildRecords[iChild].size() == 0) {
        _VaryNode.m_ADNodes[iChild] = null;
      } else {
        _VaryNode.m_ADNodes[iChild] =
          makeADTree(iNode + 1, nChildRecords[iChild], instances);
      }
    }
    return _VaryNode;
  } // MakeVaryNode

  /**
   * create sub tree
   *
   * @param iNode index of the lowest node in the tree
   * @param nRecords set of records in instances to be considered
   * @param instances data set
   * @return ADNode representing an ADTree
   * @throws InterruptedException if the executing thread was interrupted
   */
  public static ADNode makeADTree(final int iNode,
    final ArrayList<Integer> nRecords, final Instances instances)
    throws InterruptedException {
    ADNode _ADNode = new ADNode();
    _ADNode.m_nCount = nRecords.size();
    _ADNode.m_nStartNode = iNode;
    if (nRecords.size() < MIN_RECORD_SIZE) {
      // few records: store the instances directly instead of expanding
      _ADNode.m_Instances = new Instance[nRecords.size()];
      for (int iInstance = 0; iInstance < nRecords.size(); iInstance++) {
        if (Thread.interrupted()) { // XXX interrupt weka
          throw new InterruptedException("Killed WEKA!");
        }
        _ADNode.m_Instances[iInstance] =
          instances.instance(nRecords.get(iInstance).intValue());
      }
    } else {
      // expand into one VaryNode per remaining attribute
      _ADNode.m_VaryNodes = new VaryNode[instances.numAttributes() - iNode];
      for (int iNode2 = iNode; iNode2 < instances.numAttributes(); iNode2++) {
        if (Thread.interrupted()) { // XXX interrupt weka
          throw new InterruptedException("Killed WEKA!");
        }
        _ADNode.m_VaryNodes[iNode2 - iNode] =
          makeVaryNode(iNode2, nRecords, instances);
      }
    }
    return _ADNode;
  } // MakeADTree

  /**
   * create AD tree from set of instances
   *
   * @param instances data set
   * @return ADNode representing an ADTree
   * @throws InterruptedException if the executing thread was interrupted
   */
  public static ADNode makeADTree(final Instances instances)
    throws InterruptedException {
    ArrayList<Integer> nRecords =
      new ArrayList<Integer>(instances.numInstances());
    for (int iRecord = 0; iRecord < instances.numInstances(); iRecord++) {
      if (Thread.interrupted()) { // XXX interrupt weka
        throw new InterruptedException("Killed WEKA!");
      }
      nRecords.add(iRecord);
    }
    return makeADTree(0, nRecords, instances);
  } // MakeADTree

  /**
   * get counts for specific instantiation of a set of nodes
   *
   * @param nCounts - array for storing counts
   * @param nNodes - array of node indexes
   * @param nOffsets - offset for nodes in nNodes in nCounts
   * @param iNode - index into nNode indicating current node
   * @param iOffset - Offset into nCounts due to nodes below iNode
   * @param bSubstract - indicate whether counts should be added or substracted
   * @throws InterruptedException if the executing thread was interrupted
   */
  public void getCounts(final int[] nCounts, final int[] nNodes,
    final int[] nOffsets, final int iNode, final int iOffset,
    final boolean bSubstract) throws InterruptedException {
    if (iNode >= nNodes.length) {
      // leaf of the query: fold this node's count into the table
      if (bSubstract) {
        nCounts[iOffset] -= m_nCount;
      } else {
        nCounts[iOffset] += m_nCount;
      }
      return;
    }
    if (m_VaryNodes != null) {
      // delegate to the VaryNode for the queried attribute
      m_VaryNodes[nNodes[iNode] - m_nStartNode].getCounts(nCounts, nNodes,
        nOffsets, iNode, iOffset, this, bSubstract);
    } else {
      // raw-instance leaf: tally the stored instances directly
      for (Instance instance : m_Instances) {
        if (Thread.interrupted()) { // XXX interrupt weka
          throw new InterruptedException("Killed WEKA!");
        }
        int iOffset2 = iOffset;
        for (int iNode2 = iNode; iNode2 < nNodes.length; iNode2++) {
          iOffset2 = iOffset2 + nOffsets[iNode2]
            * (int) instance.value(nNodes[iNode2]);
        }
        if (bSubstract) {
          nCounts[iOffset2]--;
        } else {
          nCounts[iOffset2]++;
        }
      }
    }
  } // getCounts

  /**
   * print is used for debugging only and shows the ADTree in ASCII graphics
   */
  public void print() {
    StringBuilder tab = new StringBuilder();
    for (int i = 0; i < m_nStartNode; i++) {
      tab.append(" ");
    }
    String sTab = tab.toString();
    System.out.println(sTab + "Count = " + m_nCount);
    if (m_VaryNodes != null) {
      for (int iNode = 0; iNode < m_VaryNodes.length; iNode++) {
        System.out.println(sTab + "Node " + (iNode + m_nStartNode));
        m_VaryNodes[iNode].print(sTab);
      }
    } else {
      System.out.println(m_Instances);
    }
  }

  /**
   * for testing only; builds an ADTree from an ARFF file (first command-line
   * argument, falling back to the historical hard-coded path) and queries it
   *
   * @param argv the commandline options
   */
  public static void main(final String[] argv) {
    try {
      String sFile = (argv.length > 0) ? argv[0] : "\\iris.2.arff";
      Instances instances = new Instances(new FileReader(sFile));
      ADNode ADTree = ADNode.makeADTree(instances);
      int[] nCounts = new int[12];
      int[] nNodes = new int[3];
      int[] nOffsets = new int[3];
      nNodes[0] = 0;
      nNodes[1] = 3;
      nNodes[2] = 4;
      nOffsets[0] = 2;
      nOffsets[1] = 1;
      nOffsets[2] = 4;
      ADTree.print();
      ADTree.getCounts(nCounts, nNodes, nOffsets, 0, 0, false);
    } catch (Throwable t) {
      t.printStackTrace();
    }
  } // main

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
} // class ADNode
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/BIFReader.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * BIFReader.java * Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net; import java.io.File; import java.io.StringReader; import java.util.ArrayList; import java.util.StringTokenizer; import javax.xml.parsers.DocumentBuilderFactory; import org.w3c.dom.CharacterData; import org.w3c.dom.Document; import org.w3c.dom.Element; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import weka.classifiers.bayes.BayesNet; import weka.classifiers.bayes.net.estimate.DiscreteEstimatorBayes; import weka.core.Attribute; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.estimators.Estimator; /** * <!-- globalinfo-start --> Builds a description of a Bayes Net classifier * stored in XML BIF 0.3 format.<br/> * <br/> * For more details on XML BIF see:<br/> * <br/> * Fabio Cozman, Marek Druzdzel, Daniel Garcia (1998). XML BIF version 0.3. URL * http://www-2.cs.cmu.edu/~fgcozman/Research/InterchangeFormat/. 
* <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;misc{Cozman1998, * author = {Fabio Cozman and Marek Druzdzel and Daniel Garcia}, * title = {XML BIF version 0.3}, * year = {1998}, * URL = {http://www-2.cs.cmu.edu/\~fgcozman/Research/InterchangeFormat/} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -D * Do not use ADTree data structure * </pre> * * <pre> * -B &lt;BIF file&gt; * BIF file to compare with * </pre> * * <pre> * -Q weka.classifiers.bayes.net.search.SearchAlgorithm * Search algorithm * </pre> * * <pre> * -E weka.classifiers.bayes.net.estimate.SimpleEstimator * Estimator algorithm * </pre> * * <!-- options-end --> * * @author Remco Bouckaert (rrb@xm.co.nz) * @version $Revision$ */ public class BIFReader extends BayesNet implements TechnicalInformationHandler { protected int[] m_nPositionX; protected int[] m_nPositionY; private int[] m_order; /** for serialization */ static final long serialVersionUID = -8358864680379881429L; /** * This will return a string describing the classifier. * * @return The string. 
*/ @Override public String globalInfo() { return "Builds a description of a Bayes Net classifier stored in XML " + "BIF 0.3 format.\n\n" + "For more details on XML BIF see:\n\n" + getTechnicalInformation().toString(); } /** * processFile reads a BIFXML file and initializes a Bayes Net * * @param sFile name of the file to parse * @return the BIFReader * @throws Exception if processing fails */ public BIFReader processFile(String sFile) throws Exception { m_sFile = sFile; DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); factory.setValidating(true); Document doc = factory.newDocumentBuilder().parse(new File(sFile)); doc.normalize(); buildInstances(doc, sFile); buildStructure(doc); return this; } // processFile public BIFReader processString(String sStr) throws Exception { DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); factory.setValidating(true); Document doc = factory.newDocumentBuilder().parse( new org.xml.sax.InputSource(new StringReader(sStr))); doc.normalize(); buildInstances(doc, "from-string"); buildStructure(doc); return this; } // processString /** the current filename */ String m_sFile; /** * returns the current filename * * @return the current filename */ public String getFileName() { return m_sFile; } /** * Returns an instance of a TechnicalInformation object, containing detailed * information about the technical background of this class, e.g., paper * reference or book this class is based on. 
* * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.MISC); result.setValue(Field.AUTHOR, "Fabio Cozman and Marek Druzdzel and Daniel Garcia"); result.setValue(Field.YEAR, "1998"); result.setValue(Field.TITLE, "XML BIF version 0.3"); result.setValue(Field.URL, "http://www-2.cs.cmu.edu/~fgcozman/Research/InterchangeFormat/"); return result; } /** * buildStructure parses the BIF document in the DOM tree contained in the doc * parameter and specifies the the network structure and probability tables. * It assumes that buildInstances has been called before * * @param doc DOM document containing BIF document in DOM tree * @throws Exception if building of structure fails */ void buildStructure(Document doc) throws Exception { // Get the name of the network // initialize conditional distribution tables m_Distributions = new Estimator[m_Instances.numAttributes()][]; for (int iNode = 0; iNode < m_Instances.numAttributes(); iNode++) { // find definition that goes with this node String sName = m_Instances.attribute(iNode).name(); Element definition = getDefinition(doc, sName); /* * if (nodelist.getLength() == 0) { throw new * Exception("No definition found for node " + sName); } if * (nodelist.getLength() > 1) { * System.err.println("More than one definition found for node " + sName + * ". 
Using first definition."); } Element definition = (Element) * nodelist.item(0); */ // get the parents for this node // resolve structure ArrayList<Node> nodelist = getParentNodes(definition); for (int iParent = 0; iParent < nodelist.size(); iParent++) { Node parentName = nodelist.get(iParent).getFirstChild(); String sParentName = ((CharacterData) (parentName)).getData(); int nParent = getNode(sParentName); m_ParentSets[iNode].addParent(nParent, m_Instances); } // resolve conditional probability table int nCardinality = m_ParentSets[iNode].getCardinalityOfParents(); int nValues = m_Instances.attribute(iNode).numValues(); m_Distributions[iNode] = new Estimator[nCardinality]; for (int i = 0; i < nCardinality; i++) { m_Distributions[iNode][i] = new DiscreteEstimatorBayes(nValues, 0.0f); } /* * StringBuffer sTable = new StringBuffer(); for (int iText = 0; iText < * nodelist.getLength(); iText++) { sTable.append(((CharacterData) * (nodelist.item(iText))).getData()); sTable.append(' '); } * StringTokenizer st = new StringTokenizer(sTable.toString()); */ String sTable = getTable(definition); StringTokenizer st = new StringTokenizer(sTable.toString()); for (int i = 0; i < nCardinality; i++) { DiscreteEstimatorBayes d = (DiscreteEstimatorBayes) m_Distributions[iNode][i]; for (int iValue = 0; iValue < nValues; iValue++) { String sWeight = st.nextToken(); d.addValue(iValue, new Double(sWeight).doubleValue()); } } } } // buildStructure /** * synchronizes the node ordering of this Bayes network with those in the * other network (if possible). * * @param other Bayes network to synchronize with * @throws Exception if nr of attributes differs or not all of the variables * have the same name. 
*/ public void Sync(BayesNet other) throws Exception { int nAtts = m_Instances.numAttributes(); if (nAtts != other.m_Instances.numAttributes()) { throw new Exception( "Cannot synchronize networks: different number of attributes."); } m_order = new int[nAtts]; for (int iNode = 0; iNode < nAtts; iNode++) { String sName = other.getNodeName(iNode); m_order[getNode(sName)] = iNode; } } // Sync /** * Returns all TEXT children of the given node in one string. Between the node * values new lines are inserted. * * @param node the node to return the content for * @return the content of the node */ public String getContent(Element node) { NodeList list; Node item; int i; String result; result = ""; list = node.getChildNodes(); for (i = 0; i < list.getLength(); i++) { item = list.item(i); if (item.getNodeType() == Node.TEXT_NODE) { result += "\n" + item.getNodeValue(); } } return result; } /** * buildInstances parses the BIF document and creates a Bayes Net with its * nodes specified, but leaves the network structure and probability tables * empty. * * @param doc DOM document containing BIF document in DOM tree * @param sName default name to give to the Bayes Net. Will be overridden if * specified in the BIF document. 
* @throws Exception if building fails */ void buildInstances(Document doc, String sName) throws Exception { NodeList nodelist; // Get the name of the network nodelist = selectAllNames(doc); if (nodelist.getLength() > 0) { sName = ((CharacterData) (nodelist.item(0).getFirstChild())).getData(); } // Process variables nodelist = selectAllVariables(doc); int nNodes = nodelist.getLength(); // initialize structure ArrayList<Attribute> attInfo = new ArrayList<Attribute>(nNodes); // Initialize m_nPositionX = new int[nodelist.getLength()]; m_nPositionY = new int[nodelist.getLength()]; // Process variables for (int iNode = 0; iNode < nodelist.getLength(); iNode++) { // Get element ArrayList<Node> valueslist; // Get the name of the network valueslist = selectOutCome(nodelist.item(iNode)); int nValues = valueslist.size(); // generate value strings ArrayList<String> nomStrings = new ArrayList<String>(nValues + 1); for (int iValue = 0; iValue < nValues; iValue++) { Node node = valueslist.get(iValue).getFirstChild(); String sValue = ((CharacterData) (node)).getData(); if (sValue == null) { sValue = "Value" + (iValue + 1); } nomStrings.add(sValue); } ArrayList<Node> nodelist2; // Get the name of the network nodelist2 = selectName(nodelist.item(iNode)); if (nodelist2.size() == 0) { throw new Exception("No name specified for variable"); } String sNodeName = ((CharacterData) (nodelist2.get(0).getFirstChild())) .getData(); weka.core.Attribute att = new weka.core.Attribute(sNodeName, nomStrings); attInfo.add(att); valueslist = selectProperty(nodelist.item(iNode)); nValues = valueslist.size(); // generate value strings for (int iValue = 0; iValue < nValues; iValue++) { // parsing for strings of the form "position = (73, 165)" Node node = valueslist.get(iValue).getFirstChild(); String sValue = ((CharacterData) (node)).getData(); if (sValue.startsWith("position")) { int i0 = sValue.indexOf('('); int i1 = sValue.indexOf(','); int i2 = sValue.indexOf(')'); String sX = sValue.substring(i0 + 
1, i1).trim(); String sY = sValue.substring(i1 + 1, i2).trim(); try { m_nPositionX[iNode] = Integer.parseInt(sX); m_nPositionY[iNode] = Integer.parseInt(sY); } catch (NumberFormatException e) { System.err.println("Wrong number format in position :(" + sX + "," + sY + ")"); m_nPositionX[iNode] = 0; m_nPositionY[iNode] = 0; } } } } m_Instances = new Instances(sName, attInfo, 100); m_Instances.setClassIndex(nNodes - 1); setUseADTree(false); initStructure(); } // buildInstances // /** selectNodeList selects list of nodes from document specified in XPath // expression // * @param doc : document (or node) to query // * @param sXPath : XPath expression // * @return list of nodes conforming to XPath expression in doc // * @throws Exception // */ // private NodeList selectNodeList(Node doc, String sXPath) throws Exception { // NodeList nodelist = org.apache.xpath.XPathAPI.selectNodeList(doc, sXPath); // return nodelist; // } // selectNodeList NodeList selectAllNames(Document doc) throws Exception { // NodeList nodelist = selectNodeList(doc, "//NAME"); NodeList nodelist = doc.getElementsByTagName("NAME"); return nodelist; } // selectAllNames NodeList selectAllVariables(Document doc) throws Exception { // NodeList nodelist = selectNodeList(doc, "//VARIABLE"); NodeList nodelist = doc.getElementsByTagName("VARIABLE"); return nodelist; } // selectAllVariables Element getDefinition(Document doc, String sName) throws Exception { // NodeList nodelist = selectNodeList(doc, // "//DEFINITION[normalize-space(FOR/text())=\"" + sName + "\"]"); NodeList nodelist = doc.getElementsByTagName("DEFINITION"); for (int iNode = 0; iNode < nodelist.getLength(); iNode++) { Node node = nodelist.item(iNode); ArrayList<Node> list = selectElements(node, "FOR"); if (list.size() > 0) { Node forNode = list.get(0); if (getContent((Element) forNode).trim().equals(sName)) { return (Element) node; } } } throw new Exception("Could not find definition for ((" + sName + "))"); } // getDefinition ArrayList<Node> 
getParentNodes(Node definition) throws Exception { // NodeList nodelist = selectNodeList(definition, "GIVEN"); ArrayList<Node> nodelist = selectElements(definition, "GIVEN"); return nodelist; } // getParentNodes String getTable(Node definition) throws Exception { // NodeList nodelist = selectNodeList(definition, "TABLE/text()"); ArrayList<Node> nodelist = selectElements(definition, "TABLE"); String sTable = getContent((Element) nodelist.get(0)); sTable = sTable.replaceAll("\\n", " "); return sTable; } // getTable ArrayList<Node> selectOutCome(Node item) throws Exception { // NodeList nodelist = selectNodeList(item, "OUTCOME"); ArrayList<Node> nodelist = selectElements(item, "OUTCOME"); return nodelist; } // selectOutCome ArrayList<Node> selectName(Node item) throws Exception { // NodeList nodelist = selectNodeList(item, "NAME"); ArrayList<Node> nodelist = selectElements(item, "NAME"); return nodelist; } // selectName ArrayList<Node> selectProperty(Node item) throws Exception { // NodeList nodelist = selectNodeList(item, "PROPERTY"); ArrayList<Node> nodelist = selectElements(item, "PROPERTY"); return nodelist; } // selectProperty ArrayList<Node> selectElements(Node item, String sElement) throws Exception { NodeList children = item.getChildNodes(); ArrayList<Node> nodelist = new ArrayList<Node>(); for (int iNode = 0; iNode < children.getLength(); iNode++) { Node node = children.item(iNode); if ((node.getNodeType() == Node.ELEMENT_NODE) && node.getNodeName().equals(sElement)) { nodelist.add(node); } } return nodelist; } // selectElements /** * Count nr of arcs missing from other network compared to current network * Note that an arc is not 'missing' if it is reversed. 
* * @param other network to compare with * @return nr of missing arcs */ public int missingArcs(BayesNet other) { try { Sync(other); int nMissing = 0; for (int iAttribute = 0; iAttribute < m_Instances.numAttributes(); iAttribute++) { for (int iParent = 0; iParent < m_ParentSets[iAttribute] .getNrOfParents(); iParent++) { int nParent = m_ParentSets[iAttribute].getParent(iParent); if (!other.getParentSet(m_order[iAttribute]).contains( m_order[nParent]) && !other.getParentSet(m_order[nParent]).contains( m_order[iAttribute])) { nMissing++; } } } return nMissing; } catch (Exception e) { System.err.println(e.getMessage()); return 0; } } // missingArcs /** * Count nr of exta arcs from other network compared to current network Note * that an arc is not 'extra' if it is reversed. * * @param other network to compare with * @return nr of missing arcs */ public int extraArcs(BayesNet other) { try { Sync(other); int nExtra = 0; for (int iAttribute = 0; iAttribute < m_Instances.numAttributes(); iAttribute++) { for (int iParent = 0; iParent < other.getParentSet(m_order[iAttribute]) .getNrOfParents(); iParent++) { int nParent = m_order[other.getParentSet(m_order[iAttribute]) .getParent(iParent)]; if (!m_ParentSets[iAttribute].contains(nParent) && !m_ParentSets[nParent].contains(iAttribute)) { nExtra++; } } } return nExtra; } catch (Exception e) { System.err.println(e.getMessage()); return 0; } } // extraArcs /** * calculates the divergence between the probability distribution represented * by this network and that of another, that is, \sum_{x\in X} P(x)log * P(x)/Q(x) where X is the set of values the nodes in the network can take, * P(x) the probability of this network for configuration x Q(x) the * probability of the other network for configuration x * * @param other network to compare with * @return divergence between this and other Bayes Network */ public double divergence(BayesNet other) { try { Sync(other); // D: divergence double D = 0.0; int nNodes = 
m_Instances.numAttributes();
    // nCard[i]: cardinality (number of values) of node i
    int[] nCard = new int[nNodes];
    for (int iNode = 0; iNode < nNodes; iNode++) {
      nCard[iNode] = m_Instances.attribute(iNode).numValues();
    }
    // x: holds current configuration of nodes
    int[] x = new int[nNodes];
    // simply sum over all configurations to calc divergence D
    // NOTE(review): x is incremented before every evaluation, so the all-zero
    // configuration (0,...,0) is never visited — verify this is intended.
    int i = 0;
    while (i < nNodes) {
      // update configuration (odometer-style increment with carry)
      x[i]++;
      while (i < nNodes && x[i] == m_Instances.attribute(i).numValues()) {
        x[i] = 0;
        i++;
        if (i < nNodes) {
          x[i]++;
        }
      }
      if (i < nNodes) {
        i = 0;
        // calc P(x) and Q(x)
        double P = 1.0;
        for (int iNode = 0; iNode < nNodes; iNode++) {
          // iCPT: row index into node's CPT, built from the parent values
          int iCPT = 0;
          for (int iParent = 0; iParent < m_ParentSets[iNode]
            .getNrOfParents(); iParent++) {
            int nParent = m_ParentSets[iNode].getParent(iParent);
            iCPT = iCPT * nCard[nParent] + x[nParent];
          }
          P = P * m_Distributions[iNode][iCPT].getProbability(x[iNode]);
        }
        double Q = 1.0;
        for (int iNode = 0; iNode < nNodes; iNode++) {
          int iCPT = 0;
          // parent indices of 'other' are mapped through m_order (set by Sync)
          for (int iParent = 0; iParent < other.getParentSet(m_order[iNode])
            .getNrOfParents(); iParent++) {
            int nParent = m_order[other.getParentSet(m_order[iNode])
              .getParent(iParent)];
            iCPT = iCPT * nCard[nParent] + x[nParent];
          }
          Q = Q * other.m_Distributions[m_order[iNode]][iCPT]
            .getProbability(x[iNode]);
        }
        // update divergence if probabilities are positive
        // NOTE(review): this accumulates P*log(Q/P), i.e. the NEGATIVE of the
        // sum P*log(P/Q) stated in the javadoc — confirm the intended sign.
        if (P > 0.0 && Q > 0.0) {
          D = D + P * Math.log(Q / P);
        }
      }
    }
    return D;
  } catch (Exception e) {
    // NOTE(review): failures are swallowed and reported as divergence 0
    System.err.println(e.getMessage());
    return 0;
  }
} // divergence

/**
 * Count nr of reversed arcs from other network compared to current network
 *
 * @param other network to compare with
 * @return nr of reversed arcs
 */
public int reversedArcs(BayesNet other) {
  try {
    Sync(other);
    int nReversed = 0;
    for (int iAttribute = 0; iAttribute < m_Instances.numAttributes(); iAttribute++) {
      for (int iParent = 0; iParent < m_ParentSets[iAttribute]
        .getNrOfParents(); iParent++) {
        int nParent = m_ParentSets[iAttribute].getParent(iParent);
        // reversed: arc absent in the same direction but present the other way
        if (!other.getParentSet(m_order[iAttribute]).contains(
          m_order[nParent]) &&
other.getParentSet(m_order[nParent]).contains(
          m_order[iAttribute])) {
          nReversed++;
        }
      }
    }
    return nReversed;
  } catch (Exception e) {
    // NOTE(review): failures are swallowed and reported as 0 reversed arcs
    System.err.println(e.getMessage());
    return 0;
  }
} // reversedArcs

/**
 * getNode finds the index of the node with name sNodeName and throws an
 * exception if no such node can be found. The lookup is a linear scan over
 * the attributes of the underlying data set.
 *
 * @param sNodeName name of the node to get the index from
 * @return index of the node with name sNodeName
 * @throws Exception if node cannot be found
 */
public int getNode(String sNodeName) throws Exception {
  int iNode = 0;
  while (iNode < m_Instances.numAttributes()) {
    if (m_Instances.attribute(iNode).name().equals(sNodeName)) {
      return iNode;
    }
    iNode++;
  }
  throw new Exception("Could not find node [[" + sNodeName + "]]");
} // getNode

/**
 * the default constructor
 */
public BIFReader() {
}

/**
 * Returns the revision string.
 *
 * @return the revision
 */
@Override
public String getRevision() {
  return RevisionUtils.extract("$Revision$");
}

/**
 * Loads the file specified as first parameter and prints it to stdout.
 *
 * @param args the command line parameters; args[0] is the BIF file to load
 */
public static void main(String[] args) {
  try {
    BIFReader br = new BIFReader();
    br.processFile(args[0]);
    System.out.println(br.toString());
  } catch (Throwable t) {
    t.printStackTrace();
  }
} // main
} // class BIFReader
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/BayesNetGenerator.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * BayesNet.java * Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net; import java.util.ArrayList; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.bayes.net.estimate.DiscreteEstimatorBayes; import weka.core.Attribute; import weka.core.DenseInstance; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import weka.estimators.Estimator; /** * <!-- globalinfo-start --> Bayes Network learning using various search * algorithms and quality measures.<br/> * Base class for a Bayes Network classifier. Provides datastructures (network * structure, conditional probability distributions, etc.) 
and facilities common * to Bayes Network learning algorithms like K2 and B.<br/> * <br/> * For more information see:<br/> * <br/> * http://www.cs.waikato.ac.nz/~remco/weka.pdf * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -B * Generate network (instead of instances) * </pre> * * <pre> * -N &lt;integer&gt; * Nr of nodes * </pre> * * <pre> * -A &lt;integer&gt; * Nr of arcs * </pre> * * <pre> * -M &lt;integer&gt; * Nr of instances * </pre> * * <pre> * -C &lt;integer&gt; * Cardinality of the variables * </pre> * * <pre> * -S &lt;integer&gt; * Seed for random number generator * </pre> * * <pre> * -F &lt;file&gt; * The BIF file to obtain the structure from. * </pre> * * <!-- options-end --> * * @author Remco Bouckaert (rrb@xm.co.nz) * @version $Revision$ */ public class BayesNetGenerator extends EditableBayesNet { /** the seed value */ int m_nSeed = 1; /** the random number generator */ Random random; /** for serialization */ static final long serialVersionUID = -7462571170596157720L; /** * Constructor for BayesNetGenerator. */ public BayesNetGenerator() { super(); } // c'tor /** * Generate random connected Bayesian network with discrete nodes having all * the same cardinality. 
* * @throws Exception if something goes wrong */ public void generateRandomNetwork() throws Exception { if (m_otherBayesNet == null) { // generate from scratch Init(m_nNrOfNodes, m_nCardinality); generateRandomNetworkStructure(m_nNrOfNodes, m_nNrOfArcs); generateRandomDistributions(m_nNrOfNodes, m_nCardinality); } else { // read from file, just copy parent sets and distributions m_nNrOfNodes = m_otherBayesNet.getNrOfNodes(); m_ParentSets = m_otherBayesNet.getParentSets(); m_Distributions = m_otherBayesNet.getDistributions(); random = new Random(m_nSeed); // initialize m_Instances ArrayList<Attribute> attInfo = new ArrayList<Attribute>(m_nNrOfNodes); // generate value strings for (int iNode = 0; iNode < m_nNrOfNodes; iNode++) { int nValues = m_otherBayesNet.getCardinality(iNode); ArrayList<String> nomStrings = new ArrayList<String>(nValues + 1); for (int iValue = 0; iValue < nValues; iValue++) { nomStrings.add(m_otherBayesNet.getNodeValue(iNode, iValue)); } Attribute att = new Attribute(m_otherBayesNet.getNodeName(iNode), nomStrings); attInfo.add(att); } m_Instances = new Instances(m_otherBayesNet.getName(), attInfo, 100); m_Instances.setClassIndex(m_nNrOfNodes - 1); } } // GenerateRandomNetwork /** * Init defines a minimal Bayes net with no arcs * * @param nNodes number of nodes in the Bayes net * @param nValues number of values each of the nodes can take * @throws Exception if something goes wrong */ public void Init(int nNodes, int nValues) throws Exception { random = new Random(m_nSeed); // initialize structure ArrayList<Attribute> attInfo = new ArrayList<Attribute>(nNodes); // generate value strings ArrayList<String> nomStrings = new ArrayList<String>(nValues + 1); for (int iValue = 0; iValue < nValues; iValue++) { nomStrings.add("Value" + (iValue + 1)); } for (int iNode = 0; iNode < nNodes; iNode++) { Attribute att = new Attribute("Node" + (iNode + 1), nomStrings); attInfo.add(att); } m_Instances = new Instances("RandomNet", attInfo, 100); 
m_Instances.setClassIndex(nNodes - 1); setUseADTree(false); // m_bInitAsNaiveBayes = false; // m_bMarkovBlanketClassifier = false; initStructure(); // initialize conditional distribution tables m_Distributions = new Estimator[nNodes][1]; for (int iNode = 0; iNode < nNodes; iNode++) { m_Distributions[iNode][0] = new DiscreteEstimatorBayes(nValues, getEstimator().getAlpha()); } m_nEvidence = new ArrayList<Integer>(nNodes); for (int i = 0; i < nNodes; i++) { m_nEvidence.add(-1); } m_fMarginP = new ArrayList<double[]>(nNodes); for (int i = 0; i < nNodes; i++) { double[] P = new double[getCardinality(i)]; m_fMarginP.add(P); } m_nPositionX = new ArrayList<Integer>(nNodes); m_nPositionY = new ArrayList<Integer>(nNodes); for (int iNode = 0; iNode < nNodes; iNode++) { m_nPositionX.add(iNode % 10 * 50); m_nPositionY.add((iNode / 10) * 50); } } // DefineNodes /** * GenerateRandomNetworkStructure generate random connected Bayesian network * * @param nNodes number of nodes in the Bayes net to generate * @param nArcs number of arcs to generate. Must be between nNodes - 1 and * nNodes * (nNodes-1) / 2 * @throws Exception if number of arcs is incorrect */ public void generateRandomNetworkStructure(int nNodes, int nArcs) throws Exception { if (nArcs < nNodes - 1) { throw new Exception("Number of arcs should be at least (nNodes - 1) = " + (nNodes - 1) + " instead of " + nArcs); } if (nArcs > nNodes * (nNodes - 1) / 2) { throw new Exception( "Number of arcs should be at most nNodes * (nNodes - 1) / 2 = " + (nNodes * (nNodes - 1) / 2) + " instead of " + nArcs); } if (nArcs == 0) { return; } // deal with patalogical case for nNodes = 1 // first generate tree connecting all nodes generateTree(nNodes); // The tree contains nNodes - 1 arcs, so there are // nArcs - (nNodes-1) to add at random. // All arcs point from lower to higher ordered nodes // so that acyclicity is ensured. 
for (int iArc = nNodes - 1; iArc < nArcs; iArc++) { boolean bDone = false; while (!bDone) { int nNode1 = random.nextInt(nNodes); int nNode2 = random.nextInt(nNodes); if (nNode1 == nNode2) { nNode2 = (nNode1 + 1) % nNodes; } if (nNode2 < nNode1) { int h = nNode1; nNode1 = nNode2; nNode2 = h; } if (!m_ParentSets[nNode2].contains(nNode1)) { m_ParentSets[nNode2].addParent(nNode1, m_Instances); bDone = true; } } } } // GenerateRandomNetworkStructure /** * GenerateTree creates a tree-like network structure (actually a forest) by * starting with a randomly selected pair of nodes, add an arc between. Then * keep on selecting one of the connected nodes and one of the unconnected * ones and add an arrow between them, till all nodes are connected. * * @param nNodes number of nodes in the Bayes net to generate */ void generateTree(int nNodes) { boolean[] bConnected = new boolean[nNodes]; // start adding an arc at random int nNode1 = random.nextInt(nNodes); int nNode2 = random.nextInt(nNodes); if (nNode1 == nNode2) { nNode2 = (nNode1 + 1) % nNodes; } if (nNode2 < nNode1) { int h = nNode1; nNode1 = nNode2; nNode2 = h; } m_ParentSets[nNode2].addParent(nNode1, m_Instances); bConnected[nNode1] = true; bConnected[nNode2] = true; // Repeatedly, select one of the connected nodes, and one of // the unconnected nodes and add an arc. // All arcs point from lower to higher ordered nodes // so that acyclicity is ensured. 
for (int iArc = 2; iArc < nNodes; iArc++) { int nNode = random.nextInt(nNodes); nNode1 = 0; // one of the connected nodes while (nNode >= 0) { nNode1 = (nNode1 + 1) % nNodes; while (!bConnected[nNode1]) { nNode1 = (nNode1 + 1) % nNodes; } nNode--; } nNode = random.nextInt(nNodes); nNode2 = 0; // one of the unconnected nodes while (nNode >= 0) { nNode2 = (nNode2 + 1) % nNodes; while (bConnected[nNode2]) { nNode2 = (nNode2 + 1) % nNodes; } nNode--; } if (nNode2 < nNode1) { int h = nNode1; nNode1 = nNode2; nNode2 = h; } m_ParentSets[nNode2].addParent(nNode1, m_Instances); bConnected[nNode1] = true; bConnected[nNode2] = true; } } // GenerateTree /** * GenerateRandomDistributions generates discrete conditional distribution * tables for all nodes of a Bayes network once a network structure has been * determined. * * @param nNodes number of nodes in the Bayes net * @param nValues number of values each of the nodes can take */ void generateRandomDistributions(int nNodes, int nValues) { // Reserve space for CPTs int nMaxParentCardinality = 1; for (int iAttribute = 0; iAttribute < nNodes; iAttribute++) { if (m_ParentSets[iAttribute].getCardinalityOfParents() > nMaxParentCardinality) { nMaxParentCardinality = m_ParentSets[iAttribute] .getCardinalityOfParents(); } } // Reserve plenty of memory m_Distributions = new Estimator[m_Instances.numAttributes()][nMaxParentCardinality]; // estimate CPTs for (int iAttribute = 0; iAttribute < nNodes; iAttribute++) { int[] nPs = new int[nValues + 1]; nPs[0] = 0; nPs[nValues] = 1000; for (int iParent = 0; iParent < m_ParentSets[iAttribute] .getCardinalityOfParents(); iParent++) { // fill array with random nr's for (int iValue = 1; iValue < nValues; iValue++) { nPs[iValue] = random.nextInt(1000); } // sort for (int iValue = 1; iValue < nValues; iValue++) { for (int iValue2 = iValue + 1; iValue2 < nValues; iValue2++) { if (nPs[iValue2] < nPs[iValue]) { int h = nPs[iValue2]; nPs[iValue2] = nPs[iValue]; nPs[iValue] = h; } } } // assign to 
probability tables DiscreteEstimatorBayes d = new DiscreteEstimatorBayes(nValues, getEstimator().getAlpha()); for (int iValue = 0; iValue < nValues; iValue++) { d.addValue(iValue, nPs[iValue + 1] - nPs[iValue]); } m_Distributions[iAttribute][iParent] = d; } } } // GenerateRandomDistributions /** * GenerateInstances generates random instances sampling from the distribution * represented by the Bayes network structure. It assumes a Bayes network * structure has been initialized * * @throws Exception if something goes wrong */ public void generateInstances() throws Exception { int[] order = getOrder(); for (int iInstance = 0; iInstance < m_nNrOfInstances; iInstance++) { int nNrOfAtts = m_Instances.numAttributes(); double[] instance = new double[nNrOfAtts]; for (int iAtt2 = 0; iAtt2 < nNrOfAtts; iAtt2++) { int iAtt = order[iAtt2]; double iCPT = 0; for (int iParent = 0; iParent < m_ParentSets[iAtt].getNrOfParents(); iParent++) { int nParent = m_ParentSets[iAtt].getParent(iParent); iCPT = iCPT * m_Instances.attribute(nParent).numValues() + instance[nParent]; } double fRandom = random.nextInt(1000) / 1000.0f; int iValue = 0; while (fRandom > m_Distributions[iAtt][(int) iCPT] .getProbability(iValue)) { fRandom = fRandom - m_Distributions[iAtt][(int) iCPT].getProbability(iValue); iValue++; } instance[iAtt] = iValue; } m_Instances.add(new DenseInstance(1.0, instance)); } } // GenerateInstances /** * @throws Exception if there's a cycle in the graph */ int[] getOrder() throws Exception { int nNrOfAtts = m_Instances.numAttributes(); int[] order = new int[nNrOfAtts]; boolean[] bDone = new boolean[nNrOfAtts]; for (int iAtt = 0; iAtt < nNrOfAtts; iAtt++) { int iAtt2 = 0; boolean allParentsDone = false; while (!allParentsDone && iAtt2 < nNrOfAtts) { if (!bDone[iAtt2]) { allParentsDone = true; int iParent = 0; while (allParentsDone && iParent < m_ParentSets[iAtt2].getNrOfParents()) { allParentsDone = bDone[m_ParentSets[iAtt2].getParent(iParent++)]; } if (allParentsDone && iParent 
== m_ParentSets[iAtt2].getNrOfParents()) { order[iAtt] = iAtt2; bDone[iAtt2] = true; } else { iAtt2++; } } else { iAtt2++; } } if (!allParentsDone && iAtt2 == nNrOfAtts) { throw new Exception("There appears to be a cycle in the graph"); } } return order; } // getOrder /** * Returns either the net (if BIF format) or the generated instances * * @return either the net or the generated instances */ @Override public String toString() { if (m_bGenerateNet) { return toXMLBIF03(); } return m_Instances.toString(); } // toString boolean m_bGenerateNet = false; int m_nNrOfNodes = 10; int m_nNrOfArcs = 10; int m_nNrOfInstances = 10; int m_nCardinality = 2; String m_sBIFFile = ""; void setNrOfNodes(int nNrOfNodes) { m_nNrOfNodes = nNrOfNodes; } void setNrOfArcs(int nNrOfArcs) { m_nNrOfArcs = nNrOfArcs; } void setNrOfInstances(int nNrOfInstances) { m_nNrOfInstances = nNrOfInstances; } void setCardinality(int nCardinality) { m_nCardinality = nCardinality; } void setSeed(int nSeed) { m_nSeed = nSeed; } /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<Option>(6); newVector.addElement(new Option( "\tGenerate network (instead of instances)\n", "B", 0, "-B")); newVector.addElement(new Option("\tNr of nodes\n", "N", 1, "-N <integer>")); newVector.addElement(new Option("\tNr of arcs\n", "A", 1, "-A <integer>")); newVector.addElement(new Option("\tNr of instances\n", "M", 1, "-M <integer>")); newVector.addElement(new Option("\tCardinality of the variables\n", "C", 1, "-C <integer>")); newVector.addElement(new Option("\tSeed for random number generator\n", "S", 1, "-S <integer>")); newVector.addElement(new Option( "\tThe BIF file to obtain the structure from.\n", "F", 1, "-F <file>")); return newVector.elements(); } // listOptions /** * Parses a given list of options. 
* <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -B * Generate network (instead of instances) * </pre> * * <pre> * -N &lt;integer&gt; * Nr of nodes * </pre> * * <pre> * -A &lt;integer&gt; * Nr of arcs * </pre> * * <pre> * -M &lt;integer&gt; * Nr of instances * </pre> * * <pre> * -C &lt;integer&gt; * Cardinality of the variables * </pre> * * <pre> * -S &lt;integer&gt; * Seed for random number generator * </pre> * * <pre> * -F &lt;file&gt; * The BIF file to obtain the structure from. * </pre> * * <!-- options-end --> * * @param options the list of options as an array of strings * @exception Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { m_bGenerateNet = Utils.getFlag('B', options); String sNrOfNodes = Utils.getOption('N', options); if (sNrOfNodes.length() != 0) { setNrOfNodes(Integer.parseInt(sNrOfNodes)); } else { setNrOfNodes(10); } String sNrOfArcs = Utils.getOption('A', options); if (sNrOfArcs.length() != 0) { setNrOfArcs(Integer.parseInt(sNrOfArcs)); } else { setNrOfArcs(10); } String sNrOfInstances = Utils.getOption('M', options); if (sNrOfInstances.length() != 0) { setNrOfInstances(Integer.parseInt(sNrOfInstances)); } else { setNrOfInstances(10); } String sCardinality = Utils.getOption('C', options); if (sCardinality.length() != 0) { setCardinality(Integer.parseInt(sCardinality)); } else { setCardinality(2); } String sSeed = Utils.getOption('S', options); if (sSeed.length() != 0) { setSeed(Integer.parseInt(sSeed)); } else { setSeed(1); } String sBIFFile = Utils.getOption('F', options); if ((sBIFFile != null) && (sBIFFile != "")) { setBIFFile(sBIFFile); } } // setOptions /** * Gets the current settings of the classifier. 
* An array of strings suitable for passing to setOptions. The array has a
* fixed length of 13; unused trailing slots are filled with empty strings
* (never nulls) so Weka's option handling does not break.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
  String[] options = new String[13];
  int current = 0;
  if (m_bGenerateNet) {
    options[current++] = "-B";
  }
  options[current++] = "-N";
  options[current++] = "" + m_nNrOfNodes;
  options[current++] = "-A";
  options[current++] = "" + m_nNrOfArcs;
  options[current++] = "-M";
  options[current++] = "" + m_nNrOfInstances;
  options[current++] = "-C";
  options[current++] = "" + m_nCardinality;
  options[current++] = "-S";
  options[current++] = "" + m_nSeed;
  if (m_sBIFFile.length() != 0) {
    options[current++] = "-F";
    options[current++] = "" + m_sBIFFile;
  }
  // Fill up rest with empty strings, not nulls!
  while (current < options.length) {
    options[current++] = "";
  }
  return options;
} // getOptions

/**
 * prints all the options to stdout
 *
 * @param o option handler whose options (synopsis + description) are printed
 */
protected static void printOptions(OptionHandler o) {
  Enumeration<Option> enm = o.listOptions();
  System.out.println("Options for " + o.getClass().getName() + ":\n");
  while (enm.hasMoreElements()) {
    Option option = enm.nextElement();
    System.out.println(option.synopsis());
    System.out.println(option.description());
  }
}

/**
 * Returns the revision string.
 *
 * @return the revision
 */
@Override
public String getRevision() {
  return RevisionUtils.extract("$Revision$");
}

/**
 * Main method: generates a random network (or copies one from a BIF file),
 * optionally samples instances from it, and prints the result to stdout.
 * Prints usage when called without arguments or with -h.
 *
 * @param args the commandline parameters
 */
static public void main(String[] args) {
  BayesNetGenerator b = new BayesNetGenerator();
  try {
    if ((args.length == 0) || (Utils.getFlag('h', args))) {
      printOptions(b);
      return;
    }
    b.setOptions(args);
    b.generateRandomNetwork();
    if (!b.m_bGenerateNet) { // skip if not required
      b.generateInstances();
    }
    System.out.println(b.toString());
  } catch (Exception e) {
    e.printStackTrace();
    printOptions(b);
  }
} // main
} // class BayesNetGenerator
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/EditableBayesNet.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * EditableBayesNet.java * Copyright (C) 2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net; import java.io.Serializable; import java.io.StringReader; import java.util.ArrayList; import java.util.StringTokenizer; import javax.xml.parsers.DocumentBuilderFactory; import org.w3c.dom.CharacterData; import org.w3c.dom.Document; import org.w3c.dom.Element; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import weka.classifiers.bayes.BayesNet; import weka.classifiers.bayes.net.estimate.DiscreteEstimatorBayes; import weka.core.Attribute; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.SerializedObject; import weka.estimators.Estimator; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Reorder; /** * <!-- globalinfo-start --> Bayes Network learning using various search * algorithms and quality measures.<br/> * Base class for a Bayes Network classifier. Provides datastructures (network * structure, conditional probability distributions, etc.) 
and facilities common * to Bayes Network learning algorithms like K2 and B.<br/> * <br/> * For more information see:<br/> * <br/> * http://www.cs.waikato.ac.nz/~remco/weka.pdf * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -D * Do not use ADTree data structure * </pre> * * <pre> * -B &lt;BIF file&gt; * BIF file to compare with * </pre> * * <pre> * -Q weka.classifiers.bayes.net.search.SearchAlgorithm * Search algorithm * </pre> * * <pre> * -E weka.classifiers.bayes.net.estimate.SimpleEstimator * Estimator algorithm * </pre> * * <!-- options-end --> * * @author Remco Bouckaert (rrb@xm.co.nz) * @version $Revision$ */ public class EditableBayesNet extends BayesNet { /** for serialization */ static final long serialVersionUID = 746037443258735954L; /** location of nodes, used for graph drawing * */ protected ArrayList<Integer> m_nPositionX; protected ArrayList<Integer> m_nPositionY; /** marginal distributions * */ protected ArrayList<double[]> m_fMarginP; /** evidence values, used for evidence propagation * */ protected ArrayList<Integer> m_nEvidence; /** standard constructor * */ public EditableBayesNet() { super(); m_nEvidence = new ArrayList<Integer>(0); m_fMarginP = new ArrayList<double[]>(0); m_nPositionX = new ArrayList<Integer>(); m_nPositionY = new ArrayList<Integer>(); clearUndoStack(); } // c'tor /** * constructor, creates empty network with nodes based on the attributes in a * data set */ public EditableBayesNet(Instances instances) { try { if (instances.classIndex() < 0) { instances.setClassIndex(instances.numAttributes() - 1); } m_Instances = normalizeDataSet(instances); } catch (Exception e) { e.printStackTrace(); } int nNodes = getNrOfNodes(); m_ParentSets = new ParentSet[nNodes]; for (int i = 0; i < nNodes; i++) { m_ParentSets[i] = new ParentSet(); } m_Distributions = new Estimator[nNodes][]; for (int iNode = 0; iNode < nNodes; iNode++) { m_Distributions[iNode] = new Estimator[1]; 
m_Distributions[iNode][0] = new DiscreteEstimatorBayes( getCardinality(iNode), 0.5); } m_nEvidence = new ArrayList<Integer>(nNodes); for (int i = 0; i < nNodes; i++) { m_nEvidence.add(-1); } m_fMarginP = new ArrayList<double[]>(nNodes); for (int i = 0; i < nNodes; i++) { double[] P = new double[getCardinality(i)]; m_fMarginP.add(P); } m_nPositionX = new ArrayList<Integer>(nNodes); m_nPositionY = new ArrayList<Integer>(nNodes); for (int iNode = 0; iNode < nNodes; iNode++) { m_nPositionX.add(iNode % 10 * 50); m_nPositionY.add((iNode / 10) * 50); } } // c'tor /** * constructor, copies Bayesian network structure from a Bayesian network * encapsulated in a BIFReader */ public EditableBayesNet(BIFReader other) { m_Instances = other.m_Instances; m_ParentSets = other.getParentSets(); m_Distributions = other.getDistributions(); int nNodes = getNrOfNodes(); m_nPositionX = new ArrayList<Integer>(nNodes); m_nPositionY = new ArrayList<Integer>(nNodes); for (int i = 0; i < nNodes; i++) { m_nPositionX.add(other.m_nPositionX[i]); m_nPositionY.add(other.m_nPositionY[i]); } m_nEvidence = new ArrayList<Integer>(nNodes); for (int i = 0; i < nNodes; i++) { m_nEvidence.add(-1); } m_fMarginP = new ArrayList<double[]>(nNodes); for (int i = 0; i < nNodes; i++) { double[] P = new double[getCardinality(i)]; m_fMarginP.add(P); } clearUndoStack(); } // c'tor /** * constructor that potentially initializes instances as well * * @param bSetInstances flag indicating whether to initialize instances or not */ public EditableBayesNet(boolean bSetInstances) { super(); m_nEvidence = new ArrayList<Integer>(0); m_fMarginP = new ArrayList<double[]>(0); m_nPositionX = new ArrayList<Integer>(); m_nPositionY = new ArrayList<Integer>(); clearUndoStack(); if (bSetInstances) { m_Instances = new Instances("New Network", new ArrayList<Attribute>(0), 0); } } // c'tor /** * Assuming a network structure is defined and we want to learn from data, the * data set must be put if correct order first and possibly * 
discretized/missing values filled in before proceeding to CPT learning. * * @param instances data set to learn from * @exception Exception when data sets are not compatible, e.g., a variable is * missing or a variable has different nr of values. */ public void setData(Instances instances) throws Exception { // sync order of variables int[] order = new int[getNrOfNodes()]; for (int iNode = 0; iNode < getNrOfNodes(); iNode++) { String sName = getNodeName(iNode); int nNode = 0; while (nNode < getNrOfNodes() && !sName.equals(instances.attribute(nNode).name())) { nNode++; } if (nNode >= getNrOfNodes()) { throw new Exception("Cannot find node named [[[" + sName + "]]] in the data"); } order[iNode] = nNode; } Reorder reorderFilter = new Reorder(); reorderFilter.setAttributeIndicesArray(order); reorderFilter.setInputFormat(instances); instances = Filter.useFilter(instances, reorderFilter); // filter using discretization/missing values filter Instances newInstances = new Instances(m_Instances, 0); if (m_DiscretizeFilter == null && m_MissingValuesFilter == null) { newInstances = normalizeDataSet(instances); } else { for (int iInstance = 0; iInstance < instances.numInstances(); iInstance++) { newInstances.add(normalizeInstance(instances.instance(iInstance))); } } // sanity check for (int iNode = 0; iNode < getNrOfNodes(); iNode++) { if (newInstances.attribute(iNode).numValues() != getCardinality(iNode)) { throw new Exception("Number of values of node [[[" + getNodeName(iNode) + "]]] differs in (discretized) dataset."); } } // if we got this far, all is ok with the data set and // we can replace data set of Bayes net m_Instances = newInstances; } // setData /** * returns index of node with given name, or -1 if no such node exists * * @param sNodeName name of the node to get index for */ public int getNode2(String sNodeName) { int iNode = 0; while (iNode < m_Instances.numAttributes()) { if (m_Instances.attribute(iNode).name().equals(sNodeName)) { return iNode; } iNode++; } 
return -1; } // getNode2 /** * returns index of node with given name. Throws exception if no such node * exists * * @param sNodeName name of the node to get index for */ public int getNode(String sNodeName) throws Exception { int iNode = getNode2(sNodeName); if (iNode < 0) { throw new Exception("Could not find node [[" + sNodeName + "]]"); } return iNode; } // getNode /** * Add new node to the network, initializing instances, parentsets, * distributions. Used for manual manipulation of the Bayesian network. * * @param sName name of the node. If the name already exists, an x is appended * to the name * @param nCardinality number of values for this node * @throws Exception */ public void addNode(String sName, int nCardinality) throws Exception { addNode(sName, nCardinality, 100 + getNrOfNodes() * 10, 100 + getNrOfNodes() * 10); } // addNode /** * Add node to network at a given position, initializing instances, * parentsets, distributions. Used for manual manipulation of the Bayesian * network. * * @param sName name of the node. 
If the name already exists, an x is appended * to the name * @param nCardinality number of values for this node * @param nPosX x-coordiate of the position to place this node * @param nPosY y-coordiate of the position to place this node * @throws Exception */ public void addNode(String sName, int nCardinality, int nPosX, int nPosY) throws Exception { if (getNode2(sName) >= 0) { addNode(sName + "x", nCardinality); return; } // update instances ArrayList<String> values = new ArrayList<String>(nCardinality); for (int iValue = 0; iValue < nCardinality; iValue++) { values.add("Value" + (iValue + 1)); } Attribute att = new Attribute(sName, values); m_Instances.insertAttributeAt(att, m_Instances.numAttributes()); int nAtts = m_Instances.numAttributes(); // update parentsets ParentSet[] parentSets = new ParentSet[nAtts]; for (int iParentSet = 0; iParentSet < nAtts - 1; iParentSet++) { parentSets[iParentSet] = m_ParentSets[iParentSet]; } parentSets[nAtts - 1] = new ParentSet(); m_ParentSets = parentSets; // update distributions Estimator[][] distributions = new Estimator[nAtts][]; for (int iNode = 0; iNode < nAtts - 1; iNode++) { distributions[iNode] = m_Distributions[iNode]; } distributions[nAtts - 1] = new Estimator[1]; distributions[nAtts - 1][0] = new DiscreteEstimatorBayes(nCardinality, 0.5); m_Distributions = distributions; // update positions m_nPositionX.add(nPosX); m_nPositionY.add(nPosY); // update evidence & margins m_nEvidence.add(-1); double[] fMarginP = new double[nCardinality]; for (int iValue = 0; iValue < nCardinality; iValue++) { fMarginP[iValue] = 1.0 / nCardinality; } m_fMarginP.add(fMarginP); // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new AddNodeAction(sName, nCardinality, nPosX, nPosY)); } } // addNode /** * Delete node from the network, updating instances, parentsets, distributions * Conditional distributions are condensed by taking the values for the target * node to be its first value. 
Used for manual manipulation of the Bayesian * network. * * @param sName name of the node. If the name does not exists an exception is * thrown * @throws Exception */ public void deleteNode(String sName) throws Exception { int nTargetNode = getNode(sName); deleteNode(nTargetNode); } // deleteNode /** * Delete node from the network, updating instances, parentsets, distributions * Conditional distributions are condensed by taking the values for the target * node to be its first value. Used for manual manipulation of the Bayesian * network. * * @param nTargetNode index of the node to delete. * @throws Exception */ public void deleteNode(int nTargetNode) throws Exception { // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new DeleteNodeAction(nTargetNode)); } int nAtts = m_Instances.numAttributes() - 1; int nTargetCard = m_Instances.attribute(nTargetNode).numValues(); // update distributions Estimator[][] distributions = new Estimator[nAtts][]; for (int iNode = 0; iNode < nAtts; iNode++) { int iNode2 = iNode; if (iNode >= nTargetNode) { iNode2++; } Estimator[] distribution = m_Distributions[iNode2]; if (m_ParentSets[iNode2].contains(nTargetNode)) { // condense distribution, use values for targetnode = 0 int nParentCard = m_ParentSets[iNode2].getCardinalityOfParents(); nParentCard = nParentCard / nTargetCard; Estimator[] distribution2 = new Estimator[nParentCard]; for (int iParent = 0; iParent < nParentCard; iParent++) { distribution2[iParent] = distribution[iParent]; } distribution = distribution2; } distributions[iNode] = distribution; } m_Distributions = distributions; // update parentsets ParentSet[] parentSets = new ParentSet[nAtts]; for (int iParentSet = 0; iParentSet < nAtts; iParentSet++) { int iParentSet2 = iParentSet; if (iParentSet >= nTargetNode) { iParentSet2++; } ParentSet parentset = m_ParentSets[iParentSet2]; parentset.deleteParent(nTargetNode, m_Instances); for (int iParent = 0; iParent < parentset.getNrOfParents(); iParent++) { int nParent = 
parentset.getParent(iParent); if (nParent > nTargetNode) { parentset.SetParent(iParent, nParent - 1); } } parentSets[iParentSet] = parentset; } m_ParentSets = parentSets; // update instances m_Instances.setClassIndex(-1); m_Instances.deleteAttributeAt(nTargetNode); m_Instances.setClassIndex(nAtts - 1); // update positions m_nPositionX.remove(nTargetNode); m_nPositionY.remove(nTargetNode); // update evidence & margins m_nEvidence.remove(nTargetNode); m_fMarginP.remove(nTargetNode); } // deleteNode /** * Delete nodes with indexes in selection from the network, updating * instances, parentsets, distributions Conditional distributions are * condensed by taking the values for the target node to be its first value. * Used for manual manipulation of the Bayesian network. * * @param nodes array of indexes of nodes to delete. * @throws Exception */ public void deleteSelection(ArrayList<Integer> nodes) { // sort before proceeding for (int i = 0; i < nodes.size(); i++) { for (int j = i + 1; j < nodes.size(); j++) { if (nodes.get(i) > nodes.get(j)) { int h = nodes.get(i); nodes.set(i, nodes.get(j)); nodes.set(j, h); } } } // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new DeleteSelectionAction(nodes)); } boolean bNeedsUndoAction = m_bNeedsUndoAction; m_bNeedsUndoAction = false; try { for (int iNode = nodes.size() - 1; iNode >= 0; iNode--) { deleteNode(nodes.get(iNode)); } } catch (Exception e) { e.printStackTrace(); } m_bNeedsUndoAction = bNeedsUndoAction; } // deleteSelection /** * XML helper function for selecting elements under a node with a given name * * @param item XMLNode to select items from * @param sElement name of the element to return */ ArrayList<Node> selectElements(Node item, String sElement) throws Exception { NodeList children = item.getChildNodes(); ArrayList<Node> nodelist = new ArrayList<Node>(); for (int iNode = 0; iNode < children.getLength(); iNode++) { Node node = children.item(iNode); if ((node.getNodeType() == Node.ELEMENT_NODE) && 
node.getNodeName().equals(sElement)) { nodelist.add(node); } } return nodelist; } // selectElements /** * XML helper function. Returns all TEXT children of the given node in one * string. Between the node values new lines are inserted. * * @param node the node to return the content for * @return the content of the node */ public String getContent(Element node) { NodeList list; Node item; int i; String result; result = ""; list = node.getChildNodes(); for (i = 0; i < list.getLength(); i++) { item = list.item(i); if (item.getNodeType() == Node.TEXT_NODE) { result += "\n" + item.getNodeValue(); } } return result; } /** * XML helper function that returns DEFINITION element from a XMLBIF document * for a node with a given name. * * @param doc XMLBIF document * @param sName name of the node to get the definition for */ Element getDefinition(Document doc, String sName) throws Exception { NodeList nodelist = doc.getElementsByTagName("DEFINITION"); for (int iNode = 0; iNode < nodelist.getLength(); iNode++) { Node node = nodelist.item(iNode); ArrayList<Node> list = selectElements(node, "FOR"); if (list.size() > 0) { Node forNode = list.get(0); if (getContent((Element) forNode).trim().equals(sName)) { return (Element) node; } } } throw new Exception("Could not find definition for ((" + sName + "))"); } // getDefinition /** * Paste modes. This allows for verifying that a past action does not cause * any problems before actually performing the paste operation. */ final static int TEST = 0; final static int EXECUTE = 1; /** * Apply paste operation with XMLBIF fragment. This adds nodes in the XMLBIF * fragment to the network, together with its parents. First, paste in test * mode to verify no problems occur, then execute paste operation. If a * problem occurs (e.g. parent does not exist) then a exception is thrown. 
*
 * @param sXML XMLBIF fragment to paste into the network
 */
public void paste(String sXML) throws Exception {
  // dry run first: if the fragment cannot be pasted this throws and the
  // network is left untouched
  try {
    paste(sXML, TEST);
  } catch (Exception e) {
    throw e;
  }
  paste(sXML, EXECUTE);
} // paste

/**
 * Apply paste operation with XMLBIF fragment. Depending on the paste mode,
 * the nodes are actually added to the network or it is just tested that the
 * nodes can be added to the network.
 *
 * @param sXML XMLBIF fragment to paste into the network
 * @param mode paste mode TEST or EXECUTE
 */
void paste(String sXML, int mode) throws Exception {
  DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
  // NOTE(review): validating parser without hardening against external
  // entities; assumes the XMLBIF fragment is trusted input — verify
  factory.setValidating(true);
  Document doc = factory.newDocumentBuilder().parse(
    new org.xml.sax.InputSource(new StringReader(sXML)));
  doc.normalize();
  // create nodes first
  NodeList nodelist = doc.getElementsByTagName("VARIABLE");
  ArrayList<String> sBaseNames = new ArrayList<String>();
  Instances instances = new Instances(m_Instances, 0);
  // nBase = index of the first pasted node
  int nBase = instances.numAttributes();
  for (int iNode = 0; iNode < nodelist.getLength(); iNode++) {
    // Get element
    ArrayList<Node> valueslist;
    // Get the name of the node
    valueslist = selectElements(nodelist.item(iNode), "OUTCOME");
    int nValues = valueslist.size();
    // generate value strings
    ArrayList<String> nomStrings = new ArrayList<String>(nValues + 1);
    for (int iValue = 0; iValue < nValues; iValue++) {
      Node node = valueslist.get(iValue).getFirstChild();
      String sValue = ((CharacterData) (node)).getData();
      if (sValue == null) {
        // fall back to a generated label for empty OUTCOME elements
        sValue = "Value" + (iValue + 1);
      }
      nomStrings.add(sValue);
    }
    ArrayList<Node> nodelist2;
    // Get the name of the network
    nodelist2 = selectElements(nodelist.item(iNode), "NAME");
    if (nodelist2.size() == 0) {
      throw new Exception("No name specified for variable");
    }
    String sBaseName = ((CharacterData) (nodelist2.get(0).getFirstChild()))
      .getData();
    sBaseNames.add(sBaseName);
    // make the pasted node name unique: "Copy of X", then "Copy (n) of X"
    String sNodeName = sBaseName;
    if (getNode2(sNodeName) >= 0) {
      sNodeName = "Copy of " + sBaseName;
    }
    int iAttempt = 2;
    while
(getNode2(sNodeName) >= 0) {
      sNodeName = "Copy (" + iAttempt + ") of " + sBaseName;
      iAttempt++;
    }
    Attribute att = new Attribute(sNodeName, nomStrings);
    instances.insertAttributeAt(att, instances.numAttributes());
    valueslist = selectElements(nodelist.item(iNode), "PROPERTY");
    nValues = valueslist.size();
    // generate value strings
    int nPosX = iAttempt * 10;
    int nPosY = iAttempt * 10;
    for (int iValue = 0; iValue < nValues; iValue++) {
      // parsing for strings of the form "position = (73, 165)"
      Node node = valueslist.get(iValue).getFirstChild();
      String sValue = ((CharacterData) (node)).getData();
      if (sValue.startsWith("position")) {
        int i0 = sValue.indexOf('(');
        int i1 = sValue.indexOf(',');
        int i2 = sValue.indexOf(')');
        String sX = sValue.substring(i0 + 1, i1).trim();
        String sY = sValue.substring(i1 + 1, i2).trim();
        try {
          // offset pasted nodes by 10px per copy attempt so they do not
          // land exactly on top of the originals
          nPosX = (Integer.parseInt(sX) + iAttempt * 10);
          nPosY = (Integer.parseInt(sY) + iAttempt * 10);
        } catch (NumberFormatException e) {
          System.err.println("Wrong number format in position :(" + sX + "," + sY + ")");
        }
      }
    }
    if (mode == EXECUTE) {
      m_nPositionX.add(nPosX);
      m_nPositionY.add(nPosY);
    }
  }
  ArrayList<Node> nodelist2;
  // grow the CPT and parent-set arrays to hold the pasted nodes
  Estimator[][] distributions = new Estimator[nBase + sBaseNames.size()][];
  ParentSet[] parentsets = new ParentSet[nBase + sBaseNames.size()];
  for (int iNode = 0; iNode < nBase; iNode++) {
    distributions[iNode] = m_Distributions[iNode];
    parentsets[iNode] = m_ParentSets[iNode];
  }
  if (mode == EXECUTE) {
    m_Instances = instances;
  }
  // create arrows & create distributions
  for (int iNode = 0; iNode < sBaseNames.size(); iNode++) {
    // find definition that goes with this node
    String sName = sBaseNames.get(iNode);
    Element definition = getDefinition(doc, sName);
    parentsets[nBase + iNode] = new ParentSet();
    // get the parents for this node
    // resolve structure
    nodelist2 = selectElements(definition, "GIVEN");
    for (int iParent = 0; iParent < nodelist2.size(); iParent++) {
      Node parentName = nodelist2.get(iParent).getFirstChild();
      String sParentName =
((CharacterData) (parentName)).getData(); int nParent = -1; for (int iBase = 0; iBase < sBaseNames.size(); iBase++) { if (sParentName.equals(sBaseNames.get(iBase))) { nParent = nBase + iBase; } } if (nParent < 0) { nParent = getNode(sParentName); } parentsets[nBase + iNode].addParent(nParent, instances); } // resolve conditional probability table int nCardinality = parentsets[nBase + iNode].getCardinalityOfParents(); int nValues = instances.attribute(nBase + iNode).numValues(); distributions[nBase + iNode] = new Estimator[nCardinality]; for (int i = 0; i < nCardinality; i++) { distributions[nBase + iNode][i] = new DiscreteEstimatorBayes(nValues, 0.0f); } String sTable = getContent((Element) selectElements(definition, "TABLE") .get(0)); sTable = sTable.replaceAll("\\n", " "); StringTokenizer st = new StringTokenizer(sTable.toString()); for (int i = 0; i < nCardinality; i++) { DiscreteEstimatorBayes d = (DiscreteEstimatorBayes) distributions[nBase + iNode][i]; for (int iValue = 0; iValue < nValues; iValue++) { String sWeight = st.nextToken(); d.addValue(iValue, new Double(sWeight).doubleValue()); } } if (mode == EXECUTE) { m_nEvidence.add(nBase + iNode, -1); m_fMarginP .add(nBase + iNode, new double[getCardinality(nBase + iNode)]); } } if (mode == EXECUTE) { m_Distributions = distributions; m_ParentSets = parentsets; } // update undo stack if (mode == EXECUTE && m_bNeedsUndoAction) { addUndoAction(new PasteAction(sXML, nBase)); } } // paste /** * Add arc between two nodes Distributions are updated by duplication for * every value of the parent node. 
* * @param sParent name of the parent node * @param sChild name of the child node * @throws Exception if parent or child cannot be found in network */ public void addArc(String sParent, String sChild) throws Exception { int nParent = getNode(sParent); int nChild = getNode(sChild); addArc(nParent, nChild); } // addArc /** * Add arc between two nodes Distributions are updated by duplication for * every value of the parent node. * * @param nParent index of the parent node * @param nChild index of the child node * @throws Exception */ public void addArc(int nParent, int nChild) throws Exception { // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new AddArcAction(nParent, nChild)); } int nOldCard = m_ParentSets[nChild].getCardinalityOfParents(); // update parentsets m_ParentSets[nChild].addParent(nParent, m_Instances); // update distributions int nNewCard = m_ParentSets[nChild].getCardinalityOfParents(); Estimator[] ds = new Estimator[nNewCard]; for (int iParent = 0; iParent < nNewCard; iParent++) { ds[iParent] = Estimator .clone(m_Distributions[nChild][iParent % nOldCard]); } m_Distributions[nChild] = ds; } // addArc /** * Add arc between parent node and each of the nodes in a given list. * Distributions are updated as above. * * @param sParent name of the parent node * @param nodes array of indexes of child nodes * @throws Exception */ public void addArc(String sParent, ArrayList<Integer> nodes) throws Exception { int nParent = getNode(sParent); // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new AddArcAction(nParent, nodes)); } boolean bNeedsUndoAction = m_bNeedsUndoAction; m_bNeedsUndoAction = false; for (int iNode = 0; iNode < nodes.size(); iNode++) { int nNode = nodes.get(iNode); addArc(nParent, nNode); } m_bNeedsUndoAction = bNeedsUndoAction; } // addArc /** * Delete arc between two nodes. Distributions are updated by condensing for * the parent node taking its first value. 
* * @param sParent name of the parent node * @param sChild name of the child node * @throws Exception if parent or child cannot be found in network */ public void deleteArc(String sParent, String sChild) throws Exception { int nParent = getNode(sParent); int nChild = getNode(sChild); deleteArc(nParent, nChild); } // deleteArc /** * Delete arc between two nodes. Distributions are updated by condensing for * the parent node taking its first value. * * @param nParent index of the parent node * @param nChild index of the child node * @throws Exception */ public void deleteArc(int nParent, int nChild) throws Exception { // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new DeleteArcAction(nParent, nChild)); } // update distributions // condense distribution, use values for targetnode = 0 int nParentCard = m_ParentSets[nChild].getCardinalityOfParents(); int nTargetCard = m_Instances.attribute(nChild).numValues(); nParentCard = nParentCard / nTargetCard; Estimator[] distribution2 = new Estimator[nParentCard]; for (int iParent = 0; iParent < nParentCard; iParent++) { distribution2[iParent] = m_Distributions[nChild][iParent]; } m_Distributions[nChild] = distribution2; // update parentsets m_ParentSets[nChild].deleteParent(nParent, m_Instances); } // deleteArc /** * specify distribution of a node * * @param sName name of the node to specify distribution for * @param P matrix representing distribution with P[i][j] = P(node = j | * parent configuration = i) * @throws Exception if parent or child cannot be found in network */ public void setDistribution(String sName, double[][] P) throws Exception { int nTargetNode = getNode(sName); setDistribution(nTargetNode, P); } // setDistribution /** * specify distribution of a node * * @param nTargetNode index of the node to specify distribution for * @param P matrix representing distribution with P[i][j] = P(node = j | * parent configuration = i) * @throws Exception if parent or child cannot be found in network */ public void 
setDistribution(int nTargetNode, double[][] P) throws Exception { // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new SetDistributionAction(nTargetNode, P)); } Estimator[] distributions = m_Distributions[nTargetNode]; for (int iParent = 0; iParent < distributions.length; iParent++) { DiscreteEstimatorBayes distribution = new DiscreteEstimatorBayes( P[0].length, 0); for (int iValue = 0; iValue < distribution.getNumSymbols(); iValue++) { distribution.addValue(iValue, P[iParent][iValue]); } distributions[iParent] = distribution; } // m_Distributions[nTargetNode] = distributions; } // setDistribution /** * returns distribution of a node in matrix form with matrix representing * distribution with P[i][j] = P(node = j | parent configuration = i) * * @param sName name of the node to get distribution from */ public double[][] getDistribution(String sName) { int nTargetNode = getNode2(sName); return getDistribution(nTargetNode); } // getDistribution /** * returns distribution of a node in matrix form with matrix representing * distribution with P[i][j] = P(node = j | parent configuration = i) * * @param nTargetNode index of the node to get distribution from */ public double[][] getDistribution(int nTargetNode) { int nParentCard = m_ParentSets[nTargetNode].getCardinalityOfParents(); int nCard = m_Instances.attribute(nTargetNode).numValues(); double[][] P = new double[nParentCard][nCard]; for (int iParent = 0; iParent < nParentCard; iParent++) { for (int iValue = 0; iValue < nCard; iValue++) { P[iParent][iValue] = m_Distributions[nTargetNode][iParent] .getProbability(iValue); } } return P; } // getDistribution /** * returns array of values of a node * * @param sName name of the node to get values from */ public String[] getValues(String sName) { int nTargetNode = getNode2(sName); return getValues(nTargetNode); } // getValues /** * returns array of values of a node * * @param nTargetNode index of the node to get values from */ public String[] getValues(int 
nTargetNode) { String[] values = new String[getCardinality(nTargetNode)]; for (int iValue = 0; iValue < values.length; iValue++) { values[iValue] = m_Instances.attribute(nTargetNode).value(iValue); } return values; } // getValues /** * returns value of a node * * @param nTargetNode index of the node to get values from * @param iValue index of the value */ public String getValueName(int nTargetNode, int iValue) { return m_Instances.attribute(nTargetNode).value(iValue); } // getNodeValue /** * change the name of a node * * @param nTargetNode index of the node to set name for * @param sName new name to assign */ public void setNodeName(int nTargetNode, String sName) { // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new RenameAction(nTargetNode, getNodeName(nTargetNode), sName)); } Attribute att = m_Instances.attribute(nTargetNode); int nCardinality = att.numValues(); ArrayList<String> values = new ArrayList<String>(nCardinality); for (int iValue = 0; iValue < nCardinality; iValue++) { values.add(att.value(iValue)); } replaceAtt(nTargetNode, sName, values); } // setNodeName /** * change the name of a value of a node * * @param nTargetNode index of the node to set name for * @param sValue current name of the value * @param sNewValue new name of the value */ public void renameNodeValue(int nTargetNode, String sValue, String sNewValue) { // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new RenameValueAction(nTargetNode, sValue, sNewValue)); } Attribute att = m_Instances.attribute(nTargetNode); int nCardinality = att.numValues(); ArrayList<String> values = new ArrayList<String>(nCardinality); for (int iValue = 0; iValue < nCardinality; iValue++) { if (att.value(iValue).equals(sValue)) { values.add(sNewValue); } else { values.add(att.value(iValue)); } } replaceAtt(nTargetNode, att.name(), values); } // renameNodeValue /** * Add node value to a node. Distributions for the node assign zero * probability to the new value. 
Child nodes duplicate CPT conditioned on the
 * new value.
 *
 * @param nTargetNode index of the node to add value for
 * @param sNewValue name of the value
 */
public void addNodeValue(int nTargetNode, String sNewValue) {
  // update undo stack before any state is mutated
  if (m_bNeedsUndoAction) {
    addUndoAction(new AddValueAction(nTargetNode, sNewValue));
  }
  Attribute att = m_Instances.attribute(nTargetNode);
  int nCardinality = att.numValues();
  ArrayList<String> values = new ArrayList<String>(nCardinality);
  for (int iValue = 0; iValue < nCardinality; iValue++) {
    values.add(att.value(iValue));
  }
  values.add(sNewValue);
  replaceAtt(nTargetNode, att.name(), values);
  // update distributions of this node: copy the old probabilities; the new
  // value keeps the estimator's initial (zero-prior) probability
  Estimator[] distributions = m_Distributions[nTargetNode];
  int nNewCard = values.size();
  for (int iParent = 0; iParent < distributions.length; iParent++) {
    DiscreteEstimatorBayes distribution = new DiscreteEstimatorBayes(
      nNewCard, 0);
    for (int iValue = 0; iValue < nNewCard - 1; iValue++) {
      distribution.addValue(iValue,
        distributions[iParent].getProbability(iValue));
    }
    distributions[iParent] = distribution;
  }
  // update distributions of all children
  for (int iNode = 0; iNode < getNrOfNodes(); iNode++) {
    if (m_ParentSets[iNode].contains(nTargetNode)) {
      distributions = m_Distributions[iNode];
      ParentSet parentSet = m_ParentSets[iNode];
      // cardinality of the parent configurations AFTER the value was added
      int nParentCard = parentSet.getFreshCardinalityOfParents(m_Instances);
      Estimator[] newDistributions = new Estimator[nParentCard];
      int nCard = getCardinality(iNode);
      int nParents = parentSet.getNrOfParents();
      // values2 enumerates parent configurations as a mixed-radix counter
      int[] values2 = new int[nParents];
      int iOldPos = 0;
      // locate the position of nTargetNode among this child's parents
      int iTargetNode = 0;
      while (parentSet.getParent(iTargetNode) != nTargetNode) {
        iTargetNode++;
      }
      for (int iPos = 0; iPos < nParentCard; iPos++) {
        DiscreteEstimatorBayes distribution = new DiscreteEstimatorBayes(
          nCard, 0);
        for (int iValue = 0; iValue < nCard; iValue++) {
          distribution.addValue(iValue,
            distributions[iOldPos].getProbability(iValue));
        }
        newDistributions[iPos] = distribution;
        // update values
        int i = 0;
values2[i]++;
        // mixed-radix increment with carry over the parent cardinalities
        while (i < nParents
          && values2[i] == getCardinality(parentSet.getParent(i))) {
          values2[i] = 0;
          i++;
          if (i < nParents) {
            values2[i]++;
          }
        }
        // advance in the OLD table except when the counter sits on the
        // freshly added target value (which reuses the last old row)
        if (values2[iTargetNode] != nNewCard - 1) {
          iOldPos++;
        }
      }
      m_Distributions[iNode] = newDistributions;
    }
  }
} // addNodeValue

/**
 * Delete node value from a node. Distributions for the node are scaled up
 * proportional to existing distribution (or made uniform if zero probability
 * is assigned to remainder of values). Child nodes delete CPTs conditioned
 * on the new value.
 *
 * @param nTargetNode index of the node to delete value from
 * @param sValue name of the value to delete
 */
public void delNodeValue(int nTargetNode, String sValue) throws Exception {
  // update undo stack before any state is mutated
  if (m_bNeedsUndoAction) {
    addUndoAction(new DelValueAction(nTargetNode, sValue));
  }
  Attribute att = m_Instances.attribute(nTargetNode);
  int nCardinality = att.numValues();
  ArrayList<String> values = new ArrayList<String>(nCardinality);
  // nValue = index of the value being removed
  int nValue = -1;
  for (int iValue = 0; iValue < nCardinality; iValue++) {
    if (att.value(iValue).equals(sValue)) {
      nValue = iValue;
    } else {
      values.add(att.value(iValue));
    }
  }
  if (nValue < 0) {
    // could not find value
    throw new Exception("Node " + nTargetNode + " does not have value ("
      + sValue + ")");
  }
  replaceAtt(nTargetNode, att.name(), values);
  // update distributions: renormalize the remaining probability mass, or
  // fall back to a uniform distribution when nothing remains
  Estimator[] distributions = m_Distributions[nTargetNode];
  int nCard = values.size();
  for (int iParent = 0; iParent < distributions.length; iParent++) {
    DiscreteEstimatorBayes distribution = new DiscreteEstimatorBayes(nCard, 0);
    double sum = 0;
    for (int iValue = 0; iValue < nCard; iValue++) {
      sum += distributions[iParent].getProbability(iValue);
    }
    if (sum > 0) {
      for (int iValue = 0; iValue < nCard; iValue++) {
        distribution.addValue(iValue,
          distributions[iParent].getProbability(iValue) / sum);
      }
    } else {
      for (int iValue = 0; iValue < nCard; iValue++) {
        distribution.addValue(iValue, 1.0 / nCard);
      }
    }
    distributions[iParent] = distribution;
  }
  // update
// distributions of all children
  for (int iNode = 0; iNode < getNrOfNodes(); iNode++) {
    if (m_ParentSets[iNode].contains(nTargetNode)) {
      ParentSet parentSet = m_ParentSets[iNode];
      distributions = m_Distributions[iNode];
      // shrink the CPT: rows conditioned on the deleted value disappear
      Estimator[] newDistributions = new Estimator[distributions.length
        * nCard / (nCard + 1)];
      int iCurrentDist = 0;
      int nParents = parentSet.getNrOfParents();
      int[] values2 = new int[nParents];
      // fill in the values
      // nParentCard is the cardinality BEFORE the value was deleted
      int nParentCard = parentSet.getFreshCardinalityOfParents(m_Instances)
        * (nCard + 1) / nCard;
      // locate the position of nTargetNode among this child's parents
      int iTargetNode = 0;
      while (parentSet.getParent(iTargetNode) != nTargetNode) {
        iTargetNode++;
      }
      // old (pre-deletion) cardinalities for the mixed-radix counter
      int[] nCards = new int[nParents];
      for (int iParent = 0; iParent < nParents; iParent++) {
        nCards[iParent] = getCardinality(parentSet.getParent(iParent));
      }
      nCards[iTargetNode]++;
      for (int iPos = 0; iPos < nParentCard; iPos++) {
        // keep only rows not conditioned on the deleted value
        if (values2[iTargetNode] != nValue) {
          newDistributions[iCurrentDist++] = distributions[iPos];
        }
        // update values
        int i = 0;
        values2[i]++;
        while (i < nParents && values2[i] == nCards[i]) {
          values2[i] = 0;
          i++;
          if (i < nParents) {
            values2[i]++;
          }
        }
      }
      m_Distributions[iNode] = newDistributions;
    }
  }
  // update evidence: indexes above the deleted value shift down by one
  if (getEvidence(nTargetNode) > nValue) {
    setEvidence(nTargetNode, getEvidence(nTargetNode) - 1);
  }
} // delNodeValue

/**
 * set position of node
 *
 * @param iNode index of node to set position for
 * @param nX x position of new position
 * @param nY y position of new position
 */
public void setPosition(int iNode, int nX, int nY) {
  // update undo stack
  if (m_bNeedsUndoAction) {
    // if the most recent undo action moved the same node, merge this move
    // into it instead of pushing a new action
    boolean isUpdate = false;
    UndoAction undoAction = null;
    try {
      if (m_undoStack.size() > 0) {
        undoAction = m_undoStack.get(m_undoStack.size() - 1);
        SetPositionAction posAction = (SetPositionAction) undoAction;
        if (posAction.m_nTargetNode == iNode) {
          isUpdate = true;
          posAction.setUndoPosition(nX, nY);
        }
      }
    } catch (Exception e) {
      // ignore.
// it's not a SetPositionAction
    }
    if (!isUpdate) {
      addUndoAction(new SetPositionAction(iNode, nX, nY));
    }
  }
  // NOTE(review): add(int, E) INSERTS at the index rather than replacing
  // the existing entry — looks suspicious for a "set position" operation;
  // verify against callers / upstream
  m_nPositionX.add(iNode, nX);
  m_nPositionY.add(iNode, nY);
} // setPosition

/**
 * Set position of node. Move set of nodes with the same displacement as a
 * specified node.
 *
 * @param nNode index of node to set position for
 * @param nX x position of new position
 * @param nY y position of new position
 * @param nodes array of indexes of nodes to move
 */
public void setPosition(int nNode, int nX, int nY, ArrayList<Integer> nodes) {
  // displacement relative to the anchor node's current position
  int dX = nX - getPositionX(nNode);
  int dY = nY - getPositionY(nNode);
  // update undo stack
  if (m_bNeedsUndoAction) {
    // merge into the previous group-move action when it covers the same
    // node selection
    boolean isUpdate = false;
    try {
      UndoAction undoAction = null;
      if (m_undoStack.size() > 0) {
        undoAction = m_undoStack.get(m_undoStack.size() - 1);
        SetGroupPositionAction posAction = (SetGroupPositionAction) undoAction;
        isUpdate = true;
        int iNode = 0;
        while (isUpdate && iNode < posAction.m_nodes.size()) {
          // NOTE(review): != compares Integer references, not values —
          // only reliable inside the small-integer cache; verify
          if (posAction.m_nodes.get(iNode) != nodes.get(iNode)) {
            isUpdate = false;
          }
          iNode++;
        }
        if (isUpdate == true) {
          posAction.setUndoPosition(dX, dY);
        }
      }
    } catch (Exception e) {
      // ignore.
// it's not a SetPositionAction
    }
    if (!isUpdate) {
      addUndoAction(new SetGroupPositionAction(nodes, dX, dY));
    }
  }
  // apply the same displacement to every node in the selection
  for (int iNode = 0; iNode < nodes.size(); iNode++) {
    nNode = nodes.get(iNode);
    m_nPositionX.set(nNode, getPositionX(nNode) + dX);
    m_nPositionY.set(nNode, getPositionY(nNode) + dY);
  }
} // setPosition

/**
 * set positions of all nodes
 *
 * @param nPosX new x positions for all nodes
 * @param nPosY new y positions for all nodes
 */
public void layoutGraph(ArrayList<Integer> nPosX, ArrayList<Integer> nPosY) {
  if (m_bNeedsUndoAction) {
    addUndoAction(new LayoutGraphAction(nPosX, nPosY));
  }
  // adopt the caller's lists directly (no defensive copy)
  m_nPositionX = nPosX;
  m_nPositionY = nPosY;
} // layoutGraph

/**
 * get x position of a node
 *
 * @param iNode index of node of interest
 */
public int getPositionX(int iNode) {
  return (m_nPositionX.get(iNode));
}

/**
 * get y position of a node
 *
 * @param iNode index of node of interest
 */
public int getPositionY(int iNode) {
  return (m_nPositionY.get(iNode));
}

/**
 * align set of nodes with the left most node in the list
 *
 * @param nodes list of indexes of nodes to align
 */
public void alignLeft(ArrayList<Integer> nodes) {
  // update undo stack
  if (m_bNeedsUndoAction) {
    addUndoAction(new alignLeftAction(nodes));
  }
  // find the smallest x among the selection (iNode == 0 seeds the minimum)
  int nMinX = -1;
  for (int iNode = 0; iNode < nodes.size(); iNode++) {
    int nX = getPositionX(nodes.get(iNode));
    if (nX < nMinX || iNode == 0) {
      nMinX = nX;
    }
  }
  for (int iNode = 0; iNode < nodes.size(); iNode++) {
    int nNode = nodes.get(iNode);
    m_nPositionX.set(nNode, nMinX);
  }
} // alignLeft

/**
 * align set of nodes with the right most node in the list
 *
 * @param nodes list of indexes of nodes to align
 */
public void alignRight(ArrayList<Integer> nodes) {
  // update undo stack
  if (m_bNeedsUndoAction) {
    addUndoAction(new alignRightAction(nodes));
  }
  // find the largest x among the selection
  int nMaxX = -1;
  for (int iNode = 0; iNode < nodes.size(); iNode++) {
    int nX = getPositionX(nodes.get(iNode));
    if (nX > nMaxX || iNode == 0) {
      nMaxX = nX;
    }
  }
  for (int iNode = 0; iNode < nodes.size(); iNode++) {
    int nNode =
nodes.get(iNode);
    m_nPositionX.set(nNode, nMaxX);
  }
} // alignRight

/**
 * align set of nodes with the top most node in the list
 *
 * @param nodes list of indexes of nodes to align
 */
public void alignTop(ArrayList<Integer> nodes) {
  // update undo stack
  if (m_bNeedsUndoAction) {
    addUndoAction(new alignTopAction(nodes));
  }
  // find the smallest y among the selection (iNode == 0 seeds the minimum)
  int nMinY = -1;
  for (int iNode = 0; iNode < nodes.size(); iNode++) {
    int nY = getPositionY(nodes.get(iNode));
    if (nY < nMinY || iNode == 0) {
      nMinY = nY;
    }
  }
  for (int iNode = 0; iNode < nodes.size(); iNode++) {
    int nNode = nodes.get(iNode);
    m_nPositionY.set(nNode, nMinY);
  }
} // alignTop

/**
 * align set of nodes with the bottom most node in the list
 *
 * @param nodes list of indexes of nodes to align
 */
public void alignBottom(ArrayList<Integer> nodes) {
  // update undo stack
  if (m_bNeedsUndoAction) {
    addUndoAction(new alignBottomAction(nodes));
  }
  // find the largest y among the selection
  int nMaxY = -1;
  for (int iNode = 0; iNode < nodes.size(); iNode++) {
    int nY = getPositionY(nodes.get(iNode));
    if (nY > nMaxY || iNode == 0) {
      nMaxY = nY;
    }
  }
  for (int iNode = 0; iNode < nodes.size(); iNode++) {
    int nNode = nodes.get(iNode);
    m_nPositionY.set(nNode, nMaxY);
  }
} // alignBottom

/**
 * center set of nodes half way between left and right most node in the list
 *
 * @param nodes list of indexes of nodes to center
 */
public void centerHorizontal(ArrayList<Integer> nodes) {
  // update undo stack
  if (m_bNeedsUndoAction) {
    addUndoAction(new centerHorizontalAction(nodes));
  }
  // NOTE(review): despite the name this operates on y-coordinates
  // (centering along a horizontal axis); mirror of centerVertical
  int nMinY = -1;
  int nMaxY = -1;
  for (int iNode = 0; iNode < nodes.size(); iNode++) {
    int nY = getPositionY(nodes.get(iNode));
    if (nY < nMinY || iNode == 0) {
      nMinY = nY;
    }
    if (nY > nMaxY || iNode == 0) {
      nMaxY = nY;
    }
  }
  for (int iNode = 0; iNode < nodes.size(); iNode++) {
    int nNode = nodes.get(iNode);
    m_nPositionY.set(nNode, (nMinY + nMaxY) / 2);
  }
} // centerHorizontal

/**
 * center set of nodes half way between top and bottom most node in the list
 *
 * @param nodes list of indexes of nodes to center
 */
public void
centerVertical(ArrayList<Integer> nodes) {
  // update undo stack
  if (m_bNeedsUndoAction) {
    addUndoAction(new centerVerticalAction(nodes));
  }
  // NOTE(review): despite the name this operates on x-coordinates
  // (centering along a vertical axis); mirror of centerHorizontal
  int nMinX = -1;
  int nMaxX = -1;
  for (int iNode = 0; iNode < nodes.size(); iNode++) {
    int nX = getPositionX(nodes.get(iNode));
    if (nX < nMinX || iNode == 0) {
      nMinX = nX;
    }
    if (nX > nMaxX || iNode == 0) {
      nMaxX = nX;
    }
  }
  for (int iNode = 0; iNode < nodes.size(); iNode++) {
    int nNode = nodes.get(iNode);
    m_nPositionX.set(nNode, (nMinX + nMaxX) / 2);
  }
} // centerVertical

/**
 * space out set of nodes evenly between left and right most node in the list
 *
 * @param nodes list of indexes of nodes to space out
 */
public void spaceHorizontal(ArrayList<Integer> nodes) {
  // update undo stack
  if (m_bNeedsUndoAction) {
    addUndoAction(new spaceHorizontalAction(nodes));
  }
  int nMinX = -1;
  int nMaxX = -1;
  for (int iNode = 0; iNode < nodes.size(); iNode++) {
    int nX = getPositionX(nodes.get(iNode));
    if (nX < nMinX || iNode == 0) {
      nMinX = nX;
    }
    if (nX > nMaxX || iNode == 0) {
      nMaxX = nX;
    }
  }
  // distribute evenly in list order between the extremes
  for (int iNode = 0; iNode < nodes.size(); iNode++) {
    int nNode = nodes.get(iNode);
    m_nPositionX.set(nNode,
      (int) (nMinX + iNode * (nMaxX - nMinX) / (nodes.size() - 1.0)));
  }
} // spaceHorizontal

/**
 * space out set of nodes evenly between top and bottom most node in the list
 *
 * @param nodes list of indexes of nodes to space out
 */
public void spaceVertical(ArrayList<Integer> nodes) {
  // update undo stack
  if (m_bNeedsUndoAction) {
    addUndoAction(new spaceVerticalAction(nodes));
  }
  int nMinY = -1;
  int nMaxY = -1;
  for (int iNode = 0; iNode < nodes.size(); iNode++) {
    int nY = getPositionY(nodes.get(iNode));
    if (nY < nMinY || iNode == 0) {
      nMinY = nY;
    }
    if (nY > nMaxY || iNode == 0) {
      nMaxY = nY;
    }
  }
  // distribute evenly in list order between the extremes
  for (int iNode = 0; iNode < nodes.size(); iNode++) {
    int nNode = nodes.get(iNode);
    m_nPositionY.set(nNode,
      (int) (nMinY + iNode * (nMaxY - nMinY) / (nodes.size() - 1.0)));
  }
} // spaceVertical

/**
 * replace attribute with specified name and values
 *
 * @param nTargetNode
* index of node the replace specification for
 * @param sName new name of the node
 * @param values array of values of the node
 */
void replaceAtt(int nTargetNode, String sName, ArrayList<String> values) {
  // build the replacement attribute
  Attribute newAtt = new Attribute(sName, values);
  if (m_Instances.classIndex() == nTargetNode) {
    // class attribute: clear the class index so it may be deleted, then
    // re-insert the rebuilt attribute and restore the class index
    m_Instances.setClassIndex(-1);
    /*
     * m_Instances.insertAttributeAt(newAtt, nTargetNode);
     * m_Instances.deleteAttributeAt(nTargetNode + 1);
     * m_Instances.setClassIndex(nTargetNode);
     */
    m_Instances.deleteAttributeAt(nTargetNode);
    m_Instances.insertAttributeAt(newAtt, nTargetNode);
    m_Instances.setClassIndex(nTargetNode);
  } else {
    /*
     * m_Instances.insertAttributeAt(newAtt, nTargetNode);
     * m_Instances.deleteAttributeAt(nTargetNode + 1);
     */
    m_Instances.deleteAttributeAt(nTargetNode);
    m_Instances.insertAttributeAt(newAtt, nTargetNode);
  }
} // replaceAtt

/**
 * return marginal distibution for a node
 *
 * @param iNode index of node of interest
 */
public double[] getMargin(int iNode) {
  return m_fMarginP.get(iNode);
};

/**
 * set marginal distibution for a node
 *
 * @param iNode index of node to set marginal distribution for
 * @param fMarginP marginal distribution
 */
public void setMargin(int iNode, double[] fMarginP) {
  m_fMarginP.set(iNode, fMarginP);
}

/**
 * get evidence state of a node. -1 represents no evidence set, otherwise the
 * index of a value of the node
 *
 * @param iNode index of node of interest
 */
public int getEvidence(int iNode) {
  return m_nEvidence.get(iNode);
}

/**
 * set evidence state of a node.
-1 represents no evidence set, otherwise the
 * index of a value of the node
 *
 * @param iNode index of node of interest
 * @param iValue evidence value to set
 */
public void setEvidence(int iNode, int iValue) {
  m_nEvidence.set(iNode, iValue);
}

/**
 * return list of children of a node
 *
 * @param nTargetNode index of node of interest
 */
public ArrayList<Integer> getChildren(int nTargetNode) {
  ArrayList<Integer> children = new ArrayList<Integer>();
  // a child is any node whose parent set contains the target
  for (int iNode = 0; iNode < getNrOfNodes(); iNode++) {
    if (m_ParentSets[iNode].contains(nTargetNode)) {
      children.add(iNode);
    }
  }
  return children;
} // getChildren

/**
 * returns network in XMLBIF format
 */
@Override
public String toXMLBIF03() {
  if (m_Instances == null) {
    return ("<!--No model built yet-->");
  }
  StringBuffer text = new StringBuffer();
  text.append(getBIFHeader());
  text.append("\n");
  text.append("\n");
  text.append("<BIF VERSION=\"0.3\">\n");
  text.append("<NETWORK>\n");
  text.append("<NAME>" + XMLNormalize(m_Instances.relationName())
    + "</NAME>\n");
  // one VARIABLE declaration per attribute
  for (int iAttribute = 0; iAttribute < m_Instances.numAttributes(); iAttribute++) {
    text.append("<VARIABLE TYPE=\"nature\">\n");
    text.append("<NAME>"
      + XMLNormalize(m_Instances.attribute(iAttribute).name()) + "</NAME>\n");
    for (int iValue = 0; iValue < m_Instances.attribute(iAttribute)
      .numValues(); iValue++) {
      text.append("<OUTCOME>"
        + XMLNormalize(m_Instances.attribute(iAttribute).value(iValue))
        + "</OUTCOME>\n");
    }
    text.append("<PROPERTY>position = (" + getPositionX(iAttribute) + ","
      + getPositionY(iAttribute) + ")</PROPERTY>\n");
    text.append("</VARIABLE>\n");
  }
  // one DEFINITION (parents + CPT) per attribute
  for (int iAttribute = 0; iAttribute < m_Instances.numAttributes(); iAttribute++) {
    text.append("<DEFINITION>\n");
    text.append("<FOR>"
      + XMLNormalize(m_Instances.attribute(iAttribute).name()) + "</FOR>\n");
    for (int iParent = 0; iParent < m_ParentSets[iAttribute].getNrOfParents(); iParent++) {
      text
        .append("<GIVEN>"
          + XMLNormalize(m_Instances.attribute(
            m_ParentSets[iAttribute].getParent(iParent)).name())
          +
"</GIVEN>\n"); } text.append("<TABLE>\n"); for (int iParent = 0; iParent < m_ParentSets[iAttribute] .getCardinalityOfParents(); iParent++) { for (int iValue = 0; iValue < m_Instances.attribute(iAttribute) .numValues(); iValue++) { text.append(m_Distributions[iAttribute][iParent] .getProbability(iValue)); text.append(' '); } text.append('\n'); } text.append("</TABLE>\n"); text.append("</DEFINITION>\n"); } text.append("</NETWORK>\n"); text.append("</BIF>\n"); return text.toString(); } // toXMLBIF03 /** * return fragment of network in XMLBIF format * * @param nodes array of indexes of nodes that should be in the fragment */ public String toXMLBIF03(ArrayList<Integer> nodes) { StringBuffer text = new StringBuffer(); text.append(getBIFHeader()); text.append("\n"); text.append("\n"); text.append("<BIF VERSION=\"0.3\">\n"); text.append("<NETWORK>\n"); text.append("<NAME>" + XMLNormalize(m_Instances.relationName()) + "</NAME>\n"); for (int iNode = 0; iNode < nodes.size(); iNode++) { int nNode = nodes.get(iNode); text.append("<VARIABLE TYPE=\"nature\">\n"); text.append("<NAME>" + XMLNormalize(m_Instances.attribute(nNode).name()) + "</NAME>\n"); for (int iValue = 0; iValue < m_Instances.attribute(nNode).numValues(); iValue++) { text.append("<OUTCOME>" + XMLNormalize(m_Instances.attribute(nNode).value(iValue)) + "</OUTCOME>\n"); } text.append("<PROPERTY>position = (" + getPositionX(nNode) + "," + getPositionY(nNode) + ")</PROPERTY>\n"); text.append("</VARIABLE>\n"); } for (int iNode = 0; iNode < nodes.size(); iNode++) { int nNode = nodes.get(iNode); text.append("<DEFINITION>\n"); text.append("<FOR>" + XMLNormalize(m_Instances.attribute(nNode).name()) + "</FOR>\n"); for (int iParent = 0; iParent < m_ParentSets[nNode].getNrOfParents(); iParent++) { text.append("<GIVEN>" + XMLNormalize(m_Instances.attribute( m_ParentSets[nNode].getParent(iParent)).name()) + "</GIVEN>\n"); } text.append("<TABLE>\n"); for (int iParent = 0; iParent < m_ParentSets[nNode] .getCardinalityOfParents(); 
iParent++) { for (int iValue = 0; iValue < m_Instances.attribute(nNode).numValues(); iValue++) { text.append(m_Distributions[nNode][iParent].getProbability(iValue)); text.append(' '); } text.append('\n'); } text.append("</TABLE>\n"); text.append("</DEFINITION>\n"); } text.append("</NETWORK>\n"); text.append("</BIF>\n"); return text.toString(); } // toXMLBIF03 /** undo stack for undoin edit actions, or redo edit actions */ ArrayList<UndoAction> m_undoStack = new ArrayList<UndoAction>(); /** current action in undo stack */ int m_nCurrentEditAction = -1; /** action that the network is saved */ int m_nSavedPointer = -1; /*************************************************************************** * flag to indicate whether an edit action needs to introduce an undo action. * This is only false when an undo or redo action is performed. **************************************************************************/ boolean m_bNeedsUndoAction = true; /** return whether there is something on the undo stack that can be performed */ public boolean canUndo() { return m_nCurrentEditAction >= 0; } /** return whether there is something on the undo stack that can be performed */ public boolean canRedo() { return m_nCurrentEditAction < m_undoStack.size() - 1; } /** * return true when current state differs from the state the network was last * saved */ public boolean isChanged() { return m_nCurrentEditAction != m_nSavedPointer; } /** indicate the network state was saved */ public void isSaved() { m_nSavedPointer = m_nCurrentEditAction; } /** get message representing the last action performed on the network */ public String lastActionMsg() { if (m_undoStack.size() == 0) { return ""; } return m_undoStack.get(m_undoStack.size() - 1).getRedoMsg(); } // lastActionMsg /** * undo the last edit action performed on the network. returns message * representing the action performed. 
*/ public String undo() { if (!canUndo()) { return ""; } UndoAction undoAction = m_undoStack.get(m_nCurrentEditAction); m_bNeedsUndoAction = false; undoAction.undo(); m_bNeedsUndoAction = true; m_nCurrentEditAction--; // undo stack debugging /* * if (m_nCurrentEditAction>0) { String sXML = (String) * m_sXMLStack.get(m_nCurrentEditAction); String sXMLCurrent = toXMLBIF03(); * if (!sXML.equals(sXMLCurrent)) { String sDiff = ""; String sDiff2 = ""; * for (int i = 0; i < sXML.length() && sDiff.length() < 80; i++) { if * (sXML.charAt(i) != sXMLCurrent.charAt(i)) { sDiff += sXML.charAt(i); * sDiff2 += sXMLCurrent.charAt(i); } } * * JOptionPane.showMessageDialog(null,"Undo error\n" + sDiff + " \n" + * sDiff2); } } */ return undoAction.getUndoMsg(); } // undo /** * redo the last edit action performed on the network. returns message * representing the action performed. */ public String redo() { if (!canRedo()) { return ""; } m_nCurrentEditAction++; UndoAction undoAction = m_undoStack.get(m_nCurrentEditAction); m_bNeedsUndoAction = false; undoAction.redo(); m_bNeedsUndoAction = true; // undo stack debugging /* * if (m_nCurrentEditAction < m_sXMLStack.size()) { String sXML = (String) * m_sXMLStack.get(m_nCurrentEditAction); String sXMLCurrent = toXMLBIF03(); * if (!sXML.equals(sXMLCurrent)) { String sDiff = ""; String sDiff2 = ""; * for (int i = 0; i < sXML.length() && sDiff.length() < 80; i++) { if * (sXML.charAt(i) != sXMLCurrent.charAt(i)) { sDiff += sXML.charAt(i); * sDiff2 += sXMLCurrent.charAt(i); } } * * JOptionPane.showMessageDialog(null,"redo error\n" + sDiff + " \n" + * sDiff2); } } */ return undoAction.getRedoMsg(); } // redo /** * add undo action to the undo stack. 
* @param action operation that needs to be added to the undo stack
 */
void addUndoAction(UndoAction action) {
  // drop any actions that could still be redone; they become unreachable
  // once a new action is recorded at the current position
  int iAction = m_undoStack.size() - 1;
  while (iAction > m_nCurrentEditAction) {
    m_undoStack.remove(iAction--);
  }
  if (m_nSavedPointer > m_nCurrentEditAction) {
    // the saved state was just discarded, so it can never be reached again
    m_nSavedPointer = -2;
  }
  m_undoStack.add(action);
  m_nCurrentEditAction++;
} // addUndoAction

/** remove all actions from the undo stack */
public void clearUndoStack() {
  m_undoStack = new ArrayList<UndoAction>();
  m_nCurrentEditAction = -1;
  m_nSavedPointer = -1;
} // clearUndoStack

/**
 * base class for actions representing operations on the Bayesian network that
 * can be undone/redone
 */
class UndoAction implements Serializable {

  /** for serialization */
  static final long serialVersionUID = 1;

  public void undo() {
  }

  public void redo() {
  }

  public String getUndoMsg() {
    return getMsg();
  }

  public String getRedoMsg() {
    return getMsg();
  }

  /**
   * derive a human-readable message from this action's class name: takes
   * the text between '$' and '@' in toString() (the inner-class name of the
   * default Object.toString()) and inserts a space before each capital.
   */
  String getMsg() {
    String sStr = toString();
    int iStart = sStr.indexOf('$');
    int iEnd = sStr.indexOf('@');
    StringBuffer sBuffer = new StringBuffer();
    for (int i = iStart + 1; i < iEnd; i++) {
      char c = sStr.charAt(i);
      if (Character.isUpperCase(c)) {
        sBuffer.append(' ');
      }
      sBuffer.append(sStr.charAt(i));
    }
    return sBuffer.toString();
  } // getMsg
} // class UndoAction

/** undo action recording the addition of a node to the network */
class AddNodeAction extends UndoAction {

  /** for serialization */
  static final long serialVersionUID = 1;

  String m_sName;
  int m_nPosX;
  int m_nPosY;
  int m_nCardinality;

  AddNodeAction(String sName, int nCardinality, int nPosX, int nPosY) {
    m_sName = sName;
    m_nCardinality = nCardinality;
    m_nPosX = nPosX;
    m_nPosY = nPosY;
  } // c'tor

  @Override
  public void undo() {
    try {
      // an added node is always the last one, so remove the last node
      deleteNode(getNrOfNodes() - 1);
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // undo

  @Override
  public void redo() {
    try {
      addNode(m_sName, m_nCardinality, m_nPosX, m_nPosY);
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // redo
} // class AddNodeAction

/**
 * undo action recording the deletion of a node; snapshots the attribute,
 * CPT, parent set, position and all incoming arcs so they can be restored
 */
class DeleteNodeAction extends UndoAction {

  /** for serialization */
  static final long serialVersionUID = 1;

  int m_nTargetNode;
  Attribute m_att;
  Estimator[] m_CPT;
  ParentSet m_ParentSet;
  ArrayList<DeleteArcAction> m_deleteArcActions;
  int m_nPosX;
  int m_nPosY;

  DeleteNodeAction(int nTargetNode) {
    m_nTargetNode = nTargetNode;
    m_att = m_Instances.attribute(nTargetNode);
    try {
      // deep-copy the CPT and parent set via serialization so later edits
      // cannot mutate the snapshot
      SerializedObject so = new SerializedObject(m_Distributions[nTargetNode]);
      m_CPT = (Estimator[]) so.getObject();
      ;
      so = new SerializedObject(m_ParentSets[nTargetNode]);
      m_ParentSet = (ParentSet) so.getObject();
    } catch (Exception e) {
      e.printStackTrace();
    }
    m_deleteArcActions = new ArrayList<DeleteArcAction>();
    for (int iNode = 0; iNode < getNrOfNodes(); iNode++) {
      if (m_ParentSets[iNode].contains(nTargetNode)) {
        m_deleteArcActions.add(new DeleteArcAction(nTargetNode, iNode));
      }
    }
    m_nPosX = getPositionX(m_nTargetNode);
    m_nPosY = getPositionY(m_nTargetNode);
  } // c'tor

  @Override
  public void undo() {
    try {
      m_Instances.insertAttributeAt(m_att, m_nTargetNode);
      int nAtts = m_Instances.numAttributes();
      // update parentsets: re-insert the saved parent set and shift parent
      // indexes >= the re-inserted node up by one
      ParentSet[] parentSets = new ParentSet[nAtts];
      int nX = 0;
      for (int iParentSet = 0; iParentSet < nAtts; iParentSet++) {
        if (iParentSet == m_nTargetNode) {
          SerializedObject so = new SerializedObject(m_ParentSet);
          parentSets[iParentSet] = (ParentSet) so.getObject();
          nX = 1;
        } else {
          parentSets[iParentSet] = m_ParentSets[iParentSet - nX];
          for (int iParent = 0; iParent < parentSets[iParentSet]
            .getNrOfParents(); iParent++) {
            int nParent = parentSets[iParentSet].getParent(iParent);
            if (nParent >= m_nTargetNode) {
              parentSets[iParentSet].SetParent(iParent, nParent + 1);
            }
          }
        }
      }
      m_ParentSets = parentSets;
      // update distributions: restore the saved CPT at the target slot
      Estimator[][] distributions = new Estimator[nAtts][];
      nX = 0;
      for (int iNode = 0; iNode < nAtts; iNode++) {
        if (iNode == m_nTargetNode) {
          SerializedObject so = new SerializedObject(m_CPT);
          distributions[iNode] = (Estimator[]) so.getObject();
          nX = 1;
        } else {
          distributions[iNode] = m_Distributions[iNode - nX];
        }
      }
      m_Distributions = distributions;
      // restore arcs that pointed out of the deleted node
      for (int deletedArc = 0; deletedArc < m_deleteArcActions.size(); deletedArc++) {
        DeleteArcAction action = m_deleteArcActions.get(deletedArc);
        action.undo();
      }
      m_nPositionX.add(m_nTargetNode, m_nPosX);
      m_nPositionY.add(m_nTargetNode, m_nPosY);
      m_nEvidence.add(m_nTargetNode, -1);
      m_fMarginP
        .add(m_nTargetNode, new double[getCardinality(m_nTargetNode)]);
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // undo

  @Override
  public void redo() {
    try {
      deleteNode(m_nTargetNode);
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // redo
} // class DeleteNodeAction

/**
 * undo action recording the deletion of a set of nodes; like
 * DeleteNodeAction but snapshots all selected nodes at once
 */
class DeleteSelectionAction extends UndoAction {

  /** for serialization */
  static final long serialVersionUID = 1;

  ArrayList<Integer> m_nodes;
  Attribute[] m_att;
  Estimator[][] m_CPT;
  ParentSet[] m_ParentSet;
  ArrayList<DeleteArcAction> m_deleteArcActions;
  int[] m_nPosX;
  int[] m_nPosY;

  public DeleteSelectionAction(ArrayList<Integer> nodes) {
    m_nodes = new ArrayList<Integer>();
    int nNodes = nodes.size();
    m_att = new Attribute[nNodes];
    m_CPT = new Estimator[nNodes][];
    m_ParentSet = new ParentSet[nNodes];
    m_nPosX = new int[nNodes];
    m_nPosY = new int[nNodes];
    m_deleteArcActions = new ArrayList<DeleteArcAction>();
    for (int iNode = 0; iNode < nodes.size(); iNode++) {
      int nTargetNode = nodes.get(iNode);
      m_nodes.add(nTargetNode);
      m_att[iNode] = m_Instances.attribute(nTargetNode);
      try {
        // deep-copy CPT and parent set via serialization
        SerializedObject so = new SerializedObject(
          m_Distributions[nTargetNode]);
        m_CPT[iNode] = (Estimator[]) so.getObject();
        ;
        so = new SerializedObject(m_ParentSets[nTargetNode]);
        m_ParentSet[iNode] = (ParentSet) so.getObject();
      } catch (Exception e) {
        e.printStackTrace();
      }
      m_nPosX[iNode] = getPositionX(nTargetNode);
      m_nPosY[iNode] = getPositionY(nTargetNode);
      // only record arcs into nodes OUTSIDE the selection; arcs between
      // selected nodes are restored via the saved parent sets
      for (int iNode2 = 0; iNode2 < getNrOfNodes(); iNode2++) {
        if (!nodes.contains(iNode2)
          && m_ParentSets[iNode2].contains(nTargetNode)) {
          m_deleteArcActions.add(new DeleteArcAction(nTargetNode, iNode2));
        }
      }
    }
  } // c'tor

  @Override
  public void undo() {
    try {
      for (int iNode = 0; iNode < m_nodes.size(); iNode++) {
        int nTargetNode = m_nodes.get(iNode);
        m_Instances.insertAttributeAt(m_att[iNode], nTargetNode);
      }
      int nAtts = m_Instances.numAttributes();
      // update parentsets; offset[] maps old node indexes to new ones
      // after all selected nodes are re-inserted
      ParentSet[] parentSets = new ParentSet[nAtts];
      int[] offset = new int[nAtts];
      for (int iNode = 0; iNode < nAtts; iNode++) {
        offset[iNode] = iNode;
      }
      for (int iNode = m_nodes.size() - 1; iNode >= 0; iNode--) {
        int nTargetNode = m_nodes.get(iNode);
        for (int i = nTargetNode; i < nAtts - 1; i++) {
          offset[i] = offset[i + 1];
        }
      }
      int iTargetNode = 0;
      for (int iParentSet = 0; iParentSet < nAtts; iParentSet++) {
        if (iTargetNode < m_nodes.size()
          && m_nodes.get(iTargetNode) == iParentSet) {
          SerializedObject so = new SerializedObject(m_ParentSet[iTargetNode]);
          parentSets[iParentSet] = (ParentSet) so.getObject();
          iTargetNode++;
        } else {
          parentSets[iParentSet] = m_ParentSets[iParentSet - iTargetNode];
          for (int iParent = 0; iParent < parentSets[iParentSet]
            .getNrOfParents(); iParent++) {
            int nParent = parentSets[iParentSet].getParent(iParent);
            parentSets[iParentSet].SetParent(iParent, offset[nParent]);
          }
        }
      }
      m_ParentSets = parentSets;
      // update distributions: splice saved CPTs back in at their slots
      Estimator[][] distributions = new Estimator[nAtts][];
      iTargetNode = 0;
      for (int iNode = 0; iNode < nAtts; iNode++) {
        if (iTargetNode < m_nodes.size()
          && m_nodes.get(iTargetNode) == iNode) {
          SerializedObject so = new SerializedObject(m_CPT[iTargetNode]);
          distributions[iNode] = (Estimator[]) so.getObject();
          iTargetNode++;
        } else {
          distributions[iNode] = m_Distributions[iNode - iTargetNode];
        }
      }
      m_Distributions = distributions;
      for (int iNode = 0; iNode < m_nodes.size(); iNode++) {
        int nTargetNode = m_nodes.get(iNode);
        m_nPositionX.add(nTargetNode, m_nPosX[iNode]);
        m_nPositionY.add(nTargetNode, m_nPosY[iNode]);
        m_nEvidence.add(nTargetNode, -1);
        m_fMarginP.add(nTargetNode, new double[getCardinality(nTargetNode)]);
      }
      // restore arcs into nodes outside the selection
      for (int deletedArc = 0; deletedArc < m_deleteArcActions.size(); deletedArc++) {
        DeleteArcAction action = m_deleteArcActions.get(deletedArc);
        action.undo();
      }
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // undo

  @Override
  public void redo() {
    try {
      // delete in descending index order so earlier indexes stay valid
      for (int iNode = m_nodes.size() - 1; iNode >= 0; iNode--) {
        int nNode = m_nodes.get(iNode);
        deleteNode(nNode);
      }
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // redo
} // class DeleteSelectionAction

/**
 * undo action recording the addition of one or more arcs from a single
 * parent; snapshots the children's CPTs before the arcs were added
 */
class AddArcAction extends UndoAction {

  /** for serialization */
  static final long serialVersionUID = 1;

  ArrayList<Integer> m_children;
  int m_nParent;
  Estimator[][] m_CPT;

  AddArcAction(int nParent, int nChild) {
    try {
      m_nParent = nParent;
      m_children = new ArrayList<Integer>();
      m_children.add(nChild);
      // deep-copy the child's CPT so undo can restore it exactly
      SerializedObject so = new SerializedObject(m_Distributions[nChild]);
      m_CPT = new Estimator[1][];
      m_CPT[0] = (Estimator[]) so.getObject();
      ;
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // c'tor

  AddArcAction(int nParent, ArrayList<Integer> children) {
    try {
      m_nParent = nParent;
      m_children = new ArrayList<Integer>();
      m_CPT = new Estimator[children.size()][];
      for (int iChild = 0; iChild < children.size(); iChild++) {
        int nChild = children.get(iChild);
        m_children.add(nChild);
        SerializedObject so = new SerializedObject(m_Distributions[nChild]);
        m_CPT[iChild] = (Estimator[]) so.getObject();
      }
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // c'tor

  @Override
  public void undo() {
    try {
      for (int iChild = 0; iChild < m_children.size(); iChild++) {
        int nChild = m_children.get(iChild);
        deleteArc(m_nParent, nChild);
        // restore the CPT as it was before the arc was added
        SerializedObject so = new SerializedObject(m_CPT[iChild]);
        m_Distributions[nChild] = (Estimator[]) so.getObject();
      }
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // undo

  @Override
  public void redo() {
    try {
      for (int iChild = 0; iChild < m_children.size(); iChild++) {
        int nChild = m_children.get(iChild);
        addArc(m_nParent, nChild);
      }
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // redo
} // class AddArcAction

/**
 * undo action recording the deletion of an arc; snapshots the child's full
 * parent list and CPT for restoration
 */
class DeleteArcAction
extends UndoAction {

  /** for serialization */
  static final long serialVersionUID = 1;

  int[] m_nParents;
  int m_nChild;
  int m_nParent;
  Estimator[] m_CPT;

  DeleteArcAction(int nParent, int nChild) {
    try {
      m_nChild = nChild;
      m_nParent = nParent;
      // remember the child's complete parent list, not just the deleted arc
      m_nParents = new int[getNrOfParents(nChild)];
      for (int iParent = 0; iParent < m_nParents.length; iParent++) {
        m_nParents[iParent] = getParent(nChild, iParent);
      }
      // deep-copy the child's CPT via serialization
      SerializedObject so = new SerializedObject(m_Distributions[nChild]);
      m_CPT = (Estimator[]) so.getObject();
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // c'tor

  @Override
  public void undo() {
    try {
      SerializedObject so = new SerializedObject(m_CPT);
      m_Distributions[m_nChild] = (Estimator[]) so.getObject();
      // rebuild the parent set from the saved parent list
      ParentSet parentSet = new ParentSet();
      for (int m_nParent2 : m_nParents) {
        parentSet.addParent(m_nParent2, m_Instances);
      }
      m_ParentSets[m_nChild] = parentSet;
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // undo

  @Override
  public void redo() {
    try {
      deleteArc(m_nParent, m_nChild);
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // redo
} // class DeleteArcAction

/** undo action recording a change of a node's probability distribution */
class SetDistributionAction extends UndoAction {

  /** for serialization */
  static final long serialVersionUID = 1;

  int m_nTargetNode;
  Estimator[] m_CPT;
  double[][] m_P;

  SetDistributionAction(int nTargetNode, double[][] P) {
    try {
      m_nTargetNode = nTargetNode;
      // deep-copy the old CPT so undo can restore it
      SerializedObject so = new SerializedObject(m_Distributions[nTargetNode]);
      m_CPT = (Estimator[]) so.getObject();
      ;
      m_P = P;
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // c'tor

  @Override
  public void undo() {
    try {
      SerializedObject so = new SerializedObject(m_CPT);
      m_Distributions[m_nTargetNode] = (Estimator[]) so.getObject();
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // undo

  @Override
  public void redo() {
    try {
      setDistribution(m_nTargetNode, m_P);
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // redo

  @Override
  public String getUndoMsg() {
    return "Distribution of node " + getNodeName(m_nTargetNode) + " changed";
  }

  @Override
  public String getRedoMsg() {
    return "Distribution of node " + getNodeName(m_nTargetNode) + " changed";
  }
} // class SetDistributionAction

/** undo action recording the renaming of a node */
class RenameAction extends UndoAction {

  /** for serialization */
  static final long serialVersionUID = 1;

  int m_nTargetNode;
  String m_sNewName;
  String m_sOldName;

  RenameAction(int nTargetNode, String sOldName, String sNewName) {
    m_nTargetNode = nTargetNode;
    m_sNewName = sNewName;
    m_sOldName = sOldName;
  } // c'tor

  @Override
  public void undo() {
    setNodeName(m_nTargetNode, m_sOldName);
  } // undo

  @Override
  public void redo() {
    setNodeName(m_nTargetNode, m_sNewName);
  } // redo
} // class RenameAction

/** undo action recording the renaming of a node value */
class RenameValueAction extends RenameAction {

  /** for serialization */
  static final long serialVersionUID = 1;

  RenameValueAction(int nTargetNode, String sOldName, String sNewName) {
    super(nTargetNode, sOldName, sNewName);
  } // c'tor

  @Override
  public void undo() {
    renameNodeValue(m_nTargetNode, m_sNewName, m_sOldName);
  } // undo

  @Override
  public void redo() {
    renameNodeValue(m_nTargetNode, m_sOldName, m_sNewName);
  } // redo

  @Override
  public String getUndoMsg() {
    return "Value of node " + getNodeName(m_nTargetNode) + " changed from "
      + m_sNewName + " to " + m_sOldName;
  }

  @Override
  public String getRedoMsg() {
    return "Value of node " + getNodeName(m_nTargetNode) + " changed from "
      + m_sOldName + " to " + m_sNewName;
  }
} // class RenameValueAction

/** undo action recording the addition of a value to a node */
class AddValueAction extends UndoAction {

  /** for serialization */
  static final long serialVersionUID = 1;

  int m_nTargetNode;
  String m_sValue;

  AddValueAction(int nTargetNode, String sValue) {
    m_nTargetNode = nTargetNode;
    m_sValue = sValue;
  } // c'tor

  @Override
  public void undo() {
    try {
      delNodeValue(m_nTargetNode, m_sValue);
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // undo

  @Override
  public void redo() {
    addNodeValue(m_nTargetNode, m_sValue);
  } // redo

  @Override
  public String getUndoMsg() {
    return "Value " + m_sValue + " removed from node "
      + getNodeName(m_nTargetNode);
  }

  @Override
  public String getRedoMsg() {
    return "Value " + m_sValue + " added to node "
      + getNodeName(m_nTargetNode);
  }
} // class AddValueAction

/**
 * undo action recording the deletion of a value from a node; snapshots the
 * attribute, the node's CPT and the CPTs of all its children
 */
class DelValueAction extends UndoAction {

  /** for serialization */
  static final long serialVersionUID = 1;

  int m_nTargetNode;
  String m_sValue;
  Estimator[] m_CPT;
  ArrayList<Integer> m_children;
  Estimator[][] m_childAtts;
  Attribute m_att;

  DelValueAction(int nTargetNode, String sValue) {
    try {
      m_nTargetNode = nTargetNode;
      m_sValue = sValue;
      m_att = m_Instances.attribute(nTargetNode);
      SerializedObject so = new SerializedObject(m_Distributions[nTargetNode]);
      m_CPT = (Estimator[]) so.getObject();
      ;
      m_children = new ArrayList<Integer>();
      for (int iNode = 0; iNode < getNrOfNodes(); iNode++) {
        if (m_ParentSets[iNode].contains(nTargetNode)) {
          m_children.add(iNode);
        }
      }
      // NOTE(review): child CPTs are stored by reference here, unlike the
      // serialized deep copy of m_CPT above — verify this is intentional
      m_childAtts = new Estimator[m_children.size()][];
      for (int iChild = 0; iChild < m_children.size(); iChild++) {
        int nChild = m_children.get(iChild);
        m_childAtts[iChild] = m_Distributions[nChild];
      }
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // c'tor

  @Override
  public void undo() {
    try {
      m_Instances.insertAttributeAt(m_att, m_nTargetNode);
      SerializedObject so = new SerializedObject(m_CPT);
      m_Distributions[m_nTargetNode] = (Estimator[]) so.getObject();
      for (int iChild = 0; iChild < m_children.size(); iChild++) {
        int nChild = m_children.get(iChild);
        // NOTE(review): this re-inserts the target attribute once per
        // child, on top of the insert above — looks like a copy-paste
        // slip; confirm against delNodeValue's behavior before changing
        m_Instances.insertAttributeAt(m_att, m_nTargetNode);
        so = new SerializedObject(m_childAtts[iChild]);
        m_Distributions[nChild] = (Estimator[]) so.getObject();
      }
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // undo

  @Override
  public void redo() {
    try {
      delNodeValue(m_nTargetNode, m_sValue);
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // redo

  @Override
  public String getUndoMsg() {
    return "Value " + m_sValue + " added to node "
      + getNodeName(m_nTargetNode);
  }

  @Override
  public String getRedoMsg() {
    return "Value " + m_sValue + " removed from node "
      + getNodeName(m_nTargetNode);
  }
} // class DelValueAction

/**
 * base class for layout undo actions; snapshots the positions of the
 * selected nodes so any alignment/centering/spacing can be undone
 */
class alignAction extends UndoAction {

  /** for serialization */
  static final long serialVersionUID = 1;

  ArrayList<Integer> m_nodes;
  ArrayList<Integer> m_posX;
  ArrayList<Integer> m_posY;

  alignAction(ArrayList<Integer> nodes) {
    m_nodes = new ArrayList<Integer>(nodes.size());
    m_posX = new ArrayList<Integer>(nodes.size());
    m_posY = new ArrayList<Integer>(nodes.size());
    for (int iNode = 0; iNode < nodes.size(); iNode++) {
      int nNode = nodes.get(iNode);
      m_nodes.add(nNode);
      m_posX.add(getPositionX(nNode));
      m_posY.add(getPositionY(nNode));
    }
  } // c'tor

  @Override
  public void undo() {
    try {
      for (int iNode = 0; iNode < m_nodes.size(); iNode++) {
        int nNode = m_nodes.get(iNode);
        setPosition(nNode, m_posX.get(iNode), m_posY.get(iNode));
      }
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // undo
} // class alignAction

class alignLeftAction extends alignAction {

  /** for serialization */
  static final long serialVersionUID = 1;

  public alignLeftAction(ArrayList<Integer> nodes) {
    super(nodes);
  } // c'tor

  @Override
  public void redo() {
    try {
      alignLeft(m_nodes);
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // redo

  @Override
  public String getUndoMsg() {
    return "Returning " + m_nodes.size() + " from aliging nodes to the left.";
  }

  @Override
  public String getRedoMsg() {
    return "Aligning " + m_nodes.size() + " nodes to the left.";
  }
} // class alignLeftAction

class alignRightAction extends alignAction {

  /** for serialization */
  static final long serialVersionUID = 1;

  public alignRightAction(ArrayList<Integer> nodes) {
    super(nodes);
  } // c'tor

  @Override
  public void redo() {
    try {
      alignRight(m_nodes);
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // redo

  @Override
  public String getUndoMsg() {
    return "Returning " + m_nodes.size()
      + " from aliging nodes to the right.";
  }

  @Override
  public String getRedoMsg() {
    return "Aligning " + m_nodes.size() + " nodes to the right.";
  }
} // class alignRightAction

class alignTopAction extends alignAction {

  /** for serialization */
  static final long serialVersionUID = 1;

  public alignTopAction(ArrayList<Integer> nodes) {
    super(nodes);
  } // c'tor

  @Override
  public void redo() {
    try {
      alignTop(m_nodes);
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // redo

  @Override
  public String getUndoMsg() {
    return "Returning " + m_nodes.size() + " from aliging nodes to the top.";
  }

  @Override
  public String getRedoMsg() {
    return "Aligning " + m_nodes.size() + " nodes to the top.";
  }
} // class alignTopAction

class alignBottomAction extends alignAction {

  /** for serialization */
  static final long serialVersionUID = 1;

  public alignBottomAction(ArrayList<Integer> nodes) {
    super(nodes);
  } // c'tor

  @Override
  public void redo() {
    try {
      alignBottom(m_nodes);
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // redo

  @Override
  public String getUndoMsg() {
    return "Returning " + m_nodes.size()
      + " from aliging nodes to the bottom.";
  }

  @Override
  public String getRedoMsg() {
    return "Aligning " + m_nodes.size() + " nodes to the bottom.";
  }
} // class alignBottomAction

class centerHorizontalAction extends alignAction {

  /** for serialization */
  static final long serialVersionUID = 1;

  public centerHorizontalAction(ArrayList<Integer> nodes) {
    super(nodes);
  } // c'tor

  @Override
  public void redo() {
    try {
      centerHorizontal(m_nodes);
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // redo

  @Override
  public String getUndoMsg() {
    return "Returning " + m_nodes.size() + " from centering horizontally.";
  }

  @Override
  public String getRedoMsg() {
    return "Centering " + m_nodes.size() + " nodes horizontally.";
  }
} // class centerHorizontalAction

class centerVerticalAction extends alignAction {

  /** for serialization */
  static final long serialVersionUID = 1;

  public centerVerticalAction(ArrayList<Integer> nodes) {
    super(nodes);
  } // c'tor

  @Override
  public void redo() {
    try {
      centerVertical(m_nodes);
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // redo

  @Override
  public String getUndoMsg() {
    return "Returning " + m_nodes.size() + " from centering vertically.";
  }

  @Override
  public String getRedoMsg() {
    return "Centering " + m_nodes.size() + " nodes vertically.";
  }
} // class centerVerticalAction

class spaceHorizontalAction extends alignAction {

  /** for serialization */
  static final long serialVersionUID = 1;

  public spaceHorizontalAction(ArrayList<Integer> nodes) {
    super(nodes);
  } // c'tor

  @Override
  public void redo() {
    try {
      spaceHorizontal(m_nodes);
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // redo

  @Override
  public String getUndoMsg() {
    return "Returning " + m_nodes.size() + " from spaceing horizontally.";
  }

  @Override
  public String getRedoMsg() {
    return "spaceing " + m_nodes.size() + " nodes horizontally.";
  }
} // class spaceHorizontalAction

class spaceVerticalAction extends alignAction {

  /** for serialization */
  static final long serialVersionUID = 1;

  public spaceVerticalAction(ArrayList<Integer> nodes) {
    super(nodes);
  } // c'tor

  @Override
  public void redo() {
    try {
      spaceVertical(m_nodes);
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // redo

  @Override
  public String getUndoMsg() {
    return "Returning " + m_nodes.size() + " from spaceng vertically.";
  }

  @Override
  public String getRedoMsg() {
    return "Spaceng " + m_nodes.size() + " nodes vertically.";
  }
} // class spaceVerticalAction

/** undo action recording the repositioning of a single node */
class SetPositionAction extends UndoAction {

  /** for serialization */
  static final long serialVersionUID = 1;

  int m_nTargetNode;
  int m_nX;
  int m_nY;
  int m_nX2;
  int m_nY2;

  SetPositionAction(int nTargetNode, int nX, int nY) {
    m_nTargetNode = nTargetNode;
    m_nX2 = nX;
    m_nY2 = nY;
    m_nX = getPositionX(nTargetNode);
    m_nY = getPositionY(nTargetNode);
  } // c'tor

  @Override
  public void undo() {
    setPosition(m_nTargetNode, m_nX, m_nY);
  } // undo

  @Override
  public void redo() {
    setPosition(m_nTargetNode, m_nX2, m_nY2);
  } // redo

  // updates the target (redo) position of this action — presumably used
  // while a node is still being dragged; verify against callers
  public void setUndoPosition(int nX, int nY) {
    m_nX2 = nX;
    m_nY2 = nY;
  } // setPosition
} // class SetPositionAction

/** undo action recording the translation of a group of nodes by (dX, dY) */
class SetGroupPositionAction extends UndoAction {

  /** for serialization */
  static final long serialVersionUID = 1;

  ArrayList<Integer> m_nodes;
  int m_dX;
  int m_dY;

  SetGroupPositionAction(ArrayList<Integer> nodes, int dX, int dY) {
    m_nodes = new ArrayList<Integer>(nodes.size());
    for (int iNode = 0; iNode < nodes.size(); iNode++) {
      m_nodes.add(nodes.get(iNode));
    }
    m_dX = dX;
    m_dY = dY;
  } // c'tor

  @Override
  public void undo() {
    for (int iNode = 0; iNode < m_nodes.size(); iNode++) {
      int nNode = m_nodes.get(iNode);
      setPosition(nNode, getPositionX(nNode) - m_dX,
        getPositionY(nNode) - m_dY);
    }
  } // undo

  @Override
  public void redo() {
    for (int iNode = 0; iNode < m_nodes.size(); iNode++) {
      int nNode = m_nodes.get(iNode);
      setPosition(nNode, getPositionX(nNode) + m_dX,
        getPositionY(nNode) + m_dY);
    }
  } // redo

  // accumulates an extra offset into this action's recorded translation
  public void setUndoPosition(int dX, int dY) {
    m_dX += dX;
    m_dY += dY;
  } // setPosition
} // class SetGroupPositionAction

/**
 * undo action recording a full graph layout; snapshots the old positions of
 * all nodes and the new positions produced by the layout engine
 */
class LayoutGraphAction extends UndoAction {

  /** for serialization */
  static final long serialVersionUID = 1;

  ArrayList<Integer> m_nPosX;
  ArrayList<Integer> m_nPosY;
  ArrayList<Integer> m_nPosX2;
  ArrayList<Integer> m_nPosY2;

  LayoutGraphAction(ArrayList<Integer> nPosX, ArrayList<Integer> nPosY) {
    m_nPosX = new ArrayList<Integer>(nPosX.size());
    m_nPosY = new ArrayList<Integer>(nPosX.size());
    m_nPosX2 = new ArrayList<Integer>(nPosX.size());
    m_nPosY2 = new ArrayList<Integer>(nPosX.size());
    for (int iNode = 0; iNode < nPosX.size(); iNode++) {
      // old positions come from the current member lists, new ones from
      // the arguments
      m_nPosX.add(m_nPositionX.get(iNode));
      m_nPosY.add(m_nPositionY.get(iNode));
      m_nPosX2.add(nPosX.get(iNode));
      m_nPosY2.add(nPosY.get(iNode));
    }
  } // c'tor

  @Override
  public void undo() {
    for (int iNode = 0; iNode < m_nPosX.size(); iNode++) {
      setPosition(iNode, m_nPosX.get(iNode), m_nPosY.get(iNode));
    }
  } // undo

  @Override
  public void redo() {
    for (int iNode = 0; iNode < m_nPosX.size(); iNode++) {
      setPosition(iNode, m_nPosX2.get(iNode), m_nPosY2.get(iNode));
    }
  } // redo
} // class LayoutGraphAction

/**
 * undo action recording a paste of an XMLBIF fragment; nodes pasted at or
 * beyond index m_nBase are deleted on undo and re-pasted on redo
 */
class PasteAction extends UndoAction {

  /** for serialization */
  static final long serialVersionUID = 1;

  int m_nBase;
  String m_sXML;

  PasteAction(String sXML, int nBase) {
    m_sXML = sXML;
    m_nBase = nBase;
  } // c'tor

  @Override
  public void undo() {
    try {
      int iNode = getNrOfNodes() - 1;
      while (iNode >= m_nBase) {
        deleteNode(iNode);
        iNode--;
      }
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // undo

  @Override
  public void redo() {
    try {
      paste(m_sXML, EXECUTE);
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // redo
} // class PasteAction

/**
 * Returns the revision string.
 *
 * @return the revision
 */
@Override
public String getRevision() {
  return RevisionUtils.extract("$Revision$");
}

/**
 * @param args command line arguments (unused)
 */
public static void main(String[] args) {
} // main
} // class EditableBayesNet
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/GUI.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * GUI.java * Copyright (C) 2007-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net; import java.awt.BorderLayout; import java.awt.Color; import java.awt.Container; import java.awt.Dimension; import java.awt.Font; import java.awt.FontMetrics; import java.awt.Frame; import java.awt.Graphics; import java.awt.Graphics2D; import java.awt.GridBagConstraints; import java.awt.GridBagLayout; import java.awt.GridLayout; import java.awt.Insets; import java.awt.Rectangle; import java.awt.RenderingHints; import java.awt.event.ActionEvent; import java.awt.event.ActionListener; import java.awt.event.MouseAdapter; import java.awt.event.MouseEvent; import java.awt.event.MouseMotionAdapter; import java.awt.event.WindowAdapter; import java.awt.event.WindowEvent; import java.awt.image.BufferedImage; import java.awt.print.PageFormat; import java.awt.print.Printable; import java.awt.print.PrinterException; import java.awt.print.PrinterJob; import java.beans.PropertyEditor; import java.io.File; import java.io.FileReader; import java.io.FileWriter; import java.io.IOException; import java.util.ArrayList; import java.util.Random; import javax.swing.*; import javax.swing.table.AbstractTableModel; import weka.classifiers.bayes.net.MarginCalculator.JunctionTreeNode; import weka.core.Instances; import weka.core.OptionHandler; 
import weka.core.SerializedObject; import weka.core.Utils; import weka.core.converters.AbstractFileLoader; import weka.core.converters.AbstractFileSaver; import weka.core.converters.ArffSaver; import weka.core.converters.ConverterUtils; import weka.gui.ConverterFileChooser; import weka.gui.ExtensionFileFilter; import weka.gui.GenericObjectEditor; import weka.gui.LookAndFeel; import weka.gui.PropertyDialog; import weka.gui.graphvisualizer.BIFFormatException; import weka.gui.graphvisualizer.BIFParser; import weka.gui.graphvisualizer.GraphEdge; import weka.gui.graphvisualizer.GraphNode; import weka.gui.graphvisualizer.HierarchicalBCEngine; import weka.gui.graphvisualizer.LayoutCompleteEvent; import weka.gui.graphvisualizer.LayoutCompleteEventListener; import weka.gui.graphvisualizer.LayoutEngine; import weka.gui.visualize.PrintablePanel; /** * GUI interface to Bayesian Networks. Allows editing Bayesian networks on * screen and provides GUI interface to various Bayesian network facilities in * Weka, including random network generation, data set generation and Bayesion * network inference. 
* * @author Remco Bouckaert (remco@cs.waikato.ac.nz) * @version $Revision$ */ public class GUI extends JPanel implements LayoutCompleteEventListener { /** for serialization */ private static final long serialVersionUID = -2038911085935515624L; /** The current LayoutEngine */ protected LayoutEngine m_layoutEngine; /** Panel actually displaying the graph */ protected GraphPanel m_GraphPanel; /** Container of Bayesian network */ EditableBayesNet m_BayesNet = new EditableBayesNet(true); /** String containing file name storing current network */ protected String m_sFileName = ""; /** used for calculating marginals in Bayesian netwowrks */ MarginCalculator m_marginCalculator = null; /** * used for calculating marginals in Bayesian netwowrks when evidence is * present */ MarginCalculator m_marginCalculatorWithEvidence = null; /** * flag indicating whether marginal distributions of each of the nodes should * be shown in display. */ boolean m_bViewMargins = false; boolean m_bViewCliques = false; /** The menu bar */ private JMenuBar m_menuBar; /** data selected from file. 
Used to train a Bayesian network on */ Instances m_Instances = null; /** Text field for specifying zoom */ final JTextField m_jTfZoom; /** toolbar containing buttons at top of window */ final JToolBar m_jTbTools; /** status bar at bottom of window */ final JLabel m_jStatusBar; /** TextField for node's width */ private final JTextField m_jTfNodeWidth = new JTextField(3); /** TextField for nodes height */ private final JTextField m_jTfNodeHeight = new JTextField(3); /** this contains the m_GraphPanel GraphPanel */ JScrollPane m_jScrollPane; /** path for icons */ private final String ICONPATH = "weka/classifiers/bayes/net/icons/"; /** current zoom value */ private double m_fScale = 1; /** standard width of node */ private int m_nNodeHeight = 2 * getFontMetrics(getFont()).getHeight(); /** standard height of node */ final static int DEFAULT_NODE_WIDTH = 50; private int m_nNodeWidth = DEFAULT_NODE_WIDTH; /** width of node, allowing for some padding */ final static int PADDING = 10; private int m_nPaddedNodeWidth = DEFAULT_NODE_WIDTH + PADDING; /** used when using zoomIn and zoomOut buttons */ private final int[] m_nZoomPercents = { 10, 25, 50, 75, 100, 125, 150, 175, 200, 225, 250, 275, 300, 350, 400, 450, 500, 550, 600, 650, 700, 800, 900, 999 }; /** actions triggered by GUI events */ Action a_new = new ActionNew(); Action a_quit = new ActionQuit(); Action a_save = new ActionSave(); ActionExport a_export = new ActionExport(); ActionPrint a_print = new ActionPrint(); Action a_load = new ActionLoad(); Action a_zoomin = new ActionZoomIn(); Action a_zoomout = new ActionZoomOut(); Action a_layout = new ActionLayout(); Action a_saveas = new ActionSaveAs(); Action a_viewtoolbar = new ActionViewToolbar(); Action a_viewstatusbar = new ActionViewStatusbar(); Action a_networkgenerator = new ActionGenerateNetwork(); Action a_datagenerator = new ActionGenerateData(); Action a_datasetter = new ActionSetData(); Action a_learn = new ActionLearn(); Action a_learnCPT = new 
ActionLearnCPT(); Action a_help = new ActionHelp(); Action a_about = new ActionAbout(); ActionAddNode a_addnode = new ActionAddNode(); Action a_delnode = new ActionDeleteNode(); Action a_cutnode = new ActionCutNode(); Action a_copynode = new ActionCopyNode(); Action a_pastenode = new ActionPasteNode(); Action a_selectall = new ActionSelectAll(); Action a_addarc = new ActionAddArc(); Action a_delarc = new ActionDeleteArc(); Action a_undo = new ActionUndo(); Action a_redo = new ActionRedo(); Action a_alignleft = new ActionAlignLeft(); Action a_alignright = new ActionAlignRight(); Action a_aligntop = new ActionAlignTop(); Action a_alignbottom = new ActionAlignBottom(); Action a_centerhorizontal = new ActionCenterHorizontal(); Action a_centervertical = new ActionCenterVertical(); Action a_spacehorizontal = new ActionSpaceHorizontal(); Action a_spacevertical = new ActionSpaceVertical(); /** node currently selected through right clicking */ int m_nCurrentNode = -1; /** selection of nodes */ Selection m_Selection = new Selection(); /** selection rectangle drawn through dragging with left mouse button */ Rectangle m_nSelectedRect = null; class Selection { ArrayList<Integer> m_selected; public Selection() { m_selected = new ArrayList<Integer>(); } // c'tor public ArrayList<Integer> getSelected() { return m_selected; } void updateGUI() { if (m_selected.size() > 0) { a_cutnode.setEnabled(true); a_copynode.setEnabled(true); } else { a_cutnode.setEnabled(false); a_copynode.setEnabled(false); } if (m_selected.size() > 1) { a_alignleft.setEnabled(true); a_alignright.setEnabled(true); a_aligntop.setEnabled(true); a_alignbottom.setEnabled(true); a_centerhorizontal.setEnabled(true); a_centervertical.setEnabled(true); a_spacehorizontal.setEnabled(true); a_spacevertical.setEnabled(true); } else { a_alignleft.setEnabled(false); a_alignright.setEnabled(false); a_aligntop.setEnabled(false); a_alignbottom.setEnabled(false); a_centerhorizontal.setEnabled(false); 
a_centervertical.setEnabled(false); a_spacehorizontal.setEnabled(false); a_spacevertical.setEnabled(false); } } // updateGUI public void addToSelection(int nNode) { for (int iNode = 0; iNode < m_selected.size(); iNode++) { if (nNode == m_selected.get(iNode)) { return; } } m_selected.add(nNode); updateGUI(); } // addToSelection public void addToSelection(int[] iNodes) { for (int iNode2 : iNodes) { addToSelection(iNode2); } updateGUI(); } // addToSelection public void addToSelection(Rectangle selectedRect) { for (int iNode = 0; iNode < m_BayesNet.getNrOfNodes(); iNode++) { if (contains(selectedRect, iNode)) { addToSelection(iNode); } } } // addToSelection public void selectAll() { m_selected.clear(); for (int iNode = 0; iNode < m_BayesNet.getNrOfNodes(); iNode++) { m_selected.add(iNode); } updateGUI(); } // selectAll boolean contains(Rectangle rect, int iNode) { return rect.intersects((m_BayesNet.getPositionX(iNode)) * m_fScale, (m_BayesNet.getPositionY(iNode)) * m_fScale, m_nPaddedNodeWidth * m_fScale, m_nNodeHeight * m_fScale); } // contains public void removeFromSelection(int nNode) { for (int iNode = 0; iNode < m_selected.size(); iNode++) { if (nNode == m_selected.get(iNode)) { m_selected.remove(iNode); } } updateGUI(); } // removeFromSelection public void toggleSelection(int nNode) { for (int iNode = 0; iNode < m_selected.size(); iNode++) { if (nNode == m_selected.get(iNode)) { m_selected.remove(iNode); updateGUI(); return; } } addToSelection(nNode); } // toggleSelection public void toggleSelection(Rectangle selectedRect) { for (int iNode = 0; iNode < m_BayesNet.getNrOfNodes(); iNode++) { if (contains(selectedRect, iNode)) { toggleSelection(iNode); } } } // toggleSelection public void clear() { m_selected.clear(); updateGUI(); } public void draw(Graphics g) { if (m_selected.size() == 0) { return; } for (int iNode = 0; iNode < m_selected.size(); iNode++) { int nNode = m_selected.get(iNode); int nPosX = m_BayesNet.getPositionX(nNode); int nPosY = 
m_BayesNet.getPositionY(nNode); g.setColor(Color.BLACK); int nXRC = nPosX + m_nPaddedNodeWidth - m_nNodeWidth - (m_nPaddedNodeWidth - m_nNodeWidth) / 2; int nYRC = nPosY; int d = 5; g.fillRect(nXRC, nYRC, d, d); g.fillRect(nXRC, nYRC + m_nNodeHeight, d, d); g.fillRect(nXRC + m_nNodeWidth, nYRC, d, d); g.fillRect(nXRC + m_nNodeWidth, nYRC + m_nNodeHeight, d, d); } } // draw } // Selection ClipBoard m_clipboard = new ClipBoard(); class ClipBoard { String m_sText = null; public ClipBoard() { if (a_pastenode != null) { a_pastenode.setEnabled(false); } } public boolean hasText() { return m_sText != null; } public String getText() { return m_sText; } public void setText(String sText) { m_sText = sText; a_pastenode.setEnabled(true); } } // class ClipBoard /** * Base class used for definining actions with a name, tool tip text, possibly * an icon and accelerator key. * */ class MyAction extends AbstractAction { /** for serialization */ private static final long serialVersionUID = -2038911111935517L; public MyAction(String sName, String sToolTipText, String sIcon, String sAcceleratorKey) { super(sName); // setToolTipText(sToolTipText); putValue(Action.SHORT_DESCRIPTION, sToolTipText); putValue(Action.LONG_DESCRIPTION, sToolTipText); if (sAcceleratorKey.length() > 0) { KeyStroke keyStroke = KeyStroke.getKeyStroke(sAcceleratorKey); putValue(Action.ACCELERATOR_KEY, keyStroke); } putValue(Action.MNEMONIC_KEY, (int) sName.charAt(0)); java.net.URL tempURL = ClassLoader.getSystemResource(ICONPATH + sIcon + ".png"); if (tempURL != null) { putValue(Action.SMALL_ICON, new ImageIcon(tempURL)); } else { putValue(Action.SMALL_ICON, new ImageIcon(new BufferedImage(20, 20, BufferedImage.TYPE_4BYTE_ABGR))); // System.err.println(ICONPATH + sIcon + // ".png not found for weka.gui.graphvisualizer.Graph"); } } // c'tor /* * Place holder. Should be implemented by derived classes. 
(non-Javadoc) * * @see * java.awt.event.ActionListener#actionPerformed(java.awt.event.ActionEvent) */ @Override public void actionPerformed(ActionEvent ae) { } } // class MyAction class ActionGenerateNetwork extends MyAction { /** for serialization */ private static final long serialVersionUID = -2038911085935517L; public ActionGenerateNetwork() { super("Generate Network", "Generate Random Bayesian Network", "generate.network", "ctrl N"); } // c'tor int m_nNrOfNodes = 10; int m_nNrOfArcs = 15; int m_nCardinality = 2; int m_nSeed = 123; JDialog dlg = null; @Override public void actionPerformed(ActionEvent ae) { if (dlg == null) { dlg = new JDialog(SwingUtilities.getWindowAncestor(GUI.this)); dlg.setTitle("Generate Random Bayesian Network Options"); final JLabel jLbNrOfNodes = new JLabel("Nr of nodes"); final JTextField jTfNrOfNodes = new JTextField(3); jTfNrOfNodes.setHorizontalAlignment(JTextField.CENTER); jTfNrOfNodes.setText("" + m_nNrOfNodes); final JLabel jLbNrOfArcs = new JLabel("Nr of arcs"); final JTextField jTfNrOfArcs = new JTextField(3); jTfNrOfArcs.setHorizontalAlignment(JTextField.CENTER); jTfNrOfArcs.setText("" + m_nNrOfArcs); final JLabel jLbCardinality = new JLabel("Cardinality"); final JTextField jTfCardinality = new JTextField(3); jTfCardinality.setHorizontalAlignment(JTextField.CENTER); jTfCardinality.setText("" + m_nCardinality); final JLabel jLbSeed = new JLabel("Random seed"); final JTextField jTfSeed = new JTextField(3); jTfSeed.setHorizontalAlignment(JTextField.CENTER); jTfSeed.setText("" + m_nSeed); JButton jBtGo; jBtGo = new JButton("Generate Network"); jBtGo.addActionListener(new ActionListener() { @Override public void actionPerformed(ActionEvent ae) { try { BayesNetGenerator generator = new BayesNetGenerator(); m_BayesNet = generator; m_BayesNet.clearUndoStack(); String[] options = new String[8]; options[0] = "-N"; options[1] = "" + jTfNrOfNodes.getText(); options[2] = "-A"; options[3] = "" + jTfNrOfArcs.getText(); options[4] = "-C"; 
options[5] = "" + jTfCardinality.getText(); options[6] = "-S"; options[7] = "" + jTfSeed.getText(); generator.setOptions(options); generator.generateRandomNetwork(); // Convert to EditableBayesNet // This ensures the getOptions() called by GenericObjectEditor to // get the correct result. BIFReader bifReader = new BIFReader(); bifReader.processString(m_BayesNet.toXMLBIF03()); m_BayesNet = new EditableBayesNet(bifReader); updateStatus(); layoutGraph(); a_datagenerator.setEnabled(true); m_Instances = null; ; a_learn.setEnabled(false); a_learnCPT.setEnabled(false); dlg.setVisible(false); } catch (Exception e) { e.printStackTrace(); } } }); JButton jBtCancel; jBtCancel = new JButton("Cancel"); jBtCancel.setMnemonic('C'); jBtCancel.addActionListener(new ActionListener() { @Override public void actionPerformed(ActionEvent ae) { dlg.setVisible(false); } }); GridBagConstraints gbc = new GridBagConstraints(); dlg.setLayout(new GridBagLayout()); Container c = new Container(); c.setLayout(new GridBagLayout()); gbc.gridwidth = 2; gbc.insets = new Insets(8, 0, 0, 0); gbc.anchor = GridBagConstraints.NORTHWEST; gbc.gridwidth = GridBagConstraints.RELATIVE; gbc.fill = GridBagConstraints.HORIZONTAL; c.add(jLbNrOfNodes, gbc); gbc.gridwidth = GridBagConstraints.REMAINDER; c.add(jTfNrOfNodes, gbc); gbc.gridwidth = GridBagConstraints.RELATIVE; c.add(jLbNrOfArcs, gbc); gbc.gridwidth = GridBagConstraints.REMAINDER; c.add(jTfNrOfArcs, gbc); gbc.gridwidth = GridBagConstraints.RELATIVE; c.add(jLbCardinality, gbc); gbc.gridwidth = GridBagConstraints.REMAINDER; c.add(jTfCardinality, gbc); gbc.gridwidth = GridBagConstraints.RELATIVE; c.add(jLbSeed, gbc); gbc.gridwidth = GridBagConstraints.REMAINDER; c.add(jTfSeed, gbc); gbc.fill = GridBagConstraints.HORIZONTAL; dlg.add(c, gbc); dlg.add(jBtGo); gbc.gridwidth = GridBagConstraints.REMAINDER; dlg.add(jBtCancel); } //dlg.pack(); //dlg.setLocation(100, 100); //dlg.setVisible(true); dlg.setSize(450, 350); //dlg.setVisible(false); dlg.pack(); 
dlg.setLocationRelativeTo(SwingUtilities.getWindowAncestor(GUI.this)); dlg.setVisible(true); //dlg.repaint(); } // actionPerformed } // class ActionGenerate class ActionGenerateData extends MyAction { /** for serialization */ private static final long serialVersionUID = -2038911085935516L; public ActionGenerateData() { super("Generate Data", "Generate Random Instances from Network", "generate.data", "ctrl D"); } // c'tor int m_nNrOfInstances = 100; int m_nSeed = 1234; String m_sFile = ""; JDialog dlg = null; @Override public void actionPerformed(ActionEvent ae) { if (dlg == null) { dlg = new JDialog(SwingUtilities.getWindowAncestor(GUI.this)); dlg.setTitle("Generate Random Data Options"); final JLabel jLbNrOfInstances = new JLabel("Nr of instances"); final JTextField jTfNrOfInstances = new JTextField(3); jTfNrOfInstances.setHorizontalAlignment(JTextField.CENTER); jTfNrOfInstances.setText("" + m_nNrOfInstances); final JLabel jLbSeed = new JLabel("Random seed"); final JTextField jTfSeed = new JTextField(3); jTfSeed.setHorizontalAlignment(JTextField.CENTER); jTfSeed.setText("" + m_nSeed); final JLabel jLbFile = new JLabel("Output file (optional)"); final JTextField jTfFile = new JTextField(12); jTfFile.setHorizontalAlignment(JTextField.CENTER); jTfFile.setText(m_sFile); JButton jBtGo; jBtGo = new JButton("Generate Data"); jBtGo.addActionListener(new ActionListener() { @Override public void actionPerformed(ActionEvent ae) { try { String tmpfilename = "tmp.bif.file.xml"; BayesNetGenerator generator = new BayesNetGenerator(); String[] options = new String[4]; options[0] = "-M"; options[1] = "" + jTfNrOfInstances.getText(); options[2] = "-F"; options[3] = tmpfilename; FileWriter outfile = new FileWriter(tmpfilename); StringBuffer text = new StringBuffer(); if (m_marginCalculator == null) { m_marginCalculator = new MarginCalculator(); m_marginCalculator.calcMargins(m_BayesNet); } text.append(m_marginCalculator.toXMLBIF03()); outfile.write(text.toString()); outfile.close(); 
generator.setOptions(options); generator.generateRandomNetwork(); generator.generateInstances(); m_Instances = generator.m_Instances; a_learn.setEnabled(true); a_learnCPT.setEnabled(true); m_sFile = jTfFile.getText(); if (m_sFile != null && !m_sFile.equals("")) { AbstractFileSaver saver = ConverterUtils .getSaverForFile(m_sFile); // no idea what the format is, so let's save it as ARFF file if (saver == null) { saver = new ArffSaver(); } saver.setFile(new File(m_sFile)); saver.setInstances(m_Instances); saver.writeBatch(); } } catch (Exception e) { e.printStackTrace(); } dlg.setVisible(false); } }); JButton jBtFile = new JButton("Browse"); jBtFile.addActionListener(new ActionListener() { @Override public void actionPerformed(ActionEvent ae) { ConverterFileChooser fc = new ConverterFileChooser(System .getProperty("user.dir")); fc.setDialogTitle("Save Instances As"); int rval = fc.showSaveDialog(GUI.this); if (rval == JFileChooser.APPROVE_OPTION) { String filename = fc.getSelectedFile().toString(); jTfFile.setText(filename); } dlg.setVisible(true); } }); JButton jBtCancel; jBtCancel = new JButton("Cancel"); jBtCancel.setMnemonic('C'); jBtCancel.addActionListener(new ActionListener() { @Override public void actionPerformed(ActionEvent ae) { dlg.setVisible(false); } }); GridBagConstraints gbc = new GridBagConstraints(); dlg.setLayout(new GridBagLayout()); Container c = new Container(); c.setLayout(new GridBagLayout()); gbc.gridwidth = 2; gbc.insets = new Insets(8, 0, 0, 0); gbc.anchor = GridBagConstraints.NORTHWEST; gbc.gridwidth = GridBagConstraints.RELATIVE; gbc.fill = GridBagConstraints.HORIZONTAL; c.add(jLbNrOfInstances, gbc); gbc.gridwidth = GridBagConstraints.REMAINDER; c.add(jTfNrOfInstances, gbc); gbc.gridwidth = GridBagConstraints.RELATIVE; c.add(jLbSeed, gbc); gbc.gridwidth = GridBagConstraints.REMAINDER; c.add(jTfSeed, gbc); gbc.gridwidth = GridBagConstraints.RELATIVE; c.add(jLbFile, gbc); gbc.gridwidth = GridBagConstraints.REMAINDER; c.add(jTfFile, gbc); 
gbc.gridwidth = GridBagConstraints.REMAINDER; c.add(jBtFile, gbc); gbc.fill = GridBagConstraints.HORIZONTAL; dlg.add(c, gbc); dlg.add(jBtGo); gbc.gridwidth = GridBagConstraints.REMAINDER; dlg.add(jBtCancel); } //dlg.setLocation(100, 100); //dlg.setVisible(true); dlg.setSize(450, 350); //dlg.setVisible(false); dlg.pack(); dlg.setLocationRelativeTo(SwingUtilities.getWindowAncestor(GUI.this)); dlg.setVisible(true); //dlg.repaint(); } // actionPerformed } // class ActionGenerateData class ActionLearn extends MyAction { /** for serialization */ private static final long serialVersionUID = -2038911085935516L; public ActionLearn() { super("Learn Network", "Learn Bayesian Network", "learn", "ctrl L"); setEnabled(false); } // c'tor JDialog dlg = null; @Override public void actionPerformed(ActionEvent ae) { if (dlg == null) { dlg = new JDialog(SwingUtilities.getWindowAncestor(GUI.this)); dlg.setTitle("Learn Bayesian Network"); final JButton jBtOptions = new JButton("Options"); jBtOptions.addActionListener(new ActionListener() { @Override public void actionPerformed(ActionEvent ae) { // m_BayesNet = new EditableBayesNet(); try { GenericObjectEditor.registerEditors(); GenericObjectEditor ce = new GenericObjectEditor(true); ce.setClassType(weka.classifiers.Classifier.class); ce.setValue(m_BayesNet); PropertyDialog pd; if (PropertyDialog.getParentDialog(GUI.this) != null) { pd = new PropertyDialog(PropertyDialog .getParentDialog(GUI.this), ce, -1, -1); } else { pd = new PropertyDialog( PropertyDialog.getParentFrame(GUI.this), ce, -1, -1); } pd.addWindowListener(new WindowAdapter() { @Override public void windowClosing(WindowEvent e) { PropertyEditor pe = ((PropertyDialog) e.getSource()) .getEditor(); Object c = pe.getValue(); String options = ""; if (c instanceof OptionHandler) { options = Utils.joinOptions(((OptionHandler) c) .getOptions()); try { m_BayesNet.setOptions(((OptionHandler) c).getOptions()); } catch (Exception e2) { e2.printStackTrace(); } } 
System.out.println(c.getClass().getName() + " " + options); } }); pd.setVisible(true); } catch (Exception ex) { ex.printStackTrace(); System.err.println(ex.getMessage()); } m_BayesNet.clearUndoStack(); a_undo.setEnabled(false); a_redo.setEnabled(false); } }); final JTextField jTfOptions = new JTextField(40); jTfOptions.setHorizontalAlignment(JTextField.CENTER); jTfOptions.setText("" + Utils.joinOptions(m_BayesNet.getOptions())); JButton jBtGo; jBtGo = new JButton("Learn"); jBtGo.addActionListener(new ActionListener() { @Override public void actionPerformed(ActionEvent ae) { try { m_BayesNet.buildClassifier(m_Instances); layoutGraph(); updateStatus(); m_BayesNet.clearUndoStack(); dlg.setVisible(false); } catch (Exception e) { e.printStackTrace(); } dlg.setVisible(false); } }); JButton jBtCancel; jBtCancel = new JButton("Cancel"); jBtCancel.setMnemonic('C'); jBtCancel.addActionListener(new ActionListener() { @Override public void actionPerformed(ActionEvent ae) { dlg.setVisible(false); } }); GridBagConstraints gbc = new GridBagConstraints(); dlg.setLayout(new GridBagLayout()); Container c = new Container(); c.setLayout(new GridBagLayout()); gbc.gridwidth = 2; gbc.insets = new Insets(8, 0, 0, 0); gbc.anchor = GridBagConstraints.NORTHWEST; gbc.gridwidth = GridBagConstraints.RELATIVE; gbc.fill = GridBagConstraints.HORIZONTAL; c.add(jBtOptions, gbc); gbc.gridwidth = GridBagConstraints.REMAINDER; c.add(jTfOptions, gbc); gbc.fill = GridBagConstraints.HORIZONTAL; dlg.add(c, gbc); dlg.add(jBtGo); gbc.gridwidth = GridBagConstraints.REMAINDER; dlg.add(jBtCancel); } //dlg.setLocation(100, 100); //dlg.setVisible(true); dlg.setSize(450, 350); //dlg.setVisible(false); dlg.pack(); dlg.setLocationRelativeTo(SwingUtilities.getWindowAncestor(GUI.this)); dlg.setVisible(true); //dlg.repaint(); } // actionPerformed } // class ActionLearn class ActionLearnCPT extends MyAction { /** for serialization */ private static final long serialVersionUID = -2022211085935516L; public 
ActionLearnCPT() { super("Learn CPT", "Learn conditional probability tables", "learncpt", ""); setEnabled(false); } // c'tor @Override public void actionPerformed(ActionEvent ae) { if (m_Instances == null) { JOptionPane.showMessageDialog(null, "Select instances to learn from first (menu Tools/Set Data)"); return; } try { m_BayesNet.setData(m_Instances); } catch (Exception e) { JOptionPane.showMessageDialog(null, "Data set is not compatible with network.\n" + e.getMessage() + "\nChoose other instances (menu Tools/Set Data)"); return; } try { m_BayesNet.estimateCPTs(); m_BayesNet.clearUndoStack(); } catch (Exception e) { e.printStackTrace(); } updateStatus(); } // actionPerformed } // class ActionLearnCPT class ActionSetData extends MyAction { /** for serialization */ private static final long serialVersionUID = -2038911085935519L; public ActionSetData() { super("Set Data", "Set Data File", "setdata", "ctrl A"); } // c'tor @Override public void actionPerformed(ActionEvent ae) { ConverterFileChooser fc = new ConverterFileChooser( System.getProperty("user.dir")); fc.setDialogTitle("Set Data File"); int rval = fc.showOpenDialog(GUI.this); if (rval == JFileChooser.APPROVE_OPTION) { AbstractFileLoader loader = fc.getLoader(); try { if (loader != null) { m_Instances = loader.getDataSet(); } if (m_Instances.classIndex() == -1) { m_Instances.setClassIndex(m_Instances.numAttributes() - 1); } a_learn.setEnabled(true); a_learnCPT.setEnabled(true); repaint(); } catch (Exception e) { e.printStackTrace(); } } } } // class ActionSetData class ActionUndo extends MyAction { /** for serialization */ private static final long serialVersionUID = -3038910085935519L; public ActionUndo() { super("Undo", "Undo", "undo", "ctrl Z"); setEnabled(false); } // c'tor @Override public boolean isEnabled() { return m_BayesNet.canUndo(); } @Override public void actionPerformed(ActionEvent ae) { String sMsg = m_BayesNet.undo(); m_jStatusBar.setText("Undo action performed: " + sMsg); // if 
(!sMsg.equals("")) { // JOptionPane.showMessageDialog(null, sMsg, "Undo action successful", // JOptionPane.INFORMATION_MESSAGE); // } a_redo.setEnabled(m_BayesNet.canRedo()); a_undo.setEnabled(m_BayesNet.canUndo()); m_Selection.clear(); updateStatus(); repaint(); } } // ActionUndo class ActionRedo extends MyAction { /** for serialization */ private static final long serialVersionUID = -4038910085935519L; public ActionRedo() { super("Redo", "Redo", "redo", "ctrl Y"); setEnabled(false); } // c'tor @Override public boolean isEnabled() { return m_BayesNet.canRedo(); } @Override public void actionPerformed(ActionEvent ae) { String sMsg = m_BayesNet.redo(); m_jStatusBar.setText("Redo action performed: " + sMsg); // if (!sMsg.equals("")) { // JOptionPane.showMessageDialog(null, sMsg, "Redo action successful", // JOptionPane.INFORMATION_MESSAGE); // } m_Selection.clear(); updateStatus(); repaint(); } } // ActionRedo class ActionAddNode extends MyAction { /** for serialization */ private static final long serialVersionUID = -2038910085935519L; public ActionAddNode() { super("Add Node", "Add Node", "addnode", ""); } // c'tor JDialog dlg = null; JTextField jTfName = new JTextField(20); JTextField jTfCard = new JTextField(3); int m_X = Integer.MAX_VALUE; int m_Y; public void addNode(int nX, int nY) { m_X = nX; m_Y = nY; addNode(); } // addNode void addNode() { if (dlg == null) { dlg = new JDialog(SwingUtilities.getWindowAncestor(GUI.this)); dlg.setTitle("Add node"); JLabel jLbName = new JLabel("Name"); jTfName.setHorizontalAlignment(JTextField.CENTER); JLabel jLbCard = new JLabel("Cardinality"); jTfCard.setHorizontalAlignment(JTextField.CENTER); jTfCard.setText("2"); JButton jBtCancel; jBtCancel = new JButton("Cancel"); jBtCancel.setMnemonic('C'); jBtCancel.addActionListener(new ActionListener() { @Override public void actionPerformed(ActionEvent ae) { dlg.setVisible(false); } }); JButton jBtOk = new JButton("Ok"); jBtOk.setMnemonic('O'); jBtOk.addActionListener(new 
ActionListener() { @Override public void actionPerformed(ActionEvent ae) { String sName = jTfName.getText(); if (sName.length() <= 0) { JOptionPane.showMessageDialog(null, "Name should have at least one character"); return; } int nCard = new Integer(jTfCard.getText()).intValue(); if (nCard <= 1) { JOptionPane.showMessageDialog(null, "Cardinality should be larger than 1"); return; } try { if (m_X < Integer.MAX_VALUE) { m_BayesNet.addNode(sName, nCard, m_X, m_Y); } else { m_BayesNet.addNode(sName, nCard); } m_jStatusBar.setText(m_BayesNet.lastActionMsg()); a_undo.setEnabled(true); a_redo.setEnabled(false); // GraphNode n = new GraphNode("id" + m_nodes.size(), sName); // n.probs = m_BayesNet.getDistribution(sName); // n.outcomes = m_BayesNet.getValues(sName); // n.x = 100 + m_nodes.size() * 10; // n.y = 100 + m_nodes.size() * 10; // m_nodes.addElement(n); } catch (Exception e) { e.printStackTrace(); } repaint(); dlg.setVisible(false); } }); dlg.setLayout(new GridLayout(3, 2, 10, 10)); dlg.add(jLbName); dlg.add(jTfName); dlg.add(jLbCard); dlg.add(jTfCard); dlg.add(jBtOk); dlg.add(jBtCancel); dlg.setSize(450, 350); } jTfName.setText("Node" + (m_BayesNet.getNrOfNodes() + 1)); dlg.pack(); dlg.setLocationRelativeTo(SwingUtilities.getWindowAncestor(GUI.this)); dlg.setVisible(true); } // addNode @Override public void actionPerformed(ActionEvent ae) { m_X = Integer.MAX_VALUE; addNode(); } } // class ActionAddNode class ActionDeleteNode extends MyAction { /** for serialization */ private static final long serialVersionUID = -2038912085935519L; public ActionDeleteNode() { super("Delete Node", "Delete Node", "delnode", "DELETE"); } // c'tor @Override public void actionPerformed(ActionEvent ae) { if (m_Selection.getSelected().size() > 0) { m_BayesNet.deleteSelection(m_Selection.getSelected()); m_jStatusBar.setText(m_BayesNet.lastActionMsg()); m_Selection.clear(); updateStatus(); repaint(); } else { String[] options = new String[m_BayesNet.getNrOfNodes()]; for (int i = 0; i < 
options.length; i++) { options[i] = m_BayesNet.getNodeName(i); } String sResult = (String) JOptionPane.showInputDialog(null, "Select node to delete", "Nodes", 0, null, options, options[0]); if (sResult != null && !sResult.equals("")) { int iNode = m_BayesNet.getNode2(sResult); deleteNode(iNode); } } } } // class ActionDeleteNode class ActionCopyNode extends MyAction { /** for serialization */ private static final long serialVersionUID = -2038732085935519L; public ActionCopyNode() { super("Copy", "Copy Nodes", "copy", "ctrl C"); } // c'tor public ActionCopyNode(String sName, String sToolTipText, String sIcon, String sAcceleratorKey) { super(sName, sToolTipText, sIcon, sAcceleratorKey); } // c'rot @Override public void actionPerformed(ActionEvent ae) { copy(); } public void copy() { String sXML = m_BayesNet.toXMLBIF03(m_Selection.getSelected()); m_clipboard.setText(sXML); } // copy } // class ActionCopyNode class ActionCutNode extends ActionCopyNode { /** for serialization */ private static final long serialVersionUID = -2038822085935519L; public ActionCutNode() { super("Cut", "Cut Nodes", "cut", "ctrl X"); } // c'tor @Override public void actionPerformed(ActionEvent ae) { copy(); m_BayesNet.deleteSelection(m_Selection.getSelected()); m_jStatusBar.setText(m_BayesNet.lastActionMsg()); m_Selection.clear(); a_undo.setEnabled(true); a_redo.setEnabled(false); repaint(); } } // class ActionCutNode class ActionPasteNode extends MyAction { /** for serialization */ private static final long serialVersionUID = -2038732085935519L; public ActionPasteNode() { super("Paste", "Paste Nodes", "paste", "ctrl V"); } // c'tor @Override public void actionPerformed(ActionEvent ae) { try { m_BayesNet.paste(m_clipboard.getText()); updateStatus(); m_jStatusBar.setText(m_BayesNet.lastActionMsg()); } catch (Exception e) { e.printStackTrace(); } } @Override public boolean isEnabled() { return m_clipboard.hasText(); } } // class ActionPasteNode class ActionSelectAll extends MyAction { /** for 
serialization */ private static final long serialVersionUID = -2038642085935519L; public ActionSelectAll() { super("Select All", "Select All Nodes", "selectall", "ctrl A"); } // c'tor @Override public void actionPerformed(ActionEvent ae) { m_Selection.selectAll(); repaint(); } } // class ActionSelectAll class ActionExport extends MyAction { boolean m_bIsExporting = false; /** for serialization */ private static final long serialVersionUID = -3027642085935519L; public ActionExport() { super("Export", "Export to graphics file", "export", ""); } // c'tor @Override public void actionPerformed(ActionEvent ae) { m_bIsExporting = true; m_GraphPanel.saveComponent(); m_bIsExporting = false; repaint(); } public boolean isExporting() { return m_bIsExporting; } } // class ActionExport class ActionAlignLeft extends MyAction { /** for serialization */ private static final long serialVersionUID = -3138642085935519L; public ActionAlignLeft() { super("Align Left", "Align Left", "alignleft", ""); } // c'tor @Override public void actionPerformed(ActionEvent ae) { m_BayesNet.alignLeft(m_Selection.getSelected()); m_jStatusBar.setText(m_BayesNet.lastActionMsg()); a_undo.setEnabled(true); a_redo.setEnabled(false); repaint(); } } // class ActionAlignLeft class ActionAlignRight extends MyAction { /** for serialization */ private static final long serialVersionUID = -4238642085935519L; public ActionAlignRight() { super("Align Right", "Align Right", "alignright", ""); } // c'tor @Override public void actionPerformed(ActionEvent ae) { m_BayesNet.alignRight(m_Selection.getSelected()); m_jStatusBar.setText(m_BayesNet.lastActionMsg()); a_undo.setEnabled(true); a_redo.setEnabled(false); repaint(); } } // class ActionAlignRight class ActionAlignTop extends MyAction { /** for serialization */ private static final long serialVersionUID = -5338642085935519L; public ActionAlignTop() { super("Align Top", "Align Top", "aligntop", ""); } // c'tor @Override public void actionPerformed(ActionEvent ae) { 
m_BayesNet.alignTop(m_Selection.getSelected()); m_jStatusBar.setText(m_BayesNet.lastActionMsg()); a_undo.setEnabled(true); a_redo.setEnabled(false); repaint(); } } // class ActionAlignTop class ActionAlignBottom extends MyAction { /** for serialization */ private static final long serialVersionUID = -6438642085935519L; public ActionAlignBottom() { super("Align Bottom", "Align Bottom", "alignbottom", ""); } // c'tor @Override public void actionPerformed(ActionEvent ae) { m_BayesNet.alignBottom(m_Selection.getSelected()); m_jStatusBar.setText(m_BayesNet.lastActionMsg()); a_undo.setEnabled(true); a_redo.setEnabled(false); repaint(); } } // class ActionAlignBottom class ActionCenterHorizontal extends MyAction { /** for serialization */ private static final long serialVersionUID = -7538642085935519L; public ActionCenterHorizontal() { super("Center Horizontal", "Center Horizontal", "centerhorizontal", ""); } // c'tor @Override public void actionPerformed(ActionEvent ae) { m_BayesNet.centerHorizontal(m_Selection.getSelected()); m_jStatusBar.setText(m_BayesNet.lastActionMsg()); a_undo.setEnabled(true); a_redo.setEnabled(false); repaint(); } } // class ActionCenterHorizontal class ActionCenterVertical extends MyAction { /** for serialization */ private static final long serialVersionUID = -8638642085935519L; public ActionCenterVertical() { super("Center Vertical", "Center Vertical", "centervertical", ""); } // c'tor @Override public void actionPerformed(ActionEvent ae) { m_BayesNet.centerVertical(m_Selection.getSelected()); m_jStatusBar.setText(m_BayesNet.lastActionMsg()); a_undo.setEnabled(true); a_redo.setEnabled(false); repaint(); } } // class ActionCenterVertical class ActionSpaceHorizontal extends MyAction { /** for serialization */ private static final long serialVersionUID = -9738642085935519L; public ActionSpaceHorizontal() { super("Space Horizontal", "Space Horizontal", "spacehorizontal", ""); } // c'tor @Override public void actionPerformed(ActionEvent ae) { 
m_BayesNet.spaceHorizontal(m_Selection.getSelected()); m_jStatusBar.setText(m_BayesNet.lastActionMsg()); a_undo.setEnabled(true); a_redo.setEnabled(false); repaint(); } } // class ActionSpaceHorizontal class ActionSpaceVertical extends MyAction { /** for serialization */ private static final long serialVersionUID = -838642085935519L; public ActionSpaceVertical() { super("Space Vertical", "Space Vertical", "spacevertical", ""); } // c'tor @Override public void actionPerformed(ActionEvent ae) { m_BayesNet.spaceVertical(m_Selection.getSelected()); m_jStatusBar.setText(m_BayesNet.lastActionMsg()); a_undo.setEnabled(true); a_redo.setEnabled(false); repaint(); } } // class ActionSpaceVertical class ActionAddArc extends MyAction { /** for serialization */ private static final long serialVersionUID = -2038913085935519L; public ActionAddArc() { super("Add Arc", "Add Arc", "addarc", ""); } // c'tor @Override public void actionPerformed(ActionEvent ae) { try { String[] options = new String[m_BayesNet.getNrOfNodes()]; for (int i = 0; i < options.length; i++) { options[i] = m_BayesNet.getNodeName(i); } String sChild = (String) JOptionPane.showInputDialog(null, "Select child node", "Nodes", 0, null, options, options[0]); if (sChild == null || sChild.equals("")) { return; } int iChild = m_BayesNet.getNode(sChild); addArcInto(iChild); } catch (Exception e) { e.printStackTrace(); } } } // class ActionAddArc class ActionDeleteArc extends MyAction { /** for serialization */ private static final long serialVersionUID = -2038914085935519L; public ActionDeleteArc() { super("Delete Arc", "Delete Arc", "delarc", ""); } // c'tor @Override public void actionPerformed(ActionEvent ae) { int nEdges = 0; for (int iNode = 0; iNode < m_BayesNet.getNrOfNodes(); iNode++) { nEdges += m_BayesNet.getNrOfParents(iNode); } String[] options = new String[nEdges]; int i = 0; for (int iNode = 0; iNode < m_BayesNet.getNrOfNodes(); iNode++) { for (int iParent = 0; iParent < m_BayesNet.getNrOfParents(iNode); 
iParent++) { int nParent = m_BayesNet.getParent(iNode, iParent); String sEdge = m_BayesNet.getNodeName(nParent); sEdge += " -> "; sEdge += m_BayesNet.getNodeName(iNode); options[i++] = sEdge; } } deleteArc(options); } } // class ActionDeleteArc class ActionNew extends MyAction { /** for serialization */ private static final long serialVersionUID = -2038911085935515L; public ActionNew() { super("New", "New Network", "new", ""); } // c'tor @Override public void actionPerformed(ActionEvent ae) { m_sFileName = ""; m_BayesNet = new EditableBayesNet(true); updateStatus(); layoutGraph(); a_datagenerator.setEnabled(false); m_BayesNet.clearUndoStack(); m_jStatusBar.setText("New Network"); m_Selection = new Selection(); repaint(); } } // class ActionNew class ActionLoad extends MyAction { /** for serialization */ private static final long serialVersionUID = -2038911085935515L; public ActionLoad() { super("Load", "Load Graph", "open", "ctrl O"); } // c'tor @Override public void actionPerformed(ActionEvent ae) { JFileChooser fc = new JFileChooser(System.getProperty("user.dir")); ExtensionFileFilter ef1 = new ExtensionFileFilter(".arff", "ARFF files"); ExtensionFileFilter ef2 = new ExtensionFileFilter(".xml", "XML BIF files"); fc.addChoosableFileFilter(ef1); fc.addChoosableFileFilter(ef2); fc.setDialogTitle("Load Graph"); int rval = fc.showOpenDialog(GUI.this); if (rval == JFileChooser.APPROVE_OPTION) { String sFileName = fc.getSelectedFile().toString(); if (sFileName.endsWith(ef1.getExtensions()[0])) { initFromArffFile(sFileName); } else { try { readBIFFromFile(sFileName); } catch (Exception e) { e.printStackTrace(); } } m_jStatusBar.setText("Loaded " + sFileName); updateStatus(); } } } // class ActionLoad class ActionViewStatusbar extends MyAction { /** for serialization */ private static final long serialVersionUID = -20389330812354L; public ActionViewStatusbar() { super("View statusbar", "View statusbar", "statusbar", ""); } // c'tor @Override public void 
actionPerformed(ActionEvent ae) { m_jStatusBar.setVisible(!m_jStatusBar.isVisible()); } // actionPerformed } // class ActionViewStatusbar class ActionViewToolbar extends MyAction { /** for serialization */ private static final long serialVersionUID = -20389110812354L; public ActionViewToolbar() { super("View toolbar", "View toolbar", "toolbar", ""); } // c'tor @Override public void actionPerformed(ActionEvent ae) { m_jTbTools.setVisible(!m_jTbTools.isVisible()); } // actionPerformed } // class ActionViewToolbar class ActionSave extends MyAction { /** for serialization */ private static final long serialVersionUID = -20389110859355156L; public ActionSave() { super("Save", "Save Graph", "save", "ctrl S"); } // c'tor public ActionSave(String sName, String sToolTipText, String sIcon, String sAcceleratorKey) { super(sName, sToolTipText, sIcon, sAcceleratorKey); } // c'tor @Override public void actionPerformed(ActionEvent ae) { if (!m_sFileName.equals("")) { saveFile(m_sFileName); m_BayesNet.isSaved(); m_jStatusBar.setText("Saved as " + m_sFileName); } else { if (saveAs()) { m_BayesNet.isSaved(); m_jStatusBar.setText("Saved as " + m_sFileName); } } } // actionPerformed ExtensionFileFilter ef1 = new ExtensionFileFilter(".xml", "XML BIF files"); boolean saveAs() { JFileChooser fc = new JFileChooser(System.getProperty("user.dir")); fc.addChoosableFileFilter(ef1); fc.setDialogTitle("Save Graph As"); if (!m_sFileName.equals("")) { // can happen on actionQuit fc.setSelectedFile(new File(m_sFileName)); } int rval = fc.showSaveDialog(GUI.this); if (rval == JFileChooser.APPROVE_OPTION) { // System.out.println("Saving to file \""+ // f.getAbsoluteFile().toString()+"\""); String sFileName = fc.getSelectedFile().toString(); if (!sFileName.endsWith(".xml")) { sFileName = sFileName.concat(".xml"); } saveFile(sFileName); return true; } return false; } // saveAs protected void saveFile(String sFileName) { try { FileWriter outfile = new FileWriter(sFileName); 
outfile.write(m_BayesNet.toXMLBIF03()); outfile.close(); m_sFileName = sFileName; m_jStatusBar.setText("Saved as " + m_sFileName); } catch (IOException e) { e.printStackTrace(); } } // saveFile } // class ActionSave class ActionSaveAs extends ActionSave { /** for serialization */ private static final long serialVersionUID = -20389110859354L; public ActionSaveAs() { super("Save As", "Save Graph As", "saveas", ""); } // c'tor @Override public void actionPerformed(ActionEvent ae) { saveAs(); } // actionPerformed } // class ActionSaveAs class ActionPrint extends ActionSave { /** for serialization */ private static final long serialVersionUID = -20389001859354L; boolean m_bIsPrinting = false; public ActionPrint() { super("Print", "Print Graph", "print", "ctrl P"); } // c'tor @Override public void actionPerformed(ActionEvent ae) { PrinterJob printJob = PrinterJob.getPrinterJob(); printJob.setPrintable(m_GraphPanel); if (printJob.printDialog()) { try { m_bIsPrinting = true; printJob.print(); m_bIsPrinting = false; } catch (PrinterException pe) { m_jStatusBar.setText("Error printing: " + pe); m_bIsPrinting = false; } } m_jStatusBar.setText("Print"); } // actionPerformed public boolean isPrinting() { return m_bIsPrinting; } } // class ActionPrint class ActionQuit extends ActionSave { /** for serialization */ private static final long serialVersionUID = -2038911085935515L; public ActionQuit() { super("Exit", "Exit Program", "exit", ""); } // c'tor @Override public void actionPerformed(ActionEvent ae) { if (m_BayesNet.isChanged()) { int result = JOptionPane.showConfirmDialog(null, "Network changed. 
Do you want to save it?", "Save before closing?", JOptionPane.YES_NO_CANCEL_OPTION); if (result == JOptionPane.CANCEL_OPTION) { return; } if (result == JOptionPane.YES_OPTION) { if (!saveAs()) { return; } } } } } // class ActionQuit class ActionHelp extends MyAction { /** for serialization */ private static final long serialVersionUID = -20389110859354L; public ActionHelp() { super("Help", "Bayesian Network Workbench Help", "help", ""); } // c'tor @Override public void actionPerformed(ActionEvent ae) { JOptionPane.showMessageDialog(null, "See Weka Homepage\nhttp://www.cs.waikato.ac.nz/ml", "Help Message", JOptionPane.PLAIN_MESSAGE); } } // class ActionHelp class ActionAbout extends MyAction { /** for serialization */ private static final long serialVersionUID = -20389110859353L; public ActionAbout() { super("About", "Help about", "about", ""); } // c'tor @Override public void actionPerformed(ActionEvent ae) { JOptionPane.showMessageDialog(null, "Bayesian Network Workbench\nPart of Weka\n2007", "About Message", JOptionPane.PLAIN_MESSAGE); } } // class ActionAbout class ActionZoomIn extends MyAction { /** for serialization */ private static final long serialVersionUID = -2038911085935515L; public ActionZoomIn() { super("Zoom in", "Zoom in", "zoomin", "+"); } // c'tor @Override public void actionPerformed(ActionEvent ae) { int i = 0, s = (int) (m_fScale * 100); if (s < 300) { i = s / 25; } else if (s < 700) { i = 6 + s / 50; } else { i = 13 + s / 100; } if (s >= 999) { setEnabled(false); return; } else if (s >= 10) { if (i >= 22) { setEnabled(false); } if (s == 10 && !a_zoomout.isEnabled()) { a_zoomout.setEnabled(true); } m_jTfZoom.setText(m_nZoomPercents[i + 1] + "%"); m_fScale = m_nZoomPercents[i + 1] / 100D; } else { if (!a_zoomout.isEnabled()) { a_zoomout.setEnabled(true); } m_jTfZoom.setText(m_nZoomPercents[0] + "%"); m_fScale = m_nZoomPercents[0] / 100D; } setAppropriateSize(); m_GraphPanel.repaint(); m_GraphPanel.invalidate(); m_jScrollPane.revalidate(); 
m_jStatusBar.setText("Zooming in"); } } // class ActionZoomIn class ActionZoomOut extends MyAction { /** for serialization */ private static final long serialVersionUID = -203891108593551L; public ActionZoomOut() { super("Zoom out", "Zoom out", "zoomout", "-"); } // c'tor @Override public void actionPerformed(ActionEvent ae) { int i = 0, s = (int) (m_fScale * 100); if (s < 300) { i = (int) Math.ceil(s / 25D); } else if (s < 700) { i = 6 + (int) Math.ceil(s / 50D); } else { i = 13 + (int) Math.ceil(s / 100D); } if (s <= 10) { setEnabled(false); } else if (s < 999) { if (i <= 1) { setEnabled(false); } m_jTfZoom.setText(m_nZoomPercents[i - 1] + "%"); m_fScale = m_nZoomPercents[i - 1] / 100D; } else { if (!a_zoomin.isEnabled()) { a_zoomin.setEnabled(true); } m_jTfZoom.setText(m_nZoomPercents[22] + "%"); m_fScale = m_nZoomPercents[22] / 100D; } setAppropriateSize(); m_GraphPanel.repaint(); m_GraphPanel.invalidate(); m_jScrollPane.revalidate(); m_jStatusBar.setText("Zooming out"); } } // class ActionZoomOut class ActionLayout extends MyAction { /** for serialization */ private static final long serialVersionUID = -203891108593551L; public ActionLayout() { super("Layout", "Layout Graph", "layout", "ctrl L"); } // c'tor JDialog dlg = null; @Override public void actionPerformed(ActionEvent ae) { if (dlg == null) { dlg = new JDialog(SwingUtilities.getWindowAncestor(GUI.this)); dlg.setTitle("Graph Layout Options"); final JCheckBox jCbCustomNodeSize = new JCheckBox("Custom Node Size"); final JLabel jLbNodeWidth = new JLabel("Width"); final JLabel jLbNodeHeight = new JLabel("Height"); m_jTfNodeWidth.setHorizontalAlignment(JTextField.CENTER); m_jTfNodeWidth.setText("" + m_nNodeWidth); m_jTfNodeHeight.setHorizontalAlignment(JTextField.CENTER); m_jTfNodeHeight.setText("" + m_nNodeHeight); jLbNodeWidth.setEnabled(false); m_jTfNodeWidth.setEnabled(false); jLbNodeHeight.setEnabled(false); m_jTfNodeHeight.setEnabled(false); jCbCustomNodeSize.addActionListener(new ActionListener() { 
@Override
public void actionPerformed(ActionEvent ae) {
  // Enable/disable the width/height fields depending on whether the
  // "Custom Node Size" box is checked; unchecking reverts to automatic
  // sizing of nodes and panel.
  if (((JCheckBox) ae.getSource()).isSelected()) {
    jLbNodeWidth.setEnabled(true);
    m_jTfNodeWidth.setEnabled(true);
    jLbNodeHeight.setEnabled(true);
    m_jTfNodeHeight.setEnabled(true);
  } else {
    jLbNodeWidth.setEnabled(false);
    m_jTfNodeWidth.setEnabled(false);
    jLbNodeHeight.setEnabled(false);
    m_jTfNodeHeight.setEnabled(false);
    setAppropriateSize();
    setAppropriateNodeSize();
  }
}
});

JButton jBtLayout;
jBtLayout = new JButton("Layout Graph");
jBtLayout.setMnemonic('L');
jBtLayout.addActionListener(new ActionListener() {
  @Override
  public void actionPerformed(ActionEvent ae) {
    int tmpW, tmpH;
    if (jCbCustomNodeSize.isSelected()) {
      // Parse the custom node size; on bad input warn, fall back to the
      // current value, and restore the offending text field.
      try {
        tmpW = Integer.parseInt(m_jTfNodeWidth.getText());
      } catch (NumberFormatException ne) {
        JOptionPane.showMessageDialog(GUI.this.getParent(),
          "Invalid integer entered for node width.", "Error",
          JOptionPane.ERROR_MESSAGE);
        tmpW = m_nNodeWidth;
        m_jTfNodeWidth.setText("" + m_nNodeWidth);
      }
      try {
        tmpH = Integer.parseInt(m_jTfNodeHeight.getText());
      } catch (NumberFormatException ne) {
        JOptionPane.showMessageDialog(GUI.this.getParent(),
          "Invalid integer entered for node height.", "Error",
          JOptionPane.ERROR_MESSAGE);
        tmpH = m_nNodeHeight;
        // BUGFIX: restore the HEIGHT field here; the original reset the
        // width field with the height value (copy-paste mistake).
        m_jTfNodeHeight.setText("" + m_nNodeHeight);
      }
      if (tmpW != m_nNodeWidth || tmpH != m_nNodeHeight) {
        m_nNodeWidth = tmpW;
        m_nPaddedNodeWidth = m_nNodeWidth + PADDING;
        m_nNodeHeight = tmpH;
      }
    }
    // JButton bt = (JButton) ae.getSource();
    // bt.setEnabled(false);
    dlg.setVisible(false);
    updateStatus();
    layoutGraph();
    m_jStatusBar.setText("Laying out Bayes net");
  }
});

JButton jBtCancel;
jBtCancel = new JButton("Cancel");
jBtCancel.setMnemonic('C');
jBtCancel.addActionListener(new ActionListener() {
  @Override
  public void actionPerformed(ActionEvent ae) {
    dlg.setVisible(false);
  }
});

GridBagConstraints gbc = new GridBagConstraints();
dlg.setLayout(new GridBagLayout());
// dlg.add(m_le.getControlPanel());
Container c = new Container();
c.setLayout(new GridBagLayout());
gbc.gridwidth = 1; gbc.insets = new Insets(8, 0, 0, 0); gbc.anchor = GridBagConstraints.NORTHWEST; gbc.gridwidth = GridBagConstraints.REMAINDER; c.add(jCbCustomNodeSize, gbc); gbc.gridwidth = GridBagConstraints.RELATIVE; c.add(jLbNodeWidth, gbc); gbc.gridwidth = GridBagConstraints.REMAINDER; c.add(m_jTfNodeWidth, gbc); gbc.gridwidth = GridBagConstraints.RELATIVE; c.add(jLbNodeHeight, gbc); gbc.gridwidth = GridBagConstraints.REMAINDER; c.add(m_jTfNodeHeight, gbc); gbc.fill = GridBagConstraints.HORIZONTAL; dlg.add(c, gbc); dlg.add(jBtLayout); gbc.gridwidth = GridBagConstraints.REMAINDER; dlg.add(jBtCancel); } //dlg.setLocation(100, 100); //dlg.setVisible(true); dlg.setSize(450, 350); //dlg.setVisible(false); dlg.pack(); dlg.setLocationRelativeTo(SwingUtilities.getWindowAncestor(GUI.this)); dlg.setVisible(true); //dlg.repaint(); } } // class ActionLayout /** * Constructor<br> * Sets up the gui and initializes all the other previously uninitialized * variables. */ public GUI() { m_GraphPanel = new GraphPanel(); m_jScrollPane = new JScrollPane(m_GraphPanel); // creating a new layout engine and adding this class as its listener // to receive layoutComplete events m_jTfZoom = new JTextField("100%"); m_jTfZoom.setMinimumSize(m_jTfZoom.getPreferredSize()); m_jTfZoom.setHorizontalAlignment(JTextField.CENTER); m_jTfZoom.setToolTipText("Zoom"); m_jTfZoom.addActionListener(new ActionListener() { @Override public void actionPerformed(ActionEvent ae) { JTextField jt = (JTextField) ae.getSource(); try { int i = -1; i = jt.getText().indexOf('%'); if (i == -1) { i = Integer.parseInt(jt.getText()); } else { i = Integer.parseInt(jt.getText().substring(0, i)); } if (i <= 999) { m_fScale = i / 100D; } jt.setText((int) (m_fScale * 100) + "%"); if (m_fScale > 0.1) { if (!a_zoomout.isEnabled()) { a_zoomout.setEnabled(true); } } else { a_zoomout.setEnabled(false); } if (m_fScale < 9.99) { if (!a_zoomin.isEnabled()) { a_zoomin.setEnabled(true); } } else { a_zoomin.setEnabled(false); } 
setAppropriateSize(); // m_GraphPanel.clearBuffer(); m_GraphPanel.repaint(); m_GraphPanel.invalidate(); m_jScrollPane.revalidate(); } catch (NumberFormatException ne) { JOptionPane.showMessageDialog(GUI.this.getParent(), "Invalid integer entered for zoom.", "Error", JOptionPane.ERROR_MESSAGE); jt.setText((m_fScale * 100) + "%"); } } }); GridBagConstraints gbc = new GridBagConstraints(); final JPanel p = new JPanel(new GridBagLayout()); p.setBorder(BorderFactory.createCompoundBorder( BorderFactory.createTitledBorder("ExtraControls"), BorderFactory.createEmptyBorder(4, 4, 4, 4))); p.setPreferredSize(new Dimension(0, 0)); m_jTbTools = new JToolBar(); m_jTbTools.setFloatable(false); m_jTbTools.setLayout(new GridBagLayout()); gbc.anchor = GridBagConstraints.NORTHWEST; gbc.gridwidth = GridBagConstraints.REMAINDER; gbc.insets = new Insets(0, 0, 0, 0); m_jTbTools.add(p, gbc); gbc.gridwidth = 1; m_jTbTools.add(a_new); m_jTbTools.add(a_save); m_jTbTools.add(a_load); m_jTbTools.addSeparator(new Dimension(2, 2)); m_jTbTools.add(a_cutnode); m_jTbTools.add(a_copynode); m_jTbTools.add(a_pastenode); m_jTbTools.addSeparator(new Dimension(2, 2)); m_jTbTools.add(a_undo); m_jTbTools.add(a_redo); m_jTbTools.addSeparator(new Dimension(2, 2)); m_jTbTools.add(a_alignleft); m_jTbTools.add(a_alignright); m_jTbTools.add(a_aligntop); m_jTbTools.add(a_alignbottom); m_jTbTools.add(a_centerhorizontal); m_jTbTools.add(a_centervertical); m_jTbTools.add(a_spacehorizontal); m_jTbTools.add(a_spacevertical); m_jTbTools.addSeparator(new Dimension(2, 2)); m_jTbTools.add(a_zoomin); gbc.fill = GridBagConstraints.VERTICAL; gbc.weighty = 1; JPanel p2 = new JPanel(new BorderLayout()); p2.setPreferredSize(m_jTfZoom.getPreferredSize()); p2.setMinimumSize(m_jTfZoom.getPreferredSize()); p2.add(m_jTfZoom, BorderLayout.CENTER); m_jTbTools.add(p2, gbc); gbc.weighty = 0; gbc.fill = GridBagConstraints.NONE; m_jTbTools.add(a_zoomout); m_jTbTools.addSeparator(new Dimension(2, 2)); // jTbTools.add(jBtExtraControls, 
gbc); m_jTbTools.add(a_layout); m_jTbTools.addSeparator(new Dimension(4, 2)); gbc.weightx = 1; gbc.fill = GridBagConstraints.BOTH; // jTbTools.add(m_layoutEngine.getProgressBar(), gbc); m_jStatusBar = new JLabel("Status bar"); this.setLayout(new BorderLayout()); this.add(m_jTbTools, BorderLayout.NORTH); this.add(m_jScrollPane, BorderLayout.CENTER); this.add(m_jStatusBar, BorderLayout.SOUTH); updateStatus(); a_datagenerator.setEnabled(false); makeMenuBar(); } /** * Get the menu bar for this application. * * @return the menu bar */ public JMenuBar getMenuBar() { return m_menuBar; } private void makeMenuBar() { m_menuBar = new JMenuBar(); JMenu fileMenu = new JMenu("File"); fileMenu.setMnemonic('F'); m_menuBar.add(fileMenu); fileMenu.add(a_new); fileMenu.add(a_load); fileMenu.add(a_save); fileMenu.add(a_saveas); fileMenu.addSeparator(); fileMenu.add(a_print); fileMenu.add(a_export); fileMenu.addSeparator(); fileMenu.add(a_quit); JMenu editMenu = new JMenu("Edit"); editMenu.setMnemonic('E'); m_menuBar.add(editMenu); editMenu.add(a_undo); editMenu.add(a_redo); editMenu.addSeparator(); editMenu.add(a_selectall); editMenu.add(a_delnode); editMenu.add(a_cutnode); editMenu.add(a_copynode); editMenu.add(a_pastenode); editMenu.addSeparator(); editMenu.add(a_addnode); editMenu.add(a_addarc); editMenu.add(a_delarc); editMenu.addSeparator(); editMenu.add(a_alignleft); editMenu.add(a_alignright); editMenu.add(a_aligntop); editMenu.add(a_alignbottom); editMenu.add(a_centerhorizontal); editMenu.add(a_centervertical); editMenu.add(a_spacehorizontal); editMenu.add(a_spacevertical); JMenu toolMenu = new JMenu("Tools"); toolMenu.setMnemonic('T'); toolMenu.add(a_networkgenerator); toolMenu.add(a_datagenerator); toolMenu.add(a_datasetter); toolMenu.add(a_learn); toolMenu.add(a_learnCPT); toolMenu.addSeparator(); toolMenu.add(a_layout); toolMenu.addSeparator(); final JCheckBoxMenuItem viewMargins = new JCheckBoxMenuItem("Show Margins", false); viewMargins.addActionListener(new 
ActionListener() { @Override public void actionPerformed(ActionEvent ae) { boolean bPrev = m_bViewMargins; m_bViewMargins = viewMargins.getState(); if (bPrev == false && viewMargins.getState() == true) { updateStatus(); } repaint(); } }); toolMenu.add(viewMargins); final JCheckBoxMenuItem viewCliques = new JCheckBoxMenuItem("Show Cliques", false); viewCliques.addActionListener(new ActionListener() { @Override public void actionPerformed(ActionEvent ae) { boolean bPrev = m_bViewCliques; m_bViewCliques = viewCliques.getState(); if (bPrev == false && viewCliques.getState() == true) { updateStatus(); } repaint(); } }); toolMenu.add(viewCliques); m_menuBar.add(toolMenu); JMenu viewMenu = new JMenu("View"); viewMenu.setMnemonic('V'); m_menuBar.add(viewMenu); viewMenu.add(a_zoomin); viewMenu.add(a_zoomout); viewMenu.addSeparator(); viewMenu.add(a_viewtoolbar); viewMenu.add(a_viewstatusbar); JMenu helpMenu = new JMenu("Help"); helpMenu.setMnemonic('H'); m_menuBar.add(helpMenu); helpMenu.add(a_help); helpMenu.add(a_about); } /** * This method sets the node size that is appropriate considering the maximum * label size that is present. It is used internally when custom node size * checkbox is unchecked. */ protected void setAppropriateNodeSize() { int strWidth; FontMetrics fm = this.getFontMetrics(this.getFont()); int nMaxStringWidth = DEFAULT_NODE_WIDTH; if (nMaxStringWidth == 0) { for (int iNode = 0; iNode < m_BayesNet.getNrOfNodes(); iNode++) { strWidth = fm.stringWidth(m_BayesNet.getNodeName(iNode)); if (strWidth > nMaxStringWidth) { nMaxStringWidth = strWidth; } } } m_nNodeWidth = nMaxStringWidth + 4; m_nPaddedNodeWidth = m_nNodeWidth + PADDING; m_jTfNodeWidth.setText("" + m_nNodeWidth); m_nNodeHeight = 2 * fm.getHeight(); m_jTfNodeHeight.setText("" + m_nNodeHeight); } /** * Sets the preferred size for m_GraphPanel GraphPanel to the minimum size * that is neccessary to display the graph. 
*/ public void setAppropriateSize() { int maxX = 0, maxY = 0; m_GraphPanel.setScale(m_fScale, m_fScale); for (int iNode = 0; iNode < m_BayesNet.getNrOfNodes(); iNode++) { int nPosX = m_BayesNet.getPositionX(iNode); int nPosY = m_BayesNet.getPositionY(iNode); if (maxX < nPosX) { maxX = nPosX + 100; } if (maxY < nPosY) { maxY = nPosY; } } m_GraphPanel.setPreferredSize(new Dimension((int) ((maxX + m_nPaddedNodeWidth + 2) * m_fScale), (int) ((maxY + m_nNodeHeight + 2) * m_fScale))); m_GraphPanel.revalidate(); } // setAppropriateSize /** * This method is an implementation for LayoutCompleteEventListener class. It * sets the size appropriate for m_GraphPanel GraphPanel and and revalidates * it's container JScrollPane once a LayoutCompleteEvent is received from the * LayoutEngine. Also, it updates positions of the Bayesian network stored in * m_BayesNet. */ @Override public void layoutCompleted(LayoutCompleteEvent le) { LayoutEngine layoutEngine = m_layoutEngine; // (LayoutEngine) // le.getSource(); ArrayList<Integer> nPosX = new ArrayList<Integer>(m_BayesNet.getNrOfNodes()); ArrayList<Integer> nPosY = new ArrayList<Integer>(m_BayesNet.getNrOfNodes()); for (int iNode = 0; iNode < layoutEngine.getNodes().size(); iNode++) { GraphNode gNode = layoutEngine.getNodes().get(iNode); if (gNode.nodeType == GraphNode.NORMAL) { nPosX.add(gNode.x); nPosY.add(gNode.y); } } m_BayesNet.layoutGraph(nPosX, nPosY); m_jStatusBar.setText("Graph layed out"); a_undo.setEnabled(true); a_redo.setEnabled(false); setAppropriateSize(); m_GraphPanel.invalidate(); m_jScrollPane.revalidate(); m_GraphPanel.repaint(); } // layoutCompleted /** * BIF reader<br> * Reads a graph description in XMLBIF03 from an file with name sFileName */ public void readBIFFromFile(String sFileName) throws BIFFormatException, IOException { m_sFileName = sFileName; try { BIFReader bayesNet = new BIFReader(); bayesNet.processFile(sFileName); m_BayesNet = new EditableBayesNet(bayesNet); updateStatus(); 
a_datagenerator.setEnabled(m_BayesNet.getNrOfNodes() > 0); m_BayesNet.clearUndoStack(); } catch (Exception ex) { ex.printStackTrace(); return; } setAppropriateNodeSize(); setAppropriateSize(); } // readBIFFromFile /* * read arff file from file sFileName and start new Bayesian network with * nodes representing attributes in data set. */ void initFromArffFile(String sFileName) { try { Instances instances = new Instances(new FileReader(sFileName)); m_BayesNet = new EditableBayesNet(instances); m_Instances = instances; a_learn.setEnabled(true); a_learnCPT.setEnabled(true); setAppropriateNodeSize(); setAppropriateSize(); } catch (Exception ex) { ex.printStackTrace(); return; } } // initFromArffFile /** * The panel which contains the actual Bayeian network. */ private class GraphPanel extends PrintablePanel implements Printable { /** for serialization */ private static final long serialVersionUID = -3562813603236753173L; /** node drawing modes */ final static int HIGHLIGHTED = 1; final static int NORMAL = 0; public GraphPanel() { super(); this.addMouseListener(new GraphVisualizerMouseListener()); this.addMouseMotionListener(new GraphVisualizerMouseMotionListener()); this.setToolTipText(""); } // c'tor /* * For showing instructions when hovering over a node (non-Javadoc) * * @see javax.swing.JComponent#getToolTipText(java.awt.event.MouseEvent) */ @Override public String getToolTipText(MouseEvent me) { int x, y; Rectangle r; x = y = 0; r = new Rectangle(0, 0, (int) (m_nPaddedNodeWidth * m_fScale), (int) (m_nNodeHeight * m_fScale)); x += me.getX(); y += me.getY(); for (int iNode = 0; iNode < m_BayesNet.getNrOfNodes(); iNode++) { r.x = (int) (m_BayesNet.getPositionX(iNode) * m_fScale); r.y = (int) (m_BayesNet.getPositionY(iNode) * m_fScale); if (r.contains(x, y)) { return m_BayesNet.getNodeName(iNode) + " (right click to manipulate this node)"; } } return null; } // getToolTipText /* * Code for showing the graph in the panel. 
(non-Javadoc) * * @see javax.swing.JComponent#paintComponent(java.awt.Graphics) */ @Override public void paintComponent(Graphics gr) { Graphics2D g = (Graphics2D) gr; RenderingHints rh = new RenderingHints(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON); rh.put(RenderingHints.KEY_RENDERING, RenderingHints.VALUE_RENDER_SPEED); g.setRenderingHints(rh); g.scale(m_fScale, m_fScale); Rectangle r = g.getClipBounds(); g.clearRect(r.x, r.y, r.width, r.height); if (m_bViewCliques) { m_nClique = 1; viewCliques(g, m_marginCalculator.m_root); } for (int iNode = 0; iNode < m_BayesNet.getNrOfNodes(); iNode++) { drawNode(g, iNode, NORMAL); } if (!a_export.isExporting() && !a_print.isPrinting()) { m_Selection.draw(g); } if (m_nSelectedRect != null) { g.drawRect((int) (m_nSelectedRect.x / m_fScale), (int) (m_nSelectedRect.y / m_fScale), (int) (m_nSelectedRect.width / m_fScale), (int) (m_nSelectedRect.height / m_fScale)); } } // paintComponent /** * number of the clique being drawn. Used for selecting the color of the * clique */ int m_nClique = 1; /* * draws cliques in junction tree. 
*/ void viewCliques(Graphics g, JunctionTreeNode node) { int[] nodes = node.m_nNodes; g.setColor(new Color(m_nClique % 7 * 256 / 7, (m_nClique % 2 * 256 / 2), (m_nClique % 3 * 256 / 3))); int dX = m_nPaddedNodeWidth / 2 + m_nClique; int dY = m_nNodeHeight / 2; int nPosX = 0; int nPosY = 0; String sStr = ""; for (int j = 0; j < nodes.length; j++) { nPosX += m_BayesNet.getPositionX(nodes[j]); nPosY += m_BayesNet.getPositionY(nodes[j]); sStr += " " + nodes[j]; for (int k = j + 1; k < nodes.length; k++) { g.drawLine(m_BayesNet.getPositionX(nodes[j]) + dX, m_BayesNet.getPositionY(nodes[j]) + dY, m_BayesNet.getPositionX(nodes[k]) + dX, m_BayesNet.getPositionY(nodes[k]) + dY); } } m_nClique++; nPosX /= nodes.length; nPosY /= nodes.length; g.drawString("Clique " + m_nClique + "(" + sStr + ")", nPosX, nPosY); for (int iChild = 0; iChild < node.m_children.size(); iChild++) { viewCliques(g, (JunctionTreeNode) node.m_children.elementAt(iChild)); } } // viewCliques /* * Draw a node with index iNode on Graphics g at position Drawing mode can * be NORMAL or HIGHLIGHTED. 
*/ protected void drawNode(Graphics g, int iNode, int mode) { int nPosX = m_BayesNet.getPositionX(iNode); int nPosY = m_BayesNet.getPositionY(iNode); g.setColor(this.getBackground().darker().darker()); FontMetrics fm = getFontMetrics(getFont()); if (mode == HIGHLIGHTED) { g.setXORMode(Color.green); // g.setColor(Color.green); } g.fillOval(nPosX + m_nPaddedNodeWidth - m_nNodeWidth - (m_nPaddedNodeWidth - m_nNodeWidth) / 2, nPosY, m_nNodeWidth, m_nNodeHeight); g.setColor(Color.white); if (mode == HIGHLIGHTED) { g.setXORMode(Color.red); } // Draw the node's label if it can fit inside the node's // current width otherwise just display its node nr // if it can fit in node's current width if (fm.stringWidth(m_BayesNet.getNodeName(iNode)) <= m_nNodeWidth) { g.drawString(m_BayesNet.getNodeName(iNode), nPosX + m_nPaddedNodeWidth / 2 - fm.stringWidth(m_BayesNet.getNodeName(iNode)) / 2, nPosY + m_nNodeHeight / 2 + fm.getHeight() / 2 - 2); } else if (fm.stringWidth("" + iNode) <= m_nNodeWidth) { g.drawString("" + iNode, nPosX + m_nPaddedNodeWidth / 2 - fm.stringWidth("" + iNode) / 2, nPosY + m_nNodeHeight / 2 + fm.getHeight() / 2 - 2); } if (mode == HIGHLIGHTED) { g.setXORMode(Color.green); } if (m_bViewMargins) { if (m_BayesNet.getEvidence(iNode) < 0) { g.setColor(new Color(0, 128, 0)); } else { g.setColor(new Color(128, 0, 0)); } double[] P = m_BayesNet.getMargin(iNode); for (int iValue = 0; iValue < P.length; iValue++) { String sP = P[iValue] + ""; if (sP.charAt(0) == '0') { sP = sP.substring(1); } if (sP.length() > 5) { sP = sP.substring(1, 5); } g.fillRect(nPosX + m_nPaddedNodeWidth, nPosY + iValue * 10 + 2, (int) (P[iValue] * 100), 8); g.drawString(m_BayesNet.getNodeValue(iNode, iValue) + " " + sP, nPosX + m_nPaddedNodeWidth + (int) (P[iValue] * 100), nPosY + iValue * 10 + 10); } } if (m_bViewCliques) { return; } g.setColor(Color.black); // Drawing all incoming edges into the node, for (int iParent = 0; iParent < m_BayesNet.getNrOfParents(iNode); iParent++) { int nParent 
= m_BayesNet.getParent(iNode, iParent); int nPosX1 = nPosX + m_nPaddedNodeWidth / 2; int nPosY1 = nPosY + m_nNodeHeight; int nPosX2 = m_BayesNet.getPositionX(nParent); int nPosY2 = m_BayesNet.getPositionY(nParent); int nPosX2b = nPosX2 + m_nPaddedNodeWidth / 2; int nPosY2b = nPosY2; double phi = Math.atan2((nPosX2b - nPosX1 + 0.0) * m_nNodeHeight, (nPosY2b - nPosY1 + 0.0) * m_nNodeWidth); nPosX1 = (int) (nPosX + m_nPaddedNodeWidth / 2 + Math.sin(phi) * m_nNodeWidth / 2); nPosY1 = (int) (nPosY + m_nNodeHeight / 2 + Math.cos(phi) * m_nNodeHeight / 2); nPosX2b = (int) (nPosX2 + m_nPaddedNodeWidth / 2 - Math.sin(phi) * m_nNodeWidth / 2); nPosY2b = (int) (nPosY2 + m_nNodeHeight / 2 - Math.cos(phi) * m_nNodeHeight / 2); drawArrow(g, nPosX2b, nPosY2b, nPosX1, nPosY1); } if (mode == HIGHLIGHTED) { ArrayList<Integer> children = m_BayesNet.getChildren(iNode); for (int iChild = 0; iChild < children.size(); iChild++) { int nChild = children.get(iChild); int nPosX1 = nPosX + m_nPaddedNodeWidth / 2; int nPosY1 = nPosY; int nPosX2 = m_BayesNet.getPositionX(nChild); int nPosY2 = m_BayesNet.getPositionY(nChild); int nPosX2b = nPosX2 + m_nPaddedNodeWidth / 2; int nPosY2b = nPosY2 + m_nNodeHeight; double phi = Math.atan2((nPosX2b - nPosX1 + 0.0) * m_nNodeHeight, (nPosY2b - nPosY1 + 0.0) * m_nNodeWidth); nPosX1 = (int) (nPosX + m_nPaddedNodeWidth / 2 + Math.sin(phi) * m_nNodeWidth / 2); nPosY1 = (int) (nPosY + m_nNodeHeight / 2 + Math.cos(phi) * m_nNodeHeight / 2); nPosX2b = (int) (nPosX2 + m_nPaddedNodeWidth / 2 - Math.sin(phi) * m_nNodeWidth / 2); nPosY2b = (int) (nPosY2 + m_nNodeHeight / 2 - Math.cos(phi) * m_nNodeHeight / 2); drawArrow(g, nPosX1, nPosY1, nPosX2b, nPosY2b); } } } // drawNode /** * This method draws an arrow on a line from (x1,y1) to (x2,y2). The arrow * head is seated on (x2,y2) and is in the direction of the line. If the * arrow is needed to be drawn in the opposite direction then simply swap * the order of (x1, y1) and (x2, y2) when calling this function. 
*/ protected void drawArrow(Graphics g, int nPosX1, int nPosY1, int nPosX2, int nPosY2) { g.drawLine(nPosX1, nPosY1, nPosX2, nPosY2); if (nPosX1 == nPosX2) { if (nPosY1 < nPosY2) { g.drawLine(nPosX2, nPosY2, nPosX2 + 4, nPosY2 - 8); g.drawLine(nPosX2, nPosY2, nPosX2 - 4, nPosY2 - 8); } else { g.drawLine(nPosX2, nPosY2, nPosX2 + 4, nPosY2 + 8); g.drawLine(nPosX2, nPosY2, nPosX2 - 4, nPosY2 + 8); } } else { // theta=line's angle from base, beta=angle of arrow's side from // line double hyp = 0, base = 0, perp = 0, theta, beta; int nPosX3 = 0, nPosY3 = 0; if (nPosX2 < nPosX1) { base = nPosX1 - nPosX2; hyp = Math.sqrt((nPosX2 - nPosX1) * (nPosX2 - nPosX1) + (nPosY2 - nPosY1) * (nPosY2 - nPosY1)); theta = Math.acos(base / hyp); } else { // x1>x2 as we already checked x1==x2 before base = nPosX1 - nPosX2; hyp = Math.sqrt((nPosX2 - nPosX1) * (nPosX2 - nPosX1) + (nPosY2 - nPosY1) * (nPosY2 - nPosY1)); theta = Math.acos(base / hyp); } beta = 30 * Math.PI / 180; hyp = 8; base = Math.cos(theta - beta) * hyp; perp = Math.sin(theta - beta) * hyp; nPosX3 = (int) (nPosX2 + base); if (nPosY1 < nPosY2) { nPosY3 = (int) (nPosY2 - perp); } else { nPosY3 = (int) (nPosY2 + perp); } g.drawLine(nPosX2, nPosY2, nPosX3, nPosY3); base = Math.cos(theta + beta) * hyp; perp = Math.sin(theta + beta) * hyp; nPosX3 = (int) (nPosX2 + base); if (nPosY1 < nPosY2) { nPosY3 = (int) (nPosY2 - perp); } else { nPosY3 = (int) (nPosY2 + perp); } g.drawLine(nPosX2, nPosY2, nPosX3, nPosY3); } } // drawArrow /** * This method highlights a given node and all its incoming and outgoing * arcs */ public void highLight(int iNode) { Graphics2D g = (Graphics2D) this.getGraphics(); RenderingHints rh = new RenderingHints(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON); rh.put(RenderingHints.KEY_RENDERING, RenderingHints.VALUE_RENDER_SPEED); g.setRenderingHints(rh); g.setPaintMode(); g.scale(m_fScale, m_fScale); drawNode(g, iNode, HIGHLIGHTED); } // highlight /** * implementation of Printable, used 
for printing * * @see Printable */ @Override public int print(Graphics g, PageFormat pageFormat, int pageIndex) { if (pageIndex > 0) { return (NO_SUCH_PAGE); } else { Graphics2D g2d = (Graphics2D) g; g2d.translate(pageFormat.getImageableX(), pageFormat.getImageableY()); double fHeight = pageFormat.getImageableHeight(); double fWidth = pageFormat.getImageableWidth(); int xMax = 1; int yMax = 1; for (int iNode = 0; iNode < m_BayesNet.getNrOfNodes(); iNode++) { if (xMax < m_BayesNet.getPositionX(iNode)) { xMax = m_BayesNet.getPositionX(iNode); } if (yMax < m_BayesNet.getPositionY(iNode)) { yMax = m_BayesNet.getPositionY(iNode); } } double fCurrentScale = m_fScale; xMax += m_nPaddedNodeWidth + 100; if (fWidth / xMax < fHeight / yMax) { m_fScale = fWidth / xMax; } else { m_fScale = fHeight / yMax; } // Turn off double buffering paint(g2d); m_fScale = fCurrentScale; // Turn double buffering back on return (PAGE_EXISTS); } } // print } // class GraphPanel /** * Table Model for the Table for editing CPTs */ private class GraphVisualizerTableModel extends AbstractTableModel { /** for serialization */ private static final long serialVersionUID = -4789813491347366596L; /** labels for the columns */ final String[] m_sColumnNames; /** probability table data **/ final double[][] m_fProbs; public GraphVisualizerTableModel(int iNode) { double[][] probs = m_BayesNet.getDistribution(iNode); m_fProbs = new double[probs.length][probs[0].length]; for (int i = 0; i < probs.length; i++) { for (int j = 0; j < probs[0].length; j++) { m_fProbs[i][j] = probs[i][j]; } } m_sColumnNames = m_BayesNet.getValues(iNode); } // c'tor /** * method that generates random CPTs */ public void randomize() { int nProbs = m_fProbs[0].length; Random random = new Random(); for (int i = 0; i < m_fProbs.length; i++) { // get random nrs for (int j = 0; j < nProbs - 1; j++) { m_fProbs[i][j] = random.nextDouble(); } // sort for (int j = 0; j < nProbs - 1; j++) { for (int k = j + 1; k < nProbs - 1; k++) { if 
(m_fProbs[i][j] > m_fProbs[i][k]) { double h = m_fProbs[i][j]; m_fProbs[i][j] = m_fProbs[i][k]; m_fProbs[i][k] = h; } } } double sum = m_fProbs[i][0]; for (int j = 1; j < nProbs - 1; j++) { m_fProbs[i][j] = m_fProbs[i][j] - sum; sum += m_fProbs[i][j]; } m_fProbs[i][nProbs - 1] = 1.0 - sum; } } // randomize public void setData() { } /** return nr of colums */ @Override public int getColumnCount() { return m_sColumnNames.length; } /** return nr of rows */ @Override public int getRowCount() { return m_fProbs.length; } /** * return name of specified colum * * @param iCol index of the column */ @Override public String getColumnName(int iCol) { return m_sColumnNames[iCol]; } /** * return data point * * @param iRow index of row in table * @param iCol index of column in table */ @Override public Object getValueAt(int iRow, int iCol) { return new Double(m_fProbs[iRow][iCol]); } /** * Set data point, assigns value to CPT entry specified by row and column. * The remainder of the CPT is normalized so that the values add up to 1. IF * a value below zero of over 1 is given, no changes take place. * * @param oProb data point * @param iRow index of row in table * @param iCol index of column in table */ @Override public void setValueAt(Object oProb, int iRow, int iCol) { Double fProb = (Double) oProb; if (fProb < 0 || fProb > 1) { return; } m_fProbs[iRow][iCol] = fProb; double sum = 0; for (int i = 0; i < m_fProbs[iRow].length; i++) { sum += m_fProbs[iRow][i]; } if (sum > 1) { // handle overflow int i = m_fProbs[iRow].length - 1; while (sum > 1) { if (i != iCol) { if (m_fProbs[iRow][i] > sum - 1) { m_fProbs[iRow][i] -= sum - 1; sum = 1; } else { sum -= m_fProbs[iRow][i]; m_fProbs[iRow][i] = 0; } } i--; } } else { // handle underflow int i = m_fProbs[iRow].length - 1; while (sum < 1) { if (i != iCol) { m_fProbs[iRow][i] += 1 - sum; sum = 1; } i--; } } validate(); } // setData /* * JTable uses this method to determine the default renderer/ editor for * each cell. 
*/
  @Override
  public Class<?> getColumnClass(int c) {
    // Infer the column class from the first row so JTable picks a
    // numeric renderer/editor for the probability cells.
    return getValueAt(0, c).getClass();
  }

  /*
   * Makes every cell editable so the user can change CPT entries directly.
   * NOTE(review): the original comment claimed this made the table
   * uneditable, but the method returns true.
   */
  @Override
  public boolean isCellEditable(int row, int col) {
    return true;
  }

} // class GraphVisualizerTableModel

/**
 * Listener class for processing mouseClicked
 */
private class GraphVisualizerMouseListener extends MouseAdapter {

  /**
   * A left mouseclick on a node adds node to selection (depending on shift
   * and ctrl keys). A right mouseclick on a node pops up menu with actions to
   * be performed on the node. A right mouseclick outside another node pops up
   * menu.
   */
  @Override
  public void mouseClicked(MouseEvent me) {
    int x, y;
    // Node bounding box, scaled by the current zoom factor.
    Rectangle r = new Rectangle(0, 0, (int) (m_nPaddedNodeWidth * m_fScale),
      (int) (m_nNodeHeight * m_fScale));
    x = me.getX();
    y = me.getY();
    for (int iNode = 0; iNode < m_BayesNet.getNrOfNodes(); iNode++) {
      r.x = (int) (m_BayesNet.getPositionX(iNode) * m_fScale);
      r.y = (int) (m_BayesNet.getPositionY(iNode) * m_fScale);
      if (r.contains(x, y)) {
        m_nCurrentNode = iNode;
        if (me.getButton() == MouseEvent.BUTTON3) {
          handleRightNodeClick(me);
        }
        if (me.getButton() == MouseEvent.BUTTON1) {
          // ctrl toggles membership, shift adds, a plain click replaces
          // the current selection.
          if ((me.getModifiersEx() & MouseEvent.CTRL_DOWN_MASK) != 0) {
            m_Selection.toggleSelection(m_nCurrentNode);
          } else if ((me.getModifiersEx() & MouseEvent.SHIFT_DOWN_MASK) != 0) {
            m_Selection.addToSelection(m_nCurrentNode);
          } else {
            m_Selection.clear();
            m_Selection.addToSelection(m_nCurrentNode);
          }
          repaint();
        }
        return;
      }
    }
    // Click landed outside every node: right button opens the canvas menu.
    if (me.getButton() == MouseEvent.BUTTON3) {
      handleRightClick(me, (int) (x / m_fScale), (int) (y / m_fScale));
    }
  } // mouseClicked

  /*
   * update selection (non-Javadoc)
   *
   * @see
   * java.awt.event.MouseListener#mouseReleased(java.awt.event.MouseEvent)
   */
  @Override
  public void mouseReleased(MouseEvent me) {
    // A rubber-band rectangle was being dragged; commit it to the selection.
    if (m_nSelectedRect != null) {
      if ((me.getModifiersEx() & MouseEvent.CTRL_DOWN_MASK) != 0) {
        m_Selection.toggleSelection(m_nSelectedRect);
      } else if ((me.getModifiersEx() & MouseEvent.SHIFT_DOWN_MASK) != 0)
{ m_Selection.addToSelection(m_nSelectedRect); } else { m_Selection.clear(); m_Selection.addToSelection(m_nSelectedRect); } m_nSelectedRect = null; repaint(); } } // mouseReleased /** position clicked on */ int m_nPosX = 0, m_nPosY = 0; /* * pop up menu with actions that apply in general or to selection (if any * exists) */ void handleRightClick(MouseEvent me, int nPosX, int nPosY) { ActionListener act = new ActionListener() { @Override public void actionPerformed(ActionEvent ae) { if (ae.getActionCommand().equals("Add node")) { a_addnode.addNode(m_nPosX, m_nPosY); return; } repaint(); } }; JPopupMenu popupMenu = new JPopupMenu("Choose a value"); JMenuItem addNodeItem = new JMenuItem("Add node"); addNodeItem.addActionListener(act); popupMenu.add(addNodeItem); ArrayList<Integer> selected = m_Selection.getSelected(); JMenu addArcMenu = new JMenu("Add parent"); popupMenu.add(addArcMenu); if (selected.size() == 0) { addArcMenu.setEnabled(false); } else { int nNodes = m_BayesNet.getNrOfNodes(); boolean[] isNotAllowedAsParent = new boolean[nNodes]; // prevent it being a parent of itself for (int iNode = 0; iNode < selected.size(); iNode++) { isNotAllowedAsParent[selected.get(iNode)] = true; } // prevent a descendant being a parent, since it introduces cycles for (int i = 0; i < nNodes; i++) { for (int iNode = 0; iNode < nNodes; iNode++) { for (int iParent = 0; iParent < m_BayesNet.getNrOfParents(iNode); iParent++) { if (isNotAllowedAsParent[m_BayesNet.getParent(iNode, iParent)]) { isNotAllowedAsParent[iNode] = true; } } } } // prevent nodes that are already a parent for (int iNode = 0; iNode < selected.size(); iNode++) { int nNode = selected.get(iNode); for (int iParent = 0; iParent < m_BayesNet.getNrOfParents(nNode); iParent++) { isNotAllowedAsParent[m_BayesNet.getParent(nNode, iParent)] = true; } } ActionListener addParentAction = new ActionListener() { @Override public void actionPerformed(ActionEvent ae) { try { m_BayesNet.addArc(ae.getActionCommand(), 
m_Selection.getSelected()); m_jStatusBar.setText(m_BayesNet.lastActionMsg()); updateStatus(); } catch (Exception e) { e.printStackTrace(); } } }; // count nr of remaining candidates int nCandidates = 0; for (int i = 0; i < nNodes; i++) { if (!isNotAllowedAsParent[i]) { JMenuItem item = new JMenuItem(m_BayesNet.getNodeName(i)); item.addActionListener(addParentAction); addArcMenu.add(item); nCandidates++; } } if (nCandidates == 0) { addArcMenu.setEnabled(false); } } m_nPosX = nPosX; m_nPosY = nPosY; popupMenu.setLocation(me.getX(), me.getY()); popupMenu.show(m_GraphPanel, me.getX(), me.getY()); } // handleRightClick /* * pop up menu with actions that apply to node that was clicked on */ void handleRightNodeClick(MouseEvent me) { m_Selection.clear(); repaint(); ActionListener renameValueAction = new ActionListener() { @Override public void actionPerformed(ActionEvent ae) { renameValue(m_nCurrentNode, ae.getActionCommand()); } }; ActionListener delValueAction = new ActionListener() { @Override public void actionPerformed(ActionEvent ae) { delValue(m_nCurrentNode, ae.getActionCommand()); } }; ActionListener addParentAction = new ActionListener() { @Override public void actionPerformed(ActionEvent ae) { try { m_BayesNet.addArc(ae.getActionCommand(), m_BayesNet.getNodeName(m_nCurrentNode)); m_jStatusBar.setText(m_BayesNet.lastActionMsg()); updateStatus(); } catch (Exception e) { e.printStackTrace(); } } }; ActionListener delParentAction = new ActionListener() { @Override public void actionPerformed(ActionEvent ae) { deleteArc(m_nCurrentNode, ae.getActionCommand()); } }; ActionListener delChildAction = new ActionListener() { @Override public void actionPerformed(ActionEvent ae) { deleteArc(ae.getActionCommand(), m_nCurrentNode); } }; ActionListener setAvidenceAction = new ActionListener() { @Override public void actionPerformed(ActionEvent ae) { try { String[] outcomes = m_BayesNet.getValues(m_nCurrentNode); int iValue = 0; while (iValue < outcomes.length && 
!outcomes[iValue].equals(ae.getActionCommand())) { iValue++; } if (iValue == outcomes.length) { iValue = -1; } if (iValue < outcomes.length) { m_jStatusBar.setText("Set evidence for " + m_BayesNet.getNodeName(m_nCurrentNode)); if (m_BayesNet.getEvidence(m_nCurrentNode) < 0 && iValue >= 0) { m_BayesNet.setEvidence(m_nCurrentNode, iValue); m_marginCalculatorWithEvidence.setEvidence(m_nCurrentNode, iValue); } else { m_BayesNet.setEvidence(m_nCurrentNode, iValue); SerializedObject so = new SerializedObject(m_marginCalculator); m_marginCalculatorWithEvidence = (MarginCalculator) so .getObject(); for (int iNode = 0; iNode < m_BayesNet.getNrOfNodes(); iNode++) { if (m_BayesNet.getEvidence(iNode) >= 0) { m_marginCalculatorWithEvidence.setEvidence(iNode, m_BayesNet.getEvidence(iNode)); } } } for (int iNode = 0; iNode < m_BayesNet.getNrOfNodes(); iNode++) { m_BayesNet.setMargin(iNode, m_marginCalculatorWithEvidence.getMargin(iNode)); } } } catch (Exception e) { e.printStackTrace(); } repaint(); } }; ActionListener act = new ActionListener() { @Override public void actionPerformed(ActionEvent ae) { if (ae.getActionCommand().equals("Rename")) { renameNode(m_nCurrentNode); return; } if (ae.getActionCommand().equals("Add parent")) { addArcInto(m_nCurrentNode); return; } if (ae.getActionCommand().equals("Add value")) { addValue(); return; } if (ae.getActionCommand().equals("Delete node")) { deleteNode(m_nCurrentNode); return; } if (ae.getActionCommand().equals("Edit CPT")) { editCPT(m_nCurrentNode); return; } repaint(); } }; try { JPopupMenu popupMenu = new JPopupMenu("Choose a value"); JMenu setEvidenceMenu = new JMenu("Set evidence"); String[] outcomes = m_BayesNet.getValues(m_nCurrentNode); for (String outcome : outcomes) { JMenuItem item = new JMenuItem(outcome); item.addActionListener(setAvidenceAction); setEvidenceMenu.add(item); } setEvidenceMenu.addSeparator(); JMenuItem item = new JMenuItem("Clear"); item.addActionListener(setAvidenceAction); setEvidenceMenu.add(item); 
popupMenu.add(setEvidenceMenu); setEvidenceMenu.setEnabled(m_bViewMargins); popupMenu.addSeparator(); JMenuItem renameItem = new JMenuItem("Rename"); renameItem.addActionListener(act); popupMenu.add(renameItem); JMenuItem delNodeItem = new JMenuItem("Delete node"); delNodeItem.addActionListener(act); popupMenu.add(delNodeItem); JMenuItem editCPTItem = new JMenuItem("Edit CPT"); editCPTItem.addActionListener(act); popupMenu.add(editCPTItem); popupMenu.addSeparator(); JMenu addArcMenu = new JMenu("Add parent"); popupMenu.add(addArcMenu); int nNodes = m_BayesNet.getNrOfNodes(); boolean[] isNotAllowedAsParent = new boolean[nNodes]; // prevent it being a parent of itself isNotAllowedAsParent[m_nCurrentNode] = true; // prevent a descendant being a parent, since it introduces cycles for (int i = 0; i < nNodes; i++) { for (int iNode = 0; iNode < nNodes; iNode++) { for (int iParent = 0; iParent < m_BayesNet.getNrOfParents(iNode); iParent++) { if (isNotAllowedAsParent[m_BayesNet.getParent(iNode, iParent)]) { isNotAllowedAsParent[iNode] = true; } } } } // prevent nodes that are already a parent for (int iParent = 0; iParent < m_BayesNet .getNrOfParents(m_nCurrentNode); iParent++) { isNotAllowedAsParent[m_BayesNet.getParent(m_nCurrentNode, iParent)] = true; } // count nr of remaining candidates int nCandidates = 0; for (int i = 0; i < nNodes; i++) { if (!isNotAllowedAsParent[i]) { item = new JMenuItem(m_BayesNet.getNodeName(i)); item.addActionListener(addParentAction); addArcMenu.add(item); nCandidates++; } } if (nCandidates == 0) { addArcMenu.setEnabled(false); } JMenu delArcMenu = new JMenu("Delete parent"); popupMenu.add(delArcMenu); if (m_BayesNet.getNrOfParents(m_nCurrentNode) == 0) { delArcMenu.setEnabled(false); } for (int iParent = 0; iParent < m_BayesNet .getNrOfParents(m_nCurrentNode); iParent++) { item = new JMenuItem(m_BayesNet.getNodeName(m_BayesNet.getParent( m_nCurrentNode, iParent))); item.addActionListener(delParentAction); delArcMenu.add(item); } JMenu 
delChildMenu = new JMenu("Delete child"); popupMenu.add(delChildMenu); ArrayList<Integer> nChildren = m_BayesNet.getChildren(m_nCurrentNode); if (nChildren.size() == 0) { delChildMenu.setEnabled(false); } for (int iChild = 0; iChild < nChildren.size(); iChild++) { item = new JMenuItem(m_BayesNet.getNodeName(nChildren.get(iChild))); item.addActionListener(delChildAction); delChildMenu.add(item); } popupMenu.addSeparator(); JMenuItem addValueItem = new JMenuItem("Add value"); addValueItem.addActionListener(act); popupMenu.add(addValueItem); JMenu renameValue = new JMenu("Rename value"); popupMenu.add(renameValue); for (String outcome : outcomes) { item = new JMenuItem(outcome); item.addActionListener(renameValueAction); renameValue.add(item); } JMenu delValue = new JMenu("Delete value"); popupMenu.add(delValue); if (m_BayesNet.getCardinality(m_nCurrentNode) <= 2) { delValue.setEnabled(false); } for (String outcome : outcomes) { JMenuItem delValueItem = new JMenuItem(outcome); delValueItem.addActionListener(delValueAction); delValue.add(delValueItem); } popupMenu.setLocation(me.getX(), me.getY()); popupMenu.show(m_GraphPanel, me.getX(), me.getY()); } catch (Exception e) { e.printStackTrace(); } } // handleRightNodeClick } // class GraphVisualizerMouseListener /** * private class for handling mouseMoved events to highlight nodes if the the * mouse is moved on one, move it around or move selection around */ private class GraphVisualizerMouseMotionListener extends MouseMotionAdapter { /* last node moved over. 
Used for turning highlight on and off */ int m_nLastNode = -1; /* current mouse position clicked */ int m_nPosX, m_nPosY; /* * identify the node under the mouse * * @returns node index of node under mouse, or -1 if there is no such node */ int getGraphNode(MouseEvent me) { m_nPosX = m_nPosY = 0; Rectangle r = new Rectangle(0, 0, (int) (m_nPaddedNodeWidth * m_fScale), (int) (m_nNodeHeight * m_fScale)); m_nPosX += me.getX(); m_nPosY += me.getY(); for (int iNode = 0; iNode < m_BayesNet.getNrOfNodes(); iNode++) { r.x = (int) (m_BayesNet.getPositionX(iNode) * m_fScale); r.y = (int) (m_BayesNet.getPositionY(iNode) * m_fScale); if (r.contains(m_nPosX, m_nPosY)) { return iNode; } } return -1; } // getGraphNode /* * handle mouse dragging event (non-Javadoc) * * @see * java.awt.event.MouseMotionListener#mouseDragged(java.awt.event.MouseEvent * ) */ @Override public void mouseDragged(MouseEvent me) { if (m_nSelectedRect != null) { m_nSelectedRect.width = me.getPoint().x - m_nSelectedRect.x; m_nSelectedRect.height = me.getPoint().y - m_nSelectedRect.y; repaint(); return; } int iNode = getGraphNode(me); if (iNode >= 0) { if (m_Selection.getSelected().size() > 0) { if (m_Selection.getSelected().contains(iNode)) { m_BayesNet.setPosition(iNode, (int) ((m_nPosX / m_fScale - m_nPaddedNodeWidth / 2)), (int) ((m_nPosY / m_fScale - m_nNodeHeight / 2)), m_Selection.getSelected()); } else { m_Selection.clear(); m_BayesNet.setPosition(iNode, (int) ((m_nPosX / m_fScale - m_nPaddedNodeWidth / 2)), (int) ((m_nPosY / m_fScale - m_nNodeHeight / 2))); } repaint(); } else { m_BayesNet.setPosition(iNode, (int) ((m_nPosX / m_fScale - m_nPaddedNodeWidth / 2)), (int) ((m_nPosY / m_fScale - m_nNodeHeight / 2))); } m_jStatusBar.setText(m_BayesNet.lastActionMsg()); a_undo.setEnabled(true); a_redo.setEnabled(false); m_GraphPanel.highLight(iNode); } if (iNode < 0) { if (m_nLastNode >= 0) { m_GraphPanel.repaint(); m_nLastNode = -1; } else { m_nSelectedRect = new Rectangle(me.getPoint().x, me.getPoint().y, 
1, 1);
        m_GraphPanel.repaint();
      }
    }
  } // mouseDragged

  /*
   * handles mouse move event (non-Javadoc)
   *
   * @see
   * java.awt.event.MouseMotionListener#mouseMoved(java.awt.event.MouseEvent)
   */
  @Override
  public void mouseMoved(MouseEvent me) {
    int iNode = getGraphNode(me);
    if (iNode >= 0) {
      if (iNode != m_nLastNode) {
        // Entered a (different) node: highlight it and redraw the
        // previously highlighted node.
        m_GraphPanel.highLight(iNode);
        if (m_nLastNode >= 0) {
          m_GraphPanel.highLight(m_nLastNode);
        }
        m_nLastNode = iNode;
      }
    }
    if (iNode < 0 && m_nLastNode >= 0) {
      // Left all nodes: repaint to clear any leftover highlight.
      m_GraphPanel.repaint();
      m_nLastNode = -1;
    }
  } // mouseMoved

} // class GraphVisualizerMouseMotionListener

/*
 * apply graph layout algorithm to Bayesian network
 */
void layoutGraph() {
  if (m_BayesNet.getNrOfNodes() == 0) {
    return;
  }
  try {
    // Parse the network's XML BIF representation into layout-engine
    // nodes/edges, then run the hierarchical layout; this object is
    // registered as listener and is notified when the layout completes.
    ArrayList<GraphNode> m_nodes = new ArrayList<GraphNode>();
    ArrayList<GraphEdge> m_edges = new ArrayList<GraphEdge>();
    BIFParser bp = new BIFParser(m_BayesNet.toXMLBIF03(), m_nodes, m_edges);
    bp.parse();
    updateStatus();
    m_layoutEngine = new HierarchicalBCEngine(m_nodes, m_edges,
      m_nPaddedNodeWidth, m_nNodeHeight);
    m_layoutEngine.addLayoutCompleteEventListener(this);
    m_layoutEngine.layoutGraph();
  } catch (Exception e) {
    e.printStackTrace();
  }
} // layoutGraph

/*
 * Update status of various items that need regular updating such as enabled
 * status of some menu items, marginal distributions if shown, repainting of
 * graph.
*/
void updateStatus() {
  // Enable/disable menu actions according to the edit history and
  // the current network size.
  a_undo.setEnabled(m_BayesNet.canUndo());
  a_redo.setEnabled(m_BayesNet.canRedo());
  a_datagenerator.setEnabled(m_BayesNet.getNrOfNodes() > 0);
  if (!m_bViewMargins && !m_bViewCliques) {
    // Neither marginals nor cliques are displayed, so the expensive
    // margin recalculation below can be skipped.
    repaint();
    return;
  }
  try {
    // Recompute the marginal distributions, then clone the calculator
    // via serialization so the evidence can be applied to the copy
    // without disturbing the evidence-free original.
    m_marginCalculator = new MarginCalculator();
    m_marginCalculator.calcMargins(m_BayesNet);
    SerializedObject so = new SerializedObject(m_marginCalculator);
    m_marginCalculatorWithEvidence = (MarginCalculator) so.getObject();
    // Re-apply all evidence set on the network (negative value = none).
    for (int iNode = 0; iNode < m_BayesNet.getNrOfNodes(); iNode++) {
      if (m_BayesNet.getEvidence(iNode) >= 0) {
        m_marginCalculatorWithEvidence.setEvidence(iNode,
          m_BayesNet.getEvidence(iNode));
      }
    }
    // Push the evidence-conditioned margins back into the network for display.
    for (int iNode = 0; iNode < m_BayesNet.getNrOfNodes(); iNode++) {
      m_BayesNet.setMargin(iNode,
        m_marginCalculatorWithEvidence.getMargin(iNode));
    }
  } catch (Exception e) {
    e.printStackTrace();
  }
  repaint();
} // updateStatus

/*
 * add arc with node iChild as child. This pops up a selection list with
 * potential parents for the child. All descendants and current parents are
 * excluded from the list as is the child node itself.
* * @param iChild index of the node for which to add an arc */ void addArcInto(int iChild) { String sChild = m_BayesNet.getNodeName(iChild); try { int nNodes = m_BayesNet.getNrOfNodes(); boolean[] isNotAllowedAsParent = new boolean[nNodes]; // prevent it being a parent of itself isNotAllowedAsParent[iChild] = true; // prevent a descendant being a parent, since it introduces cycles for (int i = 0; i < nNodes; i++) { for (int iNode = 0; iNode < nNodes; iNode++) { for (int iParent = 0; iParent < m_BayesNet.getNrOfParents(iNode); iParent++) { if (isNotAllowedAsParent[m_BayesNet.getParent(iNode, iParent)]) { isNotAllowedAsParent[iNode] = true; } } } } // prevent nodes that are already a parent for (int iParent = 0; iParent < m_BayesNet.getNrOfParents(iChild); iParent++) { isNotAllowedAsParent[m_BayesNet.getParent(iChild, iParent)] = true; } // count nr of remaining candidates int nCandidates = 0; for (int i = 0; i < nNodes; i++) { if (!isNotAllowedAsParent[i]) { nCandidates++; } } if (nCandidates == 0) { JOptionPane.showMessageDialog(null, "No potential parents available for this node (" + sChild + "). 
Choose another node as child node."); return; } String[] options = new String[nCandidates]; int k = 0; for (int i = 0; i < nNodes; i++) { if (!isNotAllowedAsParent[i]) { options[k++] = m_BayesNet.getNodeName(i); } } String sParent = (String) JOptionPane.showInputDialog(null, "Select parent node for " + sChild, "Nodes", 0, null, options, options[0]); if (sParent == null || sParent.equals("")) { return; } // update all data structures m_BayesNet.addArc(sParent, sChild); m_jStatusBar.setText(m_BayesNet.lastActionMsg()); updateStatus(); } catch (Exception e) { e.printStackTrace(); } } // addArcInto /* * deletes arc from node with name sParent into child with index iChild */ void deleteArc(int iChild, String sParent) { try { m_BayesNet.deleteArc(m_BayesNet.getNode(sParent), iChild); m_jStatusBar.setText(m_BayesNet.lastActionMsg()); } catch (Exception e) { e.printStackTrace(); } updateStatus(); } // deleteArc /* * deletes arc from node with index iParent into child with name sChild */ void deleteArc(String sChild, int iParent) { try { m_BayesNet.deleteArc(iParent, m_BayesNet.getNode(sChild)); m_jStatusBar.setText(m_BayesNet.lastActionMsg()); } catch (Exception e) { e.printStackTrace(); } updateStatus(); } // deleteArc /* * deletes arc. Pops up list of arcs listed in 'options' as * "<Node1> -> <Node2>". */ void deleteArc(String[] options) { String sResult = (String) JOptionPane.showInputDialog(null, "Select arc to delete", "Arcs", 0, null, options, options[0]); if (sResult != null && !sResult.equals("")) { int nPos = sResult.indexOf(" -> "); String sParent = sResult.substring(0, nPos); String sChild = sResult.substring(nPos + 4); try { m_BayesNet.deleteArc(sParent, sChild); m_jStatusBar.setText(m_BayesNet.lastActionMsg()); } catch (Exception e) { e.printStackTrace(); } updateStatus(); } } // deleteArc /* * Rename node with index nTargetNode. Pops up window that allwos for entering * a new name. 
*/
void renameNode(int nTargetNode) {
  // Ask the user for a new node name; cancel or empty input aborts.
  String sName = JOptionPane.showInputDialog(null,
    m_BayesNet.getNodeName(nTargetNode), "New name for node",
    JOptionPane.OK_CANCEL_OPTION);
  if (sName == null || sName.equals("")) {
    return;
  }
  try {
    // Keep prompting while the chosen name clashes with an existing node.
    while (m_BayesNet.getNode2(sName) >= 0) {
      sName = JOptionPane.showInputDialog(null, "Cannot rename to " + sName
        + ".\nNode with that name already exists.");
      if (sName == null || sName.equals("")) {
        return;
      }
    }
    m_BayesNet.setNodeName(nTargetNode, sName);
    m_jStatusBar.setText(m_BayesNet.lastActionMsg());
  } catch (Exception e) {
    e.printStackTrace();
  }
  repaint();
} // renameNode

/*
 * Rename value with name sValue of a node with index nTargetNode. Pops up
 * window that allows entering a new name.
 */
void renameValue(int nTargetNode, String sValue) {
  String sNewValue = JOptionPane.showInputDialog(null, "New name for value "
    + sValue, "Node " + m_BayesNet.getNodeName(nTargetNode),
    JOptionPane.OK_CANCEL_OPTION);
  if (sNewValue == null || sNewValue.equals("")) {
    return;
  }
  m_BayesNet.renameNodeValue(nTargetNode, sValue, sNewValue);
  m_jStatusBar.setText(m_BayesNet.lastActionMsg());
  // Renaming a value is undoable; refresh the undo/redo actions.
  a_undo.setEnabled(true);
  a_redo.setEnabled(false);
  repaint();
} // renameValue

/* delete a single node with index iNode */
void deleteNode(int iNode) {
  try {
    m_BayesNet.deleteNode(iNode);
    m_jStatusBar.setText(m_BayesNet.lastActionMsg());
  } catch (Exception e) {
    e.printStackTrace();
  }
  updateStatus();
} // deleteNode

/*
 * Add a value to currently selected node. Shows window that allows to enter
 * the name of the value.
*/
void addValue() {
  // Suggest a default name derived from the node's current cardinality.
  String sValue = "Value" + (m_BayesNet.getCardinality(m_nCurrentNode) + 1);
  String sNewValue = JOptionPane.showInputDialog(null, "New value " + sValue,
    "Node " + m_BayesNet.getNodeName(m_nCurrentNode),
    JOptionPane.OK_CANCEL_OPTION);
  if (sNewValue == null || sNewValue.equals("")) {
    // Cancelled or empty: leave the node unchanged.
    return;
  }
  try {
    m_BayesNet.addNodeValue(m_nCurrentNode, sNewValue);
    m_jStatusBar.setText(m_BayesNet.lastActionMsg());
  } catch (Exception e) {
    e.printStackTrace();
  }
  updateStatus();
} // addValue

/*
 * remove value with name sValue from the node with index nTargetNode
 */
void delValue(int nTargetNode, String sValue) {
  try {
    m_BayesNet.delNodeValue(nTargetNode, sValue);
    m_jStatusBar.setText(m_BayesNet.lastActionMsg());
  } catch (Exception e) {
    e.printStackTrace();
  }
  updateStatus();
} // delValue

/*
 * Edits CPT of node with index nTargetNode. Pops up table with probability
 * table that the user can change or just view.
*/ void editCPT(int nTargetNode) { m_nCurrentNode = nTargetNode; final GraphVisualizerTableModel tm = new GraphVisualizerTableModel( nTargetNode); JTable jTblProbs = new JTable(tm); JScrollPane js = new JScrollPane(jTblProbs); int nParents = m_BayesNet.getNrOfParents(nTargetNode); if (nParents > 0) { GridBagConstraints gbc = new GridBagConstraints(); JPanel jPlRowHeader = new JPanel(new GridBagLayout()); // indices of the parent nodes in the Vector int[] idx = new int[nParents]; // max length of values of each parent int[] lengths = new int[nParents]; // Adding labels for rows gbc.anchor = GridBagConstraints.NORTHWEST; gbc.fill = GridBagConstraints.HORIZONTAL; gbc.insets = new Insets(0, 1, 0, 0); int addNum = 0, temp = 0; boolean dark = false; while (true) { gbc.gridwidth = 1; for (int k = 0; k < nParents; k++) { int iParent2 = m_BayesNet.getParent(nTargetNode, k); JLabel lb = new JLabel(m_BayesNet.getValueName(iParent2, idx[k])); lb.setFont(new Font("Dialog", Font.PLAIN, 12)); lb.setOpaque(true); lb.setBorder(BorderFactory.createEmptyBorder(1, 2, 1, 1)); lb.setHorizontalAlignment(JLabel.CENTER); if (dark) { lb.setBackground(lb.getBackground().darker()); lb.setForeground(Color.white); } else { lb.setForeground(Color.black); } temp = lb.getPreferredSize().width; lb.setPreferredSize(new Dimension(temp, jTblProbs.getRowHeight())); if (lengths[k] < temp) { lengths[k] = temp; } temp = 0; if (k == nParents - 1) { gbc.gridwidth = GridBagConstraints.REMAINDER; dark = (dark == true) ? 
false : true; } jPlRowHeader.add(lb, gbc); addNum++; } for (int k = nParents - 1; k >= 0; k--) { int iParent2 = m_BayesNet.getParent(m_nCurrentNode, k); if (idx[k] == m_BayesNet.getCardinality(iParent2) - 1 && k != 0) { idx[k] = 0; continue; } else { idx[k]++; break; } } int iParent2 = m_BayesNet.getParent(m_nCurrentNode, 0); if (idx[0] == m_BayesNet.getCardinality(iParent2)) { JLabel lb = (JLabel) jPlRowHeader.getComponent(addNum - 1); jPlRowHeader.remove(addNum - 1); lb.setPreferredSize(new Dimension(lb.getPreferredSize().width, jTblProbs.getRowHeight())); gbc.gridwidth = GridBagConstraints.REMAINDER; gbc.weighty = 1; jPlRowHeader.add(lb, gbc); gbc.weighty = 0; break; } } gbc.gridwidth = 1; // The following panel contains the names of the // parents // and is displayed above the row names to identify // which value belongs to which parent JPanel jPlRowNames = new JPanel(new GridBagLayout()); for (int j = 0; j < nParents; j++) { JLabel lb2; JLabel lb1 = new JLabel(m_BayesNet.getNodeName(m_BayesNet.getParent( nTargetNode, j))); lb1.setBorder(BorderFactory.createEmptyBorder(1, 2, 1, 1)); Dimension tempd = lb1.getPreferredSize(); if (tempd.width < lengths[j]) { lb1.setPreferredSize(new Dimension(lengths[j], tempd.height)); lb1.setHorizontalAlignment(JLabel.CENTER); lb1.setMinimumSize(new Dimension(lengths[j], tempd.height)); } else if (tempd.width > lengths[j]) { lb2 = (JLabel) jPlRowHeader.getComponent(j); lb2.setPreferredSize(new Dimension(tempd.width, lb2 .getPreferredSize().height)); } jPlRowNames.add(lb1, gbc); } js.setRowHeaderView(jPlRowHeader); js.setCorner(JScrollPane.UPPER_LEFT_CORNER, jPlRowNames); } final JDialog dlg = new JDialog((Frame) GUI.this.getTopLevelAncestor(), "Probability Distribution Table For " + m_BayesNet.getNodeName(nTargetNode), true); /*dlg.setLocation(GUI.this.getLocation().x + GUI.this.getWidth() / 2 - 250, GUI.this.getLocation().y + GUI.this.getHeight() / 2 - 200);*/ dlg.getContentPane().setLayout(new BorderLayout()); 
dlg.getContentPane().add(js, BorderLayout.CENTER); JButton jBtRandomize = new JButton("Randomize"); jBtRandomize.setMnemonic('R'); jBtRandomize.addActionListener(new ActionListener() { @Override public void actionPerformed(ActionEvent ae) { tm.randomize(); dlg.repaint(); } }); JButton jBtOk = new JButton("Ok"); jBtOk.setMnemonic('O'); jBtOk.addActionListener(new ActionListener() { @Override public void actionPerformed(ActionEvent ae) { tm.setData(); try { m_BayesNet.setDistribution(m_nCurrentNode, tm.m_fProbs); m_jStatusBar.setText(m_BayesNet.lastActionMsg()); updateStatus(); } catch (Exception e) { e.printStackTrace(); } dlg.setVisible(false); } }); JButton jBtCancel = new JButton("Cancel"); jBtCancel.setMnemonic('C'); jBtCancel.addActionListener(new ActionListener() { @Override public void actionPerformed(ActionEvent ae) { dlg.setVisible(false); } }); Container c = new Container(); c.setLayout(new GridBagLayout()); c.add(jBtRandomize); c.add(jBtOk); c.add(jBtCancel); dlg.getContentPane().add(c, BorderLayout.SOUTH); dlg.pack(); dlg.setSize(450, 350); dlg.setLocationRelativeTo(SwingUtilities.getWindowAncestor(this)); dlg.setVisible(true); } // editCPT /** * Main method. Builds up menus and reads from file if one is specified. */ public static void main(String[] args) { weka.core.logging.Logger.log(weka.core.logging.Logger.Level.INFO, "Logging started"); LookAndFeel.setLookAndFeel(); JFrame jf = new JFrame("Bayes Network Editor"); final GUI g = new GUI(); JMenuBar menuBar = g.getMenuBar(); if (args.length > 0) { try { g.readBIFFromFile(args[0]); } catch (IOException ex) { ex.printStackTrace(); } catch (BIFFormatException bf) { bf.printStackTrace(); } } jf.setJMenuBar(menuBar); jf.getContentPane().add(g); jf.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); jf.setSize(800, 600); jf.setVisible(true); g.m_Selection.updateGUI(); GenericObjectEditor.registerEditors(); } // main } // end of class
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/MarginCalculator.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * MarginCalculator.java
 * Copyright (C) 2007-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net;

import java.io.Serializable;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.Vector;

import weka.classifiers.bayes.BayesNet;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;

/**
 * Computes marginal distributions for the nodes of a {@link BayesNet} via the
 * junction-tree algorithm: the DAG is moralized, triangulated (fill-in),
 * decomposed into cliques, and a clique tree is built over which potentials
 * are propagated. Evidence can then be set on individual nodes and the
 * marginals of all other nodes are updated by message passing.
 */
public class MarginCalculator implements Serializable, RevisionHandler {
  /** for serialization */
  private static final long serialVersionUID = 650278019241175534L;

  /** when true, prints clique/separator construction details to stdout */
  boolean m_debug = false;

  /** root clique of the junction tree (set by {@link #process}) */
  public JunctionTreeNode m_root = null;

  /** junction tree nodes, indexed by the graph-node index that "owns" the clique; entries may be null */
  JunctionTreeNode[] jtNodes;

  /**
   * Looks up the index of the network node with the given attribute name.
   *
   * @param sNodeName name of the node to find
   * @return index of the node, or -1 when no attribute with that name exists
   */
  public int getNode(final String sNodeName) {
    int iNode = 0;
    while (iNode < this.m_root.m_bayesNet.m_Instances.numAttributes()) {
      if (this.m_root.m_bayesNet.m_Instances.attribute(iNode).name().equals(sNodeName)) {
        return iNode;
      }
      iNode++;
    }
    // throw new Exception("Could not find node [[" + sNodeName + "]]");
    return -1;
  }

  /**
   * Returns the XML BIF 0.3 representation of the underlying Bayes network.
   *
   * @return XML BIF 0.3 string of the network attached to the root clique
   */
  public String toXMLBIF03() {
    return this.m_root.m_bayesNet.toXMLBIF03();
  }

  /**
   * Calculates the marginal distributions of all nodes in the Bayesian
   * network. Note that a connected network is assumed; unconnected networks
   * may give unexpected results.
   *
   * @param bayesNet the network to compute marginals for
   * @throws Exception if clique-tree construction fails or the thread is interrupted
   */
  public void calcMargins(final BayesNet bayesNet) throws Exception {
    // System.out.println(bayesNet.toString());
    boolean[][] bAdjacencyMatrix = this.moralize(bayesNet);
    this.process(bAdjacencyMatrix, bayesNet);
  } // calcMargins

  /**
   * Calculates marginals using the fully connected graph instead of the
   * moralized one, i.e. a single clique containing every node. Exact but
   * exponential in the number of nodes.
   *
   * @param bayesNet the network to compute marginals for
   * @throws Exception if clique-tree construction fails or the thread is interrupted
   */
  public void calcFullMargins(final BayesNet bayesNet) throws Exception {
    // System.out.println(bayesNet.toString());
    int nNodes = bayesNet.getNrOfNodes();
    boolean[][] bAdjacencyMatrix = new boolean[nNodes][nNodes];
    for (int iNode = 0; iNode < nNodes; iNode++) {
      for (int iNode2 = 0; iNode2 < nNodes; iNode2++) {
        bAdjacencyMatrix[iNode][iNode2] = true;
      }
    }
    this.process(bAdjacencyMatrix, bayesNet);
  } // calcMargins

  /**
   * Builds the junction tree from an (already moralized) adjacency matrix and
   * initializes all marginals: triangulate, find cliques/separators, link the
   * clique tree, then propagate potentials up and down.
   *
   * @param bAdjacencyMatrix moralized undirected adjacency matrix (modified in place by fill-in)
   * @param bayesNet the network providing CPTs and cardinalities
   * @throws Exception if the resulting clique tree is inconsistent
   */
  public void process(boolean[][] bAdjacencyMatrix, final BayesNet bayesNet) throws Exception {
    int[] order = this.getMaxCardOrder(bAdjacencyMatrix);
    bAdjacencyMatrix = this.fillIn(order, bAdjacencyMatrix);
    // re-derive the ordering on the triangulated graph
    order = this.getMaxCardOrder(bAdjacencyMatrix);
    Set<Integer>[] cliques = this.getCliques(order, bAdjacencyMatrix);
    Set<Integer>[] separators = this.getSeparators(order, cliques);
    int[] parentCliques = this.getCliqueTree(order, cliques, separators);
    // report cliques
    int nNodes = bAdjacencyMatrix.length;
    if (this.m_debug) {
      for (int i = 0; i < nNodes; i++) {
        int iNode = order[i];
        if (cliques[iNode] != null) {
          Iterator<Integer> nodes = cliques[iNode].iterator();
          while (nodes.hasNext()) {
            int iNode2 = nodes.next();
            System.out.print(iNode2 + " " + bayesNet.getNodeName(iNode2));
            if (nodes.hasNext()) {
              System.out.print(",");
            }
          }
          System.out.print(") S(");
          nodes = separators[iNode].iterator();
          while (nodes.hasNext()) {
            int iNode2 = nodes.next();
            System.out.print(iNode2 + " " + bayesNet.getNodeName(iNode2));
            if (nodes.hasNext()) {
              System.out.print(",");
            }
          }
          System.out.println(") parent clique " + parentCliques[iNode]);
        }
      }
    }
    this.jtNodes = this.getJunctionTree(cliques, separators, parentCliques, order, bayesNet);
    // the clique without a parent becomes the root of the tree
    this.m_root = null;
    for (int iNode = 0; iNode < nNodes; iNode++) {
      if (parentCliques[iNode] < 0 && this.jtNodes[iNode] != null) {
        this.m_root = this.jtNodes[iNode];
        break;
      }
    }
    this.m_Margins = new double[nNodes][];
    this.initialize(this.jtNodes, order, cliques, separators, parentCliques);

    // sanity check: a non-root clique must have a parent it can message through
    for (int i = 0; i < nNodes; i++) {
      int iNode = order[i];
      if (cliques[iNode] != null) {
        if (parentCliques[iNode] == -1 && separators[iNode].size() > 0) {
          throw new Exception("Something wrong in clique tree");
        }
      }
    }
    if (this.m_debug) {
      // System.out.println(m_root.toString());
    }
  } // process

  /**
   * Runs the two message-passing sweeps: collect (leaves to root, in reverse
   * elimination order) followed by distribute (root to leaves).
   *
   * @param jtNodes junction tree nodes indexed by graph node
   * @param order maximum cardinality ordering
   * @param cliques clique sets (unused here, kept for symmetry with process)
   * @param separators separator sets (unused here)
   * @param parentCliques parent links of the clique tree (unused here)
   */
  void initialize(final JunctionTreeNode[] jtNodes, final int[] order, final Set<Integer>[] cliques, final Set<Integer>[] separators, final int[] parentCliques) {
    int nNodes = order.length;
    // upward (collect) pass in reverse ordering
    for (int i = nNodes - 1; i >= 0; i--) {
      int iNode = order[i];
      if (jtNodes[iNode] != null) {
        jtNodes[iNode].initializeUp();
      }
    }
    // downward (distribute) pass; non-recursive since every clique is visited here
    for (int i = 0; i < nNodes; i++) {
      int iNode = order[i];
      if (jtNodes[iNode] != null) {
        jtNodes[iNode].initializeDown(false);
      }
    }
  } // initialize

  /**
   * Instantiates the junction tree: one {@link JunctionTreeNode} per clique,
   * then wires parent/child links with {@link JunctionTreeSeparator}s.
   *
   * @param cliques clique sets, null where no clique is anchored
   * @param separators separator sets per clique
   * @param parentCliques parent clique index per clique (-1 for root)
   * @param order maximum cardinality ordering
   * @param bayesNet the network providing CPTs and cardinalities
   * @return array of junction tree nodes indexed by graph node (null entries allowed)
   */
  JunctionTreeNode[] getJunctionTree(final Set<Integer>[] cliques, final Set<Integer>[] separators, final int[] parentCliques, final int[] order, final BayesNet bayesNet) {
    int nNodes = order.length;
    JunctionTreeNode[] jtns = new JunctionTreeNode[nNodes];
    boolean[] bDone = new boolean[nNodes];
    // create junction tree nodes
    for (int i = 0; i < nNodes; i++) {
      int iNode = order[i];
      if (cliques[iNode] != null) {
        jtns[iNode] = new JunctionTreeNode(cliques[iNode], bayesNet, bDone);
      }
    }
    // create junction tree separators
    for (int i = 0; i < nNodes; i++) {
      int iNode = order[i];
      if (cliques[iNode] != null) {
        JunctionTreeNode parent = null;
        // NOTE(review): tests "> 0" while process() treats "< 0" as root; a parent
        // clique anchored at node index 0 would be silently skipped here,
        // leaving its child unlinked — looks like a latent inconsistency, TODO confirm.
        if (parentCliques[iNode] > 0) {
          parent = jtns[parentCliques[iNode]];
          JunctionTreeSeparator jts = new JunctionTreeSeparator(separators[iNode], bayesNet, jtns[iNode], parent);
          jtns[iNode].setParentSeparator(jts);
          jtns[parentCliques[iNode]].addChildClique(jtns[iNode]);
        } else {
          // no parent link created (root, or the skipped index-0 case above)
        }
      }
    }
    return jtns;
  } // getJunctionTree

  /**
   * Separator between a parent and a child clique in the junction tree. Holds
   * the two directed messages (parent-to-child and child-to-parent) as
   * normalized potentials over the separator variables.
   */
  public class JunctionTreeSeparator implements Serializable, RevisionHandler {

    private static final long serialVersionUID = 6502780192411755343L;

    /** graph-node indices forming the separator set */
    int[] m_nNodes;
    /** product of the cardinalities of the separator variables */
    int m_nCardinality;
    /** message from the parent clique (normalized), or null if not yet available */
    double[] m_fiParent;
    /** message from the child clique (normalized), or null if not yet available */
    double[] m_fiChild;
    JunctionTreeNode m_parentNode;
    JunctionTreeNode m_childNode;
    BayesNet m_bayesNet;

    /**
     * Creates a separator over the given variable set linking a child clique
     * to its parent clique.
     *
     * @param separator variables shared by parent and child clique
     * @param bayesNet network providing cardinalities
     * @param childNode child clique
     * @param parentNode parent clique
     */
    JunctionTreeSeparator(final Set<Integer> separator, final BayesNet bayesNet, final JunctionTreeNode childNode, final JunctionTreeNode parentNode) {
      // ////////////////////
      // initialize node set
      this.m_nNodes = new int[separator.size()];
      int iPos = 0;
      this.m_nCardinality = 1;
      for (Integer element : separator) {
        int iNode = element;
        this.m_nNodes[iPos++] = iNode;
        this.m_nCardinality *= bayesNet.getCardinality(iNode);
      }
      this.m_parentNode = parentNode;
      this.m_childNode = childNode;
      this.m_bayesNet = bayesNet;
    } // c'tor

    /**
     * Marginalizes the parent clique's distribution over all variables outside
     * the separator set and stores the normalized result as the
     * parent-to-child message.
     */
    public void updateFromParent() {
      double[] fis = this.update(this.m_parentNode);
      if (fis == null) {
        this.m_fiParent = null;
      } else {
        this.m_fiParent = fis;
        // normalize
        double sum = 0;
        for (int iPos = 0; iPos < this.m_nCardinality; iPos++) {
          sum += this.m_fiParent[iPos];
        }
        for (int iPos = 0; iPos < this.m_nCardinality; iPos++) {
          this.m_fiParent[iPos] /= sum;
        }
      }
    } // updateFromParent

    /**
     * Marginalizes the child clique's distribution over all variables outside
     * the separator set and stores the normalized result as the
     * child-to-parent message.
     */
    public void updateFromChild() {
      double[] fis = this.update(this.m_childNode);
      if (fis == null) {
        this.m_fiChild = null;
      } else {
        this.m_fiChild = fis;
        // normalize
        double sum = 0;
        for (int iPos = 0; iPos < this.m_nCardinality; iPos++) {
          sum += this.m_fiChild[iPos];
        }
        for (int iPos = 0; iPos < this.m_nCardinality; iPos++) {
          this.m_fiChild[iPos] /= sum;
        }
      }
    } // updateFromChild

    /**
     * Marginalizes the given clique's distribution over all variables outside
     * this separator set (unnormalized).
     *
     * @param node one of the two neighboring junction tree nodes of this separator
     * @return the marginal potential over the separator variables, or null when
     *         the clique distribution has not been computed yet
     */
    public double[] update(final JunctionTreeNode node) {
      if (node.m_P == null) {
        return null;
      }
      double[] fi = new double[this.m_nCardinality];
      // "values" acts as an odometer over all joint configurations of the clique
      int[] values = new int[node.m_nNodes.length];
      int[] order = new int[this.m_bayesNet.getNrOfNodes()];
      for (int iNode = 0; iNode < node.m_nNodes.length; iNode++) {
        order[node.m_nNodes[iNode]] = iNode;
      }
      // fill in the values
      for (int iPos = 0; iPos < node.m_nCardinality; iPos++) {
        int iNodeCPT = MarginCalculator.this.getCPT(node.m_nNodes, node.m_nNodes.length, values, order, this.m_bayesNet);
        int iSepCPT = MarginCalculator.this.getCPT(this.m_nNodes, this.m_nNodes.length, values, order, this.m_bayesNet);
        fi[iSepCPT] += node.m_P[iNodeCPT];
        // advance the odometer to the next joint configuration
        int i = 0;
        values[i]++;
        while (i < node.m_nNodes.length && values[i] == this.m_bayesNet.getCardinality(node.m_nNodes[i])) {
          values[i] = 0;
          i++;
          if (i < node.m_nNodes.length) {
            values[i]++;
          }
        }
      }
      return fi;
    } // update

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    @Override
    public String getRevision() {
      return RevisionUtils.extract("$Revision$");
    }
  } // class JunctionTreeSeparator

  /**
   * A clique node in the junction tree: holds the clique's potential, its
   * current (normalized) joint distribution, the per-variable marginals, and
   * links to its parent separator and child cliques.
   */
  public class JunctionTreeNode implements Serializable, RevisionHandler {

    private static final long serialVersionUID = 650278019241175536L;

    /**
     * reference Bayes net for information about variables like name,
     * cardinality, etc. but not for relations between nodes
     **/
    BayesNet m_bayesNet;
    /** nodes of the Bayes net in this junction node **/
    public int[] m_nNodes;
    /** cardinality of the instances of variables in this junction node **/
    int m_nCardinality;
    /** potentials for first network **/
    double[] m_fi;
    /** distribution over this junction node according to first Bayes network **/
    double[] m_P;
    /** per-variable marginals of this clique; m_MarginalP[i][v] = P(m_nNodes[i] = v) */
    double[][] m_MarginalP;

    /** separator connecting this clique to its parent; null for the root */
    JunctionTreeSeparator m_parentSeparator;

    public void setParentSeparator(final JunctionTreeSeparator parentSeparator) {
      this.m_parentSeparator = parentSeparator;
    }

    /** child cliques in the junction tree */
    public Vector<JunctionTreeNode> m_children;

    public void addChildClique(final JunctionTreeNode child) {
      this.m_children.add(child);
    }

    /**
     * Collect step: initializes this clique's distribution from its potential,
     * multiplies in the child-to-parent messages of all children, normalizes,
     * and sends the resulting message up through the parent separator.
     */
    public void initializeUp() {
      this.m_P = new double[this.m_nCardinality];
      for (int iPos = 0; iPos < this.m_nCardinality; iPos++) {
        this.m_P[iPos] = this.m_fi[iPos];
      }
      int[] values = new int[this.m_nNodes.length];
      int[] order = new int[this.m_bayesNet.getNrOfNodes()];
      for (int iNode = 0; iNode < this.m_nNodes.length; iNode++) {
        order[this.m_nNodes[iNode]] = iNode;
      }
      for (JunctionTreeNode element : this.m_children) {
        JunctionTreeNode childNode = element;
        JunctionTreeSeparator separator = childNode.m_parentSeparator;
        // Update the values
        for (int iPos = 0; iPos < this.m_nCardinality; iPos++) {
          int iSepCPT = MarginCalculator.this.getCPT(separator.m_nNodes, separator.m_nNodes.length, values, order, this.m_bayesNet);
          int iNodeCPT = MarginCalculator.this.getCPT(this.m_nNodes, this.m_nNodes.length, values, order, this.m_bayesNet);
          this.m_P[iNodeCPT] *= separator.m_fiChild[iSepCPT];
          // advance the odometer; a full sweep leaves "values" back at all zeros
          int i = 0;
          values[i]++;
          while (i < this.m_nNodes.length && values[i] == this.m_bayesNet.getCardinality(this.m_nNodes[i])) {
            values[i] = 0;
            i++;
            if (i < this.m_nNodes.length) {
              values[i]++;
            }
          }
        }
      }
      // normalize
      double sum = 0;
      for (int iPos = 0; iPos < this.m_nCardinality; iPos++) {
        sum += this.m_P[iPos];
      }
      for (int iPos = 0; iPos < this.m_nCardinality; iPos++) {
        this.m_P[iPos] /= sum;
      }
      if (this.m_parentSeparator != null) { // not a root node
        this.m_parentSeparator.updateFromChild();
      }
    } // initializeUp

    /**
     * Distribute step: absorbs the parent-to-child message (dividing out the
     * child-to-parent message previously sent), renormalizes, refreshes the
     * marginals, and optionally recurses into the children.
     *
     * @param recursively when true, propagate the distribute step down the subtree
     */
    public void initializeDown(final boolean recursively) {
      if (this.m_parentSeparator == null) { // a root node
        this.calcMarginalProbabilities();
      } else {
        this.m_parentSeparator.updateFromParent();
        int[] values = new int[this.m_nNodes.length];
        int[] order = new int[this.m_bayesNet.getNrOfNodes()];
        for (int iNode = 0; iNode < this.m_nNodes.length; iNode++) {
          order[this.m_nNodes[iNode]] = iNode;
        }
        // Update the values
        for (int iPos = 0; iPos < this.m_nCardinality; iPos++) {
          int iSepCPT = MarginCalculator.this.getCPT(this.m_parentSeparator.m_nNodes, this.m_parentSeparator.m_nNodes.length, values, order, this.m_bayesNet);
          int iNodeCPT = MarginCalculator.this.getCPT(this.m_nNodes, this.m_nNodes.length, values, order, this.m_bayesNet);
          // scale by fiParent/fiChild; a zero child message forces a zero entry
          if (this.m_parentSeparator.m_fiChild[iSepCPT] > 0) {
            this.m_P[iNodeCPT] *= this.m_parentSeparator.m_fiParent[iSepCPT] / this.m_parentSeparator.m_fiChild[iSepCPT];
          } else {
            this.m_P[iNodeCPT] = 0;
          }
          // advance the odometer
          int i = 0;
          values[i]++;
          while (i < this.m_nNodes.length && values[i] == this.m_bayesNet.getCardinality(this.m_nNodes[i])) {
            values[i] = 0;
            i++;
            if (i < this.m_nNodes.length) {
              values[i]++;
            }
          }
        }
        // normalize
        double sum = 0;
        for (int iPos = 0; iPos < this.m_nCardinality; iPos++) {
          sum += this.m_P[iPos];
        }
        for (int iPos = 0; iPos < this.m_nCardinality; iPos++) {
          this.m_P[iPos] /= sum;
        }
        this.m_parentSeparator.updateFromChild();
        this.calcMarginalProbabilities();
      }
      if (recursively) {
        for (Object element : this.m_children) {
          JunctionTreeNode childNode = (JunctionTreeNode) element;
          childNode.initializeDown(true);
        }
      }
    } // initializeDown

    /**
     * Calculates marginal probabilities for the individual nodes in the
     * clique from the joint distribution m_P. Stores the results in
     * m_MarginalP and publishes them into the outer m_Margins array.
     */
    void calcMarginalProbabilities() {
      // calculate marginal probabilities
      int[] values = new int[this.m_nNodes.length];
      int[] order = new int[this.m_bayesNet.getNrOfNodes()];
      this.m_MarginalP = new double[this.m_nNodes.length][];
      for (int iNode = 0; iNode < this.m_nNodes.length; iNode++) {
        order[this.m_nNodes[iNode]] = iNode;
        this.m_MarginalP[iNode] = new double[this.m_bayesNet.getCardinality(this.m_nNodes[iNode])];
      }
      for (int iPos = 0; iPos < this.m_nCardinality; iPos++) {
        int iNodeCPT = MarginCalculator.this.getCPT(this.m_nNodes, this.m_nNodes.length, values, order, this.m_bayesNet);
        for (int iNode = 0; iNode < this.m_nNodes.length; iNode++) {
          this.m_MarginalP[iNode][values[iNode]] += this.m_P[iNodeCPT];
        }
        // advance the odometer
        int i = 0;
        values[i]++;
        while (i < this.m_nNodes.length && values[i] == this.m_bayesNet.getCardinality(this.m_nNodes[i])) {
          values[i] = 0;
          i++;
          if (i < this.m_nNodes.length) {
            values[i]++;
          }
        }
      }
      // publish this clique's marginals to the calculator-level result array
      for (int iNode = 0; iNode < this.m_nNodes.length; iNode++) {
        MarginCalculator.this.m_Margins[this.m_nNodes[iNode]] = this.m_MarginalP[iNode];
      }
    } // calcMarginalProbabilities

    /**
     * Renders this clique's marginals (one line per variable) followed by the
     * subtrees of all children.
     */
    @Override
    public String toString() {
      StringBuffer buf = new StringBuffer();
      for (int iNode = 0; iNode < this.m_nNodes.length; iNode++) {
        buf.append(this.m_bayesNet.getNodeName(this.m_nNodes[iNode]) + ": ");
        for (int iValue = 0; iValue < this.m_MarginalP[iNode].length; iValue++) {
          buf.append(this.m_MarginalP[iNode][iValue] + " ");
        }
        buf.append('\n');
      }
      for (Object element : this.m_children) {
        JunctionTreeNode childNode = (JunctionTreeNode) element;
        buf.append("----------------\n");
        buf.append(childNode.toString());
      }
      return buf.toString();
    } // toString

    /**
     * Computes the clique potential m_fi as the product of the CPTs of every
     * variable whose full parent set lies inside this clique and that has not
     * been assigned to an earlier clique (tracked via bDone).
     *
     * @param bayesNet network providing CPTs, parents and cardinalities
     * @param clique variable set of this clique
     * @param bDone shared flags marking variables already assigned to a clique; updated here
     */
    void calculatePotentials(final BayesNet bayesNet, final Set<Integer> clique, final boolean[] bDone) {
      this.m_fi = new double[this.m_nCardinality];
      int[] values = new int[this.m_nNodes.length];
      int[] order = new int[bayesNet.getNrOfNodes()];
      for (int iNode = 0; iNode < this.m_nNodes.length; iNode++) {
        order[this.m_nNodes[iNode]] = iNode;
      }
      // find conditional probabilities that need to be taken in account
      boolean[] bIsContained = new boolean[this.m_nNodes.length];
      for (int iNode = 0; iNode < this.m_nNodes.length; iNode++) {
        int nNode = this.m_nNodes[iNode];
        bIsContained[iNode] = !bDone[nNode];
        for (int iParent = 0; iParent < bayesNet.getNrOfParents(nNode); iParent++) {
          int nParent = bayesNet.getParent(nNode, iParent);
          if (!clique.contains(nParent)) {
            bIsContained[iNode] = false;
          }
        }
        if (bIsContained[iNode]) {
          bDone[nNode] = true;
          if (MarginCalculator.this.m_debug) {
            System.out.println("adding node " + nNode);
          }
        }
      }
      // fill in the values
      for (int iPos = 0; iPos < this.m_nCardinality; iPos++) {
        int iCPT = MarginCalculator.this.getCPT(this.m_nNodes, this.m_nNodes.length, values, order, bayesNet);
        this.m_fi[iCPT] = 1.0;
        for (int iNode = 0; iNode < this.m_nNodes.length; iNode++) {
          if (bIsContained[iNode]) {
            int nNode = this.m_nNodes[iNode];
            int[] nNodes = bayesNet.getParentSet(nNode).getParents();
            int iCPT2 = MarginCalculator.this.getCPT(nNodes, bayesNet.getNrOfParents(nNode), values, order, bayesNet);
            double f = bayesNet.getDistributions()[nNode][iCPT2].getProbability(values[iNode]);
            this.m_fi[iCPT] *= f;
          }
        }
        // advance the odometer
        int i = 0;
        values[i]++;
        while (i < this.m_nNodes.length && values[i] == bayesNet.getCardinality(this.m_nNodes[i])) {
          values[i] = 0;
          i++;
          if (i < this.m_nNodes.length) {
            values[i]++;
          }
        }
      }
    } // calculatePotentials

    /**
     * Creates a junction tree node over the given clique and computes its
     * initial potential.
     *
     * @param clique variable set of this clique
     * @param bayesNet network providing CPTs and cardinalities
     * @param bDone shared flags marking variables already assigned to a clique
     */
    JunctionTreeNode(final Set<Integer> clique, final BayesNet bayesNet, final boolean[] bDone) {
      this.m_bayesNet = bayesNet;
      this.m_children = new Vector<JunctionTreeNode>();
      // ////////////////////
      // initialize node set
      this.m_nNodes = new int[clique.size()];
      int iPos = 0;
      this.m_nCardinality = 1;
      for (Integer integer : clique) {
        int iNode = integer;
        this.m_nNodes[iPos++] = iNode;
        this.m_nCardinality *= bayesNet.getCardinality(iNode);
      }
      // //////////////////////////////
      // initialize potential function
      this.calculatePotentials(bayesNet, clique, bDone);
    } // JunctionTreeNode c'tor

    /*
     * check whether this junction tree node contains node nNode
     */
    boolean contains(final int nNode) {
      for (int m_nNode : this.m_nNodes) {
        if (m_nNode == nNode) {
          return true;
        }
      }
      return false;
    } // contains

    /**
     * Clamps a variable of this clique to the given value: zeroes out all
     * joint entries inconsistent with the evidence, renormalizes, and
     * propagates the change through the tree via updateEvidence.
     *
     * @param nNode graph-node index of the evidence variable (must be in this clique)
     * @param iValue observed value of the variable
     * @throws Exception if nNode is not contained in this clique
     */
    public void setEvidence(final int nNode, final int iValue) throws Exception {
      int[] values = new int[this.m_nNodes.length];
      int[] order = new int[this.m_bayesNet.getNrOfNodes()];
      int nNodeIdx = -1;
      for (int iNode = 0; iNode < this.m_nNodes.length; iNode++) {
        order[this.m_nNodes[iNode]] = iNode;
        if (this.m_nNodes[iNode] == nNode) {
          nNodeIdx = iNode;
        }
      }
      if (nNodeIdx < 0) {
        throw new Exception("setEvidence: Node " + nNode + " not found in this clique");
      }
      for (int iPos = 0; iPos < this.m_nCardinality; iPos++) {
        if (values[nNodeIdx] != iValue) {
          int iNodeCPT = MarginCalculator.this.getCPT(this.m_nNodes, this.m_nNodes.length, values, order, this.m_bayesNet);
          this.m_P[iNodeCPT] = 0;
        }
        // advance the odometer
        int i = 0;
        values[i]++;
        while (i < this.m_nNodes.length && values[i] == this.m_bayesNet.getCardinality(this.m_nNodes[i])) {
          values[i] = 0;
          i++;
          if (i < this.m_nNodes.length) {
            values[i]++;
          }
        }
      }
      // normalize
      double sum = 0;
      for (int iPos = 0; iPos < this.m_nCardinality; iPos++) {
        sum += this.m_P[iPos];
      }
      for (int iPos = 0; iPos < this.m_nCardinality; iPos++) {
        this.m_P[iPos] /= sum;
      }
      this.calcMarginalProbabilities();
      this.updateEvidence(this);
    } // setEvidence

    /**
     * Propagates an evidence update through the tree. When called from a child
     * (source != this), absorbs the child's updated message first; then the
     * update is distributed to all other children and passed on to the parent.
     *
     * @param source the neighboring clique the update originated from (or this clique itself)
     */
    void updateEvidence(final JunctionTreeNode source) {
      if (source != this) {
        int[] values = new int[this.m_nNodes.length];
        int[] order = new int[this.m_bayesNet.getNrOfNodes()];
        for (int iNode = 0; iNode < this.m_nNodes.length; iNode++) {
          order[this.m_nNodes[iNode]] = iNode;
        }
        int[] nChildNodes = source.m_parentSeparator.m_nNodes;
        int nNumChildNodes = nChildNodes.length;
        for (int iPos = 0; iPos < this.m_nCardinality; iPos++) {
          int iNodeCPT = MarginCalculator.this.getCPT(this.m_nNodes, this.m_nNodes.length, values, order, this.m_bayesNet);
          int iChildCPT = MarginCalculator.this.getCPT(nChildNodes, nNumChildNodes, values, order, this.m_bayesNet);
          // scale by new/old child message; zero old message forces a zero entry
          if (source.m_parentSeparator.m_fiParent[iChildCPT] != 0) {
            this.m_P[iNodeCPT] *= source.m_parentSeparator.m_fiChild[iChildCPT] / source.m_parentSeparator.m_fiParent[iChildCPT];
          } else {
            this.m_P[iNodeCPT] = 0;
          }
          // advance the odometer
          int i = 0;
          values[i]++;
          while (i < this.m_nNodes.length && values[i] == this.m_bayesNet.getCardinality(this.m_nNodes[i])) {
            values[i] = 0;
            i++;
            if (i < this.m_nNodes.length) {
              values[i]++;
            }
          }
        }
        // normalize
        double sum = 0;
        for (int iPos = 0; iPos < this.m_nCardinality; iPos++) {
          sum += this.m_P[iPos];
        }
        for (int iPos = 0; iPos < this.m_nCardinality; iPos++) {
          this.m_P[iPos] /= sum;
        }
        this.calcMarginalProbabilities();
      }
      // push the update into every subtree except the one it came from
      for (Object element : this.m_children) {
        JunctionTreeNode childNode = (JunctionTreeNode) element;
        if (childNode != source) {
          childNode.initializeDown(true);
        }
      }
      // and pass it up to the parent (which will recurse back down elsewhere)
      if (this.m_parentSeparator != null) {
        this.m_parentSeparator.updateFromChild();
        this.m_parentSeparator.m_parentNode.updateEvidence(this);
        this.m_parentSeparator.updateFromParent();
      }
    } // updateEvidence

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    @Override
    public String getRevision() {
      return RevisionUtils.extract("$Revision$");
    }
  } // class JunctionTreeNode

  /**
   * Converts an odometer state into a flat index into a potential table over
   * the given node set (row-major, first node varying slowest).
   *
   * @param nodeSet graph-node indices spanning the table
   * @param nNodes number of entries of nodeSet to use
   * @param values current value of each clique variable (odometer state)
   * @param order maps graph-node index to position within the odometer
   * @param bayesNet network providing cardinalities
   * @return flat index into the potential table
   */
  int getCPT(final int[] nodeSet, final int nNodes, final int[] values, final int[] order, final BayesNet bayesNet) {
    int iCPTnew = 0;
    for (int iNode = 0; iNode < nNodes; iNode++) {
      int nNode = nodeSet[iNode];
      iCPTnew = iCPTnew * bayesNet.getCardinality(nNode);
      iCPTnew += values[order[nNode]];
    }
    return iCPTnew;
  } // getCPT

  /**
   * Links each clique to a parent clique: the first other clique (in ordering)
   * that contains the clique's whole separator set.
   *
   * @param order maximum cardinality ordering of the graph
   * @param cliques clique sets (null where no clique is anchored)
   * @param separators separator set of each clique
   * @return parent clique index per clique; -1 marks a root
   */
  int[] getCliqueTree(final int[] order, final Set<Integer>[] cliques, final Set<Integer>[] separators) {
    int nNodes = order.length;
    int[] parentCliques = new int[nNodes];
    // for (int i = nNodes - 1; i >= 0; i--) {
    for (int i = 0; i < nNodes; i++) {
      int iNode = order[i];
      parentCliques[iNode] = -1;
      if (cliques[iNode] != null && separators[iNode].size() > 0) {
        // for (int j = nNodes - 1; j > i; j--) {
        for (int j = 0; j < nNodes; j++) {
          int iNode2 = order[j];
          if (iNode != iNode2 && cliques[iNode2] != null && cliques[iNode2].containsAll(separators[iNode])) {
            parentCliques[iNode] = iNode2;
            // NOTE(review): net effect is j = nNodes, i.e. a break after the first
            // match; the first two assignments are dead stores left over from edits.
            j = i;
            j = 0;
            j = nNodes;
          }
        }
      }
    }
    return parentCliques;
  } // getCliqueTree

  /**
   * Calculates separator sets in the clique tree: for each clique (in
   * ordering), its intersection with the union of all previously seen cliques.
   *
   * @param order maximum cardinality ordering of the graph
   * @param cliques set of cliques
   * @return set of separator sets, indexed like cliques
   */
  Set<Integer>[] getSeparators(final int[] order, final Set<Integer>[] cliques) {
    int nNodes = order.length;
    @SuppressWarnings("unchecked")
    Set<Integer>[] separators = new HashSet[nNodes];
    Set<Integer> processedNodes = new HashSet<Integer>();
    // for (int i = nNodes - 1; i >= 0; i--) {
    for (int i = 0; i < nNodes; i++) {
      int iNode = order[i];
      if (cliques[iNode] != null) {
        Set<Integer> separator = new HashSet<Integer>();
        separator.addAll(cliques[iNode]);
        separator.retainAll(processedNodes);
        separators[iNode] = separator;
        processedNodes.addAll(cliques[iNode]);
      }
    }
    return separators;
  } // getSeparators

  /**
   * Gets the cliques of a decomposable (triangulated) graph represented by an
   * adjacency matrix: each node together with its lower-ordered neighbors;
   * cliques subsumed by another clique are discarded (set to null).
   *
   * @param order maximum cardinality ordering of the graph
   * @param bAdjacencyMatrix decomposable graph
   * @return set of cliques, indexed by the highest-ordered member node
   * @throws Exception if the debug sanity check finds a non-clique
   */
  Set<Integer>[] getCliques(final int[] order, final boolean[][] bAdjacencyMatrix) throws Exception {
    int nNodes = bAdjacencyMatrix.length;
    @SuppressWarnings("unchecked")
    Set<Integer>[] cliques = new HashSet[nNodes];
    // int[] inverseOrder = new int[nNodes];
    // for (int iNode = 0; iNode < nNodes; iNode++) {
    // inverseOrder[order[iNode]] = iNode;
    // }
    // consult nodes in reverse order
    for (int i = nNodes - 1; i >= 0; i--) {
      int iNode = order[i];
      // NOTE(review): leftover debugging hook (breakpoint anchor); has no effect
      if (iNode == 22) {
      }
      Set<Integer> clique = new HashSet<Integer>();
      clique.add(iNode);
      for (int j = 0; j < i; j++) {
        int iNode2 = order[j];
        if (bAdjacencyMatrix[iNode][iNode2]) {
          clique.add(iNode2);
        }
      }
      // for (int iNode2 = 0; iNode2 < nNodes; iNode2++) {
      // if (bAdjacencyMatrix[iNode][iNode2] && inverseOrder[iNode2] <
      // inverseOrder[iNode]) {
      // clique.add(iNode2);
      // }
      // }
      cliques[iNode] = clique;
    }
    // drop cliques fully contained in another clique
    for (int iNode = 0; iNode < nNodes; iNode++) {
      for (int iNode2 = 0; iNode2 < nNodes; iNode2++) {
        if (iNode != iNode2 && cliques[iNode] != null && cliques[iNode2] != null && cliques[iNode].containsAll(cliques[iNode2])) {
          cliques[iNode2] = null;
        }
      }
    }
    // sanity check
    if (this.m_debug) {
      int[] nNodeSet = new int[nNodes];
      for (int iNode = 0; iNode < nNodes; iNode++) {
        if (cliques[iNode] != null) {
          Iterator<Integer> it = cliques[iNode].iterator();
          int k = 0;
          while (it.hasNext()) {
            nNodeSet[k++] = it.next();
          }
          for (int i = 0; i < cliques[iNode].size(); i++) {
            for (int j = 0; j < cliques[iNode].size(); j++) {
              if (i != j && !bAdjacencyMatrix[nNodeSet[i]][nNodeSet[j]]) {
                throw new Exception("Non clique" + i + " " + j);
              }
            }
          }
        }
      }
    }
    return cliques;
  } // getCliques

  /**
   * Moralizes the DAG and calculates the adjacency matrix representation for a
   * Bayes network, effectively converting the directed acyclic graph to an
   * undirected graph (parents of a common child are "married").
   *
   * @param bayesNet Bayes network to process
   * @return adjacencies in boolean matrix format
   */
  public boolean[][] moralize(final BayesNet bayesNet) {
    int nNodes = bayesNet.getNrOfNodes();
    boolean[][] bAdjacencyMatrix = new boolean[nNodes][nNodes];
    for (int iNode = 0; iNode < nNodes; iNode++) {
      ParentSet parents = bayesNet.getParentSets()[iNode];
      this.moralizeNode(parents, iNode, bAdjacencyMatrix);
    }
    return bAdjacencyMatrix;
  } // moralize

  /**
   * Adds undirected edges between a node and each of its parents, and between
   * every pair of its parents ("marrying" them).
   *
   * @param parents parent set of the node
   * @param iNode index of the child node
   * @param bAdjacencyMatrix matrix updated in place
   */
  private void moralizeNode(final ParentSet parents, final int iNode, final boolean[][] bAdjacencyMatrix) {
    for (int iParent = 0; iParent < parents.getNrOfParents(); iParent++) {
      int nParent = parents.getParent(iParent);
      if (this.m_debug && !bAdjacencyMatrix[iNode][nParent]) {
        System.out.println("Insert " + iNode + "--" + nParent);
      }
      bAdjacencyMatrix[iNode][nParent] = true;
      bAdjacencyMatrix[nParent][iNode] = true;
      for (int iParent2 = iParent + 1; iParent2 < parents.getNrOfParents(); iParent2++) {
        int nParent2 = parents.getParent(iParent2);
        if (this.m_debug && !bAdjacencyMatrix[nParent2][nParent]) {
          System.out.println("Mary " + nParent + "--" + nParent2);
        }
        bAdjacencyMatrix[nParent2][nParent] = true;
        bAdjacencyMatrix[nParent][nParent2] = true;
      }
    }
  } // moralizeNode

  /**
   * Applies the Tarjan and Yannakakis (1984) fill-in algorithm for graph
   * triangulation. In reverse order, insert edges between any non-adjacent
   * neighbors that are lower numbered in the ordering.
   *
   * Side effect: input matrix is used as output.
   *
   * @param order node ordering
   * @param bAdjacencyMatrix boolean matrix representing the graph
   * @return boolean matrix representing the graph with fill-ins
   */
  public boolean[][] fillIn(final int[] order, final boolean[][] bAdjacencyMatrix) {
    int nNodes = bAdjacencyMatrix.length;
    int[] inverseOrder = new int[nNodes];
    for (int iNode = 0; iNode < nNodes; iNode++) {
      inverseOrder[order[iNode]] = iNode;
    }
    // consult nodes in reverse order
    for (int i = nNodes - 1; i >= 0; i--) {
      int iNode = order[i];
      // find pairs of neighbors with lower order
      for (int j = 0; j < i; j++) {
        int iNode2 = order[j];
        if (bAdjacencyMatrix[iNode][iNode2]) {
          for (int k = j + 1; k < i; k++) {
            int iNode3 = order[k];
            if (bAdjacencyMatrix[iNode][iNode3]) {
              // fill in
              if (this.m_debug && (!bAdjacencyMatrix[iNode2][iNode3] || !bAdjacencyMatrix[iNode3][iNode2])) {
                System.out.println("Fill in " + iNode2 + "--" + iNode3);
              }
              bAdjacencyMatrix[iNode2][iNode3] = true;
              bAdjacencyMatrix[iNode3][iNode2] = true;
            }
          }
        }
      }
    }
    return bAdjacencyMatrix;
  } // fillIn

  /**
   * Calculates a maximum cardinality ordering: start with node 0, then
   * repeatedly add the node with the most already-ordered neighbors until all
   * nodes are in the ordering.
   *
   * This implementation does not assume the graph is connected.
   *
   * @param bAdjacencyMatrix n by n matrix with adjacencies in a graph of n nodes
   * @return maximum cardinality ordering
   * @throws InterruptedException if the thread is interrupted (interruptible-weka patch)
   */
  int[] getMaxCardOrder(final boolean[][] bAdjacencyMatrix) throws InterruptedException {
    int nNodes = bAdjacencyMatrix.length;
    int[] order = new int[nNodes];
    if (nNodes == 0) {
      return order;
    }
    boolean[] bDone = new boolean[nNodes];
    // start with node 0
    order[0] = 0;
    bDone[0] = true;
    // order remaining nodes
    for (int iNode = 1; iNode < nNodes; iNode++) {
      // XXX kill weka — cooperative cancellation point added by interruptible-weka
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA!");
      }
      int nMaxCard = -1;
      int iBestNode = -1;
      // find node with highest cardinality of previously ordered nodes
      for (int iNode2 = 0; iNode2 < nNodes; iNode2++) {
        if (!bDone[iNode2]) {
          int nCard = 0;
          // calculate cardinality for node iNode2
          for (int iNode3 = 0; iNode3 < nNodes; iNode3++) {
            if (bAdjacencyMatrix[iNode2][iNode3] && bDone[iNode3]) {
              nCard++;
            }
          }
          if (nCard > nMaxCard) {
            nMaxCard = nCard;
            iBestNode = iNode2;
          }
        }
      }
      order[iNode] = iBestNode;
      bDone[iBestNode] = true;
    }
    return order;
  } // getMaxCardOrder

  /**
   * Sets evidence on a node: locates the first junction tree clique containing
   * the node and clamps it there; the update propagates through the tree.
   *
   * @param nNode graph-node index of the evidence variable
   * @param iValue observed value
   * @throws Exception if the tree was not built yet or the node is in no clique
   */
  public void setEvidence(final int nNode, final int iValue) throws Exception {
    if (this.m_root == null) {
      throw new Exception("Junction tree not initialize yet");
    }
    int iJtNode = 0;
    while (iJtNode < this.jtNodes.length && (this.jtNodes[iJtNode] == null || !this.jtNodes[iJtNode].contains(nNode))) {
      iJtNode++;
    }
    if (this.jtNodes.length == iJtNode) {
      throw new Exception("Could not find node " + nNode + " in junction tree");
    }
    this.jtNodes[iJtNode].setEvidence(nNode, iValue);
  } // setEvidence

  /**
   * Renders the whole junction tree starting from the root.
   */
  @Override
  public String toString() {
    return this.m_root.toString();
  } // toString

  /** computed marginals, indexed by graph node; m_Margins[n][v] = P(node n = v | evidence) */
  double[][] m_Margins;

  /**
   * Returns the marginal distribution of a node.
   *
   * @param iNode graph-node index
   * @return probability per value of the node
   */
  public double[] getMargin(final int iNode) {
    return this.m_Margins[iNode];
  } // getMargin

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }

  /**
   * Command-line demo: loads a BIF file given as args[0], computes marginals,
   * sets evidence on two hard-coded nodes, and prints the results for both the
   * junction-tree and the full-joint computation.
   *
   * @param args args[0] = path to a BIF file
   */
  public static void main(final String[] args) {
    try {
      BIFReader bayesNet = new BIFReader();
      bayesNet.processFile(args[0]);
      MarginCalculator dc = new MarginCalculator();
      dc.calcMargins(bayesNet);
      int iNode = 2;
      int iValue = 0;
      int iNode2 = 4;
      int iValue2 = 0;
      dc.setEvidence(iNode, iValue);
      dc.setEvidence(iNode2, iValue2);
      System.out.print(dc.toString());

      dc.calcFullMargins(bayesNet);
      dc.setEvidence(iNode, iValue);
      dc.setEvidence(iNode2, iValue2);
      System.out.println("==============");
      System.out.print(dc.toString());
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // main
} // class MarginCalculator
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/ParentSet.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * ParentSet.java
 * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand
 *
 */
package weka.classifiers.bayes.net;

import java.io.Serializable;

import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;

/**
 * Helper class for Bayes Network classifiers. Provides datastructures to
 * represent a set of parents in a graph.
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision$
 */
public class ParentSet implements Serializable, RevisionHandler {

  /** for serialization */
  static final long serialVersionUID = 4155021284407181838L;

  /** Holds indexes of parents */
  private int[] m_nParents;

  /** Holds number of parents */
  private int m_nNrOfParents = 0;

  /**
   * Holds cardinality of parents (= number of instantiations the parents can
   * take)
   */
  private int m_nCardinalityOfParents = 1;

  /**
   * default constructor: reserves room for 10 parents
   */
  public ParentSet() {
    m_nParents = new int[10];
    m_nNrOfParents = 0;
    m_nCardinalityOfParents = 1;
  } // ParentSet

  /**
   * constructor
   *
   * @param nMaxNrOfParents upper bound on nr of parents
   */
  public ParentSet(int nMaxNrOfParents) {
    m_nParents = new int[nMaxNrOfParents];
    m_nNrOfParents = 0;
    m_nCardinalityOfParents = 1;
  } // ParentSet

  /**
   * copy constructor
   *
   * @param other other parent set
   */
  public ParentSet(ParentSet other) {
    m_nNrOfParents = other.m_nNrOfParents;
    m_nCardinalityOfParents = other.m_nCardinalityOfParents;
    m_nParents = new int[m_nNrOfParents];
    System.arraycopy(other.m_nParents, 0, m_nParents, 0, m_nNrOfParents);
  } // ParentSet

  /**
   * returns index parent of parent specified by index
   *
   * @param iParent Index of parent
   * @return index of parent
   */
  public int getParent(int iParent) {
    return m_nParents[iParent];
  }

  /**
   * returns the internal array of parent indexes; only the first
   * getNrOfParents() entries are valid
   *
   * @return array of parent indexes
   */
  public int[] getParents() {
    return m_nParents;
  }

  /**
   * sets index parent of parent specified by index. Note this does NOT update
   * the parent count or cardinality; use addParent for that.
   *
   * @param iParent Index of parent
   * @param nNode index of the node that becomes parent
   */
  public void SetParent(int iParent, int nNode) {
    m_nParents[iParent] = nNode;
  } // SetParent

  /**
   * returns number of parents
   *
   * @return number of parents
   */
  public int getNrOfParents() {
    return m_nNrOfParents;
  }

  /**
   * test if node is contained in parent set
   *
   * @param iNode node to test for
   * @return true if the node is a member of this parent set
   */
  public boolean contains(int iNode) {
    for (int iParent = 0; iParent < m_nNrOfParents; iParent++) {
      if (m_nParents[iParent] == iNode) {
        return true;
      }
    }
    return false;
  }

  /**
   * returns cardinality of parents
   *
   * @return the cardinality
   */
  public int getCardinalityOfParents() {
    return m_nCardinalityOfParents;
  }

  /**
   * returns cardinality of parents after recalculation
   *
   * @param _Instances data set providing the attribute cardinalities
   * @return the cardinality
   */
  public int getFreshCardinalityOfParents(Instances _Instances) {
    m_nCardinalityOfParents = 1;
    for (int iParent = 0; iParent < m_nNrOfParents; iParent++) {
      m_nCardinalityOfParents *= _Instances.attribute(m_nParents[iParent])
        .numValues();
    }
    return m_nCardinalityOfParents;
  }

  /**
   * reserve memory for parent set. Discards any current parents, so call
   * before populating the set.
   *
   * @param nSize maximum size of parent set to reserve memory for
   */
  public void maxParentSetSize(int nSize) {
    m_nParents = new int[nSize];
  } // MaxParentSetSize

  /**
   * Grows the parent array when it is full. Guards against a zero-capacity
   * array (2 * 0 == 0 would never grow and the subsequent write would throw).
   */
  private void ensureCapacity() {
    if (m_nNrOfParents == m_nParents.length) {
      int[] nParents = new int[Math.max(10, 2 * m_nParents.length)];
      System.arraycopy(m_nParents, 0, nParents, 0, m_nNrOfParents);
      m_nParents = nParents;
    }
  } // ensureCapacity

  /**
   * Add parent to parent set and update internals (specifically the
   * cardinality of the parent set)
   *
   * @param nParent parent to add
   * @param _Instances used for updating the internals
   */
  public void addParent(int nParent, Instances _Instances) {
    ensureCapacity();
    m_nParents[m_nNrOfParents] = nParent;
    m_nNrOfParents++;
    m_nCardinalityOfParents *= _Instances.attribute(nParent).numValues();
  } // AddParent

  /**
   * Add parent to parent set at specific location and update internals
   * (specifically the cardinality of the parent set)
   *
   * @param nParent parent to add
   * @param iParent location to add parent in parent set
   * @param _Instances used for updating the internals
   */
  public void addParent(int nParent, int iParent, Instances _Instances) {
    ensureCapacity();
    // shift existing parents up to make room at position iParent
    for (int iParent2 = m_nNrOfParents; iParent2 > iParent; iParent2--) {
      m_nParents[iParent2] = m_nParents[iParent2 - 1];
    }
    m_nParents[iParent] = nParent;
    m_nNrOfParents++;
    m_nCardinalityOfParents *= _Instances.attribute(nParent).numValues();
  } // AddParent

  /**
   * delete node from parent set
   *
   * @param nParent node number of the parent to delete
   * @param _Instances data set
   * @return location of the parent in the parent set, or -1 if nParent is not
   *         a member. This information can be used to restore the parent set
   *         using the addParent method.
   */
  public int deleteParent(int nParent, Instances _Instances) {
    int iParent = 0;
    // bounds check MUST come before the array access: the previous order
    // read m_nParents[m_nNrOfParents] when nParent was not in the set,
    // which throws ArrayIndexOutOfBoundsException on an exactly-full array
    while ((iParent < m_nNrOfParents) && (m_nParents[iParent] != nParent)) {
      iParent++;
    }
    if (iParent >= m_nNrOfParents) {
      return -1; // nParent is not a member of this parent set
    }
    int iParent2 = iParent;
    while (iParent < m_nNrOfParents - 1) {
      m_nParents[iParent] = m_nParents[iParent + 1];
      iParent++;
    }
    m_nNrOfParents--;
    m_nCardinalityOfParents /= _Instances.attribute(nParent).numValues();
    return iParent2;
  } // DeleteParent

  /**
   * Delete last added parent from parent set and update internals
   * (specifically the cardinality of the parent set)
   *
   * @param _Instances used for updating the internals
   */
  public void deleteLastParent(Instances _Instances) {
    m_nNrOfParents--;
    m_nCardinalityOfParents = m_nCardinalityOfParents
      / _Instances.attribute(m_nParents[m_nNrOfParents]).numValues();
  } // DeleteLastParent

  /**
   * Copy makes current parent set equal to other parent set
   *
   * @param other : parent set to make a copy from
   */
  public void copy(ParentSet other) {
    m_nCardinalityOfParents = other.m_nCardinalityOfParents;
    m_nNrOfParents = other.m_nNrOfParents;
    // grow if the other set holds more parents than we have room for
    if (m_nParents.length < m_nNrOfParents) {
      m_nParents = new int[m_nNrOfParents];
    }
    System.arraycopy(other.m_nParents, 0, m_nParents, 0, m_nNrOfParents);
  } // Copy

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }

} // class ParentSet
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/VaryNode.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * VaryNode.java
 * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand
 *
 */
package weka.classifiers.bayes.net;

import java.io.Serializable;

import weka.core.RevisionHandler;
import weka.core.RevisionUtils;

/**
 * Part of ADTree implementation. See ADNode.java for more details.
 *
 * A VaryNode represents one attribute being "varied" in the AD tree: it holds
 * one ADNode child per attribute value, except for the most common value
 * (MCV), whose child is left null and whose counts are reconstructed from the
 * parent minus the other children (the standard ADTree space optimization).
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision$
 */
public class VaryNode implements Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = -6196294370675872424L;

  /** index of the node varied **/
  public int m_iNode;

  /** most common value **/
  public int m_nMCV;

  /** list of ADNode children, one per value; the slot for the MCV is null **/
  public ADNode[] m_ADNodes;

  /** Creates new VaryNode */
  public VaryNode(final int iNode) {
    this.m_iNode = iNode;
  }

  /**
   * get counts for specific instantiation of a set of nodes.
   *
   * For non-MCV values the stored child supplies the counts directly. For the
   * MCV value (whose child is not stored) the counts are derived as
   * parent-counts minus the counts of all other values, which is why the
   * sibling children are visited a second time with the subtraction flag
   * inverted.
   *
   * @param nCounts array for storing counts
   * @param nNodes array of node indexes
   * @param nOffsets offset for nodes in nNodes in nCounts
   * @param iNode index into nNode indicating current node
   * @param iOffset Offset into nCounts due to nodes below iNode
   * @param parent parent ADNode of this VaryNode
   * @param bSubstract indicate whether counts should be added or substracted
   * @throws InterruptedException
   */
  public void getCounts(final int[] nCounts, final int[] nNodes, final int[] nOffsets, final int iNode, final int iOffset,
      final ADNode parent, final boolean bSubstract) throws InterruptedException {
    for (int iValue = 0; iValue < this.m_ADNodes.length; iValue++) {
      if (iValue != this.m_nMCV) {
        // regular value: child (if any) holds its counts directly
        if (this.m_ADNodes[iValue] != null) {
          this.m_ADNodes[iValue].getCounts(nCounts, nNodes, nOffsets, iNode + 1, iOffset + nOffsets[iNode] * iValue, bSubstract);
        }
      } else {
        // MCV: start from the parent's counts at the MCV offset ...
        parent.getCounts(nCounts, nNodes, nOffsets, iNode + 1, iOffset + nOffsets[iNode] * iValue, bSubstract);
        // ... then subtract every other value's counts (note: same MCV
        // offset, opposite sign via !bSubstract)
        for (int iValue2 = 0; iValue2 < this.m_ADNodes.length; iValue2++) {
          if (iValue2 != this.m_nMCV && this.m_ADNodes[iValue2] != null) {
            this.m_ADNodes[iValue2].getCounts(nCounts, nNodes, nOffsets, iNode + 1, iOffset + nOffsets[iNode] * iValue, !bSubstract);
          }
        }
      }
    }
  }

  /**
   * print is used for debugging only, called from ADNode
   *
   * @param sTab amount of space.
   */
  public void print(final String sTab) {
    for (int iValue = 0; iValue < this.m_ADNodes.length; iValue++) {
      System.out.print(sTab + iValue + ": ");
      if (this.m_ADNodes[iValue] == null) {
        // a null slot is either the elided MCV child or a zero-count value
        if (iValue == this.m_nMCV) {
          System.out.println("MCV");
        } else {
          System.out.println("null");
        }
      } else {
        System.out.println();
        this.m_ADNodes[iValue].print();
      }
    }
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/estimate/BMAEstimator.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * BMAEstimator.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */
package weka.classifiers.bayes.net.estimate;

import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;

import weka.classifiers.bayes.BayesNet;
import weka.classifiers.bayes.net.search.local.K2;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.Statistics;
import weka.core.Utils;
import weka.estimators.Estimator;

/**
 * <!-- globalinfo-start --> BMAEstimator estimates conditional probability
 * tables of a Bayes network using Bayes Model Averaging (BMA).
 * <p/>
 * <!-- globalinfo-end -->
 *
 * <!-- options-start --> Valid options are:
 * <p/>
 *
 * <pre>
 * -k2
 *  Whether to use K2 prior.
 * </pre>
 *
 * <pre>
 * -A &lt;alpha&gt;
 *  Initial count (alpha)
 * </pre>
 *
 * <!-- options-end -->
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision$
 */
public class BMAEstimator extends SimpleEstimator {

  /** for serialization */
  static final long serialVersionUID = -1846028304233257309L;

  /** whether to use K2 prior */
  protected boolean m_bUseK2Prior = false;

  /**
   * Returns a string describing this object
   *
   * @return a description of the classifier suitable for displaying in the
   *         explorer/experimenter gui
   */
  @Override
  public String globalInfo() {
    return "BMAEstimator estimates conditional probability tables of a Bayes "
      + "network using Bayes Model Averaging (BMA).";
  }

  /**
   * estimateCPTs estimates the conditional probability tables for the Bayes Net
   * using the network structure. The CPTs are a mixture of those of the empty
   * network and of the naive-Bayes network, weighted by each network's
   * (log-scale) posterior score.
   *
   * @param bayesNet the bayes net to use
   * @throws Exception if an error occurs (e.g. a node has more than 1 parent)
   */
  @Override
  public void estimateCPTs(BayesNet bayesNet) throws Exception {
    initCPTs(bayesNet);

    Instances instances = bayesNet.m_Instances;

    // sanity check to see if nodes have not more than one parent
    for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) {
      if (bayesNet.getParentSet(iAttribute).getNrOfParents() > 1) {
        throw new Exception(
          "Cannot handle networks with nodes with more than 1 parent (yet).");
      }
    }

    // learn the structure-free (empty) network ...
    BayesNet EmptyNet = new BayesNet();
    K2 oSearchAlgorithm = new K2();
    oSearchAlgorithm.setInitAsNaiveBayes(false);
    oSearchAlgorithm.setMaxNrOfParents(0);
    EmptyNet.setSearchAlgorithm(oSearchAlgorithm);
    EmptyNet.buildClassifier(instances);

    // ... and the naive Bayes network
    BayesNet NBNet = new BayesNet();
    oSearchAlgorithm.setInitAsNaiveBayes(true);
    oSearchAlgorithm.setMaxNrOfParents(1);
    NBNet.setSearchAlgorithm(oSearchAlgorithm);
    NBNet.buildClassifier(instances);

    // estimate CPTs
    for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) {
      if (iAttribute != instances.classIndex()) {
        // w1 / w2 accumulate the log-score of the empty / naive Bayes network
        double w1 = 0.0, w2 = 0.0;
        int nAttValues = instances.attribute(iAttribute).numValues();
        if (m_bUseK2Prior) {
          // use Cooper and Herskovitz's metric
          for (int iAttValue = 0; iAttValue < nAttValues; iAttValue++) {
            w1 += Statistics
              .lnGamma(1 + ((DiscreteEstimatorBayes) EmptyNet.m_Distributions[iAttribute][0])
                .getCount(iAttValue))
              - Statistics.lnGamma(1);
          }
          w1 += Statistics.lnGamma(nAttValues)
            - Statistics.lnGamma(nAttValues + instances.numInstances());

          for (int iParent = 0; iParent < bayesNet.getParentSet(iAttribute)
            .getCardinalityOfParents(); iParent++) {
            int nTotal = 0;
            for (int iAttValue = 0; iAttValue < nAttValues; iAttValue++) {
              double nCount = ((DiscreteEstimatorBayes) NBNet.m_Distributions[iAttribute][iParent])
                .getCount(iAttValue);
              w2 += Statistics.lnGamma(1 + nCount) - Statistics.lnGamma(1);
              nTotal += nCount;
            }
            w2 += Statistics.lnGamma(nAttValues)
              - Statistics.lnGamma(nAttValues + nTotal);
          }
        } else {
          // use BDe metric
          for (int iAttValue = 0; iAttValue < nAttValues; iAttValue++) {
            w1 += Statistics
              .lnGamma(1.0 / nAttValues
                + ((DiscreteEstimatorBayes) EmptyNet.m_Distributions[iAttribute][0])
                  .getCount(iAttValue))
              - Statistics.lnGamma(1.0 / nAttValues);
          }
          w1 += Statistics.lnGamma(1)
            - Statistics.lnGamma(1 + instances.numInstances());

          int nParentValues = bayesNet.getParentSet(iAttribute)
            .getCardinalityOfParents();
          for (int iParent = 0; iParent < nParentValues; iParent++) {
            int nTotal = 0;
            for (int iAttValue = 0; iAttValue < nAttValues; iAttValue++) {
              double nCount = ((DiscreteEstimatorBayes) NBNet.m_Distributions[iAttribute][iParent])
                .getCount(iAttValue);
              w2 += Statistics.lnGamma(1.0 / (nAttValues * nParentValues)
                + nCount)
                - Statistics.lnGamma(1.0 / (nAttValues * nParentValues));
              nTotal += nCount;
            }
            w2 += Statistics.lnGamma(1) - Statistics.lnGamma(1 + nTotal);
          }
        }

        // Convert the two log-scores into normalized mixture weights:
        // w_i = exp(w_i) / (exp(w1) + exp(w2)). Computed via the logistic
        // function of the score difference d so only the difference matters.
        // NOTE: the previous formulation exp(d) / (1 + exp(d)) overflowed to
        // Infinity/Infinity = NaN for |d| > ~709; 1 / (1 + exp(-d)) is the
        // algebraically identical, overflow-safe form.
        if (w1 < w2) {
          double d = w2 - w1; // d >= 0
          w1 = 1 / (1 + Math.exp(d));
          w2 = 1 / (1 + Math.exp(-d));
        } else {
          double d = w1 - w2; // d >= 0
          w2 = 1 / (1 + Math.exp(d));
          w1 = 1 / (1 + Math.exp(-d));
        }

        for (int iParent = 0; iParent < bayesNet.getParentSet(iAttribute)
          .getCardinalityOfParents(); iParent++) {
          bayesNet.m_Distributions[iAttribute][iParent] = new DiscreteEstimatorFullBayes(
            instances.attribute(iAttribute).numValues(), w1, w2,
            (DiscreteEstimatorBayes) EmptyNet.m_Distributions[iAttribute][0],
            (DiscreteEstimatorBayes) NBNet.m_Distributions[iAttribute][iParent],
            m_fAlpha);
        }
      }
    }
    // the class node has no parents; take its distribution from the empty net
    int iAttribute = instances.classIndex();
    bayesNet.m_Distributions[iAttribute][0] = EmptyNet.m_Distributions[iAttribute][0];
  } // estimateCPTs

  /**
   * Updates the classifier with the given instance.
   *
   * @param bayesNet the bayes net to use
   * @param instance the new training instance to include in the model
   * @throws Exception if the instance could not be incorporated in the model.
   */
  @Override
  public void updateClassifier(BayesNet bayesNet, Instance instance)
    throws Exception {
    throw new Exception("updateClassifier does not apply to BMA estimator");
  } // updateClassifier

  /**
   * initCPTs reserves space for CPTs and set all counts to zero
   *
   * @param bayesNet the bayes net to use
   * @throws Exception if something goes wrong
   */
  @Override
  public void initCPTs(BayesNet bayesNet) throws Exception {
    // Reserve space for CPTs: every node gets a row sized to the largest
    // parent cardinality in the network
    int nMaxParentCardinality = 1;

    for (int iAttribute = 0; iAttribute < bayesNet.m_Instances.numAttributes(); iAttribute++) {
      if (bayesNet.getParentSet(iAttribute).getCardinalityOfParents() > nMaxParentCardinality) {
        nMaxParentCardinality = bayesNet.getParentSet(iAttribute)
          .getCardinalityOfParents();
      }
    }

    // Reserve plenty of memory
    bayesNet.m_Distributions = new Estimator[bayesNet.m_Instances
      .numAttributes()][nMaxParentCardinality];
  } // initCPTs

  /**
   * Returns whether K2 prior is used
   *
   * @return true if K2 prior is used
   */
  public boolean isUseK2Prior() {
    return m_bUseK2Prior;
  }

  /**
   * Sets the UseK2Prior.
   *
   * @param bUseK2Prior The bUseK2Prior to set
   */
  public void setUseK2Prior(boolean bUseK2Prior) {
    m_bUseK2Prior = bUseK2Prior;
  }

  /**
   * Returns an enumeration describing the available options
   *
   * @return an enumeration of all the available options
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> newVector = new Vector<Option>(1);

    newVector.addElement(new Option("\tWhether to use K2 prior.\n", "k2", 0,
      "-k2"));

    newVector.addAll(Collections.list(super.listOptions()));

    return newVector.elements();
  } // listOptions

  /**
   * Parses a given list of options.
   * <p/>
   *
   * <!-- options-start --> Valid options are:
   * <p/>
   *
   * <pre>
   * -k2
   *  Whether to use K2 prior.
   * </pre>
   *
   * <pre>
   * -A &lt;alpha&gt;
   *  Initial count (alpha)
   * </pre>
   *
   * <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  @Override
  public void setOptions(String[] options) throws Exception {
    setUseK2Prior(Utils.getFlag("k2", options));

    super.setOptions(options);
  } // setOptions

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {
    Vector<String> options = new Vector<String>();

    if (isUseK2Prior()) {
      options.add("-k2");
    }

    Collections.addAll(options, super.getOptions());

    return options.toArray(new String[0]);
  } // getOptions

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }

} // class BMAEstimator
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/estimate/BayesNetEstimator.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * BayesNetEstimator.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */
package weka.classifiers.bayes.net.estimate;

import java.io.Serializable;
import java.util.Enumeration;
import java.util.Vector;

import weka.classifiers.bayes.BayesNet;
import weka.core.Instance;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;

/**
 * <!-- globalinfo-start --> BayesNetEstimator is the base class for estimating
 * the conditional probability tables of a Bayes network once the structure has
 * been learned.
 * <p/>
 * <!-- globalinfo-end -->
 *
 * <!-- options-start --> Valid options are:
 * <p/>
 *
 * <pre>
 * -A &lt;alpha&gt;
 *  Initial count (alpha)
 * </pre>
 *
 * <!-- options-end -->
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision$
 */
public class BayesNetEstimator implements OptionHandler, Serializable,
  RevisionHandler {

  /** for serialization */
  static final long serialVersionUID = 2184330197666253884L;

  /**
   * Holds prior on count
   */
  protected double m_fAlpha = 0.5;

  /**
   * estimateCPTs estimates the conditional probability tables for the Bayes Net
   * using the network structure.
   *
   * @param bayesNet the bayes net to use
   * @throws Exception always throws an exception, since subclass needs to be
   *           used
   */
  public void estimateCPTs(BayesNet bayesNet) throws Exception {
    throw new Exception("Incorrect BayesNetEstimator: use subclass instead.");
  }

  /**
   * Updates the classifier with the given instance.
   *
   * @param bayesNet the bayes net to use
   * @param instance the new training instance to include in the model
   * @throws Exception always throws an exception, since subclass needs to be
   *           used
   */
  public void updateClassifier(BayesNet bayesNet, Instance instance)
    throws Exception {
    throw new Exception("Incorrect BayesNetEstimator: use subclass instead.");
  }

  /**
   * Calculates the class membership probabilities for the given test instance.
   *
   * @param bayesNet the bayes net to use
   * @param instance the instance to be classified
   * @return predicted class probability distribution
   * @throws Exception always throws an exception, since subclass needs to be
   *           used
   */
  public double[] distributionForInstance(BayesNet bayesNet, Instance instance)
    throws Exception {
    throw new Exception("Incorrect BayesNetEstimator: use subclass instead.");
  }

  /**
   * initCPTs reserves space for CPTs and set all counts to zero
   *
   * @param bayesNet the bayes net to use
   * @throws Exception always throws an exception, since subclass needs to be
   *           used
   */
  public void initCPTs(BayesNet bayesNet) throws Exception {
    throw new Exception("Incorrect BayesNetEstimator: use subclass instead.");
  } // initCPTs

  /**
   * Returns an enumeration describing the available options
   *
   * @return an enumeration of all the available options
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> newVector = new Vector<Option>(1);

    newVector.addElement(new Option("\tInitial count (alpha)\n", "A", 1,
      "-A <alpha>"));

    return newVector.elements();
  } // listOptions

  /**
   * Parses a given list of options.
   * <p/>
   *
   * <!-- options-start --> Valid options are:
   * <p/>
   *
   * <pre>
   * -A &lt;alpha&gt;
   *  Initial count (alpha)
   * </pre>
   *
   * <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  @Override
  public void setOptions(String[] options) throws Exception {
    String sAlpha = Utils.getOption('A', options);

    if (sAlpha.length() != 0) {
      // parse directly to double: the previous
      // (new Float(sAlpha)).floatValue() silently truncated the value to
      // float precision (m_fAlpha is a double) and used a deprecated
      // boxing constructor
      m_fAlpha = Double.parseDouble(sAlpha);
    } else {
      m_fAlpha = 0.5;
    }

    Utils.checkForRemainingOptions(options);
  } // setOptions

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {
    String[] options = new String[2];
    int current = 0;

    options[current++] = "-A";
    options[current++] = "" + m_fAlpha;

    return options;
  } // getOptions

  /**
   * Set prior used in probability table estimation
   *
   * @param fAlpha representing prior
   */
  public void setAlpha(double fAlpha) {
    m_fAlpha = fAlpha;
  }

  /**
   * Get prior used in probability table estimation
   *
   * @return prior
   */
  public double getAlpha() {
    return m_fAlpha;
  }

  /**
   * @return a string to describe the Alpha option.
   */
  public String alphaTipText() {
    return "Alpha is used for estimating the probability tables and can be interpreted"
      + " as the initial count on each value.";
  }

  /**
   * This will return a string describing the class.
   *
   * @return The string.
   */
  public String globalInfo() {
    return "BayesNetEstimator is the base class for estimating the "
      + "conditional probability tables of a Bayes network once the "
      + "structure has been learned.";
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }

} // BayesNetEstimator
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/estimate/DiscreteEstimatorBayes.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * DiscreteEstimatorBayes.java
 * Adapted from DiscreteEstimator.java
 * Copyright (C) 2012 University of Waikato, Hamilton, New Zealand
 *
 */
package weka.classifiers.bayes.net.estimate;

import weka.classifiers.bayes.net.search.local.Scoreable;
import weka.core.RevisionUtils;
import weka.core.Statistics;
import weka.core.Utils;
import weka.estimators.DiscreteEstimator;
import weka.estimators.Estimator;

/**
 * Symbolic probability estimator based on symbol counts and a prior.
 * Every symbol starts with a count of fPrior; observed weights are added on
 * top, and probabilities are the normalized counts.
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision$
 */
public class DiscreteEstimatorBayes extends Estimator implements Scoreable {

  /** for serialization */
  static final long serialVersionUID = 4215400230843212684L;

  /**
   * Hold the counts
   */
  protected double[] m_Counts;

  /**
   * Hold the sum of counts
   */
  protected double m_SumOfCounts;

  /**
   * Holds number of symbols in distribution
   */
  protected int m_nSymbols = 0;

  /**
   * Holds the prior probability
   */
  protected double m_fPrior = 0.0;

  /**
   * Constructor. Initializes every symbol's count to the prior, so the
   * initial total is fPrior * nSymbols.
   *
   * @param nSymbols the number of possible symbols (remember to include 0)
   * @param fPrior the prior count assigned to each symbol
   */
  public DiscreteEstimatorBayes(int nSymbols, double fPrior) {
    m_fPrior = fPrior;
    m_nSymbols = nSymbols;
    m_Counts = new double[m_nSymbols];

    for (int iSymbol = 0; iSymbol < m_nSymbols; iSymbol++) {
      m_Counts[iSymbol] = m_fPrior;
    }

    m_SumOfCounts = m_fPrior * (double) m_nSymbols;
  } // DiscreteEstimatorBayes

  /**
   * Add a new data value to the current estimator.
   *
   * @param data the new data value (used as a symbol index)
   * @param weight the weight assigned to the data value
   */
  public void addValue(double data, double weight) {
    m_Counts[(int) data] += weight;
    m_SumOfCounts += weight;
  }

  /**
   * Get a probability estimate for a value
   *
   * @param data the value to estimate the probability of
   * @return the estimated probability of the supplied value
   */
  public double getProbability(double data) {
    if (m_SumOfCounts == 0) {
      // this can only happen if numSymbols = 0 in constructor
      return 0;
    }

    return (double) m_Counts[(int) data] / m_SumOfCounts;
  }

  /**
   * Get a counts for a value
   *
   * @param data the value to get the counts for
   * @return the count of the supplied value
   */
  public double getCount(double data) {
    if (m_SumOfCounts == 0) {
      // this can only happen if numSymbols = 0 in constructor
      return 0;
    }

    return m_Counts[(int) data];
  }

  /**
   * Gets the number of symbols this estimator operates with
   *
   * @return the number of estimator symbols
   */
  public int getNumSymbols() {
    return (m_Counts == null) ? 0 : m_Counts.length;
  }

  /**
   * Gets the log score contribution of this distribution
   *
   * @param nType score type (one of Scoreable.BAYES, BDeu, MDL, AIC, ENTROPY;
   *          any other type yields a score of 0)
   * @param nCardinality cardinality of the parent set, used by the BDeu prior
   * @return the score
   */
  public double logScore(int nType, int nCardinality) {
    double fScore = 0.0;

    switch (nType) {

    case (Scoreable.BAYES): {
      for (int iSymbol = 0; iSymbol < m_nSymbols; iSymbol++) {
        fScore += Statistics.lnGamma(m_Counts[iSymbol]);
      }

      fScore -= Statistics.lnGamma(m_SumOfCounts);

      if (m_fPrior != 0.0) {
        fScore -= m_nSymbols * Statistics.lnGamma(m_fPrior);
        fScore += Statistics.lnGamma(m_nSymbols * m_fPrior);
      }
    }

      break;

    case (Scoreable.BDeu): {
      for (int iSymbol = 0; iSymbol < m_nSymbols; iSymbol++) {
        fScore += Statistics.lnGamma(m_Counts[iSymbol]);
      }

      fScore -= Statistics.lnGamma(m_SumOfCounts);
      // BDeu spreads a total prior mass of 1 uniformly over the
      // nSymbols * nCardinality parameter cells:
      //fScore -= m_nSymbols * Statistics.lnGamma(1.0);
      //fScore += Statistics.lnGamma(m_nSymbols * 1.0);
      fScore -= m_nSymbols * Statistics.lnGamma(1.0/(m_nSymbols * nCardinality));
      fScore += Statistics.lnGamma(1.0/nCardinality);
    }

      break;

    case (Scoreable.MDL):

    case (Scoreable.AIC):

    case (Scoreable.ENTROPY): {
      // log-likelihood term, shared by MDL/AIC/ENTROPY; the complexity
      // penalties are applied by the caller
      for (int iSymbol = 0; iSymbol < m_nSymbols; iSymbol++) {
        double fP = getProbability(iSymbol);

        fScore += m_Counts[iSymbol] * Math.log(fP);
      }
    }

      break;

    default: {}
    }

    return fScore;
  }

  /**
   * Display a representation of this estimator
   *
   * @return a string representation of the estimator
   */
  public String toString() {
    String result = "Discrete Estimator. Counts = ";

    if (m_SumOfCounts > 1) {
      for (int i = 0; i < m_Counts.length; i++) {
        result += " " + Utils.doubleToString(m_Counts[i], 2);
      }

      result += " (Total = " + Utils.doubleToString(m_SumOfCounts, 2) + ")\n";
    } else {
      for (int i = 0; i < m_Counts.length; i++) {
        result += " " + m_Counts[i];
      }

      result += " (Total = " + m_SumOfCounts + ")\n";
    }

    return result;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }

  /**
   * Main method for testing this class.
   * NOTE(review): this exercises weka.estimators.DiscreteEstimator, not this
   * class — it looks copied from DiscreteEstimator's test main.
   *
   * @param argv should contain a sequence of integers which will be treated as
   *          symbolic.
   */
  public static void main(String[] argv) {
    try {
      if (argv.length == 0) {
        System.out.println("Please specify a set of instances.");

        return;
      }

      int current = Integer.parseInt(argv[0]);
      int max = current;

      for (int i = 1; i < argv.length; i++) {
        current = Integer.parseInt(argv[i]);

        if (current > max) {
          max = current;
        }
      }

      DiscreteEstimator newEst = new DiscreteEstimator(max + 1, true);

      for (int i = 0; i < argv.length; i++) {
        current = Integer.parseInt(argv[i]);

        System.out.println(newEst);
        System.out.println("Prediction for " + current + " = "
          + newEst.getProbability(current));
        newEst.addValue(current, 1);
      }
    } catch (Exception e) {
      System.out.println(e.getMessage());
    }
  } // main

} // class DiscreteEstimatorBayes
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/estimate/DiscreteEstimatorFullBayes.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * DiscreteEstimatorFullBayes.java * Copyright (C) 2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.estimate; import weka.core.RevisionUtils; import weka.estimators.DiscreteEstimator; /** * Symbolic probability estimator based on symbol counts and a prior. * * @author Remco Bouckaert (rrb@xm.co.nz) * @version $Revision$ */ public class DiscreteEstimatorFullBayes extends DiscreteEstimatorBayes { /** for serialization */ static final long serialVersionUID = 6774941981423312133L; /** * Constructor * * @param nSymbols the number of possible symbols (remember to include 0) * @param w1 * @param w2 * @param EmptyDist * @param ClassDist * @param fPrior */ public DiscreteEstimatorFullBayes(int nSymbols, double w1, double w2, DiscreteEstimatorBayes EmptyDist, DiscreteEstimatorBayes ClassDist, double fPrior) { super(nSymbols, fPrior); m_SumOfCounts = 0.0; for (int iSymbol = 0; iSymbol < m_nSymbols; iSymbol++) { double p1 = EmptyDist.getProbability(iSymbol); double p2 = ClassDist.getProbability(iSymbol); m_Counts[iSymbol] = w1 * p1 + w2 * p2; m_SumOfCounts += m_Counts[iSymbol]; } } // DiscreteEstimatorFullBayes /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Main method for testing this class. 
* * @param argv should contain a sequence of integers which * will be treated as symbolic. */ public static void main(String[] argv) { try { if (argv.length == 0) { System.out.println("Please specify a set of instances."); return; } int current = Integer.parseInt(argv[0]); int max = current; for (int i = 1; i < argv.length; i++) { current = Integer.parseInt(argv[i]); if (current > max) { max = current; } } DiscreteEstimator newEst = new DiscreteEstimator(max + 1, true); for (int i = 0; i < argv.length; i++) { current = Integer.parseInt(argv[i]); System.out.println(newEst); System.out.println("Prediction for " + current + " = " + newEst.getProbability(current)); newEst.addValue(current, 1); } } catch (Exception e) { System.out.println(e.getMessage()); } } // main } // class DiscreteEstimatorFullBayes
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/estimate/MultiNomialBMAEstimator.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * MultiNomialBMAEstimator.java * Copyright (C) 2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.estimate; import java.util.ArrayList; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.bayes.BayesNet; import weka.classifiers.bayes.net.search.local.K2; import weka.core.Attribute; import weka.core.DenseInstance; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.Statistics; import weka.core.Utils; import weka.estimators.Estimator; /** * <!-- globalinfo-start --> Multinomial BMA Estimator. * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -k2 * Whether to use K2 prior. 
* </pre> * * <pre> * -A &lt;alpha&gt; * Initial count (alpha) * </pre> * * <!-- options-end --> * * @version $Revision$ * @author Remco Bouckaert (rrb@xm.co.nz) */ public class MultiNomialBMAEstimator extends BayesNetEstimator { /** for serialization */ static final long serialVersionUID = 8330705772601586313L; /** whether to use K2 prior */ protected boolean m_bUseK2Prior = true; /** * Returns a string describing this object * * @return a description of the classifier suitable for displaying in the * explorer/experimenter gui */ @Override public String globalInfo() { return "Multinomial BMA Estimator."; } /** * estimateCPTs estimates the conditional probability tables for the Bayes Net * using the network structure. * * @param bayesNet the bayes net to use * @throws Exception if number of parents doesn't fit (more than 1) */ @Override public void estimateCPTs(BayesNet bayesNet) throws Exception { initCPTs(bayesNet); // sanity check to see if nodes have not more than one parent for (int iAttribute = 0; iAttribute < bayesNet.m_Instances.numAttributes(); iAttribute++) { if (bayesNet.getParentSet(iAttribute).getNrOfParents() > 1) { throw new Exception( "Cannot handle networks with nodes with more than 1 parent (yet)."); } } // filter data to binary Instances instances = new Instances(bayesNet.m_Instances); for (int iAttribute = instances.numAttributes() - 1; iAttribute >= 0; iAttribute--) { if (instances.attribute(iAttribute).numValues() != 2) { throw new Exception("MultiNomialBMAEstimator can only handle binary nominal attributes!"); } } // ok, now all data is binary, except the class attribute // now learn the empty and tree network BayesNet EmptyNet = new BayesNet(); K2 oSearchAlgorithm = new K2(); oSearchAlgorithm.setInitAsNaiveBayes(false); oSearchAlgorithm.setMaxNrOfParents(0); EmptyNet.setSearchAlgorithm(oSearchAlgorithm); EmptyNet.buildClassifier(instances); BayesNet NBNet = new BayesNet(); oSearchAlgorithm.setInitAsNaiveBayes(true); 
oSearchAlgorithm.setMaxNrOfParents(1); NBNet.setSearchAlgorithm(oSearchAlgorithm); NBNet.buildClassifier(instances); // estimate CPTs for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) { if (iAttribute != instances.classIndex()) { double w1 = 0.0, w2 = 0.0; int nAttValues = instances.attribute(iAttribute).numValues(); if (m_bUseK2Prior == true) { // use Cooper and Herskovitz's metric for (int iAttValue = 0; iAttValue < nAttValues; iAttValue++) { w1 += Statistics .lnGamma(1 + ((DiscreteEstimatorBayes) EmptyNet.m_Distributions[iAttribute][0]) .getCount(iAttValue)) - Statistics.lnGamma(1); } w1 += Statistics.lnGamma(nAttValues) - Statistics.lnGamma(nAttValues + instances.numInstances()); for (int iParent = 0; iParent < bayesNet.getParentSet(iAttribute) .getCardinalityOfParents(); iParent++) { int nTotal = 0; for (int iAttValue = 0; iAttValue < nAttValues; iAttValue++) { double nCount = ((DiscreteEstimatorBayes) NBNet.m_Distributions[iAttribute][iParent]) .getCount(iAttValue); w2 += Statistics.lnGamma(1 + nCount) - Statistics.lnGamma(1); nTotal += nCount; } w2 += Statistics.lnGamma(nAttValues) - Statistics.lnGamma(nAttValues + nTotal); } } else { // use BDe metric for (int iAttValue = 0; iAttValue < nAttValues; iAttValue++) { w1 += Statistics .lnGamma(1.0 / nAttValues + ((DiscreteEstimatorBayes) EmptyNet.m_Distributions[iAttribute][0]) .getCount(iAttValue)) - Statistics.lnGamma(1.0 / nAttValues); } w1 += Statistics.lnGamma(1) - Statistics.lnGamma(1 + instances.numInstances()); int nParentValues = bayesNet.getParentSet(iAttribute) .getCardinalityOfParents(); for (int iParent = 0; iParent < nParentValues; iParent++) { int nTotal = 0; for (int iAttValue = 0; iAttValue < nAttValues; iAttValue++) { double nCount = ((DiscreteEstimatorBayes) NBNet.m_Distributions[iAttribute][iParent]) .getCount(iAttValue); w2 += Statistics.lnGamma(1.0 / (nAttValues * nParentValues) + nCount) - Statistics.lnGamma(1.0 / (nAttValues * nParentValues)); nTotal += 
nCount; } w2 += Statistics.lnGamma(1) - Statistics.lnGamma(1 + nTotal); } } // System.out.println(w1 + " " + w2 + " " + (w2 - w1)); // normalize weigths if (w1 < w2) { w2 = w2 - w1; w1 = 0; w1 = 1 / (1 + Math.exp(w2)); w2 = Math.exp(w2) / (1 + Math.exp(w2)); } else { w1 = w1 - w2; w2 = 0; w2 = 1 / (1 + Math.exp(w1)); w1 = Math.exp(w1) / (1 + Math.exp(w1)); } for (int iParent = 0; iParent < bayesNet.getParentSet(iAttribute) .getCardinalityOfParents(); iParent++) { bayesNet.m_Distributions[iAttribute][iParent] = new DiscreteEstimatorFullBayes( instances.attribute(iAttribute).numValues(), w1, w2, (DiscreteEstimatorBayes) EmptyNet.m_Distributions[iAttribute][0], (DiscreteEstimatorBayes) NBNet.m_Distributions[iAttribute][iParent], m_fAlpha); } } } int iAttribute = instances.classIndex(); bayesNet.m_Distributions[iAttribute][0] = EmptyNet.m_Distributions[iAttribute][0]; } // estimateCPTs /** * Updates the classifier with the given instance. * * @param bayesNet the bayes net to use * @param instance the new training instance to include in the model * @throws Exception if the instance could not be incorporated in the model. */ @Override public void updateClassifier(BayesNet bayesNet, Instance instance) throws Exception { throw new Exception("updateClassifier does not apply to BMA estimator"); } // updateClassifier /** * initCPTs reserves space for CPTs and set all counts to zero * * @param bayesNet the bayes net to use * @throws Exception doesn't apply */ @Override public void initCPTs(BayesNet bayesNet) throws Exception { // Reserve sufficient memory bayesNet.m_Distributions = new Estimator[bayesNet.m_Instances .numAttributes()][2]; } // initCPTs /** * @return boolean */ public boolean isUseK2Prior() { return m_bUseK2Prior; } /** * Sets the UseK2Prior. * * @param bUseK2Prior The bUseK2Prior to set */ public void setUseK2Prior(boolean bUseK2Prior) { m_bUseK2Prior = bUseK2Prior; } /** * Calculates the class membership probabilities for the given test instance. 
* * @param bayesNet the bayes net to use * @param instance the instance to be classified * @return predicted class probability distribution * @throws Exception if there is a problem generating the prediction */ @Override public double[] distributionForInstance(BayesNet bayesNet, Instance instance) throws Exception { Instances instances = bayesNet.m_Instances; int nNumClasses = instances.numClasses(); double[] fProbs = new double[nNumClasses]; for (int iClass = 0; iClass < nNumClasses; iClass++) { fProbs[iClass] = 1.0; } for (int iClass = 0; iClass < nNumClasses; iClass++) { double logfP = 0; for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) { double iCPT = 0; for (int iParent = 0; iParent < bayesNet.getParentSet(iAttribute) .getNrOfParents(); iParent++) { int nParent = bayesNet.getParentSet(iAttribute).getParent(iParent); if (nParent == instances.classIndex()) { iCPT = iCPT * nNumClasses + iClass; } else { iCPT = iCPT * instances.attribute(nParent).numValues() + instance.value(nParent); } } if (iAttribute == instances.classIndex()) { logfP += Math.log(bayesNet.m_Distributions[iAttribute][(int) iCPT] .getProbability(iClass)); } else { logfP += instance.value(iAttribute) * Math.log(bayesNet.m_Distributions[iAttribute][(int) iCPT] .getProbability(instance.value(1))); } } fProbs[iClass] += logfP; } // Find maximum double fMax = fProbs[0]; for (int iClass = 0; iClass < nNumClasses; iClass++) { if (fProbs[iClass] > fMax) { fMax = fProbs[iClass]; } } // transform from log-space to normal-space for (int iClass = 0; iClass < nNumClasses; iClass++) { fProbs[iClass] = Math.exp(fProbs[iClass] - fMax); } // Display probabilities Utils.normalize(fProbs); return fProbs; } // distributionForInstance /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<Option>(1); newVector.addElement(new 
Option("\tWhether to use K2 prior.\n", "k2", 0, "-k2")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } // listOptions /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -k2 * Whether to use K2 prior. * </pre> * * <pre> * -A &lt;alpha&gt; * Initial count (alpha) * </pre> * * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { setUseK2Prior(Utils.getFlag("k2", options)); super.setOptions(options); } // setOptions /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<String>(); if (isUseK2Prior()) { options.add("-k2"); } return options.toArray(new String[0]); } // getOptions /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } // class MultiNomialBMAEstimator
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/estimate/SimpleEstimator.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * BayesNet.java * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.estimate; import java.util.Enumeration; import weka.classifiers.bayes.BayesNet; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.Utils; import weka.estimators.Estimator; /** * <!-- globalinfo-start --> SimpleEstimator is used for estimating the * conditional probability tables of a Bayes network once the structure has been * learned. Estimates probabilities directly from data. * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -A &lt;alpha&gt; * Initial count (alpha) * </pre> * * <!-- options-end --> * * @author Remco Bouckaert (rrb@xm.co.nz) * @version $Revision$ */ public class SimpleEstimator extends BayesNetEstimator { /** for serialization */ static final long serialVersionUID = 5874941612331806172L; /** * Returns a string describing this object * * @return a description of the classifier suitable for displaying in the * explorer/experimenter gui */ @Override public String globalInfo() { return "SimpleEstimator is used for estimating the conditional probability " + "tables of a Bayes network once the structure has been learned. 
" + "Estimates probabilities directly from data."; } /** * estimateCPTs estimates the conditional probability tables for the Bayes Net * using the network structure. * * @param bayesNet the bayes net to use * @throws Exception if something goes wrong */ @Override public void estimateCPTs(final BayesNet bayesNet) throws Exception { this.initCPTs(bayesNet); // Compute counts Enumeration<Instance> enumInsts = bayesNet.m_Instances.enumerateInstances(); while (enumInsts.hasMoreElements()) { Instance instance = enumInsts.nextElement(); this.updateClassifier(bayesNet, instance); } } // estimateCPTs /** * Updates the classifier with the given instance. * * @param bayesNet the bayes net to use * @param instance the new training instance to include in the model * @throws Exception if the instance could not be incorporated in the model. */ @Override public void updateClassifier(final BayesNet bayesNet, final Instance instance) throws Exception { for (int iAttribute = 0; iAttribute < bayesNet.m_Instances.numAttributes(); iAttribute++) { double iCPT = 0; for (int iParent = 0; iParent < bayesNet.getParentSet(iAttribute).getNrOfParents(); iParent++) { int nParent = bayesNet.getParentSet(iAttribute).getParent(iParent); iCPT = iCPT * bayesNet.m_Instances.attribute(nParent).numValues() + instance.value(nParent); } bayesNet.m_Distributions[iAttribute][(int) iCPT].addValue(instance.value(iAttribute), instance.weight()); } } // updateClassifier /** * initCPTs reserves space for CPTs and set all counts to zero * * @param bayesNet the bayes net to use * @throws Exception if something goes wrong */ @Override public void initCPTs(final BayesNet bayesNet) throws Exception { Instances instances = bayesNet.m_Instances; // Reserve space for CPTs int nMaxParentCardinality = 1; for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) { if (bayesNet.getParentSet(iAttribute).getCardinalityOfParents() > nMaxParentCardinality) { nMaxParentCardinality = 
bayesNet.getParentSet(iAttribute).getCardinalityOfParents(); } } // Reserve plenty of memory bayesNet.m_Distributions = new Estimator[instances.numAttributes()][nMaxParentCardinality]; // estimate CPTs for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) { for (int iParent = 0; iParent < bayesNet.getParentSet(iAttribute).getCardinalityOfParents(); iParent++) { bayesNet.m_Distributions[iAttribute][iParent] = new DiscreteEstimatorBayes(instances.attribute(iAttribute).numValues(), this.m_fAlpha); } } } // initCPTs /** * Calculates the class membership probabilities for the given test instance. * * @param bayesNet the bayes net to use * @param instance the instance to be classified * @return predicted class probability distribution * @throws Exception if there is a problem generating the prediction */ @Override public double[] distributionForInstance(final BayesNet bayesNet, final Instance instance) throws Exception { Instances instances = bayesNet.m_Instances; int nNumClasses = instances.numClasses(); double[] fProbs = new double[nNumClasses]; for (int iClass = 0; iClass < nNumClasses; iClass++) { double logfP = 0; for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) { if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } double iCPT = 0; for (int iParent = 0; iParent < bayesNet.getParentSet(iAttribute).getNrOfParents(); iParent++) { int nParent = bayesNet.getParentSet(iAttribute).getParent(iParent); if (nParent == instances.classIndex()) { iCPT = iCPT * nNumClasses + iClass; } else { iCPT = iCPT * instances.attribute(nParent).numValues() + instance.value(nParent); } } if (iAttribute == instances.classIndex()) { // fP *= // m_Distributions[iAttribute][(int) iCPT].getProbability(iClass); logfP += Math.log(bayesNet.m_Distributions[iAttribute][(int) iCPT].getProbability(iClass)); } else { // fP *= // m_Distributions[iAttribute][(int) iCPT] // .getProbability(instance.value(iAttribute)); logfP += 
Math.log(bayesNet.m_Distributions[iAttribute][(int) iCPT].getProbability(instance.value(iAttribute))); } } // fProbs[iClass] *= fP; fProbs[iClass] += logfP; } // Find maximum double fMax = fProbs[0]; for (int iClass = 0; iClass < nNumClasses; iClass++) { if (fProbs[iClass] > fMax) { fMax = fProbs[iClass]; } } // transform from log-space to normal-space for (int iClass = 0; iClass < nNumClasses; iClass++) { fProbs[iClass] = Math.exp(fProbs[iClass] - fMax); } // Display probabilities try { Utils.normalize(fProbs); } catch (IllegalArgumentException ex) { return new double[nNumClasses]; // predict missing value } return fProbs; } // distributionForInstance /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } // SimpleEstimator
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search/SearchAlgorithm.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * SearchAlgorithm.java * Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.search; import java.io.Serializable; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.bayes.BayesNet; import weka.classifiers.bayes.net.BIFReader; import weka.classifiers.bayes.net.ParentSet; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * This is the base class for all search algorithms for learning Bayes networks. * It contains some common code, used by other network structure search * algorithms, and should not be used by itself. 
* * <!-- options-start --> <!-- options-end --> * * @author Remco Bouckaert * @version $Revision$ */ public class SearchAlgorithm implements OptionHandler, Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = 6164792240778525312L; /** * Holds upper bound on number of parents */ protected int m_nMaxNrOfParents = 1; /** * determines whether initial structure is an empty graph or a Naive Bayes * network */ protected boolean m_bInitAsNaiveBayes = true; /** * Determines whether after structure is found a MarkovBlanketClassifier * correction should be applied If this is true, m_bInitAsNaiveBayes is * overridden and interpreted as false. */ protected boolean m_bMarkovBlanketClassifier = false; /** * File name containing initial network structure. This can be used as * starting point for structure search It will be ignored if not speficied. * When specified, it overrides the InitAsNaivBayes flag. */ protected String m_sInitalBIFFile; /** c'tor **/ public SearchAlgorithm() { } // SearchAlgorithm /** * AddArcMakesSense checks whether adding the arc from iAttributeTail to * iAttributeHead does not already exists and does not introduce a cycle * * @param bayesNet * @param instances * @param iAttributeHead index of the attribute that becomes head of the arrow * @param iAttributeTail index of the attribute that becomes tail of the arrow * @return true if adding arc is allowed, otherwise false * @throws InterruptedException */ protected boolean addArcMakesSense(final BayesNet bayesNet, final Instances instances, final int iAttributeHead, final int iAttributeTail) throws InterruptedException { if (iAttributeHead == iAttributeTail) { return false; } // sanity check: arc should not be in parent set already if (this.isArc(bayesNet, iAttributeHead, iAttributeTail)) { return false; } // sanity check: arc should not introduce a cycle int nNodes = instances.numAttributes(); boolean[] bDone = new boolean[nNodes]; for (int iNode = 0; iNode < nNodes; 
iNode++) { bDone[iNode] = false; } // check for cycles bayesNet.getParentSet(iAttributeHead).addParent(iAttributeTail, instances); for (int iNode = 0; iNode < nNodes; iNode++) { // find a node for which all parents are 'done' boolean bFound = false; for (int iNode2 = 0; !bFound && iNode2 < nNodes; iNode2++) { if (Thread.interrupted()) { Thread.interrupted(); throw new InterruptedException("Killed WEKA"); } if (!bDone[iNode2]) { boolean bHasNoParents = true; for (int iParent = 0; iParent < bayesNet.getParentSet(iNode2).getNrOfParents(); iParent++) { if (!bDone[bayesNet.getParentSet(iNode2).getParent(iParent)]) { bHasNoParents = false; } } if (bHasNoParents) { bDone[iNode2] = true; bFound = true; } } } if (!bFound) { bayesNet.getParentSet(iAttributeHead).deleteLastParent(instances); return false; } } bayesNet.getParentSet(iAttributeHead).deleteLastParent(instances); return true; } // AddArcMakesCycle /** * reverseArcMakesSense checks whether the arc from iAttributeTail to * iAttributeHead exists and reversing does not introduce a cycle * * @param bayesNet * @param instances * @param iAttributeHead index of the attribute that is head of the arrow * @param iAttributeTail index of the attribute that is tail of the arrow * @return true if the arc from iAttributeTail to iAttributeHead exists and * reversing does not introduce a cycle */ protected boolean reverseArcMakesSense(final BayesNet bayesNet, final Instances instances, final int iAttributeHead, final int iAttributeTail) { if (iAttributeHead == iAttributeTail) { return false; } // sanity check: arc should be in parent set already if (!this.isArc(bayesNet, iAttributeHead, iAttributeTail)) { return false; } // sanity check: arc should not introduce a cycle int nNodes = instances.numAttributes(); boolean[] bDone = new boolean[nNodes]; for (int iNode = 0; iNode < nNodes; iNode++) { bDone[iNode] = false; } // check for cycles bayesNet.getParentSet(iAttributeTail).addParent(iAttributeHead, instances); for (int iNode = 0; 
iNode < nNodes; iNode++) { // find a node for which all parents are 'done' boolean bFound = false; for (int iNode2 = 0; !bFound && iNode2 < nNodes; iNode2++) { if (!bDone[iNode2]) { ParentSet parentSet = bayesNet.getParentSet(iNode2); boolean bHasNoParents = true; for (int iParent = 0; iParent < parentSet.getNrOfParents(); iParent++) { if (!bDone[parentSet.getParent(iParent)]) { // this one has a parent which is not 'done' UNLESS it is the arc // to be reversed if (!(iNode2 == iAttributeHead && parentSet.getParent(iParent) == iAttributeTail)) { bHasNoParents = false; } } } if (bHasNoParents) { bDone[iNode2] = true; bFound = true; } } } if (!bFound) { bayesNet.getParentSet(iAttributeTail).deleteLastParent(instances); return false; } } bayesNet.getParentSet(iAttributeTail).deleteLastParent(instances); return true; } // ReverseArcMakesCycle /** * IsArc checks whether the arc from iAttributeTail to iAttributeHead already * exists * * @param bayesNet * @param iAttributeHead index of the attribute that becomes head of the arrow * @param iAttributeTail index of the attribute that becomes tail of the arrow * @return true if the arc from iAttributeTail to iAttributeHead already * exists */ protected boolean isArc(final BayesNet bayesNet, final int iAttributeHead, final int iAttributeTail) { for (int iParent = 0; iParent < bayesNet.getParentSet(iAttributeHead).getNrOfParents(); iParent++) { if (bayesNet.getParentSet(iAttributeHead).getParent(iParent) == iAttributeTail) { return true; } } return false; } // IsArc /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { return new Vector<Option>(0).elements(); } // listOption /** * Parses a given list of options. 
* <p/> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { } // setOptions /** * Gets the current settings of the Classifier. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { return new String[0]; } // getOptions /** * a string representation of the algorithm * * @return a string representation */ @Override public String toString() { return "SearchAlgorithm\n"; } // toString /** * buildStructure determines the network structure/graph of the network. The * default behavior is creating a network where all nodes have the first node * as its parent (i.e., a BayesNet that behaves like a naive Bayes * classifier). This method can be overridden by derived classes to restrict * the class of network structures that are acceptable. * * @param bayesNet the network * @param instances the data to use * @throws Exception if something goes wrong */ public void buildStructure(final BayesNet bayesNet, final Instances instances) throws Exception { if (this.m_sInitalBIFFile != null && !this.m_sInitalBIFFile.equals("")) { BIFReader initialNet = new BIFReader().processFile(this.m_sInitalBIFFile); for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) { int iNode = initialNet.getNode(bayesNet.getNodeName(iAttribute)); for (int iParent = 0; iParent < initialNet.getNrOfParents(iAttribute); iParent++) { String sParent = initialNet.getNodeName(initialNet.getParent(iNode, iParent)); int nParent = 0; while (nParent < bayesNet.getNrOfNodes() && !bayesNet.getNodeName(nParent).equals(sParent)) { nParent++; } if (nParent < bayesNet.getNrOfNodes()) { bayesNet.getParentSet(iAttribute).addParent(nParent, instances); } else { System.err.println("Warning: Node " + sParent + " is ignored. 
It is found in initial network but not in data set."); } } } } else if (this.m_bInitAsNaiveBayes) { int iClass = instances.classIndex(); // initialize parent sets to have arrow from classifier node to // each of the other nodes for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) { if (iAttribute != iClass) { bayesNet.getParentSet(iAttribute).addParent(iClass, instances); } } } this.search(bayesNet, instances); if (this.m_bMarkovBlanketClassifier) { this.doMarkovBlanketCorrection(bayesNet, instances); } } // buildStructure /** * * @param bayesNet * @param instances */ protected void search(final BayesNet bayesNet, final Instances instances) throws Exception { // placeholder with implementation in derived classes } // search /** * for each node in the network make sure it is in the Markov blanket of the * classifier node, and if not, add arrows so that it is. If the node is an * ancestor of the classifier node, add arrow pointing to the classifier node, * otherwise, add arrow pointing to attribute node. 
* * @param bayesNet * @param instances */ protected void doMarkovBlanketCorrection(final BayesNet bayesNet, final Instances instances) { // Add class node as parent if it is not in the Markov Boundary int iClass = instances.classIndex(); ParentSet ancestors = new ParentSet(); int nOldSize = 0; ancestors.addParent(iClass, instances); while (nOldSize != ancestors.getNrOfParents()) { nOldSize = ancestors.getNrOfParents(); for (int iNode = 0; iNode < nOldSize; iNode++) { int iCurrent = ancestors.getParent(iNode); ParentSet p = bayesNet.getParentSet(iCurrent); for (int iParent = 0; iParent < p.getNrOfParents(); iParent++) { if (!ancestors.contains(p.getParent(iParent))) { ancestors.addParent(p.getParent(iParent), instances); } } } } for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) { boolean bIsInMarkovBoundary = (iAttribute == iClass) || bayesNet.getParentSet(iAttribute).contains(iClass) || bayesNet.getParentSet(iClass).contains(iAttribute); for (int iAttribute2 = 0; !bIsInMarkovBoundary && iAttribute2 < instances.numAttributes(); iAttribute2++) { bIsInMarkovBoundary = bayesNet.getParentSet(iAttribute2).contains(iAttribute) && bayesNet.getParentSet(iAttribute2).contains(iClass); } if (!bIsInMarkovBoundary) { if (ancestors.contains(iAttribute)) { if (bayesNet.getParentSet(iClass).getCardinalityOfParents() < 1024) { bayesNet.getParentSet(iClass).addParent(iAttribute, instances); } else { // too bad } } else { bayesNet.getParentSet(iAttribute).addParent(iClass, instances); } } } } // doMarkovBlanketCorrection /** * * @param bMarkovBlanketClassifier */ protected void setMarkovBlanketClassifier(final boolean bMarkovBlanketClassifier) { this.m_bMarkovBlanketClassifier = bMarkovBlanketClassifier; } /** * * @return */ protected boolean getMarkovBlanketClassifier() { return this.m_bMarkovBlanketClassifier; } /** * @return a string to describe the MaxNrOfParentsoption. 
*/ public String maxNrOfParentsTipText() { return "Set the maximum number of parents a node in the Bayes net can have." + " When initialized as Naive Bayes, setting this parameter to 1 results in" + " a Naive Bayes classifier. When set to 2, a Tree Augmented Bayes Network (TAN)" + " is learned, and when set >2, a Bayes Net Augmented Bayes Network (BAN)" + " is learned. By setting it to a value much larger than the number of nodes" + " in the network (the default of 100000 pretty much guarantees this), no" + " restriction on the number of parents is enforced"; } // maxNrOfParentsTipText /** * @return a string to describe the InitAsNaiveBayes option. */ public String initAsNaiveBayesTipText() { return "When set to true (default), the initial network used for structure learning" + " is a Naive Bayes Network, that is, a network with an arrow from the classifier" + " node to each other node. When set to false, an empty network is used as initial" + " network structure"; } // initAsNaiveBayesTipText /** * @return a string to describe the MarkovBlanketClassifier option. */ protected String markovBlanketClassifierTipText() { return "When set to true (default is false), after a network structure is learned" + " a Markov Blanket correction is applied to the network structure. This ensures" + " that all nodes in the network are part of the Markov blanket of the classifier" + " node."; } // markovBlanketClassifierTipText /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } // class SearchAlgorithm
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search/ci/CISearchAlgorithm.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * CISearchAlgorithm.java * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.search.ci; import weka.classifiers.bayes.BayesNet; import weka.classifiers.bayes.net.ParentSet; import weka.classifiers.bayes.net.search.local.LocalScoreSearchAlgorithm; import weka.core.Instances; import weka.core.RevisionUtils; /** * <!-- globalinfo-start --> The CISearchAlgorithm class supports Bayes net structure search algorithms that are based on conditional independence test (as opposed to for example score based of cross validation based search algorithms). * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node. 
* </pre> * * <pre> * -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES] * Score type (BAYES, BDeu, MDL, ENTROPY and AIC) * </pre> * * <!-- options-end --> * * @author Remco Bouckaert (rrb@xm.co.nz) * @version $Revision$ */ public class CISearchAlgorithm extends LocalScoreSearchAlgorithm { /** for serialization */ static final long serialVersionUID = 3165802334119704560L; BayesNet m_BayesNet; Instances m_instances; /** * Returns a string describing this object * * @return a description of the classifier suitable for displaying in the explorer/experimenter gui */ @Override public String globalInfo() { return "The CISearchAlgorithm class supports Bayes net structure " + "search algorithms that are based on conditional independence " + "test (as opposed to for example score based of cross validation " + "based search algorithms)."; } /** * IsConditionalIndependent tests whether two nodes X and Y are independent given a set of variables Z. The test compares the score of the Bayes network with and without arrow Y->X where all nodes in Z are parents of X. 
* * @param iAttributeX * - index of attribute representing variable X * @param iAttributeY * - index of attribute representing variable Y * @param iAttributesZ * - array of integers representing indices of attributes in set Z * @param nAttributesZ * - cardinality of Z * @return true if X and Y conditionally independent given Z * @throws InterruptedException */ protected boolean isConditionalIndependent(final int iAttributeX, final int iAttributeY, final int[] iAttributesZ, final int nAttributesZ) throws InterruptedException { ParentSet oParentSetX = this.m_BayesNet.getParentSet(iAttributeX); // clear parent set of AttributeX while (oParentSetX.getNrOfParents() > 0) { oParentSetX.deleteLastParent(this.m_instances); } // insert parents in iAttributeZ for (int iAttributeZ = 0; iAttributeZ < nAttributesZ; iAttributeZ++) { oParentSetX.addParent(iAttributesZ[iAttributeZ], this.m_instances); } double fScoreZ = this.calcNodeScore(iAttributeX); double fScoreZY = this.calcScoreWithExtraParent(iAttributeX, iAttributeY); if (fScoreZY <= fScoreZ) { // the score does not improve by adding Y to the parent set of X // so we conclude that nodes X and Y are conditionally independent // given the set of variables Z return true; } return false; } // IsConditionalIndependent /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } // class CISearchAlgorithm
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search/ci/ICSSearchAlgorithm.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * ICSSearchAlgorithm.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.search.ci;

import java.io.FileReader;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;

import weka.classifiers.bayes.BayesNet;
import weka.classifiers.bayes.net.ParentSet;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;

/**
 * <!-- globalinfo-start --> This Bayes Network learning algorithm uses conditional independence tests to find a skeleton, finds V-nodes and applies a set of rules to find the directions of the remaining arrows.
 * <p/>
 * <!-- globalinfo-end -->
 *
 * <!-- options-start --> Valid options are:
 * <p/>
 *
 * <pre>
 * -cardinality &lt;num&gt;
 *  When determining whether an edge exists a search is performed
 *  for a set Z that separates the nodes. MaxCardinality determines
 *  the maximum size of the set Z. This greatly influences the
 *  length of the search. (default 2)
 * </pre>
 *
 * <pre>
 * -mbc
 *  Applies a Markov Blanket correction to the network structure,
 *  after a network structure is learned. This ensures that all
 *  nodes in the network are part of the Markov blanket of the
 *  classifier node.
 * </pre>
 *
 * <pre>
 * -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES]
 *  Score type (BAYES, BDeu, MDL, ENTROPY and AIC)
 * </pre>
 *
 * <!-- options-end -->
 *
 * @author Remco Bouckaert
 * @version $Revision$
 */
public class ICSSearchAlgorithm extends CISearchAlgorithm {

  /** for serialization */
  static final long serialVersionUID = -2510985917284798576L;

  /**
   * returns the name of the attribute with the given index
   *
   * @param iAttribute the index of the attribute
   * @return the name of the attribute
   */
  String name(final int iAttribute) {
    return this.m_instances.attribute(iAttribute).name();
  }

  /**
   * returns the number of attributes (= number of nodes in the network)
   *
   * @return the number of attributes
   */
  int maxn() {
    return this.m_instances.numAttributes();
  }

  /** maximum size of separating set **/
  private int m_nMaxCardinality = 2;

  /**
   * sets the maximum cardinality of a separating set
   *
   * @param nMaxCardinality the max cardinality
   */
  public void setMaxCardinality(final int nMaxCardinality) {
    this.m_nMaxCardinality = nMaxCardinality;
  }

  /**
   * returns the max cardinality of a separating set
   *
   * @return the max cardinality
   */
  public int getMaxCardinality() {
    return this.m_nMaxCardinality;
  }

  /**
   * A candidate separating set Z, stored as node indices in m_set.
   * Entries are terminated by a -1 sentinel (see the constructor and contains()).
   */
  class SeparationSet implements RevisionHandler {

    /** node indices of the set; -1 marks the end of the used portion */
    public int[] m_set;

    /**
     * constructor: one extra slot is reserved for the -1 terminator
     */
    public SeparationSet() {
      this.m_set = new int[ICSSearchAlgorithm.this.getMaxCardinality() + 1];
    } // c'tor

    /**
     * Tests set membership, scanning up to the -1 terminator (or max cardinality).
     *
     * @param nItem node index to look for
     * @return true if nItem is in the set
     */
    public boolean contains(final int nItem) {
      for (int iItem = 0; iItem < ICSSearchAlgorithm.this.getMaxCardinality() && this.m_set[iItem] != -1; iItem++) {
        if (this.m_set[iItem] == nItem) {
          return true;
        }
      }
      return false;
    } // contains

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    @Override
    public String getRevision() {
      return RevisionUtils.extract("$Revision$");
    }

  } // class sepset

  /**
   * Search for Bayes network structure using ICS algorithm: build the undirected
   * skeleton from conditional-independence tests, orient V-nodes, orient the
   * remaining edges by rule application, then copy the result into bayesNet.
   *
   * @param bayesNet datastructure to build network structure for
   * @param instances data set to learn from
   * @throws Exception if something goes wrong
   */
  @Override
  protected void search(final BayesNet bayesNet, final Instances instances) throws Exception {
    // init
    this.m_BayesNet = bayesNet;
    this.m_instances = instances;

    // adjacency matrices are allocated with one spare row (maxn() + 1);
    // only indices < maxn() are used below
    boolean edges[][] = new boolean[this.maxn() + 1][];
    boolean[][] arrows = new boolean[this.maxn() + 1][];
    SeparationSet[][] sepsets = new SeparationSet[this.maxn() + 1][];
    for (int iNode = 0; iNode < this.maxn() + 1; iNode++) {
      edges[iNode] = new boolean[this.maxn()];
      arrows[iNode] = new boolean[this.maxn()];
      sepsets[iNode] = new SeparationSet[this.maxn()];
    }

    // the three ICS phases
    this.calcDependencyGraph(edges, sepsets);
    this.calcVeeNodes(edges, arrows, sepsets);
    this.calcArcDirections(edges, arrows);

    // transfrom into BayesNet datastructure
    for (int iNode = 0; iNode < this.maxn(); iNode++) {
      // clear parent set of AttributeX
      ParentSet oParentSet = this.m_BayesNet.getParentSet(iNode);
      while (oParentSet.getNrOfParents() > 0) {
        oParentSet.deleteLastParent(this.m_instances);
      }
      for (int iParent = 0; iParent < this.maxn(); iParent++) {
        if (arrows[iParent][iNode]) {
          oParentSet.addParent(iParent, this.m_instances);
        }
      }
    }
  } // search

  /**
   * CalcDependencyGraph determines the skeleton of the BayesNetwork by starting with a complete
   * graph and removing edges (a--b) if it can find a set Z such that a and b conditionally
   * independent given Z. The set Z is found by trying all possible subsets of nodes adjacent to a
   * and b, first of size 0, then of size 1, etc. up to size m_nMaxCardinality.
   *
   * Progress (separating sets found and the current adjacency matrix) is printed to System.err.
   *
   * @param edges boolean matrix representing the edges (filled in by this method)
   * @param sepsets set of separating sets (filled in by this method)
   * @throws InterruptedException if the independence test is interrupted
   */
  void calcDependencyGraph(final boolean[][] edges, final SeparationSet[][] sepsets) throws InterruptedException {
    /* calc undirected graph a-b iff D(a,S,b) for all S */
    SeparationSet oSepSet;

    for (int iNode1 = 0; iNode1 < this.maxn(); iNode1++) {
      /* start with complete graph */
      for (int iNode2 = 0; iNode2 < this.maxn(); iNode2++) {
        edges[iNode1][iNode2] = true;
      }
    }
    // no self loops
    for (int iNode1 = 0; iNode1 < this.maxn(); iNode1++) {
      edges[iNode1][iNode1] = false;
    }

    // try separating sets of growing cardinality, PC-style
    for (int iCardinality = 0; iCardinality <= this.getMaxCardinality(); iCardinality++) {
      for (int iNode1 = 0; iNode1 <= this.maxn() - 2; iNode1++) {
        for (int iNode2 = iNode1 + 1; iNode2 < this.maxn(); iNode2++) {
          if (edges[iNode1][iNode2]) {
            oSepSet = this.existsSepSet(iNode1, iNode2, iCardinality, edges);
            if (oSepSet != null) {
              // separating set found: drop the (undirected) edge and remember Z
              edges[iNode1][iNode2] = false;
              edges[iNode2][iNode1] = false;
              sepsets[iNode1][iNode2] = oSepSet;
              sepsets[iNode2][iNode1] = oSepSet;
              // report separating set
              System.err.print("I(" + this.name(iNode1) + ", {");
              for (int iNode3 = 0; iNode3 < iCardinality; iNode3++) {
                System.err.print(this.name(oSepSet.m_set[iNode3]) + " ");
              }
              System.err.print("} ," + this.name(iNode2) + ")\n");
            }
          }
        }
      }

      // report current state of dependency graph
      System.err.print(iCardinality + " ");
      for (int iNode1 = 0; iNode1 < this.maxn(); iNode1++) {
        System.err.print(this.name(iNode1) + " ");
      }
      System.err.print('\n');
      for (int iNode1 = 0; iNode1 < this.maxn(); iNode1++) {
        for (int iNode2 = 0; iNode2 < this.maxn(); iNode2++) {
          if (edges[iNode1][iNode2]) {
            System.err.print("X ");
          } else {
            System.err.print(". ");
          }
        }
        System.err.print(this.name(iNode1) + " ");
        System.err.print('\n');
      }
    }
  } /* CalcDependencyGraph */

  /**
   * ExistsSepSet tests if a separating set Z of node a and b of given cardinality exists. The set
   * Z is found by trying all possible subsets of nodes adjacent to both a and b of the requested
   * cardinality, enumerated odometer-style via next(): when the last position runs off the end,
   * the carry propagates to earlier positions.
   *
   * NOTE(review): the doubly-nested "while (iZ &gt;= 0 &amp;&amp; Z.m_set[iZ] &gt;= maxn())" loops below look
   * redundant (the outer one re-initializes iZ on every pass) — presumably intentional carry
   * handling, but worth confirming against the original weka implementation before touching.
   *
   * @param iNode1 index of first node a
   * @param iNode2 index of second node b
   * @param nCardinality size of the separating set Z
   * @param edges current skeleton; candidates for Z must be adjacent to both a and b
   * @return SeparationSet containing set that separates iNode1 and iNode2 or null if no such set exists
   * @throws InterruptedException if the independence test is interrupted
   */
  SeparationSet existsSepSet(final int iNode1, final int iNode2, final int nCardinality, final boolean[][] edges) throws InterruptedException {
    /* Test if a separating set of node d and e exists of cardinality k */
    int iNode3, iZ;
    SeparationSet Z = new SeparationSet();
    Z.m_set[nCardinality] = -1; // terminator

    // find first candidate separating set Z (ascending indices adjacent to both nodes)
    if (nCardinality > 0) {
      Z.m_set[0] = this.next(-1, iNode1, iNode2, edges);
      iNode3 = 1;
      while (iNode3 < nCardinality) {
        Z.m_set[iNode3] = this.next(Z.m_set[iNode3 - 1], iNode1, iNode2, edges);
        iNode3++;
      }
    }

    // iZ < 0 signals that the first candidate already ran out of valid nodes
    if (nCardinality > 0) {
      iZ = this.maxn() - Z.m_set[nCardinality - 1] - 1;
    } else {
      iZ = 0;
    }

    while (iZ >= 0) {
      // check if candidate separating set makes iNode2 and iNode1 independent
      if (this.isConditionalIndependent(iNode2, iNode1, Z.m_set, nCardinality)) {
        return Z;
      }
      // calc next candidate separating set: advance the last position ...
      if (nCardinality > 0) {
        Z.m_set[nCardinality - 1] = this.next(Z.m_set[nCardinality - 1], iNode1, iNode2, edges);
      }
      // ... and propagate the carry to earlier positions when it overflows
      iZ = nCardinality - 1;
      while (iZ >= 0 && Z.m_set[iZ] >= this.maxn()) {
        iZ = nCardinality - 1;
        while (iZ >= 0 && Z.m_set[iZ] >= this.maxn()) {
          iZ--;
        }
        if (iZ < 0) {
          break; // exhausted all candidate sets of this cardinality
        }
        Z.m_set[iZ] = this.next(Z.m_set[iZ], iNode1, iNode2, edges);
        for (iNode3 = iZ + 1; iNode3 < nCardinality; iNode3++) {
          Z.m_set[iNode3] = this.next(Z.m_set[iNode3 - 1], iNode1, iNode2, edges);
        }
        iZ = nCardinality - 1;
      }
    }

    return null;
  } /* ExistsSepSet */

  /**
   * determine index of node that makes next candidate separating set: the next node after x that
   * is adjacent to both iNode1 and iNode2, but is not iNode2 itself. Returns maxn() when no such
   * node remains (used as the overflow marker by existsSepSet).
   *
   * @param x index of current node
   * @param iNode1 first node
   * @param iNode2 second node (must be larger than iNode1)
   * @param edges skeleton so far
   * @return int index of next node adjacent to iNode1 after x
   */
  int next(int x, final int iNode1, final int iNode2, final boolean[][] edges) {
    x++;
    while (x < this.maxn() && (!edges[iNode1][x] || !edges[iNode2][x] || x == iNode2)) {
      x++;
    }
    return x;
  } /* next */

  /**
   * CalcVeeNodes tries to find V-nodes, i.e. nodes a,b,c such that a-&gt;c&lt;-b and a-/-b. These nodes
   * are identified by finding nodes a,b,c in the skeleton such that a--c, c--b and a-/-b and
   * furthermore c is not in the set Z that separates a and b.
   *
   * NOTE(review): the condition below uses non-short-circuit '&amp;' before the sepset lookup; safe
   * here because sepsets[a][b] is always set when edges[a][b] was removed, but '&amp;&amp;' would be the
   * conventional choice — confirm before changing.
   *
   * @param edges skeleton
   * @param arrows resulting partially directed skeleton after all V-nodes have been identified
   * @param sepsets separating sets
   */
  void calcVeeNodes(final boolean[][] edges, final boolean[][] arrows, final SeparationSet[][] sepsets) {
    // start with complete empty graph
    for (int iNode1 = 0; iNode1 < this.maxn(); iNode1++) {
      for (int iNode2 = 0; iNode2 < this.maxn(); iNode2++) {
        arrows[iNode1][iNode2] = false;
      }
    }

    for (int iNode1 = 0; iNode1 < this.maxn() - 1; iNode1++) {
      for (int iNode2 = iNode1 + 1; iNode2 < this.maxn(); iNode2++) {
        if (!edges[iNode1][iNode2]) {
          /* i nonadj j */
          for (int iNode3 = 0; iNode3 < this.maxn(); iNode3++) {
            if ((iNode3 != iNode1 && iNode3 != iNode2 && edges[iNode1][iNode3] && edges[iNode2][iNode3]) & (!sepsets[iNode1][iNode2].contains(iNode3))) {
              arrows[iNode1][iNode3] = true; /* add arc i->k */
              arrows[iNode2][iNode3] = true; /* add arc j->k */
            }
          }
        }
      }
    }
  } // CalcVeeNodes

  /**
   * CalcArcDirections assigns directions to edges that remain after V-nodes have been identified.
   * The arcs are directed by repeatedly applying the rules below until a fixpoint is reached:
   *
   * Rule 1: i-&gt;j--k &amp; i-/-k  =&gt; j-&gt;k
   * Rule 2: i-&gt;j-&gt;k &amp; i--k   =&gt; i-&gt;k
   * Rule 3: i-&gt;j&lt;-k, m adjacent (undirected) to i, j and k  =&gt; m-&gt;j
   * Rule 4: j-&gt;i, k--j, k--i, m adjacent (undirected) to i and k  =&gt; i-&gt;m &amp; k-&gt;m
   * Rule 5: if no edges are directed then take a random one (first we can find)
   *
   * @param edges skeleton
   * @param arrows resulting fully directed DAG
   */
  void calcArcDirections(final boolean[][] edges, final boolean[][] arrows) {
    /* give direction to remaining arcs */
    int i, j, k, m;
    boolean bFound;

    do {
      bFound = false;

      /* Rule 1: i->j--k & i-/-k => j->k */
      for (i = 0; i < this.maxn(); i++) {
        for (j = 0; j < this.maxn(); j++) {
          if (i != j && arrows[i][j]) {
            for (k = 0; k < this.maxn(); k++) {
              if (i != k && j != k && edges[j][k] && !edges[i][k] && !arrows[j][k] && !arrows[k][j]) {
                arrows[j][k] = true;
                bFound = true;
              }
            }
          }
        }
      }

      /* Rule 2: i->j->k & i--k => i->k */
      for (i = 0; i < this.maxn(); i++) {
        for (j = 0; j < this.maxn(); j++) {
          if (i != j && arrows[i][j]) {
            for (k = 0; k < this.maxn(); k++) {
              if (i != k && j != k && edges[i][k] && arrows[j][k] && !arrows[i][k] && !arrows[k][i]) {
                arrows[i][k] = true;
                bFound = true;
              }
            }
          }
        }
      }

      /* Rule 3: i->j<-k and m undirected-adjacent to i, j and k => m->j */
      for (i = 0; i < this.maxn(); i++) {
        for (j = 0; j < this.maxn(); j++) {
          if (i != j && arrows[i][j]) {
            for (k = 0; k < this.maxn(); k++) {
              if (k != i && k != j && arrows[k][j] && !edges[k][i]) {
                for (m = 0; m < this.maxn(); m++) {
                  if (m != i && m != j && m != k && edges[m][i] && !arrows[m][i] && !arrows[i][m] && edges[m][j] && !arrows[m][j] && !arrows[j][m] && edges[m][k] && !arrows[m][k]
                      && !arrows[k][m]) {
                    arrows[m][j] = true;
                    bFound = true;
                  }
                }
              }
            }
          }
        }
      }

      /* Rule 4: j->i, k--j, k--i, m undirected-adjacent to i and k => i->m & k->m */
      for (i = 0; i < this.maxn(); i++) {
        for (j = 0; j < this.maxn(); j++) {
          if (i != j && arrows[j][i]) {
            for (k = 0; k < this.maxn(); k++) {
              if (k != i && k != j && edges[k][j] && !arrows[k][j] && !arrows[j][k] && edges[k][i] && !arrows[k][i] && !arrows[i][k]) {
                for (m = 0; m < this.maxn(); m++) {
                  if (m != i && m != j && m != k && edges[m][i] && !arrows[m][i] && !arrows[i][m] && edges[m][k] && !arrows[m][k] && !arrows[k][m]) {
                    arrows[i][m] = true;
                    arrows[k][m] = true;
                    bFound = true;
                  }
                }
              }
            }
          }
        }
      }

      /*
       * Rule 5: if no edges are directed then take a random one (first we can
       * find)
       */
      if (!bFound) {
        i = 0;
        while (!bFound && i < this.maxn()) {
          j = 0;
          while (!bFound && j < this.maxn()) {
            if (edges[i][j] && !arrows[i][j] && !arrows[j][i]) {
              arrows[i][j] = true;
              bFound = true;
            }
            j++;
          }
          i++;
        }
      }
    } while (bFound); // repeat until no rule fires
  } // CalcArcDirections

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> result = new Vector<Option>();

    result.addElement(new Option("\tWhen determining whether an edge exists a search is performed \n" + "\tfor a set Z that separates the nodes. MaxCardinality determines \n"
        + "\tthe maximum size of the set Z. This greatly influences the \n" + "\tlength of the search. (default 2)", "cardinality", 1, "-cardinality <num>"));

    result.addAll(Collections.list(super.listOptions()));

    return result.elements();
  } // listOption

  /**
   * Parses a given list of options.
   * <p/>
   *
   * <!-- options-start --> Valid options are:
   * <p/>
   *
   * <pre>
   * -cardinality &lt;num&gt;
   *  When determining whether an edge exists a search is performed
   *  for a set Z that separates the nodes. MaxCardinality determines
   *  the maximum size of the set Z. This greatly influences the
   *  length of the search. (default 2)
   * </pre>
   *
   * <pre>
   * -mbc
   *  Applies a Markov Blanket correction to the network structure,
   *  after a network structure is learned. This ensures that all
   *  nodes in the network are part of the Markov blanket of the
   *  classifier node.
   * </pre>
   *
   * <pre>
   * -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES]
   *  Score type (BAYES, BDeu, MDL, ENTROPY and AIC)
   * </pre>
   *
   * <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  @Override
  public void setOptions(final String[] options) throws Exception {
    String tmpStr;

    tmpStr = Utils.getOption("cardinality", options);
    if (tmpStr.length() != 0) {
      this.setMaxCardinality(Integer.parseInt(tmpStr));
    } else {
      this.setMaxCardinality(2); // default
    }

    super.setOptions(options);
  } // setOptions

  /**
   * Gets the current settings of the Classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {
    Vector<String> result = new Vector<String>();

    result.add("-cardinality");
    result.add("" + this.getMaxCardinality());

    Collections.addAll(result, super.getOptions());

    return result.toArray(new String[result.size()]);
  } // getOptions

  /**
   * @return a string to describe the MaxCardinality option.
   */
  public String maxCardinalityTipText() {
    return "When determining whether an edge exists a search is performed for a set Z " + "that separates the nodes. MaxCardinality determines the maximum size of the set Z. "
        + "This greatly influences the length of the search. Default value is 2.";
  } // maxCardinalityTipText

  /**
   * This will return a string describing the search algorithm.
   *
   * @return The string.
   */
  @Override
  public String globalInfo() {
    return "This Bayes Network learning algorithm uses conditional independence tests " + "to find a skeleton, finds V-nodes and applies a set of rules to find the directions "
        + "of the remaining arrows.";
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }

  /**
   * for testing the class; note the hard-coded Windows path to a sample ARFF file
   *
   * @param argv the commandline parameters
   */
  static public void main(final String[] argv) {
    try {
      BayesNet b = new BayesNet();
      b.setSearchAlgorithm(new ICSSearchAlgorithm());
      Instances instances = new Instances(new FileReader("C:\\eclipse\\workspace\\weka\\data\\contact-lenses.arff"));
      instances.setClassIndex(instances.numAttributes() - 1);
      b.buildClassifier(instances);
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // main

} // class ICSSearchAlgorithm
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search/fixed/FromFile.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * FromFile.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.search.fixed;

import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;

import weka.classifiers.bayes.BayesNet;
import weka.classifiers.bayes.net.BIFReader;
import weka.classifiers.bayes.net.ParentSet;
import weka.classifiers.bayes.net.search.SearchAlgorithm;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.Utils;

/**
 * <!-- globalinfo-start --> The FromFile reads the structure of a Bayes net
 * from a file in BIFF format.
 * <p/>
 * <!-- globalinfo-end -->
 *
 * <!-- options-start --> Valid options are:
 * <p/>
 *
 * <pre>
 * -B &lt;BIF File&gt;
 *  Name of file containing network structure in BIF format
 * </pre>
 *
 * <!-- options-end -->
 *
 * @author Remco Bouckaert
 * @version $Revision$
 */
public class FromFile extends SearchAlgorithm {

  /** for serialization */
  static final long serialVersionUID = 7334358169507619525L;

  /** name of file to read structure from **/
  String m_sBIFFile = "";

  /**
   * Returns a string describing this object
   *
   * @return a description of the classifier suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String globalInfo() {
    return "The FromFile reads the structure of a Bayes net from a file "
      + "in BIFF format.";
  }

  /**
   * Fixes the network structure by copying every parent set from the BIF file
   * onto the given Bayes net, matching nodes by name.
   *
   * @param bayesNet the network whose structure is set
   * @param instances the instances to work with
   * @throws Exception if attribute from BIF file could not be found
   */
  @Override
  public void buildStructure(BayesNet bayesNet, Instances instances)
    throws Exception {
    // read network structure in BIF format
    BIFReader reader = new BIFReader();
    reader.processFile(m_sBIFFile);

    // mirror each node's parent set from the BIF network
    for (int iNode = 0; iNode < instances.numAttributes(); iNode++) {
      int iBIFNode = reader.getNode(bayesNet.getNodeName(iNode));
      ParentSet bifParents = reader.getParentSet(iBIFNode);
      for (int iBIFParent = 0; iBIFParent < bifParents.getNrOfParents(); iBIFParent++) {
        String sParent = reader.getNodeName(bifParents.getParent(iBIFParent));
        bayesNet.getParentSet(iNode)
          .addParent(findAttributeIndex(bayesNet, instances, sParent), instances);
      }
    }
  } // buildStructure

  /**
   * Resolves the attribute index whose node name equals sParent.
   *
   * @param bayesNet the network supplying node names
   * @param instances the data set (bounds the search)
   * @param sParent the node name to look up
   * @return the matching attribute index
   * @throws Exception if no attribute carries that name
   */
  private int findAttributeIndex(BayesNet bayesNet, Instances instances,
    String sParent) throws Exception {
    int iParent = 0;
    while (iParent < instances.numAttributes()
      && !bayesNet.getNodeName(iParent).equals(sParent)) {
      iParent++;
    }
    if (iParent >= instances.numAttributes()) {
      throw new Exception("Could not find attribute " + sParent
        + " from BIF file in data");
    }
    return iParent;
  } // findAttributeIndex

  /**
   * Set name of network in BIF file to read structure from
   *
   * @param sBIFFile the name of the BIF file
   */
  public void setBIFFile(String sBIFFile) {
    m_sBIFFile = sBIFFile;
  }

  /**
   * Get name of network in BIF file to read structure from
   *
   * @return BIF file name
   */
  public String getBIFFile() {
    return m_sBIFFile;
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> result = new Vector<Option>();

    result.addElement(new Option(
      "\tName of file containing network structure in BIF format\n", "B", 1,
      "-B <BIF File>"));
    result.addAll(Collections.list(super.listOptions()));

    return result.elements();
  }

  /**
   * Parses a given list of options.
   * <p/>
   *
   * <!-- options-start --> Valid options are:
   * <p/>
   *
   * <pre>
   * -B &lt;BIF File&gt;
   *  Name of file containing network structure in BIF format
   * </pre>
   *
   * <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  @Override
  public void setOptions(String[] options) throws Exception {
    setBIFFile(Utils.getOption('B', options));
    super.setOptions(options);
  }

  /**
   * Gets the current settings of the search algorithm.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {
    Vector<String> result = new Vector<String>();

    result.add("-B");
    result.add("" + getBIFFile());
    Collections.addAll(result, super.getOptions());

    return result.toArray(new String[0]);
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }

} // class FromFile
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search/fixed/NaiveBayes.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * NaiveBayes.java * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.search.fixed; import weka.classifiers.bayes.BayesNet; import weka.classifiers.bayes.net.search.SearchAlgorithm; import weka.core.Instances; import weka.core.RevisionUtils; /** <!-- globalinfo-start --> * The NaiveBayes class generates a fixed Bayes network structure with arrows from the class variable to each of the attribute variables. 
* <p/> <!-- globalinfo-end --> * <!-- options-start --> <!-- options-end --> * * @author Remco Bouckaert * @version $Revision$ */ public class NaiveBayes extends SearchAlgorithm { /** for serialization */ static final long serialVersionUID = -4808572519709755811L; /** * Returns a string describing this object * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "The NaiveBayes class generates a fixed Bayes network structure " + "with arrows from the class variable to each of the attribute " + "variables."; } /** * * @param bayesNet * @param instances the instances to work with * @throws Exception if something goes wrong */ public void buildStructure (BayesNet bayesNet, Instances instances) throws Exception { for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) { if (iAttribute != instances.classIndex()) { bayesNet.getParentSet(iAttribute).addParent(instances.classIndex(), instances); } } } // buildStructure /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } } // class NaiveBayes
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search/global/GeneticSearch.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * GeneticSearch.java * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.search.global; import java.util.Collections; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.bayes.BayesNet; import weka.classifiers.bayes.net.ParentSet; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Utils; /** * <!-- globalinfo-start --> This Bayes Network learning algorithm uses genetic * search for finding a well scoring Bayes network structure. Genetic search * works by having a population of Bayes network structures and allow them to * mutate and apply cross over to get offspring. The best network structure * found during the process is returned. * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -L &lt;integer&gt; * Population size * </pre> * * <pre> * -A &lt;integer&gt; * Descendant population size * </pre> * * <pre> * -U &lt;integer&gt; * Number of runs * </pre> * * <pre> * -M * Use mutation. * (default true) * </pre> * * <pre> * -C * Use cross-over. * (default true) * </pre> * * <pre> * -O * Use tournament selection (true) or maximum subpopulatin (false). 
* (default false) * </pre> * * <pre> * -R &lt;seed&gt; * Random number seed * </pre> * * <pre> * -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node. * </pre> * * <pre> * -S [LOO-CV|k-Fold-CV|Cumulative-CV] * Score type (LOO-CV,k-Fold-CV,Cumulative-CV) * </pre> * * <pre> * -Q * Use probabilistic or 0/1 scoring. * (default probabilistic scoring) * </pre> * * <!-- options-end --> * * @author Remco Bouckaert (rrb@xm.co.nz) * @version $Revision$ */ public class GeneticSearch extends GlobalScoreSearchAlgorithm { /** for serialization */ static final long serialVersionUID = 4236165533882462203L; /** number of runs **/ int m_nRuns = 10; /** size of population **/ int m_nPopulationSize = 10; /** size of descendant population **/ int m_nDescendantPopulationSize = 100; /** use cross-over? **/ boolean m_bUseCrossOver = true; /** use mutation? **/ boolean m_bUseMutation = true; /** use tournament selection or take best sub-population **/ boolean m_bUseTournamentSelection = false; /** random number seed **/ int m_nSeed = 1; /** random number generator **/ Random m_random = null; /** * used in BayesNetRepresentation for efficiently determining whether a number * is square */ boolean[] g_bIsSquare; class BayesNetRepresentation implements RevisionHandler { /** number of nodes in network **/ int m_nNodes = 0; /** * bit representation of parent sets m_bits[iTail + iHead * m_nNodes] * represents arc iTail->iHead */ boolean[] m_bits; /** score of represented network structure **/ double m_fScore = 0.0f; /** * return score of represented network structure * * @return the score */ public double getScore() { return m_fScore; } // getScore /** * c'tor * * @param nNodes the number of nodes */ BayesNetRepresentation(int nNodes) { m_nNodes = nNodes; } // c'tor /** * initialize with a random structure by randomly placing m_nNodes arcs. 
*/ public void randomInit() { do { m_bits = new boolean[m_nNodes * m_nNodes]; for (int i = 0; i < m_nNodes; i++) { int iPos; do { iPos = m_random.nextInt(m_nNodes * m_nNodes); } while (isSquare(iPos)); m_bits[iPos] = true; } } while (hasCycles()); calcGlobalScore(); } /** * calculate score of current network representation As a side effect, the * parent sets are set */ void calcGlobalScore() { // clear current network for (int iNode = 0; iNode < m_nNodes; iNode++) { ParentSet parentSet = m_BayesNet.getParentSet(iNode); while (parentSet.getNrOfParents() > 0) { parentSet.deleteLastParent(m_BayesNet.m_Instances); } } // insert arrows for (int iNode = 0; iNode < m_nNodes; iNode++) { ParentSet parentSet = m_BayesNet.getParentSet(iNode); for (int iNode2 = 0; iNode2 < m_nNodes; iNode2++) { if (m_bits[iNode2 + iNode * m_nNodes]) { parentSet.addParent(iNode2, m_BayesNet.m_Instances); } } } // calc score try { m_fScore = calcScore(m_BayesNet); } catch (Exception e) { // ignore } } // calcScore /** * check whether there are cycles in the network * * @return true if a cycle is found, false otherwise */ public boolean hasCycles() { // check for cycles boolean[] bDone = new boolean[m_nNodes]; for (int iNode = 0; iNode < m_nNodes; iNode++) { // find a node for which all parents are 'done' boolean bFound = false; for (int iNode2 = 0; !bFound && iNode2 < m_nNodes; iNode2++) { if (!bDone[iNode2]) { boolean bHasNoParents = true; for (int iParent = 0; iParent < m_nNodes; iParent++) { if (m_bits[iParent + iNode2 * m_nNodes] && !bDone[iParent]) { bHasNoParents = false; } } if (bHasNoParents) { bDone[iNode2] = true; bFound = true; } } } if (!bFound) { return true; } } return false; } // hasCycles /** * create clone of current object * * @return cloned object */ BayesNetRepresentation copy() { BayesNetRepresentation b = new BayesNetRepresentation(m_nNodes); b.m_bits = new boolean[m_bits.length]; for (int i = 0; i < m_nNodes * m_nNodes; i++) { b.m_bits[i] = m_bits[i]; } b.m_fScore = 
m_fScore; return b; } // copy /** * Apply mutation operation to BayesNet Calculate score and as a side effect * sets BayesNet parent sets. */ void mutate() { // flip a bit do { int iBit; do { iBit = m_random.nextInt(m_nNodes * m_nNodes); } while (isSquare(iBit)); m_bits[iBit] = !m_bits[iBit]; } while (hasCycles()); calcGlobalScore(); } // mutate /** * Apply cross-over operation to BayesNet Calculate score and as a side * effect sets BayesNet parent sets. * * @param other BayesNetRepresentation to cross over with */ void crossOver(BayesNetRepresentation other) { boolean[] bits = new boolean[m_bits.length]; for (int i = 0; i < m_bits.length; i++) { bits[i] = m_bits[i]; } int iCrossOverPoint = m_bits.length; do { // restore to original state for (int i = iCrossOverPoint; i < m_bits.length; i++) { m_bits[i] = bits[i]; } // take all bits from cross-over points onwards iCrossOverPoint = m_random.nextInt(m_bits.length); for (int i = iCrossOverPoint; i < m_bits.length; i++) { m_bits[i] = other.m_bits[i]; } } while (hasCycles()); calcGlobalScore(); } // crossOver /** * check if number is square and initialize g_bIsSquare structure if * necessary * * @param nNum number to check (should be below m_nNodes * m_nNodes) * @return true if number is square */ boolean isSquare(int nNum) { if (g_bIsSquare == null || g_bIsSquare.length < nNum) { g_bIsSquare = new boolean[m_nNodes * m_nNodes]; for (int i = 0; i < m_nNodes; i++) { g_bIsSquare[i * m_nNodes + i] = true; } } return g_bIsSquare[nNum]; } // isSquare /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } // class BayesNetRepresentation /** * search determines the network structure/graph of the network with a genetic * search algorithm. 
* * @param bayesNet the network to search * @param instances the instances to use * @throws Exception if population size doesn fit or neither cross-over or * mutation was chosen */ @Override protected void search(BayesNet bayesNet, Instances instances) throws Exception { // sanity check if (getDescendantPopulationSize() < getPopulationSize()) { throw new Exception( "Descendant PopulationSize should be at least Population Size"); } if (!getUseCrossOver() && !getUseMutation()) { throw new Exception( "At least one of mutation or cross-over should be used"); } m_random = new Random(m_nSeed); // keeps track of best structure found so far BayesNet bestBayesNet; // keeps track of score pf best structure found so far double fBestScore = calcScore(bayesNet); // initialize bestBayesNet bestBayesNet = new BayesNet(); bestBayesNet.m_Instances = instances; bestBayesNet.initStructure(); copyParentSets(bestBayesNet, bayesNet); // initialize population BayesNetRepresentation[] population = new BayesNetRepresentation[getPopulationSize()]; for (int i = 0; i < getPopulationSize(); i++) { population[i] = new BayesNetRepresentation(instances.numAttributes()); population[i].randomInit(); if (population[i].getScore() > fBestScore) { copyParentSets(bestBayesNet, bayesNet); fBestScore = population[i].getScore(); } } // go do the search for (int iRun = 0; iRun < m_nRuns; iRun++) { // create descendants BayesNetRepresentation[] descendantPopulation = new BayesNetRepresentation[getDescendantPopulationSize()]; for (int i = 0; i < getDescendantPopulationSize(); i++) { descendantPopulation[i] = population[m_random .nextInt(getPopulationSize())].copy(); if (getUseMutation()) { if (getUseCrossOver() && m_random.nextBoolean()) { descendantPopulation[i].crossOver(population[m_random .nextInt(getPopulationSize())]); } else { descendantPopulation[i].mutate(); } } else { // use crossover descendantPopulation[i].crossOver(population[m_random .nextInt(getPopulationSize())]); } if 
(descendantPopulation[i].getScore() > fBestScore) { copyParentSets(bestBayesNet, bayesNet); fBestScore = descendantPopulation[i].getScore(); } } // select new population boolean[] bSelected = new boolean[getDescendantPopulationSize()]; for (int i = 0; i < getPopulationSize(); i++) { int iSelected = 0; if (m_bUseTournamentSelection) { // use tournament selection iSelected = m_random.nextInt(getDescendantPopulationSize()); while (bSelected[iSelected]) { iSelected = (iSelected + 1) % getDescendantPopulationSize(); } int iSelected2 = m_random.nextInt(getDescendantPopulationSize()); while (bSelected[iSelected2]) { iSelected2 = (iSelected2 + 1) % getDescendantPopulationSize(); } if (descendantPopulation[iSelected2].getScore() > descendantPopulation[iSelected] .getScore()) { iSelected = iSelected2; } } else { // find best scoring network in population while (bSelected[iSelected]) { iSelected++; } double fScore = descendantPopulation[iSelected].getScore(); for (int j = 0; j < getDescendantPopulationSize(); j++) { if (!bSelected[j] && descendantPopulation[j].getScore() > fScore) { fScore = descendantPopulation[j].getScore(); iSelected = j; } } } population[i] = descendantPopulation[iSelected]; bSelected[iSelected] = true; } } // restore current network to best network copyParentSets(bayesNet, bestBayesNet); // free up memory bestBayesNet = null; g_bIsSquare = null; } // search /** * copyParentSets copies parent sets of source to dest BayesNet * * @param dest destination network * @param source source network */ void copyParentSets(BayesNet dest, BayesNet source) { int nNodes = source.getNrOfNodes(); // clear parent set first for (int iNode = 0; iNode < nNodes; iNode++) { dest.getParentSet(iNode).copy(source.getParentSet(iNode)); } } // CopyParentSets /** * @return number of runs */ public int getRuns() { return m_nRuns; } // getRuns /** * Sets the number of runs * * @param nRuns The number of runs to set */ public void setRuns(int nRuns) { m_nRuns = nRuns; } // setRuns /** 
* Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<Option>(7); newVector .addElement(new Option("\tPopulation size", "L", 1, "-L <integer>")); newVector.addElement(new Option("\tDescendant population size", "A", 1, "-A <integer>")); newVector .addElement(new Option("\tNumber of runs", "U", 1, "-U <integer>")); newVector.addElement(new Option("\tUse mutation.\n\t(default true)", "M", 0, "-M")); newVector.addElement(new Option("\tUse cross-over.\n\t(default true)", "C", 0, "-C")); newVector .addElement(new Option( "\tUse tournament selection (true) or maximum subpopulatin (false).\n\t(default false)", "O", 0, "-O")); newVector .addElement(new Option("\tRandom number seed", "R", 1, "-R <seed>")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } // listOptions /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -L &lt;integer&gt; * Population size * </pre> * * <pre> * -A &lt;integer&gt; * Descendant population size * </pre> * * <pre> * -U &lt;integer&gt; * Number of runs * </pre> * * <pre> * -M * Use mutation. * (default true) * </pre> * * <pre> * -C * Use cross-over. * (default true) * </pre> * * <pre> * -O * Use tournament selection (true) or maximum subpopulatin (false). * (default false) * </pre> * * <pre> * -R &lt;seed&gt; * Random number seed * </pre> * * <pre> * -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node. * </pre> * * <pre> * -S [LOO-CV|k-Fold-CV|Cumulative-CV] * Score type (LOO-CV,k-Fold-CV,Cumulative-CV) * </pre> * * <pre> * -Q * Use probabilistic or 0/1 scoring. 
* (default probabilistic scoring) * </pre> * * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { String sPopulationSize = Utils.getOption('L', options); if (sPopulationSize.length() != 0) { setPopulationSize(Integer.parseInt(sPopulationSize)); } String sDescendantPopulationSize = Utils.getOption('A', options); if (sDescendantPopulationSize.length() != 0) { setDescendantPopulationSize(Integer.parseInt(sDescendantPopulationSize)); } String sRuns = Utils.getOption('U', options); if (sRuns.length() != 0) { setRuns(Integer.parseInt(sRuns)); } String sSeed = Utils.getOption('R', options); if (sSeed.length() != 0) { setSeed(Integer.parseInt(sSeed)); } setUseMutation(Utils.getFlag('M', options)); setUseCrossOver(Utils.getFlag('C', options)); setUseTournamentSelection(Utils.getFlag('O', options)); super.setOptions(options); } // setOptions /** * Gets the current settings of the search algorithm. 
* * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<String>(); options.add("-L"); options.add("" + getPopulationSize()); options.add("-A"); options.add("" + getDescendantPopulationSize()); options.add("-U"); options.add("" + getRuns()); options.add("-R"); options.add("" + getSeed()); if (getUseMutation()) { options.add("-M"); } if (getUseCrossOver()) { options.add("-C"); } if (getUseTournamentSelection()) { options.add("-O"); } Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } // getOptions /** * @return whether cross-over is used */ public boolean getUseCrossOver() { return m_bUseCrossOver; } /** * @return whether mutation is used */ public boolean getUseMutation() { return m_bUseMutation; } /** * @return descendant population size */ public int getDescendantPopulationSize() { return m_nDescendantPopulationSize; } /** * @return population size */ public int getPopulationSize() { return m_nPopulationSize; } /** * @param bUseCrossOver sets whether cross-over is used */ public void setUseCrossOver(boolean bUseCrossOver) { m_bUseCrossOver = bUseCrossOver; } /** * @param bUseMutation sets whether mutation is used */ public void setUseMutation(boolean bUseMutation) { m_bUseMutation = bUseMutation; } /** * @return whether Tournament Selection (true) or Maximum Sub-Population * (false) should be used */ public boolean getUseTournamentSelection() { return m_bUseTournamentSelection; } /** * @param bUseTournamentSelection sets whether Tournament Selection or Maximum * Sub-Population should be used */ public void setUseTournamentSelection(boolean bUseTournamentSelection) { m_bUseTournamentSelection = bUseTournamentSelection; } /** * @param iDescendantPopulationSize sets descendant population size */ public void setDescendantPopulationSize(int iDescendantPopulationSize) { m_nDescendantPopulationSize = iDescendantPopulationSize; } /** * @param 
iPopulationSize sets population size */ public void setPopulationSize(int iPopulationSize) { m_nPopulationSize = iPopulationSize; } /** * @return random number seed */ public int getSeed() { return m_nSeed; } // getSeed /** * Sets the random number seed * * @param nSeed The number of the seed to set */ public void setSeed(int nSeed) { m_nSeed = nSeed; } // setSeed /** * This will return a string describing the classifier. * * @return The string. */ @Override public String globalInfo() { return "This Bayes Network learning algorithm uses genetic search for finding a well scoring " + "Bayes network structure. Genetic search works by having a population of Bayes network structures " + "and allow them to mutate and apply cross over to get offspring. The best network structure " + "found during the process is returned."; } // globalInfo /** * @return a string to describe the Runs option. */ public String runsTipText() { return "Sets the number of generations of Bayes network structure populations."; } // runsTipText /** * @return a string to describe the Seed option. */ public String seedTipText() { return "Initialization value for random number generator." + " Setting the seed allows replicability of experiments."; } // seedTipText /** * @return a string to describe the Population Size option. */ public String populationSizeTipText() { return "Sets the size of the population of network structures that is selected each generation."; } // populationSizeTipText /** * @return a string to describe the Descendant Population Size option. */ public String descendantPopulationSizeTipText() { return "Sets the size of the population of descendants that is created each generation."; } // descendantPopulationSizeTipText /** * @return a string to describe the Use Mutation option. */ public String useMutationTipText() { return "Determines whether mutation is allowed. Mutation flips a bit in the bit " + "representation of the network structure. 
At least one of mutation or cross-over " + "should be used."; } // useMutationTipText /** * @return a string to describe the Use Cross-Over option. */ public String useCrossOverTipText() { return "Determines whether cross-over is allowed. Cross over combined the bit " + "representations of network structure by taking a random first k bits of one" + "and adding the remainder of the other. At least one of mutation or cross-over " + "should be used."; } // useCrossOverTipText /** * @return a string to describe the Use Tournament Selection option. */ public String useTournamentSelectionTipText() { return "Determines the method of selecting a population. When set to true, tournament " + "selection is used (pick two at random and the highest is allowed to continue). " + "When set to false, the top scoring network structures are selected."; } // useTournamentSelectionTipText /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } // GeneticSearch
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search/global/GlobalScoreSearchAlgorithm.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * GlobalScoreSearchAlgorithm.java * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.search.global; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.bayes.BayesNet; import weka.classifiers.bayes.net.ParentSet; import weka.classifiers.bayes.net.search.SearchAlgorithm; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.Utils; /** * <!-- globalinfo-start --> This Bayes Network learning algorithm uses cross * validation to estimate classification accuracy. * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node. * </pre> * * <pre> * -S [LOO-CV|k-Fold-CV|Cumulative-CV] * Score type (LOO-CV,k-Fold-CV,Cumulative-CV) * </pre> * * <pre> * -Q * Use probabilistic or 0/1 scoring. 
* (default probabilistic scoring) * </pre> * * <!-- options-end --> * * @author Remco Bouckaert * @version $Revision$ */ public class GlobalScoreSearchAlgorithm extends SearchAlgorithm { /** for serialization */ static final long serialVersionUID = 7341389867906199781L; /** points to Bayes network for which a structure is searched for **/ BayesNet m_BayesNet; /** * toggle between scoring using accuracy = 0-1 loss (when false) or class * probabilities (when true) **/ boolean m_bUseProb = true; /** number of folds for k-fold cross validation **/ int m_nNrOfFolds = 10; /** constant for score type: LOO-CV */ final static int LOOCV = 0; /** constant for score type: k-fold-CV */ final static int KFOLDCV = 1; /** constant for score type: Cumulative-CV */ final static int CUMCV = 2; /** the score types **/ public static final Tag[] TAGS_CV_TYPE = { new Tag(LOOCV, "LOO-CV"), new Tag(KFOLDCV, "k-Fold-CV"), new Tag(CUMCV, "Cumulative-CV") }; /** * Holds the cross validation strategy used to measure quality of network */ int m_nCVType = LOOCV; /** * performCV returns the accuracy calculated using cross validation. The * dataset used is m_Instances associated with the Bayes Network. * * @param bayesNet : Bayes Network containing structure to evaluate * @return accuracy (in interval 0..1) measured using cv. 
* @throws Exception whn m_nCVType is invalided + exceptions passed on by * updateClassifier */ public double calcScore(BayesNet bayesNet) throws Exception { switch (m_nCVType) { case LOOCV: return leaveOneOutCV(bayesNet); case CUMCV: return cumulativeCV(bayesNet); case KFOLDCV: return kFoldCV(bayesNet, m_nNrOfFolds); default: throw new Exception("Unrecognized cross validation type encountered: " + m_nCVType); } } // calcScore /** * Calc Node Score With Added Parent * * @param nNode node for which the score is calculate * @param nCandidateParent candidate parent to add to the existing parent set * @return log score * @throws Exception if something goes wrong */ public double calcScoreWithExtraParent(int nNode, int nCandidateParent) throws Exception { ParentSet oParentSet = m_BayesNet.getParentSet(nNode); Instances instances = m_BayesNet.m_Instances; // sanity check: nCandidateParent should not be in parent set already for (int iParent = 0; iParent < oParentSet.getNrOfParents(); iParent++) { if (oParentSet.getParent(iParent) == nCandidateParent) { return -1e100; } } // set up candidate parent oParentSet.addParent(nCandidateParent, instances); // calculate the score double fAccuracy = calcScore(m_BayesNet); // delete temporarily added parent oParentSet.deleteLastParent(instances); return fAccuracy; } // calcScoreWithExtraParent /** * Calc Node Score With Parent Deleted * * @param nNode node for which the score is calculate * @param nCandidateParent candidate parent to delete from the existing parent * set * @return log score * @throws Exception if something goes wrong */ public double calcScoreWithMissingParent(int nNode, int nCandidateParent) throws Exception { ParentSet oParentSet = m_BayesNet.getParentSet(nNode); Instances instances = m_BayesNet.m_Instances; // sanity check: nCandidateParent should be in parent set already if (!oParentSet.contains(nCandidateParent)) { return -1e100; } // set up candidate parent int iParent = 
oParentSet.deleteParent(nCandidateParent, instances); // calculate the score double fAccuracy = calcScore(m_BayesNet); // reinsert temporarily deleted parent oParentSet.addParent(nCandidateParent, iParent, instances); return fAccuracy; } // calcScoreWithMissingParent /** * Calc Node Score With Arrow reversed * * @param nNode node for which the score is calculate * @param nCandidateParent candidate parent to delete from the existing parent * set * @return log score * @throws Exception if something goes wrong */ public double calcScoreWithReversedParent(int nNode, int nCandidateParent) throws Exception { ParentSet oParentSet = m_BayesNet.getParentSet(nNode); ParentSet oParentSet2 = m_BayesNet.getParentSet(nCandidateParent); Instances instances = m_BayesNet.m_Instances; // sanity check: nCandidateParent should be in parent set already if (!oParentSet.contains(nCandidateParent)) { return -1e100; } // set up candidate parent int iParent = oParentSet.deleteParent(nCandidateParent, instances); oParentSet2.addParent(nNode, instances); // calculate the score double fAccuracy = calcScore(m_BayesNet); // restate temporarily reversed arrow oParentSet2.deleteLastParent(instances); oParentSet.addParent(nCandidateParent, iParent, instances); return fAccuracy; } // calcScoreWithReversedParent /** * LeaveOneOutCV returns the accuracy calculated using Leave One Out cross * validation. The dataset used is m_Instances associated with the Bayes * Network. * * @param bayesNet : Bayes Network containing structure to evaluate * @return accuracy (in interval 0..1) measured using leave one out cv. 
* @throws Exception passed on by updateClassifier */ public double leaveOneOutCV(BayesNet bayesNet) throws Exception { m_BayesNet = bayesNet; double fAccuracy = 0.0; double fWeight = 0.0; Instances instances = bayesNet.m_Instances; bayesNet.estimateCPTs(); for (int iInstance = 0; iInstance < instances.numInstances(); iInstance++) { Instance instance = instances.instance(iInstance); instance.setWeight(-instance.weight()); bayesNet.updateClassifier(instance); fAccuracy += accuracyIncrease(instance); fWeight += instance.weight(); instance.setWeight(-instance.weight()); bayesNet.updateClassifier(instance); } return fAccuracy / fWeight; } // LeaveOneOutCV /** * CumulativeCV returns the accuracy calculated using cumulative cross * validation. The idea is to run through the data set and try to classify * each of the instances based on the previously seen data. The data set used * is m_Instances associated with the Bayes Network. * * @param bayesNet : Bayes Network containing structure to evaluate * @return accuracy (in interval 0..1) measured using leave one out cv. * @throws Exception passed on by updateClassifier */ public double cumulativeCV(BayesNet bayesNet) throws Exception { m_BayesNet = bayesNet; double fAccuracy = 0.0; double fWeight = 0.0; Instances instances = bayesNet.m_Instances; bayesNet.initCPTs(); for (int iInstance = 0; iInstance < instances.numInstances(); iInstance++) { Instance instance = instances.instance(iInstance); fAccuracy += accuracyIncrease(instance); bayesNet.updateClassifier(instance); fWeight += instance.weight(); } return fAccuracy / fWeight; } // LeaveOneOutCV /** * kFoldCV uses k-fold cross validation to measure the accuracy of a Bayes * network classifier. * * @param bayesNet : Bayes Network containing structure to evaluate * @param nNrOfFolds : the number of folds k to perform k-fold cv * @return accuracy (in interval 0..1) measured using leave one out cv. 
* @throws Exception passed on by updateClassifier */ public double kFoldCV(BayesNet bayesNet, int nNrOfFolds) throws Exception { m_BayesNet = bayesNet; double fAccuracy = 0.0; double fWeight = 0.0; Instances instances = bayesNet.m_Instances; // estimate CPTs based on complete data set bayesNet.estimateCPTs(); int nFoldStart = 0; int nFoldEnd = instances.numInstances() / nNrOfFolds; int iFold = 1; while (nFoldStart < instances.numInstances()) { // remove influence of fold iFold from the probability distribution for (int iInstance = nFoldStart; iInstance < nFoldEnd; iInstance++) { Instance instance = instances.instance(iInstance); instance.setWeight(-instance.weight()); bayesNet.updateClassifier(instance); } // measure accuracy on fold iFold for (int iInstance = nFoldStart; iInstance < nFoldEnd; iInstance++) { Instance instance = instances.instance(iInstance); instance.setWeight(-instance.weight()); fAccuracy += accuracyIncrease(instance); fWeight += instance.weight(); instance.setWeight(-instance.weight()); } // restore influence of fold iFold from the probability distribution for (int iInstance = nFoldStart; iInstance < nFoldEnd; iInstance++) { Instance instance = instances.instance(iInstance); instance.setWeight(-instance.weight()); bayesNet.updateClassifier(instance); } // go to next fold nFoldStart = nFoldEnd; iFold++; nFoldEnd = iFold * instances.numInstances() / nNrOfFolds; } return fAccuracy / fWeight; } // kFoldCV /** * accuracyIncrease determines how much the accuracy estimate should be * increased due to the contribution of a single given instance. * * @param instance : instance for which to calculate the accuracy increase. * @return increase in accuracy due to given instance. 
* @throws Exception passed on by distributionForInstance and classifyInstance */ double accuracyIncrease(Instance instance) throws Exception { if (m_bUseProb) { double[] fProb = m_BayesNet.distributionForInstance(instance); return fProb[(int) instance.classValue()] * instance.weight(); } else { if (m_BayesNet.classifyInstance(instance) == instance.classValue()) { return instance.weight(); } } return 0; } // accuracyIncrease /** * @return use probabilities or not in accuracy estimate */ public boolean getUseProb() { return m_bUseProb; } // getUseProb /** * @param useProb : use probabilities or not in accuracy estimate */ public void setUseProb(boolean useProb) { m_bUseProb = useProb; } // setUseProb /** * set cross validation strategy to be used in searching for networks. * * @param newCVType : cross validation strategy */ public void setCVType(SelectedTag newCVType) { if (newCVType.getTags() == TAGS_CV_TYPE) { m_nCVType = newCVType.getSelectedTag().getID(); } } // setCVType /** * get cross validation strategy to be used in searching for networks. * * @return cross validation strategy */ public SelectedTag getCVType() { return new SelectedTag(m_nCVType, TAGS_CV_TYPE); } // getCVType /** * * @param bMarkovBlanketClassifier */ @Override public void setMarkovBlanketClassifier(boolean bMarkovBlanketClassifier) { super.setMarkovBlanketClassifier(bMarkovBlanketClassifier); } /** * * @return */ @Override public boolean getMarkovBlanketClassifier() { return super.getMarkovBlanketClassifier(); } /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<Option>(); newVector.addElement(new Option( "\tApplies a Markov Blanket correction to the network structure, \n" + "\tafter a network structure is learned. 
This ensures that all \n" + "\tnodes in the network are part of the Markov blanket of the \n" + "\tclassifier node.", "mbc", 0, "-mbc")); newVector.addElement(new Option( "\tScore type (LOO-CV,k-Fold-CV,Cumulative-CV)", "S", 1, "-S [LOO-CV|k-Fold-CV|Cumulative-CV]")); newVector.addElement(new Option( "\tUse probabilistic or 0/1 scoring.\n\t(default probabilistic scoring)", "Q", 0, "-Q")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } // listOptions /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node. * </pre> * * <pre> * -S [LOO-CV|k-Fold-CV|Cumulative-CV] * Score type (LOO-CV,k-Fold-CV,Cumulative-CV) * </pre> * * <pre> * -Q * Use probabilistic or 0/1 scoring. * (default probabilistic scoring) * </pre> * * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { setMarkovBlanketClassifier(Utils.getFlag("mbc", options)); String sScore = Utils.getOption('S', options); if (sScore.compareTo("LOO-CV") == 0) { setCVType(new SelectedTag(LOOCV, TAGS_CV_TYPE)); } if (sScore.compareTo("k-Fold-CV") == 0) { setCVType(new SelectedTag(KFOLDCV, TAGS_CV_TYPE)); } if (sScore.compareTo("Cumulative-CV") == 0) { setCVType(new SelectedTag(CUMCV, TAGS_CV_TYPE)); } setUseProb(!Utils.getFlag('Q', options)); super.setOptions(options); } // setOptions /** * Gets the current settings of the search algorithm. 
* * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<String>(); if (getMarkovBlanketClassifier()) { options.add("-mbc"); } options.add("-S"); switch (m_nCVType) { case (LOOCV): options.add("LOO-CV"); break; case (KFOLDCV): options.add("k-Fold-CV"); break; case (CUMCV): options.add("Cumulative-CV"); break; } if (!getUseProb()) { options.add("-Q"); } Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } // getOptions /** * @return a string to describe the CVType option. */ public String CVTypeTipText() { return "Select cross validation strategy to be used in searching for networks." + "LOO-CV = Leave one out cross validation\n" + "k-Fold-CV = k fold cross validation\n" + "Cumulative-CV = cumulative cross validation."; } // CVTypeTipText /** * @return a string to describe the UseProb option. */ public String useProbTipText() { return "If set to true, the probability of the class if returned in the estimate of the " + "accuracy. If set to false, the accuracy estimate is only increased if the classifier returns " + "exactly the correct class."; } // useProbTipText /** * This will return a string describing the search algorithm. * * @return The string. */ public String globalInfo() { return "This Bayes Network learning algorithm uses cross validation to estimate " + "classification accuracy."; } // globalInfo /** * @return a string to describe the MarkovBlanketClassifier option. */ @Override public String markovBlanketClassifierTipText() { return super.markovBlanketClassifierTipText(); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search/global/HillClimber.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * HillClimber.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.search.global;

import java.io.Serializable;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;

import weka.classifiers.bayes.BayesNet;
import weka.classifiers.bayes.net.ParentSet;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;

/**
 * <!-- globalinfo-start --> This Bayes Network learning algorithm uses a hill climbing algorithm adding, deleting and reversing arcs. The search is not restricted by an order on the variables (unlike K2). The difference with B and B2 is
 * that this hill climber also considers arrows part of the naive Bayes structure for deletion.
 * <p/>
 * <!-- globalinfo-end -->
 *
 * <!-- options-start --> Valid options are:
 * <p/>
 *
 * <pre>
 * -P &lt;nr of parents&gt;
 *  Maximum number of parents
 * </pre>
 *
 * <pre>
 * -R
 *  Use arc reversal operation.
 *  (default false)
 * </pre>
 *
 * <pre>
 * -N
 *  Initial structure is empty (instead of Naive Bayes)
 * </pre>
 *
 * <pre>
 * -mbc
 *  Applies a Markov Blanket correction to the network structure,
 *  after a network structure is learned. This ensures that all
 *  nodes in the network are part of the Markov blanket of the
 *  classifier node.
 * </pre>
 *
 * <pre>
 * -S [LOO-CV|k-Fold-CV|Cumulative-CV]
 *  Score type (LOO-CV,k-Fold-CV,Cumulative-CV)
 * </pre>
 *
 * <pre>
 * -Q
 *  Use probabilistic or 0/1 scoring.
 *  (default probabilistic scoring)
 * </pre>
 *
 * <!-- options-end -->
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision$
 */
public class HillClimber extends GlobalScoreSearchAlgorithm {

  /** for serialization */
  static final long serialVersionUID = -3885042888195820149L;

  /**
   * The Operation class records a single candidate edit (add, delete or
   * reverse an arc) on the current Bayesian network, together with the
   * score the network would have after applying it.
   */
  class Operation implements Serializable, RevisionHandler {

    /** for serialization */
    static final long serialVersionUID = -2934970456587374967L;

    // constants indicating the type of an operation
    final static int OPERATION_ADD = 0;
    final static int OPERATION_DEL = 1;
    final static int OPERATION_REVERSE = 2;

    /** c'tor **/
    public Operation() {
    }

    /**
     * c'tor + initializers
     *
     * @param nTail index of the tail (parent) node of the arc
     * @param nHead index of the head (child) node of the arc
     * @param nOperation one of OPERATION_ADD, OPERATION_DEL, OPERATION_REVERSE
     */
    public Operation(final int nTail, final int nHead, final int nOperation) {
      this.m_nHead = nHead;
      this.m_nTail = nTail;
      this.m_nOperation = nOperation;
    }

    /**
     * compare this operation with another
     *
     * NOTE(review): this overloads equals(Operation) rather than overriding
     * Object.equals(Object), and no hashCode is defined — Operation instances
     * must not be compared via Object references or stored in hash-based
     * collections; callers (e.g. tabu lists) rely on this overload directly.
     *
     * @param other
     *            operation to compare with
     * @return true if operation is the same
     */
    public boolean equals(final Operation other) {
      if (other == null) {
        return false;
      }
      return ((this.m_nOperation == other.m_nOperation) && (this.m_nHead == other.m_nHead) && (this.m_nTail == other.m_nTail));
    } // equals

    /** number of the tail node **/
    public int m_nTail;

    /** number of the head node **/
    public int m_nHead;

    /** type of operation (ADD, DEL, REVERSE) **/
    public int m_nOperation;

    /** change of score due to this operation; -1E100 acts as "unset"/sentinel **/
    public double m_fScore = -1E100;

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    @Override
    public String getRevision() {
      return RevisionUtils.extract("$Revision$");
    }
  } // class Operation

  /** use the arc reversal operator **/
  boolean m_bUseArcReversal = false;

  /**
   * search determines the network structure/graph of the network with the Taby algorithm.
   * Repeatedly applies the single best-scoring operation (add/delete/reverse an arc)
   * until no operation improves on the current network's score.
   *
   * @param bayesNet
   *            the network to search
   * @param instances
   *            the instances to work with
   * @throws Exception
   *             if something goes wrong, including InterruptedException when the
   *             executing thread's interrupt flag is set (interruptible-weka extension)
   */
  @Override
  protected void search(final BayesNet bayesNet, final Instances instances) throws Exception {
    this.m_BayesNet = bayesNet;
    double fScore = this.calcScore(bayesNet);
    // go do the search
    Operation oOperation = this.getOptimalOperation(bayesNet, instances);
    while ((oOperation != null) && (oOperation.m_fScore > fScore)) {
      // XXX interrupt weka
      // Thread.interrupted() clears the flag; interruption is propagated
      // to the caller via the thrown InterruptedException instead.
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA!");
      }
      this.performOperation(bayesNet, instances, oOperation);
      fScore = oOperation.m_fScore;
      oOperation = this.getOptimalOperation(bayesNet, instances);
    }
  } // search

  /**
   * check whether the operation is not in the forbidden. For base hill climber, there are no restrictions on operations, so we always return true.
   * Subclasses (e.g. tabu search) override this to veto operations.
   *
   * @param oOperation
   *            operation to be checked
   * @return true if operation is not in the tabu list
   */
  boolean isNotTabu(final Operation oOperation) {
    return true;
  } // isNotTabu

  /**
   * getOptimalOperation finds the optimal operation that can be performed on the Bayes network that is not in the tabu list.
   * Considers arc additions and deletions always, and reversals only when
   * the arc-reversal operator is enabled.
   *
   * @param bayesNet
   *            Bayes network to apply operation on
   * @param instances
   *            data set to learn from
   * @return optimal operation found, or null if no candidate improved on the
   *         -1E100 sentinel score (i.e. no applicable operation exists)
   * @throws Exception
   *             if something goes wrong
   */
  Operation getOptimalOperation(final BayesNet bayesNet, final Instances instances) throws Exception {
    Operation oBestOperation = new Operation();

    // Add???
    oBestOperation = this.findBestArcToAdd(bayesNet, instances, oBestOperation);
    // Delete???
    oBestOperation = this.findBestArcToDelete(bayesNet, instances, oBestOperation);
    // Reverse???
    if (this.getUseArcReversal()) {
      oBestOperation = this.findBestArcToReverse(bayesNet, instances, oBestOperation);
    }

    // did we find something?
    if (oBestOperation.m_fScore == -1E100) {
      return null;
    }

    return oBestOperation;
  } // getOptimalOperation

  /**
   * performOperation applies an operation on the Bayes network and update the cache.
   * A reversal is implemented as a deletion followed by an addition in the
   * opposite direction.
   *
   * @param bayesNet
   *            Bayes network to apply operation on
   * @param instances
   *            data set to learn from
   * @param oOperation
   *            operation to perform
   * @throws Exception
   *             if something goes wrong
   */
  void performOperation(final BayesNet bayesNet, final Instances instances, final Operation oOperation) throws Exception {
    // perform operation
    switch (oOperation.m_nOperation) {
    case Operation.OPERATION_ADD:
      this.applyArcAddition(bayesNet, oOperation.m_nHead, oOperation.m_nTail, instances);
      if (bayesNet.getDebug()) {
        System.out.print("Add " + oOperation.m_nHead + " -> " + oOperation.m_nTail);
      }
      break;
    case Operation.OPERATION_DEL:
      this.applyArcDeletion(bayesNet, oOperation.m_nHead, oOperation.m_nTail, instances);
      if (bayesNet.getDebug()) {
        System.out.print("Del " + oOperation.m_nHead + " -> " + oOperation.m_nTail);
      }
      break;
    case Operation.OPERATION_REVERSE:
      this.applyArcDeletion(bayesNet, oOperation.m_nHead, oOperation.m_nTail, instances);
      this.applyArcAddition(bayesNet, oOperation.m_nTail, oOperation.m_nHead, instances);
      if (bayesNet.getDebug()) {
        System.out.print("Rev " + oOperation.m_nHead + " -> " + oOperation.m_nTail);
      }
      break;
    }
  } // performOperation

  /**
   * Adds the arc iTail -> iHead by registering iTail in iHead's parent set.
   *
   * @param bayesNet network to modify
   * @param iHead child node of the new arc
   * @param iTail parent node of the new arc
   * @param instances data set (needed by ParentSet to resize its tables)
   */
  void applyArcAddition(final BayesNet bayesNet, final int iHead, final int iTail, final Instances instances) {
    ParentSet bestParentSet = bayesNet.getParentSet(iHead);
    bestParentSet.addParent(iTail, instances);
  } // applyArcAddition

  /**
   * Removes the arc iTail -> iHead by deleting iTail from iHead's parent set.
   *
   * @param bayesNet network to modify
   * @param iHead child node of the arc to remove
   * @param iTail parent node of the arc to remove
   * @param instances data set (needed by ParentSet to resize its tables)
   */
  void applyArcDeletion(final BayesNet bayesNet, final int iHead, final int iTail, final Instances instances) {
    ParentSet bestParentSet = bayesNet.getParentSet(iHead);
    bestParentSet.deleteParent(iTail, instances);
  } // applyArcDeletion

  /**
   * find best (or least bad) arc addition operation
   *
   * @param bayesNet
   *            Bayes network to add arc to
   * @param instances
   *            data set
   * @param oBestOperation best operation found so far; only improved upon
   * @return Operation containing best arc to add, or null if no arc addition is allowed (this can happen if any arc addition introduces a cycle, or all parent sets are filled up to the maximum nr of parents).
   * @throws Exception
   *             if something goes wrong
   */
  Operation findBestArcToAdd(final BayesNet bayesNet, final Instances instances, Operation oBestOperation) throws Exception {
    int nNrOfAtts = instances.numAttributes();
    // find best arc to add
    for (int iAttributeHead = 0; iAttributeHead < nNrOfAtts; iAttributeHead++) {
      // skip heads whose parent set is already full
      if (bayesNet.getParentSet(iAttributeHead).getNrOfParents() < this.m_nMaxNrOfParents) {
        for (int iAttributeTail = 0; iAttributeTail < nNrOfAtts; iAttributeTail++) {
          // addArcMakesSense also rules out cycles and self-arcs
          if (this.addArcMakesSense(bayesNet, instances, iAttributeHead, iAttributeTail)) {
            Operation oOperation = new Operation(iAttributeTail, iAttributeHead, Operation.OPERATION_ADD);
            double fScore = this.calcScoreWithExtraParent(oOperation.m_nHead, oOperation.m_nTail);
            if (fScore > oBestOperation.m_fScore) {
              if (this.isNotTabu(oOperation)) {
                oBestOperation = oOperation;
                oBestOperation.m_fScore = fScore;
              }
            }
          }
        }
      }
    }
    return oBestOperation;
  } // findBestArcToAdd

  /**
   * find best (or least bad) arc deletion operation
   *
   * @param bayesNet
   *            Bayes network to delete arc from
   * @param instances
   *            data set
   * @param oBestOperation best operation found so far; only improved upon
   * @return Operation containing best arc to delete, or null if no deletion can be made (happens when there is no arc in the network yet).
   * @throws Exception
   *             of something goes wrong
   */
  Operation findBestArcToDelete(final BayesNet bayesNet, final Instances instances, Operation oBestOperation) throws Exception {
    int nNrOfAtts = instances.numAttributes();
    // find best arc to delete
    for (int iNode = 0; iNode < nNrOfAtts; iNode++) {
      ParentSet parentSet = bayesNet.getParentSet(iNode);
      for (int iParent = 0; iParent < parentSet.getNrOfParents(); iParent++) {
        Operation oOperation = new Operation(parentSet.getParent(iParent), iNode, Operation.OPERATION_DEL);
        double fScore = this.calcScoreWithMissingParent(oOperation.m_nHead, oOperation.m_nTail);
        if (fScore > oBestOperation.m_fScore) {
          if (this.isNotTabu(oOperation)) {
            oBestOperation = oOperation;
            oBestOperation.m_fScore = fScore;
          }
        }
      }
    }
    return oBestOperation;
  } // findBestArcToDelete

  /**
   * find best (or least bad) arc reversal operation
   *
   * @param bayesNet
   *            Bayes network to reverse arc in
   * @param instances
   *            data set
   * @param oBestOperation best operation found so far; only improved upon
   * @return Operation containing best arc to reverse, or null if no reversal is allowed (happens if there is no arc in the network yet, or when any such reversal introduces a cycle).
   * @throws Exception
   *             if something goes wrong
   */
  Operation findBestArcToReverse(final BayesNet bayesNet, final Instances instances, Operation oBestOperation) throws Exception {
    int nNrOfAtts = instances.numAttributes();
    // find best arc to reverse
    for (int iNode = 0; iNode < nNrOfAtts; iNode++) {
      ParentSet parentSet = bayesNet.getParentSet(iNode);
      for (int iParent = 0; iParent < parentSet.getNrOfParents(); iParent++) {
        int iTail = parentSet.getParent(iParent);
        // is reversal allowed?
        if (this.reverseArcMakesSense(bayesNet, instances, iNode, iTail) && bayesNet.getParentSet(iTail).getNrOfParents() < this.m_nMaxNrOfParents) {
          // go check if reversal results in the best step forward
          Operation oOperation = new Operation(parentSet.getParent(iParent), iNode, Operation.OPERATION_REVERSE);
          double fScore = this.calcScoreWithReversedParent(oOperation.m_nHead, oOperation.m_nTail);
          if (fScore > oBestOperation.m_fScore) {
            if (this.isNotTabu(oOperation)) {
              oBestOperation = oOperation;
              oBestOperation.m_fScore = fScore;
            }
          }
        }
      }
    }
    return oBestOperation;
  } // findBestArcToReverse

  /**
   * Sets the max number of parents
   *
   * @param nMaxNrOfParents
   *            the max number of parents
   */
  public void setMaxNrOfParents(final int nMaxNrOfParents) {
    this.m_nMaxNrOfParents = nMaxNrOfParents;
  }

  /**
   * Gets the max number of parents.
   *
   * @return the max number of parents
   */
  public int getMaxNrOfParents() {
    return this.m_nMaxNrOfParents;
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> newVector = new Vector<Option>(2);

    newVector.addElement(new Option("\tMaximum number of parents", "P", 1, "-P <nr of parents>"));
    newVector.addElement(new Option("\tUse arc reversal operation.\n\t(default false)", "R", 0, "-R"));
    newVector.addElement(new Option("\tInitial structure is empty (instead of Naive Bayes)", "N", 0, "-N"));

    newVector.addAll(Collections.list(super.listOptions()));

    return newVector.elements();
  } // listOptions

  /**
   * Parses a given list of options. Recognized options are -P (maximum number
   * of parents, default 100000 when absent), -R (use arc reversal) and
   * -N (start from an empty structure instead of Naive Bayes); remaining
   * options are handled by the superclass. See the class javadoc for the
   * full option table.
   *
   * @param options
   *            the list of options as an array of strings
   * @throws Exception
   *             if an option is not supported
   */
  @Override
  public void setOptions(final String[] options) throws Exception {
    this.setUseArcReversal(Utils.getFlag('R', options));

    this.setInitAsNaiveBayes(!(Utils.getFlag('N', options)));

    String sMaxNrOfParents = Utils.getOption('P', options);
    if (sMaxNrOfParents.length() != 0) {
      this.setMaxNrOfParents(Integer.parseInt(sMaxNrOfParents));
    } else {
      this.setMaxNrOfParents(100000);
    }

    super.setOptions(options);
  } // setOptions

  /**
   * Gets the current settings of the search algorithm.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {
    Vector<String> options = new Vector<String>();

    if (this.getUseArcReversal()) {
      options.add("-R");
    }

    if (!this.getInitAsNaiveBayes()) {
      options.add("-N");
    }

    options.add("-P");
    options.add("" + this.m_nMaxNrOfParents);

    Collections.addAll(options, super.getOptions());

    return options.toArray(new String[0]);
  } // getOptions

  /**
   * Sets whether to init as naive bayes
   *
   * @param bInitAsNaiveBayes
   *            whether to init as naive bayes
   */
  public void setInitAsNaiveBayes(final boolean bInitAsNaiveBayes) {
    this.m_bInitAsNaiveBayes = bInitAsNaiveBayes;
  }

  /**
   * Gets whether to init as naive bayes
   *
   * @return whether to init as naive bayes
   */
  public boolean getInitAsNaiveBayes() {
    return this.m_bInitAsNaiveBayes;
  }

  /**
   * get use the arc reversal operation
   *
   * @return whether the arc reversal operation should be used
   */
  public boolean getUseArcReversal() {
    return this.m_bUseArcReversal;
  } // getUseArcReversal

  /**
   * set use the arc reversal operation
   *
   * @param bUseArcReversal
   *            whether the arc reversal operation should be used
   */
  public void setUseArcReversal(final boolean bUseArcReversal) {
    this.m_bUseArcReversal = bUseArcReversal;
  } // setUseArcReversal

  /**
   * This will return a string describing the search algorithm.
   *
   * @return The string.
   */
  @Override
  public String globalInfo() {
    return "This Bayes Network learning algorithm uses a hill climbing algorithm " + "adding, deleting and reversing arcs. The search is not restricted by an order " + "on the variables (unlike K2). The difference with B and B2 is that this hill "
        + "climber also considers arrows part of the naive Bayes structure for deletion.";
  } // globalInfo

  /**
   * @return a string to describe the Use Arc Reversal option.
   */
  public String useArcReversalTipText() {
    return "When set to true, the arc reversal operation is used in the search.";
  } // useArcReversalTipText

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }

} // HillClimber
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search/global/K2.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * K2.java
 * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.search.global;

import java.util.Collections;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;

import weka.classifiers.bayes.BayesNet;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;

/**
 * <!-- globalinfo-start --> This Bayes Network learning algorithm uses a hill climbing algorithm restricted by an order on the variables.<br/>
 * <br/>
 * For more information see:<br/>
 * <br/>
 * G.F. Cooper, E. Herskovits (1990). A Bayesian method for constructing Bayesian belief networks from databases.<br/>
 * <br/>
 * G. Cooper, E. Herskovits (1992). A Bayesian method for the induction of probabilistic networks from data. Machine Learning. 9(4):309-347.<br/>
 * <br/>
 * Works with nominal variables and no missing values only.
 * <p/>
 * <!-- globalinfo-end -->
 *
 * <!-- technical-bibtex-start --> BibTeX:
 *
 * <pre>
 * &#64;proceedings{Cooper1990,
 *    author = {G.F. Cooper and E. Herskovits},
 *    booktitle = {Proceedings of the Conference on Uncertainty in AI},
 *    pages = {86-94},
 *    title = {A Bayesian method for constructing Bayesian belief networks from databases},
 *    year = {1990}
 * }
 *
 * &#64;article{Cooper1992,
 *    author = {G. Cooper and E. Herskovits},
 *    journal = {Machine Learning},
 *    number = {4},
 *    pages = {309-347},
 *    title = {A Bayesian method for the induction of probabilistic networks from data},
 *    volume = {9},
 *    year = {1992}
 * }
 * </pre>
 * <p/>
 * <!-- technical-bibtex-end -->
 *
 * <!-- options-start --> Valid options are:
 * <p/>
 *
 * <pre>
 * -N
 *  Initial structure is empty (instead of Naive Bayes)
 * </pre>
 *
 * <pre>
 * -P &lt;nr of parents&gt;
 *  Maximum number of parents
 * </pre>
 *
 * <pre>
 * -R
 *  Random order.
 *  (default false)
 * </pre>
 *
 * <pre>
 * -mbc
 *  Applies a Markov Blanket correction to the network structure,
 *  after a network structure is learned. This ensures that all
 *  nodes in the network are part of the Markov blanket of the
 *  classifier node.
 * </pre>
 *
 * <pre>
 * -S [LOO-CV|k-Fold-CV|Cumulative-CV]
 *  Score type (LOO-CV,k-Fold-CV,Cumulative-CV)
 * </pre>
 *
 * <pre>
 * -Q
 *  Use probabilistic or 0/1 scoring.
 *  (default probabilistic scoring)
 * </pre>
 *
 * <!-- options-end -->
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision$
 */
public class K2 extends GlobalScoreSearchAlgorithm implements TechnicalInformationHandler {

  /** for serialization */
  static final long serialVersionUID = -6626871067466338256L;

  /** Holds flag to indicate ordering should be random **/
  boolean m_bRandomOrder = false;

  /**
   * Returns an instance of a TechnicalInformation object, containing detailed information about the technical background of this class, e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  @Override
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;
    TechnicalInformation additional;

    result = new TechnicalInformation(Type.PROCEEDINGS);
    result.setValue(Field.AUTHOR, "G.F. Cooper and E. Herskovits");
    result.setValue(Field.YEAR, "1990");
    result.setValue(Field.TITLE, "A Bayesian method for constructing Bayesian belief networks from databases");
    result.setValue(Field.BOOKTITLE, "Proceedings of the Conference on Uncertainty in AI");
    result.setValue(Field.PAGES, "86-94");

    additional = result.add(Type.ARTICLE);
    additional.setValue(Field.AUTHOR, "G. Cooper and E. Herskovits");
    additional.setValue(Field.YEAR, "1992");
    additional.setValue(Field.TITLE, "A Bayesian method for the induction of probabilistic networks from data");
    additional.setValue(Field.JOURNAL, "Machine Learning");
    additional.setValue(Field.VOLUME, "9");
    additional.setValue(Field.NUMBER, "4");
    additional.setValue(Field.PAGES, "309-347");

    return result;
  }

  /**
   * search determines the network structure/graph of the network with the K2 algorithm, restricted by its initial structure (which can be an empty graph, or a Naive Bayes graph.
   *
   * The node ordering is built with the class attribute first, followed by
   * the remaining attributes in dataset order (optionally shuffled when
   * random ordering is enabled; the class position is kept fixed only when
   * the net was initialized as Naive Bayes). For each node, parents are then
   * greedily added from its predecessors in the ordering while the score
   * improves and the maximum number of parents is not exceeded.
   *
   * @param bayesNet
   *            the network
   * @param instances
   *            the data to work with
   * @throws Exception
   *             if something goes wrong, including InterruptedException when
   *             the executing thread's interrupt flag is set
   */
  @Override
  public void search(final BayesNet bayesNet, final Instances instances) throws Exception {
    int nOrder[] = new int[instances.numAttributes()];
    nOrder[0] = instances.classIndex();
    int nAttribute = 0;

    // fill the rest of the ordering with all non-class attributes, in dataset order
    for (int iOrder = 1; iOrder < instances.numAttributes(); iOrder++) {
      if (nAttribute == instances.classIndex()) {
        nAttribute++;
      }
      nOrder[iOrder] = nAttribute++;
    }

    if (this.m_bRandomOrder) {
      // generate random ordering (if required)
      // NOTE(review): uses an unseeded Random, so the ordering is not
      // reproducible across runs — TODO confirm this is intentional.
      Random random = new Random();
      int iClass;
      if (this.getInitAsNaiveBayes()) {
        iClass = 0;
      } else {
        iClass = -1;
      }
      for (int iOrder = 0; iOrder < instances.numAttributes(); iOrder++) {
        // XXX interrupt weka
        if (Thread.interrupted()) {
          throw new InterruptedException("Killed WEKA!");
        }
        int iOrder2 = random.nextInt(instances.numAttributes());
        // never move the class attribute away from position 0 when
        // the structure was initialized as Naive Bayes
        if (iOrder != iClass && iOrder2 != iClass) {
          int nTmp = nOrder[iOrder];
          nOrder[iOrder] = nOrder[iOrder2];
          nOrder[iOrder2] = nTmp;
        }
      }
    }

    // determine base scores
    double fBaseScore = this.calcScore(bayesNet);

    // K2 algorithm: greedy search restricted by ordering
    for (int iOrder = 1; iOrder < instances.numAttributes(); iOrder++) {
      // XXX interrupt weka
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA!");
      }
      int iAttribute = nOrder[iOrder];
      double fBestScore = fBaseScore;

      boolean bProgress = (bayesNet.getParentSet(iAttribute).getNrOfParents() < this.getMaxNrOfParents());
      while (bProgress && (bayesNet.getParentSet(iAttribute).getNrOfParents() < this.getMaxNrOfParents())) {
        int nBestAttribute = -1;
        // only predecessors in the ordering are parent candidates
        for (int iOrder2 = 0; iOrder2 < iOrder; iOrder2++) {
          // XXX interrupt weka
          if (Thread.interrupted()) {
            throw new InterruptedException("Killed WEKA!");
          }
          int iAttribute2 = nOrder[iOrder2];
          double fScore = this.calcScoreWithExtraParent(iAttribute, iAttribute2);
          if (fScore > fBestScore) {
            fBestScore = fScore;
            nBestAttribute = iAttribute2;
          }
        }
        if (nBestAttribute != -1) {
          bayesNet.getParentSet(iAttribute).addParent(nBestAttribute, instances);
          fBaseScore = fBestScore;
          bProgress = true;
        } else {
          bProgress = false;
        }
      }
    }
  } // search

  /**
   * Sets the max number of parents
   *
   * @param nMaxNrOfParents
   *            the max number of parents
   */
  public void setMaxNrOfParents(final int nMaxNrOfParents) {
    this.m_nMaxNrOfParents = nMaxNrOfParents;
  }

  /**
   * Gets the max number of parents.
   *
   * @return the max number of parents
   */
  public int getMaxNrOfParents() {
    return this.m_nMaxNrOfParents;
  }

  /**
   * Sets whether to init as naive bayes
   *
   * @param bInitAsNaiveBayes
   *            whether to init as naive bayes
   */
  public void setInitAsNaiveBayes(final boolean bInitAsNaiveBayes) {
    this.m_bInitAsNaiveBayes = bInitAsNaiveBayes;
  }

  /**
   * Gets whether to init as naive bayes
   *
   * @return whether to init as naive bayes
   */
  public boolean getInitAsNaiveBayes() {
    return this.m_bInitAsNaiveBayes;
  }

  /**
   * Set random order flag
   *
   * @param bRandomOrder
   *            the random order flag
   */
  public void setRandomOrder(final boolean bRandomOrder) {
    this.m_bRandomOrder = bRandomOrder;
  } // SetRandomOrder

  /**
   * Get random order flag
   *
   * @return the random order flag
   */
  public boolean getRandomOrder() {
    return this.m_bRandomOrder;
  } // getRandomOrder

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> newVector = new Vector<Option>(0);

    newVector.addElement(new Option("\tInitial structure is empty (instead of Naive Bayes)", "N", 0, "-N"));
    newVector.addElement(new Option("\tMaximum number of parents", "P", 1, "-P <nr of parents>"));
    newVector.addElement(new Option("\tRandom order.\n" + "\t(default false)", "R", 0, "-R"));

    newVector.addAll(Collections.list(super.listOptions()));

    return newVector.elements();
  }

  /**
   * Parses a given list of options. Recognized options are -R (random node
   * order), -N (start from an empty structure instead of Naive Bayes) and
   * -P (maximum number of parents, default 100000 when absent); remaining
   * options are handled by the superclass. See the class javadoc for the
   * full option table.
   *
   * @param options
   *            the list of options as an array of strings
   * @throws Exception
   *             if an option is not supported
   */
  @Override
  public void setOptions(final String[] options) throws Exception {

    this.setRandomOrder(Utils.getFlag('R', options));

    this.m_bInitAsNaiveBayes = !(Utils.getFlag('N', options));

    String sMaxNrOfParents = Utils.getOption('P', options);
    if (sMaxNrOfParents.length() != 0) {
      this.setMaxNrOfParents(Integer.parseInt(sMaxNrOfParents));
    } else {
      this.setMaxNrOfParents(100000);
    }

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the search algorithm.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {
    Vector<String> options = new Vector<String>();

    options.add("-P");
    options.add("" + this.m_nMaxNrOfParents);

    if (!this.m_bInitAsNaiveBayes) {
      options.add("-N");
    }

    if (this.getRandomOrder()) {
      options.add("-R");
    }

    Collections.addAll(options, super.getOptions());

    // Fill up rest with empty strings, not nulls!
    return options.toArray(new String[0]);
  }

  /**
   * @return a string to describe the RandomOrder option.
   */
  public String randomOrderTipText() {
    return "When set to true, the order of the nodes in the network is random." + " Default random order is false and the order" + " of the nodes in the dataset is used."
        + " In any case, when the network was initialized as Naive Bayes Network, the" + " class variable is first in the ordering though.";
  } // randomOrderTipText

  /**
   * This will return a string describing the search algorithm.
   *
   * @return The string.
   */
  @Override
  public String globalInfo() {
    return "This Bayes Network learning algorithm uses a hill climbing algorithm " + "restricted by an order on the variables.\n\n" + "For more information see:\n\n" + this.getTechnicalInformation().toString() + "\n\n"
        + "Works with nominal variables and no missing values only.";
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search/global/RepeatedHillClimber.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * RepeatedHillClimber.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.search.global;

import java.util.Collections;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;

import weka.classifiers.bayes.BayesNet;
import weka.classifiers.bayes.net.ParentSet;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.Utils;

/**
 * <!-- globalinfo-start --> This Bayes Network learning algorithm repeatedly uses hill climbing starting with a randomly generated network structure and return the best structure of the various runs.
 * <p/>
 * <!-- globalinfo-end -->
 *
 * <!-- options-start --> Valid options are:
 * <p/>
 *
 * <pre>
 * -U &lt;integer&gt;
 *  Number of runs
 * </pre>
 *
 * <pre>
 * -A &lt;seed&gt;
 *  Random number seed
 * </pre>
 *
 * <pre>
 * -P &lt;nr of parents&gt;
 *  Maximum number of parents
 * </pre>
 *
 * <pre>
 * -R
 *  Use arc reversal operation.
 *  (default false)
 * </pre>
 *
 * <pre>
 * -N
 *  Initial structure is empty (instead of Naive Bayes)
 * </pre>
 *
 * <pre>
 * -mbc
 *  Applies a Markov Blanket correction to the network structure,
 *  after a network structure is learned. This ensures that all
 *  nodes in the network are part of the Markov blanket of the
 *  classifier node.
 * </pre>
 *
 * <pre>
 * -S [LOO-CV|k-Fold-CV|Cumulative-CV]
 *  Score type (LOO-CV,k-Fold-CV,Cumulative-CV)
 * </pre>
 *
 * <pre>
 * -Q
 *  Use probabilistic or 0/1 scoring.
 *  (default probabilistic scoring)
 * </pre>
 *
 * <!-- options-end -->
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision$
 */
public class RepeatedHillClimber extends HillClimber {

  /** for serialization */
  static final long serialVersionUID = -7359197180460703069L;

  /** number of runs **/
  int m_nRuns = 10;

  /** random number seed **/
  int m_nSeed = 1;

  /** random number generator **/
  Random m_random;

  /**
   * search determines the network structure/graph of the network with the repeated hill climbing.
   * Each run starts from a freshly randomized structure, applies the parent
   * class's hill climbing, and the best-scoring structure over all runs is
   * copied back into bayesNet.
   *
   * @param bayesNet
   *            the network to use
   * @param instances
   *            the data to use
   * @throws Exception
   *             if something goes wrong, including InterruptedException when
   *             the executing thread's interrupt flag is set
   **/
  @Override
  protected void search(final BayesNet bayesNet, final Instances instances) throws Exception {
    this.m_random = new Random(this.getSeed());
    // keeps track of score pf best structure found so far
    double fBestScore;
    double fCurrentScore = this.calcScore(bayesNet);
    // keeps track of best structure found so far
    BayesNet bestBayesNet;

    // initialize bestBayesNet
    fBestScore = fCurrentScore;
    bestBayesNet = new BayesNet();
    bestBayesNet.m_Instances = instances;
    bestBayesNet.initStructure();
    this.copyParentSets(bestBayesNet, bayesNet);

    // go do the search
    for (int iRun = 0; iRun < this.m_nRuns; iRun++) {
      // XXX interrupt weka
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA!");
      }
      // generate random nework
      this.generateRandomNet(bayesNet, instances);

      // search
      super.search(bayesNet, instances);

      // calculate score
      fCurrentScore = this.calcScore(bayesNet);

      // keep track of best network seen so far
      if (fCurrentScore > fBestScore) {
        fBestScore = fCurrentScore;
        this.copyParentSets(bestBayesNet, bayesNet);
      }
    }

    // restore current network to best network
    this.copyParentSets(bayesNet, bestBayesNet);

    // free up memory
    bestBayesNet = null;
  } // search

  /**
   * Replaces bayesNet's structure by a random one: clears all parent sets,
   * optionally re-creates the Naive Bayes arcs (class -> each attribute),
   * then attempts a random number of random arc insertions (each accepted
   * only if it keeps the graph acyclic and within the max-parents limit).
   *
   * @param bayesNet network whose structure is randomized in place
   * @param instances data set used to size the parent sets
   * @throws InterruptedException declared but not thrown here directly
   */
  void generateRandomNet(final BayesNet bayesNet, final Instances instances) throws InterruptedException {
    int nNodes = instances.numAttributes();
    // clear network
    for (int iNode = 0; iNode < nNodes; iNode++) {
      ParentSet parentSet = bayesNet.getParentSet(iNode);
      while (parentSet.getNrOfParents() > 0) {
        parentSet.deleteLastParent(instances);
      }
    }

    // initialize as naive Bayes?
    if (this.getInitAsNaiveBayes()) {
      int iClass = instances.classIndex();
      // initialize parent sets to have arrow from classifier node to
      // each of the other nodes
      for (int iNode = 0; iNode < nNodes; iNode++) {
        if (iNode != iClass) {
          bayesNet.getParentSet(iNode).addParent(iClass, instances);
        }
      }
    }

    // insert random arcs
    int nNrOfAttempts = this.m_random.nextInt(nNodes * nNodes);
    for (int iAttempt = 0; iAttempt < nNrOfAttempts; iAttempt++) {
      int iTail = this.m_random.nextInt(nNodes);
      int iHead = this.m_random.nextInt(nNodes);
      if (bayesNet.getParentSet(iHead).getNrOfParents() < this.getMaxNrOfParents() && this.addArcMakesSense(bayesNet, instances, iHead, iTail)) {
        bayesNet.getParentSet(iHead).addParent(iTail, instances);
      }
    }
  } // generateRandomNet

  /**
   * copyParentSets copies parent sets of source to dest BayesNet
   *
   * @param dest
   *            destination network
   * @param source
   *            source network
   */
  void copyParentSets(final BayesNet dest, final BayesNet source) {
    int nNodes = source.getNrOfNodes();
    // clear parent set first
    for (int iNode = 0; iNode < nNodes; iNode++) {
      dest.getParentSet(iNode).copy(source.getParentSet(iNode));
    }
  } // CopyParentSets

  /**
   * Returns the number of runs
   *
   * @return number of runs
   */
  public int getRuns() {
    return this.m_nRuns;
  } // getRuns

  /**
   * Sets the number of runs
   *
   * @param nRuns
   *            The number of runs to set
   */
  public void setRuns(final int nRuns) {
    this.m_nRuns = nRuns;
  } // setRuns

  /**
   * Returns the random seed
   *
   * @return random number seed
   */
  public int getSeed() {
    return this.m_nSeed;
  } // getSeed

  /**
   * Sets the random number seed
   *
   * @param nSeed
   *            The number of the seed to set
   */
  public void setSeed(final int nSeed) {
    this.m_nSeed = nSeed;
  } // setSeed

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> newVector = new Vector<Option>(4);

    newVector.addElement(new Option("\tNumber of runs", "U", 1, "-U <integer>"));
    newVector.addElement(new Option("\tRandom number seed", "A", 1, "-A <seed>"));

    newVector.addAll(Collections.list(super.listOptions()));

    return newVector.elements();
  } // listOptions

  /**
   * Parses a given list of options. Recognized options are -U (number of
   * runs) and -A (random number seed); remaining options, including the
   * HillClimber ones, are handled by the superclass. See the class javadoc
   * for the full option table.
   *
   * @param options
   *            the list of options as an array of strings
   * @throws Exception
   *             if an option is not supported
   */
  @Override
  public void setOptions(final String[] options) throws Exception {
    String sRuns = Utils.getOption('U', options);
    if (sRuns.length() != 0) {
      this.setRuns(Integer.parseInt(sRuns));
    }

    String sSeed = Utils.getOption('A', options);
    if (sSeed.length() != 0) {
      this.setSeed(Integer.parseInt(sSeed));
    }

    super.setOptions(options);
  } // setOptions

  /**
   * Gets the current settings of the search algorithm.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {
    Vector<String> options = new Vector<String>();

    options.add("-U");
    options.add("" + this.getRuns());

    options.add("-A");
    options.add("" + this.getSeed());

    Collections.addAll(options, super.getOptions());

    return options.toArray(new String[0]);
  } // getOptions

  /**
   * This will return a string describing the classifier.
   *
   * @return The string.
   */
  @Override
  public String globalInfo() {
    return "This Bayes Network learning algorithm repeatedly uses hill climbing starting " + "with a randomly generated network structure and return the best structure of the " + "various runs.";
  } // globalInfo

  /**
   * @return a string to describe the Runs option.
   */
  public String runsTipText() {
    return "Sets the number of times hill climbing is performed.";
  } // runsTipText

  /**
   * @return a string to describe the Seed option.
   */
  public String seedTipText() {
    return "Initialization value for random number generator." + " Setting the seed allows replicability of experiments.";
  } // seedTipText

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search/global/SimulatedAnnealing.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * SimulatedAnnealing.java * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.search.global; import java.util.Collections; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.bayes.BayesNet; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; /** * <!-- globalinfo-start --> This Bayes Network learning algorithm uses the * general purpose search method of simulated annealing to find a well scoring * network structure.<br/> * <br/> * For more information see:<br/> * <br/> * R.R. Bouckaert (1995). Bayesian Belief Networks: from Construction to * Inference. Utrecht, Netherlands. * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;phdthesis{Bouckaert1995, * address = {Utrecht, Netherlands}, * author = {R.R. 
Bouckaert}, * institution = {University of Utrecht}, * title = {Bayesian Belief Networks: from Construction to Inference}, * year = {1995} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -A &lt;float&gt; * Start temperature * </pre> * * <pre> * -U &lt;integer&gt; * Number of runs * </pre> * * <pre> * -D &lt;float&gt; * Delta temperature * </pre> * * <pre> * -R &lt;seed&gt; * Random number seed * </pre> * * <pre> * -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node. * </pre> * * <pre> * -S [LOO-CV|k-Fold-CV|Cumulative-CV] * Score type (LOO-CV,k-Fold-CV,Cumulative-CV) * </pre> * * <pre> * -Q * Use probabilistic or 0/1 scoring. * (default probabilistic scoring) * </pre> * * <!-- options-end --> * * @author Remco Bouckaert (rrb@xm.co.nz) * @version $Revision$ */ public class SimulatedAnnealing extends GlobalScoreSearchAlgorithm implements TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -5482721887881010916L; /** start temperature **/ double m_fTStart = 10; /** change in temperature at every run **/ double m_fDelta = 0.999; /** number of runs **/ int m_nRuns = 10000; /** use the arc reversal operator **/ boolean m_bUseArcReversal = false; /** random number seed **/ int m_nSeed = 1; /** random number generator **/ Random m_random; /** * Returns an instance of a TechnicalInformation object, containing detailed * information about the technical background of this class, e.g., paper * reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.PHDTHESIS); result.setValue(Field.AUTHOR, "R.R. 
Bouckaert"); result.setValue(Field.YEAR, "1995"); result.setValue(Field.TITLE, "Bayesian Belief Networks: from Construction to Inference"); result.setValue(Field.INSTITUTION, "University of Utrecht"); result.setValue(Field.ADDRESS, "Utrecht, Netherlands"); return result; } /** * * @param bayesNet the bayes net to use * @param instances the data to use * @throws Exception if something goes wrong */ @Override public void search(BayesNet bayesNet, Instances instances) throws Exception { m_random = new Random(m_nSeed); // determine base scores double fCurrentScore = calcScore(bayesNet); // keep track of best scoring network double fBestScore = fCurrentScore; BayesNet bestBayesNet = new BayesNet(); bestBayesNet.m_Instances = instances; bestBayesNet.initStructure(); copyParentSets(bestBayesNet, bayesNet); double fTemp = m_fTStart; for (int iRun = 0; iRun < m_nRuns; iRun++) { boolean bRunSucces = false; double fDeltaScore = 0.0; while (!bRunSucces) { // pick two nodes at random int iTailNode = m_random.nextInt(instances.numAttributes()); int iHeadNode = m_random.nextInt(instances.numAttributes()); while (iTailNode == iHeadNode) { iHeadNode = m_random.nextInt(instances.numAttributes()); } if (isArc(bayesNet, iHeadNode, iTailNode)) { bRunSucces = true; // either try a delete bayesNet.getParentSet(iHeadNode).deleteParent(iTailNode, instances); double fScore = calcScore(bayesNet); fDeltaScore = fScore - fCurrentScore; // System.out.println("Try delete " + iTailNode + "->" + iHeadNode + // " dScore = " + fDeltaScore); if (fTemp * Math .log((Math.abs(m_random.nextInt()) % 10000) / 10000.0 + 1e-100) < fDeltaScore) { // System.out.println("success!!!"); fCurrentScore = fScore; } else { // roll back bayesNet.getParentSet(iHeadNode).addParent(iTailNode, instances); } } else { // try to add an arc if (addArcMakesSense(bayesNet, instances, iHeadNode, iTailNode)) { bRunSucces = true; double fScore = calcScoreWithExtraParent(iHeadNode, iTailNode); fDeltaScore = fScore - fCurrentScore; 
// System.out.println("Try add " + iTailNode + "->" + iHeadNode + // " dScore = " + fDeltaScore); if (fTemp * Math .log((Math.abs(m_random.nextInt()) % 10000) / 10000.0 + 1e-100) < fDeltaScore) { // System.out.println("success!!!"); bayesNet.getParentSet(iHeadNode).addParent(iTailNode, instances); fCurrentScore = fScore; } } } } if (fCurrentScore > fBestScore) { copyParentSets(bestBayesNet, bayesNet); } fTemp = fTemp * m_fDelta; } copyParentSets(bayesNet, bestBayesNet); } // buildStructure /** * CopyParentSets copies parent sets of source to dest BayesNet * * @param dest destination network * @param source source network */ void copyParentSets(BayesNet dest, BayesNet source) { int nNodes = source.getNrOfNodes(); // clear parent set first for (int iNode = 0; iNode < nNodes; iNode++) { dest.getParentSet(iNode).copy(source.getParentSet(iNode)); } } // CopyParentSets /** * @return double */ public double getDelta() { return m_fDelta; } /** * @return double */ public double getTStart() { return m_fTStart; } /** * @return int */ public int getRuns() { return m_nRuns; } /** * Sets the m_fDelta. * * @param fDelta The m_fDelta to set */ public void setDelta(double fDelta) { m_fDelta = fDelta; } /** * Sets the m_fTStart. * * @param fTStart The m_fTStart to set */ public void setTStart(double fTStart) { m_fTStart = fTStart; } /** * Sets the m_nRuns. * * @param nRuns The m_nRuns to set */ public void setRuns(int nRuns) { m_nRuns = nRuns; } /** * @return random number seed */ public int getSeed() { return m_nSeed; } // getSeed /** * Sets the random number seed * * @param nSeed The number of the seed to set */ public void setSeed(int nSeed) { m_nSeed = nSeed; } // setSeed /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<Option>(4); newVector .addElement(new Option("\tStart temperature", "A", 1, "-A <float>")); newVector .addElement(new Option("\tNumber of runs", "U", 1, "-U <integer>")); newVector .addElement(new Option("\tDelta temperature", "D", 1, "-D <float>")); newVector .addElement(new Option("\tRandom number seed", "R", 1, "-R <seed>")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -A &lt;float&gt; * Start temperature * </pre> * * <pre> * -U &lt;integer&gt; * Number of runs * </pre> * * <pre> * -D &lt;float&gt; * Delta temperature * </pre> * * <pre> * -R &lt;seed&gt; * Random number seed * </pre> * * <pre> * -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node. * </pre> * * <pre> * -S [LOO-CV|k-Fold-CV|Cumulative-CV] * Score type (LOO-CV,k-Fold-CV,Cumulative-CV) * </pre> * * <pre> * -Q * Use probabilistic or 0/1 scoring. 
* (default probabilistic scoring) * </pre> * * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { String sTStart = Utils.getOption('A', options); if (sTStart.length() != 0) { setTStart(Double.parseDouble(sTStart)); } String sRuns = Utils.getOption('U', options); if (sRuns.length() != 0) { setRuns(Integer.parseInt(sRuns)); } String sDelta = Utils.getOption('D', options); if (sDelta.length() != 0) { setDelta(Double.parseDouble(sDelta)); } String sSeed = Utils.getOption('R', options); if (sSeed.length() != 0) { setSeed(Integer.parseInt(sSeed)); } super.setOptions(options); } /** * Gets the current settings of the search algorithm. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<String>(); options.add("-A"); options.add("" + getTStart()); options.add("-U"); options.add("" + getRuns()); options.add("-D"); options.add("" + getDelta()); options.add("-R"); options.add("" + getSeed()); Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * This will return a string describing the classifier. * * @return The string. */ @Override public String globalInfo() { return "This Bayes Network learning algorithm uses the general purpose search method " + "of simulated annealing to find a well scoring network structure.\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } // globalInfo /** * @return a string to describe the TStart option. */ public String TStartTipText() { return "Sets the start temperature of the simulated annealing search. " + "The start temperature determines the probability that a step in the 'wrong' direction in the " + "search space is accepted. 
The higher the temperature, the higher the probability of acceptance."; } // TStartTipText /** * @return a string to describe the Runs option. */ public String runsTipText() { return "Sets the number of iterations to be performed by the simulated annealing search."; } // runsTipText /** * @return a string to describe the Delta option. */ public String deltaTipText() { return "Sets the factor with which the temperature (and thus the acceptance probability of " + "steps in the wrong direction in the search space) is decreased in each iteration."; } // deltaTipText /** * @return a string to describe the Seed option. */ public String seedTipText() { return "Initialization value for random number generator." + " Setting the seed allows replicability of experiments."; } // seedTipText /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } // SimulatedAnnealing
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search/global/TAN.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * TAN.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.search.global;

import java.util.Enumeration;

import weka.classifiers.bayes.BayesNet;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;

/**
 * <!-- globalinfo-start --> This Bayes Network learning algorithm determines
 * the maximum weight spanning tree and returns a Naive Bayes network augmented
 * with a tree.<br/>
 * <br/>
 * For more information see:<br/>
 * <br/>
 * N. Friedman, D. Geiger, M. Goldszmidt (1997). Bayesian network classifiers.
 * Machine Learning. 29(2-3):131-163.
 * <p/>
 * <!-- globalinfo-end -->
 *
 * <!-- technical-bibtex-start --> BibTeX:
 *
 * <pre>
 * &#64;article{Friedman1997,
 *    author = {N. Friedman and D. Geiger and M. Goldszmidt},
 *    journal = {Machine Learning},
 *    number = {2-3},
 *    pages = {131-163},
 *    title = {Bayesian network classifiers},
 *    volume = {29},
 *    year = {1997}
 * }
 * </pre>
 * <p/>
 * <!-- technical-bibtex-end -->
 *
 * <!-- options-start --> Valid options are:
 * <p/>
 *
 * <pre>
 * -mbc
 *  Applies a Markov Blanket correction to the network structure,
 *  after a network structure is learned. This ensures that all
 *  nodes in the network are part of the Markov blanket of the
 *  classifier node.
 * </pre>
 *
 * <pre>
 * -S [LOO-CV|k-Fold-CV|Cumulative-CV]
 *  Score type (LOO-CV,k-Fold-CV,Cumulative-CV)
 * </pre>
 *
 * <pre>
 * -Q
 *  Use probabilistic or 0/1 scoring.
 *  (default probabilistic scoring)
 * </pre>
 *
 * <!-- options-end -->
 *
 * @author Remco Bouckaert
 * @version $Revision$
 */
public class TAN extends GlobalScoreSearchAlgorithm implements
  TechnicalInformationHandler {

  /** for serialization */
  static final long serialVersionUID = 1715277053980895298L;

  /**
   * Returns an instance of a TechnicalInformation object, containing detailed
   * information about the technical background of this class, e.g., paper
   * reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  @Override
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.ARTICLE);
    result
      .setValue(Field.AUTHOR, "N. Friedman and D. Geiger and M. Goldszmidt");
    result.setValue(Field.YEAR, "1997");
    result.setValue(Field.TITLE, "Bayesian network classifiers");
    result.setValue(Field.JOURNAL, "Machine Learning");
    result.setValue(Field.VOLUME, "29");
    result.setValue(Field.NUMBER, "2-3");
    result.setValue(Field.PAGES, "131-163");

    return result;
  }

  /**
   * buildStructure determines the network structure/graph of the network using
   * the maximimum weight spanning tree algorithm of Chow and Liu.
   * Starting from a Naive Bayes structure (class node is parent of every
   * attribute), a spanning tree over the non-class attributes is grown
   * greedily (Prim-style) using the score gain of adding one attribute as an
   * extra parent of another as edge weight, and the tree edges are then
   * oriented so every attribute gets at most one non-class parent.
   *
   * NOTE(review): step 1 assumes at least two non-class attributes exist;
   * with fewer, nBestLinkNode1 stays -1 and the linked[] assignment would
   * throw — presumably callers guarantee this. TODO confirm.
   *
   * @param bayesNet the network to build the structure for
   * @param instances the data to use
   * @throws Exception if something goes wrong
   */
  @Override
  public void buildStructure(BayesNet bayesNet, Instances instances)
    throws Exception {
    m_BayesNet = bayesNet;
    // start from Naive Bayes and allow at most 2 parents per node
    // (the class node plus one tree parent)
    m_bInitAsNaiveBayes = true;
    m_nMaxNrOfParents = 2;
    super.buildStructure(bayesNet, instances);
    int nNrOfAtts = instances.numAttributes();

    // TAN greedy search (not restricted by ordering like K2)
    // 1. find strongest link
    // 2. find remaining links by adding strongest link to already
    // connected nodes
    // 3. assign direction to links

    int nClassNode = instances.classIndex();
    // endpoints of the spanning-tree edges; a tree over the
    // (nNrOfAtts - 1) non-class nodes has (nNrOfAtts - 2) edges
    int[] link1 = new int[nNrOfAtts - 1];
    int[] link2 = new int[nNrOfAtts - 1];
    // linked[i] == true once node i is part of the tree
    boolean[] linked = new boolean[nNrOfAtts];

    // 1. find strongest link: best-scoring (child, extra-parent) pair
    // over all ordered pairs of non-class nodes
    int nBestLinkNode1 = -1;
    int nBestLinkNode2 = -1;
    double fBestDeltaScore = 0.0;
    int iLinkNode1;
    for (iLinkNode1 = 0; iLinkNode1 < nNrOfAtts; iLinkNode1++) {
      if (iLinkNode1 != nClassNode) {
        for (int iLinkNode2 = 0; iLinkNode2 < nNrOfAtts; iLinkNode2++) {
          if ((iLinkNode1 != iLinkNode2) && (iLinkNode2 != nClassNode)) {
            double fScore = calcScoreWithExtraParent(iLinkNode1, iLinkNode2);

            if ((nBestLinkNode1 == -1) || (fScore > fBestDeltaScore)) {
              fBestDeltaScore = fScore;
              nBestLinkNode1 = iLinkNode2;
              nBestLinkNode2 = iLinkNode1;
            }
          }
        }
      }
    }

    link1[0] = nBestLinkNode1;
    link2[0] = nBestLinkNode2;
    linked[nBestLinkNode1] = true;
    linked[nBestLinkNode2] = true;

    // 2. find remaining links by adding strongest link to already
    // connected nodes: each new edge must join exactly one linked node
    // with exactly one unlinked node (Prim's condition, keeps it a tree)
    for (int iLink = 1; iLink < nNrOfAtts - 2; iLink++) {
      nBestLinkNode1 = -1;
      for (iLinkNode1 = 0; iLinkNode1 < nNrOfAtts; iLinkNode1++) {
        if (iLinkNode1 != nClassNode) {
          for (int iLinkNode2 = 0; iLinkNode2 < nNrOfAtts; iLinkNode2++) {
            if ((iLinkNode1 != iLinkNode2)
              && (iLinkNode2 != nClassNode)
              && (linked[iLinkNode1] || linked[iLinkNode2])
              && (!linked[iLinkNode1] || !linked[iLinkNode2])) {
              double fScore = calcScoreWithExtraParent(iLinkNode1, iLinkNode2);

              if ((nBestLinkNode1 == -1) || (fScore > fBestDeltaScore)) {
                fBestDeltaScore = fScore;
                nBestLinkNode1 = iLinkNode2;
                nBestLinkNode2 = iLinkNode1;
              }
            }
          }
        }
      }

      link1[iLink] = nBestLinkNode1;
      link2[iLink] = nBestLinkNode2;
      linked[nBestLinkNode1] = true;
      linked[nBestLinkNode2] = true;
    }

    // System.out.println();
    // for (int i = 0; i < 3; i++) {
    // System.out.println(link1[i] + " " + link2[i]);
    // }
    // 3. assign direction to links: orient each tree edge so that no node
    // gains more than one tree parent
    boolean[] hasParent = new boolean[nNrOfAtts];
    for (int iLink = 0; iLink < nNrOfAtts - 2; iLink++) {
      if (!hasParent[link1[iLink]]) {
        bayesNet.getParentSet(link1[iLink]).addParent(link2[iLink], instances);
        hasParent[link1[iLink]] = true;
      } else {
        // link1 already has a tree parent, so the arc must point the
        // other way; both having parents would mean the edge set is not
        // a tree — flagged as an internal error
        if (hasParent[link2[iLink]]) {
          throw new Exception("Bug condition found: too many arrows");
        }
        bayesNet.getParentSet(link2[iLink]).addParent(link1[iLink], instances);
        hasParent[link2[iLink]] = true;
      }
    }
  } // buildStructure

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    return super.listOptions();
  } // listOption

  /**
   * Parses a given list of options. TAN adds no options of its own; all
   * options (see the class javadoc) are handled by the superclass.
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  @Override
  public void setOptions(String[] options) throws Exception {
    super.setOptions(options);
  } // setOptions

  /**
   * Gets the current settings of the Classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {
    return super.getOptions();
  } // getOptions

  /**
   * This will return a string describing the classifier.
   *
   * @return The string.
   */
  @Override
  public String globalInfo() {
    return "This Bayes Network learning algorithm determines the maximum weight spanning tree "
      + "and returns a Naive Bayes network augmented with a tree.\n\n"
      + "For more information see:\n\n" + getTechnicalInformation().toString();
  } // globalInfo

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
} // TAN
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search/global/TabuSearch.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * TabuSearch.java * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.search.global; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.bayes.BayesNet; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; /** * <!-- globalinfo-start --> This Bayes Network learning algorithm uses tabu * search for finding a well scoring Bayes network structure. Tabu search is * hill climbing till an optimum is reached. The following step is the least * worst possible step. The last X steps are kept in a list and none of the * steps in this so called tabu list is considered in taking the next step. The * best network found in this traversal is returned.<br/> * <br/> * For more information see:<br/> * <br/> * R.R. Bouckaert (1995). Bayesian Belief Networks: from Construction to * Inference. Utrecht, Netherlands. * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;phdthesis{Bouckaert1995, * address = {Utrecht, Netherlands}, * author = {R.R. 
Bouckaert}, * institution = {University of Utrecht}, * title = {Bayesian Belief Networks: from Construction to Inference}, * year = {1995} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -L &lt;integer&gt; * Tabu list length * </pre> * * <pre> * -U &lt;integer&gt; * Number of runs * </pre> * * <pre> * -P &lt;nr of parents&gt; * Maximum number of parents * </pre> * * <pre> * -R * Use arc reversal operation. * (default false) * </pre> * * <pre> * -P &lt;nr of parents&gt; * Maximum number of parents * </pre> * * <pre> * -R * Use arc reversal operation. * (default false) * </pre> * * <pre> * -N * Initial structure is empty (instead of Naive Bayes) * </pre> * * <pre> * -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node. * </pre> * * <pre> * -S [LOO-CV|k-Fold-CV|Cumulative-CV] * Score type (LOO-CV,k-Fold-CV,Cumulative-CV) * </pre> * * <pre> * -Q * Use probabilistic or 0/1 scoring. * (default probabilistic scoring) * </pre> * * <!-- options-end --> * * @author Remco Bouckaert (rrb@xm.co.nz) * @version $Revision$ */ public class TabuSearch extends HillClimber implements TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 1176705618756672292L; /** number of runs **/ int m_nRuns = 10; /** size of tabu list **/ int m_nTabuList = 5; /** the actual tabu list **/ Operation[] m_oTabuList = null; /** * Returns an instance of a TechnicalInformation object, containing detailed * information about the technical background of this class, e.g., paper * reference or book this class is based on. 
* * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.PHDTHESIS); result.setValue(Field.AUTHOR, "R.R. Bouckaert"); result.setValue(Field.YEAR, "1995"); result.setValue(Field.TITLE, "Bayesian Belief Networks: from Construction to Inference"); result.setValue(Field.INSTITUTION, "University of Utrecht"); result.setValue(Field.ADDRESS, "Utrecht, Netherlands"); return result; } /** * search determines the network structure/graph of the network with the Tabu * search algorithm. * * @param bayesNet the network to use * @param instances the instances to use * @throws Exception if something goes wrong */ @Override protected void search(BayesNet bayesNet, Instances instances) throws Exception { m_oTabuList = new Operation[m_nTabuList]; int iCurrentTabuList = 0; // keeps track of score pf best structure found so far double fBestScore; double fCurrentScore = calcScore(bayesNet); // keeps track of best structure found so far BayesNet bestBayesNet; // initialize bestBayesNet fBestScore = fCurrentScore; bestBayesNet = new BayesNet(); bestBayesNet.m_Instances = instances; bestBayesNet.initStructure(); copyParentSets(bestBayesNet, bayesNet); // go do the search for (int iRun = 0; iRun < m_nRuns; iRun++) { Operation oOperation = getOptimalOperation(bayesNet, instances); performOperation(bayesNet, instances, oOperation); // sanity check if (oOperation == null) { throw new Exception( "Panic: could not find any step to make. 
Tabu list too long?"); } // update tabu list m_oTabuList[iCurrentTabuList] = oOperation; iCurrentTabuList = (iCurrentTabuList + 1) % m_nTabuList; fCurrentScore += oOperation.m_fScore; // keep track of best network seen so far if (fCurrentScore > fBestScore) { fBestScore = fCurrentScore; copyParentSets(bestBayesNet, bayesNet); } if (bayesNet.getDebug()) { printTabuList(); } } // restore current network to best network copyParentSets(bayesNet, bestBayesNet); // free up memory bestBayesNet = null; } // search /** * copyParentSets copies parent sets of source to dest BayesNet * * @param dest destination network * @param source source network */ void copyParentSets(BayesNet dest, BayesNet source) { int nNodes = source.getNrOfNodes(); // clear parent set first for (int iNode = 0; iNode < nNodes; iNode++) { dest.getParentSet(iNode).copy(source.getParentSet(iNode)); } } // CopyParentSets /** * check whether the operation is not in the tabu list * * @param oOperation operation to be checked * @return true if operation is not in the tabu list */ @Override boolean isNotTabu(Operation oOperation) { for (int iTabu = 0; iTabu < m_nTabuList; iTabu++) { if (oOperation.equals(m_oTabuList[iTabu])) { return false; } } return true; } // isNotTabu /** * print tabu list for debugging purposes. */ void printTabuList() { for (int i = 0; i < m_nTabuList; i++) { Operation o = m_oTabuList[i]; if (o != null) { if (o.m_nOperation == 0) { System.out.print(" +("); } else { System.out.print(" -("); } System.out.print(o.m_nTail + "->" + o.m_nHead + ")"); } } System.out.println(); } // printTabuList /** * @return number of runs */ public int getRuns() { return m_nRuns; } // getRuns /** * Sets the number of runs * * @param nRuns The number of runs to set */ public void setRuns(int nRuns) { m_nRuns = nRuns; } // setRuns /** * @return the Tabu List length */ public int getTabuList() { return m_nTabuList; } // getTabuList /** * Sets the Tabu List length. 
* * @param nTabuList The nTabuList to set */ public void setTabuList(int nTabuList) { m_nTabuList = nTabuList; } // setTabuList /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<Option>(4); newVector.addElement(new Option("\tTabu list length", "L", 1, "-L <integer>")); newVector .addElement(new Option("\tNumber of runs", "U", 1, "-U <integer>")); newVector.addElement(new Option("\tMaximum number of parents", "P", 1, "-P <nr of parents>")); newVector.addElement(new Option( "\tUse arc reversal operation.\n\t(default false)", "R", 0, "-R")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } // listOptions /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -L &lt;integer&gt; * Tabu list length * </pre> * * <pre> * -U &lt;integer&gt; * Number of runs * </pre> * * <pre> * -P &lt;nr of parents&gt; * Maximum number of parents * </pre> * * <pre> * -R * Use arc reversal operation. * (default false) * </pre> * * <pre> * -P &lt;nr of parents&gt; * Maximum number of parents * </pre> * * <pre> * -R * Use arc reversal operation. * (default false) * </pre> * * <pre> * -N * Initial structure is empty (instead of Naive Bayes) * </pre> * * <pre> * -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node. * </pre> * * <pre> * -S [LOO-CV|k-Fold-CV|Cumulative-CV] * Score type (LOO-CV,k-Fold-CV,Cumulative-CV) * </pre> * * <pre> * -Q * Use probabilistic or 0/1 scoring. 
* (default probabilistic scoring) * </pre> * * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { String sTabuList = Utils.getOption('L', options); if (sTabuList.length() != 0) { setTabuList(Integer.parseInt(sTabuList)); } String sRuns = Utils.getOption('U', options); if (sRuns.length() != 0) { setRuns(Integer.parseInt(sRuns)); } super.setOptions(options); } // setOptions /** * Gets the current settings of the search algorithm. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<String>(); options.add("-L"); options.add("" + getTabuList()); options.add("-U"); options.add("" + getRuns()); Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } // getOptions /** * This will return a string describing the classifier. * * @return The string. */ @Override public String globalInfo() { return "This Bayes Network learning algorithm uses tabu search for finding a well scoring " + "Bayes network structure. Tabu search is hill climbing till an optimum is reached. The " + "following step is the least worst possible step. The last X steps are kept in a list and " + "none of the steps in this so called tabu list is considered in taking the next step. " + "The best network found in this traversal is returned.\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } // globalInfo /** * @return a string to describe the Runs option. */ public String runsTipText() { return "Sets the number of steps to be performed."; } // runsTipText /** * @return a string to describe the TabuList option. */ public String tabuListTipText() { return "Sets the length of the tabu list."; } // tabuListTipText /** * Returns the revision string. 
* * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } // TabuSearch
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search/local/GeneticSearch.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * GeneticSearch.java * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.search.local; import java.util.Collections; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.bayes.BayesNet; import weka.classifiers.bayes.net.ParentSet; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Utils; /** * <!-- globalinfo-start --> This Bayes Network learning algorithm uses genetic search for finding a well scoring Bayes network structure. Genetic search works by having a population of Bayes network structures and allow them to mutate and * apply cross over to get offspring. The best network structure found during the process is returned. * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -L &lt;integer&gt; * Population size * </pre> * * <pre> * -A &lt;integer&gt; * Descendant population size * </pre> * * <pre> * -U &lt;integer&gt; * Number of runs * </pre> * * <pre> * -M * Use mutation. * (default true) * </pre> * * <pre> * -C * Use cross-over. * (default true) * </pre> * * <pre> * -O * Use tournament selection (true) or maximum subpopulatin (false). 
* (default false) * </pre> * * <pre> * -R &lt;seed&gt; * Random number seed * </pre> * * <pre> * -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node. * </pre> * * <pre> * -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES] * Score type (BAYES, BDeu, MDL, ENTROPY and AIC) * </pre> * * <!-- options-end --> * * @author Remco Bouckaert (rrb@xm.co.nz) * @version $Revision$ */ public class GeneticSearch extends LocalScoreSearchAlgorithm { /** for serialization */ static final long serialVersionUID = -7037070678911459757L; /** number of runs **/ int m_nRuns = 10; /** size of population **/ int m_nPopulationSize = 10; /** size of descendant population **/ int m_nDescendantPopulationSize = 100; /** use cross-over? **/ boolean m_bUseCrossOver = true; /** use mutation? **/ boolean m_bUseMutation = true; /** use tournament selection or take best sub-population **/ boolean m_bUseTournamentSelection = false; /** random number seed **/ int m_nSeed = 1; /** random number generator **/ Random m_random = null; /** * used in BayesNetRepresentation for efficiently determining whether a number is square */ boolean[] g_bIsSquare; class BayesNetRepresentation implements RevisionHandler { /** number of nodes in network **/ int m_nNodes = 0; /** * bit representation of parent sets m_bits[iTail + iHead * m_nNodes] represents arc iTail->iHead */ boolean[] m_bits; /** score of represented network structure **/ double m_fScore = 0.0f; /** * return score of represented network structure * * @return the score */ public double getScore() { return this.m_fScore; } // getScore /** * c'tor * * @param nNodes * the number of nodes */ BayesNetRepresentation(final int nNodes) { this.m_nNodes = nNodes; } // c'tor /** * initialize with a random structure by randomly placing m_nNodes arcs. 
* * @throws InterruptedException */ public void randomInit() throws InterruptedException { do { this.m_bits = new boolean[this.m_nNodes * this.m_nNodes]; for (int i = 0; i < this.m_nNodes; i++) { int iPos; do { iPos = GeneticSearch.this.m_random.nextInt(this.m_nNodes * this.m_nNodes); } while (this.isSquare(iPos)); this.m_bits[iPos] = true; } } while (this.hasCycles()); this.calcScore(); } /** * calculate score of current network representation As a side effect, the parent sets are set * * @throws InterruptedException */ void calcScore() throws InterruptedException { // clear current network for (int iNode = 0; iNode < this.m_nNodes; iNode++) { ParentSet parentSet = GeneticSearch.this.m_BayesNet.getParentSet(iNode); while (parentSet.getNrOfParents() > 0) { parentSet.deleteLastParent(GeneticSearch.this.m_BayesNet.m_Instances); } } // insert arrows for (int iNode = 0; iNode < this.m_nNodes; iNode++) { ParentSet parentSet = GeneticSearch.this.m_BayesNet.getParentSet(iNode); for (int iNode2 = 0; iNode2 < this.m_nNodes; iNode2++) { if (this.m_bits[iNode2 + iNode * this.m_nNodes]) { parentSet.addParent(iNode2, GeneticSearch.this.m_BayesNet.m_Instances); } } } // calc score this.m_fScore = 0.0; for (int iNode = 0; iNode < this.m_nNodes; iNode++) { this.m_fScore += GeneticSearch.this.calcNodeScore(iNode); } } // calcScore /** * check whether there are cycles in the network * * @return true if a cycle is found, false otherwise */ public boolean hasCycles() { // check for cycles boolean[] bDone = new boolean[this.m_nNodes]; for (int iNode = 0; iNode < this.m_nNodes; iNode++) { // find a node for which all parents are 'done' boolean bFound = false; for (int iNode2 = 0; !bFound && iNode2 < this.m_nNodes; iNode2++) { if (!bDone[iNode2]) { boolean bHasNoParents = true; for (int iParent = 0; iParent < this.m_nNodes; iParent++) { if (this.m_bits[iParent + iNode2 * this.m_nNodes] && !bDone[iParent]) { bHasNoParents = false; } } if (bHasNoParents) { bDone[iNode2] = true; bFound = 
true; } } } if (!bFound) { return true; } } return false; } // hasCycles /** * create clone of current object * * @return cloned object */ BayesNetRepresentation copy() { BayesNetRepresentation b = new BayesNetRepresentation(this.m_nNodes); b.m_bits = new boolean[this.m_bits.length]; for (int i = 0; i < this.m_nNodes * this.m_nNodes; i++) { b.m_bits[i] = this.m_bits[i]; } b.m_fScore = this.m_fScore; return b; } // copy /** * Apply mutation operation to BayesNet Calculate score and as a side effect sets BayesNet parent sets. * * @throws InterruptedException */ void mutate() throws InterruptedException { // flip a bit do { int iBit; do { iBit = GeneticSearch.this.m_random.nextInt(this.m_nNodes * this.m_nNodes); } while (this.isSquare(iBit)); this.m_bits[iBit] = !this.m_bits[iBit]; } while (this.hasCycles()); this.calcScore(); } // mutate /** * Apply cross-over operation to BayesNet Calculate score and as a side effect sets BayesNet parent sets. * * @param other * BayesNetRepresentation to cross over with * @throws InterruptedException */ void crossOver(final BayesNetRepresentation other) throws InterruptedException { boolean[] bits = new boolean[this.m_bits.length]; for (int i = 0; i < this.m_bits.length; i++) { bits[i] = this.m_bits[i]; } int iCrossOverPoint = this.m_bits.length; do { // restore to original state for (int i = iCrossOverPoint; i < this.m_bits.length; i++) { this.m_bits[i] = bits[i]; } // take all bits from cross-over points onwards iCrossOverPoint = GeneticSearch.this.m_random.nextInt(this.m_bits.length); for (int i = iCrossOverPoint; i < this.m_bits.length; i++) { this.m_bits[i] = other.m_bits[i]; } } while (this.hasCycles()); this.calcScore(); } // crossOver /** * check if number is square and initialize g_bIsSquare structure if necessary * * @param nNum * number to check (should be below m_nNodes * m_nNodes) * @return true if number is square */ boolean isSquare(final int nNum) { if (GeneticSearch.this.g_bIsSquare == null || 
GeneticSearch.this.g_bIsSquare.length < nNum) { GeneticSearch.this.g_bIsSquare = new boolean[this.m_nNodes * this.m_nNodes]; for (int i = 0; i < this.m_nNodes; i++) { GeneticSearch.this.g_bIsSquare[i * this.m_nNodes + i] = true; } } return GeneticSearch.this.g_bIsSquare[nNum]; } // isSquare /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } // class BayesNetRepresentation /** * search determines the network structure/graph of the network with a genetic search algorithm. * * @param bayesNet * the network to use * @param instances * the data to use * @throws Exception * if population size doesn fit or neither cross-over or mutation was chosen */ @Override protected void search(final BayesNet bayesNet, final Instances instances) throws Exception { // sanity check if (this.getDescendantPopulationSize() < this.getPopulationSize()) { throw new Exception("Descendant PopulationSize should be at least Population Size"); } if (!this.getUseCrossOver() && !this.getUseMutation()) { throw new Exception("At least one of mutation or cross-over should be used"); } this.m_random = new Random(this.m_nSeed); // keeps track of best structure found so far BayesNet bestBayesNet; // keeps track of score pf best structure found so far double fBestScore = 0.0; for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) { fBestScore += this.calcNodeScore(iAttribute); } // initialize bestBayesNet bestBayesNet = new BayesNet(); bestBayesNet.m_Instances = instances; bestBayesNet.initStructure(); this.copyParentSets(bestBayesNet, bayesNet); // initialize population BayesNetRepresentation[] population = new BayesNetRepresentation[this.getPopulationSize()]; for (int i = 0; i < this.getPopulationSize(); i++) { population[i] = new BayesNetRepresentation(instances.numAttributes()); population[i].randomInit(); if (population[i].getScore() > fBestScore) { 
this.copyParentSets(bestBayesNet, bayesNet); fBestScore = population[i].getScore(); } } // go do the search for (int iRun = 0; iRun < this.m_nRuns; iRun++) { // create descendants BayesNetRepresentation[] descendantPopulation = new BayesNetRepresentation[this.getDescendantPopulationSize()]; for (int i = 0; i < this.getDescendantPopulationSize(); i++) { descendantPopulation[i] = population[this.m_random.nextInt(this.getPopulationSize())].copy(); if (this.getUseMutation()) { if (this.getUseCrossOver() && this.m_random.nextBoolean()) { descendantPopulation[i].crossOver(population[this.m_random.nextInt(this.getPopulationSize())]); } else { descendantPopulation[i].mutate(); } } else { // use crossover descendantPopulation[i].crossOver(population[this.m_random.nextInt(this.getPopulationSize())]); } if (descendantPopulation[i].getScore() > fBestScore) { this.copyParentSets(bestBayesNet, bayesNet); fBestScore = descendantPopulation[i].getScore(); } } // select new population boolean[] bSelected = new boolean[this.getDescendantPopulationSize()]; for (int i = 0; i < this.getPopulationSize(); i++) { int iSelected = 0; if (this.m_bUseTournamentSelection) { // use tournament selection iSelected = this.m_random.nextInt(this.getDescendantPopulationSize()); while (bSelected[iSelected]) { iSelected = (iSelected + 1) % this.getDescendantPopulationSize(); } int iSelected2 = this.m_random.nextInt(this.getDescendantPopulationSize()); while (bSelected[iSelected2]) { iSelected2 = (iSelected2 + 1) % this.getDescendantPopulationSize(); } if (descendantPopulation[iSelected2].getScore() > descendantPopulation[iSelected].getScore()) { iSelected = iSelected2; } } else { // find best scoring network in population while (bSelected[iSelected]) { iSelected++; } double fScore = descendantPopulation[iSelected].getScore(); for (int j = 0; j < this.getDescendantPopulationSize(); j++) { if (!bSelected[j] && descendantPopulation[j].getScore() > fScore) { fScore = descendantPopulation[j].getScore(); 
iSelected = j; } } } population[i] = descendantPopulation[iSelected]; bSelected[iSelected] = true; } } // restore current network to best network this.copyParentSets(bayesNet, bestBayesNet); // free up memory bestBayesNet = null; this.g_bIsSquare = null; } // search /** * copyParentSets copies parent sets of source to dest BayesNet * * @param dest * destination network * @param source * source network */ void copyParentSets(final BayesNet dest, final BayesNet source) { int nNodes = source.getNrOfNodes(); // clear parent set first for (int iNode = 0; iNode < nNodes; iNode++) { dest.getParentSet(iNode).copy(source.getParentSet(iNode)); } } // CopyParentSets /** * @return number of runs */ public int getRuns() { return this.m_nRuns; } // getRuns /** * Sets the number of runs * * @param nRuns * The number of runs to set */ public void setRuns(final int nRuns) { this.m_nRuns = nRuns; } // setRuns /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<Option>(7); newVector.addElement(new Option("\tPopulation size", "L", 1, "-L <integer>")); newVector.addElement(new Option("\tDescendant population size", "A", 1, "-A <integer>")); newVector.addElement(new Option("\tNumber of runs", "U", 1, "-U <integer>")); newVector.addElement(new Option("\tUse mutation.\n\t(default true)", "M", 0, "-M")); newVector.addElement(new Option("\tUse cross-over.\n\t(default true)", "C", 0, "-C")); newVector.addElement(new Option("\tUse tournament selection (true) or maximum subpopulatin (false).\n\t(default false)", "O", 0, "-O")); newVector.addElement(new Option("\tRandom number seed", "R", 1, "-R <seed>")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } // listOptions /** * Parses a given list of options. 
* <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -L &lt;integer&gt; * Population size * </pre> * * <pre> * -A &lt;integer&gt; * Descendant population size * </pre> * * <pre> * -U &lt;integer&gt; * Number of runs * </pre> * * <pre> * -M * Use mutation. * (default true) * </pre> * * <pre> * -C * Use cross-over. * (default true) * </pre> * * <pre> * -O * Use tournament selection (true) or maximum subpopulatin (false). * (default false) * </pre> * * <pre> * -R &lt;seed&gt; * Random number seed * </pre> * * <pre> * -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node. * </pre> * * <pre> * -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES] * Score type (BAYES, BDeu, MDL, ENTROPY and AIC) * </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String sPopulationSize = Utils.getOption('L', options); if (sPopulationSize.length() != 0) { this.setPopulationSize(Integer.parseInt(sPopulationSize)); } String sDescendantPopulationSize = Utils.getOption('A', options); if (sDescendantPopulationSize.length() != 0) { this.setDescendantPopulationSize(Integer.parseInt(sDescendantPopulationSize)); } String sRuns = Utils.getOption('U', options); if (sRuns.length() != 0) { this.setRuns(Integer.parseInt(sRuns)); } String sSeed = Utils.getOption('R', options); if (sSeed.length() != 0) { this.setSeed(Integer.parseInt(sSeed)); } this.setUseMutation(Utils.getFlag('M', options)); this.setUseCrossOver(Utils.getFlag('C', options)); this.setUseTournamentSelection(Utils.getFlag('O', options)); super.setOptions(options); } // setOptions /** * Gets the current settings of the search algorithm. 
* * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<String>(); options.add("-L"); options.add("" + this.getPopulationSize()); options.add("-A"); options.add("" + this.getDescendantPopulationSize()); options.add("-U"); options.add("" + this.getRuns()); options.add("-R"); options.add("" + this.getSeed()); if (this.getUseMutation()) { options.add("-M"); } if (this.getUseCrossOver()) { options.add("-C"); } if (this.getUseTournamentSelection()) { options.add("-O"); } Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } // getOptions /** * @return whether cross-over is used */ public boolean getUseCrossOver() { return this.m_bUseCrossOver; } /** * @return whether mutation is used */ public boolean getUseMutation() { return this.m_bUseMutation; } /** * @return descendant population size */ public int getDescendantPopulationSize() { return this.m_nDescendantPopulationSize; } /** * @return population size */ public int getPopulationSize() { return this.m_nPopulationSize; } /** * @param bUseCrossOver * sets whether cross-over is used */ public void setUseCrossOver(final boolean bUseCrossOver) { this.m_bUseCrossOver = bUseCrossOver; } /** * @param bUseMutation * sets whether mutation is used */ public void setUseMutation(final boolean bUseMutation) { this.m_bUseMutation = bUseMutation; } /** * @return whether Tournament Selection (true) or Maximum Sub-Population (false) should be used */ public boolean getUseTournamentSelection() { return this.m_bUseTournamentSelection; } /** * @param bUseTournamentSelection * sets whether Tournament Selection or Maximum Sub-Population should be used */ public void setUseTournamentSelection(final boolean bUseTournamentSelection) { this.m_bUseTournamentSelection = bUseTournamentSelection; } /** * @param iDescendantPopulationSize * sets descendant population size */ public void setDescendantPopulationSize(final int 
iDescendantPopulationSize) { this.m_nDescendantPopulationSize = iDescendantPopulationSize; } /** * @param iPopulationSize * sets population size */ public void setPopulationSize(final int iPopulationSize) { this.m_nPopulationSize = iPopulationSize; } /** * @return random number seed */ public int getSeed() { return this.m_nSeed; } // getSeed /** * Sets the random number seed * * @param nSeed * The number of the seed to set */ public void setSeed(final int nSeed) { this.m_nSeed = nSeed; } // setSeed /** * This will return a string describing the classifier. * * @return The string. */ @Override public String globalInfo() { return "This Bayes Network learning algorithm uses genetic search for finding a well scoring " + "Bayes network structure. Genetic search works by having a population of Bayes network structures " + "and allow them to mutate and apply cross over to get offspring. The best network structure " + "found during the process is returned."; } // globalInfo /** * @return a string to describe the Runs option. */ public String runsTipText() { return "Sets the number of generations of Bayes network structure populations."; } // runsTipText /** * @return a string to describe the Seed option. */ public String seedTipText() { return "Initialization value for random number generator." + " Setting the seed allows replicability of experiments."; } // seedTipText /** * @return a string to describe the Population Size option. */ public String populationSizeTipText() { return "Sets the size of the population of network structures that is selected each generation."; } // populationSizeTipText /** * @return a string to describe the Descendant Population Size option. */ public String descendantPopulationSizeTipText() { return "Sets the size of the population of descendants that is created each generation."; } // descendantPopulationSizeTipText /** * @return a string to describe the Use Mutation option. 
*/ public String useMutationTipText() { return "Determines whether mutation is allowed. Mutation flips a bit in the bit " + "representation of the network structure. At least one of mutation or cross-over " + "should be used."; } // useMutationTipText /** * @return a string to describe the Use Cross-Over option. */ public String useCrossOverTipText() { return "Determines whether cross-over is allowed. Cross over combined the bit " + "representations of network structure by taking a random first k bits of one" + "and adding the remainder of the other. At least one of mutation or cross-over " + "should be used."; } // useCrossOverTipText /** * @return a string to describe the Use Tournament Selection option. */ public String useTournamentSelectionTipText() { return "Determines the method of selecting a population. When set to true, tournament " + "selection is used (pick two at random and the highest is allowed to continue). " + "When set to false, the top scoring network structures are selected."; } // useTournamentSelectionTipText /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } // GeneticSearch
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search/local/HillClimber.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * HillClimber.java * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.search.local; import java.io.Serializable; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.bayes.BayesNet; import weka.classifiers.bayes.net.ParentSet; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Utils; /** * <!-- globalinfo-start --> This Bayes Network learning algorithm uses a hill climbing algorithm adding, deleting and reversing arcs. The search is not restricted by an order on the variables (unlike K2). The difference with B and B2 is * that this hill climber also considers arrows part of the naive Bayes structure for deletion. * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -P &lt;nr of parents&gt; * Maximum number of parents * </pre> * * <pre> * -R * Use arc reversal operation. * (default false) * </pre> * * <pre> * -N * Initial structure is empty (instead of Naive Bayes) * </pre> * * <pre> * -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node. 
 * </pre>
 * 
 * <pre>
 * -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES]
 * Score type (BAYES, BDeu, MDL, ENTROPY and AIC)
 * </pre>
 * 
 * <!-- options-end -->
 * 
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision$
 */
public class HillClimber extends LocalScoreSearchAlgorithm {

  /** for serialization */
  static final long serialVersionUID = 4322783593818122403L;

  /**
   * the Operation class contains info on operations performed on the current Bayesian network:
   * the arc (tail -> head), the operation type, and the score delta it would produce.
   */
  class Operation implements Serializable, RevisionHandler {

    /** for serialization */
    static final long serialVersionUID = -4880888790432547895L;

    // constants indicating the type of an operation
    final static int OPERATION_ADD = 0;
    final static int OPERATION_DEL = 1;
    final static int OPERATION_REVERSE = 2;

    /**
     * c'tor
     */
    public Operation() {
    }

    /**
     * c'tor + initializers
     * 
     * @param nTail
     * @param nHead
     * @param nOperation
     */
    public Operation(final int nTail, final int nHead, final int nOperation) {
      this.m_nHead = nHead;
      this.m_nTail = nTail;
      this.m_nOperation = nOperation;
    }

    /**
     * compare this operation with another (same arc and same operation type;
     * the score delta is deliberately ignored).
     * 
     * NOTE(review): this OVERLOADS rather than overrides Object.equals
     * (parameter type is Operation, and there is no matching hashCode), so
     * collection-based lookups would fall back to identity. Callers in this
     * hierarchy invoke it directly, so behaviour is as intended — left as-is.
     * 
     * @param other
     *          operation to compare with
     * @return true if operation is the same
     */
    public boolean equals(final Operation other) {
      if (other == null) {
        return false;
      }
      return ((this.m_nOperation == other.m_nOperation) && (this.m_nHead == other.m_nHead) && (this.m_nTail == other.m_nTail));
    } // equals

    /** number of the tail node **/
    public int m_nTail;

    /** number of the head node **/
    public int m_nHead;

    /** type of operation (ADD, DEL, REVERSE) **/
    public int m_nOperation;

    /** change of score due to this operation; -1E100 is the "not evaluated" sentinel **/
    public double m_fDeltaScore = -1E100;

    /**
     * Returns the revision string.
     * 
     * @return the revision
     */
    @Override
    public String getRevision() {
      return RevisionUtils.extract("$Revision$");
    }
  } // class Operation

  /**
   * cache for remembering the change in score for steps in the search space,
   * indexed by [tail][head]; a reversal is scored as delete(tail->head) + add(head->tail)
   */
  class Cache implements RevisionHandler {

    /** change in score due to adding an arc **/
    double[][] m_fDeltaScoreAdd;

    /** change in score due to deleting an arc **/
    double[][] m_fDeltaScoreDel;

    /**
     * c'tor
     * 
     * @param nNrOfNodes
     *          number of nodes in network, used to determine memory size to reserve
     */
    Cache(final int nNrOfNodes) {
      this.m_fDeltaScoreAdd = new double[nNrOfNodes][nNrOfNodes];
      this.m_fDeltaScoreDel = new double[nNrOfNodes][nNrOfNodes];
    }

    /**
     * set cache entry (ADD goes to the add table, anything else to the delete table)
     * 
     * @param oOperation
     *          operation to perform
     * @param fValue
     *          value to put in cache
     */
    public void put(final Operation oOperation, final double fValue) {
      if (oOperation.m_nOperation == Operation.OPERATION_ADD) {
        this.m_fDeltaScoreAdd[oOperation.m_nTail][oOperation.m_nHead] = fValue;
      } else {
        this.m_fDeltaScoreDel[oOperation.m_nTail][oOperation.m_nHead] = fValue;
      }
    } // put

    /**
     * get cache entry; REVERSE is composed from a delete plus the opposite add
     * 
     * @param oOperation
     *          operation to perform
     * @return cache value
     */
    public double get(final Operation oOperation) {
      switch (oOperation.m_nOperation) {
      case Operation.OPERATION_ADD:
        return this.m_fDeltaScoreAdd[oOperation.m_nTail][oOperation.m_nHead];
      case Operation.OPERATION_DEL:
        return this.m_fDeltaScoreDel[oOperation.m_nTail][oOperation.m_nHead];
      case Operation.OPERATION_REVERSE:
        return this.m_fDeltaScoreDel[oOperation.m_nTail][oOperation.m_nHead] + this.m_fDeltaScoreAdd[oOperation.m_nHead][oOperation.m_nTail];
      }
      // should never get here
      return 0;
    } // get

    /**
     * Returns the revision string.
     * 
     * @return the revision
     */
    @Override
    public String getRevision() {
      return RevisionUtils.extract("$Revision$");
    }
  } // class Cache

  /** cache for storing score differences **/
  Cache m_Cache = null;

  /** use the arc reversal operator **/
  boolean m_bUseArcReversal = false;

  /**
   * search determines the network structure/graph of the network: greedily apply
   * the best-scoring add/delete/(reverse) operation until no operation improves
   * the score. Responds to thread interruption (interruptible-weka fork).
   * 
   * @param bayesNet
   *          the network to use
   * @param instances
   *          the data to use
   * @throws Exception
   *           if something goes wrong
   */
  @Override
  protected void search(final BayesNet bayesNet, final Instances instances) throws Exception {
    this.initCache(bayesNet, instances);

    // go do the search
    Operation oOperation = this.getOptimalOperation(bayesNet, instances);
    while ((oOperation != null) && (oOperation.m_fDeltaScore > 0)) {
      // XXX Interrupt weka
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA!");
      }
      this.performOperation(bayesNet, instances, oOperation);
      oOperation = this.getOptimalOperation(bayesNet, instances);
    }

    // free up memory
    this.m_Cache = null;
  } // search

  /**
   * initCache initializes the cache: per-node delta scores for deleting existing
   * parents (via updateCache) and for adding each possible arc.
   * 
   * @param bayesNet
   *          Bayes network to be learned
   * @param instances
   *          data set to learn from
   * @throws Exception
   *           if something goes wrong
   */
  void initCache(final BayesNet bayesNet, final Instances instances) throws Exception {
    // determine base scores
    double[] fBaseScores = new double[instances.numAttributes()];
    int nNrOfAtts = instances.numAttributes();

    this.m_Cache = new Cache(nNrOfAtts);

    for (int iAttribute = 0; iAttribute < nNrOfAtts; iAttribute++) {
      this.updateCache(iAttribute, nNrOfAtts, bayesNet.getParentSet(iAttribute));
    }

    for (int iAttribute = 0; iAttribute < nNrOfAtts; iAttribute++) {
      fBaseScores[iAttribute] = this.calcNodeScore(iAttribute);
    }

    for (int iAttributeHead = 0; iAttributeHead < nNrOfAtts; iAttributeHead++) {
      for (int iAttributeTail = 0; iAttributeTail < nNrOfAtts; iAttributeTail++) {
        if (iAttributeHead != iAttributeTail) {
          Operation oOperation = new Operation(iAttributeTail, iAttributeHead, Operation.OPERATION_ADD);
          this.m_Cache.put(oOperation, this.calcScoreWithExtraParent(iAttributeHead, iAttributeTail) - fBaseScores[iAttributeHead]);
        }
      }
    }
  } // initCache

  /**
   * check whether the operation is not in the forbidden. For base hill climber, there are no restrictions on operations, so we always return true.
   * (Subclasses such as TabuSearch override this.)
   * 
   * @param oOperation
   *          operation to be checked
   * @return true if operation is not in the tabu list
   */
  boolean isNotTabu(final Operation oOperation) {
    return true;
  } // isNotTabu

  /**
   * getOptimalOperation finds the optimal operation that can be performed on the Bayes network that is not in the tabu list.
   * 
   * @param bayesNet
   *          Bayes network to apply operation on
   * @param instances
   *          data set to learn from
   * @return optimal operation found, or null if no candidate operation was evaluated
   * @throws Exception
   *           if something goes wrong
   */
  Operation getOptimalOperation(final BayesNet bayesNet, final Instances instances) throws Exception {
    Operation oBestOperation = new Operation();

    // Add???
    oBestOperation = this.findBestArcToAdd(bayesNet, instances, oBestOperation);
    // Delete???
    oBestOperation = this.findBestArcToDelete(bayesNet, instances, oBestOperation);
    // Reverse???
    if (this.getUseArcReversal()) {
      oBestOperation = this.findBestArcToReverse(bayesNet, instances, oBestOperation);
    }

    // did we find something? (sentinel -1E100 means no candidate beat the initial Operation)
    if (oBestOperation.m_fDeltaScore == -1E100) {
      return null;
    }

    return oBestOperation;
  } // getOptimalOperation

  /**
   * performOperation applies an operation on the Bayes network and update the cache.
 * 
 * @param bayesNet
 *          Bayes network to apply operation on
 * @param instances
 *          data set to learn from
 * @param oOperation
 *          operation to perform
 * @throws Exception
 *           if something goes wrong
 */
void performOperation(final BayesNet bayesNet, final Instances instances, final Operation oOperation) throws Exception {
  // perform operation
  // NOTE(review): the debug output below prints "head -> tail" although the
  // arc added/removed is tail -> head; inherited from upstream Weka and left
  // unchanged since it is runtime output.
  switch (oOperation.m_nOperation) {
  case Operation.OPERATION_ADD:
    this.applyArcAddition(bayesNet, oOperation.m_nHead, oOperation.m_nTail, instances);
    if (bayesNet.getDebug()) {
      System.out.print("Add " + oOperation.m_nHead + " -> " + oOperation.m_nTail);
    }
    break;
  case Operation.OPERATION_DEL:
    this.applyArcDeletion(bayesNet, oOperation.m_nHead, oOperation.m_nTail, instances);
    if (bayesNet.getDebug()) {
      System.out.print("Del " + oOperation.m_nHead + " -> " + oOperation.m_nTail);
    }
    break;
  case Operation.OPERATION_REVERSE:
    // reversal = delete tail -> head, then add head -> tail
    this.applyArcDeletion(bayesNet, oOperation.m_nHead, oOperation.m_nTail, instances);
    this.applyArcAddition(bayesNet, oOperation.m_nTail, oOperation.m_nHead, instances);
    if (bayesNet.getDebug()) {
      System.out.print("Rev " + oOperation.m_nHead + " -> " + oOperation.m_nTail);
    }
    break;
  }
} // performOperation

/**
 * add the arc iTail -> iHead and refresh the cache entries of the head node
 * 
 * @param bayesNet
 * @param iHead
 * @param iTail
 * @param instances
 * @throws InterruptedException
 */
void applyArcAddition(final BayesNet bayesNet, final int iHead, final int iTail, final Instances instances) throws InterruptedException {
  ParentSet bestParentSet = bayesNet.getParentSet(iHead);
  bestParentSet.addParent(iTail, instances);
  this.updateCache(iHead, instances.numAttributes(), bestParentSet);
} // applyArcAddition

/**
 * delete the arc iTail -> iHead and refresh the cache entries of the head node
 * 
 * @param bayesNet
 * @param iHead
 * @param iTail
 * @param instances
 * @throws InterruptedException
 */
void applyArcDeletion(final BayesNet bayesNet, final int iHead, final int iTail, final Instances instances) throws InterruptedException {
  ParentSet bestParentSet = bayesNet.getParentSet(iHead);
  bestParentSet.deleteParent(iTail, instances);
  this.updateCache(iHead, instances.numAttributes(), bestParentSet);
} // applyArcDeletion

/**
 * find best (or least bad) arc addition operation
 * 
 * @param bayesNet
 *          Bayes network to add arc to
 * @param instances
 *          data set
 * @param oBestOperation
 * @return Operation containing best arc to add, or null if no arc addition is allowed (this can happen if any arc addition introduces a cycle, or all parent sets are filled up to the maximum nr of parents).
 * @throws InterruptedException
 */
Operation findBestArcToAdd(final BayesNet bayesNet, final Instances instances, Operation oBestOperation) throws InterruptedException {
  int nNrOfAtts = instances.numAttributes();

  // find best arc to add
  for (int iAttributeHead = 0; iAttributeHead < nNrOfAtts; iAttributeHead++) {
    // skip nodes whose parent set is already full
    if (bayesNet.getParentSet(iAttributeHead).getNrOfParents() < this.m_nMaxNrOfParents) {
      for (int iAttributeTail = 0; iAttributeTail < nNrOfAtts; iAttributeTail++) {
        // interruptible-weka: honour thread interruption inside the O(n^2) scan
        if (Thread.interrupted()) {
          throw new InterruptedException("Killed WEKA");
        }
        // addArcMakesSense guards against cycles (defined in a superclass)
        if (this.addArcMakesSense(bayesNet, instances, iAttributeHead, iAttributeTail)) {
          Operation oOperation = new Operation(iAttributeTail, iAttributeHead, Operation.OPERATION_ADD);
          if (this.m_Cache.get(oOperation) > oBestOperation.m_fDeltaScore) {
            if (this.isNotTabu(oOperation)) {
              oBestOperation = oOperation;
              oBestOperation.m_fDeltaScore = this.m_Cache.get(oOperation);
            }
          }
        }
      }
    }
  }
  return oBestOperation;
} // findBestArcToAdd

/**
 * find best (or least bad) arc deletion operation
 * 
 * @param bayesNet
 *          Bayes network to delete arc from
 * @param instances
 *          data set
 * @param oBestOperation
 * @return Operation containing best arc to delete, or null if no deletion can be made (happens when there is no arc in the network yet).
 */
Operation findBestArcToDelete(final BayesNet bayesNet, final Instances instances, Operation oBestOperation) {
  int nNrOfAtts = instances.numAttributes();

  // find best arc to delete: only existing arcs (current parents) are candidates
  for (int iNode = 0; iNode < nNrOfAtts; iNode++) {
    ParentSet parentSet = bayesNet.getParentSet(iNode);
    for (int iParent = 0; iParent < parentSet.getNrOfParents(); iParent++) {
      Operation oOperation = new Operation(parentSet.getParent(iParent), iNode, Operation.OPERATION_DEL);
      if (this.m_Cache.get(oOperation) > oBestOperation.m_fDeltaScore) {
        if (this.isNotTabu(oOperation)) {
          oBestOperation = oOperation;
          oBestOperation.m_fDeltaScore = this.m_Cache.get(oOperation);
        }
      }
    }
  }
  return oBestOperation;
} // findBestArcToDelete

/**
 * find best (or least bad) arc reversal operation
 * 
 * @param bayesNet
 *          Bayes network to reverse arc in
 * @param instances
 *          data set
 * @param oBestOperation
 * @return Operation containing best arc to reverse, or null if no reversal is allowed (happens if there is no arc in the network yet, or when any such reversal introduces a cycle).
 */
Operation findBestArcToReverse(final BayesNet bayesNet, final Instances instances, Operation oBestOperation) {
  int nNrOfAtts = instances.numAttributes();

  // find best arc to reverse
  for (int iNode = 0; iNode < nNrOfAtts; iNode++) {
    ParentSet parentSet = bayesNet.getParentSet(iNode);
    for (int iParent = 0; iParent < parentSet.getNrOfParents(); iParent++) {
      int iTail = parentSet.getParent(iParent);
      // is reversal allowed? (no cycle introduced, and the new head has room for another parent)
      if (this.reverseArcMakesSense(bayesNet, instances, iNode, iTail) && bayesNet.getParentSet(iTail).getNrOfParents() < this.m_nMaxNrOfParents) {
        // go check if reversal results in the best step forward
        Operation oOperation = new Operation(parentSet.getParent(iParent), iNode, Operation.OPERATION_REVERSE);
        if (this.m_Cache.get(oOperation) > oBestOperation.m_fDeltaScore) {
          if (this.isNotTabu(oOperation)) {
            oBestOperation = oOperation;
            oBestOperation.m_fDeltaScore = this.m_Cache.get(oOperation);
          }
        }
      }
    }
  }
  return oBestOperation;
} // findBestArcToReverse

/**
 * update the cache due to change of parent set of a node: recompute, for every
 * potential tail, the delta for adding a new arc (when the parent set still has
 * room) or deleting an existing one, relative to the node's current base score.
 * 
 * @param iAttributeHead
 *          node that has its parent set changed
 * @param nNrOfAtts
 *          number of nodes/attributes in data set
 * @param parentSet
 *          new parents set of node iAttributeHead
 * @throws InterruptedException
 */
void updateCache(final int iAttributeHead, final int nNrOfAtts, final ParentSet parentSet) throws InterruptedException {
  // update cache entries for arrows heading towards iAttributeHead
  double fBaseScore = this.calcNodeScore(iAttributeHead);
  int nNrOfParents = parentSet.getNrOfParents();
  for (int iAttributeTail = 0; iAttributeTail < nNrOfAtts; iAttributeTail++) {
    if (iAttributeTail != iAttributeHead) {
      if (!parentSet.contains(iAttributeTail)) {
        // add entries to cache for adding arcs
        if (nNrOfParents < this.m_nMaxNrOfParents) {
          Operation oOperation = new Operation(iAttributeTail, iAttributeHead, Operation.OPERATION_ADD);
          this.m_Cache.put(oOperation, this.calcScoreWithExtraParent(iAttributeHead, iAttributeTail) - fBaseScore);
        }
      } else {
        // add entries to cache for deleting arcs
        Operation oOperation = new Operation(iAttributeTail, iAttributeHead, Operation.OPERATION_DEL);
        this.m_Cache.put(oOperation, this.calcScoreWithMissingParent(iAttributeHead, iAttributeTail) - fBaseScore);
      }
    }
  }
} // updateCache

/**
 * Sets the max number of parents
 * 
 * @param nMaxNrOfParents
 *          the max number of parents
 */
public void setMaxNrOfParents(final int nMaxNrOfParents)
{ this.m_nMaxNrOfParents = nMaxNrOfParents; } /** * Gets the max number of parents. * * @return the max number of parents */ public int getMaxNrOfParents() { return this.m_nMaxNrOfParents; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<Option>(4); newVector.addElement(new Option("\tMaximum number of parents", "P", 1, "-P <nr of parents>")); newVector.addElement(new Option("\tUse arc reversal operation.\n\t(default false)", "R", 0, "-R")); newVector.addElement(new Option("\tInitial structure is empty (instead of Naive Bayes)", "N", 0, "-N")); newVector.addElement(new Option("\tInitial structure specified in XML BIF file", "X", 1, "-X")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } // listOptions /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -P &lt;nr of parents&gt; * Maximum number of parents * </pre> * * <pre> * -R * Use arc reversal operation. * (default false) * </pre> * * <pre> * -N * Initial structure is empty (instead of Naive Bayes) * </pre> * * <pre> * -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node. 
* </pre> * * <pre> * -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES] * Score type (BAYES, BDeu, MDL, ENTROPY and AIC) * </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { this.setUseArcReversal(Utils.getFlag('R', options)); this.setInitAsNaiveBayes(!(Utils.getFlag('N', options))); this.m_sInitalBIFFile = Utils.getOption('X', options); String sMaxNrOfParents = Utils.getOption('P', options); if (sMaxNrOfParents.length() != 0) { this.setMaxNrOfParents(Integer.parseInt(sMaxNrOfParents)); } else { this.setMaxNrOfParents(100000); } super.setOptions(options); } // setOptions /** * Gets the current settings of the search algorithm. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<String>(); if (this.getUseArcReversal()) { options.add("-R"); } if (!this.getInitAsNaiveBayes()) { options.add("-N"); } if (this.m_sInitalBIFFile != null && !this.m_sInitalBIFFile.equals("")) { options.add("-X"); options.add(this.m_sInitalBIFFile); } options.add("-P"); options.add("" + this.m_nMaxNrOfParents); Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } // getOptions /** * Sets whether to init as naive bayes * * @param bInitAsNaiveBayes * whether to init as naive bayes */ public void setInitAsNaiveBayes(final boolean bInitAsNaiveBayes) { this.m_bInitAsNaiveBayes = bInitAsNaiveBayes; } /** * Gets whether to init as naive bayes * * @return whether to init as naive bayes */ public boolean getInitAsNaiveBayes() { return this.m_bInitAsNaiveBayes; } /** * get use the arc reversal operation * * @return whether the arc reversal operation should be used */ public boolean getUseArcReversal() { return this.m_bUseArcReversal; } // getUseArcReversal /** * set use the arc reversal operation * 
* @param bUseArcReversal * whether the arc reversal operation should be used */ public void setUseArcReversal(final boolean bUseArcReversal) { this.m_bUseArcReversal = bUseArcReversal; } // setUseArcReversal /** * This will return a string describing the search algorithm. * * @return The string. */ @Override public String globalInfo() { return "This Bayes Network learning algorithm uses a hill climbing algorithm " + "adding, deleting and reversing arcs. The search is not restricted by an order " + "on the variables (unlike K2). The difference with B and B2 is that this hill " + "climber also considers arrows part of the naive Bayes structure for deletion."; } // globalInfo /** * @return a string to describe the Use Arc Reversal option. */ public String useArcReversalTipText() { return "When set to true, the arc reversal operation is used in the search."; } // useArcReversalTipText /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } // HillClimber
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search/local/K2.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * K2.java * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.search.local; import java.util.Collections; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.bayes.BayesNet; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; /** * <!-- globalinfo-start --> This Bayes Network learning algorithm uses a hill climbing algorithm restricted by an order on the variables.<br/> * <br/> * For more information see:<br/> * <br/> * G.F. Cooper, E. Herskovits (1990). A Bayesian method for constructing Bayesian belief networks from databases.<br/> * <br/> * G. Cooper, E. Herskovits (1992). A Bayesian method for the induction of probabilistic networks from data. Machine Learning. 9(4):309-347.<br/> * <br/> * Works with nominal variables and no missing values only. * <p/> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * &#64;proceedings{Cooper1990, * author = {G.F. Cooper and E. 
Herskovits}, * booktitle = {Proceedings of the Conference on Uncertainty in AI}, * pages = {86-94}, * title = {A Bayesian method for constructing Bayesian belief networks from databases}, * year = {1990} * } * * &#64;article{Cooper1992, * author = {G. Cooper and E. Herskovits}, * journal = {Machine Learning}, * number = {4}, * pages = {309-347}, * title = {A Bayesian method for the induction of probabilistic networks from data}, * volume = {9}, * year = {1992} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -N * Initial structure is empty (instead of Naive Bayes) * </pre> * * <pre> * -P &lt;nr of parents&gt; * Maximum number of parents * </pre> * * <pre> * -R * Random order. * (default false) * </pre> * * <pre> * -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node. * </pre> * * <pre> * -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES] * Score type (BAYES, BDeu, MDL, ENTROPY and AIC) * </pre> * * <!-- options-end --> * * @author Remco Bouckaert (rrb@xm.co.nz) * @version $Revision$ */ public class K2 extends LocalScoreSearchAlgorithm implements TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 6176545934752116631L; /** Holds flag to indicate ordering should be random **/ boolean m_bRandomOrder = false; /** * Returns an instance of a TechnicalInformation object, containing detailed information about the technical background of this class, e.g., paper reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.PROCEEDINGS); result.setValue(Field.AUTHOR, "G.F. Cooper and E. 
Herskovits"); result.setValue(Field.YEAR, "1990"); result.setValue(Field.TITLE, "A Bayesian method for constructing Bayesian belief networks from databases"); result.setValue(Field.BOOKTITLE, "Proceedings of the Conference on Uncertainty in AI"); result.setValue(Field.PAGES, "86-94"); additional = result.add(Type.ARTICLE); additional.setValue(Field.AUTHOR, "G. Cooper and E. Herskovits"); additional.setValue(Field.YEAR, "1992"); additional.setValue(Field.TITLE, "A Bayesian method for the induction of probabilistic networks from data"); additional.setValue(Field.JOURNAL, "Machine Learning"); additional.setValue(Field.VOLUME, "9"); additional.setValue(Field.NUMBER, "4"); additional.setValue(Field.PAGES, "309-347"); return result; } /** * search determines the network structure/graph of the network with the K2 algorithm, restricted by its initial structure (which can be an empty graph, or a Naive Bayes graph. * * @param bayesNet * the network * @param instances * the data to work with * @throws Exception * if something goes wrong */ @Override public void search(final BayesNet bayesNet, final Instances instances) throws Exception { int nOrder[] = new int[instances.numAttributes()]; nOrder[0] = instances.classIndex(); int nAttribute = 0; for (int iOrder = 1; iOrder < instances.numAttributes(); iOrder++) { if (nAttribute == instances.classIndex()) { nAttribute++; } nOrder[iOrder] = nAttribute++; } if (this.m_bRandomOrder) { // generate random ordering (if required) Random random = new Random(); int iClass; if (this.getInitAsNaiveBayes()) { iClass = 0; } else { iClass = -1; } for (int iOrder = 0; iOrder < instances.numAttributes(); iOrder++) { int iOrder2 = random.nextInt(instances.numAttributes()); if (iOrder != iClass && iOrder2 != iClass) { int nTmp = nOrder[iOrder]; nOrder[iOrder] = nOrder[iOrder2]; nOrder[iOrder2] = nTmp; } } } // determine base scores double[] fBaseScores = new double[instances.numAttributes()]; for (int iOrder = 0; iOrder < 
instances.numAttributes(); iOrder++) { // XXX interrupt weka if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } int iAttribute = nOrder[iOrder]; fBaseScores[iAttribute] = this.calcNodeScore(iAttribute); } // K2 algorithm: greedy search restricted by ordering for (int iOrder = 1; iOrder < instances.numAttributes(); iOrder++) { // XXX interrupt weka if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } int iAttribute = nOrder[iOrder]; double fBestScore = fBaseScores[iAttribute]; boolean bProgress = (bayesNet.getParentSet(iAttribute).getNrOfParents() < this.getMaxNrOfParents()); while (bProgress) { int nBestAttribute = -1; for (int iOrder2 = 0; iOrder2 < iOrder; iOrder2++) { // XXX interrupt weka if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } int iAttribute2 = nOrder[iOrder2]; double fScore = this.calcScoreWithExtraParent(iAttribute, iAttribute2); if (fScore > fBestScore) { fBestScore = fScore; nBestAttribute = iAttribute2; } } if (nBestAttribute != -1) { bayesNet.getParentSet(iAttribute).addParent(nBestAttribute, instances); fBaseScores[iAttribute] = fBestScore; bProgress = (bayesNet.getParentSet(iAttribute).getNrOfParents() < this.getMaxNrOfParents()); } else { bProgress = false; } } } } // buildStructure /** * Sets the max number of parents * * @param nMaxNrOfParents * the max number of parents */ public void setMaxNrOfParents(final int nMaxNrOfParents) { this.m_nMaxNrOfParents = nMaxNrOfParents; } /** * Gets the max number of parents. 
* * @return the max number of parents */ public int getMaxNrOfParents() { return this.m_nMaxNrOfParents; } /** * Sets whether to init as naive bayes * * @param bInitAsNaiveBayes * whether to init as naive bayes */ public void setInitAsNaiveBayes(final boolean bInitAsNaiveBayes) { this.m_bInitAsNaiveBayes = bInitAsNaiveBayes; } /** * Gets whether to init as naive bayes * * @return whether to init as naive bayes */ public boolean getInitAsNaiveBayes() { return this.m_bInitAsNaiveBayes; } /** * Set random order flag * * @param bRandomOrder * the random order flag */ public void setRandomOrder(final boolean bRandomOrder) { this.m_bRandomOrder = bRandomOrder; } // SetRandomOrder /** * Get random order flag * * @return the random order flag */ public boolean getRandomOrder() { return this.m_bRandomOrder; } // getRandomOrder /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<Option>(2); newVector.addElement(new Option("\tInitial structure is empty (instead of Naive Bayes)", "N", 0, "-N")); newVector.addElement(new Option("\tMaximum number of parents", "P", 1, "-P <nr of parents>")); newVector.addElement(new Option("\tRandom order.\n" + "\t(default false)", "R", 0, "-R")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -N * Initial structure is empty (instead of Naive Bayes) * </pre> * * <pre> * -P &lt;nr of parents&gt; * Maximum number of parents * </pre> * * <pre> * -R * Random order. * (default false) * </pre> * * <pre> * -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node. 
* </pre> * * <pre> * -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES] * Score type (BAYES, BDeu, MDL, ENTROPY and AIC) * </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { this.setRandomOrder(Utils.getFlag('R', options)); this.m_bInitAsNaiveBayes = !(Utils.getFlag('N', options)); String sMaxNrOfParents = Utils.getOption('P', options); if (sMaxNrOfParents.length() != 0) { this.setMaxNrOfParents(Integer.parseInt(sMaxNrOfParents)); } else { this.setMaxNrOfParents(100000); } super.setOptions(options); } /** * Gets the current settings of the search algorithm. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<String>(); options.add("-P"); options.add("" + this.m_nMaxNrOfParents); if (!this.m_bInitAsNaiveBayes) { options.add("-N"); } if (this.getRandomOrder()) { options.add("-R"); } Collections.addAll(options, super.getOptions()); // Fill up rest with empty strings, not nulls! return options.toArray(new String[0]); } /** * This will return a string describing the search algorithm. * * @return The string. */ @Override public String globalInfo() { return "This Bayes Network learning algorithm uses a hill climbing algorithm " + "restricted by an order on the variables.\n\n" + "For more information see:\n\n" + this.getTechnicalInformation().toString() + "\n\n" + "Works with nominal variables and no missing values only."; } /** * @return a string to describe the RandomOrder option. */ public String randomOrderTipText() { return "When set to true, the order of the nodes in the network is random." + " Default random order is false and the order" + " of the nodes in the dataset is used." 
+ " In any case, when the network was initialized as Naive Bayes Network, the" + " class variable is first in the ordering though."; } // randomOrderTipText /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search/local/LAGDHillClimber.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * LAGDHillClimber.java * Copyright (C) 2005-2012 Manuel Neubach * */ package weka.classifiers.bayes.net.search.local; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.bayes.BayesNet; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.Utils; /** * <!-- globalinfo-start --> This Bayes Network learning algorithm uses a Look Ahead Hill Climbing algorithm called LAGD Hill Climbing. Unlike Greedy Hill Climbing it doesn't calculate a best greedy operation (adding, deleting or reversing * an arc) but a sequence of nrOfLookAheadSteps operations, which leads to a network structure whose score is most likely higher in comparison to the network obtained by performing a sequence of nrOfLookAheadSteps greedy operations. The * search is not restricted by an order on the variables (unlike K2). The difference with B and B2 is that this hill climber also considers arrows part of the naive Bayes structure for deletion. 
* <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -L &lt;nr of look ahead steps&gt; * Look Ahead Depth * </pre> * * <pre> * -G &lt;nr of good operations&gt; * Nr of Good Operations * </pre> * * <pre> * -P &lt;nr of parents&gt; * Maximum number of parents * </pre> * * <pre> * -R * Use arc reversal operation. * (default false) * </pre> * * <pre> * -N * Initial structure is empty (instead of Naive Bayes) * </pre> * * <pre> * -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node. * </pre> * * <pre> * -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES] * Score type (BAYES, BDeu, MDL, ENTROPY and AIC) * </pre> * * <!-- options-end --> * * @author Manuel Neubach * @version $Revision$ */ public class LAGDHillClimber extends HillClimber { /** for serialization */ static final long serialVersionUID = 7217437499439184344L; /** Number of Look Ahead Steps **/ int m_nNrOfLookAheadSteps = 2; /** Number of Good Operations per Step **/ int m_nNrOfGoodOperations = 5; /** * search determines the network structure/graph of the network * * @param bayesNet * the network * @param instances * the data to use * @throws Exception * if something goes wrong */ @Override protected void search(final BayesNet bayesNet, final Instances instances) throws Exception { int k = this.m_nNrOfLookAheadSteps; // Number of Look Ahead Steps int l = this.m_nNrOfGoodOperations; // Number of Good Operations per step this.lookAheadInGoodDirectionsSearch(bayesNet, instances, k, l); } // search /** * lookAheadInGoodDirectionsSearch determines the network structure/graph of the network with best score according to LAGD Hill Climbing * * @param bayesNet * the network * @param instances * the data to use * @param nrOfLookAheadSteps * @param nrOfGoodOperations * @throws Exception * if something goes wrong */ 
protected void lookAheadInGoodDirectionsSearch(final BayesNet bayesNet, final Instances instances, int nrOfLookAheadSteps, final int nrOfGoodOperations) throws Exception { this.initCache(bayesNet, instances); while (nrOfLookAheadSteps > 1) { // XXX Interrupt weka if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } boolean legalSequence = true; double sequenceDeltaScore = 0; Operation[] bestOperation = new Operation[nrOfLookAheadSteps]; bestOperation = this.getOptimalOperations(bayesNet, instances, nrOfLookAheadSteps, nrOfGoodOperations); for (int i = 0; i < nrOfLookAheadSteps; i++) { if (bestOperation[i] == null) { legalSequence = false; } else { sequenceDeltaScore += bestOperation[i].m_fDeltaScore; } } while (legalSequence && sequenceDeltaScore > 0) { // XXX Interrupt weka if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } for (int i = 0; i < nrOfLookAheadSteps; i++) { this.performOperation(bayesNet, instances, bestOperation[i]); } bestOperation = this.getOptimalOperations(bayesNet, instances, nrOfLookAheadSteps, nrOfGoodOperations); sequenceDeltaScore = 0; for (int i = 0; i < nrOfLookAheadSteps; i++) { if (bestOperation[i] != null) { sequenceDeltaScore += bestOperation[i].m_fDeltaScore; } else { legalSequence = false; } } } --nrOfLookAheadSteps; } /** last steps with greedy HC **/ Operation oOperation = this.getOptimalOperation(bayesNet, instances); while ((oOperation != null) && (oOperation.m_fDeltaScore > 0)) { // XXX Interrupt weka if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } this.performOperation(bayesNet, instances, oOperation); oOperation = this.getOptimalOperation(bayesNet, instances); } // free up memory this.m_Cache = null; } // lookAheadInGoodDirectionsSearch /** * getAntiOperation determines the Operation, which is needed to cancel oOperation * * @param oOperation * Operation to cancel * @return antiOperation to oOperation * @throws Exception * if something goes wrong 
*/ protected Operation getAntiOperation(final Operation oOperation) throws Exception { if (oOperation.m_nOperation == Operation.OPERATION_ADD) { return (new Operation(oOperation.m_nTail, oOperation.m_nHead, Operation.OPERATION_DEL)); } else { if (oOperation.m_nOperation == Operation.OPERATION_DEL) { return (new Operation(oOperation.m_nTail, oOperation.m_nHead, Operation.OPERATION_ADD)); } else { return (new Operation(oOperation.m_nHead, oOperation.m_nTail, Operation.OPERATION_REVERSE)); } } } // getAntiOperation /** * getGoodOperations determines the nrOfGoodOperations best Operations, which are considered for the calculation of an optimal operationsequence * * @param bayesNet * Bayes network to apply operation on * @param instances * data set to learn from * @param nrOfGoodOperations * number of good operations to consider * @return good operations to consider * @throws Exception * if something goes wrong **/ protected Operation[] getGoodOperations(final BayesNet bayesNet, final Instances instances, final int nrOfGoodOperations) throws Exception { Operation[] goodOperations = new Operation[nrOfGoodOperations]; for (int i = 0; i < nrOfGoodOperations; i++) { goodOperations[i] = this.getOptimalOperation(bayesNet, instances); if (goodOperations[i] != null) { this.m_Cache.put(goodOperations[i], -1E100); } else { i = nrOfGoodOperations; } } for (int i = 0; i < nrOfGoodOperations; i++) { // XXX interrupt weka if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } if (goodOperations[i] != null) { if (goodOperations[i].m_nOperation != Operation.OPERATION_REVERSE) { this.m_Cache.put(goodOperations[i], goodOperations[i].m_fDeltaScore); } else { this.m_Cache.put(goodOperations[i], goodOperations[i].m_fDeltaScore - this.m_Cache.m_fDeltaScoreAdd[goodOperations[i].m_nHead][goodOperations[i].m_nTail]); } } else { i = nrOfGoodOperations; } } return goodOperations; } // getGoodOperations /** * getOptimalOperations determines an optimal operationsequence in 
respect of the parameters nrOfLookAheadSteps and nrOfGoodOperations * * @param bayesNet * Bayes network to apply operation on * @param instances * data set to learn from * @param nrOfLookAheadSteps * number of lood ahead steps to use * @param nrOfGoodOperations * number of good operations to consider * @return optimal sequence of operations in respect to nrOfLookAheadSteps and nrOfGoodOperations * @throws Exception * if something goes wrong **/ protected Operation[] getOptimalOperations(final BayesNet bayesNet, final Instances instances, final int nrOfLookAheadSteps, final int nrOfGoodOperations) throws Exception { if (nrOfLookAheadSteps == 1) { // Abbruch der Rekursion Operation[] bestOperation = new Operation[1]; bestOperation[0] = this.getOptimalOperation(bayesNet, instances); return (bestOperation); // Abbruch der Rekursion } else { double bestDeltaScore = 0; double currentDeltaScore = 0; Operation[] bestOperation = new Operation[nrOfLookAheadSteps]; Operation[] goodOperations = new Operation[nrOfGoodOperations]; Operation[] tempOperation = new Operation[nrOfLookAheadSteps - 1]; goodOperations = this.getGoodOperations(bayesNet, instances, nrOfGoodOperations); for (int i = 0; i < nrOfGoodOperations; i++) { // XXX interrupt weka if (Thread.interrupted()) { throw new InterruptedException("Killed WEKA!"); } if (goodOperations[i] != null) { this.performOperation(bayesNet, instances, goodOperations[i]); tempOperation = this.getOptimalOperations(bayesNet, instances, nrOfLookAheadSteps - 1, nrOfGoodOperations); // rekursiver Abstieg currentDeltaScore = goodOperations[i].m_fDeltaScore; for (int j = 0; j < nrOfLookAheadSteps - 1; j++) { if (tempOperation[j] != null) { currentDeltaScore += tempOperation[j].m_fDeltaScore; } } this.performOperation(bayesNet, instances, this.getAntiOperation(goodOperations[i])); if (currentDeltaScore > bestDeltaScore) { bestDeltaScore = currentDeltaScore; bestOperation[0] = goodOperations[i]; for (int j = 1; j < nrOfLookAheadSteps; j++) { 
bestOperation[j] = tempOperation[j - 1]; } } } else { i = nrOfGoodOperations; } } return (bestOperation); } } // getOptimalOperations /** * Sets the max number of parents * * @param nMaxNrOfParents * the max number of parents */ @Override public void setMaxNrOfParents(final int nMaxNrOfParents) { this.m_nMaxNrOfParents = nMaxNrOfParents; } /** * Gets the max number of parents. * * @return the max number of parents */ @Override public int getMaxNrOfParents() { return this.m_nMaxNrOfParents; } /** * Sets the number of look-ahead steps * * @param nNrOfLookAheadSteps * the number of look-ahead steps */ public void setNrOfLookAheadSteps(final int nNrOfLookAheadSteps) { this.m_nNrOfLookAheadSteps = nNrOfLookAheadSteps; } /** * Gets the number of look-ahead steps * * @return the number of look-ahead step */ public int getNrOfLookAheadSteps() { return this.m_nNrOfLookAheadSteps; } /** * Sets the number of "good operations" * * @param nNrOfGoodOperations * the number of "good operations" */ public void setNrOfGoodOperations(final int nNrOfGoodOperations) { this.m_nNrOfGoodOperations = nNrOfGoodOperations; } /** * Gets the number of "good operations" * * @return the number of "good operations" */ public int getNrOfGoodOperations() { return this.m_nNrOfGoodOperations; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<Option>(2); newVector.addElement(new Option("\tLook Ahead Depth", "L", 2, "-L <nr of look ahead steps>")); newVector.addElement(new Option("\tNr of Good Operations", "G", 5, "-G <nr of good operations>")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } // listOptions /** * Parses a given list of options. 
Valid options are: * <p> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -L &lt;nr of look ahead steps&gt; * Look Ahead Depth * </pre> * * <pre> * -G &lt;nr of good operations&gt; * Nr of Good Operations * </pre> * * <pre> * -P &lt;nr of parents&gt; * Maximum number of parents * </pre> * * <pre> * -R * Use arc reversal operation. * (default false) * </pre> * * <pre> * -N * Initial structure is empty (instead of Naive Bayes) * </pre> * * <pre> * -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node. * </pre> * * <pre> * -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES] * Score type (BAYES, BDeu, MDL, ENTROPY and AIC) * </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String sNrOfLookAheadSteps = Utils.getOption('L', options); if (sNrOfLookAheadSteps.length() != 0) { this.setNrOfLookAheadSteps(Integer.parseInt(sNrOfLookAheadSteps)); } else { this.setNrOfLookAheadSteps(2); } String sNrOfGoodOperations = Utils.getOption('G', options); if (sNrOfGoodOperations.length() != 0) { this.setNrOfGoodOperations(Integer.parseInt(sNrOfGoodOperations)); } else { this.setNrOfGoodOperations(5); } super.setOptions(options); } // setOptions /** * Gets the current settings of the search algorithm. 
* * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { Vector<String> options = new Vector<String>(); options.add("-L"); options.add("" + this.m_nNrOfLookAheadSteps); options.add("-G"); options.add("" + this.m_nNrOfGoodOperations); Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } // getOptions /** * This will return a string describing the search algorithm. * * @return The string. */ @Override public String globalInfo() { return "This Bayes Network learning algorithm uses a Look Ahead Hill Climbing algorithm called LAGD Hill Climbing." + " Unlike Greedy Hill Climbing it doesn't calculate a best greedy operation (adding, deleting or reversing an arc) " + "but a sequence of nrOfLookAheadSteps operations, which leads to a network structure whose score is most likely " + "higher in comparison to the network obtained by performing a sequence of nrOfLookAheadSteps greedy operations. " + "The search is not restricted by an order " + "on the variables (unlike K2). The difference with B and B2 is that this hill " + "climber also considers arrows part of the naive Bayes structure for deletion."; } // globalInfo /** * @return a string to describe the Number of Look Ahead Steps option. */ public String nrOfLookAheadStepsTipText() { return "Sets the Number of Look Ahead Steps. 'nrOfLookAheadSteps = 2' means that all network structures in a " + "distance of 2 (from the current network structure) are taken into account for the decision which arcs to add, " + "remove or reverse. 'nrOfLookAheadSteps = 1' results in Greedy Hill Climbing."; } // nrOfLookAheadStepsTipText /** * @return a string to describe the Number of Good Operations option. */ public String nrOfGoodOperationsTipText() { return "Sets the Number of Good Operations per Look Ahead Step. 
'nrOfGoodOperations = 5' means that for the next " + "Look Ahead Step only the 5 best Operations (adding, deleting or reversing an arc) are taken into account for the " + "calculation of the best sequence consisting of nrOfLookAheadSteps operations."; } // nrOfGoodOperationsTipText /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } } // LAGDHillClimber
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search/local/LocalScoreSearchAlgorithm.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * LocalScoreSearchAlgorithm.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.search.local;

import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;

import weka.classifiers.bayes.BayesNet;
import weka.classifiers.bayes.net.ParentSet;
import weka.classifiers.bayes.net.search.SearchAlgorithm;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.SelectedTag;
import weka.core.Statistics;
import weka.core.Tag;
import weka.core.Utils;

/**
 * <!-- globalinfo-start --> The ScoreBasedSearchAlgorithm class supports Bayes net structure
 * search algorithms that are based on maximizing scores (as opposed to for example conditional
 * independence based search algorithms).
 * <p/>
 * <!-- globalinfo-end -->
 *
 * <!-- options-start --> Valid options are:
 * <p/>
 *
 * <pre>
 * -mbc
 *  Applies a Markov Blanket correction to the network structure,
 *  after a network structure is learned. This ensures that all
 *  nodes in the network are part of the Markov blanket of the
 *  classifier node.
 * </pre>
 *
 * <pre>
 * -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES]
 *  Score type (BAYES, BDeu, MDL, ENTROPY and AIC)
 * </pre>
 *
 * <!-- options-end -->
 *
 * @author Remco Bouckaert
 * @version $Revision$
 */
public class LocalScoreSearchAlgorithm extends SearchAlgorithm {

  /** for serialization */
  static final long serialVersionUID = 3325995552474190374L;

  /** points to Bayes network for which a structure is searched for **/
  BayesNet m_BayesNet;

  /**
   * default constructor
   */
  public LocalScoreSearchAlgorithm() {
  } // c'tor

  /**
   * constructor
   *
   * @param bayesNet
   *          the network
   * @param instances
   *          the data (currently unused by this constructor; see commented-out assignment below)
   */
  public LocalScoreSearchAlgorithm(final BayesNet bayesNet, final Instances instances) {
    this.m_BayesNet = bayesNet;
    // m_Instances = instances;
  } // c'tor

  /**
   * Holds prior on count (the alpha used as a Dirichlet-style pseudo-count in the BAYES score)
   */
  double m_fAlpha = 0.5;

  /** the score types selectable through setScoreType; IDs map to the Scoreable constants */
  public static final Tag[] TAGS_SCORE_TYPE = { new Tag(Scoreable.BAYES, "BAYES"), new Tag(Scoreable.BDeu, "BDeu"), new Tag(Scoreable.MDL, "MDL"),
      new Tag(Scoreable.ENTROPY, "ENTROPY"), new Tag(Scoreable.AIC, "AIC") };

  /**
   * Holds the score type used to measure quality of network
   */
  int m_nScoreType = Scoreable.BAYES;

  /**
   * logScore returns the log of the quality of a network (e.g. the posterior probability of the
   * network, or the MDL value).
   *
   * @param nType
   *          score type (Bayes, MDL, etc) to calculate score with; a negative value selects the
   *          currently configured score type (m_nScoreType)
   * @return log score, or 0 when the network's distributions have not been estimated yet
   */
  public double logScore(int nType) {
    // nothing to score before the CPTs exist
    if (this.m_BayesNet.m_Distributions == null) {
      return 0;
    }
    if (nType < 0) {
      nType = this.m_nScoreType;
    }
    double fLogScore = 0.0;

    Instances instances = this.m_BayesNet.m_Instances;

    // sum per-node, per-parent-configuration log scores over the whole network
    for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) {
      int nCardinality = this.m_BayesNet.getParentSet(iAttribute).getCardinalityOfParents();
      for (int iParent = 0; iParent < nCardinality; iParent++) {
        fLogScore += ((Scoreable) this.m_BayesNet.m_Distributions[iAttribute][iParent]).logScore(nType, nCardinality);
      }

      // MDL and AIC additionally penalize model complexity (number of free parameters)
      switch (nType) {
      case (Scoreable.MDL): {
        fLogScore -= 0.5 * this.m_BayesNet.getParentSet(iAttribute).getCardinalityOfParents() * (instances.attribute(iAttribute).numValues() - 1)
            * Math.log(this.m_BayesNet.getNumInstances());
      }
        break;
      case (Scoreable.AIC): {
        fLogScore -= this.m_BayesNet.getParentSet(iAttribute).getCardinalityOfParents() * (instances.attribute(iAttribute).numValues() - 1);
      }
        break;
      }
    }

    return fLogScore;
  } // logScore

  /**
   * buildStructure determines the network structure/graph of the network with the K2 algorithm,
   * restricted by its initial structure (which can be an empty graph, or a Naive Bayes graph.
   *
   * @param bayesNet
   *          the network
   * @param instances
   *          the data to use
   * @throws Exception
   *           if something goes wrong
   */
  @Override
  public void buildStructure(final BayesNet bayesNet, final Instances instances) throws Exception {
    this.m_BayesNet = bayesNet;
    super.buildStructure(bayesNet, instances);
  } // buildStructure

  /**
   * Calc Node Score for given parent set
   *
   * @param nNode
   *          node for which the score is calculate
   * @return log score
   * @throws InterruptedException
   *           if the thread is interrupted while counting/scoring
   */
  public double calcNodeScore(final int nNode) throws InterruptedException {
    // use the AD-tree counting structure when available; otherwise count by scanning instances
    if (this.m_BayesNet.getUseADTree() && this.m_BayesNet.getADTree() != null) {
      return this.calcNodeScoreADTree(nNode);
    } else {
      return this.calcNodeScorePlain(nNode);
    }
  }

  /**
   * helper function for CalcNodeScore above using the ADTree data structure
   *
   * @param nNode
   *          node for which the score is calculate
   * @return log score
   * @throws InterruptedException
   *           if the thread is interrupted while scoring
   */
  private double calcNodeScoreADTree(final int nNode) throws InterruptedException {
    Instances instances = this.m_BayesNet.m_Instances;
    ParentSet oParentSet = this.m_BayesNet.getParentSet(nNode);
    // get set of parents, insert iNode
    int nNrOfParents = oParentSet.getNrOfParents();
    int[] nNodes = new int[nNrOfParents + 1];
    for (int iParent = 0; iParent < nNrOfParents; iParent++) {
      nNodes[iParent] = oParentSet.getParent(iParent);
    }
    nNodes[nNrOfParents] = nNode;
    // calculate offsets: stride of each node's value in the flattened count array
    // (node itself gets stride 1; parents get increasing strides)
    int[] nOffsets = new int[nNrOfParents + 1];
    int nOffset = 1;
    nOffsets[nNrOfParents] = 1;
    nOffset *= instances.attribute(nNode).numValues();
    for (int iNode = nNrOfParents - 1; iNode >= 0; iNode--) {
      nOffsets[iNode] = nOffset;
      nOffset *= instances.attribute(nNodes[iNode]).numValues();
    }
    // sort nNodes & offsets (insertion sort keeping both arrays aligned;
    // the AD-tree lookup expects node indices in ascending order)
    for (int iNode = 1; iNode < nNodes.length; iNode++) {
      int iNode2 = iNode;
      while (iNode2 > 0 && nNodes[iNode2] < nNodes[iNode2 - 1]) {
        int h = nNodes[iNode2];
        nNodes[iNode2] = nNodes[iNode2 - 1];
        nNodes[iNode2 - 1] = h;
        h = nOffsets[iNode2];
        nOffsets[iNode2] = nOffsets[iNode2 - 1];
        nOffsets[iNode2 - 1] = h;
        iNode2--;
      }
    }
    // get counts from ADTree
    int nCardinality = oParentSet.getCardinalityOfParents();
    int numValues = instances.attribute(nNode).numValues();
    int[] nCounts = new int[nCardinality * numValues];
    // if (nNrOfParents > 1) {
    this.m_BayesNet.getADTree().getCounts(nCounts, nNodes, nOffsets, 0, 0, false);

    return this.calcScoreOfCounts(nCounts, nCardinality, numValues, instances);
  } // CalcNodeScore

  /**
   * helper for calcNodeScore that counts frequencies by a full pass over the instances
   * (used when no AD-tree is available).
   *
   * @param nNode
   *          node for which the score is calculate
   * @return log score
   * @throws InterruptedException
   *           if the thread is interrupted while scoring
   */
  private double calcNodeScorePlain(final int nNode) throws InterruptedException {
    Instances instances = this.m_BayesNet.m_Instances;
    ParentSet oParentSet = this.m_BayesNet.getParentSet(nNode);

    // determine cardinality of parent set & reserve space for frequency counts
    int nCardinality = oParentSet.getCardinalityOfParents();
    int numValues = instances.attribute(nNode).numValues();
    int[] nCounts = new int[nCardinality * numValues];

    // initialize (don't need this? Java zero-initializes arrays, but kept for fidelity)
    for (int iParent = 0; iParent < nCardinality * numValues; iParent++) {
      nCounts[iParent] = 0;
    }

    // estimate distributions: one pass over the data accumulating joint counts of
    // (parent configuration, node value)
    Enumeration<Instance> enumInsts = instances.enumerateInstances();
    while (enumInsts.hasMoreElements()) {
      Instance instance = enumInsts.nextElement();

      // updateClassifier;
      // iCPT encodes the parent value combination as a mixed-radix index
      double iCPT = 0;

      for (int iParent = 0; iParent < oParentSet.getNrOfParents(); iParent++) {
        int nParent = oParentSet.getParent(iParent);
        iCPT = iCPT * instances.attribute(nParent).numValues() + instance.value(nParent);
      }
      nCounts[numValues * ((int) iCPT) + (int) instance.value(nNode)]++;
    }

    return this.calcScoreOfCounts(nCounts, nCardinality, numValues, instances);
  } // CalcNodeScore

  /**
   * utility function used by CalcScore and CalcNodeScore to determine the score based on observed
   * frequencies.
   *
   * @param nCounts
   *          array with observed frequencies (flattened: parent configuration major, node value minor)
   * @param nCardinality
   *          ardinality of parent set
   * @param numValues
   *          number of values a node can take
   * @param instances
   *          to calc score with
   * @return log score
   * @throws InterruptedException
   *           if the thread is interrupted while scoring
   */
  protected double calcScoreOfCounts(final int[] nCounts, final int nCardinality, final int numValues, final Instances instances) throws InterruptedException {

    // calculate scores using the distributions
    double fLogScore = 0.0;

    for (int iParent = 0; iParent < nCardinality; iParent++) {
      // XXX kill weka
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA!");
      }
      switch (this.m_nScoreType) {

      case (Scoreable.BAYES): {
        double nSumOfCounts = 0;

        for (int iSymbol = 0; iSymbol < numValues; iSymbol++) {
          // XXX kill weka
          if (Thread.interrupted()) {
            throw new InterruptedException("Killed WEKA!");
          }
          if (this.m_fAlpha + nCounts[iParent * numValues + iSymbol] != 0) {
            fLogScore += Statistics.lnGamma(this.m_fAlpha + nCounts[iParent * numValues + iSymbol]);
            nSumOfCounts += this.m_fAlpha + nCounts[iParent * numValues + iSymbol];
          }
        }

        if (nSumOfCounts != 0) {
          fLogScore -= Statistics.lnGamma(nSumOfCounts);
        }

        if (this.m_fAlpha != 0) {
          fLogScore -= numValues * Statistics.lnGamma(this.m_fAlpha);
          fLogScore += Statistics.lnGamma(numValues * this.m_fAlpha);
        }
      }
        break;

      case (Scoreable.BDeu): {
        double nSumOfCounts = 0;

        for (int iSymbol = 0; iSymbol < numValues; iSymbol++) {
          // XXX kill weka
          if (Thread.interrupted()) {
            throw new InterruptedException("Killed WEKA!");
          }
          if (this.m_fAlpha + nCounts[iParent * numValues + iSymbol] != 0) {
            fLogScore += Statistics.lnGamma(1.0 / (numValues * nCardinality) + nCounts[iParent * numValues + iSymbol]);
            nSumOfCounts += 1.0 / (numValues * nCardinality) + nCounts[iParent * numValues + iSymbol];
          }
        }
        fLogScore -= Statistics.lnGamma(nSumOfCounts);

        fLogScore -= numValues * Statistics.lnGamma(1.0 / (numValues * nCardinality));
        fLogScore += Statistics.lnGamma(1.0 / nCardinality);
      }
        break;

      case (Scoreable.MDL):
      case (Scoreable.AIC):
      case (Scoreable.ENTROPY): {
        double nSumOfCounts = 0;

        for (int iSymbol = 0; iSymbol < numValues; iSymbol++) {
          // XXX kill weka
          if (Thread.interrupted()) {
            throw new InterruptedException("Killed WEKA!");
          }
          nSumOfCounts += nCounts[iParent * numValues + iSymbol];
        }

        for (int iSymbol = 0; iSymbol < numValues; iSymbol++) {
          // XXX kill weka
          if (Thread.interrupted()) {
            throw new InterruptedException("Killed WEKA!");
          }
          if (nCounts[iParent * numValues + iSymbol] > 0) {
            fLogScore += nCounts[iParent * numValues + iSymbol] * Math.log(nCounts[iParent * numValues + iSymbol] / nSumOfCounts);
          }
        }
      }
        break;

      default: {
      }
      }
    }

    // MDL and AIC subtract a complexity penalty once per node
    switch (this.m_nScoreType) {
    case (Scoreable.MDL): {
      fLogScore -= 0.5 * nCardinality * (numValues - 1) * Math.log(this.m_BayesNet.getNumInstances());
      // it seems safe to assume that numInstances>0 here
    }
      break;
    case (Scoreable.AIC): {
      fLogScore -= nCardinality * (numValues - 1);
    }
      break;
    }
    return fLogScore;
  } // CalcNodeScore

  /**
   * variant of calcScoreOfCounts taking a two-dimensional count array
   * (nCounts[parent configuration][node value]) instead of a flattened one.
   *
   * @param nCounts
   *          observed frequencies, indexed [parent configuration][node value]
   * @param nCardinality
   *          cardinality of parent set
   * @param numValues
   *          number of values a node can take
   * @param instances
   *          to calc score with
   * @return log score
   * @throws InterruptedException
   *           if the thread is interrupted while scoring
   */
  protected double calcScoreOfCounts2(final int[][] nCounts, final int nCardinality, final int numValues, final Instances instances) throws InterruptedException {

    // calculate scores using the distributions
    double fLogScore = 0.0;

    for (int iParent = 0; iParent < nCardinality; iParent++) {
      // XXX kill weka
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA!");
      }
      switch (this.m_nScoreType) {

      case (Scoreable.BAYES): {
        double nSumOfCounts = 0;

        for (int iSymbol = 0; iSymbol < numValues; iSymbol++) {
          // XXX kill weka
          if (Thread.interrupted()) {
            throw new InterruptedException("Killed WEKA!");
          }
          if (this.m_fAlpha + nCounts[iParent][iSymbol] != 0) {
            fLogScore += Statistics.lnGamma(this.m_fAlpha + nCounts[iParent][iSymbol]);
            nSumOfCounts += this.m_fAlpha + nCounts[iParent][iSymbol];
          }
        }

        if (nSumOfCounts != 0) {
          fLogScore -= Statistics.lnGamma(nSumOfCounts);
        }

        if (this.m_fAlpha != 0) {
          fLogScore -= numValues * Statistics.lnGamma(this.m_fAlpha);
          fLogScore += Statistics.lnGamma(numValues * this.m_fAlpha);
        }
      }
        break;

      case (Scoreable.BDeu): {
        double nSumOfCounts = 0;

        for (int iSymbol = 0; iSymbol < numValues; iSymbol++) {
          // XXX kill weka
          if (Thread.interrupted()) {
            throw new InterruptedException("Killed WEKA!");
          }
          if (this.m_fAlpha + nCounts[iParent][iSymbol] != 0) {
            fLogScore += Statistics.lnGamma(1.0 / (numValues * nCardinality) + nCounts[iParent][iSymbol]);
            nSumOfCounts += 1.0 / (numValues * nCardinality) + nCounts[iParent][iSymbol];
          }
        }
        fLogScore -= Statistics.lnGamma(nSumOfCounts);

        fLogScore -= numValues * Statistics.lnGamma(1.0 / (nCardinality * numValues));
        fLogScore += Statistics.lnGamma(1.0 / nCardinality);
      }
        break;

      case (Scoreable.MDL):
      case (Scoreable.AIC):
      case (Scoreable.ENTROPY): {
        double nSumOfCounts = 0;

        for (int iSymbol = 0; iSymbol < numValues; iSymbol++) {
          // XXX kill weka
          if (Thread.interrupted()) {
            throw new InterruptedException("Killed WEKA!");
          }
          nSumOfCounts += nCounts[iParent][iSymbol];
        }

        for (int iSymbol = 0; iSymbol < numValues; iSymbol++) {
          // XXX kill weka
          if (Thread.interrupted()) {
            throw new InterruptedException("Killed WEKA!");
          }
          if (nCounts[iParent][iSymbol] > 0) {
            fLogScore += nCounts[iParent][iSymbol] * Math.log(nCounts[iParent][iSymbol] / nSumOfCounts);
          }
        }
      }
        break;

      default: {
      }
      }
    }

    // MDL and AIC subtract a complexity penalty once per node
    switch (this.m_nScoreType) {
    case (Scoreable.MDL): {
      fLogScore -= 0.5 * nCardinality * (numValues - 1) * Math.log(this.m_BayesNet.getNumInstances());
      // it seems safe to assume that numInstances>0 here
    }
      break;
    case (Scoreable.AIC): {
      fLogScore -= nCardinality * (numValues - 1);
    }
      break;
    }
    return fLogScore;
  } // CalcNodeScore

  /**
   * Calc Node Score With AddedParent. Temporarily adds the candidate parent, scores the node,
   * then removes the parent again so the network is left unchanged.
   *
   * @param nNode
   *          node for which the score is calculate
   * @param nCandidateParent
   *          candidate parent to add to the existing parent set
   * @return log score, or -1e100 when the candidate is already a parent (sentinel "impossible" score)
   * @throws InterruptedException
   *           if the thread is interrupted while scoring
   */
  public double calcScoreWithExtraParent(final int nNode, final int nCandidateParent) throws InterruptedException {
    ParentSet oParentSet = this.m_BayesNet.getParentSet(nNode);

    // sanity check: nCandidateParent should not be in parent set already
    if (oParentSet.contains(nCandidateParent)) {
      return -1e100;
    }

    // set up candidate parent
    oParentSet.addParent(nCandidateParent, this.m_BayesNet.m_Instances);

    // calculate the score
    double logScore = this.calcNodeScore(nNode);

    // delete temporarily added parent
    oParentSet.deleteLastParent(this.m_BayesNet.m_Instances);

    return logScore;
  } // CalcScoreWithExtraParent

  /**
   * Calc Node Score With Parent Deleted. Temporarily removes the candidate parent, scores the
   * node, then restores the parent at its original position.
   *
   * @param nNode
   *          node for which the score is calculate
   * @param nCandidateParent
   *          candidate parent to delete from the existing parent set
   * @return log score, or -1e100 when the candidate is not currently a parent (sentinel "impossible" score)
   * @throws InterruptedException
   *           if the thread is interrupted while scoring
   */
  public double calcScoreWithMissingParent(final int nNode, final int nCandidateParent) throws InterruptedException {
    ParentSet oParentSet = this.m_BayesNet.getParentSet(nNode);

    // sanity check: nCandidateParent should be in parent set already
    if (!oParentSet.contains(nCandidateParent)) {
      return -1e100;
    }

    // set up candidate parent
    int iParent = oParentSet.deleteParent(nCandidateParent, this.m_BayesNet.m_Instances);

    // calculate the score
    double logScore = this.calcNodeScore(nNode);

    // restore temporarily deleted parent
    oParentSet.addParent(nCandidateParent, iParent, this.m_BayesNet.m_Instances);

    return logScore;
  } // CalcScoreWithMissingParent

  /**
   * set quality measure to be used in searching for networks.
   * Silently ignores tags that do not belong to TAGS_SCORE_TYPE.
   *
   * @param newScoreType
   *          the new score type
   */
  public void setScoreType(final SelectedTag newScoreType) {
    if (newScoreType.getTags() == TAGS_SCORE_TYPE) {
      this.m_nScoreType = newScoreType.getSelectedTag().getID();
    }
  }

  /**
   * get quality measure to be used in searching for networks.
   *
   * @return quality measure
   */
  public SelectedTag getScoreType() {
    return new SelectedTag(this.m_nScoreType, TAGS_SCORE_TYPE);
  }

  /**
   * Delegates to the superclass setting.
   *
   * @param bMarkovBlanketClassifier
   *          whether to apply the Markov Blanket correction
   */
  @Override
  public void setMarkovBlanketClassifier(final boolean bMarkovBlanketClassifier) {
    super.setMarkovBlanketClassifier(bMarkovBlanketClassifier);
  }

  /**
   * Delegates to the superclass setting.
   *
   * @return whether the Markov Blanket correction is applied
   */
  @Override
  public boolean getMarkovBlanketClassifier() {
    return super.getMarkovBlanketClassifier();
  }

  /**
   * Returns an enumeration describing the available options
   *
   * @return an enumeration of all the available options
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> newVector = new Vector<Option>();

    newVector.addElement(new Option("\tApplies a Markov Blanket correction to the network structure, \n" + "\tafter a network structure is learned. This ensures that all \n"
        + "\tnodes in the network are part of the Markov blanket of the \n" + "\tclassifier node.", "mbc", 0, "-mbc"));

    newVector.addElement(new Option("\tScore type (BAYES, BDeu, MDL, ENTROPY and AIC)", "S", 1, "-S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES]"));

    newVector.addAll(Collections.list(super.listOptions()));

    return newVector.elements();
  } // listOptions

  /**
   * Parses a given list of options.
   * <p/>
   *
   * <!-- options-start --> Valid options are:
   * <p/>
   *
   * <pre>
   * -mbc
   *  Applies a Markov Blanket correction to the network structure,
   *  after a network structure is learned. This ensures that all
   *  nodes in the network are part of the Markov blanket of the
   *  classifier node.
   * </pre>
   *
   * <pre>
   * -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES]
   *  Score type (BAYES, BDeu, MDL, ENTROPY and AIC)
   * </pre>
   *
   * <!-- options-end -->
   *
   * @param options
   *          the list of options as an array of strings
   * @throws Exception
   *           if an option is not supported
   */
  @Override
  public void setOptions(final String[] options) throws Exception {

    this.setMarkovBlanketClassifier(Utils.getFlag("mbc", options));

    // an unrecognized -S value leaves the current score type untouched
    String sScore = Utils.getOption('S', options);

    if (sScore.compareTo("BAYES") == 0) {
      this.setScoreType(new SelectedTag(Scoreable.BAYES, TAGS_SCORE_TYPE));
    }
    if (sScore.compareTo("BDeu") == 0) {
      this.setScoreType(new SelectedTag(Scoreable.BDeu, TAGS_SCORE_TYPE));
    }
    if (sScore.compareTo("MDL") == 0) {
      this.setScoreType(new SelectedTag(Scoreable.MDL, TAGS_SCORE_TYPE));
    }
    if (sScore.compareTo("ENTROPY") == 0) {
      this.setScoreType(new SelectedTag(Scoreable.ENTROPY, TAGS_SCORE_TYPE));
    }
    if (sScore.compareTo("AIC") == 0) {
      this.setScoreType(new SelectedTag(Scoreable.AIC, TAGS_SCORE_TYPE));
    }

    super.setOptions(options);
  } // setOptions

  /**
   * Gets the current settings of the search algorithm.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {

    Vector<String> options = new Vector<String>();

    if (this.getMarkovBlanketClassifier()) {
      options.add("-mbc");
    }

    options.add("-S");

    switch (this.m_nScoreType) {
    case (Scoreable.BAYES):
      options.add("BAYES");
      break;
    case (Scoreable.BDeu):
      options.add("BDeu");
      break;
    case (Scoreable.MDL):
      options.add("MDL");
      break;
    case (Scoreable.ENTROPY):
      options.add("ENTROPY");
      break;
    case (Scoreable.AIC):
      options.add("AIC");
      break;
    }

    Collections.addAll(options, super.getOptions());

    return options.toArray(new String[0]);
  } // getOptions

  /**
   * @return a string to describe the ScoreType option.
   */
  public String scoreTypeTipText() {
    return "The score type determines the measure used to judge the quality of a" + " network structure. It can be one of Bayes, BDeu, Minimum Description Length (MDL),"
        + " Akaike Information Criterion (AIC), and Entropy.";
  }

  /**
   * @return a string to describe the MarkovBlanketClassifier option.
   */
  @Override
  public String markovBlanketClassifierTipText() {
    return super.markovBlanketClassifierTipText();
  }

  /**
   * This will return a string describing the search algorithm.
   *
   * @return The string.
   */
  public String globalInfo() {
    return "The ScoreBasedSearchAlgorithm class supports Bayes net " + "structure search algorithms that are based on maximizing "
        + "scores (as opposed to for example conditional independence " + "based search algorithms).";
  } // globalInfo

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search/local/RepeatedHillClimber.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * RepeatedHillClimber.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.search.local;

import java.util.Collections;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;

import weka.classifiers.bayes.BayesNet;
import weka.classifiers.bayes.net.ParentSet;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.Utils;

/**
 * <!-- globalinfo-start --> This Bayes Network learning algorithm repeatedly
 * uses hill climbing starting with a randomly generated network structure and
 * return the best structure of the various runs.
 * <p/>
 * <!-- globalinfo-end -->
 *
 * <!-- options-start --> Valid options are:
 * <p/>
 *
 * <pre>
 * -U &lt;integer&gt;
 *  Number of runs
 * </pre>
 *
 * <pre>
 * -A &lt;seed&gt;
 *  Random number seed
 * </pre>
 *
 * <pre>
 * -P &lt;nr of parents&gt;
 *  Maximum number of parents
 * </pre>
 *
 * <pre>
 * -R
 *  Use arc reversal operation.
 *  (default false)
 * </pre>
 *
 * <pre>
 * -N
 *  Initial structure is empty (instead of Naive Bayes)
 * </pre>
 *
 * <pre>
 * -mbc
 *  Applies a Markov Blanket correction to the network structure,
 *  after a network structure is learned. This ensures that all
 *  nodes in the network are part of the Markov blanket of the
 *  classifier node.
 * </pre>
 *
 * <pre>
 * -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES]
 *  Score type (BAYES, BDeu, MDL, ENTROPY and AIC)
 * </pre>
 *
 * <!-- options-end -->
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision$
 */
public class RepeatedHillClimber extends HillClimber {

  /** for serialization */
  static final long serialVersionUID = -6574084564213041174L;

  /** number of runs **/
  int m_nRuns = 10;

  /** random number seed **/
  int m_nSeed = 1;

  /** random number generator **/
  Random m_random;

  /**
   * search determines the network structure/graph of the network with the
   * repeated hill climbing.
   *
   * @param bayesNet the network
   * @param instances the data to use
   * @throws Exception if something goes wrong
   */
  @Override
  protected void search(final BayesNet bayesNet, final Instances instances) throws Exception {
    this.m_random = new Random(this.getSeed());
    // keeps track of score of best structure found so far
    double fBestScore;
    double fCurrentScore = 0.0;
    for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) {
      fCurrentScore += this.calcNodeScore(iAttribute);
    }

    // keeps track of best structure found so far
    BayesNet bestBayesNet;

    // initialize bestBayesNet with a copy of the incoming network's parent sets
    fBestScore = fCurrentScore;
    bestBayesNet = new BayesNet();
    bestBayesNet.m_Instances = instances;
    bestBayesNet.initStructure();
    this.copyParentSets(bestBayesNet, bayesNet);

    // go do the search: each run restarts hill climbing from a fresh random structure
    for (int iRun = 0; iRun < this.m_nRuns; iRun++) {
      // generate random network
      this.generateRandomNet(bayesNet, instances);

      // search
      super.search(bayesNet, instances);

      // calculate score
      fCurrentScore = 0.0;
      for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) {
        fCurrentScore += this.calcNodeScore(iAttribute);
      }

      // keep track of best network seen so far
      if (fCurrentScore > fBestScore) {
        fBestScore = fCurrentScore;
        this.copyParentSets(bestBayesNet, bayesNet);
      }
    }

    // restore current network to best network
    this.copyParentSets(bayesNet, bestBayesNet);

    // free up memory
    bestBayesNet = null;
    this.m_Cache = null;
  } // search

  /**
   * Resets the network structure, optionally re-seeds it as Naive Bayes, and
   * then inserts a random number of random (acyclic, parent-bounded) arcs.
   *
   * @param bayesNet the network to randomize in place
   * @param instances the data to use
   * @throws InterruptedException if the thread is interrupted
   */
  void generateRandomNet(final BayesNet bayesNet, final Instances instances) throws InterruptedException {
    int nNodes = instances.numAttributes();
    // clear network: remove all existing arcs
    for (int iNode = 0; iNode < nNodes; iNode++) {
      ParentSet parentSet = bayesNet.getParentSet(iNode);
      while (parentSet.getNrOfParents() > 0) {
        parentSet.deleteLastParent(instances);
      }
    }

    // initialize as naive Bayes?
    if (this.getInitAsNaiveBayes()) {
      int iClass = instances.classIndex();
      // initialize parent sets to have arrow from classifier node to
      // each of the other nodes
      for (int iNode = 0; iNode < nNodes; iNode++) {
        if (iNode != iClass) {
          bayesNet.getParentSet(iNode).addParent(iClass, instances);
        }
      }
    }

    // insert random arcs; attempts that would exceed the parent bound or
    // create a cycle (addArcMakesSense) are simply skipped
    int nNrOfAttempts = this.m_random.nextInt(nNodes * nNodes);
    for (int iAttempt = 0; iAttempt < nNrOfAttempts; iAttempt++) {
      int iTail = this.m_random.nextInt(nNodes);
      int iHead = this.m_random.nextInt(nNodes);
      if (bayesNet.getParentSet(iHead).getNrOfParents() < this.getMaxNrOfParents() && this.addArcMakesSense(bayesNet, instances, iHead, iTail)) {
        bayesNet.getParentSet(iHead).addParent(iTail, instances);
      }
    }
  } // generateRandomNet

  /**
   * copyParentSets copies parent sets of source to dest BayesNet
   *
   * @param dest destination network
   * @param source source network
   */
  void copyParentSets(final BayesNet dest, final BayesNet source) {
    int nNodes = source.getNrOfNodes();
    // copy each node's parent set over (ParentSet.copy replaces dest's set)
    for (int iNode = 0; iNode < nNodes; iNode++) {
      dest.getParentSet(iNode).copy(source.getParentSet(iNode));
    }
  } // CopyParentSets

  /**
   * @return number of runs
   */
  public int getRuns() {
    return this.m_nRuns;
  } // getRuns

  /**
   * Sets the number of runs
   *
   * @param nRuns The number of runs to set
   */
  public void setRuns(final int nRuns) {
    this.m_nRuns = nRuns;
  } // setRuns

  /**
   * @return random number seed
   */
  public int getSeed() {
    return this.m_nSeed;
  } // getSeed

  /**
   * Sets the random number seed
   *
   * @param nSeed The number of the seed to set
   */
  public void setSeed(final int nSeed) {
    this.m_nSeed = nSeed;
  } // setSeed

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> newVector = new Vector<Option>(2);

    newVector.addElement(new Option("\tNumber of runs", "U", 1, "-U <integer>"));
    newVector.addElement(new Option("\tRandom number seed", "A", 1, "-A <seed>"));

    newVector.addAll(Collections.list(super.listOptions()));

    return newVector.elements();
  } // listOptions

  /**
   * Parses a given list of options.
   * <p/>
   *
   * <!-- options-start --> Valid options are:
   * <p/>
   *
   * <pre>
   * -U &lt;integer&gt;
   *  Number of runs
   * </pre>
   *
   * <pre>
   * -A &lt;seed&gt;
   *  Random number seed
   * </pre>
   *
   * <pre>
   * -P &lt;nr of parents&gt;
   *  Maximum number of parents
   * </pre>
   *
   * <pre>
   * -R
   *  Use arc reversal operation.
   *  (default false)
   * </pre>
   *
   * <pre>
   * -N
   *  Initial structure is empty (instead of Naive Bayes)
   * </pre>
   *
   * <pre>
   * -mbc
   *  Applies a Markov Blanket correction to the network structure,
   *  after a network structure is learned. This ensures that all
   *  nodes in the network are part of the Markov blanket of the
   *  classifier node.
   * </pre>
   *
   * <pre>
   * -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES]
   *  Score type (BAYES, BDeu, MDL, ENTROPY and AIC)
   * </pre>
   *
   * <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  @Override
  public void setOptions(final String[] options) throws Exception {
    String sRuns = Utils.getOption('U', options);
    if (sRuns.length() != 0) {
      this.setRuns(Integer.parseInt(sRuns));
    }

    String sSeed = Utils.getOption('A', options);
    if (sSeed.length() != 0) {
      this.setSeed(Integer.parseInt(sSeed));
    }

    super.setOptions(options);
  } // setOptions

  /**
   * Gets the current settings of the search algorithm.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {
    Vector<String> options = new Vector<String>();

    options.add("-U");
    options.add("" + this.getRuns());

    options.add("-A");
    options.add("" + this.getSeed());

    Collections.addAll(options, super.getOptions());

    return options.toArray(new String[0]);
  } // getOptions

  /**
   * This will return a string describing the classifier.
   *
   * @return The string.
   */
  @Override
  public String globalInfo() {
    return "This Bayes Network learning algorithm repeatedly uses hill climbing starting " + "with a randomly generated network structure and return the best structure of the "
        + "various runs.";
  } // globalInfo

  /**
   * @return a string to describe the Runs option.
   */
  public String runsTipText() {
    return "Sets the number of times hill climbing is performed.";
  } // runsTipText

  /**
   * @return a string to describe the Seed option.
   */
  public String seedTipText() {
    return "Initialization value for random number generator." + " Setting the seed allows replicability of experiments.";
  } // seedTipText

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search/local/Scoreable.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Scoreable.java
 * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.search.local;

/**
 * Interface for allowing to score a classifier
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision$
 */
public interface Scoreable {

  /**
   * score types (IDs used as the nType argument of logScore and as tag IDs in
   * LocalScoreSearchAlgorithm.TAGS_SCORE_TYPE)
   */
  int BAYES = 0;
  int BDeu = 1;
  int MDL = 2;
  int ENTROPY = 3;
  int AIC = 4;

  /**
   * Returns log-score
   *
   * @param nType the score type (one of the constants above)
   * @param nCardinality cardinality of the parent set of the node being scored
   * @return the log-score
   */
  double logScore(int nType, int nCardinality);
} // interface Scoreable
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search/local/SimulatedAnnealing.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * SimulatedAnnealing.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.search.local;

import java.util.Collections;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;

import weka.classifiers.bayes.BayesNet;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;

/**
 * This Bayes Network learning algorithm uses the general purpose search method of
 * simulated annealing to find a well scoring network structure.
 * <p>
 * For more information see: R.R. Bouckaert (1995). Bayesian Belief Networks: from
 * Construction to Inference. Utrecht, Netherlands.
 * <p>
 * Valid options:
 * <ul>
 * <li>{@code -A <float>} Start temperature</li>
 * <li>{@code -U <integer>} Number of runs</li>
 * <li>{@code -D <float>} Delta temperature (cooling factor per run)</li>
 * <li>{@code -R <seed>} Random number seed</li>
 * <li>{@code -mbc} Apply a Markov Blanket correction to the learned structure</li>
 * <li>{@code -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES]} Score type</li>
 * </ul>
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision$
 */
public class SimulatedAnnealing extends LocalScoreSearchAlgorithm implements TechnicalInformationHandler {

  /** for serialization */
  static final long serialVersionUID = 6951955606060513191L;

  /** start temperature **/
  double m_fTStart = 10;

  /** change in temperature at every run **/
  double m_fDelta = 0.999;

  /** number of runs **/
  int m_nRuns = 10000;

  /** use the arc reversal operator **/
  boolean m_bUseArcReversal = false;

  /** random number seed **/
  int m_nSeed = 1;

  /** random number generator **/
  Random m_random;

  /**
   * Returns an instance of a TechnicalInformation object, containing detailed
   * information about the technical background of this class, e.g., paper
   * reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  @Override
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.PHDTHESIS);
    result.setValue(Field.AUTHOR, "R.R. Bouckaert");
    result.setValue(Field.YEAR, "1995");
    result.setValue(Field.TITLE, "Bayesian Belief Networks: from Construction to Inference");
    result.setValue(Field.INSTITUTION, "University of Utrecht");
    result.setValue(Field.ADDRESS, "Utrecht, Netherlands");

    return result;
  }

  /**
   * Performs the simulated annealing search: in each run a random arc
   * addition or deletion is proposed and accepted either when it improves the
   * score or, with a temperature-dependent probability, when it worsens it.
   * The best scoring network encountered is restored at the end.
   *
   * @param bayesNet the network
   * @param instances the data to use
   * @throws Exception if something goes wrong
   */
  @Override
  public void search(final BayesNet bayesNet, final Instances instances) throws Exception {
    this.m_random = new Random(this.m_nSeed);

    // determine base scores (per-node scores of the initial structure)
    double[] fBaseScores = new double[instances.numAttributes()];
    double fCurrentScore = 0;
    for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) {
      // XXX Interrupt weka
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA!");
      }
      fBaseScores[iAttribute] = this.calcNodeScore(iAttribute);
      fCurrentScore += fBaseScores[iAttribute];
    }

    // keep track of best scoring network
    double fBestScore = fCurrentScore;
    BayesNet bestBayesNet = new BayesNet();
    bestBayesNet.m_Instances = instances;
    bestBayesNet.initStructure();
    this.copyParentSets(bestBayesNet, bayesNet);

    double fTemp = this.m_fTStart;
    for (int iRun = 0; iRun < this.m_nRuns; iRun++) {
      boolean bRunSucces = false;
      double fDeltaScore = 0.0;
      while (!bRunSucces) {
        // XXX Interrupt weka
        if (Thread.interrupted()) {
          throw new InterruptedException("Killed WEKA!");
        }
        // pick two distinct nodes at random
        int iTailNode = this.m_random.nextInt(instances.numAttributes());
        int iHeadNode = this.m_random.nextInt(instances.numAttributes());
        while (iTailNode == iHeadNode) {
          iHeadNode = this.m_random.nextInt(instances.numAttributes());
        }
        if (this.isArc(bayesNet, iHeadNode, iTailNode)) {
          bRunSucces = true;
          // try a delete; accept if the score change passes the annealing test,
          // otherwise roll the deletion back
          bayesNet.getParentSet(iHeadNode).deleteParent(iTailNode, instances);
          double fScore = this.calcNodeScore(iHeadNode);
          fDeltaScore = fScore - fBaseScores[iHeadNode];
          if (fTemp * Math.log((Math.abs(this.m_random.nextInt()) % 10000) / 10000.0 + 1e-100) < fDeltaScore) {
            fCurrentScore += fDeltaScore;
            fBaseScores[iHeadNode] = fScore;
          } else {
            // roll back
            bayesNet.getParentSet(iHeadNode).addParent(iTailNode, instances);
          }
        } else {
          // try to add an arc (only counts as a completed run if the arc is legal)
          if (this.addArcMakesSense(bayesNet, instances, iHeadNode, iTailNode)) {
            bRunSucces = true;
            double fScore = this.calcScoreWithExtraParent(iHeadNode, iTailNode);
            fDeltaScore = fScore - fBaseScores[iHeadNode];
            if (fTemp * Math.log((Math.abs(this.m_random.nextInt()) % 10000) / 10000.0 + 1e-100) < fDeltaScore) {
              bayesNet.getParentSet(iHeadNode).addParent(iTailNode, instances);
              fBaseScores[iHeadNode] = fScore;
              fCurrentScore += fDeltaScore;
            }
          }
        }
      }
      if (fCurrentScore > fBestScore) {
        // BUGFIX: fBestScore was never updated here, so "best" degenerated to
        // "last network scoring better than the initial one". Track the best
        // score the same way TabuSearch.search does.
        fBestScore = fCurrentScore;
        this.copyParentSets(bestBayesNet, bayesNet);
      }
      // cool down
      fTemp = fTemp * this.m_fDelta;
    }

    // restore the best network seen during the search
    this.copyParentSets(bayesNet, bestBayesNet);
  } // buildStructure

  /**
   * CopyParentSets copies parent sets of source to dest BayesNet
   *
   * @param dest destination network
   * @param source source network
   * @throws InterruptedException if the thread is interrupted
   */
  void copyParentSets(final BayesNet dest, final BayesNet source) throws InterruptedException {
    int nNodes = source.getNrOfNodes();
    // clear parent set first
    for (int iNode = 0; iNode < nNodes; iNode++) {
      // XXX Interrupt weka
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA!");
      }
      dest.getParentSet(iNode).copy(source.getParentSet(iNode));
    }
  } // CopyParentSets

  /**
   * @return the cooling factor applied to the temperature each run
   */
  public double getDelta() {
    return this.m_fDelta;
  }

  /**
   * @return the start temperature
   */
  public double getTStart() {
    return this.m_fTStart;
  }

  /**
   * @return the number of runs
   */
  public int getRuns() {
    return this.m_nRuns;
  }

  /**
   * Sets the m_fDelta.
   *
   * @param fDelta The m_fDelta to set
   */
  public void setDelta(final double fDelta) {
    this.m_fDelta = fDelta;
  }

  /**
   * Sets the m_fTStart.
   *
   * @param fTStart The m_fTStart to set
   */
  public void setTStart(final double fTStart) {
    this.m_fTStart = fTStart;
  }

  /**
   * Sets the m_nRuns.
   *
   * @param nRuns The m_nRuns to set
   */
  public void setRuns(final int nRuns) {
    this.m_nRuns = nRuns;
  }

  /**
   * @return random number seed
   */
  public int getSeed() {
    return this.m_nSeed;
  } // getSeed

  /**
   * Sets the random number seed
   *
   * @param nSeed The number of the seed to set
   */
  public void setSeed(final int nSeed) {
    this.m_nSeed = nSeed;
  } // setSeed

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> newVector = new Vector<Option>(4);

    newVector.addElement(new Option("\tStart temperature", "A", 1, "-A <float>"));
    newVector.addElement(new Option("\tNumber of runs", "U", 1, "-U <integer>"));
    newVector.addElement(new Option("\tDelta temperature", "D", 1, "-D <float>"));
    newVector.addElement(new Option("\tRandom number seed", "R", 1, "-R <seed>"));

    newVector.addAll(Collections.list(super.listOptions()));

    return newVector.elements();
  }

  /**
   * Parses a given list of options (see the class javadoc for the list of
   * valid options) and forwards the remainder to the superclass.
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  @Override
  public void setOptions(final String[] options) throws Exception {
    String sTStart = Utils.getOption('A', options);
    if (sTStart.length() != 0) {
      this.setTStart(Double.parseDouble(sTStart));
    }
    String sRuns = Utils.getOption('U', options);
    if (sRuns.length() != 0) {
      this.setRuns(Integer.parseInt(sRuns));
    }
    String sDelta = Utils.getOption('D', options);
    if (sDelta.length() != 0) {
      this.setDelta(Double.parseDouble(sDelta));
    }
    String sSeed = Utils.getOption('R', options);
    if (sSeed.length() != 0) {
      this.setSeed(Integer.parseInt(sSeed));
    }
    super.setOptions(options);
  }

  /**
   * Gets the current settings of the search algorithm.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {
    Vector<String> options = new Vector<String>();

    options.add("-A");
    options.add("" + this.getTStart());

    options.add("-U");
    options.add("" + this.getRuns());

    options.add("-D");
    options.add("" + this.getDelta());

    options.add("-R");
    options.add("" + this.getSeed());

    Collections.addAll(options, super.getOptions());

    return options.toArray(new String[0]);
  }

  /**
   * This will return a string describing the classifier.
   *
   * @return The string.
   */
  @Override
  public String globalInfo() {
    return "This Bayes Network learning algorithm uses the general purpose search method "
        + "of simulated annealing to find a well scoring network structure.\n\n"
        + "For more information see:\n\n" + this.getTechnicalInformation().toString();
  } // globalInfo

  /**
   * @return a string to describe the TStart option.
   */
  public String TStartTipText() {
    return "Sets the start temperature of the simulated annealing search. "
        + "The start temperature determines the probability that a step in the 'wrong' direction in the "
        + "search space is accepted. The higher the temperature, the higher the probability of acceptance.";
  } // TStartTipText

  /**
   * @return a string to describe the Runs option.
   */
  public String runsTipText() {
    return "Sets the number of iterations to be performed by the simulated annealing search.";
  } // runsTipText

  /**
   * @return a string to describe the Delta option.
   */
  public String deltaTipText() {
    return "Sets the factor with which the temperature (and thus the acceptance probability of "
        + "steps in the wrong direction in the search space) is decreased in each iteration.";
  } // deltaTipText

  /**
   * @return a string to describe the Seed option.
   */
  public String seedTipText() {
    return "Initialization value for random number generator."
        + " Setting the seed allows replicability of experiments.";
  } // seedTipText

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
} // SimulatedAnnealing
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search/local/TAN.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * TAN.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.search.local;

import java.util.Enumeration;

import weka.classifiers.bayes.BayesNet;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;

/**
 * This Bayes Network learning algorithm determines the maximum weight spanning
 * tree and returns a Naive Bayes network augmented with a tree (TAN).
 * <p>
 * For more information see: N. Friedman, D. Geiger, M. Goldszmidt (1997).
 * Bayesian network classifiers. Machine Learning. 29(2-3):131-163.
 * <p>
 * Valid options:
 * <ul>
 * <li>{@code -mbc} Apply a Markov Blanket correction to the learned structure</li>
 * <li>{@code -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES]} Score type</li>
 * </ul>
 *
 * @author Remco Bouckaert
 * @version $Revision$
 */
public class TAN extends LocalScoreSearchAlgorithm implements TechnicalInformationHandler {

  /** for serialization */
  static final long serialVersionUID = 965182127977228690L;

  /**
   * Returns an instance of a TechnicalInformation object, containing detailed
   * information about the technical background of this class, e.g., paper
   * reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  @Override
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.ARTICLE);
    result.setValue(Field.AUTHOR, "N. Friedman and D. Geiger and M. Goldszmidt");
    result.setValue(Field.YEAR, "1997");
    result.setValue(Field.TITLE, "Bayesian network classifiers");
    result.setValue(Field.JOURNAL, "Machine Learning");
    result.setValue(Field.VOLUME, "29");
    result.setValue(Field.NUMBER, "2-3");
    result.setValue(Field.PAGES, "131-163");

    return result;
  }

  /**
   * buildStructure determines the network structure/graph of the network using
   * the maximimum weight spanning tree algorithm of Chow and Liu: starting
   * from a Naive Bayes structure, a spanning tree over the non-class
   * attributes is grown greedily by score gain, then each tree edge is
   * directed so that every attribute gets at most one attribute parent
   * (in addition to the class).
   *
   * @param bayesNet the network
   * @param instances the data to use
   * @throws Exception if something goes wrong
   */
  @Override
  public void buildStructure(final BayesNet bayesNet, final Instances instances) throws Exception {
    // start from a Naive Bayes structure with at most 2 parents per node
    // (the class plus one attribute parent)
    this.m_bInitAsNaiveBayes = true;
    this.m_nMaxNrOfParents = 2;
    super.buildStructure(bayesNet, instances);
    int nNrOfAtts = instances.numAttributes();
    // with <= 2 attributes there is no tree to build beyond Naive Bayes
    if (nNrOfAtts <= 2) {
      return;
    }

    // determine base scores (per-node score of the Naive Bayes structure)
    double[] fBaseScores = new double[instances.numAttributes()];
    for (int iAttribute = 0; iAttribute < nNrOfAtts; iAttribute++) {
      // XXX Interrupt weka
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA!");
      }
      fBaseScores[iAttribute] = this.calcNodeScore(iAttribute);
    }

    // cache scores & whether adding an arc makes sense:
    // fScore[head][tail] = score of head with tail as an extra parent
    double[][] fScore = new double[nNrOfAtts][nNrOfAtts];
    for (int iAttributeHead = 0; iAttributeHead < nNrOfAtts; iAttributeHead++) {
      for (int iAttributeTail = 0; iAttributeTail < nNrOfAtts; iAttributeTail++) {
        // XXX Interrupt weka
        if (Thread.interrupted()) {
          throw new InterruptedException("Killed WEKA!");
        }
        if (iAttributeHead != iAttributeTail) {
          fScore[iAttributeHead][iAttributeTail] = this.calcScoreWithExtraParent(iAttributeHead, iAttributeTail);
        }
      }
    }

    // TAN greedy search (not restricted by ordering like K2)
    // 1. find strongest link
    // 2. find remaining links by adding strongest link to already
    // connected nodes
    // 3. assign direction to links
    int nClassNode = instances.classIndex();
    // link1/link2 store the endpoints of the selected tree edges;
    // linked marks nodes already in the tree
    int[] link1 = new int[nNrOfAtts - 1];
    int[] link2 = new int[nNrOfAtts - 1];
    boolean[] linked = new boolean[nNrOfAtts];

    // 1. find strongest link over all pairs of non-class attributes.
    // Note the deliberate swap: node1 receives the inner index, node2 the
    // outer one; the gain is measured as score(outer with inner as parent)
    // minus base score of the outer node.
    int nBestLinkNode1 = -1;
    int nBestLinkNode2 = -1;
    double fBestDeltaScore = 0.0;
    int iLinkNode1;
    for (iLinkNode1 = 0; iLinkNode1 < nNrOfAtts; iLinkNode1++) {
      if (iLinkNode1 != nClassNode) {
        for (int iLinkNode2 = 0; iLinkNode2 < nNrOfAtts; iLinkNode2++) {
          // XXX Interrupt weka
          if (Thread.interrupted()) {
            throw new InterruptedException("Killed WEKA!");
          }
          // the (nBestLinkNode1 == -1) guard accepts the first candidate
          // unconditionally, so fBestDeltaScore need not be reset here
          if ((iLinkNode1 != iLinkNode2) && (iLinkNode2 != nClassNode)
              && ((nBestLinkNode1 == -1) || (fScore[iLinkNode1][iLinkNode2] - fBaseScores[iLinkNode1] > fBestDeltaScore))) {
            fBestDeltaScore = fScore[iLinkNode1][iLinkNode2] - fBaseScores[iLinkNode1];
            nBestLinkNode1 = iLinkNode2;
            nBestLinkNode2 = iLinkNode1;
          }
        }
      }
    }
    link1[0] = nBestLinkNode1;
    link2[0] = nBestLinkNode2;
    linked[nBestLinkNode1] = true;
    linked[nBestLinkNode2] = true;

    // 2. find remaining links by adding strongest link to already
    // connected nodes: a candidate pair must connect exactly one linked node
    // to one unlinked node (XOR condition), which keeps the result a tree
    for (int iLink = 1; iLink < nNrOfAtts - 2; iLink++) {
      nBestLinkNode1 = -1;
      for (iLinkNode1 = 0; iLinkNode1 < nNrOfAtts; iLinkNode1++) {
        if (iLinkNode1 != nClassNode) {
          for (int iLinkNode2 = 0; iLinkNode2 < nNrOfAtts; iLinkNode2++) {
            // XXX Interrupt weka
            if (Thread.interrupted()) {
              throw new InterruptedException("Killed WEKA!");
            }
            if ((iLinkNode1 != iLinkNode2) && (iLinkNode2 != nClassNode)
                && (linked[iLinkNode1] || linked[iLinkNode2])
                && (!linked[iLinkNode1] || !linked[iLinkNode2])
                && ((nBestLinkNode1 == -1) || (fScore[iLinkNode1][iLinkNode2] - fBaseScores[iLinkNode1] > fBestDeltaScore))) {
              fBestDeltaScore = fScore[iLinkNode1][iLinkNode2] - fBaseScores[iLinkNode1];
              nBestLinkNode1 = iLinkNode2;
              nBestLinkNode2 = iLinkNode1;
            }
          }
        }
      }
      link1[iLink] = nBestLinkNode1;
      link2[iLink] = nBestLinkNode2;
      linked[nBestLinkNode1] = true;
      linked[nBestLinkNode2] = true;
    }

    // 3. assign direction to links so that each node gains at most one
    // attribute parent; hasParent tracks nodes that already received one
    boolean[] hasParent = new boolean[nNrOfAtts];
    for (int iLink = 0; iLink < nNrOfAtts - 2; iLink++) {
      // XXX Interrupt weka
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA!");
      }
      if (!hasParent[link1[iLink]]) {
        bayesNet.getParentSet(link1[iLink]).addParent(link2[iLink], instances);
        hasParent[link1[iLink]] = true;
      } else {
        // both endpoints already having a parent would violate the tree
        // property built in step 2
        if (hasParent[link2[iLink]]) {
          throw new Exception("Bug condition found: too many arrows");
        }
        bayesNet.getParentSet(link2[iLink]).addParent(link1[iLink], instances);
        hasParent[link2[iLink]] = true;
      }
    }
  } // buildStructure

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    return super.listOptions();
  } // listOption

  /**
   * Parses a given list of options (only the superclass options -mbc and -S
   * apply; see the class javadoc).
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  @Override
  public void setOptions(final String[] options) throws Exception {
    super.setOptions(options);
  } // setOptions

  /**
   * Gets the current settings of the Classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {
    return super.getOptions();
  } // getOptions

  /**
   * This will return a string describing the classifier.
   *
   * @return The string.
   */
  @Override
  public String globalInfo() {
    return "This Bayes Network learning algorithm determines the maximum weight spanning tree "
        + " and returns a Naive Bayes network augmented with a tree.\n\n"
        + "For more information see:\n\n" + this.getTechnicalInformation().toString();
  } // globalInfo

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
} // TAN
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/bayes/net/search/local/TabuSearch.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * TabuSearch.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.search.local;

import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;

import weka.classifiers.bayes.BayesNet;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;

/**
 * This Bayes Network learning algorithm uses tabu search for finding a well
 * scoring Bayes network structure. Tabu search is hill climbing till an
 * optimum is reached; the following step is the least worst possible step.
 * The last X steps are kept in a list and none of the steps in this so called
 * tabu list is considered in taking the next step. The best network found in
 * this traversal is returned.
 * <p>
 * For more information see: R.R. Bouckaert (1995). Bayesian Belief Networks:
 * from Construction to Inference. Utrecht, Netherlands.
 * <p>
 * Valid options:
 * <ul>
 * <li>{@code -L <integer>} Tabu list length</li>
 * <li>{@code -U <integer>} Number of runs</li>
 * <li>{@code -P <nr of parents>} Maximum number of parents</li>
 * <li>{@code -R} Use arc reversal operation (default false)</li>
 * <li>{@code -N} Initial structure is empty (instead of Naive Bayes)</li>
 * <li>{@code -mbc} Apply a Markov Blanket correction to the learned structure</li>
 * <li>{@code -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES]} Score type</li>
 * </ul>
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision$
 */
public class TabuSearch extends HillClimber implements TechnicalInformationHandler {

  /** for serialization */
  static final long serialVersionUID = 1457344073228786447L;

  /** number of runs **/
  int m_nRuns = 10;

  /** size of tabu list **/
  int m_nTabuList = 5;

  /** the actual tabu list **/
  Operation[] m_oTabuList = null;

  /**
   * Returns an instance of a TechnicalInformation object, containing detailed
   * information about the technical background of this class, e.g., paper
   * reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  @Override
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.PHDTHESIS);
    result.setValue(Field.AUTHOR, "R.R. Bouckaert");
    result.setValue(Field.YEAR, "1995");
    result.setValue(Field.TITLE, "Bayesian Belief Networks: from Construction to Inference");
    result.setValue(Field.INSTITUTION, "University of Utrecht");
    result.setValue(Field.ADDRESS, "Utrecht, Netherlands");

    return result;
  }

  /**
   * search determines the network structure/graph of the network with the
   * Tabu search algorithm: repeatedly take the best step not on the tabu
   * list (even if it worsens the score), remember the step in the tabu list,
   * and keep a copy of the best scoring network seen.
   *
   * @param bayesNet the network
   * @param instances the data to use
   * @throws Exception if something goes wrong
   */
  @Override
  protected void search(final BayesNet bayesNet, final Instances instances) throws Exception {
    this.m_oTabuList = new Operation[this.m_nTabuList];
    int iCurrentTabuList = 0;
    this.initCache(bayesNet, instances);

    // keeps track of score of best structure found so far
    double fBestScore;
    double fCurrentScore = 0.0;
    for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) {
      // XXX Interrupt weka
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA!");
      }
      fCurrentScore += this.calcNodeScore(iAttribute);
    }

    // keeps track of best structure found so far
    BayesNet bestBayesNet;

    // initialize bestBayesNet
    fBestScore = fCurrentScore;
    bestBayesNet = new BayesNet();
    bestBayesNet.m_Instances = instances;
    bestBayesNet.initStructure();
    this.copyParentSets(bestBayesNet, bayesNet);

    // go do the search
    for (int iRun = 0; iRun < this.m_nRuns; iRun++) {
      // XXX Interrupt weka
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA!");
      }
      Operation oOperation = this.getOptimalOperation(bayesNet, instances);
      // BUGFIX: the null sanity check must come BEFORE performOperation;
      // previously the operation was performed first, so a too-long tabu
      // list caused a NullPointerException instead of this diagnostic.
      if (oOperation == null) {
        throw new Exception("Panic: could not find any step to make. Tabu list too long?");
      }
      this.performOperation(bayesNet, instances, oOperation);
      // update tabu list (circular buffer of the last m_nTabuList steps)
      this.m_oTabuList[iCurrentTabuList] = oOperation;
      iCurrentTabuList = (iCurrentTabuList + 1) % this.m_nTabuList;

      fCurrentScore += oOperation.m_fDeltaScore;
      // keep track of best network seen so far
      if (fCurrentScore > fBestScore) {
        fBestScore = fCurrentScore;
        this.copyParentSets(bestBayesNet, bayesNet);
      }

      if (bayesNet.getDebug()) {
        this.printTabuList();
      }
    }

    // restore current network to best network
    this.copyParentSets(bayesNet, bestBayesNet);

    // free up memory
    bestBayesNet = null;
    this.m_Cache = null;
  } // search

  /**
   * copyParentSets copies parent sets of source to dest BayesNet
   *
   * @param dest destination network
   * @param source source network
   */
  void copyParentSets(final BayesNet dest, final BayesNet source) {
    int nNodes = source.getNrOfNodes();
    // clear parent set first
    for (int iNode = 0; iNode < nNodes; iNode++) {
      dest.getParentSet(iNode).copy(source.getParentSet(iNode));
    }
  } // CopyParentSets

  /**
   * check whether the operation is not in the tabu list
   *
   * @param oOperation operation to be checked
   * @return true if operation is not in the tabu list
   */
  @Override
  boolean isNotTabu(final Operation oOperation) {
    for (int iTabu = 0; iTabu < this.m_nTabuList; iTabu++) {
      if (oOperation.equals(this.m_oTabuList[iTabu])) {
        return false;
      }
    }
    return true;
  } // isNotTabu

  /**
   * print tabu list for debugging purposes.
   */
  void printTabuList() {
    for (int i = 0; i < this.m_nTabuList; i++) {
      Operation o = this.m_oTabuList[i];
      if (o != null) {
        if (o.m_nOperation == 0) {
          System.out.print(" +(");
        } else {
          System.out.print(" -(");
        }
        System.out.print(o.m_nTail + "->" + o.m_nHead + ")");
      }
    }
    System.out.println();
  } // printTabuList

  /**
   * @return number of runs
   */
  public int getRuns() {
    return this.m_nRuns;
  } // getRuns

  /**
   * Sets the number of runs
   *
   * @param nRuns The number of runs to set
   */
  public void setRuns(final int nRuns) {
    this.m_nRuns = nRuns;
  } // setRuns

  /**
   * @return the Tabu List length
   */
  public int getTabuList() {
    return this.m_nTabuList;
  } // getTabuList

  /**
   * Sets the Tabu List length.
   *
   * @param nTabuList The nTabuList to set
   */
  public void setTabuList(final int nTabuList) {
    this.m_nTabuList = nTabuList;
  } // setTabuList

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> newVector = new Vector<Option>(4);

    newVector.addElement(new Option("\tTabu list length", "L", 1, "-L <integer>"));
    newVector.addElement(new Option("\tNumber of runs", "U", 1, "-U <integer>"));
    newVector.addElement(new Option("\tMaximum number of parents", "P", 1, "-P <nr of parents>"));
    newVector.addElement(new Option("\tUse arc reversal operation.\n\t(default false)", "R", 0, "-R"));

    newVector.addAll(Collections.list(super.listOptions()));

    return newVector.elements();
  } // listOptions

  /**
   * Parses a given list of options (see the class javadoc for the list of
   * valid options) and forwards the remainder to the superclass.
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  @Override
  public void setOptions(final String[] options) throws Exception {
    String sTabuList = Utils.getOption('L', options);
    if (sTabuList.length() != 0) {
      this.setTabuList(Integer.parseInt(sTabuList));
    }
    String sRuns = Utils.getOption('U', options);
    if (sRuns.length() != 0) {
      this.setRuns(Integer.parseInt(sRuns));
    }
    super.setOptions(options);
  } // setOptions

  /**
   * Gets the current settings of the search algorithm.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {
    Vector<String> options = new Vector<String>();

    options.add("-L");
    options.add("" + this.getTabuList());

    options.add("-U");
    options.add("" + this.getRuns());

    Collections.addAll(options, super.getOptions());

    return options.toArray(new String[0]);
  } // getOptions

  /**
   * This will return a string describing the classifier.
   *
   * @return The string.
   */
  @Override
  public String globalInfo() {
    return "This Bayes Network learning algorithm uses tabu search for finding a well scoring "
        + "Bayes network structure. Tabu search is hill climbing till an optimum is reached. The "
        + "following step is the least worst possible step. The last X steps are kept in a list and "
        + "none of the steps in this so called tabu list is considered in taking the next step. "
        + "The best network found in this traversal is returned.\n\n"
        + "For more information see:\n\n" + this.getTechnicalInformation().toString();
  } // globalInfo

  /**
   * @return a string to describe the Runs option.
   */
  public String runsTipText() {
    return "Sets the number of steps to be performed.";
  } // runsTipText

  /**
   * @return a string to describe the TabuList option.
   */
  public String tabuListTipText() {
    return "Sets the length of the tabu list.";
  } // tabuListTipText

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
} // TabuSearch
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/AbstractEvaluationMetric.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * AbstractEvaluationMetric.java
 * Copyright (C) 2011-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.evaluation;

import weka.core.PluginManager;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;

/**
 * Abstract base class for pluggable classification/regression evaluation
 * metrics. Concrete metrics register themselves with the
 * {@link weka.core.PluginManager} and are discovered via
 * {@link #getPluginMetrics()}.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision$
 */
public abstract class AbstractEvaluationMetric implements Serializable {

  /** For serialization */
  private static final long serialVersionUID = -924507718482386887L;

  /**
   * Gets a list of freshly instantiated concrete implementations of available
   * plugin metrics, or null if no plugin metrics have been registered.
   *
   * @return a list of plugin metrics or null if there are no plugin metrics
   */
  public static ArrayList<AbstractEvaluationMetric> getPluginMetrics() {
    Set<String> registered = PluginManager
      .getPluginNamesOfType(AbstractEvaluationMetric.class.getName());

    if (registered == null) {
      // nothing registered under this plugin type
      return null;
    }

    ArrayList<AbstractEvaluationMetric> result =
      new ArrayList<AbstractEvaluationMetric>();
    for (String metricName : registered) {
      try {
        Object candidate = PluginManager.getPluginInstance(
          AbstractEvaluationMetric.class.getName(), metricName);
        if (candidate instanceof AbstractEvaluationMetric) {
          result.add((AbstractEvaluationMetric) candidate);
        }
      } catch (Exception ex) {
        // best effort: a metric that fails to instantiate is reported and
        // skipped rather than aborting discovery of the remaining metrics
        ex.printStackTrace();
      }
    }
    return result;
  }

  /**
   * Exception for subclasses to throw if asked for a statistic that is not
   * part of their implementation.
   *
   * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
   * @version $Revision$
   */
  public class UnknownStatisticException extends IllegalArgumentException {

    /** For serialization */
    private static final long serialVersionUID = -8787045492227999839L;

    /**
     * Constructs a new UnknownStatisticException.
     *
     * @param message the exception's message
     */
    public UnknownStatisticException(String message) {
      super(message);
    }
  }

  /**
   * Base evaluation object for subclasses to access for statistics. IMPORTANT:
   * subclasses should treat this object as read-only.
   */
  protected Evaluation m_baseEvaluation;

  /**
   * Set the base evaluation object to use. IMPORTANT: subclasses should treat
   * this object as read-only.
   *
   * @param eval the evaluation object that this metric reads its statistics
   *          from
   */
  public void setBaseEvaluation(Evaluation eval) {
    m_baseEvaluation = eval;
  }

  /**
   * Return true if this evaluation metric can be computed when the class is
   * nominal.
   *
   * @return true if this evaluation metric can be computed when the class is
   *         nominal
   */
  public abstract boolean appliesToNominalClass();

  /**
   * Return true if this evaluation metric can be computed when the class is
   * numeric.
   *
   * @return true if this evaluation metric can be computed when the class is
   *         numeric
   */
  public abstract boolean appliesToNumericClass();

  /**
   * Get the name of this metric.
   *
   * @return the name of this metric
   */
  public abstract String getMetricName();

  /**
   * Get a short description of this metric (algorithm, formulas etc.).
   *
   * @return a short description of this metric
   */
  public abstract String getMetricDescription();

  /**
   * Get a list of the names of the statistics that this metric computes. E.g.
   * an information theoretic evaluation measure might compute total number of
   * bits as well as average bits/instance.
   *
   * @return the names of the statistics that this metric computes
   */
  public abstract List<String> getStatisticNames();

  /**
   * Get the value of the named statistic.
   *
   * @param statName the name of the statistic to compute the value for
   * @return the computed statistic or Utils.missingValue() if the statistic
   *         can't be computed for some reason
   */
  public abstract double getStatistic(String statName);

  /**
   * True if the optimum value of the named statistic is a maximum value; false
   * if the optimum value is a minimum value. Subclasses should override this
   * method to suit their statistic(s).
   *
   * @return true (default implementation)
   */
  public boolean statisticIsMaximisable(String statName) {
    return true;
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/AggregateableEvaluation.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * AggregateableEvaluation.java
 * Copyright (C) 2011-2012 University of Waikato, Hamilton, New Zealand
 */

package weka.classifiers.evaluation;

import java.util.ArrayList;

import weka.classifiers.CostMatrix;
import weka.core.Aggregateable;
import weka.core.Instances;

/**
 * Subclass of Evaluation that provides a method for aggregating the results
 * stored in another Evaluation object.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision$
 */
public class AggregateableEvaluation extends Evaluation implements
  Aggregateable<Evaluation> {

  /**
   * For serialization
   */
  private static final long serialVersionUID = 8734675926526110924L;

  /**
   * Constructs a new AggregateableEvaluation object
   *
   * @param data the Instances to use
   * @throws Exception if a problem occurs
   */
  public AggregateableEvaluation(Instances data) throws Exception {
    super(data);
  }

  /**
   * Constructs a new AggregateableEvaluation object
   *
   * @param data the Instances to use
   * @param costMatrix the cost matrix to use
   * @throws Exception if a problem occurs
   */
  public AggregateableEvaluation(Instances data, CostMatrix costMatrix)
    throws Exception {
    super(data, costMatrix);
  }

  /**
   * Constructs a new AggregateableEvaluation object based on an Evaluation
   * object
   *
   * @param eval the Evaluation object to use
   * @throws Exception if a problem occurs
   */
  public AggregateableEvaluation(Evaluation eval) throws Exception {
    super(eval.m_Header, eval.m_CostMatrix);

    // copy the prior/training statistics of the source evaluation; note that
    // array fields are shared (not deep-copied) with the source object
    m_NoPriors = eval.m_NoPriors;
    m_NumTrainClassVals = eval.m_NumTrainClassVals;
    m_TrainClassVals = eval.m_TrainClassVals;
    m_TrainClassWeights = eval.m_TrainClassWeights;
    m_PriorEstimator = eval.m_PriorEstimator;
    m_MinTarget = eval.m_MinTarget;
    m_MaxTarget = eval.m_MaxTarget;
    m_ClassPriorsSum = eval.m_ClassPriorsSum;
    m_ClassPriors = eval.m_ClassPriors;
  }

  /**
   * Adds the statistics encapsulated in the supplied Evaluation object into
   * this one. Does not perform any checks for compatibility between the
   * supplied Evaluation object and this one.
   *
   * @param evaluation the evaluation object to aggregate
   * @return this object, with the supplied statistics folded in
   */
  @Override
  public AggregateableEvaluation aggregate(Evaluation evaluation) {
    m_Incorrect += evaluation.incorrect();
    m_Correct += evaluation.correct();
    m_Unclassified += evaluation.unclassified();
    m_MissingClass += evaluation.m_MissingClass;
    m_WithClass += evaluation.m_WithClass;

    // cell-wise sum of the confusion matrices; iteration is bounded by this
    // object's matrix, so the supplied one must be at least as large
    if (evaluation.m_ConfusionMatrix != null) {
      double[][] newMatrix = evaluation.confusionMatrix();
      if (newMatrix != null) {
        for (int i = 0; i < m_ConfusionMatrix.length; i++) {
          for (int j = 0; j < m_ConfusionMatrix[i].length; j++) {
            m_ConfusionMatrix[i][j] += newMatrix[i][j];
          }
        }
      }
    }

    // NOTE(review): class priors are copied from the supplied evaluation, not
    // summed - i.e. the most recently aggregated evaluation's priors win.
    // Presumably intentional (priors are expected to be identical across
    // aggregated evaluations) - confirm before changing.
    double[] newClassPriors = evaluation.m_ClassPriors;
    if (newClassPriors != null && m_ClassPriors != null) {
      for (int i = 0; i < this.m_ClassPriors.length; i++) {
        m_ClassPriors[i] = newClassPriors[i];
      }
    }
    m_ClassPriorsSum = evaluation.m_ClassPriorsSum;

    m_TotalCost += evaluation.totalCost();
    m_SumErr += evaluation.m_SumErr;
    m_SumAbsErr += evaluation.m_SumAbsErr;
    m_SumSqrErr += evaluation.m_SumSqrErr;
    m_SumClass += evaluation.m_SumClass;
    m_SumSqrClass += evaluation.m_SumSqrClass;
    m_SumPredicted += evaluation.m_SumPredicted;
    m_SumSqrPredicted += evaluation.m_SumSqrPredicted;
    m_SumClassPredicted += evaluation.m_SumClassPredicted;
    m_SumPriorAbsErr += evaluation.m_SumPriorAbsErr;
    m_SumPriorSqrErr += evaluation.m_SumPriorSqrErr;
    m_SumKBInfo += evaluation.m_SumKBInfo;

    double[] newMarginCounts = evaluation.m_MarginCounts;
    if (newMarginCounts != null) {
      for (int i = 0; i < m_MarginCounts.length; i++) {
        m_MarginCounts[i] += newMarginCounts[i];
      }
    }

    m_ComplexityStatisticsAvailable = evaluation.m_ComplexityStatisticsAvailable;
    m_CoverageStatisticsAvailable = evaluation.m_CoverageStatisticsAvailable;

    m_SumPriorEntropy += evaluation.m_SumPriorEntropy;
    m_SumSchemeEntropy += evaluation.m_SumSchemeEntropy;
    m_TotalSizeOfRegions += evaluation.m_TotalSizeOfRegions;
    m_TotalCoverage += evaluation.m_TotalCoverage;

    // append the supplied evaluation's predictions (if any) to ours
    ArrayList<Prediction> predsToAdd = evaluation.m_Predictions;
    if (predsToAdd != null) {
      if (m_Predictions == null) {
        m_Predictions = new ArrayList<Prediction>();
      }
      m_Predictions.addAll(predsToAdd);
    }

    return this;
  }

  @Override
  public void finalizeAggregation() {
    // nothing to do here
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/ConfusionMatrix.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * ConfusionMatrix.java
 * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.evaluation;

import java.util.ArrayList;

import weka.classifiers.CostMatrix;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.core.matrix.Matrix;

/**
 * Cells of this matrix correspond to counts of the number (or weight) of
 * predictions for each actual value / predicted value combination.
 *
 * @author Len Trigg (len@reeltwo.com)
 * @version $Revision$
 */
public class ConfusionMatrix extends Matrix {

  /** for serialization */
  private static final long serialVersionUID = -181789981401504090L;

  /** Stores the names of the classes */
  protected String[] m_ClassNames;

  /**
   * Creates the confusion matrix with the given class names.
   *
   * @param classNames an array containing the names the classes.
   */
  public ConfusionMatrix(String[] classNames) {
    super(classNames.length, classNames.length);
    m_ClassNames = classNames.clone();
  }

  /**
   * Makes a copy of this ConfusionMatrix after applying the supplied
   * CostMatrix to the cells. The resulting ConfusionMatrix can be used to get
   * cost-weighted statistics.
   *
   * @param costs the CostMatrix.
   * @return a ConfusionMatrix that has had costs applied.
   * @exception Exception if the CostMatrix is not of the same size as this
   *              ConfusionMatrix.
   */
  public ConfusionMatrix makeWeighted(CostMatrix costs) throws Exception {
    if (costs.size() != size()) {
      throw new Exception("Cost and confusion matrices must be the same size");
    }
    ConfusionMatrix weighted = new ConfusionMatrix(m_ClassNames);
    for (int row = 0; row < size(); row++) {
      for (int col = 0; col < size(); col++) {
        weighted.set(row, col, get(row, col) * costs.getElement(row, col));
      }
    }
    return weighted;
  }

  /**
   * Creates and returns a clone of this object.
   *
   * @return a clone of this instance.
   */
  @Override
  public Object clone() {
    ConfusionMatrix m = (ConfusionMatrix) super.clone();
    m.m_ClassNames = m_ClassNames.clone();
    return m;
  }

  /**
   * Gets the number of classes.
   *
   * @return the number of classes
   */
  public int size() {
    return m_ClassNames.length;
  }

  /**
   * Gets the name of one of the classes.
   *
   * @param index the index of the class.
   * @return the class name.
   */
  public String className(int index) {
    return m_ClassNames[index];
  }

  /**
   * Includes a prediction in the confusion matrix.
   *
   * @param pred the NominalPrediction to include
   * @exception Exception if no valid prediction was made (i.e. unclassified).
   */
  public void addPrediction(NominalPrediction pred) throws Exception {
    if (pred.predicted() == NominalPrediction.MISSING_VALUE) {
      throw new Exception("No predicted value given.");
    }
    if (pred.actual() == NominalPrediction.MISSING_VALUE) {
      throw new Exception("No actual value given.");
    }
    // accumulate the prediction's weight in the (actual, predicted) cell
    set((int) pred.actual(), (int) pred.predicted(),
      get((int) pred.actual(), (int) pred.predicted()) + pred.weight());
  }

  /**
   * Includes a whole bunch of predictions in the confusion matrix.
   *
   * @param predictions a FastVector containing the NominalPredictions to
   *          include
   * @exception Exception if no valid prediction was made (i.e. unclassified).
   */
  public void addPredictions(ArrayList<Prediction> predictions)
    throws Exception {
    for (int i = 0; i < predictions.size(); i++) {
      addPrediction((NominalPrediction) predictions.get(i));
    }
  }

  /**
   * Gets the performance with respect to one of the classes as a TwoClassStats
   * object.
   *
   * @param classIndex the index of the class of interest.
   * @return the generated TwoClassStats object.
   */
  public TwoClassStats getTwoClassStats(int classIndex) {
    double fp = 0, tp = 0, fn = 0, tn = 0;
    for (int row = 0; row < size(); row++) {
      for (int col = 0; col < size(); col++) {
        if (row == classIndex) {
          if (col == classIndex) {
            tp += get(row, col);
          } else {
            fn += get(row, col);
          }
        } else {
          if (col == classIndex) {
            fp += get(row, col);
          } else {
            tn += get(row, col);
          }
        }
      }
    }
    return new TwoClassStats(tp, fp, tn, fn);
  }

  /**
   * Gets the number of correct classifications (that is, for which a correct
   * prediction was made). (Actually the sum of the weights of these
   * classifications)
   *
   * @return the number of correct classifications
   */
  public double correct() {
    double correct = 0;
    for (int i = 0; i < size(); i++) {
      correct += get(i, i);
    }
    return correct;
  }

  /**
   * Gets the number of incorrect classifications (that is, for which an
   * incorrect prediction was made). (Actually the sum of the weights of these
   * classifications)
   *
   * @return the number of incorrect classifications
   */
  public double incorrect() {
    double incorrect = 0;
    for (int row = 0; row < size(); row++) {
      for (int col = 0; col < size(); col++) {
        if (row != col) {
          incorrect += get(row, col);
        }
      }
    }
    return incorrect;
  }

  /**
   * Gets the number of predictions that were made (actually the sum of the
   * weights of predictions where the class value was known).
   *
   * @return the number of predictions with known class
   */
  public double total() {
    double total = 0;
    for (int row = 0; row < size(); row++) {
      for (int col = 0; col < size(); col++) {
        total += get(row, col);
      }
    }
    return total;
  }

  /**
   * Returns the estimated error rate.
   *
   * @return the estimated error rate (between 0 and 1).
   */
  public double errorRate() {
    return incorrect() / total();
  }

  /**
   * Calls toString() with a default title.
   *
   * @return the confusion matrix as a string
   */
  @Override
  public String toString() {
    return toString("=== Confusion Matrix ===\n");
  }

  /**
   * Outputs the performance statistics as a classification confusion matrix.
   * For each class value, shows the distribution of predicted class values.
   *
   * @param title the title for the confusion matrix
   * @return the confusion matrix as a String
   */
  public String toString(String title) {

    StringBuffer text = new StringBuffer();
    char[] IDChars = { 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k',
      'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y',
      'z' };
    int IDWidth;
    boolean fractional = false;

    // Find the maximum value in the matrix
    // and check for fractional display requirement
    double maxval = 0;
    for (int i = 0; i < size(); i++) {
      for (int j = 0; j < size(); j++) {
        double current = get(i, j);
        if (current < 0) {
          // widen for the minus sign (extra digit) and make positive
          current *= -10;
        }
        if (current > maxval) {
          maxval = current;
        }
        // distance from the nearest integer; Math.abs is required because
        // values that round UP (e.g. 2.6 -> rint 3.0) yield a negative
        // difference, and log of a negative is NaN, which silently disabled
        // fractional display for such cells
        double fract = Math.abs(current - Math.rint(current));
        if (!fractional && ((Math.log(fract) / Math.log(10)) >= -2)) {
          fractional = true;
        }
      }
    }

    // column width: enough for the largest count (plus ".xx" if fractional)
    // or for the widest class ID, whichever is larger
    IDWidth = 1 + Math.max(
      (int) (Math.log(maxval) / Math.log(10) + (fractional ? 3 : 0)),
      (int) (Math.log(size()) / Math.log(IDChars.length)));
    text.append(title).append("\n");
    for (int i = 0; i < size(); i++) {
      if (fractional) {
        text.append(" ").append(num2ShortID(i, IDChars, IDWidth - 3))
          .append(" ");
      } else {
        text.append(" ").append(num2ShortID(i, IDChars, IDWidth));
      }
    }
    text.append(" actual class\n");
    for (int i = 0; i < size(); i++) {
      for (int j = 0; j < size(); j++) {
        text.append(" ").append(
          Utils.doubleToString(get(i, j), IDWidth, (fractional ? 2 : 0)));
      }
      text.append(" | ").append(num2ShortID(i, IDChars, IDWidth)).append(" = ")
        .append(m_ClassNames[i]).append("\n");
    }
    return text.toString();
  }

  /**
   * Method for generating indices for the confusion matrix. Encodes num in a
   * base-26 alphabetic ID ('a', 'b', ..., 'z', 'aa', 'ab', ...), right-aligned
   * in a field of IDWidth characters.
   *
   * @param num integer to format
   * @return the formatted integer as a string
   */
  private static String num2ShortID(int num, char[] IDChars, int IDWidth) {

    char[] ID = new char[IDWidth];
    int i;

    for (i = IDWidth - 1; i >= 0; i--) {
      ID[i] = IDChars[num % IDChars.length];
      num = num / IDChars.length - 1;
      if (num < 0) {
        break;
      }
    }
    // pad the remaining leading positions with spaces
    for (i--; i >= 0; i--) {
      ID[i] = ' ';
    }

    return new String(ID);
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/CostCurve.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * CostCurve.java
 * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.evaluation;

import java.util.ArrayList;

import weka.classifiers.Classifier;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;

/**
 * Generates points illustrating probablity cost tradeoffs that can be obtained
 * by varying the threshold value between classes. For example, the typical
 * threshold value of 0.5 means the predicted probability of "positive" must be
 * higher than 0.5 for the instance to be predicted as "positive".
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision$
 */
public class CostCurve implements RevisionHandler {

  /** The name of the relation used in cost curve datasets */
  public static final String RELATION_NAME = "CostCurve";

  /** attribute name: Probability Cost Function */
  public static final String PROB_COST_FUNC_NAME = "Probability Cost Function";

  /** attribute name: Normalized Expected Cost */
  public static final String NORM_EXPECTED_COST_NAME = "Normalized Expected Cost";

  /** attribute name: Threshold */
  public static final String THRESHOLD_NAME = "Threshold";

  /**
   * Calculates the performance stats for the default class (the last value of
   * the class attribute) and returns the results as a set of Instances. The
   * structure of these Instances is as follows:
   * <p>
   * <ul>
   * <li><b>Probability Cost Function </b>
   * <li><b>Normalized Expected Cost</b>
   * <li><b>Threshold</b> contains the probability threshold that gives rise to
   * the previous performance values.
   * </ul>
   * <p>
   *
   * @see TwoClassStats
   * @param predictions the predictions to base the curve on
   * @return datapoints as a set of instances, null if no predictions have been
   *         made.
   * @throws InterruptedException
   */
  public Instances getCurve(final ArrayList<Prediction> predictions)
    throws InterruptedException {
    if (predictions.size() == 0) {
      return null;
    }
    // default: curve for the last class value
    int lastClassIndex =
      ((NominalPrediction) predictions.get(0)).distribution().length - 1;
    return this.getCurve(predictions, lastClassIndex);
  }

  /**
   * Calculates the performance stats for the desired class and returns the
   * results as a set of Instances. Each threshold point contributes two
   * datapoints: one at PCF = 0 (cost = false-positive rate) and one at
   * PCF = 1 (cost = false-negative rate).
   *
   * @param predictions the predictions to base the curve on
   * @param classIndex index of the class of interest.
   * @return datapoints as a set of instances, null if there is nothing to
   *         compute.
   * @throws InterruptedException
   */
  public Instances getCurve(final ArrayList<Prediction> predictions,
    final int classIndex) throws InterruptedException {

    boolean noData = predictions.size() == 0
      || ((NominalPrediction) predictions.get(0)).distribution().length <= classIndex;
    if (noData) {
      return null;
    }

    // derive threshold/TP-rate/FP-rate triples from a threshold curve
    ThresholdCurve thresholds = new ThresholdCurve();
    Instances thresholdData = thresholds.getCurve(predictions, classIndex);
    Instances curve = this.makeHeader();

    int fpIndex = thresholdData.attribute(ThresholdCurve.FP_RATE_NAME).index();
    int tpIndex = thresholdData.attribute(ThresholdCurve.TP_RATE_NAME).index();
    int thresholdIndex =
      thresholdData.attribute(ThresholdCurve.THRESHOLD_NAME).index();

    for (int row = 0; row < thresholdData.numInstances(); row++) {
      double fpRate = thresholdData.instance(row).value(fpIndex);
      double tpRate = thresholdData.instance(row).value(tpIndex);
      double threshold = thresholdData.instance(row).value(thresholdIndex);

      // point at probability-cost-function value 0: cost is the FP rate
      double[] left = { 0, fpRate, threshold };
      curve.add(new DenseInstance(1.0, left));

      // point at probability-cost-function value 1: cost is the FN rate
      double[] right = { 1, 1.0 - tpRate, threshold };
      curve.add(new DenseInstance(1.0, right));
    }

    return curve;
  }

  /**
   * Generates the header of a cost curve dataset.
   *
   * @return the header
   */
  private Instances makeHeader() {
    ArrayList<Attribute> attributes = new ArrayList<Attribute>();
    attributes.add(new Attribute(PROB_COST_FUNC_NAME));
    attributes.add(new Attribute(NORM_EXPECTED_COST_NAME));
    attributes.add(new Attribute(THRESHOLD_NAME));
    return new Instances(RELATION_NAME, attributes, 100);
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }

  /**
   * Tests the CostCurve generation from the command line. The classifier is
   * currently hardcoded. Pipe in an arff file.
   *
   * @param args currently ignored
   */
  public static void main(final String[] args) {

    try {
      Instances data = new Instances(new java.io.InputStreamReader(System.in));
      data.setClassIndex(data.numAttributes() - 1);

      CostCurve costCurve = new CostCurve();
      EvaluationUtils evalUtils = new EvaluationUtils();
      Classifier scheme = new weka.classifiers.functions.Logistic();
      ArrayList<Prediction> predictions = new ArrayList<Prediction>();

      // Do two runs of 10-fold cross-validation with different seeds.
      for (int run = 0; run < 2; run++) {
        evalUtils.setSeed(run);
        predictions.addAll(evalUtils.getCVPredictions(scheme, data, 10));
      }

      Instances result = costCurve.getCurve(predictions);
      System.out.println(result);
    } catch (Exception ex) {
      ex.printStackTrace();
    }
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/Evaluation.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Evaluation.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.evaluation; import java.beans.BeanInfo; import java.beans.Introspector; import java.beans.MethodDescriptor; import java.io.BufferedInputStream; import java.io.BufferedOutputStream; import java.io.BufferedReader; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.FileReader; import java.io.InputStream; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.io.OutputStream; import java.io.Reader; import java.io.Serializable; import java.lang.reflect.Method; import java.util.ArrayList; import java.util.Arrays; import java.util.Date; import java.util.Enumeration; import java.util.List; import java.util.Random; import java.util.stream.Stream; import java.util.zip.GZIPInputStream; import java.util.zip.GZIPOutputStream; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.ConditionalDensityEstimator; import weka.classifiers.CostMatrix; import weka.classifiers.IntervalEstimator; import weka.classifiers.Sourcable; import weka.classifiers.UpdateableBatchProcessor; import weka.classifiers.UpdateableClassifier; import weka.classifiers.evaluation.output.prediction.AbstractOutput; import 
weka.classifiers.evaluation.output.prediction.PlainText; import weka.classifiers.misc.InputMappedClassifier; import weka.classifiers.pmml.consumer.PMMLClassifier; import weka.classifiers.xml.XMLClassifier; import weka.core.BatchPredictor; import weka.core.Drawable; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.SerializationHelper; import weka.core.Summarizable; import weka.core.Utils; import weka.core.Version; import weka.core.WekaException; import weka.core.converters.ConverterUtils.DataSink; import weka.core.converters.ConverterUtils.DataSource; import weka.core.pmml.PMMLFactory; import weka.core.pmml.PMMLModel; import weka.core.xml.KOML; import weka.core.xml.XMLOptions; import weka.core.xml.XMLSerialization; import weka.estimators.UnivariateKernelEstimator; /** * Class for evaluating machine learning models. * <p/> * * ------------------------------------------------------------------- * <p/> * * General options when evaluating a learning scheme from the command-line: * <p/> * * -t filename <br/> * Name of the file with the training data. (required) * <p/> * * -T filename <br/> * Name of the file with the test data. If missing a cross-validation is performed. * <p/> * * -c index <br/> * Index of the class attribute (1, 2, ...; default: last). * <p/> * * -x number <br/> * The number of folds for the cross-validation (default: 10). * <p/> * * -no-cv <br/> * No cross validation. If no test file is provided, no evaluation is done. * <p/> * * -split-percentage percentage <br/> * Sets the percentage for the train/test set split, e.g., 66. * <p/> * * -preserve-order <br/> * Preserves the order in the percentage split instead of randomizing the data first with the seed * value ('-s'). * <p/> * * -s seed <br/> * Random number seed for the cross-validation and percentage split (default: 1). 
* <p/> * * -m filename <br/> * The name of a file containing a cost matrix. * <p/> * * -disable list <br/> * A comma separated list of metric names not to include in the output. * <p/> * * -l filename <br/> * Loads classifier from the given file. In case the filename ends with ".xml", a PMML file is * loaded or, if that fails, options are loaded from XML. * <p/> * * -d filename <br/> * Saves classifier built from the training data into the given file. In case the filename ends with * ".xml" the options are saved XML, not the model. * <p/> * * -v <br/> * Outputs no statistics for the training data. * <p/> * * -o <br/> * Outputs statistics only, not the classifier. * <p/> * * -output-models-for-training-splits <br/> * Output models for training splits if cross-validation or percentage-split evaluation is used. * <p/> * * -do-not-output-per-class-statistics <br/> * Do not output statistics per class. * <p/> * * -k <br/> * Outputs information-theoretic statistics. * <p/> * * -classifications "weka.classifiers.evaluation.output.prediction.AbstractOutput + options" <br/> * Uses the specified class for generating the classification output. E.g.: * weka.classifiers.evaluation.output.prediction.PlainText or : * weka.classifiers.evaluation.output.prediction.CSV * * -p range <br/> * Outputs predictions for test instances (or the train instances if no test instances provided and * -no-cv is used), along with the attributes in the specified range (and nothing else). Use '-p 0' * if no attributes are desired. * <p/> * Deprecated: use "-classifications ..." instead. * <p/> * * -distribution <br/> * Outputs the distribution instead of only the prediction in conjunction with the '-p' option (only * nominal classes). * <p/> * Deprecated: use "-classifications ..." instead. * <p/> * * -no-predictions <br/> * Turns off the collection of predictions in order to conserve memory. * <p/> * * -r <br/> * Outputs cumulative margin distribution (and nothing else). 
* <p/> * * -g <br/> * Only for classifiers that implement "Graphable." Outputs the graph representation of the * classifier (and nothing else). * <p/> * * -xml filename | xml-string <br/> * Retrieves the options from the XML-data instead of the command line. * <p/> * * -threshold-file file <br/> * The file to save the threshold data to. The format is determined by the extensions, e.g., '.arff' * for ARFF format or '.csv' for CSV. * <p/> * * -threshold-label label <br/> * The class label to determine the threshold data for (default is the first label) * <p/> * * ------------------------------------------------------------------- * <p/> * * Example usage as the main of a classifier (called FunkyClassifier): <code> <pre> * public static void main(String [] args) { * runClassifier(new FunkyClassifier(), args); * } * </pre> </code> * <p/> * * ------------------------------------------------------------------ * <p/> * * Example usage from within an application: <code> <pre> * Instances trainInstances = ... instances got from somewhere * Instances testInstances = ... instances got from somewhere * Classifier scheme = ... scheme got from somewhere * * Evaluation evaluation = new Evaluation(trainInstances); * evaluation.evaluateModel(scheme, testInstances); * System.out.println(evaluation.toSummaryString()); * </pre> </code> * * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Len Trigg (trigg@cs.waikato.ac.nz) * @version $Revision$ */ public class Evaluation implements Summarizable, RevisionHandler, Serializable { /** * For serialization */ private static final long serialVersionUID = -7010314486866816271L; /** * The number of classes. */ protected int m_NumClasses; /** * The number of folds for a cross-validation. */ protected int m_NumFolds; /** * The weight of all incorrectly classified instances. */ protected double m_Incorrect; /** * The weight of all correctly classified instances. */ protected double m_Correct; /** * The weight of all unclassified instances. 
 */
  protected double m_Unclassified;

  /** The weight of all instances that had no class assigned to them. */
  protected double m_MissingClass;

  /** The weight of all instances that had a class assigned to them. */
  protected double m_WithClass;

  /** Array for storing the confusion matrix (rows: actual class, columns: predicted class). */
  protected double[][] m_ConfusionMatrix;

  /** The names of the classes. */
  protected String[] m_ClassNames;

  /** Is the class nominal or numeric? */
  protected boolean m_ClassIsNominal;

  /** The prior probabilities of the classes. */
  protected double[] m_ClassPriors;

  /** The sum of counts for priors. */
  protected double m_ClassPriorsSum;

  /** The cost matrix (if given). */
  protected CostMatrix m_CostMatrix;

  /** The total cost of predictions (includes instance weights). */
  protected double m_TotalCost;

  /** Sum of errors. */
  protected double m_SumErr;

  /** Sum of absolute errors. */
  protected double m_SumAbsErr;

  /** Sum of squared errors. */
  protected double m_SumSqrErr;

  /** Sum of class values. */
  protected double m_SumClass;

  /** Sum of squared class values. */
  protected double m_SumSqrClass;

  /** Sum of predicted values. */
  protected double m_SumPredicted;

  /** Sum of squared predicted values. */
  protected double m_SumSqrPredicted;

  /** Sum of predicted * class values. */
  protected double m_SumClassPredicted;

  /** Sum of absolute errors of the prior. */
  protected double m_SumPriorAbsErr;

  /** Sum of squared errors of the prior. */
  protected double m_SumPriorSqrErr;

  /** Total Kononenko &amp; Bratko Information. */
  protected double m_SumKBInfo;

  /** Resolution of the margin histogram. */
  protected static int k_MarginResolution = 500;

  /** Cumulative margin distribution (indexed by margin bucket, see k_MarginResolution). */
  protected double m_MarginCounts[];

  /** Number of non-missing class training instances seen. */
  protected int m_NumTrainClassVals;

  /** Array containing all numeric training class values seen. */
  protected double[] m_TrainClassVals;

  /** Array containing all numeric training class weights. */
  protected double[] m_TrainClassWeights;

  /** Numeric class estimator for prior. */
  protected UnivariateKernelEstimator m_PriorEstimator;

  /** Whether complexity statistics are available. */
  protected boolean m_ComplexityStatisticsAvailable = true;

  /**
   * The minimum probability accepted from an estimator to avoid taking log(0) in Sf calculations.
   */
  protected static final double MIN_SF_PROB = Double.MIN_VALUE;

  /** Total entropy of prior predictions. */
  protected double m_SumPriorEntropy;

  /** Total entropy of scheme predictions. */
  protected double m_SumSchemeEntropy;

  /** Whether coverage statistics are available. */
  protected boolean m_CoverageStatisticsAvailable = true;

  /** The confidence level used for coverage statistics. */
  protected double m_ConfLevel = 0.95;

  /** Total size of predicted regions at the given confidence level. */
  protected double m_TotalSizeOfRegions;

  /** Total coverage of test cases at the given confidence level. */
  protected double m_TotalCoverage;

  /** Minimum target value. */
  protected double m_MinTarget;

  /** Maximum target value. */
  protected double m_MaxTarget;

  /** The list of predictions that have been generated (for computing AUC). */
  protected ArrayList<Prediction> m_Predictions;

  /**
   * Enables/disables the use of priors, e.g., if no training set is present in case of
   * de-serialized schemes.
   */
  protected boolean m_NoPriors = false;

  /** The header of the training set. */
  protected Instances m_Header;

  /**
   * whether to discard predictions (and save memory).
*/ protected boolean m_DiscardPredictions; /** * Holds plugin evaluation metrics */ protected List<AbstractEvaluationMetric> m_pluginMetrics; /** * The list of metrics to display in the output */ protected List<String> m_metricsToDisplay = new ArrayList<>(); public static final String[] BUILT_IN_EVAL_METRICS = { "Correct", "Incorrect", "Kappa", "Total cost", "Average cost", "KB relative", "KB information", "Correlation", "Complexity 0", "Complexity scheme", "Complexity improvement", "MAE", "RMSE", "RAE", "RRSE", "Coverage", "Region size", "TP rate", "FP rate", "Precision", "Recall", "F-measure", "MCC", "ROC area", "PRC area" }; /** * Utility method to get a list of the names of all built-in and plugin evaluation metrics * * @return the complete list of available evaluation metrics */ public static List<String> getAllEvaluationMetricNames() { List<String> allEvals = new ArrayList<>(); for (String s : Evaluation.BUILT_IN_EVAL_METRICS) { allEvals.add(s); } final List<AbstractEvaluationMetric> pluginMetrics = AbstractEvaluationMetric.getPluginMetrics(); if (pluginMetrics != null) { for (AbstractEvaluationMetric m : pluginMetrics) { if (m instanceof InformationRetrievalEvaluationMetric) { List<String> statNames = m.getStatisticNames(); for (String s : statNames) { allEvals.add(s); } } else { allEvals.add(m.getMetricName()); } } } return allEvals; } /** * Initializes all the counters for the evaluation. Use <code>useNoPriors()</code> if the dataset is * the test set and you can't initialize with the priors from the training set via * <code>setPriors(Instances)</code>. * * @param data * set of training instances, to get some header information and prior class distribution * information * @throws Exception * if the class is not defined * @see #useNoPriors() * @see #setPriors(Instances) */ public Evaluation(final Instances data) throws Exception { this(data, null); } /** * Initializes all the counters for the evaluation and also takes a cost matrix as parameter. 
 Use
   * <code>useNoPriors()</code> if the dataset is the test set and you can't initialize with the
   * priors from the training set via <code>setPriors(Instances)</code>.
   *
   * @param data
   *          set of training instances, to get some header information and prior class
   *          distribution information
   * @param costMatrix
   *          the cost matrix---if null, default costs will be used
   * @throws Exception
   *           if cost matrix is not compatible with data, the class is not defined or the class is
   *           numeric
   * @see #useNoPriors()
   * @see #setPriors(Instances)
   */
  public Evaluation(final Instances data, final CostMatrix costMatrix) throws Exception {
    // keep a data-less copy of the header for later compatibility checks
    this.m_Header = new Instances(data, 0);
    this.m_NumClasses = data.numClasses();
    this.m_NumFolds = 1;
    this.m_ClassIsNominal = data.classAttribute().isNominal();

    // the confusion matrix and class names only make sense for a nominal class
    if (this.m_ClassIsNominal) {
      this.m_ConfusionMatrix = new double[this.m_NumClasses][this.m_NumClasses];
      this.m_ClassNames = new String[this.m_NumClasses];
      for (int i = 0; i < this.m_NumClasses; i++) {
        this.m_ClassNames[i] = data.classAttribute().value(i);
      }
    }

    // validate the cost matrix (cost-sensitive evaluation requires a nominal class)
    this.m_CostMatrix = costMatrix;
    if (this.m_CostMatrix != null) {
      if (!this.m_ClassIsNominal) {
        throw new Exception("Class has to be nominal if cost matrix given!");
      }
      if (this.m_CostMatrix.size() != this.m_NumClasses) {
        throw new Exception("Cost matrix not compatible with data!");
      }
    }

    this.m_ClassPriors = new double[this.m_NumClasses];
    this.setPriors(data);
    this.m_MarginCounts = new double[k_MarginResolution + 1];

    // by default all built-in metrics except the coverage statistics are displayed
    for (String s : BUILT_IN_EVAL_METRICS) {
      if (!s.equalsIgnoreCase("Coverage") && !s.equalsIgnoreCase("Region size")) {
        this.m_metricsToDisplay.add(s.toLowerCase());
      }
    }

    // register any plugin metrics and add their (lower-cased) names to the display list
    this.m_pluginMetrics = AbstractEvaluationMetric.getPluginMetrics();
    if (this.m_pluginMetrics != null) {
      for (AbstractEvaluationMetric m : this.m_pluginMetrics) {
        m.setBaseEvaluation(this);
        if (m instanceof InformationRetrievalEvaluationMetric) {
          List<String> statNames = m.getStatisticNames();
          for (String s : statNames) {
            this.m_metricsToDisplay.add(s.toLowerCase());
          }
        } else {
          this.m_metricsToDisplay.add(m.getMetricName().toLowerCase());
        }
      }
    }
  }

  /**
   * Returns the header of the underlying dataset.
   *
   * @return the header information
   */
  public Instances getHeader() {
    return this.m_Header;
  }

  /**
   * Sets whether to discard predictions, ie, not storing them for future reference via
   * predictions() method in order to conserve memory.
   *
   * @param value
   *          true if to discard the predictions
   * @see #predictions()
   */
  public void setDiscardPredictions(final boolean value) {
    this.m_DiscardPredictions = value;
    // free any predictions already collected
    if (this.m_DiscardPredictions) {
      this.m_Predictions = null;
    }
  }

  /**
   * Returns whether predictions are not recorded at all, in order to conserve memory.
   *
   * @return true if predictions are not recorded
   * @see #predictions()
   */
  public boolean getDiscardPredictions() {
    return this.m_DiscardPredictions;
  }

  /**
   * Returns the list of plugin metrics in use (or null if there are none).
   *
   * @return the list of plugin metrics
   */
  public List<AbstractEvaluationMetric> getPluginMetrics() {
    return this.m_pluginMetrics;
  }

  /**
   * Set a list of the names of metrics to have appear in the output. The default is to display all
   * built in metrics and plugin metrics that haven't been globally disabled.
   *
   * @param display
   *          a list of metric names to have appear in the output
   */
  public void setMetricsToDisplay(final List<String> display) {
    // make sure all metric names are lower case for matching
    this.m_metricsToDisplay.clear();
    for (String s : display) {
      this.m_metricsToDisplay.add(s.trim().toLowerCase());
    }
  }

  /**
   * Get a list of the names of metrics to have appear in the output The default is to display all
   * built in metrics and plugin metrics that haven't been globally disabled.
   *
   * @return a list of metric names to have appear in the output
   */
  public List<String> getMetricsToDisplay() {
    return this.m_metricsToDisplay;
  }

  /**
   * Toggle the output of the metrics specified in the supplied list.
* * @param metricsToToggle * a list of metrics to toggle */ public void toggleEvalMetrics(final List<String> metricsToToggle) { for (String s : metricsToToggle) { if (this.m_metricsToDisplay.contains(s.toLowerCase())) { this.m_metricsToDisplay.remove(s.toLowerCase()); } else { this.m_metricsToDisplay.add(s.toLowerCase()); } } } /** * Get the named plugin evaluation metric * * @param name * the name of the metric (as returned by AbstractEvaluationMetric.getName()) or the fully * qualified class name of the metric to find * @return the metric or null if the metric is not in the list of plugin metrics */ public AbstractEvaluationMetric getPluginMetric(final String name) { AbstractEvaluationMetric match = null; if (this.m_pluginMetrics != null) { for (AbstractEvaluationMetric m : this.m_pluginMetrics) { if (m.getMetricName().equals(name) || m.getClass().getName().equals(name)) { match = m; break; } } } return match; } /** * Returns the area under ROC for those predictions that have been collected in the * evaluateClassifier(Classifier, Instances) method. Returns Utils.missingValue() if the area is not * available. * * @param classIndex * the index of the class to consider as "positive" * @return the area under the ROC curve or not a number * @throws InterruptedException */ public double areaUnderROC(final int classIndex) { // Check if any predictions have been collected if (this.m_Predictions == null) { return Utils.missingValue(); } else { ThresholdCurve tc = new ThresholdCurve(); Instances result; try { result = tc.getCurve(this.m_Predictions, classIndex); } catch (InterruptedException e) { throw new IllegalStateException(e); } return ThresholdCurve.getROCArea(result); } } /** * Calculates the weighted (by class size) AUC. * * @return the weighted AUC. 
* @throws InterruptedException */ public double weightedAreaUnderROC() { double[] classCounts = new double[this.m_NumClasses]; double classCountSum = 0; for (int i = 0; i < this.m_NumClasses; i++) { for (int j = 0; j < this.m_NumClasses; j++) { classCounts[i] += this.m_ConfusionMatrix[i][j]; } classCountSum += classCounts[i]; } double aucTotal = 0; for (int i = 0; i < this.m_NumClasses; i++) { double temp = this.areaUnderROC(i); if (classCounts[i] > 0) { // If temp is NaN, we want the sum to also be NaN if count > 0 aucTotal += (temp * classCounts[i]); } } return aucTotal / classCountSum; } /** * Returns the area under precision-recall curve (AUPRC) for those predictions that have been * collected in the evaluateClassifier(Classifier, Instances) method. Returns Utils.missingValue() * if the area is not available. * * @param classIndex * the index of the class to consider as "positive" * @return the area under the precision-recall curve or not a number * @throws InterruptedException */ public double areaUnderPRC(final int classIndex) { // Check if any predictions have been collected if (this.m_Predictions == null) { return Utils.missingValue(); } else { ThresholdCurve tc = new ThresholdCurve(); Instances result; try { result = tc.getCurve(this.m_Predictions, classIndex); } catch (InterruptedException e) { throw new IllegalStateException(e); } return ThresholdCurve.getPRCArea(result); } } /** * Calculates the weighted (by class size) AUPRC. * * @return the weighted AUPRC. 
* @throws InterruptedException */ public double weightedAreaUnderPRC() { double[] classCounts = new double[this.m_NumClasses]; double classCountSum = 0; for (int i = 0; i < this.m_NumClasses; i++) { for (int j = 0; j < this.m_NumClasses; j++) { classCounts[i] += this.m_ConfusionMatrix[i][j]; } classCountSum += classCounts[i]; } double auprcTotal = 0; for (int i = 0; i < this.m_NumClasses; i++) { double temp = this.areaUnderPRC(i); if (classCounts[i] > 0) { // If temp is NaN, we want the sum to also be NaN if count > 0 auprcTotal += (temp * classCounts[i]); } } return auprcTotal / classCountSum; } /** * Returns a copy of the confusion matrix. * * @return a copy of the confusion matrix as a two-dimensional array */ public double[][] confusionMatrix() { double[][] newMatrix = new double[this.m_ConfusionMatrix.length][0]; for (int i = 0; i < this.m_ConfusionMatrix.length; i++) { newMatrix[i] = new double[this.m_ConfusionMatrix[i].length]; System.arraycopy(this.m_ConfusionMatrix[i], 0, newMatrix[i], 0, this.m_ConfusionMatrix[i].length); } return newMatrix; } /** * Performs a (stratified if class is nominal) cross-validation for a classifier on a set of * instances. Now performs a deep copy of the classifier before each call to buildClassifier() (just * in case the classifier is not initialized properly). * * @param classifier * the classifier with any options set. 
 * @param data
   *          the data on which the cross-validation is to be performed
   * @param numFolds
   *          the number of folds for the cross-validation
   * @param random
   *          random number generator for randomization
   * @throws Exception
   *           if a classifier could not be generated successfully or the class is not defined
   */
  public void crossValidateModel(final Classifier classifier, final Instances data, final int numFolds, final Random random) throws Exception {
    // delegate to the varargs overload with no output objects
    this.crossValidateModel(classifier, data, numFolds, random, new Object[0]);
  }

  /**
   * Performs a (stratified if class is nominal) cross-validation for a classifier on a set of
   * instances. Performs a deep copy of the classifier before each call to buildClassifier() (just
   * in case the classifier is not initialized properly).
   *
   * @param classifier
   *          the classifier with any options set.
   * @param data
   *          the data on which the cross-validation is to be performed
   * @param numFolds
   *          the number of folds for the cross-validation
   * @param random
   *          random number generator for randomization
   * @param forPrinting
   *          varargs parameter that, if supplied, is expected to hold a
   *          weka.classifiers.evaluation.output.prediction.AbstractOutput object or a StringBuffer
   *          for model output
   * @throws Exception
   *           if a classifier could not be generated successfully or the class is not defined
   */
  public void crossValidateModel(final Classifier classifier, Instances data, final int numFolds, final Random random, final Object... forPrinting) throws Exception {

    // Make a copy of the data we can reorder
    data = new Instances(data);
    data.randomize(random);
    // stratify so each fold has roughly the same class distribution
    if (data.classAttribute().isNominal()) {
      data.stratify(numFolds);
    }

    // We assume that the first element is a
    // weka.classifiers.evaluation.output.prediction.AbstractOutput object
    AbstractOutput classificationOutput = null;
    if (forPrinting.length > 0 && forPrinting[0] instanceof AbstractOutput) {
      // print the header first
      classificationOutput = (AbstractOutput) forPrinting[0];
      classificationOutput.setHeader(data);
      classificationOutput.printHeader();
    }

    // Do the folds
    for (int i = 0; i < numFolds; i++) {
      Instances train = data.trainCV(numFolds, i, random);
      // priors are re-estimated from each training fold
      this.setPriors(train);
      // deep-copy the classifier so each fold starts from a fresh model
      Classifier copiedClassifier = AbstractClassifier.makeCopy(classifier);
      copiedClassifier.buildClassifier(train);

      // if a StringBuffer (rather than an AbstractOutput) was supplied,
      // append the per-fold model description to it
      if (classificationOutput == null && forPrinting.length > 0) {
        ((StringBuffer) forPrinting[0]).append("\n=== Classifier model (training fold " + (i + 1) + ") ===\n\n" + copiedClassifier);
      }

      Instances test = data.testCV(numFolds, i);
      if (classificationOutput != null) {
        this.evaluateModel(copiedClassifier, test, forPrinting);
      } else {
        this.evaluateModel(copiedClassifier, test);
      }
    }
    this.m_NumFolds = numFolds;

    if (classificationOutput != null) {
      classificationOutput.printFooter();
    }
  }

  /**
   * Performs a (stratified if class is nominal) cross-validation for a classifier on a set of
   * instances.
   *
   * @param classifierString
   *          a string naming the class of the classifier
   * @param data
   *          the data on which the cross-validation is to be performed
   * @param numFolds
   *          the number of folds for the cross-validation
   * @param options
   *          the options to the classifier; any options accepted by the classifier will be removed
   *          from this array
   * @param random
   *          the random number generator for randomizing the data
 * @throws Exception
   *           if a classifier could not be generated successfully or the class is not defined
   */
  public void crossValidateModel(final String classifierString, final Instances data, final int numFolds, final String[] options, final Random random) throws Exception {
    // instantiate the named classifier (consuming its scheme-specific options) and delegate
    this.crossValidateModel(AbstractClassifier.forName(classifierString, options), data, numFolds, random);
  }

  /**
   * Evaluates a classifier with the options given in an array of strings.
   * <p/>
   * Valid options are:
   * <p/>
   * -t filename <br/>
   * Name of the file with the training data. (required)
   * <p/>
   * -T filename <br/>
   * Name of the file with the test data. If missing a cross-validation is performed.
   * <p/>
   * -c index <br/>
   * Index of the class attribute (1, 2, ...; default: last).
   * <p/>
   * -x number <br/>
   * The number of folds for the cross-validation (default: 10).
   * <p/>
   * -no-cv <br/>
   * No cross validation. If no test file is provided, no evaluation is done.
   * <p/>
   * -split-percentage percentage <br/>
   * Sets the percentage for the train/test set split, e.g., 66.
   * <p/>
   * -preserve-order <br/>
   * Preserves the order in the percentage split instead of randomizing the data first with the
   * seed value ('-s').
   * <p/>
   * -s seed <br/>
   * Random number seed for the cross-validation and percentage split (default: 1).
   * <p/>
   * -m filename <br/>
   * The name of a file containing a cost matrix.
   * <p/>
   * -l filename <br/>
   * Loads classifier from the given file. In case the filename ends with ".xml", a PMML file is
   * loaded or, if that fails, options are loaded from XML.
   * <p/>
   * -d filename <br/>
   * Saves classifier built from the training data into the given file. In case the filename ends
   * with ".xml" the options are saved as XML, not the model.
   * <p/>
   * -v <br/>
   * Outputs no statistics for the training data.
   * <p/>
   * -o <br/>
   * Outputs statistics only, not the classifier.
   * <p/>
   * -output-models-for-training-splits <br/>
   * Output models for training splits if cross-validation or percentage-split evaluation is used.
   * <p/>
   * -do-not-output-per-class-statistics <br/>
   * Do not output statistics per class.
   * <p/>
   * -k <br/>
   * Outputs information-theoretic statistics.
   * <p/>
   * -classifications "weka.classifiers.evaluation.output.prediction.AbstractOutput + options" <br/>
   * Uses the specified class for generating the classification output. E.g.:
   * weka.classifiers.evaluation.output.prediction.PlainText or:
   * weka.classifiers.evaluation.output.prediction.CSV
   * <p/>
   * -p range <br/>
   * Outputs predictions for test instances (or the train instances if no test instances provided
   * and -no-cv is used), along with the attributes in the specified range (and nothing else). Use
   * '-p 0' if no attributes are desired.
   * <p/>
   * Deprecated: use "-classifications ..." instead.
   * <p/>
   * -distribution <br/>
   * Outputs the distribution instead of only the prediction in conjunction with the '-p' option
   * (only nominal classes).
   * <p/>
   * Deprecated: use "-classifications ..." instead.
   * <p/>
   * -no-predictions <br/>
   * Turns off the collection of predictions in order to conserve memory.
   * <p/>
   * -r <br/>
   * Outputs cumulative margin distribution (and nothing else).
   * <p/>
   * -g <br/>
   * Only for classifiers that implement "Graphable." Outputs the graph representation of the
   * classifier (and nothing else).
   * <p/>
   * -xml filename | xml-string <br/>
   * Retrieves the options from the XML-data instead of the command line.
   * <p/>
   * -threshold-file file <br/>
   * The file to save the threshold data to. The format is determined by the extension, e.g.,
   * '.arff' for ARFF format or '.csv' for CSV.
* <p/> * <p> * -threshold-label label <br/> * The class label to determine the threshold data for (default is the first label) * <p/> * * @param classifierString * class of machine learning classifier as a string * @param options * the array of string containing the options * @return a string describing the results * @throws Exception * if model could not be evaluated successfully */ public static String evaluateModel(final String classifierString, final String[] options) throws Exception { Classifier classifier; // Create classifier try { classifier = AbstractClassifier.forName(classifierString, null); } catch (Exception e) { throw new Exception("Can't find class with name " + classifierString + '.'); } return evaluateModel(classifier, options); } /** * A test method for this class. Just extracts the first command line argument as a classifier class * name and calls evaluateModel. * * @param args * an array of command line arguments, the first of which must be the class name of a * classifier. */ public static void main(final String[] args) { try { if (args.length == 0) { throw new Exception("The first argument must be the class name of a classifier"); } String classifier = args[0]; args[0] = ""; System.out.println(evaluateModel(classifier, args)); } catch (Exception ex) { ex.printStackTrace(); System.err.println(ex.getMessage()); } } /** * Tries to get the classifier from the provided model file * * @param modelFileName * the name of the model file * @param template * the template header to compare the saved data header to * @return the classifier */ protected static Classifier getModelFromFile(final String modelFileName, final Instances template) throws Exception { Classifier classifier = null; // Do we have a model file or options in XML? 
if (modelFileName.endsWith(".xml")) { // try to load file as PMML first try { PMMLModel pmmlModel = PMMLFactory.getPMMLModel(modelFileName); if (pmmlModel instanceof PMMLClassifier) { classifier = ((PMMLClassifier) pmmlModel); } } catch (Exception ex) { throw new IllegalArgumentException("Failed to read model XML file " + modelFileName); } } else { // Try to load (gzipped) serialized Java objects or KOML InputStream is = new FileInputStream(modelFileName); if (modelFileName.endsWith(".gz")) { is = new GZIPInputStream(is); } if (!modelFileName.endsWith(".koml")) { ObjectInputStream objectInputStream = SerializationHelper.getObjectInputStream(is); classifier = (Classifier) objectInputStream.readObject(); // try and read a header (if present) Instances savedStructure = null; try { savedStructure = (Instances) objectInputStream.readObject(); } catch (Exception ex) { // don't make a fuss } if (savedStructure != null) { // test for compatibility with template if (!(classifier instanceof InputMappedClassifier) && !template.equalHeaders(savedStructure)) { throw new Exception("training and test set are not compatible\n" + template.equalHeadersMsg(savedStructure)); } } objectInputStream.close(); } else if (KOML.isPresent()) { BufferedInputStream xmlInputStream = new BufferedInputStream(is); classifier = (Classifier) KOML.read(xmlInputStream); xmlInputStream.close(); } else { throw new WekaException("KOML library is not present"); } } if (classifier == null) { throw new IllegalArgumentException("Failed to classifier from model file " + modelFileName); } return classifier; } /** * Saves the given classifier, along with the template Instances object (if appropriate) to the * given file. 
* * @param classifier * the classifier * @param template * the template * @param objectOutputFileName * the file name */ protected static void saveClassifier(final Classifier classifier, final Instances template, final String objectOutputFileName) throws Exception { OutputStream os = new FileOutputStream(objectOutputFileName); if (!(objectOutputFileName.endsWith(".xml") || (objectOutputFileName.endsWith(".koml") && KOML.isPresent()))) { if (objectOutputFileName.endsWith(".gz")) { os = new GZIPOutputStream(os); } ObjectOutputStream objectOutputStream = new ObjectOutputStream(os); objectOutputStream.writeObject(classifier); if (template != null) { objectOutputStream.writeObject(template); } objectOutputStream.flush(); objectOutputStream.close(); } else { BufferedOutputStream xmlOutputStream = new BufferedOutputStream(os); if (objectOutputFileName.endsWith(".xml")) { XMLSerialization xmlSerial = new XMLClassifier(); xmlSerial.write(xmlOutputStream, classifier); } else // whether KOML is present has already been checked // if not present -> ".koml" is interpreted as binary - see above if (objectOutputFileName.endsWith(".koml")) { KOML.write(xmlOutputStream, classifier); } xmlOutputStream.close(); } } /** * Evaluates a classifier with the options given in an array of strings. * <p/> * * Valid options are: * <p/> * * -t name of training file <br/> * Name of the file with the training data. (required) * <p/> * * -T name of test file <br/> * Name of the file with the test data. If missing a cross-validation is performed. * <p/> * * -c class index <br/> * Index of the class attribute (1, 2, ...; default: last). * <p/> * * -x number of folds <br/> * The number of folds for the cross-validation (default: 10). * <p/> * * -no-cv <br/> * No cross validation. If no test file is provided, no evaluation is done. * <p/> * * -split-percentage percentage <br/> * Sets the percentage for the train/test set split, e.g., 66. 
* <p/> * * -preserve-order <br/> * Preserves the order in the percentage split instead of randomizing the data first with the seed * value ('-s'). * <p/> * * -s seed <br/> * Random number seed for the cross-validation and percentage split (default: 1). * <p/> * * -m file with cost matrix <br/> * The name of a file containing a cost matrix. * <p/> * * -l filename <br/> * Loads classifier from the given file. In case the filename ends with ".xml",a PMML file is loaded * or, if that fails, options are loaded from XML. * <p/> * * -d filename <br/> * Saves classifier built from the training data into the given file. In case the filename ends with * ".xml" the options are saved XML, not the model. * <p/> * * -v <br/> * Outputs no statistics for the training data. * <p/> * * -o <br/> * Outputs statistics only, not the classifier. * <p/> * * -output-models-for-training-splits <br/> * Output models for training splits if cross-validation or percentage-split evaluation is used. * <p/> * * -do-not-output-per-class-statistics <br/> * Do not output statistics per class. * <p/> * * -k <br/> * Outputs information-theoretic statistics. * <p/> * * -classifications "weka.classifiers.evaluation.output.prediction.AbstractOutput + options" <br/> * Uses the specified class for generating the classification output. E.g.: * weka.classifiers.evaluation.output.prediction.PlainText or : * weka.classifiers.evaluation.output.prediction.CSV * * -p range <br/> * Outputs predictions for test instances (or the train instances if no test instances provided and * -no-cv is used), along with the attributes in the specified range (and nothing else). Use '-p 0' * if no attributes are desired. * <p/> * Deprecated: use "-classifications ..." instead. * <p/> * * -distribution <br/> * Outputs the distribution instead of only the prediction in conjunction with the '-p' option (only * nominal classes). * <p/> * Deprecated: use "-classifications ..." instead. 
* <p/> * * -no-predictions <br/> * Turns off the collection of predictions in order to conserve memory. * <p/> * * -r <br/> * Outputs cumulative margin distribution (and nothing else). * <p/> * * -g <br/> * Only for classifiers that implement "Graphable." Outputs the graph representation of the * classifier (and nothing else). * <p/> * * -xml filename | xml-string <br/> * Retrieves the options from the XML-data instead of the command line. * <p/> * * @param classifier * machine learning classifier * @param options * the array of string containing the options * @throws Exception * if model could not be evaluated successfully * @return a string describing the results */ public static String evaluateModel(Classifier classifier, String[] options) throws Exception { StringBuffer schemeOptionsText = null; long trainTimeStart = 0, trainTimeElapsed = 0, testTimeStart = 0, testTimeElapsed = 0; // help requested? if (Utils.getFlag("h", options) || Utils.getFlag("help", options)) { // global info requested as well? boolean globalInfo = Utils.getFlag("synopsis", options) || Utils.getFlag("info", options); throw new Exception("\nHelp requested." + makeOptionString(classifier, globalInfo)); } // do we get the input from XML instead of normal command-line parameters? 
try { String xml = Utils.getOption("xml", options); if (!xml.equals("")) { options = new XMLOptions(xml).toArray(); // All other options are ignored } } catch (Exception ex) { throw new Exception("\nWeka exception: " + ex.getMessage() + makeOptionString(classifier, false)); } // Store settings for (almost all) general options boolean noCrossValidation = Utils.getFlag("no-cv", options); String classIndexString = Utils.getOption('c', options); String trainFileName = Utils.getOption('t', options); String objectInputFileName = Utils.getOption('l', options); String objectOutputFileName = Utils.getOption('d', options); String testFileName = Utils.getOption('T', options); String foldsString = Utils.getOption('x', options); String seedString = Utils.getOption('s', options); boolean outputModelsForTrainingSplits = Utils.getFlag("output-models-for-training-splits", options); boolean classStatistics = !Utils.getFlag("do-not-output-per-class-statistics", options); boolean noOutput = Utils.getFlag('o', options); boolean trainStatistics = !Utils.getFlag('v', options); boolean printComplexityStatistics = Utils.getFlag('k', options); boolean printMargins = Utils.getFlag('r', options); boolean printGraph = Utils.getFlag('g', options); String sourceClass = Utils.getOption('z', options); boolean printSource = (sourceClass.length() != 0); String thresholdFile = Utils.getOption("threshold-file", options); String thresholdLabel = Utils.getOption("threshold-label", options); boolean forceBatchTraining = Utils.getFlag("force-batch-training", options); String classifications = Utils.getOption("classifications", options); String classificationsOld = Utils.getOption("p", options); String splitPercentageString = Utils.getOption("split-percentage", options); boolean preserveOrder = Utils.getFlag("preserve-order", options); boolean discardPredictions = Utils.getFlag("no-predictions", options); String metricsToToggle = Utils.getOption("toggle", options); // Some other variables that we might set 
later. CostMatrix costMatrix = null; double splitPercentage = -1; int classIndex = -1, actualClassIndex = -1; int seed = 1, folds = 10; Instances train = null, test = null, template = null; AbstractOutput classificationOutput = null; List<String> toggleList = new ArrayList<>(); int labelIndex = 0; // We need to output help if something goes wrong with the option settings try { if (metricsToToggle.length() > 0) { String[] parts = metricsToToggle.split(","); for (String p : parts) { toggleList.add(p.trim().toLowerCase()); } } // Read potential .xml model file that may hold scheme-specific options if ((objectInputFileName.length() != 0) && (objectInputFileName.endsWith(".xml"))) { try { // Try to load scheme-specific options as XMLClassifier OptionHandler cl = (OptionHandler) new XMLClassifier().read(objectInputFileName); options = Stream.concat(Arrays.stream(cl.getOptions()), Arrays.stream(options)).toArray(String[]::new); objectInputFileName = ""; // We have not actually read a built model, only some options } catch (Exception ex) { } } // Basic checking for global parameter settings if (trainFileName.length() == 0) { if (objectInputFileName.length() == 0) { throw new IllegalArgumentException("No training file and no object input file given."); } if (testFileName.length() == 0) { throw new IllegalArgumentException("No training file and no test file given."); } } else if ((objectInputFileName.length() != 0) && ((!(classifier instanceof UpdateableClassifier) || forceBatchTraining) || (testFileName.length() == 0))) { throw new IllegalArgumentException("Classifier not incremental or batch training forced, or no test file provided: can't use model file."); } if ((objectInputFileName.length() != 0) && ((splitPercentageString.length() != 0) || (foldsString.length() != 0))) { throw new IllegalArgumentException("Cannot perform percentage split or cross-validation when model provided."); } if (splitPercentageString.length() != 0) { if (foldsString.length() != 0) { throw new 
IllegalArgumentException("Percentage split cannot be used in conjunction with cross-validation ('-x')."); } splitPercentage = Double.parseDouble(splitPercentageString); if ((splitPercentage <= 0) || (splitPercentage >= 100)) { throw new IllegalArgumentException("Split percentage needs to be >0 and <100."); } } if ((preserveOrder) && (splitPercentage == -1)) { throw new IllegalArgumentException("Split percentage is missing."); } if (discardPredictions && (classifications.length() > 0 || classificationsOld.length() > 0)) { throw new IllegalArgumentException("Cannot both discard and output predictions!"); } if (thresholdFile.length() > 0 && (classifications.length() > 0 || classificationsOld.length() > 0)) { throw new IllegalArgumentException("Cannot output predictions and also write threshold file!"); } if (thresholdFile.length() > 0 && (!trainStatistics && noCrossValidation && splitPercentageString.length() <= 0 && testFileName.length() <= 0)) { throw new IllegalArgumentException("Can only write a threshold file when performance statistics are computed!"); } if (printMargins && (!trainStatistics && noCrossValidation && splitPercentageString.length() <= 0 && testFileName.length() <= 0)) { throw new IllegalArgumentException("Can only print margins when performance statistics are computed!"); } if ((trainFileName.length() == 0) && (printComplexityStatistics)) { // if no training file given, we don't have any priors throw new IllegalArgumentException("Cannot print complexity statistics without training file!"); } if (printGraph && !(classifier instanceof Drawable)) { throw new IllegalArgumentException("Can only print graph if classifier implements Drawable interface!"); } if (printSource && !(classifier instanceof Sourcable)) { throw new IllegalArgumentException("Can only print source if classifier implements Sourcable interface!"); } if (printGraph && !(trainFileName.length() > 0) && !(objectInputFileName.length() > 0)) { throw new IllegalArgumentException("Can only 
print graph if training file or model file is provided!"); } if (printSource && !(trainFileName.length() > 0) && !(objectInputFileName.length() > 0)) { throw new IllegalArgumentException("Can only print source if training file or model file is provided!"); } if (objectInputFileName.length() > 0 && (trainFileName.length() > 0) && (!(classifier instanceof UpdateableClassifier) || forceBatchTraining)) { throw new IllegalArgumentException("Can't use batch training when updating an existing classifier!"); } if (noCrossValidation && testFileName.length() != 0) { throw new IllegalArgumentException("Attempt to turn off cross-validation when explicit test file is provided!"); } if (splitPercentageString.length() > 0 && testFileName.length() != 0) { throw new IllegalArgumentException("Cannot perform percentage split when explicit test file is provided!"); } if ((thresholdFile.length() != 0) && discardPredictions) { throw new IllegalArgumentException("Can only output to threshold file when predictions are not discarded!"); } if (outputModelsForTrainingSplits && (testFileName.length() > 0 || ((splitPercentageString.length() == 0) && noCrossValidation))) { throw new IllegalArgumentException("Can only output models for training splits if cross-validation or " + "percentage split evaluation is performed!"); } // Set seed, number of folds, and class index if required if (seedString.length() != 0) { seed = Integer.parseInt(seedString); } if (foldsString.length() != 0) { folds = Integer.parseInt(foldsString); } if (classIndexString.length() != 0) { if (classIndexString.equals("first")) { classIndex = 1; } else if (classIndexString.equals("last")) { classIndex = -1; } else { classIndex = Integer.parseInt(classIndexString); } } // Try to open training and/or test file if (testFileName.length() != 0) { try { template = test = new DataSource(testFileName).getStructure(); if (classIndex != -1) { test.setClassIndex(classIndex - 1); } else { if ((test.classIndex() == -1) || 
(classIndexString.length() != 0)) { test.setClassIndex(test.numAttributes() - 1); } } actualClassIndex = test.classIndex(); } catch (Exception e) { throw new Exception("Can't open file " + testFileName + '.'); } } if (trainFileName.length() != 0) { try { template = train = new DataSource(trainFileName).getStructure(); if (classIndex != -1) { train.setClassIndex(classIndex - 1); } else { if ((train.classIndex() == -1) || (classIndexString.length() != 0)) { train.setClassIndex(train.numAttributes() - 1); } } actualClassIndex = train.classIndex(); } catch (Exception e) { throw new Exception("Can't open file " + trainFileName + '.'); } } // Need to check whether train and test file are compatible if (!(classifier instanceof weka.classifiers.misc.InputMappedClassifier)) { if ((test != null) && (train != null) && !test.equalHeaders(train)) { throw new IllegalArgumentException("Train and test file not compatible!\n" + test.equalHeadersMsg(train)); } } // Need to check whether output of threshold file is possible if desired by user if ((thresholdFile.length() != 0) && !template.classAttribute().isNominal()) { throw new IllegalArgumentException("Can only output to threshold file when class attribute is nominal!"); } // Need to check whether output of margins is possible if desired by user if (printMargins && !template.classAttribute().isNominal()) { throw new IllegalArgumentException("Can only print margins when class is nominal!"); } // Read model file if appropriate (which may just hold scheme-specific options, and not a built // model) if (objectInputFileName.length() != 0) { Classifier backedUpClassifier = classifier; if (objectInputFileName.endsWith(".xml")) { try { // Try to load scheme-specific options as XMLClassifier OptionHandler cl = (OptionHandler) new XMLClassifier().read(objectInputFileName); options = Stream.concat(Arrays.stream(cl.getOptions()), Arrays.stream(options)).toArray(String[]::new); objectInputFileName = ""; // We have not actually read a built 
model, only some options } catch (IllegalArgumentException ex) { classifier = getModelFromFile(objectInputFileName, template); } } else { classifier = getModelFromFile(objectInputFileName, template); } if (!classifier.getClass().equals(backedUpClassifier.getClass())) { throw new IllegalArgumentException("Loaded classifier is " + classifier.getClass().getCanonicalName() + ", not " + backedUpClassifier.getClass().getCanonicalName() + "!"); } } // Check for cost matrix costMatrix = handleCostOption(Utils.getOption('m', options), template.numClasses()); // Need to check whether use of cost matrix is possible if desired by user if ((costMatrix != null) && !template.classAttribute().isNominal()) { throw new IllegalArgumentException("Can only use cost matrix when class attribute is nominal!"); } // Determine if predictions are to be output if (classifications.length() > 0) { classificationOutput = AbstractOutput.fromCommandline(classifications); if (classificationOutput == null) { throw new IllegalArgumentException("Failed to instantiate class for classification output: " + classifications); } classificationOutput.setHeader(template); } else if (classificationsOld.length() > 0) { // backwards compatible with old "-p range" and "-distribution" options classificationOutput = new PlainText(); classificationOutput.setHeader(template); if (!classificationsOld.equals("0")) { classificationOutput.setAttributes(classificationsOld); } classificationOutput.setOutputDistribution(Utils.getFlag("distribution", options)); } else { if (Utils.getFlag("distribution", options)) { // -distribution flag needs -p option throw new Exception("Cannot print distribution without '-p' option!"); } } if (thresholdLabel.length() != 0) { labelIndex = template.classAttribute().indexOfValue(thresholdLabel); } if (labelIndex == -1) { throw new IllegalArgumentException("Class label '" + thresholdLabel + "' is unknown!"); } // If a model file is given, we shouldn't process scheme-specific options if 
(objectInputFileName.length() == 0) { if (classifier instanceof OptionHandler) { for (String option : options) { if (option.length() != 0) { if (schemeOptionsText == null) { schemeOptionsText = new StringBuffer(); } if (option.indexOf(' ') != -1) { schemeOptionsText.append('"' + option + "\" "); } else { schemeOptionsText.append(option + " "); } } } ((OptionHandler) classifier).setOptions(options); } } Utils.checkForRemainingOptions(options); } catch (Exception ex) { throw new Exception("\nWeka exception: " + ex.getMessage() + makeOptionString(classifier, false)); } // Build classifier on full training set if necessary Classifier classifierBackup = null; if (objectInputFileName.length() == 0) { classifierBackup = AbstractClassifier.makeCopy(classifier); // Back up configured classifier } if (trainFileName.length() > 0) { if (!noOutput || trainStatistics || printGraph || printSource || objectOutputFileName.length() > 0 || (testFileName.length() > 0) || (classificationOutput != null && noCrossValidation && splitPercentage == -1)) { if ((classifier instanceof UpdateableClassifier) && !forceBatchTraining) { // Build classifier incrementally trainTimeStart = System.currentTimeMillis(); DataSource trainSource = new DataSource(trainFileName); trainSource.getStructure(); // Need to advance in the file to get to the data if (objectInputFileName.length() <= 0) { // Only need to initialize classifier if we haven't loaded one classifier.buildClassifier(new Instances(train, 0)); } while (trainSource.hasMoreElements(train)) { ((UpdateableClassifier) classifier).updateClassifier(trainSource.nextElement(train)); } if (classifier instanceof UpdateableBatchProcessor) { ((UpdateableBatchProcessor) classifier).batchFinished(); } trainTimeElapsed = System.currentTimeMillis() - trainTimeStart; } else { // Build classifier in one go Instances tempTrain = new DataSource(trainFileName).getDataSet(actualClassIndex); if (classifier instanceof weka.classifiers.misc.InputMappedClassifier) { 
Instances mappedClassifierDataset = ((weka.classifiers.misc.InputMappedClassifier) classifier).getModelHeader(new Instances(template, 0)); if (!mappedClassifierDataset.equalHeaders(tempTrain)) { for (int zz = 0; zz < tempTrain.numInstances(); zz++) { Instance mapped = ((weka.classifiers.misc.InputMappedClassifier) classifier).constructMappedInstance(tempTrain.instance(zz)); mappedClassifierDataset.add(mapped); } tempTrain = mappedClassifierDataset; } } trainTimeStart = System.currentTimeMillis(); classifier.buildClassifier(tempTrain); trainTimeElapsed = System.currentTimeMillis() - trainTimeStart; } } } // If classifier is drawable output string describing graph if (printGraph) { return ((Drawable) classifier).graph(); } // Output the classifier as equivalent source if (printSource) { return wekaStaticWrapper((Sourcable) classifier, sourceClass); } // Save classifier if appropriate if (objectOutputFileName.length() > 0) { saveClassifier(classifier, template, objectOutputFileName); } // Output model StringBuffer text = new StringBuffer(); if (!(noOutput || printMargins || classificationOutput != null)) { if (classifier instanceof OptionHandler) { if (schemeOptionsText != null) { text.append("\nOptions: " + schemeOptionsText + "\n"); } } text.append("\n=== Classifier model (full training set) ===\n\n" + classifier.toString() + "\n"); text.append("\nTime taken to build model: " + Utils.doubleToString(trainTimeElapsed / 1000.0, 2) + " seconds\n"); } // Stop here if no output of performance statistics or predictions is required and no threshold data // is required if (!trainStatistics && noCrossValidation && splitPercentage != -1 && testFileName.length() <= 0 && classificationOutput == null) { if (noOutput) { return ""; } else { return text.toString(); } } if (!printMargins && (costMatrix != null)) { text.append("\n=== Evaluation Cost Matrix ===\n\n"); text.append(costMatrix.toString()); } // Do we need a mapped classifier header? 
Instances mappedClassifierHeader = null; if (classifier instanceof weka.classifiers.misc.InputMappedClassifier) { mappedClassifierHeader = ((weka.classifiers.misc.InputMappedClassifier) classifier).getModelHeader(new Instances(template, 0)); } // Do we just want to output predictions? if (classificationOutput != null) { // =============================================== // Code path for when predictions are to be output // =============================================== // Set up appropriate header for input mapped classifier if (classifier instanceof weka.classifiers.misc.InputMappedClassifier) { classificationOutput.setHeader(mappedClassifierHeader); } // Set up buffer StringBuffer predsBuff = new StringBuffer(); classificationOutput.setBuffer(predsBuff); if (testFileName.length() > 0) { // CASE 1: SEPARATE TEST SET predsBuff.append("\n=== Predictions on test data ===\n\n"); classificationOutput.print(classifier, new DataSource(testFileName)); } else if (splitPercentage > 0) { // CASE 2: PERCENTAGE SPLIT Instances tmpInst = new DataSource(trainFileName).getDataSet(actualClassIndex); if (!preserveOrder) { tmpInst.randomize(new Random(seed)); } int trainSize = (int) Math.round(tmpInst.numInstances() * splitPercentage / 100); int testSize = tmpInst.numInstances() - trainSize; Instances trainInst = new Instances(tmpInst, 0, trainSize); classifier = AbstractClassifier.makeCopy(classifierBackup); classifier.buildClassifier(trainInst); trainInst = null; Instances testInst = new Instances(tmpInst, trainSize, testSize); predsBuff.append("\n=== Predictions on test split ===\n\n"); classificationOutput.print(classifier, testInst); } else if (!noCrossValidation) { // CASE 3: CROSS-VALIDATION Random random = new Random(seed); Evaluation testingEvaluation = new Evaluation(new Instances(template, 0), costMatrix); if (classifier instanceof weka.classifiers.misc.InputMappedClassifier) { testingEvaluation = new Evaluation(new Instances(mappedClassifierHeader, 0), costMatrix); } 
testingEvaluation.toggleEvalMetrics(toggleList); classifier = AbstractClassifier.makeCopy(classifierBackup); predsBuff.append("\n=== Predictions under cross-validation ===\n\n"); testingEvaluation.crossValidateModel(classifier, new DataSource(trainFileName).getDataSet(actualClassIndex), folds, random, classificationOutput); } else { predsBuff.append("\n=== Predictions on training data ===\n\n"); classificationOutput.print(classifier, new DataSource(trainFileName)); } text.append("\n" + predsBuff); } else { // ================================================ // Code path for when performance is to be computed // ================================================ Evaluation testingEvaluation = new Evaluation(new Instances(template, 0), costMatrix); if (classifier instanceof weka.classifiers.misc.InputMappedClassifier) { testingEvaluation = new Evaluation(new Instances(mappedClassifierHeader, 0), costMatrix); } testingEvaluation.setDiscardPredictions(discardPredictions); testingEvaluation.toggleEvalMetrics(toggleList); // CASE 1: SEPARATE TEST SET if (testFileName.length() > 0) { // Evaluation on the training data required? 
if (train != null && trainStatistics && !printMargins) { Evaluation trainingEvaluation = new Evaluation(new Instances(template, 0), costMatrix); if (classifier instanceof weka.classifiers.misc.InputMappedClassifier) { trainingEvaluation = new Evaluation(new Instances(mappedClassifierHeader, 0), costMatrix); } trainingEvaluation.setDiscardPredictions(discardPredictions); trainingEvaluation.toggleEvalMetrics(toggleList); trainingEvaluation.setPriors(train); testingEvaluation.setPriors(train); DataSource trainSource = new DataSource(trainFileName); trainSource.getStructure(); // We already know the structure but need to advance to the data section while (trainSource.hasMoreElements(train)) { Instance trainInst = trainSource.nextElement(train); trainingEvaluation.updatePriors(trainInst); testingEvaluation.updatePriors(trainInst); } if ((classifier instanceof BatchPredictor) && (((BatchPredictor) classifier).implementsMoreEfficientBatchPrediction())) { testTimeStart = System.currentTimeMillis(); trainingEvaluation.evaluateModel(classifier, new DataSource(trainFileName).getDataSet(actualClassIndex)); testTimeElapsed = System.currentTimeMillis() - testTimeStart; } else { trainSource = new DataSource(trainFileName); trainSource.getStructure(); // We already know the structure but need to advance to the data section testTimeStart = System.currentTimeMillis(); while (trainSource.hasMoreElements(train)) { trainingEvaluation.evaluateModelOnceAndRecordPrediction(classifier, trainSource.nextElement(train)); } testTimeElapsed = System.currentTimeMillis() - testTimeStart; } text.append("\nTime taken to test model on training data: "); text.append(Utils.doubleToString(testTimeElapsed / 1000.0, 2) + " seconds"); text.append(trainingEvaluation.toSummaryString("\n\n=== Error on training data ===\n", printComplexityStatistics)); if (template.classAttribute().isNominal()) { if (classStatistics) { text.append("\n\n" + trainingEvaluation.toClassDetailsString()); } text.append("\n\n" + 
trainingEvaluation.toMatrixString()); } } // Evaluate on test data if (train == null) { testingEvaluation.useNoPriors(); } if (classifier instanceof BatchPredictor && ((BatchPredictor) classifier).implementsMoreEfficientBatchPrediction()) { testTimeStart = System.currentTimeMillis(); testingEvaluation.evaluateModel(classifier, new DataSource(testFileName).getDataSet(test.classIndex())); testTimeElapsed = System.currentTimeMillis() - testTimeStart; } else { DataSource testSource = new DataSource(testFileName); testSource.getStructure(); // We already know the structure but need to advance to the data section testTimeStart = System.currentTimeMillis(); while (testSource.hasMoreElements(test)) { testingEvaluation.evaluateModelOnceAndRecordPrediction(classifier, testSource.nextElement(test)); } testTimeElapsed = System.currentTimeMillis() - testTimeStart; } if (printMargins) { return testingEvaluation.toCumulativeMarginDistributionString(); } text.append("\nTime taken to test model on test data: "); text.append(Utils.doubleToString(testTimeElapsed / 1000.0, 2) + " seconds"); text.append(testingEvaluation.toSummaryString("\n\n=== Error on test data ===\n", printComplexityStatistics)); if (template.classAttribute().isNominal()) { if (classStatistics) { text.append("\n\n" + testingEvaluation.toClassDetailsString()); } text.append("\n\n" + testingEvaluation.toMatrixString()); } } else if (splitPercentage > 0) { // CASE 2: PERCENTAGE SPLIT // Evaluation on the training data required? 
if (train != null && trainStatistics && !printMargins) { Evaluation trainingEvaluation = new Evaluation(new Instances(template, 0), costMatrix); if (classifier instanceof weka.classifiers.misc.InputMappedClassifier) { trainingEvaluation = new Evaluation(new Instances(mappedClassifierHeader, 0), costMatrix); } trainingEvaluation.setDiscardPredictions(discardPredictions); trainingEvaluation.toggleEvalMetrics(toggleList); DataSource trainSource = new DataSource(trainFileName); trainSource.getStructure(); // We already know the structure but need to advance to the data section trainingEvaluation.setPriors(train); while (trainSource.hasMoreElements(train)) { trainingEvaluation.updatePriors(trainSource.nextElement(train)); } if ((classifier instanceof BatchPredictor) && (((BatchPredictor) classifier).implementsMoreEfficientBatchPrediction())) { testTimeStart = System.currentTimeMillis(); trainingEvaluation.evaluateModel(classifier, new DataSource(trainFileName).getDataSet(actualClassIndex)); testTimeElapsed = System.currentTimeMillis() - testTimeStart; } else { trainSource = new DataSource(trainFileName); trainSource.getStructure(); // We already know the structure but need to advance to the data section testTimeStart = System.currentTimeMillis(); while (trainSource.hasMoreElements(train)) { trainingEvaluation.evaluateModelOnceAndRecordPrediction(classifier, trainSource.nextElement(train)); } testTimeElapsed = System.currentTimeMillis() - testTimeStart; } text.append("\nTime taken to test model on training data: "); text.append(Utils.doubleToString(testTimeElapsed / 1000.0, 2) + " seconds"); text.append(trainingEvaluation.toSummaryString("\n\n=== Error on training data ===\n", printComplexityStatistics)); if (template.classAttribute().isNominal()) { if (classStatistics) { text.append("\n\n" + trainingEvaluation.toClassDetailsString()); } text.append("\n\n" + trainingEvaluation.toMatrixString()); } } Instances tmpInst = new 
DataSource(trainFileName).getDataSet(actualClassIndex); if (!preserveOrder) { tmpInst.randomize(new Random(seed)); } int trainSize = (int) Math.round(tmpInst.numInstances() * splitPercentage / 100); int testSize = tmpInst.numInstances() - trainSize; Instances trainInst = new Instances(tmpInst, 0, trainSize); classifier = AbstractClassifier.makeCopy(classifierBackup); classifier.buildClassifier(trainInst); if (outputModelsForTrainingSplits) { text.append("\n=== Classifier model (training split) ===\n\n" + classifier.toString() + "\n"); } testingEvaluation.setPriors(trainInst); trainInst = null; Instances testInst = new Instances(tmpInst, trainSize, testSize); testTimeStart = System.currentTimeMillis(); testingEvaluation.evaluateModel(classifier, testInst); testTimeElapsed = System.currentTimeMillis() - testTimeStart; if (printMargins) { return testingEvaluation.toCumulativeMarginDistributionString(); } text.append("\nTime taken to test model on test split: "); text.append(Utils.doubleToString(testTimeElapsed / 1000.0, 2) + " seconds"); text.append(testingEvaluation.toSummaryString("\n\n=== Error on test split ===\n", printComplexityStatistics)); if (template.classAttribute().isNominal()) { if (classStatistics) { text.append("\n\n" + testingEvaluation.toClassDetailsString()); } text.append("\n\n" + testingEvaluation.toMatrixString()); } } else if (!noCrossValidation) { // CASE 3: CROSS-VALIDATION // Evaluation on the training data required? 
if (train != null && trainStatistics && !printMargins) { Evaluation trainingEvaluation = new Evaluation(new Instances(template, 0), costMatrix); if (classifier instanceof weka.classifiers.misc.InputMappedClassifier) { trainingEvaluation = new Evaluation(new Instances(mappedClassifierHeader, 0), costMatrix); } trainingEvaluation.setDiscardPredictions(discardPredictions); trainingEvaluation.toggleEvalMetrics(toggleList); DataSource trainSource = new DataSource(trainFileName); trainSource.getStructure(); // We already know the structure but need to advance to the data section trainingEvaluation.setPriors(train); while (trainSource.hasMoreElements(train)) { trainingEvaluation.updatePriors(trainSource.nextElement(train)); } if ((classifier instanceof BatchPredictor) && (((BatchPredictor) classifier).implementsMoreEfficientBatchPrediction())) { testTimeStart = System.currentTimeMillis(); trainingEvaluation.evaluateModel(classifier, new DataSource(trainFileName).getDataSet(actualClassIndex)); testTimeElapsed = System.currentTimeMillis() - testTimeStart; } else { trainSource = new DataSource(trainFileName); trainSource.getStructure(); // We already know the structure but need to advance to the data section testTimeStart = System.currentTimeMillis(); while (trainSource.hasMoreElements(train)) { trainingEvaluation.evaluateModelOnceAndRecordPrediction(classifier, trainSource.nextElement(train)); } testTimeElapsed = System.currentTimeMillis() - testTimeStart; } text.append("\nTime taken to test model on training data: "); text.append(Utils.doubleToString(testTimeElapsed / 1000.0, 2) + " seconds"); text.append(trainingEvaluation.toSummaryString("\n\n=== Error on training data ===\n", printComplexityStatistics)); if (template.classAttribute().isNominal()) { if (classStatistics) { text.append("\n\n" + trainingEvaluation.toClassDetailsString()); } text.append("\n\n" + trainingEvaluation.toMatrixString()); } } Random random = new Random(seed); // use untrained (!) 
classifier for cross-validation classifier = AbstractClassifier.makeCopy(classifierBackup); testTimeStart = System.currentTimeMillis(); if (!outputModelsForTrainingSplits) { testingEvaluation.crossValidateModel(classifier, new DataSource(trainFileName).getDataSet(actualClassIndex), folds, random); } else { testingEvaluation.crossValidateModel(classifier, new DataSource(trainFileName).getDataSet(actualClassIndex), folds, random, text); } testTimeElapsed = System.currentTimeMillis() - testTimeStart; if (printMargins) { return testingEvaluation.toCumulativeMarginDistributionString(); } text.append("\nTime taken to perform cross-validation: "); text.append(Utils.doubleToString(testTimeElapsed / 1000.0, 2) + " seconds"); if (template.classAttribute().isNumeric()) { text.append("\n\n\n" + testingEvaluation.toSummaryString("=== Cross-validation ===\n", printComplexityStatistics)); } else { text.append("\n\n\n" + testingEvaluation.toSummaryString("=== Stratified " + "cross-validation ===\n", printComplexityStatistics)); } if (template.classAttribute().isNominal()) { if (classStatistics) { text.append("\n\n" + testingEvaluation.toClassDetailsString()); } text.append("\n\n" + testingEvaluation.toMatrixString()); } } else if (trainStatistics) { // CASE 4: Only evaluate on the training set // Evaluation on the training data required? 
if (train != null) { Evaluation trainingEvaluation = new Evaluation(new Instances(template, 0), costMatrix); if (classifier instanceof weka.classifiers.misc.InputMappedClassifier) { trainingEvaluation = new Evaluation(new Instances(mappedClassifierHeader, 0), costMatrix); } trainingEvaluation.setDiscardPredictions(discardPredictions); trainingEvaluation.toggleEvalMetrics(toggleList); DataSource trainSource = new DataSource(trainFileName); trainSource.getStructure(); // We already know the structure but need to advance to the data section trainingEvaluation.setPriors(train); while (trainSource.hasMoreElements(train)) { trainingEvaluation.updatePriors(trainSource.nextElement(train)); } if ((classifier instanceof BatchPredictor) && (((BatchPredictor) classifier).implementsMoreEfficientBatchPrediction())) { testTimeStart = System.currentTimeMillis(); trainingEvaluation.evaluateModel(classifier, new DataSource(trainFileName).getDataSet(actualClassIndex)); testTimeElapsed = System.currentTimeMillis() - testTimeStart; } else { trainSource = new DataSource(trainFileName); trainSource.getStructure(); // We already know the structure but need to advance to the data section testTimeStart = System.currentTimeMillis(); while (trainSource.hasMoreElements(train)) { trainingEvaluation.evaluateModelOnceAndRecordPrediction(classifier, trainSource.nextElement(train)); } testTimeElapsed = System.currentTimeMillis() - testTimeStart; } if (printMargins) { return trainingEvaluation.toCumulativeMarginDistributionString(); } text.append("\nTime taken to test model on training data: "); text.append(Utils.doubleToString(testTimeElapsed / 1000.0, 2) + " seconds"); text.append(trainingEvaluation.toSummaryString("\n\n=== Error on training data ===\n", printComplexityStatistics)); if (template.classAttribute().isNominal()) { if (classStatistics) { text.append("\n\n" + trainingEvaluation.toClassDetailsString()); } text.append("\n\n" + trainingEvaluation.toMatrixString()); } testingEvaluation = 
trainingEvaluation; } } // Output threshold file if (thresholdFile.length() != 0) { ThresholdCurve tc = new ThresholdCurve(); Instances result = tc.getCurve(testingEvaluation.predictions(), labelIndex); DataSink.write(thresholdFile, result); } } return text.toString(); } /** * Attempts to load a cost matrix. * * @param costFileName * the filename of the cost matrix * @param numClasses * the number of classes that should be in the cost matrix (only used if the cost file is * in old format). * @return a <code>CostMatrix</code> value, or null if costFileName is empty * @throws Exception * if an error occurs. */ protected static CostMatrix handleCostOption(final String costFileName, final int numClasses) throws Exception { if ((costFileName != null) && (costFileName.length() != 0)) { Reader costReader = null; try { costReader = new BufferedReader(new FileReader(costFileName)); } catch (Exception e) { throw new Exception("Can't open file " + e.getMessage() + '.'); } try { // First try as a proper cost matrix format return new CostMatrix(costReader); } catch (Exception ex) { try { // Now try as the poxy old format :-) // System.err.println("Attempting to read old format cost file"); try { costReader.close(); // Close the old one costReader = new BufferedReader(new FileReader(costFileName)); } catch (Exception e) { throw new Exception("Can't open file " + e.getMessage() + '.'); } CostMatrix costMatrix = new CostMatrix(numClasses); // System.err.println("Created default cost matrix"); costMatrix.readOldFormat(costReader); return costMatrix; // System.err.println("Read old format"); } catch (Exception e2) { // re-throw the original exception // System.err.println("Re-throwing original exception"); throw ex; } } } else { return null; } } /** * Evaluates the classifier on a given set of instances. Note that the data must have exactly the * same format (e.g. order of attributes) as the data used to train the classifier! Otherwise the * results will generally be meaningless. 
* * @param classifier * machine learning classifier * @param data * set of test instances for evaluation * @param forPredictionsPrinting * varargs parameter that, if supplied, is expected to hold a * weka.classifiers.evaluation.output.prediction.AbstractOutput object * @return the predictions * @throws Exception * if model could not be evaluated successfully */ public double[] evaluateModel(final Classifier classifier, final Instances data, final Object... forPredictionsPrinting) throws Exception { // for predictions printing AbstractOutput classificationOutput = null; double predictions[] = new double[data.numInstances()]; if (forPredictionsPrinting.length > 0) { classificationOutput = (AbstractOutput) forPredictionsPrinting[0]; } if (classifier instanceof BatchPredictor && ((BatchPredictor) classifier).implementsMoreEfficientBatchPrediction()) { // make a copy and set the class to missing Instances dataPred = new Instances(data); for (int i = 0; i < data.numInstances(); i++) { dataPred.instance(i).setClassMissing(); } double[][] preds = ((BatchPredictor) classifier).distributionsForInstances(dataPred); for (int i = 0; i < data.numInstances(); i++) { double[] p = preds[i]; predictions[i] = this.evaluationForSingleInstance(p, data.instance(i), true); if (classificationOutput != null) { classificationOutput.printClassification(p, data.instance(i), i); } } } else { // Need to be able to collect predictions if appropriate (for AUC) for (int i = 0; i < data.numInstances(); i++) { predictions[i] = this.evaluateModelOnceAndRecordPrediction(classifier, data.instance(i)); if (classificationOutput != null) { classificationOutput.printClassification(classifier, data.instance(i), i); } } } return predictions; } /** * Evaluates the supplied distribution on a single instance. 
* * @param dist * the supplied distribution * @param instance * the test instance to be classified * @param storePredictions * whether to store predictions for nominal classifier * @return the prediction * @throws Exception * if model could not be evaluated successfully */ public double evaluationForSingleInstance(final double[] dist, final Instance instance, final boolean storePredictions) throws Exception { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } double pred; if (this.m_ClassIsNominal) { pred = Utils.maxIndex(dist); if (dist[(int) pred] <= 0) { pred = Utils.missingValue(); } this.updateStatsForClassifier(dist, instance); if (storePredictions && !this.m_DiscardPredictions) { if (this.m_Predictions == null) { this.m_Predictions = new ArrayList<>(); } this.m_Predictions.add(new NominalPrediction(instance.classValue(), dist, instance.weight())); } } else { pred = dist[0]; this.updateStatsForPredictor(pred, instance); if (storePredictions && !this.m_DiscardPredictions) { if (this.m_Predictions == null) { this.m_Predictions = new ArrayList<>(); } this.m_Predictions.add(new NumericPrediction(instance.classValue(), pred, instance.weight())); } } return pred; } /** * Evaluates the classifier on a single instance and records the prediction. 
* * @param classifier * machine learning classifier * @param instance * the test instance to be classified * @param storePredictions * whether to store predictions for nominal classifier * @return the prediction made by the clasifier * @throws Exception * if model could not be evaluated successfully or the data contains string attributes */ protected double evaluationForSingleInstance(final Classifier classifier, Instance instance, final boolean storePredictions) throws Exception { Instance classMissing = (Instance) instance.copy(); classMissing.setDataset(instance.dataset()); if (classifier instanceof weka.classifiers.misc.InputMappedClassifier) { instance = (Instance) instance.copy(); instance = ((weka.classifiers.misc.InputMappedClassifier) classifier).constructMappedInstance(instance); // System.out.println("Mapped instance " + instance); int mappedClass = ((weka.classifiers.misc.InputMappedClassifier) classifier).getMappedClassIndex(); classMissing.setMissing(mappedClass); } else { classMissing.setClassMissing(); } // System.out.println("instance (to predict)" + classMissing); double pred = this.evaluationForSingleInstance(classifier.distributionForInstance(classMissing), instance, storePredictions); // We don't need to do the following if the class is nominal because in that // case // entropy and coverage statistics are always computed. 
if (!this.m_ClassIsNominal) { if (!instance.classIsMissing() && !Utils.isMissingValue(pred)) { if (classifier instanceof IntervalEstimator) { this.updateStatsForIntervalEstimator((IntervalEstimator) classifier, classMissing, instance.classValue()); } else { this.m_CoverageStatisticsAvailable = false; } if (classifier instanceof ConditionalDensityEstimator) { this.updateStatsForConditionalDensityEstimator((ConditionalDensityEstimator) classifier, classMissing, instance.classValue()); } else { this.m_ComplexityStatisticsAvailable = false; } } } return pred; } /** * Evaluates the classifier on a single instance and records the prediction. * * @param classifier * machine learning classifier * @param instance * the test instance to be classified * @return the prediction made by the clasifier * @throws Exception * if model could not be evaluated successfully or the data contains string attributes */ public double evaluateModelOnceAndRecordPrediction(final Classifier classifier, final Instance instance) throws Exception { return this.evaluationForSingleInstance(classifier, instance, true); } /** * Evaluates the classifier on a single instance. * * @param classifier * machine learning classifier * @param instance * the test instance to be classified * @return the prediction made by the clasifier * @throws Exception * if model could not be evaluated successfully or the data contains string attributes */ public double evaluateModelOnce(final Classifier classifier, final Instance instance) throws Exception { return this.evaluationForSingleInstance(classifier, instance, false); } /** * Evaluates the supplied distribution on a single instance. 
* * @param dist * the supplied distribution * @param instance * the test instance to be classified * @return the prediction * @throws Exception * if model could not be evaluated successfully */ public double evaluateModelOnce(final double[] dist, final Instance instance) throws Exception { return this.evaluationForSingleInstance(dist, instance, false); } /** * Evaluates the supplied distribution on a single instance. * * @param dist * the supplied distribution * @param instance * the test instance to be classified * @return the prediction * @throws Exception * if model could not be evaluated successfully */ public double evaluateModelOnceAndRecordPrediction(final double[] dist, final Instance instance) throws Exception { return this.evaluationForSingleInstance(dist, instance, true); } /** * Evaluates the supplied prediction on a single instance. * * @param prediction * the supplied prediction * @param instance * the test instance to be classified * @throws Exception * if model could not be evaluated successfully */ public void evaluateModelOnce(final double prediction, final Instance instance) throws Exception { this.evaluateModelOnce(this.makeDistribution(prediction), instance); } /** * Returns the predictions that have been collected. * * @return a reference to the FastVector containing the predictions that have been collected. This * should be null if no predictions have been collected. */ public ArrayList<Prediction> predictions() { if (this.m_DiscardPredictions) { return null; } else { return this.m_Predictions; } } /** * Wraps a static classifier in enough source to test using the weka class libraries. * * @param classifier * a Sourcable Classifier * @param className * the name to give to the source code class * @return the source for a static classifier that can be tested with weka libraries. 
 * @throws Exception
 *           if code-generation fails
 */
public static String wekaStaticWrapper(final Sourcable classifier, final String className) throws Exception {
  StringBuffer result = new StringBuffer();
  String staticClassifier = classifier.toSource(className);
  // Header comment of the generated source file.
  result.append("// Generated with Weka " + Version.VERSION + "\n");
  result.append("//\n");
  result.append("// This code is public domain and comes with no warranty.\n");
  result.append("//\n");
  result.append("// Timestamp: " + new Date() + "\n");
  result.append("\n");
  result.append("package weka.classifiers;\n");
  result.append("\n");
  result.append("import weka.core.Attribute;\n");
  result.append("import weka.core.Capabilities;\n");
  result.append("import weka.core.Capabilities.Capability;\n");
  result.append("import weka.core.Instance;\n");
  result.append("import weka.core.Instances;\n");
  result.append("import weka.core.RevisionUtils;\n");
  result.append("import weka.classifiers.Classifier;\nimport weka.classifiers.AbstractClassifier;\n");
  result.append("\n");
  result.append("public class WekaWrapper\n");
  result.append(" extends AbstractClassifier {\n");
  // globalInfo
  result.append("\n");
  result.append(" /**\n");
  result.append(" * Returns only the toString() method.\n");
  result.append(" *\n");
  result.append(" * @return a string describing the classifier\n");
  result.append(" */\n");
  result.append(" public String globalInfo() {\n");
  result.append(" return toString();\n");
  result.append(" }\n");
  // getCapabilities
  result.append("\n");
  result.append(" /**\n");
  result.append(" * Returns the capabilities of this classifier.\n");
  result.append(" *\n");
  result.append(" * @return the capabilities\n");
  result.append(" */\n");
  result.append(" public Capabilities getCapabilities() {\n");
  // Emits source that rebuilds the wrapped classifier's capabilities object.
  result.append(((Classifier) classifier).getCapabilities().toSource("result", 4));
  result.append(" return result;\n");
  result.append(" }\n");
  // buildClassifier
  result.append("\n");
  result.append(" /**\n");
  result.append(" * only checks the data against its capabilities.\n");
  result.append(" *\n");
  result.append(" * @param i the training data\n");
  result.append(" */\n");
  result.append(" public void buildClassifier(Instances i) throws Exception {\n");
  result.append(" // can classifier handle the data?\n");
  result.append(" getCapabilities().testWithFail(i);\n");
  result.append(" }\n");
  // classifyInstance
  result.append("\n");
  result.append(" /**\n");
  result.append(" * Classifies the given instance.\n");
  result.append(" *\n");
  result.append(" * @param i the instance to classify\n");
  result.append(" * @return the classification result\n");
  result.append(" */\n");
  result.append(" public double classifyInstance(Instance i) throws Exception {\n");
  // The generated wrapper converts the instance to an Object[] (String for
  // nominal values, Double for numeric) expected by the static classify method.
  result.append(" Object[] s = new Object[i.numAttributes()];\n");
  result.append(" \n");
  result.append(" for (int j = 0; j < s.length; j++) {\n");
  result.append(" if (!i.isMissing(j)) {\n");
  result.append(" if (i.attribute(j).isNominal())\n");
  result.append(" s[j] = new String(i.stringValue(j));\n");
  result.append(" else if (i.attribute(j).isNumeric())\n");
  result.append(" s[j] = new Double(i.value(j));\n");
  result.append(" }\n");
  result.append(" }\n");
  result.append(" \n");
  result.append(" // set class value to missing\n");
  result.append(" s[i.classIndex()] = null;\n");
  result.append(" \n");
  result.append(" return " + className + ".classify(s);\n");
  result.append(" }\n");
  // getRevision
  result.append("\n");
  result.append(" /**\n");
  result.append(" * Returns the revision string.\n");
  result.append(" * \n");
  result.append(" * @return the revision\n");
  result.append(" */\n");
  result.append(" public String getRevision() {\n");
  result.append(" return RevisionUtils.extract(\"1.0\");\n");
  result.append(" }\n");
  // toString
  result.append("\n");
  result.append(" /**\n");
  result.append(" * Returns only the classnames and what classifier it is based on.\n");
  result.append(" *\n");
  result.append(" * @return a short description\n");
  result.append(" */\n");
  result.append(" public String toString() {\n");
  result.append(" return \"Auto-generated classifier wrapper, based on " + classifier.getClass().getName() + " (generated with Weka " + Version.VERSION + ").\\n" + "\" + this.getClass().getName() + \"/" + className + "\";\n");
  result.append(" }\n");
  // main
  result.append("\n");
  result.append(" /**\n");
  result.append(" * Runs the classfier from commandline.\n");
  result.append(" *\n");
  result.append(" * @param args the commandline arguments\n");
  result.append(" */\n");
  result.append(" public static void main(String args[]) {\n");
  result.append(" runClassifier(new WekaWrapper(), args);\n");
  result.append(" }\n");
  result.append("}\n");
  // actual classifier code
  result.append("\n");
  result.append(staticClassifier);
  return result.toString();
}

/**
 * Gets the number of test instances that had a known class value (actually the sum of the weights
 * of test instances with known class value).
 *
 * @return the number of test instances with known class
 */
public final double numInstances() {
  return this.m_WithClass;
}

/**
 * Gets the coverage of the test cases by the predicted regions at the confidence level specified
 * when evaluation was performed.
 *
 * @return the coverage of the test cases by the predicted regions
 */
public final double coverageOfTestCasesByPredictedRegions() {
  // Only meaningful if the classifier supplied interval estimates.
  if (!this.m_CoverageStatisticsAvailable) {
    return Double.NaN;
  }
  return 100 * this.m_TotalCoverage / this.m_WithClass;
}

/**
 * Gets the average size of the predicted regions, relative to the range of the target in the
 * training data, at the confidence level specified when evaluation was performed.
* * @return the average size of the predicted regions */ public final double sizeOfPredictedRegions() { if (this.m_NoPriors || !this.m_CoverageStatisticsAvailable) { return Double.NaN; } return 100 * this.m_TotalSizeOfRegions / this.m_WithClass; } /** * Gets the weight of the instances that had a non-missing class value * * @return the weight of the instances that had a non-missing class value */ public final double withClass() { return this.m_WithClass; } /** * Gets the weight of the instances that had missing class values * * @return the weight of the instances that had missing class values */ public final double missingClass() { return this.m_MissingClass; } /** * Gets the number of instances incorrectly classified (that is, for which an incorrect prediction * was made). (Actually the sum of the weights of these instances) * * @return the number of incorrectly classified instances */ public final double incorrect() { return this.m_Incorrect; } /** * Gets the percentage of instances incorrectly classified (that is, for which an incorrect * prediction was made). * * @return the percent of incorrectly classified instances (between 0 and 100) */ public final double pctIncorrect() { return 100 * this.m_Incorrect / this.m_WithClass; } /** * Gets the total cost, that is, the cost of each prediction times the weight of the instance, * summed over all instances. * * @return the total cost */ public final double totalCost() { return this.m_TotalCost; } /** * Gets the average cost, that is, total cost of misclassifications (incorrect plus unclassified) * over the total number of instances. * * @return the average cost. */ public final double avgCost() { return this.m_TotalCost / this.m_WithClass; } /** * Gets the number of instances correctly classified (that is, for which a correct prediction was * made). 
(Actually the sum of the weights of these instances) * * @return the number of correctly classified instances */ public final double correct() { return this.m_Correct; } /** * Gets the percentage of instances correctly classified (that is, for which a correct prediction * was made). * * @return the percent of correctly classified instances (between 0 and 100) */ public final double pctCorrect() { return 100 * this.m_Correct / this.m_WithClass; } /** * Gets the number of instances not classified (that is, for which no prediction was made by the * classifier). (Actually the sum of the weights of these instances) * * @return the number of unclassified instances */ public final double unclassified() { return this.m_Unclassified; } /** * Gets the percentage of instances not classified (that is, for which no prediction was made by the * classifier). * * @return the percent of unclassified instances (between 0 and 100) */ public final double pctUnclassified() { return 100 * this.m_Unclassified / this.m_WithClass; } /** * Returns the estimated error rate or the root mean squared error (if the class is numeric). If a * cost matrix was given this error rate gives the average cost. * * @return the estimated error rate (between 0 and 1, or between 0 and maximum cost) */ public final double errorRate() { if (!this.m_ClassIsNominal) { return Math.sqrt(this.m_SumSqrErr / (this.m_WithClass - this.m_Unclassified)); } if (this.m_CostMatrix == null) { return this.m_Incorrect / this.m_WithClass; } else { return this.avgCost(); } } /** * Returns value of kappa statistic if class is nominal. 
* * @return the value of the kappa statistic */ public final double kappa() { double[] sumRows = new double[this.m_ConfusionMatrix.length]; double[] sumColumns = new double[this.m_ConfusionMatrix.length]; double sumOfWeights = 0; for (int i = 0; i < this.m_ConfusionMatrix.length; i++) { for (int j = 0; j < this.m_ConfusionMatrix.length; j++) { sumRows[i] += this.m_ConfusionMatrix[i][j]; sumColumns[j] += this.m_ConfusionMatrix[i][j]; sumOfWeights += this.m_ConfusionMatrix[i][j]; } } double correct = 0, chanceAgreement = 0; for (int i = 0; i < this.m_ConfusionMatrix.length; i++) { chanceAgreement += (sumRows[i] * sumColumns[i]); correct += this.m_ConfusionMatrix[i][i]; } chanceAgreement /= (sumOfWeights * sumOfWeights); correct /= sumOfWeights; if (chanceAgreement < 1) { return (correct - chanceAgreement) / (1 - chanceAgreement); } else { return 1; } } /** * Returns the correlation coefficient if the class is numeric. * * @return the correlation coefficient * @throws Exception * if class is not numeric */ public final double correlationCoefficient() throws Exception { if (this.m_ClassIsNominal) { throw new Exception("Can't compute correlation coefficient: " + "class is nominal!"); } double correlation = 0; double varActual = this.m_SumSqrClass - this.m_SumClass * this.m_SumClass / (this.m_WithClass - this.m_Unclassified); double varPredicted = this.m_SumSqrPredicted - this.m_SumPredicted * this.m_SumPredicted / (this.m_WithClass - this.m_Unclassified); double varProd = this.m_SumClassPredicted - this.m_SumClass * this.m_SumPredicted / (this.m_WithClass - this.m_Unclassified); if (varActual * varPredicted <= 0) { correlation = 0.0; } else { correlation = varProd / Math.sqrt(varActual * varPredicted); } return correlation; } /** * Returns the mean absolute error. Refers to the error of the predicted values for numeric classes, * and the error of the predicted probability distribution for nominal classes. 
* * @return the mean absolute error */ public final double meanAbsoluteError() { return this.m_SumAbsErr / (this.m_WithClass - this.m_Unclassified); } /** * Returns the mean absolute error of the prior. * * @return the mean absolute error */ public final double meanPriorAbsoluteError() { if (this.m_NoPriors) { return Double.NaN; } return this.m_SumPriorAbsErr / this.m_WithClass; } /** * Returns the relative absolute error. * * @return the relative absolute error * @throws Exception * if it can't be computed */ public final double relativeAbsoluteError() throws Exception { if (this.m_NoPriors) { return Double.NaN; } return 100 * this.meanAbsoluteError() / this.meanPriorAbsoluteError(); } /** * Returns the root mean squared error. * * @return the root mean squared error */ public final double rootMeanSquaredError() { return Math.sqrt(this.m_SumSqrErr / (this.m_WithClass - this.m_Unclassified)); } /** * Returns the root mean prior squared error. * * @return the root mean prior squared error */ public final double rootMeanPriorSquaredError() { if (this.m_NoPriors) { return Double.NaN; } return Math.sqrt(this.m_SumPriorSqrErr / this.m_WithClass); } /** * Returns the root relative squared error if the class is numeric. * * @return the root relative squared error */ public final double rootRelativeSquaredError() { if (this.m_NoPriors) { return Double.NaN; } return 100.0 * this.rootMeanSquaredError() / this.rootMeanPriorSquaredError(); } /** * Returns the mean base-2 log loss wrt the null model. Just calls SFMeanPriorEntropy. * * @return the null model entropy per instance */ public final double priorEntropy() { // The previous version of this method calculated // mean base-2 log loss for the null model wrt the // instances passed into setPriors(). Now, this method will // return the loss for the null model wrt to the test // instances (whatever they are). return this.SFMeanPriorEntropy(); } /** * Return the total Kononenko & Bratko Information score in bits. 
* * @return the K&B information score * @throws Exception * if the class is not nominal */ public final double KBInformation() throws Exception { if (!this.m_ClassIsNominal) { throw new Exception("Can't compute K&B Info score: " + "class numeric!"); } if (this.m_NoPriors) { return Double.NaN; } return this.m_SumKBInfo; } /** * Return the Kononenko & Bratko Information score in bits per instance. * * @return the K&B information score * @throws Exception * if the class is not nominal */ public final double KBMeanInformation() throws Exception { if (!this.m_ClassIsNominal) { throw new Exception("Can't compute K&B Info score: class numeric!"); } if (this.m_NoPriors) { return Double.NaN; } return this.m_SumKBInfo / (this.m_WithClass - this.m_Unclassified); } /** * Return the Kononenko & Bratko Relative Information score. Differs slightly from the expression * used in KB's paper because it uses the mean log-loss of the TEST instances wrt to the null model * for normalization. * * @return the K&B relative information score * @throws Exception * if the class is not nominal */ public final double KBRelativeInformation() throws Exception { if (!this.m_ClassIsNominal) { throw new Exception("Can't compute K&B Info score: " + "class numeric!"); } if (this.m_NoPriors) { return Double.NaN; } return 100.0 * this.KBMeanInformation() / this.priorEntropy(); } /** * Returns the base-2 log loss wrt the null model. * * @return the total null model entropy */ public final double SFPriorEntropy() { if (this.m_NoPriors || !this.m_ComplexityStatisticsAvailable) { return Double.NaN; } return this.m_SumPriorEntropy; } /** * Returns the mean base-2 log loss wrt the null model. * * @return the null model entropy per instance */ public final double SFMeanPriorEntropy() { if (this.m_NoPriors || !this.m_ComplexityStatisticsAvailable) { return Double.NaN; } return this.m_SumPriorEntropy / this.m_WithClass; } /** * Returns the base-2 log loss wrt the scheme. 
* * @return the total scheme entropy */ public final double SFSchemeEntropy() { if (!this.m_ComplexityStatisticsAvailable) { return Double.NaN; } return this.m_SumSchemeEntropy; } /** * Returns the mean base-2 log loss wrt the scheme. * * @return the scheme entropy per instance */ public final double SFMeanSchemeEntropy() { if (!this.m_ComplexityStatisticsAvailable) { return Double.NaN; } return this.m_SumSchemeEntropy / (this.m_WithClass - this.m_Unclassified); } /** * Returns the difference in base-2 log loss between null model and scheme. * * @return the total "SF score" */ public final double SFEntropyGain() { if (this.m_NoPriors || !this.m_ComplexityStatisticsAvailable) { return Double.NaN; } return this.m_SumPriorEntropy - this.m_SumSchemeEntropy; } /** * Returns the mean difference in base-2 log loss between null model and scheme. * * @return the "SF score" per instance */ public final double SFMeanEntropyGain() { if (this.m_NoPriors || !this.m_ComplexityStatisticsAvailable) { return Double.NaN; } return (this.m_SumPriorEntropy - this.m_SumSchemeEntropy) / (this.m_WithClass - this.m_Unclassified); } /** * Output the cumulative margin distribution as a string suitable for input for gnuplot or similar * package. 
* * @return the cumulative margin distribution * @throws Exception * if the class attribute is nominal */ public String toCumulativeMarginDistributionString() throws Exception { if (!this.m_ClassIsNominal) { throw new Exception("Class must be nominal for margin distributions"); } String result = ""; double cumulativeCount = 0; double margin; for (int i = 0; i <= k_MarginResolution; i++) { if (this.m_MarginCounts[i] != 0) { cumulativeCount += this.m_MarginCounts[i]; margin = i * 2.0 / k_MarginResolution - 1.0; result = result + Utils.doubleToString(margin, 7, 3) + ' ' + Utils.doubleToString(cumulativeCount * 100 / this.m_WithClass, 7, 3) + '\n'; } else if (i == 0) { result = Utils.doubleToString(-1.0, 7, 3) + ' ' + Utils.doubleToString(0, 7, 3) + '\n'; } } return result; } /** * Calls toSummaryString() with no title and no complexity stats. * * @return a summary description of the classifier evaluation */ @Override public String toSummaryString() { return this.toSummaryString("", false); } /** * Calls toSummaryString() with a default title. * * @param printComplexityStatistics * if true, complexity statistics are returned as well * @return the summary string */ public String toSummaryString(final boolean printComplexityStatistics) { return this.toSummaryString("=== Summary ===\n", printComplexityStatistics); } /** * Outputs the performance statistics in summary form. Lists number (and percentage) of instances * classified correctly, incorrectly and unclassified. Outputs the total number of instances * classified, and the number of instances (if any) that had no class value provided. 
 *
 * @param title
 *          the title for the statistics
 * @param printComplexityStatistics
 *          if true, complexity statistics are returned as well
 * @return the summary as a String
 */
public String toSummaryString(final String title, boolean printComplexityStatistics) {
  StringBuffer text = new StringBuffer();
  // Complexity statistics require priors; silently downgrade if they are disabled.
  if (printComplexityStatistics && this.m_NoPriors) {
    printComplexityStatistics = false;
    System.err.println("Priors disabled, cannot print complexity statistics!");
  }
  text.append(title + "\n");
  try {
    if (this.m_WithClass > 0) {
      if (this.m_ClassIsNominal) {
        // Which of the standard nominal-class metrics the user asked for.
        boolean displayCorrect = this.m_metricsToDisplay.contains("correct");
        boolean displayIncorrect = this.m_metricsToDisplay.contains("incorrect");
        boolean displayKappa = this.m_metricsToDisplay.contains("kappa");
        boolean displayTotalCost = this.m_metricsToDisplay.contains("total cost");
        boolean displayAverageCost = this.m_metricsToDisplay.contains("average cost");
        if (displayCorrect) {
          text.append("Correctly Classified Instances ");
          text.append(Utils.doubleToString(this.correct(), 12, 4) + " " + Utils.doubleToString(this.pctCorrect(), 12, 4) + " %\n");
        }
        if (displayIncorrect) {
          text.append("Incorrectly Classified Instances ");
          text.append(Utils.doubleToString(this.incorrect(), 12, 4) + " " + Utils.doubleToString(this.pctIncorrect(), 12, 4) + " %\n");
        }
        if (displayKappa) {
          text.append("Kappa statistic ");
          text.append(Utils.doubleToString(this.kappa(), 12, 4) + "\n");
        }
        // Cost figures are only meaningful when a cost matrix was supplied.
        if (this.m_CostMatrix != null) {
          if (displayTotalCost) {
            text.append("Total Cost ");
            text.append(Utils.doubleToString(this.totalCost(), 12, 4) + "\n");
          }
          if (displayAverageCost) {
            text.append("Average Cost ");
            text.append(Utils.doubleToString(this.avgCost(), 12, 4) + "\n");
          }
        }
        if (printComplexityStatistics) {
          boolean displayKBRelative = this.m_metricsToDisplay.contains("kb relative");
          boolean displayKBInfo = this.m_metricsToDisplay.contains("kb information");
          if (displayKBRelative) {
            text.append("K&B Relative Info Score ");
            text.append(Utils.doubleToString(this.KBRelativeInformation(), 12, 4) + " %\n");
          }
          if (displayKBInfo) {
            text.append("K&B Information Score ");
            text.append(Utils.doubleToString(this.KBInformation(), 12, 4) + " bits");
            text.append(Utils.doubleToString(this.KBMeanInformation(), 12, 4) + " bits/instance\n");
          }
        }
        if (this.m_pluginMetrics != null) {
          for (AbstractEvaluationMetric m : this.m_pluginMetrics) {
            if (m instanceof StandardEvaluationMetric && m.appliesToNominalClass() && !m.appliesToNumericClass()) {
              String metricName = m.getMetricName().toLowerCase();
              boolean display = this.m_metricsToDisplay.contains(metricName);
              // For the GUI and the command line StandardEvaluationMetrics
              // are an "all or nothing" jobby (because we need the user to
              // supply how they should be displayed and formatted via the
              // toSummaryString() method
              if (display) {
                String formattedS = ((StandardEvaluationMetric) m).toSummaryString();
                text.append(formattedS);
              }
            }
          }
        }
      } else {
        // Numeric class branch.
        boolean displayCorrelation = this.m_metricsToDisplay.contains("correlation");
        if (displayCorrelation) {
          text.append("Correlation coefficient ");
          text.append(Utils.doubleToString(this.correlationCoefficient(), 12, 4) + "\n");
        }
        if (this.m_pluginMetrics != null) {
          for (AbstractEvaluationMetric m : this.m_pluginMetrics) {
            if (m instanceof StandardEvaluationMetric && !m.appliesToNominalClass() && m.appliesToNumericClass()) {
              String metricName = m.getMetricName().toLowerCase();
              boolean display = this.m_metricsToDisplay.contains(metricName);
              if (display) {
                String formattedS = ((StandardEvaluationMetric) m).toSummaryString();
                text.append(formattedS);
              }
            }
          }
        }
      }
      if (printComplexityStatistics && this.m_ComplexityStatisticsAvailable) {
        boolean displayComplexityOrder0 = this.m_metricsToDisplay.contains("complexity 0");
        boolean displayComplexityScheme = this.m_metricsToDisplay.contains("complexity scheme");
        boolean displayComplexityImprovement = this.m_metricsToDisplay.contains("complexity improvement");
        if (displayComplexityOrder0) {
          text.append("Class complexity | order 0 ");
          text.append(Utils.doubleToString(this.SFPriorEntropy(), 12, 4) + " bits");
          text.append(Utils.doubleToString(this.SFMeanPriorEntropy(), 12, 4) + " bits/instance\n");
        }
        if (displayComplexityScheme) {
          text.append("Class complexity | scheme ");
          text.append(Utils.doubleToString(this.SFSchemeEntropy(), 12, 4) + " bits");
          text.append(Utils.doubleToString(this.SFMeanSchemeEntropy(), 12, 4) + " bits/instance\n");
        }
        if (displayComplexityImprovement) {
          text.append("Complexity improvement (Sf) ");
          text.append(Utils.doubleToString(this.SFEntropyGain(), 12, 4) + " bits");
          text.append(Utils.doubleToString(this.SFMeanEntropyGain(), 12, 4) + " bits/instance\n");
        }
      }
      if (printComplexityStatistics && this.m_pluginMetrics != null) {
        for (AbstractEvaluationMetric m : this.m_pluginMetrics) {
          if (m instanceof InformationTheoreticEvaluationMetric) {
            if ((this.m_ClassIsNominal && m.appliesToNominalClass()) || (!this.m_ClassIsNominal && m.appliesToNumericClass())) {
              String metricName = m.getMetricName().toLowerCase();
              boolean display = this.m_metricsToDisplay.contains(metricName);
              // The metric is only shown if ALL its individual statistics were requested.
              List<String> statNames = m.getStatisticNames();
              for (String s : statNames) {
                display = (display && this.m_metricsToDisplay.contains(s.toLowerCase()));
              }
              if (display) {
                String formattedS = ((InformationTheoreticEvaluationMetric) m).toSummaryString();
                text.append(formattedS);
              }
            }
          }
        }
      }
      boolean displayMAE = this.m_metricsToDisplay.contains("mae");
      boolean displayRMSE = this.m_metricsToDisplay.contains("rmse");
      boolean displayRAE = this.m_metricsToDisplay.contains("rae");
      boolean displayRRSE = this.m_metricsToDisplay.contains("rrse");
      if (displayMAE) {
        text.append("Mean absolute error ");
        text.append(Utils.doubleToString(this.meanAbsoluteError(), 12, 4) + "\n");
      }
      if (displayRMSE) {
        text.append("Root mean squared error ");
        text.append(Utils.doubleToString(this.rootMeanSquaredError(), 12, 4) + "\n");
      }
      // Relative errors need the prior statistics.
      if (!this.m_NoPriors) {
        if (displayRAE) {
          text.append("Relative absolute error ");
          text.append(Utils.doubleToString(this.relativeAbsoluteError(), 12, 4) + " %\n");
        }
        if (displayRRSE) {
          text.append("Root relative squared error ");
          text.append(Utils.doubleToString(this.rootRelativeSquaredError(), 12, 4) + " %\n");
        }
      }
      if (this.m_pluginMetrics != null) {
        for (AbstractEvaluationMetric m : this.m_pluginMetrics) {
          if (m instanceof StandardEvaluationMetric && m.appliesToNominalClass() && m.appliesToNumericClass()) {
            String metricName = m.getMetricName().toLowerCase();
            boolean display = this.m_metricsToDisplay.contains(metricName);
            List<String> statNames = m.getStatisticNames();
            for (String s : statNames) {
              display = (display && this.m_metricsToDisplay.contains(s.toLowerCase()));
            }
            if (display) {
              String formattedS = ((StandardEvaluationMetric) m).toSummaryString();
              text.append(formattedS);
            }
          }
        }
      }
      if (this.m_CoverageStatisticsAvailable) {
        boolean displayCoverage = this.m_metricsToDisplay.contains("coverage");
        boolean displayRegionSize = this.m_metricsToDisplay.contains("region size");
        if (displayCoverage) {
          text.append("Coverage of cases (" + Utils.doubleToString(this.m_ConfLevel, 4, 2) + " level) ");
          text.append(Utils.doubleToString(this.coverageOfTestCasesByPredictedRegions(), 12, 4) + " %\n");
        }
        if (!this.m_NoPriors) {
          if (displayRegionSize) {
            text.append("Mean rel. region size (" + Utils.doubleToString(this.m_ConfLevel, 4, 2) + " level) ");
            text.append(Utils.doubleToString(this.sizeOfPredictedRegions(), 12, 4) + " %\n");
          }
        }
      }
    }
    if (Utils.gr(this.unclassified(), 0)) {
      text.append("UnClassified Instances ");
      text.append(Utils.doubleToString(this.unclassified(), 12, 4) + " " + Utils.doubleToString(this.pctUnclassified(), 12, 4) + " %\n");
    }
    text.append("Total Number of Instances ");
    text.append(Utils.doubleToString(this.m_WithClass, 12, 4) + "\n");
    if (this.m_MissingClass > 0) {
      text.append("Ignored Class Unknown Instances ");
      text.append(Utils.doubleToString(this.m_MissingClass, 12, 4) + "\n");
    }
  } catch (Exception ex) {
    // Should never occur since the class is known to be nominal
    // here
    System.err.println("Arggh - Must be a bug in Evaluation class");
    ex.printStackTrace();
  }
  return text.toString();
}

/**
 * Calls toMatrixString() with a default title.
 *
 * @return the confusion matrix as a string
 * @throws Exception
 *           if the class is numeric
 */
public String toMatrixString() throws Exception {
  return this.toMatrixString("=== Confusion Matrix ===\n");
}

/**
 * Outputs the performance statistics as a classification confusion matrix. For each class value,
 * shows the distribution of predicted class values.
* * @param title * the title for the confusion matrix * @return the confusion matrix as a String * @throws Exception * if the class is numeric */ public String toMatrixString(final String title) throws Exception { StringBuffer text = new StringBuffer(); char[] IDChars = { 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z' }; int IDWidth; boolean fractional = false; if (!this.m_ClassIsNominal) { throw new Exception("Evaluation: No confusion matrix possible!"); } // Find the maximum value in the matrix // and check for fractional display requirement double maxval = 0; for (int i = 0; i < this.m_NumClasses; i++) { for (int j = 0; j < this.m_NumClasses; j++) { double current = this.m_ConfusionMatrix[i][j]; if (current < 0) { current *= -10; } if (current > maxval) { maxval = current; } double fract = current - Math.rint(current); if (!fractional && ((Math.log(fract) / Math.log(10)) >= -2)) { fractional = true; } } } IDWidth = 1 + Math.max((int) (Math.log(maxval) / Math.log(10) + (fractional ? 3 : 0)), (int) (Math.log(this.m_NumClasses) / Math.log(IDChars.length))); text.append(title).append("\n"); for (int i = 0; i < this.m_NumClasses; i++) { if (fractional) { text.append(" ").append(this.num2ShortID(i, IDChars, IDWidth - 3)).append(" "); } else { text.append(" ").append(this.num2ShortID(i, IDChars, IDWidth)); } } text.append(" <-- classified as\n"); for (int i = 0; i < this.m_NumClasses; i++) { for (int j = 0; j < this.m_NumClasses; j++) { text.append(" ").append(Utils.doubleToString(this.m_ConfusionMatrix[i][j], IDWidth, (fractional ? 2 : 0))); } text.append(" | ").append(this.num2ShortID(i, IDChars, IDWidth)).append(" = ").append(this.m_ClassNames[i]).append("\n"); } return text.toString(); } /** * Generates a breakdown of the accuracy for each class (with default title), incorporating various * information-retrieval statistics, such as true/false positive rate, precision/recall/F-Measure. 
* Should be useful for ROC curves, recall/precision curves. * * @return the statistics presented as a string * @throws Exception * if class is not nominal */ public String toClassDetailsString() throws Exception { return this.toClassDetailsString("=== Detailed Accuracy By Class ===\n"); } /** * Generates a breakdown of the accuracy for each class, incorporating various information-retrieval * statistics, such as true/false positive rate, precision/recall/F-Measure. Should be useful for * ROC curves, recall/precision curves. * * @param title * the title to prepend the stats string with * @return the statistics presented as a string * @throws Exception * if class is not nominal */ public String toClassDetailsString(final String title) throws Exception { if (!this.m_ClassIsNominal) { throw new Exception("Evaluation: No per class statistics possible!"); } boolean displayTP = this.m_metricsToDisplay.contains("tp rate"); boolean displayFP = this.m_metricsToDisplay.contains("fp rate"); boolean displayP = this.m_metricsToDisplay.contains("precision"); boolean displayR = this.m_metricsToDisplay.contains("recall"); boolean displayFM = this.m_metricsToDisplay.contains("f-measure"); boolean displayMCC = this.m_metricsToDisplay.contains("mcc"); boolean displayROC = this.m_metricsToDisplay.contains("roc area"); boolean displayPRC = this.m_metricsToDisplay.contains("prc area"); StringBuffer text = new StringBuffer(title + "\n " + (displayTP ? "TP Rate " : "") + (displayFP ? "FP Rate " : "") + (displayP ? "Precision " : "") + (displayR ? "Recall " : "") + (displayFM ? "F-Measure " : "") + (displayMCC ? "MCC " : "") + (displayROC ? "ROC Area " : "") + (displayPRC ? 
"PRC Area " : "")); if (this.m_pluginMetrics != null && this.m_pluginMetrics.size() > 0) { for (AbstractEvaluationMetric m : this.m_pluginMetrics) { if (m instanceof InformationRetrievalEvaluationMetric && m.appliesToNominalClass()) { String metricName = m.getMetricName().toLowerCase(); if (this.m_metricsToDisplay.contains(metricName)) { List<String> statNames = m.getStatisticNames(); for (String name : statNames) { if (this.m_metricsToDisplay.contains(name.toLowerCase())) { if (name.length() < 7) { name = Utils.padRight(name, 7); } text.append(name).append(" "); } } } } } } text.append("Class\n"); for (int i = 0; i < this.m_NumClasses; i++) { text.append(" "); if (displayTP) { double tpr = this.truePositiveRate(i); if (Utils.isMissingValue(tpr)) { text.append("? "); } else { text.append(String.format("%-9.3f", tpr)); } } if (displayFP) { double fpr = this.falsePositiveRate(i); if (Utils.isMissingValue(fpr)) { text.append("? "); } else { text.append(String.format("%-9.3f", fpr)); } } if (displayP) { double p = this.precision(i); if (Utils.isMissingValue(p)) { text.append("? "); } else { text.append(String.format("%-11.3f", this.precision(i))); } } if (displayR) { double r = this.recall(i); if (Utils.isMissingValue(r)) { text.append("? "); } else { text.append(String.format("%-9.3f", this.recall(i))); } } if (displayFM) { double fm = this.fMeasure(i); if (Utils.isMissingValue(fm)) { text.append("? "); } else { text.append(String.format("%-11.3f", this.fMeasure(i))); } } if (displayMCC) { double mat = this.matthewsCorrelationCoefficient(i); if (Utils.isMissingValue(mat)) { text.append("? "); } else { text.append(String.format("%-9.3f", this.matthewsCorrelationCoefficient(i))); } } if (displayROC) { double rocVal = this.areaUnderROC(i); if (Utils.isMissingValue(rocVal)) { text.append("? "); } else { text.append(String.format("%-10.3f", rocVal)); } } if (displayPRC) { double prcVal = this.areaUnderPRC(i); if (Utils.isMissingValue(prcVal)) { text.append("? 
"); } else { text.append(String.format("%-10.3f", prcVal)); } } if (this.m_pluginMetrics != null && this.m_pluginMetrics.size() > 0) { for (AbstractEvaluationMetric m : this.m_pluginMetrics) { if (m instanceof InformationRetrievalEvaluationMetric && m.appliesToNominalClass()) { String metricName = m.getMetricName().toLowerCase(); if (this.m_metricsToDisplay.contains(metricName)) { List<String> statNames = m.getStatisticNames(); for (String name : statNames) { if (this.m_metricsToDisplay.contains(name.toLowerCase())) { double stat = ((InformationRetrievalEvaluationMetric) m).getStatistic(name, i); if (name.length() < 7) { name = Utils.padRight(name, 7); } if (Utils.isMissingValue(stat)) { Utils.padRight("?", name.length()); } else { text.append(String.format("%-" + name.length() + ".3f", stat)).append(" "); } } } } } } } text.append(this.m_ClassNames[i]).append('\n'); } text.append("Weighted Avg. "); if (displayTP) { double wtpr = this.weightedTruePositiveRate(); if (Utils.isMissingValue(wtpr)) { text.append("? "); } else { text.append(String.format("%-9.3f", wtpr)); } } if (displayFP) { double wfpr = this.weightedFalsePositiveRate(); if (Utils.isMissingValue(wfpr)) { text.append("? "); } else { text.append(String.format("%-9.3f", wfpr)); } } if (displayP) { double wp = this.weightedPrecision(); if (Utils.isMissingValue(wp)) { text.append("? "); } else { text.append(String.format("%-11.3f", wp)); } } if (displayR) { double wr = this.weightedRecall(); if (Utils.isMissingValue(wr)) { text.append("? "); } else { text.append(String.format("%-9.3f", wr)); } } if (displayFM) { double wf = this.weightedFMeasure(); if (Utils.isMissingValue(wf)) { text.append("? "); } else { text.append(String.format("%-11.3f", wf)); } } if (displayMCC) { double wmc = this.weightedMatthewsCorrelation(); if (Utils.isMissingValue(wmc)) { text.append("? 
"); } else { text.append(String.format("%-9.3f", wmc)); } } if (displayROC) { double wroc = this.weightedAreaUnderROC(); if (Utils.isMissingValue(wroc)) { text.append("? "); } else { text.append(String.format("%-10.3f", wroc)); } } if (displayPRC) { double wprc = this.weightedAreaUnderPRC(); if (Utils.isMissingValue(wprc)) { text.append("? "); } else { text.append(String.format("%-10.3f", wprc)); } } if (this.m_pluginMetrics != null && this.m_pluginMetrics.size() > 0) { for (AbstractEvaluationMetric m : this.m_pluginMetrics) { if (m instanceof InformationRetrievalEvaluationMetric && m.appliesToNominalClass()) { String metricName = m.getMetricName().toLowerCase(); if (this.m_metricsToDisplay.contains(metricName)) { List<String> statNames = m.getStatisticNames(); for (String name : statNames) { if (this.m_metricsToDisplay.contains(name.toLowerCase())) { double stat = ((InformationRetrievalEvaluationMetric) m).getClassWeightedAverageStatistic(name); if (name.length() < 7) { name = Utils.padRight(name, 7); } if (Utils.isMissingValue(stat)) { Utils.padRight("?", name.length()); } else { text.append(String.format("%-" + name.length() + ".3f", stat)).append(" "); } } } } } } } text.append("\n"); return text.toString(); } /** * Calculate the number of true positives with respect to a particular class. This is defined as * <p/> * * <pre> * correctly classified positives * </pre> * * @param classIndex * the index of the class to consider as "positive" * @return the true positive rate */ public double numTruePositives(final int classIndex) { double correct = 0; for (int j = 0; j < this.m_NumClasses; j++) { if (j == classIndex) { correct += this.m_ConfusionMatrix[classIndex][j]; } } return correct; } /** * Calculate the true positive rate with respect to a particular class. 
This is defined as * <p/> * * <pre> * correctly classified positives * ------------------------------ * total positives * </pre> * * @param classIndex * the index of the class to consider as "positive" * @return the true positive rate */ public double truePositiveRate(final int classIndex) { double correct = 0, total = 0; for (int j = 0; j < this.m_NumClasses; j++) { if (j == classIndex) { correct += this.m_ConfusionMatrix[classIndex][j]; } total += this.m_ConfusionMatrix[classIndex][j]; } return correct / total; } /** * Calculates the weighted (by class size) true positive rate. * * @return the weighted true positive rate. */ public double weightedTruePositiveRate() { double[] classCounts = new double[this.m_NumClasses]; double classCountSum = 0; for (int i = 0; i < this.m_NumClasses; i++) { for (int j = 0; j < this.m_NumClasses; j++) { classCounts[i] += this.m_ConfusionMatrix[i][j]; } classCountSum += classCounts[i]; } double truePosTotal = 0; for (int i = 0; i < this.m_NumClasses; i++) { double temp = this.truePositiveRate(i); if (classCounts[i] > 0) { // If temp is NaN, we want the sum to also be NaN if count > 0 truePosTotal += (temp * classCounts[i]); } } return truePosTotal / classCountSum; } /** * Calculate the number of true negatives with respect to a particular class. This is defined as * <p/> * * <pre> * correctly classified negatives * </pre> * * @param classIndex * the index of the class to consider as "positive" * @return the true positive rate */ public double numTrueNegatives(final int classIndex) { double correct = 0; for (int i = 0; i < this.m_NumClasses; i++) { if (i != classIndex) { for (int j = 0; j < this.m_NumClasses; j++) { if (j != classIndex) { correct += this.m_ConfusionMatrix[i][j]; } } } } return correct; } /** * Calculate the true negative rate with respect to a particular class. 
This is defined as * <p/> * * <pre> * correctly classified negatives * ------------------------------ * total negatives * </pre> * * @param classIndex * the index of the class to consider as "positive" * @return the true positive rate */ public double trueNegativeRate(final int classIndex) { double correct = 0, total = 0; for (int i = 0; i < this.m_NumClasses; i++) { if (i != classIndex) { for (int j = 0; j < this.m_NumClasses; j++) { if (j != classIndex) { correct += this.m_ConfusionMatrix[i][j]; } total += this.m_ConfusionMatrix[i][j]; } } } return correct / total; } /** * Calculates the weighted (by class size) true negative rate. * * @return the weighted true negative rate. */ public double weightedTrueNegativeRate() { double[] classCounts = new double[this.m_NumClasses]; double classCountSum = 0; for (int i = 0; i < this.m_NumClasses; i++) { for (int j = 0; j < this.m_NumClasses; j++) { classCounts[i] += this.m_ConfusionMatrix[i][j]; } classCountSum += classCounts[i]; } double trueNegTotal = 0; for (int i = 0; i < this.m_NumClasses; i++) { double temp = this.trueNegativeRate(i); if (classCounts[i] > 0) { // If temp is NaN, we want the sum to also be NaN if count > 0 trueNegTotal += (temp * classCounts[i]); } } return trueNegTotal / classCountSum; } /** * Calculate number of false positives with respect to a particular class. This is defined as * <p/> * * <pre> * incorrectly classified negatives * </pre> * * @param classIndex * the index of the class to consider as "positive" * @return the false positive rate */ public double numFalsePositives(final int classIndex) { double incorrect = 0; for (int i = 0; i < this.m_NumClasses; i++) { if (i != classIndex) { for (int j = 0; j < this.m_NumClasses; j++) { if (j == classIndex) { incorrect += this.m_ConfusionMatrix[i][j]; } } } } return incorrect; } /** * Calculate the false positive rate with respect to a particular class. 
This is defined as * <p/> * * <pre> * incorrectly classified negatives * -------------------------------- * total negatives * </pre> * * @param classIndex * the index of the class to consider as "positive" * @return the false positive rate */ public double falsePositiveRate(final int classIndex) { double incorrect = 0, total = 0; for (int i = 0; i < this.m_NumClasses; i++) { if (i != classIndex) { for (int j = 0; j < this.m_NumClasses; j++) { if (j == classIndex) { incorrect += this.m_ConfusionMatrix[i][j]; } total += this.m_ConfusionMatrix[i][j]; } } } return incorrect / total; } /** * Calculates the weighted (by class size) false positive rate. * * @return the weighted false positive rate. */ public double weightedFalsePositiveRate() { double[] classCounts = new double[this.m_NumClasses]; double classCountSum = 0; for (int i = 0; i < this.m_NumClasses; i++) { for (int j = 0; j < this.m_NumClasses; j++) { classCounts[i] += this.m_ConfusionMatrix[i][j]; } classCountSum += classCounts[i]; } double falsePosTotal = 0; for (int i = 0; i < this.m_NumClasses; i++) { double temp = this.falsePositiveRate(i); if (classCounts[i] > 0) { // If temp is NaN, we want the sum to also be NaN if count > 0 falsePosTotal += (temp * classCounts[i]); } } return falsePosTotal / classCountSum; } /** * Calculate number of false negatives with respect to a particular class. This is defined as * <p/> * * <pre> * incorrectly classified positives * </pre> * * @param classIndex * the index of the class to consider as "positive" * @return the false positive rate */ public double numFalseNegatives(final int classIndex) { double incorrect = 0; for (int i = 0; i < this.m_NumClasses; i++) { if (i == classIndex) { for (int j = 0; j < this.m_NumClasses; j++) { if (j != classIndex) { incorrect += this.m_ConfusionMatrix[i][j]; } } } } return incorrect; } /** * Calculate the false negative rate with respect to a particular class. 
This is defined as * <p/> * * <pre> * incorrectly classified positives * -------------------------------- * total positives * </pre> * * @param classIndex * the index of the class to consider as "positive" * @return the false positive rate */ public double falseNegativeRate(final int classIndex) { double incorrect = 0, total = 0; for (int i = 0; i < this.m_NumClasses; i++) { if (i == classIndex) { for (int j = 0; j < this.m_NumClasses; j++) { if (j != classIndex) { incorrect += this.m_ConfusionMatrix[i][j]; } total += this.m_ConfusionMatrix[i][j]; } } } return incorrect / total; } /** * Calculates the weighted (by class size) false negative rate. * * @return the weighted false negative rate. */ public double weightedFalseNegativeRate() { double[] classCounts = new double[this.m_NumClasses]; double classCountSum = 0; for (int i = 0; i < this.m_NumClasses; i++) { for (int j = 0; j < this.m_NumClasses; j++) { classCounts[i] += this.m_ConfusionMatrix[i][j]; } classCountSum += classCounts[i]; } double falseNegTotal = 0; for (int i = 0; i < this.m_NumClasses; i++) { double temp = this.falseNegativeRate(i); if (classCounts[i] > 0) { // If temp is NaN, we want the sum to also be NaN if count > 0 falseNegTotal += (temp * classCounts[i]); } } return falseNegTotal / classCountSum; } /** * Calculates the matthews correlation coefficient (sometimes called phi coefficient) for the * supplied class * * @param classIndex * the index of the class to compute the matthews correlation coefficient for * * @return the mathews correlation coefficient */ public double matthewsCorrelationCoefficient(final int classIndex) { double numTP = this.numTruePositives(classIndex); double numTN = this.numTrueNegatives(classIndex); double numFP = this.numFalsePositives(classIndex); double numFN = this.numFalseNegatives(classIndex); double n = (numTP * numTN) - (numFP * numFN); double d = (numTP + numFP) * (numTP + numFN) * (numTN + numFP) * (numTN + numFN); d = Math.sqrt(d); return n / d; } /** * 
Calculates the weighted (by class size) matthews correlation coefficient. * * @return the weighted matthews correlation coefficient. */ public double weightedMatthewsCorrelation() { double[] classCounts = new double[this.m_NumClasses]; double classCountSum = 0; for (int i = 0; i < this.m_NumClasses; i++) { for (int j = 0; j < this.m_NumClasses; j++) { classCounts[i] += this.m_ConfusionMatrix[i][j]; } classCountSum += classCounts[i]; } double mccTotal = 0; for (int i = 0; i < this.m_NumClasses; i++) { double temp = this.matthewsCorrelationCoefficient(i); if (classCounts[i] > 0) { // If temp is NaN, we want the sum to also be NaN if count > 0 mccTotal += (temp * classCounts[i]); } } return mccTotal / classCountSum; } /** * Calculate the recall with respect to a particular class. This is defined as * <p/> * * <pre> * correctly classified positives * ------------------------------ * total positives * </pre> * <p/> * (Which is also the same as the truePositiveRate.) * * @param classIndex * the index of the class to consider as "positive" * @return the recall */ public double recall(final int classIndex) { return this.truePositiveRate(classIndex); } /** * Calculates the weighted (by class size) recall. * * @return the weighted recall. */ public double weightedRecall() { return this.weightedTruePositiveRate(); } /** * Calculate the precision with respect to a particular class. This is defined as * <p/> * * <pre> * correctly classified positives * ------------------------------ * total predicted as positive * </pre> * * @param classIndex * the index of the class to consider as "positive" * @return the precision */ public double precision(final int classIndex) { double correct = 0, total = 0; for (int i = 0; i < this.m_NumClasses; i++) { if (i == classIndex) { correct += this.m_ConfusionMatrix[i][classIndex]; } total += this.m_ConfusionMatrix[i][classIndex]; } return correct / total; } /** * Calculates the weighted (by class size) precision. 
* * @return the weighted precision. */ public double weightedPrecision() { double[] classCounts = new double[this.m_NumClasses]; double classCountSum = 0; for (int i = 0; i < this.m_NumClasses; i++) { for (int j = 0; j < this.m_NumClasses; j++) { classCounts[i] += this.m_ConfusionMatrix[i][j]; } classCountSum += classCounts[i]; } double precisionTotal = 0; for (int i = 0; i < this.m_NumClasses; i++) { double temp = this.precision(i); if (classCounts[i] > 0) { // If temp is NaN, we want the sum to also be NaN if count > 0 precisionTotal += (temp * classCounts[i]); } } return precisionTotal / classCountSum; } /** * Calculate the F-Measure with respect to a particular class. This is defined as * <p/> * * <pre> * 2 * recall * precision * ---------------------- * recall + precision * </pre> * * Returns zero when both precision and recall are zero * * @param classIndex * the index of the class to consider as "positive" * @return the F-Measure */ public double fMeasure(final int classIndex) { double precision = this.precision(classIndex); double recall = this.recall(classIndex); if ((precision == 0) && (recall == 0)) { return 0; } return 2 * precision * recall / (precision + recall); } /** * Calculates the macro weighted (by class size) average F-Measure. * * @return the weighted F-Measure. */ public double weightedFMeasure() { double[] classCounts = new double[this.m_NumClasses]; double classCountSum = 0; for (int i = 0; i < this.m_NumClasses; i++) { for (int j = 0; j < this.m_NumClasses; j++) { classCounts[i] += this.m_ConfusionMatrix[i][j]; } classCountSum += classCounts[i]; } double fMeasureTotal = 0; for (int i = 0; i < this.m_NumClasses; i++) { double temp = this.fMeasure(i); if (classCounts[i] > 0) { // If temp is NaN, we want the sum to also be NaN if count > 0 fMeasureTotal += (temp * classCounts[i]); } } return fMeasureTotal / classCountSum; } /** * Unweighted macro-averaged F-measure. 
If some classes not present in the test set, they're just * skipped (since recall is undefined there anyway) . * * @return unweighted macro-averaged F-measure. */ public double unweightedMacroFmeasure() { weka.experiment.Stats rr = new weka.experiment.Stats(); for (int c = 0; c < this.m_NumClasses; c++) { // skip if no testing positive cases of this class if (this.numTruePositives(c) + this.numFalseNegatives(c) > 0) { rr.add(this.fMeasure(c)); } } rr.calculateDerived(); return rr.mean; } /** * Unweighted micro-averaged F-measure. If some classes not present in the test set, they have no * effect. * * Note: if the test set is *single-label*, then this is the same as accuracy. * * @return unweighted micro-averaged F-measure. */ public double unweightedMicroFmeasure() { double tp = 0; double fn = 0; double fp = 0; for (int c = 0; c < this.m_NumClasses; c++) { tp += this.numTruePositives(c); fn += this.numFalseNegatives(c); fp += this.numFalsePositives(c); } return 2 * tp / (2 * tp + fn + fp); } /** * Sets the class prior probabilities. 
 *
 * @param train
 *          the training instances used to determine the prior probabilities
 * @throws Exception
 *           if the class attribute of the instances is not set
 */
public void setPriors(final Instances train) throws Exception {
  this.m_NoPriors = false;

  if (!this.m_ClassIsNominal) {
    // Numeric class: reset the interval-estimation state, then record every
    // non-missing class value so the prior can be estimated later.
    this.m_NumTrainClassVals = 0;
    this.m_TrainClassVals = null;
    this.m_TrainClassWeights = null;
    this.m_PriorEstimator = null;

    this.m_MinTarget = Double.MAX_VALUE;
    this.m_MaxTarget = -Double.MAX_VALUE;

    for (int i = 0; i < train.numInstances(); i++) {
      Instance currentInst = train.instance(i);
      if (!currentInst.classIsMissing()) {
        // also updates m_MinTarget/m_MaxTarget as a side effect —
        // TODO confirm against addNumericTrainClass, defined elsewhere
        this.addNumericTrainClass(currentInst.classValue(), currentInst.weight());
      }
    }

    // For a numeric class the "prior" is the weighted mean: slot 0 holds the
    // weighted sum of class values, m_ClassPriorsSum the total weight.
    this.m_ClassPriors[0] = this.m_ClassPriorsSum = 0;
    for (int i = 0; i < train.numInstances(); i++) {
      if (!train.instance(i).classIsMissing()) {
        this.m_ClassPriors[0] += train.instance(i).classValue() * train.instance(i).weight();
        this.m_ClassPriorsSum += train.instance(i).weight();
      }
    }
  } else {
    // Nominal class: Laplace-style initialisation — one pseudo-count per class.
    for (int i = 0; i < this.m_NumClasses; i++) {
      this.m_ClassPriors[i] = 1;
    }
    this.m_ClassPriorsSum = this.m_NumClasses;
    for (int i = 0; i < train.numInstances(); i++) {
      if (!train.instance(i).classIsMissing()) {
        this.m_ClassPriors[(int) train.instance(i).classValue()] += train.instance(i).weight();
        this.m_ClassPriorsSum += train.instance(i).weight();
      }
    }
    // Target range for a nominal class is simply [0, numClasses).
    this.m_MaxTarget = this.m_NumClasses;
    this.m_MinTarget = 0;
  }
}

/**
 * Get the current weighted class counts.
 *
 * @return the weighted class counts
 */
public double[] getClassPriors() {
  // NOTE(review): returns the internal array directly — callers can mutate
  // the priors; confirm whether a defensive copy is wanted.
  return this.m_ClassPriors;
}

/**
 * Updates the class prior probabilities or the mean respectively (when incrementally training).
 *
 * @param instance
 *          the new training instance seen
 * @throws Exception
 *           if the class of the instance is not set
 */
public void updatePriors(final Instance instance) throws Exception {
  if (!instance.classIsMissing()) {
    if (!this.m_ClassIsNominal) {
      // Numeric class: extend the recorded class values and the running
      // weighted sum (slot 0) / total weight used for the mean.
      this.addNumericTrainClass(instance.classValue(), instance.weight());
      this.m_ClassPriors[0] += instance.classValue() * instance.weight();
      this.m_ClassPriorsSum += instance.weight();
    } else {
      // Nominal class: bump the weighted count for the observed class.
      this.m_ClassPriors[(int) instance.classValue()] += instance.weight();
      this.m_ClassPriorsSum += instance.weight();
    }
  }
}

/**
 * disables the use of priors, e.g., in case of de-serialized schemes that have no access to the
 * original training set, but are evaluated on a set set.
 */
public void useNoPriors() {
  this.m_NoPriors = true;
}

/**
 * Tests whether the current evaluation object is equal to another evaluation object.
 *
 * NOTE(review): equals() is overridden without a matching hashCode(), and the
 * double fields are compared with != (so NaN-valued statistics never compare
 * equal) — confirm whether that is intended before using instances as map keys.
 *
 * @param obj
 *          the object to compare against
 * @return true if the two objects are equal
 */
@Override
public boolean equals(final Object obj) {
  if ((obj == null) || !(obj.getClass().equals(this.getClass()))) {
    return false;
  }
  Evaluation cmp = (Evaluation) obj;
  // Field-by-field comparison of all accumulated statistics.
  if (this.m_ClassIsNominal != cmp.m_ClassIsNominal) {
    return false;
  }
  if (this.m_NumClasses != cmp.m_NumClasses) {
    return false;
  }
  if (this.m_Incorrect != cmp.m_Incorrect) {
    return false;
  }
  if (this.m_Correct != cmp.m_Correct) {
    return false;
  }
  if (this.m_Unclassified != cmp.m_Unclassified) {
    return false;
  }
  if (this.m_MissingClass != cmp.m_MissingClass) {
    return false;
  }
  if (this.m_WithClass != cmp.m_WithClass) {
    return false;
  }
  if (this.m_SumErr != cmp.m_SumErr) {
    return false;
  }
  if (this.m_SumAbsErr != cmp.m_SumAbsErr) {
    return false;
  }
  if (this.m_SumSqrErr != cmp.m_SumSqrErr) {
    return false;
  }
  if (this.m_SumClass != cmp.m_SumClass) {
    return false;
  }
  if (this.m_SumSqrClass != cmp.m_SumSqrClass) {
    return false;
  }
  if (this.m_SumPredicted != cmp.m_SumPredicted) {
    return false;
  }
  if (this.m_SumSqrPredicted != cmp.m_SumSqrPredicted) {
    return false;
  }
  if (this.m_SumClassPredicted != cmp.m_SumClassPredicted) {
    return false;
  }
  // For nominal classes the full confusion matrix must also match.
  if (this.m_ClassIsNominal) {
    for (int i = 0; i < this.m_NumClasses; i++) {
      for (int j = 0; j < this.m_NumClasses; j++) {
        if (this.m_ConfusionMatrix[i][j] != cmp.m_ConfusionMatrix[i][j]) {
          return false;
        }
      }
    }
  }
  return true;
}

/**
 * Make up the help string giving all the command line options.
 *
 * @param classifier
 *          the classifier to include options for
 * @param globalInfo
 *          include the global information string for the classifier (if available).
 * @return a string detailing the valid command line options
 */
protected static String makeOptionString(final Classifier classifier, final boolean globalInfo) {
  StringBuffer optionsText = new StringBuffer("");

  // General options
  optionsText.append("\n\nGeneral options:\n\n");
  optionsText.append("-h or -help\n");
  optionsText.append("\tOutput help information.\n");
  optionsText.append("-synopsis or -info\n");
  optionsText.append("\tOutput synopsis for classifier (use in conjunction " + " with -h)\n");
  optionsText.append("-t <name of training file>\n");
  optionsText.append("\tSets training file.\n");
  optionsText.append("-T <name of test file>\n");
  optionsText.append("\tSets test file. If missing, a cross-validation will be performed\n");
  optionsText.append("\ton the training data.\n");
  optionsText.append("-c <class index>\n");
  optionsText.append("\tSets index of class attribute (default: last).\n");
  optionsText.append("-x <number of folds>\n");
  optionsText.append("\tSets number of folds for cross-validation (default: 10).\n");
  optionsText.append("-no-cv\n");
  optionsText.append("\tDo not perform any cross validation.\n");
  optionsText.append("-force-batch-training\n");
  optionsText.append("\tAlways train classifier in batch mode, never incrementally.\n");
  optionsText.append("-split-percentage <percentage>\n");
  optionsText.append("\tSets the percentage for the train/test set split, e.g., 66.\n");
  optionsText.append("-preserve-order\n");
  optionsText.append("\tPreserves the order in the percentage split.\n");
  optionsText.append("-s <random number seed>\n");
  optionsText.append("\tSets random number seed for cross-validation or percentage split\n");
  optionsText.append("\t(default: 1).\n");
  optionsText.append("-m <name of file with cost matrix>\n");
  optionsText.append("\tSets file with cost matrix.\n");
  optionsText.append("-toggle <comma-separated list of evaluation metric names>\n");
  optionsText.append("\tComma separated list of metric names to toggle in the output.\n\t"
      + "All metrics are output by default with the exception of 'Coverage' and " + "'Region size'.\n\t");
  optionsText.append("Available metrics:\n\t");

  // Collect built-in plus plugin metric names for the help text.
  List<String> metricsToDisplay = new ArrayList<>(Arrays.asList(BUILT_IN_EVAL_METRICS));
  List<AbstractEvaluationMetric> pluginMetrics = AbstractEvaluationMetric.getPluginMetrics();
  if (pluginMetrics != null) {
    for (AbstractEvaluationMetric m : pluginMetrics) {
      if (m instanceof InformationRetrievalEvaluationMetric) {
        List<String> statNames = m.getStatisticNames();
        for (String s : statNames) {
          metricsToDisplay.add(s.toLowerCase());
        }
      } else {
        metricsToDisplay.add(m.getMetricName().toLowerCase());
      }
    }
  }

  // Emit the metric names, wrapping lines at roughly 60 characters.
  int length = 0;
  for (int i = 0; i < metricsToDisplay.size(); i++) {
    optionsText.append(metricsToDisplay.get(i));
    length += metricsToDisplay.get(i).length();
    if (i != metricsToDisplay.size() - 1) {
      optionsText.append(",");
    }
    if (length >= 60) {
      optionsText.append("\n\t");
      length = 0;
    }
  }
  optionsText.append("\n");
  optionsText.append("-l <name of input file>\n");
  optionsText.append("\tSets model input file. In case the filename ends with '.xml',\n");
  optionsText.append("\ta PMML file is loaded or, if that fails, options are loaded\n");
  optionsText.append("\tfrom the XML file.\n");
  optionsText.append("-d <name of output file>\n");
  optionsText.append("\tSets model output file. In case the filename ends with '.xml',\n");
  optionsText.append("\tonly the options are saved to the XML file, not the model.\n");
  optionsText.append("-v\n");
  optionsText.append("\tOutputs no statistics for training data.\n");
  optionsText.append("-o\n");
  optionsText.append("\tOutputs statistics only, not the classifier.\n");
  optionsText.append("-output-models-for-training-splits\n");
  optionsText.append("\tOutput models for training splits if cross-validation or percentage-split evaluation is used.\n");
  optionsText.append("-do-not-output-per-class-statistics\n");
  optionsText.append("\tDo not output statistics for each class.\n");
  optionsText.append("-k\n");
  optionsText.append("\tOutputs information-theoretic statistics.\n");
  optionsText.append("-classifications \"weka.classifiers.evaluation.output.prediction.AbstractOutput + options\"\n");
  optionsText.append("\tUses the specified class for generating the classification output.\n");
  optionsText.append("\tE.g.: " + PlainText.class.getName() + "\n");
  optionsText.append("-p range\n");
  optionsText.append("\tOutputs predictions for test instances (or the train instances if\n");
  optionsText.append("\tno test instances provided and -no-cv is used), along with the \n");
  optionsText.append("\tattributes in the specified range (and nothing else). \n");
  optionsText.append("\tUse '-p 0' if no attributes are desired.\n");
  optionsText.append("\tDeprecated: use \"-classifications ...\" instead.\n");
  optionsText.append("-distribution\n");
  optionsText.append("\tOutputs the distribution instead of only the prediction\n");
  optionsText.append("\tin conjunction with the '-p' option (only nominal classes).\n");
  optionsText.append("\tDeprecated: use \"-classifications ...\" instead.\n");
  optionsText.append("-r\n");
  optionsText.append("\tOnly outputs cumulative margin distribution.\n");
  // Capability-dependent options.
  if (classifier instanceof Sourcable) {
    optionsText.append("-z <class name>\n");
    optionsText.append("\tOnly outputs the source representation" + " of the classifier,\n\tgiving it the supplied"
        + " name.\n");
  }
  if (classifier instanceof Drawable) {
    optionsText.append("-g\n");
    optionsText.append("\tOnly outputs the graph representation" + " of the classifier.\n");
  }
  optionsText.append("-xml filename | xml-string\n");
  optionsText.append("\tRetrieves the options from the XML-data instead of the " + "command line.\n");
  optionsText.append("-threshold-file <file>\n");
  optionsText.append("\tThe file to save the threshold data to.\n"
      + "\tThe format is determined by the extensions, e.g., '.arff' for ARFF \n" + "\tformat or '.csv' for CSV.\n");
  optionsText.append("-threshold-label <label>\n");
  optionsText.append("\tThe class label to determine the threshold data for\n" + "\t(default is the first label)\n");
  optionsText.append("-no-predictions\n");
  optionsText.append("\tTurns off the collection of predictions in order to conserve memory.\n");

  // Get scheme-specific options
  if (classifier instanceof OptionHandler) {
    optionsText.append("\nOptions specific to " + classifier.getClass().getName() + ":\n\n");
    Enumeration<Option> enu = ((OptionHandler) classifier).listOptions();
    while (enu.hasMoreElements()) {
      Option option = enu.nextElement();
      optionsText.append(option.synopsis() + '\n');
      optionsText.append(option.description() + "\n");
    }
  }

  // Get global information (if available)
  if (globalInfo) {
    try {
      String gi = getGlobalInfo(classifier);
      optionsText.append(gi);
    } catch (Exception ex) {
      // quietly ignore
    }
  }
  return optionsText.toString();
}

/**
 * Return the global info (if it exists) for the supplied classifier.
 *
 * @param classifier
 *          the classifier to get the global info for
 * @return the global info (synopsis) for the classifier
 * @throws Exception
 *           if there is a problem reflecting on the classifier
 */
protected static String getGlobalInfo(final Classifier classifier) throws Exception {
  BeanInfo bi = Introspector.getBeanInfo(classifier.getClass());
  MethodDescriptor[] methods;
  methods = bi.getMethodDescriptors();
  Object[] args = {};
  String result = "\nSynopsis for " + classifier.getClass().getName() + ":\n\n";
  // Look for the conventional no-arg globalInfo() method via bean introspection.
  for (MethodDescriptor method : methods) {
    String name = method.getDisplayName();
    Method meth = method.getMethod();
    if (name.equals("globalInfo")) {
      String globalInfo = (String) (meth.invoke(classifier, args));
      result += globalInfo;
      break;
    }
  }
  return result;
}

/**
 * Method for generating indices for the confusion matrix.
 *
 * @param num
 *          integer to format
 * @param IDChars
 *          the characters to use
 * @param IDWidth
 *          the width of the entry
 * @return the formatted integer as a string
 */
protected String num2ShortID(int num, final char[] IDChars, final int IDWidth) {
  char ID[] = new char[IDWidth];
  int i;

  // Encode num in base IDChars.length, least-significant character last;
  // the "- 1" makes the encoding bijective (no leading "zero" character).
  for (i = IDWidth - 1; i >= 0; i--) {
    ID[i] = IDChars[num % IDChars.length];
    num = num / IDChars.length - 1;
    if (num < 0) {
      break;
    }
  }
  // Left-pad the unused positions with spaces.
  for (i--; i >= 0; i--) {
    ID[i] = ' ';
  }
  return new String(ID);
}

/**
 * Convert a single prediction into a probability distribution with all zero probabilities except
 * the predicted value which has probability 1.0.
 *
 * @param predictedClass
 *          the index of the predicted class
 * @return the probability distribution
 */
protected double[] makeDistribution(final double predictedClass) {
  double[] result = new double[this.m_NumClasses];
  // A missing prediction yields an all-zero distribution.
  if (Utils.isMissingValue(predictedClass)) {
    return result;
  }
  if (this.m_ClassIsNominal) {
    // Point mass on the predicted class.
    result[(int) predictedClass] = 1.0;
  } else {
    // Numeric class: slot 0 carries the predicted value itself.
    result[0] = predictedClass;
  }
  return result;
}

/**
 * Updates all the statistics about a classifiers performance for the current test instance.
 *
 * @param predictedDistribution
 *          the probabilities assigned to each class
 * @param instance
 *          the instance to be classified
 * @throws Exception
 *           if the class of the instance is not set
 */
protected void updateStatsForClassifier(final double[] predictedDistribution, final Instance instance)
    throws Exception {
  int actualClass = (int) instance.classValue();

  if (!instance.classIsMissing()) {
    this.updateMargins(predictedDistribution, actualClass, instance.weight());

    // Determine the predicted class (doesn't detect multiple
    // classifications)
    int predictedClass = -1;
    double bestProb = 0.0;
    for (int i = 0; i < this.m_NumClasses; i++) {
      if (predictedDistribution[i] > bestProb) {
        predictedClass = i;
        bestProb = predictedDistribution[i];
      }
    }

    this.m_WithClass += instance.weight();

    // Determine misclassification cost
    if (this.m_CostMatrix != null) {
      if (predictedClass < 0) {
        // For missing predictions, we assume the worst possible cost.
        // This is pretty harsh.
// Perhaps we could take the negative of the cost of a correct // prediction (-m_CostMatrix.getElement(actualClass,actualClass)), // although often this will be zero this.m_TotalCost += instance.weight() * this.m_CostMatrix.getMaxCost(actualClass, instance); } else { this.m_TotalCost += instance.weight() * this.m_CostMatrix.getElement(actualClass, predictedClass, instance); } } // Update counts when no class was predicted if (predictedClass < 0) { this.m_Unclassified += instance.weight(); return; } double predictedProb = Math.max(MIN_SF_PROB, predictedDistribution[actualClass]); double priorProb = Math.max(MIN_SF_PROB, this.m_ClassPriors[actualClass] / this.m_ClassPriorsSum); if (predictedProb >= priorProb) { this.m_SumKBInfo += (Utils.log2(predictedProb) - Utils.log2(priorProb)) * instance.weight(); } else { this.m_SumKBInfo -= (Utils.log2(1.0 - predictedProb) - Utils.log2(1.0 - priorProb)) * instance.weight(); } this.m_SumSchemeEntropy -= Utils.log2(predictedProb) * instance.weight(); this.m_SumPriorEntropy -= Utils.log2(priorProb) * instance.weight(); this.updateNumericScores(predictedDistribution, this.makeDistribution(instance.classValue()), instance.weight()); // Update coverage stats int[] indices = Utils.stableSort(predictedDistribution); // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } double sum = 0, sizeOfRegions = 0; for (int i = predictedDistribution.length - 1; i >= 0; i--) { if (sum >= this.m_ConfLevel) { break; } sum += predictedDistribution[indices[i]]; sizeOfRegions++; if (actualClass == indices[i]) { this.m_TotalCoverage += instance.weight(); } } this.m_TotalSizeOfRegions += instance.weight() * sizeOfRegions / (this.m_MaxTarget - this.m_MinTarget); // Update other stats this.m_ConfusionMatrix[actualClass][predictedClass] += instance.weight(); if (predictedClass != actualClass) { this.m_Incorrect += instance.weight(); } else { this.m_Correct += instance.weight(); } 
} else { this.m_MissingClass += instance.weight(); } if (this.m_pluginMetrics != null) { for (AbstractEvaluationMetric m : this.m_pluginMetrics) { if (m instanceof StandardEvaluationMetric) { ((StandardEvaluationMetric) m).updateStatsForClassifier(predictedDistribution, instance); } else if (m instanceof InformationRetrievalEvaluationMetric) { ((InformationRetrievalEvaluationMetric) m).updateStatsForClassifier(predictedDistribution, instance); } else if (m instanceof InformationTheoreticEvaluationMetric) { ((InformationTheoreticEvaluationMetric) m).updateStatsForClassifier(predictedDistribution, instance); } } } } /** * Updates stats for interval estimator based on current test instance. * * @param classifier * the interval estimator * @param classMissing * the instance for which the intervals are computed, without a class value * @param classValue * the class value of this instance * @throws Exception * if intervals could not be computed successfully */ protected void updateStatsForIntervalEstimator(final IntervalEstimator classifier, final Instance classMissing, final double classValue) throws Exception { double[][] preds = classifier.predictIntervals(classMissing, this.m_ConfLevel); if (this.m_Predictions != null) { ((NumericPrediction) this.m_Predictions.get(this.m_Predictions.size() - 1)).setPredictionIntervals(preds); } for (double[] pred : preds) { this.m_TotalSizeOfRegions += classMissing.weight() * (pred[1] - pred[0]) / (this.m_MaxTarget - this.m_MinTarget); } for (double[] pred : preds) { if ((pred[1] >= classValue) && (pred[0] <= classValue)) { this.m_TotalCoverage += classMissing.weight(); break; } } if (this.m_pluginMetrics != null) { for (AbstractEvaluationMetric m : this.m_pluginMetrics) { if (m instanceof IntervalBasedEvaluationMetric) { ((IntervalBasedEvaluationMetric) m).updateStatsForIntervalEstimator(classifier, classMissing, classValue); } } } } /** * Updates stats for conditional density estimator based on current test instance. 
* * @param classifier * the conditional density estimator * @param classMissing * the instance for which density is to be computed, without a class value * @param classValue * the class value of this instance * @throws Exception * if density could not be computed successfully */ protected void updateStatsForConditionalDensityEstimator(final ConditionalDensityEstimator classifier, final Instance classMissing, final double classValue) throws Exception { if (this.m_PriorEstimator == null) { this.setNumericPriorsFromBuffer(); } this.m_SumSchemeEntropy -= classifier.logDensity(classMissing, classValue) * classMissing.weight() / Utils.log2; this.m_SumPriorEntropy -= this.m_PriorEstimator.logDensity(classValue) * classMissing.weight() / Utils.log2; } /** * Updates all the statistics about a predictors performance for the current test instance. * * @param predictedValue * the numeric value the classifier predicts * @param instance * the instance to be classified * @throws Exception * if the class of the instance is not set */ protected void updateStatsForPredictor(final double predictedValue, final Instance instance) throws Exception { if (!instance.classIsMissing()) { // Update stats this.m_WithClass += instance.weight(); if (Utils.isMissingValue(predictedValue)) { this.m_Unclassified += instance.weight(); return; } this.m_SumClass += instance.weight() * instance.classValue(); this.m_SumSqrClass += instance.weight() * instance.classValue() * instance.classValue(); this.m_SumClassPredicted += instance.weight() * instance.classValue() * predictedValue; this.m_SumPredicted += instance.weight() * predictedValue; this.m_SumSqrPredicted += instance.weight() * predictedValue * predictedValue; this.updateNumericScores(this.makeDistribution(predictedValue), this.makeDistribution(instance.classValue()), instance.weight()); } else { this.m_MissingClass += instance.weight(); } if (this.m_pluginMetrics != null) { for (AbstractEvaluationMetric m : this.m_pluginMetrics) { if (m 
instanceof StandardEvaluationMetric) { ((StandardEvaluationMetric) m).updateStatsForPredictor(predictedValue, instance); } else if (m instanceof InformationTheoreticEvaluationMetric) { ((InformationTheoreticEvaluationMetric) m).updateStatsForPredictor(predictedValue, instance); } } } } /** * Update the cumulative record of classification margins. * * @param predictedDistribution * the probability distribution predicted for the current instance * @param actualClass * the index of the actual instance class * @param weight * the weight assigned to the instance */ protected void updateMargins(final double[] predictedDistribution, final int actualClass, final double weight) { double probActual = predictedDistribution[actualClass]; double probNext = 0; for (int i = 0; i < this.m_NumClasses; i++) { if ((i != actualClass) && (predictedDistribution[i] > probNext)) { probNext = predictedDistribution[i]; } } double margin = probActual - probNext; int bin = (int) ((margin + 1.0) / 2.0 * k_MarginResolution); this.m_MarginCounts[bin] += weight; } /** * Update the numeric accuracy measures. For numeric classes, the accuracy is between the actual and * predicted class values. For nominal classes, the accuracy is between the actual and predicted * class probabilities. 
* * @param predicted * the predicted values * @param actual * the actual value * @param weight * the weight associated with this prediction */ protected void updateNumericScores(final double[] predicted, final double[] actual, final double weight) { double diff; double sumErr = 0, sumAbsErr = 0, sumSqrErr = 0; double sumPriorAbsErr = 0, sumPriorSqrErr = 0; for (int i = 0; i < this.m_NumClasses; i++) { diff = predicted[i] - actual[i]; sumErr += diff; sumAbsErr += Math.abs(diff); sumSqrErr += diff * diff; diff = (this.m_ClassPriors[i] / this.m_ClassPriorsSum) - actual[i]; sumPriorAbsErr += Math.abs(diff); sumPriorSqrErr += diff * diff; } this.m_SumErr += weight * sumErr / this.m_NumClasses; this.m_SumAbsErr += weight * sumAbsErr / this.m_NumClasses; this.m_SumSqrErr += weight * sumSqrErr / this.m_NumClasses; this.m_SumPriorAbsErr += weight * sumPriorAbsErr / this.m_NumClasses; this.m_SumPriorSqrErr += weight * sumPriorSqrErr / this.m_NumClasses; } /** * Adds a numeric (non-missing) training class value and weight to the buffer of stored values. Also * updates minimum and maximum target value. 
* * @param classValue * the class value * @param weight * the instance weight */ protected void addNumericTrainClass(final double classValue, final double weight) { // Update minimum and maximum target value if (classValue > this.m_MaxTarget) { this.m_MaxTarget = classValue; } if (classValue < this.m_MinTarget) { this.m_MinTarget = classValue; } // Update buffer if (this.m_TrainClassVals == null) { this.m_TrainClassVals = new double[100]; this.m_TrainClassWeights = new double[100]; } if (this.m_NumTrainClassVals == this.m_TrainClassVals.length) { double[] temp = new double[this.m_TrainClassVals.length * 2]; System.arraycopy(this.m_TrainClassVals, 0, temp, 0, this.m_TrainClassVals.length); this.m_TrainClassVals = temp; temp = new double[this.m_TrainClassWeights.length * 2]; System.arraycopy(this.m_TrainClassWeights, 0, temp, 0, this.m_TrainClassWeights.length); this.m_TrainClassWeights = temp; } this.m_TrainClassVals[this.m_NumTrainClassVals] = classValue; this.m_TrainClassWeights[this.m_NumTrainClassVals] = weight; this.m_NumTrainClassVals++; } /** * Sets up the priors for numeric class attributes from the training class values that have been * seen so far. */ protected void setNumericPriorsFromBuffer() { this.m_PriorEstimator = new UnivariateKernelEstimator(); for (int i = 0; i < this.m_NumTrainClassVals; i++) { this.m_PriorEstimator.addValue(this.m_TrainClassVals[i], this.m_TrainClassWeights[i]); } } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/EvaluationMetricHelper.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * EvaluationMetricHelper.java
 * Copyright (C) 2014 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.evaluation;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Helper routines for extracting metric values from built-in and plugin
 * evaluation metrics. Metric names are matched case-insensitively (all
 * lookups are lower-cased).
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision$
 */
public class EvaluationMetricHelper {

  /** The Evaluation object to extract built-in and plugin metrics from */
  protected Evaluation m_eval;

  /**
   * A lookup for built-in metrics: lower-cased metric name to its position in
   * Evaluation.BUILT_IN_EVAL_METRICS. The switch tables in
   * builtInMetricIsMaximisable() and getBuiltinMetricValue() are keyed by the
   * same positions and must stay in sync with that array.
   */
  protected Map<String, Integer> m_builtin = new HashMap<String, Integer>();

  /** A lookup for plugin metrics: lower-cased statistic name to its metric */
  protected Map<String, AbstractEvaluationMetric> m_pluginMetrics =
    new HashMap<String, AbstractEvaluationMetric>();

  /**
   * Construct a new EvaluationMetricHelper
   *
   * @param eval the Evaluation object to use
   */
  public EvaluationMetricHelper(Evaluation eval) {
    for (int i = 0; i < Evaluation.BUILT_IN_EVAL_METRICS.length; i++) {
      m_builtin.put(Evaluation.BUILT_IN_EVAL_METRICS[i].toLowerCase(), i);
    }

    setEvaluation(eval);
  }

  /**
   * Sets the Evaluation object to use. Re-scans the evaluation's plugin
   * metrics so the plugin lookup stays consistent with the new object.
   *
   * @param eval the Evaluation object to use
   */
  public void setEvaluation(Evaluation eval) {
    m_eval = eval;
    initializeWithPluginMetrics();
  }

  /** Initializes the plugin lookup from the current Evaluation object */
  protected void initializeWithPluginMetrics() {
    m_pluginMetrics.clear();
    List<AbstractEvaluationMetric> pluginMetrics = m_eval.getPluginMetrics();
    if (pluginMetrics != null && pluginMetrics.size() > 0) {
      for (AbstractEvaluationMetric m : pluginMetrics) {
        List<String> statNames = m.getStatisticNames();
        for (String s : statNames) {
          // A metric can expose several statistics; each name maps to it.
          m_pluginMetrics.put(s.toLowerCase(), m);
        }
      }
    }
  }

  /**
   * Get a list of built-in metric names
   *
   * @return a list of built-in metric names
   */
  public static List<String> getBuiltInMetricNames() {
    List<String> builtIn = new ArrayList<String>();
    builtIn.addAll(Arrays.asList(Evaluation.BUILT_IN_EVAL_METRICS));
    return builtIn;
  }

  /**
   * Get a list of plugin metric names (lower-cased)
   *
   * @return a list of plugin metric names
   */
  public static List<String> getPluginMetricNames() {
    List<String> pluginNames = new ArrayList<String>();
    List<AbstractEvaluationMetric> pluginMetrics =
      AbstractEvaluationMetric.getPluginMetrics();
    if (pluginMetrics != null) {
      for (AbstractEvaluationMetric m : pluginMetrics) {
        List<String> statNames = m.getStatisticNames();
        for (String s : statNames) {
          pluginNames.add(s.toLowerCase());
        }
      }
    }
    return pluginNames;
  }

  /**
   * Get a list of all available evaluation metric names
   *
   * @return a list of all available evaluation metric names
   */
  public static List<String> getAllMetricNames() {
    List<String> metrics = getBuiltInMetricNames();
    metrics.addAll(getPluginMetricNames());
    return metrics;
  }

  /**
   * Returns true if the specified built-in metric is maximisable.
   * The case indices correspond to positions in
   * Evaluation.BUILT_IN_EVAL_METRICS (see the m_builtin lookup).
   *
   * @param metricIndex the index of metric
   * @return true if the metric in question is optimum at a maximal value
   * @throws Exception if the metric is not a known built-in metric
   */
  protected boolean builtInMetricIsMaximisable(int metricIndex)
    throws Exception {
    switch (metricIndex) {
    case 0: // correct
      return true;
    case 1: // incorrect
      return false;
    case 2: // kappa
      return true;
    case 3: // total cost
      return false;
    case 4: // avg cost
      return false;
    case 5: // KB relative info
      return false;
    case 6: // KB info
      return false;
    case 7: // correlation
      return true;
    case 8: // SF prior entropy
      return false;
    case 9: // SF scheme entropy
      return false;
    case 10: // SF entropy gain
      return true;
    case 11: // MAE
      return false;
    case 12: // RMSE
      return false;
    case 13: // RAE
      return false;
    case 14: // RRSE
      return false;
    case 15: // coverage of cases by predicted regions
      return true;
    case 16: // size of predicted regions
      return false;
    case 17: // TPR
      return true;
    case 18: // FPR
      return false;
    case 19: // precision
      return true;
    case 20: // recall
      return true;
    case 21: // f-measure
      return true;
    case 22: // Matthews correlation
      return true;
    case 23: // AUC
      return true;
    case 24: // AUPRC
      return true;
    }

    throw new Exception("Unknown built-in metric");
  }

  /**
   * Gets the value of a built-in metric. For the per-class metrics (cases
   * 17-24) the class-weighted variant is returned when no class value index
   * is supplied.
   *
   * @param metricIndex the index of the metric
   * @param classValIndex the optional class value index
   * @return the value of the metric
   * @throws Exception if the metric is not a known built-in metric
   */
  protected double getBuiltinMetricValue(int metricIndex, int... classValIndex)
    throws Exception {
    boolean hasValIndex = classValIndex != null && classValIndex.length == 1;

    switch (metricIndex) {
    case 0:
      return m_eval.correct();
    case 1:
      return m_eval.incorrect();
    case 2:
      return m_eval.kappa();
    case 3:
      return m_eval.totalCost();
    case 4:
      return m_eval.avgCost();
    case 5:
      return m_eval.KBRelativeInformation();
    case 6:
      return m_eval.KBInformation();
    case 7:
      return m_eval.correlationCoefficient();
    case 8:
      return m_eval.SFPriorEntropy();
    case 9:
      return m_eval.SFSchemeEntropy();
    case 10:
      return m_eval.SFEntropyGain();
    case 11:
      return m_eval.meanAbsoluteError();
    case 12:
      return m_eval.rootMeanSquaredError();
    case 13:
      return m_eval.relativeAbsoluteError();
    case 14:
      return m_eval.rootRelativeSquaredError();
    case 15:
      return m_eval.coverageOfTestCasesByPredictedRegions();
    case 16:
      return m_eval.sizeOfPredictedRegions();
    case 17:
      return hasValIndex ? m_eval.truePositiveRate(classValIndex[0]) : m_eval
        .weightedTruePositiveRate();
    case 18:
      return hasValIndex ? m_eval.falsePositiveRate(classValIndex[0]) : m_eval
        .weightedFalsePositiveRate();
    case 19:
      return hasValIndex ? m_eval.precision(classValIndex[0]) : m_eval
        .weightedPrecision();
    case 20:
      return hasValIndex ? m_eval.recall(classValIndex[0]) : m_eval
        .weightedRecall();
    case 21:
      return hasValIndex ? m_eval.fMeasure(classValIndex[0]) : m_eval
        .weightedFMeasure();
    case 22:
      return hasValIndex ? m_eval
        .matthewsCorrelationCoefficient(classValIndex[0]) : m_eval
        .weightedMatthewsCorrelation();
    case 23:
      return hasValIndex ? m_eval.areaUnderROC(classValIndex[0]) : m_eval
        .weightedAreaUnderROC();
    case 24:
      return hasValIndex ? m_eval.areaUnderPRC(classValIndex[0]) : m_eval
        .weightedAreaUnderPRC();
    }

    throw new Exception("Unknown built-in metric");
  }

  /**
   * Get the value of a plugin metric. Only information retrieval metrics
   * honour the optional class value index; other plugin metrics are queried
   * by statistic name only.
   *
   * @param m the metric to get the value from
   * @param statName the name of the statistic to get the value of
   * @param classValIndex the optional class value index
   * @return the value of the metric
   * @throws Exception if a problem occurs
   */
  protected double getPluginMetricValue(AbstractEvaluationMetric m,
    String statName, int... classValIndex) throws Exception {
    boolean hasValIndex = classValIndex != null && classValIndex.length == 1;

    if (m instanceof InformationRetrievalEvaluationMetric) {
      return hasValIndex ? ((InformationRetrievalEvaluationMetric) m)
        .getStatistic(statName, classValIndex[0])
        : ((InformationRetrievalEvaluationMetric) m)
          .getClassWeightedAverageStatistic(statName);
    }

    return m.getStatistic(statName);
  }

  /**
   * Returns true if the named statistic is maximisable
   *
   * @param m the metric to check
   * @param statName the name of the statistic to check
   * @return true if the metric in question is optimum at a maximal value
   */
  protected boolean pluginMetricIsMaximisable(AbstractEvaluationMetric m,
    String statName) {
    return m.statisticIsMaximisable(statName);
  }

  /**
   * Gets the value of a named metric. For information retrieval metrics if a
   * class value index is not supplied then the class weighted variant is
   * returned.
   *
   * @param statName the name of the metric/statistic to get
   * @param classValIndex the optional class value index
   * @return the value of the metric
   * @throws Exception if the metric/stat is unknown or a problem occurs
   */
  public double getNamedMetric(String statName, int... classValIndex)
    throws Exception {
    if (classValIndex != null && classValIndex.length > 1) {
      throw new IllegalArgumentException(
        "Only one class value index should be supplied");
    }

    // Built-in metrics take precedence over plugin metrics of the same name.
    Integer builtinIndex = m_builtin.get(statName.toLowerCase());

    if (builtinIndex != null) {
      return getBuiltinMetricValue(builtinIndex.intValue(), classValIndex);
    } else {
      AbstractEvaluationMetric m = m_pluginMetrics.get(statName.toLowerCase());
      if (m == null) {
        throw new Exception("Unknown evaluation metric: " + statName);
      }

      return getPluginMetricValue(m, statName, classValIndex);
    }
  }

  /**
   * Gets the thresholds produced by the metric, if the metric implements
   * ThresholdProducingMetric.
   *
   * @param statName the name of the metric/statistic to get
   * @return the thresholds, null if metric does not produce any
   * @throws Exception if the metric/stat is unknown or a problem occurs
   */
  public double[] getNamedMetricThresholds(String statName) throws Exception {
    Integer builtinIndex = m_builtin.get(statName.toLowerCase());

    if (builtinIndex != null) {
      return null; // built-in metrics do not produce thresholds
    } else {
      AbstractEvaluationMetric m = m_pluginMetrics.get(statName.toLowerCase());
      if (m == null) {
        throw new Exception("Unknown evaluation metric: " + statName);
      }
      if (m instanceof ThresholdProducingMetric) {
        return ((ThresholdProducingMetric)m).getThresholds();
      } else {
        return null;
      }
    }
  }

  /**
   * Returns true if the named metric is maximisable
   *
   * @param statName the name of the metric/statistic to check
   * @return true if the metric in question is optimum at a maximal value
   * @throws Exception if a problem occurs
   */
  public boolean metricIsMaximisable(String statName) throws Exception {
    Integer builtinIndex = m_builtin.get(statName.toLowerCase());

    if (builtinIndex != null) {
      return builtInMetricIsMaximisable(builtinIndex.intValue());
    } else {
      AbstractEvaluationMetric m = m_pluginMetrics.get(statName.toLowerCase());
      if (m == null) {
        throw new Exception("Unknown evaluation metric: " + statName);
      }

      return pluginMetricIsMaximisable(m, statName);
    }
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/EvaluationUtils.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * EvaluationUtils.java
 * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.evaluation;

import java.util.ArrayList;
import java.util.Random;

import weka.classifiers.Classifier;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;

/**
 * Contains utility functions for generating lists of predictions in various
 * manners.
 *
 * @author Len Trigg (len@reeltwo.com)
 * @version $Revision$
 */
public class EvaluationUtils implements RevisionHandler {

  /** Seed used to randomize data in cross-validation */
  private int m_Seed = 1;

  /** Sets the seed for randomization during cross-validation */
  public void setSeed(int seed) {
    m_Seed = seed;
  }

  /** Gets the seed for randomization during cross-validation */
  public int getSeed() {
    return m_Seed;
  }

  /**
   * Generate a bunch of predictions ready for processing, by performing a
   * cross-validation on the supplied dataset.
   *
   * @param classifier the Classifier to evaluate
   * @param data the dataset
   * @param numFolds the number of folds in the cross-validation.
   * @exception Exception if an error occurs
   */
  public ArrayList<Prediction> getCVPredictions(Classifier classifier,
    Instances data, int numFolds) throws Exception {

    ArrayList<Prediction> allPredictions = new ArrayList<Prediction>();

    // Work on a shuffled (and, for nominal classes, stratified) copy so the
    // caller's dataset is left untouched.
    Instances shuffled = new Instances(data);
    Random rnd = new Random(m_Seed);
    shuffled.randomize(rnd);
    if (shuffled.classAttribute().isNominal() && (numFolds > 1)) {
      shuffled.stratify(numFolds);
    }

    for (int fold = 0; fold < numFolds; fold++) {
      Instances trainSplit = shuffled.trainCV(numFolds, fold, rnd);
      Instances testSplit = shuffled.testCV(numFolds, fold);
      allPredictions.addAll(getTrainTestPredictions(classifier, trainSplit,
        testSplit));
    }
    return allPredictions;
  }

  /**
   * Generate a bunch of predictions ready for processing, by performing a
   * evaluation on a test set after training on the given training set.
   *
   * @param classifier the Classifier to evaluate
   * @param train the training dataset
   * @param test the test dataset
   * @exception Exception if an error occurs
   */
  public ArrayList<Prediction> getTrainTestPredictions(Classifier classifier,
    Instances train, Instances test) throws Exception {

    classifier.buildClassifier(train);
    return getTestPredictions(classifier, test);
  }

  /**
   * Generate a bunch of predictions ready for processing, by performing a
   * evaluation on a test set assuming the classifier is already trained.
   * Instances with a missing class value are skipped.
   *
   * @param classifier the pre-trained Classifier to evaluate
   * @param test the test dataset
   * @exception Exception if an error occurs
   */
  public ArrayList<Prediction> getTestPredictions(Classifier classifier,
    Instances test) throws Exception {

    ArrayList<Prediction> preds = new ArrayList<Prediction>();
    for (int idx = 0; idx < test.numInstances(); idx++) {
      Instance current = test.instance(idx);
      if (current.classIsMissing()) {
        continue;
      }
      preds.add(getPrediction(classifier, current));
    }
    return preds;
  }

  /**
   * Generate a single prediction for a test instance given the pre-trained
   * classifier.
   *
   * @param classifier the pre-trained Classifier to evaluate
   * @param test the test instance
   * @exception Exception if an error occurs
   */
  public Prediction getPrediction(Classifier classifier, Instance test)
    throws Exception {

    double actual = test.classValue();
    double[] dist = classifier.distributionForInstance(test);
    return test.classAttribute().isNominal()
      ? new NominalPrediction(actual, dist, test.weight())
      : new NumericPrediction(actual, dist[0], test.weight());
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/InformationRetrievalEvaluationMetric.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * InformationRetrievalEvaluationMetric.java
 * Copyright (C) 2011-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.evaluation;

import weka.core.Instance;

/**
 * An interface for information retrieval evaluation metrics to implement.
 * Allows the command line interface to display these metrics or not based on
 * user-supplied options. These statistics will be displayed as new columns in
 * the table of information retrieval statistics. As such, a toSummaryString()
 * formatted representation is not required.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision$
 */
public interface InformationRetrievalEvaluationMetric {

  /**
   * Updates the statistics about a classifiers performance for the current test
   * instance. Implementers need only implement this method if it is not
   * possible to compute their statistics from what is stored in the base
   * Evaluation object.
   *
   * @param predictedDistribution the probabilities assigned to each class
   * @param instance the instance to be classified
   * @throws Exception if the class of the instance is not set
   */
  void updateStatsForClassifier(double[] predictedDistribution,
    Instance instance) throws Exception;

  /**
   * Get the value of the named statistic for the given class index.
   *
   * If the implementing class is extending AbstractEvaluationMetric then the
   * implementation of getStatistic(String statName) should just call this
   * method with a classIndex of 0.
   *
   * @param statName the name of the statistic to compute the value for
   * @param classIndex the class index for which to compute the statistic
   * @return the value of the named statistic for the given class index or
   *         Utils.missingValue() if the statistic can't be computed for some
   *         reason
   */
  double getStatistic(String statName, int classIndex);

  /**
   * Get the weighted (by class) average for this statistic.
   *
   * @param statName the name of the statistic to compute
   * @return the weighted (by class) average value of the statistic or
   *         Utils.missingValue() if this can't be computed (or isn't
   *         appropriate).
   */
  double getClassWeightedAverageStatistic(String statName);
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/InformationTheoreticEvaluationMetric.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * InformationTheoreticEvaluationMetric.java
 * Copyright (C) 2011-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.evaluation;

import weka.classifiers.ConditionalDensityEstimator;
import weka.core.Instance;

/**
 * Primarily a marker interface for information theoretic evaluation metrics to
 * implement. Allows the command line interface to display these metrics or not
 * based on user-supplied options
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision$
 */
public interface InformationTheoreticEvaluationMetric {

  /**
   * Updates the statistics about a classifiers performance for the current test
   * instance. Gets called when the class is nominal. Implementers need only
   * implement this method if it is not possible to compute their statistics
   * from what is stored in the base Evaluation object.
   *
   * @param predictedDistribution the probabilities assigned to each class
   * @param instance the instance to be classified
   * @throws Exception if the class of the instance is not set
   */
  void updateStatsForClassifier(double[] predictedDistribution,
    Instance instance) throws Exception;

  /**
   * Updates the statistics about a predictors performance for the current test
   * instance. Gets called when the class is numeric. Implementers need only
   * implement this method if it is not possible to compute their statistics
   * from what is stored in the base Evaluation object.
   *
   * @param predictedValue the numeric value the classifier predicts
   * @param instance the instance to be classified
   * @throws Exception if the class of the instance is not set
   */
  void updateStatsForPredictor(double predictedValue, Instance instance)
    throws Exception;

  /**
   * Updates stats for conditional density estimator based on current test
   * instance. Gets called when the class is numeric and the classifier is a
   * ConditionalDensityEstimator. Implementers need only implement this method
   * if it is not possible to compute their statistics from what is stored in
   * the base Evaluation object.
   *
   * @param classifier the conditional density estimator
   * @param classMissing the instance for which density is to be computed,
   *          without a class value
   * @param classValue the class value of this instance
   * @throws Exception if density could not be computed successfully
   */
  void updateStatsForConditionalDensityEstimator(
    ConditionalDensityEstimator classifier, Instance classMissing,
    double classValue) throws Exception;

  /**
   * Return a formatted string (suitable for displaying in console or GUI
   * output) containing all the statistics that this metric computes.
   *
   * @return a formatted string containing all the computed statistics
   */
  String toSummaryString();
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/IntervalBasedEvaluationMetric.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * IntervalBasedEvaluationMetric.java
 * Copyright (C) 2011-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.evaluation;

import weka.classifiers.IntervalEstimator;
import weka.core.Instance;

/**
 * Primarily a marker interface for interval-based evaluation metrics to
 * implement. Allows the command line interface to display these metrics or not
 * based on user-supplied options
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision$
 */
public interface IntervalBasedEvaluationMetric {

  /**
   * Updates stats for interval estimator based on current test instance.
   * Implementers need only implement this method if it is not possible to
   * compute their statistics from what is stored in the base Evaluation object.
   *
   * @param classifier the interval estimator
   * @param classMissing the instance for which the intervals are computed,
   *          without a class value
   * @param classValue the class value of this instance
   * @throws Exception if intervals could not be computed successfully
   */
  void updateStatsForIntervalEstimator(IntervalEstimator classifier,
    Instance classMissing, double classValue) throws Exception;

  /**
   * Return a formatted string (suitable for displaying in console or GUI
   * output) containing all the statistics that this metric computes.
   *
   * @return a formatted string containing all the computed statistics
   */
  String toSummaryString();
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/MarginCurve.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * MarginCurve.java * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.evaluation; import java.util.ArrayList; import weka.core.Attribute; import weka.core.DenseInstance; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Utils; /** * Generates points illustrating the prediction margin. The margin is defined as * the difference between the probability predicted for the actual class and the * highest probability predicted for the other classes. One hypothesis as to the * good performance of boosting algorithms is that they increaes the margins on * the training data and this gives better performance on test data. * * @author Len Trigg (len@reeltwo.com) * @version $Revision$ */ public class MarginCurve implements RevisionHandler { /** * Calculates the cumulative margin distribution for the set of predictions, * returning the result as a set of Instances. 
The structure of these * Instances is as follows: * <p> * <ul> * <li><b>Margin</b> contains the margin value (which should be plotted as an * x-coordinate) * <li><b>Current</b> contains the count of instances with the current margin * (plot as y axis) * <li><b>Cumulative</b> contains the count of instances with margin less than * or equal to the current margin (plot as y axis) * </ul> * <p> * * @return datapoints as a set of instances, null if no predictions have been * made. * @throws InterruptedException */ public Instances getCurve(final ArrayList<Prediction> predictions) throws InterruptedException { if (predictions.size() == 0) { return null; } Instances insts = this.makeHeader(); double[] margins = this.getMargins(predictions); int[] sorted = Utils.sort(margins); int binMargin = 0; int totalMargin = 0; insts.add(this.makeInstance(-1, binMargin, totalMargin)); for (int element : sorted) { double current = margins[element]; double weight = ((NominalPrediction) predictions.get(element)).weight(); totalMargin += weight; binMargin += weight; if (true) { insts.add(this.makeInstance(current, binMargin, totalMargin)); binMargin = 0; } } return insts; } /** * Pulls all the margin values out of a vector of NominalPredictions. * * @param predictions a FastVector containing NominalPredictions * @return an array of margin values. */ private double[] getMargins(final ArrayList<Prediction> predictions) { // sort by predicted probability of the desired class. double[] margins = new double[predictions.size()]; for (int i = 0; i < margins.length; i++) { NominalPrediction pred = (NominalPrediction) predictions.get(i); margins[i] = pred.margin(); } return margins; } /** * Creates an Instances object with the attributes we will be calculating. * * @return the Instances structure. 
*/ private Instances makeHeader() { ArrayList<Attribute> fv = new ArrayList<Attribute>(); fv.add(new Attribute("Margin")); fv.add(new Attribute("Current")); fv.add(new Attribute("Cumulative")); return new Instances("MarginCurve", fv, 100); } /** * Creates an Instance object with the attributes calculated. * * @param margin the margin for this data point. * @param current the number of instances with this margin. * @param cumulative the number of instances with margin less than or equal to * this margin. * @return the Instance object. */ private Instance makeInstance(final double margin, final int current, final int cumulative) { int count = 0; double[] vals = new double[3]; vals[count++] = margin; vals[count++] = current; vals[count++] = cumulative; return new DenseInstance(1.0, vals); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } /** * Tests the MarginCurve generation from the command line. The classifier is * currently hardcoded. Pipe in an arff file. * * @param args currently ignored */ public static void main(final String[] args) { try { Utils.SMALL = 0; Instances inst = new Instances(new java.io.InputStreamReader(System.in)); inst.setClassIndex(inst.numAttributes() - 1); MarginCurve tc = new MarginCurve(); EvaluationUtils eu = new EvaluationUtils(); weka.classifiers.meta.LogitBoost classifier = new weka.classifiers.meta.LogitBoost(); classifier.setNumIterations(20); ArrayList<Prediction> predictions = eu.getTrainTestPredictions(classifier, inst, inst); Instances result = tc.getCurve(predictions); System.out.println(result); } catch (Exception ex) { ex.printStackTrace(); } } }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/NominalPrediction.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * NominalPrediction.java * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.evaluation; import java.io.Serializable; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * Encapsulates an evaluatable nominal prediction: the predicted probability * distribution plus the actual class value. * * @author Len Trigg (len@reeltwo.com) * @version $Revision$ */ public class NominalPrediction implements Prediction, Serializable, RevisionHandler { /** * Remove this if you change this class so that serialization would be * affected. */ static final long serialVersionUID = -8871333992740492788L; /** The predicted probabilities */ private double [] m_Distribution; /** The actual class value */ private double m_Actual = MISSING_VALUE; /** The predicted class value */ private double m_Predicted = MISSING_VALUE; /** The weight assigned to this prediction */ private double m_Weight = 1; /** * Creates the NominalPrediction object with a default weight of 1.0. * * @param actual the actual value, or MISSING_VALUE. * @param distribution the predicted probability distribution. Use * NominalPrediction.makeDistribution() if you only know the predicted value. 
*/ public NominalPrediction(double actual, double [] distribution) { this(actual, distribution, 1); } /** * Creates the NominalPrediction object. * * @param actual the actual value, or MISSING_VALUE. * @param distribution the predicted probability distribution. Use * NominalPrediction.makeDistribution() if you only know the predicted value. * @param weight the weight assigned to the prediction. */ public NominalPrediction(double actual, double [] distribution, double weight) { if (distribution == null) { throw new NullPointerException("Null distribution in NominalPrediction."); } m_Actual = actual; m_Distribution = distribution.clone(); m_Weight = weight; updatePredicted(); } /** * Gets the predicted probabilities * * @return the predicted probabilities */ public double [] distribution() { return m_Distribution; } /** * Gets the actual class value. * * @return the actual class value, or MISSING_VALUE if no * prediction was made. */ public double actual() { return m_Actual; } /** * Gets the predicted class value. * * @return the predicted class value, or MISSING_VALUE if no * prediction was made. */ public double predicted() { return m_Predicted; } /** * Gets the weight assigned to this prediction. This is typically the weight * of the test instance the prediction was made for. * * @return the weight assigned to this prediction. */ public double weight() { return m_Weight; } /** * Calculates the prediction margin. This is defined as the difference * between the probability predicted for the actual class and the highest * predicted probability of the other classes. * * @return the margin for this prediction, or * MISSING_VALUE if either the actual or predicted value * is missing. 
*/ public double margin() { if ((m_Actual == MISSING_VALUE) || (m_Predicted == MISSING_VALUE)) { return MISSING_VALUE; } double probActual = m_Distribution[(int)m_Actual]; double probNext = 0; for(int i = 0; i < m_Distribution.length; i++) if ((i != m_Actual) && (m_Distribution[i] > probNext)) probNext = m_Distribution[i]; return probActual - probNext; } /** * Convert a single prediction into a probability distribution * with all zero probabilities except the predicted value which * has probability 1.0. If no prediction was made, all probabilities * are zero. * * @param predictedClass the index of the predicted class, or * MISSING_VALUE if no prediction was made. * @param numClasses the number of possible classes for this nominal * prediction. * @return the probability distribution. */ public static double [] makeDistribution(double predictedClass, int numClasses) { double [] dist = new double [numClasses]; if (predictedClass == MISSING_VALUE) { return dist; } dist[(int)predictedClass] = 1.0; return dist; } /** * Creates a uniform probability distribution -- where each of the * possible classes is assigned equal probability. * * @param numClasses the number of possible classes for this nominal * prediction. * @return the probability distribution. */ public static double [] makeUniformDistribution(int numClasses) { double [] dist = new double [numClasses]; for (int i = 0; i < numClasses; i++) { dist[i] = 1.0 / numClasses; } return dist; } /** * Determines the predicted class (doesn't detect multiple * classifications). If no prediction was made (i.e. all zero * probababilities in the distribution), m_Prediction is set to * MISSING_VALUE. 
*/ private void updatePredicted() { int predictedClass = -1; double bestProb = 0.0; for(int i = 0; i < m_Distribution.length; i++) { if (m_Distribution[i] > bestProb) { predictedClass = i; bestProb = m_Distribution[i]; } } if (predictedClass != -1) { m_Predicted = predictedClass; } else { m_Predicted = MISSING_VALUE; } } /** * Gets a human readable representation of this prediction. * * @return a human readable representation of this prediction. */ public String toString() { StringBuffer sb = new StringBuffer(); sb.append("NOM: ").append(actual()).append(" ").append(predicted()); sb.append(' ').append(weight()); double [] dist = distribution(); for (int i = 0; i < dist.length; i++) { sb.append(' ').append(dist[i]); } return sb.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/NumericPrediction.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * NumericPrediction.java * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.evaluation; import java.io.Serializable; import weka.classifiers.IntervalEstimator; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * Encapsulates an evaluatable numeric prediction: the predicted class value * plus the actual class value. * * @author Len Trigg (len@reeltwo.com) * @version $Revision$ */ public class NumericPrediction implements Prediction, Serializable, RevisionHandler { /** for serialization. */ private static final long serialVersionUID = -4880216423674233887L; /** The actual class value. */ private double m_Actual = MISSING_VALUE; /** The predicted class value. */ private double m_Predicted = MISSING_VALUE; /** The weight assigned to this prediction. */ private double m_Weight = 1; /** the prediction intervals. */ private double[][] m_PredictionIntervals; /** * Creates the NumericPrediction object with a default weight of 1.0. * * @param actual the actual value, or MISSING_VALUE. * @param predicted the predicted value, or MISSING_VALUE. */ public NumericPrediction(double actual, double predicted) { this(actual, predicted, 1); } /** * Creates the NumericPrediction object. * * @param actual the actual value, or MISSING_VALUE. 
* @param predicted the predicted value, or MISSING_VALUE. * @param weight the weight assigned to the prediction. */ public NumericPrediction(double actual, double predicted, double weight) { this(actual, predicted, weight, new double[0][]); } /** * Creates the NumericPrediction object. * * @param actual the actual value, or MISSING_VALUE. * @param predicted the predicted value, or MISSING_VALUE. * @param weight the weight assigned to the prediction. * @param predInt the prediction intervals from classifiers implementing * the <code>IntervalEstimator</code> interface. * @see IntervalEstimator */ public NumericPrediction(double actual, double predicted, double weight, double[][] predInt) { m_Actual = actual; m_Predicted = predicted; m_Weight = weight; setPredictionIntervals(predInt); } /** * Gets the actual class value. * * @return the actual class value, or MISSING_VALUE if no * prediction was made. */ public double actual() { return m_Actual; } /** * Gets the predicted class value. * * @return the predicted class value, or MISSING_VALUE if no * prediction was made. */ public double predicted() { return m_Predicted; } /** * Gets the weight assigned to this prediction. This is typically the weight * of the test instance the prediction was made for. * * @return the weight assigned to this prediction. */ public double weight() { return m_Weight; } /** * Calculates the prediction error. This is defined as the predicted * value minus the actual value. * * @return the error for this prediction, or * MISSING_VALUE if either the actual or predicted value * is missing. */ public double error() { if ((m_Actual == MISSING_VALUE) || (m_Predicted == MISSING_VALUE)) { return MISSING_VALUE; } return m_Predicted - m_Actual; } /** * Sets the prediction intervals for this prediction. * * @param predInt the prediction intervals */ public void setPredictionIntervals(double[][] predInt) { m_PredictionIntervals = predInt.clone(); } /** * Returns the predictions intervals. 
Only classifiers implementing the * <code>IntervalEstimator</code> interface. * * @return the prediction intervals. * @see IntervalEstimator */ public double[][] predictionIntervals() { return m_PredictionIntervals; } /** * Gets a human readable representation of this prediction. * * @return a human readable representation of this prediction. */ public String toString() { StringBuffer sb = new StringBuffer(); sb.append("NUM: ").append(actual()).append(' ').append(predicted()); sb.append(' ').append(weight()); return sb.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/Prediction.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Prediction.java
 * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.evaluation;

/**
 * Encapsulates a single evaluatable prediction: the predicted value plus the
 * actual class value.
 *
 * @author Len Trigg (len@reeltwo.com)
 * @version $Revision$
 */
public interface Prediction {

  /**
   * Constant representing a missing value. This should have the same value as
   * weka.core.Instance.MISSING_VALUE
   */
  double MISSING_VALUE = weka.core.Utils.missingValue();

  /**
   * Gets the weight assigned to this prediction. This is typically the weight
   * of the test instance the prediction was made for.
   *
   * @return the weight assigned to this prediction.
   */
  double weight();

  /**
   * Gets the actual class value.
   *
   * @return the actual class value, or MISSING_VALUE if no prediction was
   *         made.
   */
  double actual();

  /**
   * Gets the predicted class value.
   *
   * @return the predicted class value, or MISSING_VALUE if no prediction was
   *         made.
   */
  double predicted();
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/RegressionAnalysis.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * RegressionAnalysis.java
 * Copyright (C) 1999-2013 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.evaluation;

import weka.core.Attribute;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.matrix.Matrix;

/**
 * Analyzes linear regression model by using the Student's t-test on each
 * coefficient. Also calculates R^2 value and F-test value.
 *
 * More information: http://en.wikipedia.org/wiki/Student's_t-test
 * http://en.wikipedia.org/wiki/Linear_regression
 * http://en.wikipedia.org/wiki/Ordinary_least_squares
 *
 * @author Chris Meyer: cmeyer@udel.edu University of Delaware, Newark, DE, USA
 *         CISC 612: Design extension implementation
 * @version $Revision: $
 */
public class RegressionAnalysis {

  /**
   * Returns the sum of squared residuals of the simple linear regression model:
   * y = a + bx.
   *
   * @param data (the data set)
   * @param chosen (chosen x-attribute)
   * @param slope (slope determined by simple linear regression model)
   * @param intercept (intercept determined by simple linear regression model)
   *
   * @return sum of squared residuals
   * @throws Exception if there is a missing class value in data
   */
  public static double calculateSSR(Instances data, Attribute chosen,
    double slope, double intercept) throws Exception {
    double ssr = 0.0;
    for (int i = 0; i < data.numInstances(); i++) {
      // residual = observed class value minus the model's fitted value
      double yHat = slope * data.instance(i).value(chosen) + intercept;
      double resid = data.instance(i).value(data.classIndex()) - yHat;
      ssr += resid * resid;
    }
    return ssr;
  }

  /**
   * Returns the R-squared value for a linear regression model, where sum of
   * squared residuals is already calculated. This works for either a simple or
   * a multiple linear regression model.
   *
   * @param data (the data set)
   * @param ssr (sum of squared residuals)
   * @return R^2 value
   * @throws Exception if there is a missing class value in data
   */
  public static double calculateRSquared(Instances data, double ssr)
    throws Exception {
    // calculate total sum of squares (deviation of y from mean)
    double yMean = data.meanOrMode(data.classIndex());
    double tss = 0.0;
    for (int i = 0; i < data.numInstances(); i++) {
      tss += (data.instance(i).value(data.classIndex()) - yMean)
        * (data.instance(i).value(data.classIndex()) - yMean);
    }
    // R^2 = 1 - SSR/TSS (fraction of variance explained by the model)
    double rsq = 1 - (ssr / tss);
    return rsq;
  }

  /**
   * Returns the adjusted R-squared value for a linear regression model. This
   * works for either a simple or a multiple linear regression model.
   *
   * @param rsq (the model's R-squared value)
   * @param n (the number of instances in the data)
   * @param k (the number of coefficients in the model: k>=2)
   * @return the adjusted R squared value, or Double.NaN when n/k are invalid
   *         (n < 1, k < 2, or n == k, which would divide by zero)
   */
  public static double calculateAdjRSquared(double rsq, int n, int k) {
    if (n < 1 || k < 2 || n == k) {
      System.err.println("Cannot calculate Adjusted R^2.");
      return Double.NaN;
    }
    // penalizes R^2 for the number of predictors relative to sample size
    return 1 - ((1 - rsq) * (n - 1) / (n - k));
  }

  /**
   * Returns the F-statistic for a linear regression model.
   *
   * @param rsq (the model's R-squared value)
   * @param n (the number of instances in the data)
   * @param k (the number of coefficients in the model: k>=2)
   * @return F-statistic, or Double.NaN when n/k are invalid (n < 1, k < 2, or
   *         n == k, which would divide by zero)
   */
  public static double calculateFStat(double rsq, int n, int k) {
    if (n < 1 || k < 2 || n == k) {
      System.err.println("Cannot calculate F-stat.");
      return Double.NaN;
    }
    // F = (explained variance per model df) / (unexplained variance per error df)
    double numerator = rsq / (k - 1);
    double denominator = (1 - rsq) / (n - k);
    return numerator / denominator;
  }

  /**
   * Returns the standard errors of slope and intercept for a simple linear
   * regression model: y = a + bx. The first element is the standard error of
   * slope, the second element is standard error of intercept.
   *
   * @param data (the data set)
   * @param chosen (chosen x-attribute)
   * @param slope (slope determined by simple linear regression model)
   * @param intercept (intercept determined by simple linear regression model)
   * @param df (number of instances - 2)
   *
   * @return array of standard errors of slope and intercept
   * @throws Exception if there is a missing class value in data
   */
  public static double[] calculateStdErrorOfCoef(Instances data,
    Attribute chosen, double slope, double intercept, int df) throws Exception {
    // calculate sum of squared residuals, mean squared error
    double ssr = calculateSSR(data, chosen, slope, intercept);
    double mse = ssr / df;

    /*
     * put data into 2-D array with 2 columns first column is value of chosen
     * attribute second column is constant (1's)
     */
    double[][] array = new double[data.numInstances()][2];
    for (int i = 0; i < data.numInstances(); i++) {
      array[i][0] = data.instance(i).value(chosen);
      array[i][1] = 1.0;
    }

    /*
     * linear algebra calculation: covariance matrix = mse * (XtX)^-1 diagonal
     * of covariance matrix is square of standard error of coefficients
     */
    Matrix X = new Matrix(array);
    Matrix Xt = X.transpose();
    Matrix XtX = Xt.times(X);
    Matrix inverse = XtX.inverse();
    Matrix cov = inverse.times(mse);
    double[] result = new double[2];
    for (int i = 0; i < 2; i++) {
      result[i] = Math.sqrt(cov.get(i, i));
    }
    return result;
  }

  /**
   * Returns an array of the standard errors of the coefficients in a multiple
   * linear regression. The last element in the array is the standard error of
   * the constant coefficient. The standard error array is used to calculate
   * the t-statistics.
   *
   * @param data (the data set)
   * @param selected (flags indicating variables used in the regression)
   * @param ssr (sum of squared residuals)
   * @param n (number of instances)
   * @param k (number of coefficients; includes constant)
   *
   * @return array of standard errors of coefficients
   * @throws Exception if there is a missing class value in data
   */
  public static double[] calculateStdErrorOfCoef(Instances data,
    boolean[] selected, double ssr, int n, int k) throws Exception {
    // Construct a matrix to hold X variables
    double[][] array = new double[n][k];

    // put data into 2-D array format, skipping the class attribute and any
    // attribute not flagged as selected
    int column = 0;
    for (int j = 0; j < data.numAttributes(); j++) {
      if ((data.classIndex() != j) && (selected[j])) {
        for (int i = 0; i < n; i++) {
          array[i][column] = data.instance(i).value(j);
        }
        column++;
      }
    }
    // last column in array is constant (1's) — the intercept term
    for (int i = 0; i < n; i++) {
      array[i][k - 1] = 1.0;
    }

    /*
     * linear algebra calculation: covariance matrix = mse * (XtX)^-1 diagonal
     * of covariance matrix is square of standard error of coefficients
     */
    Matrix X = new Matrix(array);
    Matrix Xt = X.transpose();
    Matrix XtX = Xt.times(X);
    Matrix inverse = XtX.inverse();
    double mse = ssr / (n - k);
    Matrix cov = inverse.times(mse);
    double[] result = new double[k];
    for (int i = 0; i < k; i++) {
      result[i] = Math.sqrt(cov.get(i, i));
    }
    return result;
  }

  /**
   * Returns an array of the t-statistic of each coefficient in a multiple
   * linear regression model.
   *
   * @param coef (array holding the value of each coefficient)
   * @param stderror (array holding each coefficient's standard error)
   * @param k (number of coefficients, includes constant)
   * @return array of t-statistics of coefficients (coef[i] / stderror[i])
   */
  public static double[] calculateTStats(double[] coef, double[] stderror,
    int k) {
    double[] result = new double[k];
    for (int i = 0; i < k; i++) {
      result[i] = coef[i] / stderror[i];
    }
    return result;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: ? $");
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/StandardEvaluationMetric.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * StandardEvaluationMetric.java
 * Copyright (C) 2011-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.evaluation;

import weka.core.Instance;

/**
 * Primarily a marker interface for a "standard" evaluation metric - i.e. one
 * that would be part of the normal output in Weka without having to turn on
 * specific display options.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision$
 */
public interface StandardEvaluationMetric {

  /**
   * Return a formatted string (suitable for displaying in console or GUI
   * output) containing all the statistics that this metric computes.
   *
   * @return a formatted string containing all the computed statistics
   */
  String toSummaryString();

  /**
   * Updates the statistics about a classifiers performance for the current
   * test instance. Gets called when the class is nominal. Implementers need
   * only implement this method if it is not possible to compute their
   * statistics from what is stored in the base Evaluation object.
   *
   * @param predictedDistribution the probabilities assigned to each class
   * @param instance the instance to be classified
   * @throws Exception if the class of the instance is not set
   */
  void updateStatsForClassifier(double[] predictedDistribution,
    Instance instance) throws Exception;

  /**
   * Updates the statistics about a predictors performance for the current test
   * instance. Gets called when the class is numeric. Implementers need only
   * implement this method if it is not possible to compute their statistics
   * from what is stored in the base Evaluation object.
   *
   * @param predictedValue the numeric value the classifier predicts
   * @param instance the instance to be classified
   * @throws Exception if the class of the instance is not set
   */
  void updateStatsForPredictor(double predictedValue, Instance instance)
    throws Exception;
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/ThresholdCurve.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * ThresholdCurve.java
 * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.evaluation;

import java.util.ArrayList;

import weka.classifiers.Classifier;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;

/**
 * Generates points illustrating prediction tradeoffs that can be obtained by
 * varying the threshold value between classes. For example, the typical
 * threshold value of 0.5 means the predicted probability of "positive" must be
 * higher than 0.5 for the instance to be predicted as "positive". The resulting
 * dataset can be used to visualize precision/recall tradeoff, or for ROC curve
 * analysis (true positive rate vs false positive rate). Weka just varies the
 * threshold on the class probability estimates in each case. The Mann Whitney
 * statistic is used to calculate the AUC.
 *
 * @author Len Trigg (len@reeltwo.com)
 * @version $Revision$
 */
public class ThresholdCurve implements RevisionHandler {

  /** The name of the relation used in threshold curve datasets */
  public static final String RELATION_NAME = "ThresholdCurve";

  /** attribute name: True Positives */
  public static final String TRUE_POS_NAME = "True Positives";
  /** attribute name: False Negatives */
  public static final String FALSE_NEG_NAME = "False Negatives";
  /** attribute name: False Positives */
  public static final String FALSE_POS_NAME = "False Positives";
  /** attribute name: True Negatives */
  public static final String TRUE_NEG_NAME = "True Negatives";
  /** attribute name: False Positive Rate" */
  public static final String FP_RATE_NAME = "False Positive Rate";
  /** attribute name: True Positive Rate */
  public static final String TP_RATE_NAME = "True Positive Rate";
  /** attribute name: Precision */
  public static final String PRECISION_NAME = "Precision";
  /** attribute name: Recall */
  public static final String RECALL_NAME = "Recall";
  /** attribute name: Fallout */
  public static final String FALLOUT_NAME = "Fallout";
  /** attribute name: FMeasure */
  public static final String FMEASURE_NAME = "FMeasure";
  /** attribute name: Sample Size */
  public static final String SAMPLE_SIZE_NAME = "Sample Size";
  /** attribute name: Lift */
  public static final String LIFT_NAME = "Lift";
  /** attribute name: Threshold */
  public static final String THRESHOLD_NAME = "Threshold";

  /**
   * Calculates the performance stats for the default class and return results
   * as a set of Instances. The structure of these Instances is as follows:
   * <p>
   * <ul>
   * <li><b>True Positives </b>
   * <li><b>False Negatives</b>
   * <li><b>False Positives</b>
   * <li><b>True Negatives</b>
   * <li><b>False Positive Rate</b>
   * <li><b>True Positive Rate</b>
   * <li><b>Precision</b>
   * <li><b>Recall</b>
   * <li><b>Fallout</b>
   * <li><b>Threshold</b> contains the probability threshold that gives rise to
   * the previous performance values.
   * </ul>
   * <p>
   * For the definitions of these measures, see TwoClassStats
   * <p>
   *
   * @see TwoClassStats
   * @param predictions the predictions to base the curve on
   * @return datapoints as a set of instances, null if no predictions have been
   *         made.
   * @throws InterruptedException
   */
  public Instances getCurve(final ArrayList<Prediction> predictions) throws InterruptedException {
    if (predictions.size() == 0) {
      return null;
    }
    // Default class of interest: the last value in the class distribution.
    return this.getCurve(predictions, ((NominalPrediction) predictions.get(0)).distribution().length - 1);
  }

  /**
   * Calculates the performance stats for the desired class and return results
   * as a set of Instances.
   *
   * @param predictions the predictions to base the curve on
   * @param classIndex index of the class of interest.
   * @return datapoints as a set of instances.
   * @throws InterruptedException
   */
  public Instances getCurve(final ArrayList<Prediction> predictions, final int classIndex) throws InterruptedException {

    if ((predictions.size() == 0) || (((NominalPrediction) predictions.get(0)).distribution().length <= classIndex)) {
      return null;
    }

    double totPos = 0, totNeg = 0;
    double[] probs = this.getProbabilities(predictions, classIndex);

    // Get distribution of positive/negatives (weighted); predictions with a
    // missing actual class or a negative weight are skipped with a warning.
    for (int i = 0; i < probs.length; i++) {
      NominalPrediction pred = (NominalPrediction) predictions.get(i);
      if (pred.actual() == Prediction.MISSING_VALUE) {
        System.err.println(this.getClass().getName() + " Skipping prediction with missing class value");
        continue;
      }
      if (pred.weight() < 0) {
        System.err.println(this.getClass().getName() + " Skipping prediction with negative weight");
        continue;
      }
      if (pred.actual() == classIndex) {
        totPos += pred.weight();
      } else {
        totNeg += pred.weight();
      }
    }

    Instances insts = this.makeHeader();
    int[] sorted = Utils.sort(probs);
    // Start with everything predicted positive; as the threshold rises,
    // cumulative weight moves from TP->FN and FP->TN.
    TwoClassStats tc = new TwoClassStats(totPos, totNeg, 0, 0);
    double threshold = 0;
    double cumulativePos = 0;
    double cumulativeNeg = 0;
    for (int i = 0; i < sorted.length; i++) {

      if ((i == 0) || (probs[sorted[i]] > threshold)) {
        // New distinct probability value: flush the accumulated weights and
        // emit a curve point at the previous threshold.
        tc.setTruePositive(tc.getTruePositive() - cumulativePos);
        tc.setFalseNegative(tc.getFalseNegative() + cumulativePos);
        tc.setFalsePositive(tc.getFalsePositive() - cumulativeNeg);
        tc.setTrueNegative(tc.getTrueNegative() + cumulativeNeg);
        threshold = probs[sorted[i]];
        insts.add(this.makeInstance(tc, threshold));
        cumulativePos = 0;
        cumulativeNeg = 0;
        if (i == sorted.length - 1) {
          break;
        }
      }

      NominalPrediction pred = (NominalPrediction) predictions.get(sorted[i]);

      if (pred.actual() == Prediction.MISSING_VALUE) {
        System.err.println(this.getClass().getName() + " Skipping prediction with missing class value");
        continue;
      }
      if (pred.weight() < 0) {
        System.err.println(this.getClass().getName() + " Skipping prediction with negative weight");
        continue;
      }
      if (pred.actual() == classIndex) {
        cumulativePos += pred.weight();
      } else {
        cumulativeNeg += pred.weight();
      }
    }

    // make sure a zero point gets into the curve
    if (tc.getFalseNegative() != totPos || tc.getTrueNegative() != totNeg) {
      tc = new TwoClassStats(0, 0, totNeg, totPos);
      threshold = probs[sorted[sorted.length - 1]] + 10e-6;
      insts.add(this.makeInstance(tc, threshold));
    }

    return insts;
  }

  /**
   * Calculates the n point precision result, which is the precision averaged
   * over n evenly spaced (w.r.t recall) samples of the curve.
   *
   * @param tcurve a previously extracted threshold curve Instances.
   * @param n the number of points to average over.
   * @return the n-point precision.
   * @throws InterruptedException
   */
  public static double getNPointPrecision(final Instances tcurve, final int n) throws InterruptedException {
    if (!RELATION_NAME.equals(tcurve.relationName()) || (tcurve.numInstances() == 0)) {
      return Double.NaN;
    }
    int recallInd = tcurve.attribute(RECALL_NAME).index();
    int precisInd = tcurve.attribute(PRECISION_NAME).index();
    double[] recallVals = tcurve.attributeToDoubleArray(recallInd);
    int[] sorted = Utils.sort(recallVals);
    double isize = 1.0 / (n - 1);
    double psum = 0;
    for (int i = 0; i < n; i++) {
      int pos = binarySearch(sorted, recallVals, i * isize);
      double recall = recallVals[sorted[pos]];
      double precis = tcurve.instance(sorted[pos]).value(precisInd);
      // interpolate figures for non-endpoints
      while ((pos != 0) && (pos < sorted.length - 1)) {
        pos++;
        double recall2 = recallVals[sorted[pos]];
        if (recall2 != recall) {
          double precis2 = tcurve.instance(sorted[pos]).value(precisInd);
          double slope = (precis2 - precis) / (recall2 - recall);
          double offset = precis - recall * slope;
          precis = isize * i * slope + offset;
          break;
        }
      }
      psum += precis;
    }
    return psum / n;
  }

  /**
   * Calculates the area under the precision-recall curve (AUPRC).
   *
   * @param tcurve a previously extracted threshold curve Instances.
   * @return the PRC area, or Double.NaN if you don't pass in a ThresholdCurve
   *         generated Instances.
   */
  public static double getPRCArea(final Instances tcurve) {
    final int n = tcurve.numInstances();
    if (!RELATION_NAME.equals(tcurve.relationName()) || (n == 0)) {
      return Double.NaN;
    }

    final int pInd = tcurve.attribute(PRECISION_NAME).index();
    final int rInd = tcurve.attribute(RECALL_NAME).index();
    final double[] pVals = tcurve.attributeToDoubleArray(pInd);
    final double[] rVals = tcurve.attributeToDoubleArray(rInd);

    double area = 0;
    double xlast = rVals[n - 1];

    // start from the first real p/r pair (not the artificial zero point)
    for (int i = n - 2; i >= 0; i--) {
      double recallDelta = rVals[i] - xlast;
      area += (pVals[i] * recallDelta);

      xlast = rVals[i];
    }

    if (area == 0) {
      return Utils.missingValue();
    }
    return area;
  }

  /**
   * Calculates the area under the ROC curve as the Wilcoxon-Mann-Whitney
   * statistic.
   *
   * @param tcurve a previously extracted threshold curve Instances.
   * @return the ROC area, or Double.NaN if you don't pass in a ThresholdCurve
   *         generated Instances.
   */
  public static double getROCArea(final Instances tcurve) {
    final int n = tcurve.numInstances();
    if (!RELATION_NAME.equals(tcurve.relationName()) || (n == 0)) {
      return Double.NaN;
    }
    final int tpInd = tcurve.attribute(TRUE_POS_NAME).index();
    final int fpInd = tcurve.attribute(FALSE_POS_NAME).index();
    final double[] tpVals = tcurve.attributeToDoubleArray(tpInd);
    final double[] fpVals = tcurve.attributeToDoubleArray(fpInd);

    double area = 0.0, cumNeg = 0.0;
    // First curve point has everything predicted positive, so it carries the
    // total positive and negative weights.
    final double totalPos = tpVals[0];
    final double totalNeg = fpVals[0];
    for (int i = 0; i < n; i++) {
      double cip, cin;
      if (i < n - 1) {
        cip = tpVals[i] - tpVals[i + 1];
        cin = fpVals[i] - fpVals[i + 1];
      } else {
        cip = tpVals[n - 1];
        cin = fpVals[n - 1];
      }
      // Trapezoid: positives at this threshold tie with half the negatives here.
      area += cip * (cumNeg + (0.5 * cin));
      cumNeg += cin;
    }
    area /= (totalNeg * totalPos);

    return area;
  }

  /**
   * Gets the index of the instance with the closest threshold value to the
   * desired target
   *
   * @param tcurve a set of instances that have been generated by this class
   * @param threshold the target threshold
   * @return the index of the instance that has threshold closest to the target,
   *         or -1 if this could not be found (i.e. no data, or bad threshold
   *         target)
   * @throws InterruptedException
   */
  public static int getThresholdInstance(final Instances tcurve, final double threshold) throws InterruptedException {
    if (!RELATION_NAME.equals(tcurve.relationName()) || (tcurve.numInstances() == 0) || (threshold < 0) || (threshold > 1.0)) {
      return -1;
    }
    if (tcurve.numInstances() == 1) {
      return 0;
    }
    double[] tvals = tcurve.attributeToDoubleArray(tcurve.numAttributes() - 1);
    int[] sorted = Utils.sort(tvals);
    return binarySearch(sorted, tvals, threshold);
  }

  /**
   * performs a binary search
   *
   * @param index the indices
   * @param vals the values
   * @param target the target to look for
   * @return the index of the target
   */
  private static int binarySearch(final int[] index, final double[] vals, final double target) {
    int lo = 0, hi = index.length - 1;
    while (hi - lo > 1) {
      int mid = lo + (hi - lo) / 2;
      double midval = vals[index[mid]];
      if (target > midval) {
        lo = mid;
      } else if (target < midval) {
        hi = mid;
      } else {
        // Exact hit: back up to the first of any run of equal values.
        while ((mid > 0) && (vals[index[mid - 1]] == target)) {
          mid--;
        }
        return mid;
      }
    }
    return lo;
  }

  /**
   * Extracts the predicted probability of the desired class for each prediction.
   *
   * @param predictions the predictions to use
   * @param classIndex the class index
   * @return the probabilities
   */
  private double[] getProbabilities(final ArrayList<Prediction> predictions, final int classIndex) {
    // sort by predicted probability of the desired class.
    double[] probs = new double[predictions.size()];
    for (int i = 0; i < probs.length; i++) {
      NominalPrediction pred = (NominalPrediction) predictions.get(i);
      probs[i] = pred.distribution()[classIndex];
    }
    return probs;
  }

  /**
   * generates the header
   *
   * @return the header
   */
  private Instances makeHeader() {
    ArrayList<Attribute> fv = new ArrayList<Attribute>();
    fv.add(new Attribute(TRUE_POS_NAME));
    fv.add(new Attribute(FALSE_NEG_NAME));
    fv.add(new Attribute(FALSE_POS_NAME));
    fv.add(new Attribute(TRUE_NEG_NAME));
    fv.add(new Attribute(FP_RATE_NAME));
    fv.add(new Attribute(TP_RATE_NAME));
    fv.add(new Attribute(PRECISION_NAME));
    fv.add(new Attribute(RECALL_NAME));
    fv.add(new Attribute(FALLOUT_NAME));
    fv.add(new Attribute(FMEASURE_NAME));
    fv.add(new Attribute(SAMPLE_SIZE_NAME));
    fv.add(new Attribute(LIFT_NAME));
    fv.add(new Attribute(THRESHOLD_NAME));
    return new Instances(RELATION_NAME, fv, 100);
  }

  /**
   * generates an instance out of the given data
   *
   * @param tc the statistics
   * @param prob the probability
   * @return the generated instance
   */
  private Instance makeInstance(final TwoClassStats tc, final double prob) {
    int count = 0;
    double[] vals = new double[13];
    vals[count++] = tc.getTruePositive();
    vals[count++] = tc.getFalseNegative();
    vals[count++] = tc.getFalsePositive();
    vals[count++] = tc.getTrueNegative();
    vals[count++] = tc.getFalsePositiveRate();
    vals[count++] = tc.getTruePositiveRate();
    vals[count++] = tc.getPrecision();
    vals[count++] = tc.getRecall();
    vals[count++] = tc.getFallout();
    vals[count++] = tc.getFMeasure();
    // Sample size: fraction of the data predicted positive at this threshold.
    double ss = (tc.getTruePositive() + tc.getFalsePositive()) / (tc.getTruePositive() + tc.getFalsePositive() + tc.getTrueNegative() + tc.getFalseNegative());
    vals[count++] = ss;
    // Lift is undefined when fewer than one positive is expected by chance.
    double expectedByChance = (ss * (tc.getTruePositive() + tc.getFalseNegative()));
    if (expectedByChance < 1) {
      vals[count++] = Utils.missingValue();
    } else {
      vals[count++] = tc.getTruePositive() / expectedByChance;
    }
    vals[count++] = prob;
    return new DenseInstance(1.0, vals);
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }

  /**
   * Tests the ThresholdCurve generation from the command line. The classifier
   * is currently hardcoded. Pipe in an arff file.
   *
   * @param args currently ignored
   */
  public static void main(final String[] args) {
    try {
      Instances inst = new Instances(new java.io.InputStreamReader(System.in));
      inst.setClassIndex(inst.numAttributes() - 1);
      ThresholdCurve tc = new ThresholdCurve();
      EvaluationUtils eu = new EvaluationUtils();
      Classifier classifier = new weka.classifiers.functions.Logistic();
      ArrayList<Prediction> predictions = new ArrayList<Prediction>();
      for (int i = 0; i < 2; i++) { // Do two runs.
        eu.setSeed(i);
        predictions.addAll(eu.getCVPredictions(classifier, inst, 10));
      }
      Instances result = tc.getCurve(predictions);
      System.out.println(result);
    } catch (Exception ex) {
      ex.printStackTrace();
    }
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/ThresholdProducingMetric.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * ThresholdProducingMetric.java
 * Copyright (C) 2014 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.evaluation;

/**
 * Interface for evaluation measures that internally optimize thresholds on the
 * class probability estimates. An implementing measure exposes the per-class
 * thresholds that correspond to the measure value it reports.
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public interface ThresholdProducingMetric {

  /**
   * Returns the threshold values, one for each class value, associated with the
   * value of the measure that is returned.
   *
   * @return thresholds
   */
  double[] getThresholds();
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/TwoClassStats.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * TwoClassStats.java * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.evaluation; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * Encapsulates performance functions for two-class problems. * * @author Len Trigg (len@reeltwo.com) * @version $Revision$ */ public class TwoClassStats implements RevisionHandler { /** The names used when converting this object to a confusion matrix */ private static final String[] CATEGORY_NAMES = { "negative", "positive" }; /** Pos predicted as pos */ private double m_TruePos; /** Neg predicted as pos */ private double m_FalsePos; /** Neg predicted as neg */ private double m_TrueNeg; /** Pos predicted as neg */ private double m_FalseNeg; /** * Creates the TwoClassStats with the given initial performance values. 
* * @param tp the number of correctly classified positives * @param fp the number of incorrectly classified negatives * @param tn the number of correctly classified negatives * @param fn the number of incorrectly classified positives */ public TwoClassStats(double tp, double fp, double tn, double fn) { setTruePositive(tp); setFalsePositive(fp); setTrueNegative(tn); setFalseNegative(fn); } /** Sets the number of positive instances predicted as positive */ public void setTruePositive(double tp) { m_TruePos = tp; } /** Sets the number of negative instances predicted as positive */ public void setFalsePositive(double fp) { m_FalsePos = fp; } /** Sets the number of negative instances predicted as negative */ public void setTrueNegative(double tn) { m_TrueNeg = tn; } /** Sets the number of positive instances predicted as negative */ public void setFalseNegative(double fn) { m_FalseNeg = fn; } /** Gets the number of positive instances predicted as positive */ public double getTruePositive() { return m_TruePos; } /** Gets the number of negative instances predicted as positive */ public double getFalsePositive() { return m_FalsePos; } /** Gets the number of negative instances predicted as negative */ public double getTrueNegative() { return m_TrueNeg; } /** Gets the number of positive instances predicted as negative */ public double getFalseNegative() { return m_FalseNeg; } /** * Calculate the true positive rate. This is defined as * <p> * * <pre> * correctly classified positives * ------------------------------ * total positives * </pre> * * @return the true positive rate */ public double getTruePositiveRate() { if (0 == (m_TruePos + m_FalseNeg)) { return Double.NaN; } else { return m_TruePos / (m_TruePos + m_FalseNeg); } } /** * Calculate the false positive rate. 
This is defined as * <p> * * <pre> * incorrectly classified negatives * -------------------------------- * total negatives * </pre> * * @return the false positive rate */ public double getFalsePositiveRate() { if (0 == (m_FalsePos + m_TrueNeg)) { return Double.NaN; } else { return m_FalsePos / (m_FalsePos + m_TrueNeg); } } /** * Calculate the precision. This is defined as * <p> * * <pre> * correctly classified positives * ------------------------------ * total predicted as positive * </pre> * * @return the precision */ public double getPrecision() { if (0 == (m_TruePos + m_FalsePos)) { return Double.NaN; } else { return m_TruePos / (m_TruePos + m_FalsePos); } } /** * Calculate the recall. This is defined as * <p> * * <pre> * correctly classified positives * ------------------------------ * total positives * </pre> * <p> * (Which is also the same as the truePositiveRate.) * * @return the recall */ public double getRecall() { return getTruePositiveRate(); } /** * Calculate the F-Measure. This is defined as * <p> * * <pre> * 2 * recall * precision * ---------------------- * recall + precision * </pre> * * @return the F-Measure */ public double getFMeasure() { double precision = getPrecision(); double recall = getRecall(); if ((precision + recall) == 0) { return Double.NaN; } return 2 * precision * recall / (precision + recall); } /** * Calculate the fallout. This is defined as * <p> * * <pre> * incorrectly classified negatives * -------------------------------- * total predicted as positive * </pre> * * @return the fallout */ public double getFallout() { if (0 == (m_TruePos + m_FalsePos)) { return Double.NaN; } else { return m_FalsePos / (m_TruePos + m_FalsePos); } } /** * Generates a <code>ConfusionMatrix</code> representing the current two-class * statistics, using class names "negative" and "positive". * * @return a <code>ConfusionMatrix</code>. 
*/ public ConfusionMatrix getConfusionMatrix() { ConfusionMatrix cm = new ConfusionMatrix(CATEGORY_NAMES); cm.set(0, 0, m_TrueNeg); cm.set(0, 1, m_FalsePos); cm.set(1, 0, m_FalseNeg); cm.set(1, 1, m_TruePos); return cm; } /** * Returns a string containing the various performance measures for the * current object */ @Override public String toString() { StringBuffer res = new StringBuffer(); res.append(getTruePositive()).append(' '); res.append(getFalseNegative()).append(' '); res.append(getTrueNegative()).append(' '); res.append(getFalsePositive()).append(' '); res.append(getFalsePositiveRate()).append(' '); res.append(getTruePositiveRate()).append(' '); res.append(getPrecision()).append(' '); res.append(getRecall()).append(' '); res.append(getFMeasure()).append(' '); res.append(getFallout()).append(' '); return res.toString(); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/output
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/output/prediction/AbstractOutput.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * AbstractOutput.java * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.evaluation.output.prediction; import java.io.BufferedWriter; import java.io.File; import java.io.FileWriter; import java.io.Serializable; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.Classifier; import weka.classifiers.misc.InputMappedClassifier; import weka.core.*; import weka.core.converters.ConverterUtils.DataSource; /** * A superclass for outputting the classifications of a classifier. * <p/> * Basic use with a classifier and a test set: * * <pre> * Classifier classifier = ... // trained classifier * Instances testset = ... // the test set to output the predictions for * StringBuffer buffer = ... // the string buffer to add the output to * AbstractOutput output = new FunkyOutput(); * output.setHeader(...); * output.printClassifications(classifier, testset); * </pre> * * Basic use with a classifier and a data source: * * <pre> * Classifier classifier = ... // trained classifier * DataSource testset = ... // the data source to obtain the test set from to output the predictions for * StringBuffer buffer = ... 
// the string buffer to add the output to * AbstractOutput output = new FunkyOutput(); * output.setHeader(...); * output.printClassifications(classifier, testset); * </pre> * * In order to make the output generation easily integrate into GUI components, * one can output the header, classifications and footer separately: * * <pre> * Classifier classifier = ... // trained classifier * Instances testset = ... // the test set to output the predictions for * StringBuffer buffer = ... // the string buffer to add the output to * AbstractOutput output = new FunkyOutput(); * output.setHeader(...); * // print the header * output.printHeader(); * // print the classifications one-by-one * for (int i = 0; i &lt; testset.numInstances(); i++) { * output.printClassification(classifier, testset.instance(i), i); * // output progress information * if ((i+1) % 100 == 0) * System.out.println((i+1) + "/" + testset.numInstances()); * } * // print the footer * output.printFooter(); * </pre> * * @author fracpete (fracpete at waikato dot ac dot nz) * @version $Revision$ */ public abstract class AbstractOutput implements Serializable, OptionHandler { /** for serialization. */ private static final long serialVersionUID = 752696986017306241L; /** the header of the dataset. */ protected Instances m_Header; /** the buffer to write to. */ protected StringBuffer m_Buffer; /** the file buffer to write to. */ protected StringBuffer m_FileBuffer; /** whether to output the class distribution. */ protected boolean m_OutputDistribution; /** the range of attributes to output. */ protected Range m_Attributes; /** the number of decimals after the decimal point. */ protected int m_NumDecimals; /** the file to store the output in. */ protected File m_OutputFile; /** whether to suppress the regular output and only store in file. */ protected boolean m_SuppressOutput; /** * Initializes the output class. 
*/ public AbstractOutput() { m_Header = null; m_OutputDistribution = false; m_Attributes = null; m_Buffer = null; m_NumDecimals = 3; m_OutputFile = new File("."); m_FileBuffer = new StringBuffer(); m_SuppressOutput = false; } /** * Returns a string describing the output generator. * * @return a description suitable for displaying in the GUI */ public abstract String globalInfo(); /** * Returns a short display text, to be used in comboboxes. * * @return a short display text */ public abstract String getDisplay(); /** * Returns an enumeration of all the available options.. * * @return an enumeration of all available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> result; result = new Vector<Option>(); result.addElement(new Option( "\tThe range of attributes to print in addition to the classification.\n" + "\t(default: none)", "p", 1, "-p <range>")); result.addElement(new Option( "\tWhether to turn on the output of the class distribution.\n" + "\tOnly for nominal class attributes.\n" + "\t(default: off)", "distribution", 0, "-distribution")); result.addElement(new Option( "\tThe number of digits after the decimal point.\n" + "\t(default: " + getDefaultNumDecimals() + ")", "decimals", 1, "-decimals <num>")); result.addElement(new Option( "\tThe file to store the output in, instead of outputting it on stdout.\n" + "\tGets ignored if the supplied path is a directory.\n" + "\t(default: .)", "file", 1, "-file <path>")); result .addElement(new Option( "\tIn case the data gets stored in a file, then this flag can be used\n" + "\tto suppress the regular output.\n" + "\t(default: not suppressed)", "suppress", 0, "-suppress")); return result.elements(); } /** * Sets the OptionHandler's options using the given list. All options will be * set (or reset) during this call (i.e. incremental setting of options is not * possible). 
* * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { String tmpStr; setAttributes(Utils.getOption("p", options)); setOutputDistribution(Utils.getFlag("distribution", options)); tmpStr = Utils.getOption("decimals", options); if (tmpStr.length() > 0) { setNumDecimals(Integer.parseInt(tmpStr)); } else { setNumDecimals(getDefaultNumDecimals()); } tmpStr = Utils.getOption("file", options); if (tmpStr.length() > 0) { setOutputFile(new File(tmpStr)); } else { setOutputFile(new File(".")); } setSuppressOutput(Utils.getFlag("suppress", options)); } /** * Gets the current option settings for the OptionHandler. * * @return the list of current option settings as an array of strings */ @Override public String[] getOptions() { Vector<String> result; result = new Vector<String>(); if (getAttributes().length() > 0) { result.add("-p"); result.add(getAttributes()); } if (getOutputDistribution()) { result.add("-distribution"); } if (getNumDecimals() != getDefaultNumDecimals()) { result.add("-decimals"); result.add("" + getNumDecimals()); } if (!getOutputFile().isDirectory()) { result.add("-file"); result.add(getOutputFile().getAbsolutePath()); if (getSuppressOutput()) { result.add("-suppress"); } } return result.toArray(new String[result.size()]); } /** * Sets the header of the dataset. * * @param value the header */ public void setHeader(Instances value) { if (value != null) { m_Header = new Instances(value, 0); } } /** * Returns the header of the dataset. * * @return the header */ public Instances getHeader() { return m_Header; } /** * Sets the buffer to use. * * @param value the buffer */ public void setBuffer(StringBuffer value) { m_Buffer = value; } /** * Returns the current buffer. * * @return the buffer, can be null */ public StringBuffer getBuffer() { return m_Buffer; } /** * Sets the range of attributes to output. 
* * @param value the range */ public void setAttributes(String value) { if (value.length() == 0) { m_Attributes = null; } else { m_Attributes = new Range(value); } } /** * Returns the range of attributes to output. * * @return the range */ public String getAttributes() { if (m_Attributes == null) { return ""; } else { return m_Attributes.getRanges(); } } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the GUI */ public String attributesTipText() { return "The indices of the attributes to print in addition."; } /** * Sets whether to output the class distribution or not. * * @param value true if the class distribution is to be output as well */ public void setOutputDistribution(boolean value) { m_OutputDistribution = value; } /** * Returns whether to output the class distribution as well. * * @return true if the class distribution is output as well */ public boolean getOutputDistribution() { return m_OutputDistribution; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the GUI */ public String outputDistributionTipText() { return "Whether to ouput the class distribution as well (only nominal class attributes)."; } /** * Returns the default number of digits to output after the decimal point. * * @return the default number of digits */ public int getDefaultNumDecimals() { return 3; } /** * Sets the number of digits to output after the decimal point. * * @param value the number of digits */ public void setNumDecimals(int value) { if (value >= 0) { m_NumDecimals = value; } else { System.err.println("Number of decimals cannot be negative (provided: " + value + ")!"); } } /** * Returns the number of digits to output after the decimal point. * * @return the number of digits */ public int getNumDecimals() { return m_NumDecimals; } /** * Returns the tip text for this property. 
* * @return tip text for this property suitable for displaying in the GUI */ public String numDecimalsTipText() { return "The number of digits to output after the decimal point."; } /** * Sets the output file to write to. A directory disables this feature. * * @param value the file to write to or a directory */ public void setOutputFile(File value) { m_OutputFile = value; } /** * Returns the output file to write to. A directory if turned off. * * @return the file to write to or a directory */ public File getOutputFile() { return m_OutputFile; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the GUI */ public String outputFileTipText() { return "The file to write the generated output to (disabled if path is a directory)."; } /** * Sets whether to the regular output is suppressed in case the output is * stored in a file. * * @param value true if the regular output is to be suppressed */ public void setSuppressOutput(boolean value) { m_SuppressOutput = value; } /** * Returns whether to the regular output is suppressed in case the output is * stored in a file. * * @return true if the regular output is to be suppressed */ public boolean getSuppressOutput() { return m_SuppressOutput; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the GUI */ public String suppressOutputTipText() { return "Whether to suppress the regular output when storing the output in a file."; } /** * Performs basic checks. * * @return null if everything is in order, otherwise the error message */ protected String checkBasic() { if (m_Buffer == null) { return "Buffer is null!"; } if (m_Header == null) { return "No dataset structure provided!"; } if (m_Attributes != null) { m_Attributes.setUpper(m_Header.numAttributes() - 1); } return null; } /** * Returns whether regular output is generated or not. 
* * @return true if regular output is generated */ public boolean generatesOutput() { return m_OutputFile.isDirectory() || (!m_OutputFile.isDirectory() && !m_SuppressOutput); } /** * If an output file was defined, then the string gets added to the file * buffer, otherwise to the actual buffer. * * @param s the string to append * @see #m_Buffer * @see #m_FileBuffer */ protected void append(String s) { if (generatesOutput()) { m_Buffer.append(s); } if (!m_OutputFile.isDirectory()) { m_FileBuffer.append(s); } } /** * Performs checks whether everything is correctly setup for the header. * * @return null if everything is in order, otherwise the error message */ protected String checkHeader() { return checkBasic(); } /** * Performs the actual printing of the header. */ protected abstract void doPrintHeader(); /** * Prints the header to the buffer. */ public void printHeader() { String error; if ((error = checkHeader()) != null) { throw new IllegalStateException(error); } doPrintHeader(); } /** * Performs the actual printing of the classification. * * @param classifier the classifier to use for printing the classification * @param inst the instance to print * @param index the index of the instance * @throws Exception if printing of classification fails */ protected abstract void doPrintClassification(Classifier classifier, Instance inst, int index) throws Exception; /** * Performs the actual printing of the classification. * * @param dist the distribution to use for printing the classification * @param inst the instance to print * @param index the index of the instance * @throws Exception if printing of classification fails */ protected abstract void doPrintClassification(double[] dist, Instance inst, int index) throws Exception; /** * Preprocesses an input instance. Basically this only does something * special in the case when the classifier is an InputMappedClassifier. 
* * @param inst the original instance to predict * @param classifier the classifier that will be used to make the prediction * @return the original instance unchanged or mapped (in the case of an * InputMappedClassifier) . * @throws Exception if a problem occurs. */ protected Instance preProcessInstance(Instance inst, Classifier classifier) throws Exception { if (classifier instanceof InputMappedClassifier) { return ((InputMappedClassifier) classifier).constructMappedInstance(inst); } else { return inst; } } /** * Prints the classification to the buffer. * * @param classifier the classifier to use for printing the classification * @param inst the instance to print * @param index the index of the instance * @throws Exception if check fails or error occurs during printing of * classification */ public void printClassification(Classifier classifier, Instance inst, int index) throws Exception { String error; if ((error = checkBasic()) != null) { throw new WekaException(error); } doPrintClassification(classifier.distributionForInstance(inst), preProcessInstance(inst, classifier), index); } /** * Prints the classification to the buffer. * * @param dist the distribution from classifier for the supplied instance * @param inst the instance to print * @param index the index of the instance * @throws Exception if check fails or error occurs during printing of * classification */ public void printClassification(double[] dist, Instance inst, int index) throws Exception { String error; if ((error = checkBasic()) != null) { throw new WekaException(error); } doPrintClassification(dist, inst, index); } /** * Prints the classifications to the buffer. 
* * @param classifier the classifier to use for printing the classifications * @param testset the data source to obtain the test instances from * @throws Exception if check fails or error occurs during printing of * classifications */ public void printClassifications(Classifier classifier, DataSource testset) throws Exception { int i; Instances test; Instance inst; i = 0; testset.reset(); if (classifier instanceof BatchPredictor && ((BatchPredictor) classifier).implementsMoreEfficientBatchPrediction()) { test = testset.getDataSet(); if (!(classifier instanceof InputMappedClassifier)) { try { test.setClassIndex(m_Header.classIndex()); } catch (Exception e) { throw new IllegalArgumentException("AbstractOutput: header of test set does not match."); } if (!(test.equalHeaders(m_Header))) { throw new IllegalArgumentException("AbstractOutput: header of test set does not match."); } } double[][] predictions = ((BatchPredictor) classifier).distributionsForInstances(test); for (i = 0; i < test.numInstances(); i++) { printClassification(predictions[i], preProcessInstance(test.instance(i), classifier), i); } } else { test = testset.getStructure(); if (!(classifier instanceof InputMappedClassifier)) { try { test.setClassIndex(m_Header.classIndex()); } catch (Exception e) { throw new IllegalArgumentException("AbstractOutput: header of test set does not match."); } if (!(test.equalHeaders(m_Header))) { throw new IllegalArgumentException("AbstractOutput: header of test set does not match."); } } while (testset.hasMoreElements(test)) { inst = testset.nextElement(test); printClassification(classifier.distributionForInstance(inst), preProcessInstance(inst, classifier), i); i++; } } } /** * Prints the classifications to the buffer. 
* * @param classifier the classifier to use for printing the classifications * @param testset the test instances * @throws Exception if check fails or error occurs during printing of * classifications */ public void printClassifications(Classifier classifier, Instances testset) throws Exception { int i; if (classifier instanceof BatchPredictor && ((BatchPredictor) classifier).implementsMoreEfficientBatchPrediction()) { double[][] predictions = ((BatchPredictor) classifier).distributionsForInstances(testset); for (i = 0; i < testset.numInstances(); i++) { printClassification(predictions[i], preProcessInstance(testset.instance(i), classifier), i); } } else { for (i = 0; i < testset.numInstances(); i++) { printClassification(classifier.distributionForInstance(testset.instance(i)), preProcessInstance(testset.instance(i), classifier), i); } } } /** * Performs the actual printing of the footer. */ protected abstract void doPrintFooter(); /** * Prints the footer to the buffer. This will also store the generated output * in a file if an output file was specified. * * @throws Exception if check fails */ public void printFooter() throws Exception { String error; BufferedWriter writer; if ((error = checkBasic()) != null) { throw new WekaException(error); } doPrintFooter(); // write output to file if (!m_OutputFile.isDirectory()) { try { writer = new BufferedWriter(new FileWriter(m_OutputFile)); writer.write(m_FileBuffer.toString()); writer.newLine(); writer.flush(); writer.close(); } catch (Exception e) { e.printStackTrace(); } } } /** * Prints the header, classifications and footer to the buffer. 
* * @param classifier the classifier to use for printing the classifications * @param testset the data source to obtain the test instances from * @throws Exception if check fails or error occurs during printing of * classifications */ public void print(Classifier classifier, DataSource testset) throws Exception { printHeader(); printClassifications(classifier, testset); printFooter(); } /** * Prints the header, classifications and footer to the buffer. * * @param classifier the classifier to use for printing the classifications * @param testset the test instances * @throws Exception if check fails or error occurs during printing of * classifications */ public void print(Classifier classifier, Instances testset) throws Exception { printHeader(); printClassifications(classifier, testset); printFooter(); } /** * Returns a fully configured object from the given commandline. * * @param cmdline the commandline to turn into an object * @return the object or null in case of an error */ public static AbstractOutput fromCommandline(String cmdline) { AbstractOutput result; String[] options; String classname; try { options = Utils.splitOptions(cmdline); classname = options[0]; options[0] = ""; result = (AbstractOutput) Utils .forName(AbstractOutput.class, classname, options); } catch (Exception e) { result = null; } return result; } }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/output
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/output/prediction/CSV.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * CSV.java * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.evaluation.output.prediction; import java.util.Collections; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.Classifier; import weka.core.Instance; import weka.core.Option; import weka.core.Utils; /** * <!-- globalinfo-start --> Outputs the predictions as CSV. * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -p &lt;range&gt; * The range of attributes to print in addition to the classification. * (default: none) * </pre> * * <pre> * -distribution * Whether to turn on the output of the class distribution. * Only for nominal class attributes. * (default: off) * </pre> * * <pre> * -decimals &lt;num&gt; * The number of digits after the decimal point. * (default: 3) * </pre> * * <pre> * -file &lt;path&gt; * The file to store the output in, instead of outputting it on stdout. * Gets ignored if the supplied path is a directory. * (default: .) * </pre> * * <pre> * -suppress * In case the data gets stored in a file, then this flag can be used * to suppress the regular output. * (default: not suppressed) * </pre> * * <pre> * -use-tab * Whether to use TAB as separator instead of comma. 
* (default: comma) * </pre> * * <!-- options-end --> * * @author fracpete (fracpete at waikato dot ac dot nz) * @version $Revision$ */ public class CSV extends AbstractOutput { /** for serialization. */ private static final long serialVersionUID = 3401604538169573720L; /** the delimiter. */ protected String m_Delimiter = ","; /** * Returns a string describing the output generator. * * @return a description suitable for displaying in the GUI */ @Override public String globalInfo() { return "Outputs the predictions as CSV."; } /** * Returns a short display text, to be used in comboboxes. * * @return a short display text */ @Override public String getDisplay() { return "CSV"; } /** * Returns an enumeration of all the available options.. * * @return an enumeration of all available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> result; result = new Vector<Option>(); result.addElement(new Option( "\tWhether to use TAB as separator instead of comma.\n" + "\t(default: comma)", "use-tab", 0, "-use-tab")); result.addAll(Collections.list(super.listOptions())); return result.elements(); } /** * Sets the OptionHandler's options using the given list. All options will be * set (or reset) during this call (i.e. incremental setting of options is not * possible). * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { setUseTab(Utils.getFlag("use-tab", options)); super.setOptions(options); } /** * Gets the current option settings for the OptionHandler. * * @return the list of current option settings as an array of strings */ @Override public String[] getOptions() { Vector<String> result = new Vector<String>(); if (getUseTab()) { result.add("-use-tab"); } Collections.addAll(result, super.getOptions()); return result.toArray(new String[result.size()]); } /** * Sets whether to use tab instead of comma as separator. 
* * @param value true if tab is to be used */ public void setUseTab(boolean value) { if (value) { m_Delimiter = "\t"; } else { m_Delimiter = ","; } } /** * Returns whether tab is used as separator. * * @return true if tab is used instead of comma */ public boolean getUseTab() { return m_Delimiter.equals("\t"); } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the GUI */ public String useTabTipText() { return "Whether to use TAB instead of COMMA as column separator."; } /** * Performs the actual printing of the header. */ @Override protected void doPrintHeader() { if (m_Header.classAttribute().isNominal()) { if (m_OutputDistribution) { append("inst#" + m_Delimiter + "actual" + m_Delimiter + "predicted" + m_Delimiter + "error" + m_Delimiter + "distribution"); for (int i = 1; i < m_Header.classAttribute().numValues(); i++) { append(m_Delimiter); } } else { append("inst#" + m_Delimiter + "actual" + m_Delimiter + "predicted" + m_Delimiter + "error" + m_Delimiter + "prediction"); } } else { append("inst#" + m_Delimiter + "actual" + m_Delimiter + "predicted" + m_Delimiter + "error"); } if (m_Attributes != null) { append(m_Delimiter); boolean first = true; for (int i = 0; i < m_Header.numAttributes(); i++) { if (i == m_Header.classIndex()) { continue; } if (m_Attributes.isInRange(i)) { if (!first) { append(m_Delimiter); } append(m_Header.attribute(i).name()); first = false; } } } append("\n"); } /** * Builds a string listing the attribute values in a specified range of * indices, separated by commas and enclosed in brackets. 
* * @param instance the instance to print the values from * @return a string listing values of the attributes in the range */ protected String attributeValuesString(Instance instance) { StringBuffer text = new StringBuffer(); if (m_Attributes != null) { m_Attributes.setUpper(instance.numAttributes() - 1); boolean first = true; for (int i = 0; i < instance.numAttributes(); i++) { if (m_Attributes.isInRange(i) && i != instance.classIndex()) { if (!first) { text.append(m_Delimiter); } text.append(instance.toString(i)); first = false; } } } return text.toString(); } /** * Store the prediction made by the classifier as a string. * * @param dist the distribution to use * @param inst the instance to generate text from * @param index the index in the dataset * @throws Exception if something goes wrong */ @Override protected void doPrintClassification(double[] dist, Instance inst, int index) throws Exception { int prec = m_NumDecimals; Instance withMissing = (Instance) inst.copy(); withMissing.setDataset(inst.dataset()); double predValue = 0; if (Utils.sum(dist) == 0) { predValue = Utils.missingValue(); } else { if (inst.classAttribute().isNominal()) { predValue = Utils.maxIndex(dist); } else { predValue = dist[0]; } } // index append("" + (index + 1)); if (inst.dataset().classAttribute().isNumeric()) { // actual if (inst.classIsMissing()) { append(m_Delimiter + "?"); } else { append(m_Delimiter + Utils.doubleToString(inst.classValue(), prec)); } // predicted if (Utils.isMissingValue(predValue)) { append(m_Delimiter + "?"); } else { append(m_Delimiter + Utils.doubleToString(predValue, prec)); } // error if (Utils.isMissingValue(predValue) || inst.classIsMissing()) { append(m_Delimiter + "?"); } else { append(m_Delimiter + Utils.doubleToString(predValue - inst.classValue(), prec)); } } else { // actual append(m_Delimiter + ((int) inst.classValue() + 1) + ":" + inst.toString(inst.classIndex())); // predicted if (Utils.isMissingValue(predValue)) { append(m_Delimiter + "?"); } 
else { append(m_Delimiter + ((int) predValue + 1) + ":" + inst.dataset().classAttribute().value((int) predValue)); } // error? if (!Utils.isMissingValue(predValue) && !inst.classIsMissing() && ((int) predValue + 1 != (int) inst.classValue() + 1)) { append(m_Delimiter + "+"); } else { append(m_Delimiter + ""); } // prediction/distribution if (m_OutputDistribution) { if (Utils.isMissingValue(predValue)) { append(m_Delimiter + "?"); } else { append(m_Delimiter); for (int n = 0; n < dist.length; n++) { if (n > 0) { append(m_Delimiter); } if (n == (int) predValue) { append("*"); } append(Utils.doubleToString(dist[n], prec)); } } } else { if (Utils.isMissingValue(predValue)) { append(m_Delimiter + "?"); } else { append(m_Delimiter + Utils.doubleToString(dist[(int) predValue], prec)); } } } // attributes if (m_Attributes != null) { append(m_Delimiter + attributeValuesString(withMissing)); } append("\n"); } /** * Store the prediction made by the classifier as a string. * * @param classifier the classifier to use * @param inst the instance to generate text from * @param index the index in the dataset * @throws Exception if something goes wrong */ @Override protected void doPrintClassification(Classifier classifier, Instance inst, int index) throws Exception { double[] d = classifier.distributionForInstance(inst); doPrintClassification(d, inst, index); } /** * Does nothing. */ @Override protected void doPrintFooter() { } }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/output
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/output/prediction/HTML.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * HTML.java * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.evaluation.output.prediction; import weka.classifiers.Classifier; import weka.core.Instance; import weka.core.Utils; /** <!-- globalinfo-start --> * Outputs the predictions in HTML. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -p &lt;range&gt; * The range of attributes to print in addition to the classification. * (default: none)</pre> * * <pre> -distribution * Whether to turn on the output of the class distribution. * Only for nominal class attributes. * (default: off)</pre> * * <pre> -decimals &lt;num&gt; * The number of digits after the decimal point. * (default: 3)</pre> * * <pre> -file &lt;path&gt; * The file to store the output in, instead of outputting it on stdout. * Gets ignored if the supplied path is a directory. * (default: .)</pre> * * <pre> -suppress * In case the data gets stored in a file, then this flag can be used * to suppress the regular output. * (default: not suppressed)</pre> * <!-- options-end --> * * @author fracpete (fracpete at waikato dot ac dot nz) * @version $Revision$ */ public class HTML extends AbstractOutput { /** for serialization. 
*/ private static final long serialVersionUID = 7241252244954353300L; /** * Returns a string describing the output generator. * * @return a description suitable for * displaying in the GUI */ public String globalInfo() { return "Outputs the predictions in HTML."; } /** * Returns a short display text, to be used in comboboxes. * * @return a short display text */ public String getDisplay() { return "HTML"; } /** * Replaces certain characters with their HTML entities. * * @param s the string to process * @return the processed string */ protected String sanitize(String s) { String result; result = s; result = result.replaceAll("&", "&amp;"); result = result.replaceAll("<", "&lt;"); result = result.replaceAll(">", "&gt;"); result = result.replaceAll("\"", "&quot;"); return result; } /** * Performs the actual printing of the header. */ protected void doPrintHeader() { append("<html>\n"); append("<head>\n"); append("<title>Predictions for dataset " + sanitize(m_Header.relationName()) + "</title>\n"); append("</head>\n"); append("<body>\n"); append("<div align=\"center\">\n"); append("<h3>Predictions for dataset " + sanitize(m_Header.relationName()) + "</h3>\n"); append("<table border=\"1\">\n"); append("<tr>\n"); if (m_Header.classAttribute().isNominal()) if (m_OutputDistribution) append("<td>inst#</td><td>actual</td><td>predicted</td><td>error</td><td colspan=\"" + m_Header.classAttribute().numValues() + "\">distribution</td>"); else append("<td>inst#</td><td>actual</td><td>predicted</td><td>error</td><td>prediction</td>"); else append("<td>inst#</td><td>actual</td><td>predicted</td><td>error</td>"); if (m_Attributes != null) { append("<td>"); boolean first = true; for (int i = 0; i < m_Header.numAttributes(); i++) { if (i == m_Header.classIndex()) continue; if (m_Attributes.isInRange(i)) { if (!first) append("</td><td>"); append(sanitize(m_Header.attribute(i).name())); first = false; } } append("</td>"); } append("</tr>\n"); } /** * Builds a string listing the attribute 
values in a specified range of indices, * separated by commas and enclosed in brackets. * * @param instance the instance to print the values from * @return a string listing values of the attributes in the range */ protected String attributeValuesString(Instance instance) { StringBuffer text = new StringBuffer(); if (m_Attributes != null) { boolean firstOutput = true; m_Attributes.setUpper(instance.numAttributes() - 1); for (int i=0; i<instance.numAttributes(); i++) if (m_Attributes.isInRange(i) && i != instance.classIndex()) { if (!firstOutput) text.append("</td>"); if (m_Header.attribute(i).isNumeric()) text.append("<td align=\"right\">"); else text.append("<td>"); text.append(sanitize(instance.toString(i))); firstOutput = false; } if (!firstOutput) text.append("</td>"); } return text.toString(); } protected void doPrintClassification(double[] dist, Instance inst, int index) throws Exception { int prec = m_NumDecimals; Instance withMissing = (Instance)inst.copy(); withMissing.setDataset(inst.dataset()); double predValue = 0; if (Utils.sum(dist) == 0) { predValue = Utils.missingValue(); } else { if (inst.classAttribute().isNominal()) { predValue = Utils.maxIndex(dist); } else { predValue = dist[0]; } } // index append("<tr>"); append("<td>" + (index+1) + "</td>"); if (inst.dataset().classAttribute().isNumeric()) { // actual if (inst.classIsMissing()) append("<td align=\"right\">" + "?" + "</td>"); else append("<td align=\"right\">" + Utils.doubleToString(inst.classValue(), prec) + "</td>"); // predicted if (Utils.isMissingValue(predValue)) append("<td align=\"right\">" + "?" + "</td>"); else append("<td align=\"right\">" + Utils.doubleToString(predValue, prec) + "</td>"); // error if (Utils.isMissingValue(predValue) || inst.classIsMissing()) append("<td align=\"right\">" + "?" 
+ "</td>"); else append("<td align=\"right\">" + Utils.doubleToString(predValue - inst.classValue(), prec) + "</td>"); } else { // actual append("<td>" + ((int) inst.classValue()+1) + ":" + sanitize(inst.toString(inst.classIndex())) + "</td>"); // predicted if (Utils.isMissingValue(predValue)) append("<td>" + "?" + "</td>"); else append("<td>" + ((int) predValue+1) + ":" + sanitize(inst.dataset().classAttribute().value((int)predValue)) + "</td>"); // error? if (!Utils.isMissingValue(predValue) && !inst.classIsMissing() && ((int) predValue+1 != (int) inst.classValue()+1)) append("<td>" + "+" + "</td>"); else append("<td>" + "&nbsp;" + "</td>"); // prediction/distribution if (m_OutputDistribution) { if (Utils.isMissingValue(predValue)) { append("<td>" + "?" + "</td>"); } else { append("<td align=\"right\">"); for (int n = 0; n < dist.length; n++) { if (n > 0) append("</td><td align=\"right\">"); if (n == (int) predValue) append("*"); append(Utils.doubleToString(dist[n], prec)); } append("</td>"); } } else { if (Utils.isMissingValue(predValue)) append("<td align=\"right\">" + "?" + "</td>"); else append("<td align=\"right\">" + Utils.doubleToString(dist[(int)predValue], prec) + "</td>"); } } // attributes append(attributeValuesString(withMissing) + "</tr>\n"); } /** * Store the prediction made by the classifier as a string. * * @param classifier the classifier to use * @param inst the instance to generate text from * @param index the index in the dataset * @throws Exception if something goes wrong */ protected void doPrintClassification(Classifier classifier, Instance inst, int index) throws Exception { double[] d = classifier.distributionForInstance(inst); doPrintClassification(d, inst, index); } /** * Does nothing. */ protected void doPrintFooter() { append("</table>\n"); append("</div>\n"); append("</body>\n"); append("</html>\n"); } }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/output
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/output/prediction/Null.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Null.java
 * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand
 */

package weka.classifiers.evaluation.output.prediction;

import weka.classifiers.Classifier;
import weka.core.Instance;

/**
 <!-- globalinfo-start -->
 * Suppresses all output.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -p &lt;range&gt;
 *  The range of attributes to print in addition to the classification.
 *  (default: none)</pre>
 *
 * <pre> -distribution
 *  Whether to turn on the output of the class distribution.
 *  Only for nominal class attributes.
 *  (default: off)</pre>
 *
 * <pre> -decimals &lt;num&gt;
 *  The number of digits after the decimal point.
 *  (default: 3)</pre>
 *
 * <pre> -file &lt;path&gt;
 *  The file to store the output in, instead of outputting it on stdout.
 *  Gets ignored if the supplied path is a directory.
 *  (default: .)</pre>
 *
 * <pre> -suppress
 *  In case the data gets stored in a file, then this flag can be used
 *  to suppress the regular output.
 *  (default: not suppressed)</pre>
 *
 <!-- options-end -->
 *
 * No-op implementation of AbstractOutput: every print hook is an empty
 * method, so selecting this generator discards all prediction output.
 *
 * @author fracpete (fracpete at waikato dot ac dot nz)
 * @version $Revision$
 */
public class Null extends AbstractOutput {

  /** for serialization. */
  private static final long serialVersionUID = 4988413155999044966L;

  /**
   * Returns a string describing the output generator.
   *
   * @return a description suitable for displaying in the GUI
   */
  public String globalInfo() {
    return "Suppresses all output.";
  }

  /**
   * Returns a short display text, to be used in comboboxes.
   *
   * @return a short display text
   */
  public String getDisplay() {
    return "No output";
  }

  /**
   * Returns always false, since this generator never produces any output.
   *
   * @return always false
   */
  public boolean generatesOutput() {
    return false;
  }

  /**
   * Does nothing.
   */
  protected void doPrintHeader() {
  }

  /**
   * Does nothing.
   *
   * @param classifier the classifier to use
   * @param inst the instance to generate text from
   * @param index the index in the dataset
   * @throws Exception if something goes wrong
   */
  protected void doPrintClassification(Classifier classifier, Instance inst, int index) throws Exception {
  }

  /**
   * Does nothing.
   *
   * @param dist the distribution to use
   * @param inst the instance to generate text from
   * @param index the index in the dataset
   * @throws Exception if something goes wrong
   */
  protected void doPrintClassification(double[] dist, Instance inst, int index) throws Exception {
  }

  /**
   * Does nothing.
   */
  protected void doPrintFooter() {
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/output
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/output/prediction/PlainText.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * PlainText.java * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.evaluation.output.prediction; import weka.classifiers.Classifier; import weka.core.Instance; import weka.core.Utils; /** <!-- globalinfo-start --> * Outputs the predictions in plain text. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -p &lt;range&gt; * The range of attributes to print in addition to the classification. * (default: none)</pre> * * <pre> -distribution * Whether to turn on the output of the class distribution. * Only for nominal class attributes. * (default: off)</pre> * * <pre> -decimals &lt;num&gt; * The number of digits after the decimal point. * (default: 3)</pre> * * <pre> -file &lt;path&gt; * The file to store the output in, instead of outputting it on stdout. * Gets ignored if the supplied path is a directory. * (default: .)</pre> * * <pre> -suppress * In case the data gets stored in a file, then this flag can be used * to suppress the regular output. * (default: not suppressed)</pre> * <!-- options-end --> * * @author fracpete (fracpete at waikato dot ac dot nz) * @version $Revision$ */ public class PlainText extends AbstractOutput { /** for serialization. 
*/
  private static final long serialVersionUID = 2033389864898242735L;

  /**
   * Returns a string describing the output generator.
   *
   * @return a description suitable for
   * displaying in the GUI
   */
  public String globalInfo() {
    return "Outputs the predictions in plain text.";
  }

  /**
   * Returns a short display text, to be used in comboboxes.
   *
   * @return a short display text
   */
  public String getDisplay() {
    return "Plain text";
  }

  /**
   * Performs the actual printing of the header.
   * The column layout depends on whether the class is nominal (optionally
   * with the full class distribution) or numeric; the names of any extra
   * attributes selected via m_Attributes are listed in parentheses.
   */
  protected void doPrintHeader() {
    // nominal class gets an extra column: winning prediction or full distribution
    if (m_Header.classAttribute().isNominal())
      if (m_OutputDistribution)
        append(" inst# actual predicted error distribution");
      else
        append(" inst# actual predicted error prediction");
    else
      append(" inst# actual predicted error");
    if (m_Attributes != null) {
      // list the names of the additional attributes output per row
      append(" (");
      boolean first = true;
      for (int i = 0; i < m_Header.numAttributes(); i++) {
        // the class attribute is never part of the additional output
        if (i == m_Header.classIndex())
          continue;
        if (m_Attributes.isInRange(i)) {
          if (!first)
            append(",");
          append(m_Header.attribute(i).name());
          first = false;
        }
      }
      append(")");
    }
    append("\n");
  }

  /**
   * Builds a string listing the attribute values in a specified range of indices,
   * separated by commas and enclosed in brackets.
   *
   * @param instance the instance to print the values from
   * @return a string listing values of the attributes in the range
   */
  protected String attributeValuesString(Instance instance) {
    StringBuffer text = new StringBuffer();
    if (m_Attributes != null) {
      boolean firstOutput = true;
      m_Attributes.setUpper(instance.numAttributes() - 1);
      for (int i=0; i<instance.numAttributes(); i++)
        if (m_Attributes.isInRange(i) && i != instance.classIndex()) {
          // the opening bracket is only emitted once a value is actually in range
          if (firstOutput)
            text.append("(");
          else
            text.append(",");
          text.append(instance.toString(i));
          firstOutput = false;
        }
      if (!firstOutput)
        text.append(")");
    }
    return text.toString();
  }

  /**
   * Store the prediction made by the classifier as a string.
   *
   * @param dist the distribution to use
   * @param inst the instance to generate text from
   * @param index the index in the dataset
   * @throws Exception if something goes wrong
   */
  protected void doPrintClassification(double[] dist, Instance inst, int index) throws Exception {
    int width = 7 + m_NumDecimals;
    int prec = m_NumDecimals;

    Instance withMissing = (Instance)inst.copy();
    withMissing.setDataset(inst.dataset());

    // an all-zero distribution is treated as "no prediction made"
    double predValue = 0;
    if (Utils.sum(dist) == 0) {
      predValue = Utils.missingValue();
    } else {
      if (inst.classAttribute().isNominal()) {
        predValue = Utils.maxIndex(dist);
      } else {
        // numeric class: the single predicted value is stored in dist[0]
        predValue = dist[0];
      }
    }

    // index (1-based row number)
    append(Utils.padLeftAndAllowOverflow("" + (index+1), 9));

    if (inst.dataset().classAttribute().isNumeric()) {
      // actual
      if (inst.classIsMissing())
        append(" " + Utils.padLeft("?", width));
      else
        append(" " + Utils.doubleToString(inst.classValue(), width, prec));
      // predicted
      if (Utils.isMissingValue(predValue))
        append(" " + Utils.padLeft("?", width));
      else
        append(" " + Utils.doubleToString(predValue, width, prec));
      // error
      if (Utils.isMissingValue(predValue) || inst.classIsMissing())
        append(" " + Utils.padLeft("?", width));
      else
        append(" " + Utils.doubleToString(predValue - inst.classValue(), width, prec));
    } else {
      // actual, printed as "<1-based label index>:<label>"
      append(" " + Utils.padLeftAndAllowOverflow(((int) inst.classValue()+1) + ":" + inst.toString(inst.classIndex()), width));
      // predicted
      if (Utils.isMissingValue(predValue))
        append(" " + Utils.padLeft("?", width));
      else
        append(" " + Utils.padLeftAndAllowOverflow(((int) predValue+1) + ":" + inst.dataset().classAttribute().value((int)predValue), width));
      // error? marked with "+" when the predicted label differs from the actual one
      if (!Utils.isMissingValue(predValue) && !inst.classIsMissing() && ((int) predValue+1 != (int) inst.classValue()+1))
        append(" " + " + ");
      else
        append(" " + " ");
      // prediction/distribution
      if (m_OutputDistribution) {
        if (Utils.isMissingValue(predValue)) {
          append(" " + "?");
        }
        else {
          append(" ");
          for (int n = 0; n < dist.length; n++) {
            if (n > 0)
              append(",");
            // the predicted label is flagged with a leading "*"
            if (n == (int) predValue)
              append("*");
            append(Utils.doubleToString(dist[n], prec));
          }
        }
      }
      else {
        if (Utils.isMissingValue(predValue))
          append(" " + "?");
        else
          append(" " + Utils.doubleToString(dist[(int)predValue], prec));
      }
    }

    // attributes (the optional extra columns announced in the header)
    append(" " + attributeValuesString(withMissing) + "\n");
  }

  /**
   * Store the prediction made by the classifier as a string.
   * Convenience overload that obtains the distribution from the classifier.
   *
   * @param classifier the classifier to use
   * @param inst the instance to generate text from
   * @param index the index in the dataset
   * @throws Exception if something goes wrong
   */
  protected void doPrintClassification(Classifier classifier, Instance inst, int index) throws Exception {
    double[] d = classifier.distributionForInstance(inst);
    doPrintClassification(d, inst, index);
  }

  /**
   * Does nothing — plain-text output needs no footer.
   */
  protected void doPrintFooter() {
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/output
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/evaluation/output/prediction/XML.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * XML.java
 * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand
 */

package weka.classifiers.evaluation.output.prediction;

import weka.classifiers.Classifier;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Utils;
import weka.core.Version;
import weka.core.xml.XMLDocument;

/**
 <!-- globalinfo-start -->
 * Outputs the predictions in XML.<br/>
 * <br/>
 * The following DTD is used:<br/>
 * <br/>
 * &lt;!DOCTYPE predictions<br/>
 * [<br/>
 * &lt;!ELEMENT predictions (prediction*)&gt;<br/>
 * &lt;!ATTLIST predictions version CDATA "3.5.8"&gt;<br/>
 * &lt;!ATTLIST predictions name CDATA #REQUIRED&gt;<br/>
 * <br/>
 * &lt;!ELEMENT prediction ((actual_label,predicted_label,error,(prediction|distribution),attributes?)|(actual_value,predicted_value,error,attributes?))&gt;<br/>
 * &lt;!ATTLIST prediction index CDATA #REQUIRED&gt;<br/>
 * <br/>
 * &lt;!ELEMENT actual_label ANY&gt;<br/>
 * &lt;!ATTLIST actual_label index CDATA #REQUIRED&gt;<br/>
 * &lt;!ELEMENT predicted_label ANY&gt;<br/>
 * &lt;!ATTLIST predicted_label index CDATA #REQUIRED&gt;<br/>
 * &lt;!ELEMENT error ANY&gt;<br/>
 * &lt;!ELEMENT prediction ANY&gt;<br/>
 * &lt;!ELEMENT distribution (class_label+)&gt;<br/>
 * &lt;!ELEMENT class_label ANY&gt;<br/>
 * &lt;!ATTLIST class_label index CDATA #REQUIRED&gt;<br/>
 * &lt;!ATTLIST class_label predicted (yes|no) "no"&gt;<br/>
 * &lt;!ELEMENT actual_value ANY&gt;<br/>
 * &lt;!ELEMENT predicted_value ANY&gt;<br/>
 * &lt;!ELEMENT attributes (attribute+)&gt;<br/>
 * &lt;!ELEMENT attribute ANY&gt;<br/>
 * &lt;!ATTLIST attribute index CDATA #REQUIRED&gt;<br/>
 * &lt;!ATTLIST attribute name CDATA #REQUIRED&gt;<br/>
 * &lt;!ATTLIST attribute type (numeric|date|nominal|string|relational) #REQUIRED&gt;<br/>
 * ]<br/>
 * &gt;
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -p &lt;range&gt;
 *  The range of attributes to print in addition to the classification.
 *  (default: none)</pre>
 *
 * <pre> -distribution
 *  Whether to turn on the output of the class distribution.
 *  Only for nominal class attributes.
 *  (default: off)</pre>
 *
 * <pre> -decimals &lt;num&gt;
 *  The number of digits after the decimal point.
 *  (default: 3)</pre>
 *
 * <pre> -file &lt;path&gt;
 *  The file to store the output in, instead of outputting it on stdout.
 *  Gets ignored if the supplied path is a directory.
 *  (default: .)</pre>
 *
 * <pre> -suppress
 *  In case the data gets stored in a file, then this flag can be used
 *  to suppress the regular output.
 *  (default: not suppressed)</pre>
 *
 <!-- options-end -->
 *
 * @author fracpete (fracpete at waikato dot ac dot nz)
 * @version $Revision$
 */
public class XML extends AbstractOutput {

  /** for serialization. */
  private static final long serialVersionUID = -3165514277316824801L;

  /** the DocType definition. */
  public final static String DTD_DOCTYPE = XMLDocument.DTD_DOCTYPE;

  /** the Element definition. */
  public final static String DTD_ELEMENT = XMLDocument.DTD_ELEMENT;

  /** the AttList definition. */
  public final static String DTD_ATTLIST = XMLDocument.DTD_ATTLIST;

  /** the optional marker. */
  public final static String DTD_OPTIONAL = XMLDocument.DTD_OPTIONAL;

  /** the at least one marker. */
  public final static String DTD_AT_LEAST_ONE = XMLDocument.DTD_AT_LEAST_ONE;

  /** the zero or more marker. */
  public final static String DTD_ZERO_OR_MORE = XMLDocument.DTD_ZERO_OR_MORE;

  /** the option separator. */
  public final static String DTD_SEPARATOR = XMLDocument.DTD_SEPARATOR;

  /** the CDATA placeholder. */
  public final static String DTD_CDATA = XMLDocument.DTD_CDATA;

  /** the ANY placeholder. */
  public final static String DTD_ANY = XMLDocument.DTD_ANY;

  /** the #PCDATA placeholder. */
  public final static String DTD_PCDATA = XMLDocument.DTD_PCDATA;

  /** the #IMPLIED placeholder. */
  public final static String DTD_IMPLIED = XMLDocument.DTD_IMPLIED;

  /** the #REQUIRED placeholder. */
  public final static String DTD_REQUIRED = XMLDocument.DTD_REQUIRED;

  /** the "version" attribute. */
  public final static String ATT_VERSION = XMLDocument.ATT_VERSION;

  /** the "name" attribute. */
  public final static String ATT_NAME = XMLDocument.ATT_NAME;

  /** the "type" attribute. */
  public final static String ATT_TYPE = "type";

  /** the value "yes". */
  public final static String VAL_YES = XMLDocument.VAL_YES;

  /** the value "no". */
  public final static String VAL_NO = XMLDocument.VAL_NO;

  /** the predictions tag. */
  public final static String TAG_PREDICTIONS = "predictions";

  /** the prediction tag. */
  public final static String TAG_PREDICTION = "prediction";

  /** the actual_label tag. */
  public final static String TAG_ACTUAL_LABEL = "actual_label";

  /** the predicted_label tag. */
  public final static String TAG_PREDICTED_LABEL = "predicted_label";

  /** the error tag. */
  public final static String TAG_ERROR = "error";

  /** the distribution tag. */
  public final static String TAG_DISTRIBUTION = "distribution";

  /** the class_label tag. */
  public final static String TAG_CLASS_LABEL = "class_label";

  /** the actual_value tag (numeric class). */
  public final static String TAG_ACTUAL_VALUE = "actual_value";

  /** the predicted_value tag (numeric class). */
  public final static String TAG_PREDICTED_VALUE = "predicted_value";

  /** the attributes tag. */
  public final static String TAG_ATTRIBUTES = "attributes";

  /** the attribute tag. */
  public final static String TAG_ATTRIBUTE = "attribute";

  /** the index attribute. */
  public final static String ATT_INDEX = "index";

  /** the predicted attribute. */
  public final static String ATT_PREDICTED = "predicted";

  /** the DTD, assembled from the tag/attribute constants above so it always
   * matches the elements this class actually emits. */
  public final static String DTD =
      "<!" + DTD_DOCTYPE + " " + TAG_PREDICTIONS + "\n"
    + "[\n"
    + " <!" + DTD_ELEMENT + " " + TAG_PREDICTIONS + " (" + TAG_PREDICTION + DTD_ZERO_OR_MORE + ")" + ">\n"
    + " <!" + DTD_ATTLIST + " " + TAG_PREDICTIONS + " " + ATT_VERSION + " " + DTD_CDATA + " \"" + Version.VERSION + "\"" + ">\n"
    + " <!" + DTD_ATTLIST + " " + TAG_PREDICTIONS + " " + ATT_NAME + " " + DTD_CDATA + " " + DTD_REQUIRED + ">\n"
    + "\n"
    + " <!" + DTD_ELEMENT + " " + TAG_PREDICTION + " " + "(" + "(" + TAG_ACTUAL_LABEL + "," + TAG_PREDICTED_LABEL + "," + TAG_ERROR + "," + "(" + TAG_PREDICTION + DTD_SEPARATOR + TAG_DISTRIBUTION + ")" + "," + TAG_ATTRIBUTES + DTD_OPTIONAL + ")" + DTD_SEPARATOR + "(" + TAG_ACTUAL_VALUE + "," + TAG_PREDICTED_VALUE + "," + TAG_ERROR + "," + TAG_ATTRIBUTES + DTD_OPTIONAL + ")" + ")" + ">\n"
    + " <!" + DTD_ATTLIST + " " + TAG_PREDICTION + " " + ATT_INDEX + " " + DTD_CDATA + " " + DTD_REQUIRED + ">\n"
    + "\n"
    + " <!" + DTD_ELEMENT + " " + TAG_ACTUAL_LABEL + " " + DTD_ANY + ">\n"
    + " <!" + DTD_ATTLIST + " " + TAG_ACTUAL_LABEL + " " + ATT_INDEX + " " + DTD_CDATA + " " + DTD_REQUIRED + ">\n"
    + " <!" + DTD_ELEMENT + " " + TAG_PREDICTED_LABEL + " " + DTD_ANY + ">\n"
    + " <!" + DTD_ATTLIST + " " + TAG_PREDICTED_LABEL + " " + ATT_INDEX + " " + DTD_CDATA + " " + DTD_REQUIRED + ">\n"
    + " <!" + DTD_ELEMENT + " " + TAG_ERROR + " " + DTD_ANY + ">\n"
    + " <!" + DTD_ELEMENT + " " + TAG_PREDICTION + " " + DTD_ANY + ">\n"
    + " <!" + DTD_ELEMENT + " " + TAG_DISTRIBUTION + " (" + TAG_CLASS_LABEL + DTD_AT_LEAST_ONE + ")" + ">\n"
    + " <!" + DTD_ELEMENT + " " + TAG_CLASS_LABEL + " " + DTD_ANY + ">\n"
    + " <!" + DTD_ATTLIST + " " + TAG_CLASS_LABEL + " " + ATT_INDEX + " " + DTD_CDATA + " " + DTD_REQUIRED + ">\n"
    + " <!" + DTD_ATTLIST + " " + TAG_CLASS_LABEL + " " + ATT_PREDICTED + " (" + VAL_YES + DTD_SEPARATOR + VAL_NO + ") " + "\"" + VAL_NO + "\"" + ">\n"
    + " <!" + DTD_ELEMENT + " " + TAG_ACTUAL_VALUE + " " + DTD_ANY + ">\n"
    + " <!" + DTD_ELEMENT + " " + TAG_PREDICTED_VALUE + " " + DTD_ANY + ">\n"
    + " <!" + DTD_ELEMENT + " " + TAG_ATTRIBUTES + " (" + TAG_ATTRIBUTE + DTD_AT_LEAST_ONE + ")" + ">\n"
    + " <!" + DTD_ELEMENT + " " + TAG_ATTRIBUTE + " " + DTD_ANY + ">\n"
    + " <!" + DTD_ATTLIST + " " + TAG_ATTRIBUTE + " " + ATT_INDEX + " " + DTD_CDATA + " " + DTD_REQUIRED + ">\n"
    + " <!" + DTD_ATTLIST + " " + TAG_ATTRIBUTE + " " + ATT_NAME + " " + DTD_CDATA + " " + DTD_REQUIRED + ">\n"
    + " <!" + DTD_ATTLIST + " " + TAG_ATTRIBUTE + " " + ATT_TYPE + " " + "(" + Attribute.typeToString(Attribute.NUMERIC) + DTD_SEPARATOR + Attribute.typeToString(Attribute.DATE) + DTD_SEPARATOR + Attribute.typeToString(Attribute.NOMINAL) + DTD_SEPARATOR + Attribute.typeToString(Attribute.STRING) + DTD_SEPARATOR + Attribute.typeToString(Attribute.RELATIONAL) + ")" + " " + DTD_REQUIRED + ">\n"
    + "]\n"
    + ">";

  /**
   * Returns a string describing the output generator.
   *
   * @return a description suitable for
   * displaying in the GUI
   */
  public String globalInfo() {
    return "Outputs the predictions in XML.\n\n" + "The following DTD is used:\n\n" + DTD;
  }

  /**
   * Returns a short display text, to be used in comboboxes.
   *
   * @return a short display text
   */
  public String getDisplay() {
    return "XML";
  }

  /**
   * Replaces certain characters with their XML entities.
   * NOTE(review): escapes only &amp;, &lt;, &gt; and &quot; — single quotes
   * are left as-is, which is fine for element content and double-quoted
   * attribute values as produced here.
   *
   * @param s the string to process
   * @return the processed string
   */
  protected String sanitize(String s) {
    String result;
    result = s;
    // "&" must be replaced first, otherwise the other entities get double-escaped
    result = result.replaceAll("&", "&amp;");
    result = result.replaceAll("<", "&lt;");
    result = result.replaceAll(">", "&gt;");
    result = result.replaceAll("\"", "&quot;");
    return result;
  }

  /**
   * Performs the actual printing of the header: XML declaration, inline DTD
   * and the opening &lt;predictions&gt; tag.
   */
  protected void doPrintHeader() {
    append("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n");
    append("\n");
    append(DTD + "\n\n");
    append("<" + TAG_PREDICTIONS + " " + ATT_VERSION + "=\"" + Version.VERSION + "\"" + " " + ATT_NAME + "=\"" + sanitize(m_Header.relationName()) + "\">\n");
  }

  /**
   * Builds a string listing the attribute values in a specified range of indices,
   * separated by commas and enclosed in brackets.
   *
   * @param instance the instance to print the values from
   * @return a string listing values of the attributes in the range
   */
  protected String attributeValuesString(Instance instance) {
    StringBuffer text = new StringBuffer();
    if (m_Attributes != null) {
      text.append(" <" + TAG_ATTRIBUTES + ">\n");
      m_Attributes.setUpper(instance.numAttributes() - 1);
      for (int i=0; i<instance.numAttributes(); i++) {
        // the class attribute is never part of the additional output
        if (m_Attributes.isInRange(i) && i != instance.classIndex()) {
          text.append(" <" + TAG_ATTRIBUTE + " " + ATT_INDEX + "=\"" + (i+1) + "\"" + " " + ATT_NAME + "=\"" + sanitize(instance.attribute(i).name()) + "\"" + " " + ATT_TYPE + "=\"" + Attribute.typeToString(instance.attribute(i).type()) + "\"" + ">");
          text.append(sanitize(instance.toString(i)));
          text.append("</" + TAG_ATTRIBUTE + ">\n");
        }
      }
      text.append(" </" + TAG_ATTRIBUTES + ">\n");
    }
    return text.toString();
  }

  /**
   * Store the prediction made by the classifier as a string.
   *
   * @param dist the distribution to use
   * @param inst the instance to generate text from
   * @param index the index in the dataset
   * @throws Exception if something goes wrong
   */
  protected void doPrintClassification(double[] dist, Instance inst, int index) throws Exception {
    int prec = m_NumDecimals;

    Instance withMissing = (Instance)inst.copy();
    withMissing.setDataset(inst.dataset());

    // an all-zero distribution is treated as "no prediction made"
    double predValue = 0;
    if (Utils.sum(dist) == 0) {
      predValue = Utils.missingValue();
    } else {
      if (inst.classAttribute().isNominal()) {
        predValue = Utils.maxIndex(dist);
      } else {
        // numeric class: the single predicted value is stored in dist[0]
        predValue = dist[0];
      }
    }

    // opening tag
    append(" <" + TAG_PREDICTION + " " + ATT_INDEX + "=\"" + (index+1) + "\">\n");

    if (inst.dataset().classAttribute().isNumeric()) {
      // actual
      append(" <" + TAG_ACTUAL_VALUE + ">");
      if (inst.classIsMissing())
        append("?");
      else
        append(Utils.doubleToString(inst.classValue(), prec));
      append("</" + TAG_ACTUAL_VALUE + ">\n");
      // predicted
      // NOTE(review): this guards on classIsMissing() rather than
      // Utils.isMissingValue(predValue); predValue can be missing when the
      // distribution sums to 0 even if the class value is present — verify
      // against upstream Weka before changing.
      append(" <" + TAG_PREDICTED_VALUE + ">");
      if (inst.classIsMissing())
        append("?");
      else
        append(Utils.doubleToString(predValue, prec));
      append("</" + TAG_PREDICTED_VALUE + ">\n");
      // error
      append(" <" + TAG_ERROR + ">");
      if (Utils.isMissingValue(predValue) || inst.classIsMissing())
        append("?");
      else
        append(Utils.doubleToString(predValue - inst.classValue(), prec));
      append("</" + TAG_ERROR + ">\n");
    } else {
      // actual (1-based label index plus label text)
      append(" <" + TAG_ACTUAL_LABEL + " " + ATT_INDEX + "=\"" + ((int) inst.classValue()+1) + "\"" + ">");
      append(sanitize(inst.toString(inst.classIndex())));
      append("</" + TAG_ACTUAL_LABEL + ">\n");
      // predicted
      append(" <" + TAG_PREDICTED_LABEL + " " + ATT_INDEX + "=\"" + ((int) predValue+1) + "\"" + ">");
      if (Utils.isMissingValue(predValue))
        append("?");
      else
        append(sanitize(inst.dataset().classAttribute().value((int)predValue)));
      append("</" + TAG_PREDICTED_LABEL + ">\n");
      // error? ("yes" when the predicted label differs from the actual one)
      append(" <" + TAG_ERROR + ">");
      if (!Utils.isMissingValue(predValue) && !inst.classIsMissing() && ((int) predValue+1 != (int) inst.classValue()+1))
        append(VAL_YES);
      else
        append(VAL_NO);
      append("</" + TAG_ERROR + ">\n");
      // prediction/distribution
      if (m_OutputDistribution) {
        append(" <" + TAG_DISTRIBUTION + ">\n");
        for (int n = 0; n < dist.length; n++) {
          append(" <" + TAG_CLASS_LABEL + " " + ATT_INDEX + "=\"" + (n+1) + "\"");
          // the winning label carries predicted="yes"
          if (!Utils.isMissingValue(predValue) && (n == (int) predValue))
            append(" " + ATT_PREDICTED + "=\"" + VAL_YES + "\"");
          append(">");
          append(Utils.doubleToString(dist[n], prec));
          append("</" + TAG_CLASS_LABEL + ">\n");
        }
        append(" </" + TAG_DISTRIBUTION + ">\n");
      } else {
        append(" <" + TAG_PREDICTION + ">");
        if (Utils.isMissingValue(predValue))
          append("?");
        else
          append(Utils.doubleToString(dist[(int)predValue], prec));
        append("</" + TAG_PREDICTION + ">\n");
      }
    }

    // attributes
    if (m_Attributes != null)
      append(attributeValuesString(withMissing));

    // closing tag
    append(" </" + TAG_PREDICTION + ">\n");
  }

  /**
   * Store the prediction made by the classifier as a string.
   * Convenience overload that obtains the distribution from the classifier.
   *
   * @param classifier the classifier to use
   * @param inst the instance to generate text from
   * @param index the index in the dataset
   * @throws Exception if something goes wrong
   */
  protected void doPrintClassification(Classifier classifier, Instance inst, int index) throws Exception {
    double[] d = classifier.distributionForInstance(inst);
    doPrintClassification(d, inst, index);
  }

  /**
   * Writes the closing &lt;/predictions&gt; tag.
   */
  protected void doPrintFooter() {
    append("</" + TAG_PREDICTIONS + ">\n");
  }
}
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/FLDA.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * FLDA.java * Copyright (C) 2004, 2014 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions; import weka.classifiers.AbstractClassifier; import weka.core.Instances; import weka.core.Instance; import weka.core.RevisionUtils; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Option; import weka.core.Utils; import weka.filters.Filter; import weka.filters.unsupervised.attribute.RemoveUseless; import no.uib.cipr.matrix.*; import no.uib.cipr.matrix.Matrix; import java.util.Enumeration; import java.util.Collections; /** * <!-- globalinfo-start --> * Builds Fisher's Linear Discriminant function. The threshold is selected so that the separator is half-way between centroids. The class must be binary and all other attributes must be numeric. Missing values are not permitted. Constant attributes are removed using RemoveUseless. No standardization or normalization of attributes is performed. * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> * Valid options are: <p/> * * <pre> -R * The ridge parameter. 
* (default is 1e-6)</pre> * * <pre> -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution).</pre> * * <!-- options-end --> * * @author Eibe Frank, University of Waikato * @version $Revision: 10382 $ */ public class FLDA extends AbstractClassifier { /** for serialization */ static final long serialVersionUID = -9212385698193681291L; /** Holds header of training date */ protected Instances m_Data; /** The weight vector */ protected Vector m_Weights; /** The threshold */ protected double m_Threshold; /** Ridge parameter */ protected double m_Ridge = 1e-6; /** Rmeove useless filter */ protected RemoveUseless m_RemoveUseless; /** * Global info for this classifier. */ public String globalInfo() { return "Builds Fisher\'s Linear Discriminant function. The threshold is selected " + "so that the separator is half-way between centroids. The class must be " + "binary and all other attributes must be numeric. Missing values are not " + "permitted. Constant attributes are removed using RemoveUseless. No " + "standardization or normalization of attributes is performed."; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NUMERIC_ATTRIBUTES); // class result.enable(Capability.BINARY_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(0); return result; } /** * Computes the mean vector for each class. 
*/ protected Vector[] getClassMeans(Instances data, int[] counts) { double[][] centroids = new double[2][data.numAttributes() - 1]; for (int i = 0; i < data.numInstances(); i++) { Instance inst = data.instance(i); int index = 0; for (int j = 0; j < data.numAttributes(); j++) { if (j != data.classIndex()) { centroids[(int)inst.classValue()][index++] += inst.value(j); } } counts[(int)inst.classValue()] ++; } Vector[] centroidVectors = new DenseVector[2]; for (int i = 0; i < 2; i++) { centroidVectors[i] = new DenseVector(centroids[i]); centroidVectors[i].scale(1.0 / (double)counts[i]); } if (m_Debug) { System.out.println("Count for class 0: " + counts[0]); System.out.println("Centroid 0:" + centroidVectors[0]); System.out.println("Count for class 11: " + counts[1]); System.out.println("Centroid 1:" + centroidVectors[1]); } return centroidVectors; } /** * Computes centered subsets as matrix with instances as columns. */ protected Matrix[] getCenteredData(Instances data, int[] counts, Vector[] centroids) { Matrix[] centeredData = new Matrix[2]; for (int i = 0; i < 2; i++) { centeredData[i] = new DenseMatrix(data.numAttributes() - 1, counts[i]); } int[] indexC = new int[2]; for (int i = 0; i < data.numInstances(); i++) { Instance inst = data.instance(i); int classIndex = (int)inst.classValue(); int index = 0; for (int j = 0; j < data.numAttributes(); j++) { if (j != data.classIndex()) { centeredData[classIndex].set(index, indexC[classIndex], inst.value(j) - centroids[classIndex].get(index)); index++; } } indexC[classIndex]++; } if (m_Debug) { System.out.println("Centered data for class 0:\n" + centeredData[0]); System.out.println("Centered data for class 1:\n" + centeredData[1]); } return centeredData; } /** * Builds the classifier. */ public void buildClassifier(Instances insts) throws Exception { // can classifier handle the data? 
getCapabilities().testWithFail(insts); // Remove constant attributes m_RemoveUseless = new RemoveUseless(); m_RemoveUseless.setInputFormat(insts); insts = Filter.useFilter(insts, m_RemoveUseless); insts.deleteWithMissingClass(); // Establish class frequencies and centroids int[] classCounts = new int[2]; Vector[] centroids = getClassMeans(insts, classCounts); // Compute difference of centroids Vector diff = centroids[0].copy().add(-1.0, centroids[1]); // Center data for each class Matrix[] data = getCenteredData(insts, classCounts, centroids); // Compute scatter matrix and add ridge Matrix scatter = new UpperSymmDenseMatrix(data[0].numRows()).rank1(data[0]). add(new UpperSymmDenseMatrix(data[1].numRows()).rank1(data[1])); for (int i = 0; i < scatter.numColumns(); i++) { scatter.add(i, i, m_Ridge); } if (m_Debug) { System.out.println("Scatter:\n" + scatter); } // Establish and normalize weight vector m_Weights = scatter.solve(diff, new DenseVector(scatter.numColumns())); m_Weights.scale(1.0 / m_Weights.norm(Vector.Norm.Two)); // Compute threshold m_Threshold = 0.5 * m_Weights.dot(centroids[0].copy().add(centroids[1])); // Store header only m_Data = new Instances(insts, 0); } /** * Output class "probabilities". These need to be calibrated. */ public double[] distributionForInstance(Instance inst) throws Exception { // Filter instance m_RemoveUseless.input(inst); inst = m_RemoveUseless.output(); // Convert instance to matrix Vector instM = new DenseVector(inst.numAttributes() - 1); int index = 0; for (int i = 0; i < inst.numAttributes(); i++) { if (i != m_Data.classIndex()) { instM.set(index++, inst.value(i)); } } // Pipe output through sigmoid double[] dist = new double[2]; dist[1] = 1/(1 + Math.exp(instM.dot(m_Weights) - m_Threshold)); dist[0] = 1 - dist[1]; return dist; } /** * Outputs description of classifier as a string. 
* @return the description */ public String toString() { if (m_Weights == null) { return "No model has been built yet."; } StringBuffer result = new StringBuffer(); result.append("Fisher's Linear Discriminant Analysis\n\n"); result.append("Threshold: " + m_Threshold + "\n\n"); result.append("Weights:\n\n"); int index = 0; for (int i = 0; i < m_Data.numAttributes(); i++) { if (i != m_Data.classIndex()) { result.append(m_Data.attribute(i).name() + ": \t"); double weight = m_Weights.get(index++); if (weight >= 0) { result.append(" "); } result.append(weight + "\n"); } } return result.toString(); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String ridgeTipText() { return "The value of the ridge parameter."; } /** * Get the value of Ridge. * * @return Value of Ridge. */ public double getRidge() { return m_Ridge; } /** * Set the value of Ridge. * * @param newRidge Value to assign to Ridge. */ public void setRidge(double newRidge) { m_Ridge = newRidge; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration<Option> listOptions() { java.util.Vector<Option> newVector = new java.util.Vector<Option>(7); newVector.addElement(new Option( "\tThe ridge parameter.\n"+ "\t(default is 1e-6)", "R", 0, "-R")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. <p/> * * <!-- options-start --> * Valid options are: <p/> * * <pre> -R * The ridge parameter. 
* (default is 1e-6)</pre> * * <pre> -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution).</pre> * * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String ridgeString = Utils.getOption('R', options); if (ridgeString.length() != 0) { setRidge(Double.parseDouble(ridgeString)); } else { setRidge(1e-6); } super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of IBk. * * @return an array of strings suitable for passing to setOptions() */ public String [] getOptions() { java.util.Vector<String> options = new java.util.Vector<String>(); options.add("-R"); options.add("" + getRidge()); Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision: 10382 $"); } /** * Generates an FLDA classifier. * * @param argv the options */ public static void main(String [] argv){ runClassifier(new FLDA(), argv); } }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/GaussianProcesses.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * GaussianProcesses.java * Copyright (C) 2005-2012,2015 University of Waikato */ package weka.classifiers.functions; import java.util.Collections; import java.util.Enumeration; import no.uib.cipr.matrix.DenseCholesky; import no.uib.cipr.matrix.DenseVector; import no.uib.cipr.matrix.Matrices; import no.uib.cipr.matrix.Matrix; import no.uib.cipr.matrix.UpperSPDDenseMatrix; import no.uib.cipr.matrix.Vector; import weka.classifiers.ConditionalDensityEstimator; import weka.classifiers.IntervalEstimator; import weka.classifiers.RandomizableClassifier; import weka.classifiers.functions.supportVector.CachedKernel; import weka.classifiers.functions.supportVector.Kernel; import weka.classifiers.functions.supportVector.PolyKernel; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.SelectedTag; import weka.core.Statistics; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.filters.Filter; import weka.filters.unsupervised.attribute.NominalToBinary; import 
weka.filters.unsupervised.attribute.Normalize; import weka.filters.unsupervised.attribute.ReplaceMissingValues; import weka.filters.unsupervised.attribute.Standardize; /** * <!-- globalinfo-start --> * Implements Gaussian processes for regression without * hyperparameter-tuning. To make choosing an appropriate noise level easier, this implementation * applies normalization/standardization to the target attribute as well as the other attributes (if * normalization/standardizaton is turned on). Missing values are replaced by the global mean/mode. * Nominal attributes are converted to binary ones. Note that kernel caching is turned off if the * kernel used implements CachedKernel. * <br> * <br> * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> * BibTeX: * * * <pre> * * &#64;misc{Mackay1998, * * address = {Dept. of Physics, Cambridge University, UK}, * * author = {David J.C. Mackay}, * * title = {Introduction to Gaussian Processes}, * * year = {1998}, * * PS = {http://wol.ra.phy.cam.ac.uk/mackay/gpB.ps.gz} * * } * * * </pre> * * * <br> * <br> * <!-- technical-bibtex-end --> * * <!-- options-start --> * Valid options are: * <p> * * * * * <pre> * -L &lt;double&gt; * * Level of Gaussian Noise wrt transformed target. (default 1) * </pre> * * * * * * <pre> * -N * * Whether to 0=normalize/1=standardize/2=neither. (default 0=normalize) * </pre> * * * * * * <pre> * -K &lt;classname and parameters&gt; * * The Kernel to use. * * (default: weka.classifiers.functions.supportVector.PolyKernel) * </pre> * * * * * * <pre> * -S &lt;num&gt; * * Random number seed. * * (default 1) * </pre> * * * * * * <pre> * -output-debug-info * * If set, classifier is run in debug mode and * * may output additional info to the console * </pre> * * * * * * <pre> * -do-not-check-capabilities * * If set, classifier capabilities are not checked before classifier is built * * (use with caution). 
* </pre> * * * * * * <pre> * -num-decimal-places * * The number of decimal places for the output of numbers in the model (default 2). * </pre> * * * * * * <pre> * * * Options specific to kernel weka.classifiers.functions.supportVector.PolyKernel: * * * </pre> * * * * * * <pre> * -E &lt;num&gt; * * The Exponent to use. * * (default: 1.0) * </pre> * * * * * * <pre> * -L * * Use lower-order terms. * * (default: no) * </pre> * * * * * * <pre> * -C &lt;num&gt; * * The size of the cache (a prime number), 0 for full cache and * * -1 to turn it off. * * (default: 250007) * </pre> * * * * * * <pre> * -output-debug-info * * Enables debugging output (if available) to be printed. * * (default: off) * </pre> * * * * * * <pre> * -no-checks * * Turns off all checks - use with caution! * * (default: checks on) * </pre> * * * <!-- options-end --> * * @author Kurt Driessens (kurtd@cs.waikato.ac.nz) * @author Remco Bouckaert (remco@cs.waikato.ac.nz) * @author Eibe Frank, University of Waikato * @version $Revision$ */ public class GaussianProcesses extends RandomizableClassifier implements IntervalEstimator, ConditionalDensityEstimator, TechnicalInformationHandler, WeightedInstancesHandler { /** for serialization */ static final long serialVersionUID = -8620066949967678545L; /** The filter used to make attributes numeric. */ protected NominalToBinary m_NominalToBinary; /** normalizes the data */ public static final int FILTER_NORMALIZE = 0; /** standardizes the data */ public static final int FILTER_STANDARDIZE = 1; /** no filter */ public static final int FILTER_NONE = 2; /** The filter to apply to the training data */ public static final Tag[] TAGS_FILTER = { new Tag(FILTER_NORMALIZE, "Normalize training data"), new Tag(FILTER_STANDARDIZE, "Standardize training data"), new Tag(FILTER_NONE, "No normalization/standardization"), }; /** The filter used to standardize/normalize all values. 
*/ protected Filter m_Filter = null; /** Whether to normalize/standardize/neither */ protected int m_filterType = FILTER_NORMALIZE; /** The filter used to get rid of missing values. */ protected ReplaceMissingValues m_Missing; /** * Turn off all checks and conversions? Turning them off assumes that data is purely numeric, * doesn't contain any missing values, and has a numeric class. */ protected boolean m_checksTurnedOff = false; /** Gaussian Noise Value. */ protected double m_delta = 1; /** The squared noise value. */ protected double m_deltaSquared = 1; /** * The parameters of the linear transformation realized by the filter on the class attribute */ protected double m_Alin; protected double m_Blin; /** Template of kernel to use */ protected Kernel m_kernel = new PolyKernel(); /** Actual kernel object to use */ protected Kernel m_actualKernel; /** The number of training instances */ protected int m_NumTrain = 0; /** The training data. */ protected double m_avg_target; /** (negative) covariance matrix in symmetric matrix representation **/ public Matrix m_L; /** The vector of target values. */ protected Vector m_t; /** The weight of the training instances. */ protected double[] m_weights; /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return " Implements Gaussian processes for " + "regression without hyperparameter-tuning. To make choosing an " + "appropriate noise level easier, this implementation applies " + "normalization/standardization to the target attribute as well " + "as the other attributes (if " + " normalization/standardizaton is turned on). Missing values " + "are replaced by the global mean/mode. Nominal attributes are " + "converted to binary ones. 
Note that kernel caching is turned off " + "if the kernel used implements CachedKernel."; } /** * Returns an instance of a TechnicalInformation object, containing detailed information about the * technical background of this class, e.g., paper reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.MISC); result.setValue(Field.AUTHOR, "David J.C. Mackay"); result.setValue(Field.YEAR, "1998"); result.setValue(Field.TITLE, "Introduction to Gaussian Processes"); result.setValue(Field.ADDRESS, "Dept. of Physics, Cambridge University, UK"); result.setValue(Field.PS, "http://wol.ra.phy.cam.ac.uk/mackay/gpB.ps.gz"); return result; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = this.getKernel().getCapabilities(); result.setOwner(this); // attribute result.enableAllAttributeDependencies(); // with NominalToBinary we can also handle nominal attributes, but only // if the kernel can handle numeric attributes if (result.handles(Capability.NUMERIC_ATTRIBUTES)) { result.enable(Capability.NOMINAL_ATTRIBUTES); } result.enable(Capability.MISSING_VALUES); // class result.disableAllClasses(); result.disableAllClassDependencies(); result.disable(Capability.NO_CLASS); result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Method for building the classifier. * * @param insts * the set of training instances * @throws Exception * if the classifier can't be built successfully */ @Override public void buildClassifier(Instances insts) throws Exception { // check the set of training instances if (!this.m_checksTurnedOff) { // can classifier handle the data? 
this.getCapabilities().testWithFail(insts); // remove instances with missing class insts = new Instances(insts); insts.deleteWithMissingClass(); this.m_Missing = new ReplaceMissingValues(); this.m_Missing.setInputFormat(insts); insts = Filter.useFilter(insts, this.m_Missing); } else { this.m_Missing = null; } if (this.getCapabilities().handles(Capability.NUMERIC_ATTRIBUTES)) { boolean onlyNumeric = true; if (!this.m_checksTurnedOff) { for (int i = 0; i < insts.numAttributes(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (i != insts.classIndex()) { if (!insts.attribute(i).isNumeric()) { onlyNumeric = false; break; } } } } if (!onlyNumeric) { this.m_NominalToBinary = new NominalToBinary(); this.m_NominalToBinary.setInputFormat(insts); insts = Filter.useFilter(insts, this.m_NominalToBinary); } else { this.m_NominalToBinary = null; } } else { this.m_NominalToBinary = null; } if (this.m_filterType == FILTER_STANDARDIZE) { this.m_Filter = new Standardize(); ((Standardize) this.m_Filter).setIgnoreClass(true); this.m_Filter.setInputFormat(insts); insts = Filter.useFilter(insts, this.m_Filter); } else if (this.m_filterType == FILTER_NORMALIZE) { this.m_Filter = new Normalize(); ((Normalize) this.m_Filter).setIgnoreClass(true); this.m_Filter.setInputFormat(insts); insts = Filter.useFilter(insts, this.m_Filter); } else { this.m_Filter = null; } this.m_NumTrain = insts.numInstances(); // determine which linear transformation has been // applied to the class by the filter if (this.m_Filter != null) { Instance witness = (Instance) insts.instance(0).copy(); witness.setValue(insts.classIndex(), 0); this.m_Filter.input(witness); this.m_Filter.batchFinished(); Instance res = this.m_Filter.output(); this.m_Blin = res.value(insts.classIndex()); witness.setValue(insts.classIndex(), 1); this.m_Filter.input(witness); this.m_Filter.batchFinished(); res = this.m_Filter.output(); this.m_Alin = 
res.value(insts.classIndex()) - this.m_Blin; } else { this.m_Alin = 1.0; this.m_Blin = 0.0; } // Initialize kernel this.m_actualKernel = Kernel.makeCopy(this.m_kernel); if (this.m_kernel instanceof CachedKernel) { ((CachedKernel) this.m_actualKernel).setCacheSize(-1); // We don't need a cache at all } this.m_actualKernel.buildKernel(insts); // Compute average target value double sum = 0.0; for (int i = 0; i < insts.numInstances(); i++) { sum += insts.instance(i).weight() * insts.instance(i).classValue(); } this.m_avg_target = sum / insts.sumOfWeights(); // Store squared noise level this.m_deltaSquared = this.m_delta * this.m_delta; // Store square roots of instance m_weights this.m_weights = new double[insts.numInstances()]; for (int i = 0; i < insts.numInstances(); i++) { this.m_weights[i] = Math.sqrt(insts.instance(i).weight()); } // initialize kernel matrix/covariance matrix int n = insts.numInstances(); // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } this.m_L = new UpperSPDDenseMatrix(n); for (int i = 0; i < n; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } for (int j = i + 1; j < n; j++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } this.m_L.set(i, j, this.m_weights[i] * this.m_weights[j] * this.m_actualKernel.eval(i, j, insts.instance(i))); } this.m_L.set(i, i, this.m_weights[i] * this.m_weights[i] * this.m_actualKernel.eval(i, i, insts.instance(i)) + this.m_deltaSquared); } // Compute inverse of kernel matrix this.m_L = new DenseCholesky(n, true).factor((UpperSPDDenseMatrix) this.m_L).solve(Matrices.identity(n)); this.m_L = new UpperSPDDenseMatrix(this.m_L); // Convert from DenseMatrix // Compute t Vector tt = new DenseVector(n); for (int i = 0; i < n; i++) { // XXX kill weka execution if 
(Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } tt.set(i, this.m_weights[i] * (insts.instance(i).classValue() - this.m_avg_target)); } this.m_t = this.m_L.mult(tt, new DenseVector(insts.numInstances())); } // buildClassifier /** * Classifies a given instance. * * @param inst * the instance to be classified * @return the classification * @throws Exception * if instance could not be classified successfully */ @Override public double classifyInstance(Instance inst) throws Exception { // Filter instance inst = this.filterInstance(inst); // Build K vector Vector k = new DenseVector(this.m_NumTrain); for (int i = 0; i < this.m_NumTrain; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } k.set(i, this.m_weights[i] * this.m_actualKernel.eval(-1, i, inst)); } double result = (k.dot(this.m_t) + this.m_avg_target - this.m_Blin) / this.m_Alin; return result; } /** * Filters an instance. */ protected Instance filterInstance(Instance inst) throws Exception { if (!this.m_checksTurnedOff) { this.m_Missing.input(inst); this.m_Missing.batchFinished(); inst = this.m_Missing.output(); } if (this.m_NominalToBinary != null) { this.m_NominalToBinary.input(inst); this.m_NominalToBinary.batchFinished(); inst = this.m_NominalToBinary.output(); } if (this.m_Filter != null) { this.m_Filter.input(inst); this.m_Filter.batchFinished(); inst = this.m_Filter.output(); } return inst; } /** * Computes standard deviation for given instance, without transforming target back into original * space. 
*/ protected double computeStdDev(final Instance inst, final Vector k) throws Exception { double kappa = this.m_actualKernel.eval(-1, -1, inst) + this.m_deltaSquared; double s = this.m_L.mult(k, new DenseVector(k.size())).dot(k); double sigma = this.m_delta; if (kappa > s) { sigma = Math.sqrt(kappa - s); } return sigma; } /** * Computes a prediction interval for the given instance and confidence level. * * @param inst * the instance to make the prediction for * @param confidenceLevel * the percentage of cases the interval should cover * @return a 1*2 array that contains the boundaries of the interval * @throws Exception * if interval could not be estimated successfully */ @Override public double[][] predictIntervals(Instance inst, double confidenceLevel) throws Exception { inst = this.filterInstance(inst); // Build K vector (and Kappa) Vector k = new DenseVector(this.m_NumTrain); for (int i = 0; i < this.m_NumTrain; i++) { k.set(i, this.m_weights[i] * this.m_actualKernel.eval(-1, i, inst)); } double estimate = k.dot(this.m_t) + this.m_avg_target; double sigma = this.computeStdDev(inst, k); confidenceLevel = 1.0 - ((1.0 - confidenceLevel) / 2.0); double z = Statistics.normalInverse(confidenceLevel); double[][] interval = new double[1][2]; interval[0][0] = estimate - z * sigma; interval[0][1] = estimate + z * sigma; interval[0][0] = (interval[0][0] - this.m_Blin) / this.m_Alin; interval[0][1] = (interval[0][1] - this.m_Blin) / this.m_Alin; return interval; } /** * Gives standard deviation of the prediction at the given instance. 
* * @param inst * the instance to get the standard deviation for * @return the standard deviation * @throws Exception * if computation fails */ public double getStandardDeviation(Instance inst) throws Exception { inst = this.filterInstance(inst); // Build K vector (and Kappa) Vector k = new DenseVector(this.m_NumTrain); for (int i = 0; i < this.m_NumTrain; i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } k.set(i, this.m_weights[i] * this.m_actualKernel.eval(-1, i, inst)); } return this.computeStdDev(inst, k) / this.m_Alin; } /** * Returns natural logarithm of density estimate for given value based on given instance. * * @param inst * the instance to make the prediction for. * @param value * the value to make the prediction for. * @return the natural logarithm of the density estimate * @exception Exception * if the density cannot be computed */ @Override public double logDensity(Instance inst, double value) throws Exception { inst = this.filterInstance(inst); // Build K vector (and Kappa) Vector k = new DenseVector(this.m_NumTrain); for (int i = 0; i < this.m_NumTrain; i++) { k.set(i, this.m_weights[i] * this.m_actualKernel.eval(-1, i, inst)); } double estimate = k.dot(this.m_t) + this.m_avg_target; double sigma = this.computeStdDev(inst, k); // transform to GP space value = value * this.m_Alin + this.m_Blin; // center around estimate value = value - estimate; double z = -Math.log(sigma * Math.sqrt(2 * Math.PI)) - value * value / (2.0 * sigma * sigma); return z + Math.log(this.m_Alin); } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { java.util.Vector<Option> result = new java.util.Vector<>(); result.addElement(new Option("\tLevel of Gaussian Noise wrt transformed target." 
+ " (default 1)", "L", 1, "-L <double>")); result.addElement(new Option("\tWhether to 0=normalize/1=standardize/2=neither. " + "(default 0=normalize)", "N", 1, "-N")); result.addElement(new Option("\tThe Kernel to use.\n" + "\t(default: weka.classifiers.functions.supportVector.PolyKernel)", "K", 1, "-K <classname and parameters>")); result.addAll(Collections.list(super.listOptions())); result.addElement(new Option("", "", 0, "\nOptions specific to kernel " + this.getKernel().getClass().getName() + ":")); result.addAll(Collections.list(((OptionHandler) this.getKernel()).listOptions())); return result.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> * Valid options are: * <p> * * * * * <pre> * -L &lt;double&gt; * * Level of Gaussian Noise wrt transformed target. (default 1) * </pre> * * * * * * <pre> * -N * * Whether to 0=normalize/1=standardize/2=neither. (default 0=normalize) * </pre> * * * * * * <pre> * -K &lt;classname and parameters&gt; * * The Kernel to use. * * (default: weka.classifiers.functions.supportVector.PolyKernel) * </pre> * * * * * * <pre> * -S &lt;num&gt; * * Random number seed. * * (default 1) * </pre> * * * * * * <pre> * -output-debug-info * * If set, classifier is run in debug mode and * * may output additional info to the console * </pre> * * * * * * <pre> * -do-not-check-capabilities * * If set, classifier capabilities are not checked before classifier is built * * (use with caution). * </pre> * * * * * * <pre> * -num-decimal-places * * The number of decimal places for the output of numbers in the model (default 2). * </pre> * * * * * * <pre> * * * Options specific to kernel weka.classifiers.functions.supportVector.PolyKernel: * * * </pre> * * * * * * <pre> * -E &lt;num&gt; * * The Exponent to use. * * (default: 1.0) * </pre> * * * * * * <pre> * -L * * Use lower-order terms. 
* * (default: no) * </pre> * * * * * * <pre> * -C &lt;num&gt; * * The size of the cache (a prime number), 0 for full cache and * * -1 to turn it off. * * (default: 250007) * </pre> * * * * * * <pre> * -output-debug-info * * Enables debugging output (if available) to be printed. * * (default: off) * </pre> * * * * * * <pre> * -no-checks * * Turns off all checks - use with caution! * * (default: checks on) * </pre> * * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String tmpStr; String[] tmpOptions; tmpStr = Utils.getOption('L', options); if (tmpStr.length() != 0) { this.setNoise(Double.parseDouble(tmpStr)); } else { this.setNoise(1); } tmpStr = Utils.getOption('N', options); if (tmpStr.length() != 0) { this.setFilterType(new SelectedTag(Integer.parseInt(tmpStr), TAGS_FILTER)); } else { this.setFilterType(new SelectedTag(FILTER_NORMALIZE, TAGS_FILTER)); } tmpStr = Utils.getOption('K', options); tmpOptions = Utils.splitOptions(tmpStr); if (tmpOptions.length != 0) { tmpStr = tmpOptions[0]; tmpOptions[0] = ""; this.setKernel(Kernel.forName(tmpStr, tmpOptions)); } super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the classifier. 
* * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { java.util.Vector<String> result = new java.util.Vector<>(); result.addElement("-L"); result.addElement("" + this.getNoise()); result.addElement("-N"); result.addElement("" + this.m_filterType); result.addElement("-K"); result.addElement("" + this.m_kernel.getClass().getName() + " " + Utils.joinOptions(this.m_kernel.getOptions())); Collections.addAll(result, super.getOptions()); return result.toArray(new String[result.size()]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String kernelTipText() { return "The kernel to use."; } /** * Gets the kernel to use. * * @return the kernel */ public Kernel getKernel() { return this.m_kernel; } /** * Sets the kernel to use. * * @param value * the new kernel */ public void setKernel(final Kernel value) { this.m_kernel = value; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String filterTypeTipText() { return "Determines how/if the data will be transformed."; } /** * Gets how the training data will be transformed. Will be one of FILTER_NORMALIZE, * FILTER_STANDARDIZE, FILTER_NONE. * * @return the filtering mode */ public SelectedTag getFilterType() { return new SelectedTag(this.m_filterType, TAGS_FILTER); } /** * Sets how the training data will be transformed. Should be one of FILTER_NORMALIZE, * FILTER_STANDARDIZE, FILTER_NONE. 
* * @param newType * the new filtering mode */ public void setFilterType(final SelectedTag newType) { if (newType.getTags() == TAGS_FILTER) { this.m_filterType = newType.getSelectedTag().getID(); } } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String noiseTipText() { return "The level of Gaussian Noise (added to the diagonal of the Covariance Matrix), after the " + "target has been normalized/standardized/left unchanged)."; } /** * Get the value of noise. * * @return Value of noise. */ public double getNoise() { return this.m_delta; } /** * Set the level of Gaussian Noise. * * @param v * Value to assign to noise. */ public void setNoise(final double v) { this.m_delta = v; } /** * Prints out the classifier. * * @return a description of the classifier as a string */ @Override public String toString() { StringBuffer text = new StringBuffer(); if (this.m_t == null) { return "Gaussian Processes: No model built yet."; } try { text.append("Gaussian Processes\n\n"); text.append("Kernel used:\n " + this.m_kernel.toString() + "\n\n"); text.append("All values shown based on: " + TAGS_FILTER[this.m_filterType].getReadable() + "\n\n"); text.append("Average Target Value : " + this.m_avg_target + "\n"); text.append("Inverted Covariance Matrix:\n"); double min = this.m_L.get(0, 0); double max = this.m_L.get(0, 0); for (int i = 0; i < this.m_NumTrain; i++) { for (int j = 0; j <= i; j++) { if (this.m_L.get(i, j) < min) { min = this.m_L.get(i, j); } else if (this.m_L.get(i, j) > max) { max = this.m_L.get(i, j); } } } text.append(" Lowest Value = " + min + "\n"); text.append(" Highest Value = " + max + "\n"); text.append("Inverted Covariance Matrix * Target-value Vector:\n"); min = this.m_t.get(0); max = this.m_t.get(0); for (int i = 0; i < this.m_NumTrain; i++) { if (this.m_t.get(i) < min) { min = this.m_t.get(i); } else if (this.m_t.get(i) > max) { max = this.m_t.get(i); } } 
text.append(" Lowest Value = " + min + "\n"); text.append(" Highest Value = " + max + "\n \n"); } catch (Exception e) { return "Can't print the classifier."; } return text.toString(); } /** * Main method for testing this class. * * @param argv * the commandline parameters */ public static void main(final String[] argv) { runClassifier(new GaussianProcesses(), argv); } }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/LDA.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * LDA.java * Copyright (C) 2016 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions; import weka.classifiers.AbstractClassifier; import weka.core.*; import weka.core.Capabilities.Capability; import weka.estimators.MultivariateGaussianEstimator; import weka.filters.Filter; import weka.filters.unsupervised.attribute.RemoveUseless; import java.util.Collections; import java.util.Enumeration; /** * <!-- globalinfo-start --> * Generates an LDA model. The covariance matrix is estimated using maximum likelihood from the pooled data. * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> * Valid options are: <p/> * * <pre> -R * The ridge parameter. 
* (default is 1e-6)</pre> * * <pre> -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution).</pre> * * <!-- options-end --> * * @author Eibe Frank, University of Waikato * @version $Revision: 10382 $ */ public class LDA extends AbstractClassifier implements WeightedInstancesHandler { /** for serialization */ static final long serialVersionUID = -8213283598193689271L; /** Holds header of training date */ protected Instances m_Data; /** The pooled estimator */ protected MultivariateGaussianEstimator m_Estimator; /** The per-class mean vectors */ protected double[][] m_Means; /** The global mean */ protected double[] m_GlobalMean; /** The logs of the prior probabilities */ protected double[] m_LogPriors; /** Ridge parameter */ protected double m_Ridge = 1e-6; /** Rmeove useless filter */ protected RemoveUseless m_RemoveUseless; /** * Global info for this classifier. */ public String globalInfo() { return "Generates an LDA model. The covariance matrix is estimated using maximum likelihood from the pooled data."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String ridgeTipText() { return "The value of the ridge parameter."; } /** * Get the value of Ridge. * * @return Value of Ridge. */ public double getRidge() { return m_Ridge; } /** * Set the value of Ridge. * * @param newRidge Value to assign to Ridge. */ public void setRidge(double newRidge) { m_Ridge = newRidge; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ public Enumeration<Option> listOptions() { java.util.Vector<Option> newVector = new java.util.Vector<Option>(7); newVector.addElement(new Option( "\tThe ridge parameter.\n"+ "\t(default is 1e-6)", "R", 0, "-R")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Parses a given list of options. <p/> * * <!-- options-start --> * Valid options are: <p/> * * <pre> -R * The ridge parameter. * (default is 1e-6)</pre> * * <pre> -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution).</pre> * * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String ridgeString = Utils.getOption('R', options); if (ridgeString.length() != 0) { setRidge(Double.parseDouble(ridgeString)); } else { setRidge(1e-6); } super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of IBk. * * @return an array of strings suitable for passing to setOptions() */ public String [] getOptions() { java.util.Vector<String> options = new java.util.Vector<String>(); options.add("-R"); options.add("" + getRidge()); Collections.addAll(options, super.getOptions()); return options.toArray(new String[0]); } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NUMERIC_ATTRIBUTES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(0); return result; } /** * Builds the classifier. 
*/ public void buildClassifier(Instances insts) throws Exception { // can classifier handle the data? getCapabilities().testWithFail(insts); // Remove constant attributes m_RemoveUseless = new RemoveUseless(); m_RemoveUseless.setInputFormat(insts); insts = Filter.useFilter(insts, m_RemoveUseless); insts.deleteWithMissingClass(); if (insts.numInstances() == 0) { m_Data = new Instances(insts, 0); m_Means = new double[insts.numClasses()][]; return; } // Establish class counts, etc. int[] counts = new int[insts.numClasses()]; double[] sumOfWeightsPerClass = new double[insts.numClasses()]; for (int i = 0; i < insts.numInstances(); i++) { Instance inst = insts.instance(i); int classIndex = (int) inst.classValue(); counts[classIndex]++; sumOfWeightsPerClass[classIndex] += inst.weight(); } // Collect relevant data into array double[][][] data = new double[insts.numClasses()][][]; double[][] weights = new double[insts.numClasses()][]; for (int i = 0; i < insts.numClasses(); i++) { data[i] = new double[counts[i]][insts.numAttributes() - 1]; weights[i] = new double[counts[i]]; } int[] currentCount = new int[insts.numClasses()]; for (int i = 0; i < insts.numInstances(); i++) { Instance inst = insts.instance(i); int classIndex = (int) inst.classValue(); weights[classIndex][currentCount[classIndex]] = inst.weight(); int index = 0; double[] row = data[classIndex][currentCount[classIndex]++]; for (int j = 0; j < inst.numAttributes(); j++) { if (j != insts.classIndex()) { row[index++] = inst.value(j); } } } // Establish pooled estimator m_Estimator = new MultivariateGaussianEstimator(); m_Estimator.setRidge(getRidge()); m_Means = m_Estimator.estimatePooled(data, weights); m_GlobalMean = m_Estimator.getMean(); // Establish prior probabilities for each class m_LogPriors = new double[insts.numClasses()]; double sumOfWeights = Utils.sum(sumOfWeightsPerClass); for (int i = 0; i < insts.numClasses(); i++) { if (sumOfWeightsPerClass[i] > 0) { m_LogPriors[i] = 
Math.log(sumOfWeightsPerClass[i]) - Math.log(sumOfWeights); } } // Store header only m_Data = new Instances(insts, 0); } /** * Output class probabilities using Bayes' rule. */ public double[] distributionForInstance(Instance inst) throws Exception { // Filter instance m_RemoveUseless.input(inst); inst = m_RemoveUseless.output(); // Convert instance to array double[] posteriorProbs = new double[m_Data.numClasses()]; double[] values = new double[inst.numAttributes() - 1]; for (int i = 0; i < m_Data.numClasses(); i++) { if (m_Means[i] != null) { int index = 0; for (int j = 0; j < m_Data.numAttributes(); j++) { if (j != m_Data.classIndex()) { values[index] = inst.value(j) - m_Means[i][index] + m_GlobalMean[index]; index++; } } posteriorProbs[i] = m_Estimator.logDensity(values) + m_LogPriors[i]; } else { posteriorProbs[i] = -Double.MAX_VALUE; } } posteriorProbs = Utils.logs2probs(posteriorProbs); return posteriorProbs; } /** * Produces textual description of the classifier. * @return the textual description */ public String toString() { if (m_Means == null) { return "No model has been built yet."; } StringBuffer result = new StringBuffer(); result.append("LDA model (multivariate Gaussian for each class)\n\n"); result.append("Pooled estimator\n\n" + m_Estimator + "\n\n"); for (int i = 0; i < m_Data.numClasses(); i++) { if (m_Means[i] != null) { result.append("Estimates for class value " + m_Data.classAttribute().value(i) + "\n\n"); result.append("Natural logarithm of class prior probability: " + Utils.doubleToString(m_LogPriors[i], getNumDecimalPlaces()) + "\n"); result.append("Class prior probability: " + Utils.doubleToString(Math.exp(m_LogPriors[i]), getNumDecimalPlaces()) + "\n\n"); int index = 0; result.append("Mean vector:\n\n"); for (int j = 0; j < m_Data.numAttributes(); j++) { if (j != m_Data.classIndex()) { result.append(m_Data.attribute(j).name() + ": " + Utils.doubleToString(m_Means[i][index], getNumDecimalPlaces()) + "\n"); index++; } } result.append("\n"); } 
} return result.toString(); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision: 10382 $"); } /** * Generates an LDA classifier. * * @param argv the options */ public static void main(String [] argv){ runClassifier(new LDA(), argv); } }
0
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/functions/LinearRegression.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * LinearRegression.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions; import java.util.Collections; import java.util.Enumeration; import no.uib.cipr.matrix.DenseMatrix; import no.uib.cipr.matrix.DenseVector; import no.uib.cipr.matrix.Matrix; import no.uib.cipr.matrix.UpperSymmDenseMatrix; import no.uib.cipr.matrix.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.evaluation.RegressionAnalysis; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.filters.Filter; import weka.filters.supervised.attribute.NominalToBinary; import weka.filters.unsupervised.attribute.ReplaceMissingValues; /** * <!-- globalinfo-start --> Class for using linear regression for prediction. Uses the Akaike * criterion for model selection, and is able to deal with weighted instances. * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -S &lt;number of selection method&gt; * Set the attribute selection method to use. 1 = None, 2 = Greedy. 
* (default 0 = M5' method) * </pre> * * <pre> * -C * Do not try to eliminate colinear attributes. * </pre> * * <pre> * -R &lt;double&gt; * Set ridge parameter (default 1.0e-8). * </pre> * * <pre> * -minimal * Conserve memory, don't keep dataset header and means/stdevs. * Model cannot be printed out if this option is enabled. (default: keep data) * </pre> * * <pre> * -additional-stats * Output additional statistics. * </pre> * * <pre> * -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution). * </pre> * * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Len Trigg (trigg@cs.waikato.ac.nz) * @version $Revision$ */ public class LinearRegression extends AbstractClassifier implements OptionHandler, WeightedInstancesHandler { /** Attribute selection method: M5 method */ public static final int SELECTION_M5 = 0; /** Attribute selection method: No attribute selection */ public static final int SELECTION_NONE = 1; /** Attribute selection method: Greedy method */ public static final int SELECTION_GREEDY = 2; /** Attribute selection methods */ public static final Tag[] TAGS_SELECTION = { new Tag(SELECTION_NONE, "No attribute selection"), new Tag(SELECTION_M5, "M5 method"), new Tag(SELECTION_GREEDY, "Greedy method") }; /** for serialization */ static final long serialVersionUID = -3364580862046573747L; /** Array for storing coefficients of linear regression. */ protected double[] m_Coefficients; /** Which attributes are relevant? */ protected boolean[] m_SelectedAttributes; /** Variable for storing transformed training data. */ protected Instances m_TransformedData; /** The filter for removing missing values. */ protected ReplaceMissingValues m_MissingFilter; /** * The filter storing the transformation from nominal to binary attributes. 
*/ protected NominalToBinary m_TransformFilter; /** The standard deviations of the class attribute */ protected double m_ClassStdDev; /** The mean of the class attribute */ protected double m_ClassMean; /** The index of the class attribute */ protected int m_ClassIndex; /** The attributes means */ protected double[] m_Means; /** The attribute standard deviations */ protected double[] m_StdDevs; /** * Whether to output additional statistics such as std. dev. of coefficients and t-stats */ protected boolean m_outputAdditionalStats; /** The current attribute selection method */ protected int m_AttributeSelection; /** Try to eliminate correlated attributes? */ protected boolean m_EliminateColinearAttributes = true; /** Turn off all checks and conversions? */ protected boolean m_checksTurnedOff = false; /** The ridge parameter */ protected double m_Ridge = 1.0e-8; /** Conserve memory? */ protected boolean m_Minimal = false; /** Model already built? */ protected boolean m_ModelBuilt = false; /** True if the model is a zero R one */ protected boolean m_isZeroR; /** The degrees of freedom of the regression model */ private int m_df; /** The R-squared value of the regression model */ private double m_RSquared; /** The adjusted R-squared value of the regression model */ private double m_RSquaredAdj; /** The F-statistic of the regression model */ private double m_FStat; /** Array for storing the standard error of each coefficient */ private double[] m_StdErrorOfCoef; /** Array for storing the t-statistic of each coefficient */ private double[] m_TStats; public LinearRegression() { this.m_numDecimalPlaces = 4; } /** * Generates a linear regression function predictor. 
* * @param argv * the options */ public static void main(final String argv[]) { runClassifier(new LinearRegression(), argv); } /** * Returns a string describing this classifier * * @return a description of the classifier suitable for displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for using linear regression for prediction. Uses the Akaike " + "criterion for model selection, and is able to deal with weighted " + "instances."; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Builds a regression model for the given data. * * @param data * the training data to be used for generating the linear regression function * @throws Exception * if the classifier could not be built successfully */ @Override public void buildClassifier(Instances data) throws Exception { this.m_ModelBuilt = false; this.m_isZeroR = false; if (data.numInstances() == 1) { this.m_Coefficients = new double[1]; this.m_Coefficients[0] = data.instance(0).classValue(); this.m_SelectedAttributes = new boolean[data.numAttributes()]; this.m_isZeroR = true; return; } if (!this.m_checksTurnedOff) { // can classifier handle the data? 
this.getCapabilities().testWithFail(data); if (this.m_outputAdditionalStats) { // check that the instances weights are all 1 // because the RegressionAnalysis class does // not handle weights boolean ok = true; for (int i = 0; i < data.numInstances(); i++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (data.instance(i).weight() != 1) { ok = false; break; } } if (!ok) { throw new Exception("Can only compute additional statistics on unweighted data"); } } // remove instances with missing class data = new Instances(data); data.deleteWithMissingClass(); this.m_TransformFilter = new NominalToBinary(); this.m_TransformFilter.setInputFormat(data); data = Filter.useFilter(data, this.m_TransformFilter); this.m_MissingFilter = new ReplaceMissingValues(); this.m_MissingFilter.setInputFormat(data); data = Filter.useFilter(data, this.m_MissingFilter); data.deleteWithMissingClass(); } else { this.m_TransformFilter = null; this.m_MissingFilter = null; } this.m_ClassIndex = data.classIndex(); this.m_TransformedData = data; // Turn all attributes on for a start this.m_Coefficients = null; // Compute means and standard deviations this.m_SelectedAttributes = new boolean[data.numAttributes()]; this.m_Means = new double[data.numAttributes()]; this.m_StdDevs = new double[data.numAttributes()]; for (int j = 0; j < data.numAttributes(); j++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if (j != this.m_ClassIndex) { this.m_SelectedAttributes[j] = true; // Turn attributes on for a start this.m_Means[j] = data.meanOrMode(j); this.m_StdDevs[j] = Math.sqrt(data.variance(j)); if (this.m_StdDevs[j] == 0) { this.m_SelectedAttributes[j] = false; } } } this.m_ClassStdDev = Math.sqrt(data.variance(this.m_TransformedData.classIndex())); this.m_ClassMean = data.meanOrMode(this.m_TransformedData.classIndex()); // Perform 
the regression this.findBestModel(); if (this.m_outputAdditionalStats) { // find number of coefficients, degrees of freedom int k = 1; for (int i = 0; i < data.numAttributes(); i++) { if (i != data.classIndex()) { if (this.m_SelectedAttributes[i]) { k++; } } } this.m_df = this.m_TransformedData.numInstances() - k; // calculate R^2 and F-stat double se = this.calculateSE(this.m_SelectedAttributes, this.m_Coefficients); this.m_RSquared = RegressionAnalysis.calculateRSquared(this.m_TransformedData, se); this.m_RSquaredAdj = RegressionAnalysis.calculateAdjRSquared(this.m_RSquared, this.m_TransformedData.numInstances(), k); this.m_FStat = RegressionAnalysis.calculateFStat(this.m_RSquared, this.m_TransformedData.numInstances(), k); // calculate std error of coefficients and t-stats this.m_StdErrorOfCoef = RegressionAnalysis.calculateStdErrorOfCoef(this.m_TransformedData, this.m_SelectedAttributes, se, this.m_TransformedData.numInstances(), k); this.m_TStats = RegressionAnalysis.calculateTStats(this.m_Coefficients, this.m_StdErrorOfCoef, k); } // Save memory if (this.m_Minimal) { this.m_TransformedData = null; this.m_Means = null; this.m_StdDevs = null; } else { this.m_TransformedData = new Instances(data, 0); } this.m_ModelBuilt = true; } /** * Classifies the given instance using the linear regression function. 
* * @param instance * the test instance * @return the classification * @throws Exception * if classification can't be done successfully */ @Override public double classifyInstance(final Instance instance) throws Exception { // Transform the input instance Instance transformedInstance = instance; if (!this.m_checksTurnedOff && !this.m_isZeroR) { this.m_TransformFilter.input(transformedInstance); this.m_TransformFilter.batchFinished(); transformedInstance = this.m_TransformFilter.output(); this.m_MissingFilter.input(transformedInstance); this.m_MissingFilter.batchFinished(); transformedInstance = this.m_MissingFilter.output(); } // Calculate the dependent variable from the regression model return this.regressionPrediction(transformedInstance, this.m_SelectedAttributes, this.m_Coefficients); } /** * Outputs the linear regression model as a string. * * @return the model as string */ @Override public String toString() { if (!this.m_ModelBuilt) { return "Linear Regression: No model built yet."; } if (this.m_Minimal) { return "Linear Regression: Model built."; } try { StringBuilder text = new StringBuilder(); int column = 0; boolean first = true; text.append("\nLinear Regression Model\n\n"); text.append(this.m_TransformedData.classAttribute().name() + " =\n\n"); for (int i = 0; i < this.m_TransformedData.numAttributes(); i++) { if ((i != this.m_ClassIndex) && (this.m_SelectedAttributes[i])) { if (!first) { text.append(" +\n"); } else { first = false; } text.append(Utils.doubleToString(this.m_Coefficients[column], 12, this.m_numDecimalPlaces) + " * "); text.append(this.m_TransformedData.attribute(i).name()); column++; } } text.append(" +\n" + Utils.doubleToString(this.m_Coefficients[column], 12, this.m_numDecimalPlaces)); if (this.m_outputAdditionalStats) { int maxAttLength = 0; for (int i = 0; i < this.m_TransformedData.numAttributes(); i++) { if ((i != this.m_ClassIndex) && (this.m_SelectedAttributes[i])) { if (this.m_TransformedData.attribute(i).name().length() > 
maxAttLength) { maxAttLength = this.m_TransformedData.attribute(i).name().length(); } } } maxAttLength += 3; if (maxAttLength < "Variable".length() + 3) { maxAttLength = "Variable".length() + 3; } text.append("\n\nRegression Analysis:\n\n" + Utils.padRight("Variable", maxAttLength) + " Coefficient SE of Coef t-Stat"); column = 0; for (int i = 0; i < this.m_TransformedData.numAttributes(); i++) { if ((i != this.m_ClassIndex) && (this.m_SelectedAttributes[i])) { text.append("\n" + Utils.padRight(this.m_TransformedData.attribute(i).name(), maxAttLength)); text.append(Utils.doubleToString(this.m_Coefficients[column], 12, this.m_numDecimalPlaces)); text.append(" " + Utils.doubleToString(this.m_StdErrorOfCoef[column], 12, this.m_numDecimalPlaces)); text.append(" " + Utils.doubleToString(this.m_TStats[column], 12, this.m_numDecimalPlaces)); column++; } } text.append(Utils.padRight("\nconst", maxAttLength + 1) + Utils.doubleToString(this.m_Coefficients[column], 12, this.m_numDecimalPlaces)); text.append(" " + Utils.doubleToString(this.m_StdErrorOfCoef[column], 12, this.m_numDecimalPlaces)); text.append(" " + Utils.doubleToString(this.m_TStats[column], 12, this.m_numDecimalPlaces)); text.append("\n\nDegrees of freedom = " + Integer.toString(this.m_df)); text.append("\nR^2 value = " + Utils.doubleToString(this.m_RSquared, this.m_numDecimalPlaces)); text.append("\nAdjusted R^2 = " + Utils.doubleToString(this.m_RSquaredAdj, 5)); text.append("\nF-statistic = " + Utils.doubleToString(this.m_FStat, this.m_numDecimalPlaces)); } return text.toString(); } catch (Exception e) { return "Can't print Linear Regression!"; } } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { java.util.Vector<Option> newVector = new java.util.Vector<>(); newVector.addElement(new Option("\tSet the attribute selection method" + " to use. 
1 = None, 2 = Greedy.\n" + "\t(default 0 = M5' method)", "S", 1, "-S <number of selection method>")); newVector.addElement(new Option("\tDo not try to eliminate colinear" + " attributes.\n", "C", 0, "-C")); newVector.addElement(new Option("\tSet ridge parameter (default 1.0e-8).\n", "R", 1, "-R <double>")); newVector.addElement(new Option("\tConserve memory, don't keep dataset header and means/stdevs.\n" + "\tModel cannot be printed out if this option is enabled." + "\t(default: keep data)", "minimal", 0, "-minimal")); newVector.addElement(new Option("\tOutput additional statistics.", "additional-stats", 0, "-additional-stats")); newVector.addAll(Collections.list(super.listOptions())); return newVector.elements(); } /** * Returns the coefficients for this linear model. * * @return the coefficients for this linear model */ public double[] coefficients() { double[] coefficients = new double[this.m_SelectedAttributes.length + 1]; int counter = 0; for (int i = 0; i < this.m_SelectedAttributes.length; i++) { if ((this.m_SelectedAttributes[i]) && ((i != this.m_ClassIndex))) { coefficients[i] = this.m_Coefficients[counter++]; } } coefficients[this.m_SelectedAttributes.length] = this.m_Coefficients[counter]; return coefficients; } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { java.util.Vector<String> result = new java.util.Vector<>(); result.add("-S"); result.add("" + this.getAttributeSelectionMethod().getSelectedTag().getID()); if (!this.getEliminateColinearAttributes()) { result.add("-C"); } result.add("-R"); result.add("" + this.getRidge()); if (this.getMinimal()) { result.add("-minimal"); } if (this.getOutputAdditionalStats()) { result.add("-additional-stats"); } Collections.addAll(result, super.getOptions()); return result.toArray(new String[result.size()]); } /** * Parses a given list of options. 
* <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -S &lt;number of selection method&gt; * Set the attribute selection method to use. 1 = None, 2 = Greedy. * (default 0 = M5' method) * </pre> * * <pre> * -C * Do not try to eliminate colinear attributes. * </pre> * * <pre> * -R &lt;double&gt; * Set ridge parameter (default 1.0e-8). * </pre> * * <pre> * -minimal * Conserve memory, don't keep dataset header and means/stdevs. * Model cannot be printed out if this option is enabled. (default: keep data) * </pre> * * <pre> * -additional-stats * Output additional statistics. * </pre> * * <pre> * -output-debug-info * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -do-not-check-capabilities * If set, classifier capabilities are not checked before classifier is built * (use with caution). * </pre> * * <!-- options-end --> * * @param options * the list of options as an array of strings * @throws Exception * if an option is not supported */ @Override public void setOptions(final String[] options) throws Exception { String selectionString = Utils.getOption('S', options); if (selectionString.length() != 0) { this.setAttributeSelectionMethod(new SelectedTag(Integer.parseInt(selectionString), TAGS_SELECTION)); } else { this.setAttributeSelectionMethod(new SelectedTag(SELECTION_M5, TAGS_SELECTION)); } String ridgeString = Utils.getOption('R', options); if (ridgeString.length() != 0) { this.setRidge(new Double(ridgeString).doubleValue()); } else { this.setRidge(1.0e-8); } this.setEliminateColinearAttributes(!Utils.getFlag('C', options)); this.setMinimal(Utils.getFlag("minimal", options)); this.setOutputAdditionalStats(Utils.getFlag("additional-stats", options)); super.setOptions(options); Utils.checkForRemainingOptions(options); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String ridgeTipText() 
{ return "The value of the Ridge parameter."; } /** * Get the value of Ridge. * * @return Value of Ridge. */ public double getRidge() { return this.m_Ridge; } /** * Set the value of Ridge. * * @param newRidge * Value to assign to Ridge. */ public void setRidge(final double newRidge) { this.m_Ridge = newRidge; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String eliminateColinearAttributesTipText() { return "Eliminate colinear attributes."; } /** * Get the value of EliminateColinearAttributes. * * @return Value of EliminateColinearAttributes. */ public boolean getEliminateColinearAttributes() { return this.m_EliminateColinearAttributes; } /** * Set the value of EliminateColinearAttributes. * * @param newEliminateColinearAttributes * Value to assign to EliminateColinearAttributes. */ public void setEliminateColinearAttributes(final boolean newEliminateColinearAttributes) { this.m_EliminateColinearAttributes = newEliminateColinearAttributes; } /** * Get the number of coefficients used in the model * * @return the number of coefficients */ public int numParameters() { return this.m_Coefficients.length - 1; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String attributeSelectionMethodTipText() { return "Set the method used to select attributes for use in the linear " + "regression. Available methods are: no attribute selection, attribute " + "selection using M5's method (step through the attributes removing the one " + "with the smallest standardised coefficient until no improvement is observed " + "in the estimate of the error given by the Akaike " + "information criterion), and a greedy selection using the Akaike information " + "metric."; } /** * Gets the method used to select attributes for use in the linear regression. * * @return the method to use. 
*/ public SelectedTag getAttributeSelectionMethod() { return new SelectedTag(this.m_AttributeSelection, TAGS_SELECTION); } /** * Sets the method used to select attributes for use in the linear regression. * * @param method * the attribute selection method to use. */ public void setAttributeSelectionMethod(final SelectedTag method) { if (method.getTags() == TAGS_SELECTION) { this.m_AttributeSelection = method.getSelectedTag().getID(); } } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String minimalTipText() { return "If enabled, dataset header, means and stdevs get discarded to conserve memory; also, the model cannot be printed out."; } /** * Returns whether to be more memory conservative or being able to output the model as string. * * @return true if memory conservation is preferred over outputting model description */ public boolean getMinimal() { return this.m_Minimal; } /** * Sets whether to be more memory conservative or being able to output the model as string. * * @param value * if true memory will be conserved */ public void setMinimal(final boolean value) { this.m_Minimal = value; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the explorer/experimenter gui */ public String outputAdditionalStatsTipText() { return "Output additional statistics (such as " + "std deviation of coefficients and t-statistics)"; } /** * Get whether to output additional statistics (such as std. deviation of coefficients and * t-statistics * * @return true if additional stats are to be output */ public boolean getOutputAdditionalStats() { return this.m_outputAdditionalStats; } /** * Set whether to output additional statistics (such as std. 
deviation of coefficients and * t-statistics * * @param additional * true if additional stats are to be output */ public void setOutputAdditionalStats(final boolean additional) { this.m_outputAdditionalStats = additional; } /** * Turns off checks for missing values, etc. Use with caution. Also turns off scaling. */ public void turnChecksOff() { this.m_checksTurnedOff = true; } /** * Turns on checks for missing values, etc. Also turns on scaling. */ public void turnChecksOn() { this.m_checksTurnedOff = false; } /** * Removes the attribute with the highest standardised coefficient greater than 1.5 from the * selected attributes. * * @param selectedAttributes * an array of flags indicating which attributes are included in the regression model * @param coefficients * an array of coefficients for the regression model * @return true if an attribute was removed */ protected boolean deselectColinearAttributes(final boolean[] selectedAttributes, final double[] coefficients) { double maxSC = 1.5; int maxAttr = -1, coeff = 0; for (int i = 0; i < selectedAttributes.length; i++) { if (selectedAttributes[i]) { double SC = Math.abs(coefficients[coeff] * this.m_StdDevs[i] / this.m_ClassStdDev); if (SC > maxSC) { maxSC = SC; maxAttr = i; } coeff++; } } if (maxAttr >= 0) { selectedAttributes[maxAttr] = false; if (this.m_Debug) { System.out.println("Deselected colinear attribute:" + (maxAttr + 1) + " with standardised coefficient: " + maxSC); } return true; } return false; } /** * Performs a greedy search for the best regression model using Akaike's criterion. * * @throws Exception * if regression can't be done */ protected void findBestModel() throws Exception { // For the weighted case we still use numInstances in // the calculation of the Akaike criterion. 
int numInstances = this.m_TransformedData.numInstances(); if (this.m_Debug) { System.out.println((new Instances(this.m_TransformedData, 0)).toString()); } // Perform a regression for the full model, and remove colinear attributes do { this.m_Coefficients = this.doRegression(this.m_SelectedAttributes); } while (this.m_EliminateColinearAttributes && this.deselectColinearAttributes(this.m_SelectedAttributes, this.m_Coefficients)); // Figure out current number of attributes + 1. (We treat this model // as the full model for the Akaike-based methods.) int numAttributes = 1; for (boolean m_SelectedAttribute : this.m_SelectedAttributes) { if (m_SelectedAttribute) { numAttributes++; } } double fullMSE = this.calculateSE(this.m_SelectedAttributes, this.m_Coefficients); double akaike = (numInstances - numAttributes) + 2 * numAttributes; if (this.m_Debug) { System.out.println("Initial Akaike value: " + akaike); } boolean improved; int currentNumAttributes = numAttributes; switch (this.m_AttributeSelection) { case SELECTION_GREEDY: // Greedy attribute removal do { boolean[] currentSelected = this.m_SelectedAttributes.clone(); improved = false; currentNumAttributes--; for (int i = 0; i < this.m_SelectedAttributes.length; i++) { if (currentSelected[i]) { // Calculate the akaike rating without this attribute currentSelected[i] = false; double[] currentCoeffs = this.doRegression(currentSelected); double currentMSE = this.calculateSE(currentSelected, currentCoeffs); double currentAkaike = currentMSE / fullMSE * (numInstances - numAttributes) + 2 * currentNumAttributes; if (this.m_Debug) { System.out.println("(akaike: " + currentAkaike); } // If it is better than the current best if (currentAkaike < akaike) { if (this.m_Debug) { System.err.println("Removing attribute " + (i + 1) + " improved Akaike: " + currentAkaike); } improved = true; akaike = currentAkaike; System.arraycopy(currentSelected, 0, this.m_SelectedAttributes, 0, this.m_SelectedAttributes.length); this.m_Coefficients = 
currentCoeffs; } currentSelected[i] = true; } } } while (improved); break; case SELECTION_M5: // Step through the attributes removing the one with the smallest // standardised coefficient until no improvement in Akaike do { improved = false; currentNumAttributes--; // Find attribute with smallest SC double minSC = 0; int minAttr = -1, coeff = 0; for (int i = 0; i < this.m_SelectedAttributes.length; i++) { if (this.m_SelectedAttributes[i]) { double SC = Math.abs(this.m_Coefficients[coeff] * this.m_StdDevs[i] / this.m_ClassStdDev); if ((coeff == 0) || (SC < minSC)) { minSC = SC; minAttr = i; } coeff++; } } // See whether removing it improves the Akaike score if (minAttr >= 0) { this.m_SelectedAttributes[minAttr] = false; double[] currentCoeffs = this.doRegression(this.m_SelectedAttributes); double currentMSE = this.calculateSE(this.m_SelectedAttributes, currentCoeffs); double currentAkaike = currentMSE / fullMSE * (numInstances - numAttributes) + 2 * currentNumAttributes; if (this.m_Debug) { System.out.println("(akaike: " + currentAkaike); } // If it is better than the current best if (currentAkaike < akaike) { if (this.m_Debug) { System.err.println("Removing attribute " + (minAttr + 1) + " improved Akaike: " + currentAkaike); } improved = true; akaike = currentAkaike; this.m_Coefficients = currentCoeffs; } else { this.m_SelectedAttributes[minAttr] = true; } } } while (improved); break; case SELECTION_NONE: break; } } /** * Calculate the squared error of a regression model on the training data * * @param selectedAttributes * an array of flags indicating which attributes are included in the regression model * @param coefficients * an array of coefficients for the regression model * @return the mean squared error on the training data * @throws Exception * if there is a missing class value in the training data */ protected double calculateSE(final boolean[] selectedAttributes, final double[] coefficients) throws Exception { double mse = 0; for (int i = 0; i < 
this.m_TransformedData.numInstances(); i++) { double prediction = this.regressionPrediction(this.m_TransformedData.instance(i), selectedAttributes, coefficients); double error = prediction - this.m_TransformedData.instance(i).classValue(); mse += error * error; } return mse; } /** * Calculate the dependent value for a given instance for a given regression model. * * @param transformedInstance * the input instance * @param selectedAttributes * an array of flags indicating which attributes are included in the regression model * @param coefficients * an array of coefficients for the regression model * @return the regression value for the instance. * @throws Exception * if the class attribute of the input instance is not assigned */ protected double regressionPrediction(final Instance transformedInstance, final boolean[] selectedAttributes, final double[] coefficients) throws Exception { double result = 0; int column = 0; for (int j = 0; j < transformedInstance.numAttributes(); j++) { // XXX kill weka execution if (Thread.interrupted()) { throw new InterruptedException("Thread got interrupted, thus, kill WEKA."); } if ((this.m_ClassIndex != j) && (selectedAttributes[j])) { result += coefficients[column] * transformedInstance.value(j); column++; } } result += coefficients[column]; return result; } /** * Calculate a linear regression using the selected attributes * * @param selectedAttributes * an array of booleans where each element is true if the corresponding attribute should be * included in the regression. * @return an array of coefficients for the linear regression model. * @throws Exception * if an error occurred during the regression. 
*/ protected double[] doRegression(final boolean[] selectedAttributes) throws Exception { if (this.m_Debug) { System.out.print("doRegression("); for (boolean selectedAttribute : selectedAttributes) { System.out.print(" " + selectedAttribute); } System.out.println(" )"); } int numAttributes = 0; for (boolean selectedAttribute : selectedAttributes) { if (selectedAttribute) { numAttributes++; } } // Check whether there are still attributes left Matrix independentTransposed = null; Vector dependent = null; if (numAttributes > 0) { independentTransposed = new DenseMatrix(numAttributes, this.m_TransformedData.numInstances()); dependent = new DenseVector(this.m_TransformedData.numInstances()); for (int i = 0; i < this.m_TransformedData.numInstances(); i++) { Instance inst = this.m_TransformedData.instance(i); double sqrt_weight = Math.sqrt(inst.weight()); int row = 0; for (int j = 0; j < this.m_TransformedData.numAttributes(); j++) { if (j == this.m_ClassIndex) { dependent.set(i, inst.classValue() * sqrt_weight); } else { if (selectedAttributes[j]) { double value = inst.value(j) - this.m_Means[j]; // We only need to do this if we want to // scale the input if (!this.m_checksTurnedOff) { value /= this.m_StdDevs[j]; } independentTransposed.set(row, i, value * sqrt_weight); row++; } } } } } // Compute coefficients (note that we have to treat the // intercept separately so that it doesn't get affected // by the ridge constant.) 
double[] coefficients = new double[numAttributes + 1]; if (numAttributes > 0) { Vector aTy = independentTransposed.mult(dependent, new DenseVector(numAttributes)); Matrix aTa = new UpperSymmDenseMatrix(numAttributes).rank1(independentTransposed); independentTransposed = null; dependent = null; boolean success = true; Vector coeffsWithoutIntercept = null; double ridge = this.getRidge(); do { for (int i = 0; i < numAttributes; i++) { aTa.add(i, i, ridge); } try { coeffsWithoutIntercept = aTa.solve(aTy, new DenseVector(numAttributes)); success = true; } catch (Exception ex) { for (int i = 0; i < numAttributes; i++) { aTa.add(i, i, -ridge); } ridge *= 10; success = false; } } while (!success); System.arraycopy(((DenseVector) coeffsWithoutIntercept).getData(), 0, coefficients, 0, numAttributes); } coefficients[numAttributes] = this.m_ClassMean; // Convert coefficients into original scale int column = 0; for (int i = 0; i < this.m_TransformedData.numAttributes(); i++) { if ((i != this.m_TransformedData.classIndex()) && (selectedAttributes[i])) { // We only need to do this if we have scaled the // input. if (!this.m_checksTurnedOff) { coefficients[column] /= this.m_StdDevs[i]; } // We have centred the input coefficients[coefficients.length - 1] -= coefficients[column] * this.m_Means[i]; column++; } } return coefficients; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision$"); } }