index
int64 | repo_id
string | file_path
string | content
string |
|---|---|---|---|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/evaluationstatistics/IncrementalEvaluationStatisticsHandler.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* IncrementalEvaluationStatisticsHandler.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.evaluationstatistics;
import meka.classifiers.multilabel.MultiLabelClassifier;
import weka.core.Instances;
import java.util.List;
/**
* For handlers that support incremental writes.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public interface IncrementalEvaluationStatisticsHandler
extends EvaluationStatisticsHandler {
/**
 * Returns whether the handler supports incremental write.
 *
 * @return true if supported
 */
public boolean supportsIncrementalUpdate();
/**
 * Checks whether the specified combination of classifier and dataset is required for evaluation
 * or already present from previous evaluation.
 *
 * @param classifier the classifier to check
 * @param dataset the dataset to check
 * @return true if it needs evaluating
 */
public boolean requires(MultiLabelClassifier classifier, Instances dataset);
/**
 * Retrieves the statistics for the specified combination of classifier and dataset.
 *
 * @param classifier the classifier to check
 * @param dataset the dataset to check
 * @return the stats, null if not available
 */
public List<EvaluationStatistics> retrieve(MultiLabelClassifier classifier, Instances dataset);
/**
 * Appends the given statistics to the already stored ones.
 *
 * @param stats the statistics to store
 * @return null if successfully stored, otherwise error message
 */
public String append(List<EvaluationStatistics> stats);
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/evaluationstatistics/KeyValuePairs.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* KeyValuePairs.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.evaluationstatistics;
import meka.classifiers.multilabel.MultiLabelClassifier;
import meka.core.FileUtils;
import meka.core.OptionUtils;
import weka.core.Instances;
import weka.core.Option;
import weka.core.Utils;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.FileReader;
import java.io.FileWriter;
import java.util.*;
/**
* Simple plain text format. One statistics object per line, as tab-separated key-value pairs.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class KeyValuePairs
  extends AbstractFileBasedEvaluationStatisticsHandler
  implements OptionalIncrementalEvaluationStatisticsHandler {

  private static final long serialVersionUID = -1090631157162943295L;

  /** the key for the classifier command-line. */
  public final static String KEY_CLASSIFIER = "Classifier";

  /** the key for the dataset relation name. */
  public final static String KEY_RELATION = "Relation";

  /** the statistics loaded in initialize() plus any appended this session. */
  protected List<EvaluationStatistics> m_Statistics = new ArrayList<>();

  /** whether the incremental mode is off. */
  protected boolean m_IncrementalDisabled;

  /**
   * Description to be displayed in the GUI.
   *
   * @return the description
   */
  public String globalInfo() {
    // typo fix: "statistcs" -> "statistics"
    return "Simple plain text format that places one statistics result per line, as tab-separated "
      + "key-value pairs (separated by '=').";
  }

  /**
   * Returns the format description.
   *
   * @return the file format
   */
  public String getFormatDescription() {
    return "Key-value pairs";
  }

  /**
   * Returns the format extension(s).
   *
   * @return the extension(s) (incl dot)
   */
  public String[] getFormatExtensions() {
    return new String[]{".txt"};
  }

  /**
   * Sets whether incremental mode is turned off.
   *
   * @param value true to turn off incremental mode
   */
  public void setIncrementalDisabled(boolean value) {
    m_IncrementalDisabled = value;
  }

  /**
   * Returns whether incremental mode is turned off.
   *
   * @return true if incremental mode is off
   */
  public boolean isIncrementalDisabled() {
    return m_IncrementalDisabled;
  }

  /**
   * Describes this property.
   *
   * @return the description
   */
  public String incrementalDisabledTipText() {
    return "If enabled, incremental mode is turned off.";
  }

  /**
   * Returns whether the handler is threadsafe.
   * Only true when incremental (append) mode is disabled, since appends share state.
   *
   * @return true if threadsafe
   */
  @Override
  public boolean isThreadSafe() {
    return m_IncrementalDisabled;
  }

  /**
   * Returns whether the handler supports incremental write.
   *
   * @return true if supported
   */
  @Override
  public boolean supportsIncrementalUpdate() {
    return !m_IncrementalDisabled;
  }

  /**
   * Returns an enumeration of all the available options.
   *
   * @return an enumeration of all available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    // typed vector instead of the raw type
    Vector<Option> result = new Vector<>();
    OptionUtils.add(result, super.listOptions());
    OptionUtils.addOption(result, incrementalDisabledTipText(), "no", "incremental-disabled");
    return OptionUtils.toEnumeration(result);
  }

  /**
   * Sets the options.
   *
   * @param options the options
   * @throws Exception never
   */
  @Override
  public void setOptions(String[] options) throws Exception {
    setIncrementalDisabled(Utils.getFlag("incremental-disabled", options));
    super.setOptions(options);
  }

  /**
   * Returns the options.
   *
   * @return the options
   */
  @Override
  public String[] getOptions() {
    List<String> result = new ArrayList<>();
    OptionUtils.add(result, super.getOptions());
    OptionUtils.add(result, "incremental-disabled", isIncrementalDisabled());
    return OptionUtils.toArray(result);
  }

  /**
   * Initializes the handler, loading any previously stored statistics from the file.
   *
   * @return null if successfully initialized, otherwise error message
   */
  @Override
  public String initialize() {
    String result;
    result = super.initialize();
    if (result == null) {
      m_Statistics.clear();
      if (m_File.exists()) {
        log("File '" + m_File + "' exists, loading...");
        // bug fix: read() returns null on failure; addAll(null) used to throw an NPE
        List<EvaluationStatistics> stats = read();
        if (stats == null)
          return "Failed to read existing statistics from: " + m_File;
        m_Statistics.addAll(stats);
      }
    }
    return result;
  }

  /**
   * Reads the statistics. One statistics object per line, tab-separated
   * "key=value" entries; lines lacking the classifier/relation keys are skipped.
   *
   * @return the statistics that were read, null in case of an error
   */
  @Override
  public List<EvaluationStatistics> read() {
    List<EvaluationStatistics> result = new ArrayList<>();
    try (FileReader freader = new FileReader(m_File);
         BufferedReader breader = new BufferedReader(freader)) {
      String line;
      while ((line = breader.readLine()) != null) {
        if (line.trim().isEmpty())
          continue;
        HashMap<String,String> raw = new HashMap<>();
        for (String entry: line.split("\t")) {
          if (entry.trim().isEmpty())
            continue;
          // bug fix: limit the split to 2 so values containing '=' (e.g. classifier
          // command-lines with option values) are no longer rejected
          String[] parts = entry.split("=", 2);
          if (parts.length == 2)
            raw.put(parts[0], parts[1]);
          else
            log("Failed to parse: " + entry);
        }
        if (raw.containsKey(KEY_CLASSIFIER) && raw.containsKey(KEY_RELATION)) {
          EvaluationStatistics stat = new EvaluationStatistics(
            OptionUtils.fromCommandLine(MultiLabelClassifier.class, raw.get(KEY_CLASSIFIER)),
            raw.get(KEY_RELATION),
            null);
          for (String key: raw.keySet()) {
            if (key.equals(KEY_CLASSIFIER) || key.equals(KEY_RELATION))
              continue;
            try {
              stat.put(key, Double.parseDouble(raw.get(key)));
            }
            catch (Exception e) {
              log("Failed to parse double value of '" + key + "': " + raw.get(key));
            }
          }
          result.add(stat);
        }
      }
    }
    catch (Exception e) {
      result = null;
      // message fix: this is the plain-text format, not the serialized one
      handleException("Failed to read statistics from: " + m_File, e);
    }
    return result;
  }

  /**
   * Checks whether the specified combination of classifier and dataset is required for evaluation
   * or already present from previous evaluation.
   *
   * @param classifier the classifier to check
   * @param dataset the dataset to check
   * @return true if it needs evaluating
   */
  public boolean requires(MultiLabelClassifier classifier, Instances dataset) {
    String cls = Utils.toCommandLine(classifier);
    String rel = dataset.relationName();
    for (EvaluationStatistics stat: m_Statistics) {
      if (stat.getCommandLine().equals(cls) && stat.getRelation().equals(rel))
        return false;
    }
    return true;
  }

  /**
   * Retrieves the statistics for the specified combination of classifier and dataset.
   *
   * @param classifier the classifier to check
   * @param dataset the dataset to check
   * @return the stats, empty list if not available
   */
  public List<EvaluationStatistics> retrieve(MultiLabelClassifier classifier, Instances dataset) {
    List<EvaluationStatistics> result = new ArrayList<>();
    String cls = Utils.toCommandLine(classifier);
    String rel = dataset.relationName();
    for (EvaluationStatistics stat: m_Statistics) {
      if (stat.getCommandLine().equals(cls) && stat.getRelation().equals(rel))
        result.add(stat);
    }
    return result;
  }

  /**
   * Appends the given statistics to the file and to the in-memory cache.
   *
   * @param stats the statistics to store
   * @return null if successfully stored, otherwise error message
   */
  @Override
  public String append(List<EvaluationStatistics> stats) {
    log("Writing " + stats.size() + " statistics to: " + m_File);
    try (FileWriter fwriter = new FileWriter(m_File, true);
         BufferedWriter bwriter = new BufferedWriter(fwriter)) {
      for (EvaluationStatistics stat: stats) {
        bwriter.write(KEY_CLASSIFIER + "=" + stat.getCommandLine());
        bwriter.write("\t");
        bwriter.write(KEY_RELATION + "=" + stat.getRelation());
        // bug fix: the old code emitted a stray extra tab here, producing an
        // empty entry between the relation and the first statistic
        for (String key: stat.keySet()) {
          bwriter.write("\t");
          bwriter.write(key + "=" + stat.get(key));
        }
        bwriter.newLine();
      }
      // bug fix: keep the in-memory cache in sync so that requires()/retrieve()
      // see statistics appended during this session (previously only statistics
      // loaded in initialize() were considered)
      m_Statistics.addAll(stats);
      return null;
    }
    catch (Exception e) {
      return handleException("Failed to write statistics to: " + m_File, e);
    }
  }

  /**
   * Stores the given statistics (delegates to {@link #append(List)}).
   *
   * @param stats the statistics to store
   * @return null if successfully stored, otherwise error message
   */
  @Override
  public String write(List<EvaluationStatistics> stats) {
    return append(stats);
  }

  /**
   * Gets called after the experiment finished.
   *
   * @return null if successfully finished, otherwise error message
   */
  public String finish() {
    return null;
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/evaluationstatistics/OptionalIncrementalEvaluationStatisticsHandler.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* OptionalIncrementalEvaluationStatisticsHandler.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.evaluationstatistics;
/**
* For handlers that support incremental writes but use it as optional feature that can be turned off.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public interface OptionalIncrementalEvaluationStatisticsHandler
extends IncrementalEvaluationStatisticsHandler {
/**
 * Sets whether incremental mode is turned off.
 *
 * @param value true to turn off incremental mode
 */
public void setIncrementalDisabled(boolean value);
/**
 * Returns whether incremental mode is turned off.
 *
 * @return true if incremental mode is off
 */
public boolean isIncrementalDisabled();
/**
 * Describes this property.
 *
 * @return the description
 */
public String incrementalDisabledTipText();
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/evaluationstatistics/Serialized.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* Serialized.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.evaluationstatistics;
import weka.core.SerializationHelper;
import java.util.List;
/**
* Uses Java serialization for reading/writing the statistics.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class Serialized
  extends AbstractFileBasedEvaluationStatisticsHandler {

  private static final long serialVersionUID = -1090631157162943295L;

  /**
   * Description to be displayed in the GUI.
   *
   * @return the description
   */
  public String globalInfo() {
    // typo fix: "readin" -> "reading"
    return "Uses Java serialization for reading/writing the statistics.";
  }

  /**
   * Returns the format description.
   *
   * @return the file format
   */
  public String getFormatDescription() {
    return "Java serialized statistics";
  }

  /**
   * Returns the format extension(s).
   *
   * @return the extension(s) (incl dot)
   */
  public String[] getFormatExtensions() {
    return new String[]{".ser"};
  }

  /**
   * Reads the statistics.
   *
   * @return the statistics that were read, null in case of an error
   */
  @Override
  @SuppressWarnings("unchecked")  // serialization erases the generic type; cast is unavoidable
  public List<EvaluationStatistics> read() {
    List<EvaluationStatistics> result;
    try {
      result = (List<EvaluationStatistics>) SerializationHelper.read(m_File.getAbsolutePath());
    }
    catch (Exception e) {
      result = null;
      handleException("Failed to read serialized statistics from: " + m_File, e);
    }
    return result;
  }

  /**
   * Stores the given statistics, overwriting any existing file.
   *
   * @param stats the statistics to store
   * @return null if successfully stored, otherwise error message
   */
  @Override
  public String write(List<EvaluationStatistics> stats) {
    log("Writing " + stats.size() + " statistics to: " + m_File);
    try {
      SerializationHelper.write(m_File.getAbsolutePath(), stats);
      return null;
    }
    catch (Exception e) {
      return handleException("Failed to write statistics to: " + m_File, e);
    }
  }

  /**
   * Gets called after the experiment finished.
   *
   * @return null if successfully finished, otherwise error message
   */
  public String finish() {
    return null;
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/evaluators/AbstractEvaluator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* AbstractEvaluator.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.evaluators;
import meka.core.OptionUtils;
import meka.events.LogObject;
import weka.core.Option;
import java.util.Enumeration;
import java.util.Vector;
/**
* Ancestor for evaluators.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public abstract class AbstractEvaluator
  extends LogObject
  implements Evaluator {

  private static final long serialVersionUID = 6318297857792961890L;

  /** whether the evaluation got stopped; reset in initialize(), set in stop(). */
  protected boolean m_Stopped;

  /**
   * Description to be displayed in the GUI.
   *
   * @return the description
   */
  public abstract String globalInfo();

  /**
   * Initializes the evaluator, resetting the stopped flag.
   *
   * @return null if successfully initialized, otherwise error message
   */
  public String initialize() {
    m_Stopped = false;
    return null;
  }

  /**
   * Returns an enumeration of all the available options.
   *
   * @return an enumeration of all available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    // typed vector instead of the raw type; no options at this level
    Vector<Option> result = new Vector<>();
    return OptionUtils.toEnumeration(result);
  }

  /**
   * Sets the options.
   *
   * @param options the options to parse
   * @throws Exception if parsing fails
   */
  @Override
  public void setOptions(String[] options) throws Exception {
    // no options at this level
  }

  /**
   * Returns the options.
   *
   * @return the current options
   */
  @Override
  public String[] getOptions() {
    return new String[0];
  }

  /**
   * Stops the evaluation, if possible. Subclasses are expected to poll m_Stopped.
   */
  public void stop() {
    m_Stopped = true;
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/evaluators/AbstractMetaEvaluator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* AbstractMetaEvaluator.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.evaluators;
import meka.core.OptionUtils;
import meka.events.LogListener;
import weka.core.Option;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.Vector;
/**
* Ancestor for evaluators that wrap a base evaluator.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public abstract class AbstractMetaEvaluator
  extends AbstractEvaluator {

  private static final long serialVersionUID = -6851297570375542238L;

  /** the base evaluator. */
  protected Evaluator m_Evaluator = getDefaultEvaluator();

  /**
   * Initializes the evaluator and the wrapped base evaluator.
   *
   * @return null if successfully initialized, otherwise error message
   */
  @Override
  public String initialize() {
    String result = super.initialize();
    if (result == null)
      result = m_Evaluator.initialize();
    return result;
  }

  /**
   * Returns the default evaluator to use.
   *
   * @return the default
   */
  protected abstract Evaluator getDefaultEvaluator();

  /**
   * Sets the evaluator to use.
   *
   * @param value the evaluator
   */
  public void setEvaluator(Evaluator value) {
    m_Evaluator = value;
  }

  /**
   * Returns the evaluator in use.
   *
   * @return the evaluator
   */
  public Evaluator getEvaluator() {
    return m_Evaluator;
  }

  /**
   * Describes this property.
   *
   * @return the description
   */
  public String evaluatorTipText() {
    return "The base evaluator to use.";
  }

  /**
   * Returns an enumeration of all the available options.
   *
   * @return an enumeration of all available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    // typed vector instead of the raw type
    Vector<Option> result = new Vector<>();
    OptionUtils.add(result, super.listOptions());
    OptionUtils.addOption(result, evaluatorTipText(), getDefaultEvaluator().getClass().getName(), "base");
    return OptionUtils.toEnumeration(result);
  }

  /**
   * Sets the options.
   *
   * @param options the options to parse
   * @throws Exception if parsing fails
   */
  @Override
  public void setOptions(String[] options) throws Exception {
    setEvaluator((Evaluator) OptionUtils.parse(options, "base", getDefaultEvaluator()));
    super.setOptions(options);
  }

  /**
   * Returns the options.
   *
   * @return the current options
   */
  @Override
  public String[] getOptions() {
    List<String> result = new ArrayList<>();
    OptionUtils.add(result, super.getOptions());
    OptionUtils.add(result, "base", getEvaluator());
    return OptionUtils.toArray(result);
  }

  /**
   * Stops the evaluation, if possible; the base evaluator is stopped first.
   */
  @Override
  public void stop() {
    m_Evaluator.stop();
    super.stop();
  }

  /**
   * Adds the log listener, forwarding it to the base evaluator as well.
   *
   * @param l the listener
   */
  @Override
  public void addLogListener(LogListener l) {
    super.addLogListener(l);
    m_Evaluator.addLogListener(l);
  }

  /**
   * Removes the log listener, also from the base evaluator.
   *
   * @param l the listener
   */
  @Override
  public void removeLogListener(LogListener l) {
    super.removeLogListener(l);
    m_Evaluator.removeLogListener(l);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/evaluators/CrossValidation.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* CrossValidation.java
* Copyright (C) 2015-2017 University of Waikato, Hamilton, NZ
*/
package meka.experiment.evaluators;
import meka.classifiers.multilabel.Evaluation;
import meka.classifiers.multilabel.MultiLabelClassifier;
import meka.core.OptionUtils;
import meka.core.Result;
import meka.core.ThreadLimiter;
import meka.core.ThreadUtils;
import meka.experiment.evaluationstatistics.EvaluationStatistics;
import weka.core.Instances;
import weka.core.Option;
import weka.core.Randomizable;
import weka.core.Utils;
import java.util.*;
import java.util.concurrent.*;
/**
* Evaluates the classifier using cross-validation. Order can be preserved.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class CrossValidation
extends AbstractEvaluator
implements Randomizable, ThreadLimiter {
private static final long serialVersionUID = 6318297857792961890L;
/** the key for the fold. */
public final static String KEY_FOLD = "Fold";
/** the number of folds. */
protected int m_NumFolds = getDefaultNumFolds();
/** whether to preserve the order. */
protected boolean m_PreserveOrder = false;
/** the seed value. */
protected int m_Seed = getDefaultSeed();
/** the number of threads to use for parallel execution. */
protected int m_NumThreads = getDefaultNumThreads();
/** the actual number of threads to use. */
protected int m_ActualNumThreads;
/** the executor service to use for parallel execution. */
protected transient ExecutorService m_Executor;
/** the threshold option. */
protected String m_Threshold = getDefaultThreshold();
/** the verbosity option. */
protected String m_Verbosity = getDefaultVerbosity();
/**
 * Description to be displayed in the GUI.
 *
 * @return the description
 */
public String globalInfo() {
return "Evaluates the classifier using cross-validation. Order can be preserved.";
}
/**
 * Returns the default number of folds.
 *
 * @return the default
 */
protected int getDefaultNumFolds() {
return 10;
}
/**
 * Set the number of folds. Values below 2 are rejected with a message on stderr.
 *
 * @param value the folds (>= 2)
 */
public void setNumFolds(int value) {
if (value >= 2)
m_NumFolds = value;
else
System.err.println("Number of folds must >= 2, provided: " + value);
}
/**
 * Gets the number of folds.
 *
 * @return the folds (>= 2)
 */
public int getNumFolds() {
return m_NumFolds;
}
/**
 * Describes this property.
 *
 * @return the description
 */
public String numFoldsTipText() {
return "The number of folds to use.";
}
/**
 * Sets whether to preserve the order instead of randomizing the data.
 *
 * @param value true if to preserve the order
 */
public void setPreserveOrder(boolean value) {
m_PreserveOrder = value;
}
/**
 * Returns whether to preserve the order instead of randomizing the data.
 *
 * @return true if to preserve the order
 */
public boolean getPreserveOrder() {
return m_PreserveOrder;
}
/**
 * Describes this property.
 *
 * @return the description
 */
public String preserveOrderTipText() {
return "If enabled, no randomization is occurring and the order in the data is preserved.";
}
/**
 * Gets the default seed for the random number generation.
 *
 * @return the default
 */
protected int getDefaultSeed() {
return 0;
}
/**
 * Set the seed for random number generation.
 *
 * @param value the seed
 */
@Override
public void setSeed(int value) {
m_Seed = value;
}
/**
 * Gets the seed for the random number generation.
 *
 * @return the seed for the random number generation
 */
@Override
public int getSeed() {
return m_Seed;
}
/**
 * Describes this property.
 *
 * @return the description
 */
public String seedTipText() {
return "The seed to use for randomization.";
}
/**
 * Returns the default number of threads to use.
 *
 * @return the number of threads: -1 = # of CPUs/cores; 0/1 = sequential execution
 */
protected int getDefaultNumThreads() {
return ThreadUtils.ALL;
}
/**
 * Sets the number of threads to use. Values below -1 are rejected with a log message.
 *
 * @param value the number of threads: -1 = # of CPUs/cores; 0/1 = sequential execution
 */
public void setNumThreads(int value) {
if (value >= -1) {
m_NumThreads = value;
}
else {
log("Number of threads must be >= -1, provided: " + value);
}
}
/**
 * Returns the number of threads to use.
 *
 * @return the number of threads: -1 = # of CPUs/cores; 0/1 = sequential execution
 */
public int getNumThreads() {
return m_NumThreads;
}
/**
 * Returns the tip text for this property.
 *
 * @return tip text for this property suitable for
 * displaying in the GUI or for listing the options.
 */
public String numThreadsTipText() {
return "The number of threads to use ; -1 = number of CPUs/cores; 0 or 1 = sequential execution.";
}
/**
 * Gets the default threshold option.
 *
 * @return the default
 */
protected String getDefaultThreshold() {
return "PCut1";
}
/**
 * Set the threshold option.
 *
 * @param value the option
 */
public void setThreshold(String value) {
m_Threshold = value;
}
/**
 * Gets the threshold option.
 *
 * @return the option
 */
public String getThreshold() {
return m_Threshold;
}
/**
 * Describes this property.
 *
 * @return the description
 */
public String thresholdTipText() {
return "The threshold option.";
}
/**
 * Gets the default verbosity option.
 *
 * @return the default
 */
protected String getDefaultVerbosity() {
return "3";
}
/**
 * Set the verbosity option.
 *
 * @param value the option
 */
public void setVerbosity(String value) {
m_Verbosity = value;
}
/**
 * Gets the verbosity option.
 *
 * @return the option
 */
public String getVerbosity() {
return m_Verbosity;
}
/**
 * Describes this property.
 *
 * @return the description
 */
public String verbosityTipText() {
return "The verbosity option.";
}
/**
* Returns an enumeration of all the available options..
*
* @return an enumeration of all available options.
*/
@Override
public Enumeration<Option> listOptions() {
  // typed vector instead of the raw type
  Vector<Option> result = new Vector<>();
  OptionUtils.add(result, super.listOptions());
  OptionUtils.addOption(result, numFoldsTipText(), "" + getDefaultNumFolds(), 'F');
  OptionUtils.addFlag(result, preserveOrderTipText(), 'O');
  OptionUtils.addOption(result, seedTipText(), "" + getDefaultSeed(), 'S');
  OptionUtils.addOption(result, thresholdTipText(), "" + getDefaultThreshold(), 'T');
  OptionUtils.addOption(result, verbosityTipText(), "" + getDefaultVerbosity(), 'V');
  OptionUtils.addOption(result, numThreadsTipText(), "" + getDefaultNumThreads(), "num-threads");
  return OptionUtils.toEnumeration(result);
}
/**
 * Sets the options.
 *
 * <p>Each option is parsed (and consumed) from the array before the remainder
 * is delegated to the superclass.
 *
 * @param options the options to parse
 * @throws Exception if parsing fails
 */
@Override
public void setOptions(String[] options) throws Exception {
setNumFolds(OptionUtils.parse(options, 'F', getDefaultNumFolds()));
setPreserveOrder(Utils.getFlag('O', options));
setSeed(OptionUtils.parse(options, 'S', getDefaultSeed()));
setThreshold(OptionUtils.parse(options, 'T', getDefaultThreshold()));
setVerbosity(OptionUtils.parse(options, 'V', getDefaultVerbosity()));
setNumThreads(OptionUtils.parse(options, "num-threads", getDefaultNumThreads()));
super.setOptions(options);
}
/**
 * Returns the options. Mirrors the flags/values consumed by setOptions.
 *
 * @return the current options
 */
@Override
public String[] getOptions() {
List<String> result = new ArrayList<>();
OptionUtils.add(result, super.getOptions());
OptionUtils.add(result, 'F', getNumFolds());
OptionUtils.add(result, 'O', getPreserveOrder());
OptionUtils.add(result, 'S', getSeed());
OptionUtils.add(result, 'T', getThreshold());
OptionUtils.add(result, 'V', getVerbosity());
OptionUtils.add(result, "num-threads", getNumThreads());
return OptionUtils.toArray(result);
}
/**
 * Returns the evaluation statistics generated for the dataset (sequential execution).
 *
 * <p>NOTE(review): the statistics are recorded against the original {@code classifier}
 * object while a shallow copy is actually trained/evaluated; the (possibly randomized)
 * copy of the data is what gets attached to the statistics.
 *
 * @param classifier the classifier to evaluate
 * @param dataset the dataset to evaluate on
 * @return the statistics; empty if stopped, truncated if a fold fails
 */
protected List<EvaluationStatistics> evaluateSequential(MultiLabelClassifier classifier, Instances dataset) {
List<EvaluationStatistics> result;
EvaluationStatistics stats;
Instances train;
Instances test;
Result res;
int i;
Random rand;
MultiLabelClassifier current;
Instances data;
result = new ArrayList<>();
rand = new Random(m_Seed);
// work on a copy of the data; randomize only when order need not be preserved
data = new Instances(dataset);
if (!m_PreserveOrder)
data.randomize(rand);
for (i = 1; i <= m_NumFolds; i++) {
log("Fold: " + i);
if (m_PreserveOrder)
train = data.trainCV(m_NumFolds, i - 1);
else
// rand-aware overload; consumes the RNG, so call order matters
train = data.trainCV(m_NumFolds, i - 1, rand);
test = data.testCV(m_NumFolds, i - 1);
try {
// evaluate a shallow copy so the caller's classifier instance stays untouched
current = (MultiLabelClassifier) OptionUtils.shallowCopy(classifier);
res = Evaluation.evaluateModel(current, train, test, m_Threshold, m_Verbosity);
stats = new EvaluationStatistics(classifier, data, res);
stats.put(KEY_FOLD, i);
result.add(stats);
}
catch (Exception e) {
// a failing fold aborts the cross-validation, keeping earlier fold results
handleException(
"Failed to evaluate dataset '" + dataset.relationName() + "' with classifier: " + Utils.toCommandLine(classifier), e);
break;
}
if (m_Stopped)
break;
}
// a stop request discards all partial results
if (m_Stopped)
result.clear();
return result;
}
/**
 * Runs the cross-validation folds concurrently via a fixed-size thread pool
 * (parallel execution). Fold splits are computed up front on the calling
 * thread, since the shared {@link Random} instance is consumed sequentially;
 * each job then only trains/evaluates its own classifier copy.
 *
 * @param classifier the classifier to evaluate
 * @param dataset the dataset to evaluate on
 * @return the statistics of all folds that completed successfully
 */
protected List<EvaluationStatistics> evaluateParallel(final MultiLabelClassifier classifier, final Instances dataset) {
  List<EvaluationStatistics> result;
  ArrayList<EvaluatorJob> jobs;
  EvaluatorJob job;
  int i;
  Random rand;
  Instances data;
  result = new ArrayList<>();
  debug("pre: create jobs");
  jobs = new ArrayList<>();
  rand = new Random(m_Seed);
  // work on a copy, shuffled unless the original order must be kept
  data = new Instances(dataset);
  if (!m_PreserveOrder)
    data.randomize(rand);
  for (i = 1; i <= m_NumFolds; i++) {
    // effectively-final locals captured by the anonymous job below
    final int index = i;
    final Instances train;
    final Instances test;
    final MultiLabelClassifier current;
    if (m_PreserveOrder)
      train = data.trainCV(m_NumFolds, index - 1);
    else
      train = data.trainCV(m_NumFolds, index - 1, rand);
    test = data.testCV(m_NumFolds, index - 1);
    // each job gets its own classifier copy, so parallel training is isolated
    current = (MultiLabelClassifier) OptionUtils.shallowCopy(classifier);
    job = new EvaluatorJob() {
      protected List<EvaluationStatistics> doCall() throws Exception {
        List<EvaluationStatistics> result = new ArrayList<>();
        log("Executing fold #" + index + "...");
        try {
          Result res = Evaluation.evaluateModel(current, train, test, m_Threshold, m_Verbosity);
          EvaluationStatistics stats = new EvaluationStatistics(classifier, dataset, res);
          stats.put(KEY_FOLD, index);
          result.add(stats);
        }
        catch (Exception e) {
          // a failed fold yields an empty result list; the other folds continue
          handleException(
            "Failed to evaluate dataset '" + dataset.relationName() + "' with classifier: " + Utils.toCommandLine(classifier), e);
        }
        log("...finished fold #" + index);
        return result;
      }
    };
    jobs.add(job);
  }
  debug("post: create jobs");
  // execute jobs
  m_Executor = Executors.newFixedThreadPool(m_ActualNumThreads);
  debug("pre: submit");
  try {
    for (i = 0; i < jobs.size(); i++)
      m_Executor.submit(jobs.get(i));
  }
  catch (RejectedExecutionException e) {
    // ignored -- submission is rejected when stop() shut the executor down
  }
  catch (Exception e) {
    handleException("Failed to start up jobs", e);
  }
  debug("post: submit");
  debug("pre: shutdown");
  // orderly shutdown: already-submitted jobs still run to completion
  m_Executor.shutdown();
  debug("post: shutdown");
  // wait for threads to finish
  debug("pre: wait");
  while (!m_Executor.isTerminated()) {
    try {
      m_Executor.awaitTermination(100, TimeUnit.MILLISECONDS);
    }
    catch (InterruptedException e) {
      // ignored -- the surrounding loop re-checks isTerminated()
      // NOTE(review): the interrupt flag is not restored here; confirm callers do not rely on it
    }
    catch (Exception e) {
      handleException("Failed to await termination", e);
    }
  }
  debug("post: wait");
  // collect results
  debug("pre: collect");
  for (i = 0; i < jobs.size(); i++)
    result.addAll(jobs.get(i).getResult());
  debug("post: collect");
  return result;
}
/**
 * Evaluates the classifier on the dataset, dispatching to sequential or
 * parallel cross-validation depending on the actual thread count.
 *
 * @param classifier the classifier to evaluate
 * @param dataset the dataset to evaluate on
 * @return the statistics; emptied if the evaluation was stopped
 */
@Override
public List<EvaluationStatistics> evaluate(MultiLabelClassifier classifier, Instances dataset) {
  // cap the threads at the number of folds
  m_ActualNumThreads = ThreadUtils.getActualNumThreads(m_NumThreads, m_NumFolds);
  log("Number of threads (" + ThreadUtils.SEQUENTIAL + " = sequential): " + m_ActualNumThreads);
  List<EvaluationStatistics> result = (m_ActualNumThreads == ThreadUtils.SEQUENTIAL)
    ? evaluateSequential(classifier, dataset)
    : evaluateParallel(classifier, dataset);
  if (m_Stopped)
    result.clear();
  return result;
}
/**
 * Stops the evaluation, if possible: aborts any queued/running jobs on the
 * executor before delegating to the superclass.
 */
@Override
public void stop() {
  ExecutorService executor = m_Executor;
  if (executor != null) {
    debug("pre: shutdownNow");
    executor.shutdownNow();
    debug("post: shutdownNow");
  }
  super.stop();
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/evaluators/Evaluator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* Evaluator.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.evaluators;
import meka.classifiers.multilabel.MultiLabelClassifier;
import meka.experiment.evaluationstatistics.EvaluationStatistics;
import meka.events.LogSupporter;
import weka.core.Instances;
import weka.core.OptionHandler;
import java.io.Serializable;
import java.util.List;
/**
 * Interface for classes that evaluate a classifier on a dataset.
 *
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision$
 */
public interface Evaluator
  extends OptionHandler, Serializable, LogSupporter {
  /**
   * Initializes the evaluator. Called before each evaluation run.
   *
   * @return null if successfully initialized, otherwise error message
   */
  public String initialize();
  /**
   * Returns the evaluation statistics generated for the dataset.
   *
   * @param classifier the classifier to evaluate
   * @param dataset the dataset to evaluate on
   * @return the statistics
   */
  public List<EvaluationStatistics> evaluate(MultiLabelClassifier classifier, Instances dataset);
  /**
   * Stops the evaluation, if possible.
   */
  public void stop();
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/evaluators/EvaluatorJob.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* EvaluatorJob.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.evaluators;
import meka.experiment.evaluationstatistics.EvaluationStatistics;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
/**
 * Base class for evaluation jobs executed in parallel: subclasses implement
 * the actual work in {@link #doCall()}, while this class caches the outcome
 * so it can be retrieved after the executor has finished.
 *
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision$
 */
public abstract class EvaluatorJob
  implements Callable<List<EvaluationStatistics>> {

  /** the cached statistics; empty until {@link #call()} has run. */
  protected List<EvaluationStatistics> m_Result = new ArrayList<>();

  /**
   * Performs the actual evaluation.
   *
   * @return the generated results
   * @throws Exception if evaluation fails
   */
  protected abstract List<EvaluationStatistics> doCall() throws Exception;

  /**
   * Performs the evaluation, caching the results for {@link #getResult()}.
   *
   * @return the generated results
   * @throws Exception if evaluation fails
   */
  @Override
  public List<EvaluationStatistics> call() throws Exception {
    return m_Result = doCall();
  }

  /**
   * Returns the cached results of the last execution.
   *
   * @return the results
   */
  public List<EvaluationStatistics> getResult() {
    return m_Result;
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/evaluators/PercentageSplit.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* PercentageSplit.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.evaluators;
import meka.classifiers.multilabel.Evaluation;
import meka.classifiers.multilabel.MultiLabelClassifier;
import meka.core.OptionUtils;
import meka.core.Result;
import meka.experiment.evaluationstatistics.EvaluationStatistics;
import weka.core.Instances;
import weka.core.Option;
import weka.core.Randomizable;
import weka.core.Utils;
import java.util.*;
/**
 * Evaluates the classifier on a percentage split. Order can be preserved.
 *
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision$
 */
public class PercentageSplit
  extends AbstractEvaluator
  implements Randomizable {

  private static final long serialVersionUID = 6318297857792961890L;

  /** percentage to use for training. */
  protected double m_TrainPercentage = getDefaultTrainPercentage();

  /** whether to preserve the order. */
  protected boolean m_PreserveOrder = false;

  /** the seed value. */
  protected int m_Seed = getDefaultSeed();

  /** the threshold option. */
  protected String m_Threshold = getDefaultThreshold();

  /** the verbosity option. */
  protected String m_Verbosity = getDefaultVerbosity();

  /**
   * Description to be displayed in the GUI.
   *
   * @return the description
   */
  public String globalInfo() {
    return "Evaluates the classifier on a percentage split. Order can be preserved.";
  }

  /**
   * Gets the default percentage to use for training.
   *
   * @return the default
   */
  protected double getDefaultTrainPercentage() {
    return 67.0;
  }

  /**
   * Set the percentage to use for training; values outside (0, 100) are
   * rejected and leave the current value unchanged.
   *
   * @param value the percentage (0-100, exclusive)
   */
  public void setTrainPercentage(double value) {
    if ((value > 0) && (value < 100))
      m_TrainPercentage = value;
    else
      System.err.println("Train percentage must satisfy 0 < x < 100, provided: " + value);
  }

  /**
   * Gets the percentage to use for training.
   *
   * @return the percentage (0-100)
   */
  public double getTrainPercentage() {
    return m_TrainPercentage;
  }

  /**
   * Describes this property.
   *
   * @return the description
   */
  public String trainPercentageTipText() {
    return "The percentage of the dataset to use for training.";
  }

  /**
   * Sets whether to preserve the order instead of randomizing the data.
   *
   * @param value true if to preserve the order
   */
  public void setPreserveOrder(boolean value) {
    m_PreserveOrder = value;
  }

  /**
   * Returns whether to preserve the order instead of randomizing the data.
   *
   * @return true if to preserve the order
   */
  public boolean getPreserveOrder() {
    return m_PreserveOrder;
  }

  /**
   * Describes this property.
   *
   * @return the description
   */
  public String preserveOrderTipText() {
    return "If enabled, no randomization is occurring and the order in the data is preserved.";
  }

  /**
   * Gets the default seed for the random number generations.
   *
   * @return the default
   */
  protected int getDefaultSeed() {
    return 0;
  }

  /**
   * Set the seed for random number generation.
   *
   * @param value the seed
   */
  @Override
  public void setSeed(int value) {
    m_Seed = value;
  }

  /**
   * Gets the seed for the random number generations.
   *
   * @return the seed for the random number generation
   */
  @Override
  public int getSeed() {
    return m_Seed;
  }

  /**
   * Describes this property.
   *
   * @return the description
   */
  public String seedTipText() {
    return "The seed to use for randomization.";
  }

  /**
   * Gets the default threshold option.
   *
   * @return the default
   */
  protected String getDefaultThreshold() {
    return "PCut1";
  }

  /**
   * Set the threshold option.
   *
   * @param value the option
   */
  public void setThreshold(String value) {
    m_Threshold = value;
  }

  /**
   * Gets the threshold option.
   *
   * @return the option
   */
  public String getThreshold() {
    return m_Threshold;
  }

  /**
   * Describes this property.
   *
   * @return the description
   */
  public String thresholdTipText() {
    return "The threshold option.";
  }

  /**
   * Gets the default verbosity option.
   *
   * @return the default
   */
  protected String getDefaultVerbosity() {
    return "3";
  }

  /**
   * Set the verbosity option.
   *
   * @param value the option
   */
  public void setVerbosity(String value) {
    m_Verbosity = value;
  }

  /**
   * Gets the verbosity option.
   *
   * @return the option
   */
  public String getVerbosity() {
    return m_Verbosity;
  }

  /**
   * Describes this property.
   *
   * @return the description
   */
  public String verbosityTipText() {
    return "The verbosity option.";
  }

  /**
   * Returns an enumeration of all the available options.
   *
   * @return an enumeration of all available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector result = new Vector();
    OptionUtils.add(result, super.listOptions());
    OptionUtils.addOption(result, trainPercentageTipText(), "" + getDefaultTrainPercentage(), 'P');
    OptionUtils.addFlag(result, preserveOrderTipText(), 'O');
    OptionUtils.addOption(result, seedTipText(), "" + getDefaultSeed(), 'S');
    OptionUtils.addOption(result, thresholdTipText(), "" + getDefaultThreshold(), 'T');
    OptionUtils.addOption(result, verbosityTipText(), "" + getDefaultVerbosity(), 'V');
    return OptionUtils.toEnumeration(result);
  }

  /**
   * Sets the options.
   *
   * @param options the options to parse
   * @throws Exception if parsing fails
   */
  @Override
  public void setOptions(String[] options) throws Exception {
    setTrainPercentage(OptionUtils.parse(options, 'P', getDefaultTrainPercentage()));
    setPreserveOrder(Utils.getFlag('O', options));
    setSeed(OptionUtils.parse(options, 'S', getDefaultSeed()));
    setThreshold(OptionUtils.parse(options, 'T', getDefaultThreshold()));
    setVerbosity(OptionUtils.parse(options, 'V', getDefaultVerbosity()));
    super.setOptions(options);
  }

  /**
   * Returns the options.
   *
   * @return the current options
   */
  @Override
  public String[] getOptions() {
    List<String> result = new ArrayList<>();
    OptionUtils.add(result, super.getOptions());
    OptionUtils.add(result, 'P', getTrainPercentage());
    OptionUtils.add(result, 'O', getPreserveOrder());
    OptionUtils.add(result, 'S', getSeed());
    OptionUtils.add(result, 'T', getThreshold());
    OptionUtils.add(result, 'V', getVerbosity());
    return OptionUtils.toArray(result);
  }

  /**
   * Returns the evaluation statistics generated for the dataset: trains on
   * the first trainPercentage percent of the (possibly randomized) data and
   * tests on the remainder.
   *
   * @param classifier the classifier to evaluate
   * @param dataset the dataset to evaluate on
   * @return the statistics (a single entry); emptied if stopped
   */
  @Override
  public List<EvaluationStatistics> evaluate(MultiLabelClassifier classifier, Instances dataset) {
    List<EvaluationStatistics> result;
    int trainSize;
    Instances train;
    Instances test;
    Result res;
    result = new ArrayList<>();
    // randomize a copy unless the original order must be preserved
    if (!m_PreserveOrder) {
      dataset = new Instances(dataset);
      dataset.randomize(new Random(m_Seed));
    }
    trainSize = (int) (dataset.numInstances() * m_TrainPercentage / 100.0);
    train = new Instances(dataset, 0, trainSize);
    test = new Instances(dataset, trainSize, dataset.numInstances() - trainSize);
    try {
      // NOTE(review): unlike CrossValidation, the classifier is evaluated
      // directly rather than a shallow copy -- the trained state leaks back
      // to the caller's instance; confirm intended
      res = Evaluation.evaluateModel(classifier, train, test, m_Threshold, m_Verbosity);
      result.add(new EvaluationStatistics(classifier, dataset, res));
    }
    catch (Exception e) {
      handleException(
        "Failed to evaluate dataset '" + dataset.relationName() + "' with classifier: " + Utils.toCommandLine(classifier), e);
    }
    if (m_Stopped)
      result.clear();
    return result;
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/evaluators/RepeatedRuns.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* RepeatedRuns.java
* Copyright (C) 2015-2017 University of Waikato, Hamilton, NZ
*/
package meka.experiment.evaluators;
import meka.classifiers.multilabel.MultiLabelClassifier;
import meka.core.OptionUtils;
import meka.core.ThreadLimiter;
import meka.core.ThreadUtils;
import meka.events.LogListener;
import meka.experiment.evaluationstatistics.EvaluationStatistics;
import weka.core.Instances;
import weka.core.Option;
import weka.core.Randomizable;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.Vector;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
/**
* Repeatedly executes the base evaluator.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class RepeatedRuns
extends AbstractMetaEvaluator
implements ThreadLimiter {
private static final long serialVersionUID = -1230107553603089463L;
/** the key for the run number. */
public final static String KEY_RUN = "Run";
/** the lower number of runs (included). */
protected int m_LowerRuns = getDefaultLowerRuns();
/** the upper number of runs (included). */
protected int m_UpperRuns = getDefaultUpperRuns();
/** the number of threads to use for parallel execution. */
protected int m_NumThreads = getDefaultNumThreads();
/** the actual number of threads to use. */
protected int m_ActualNumThreads;
/** the executor service to use for parallel execution. */
protected transient ExecutorService m_Executor;
/**
* Description to be displayed in the GUI.
*
* @return the description
*/
public String globalInfo() {
return "Performs repeated runs of the base evaluator. If the base evaluator is randomizable, "
+ "the run number is used as seed. The base evaluator gets initialized before each "
+ "run.";
}
/**
* Returns the default evaluator to use.
*
* @return the default
*/
@Override
protected Evaluator getDefaultEvaluator() {
return new CrossValidation();
}
/**
* Returns the default lower number of runs to perform.
*
* @return the default
*/
protected int getDefaultLowerRuns() {
return 1;
}
/**
* Sets the lower number of runs to perform (included).
*
* @param value the number of runs
*/
public void setLowerRuns(int value) {
m_LowerRuns = value;
}
/**
* Returns the lower number of runs to perform (included).
*
* @return the number of runs
*/
public int getLowerRuns() {
return m_LowerRuns;
}
/**
* Describes this property.
*
* @return the description
*/
public String lowerRunsTipText() {
return "The lower number of runs to perform (included).";
}
/**
* Returns the default upper number of runs to perform.
*
* @return the default
*/
protected int getDefaultUpperRuns() {
return 10;
}
/**
* Sets the upper number of runs to perform (included).
*
* @param value the number of runs
*/
public void setUpperRuns(int value) {
m_UpperRuns = value;
}
/**
* Returns the upper number of runs to perform (included).
*
* @return the number of runs
*/
public int getUpperRuns() {
return m_UpperRuns;
}
/**
* Describes this property.
*
* @return the description
*/
public String upperRunsTipText() {
return "The upper number of runs to perform (included).";
}
/**
* Returns the default number of threads to use.
*
* @return the number of threads: -1 = # of CPUs/cores; 0/1 = sequential execution
*/
protected int getDefaultNumThreads() {
return ThreadUtils.SEQUENTIAL;
}
/**
* Sets the number of threads to use.
*
* @param value the number of threads: -1 = # of CPUs/cores; 0/1 = sequential execution
*/
public void setNumThreads(int value) {
if (value >= -1) {
m_NumThreads = value;
}
else {
log("Number of threads must be >= -1, provided: " + value);
}
}
/**
* Returns the number of threads to use.
*
* @return the number of threads: -1 = # of CPUs/cores; 0/1 = sequential execution
*/
public int getNumThreads() {
return m_NumThreads;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for
* displaying in the GUI or for listing the options.
*/
public String numThreadsTipText() {
return "The number of threads to use ; -1 = number of CPUs/cores; 0 or 1 = sequential execution.";
}
/**
* Returns an enumeration of all the available options..
*
* @return an enumeration of all available options.
*/
@Override
public Enumeration<Option> listOptions() {
Vector result = new Vector();
OptionUtils.add(result, super.listOptions());
OptionUtils.addOption(result, lowerRunsTipText(), "" + getDefaultLowerRuns(), "lower");
OptionUtils.addOption(result, upperRunsTipText(), "" + getDefaultUpperRuns(), "upper");
OptionUtils.addOption(result, numThreadsTipText(), "" + getDefaultNumThreads(), "num-threads");
return OptionUtils.toEnumeration(result);
}
/**
* Sets the options.
*
* @param options the options to parse
* @throws Exception if parsing fails
*/
@Override
public void setOptions(String[] options) throws Exception {
setLowerRuns(OptionUtils.parse(options, "lower", getDefaultLowerRuns()));
setUpperRuns(OptionUtils.parse(options, "upper", getDefaultUpperRuns()));
setNumThreads(OptionUtils.parse(options, "num-threads", getDefaultNumThreads()));
super.setOptions(options);
}
/**
* Returns the options.
*
* @return the current options
*/
@Override
public String[] getOptions() {
List<String> result = new ArrayList<>();
OptionUtils.add(result, super.getOptions());
OptionUtils.add(result, "lower", getLowerRuns());
OptionUtils.add(result, "upper", getUpperRuns());
OptionUtils.add(result, "num-threads", getNumThreads());
return OptionUtils.toArray(result);
}
/**
* Executes the runs in sequential order.
*
* @param classifier the classifier to evaluate
* @param dataset the dataset to evaluate on
* @return the statistics
*/
protected List<EvaluationStatistics> evaluateSequential(MultiLabelClassifier classifier, Instances dataset) {
List<EvaluationStatistics> result;
List<EvaluationStatistics> stats;
int i;
result = new ArrayList<>();
for (i = m_LowerRuns; i <= m_UpperRuns; i++) {
log("Run: " + i);
if (m_Evaluator instanceof Randomizable)
((Randomizable) m_Evaluator).setSeed(i);
m_Evaluator.initialize();
stats = m_Evaluator.evaluate(classifier, dataset);
if (stats != null) {
for (EvaluationStatistics stat: stats) {
stat.put(KEY_RUN, i);
result.add(stat);
}
}
if (m_Stopped)
break;
}
return result;
}
/**
* Executes the runs in sequential order.
*
* @param classifier the classifier to evaluate
* @param dataset the dataset to evaluate on
* @return the statistics
*/
protected List<EvaluationStatistics> evaluateParallel(final MultiLabelClassifier classifier, final Instances dataset) {
List<EvaluationStatistics> result;
ArrayList<EvaluatorJob> jobs;
EvaluatorJob job;
int i;
result = new ArrayList<>();
debug("pre: create jobs");
jobs = new ArrayList<>();
for (i = m_LowerRuns; i <= m_UpperRuns; i++) {
final int index = i;
job = new EvaluatorJob() {
protected List<EvaluationStatistics> doCall() throws Exception {
log("Executing run #" + index + "...");
Evaluator evaluator = (Evaluator) OptionUtils.shallowCopy(m_Evaluator);
for (LogListener l: m_LogListeners)
evaluator.addLogListener(l);
if (evaluator instanceof Randomizable)
((Randomizable) evaluator).setSeed(index);
evaluator.initialize();
List<EvaluationStatistics> stats = m_Evaluator.evaluate(classifier, dataset);
for (LogListener l: m_LogListeners)
evaluator.removeLogListener(l);
log("...finished run #" + index + ((stats == null) ? "" : " with error"));
return stats;
}
};
jobs.add(job);
}
debug("post: create jobs");
// execute jobs
m_Executor = Executors.newFixedThreadPool(m_ActualNumThreads);
debug("pre: submit");
try {
for (i = 0; i < jobs.size(); i++)
m_Executor.submit(jobs.get(i));
}
catch (RejectedExecutionException e) {
// ignored
}
catch (Exception e) {
handleException("Failed to start up jobs", e);
}
debug("post: submit");
debug("pre: shutdown");
m_Executor.shutdown();
debug("post: shutdown");
// wait for threads to finish
debug("pre: wait");
while (!m_Executor.isTerminated()) {
try {
m_Executor.awaitTermination(100, TimeUnit.MILLISECONDS);
}
catch (InterruptedException e) {
// ignored
}
catch (Exception e) {
handleException("Failed to await termination", e);
}
}
debug("post: wait");
// collect results
debug("pre: collect");
for (i = 0; i < jobs.size(); i++)
result.addAll(jobs.get(i).getResult());
debug("post: collect");
return result;
}
/**
* Returns the evaluation statistics generated for the dataset.
*
* @param classifier the classifier to evaluate
* @param dataset the dataset to evaluate on
* @return the statistics
*/
@Override
public List<EvaluationStatistics> evaluate(MultiLabelClassifier classifier, Instances dataset) {
List<EvaluationStatistics> result;
m_ActualNumThreads = ThreadUtils.getActualNumThreads(m_NumThreads, m_UpperRuns - m_LowerRuns + 1);
log("Number of threads (" + ThreadUtils.SEQUENTIAL + " = sequential): " + m_ActualNumThreads);
if (m_ActualNumThreads == ThreadUtils.SEQUENTIAL)
result = evaluateSequential(classifier, dataset);
else
result = evaluateParallel(classifier, dataset);
if (m_Stopped)
result.clear();
return result;
}
/**
* Stops the evaluation, if possible.
*/
@Override
public void stop() {
if (m_Executor != null) {
debug("pre: shutdownNow");
m_Executor.shutdownNow();
debug("post: shutdownNow");
}
super.stop();
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/events/ExecutionStageEvent.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* ExecutionStageEvent.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.events;
import meka.experiment.Experiment;
import java.util.EventObject;
/**
 * Event that gets sent by an experiment when it enters a new stage in the execution.
 *
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision$
 */
public class ExecutionStageEvent
  extends EventObject {

  private static final long serialVersionUID = 7732581989591408787L;

  /**
   * The stages of an experiment.
   *
   * @author FracPete (fracpete at waikato dot ac dot nz)
   * @version $Revision$
   */
  public enum Stage {
    INITIALIZING,
    RUNNING,
    STOPPING,
    FINISH
  }

  /** the stage the experiment entered. */
  protected Stage m_Stage;

  /**
   * Gets called when the experiment enters a new stage.
   *
   * @param source the experiment that triggered the event
   * @param stage the stage
   */
  public ExecutionStageEvent(Experiment source, Stage stage) {
    super(source);
    m_Stage = stage;
  }

  /**
   * Returns the associated experiment (the event source).
   *
   * @return the experiment
   */
  public Experiment getExperiment() {
    return (Experiment) getSource();
  }

  /**
   * Returns the stage.
   *
   * @return the stage
   */
  public Stage getStage() {
    return m_Stage;
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/events/ExecutionStageListener.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* ExecutionStageListener.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.events;
/**
 * Interface for classes that want to be notified whenever an experiment enters a new stage.
 *
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision$
 */
public interface ExecutionStageListener {
  /**
   * Gets called when the experiment enters a new stage.
   *
   * @param e the event, holding the experiment and the stage it entered
   */
  public void experimentStage(ExecutionStageEvent e);
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/events/IterationNotificationEvent.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* IterationNotificationEvent.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.events;
import meka.classifiers.multilabel.MultiLabelClassifier;
import meka.experiment.Experiment;
import weka.core.Instances;
import java.util.EventObject;
/**
 * Event that gets sent by an experiment when a new classifier/dataset combination is being evaluated.
 *
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision$
 */
public class IterationNotificationEvent
  extends EventObject {

  private static final long serialVersionUID = 7732581989591408787L;

  /** the classifier being evaluated. */
  protected MultiLabelClassifier m_Classifier;

  /** the dataset being evaluated on. */
  protected Instances m_Dataset;

  /**
   * Gets called when the experiment starts on a new evaluation.
   *
   * @param source the experiment that triggered the event
   * @param classifier the classifier
   * @param dataset the dataset
   */
  public IterationNotificationEvent(Experiment source, MultiLabelClassifier classifier, Instances dataset) {
    super(source);
    m_Classifier = classifier;
    m_Dataset = dataset;
  }

  /**
   * Returns the associated experiment (the event source).
   *
   * @return the experiment
   */
  public Experiment getExperiment() {
    return (Experiment) getSource();
  }

  /**
   * Returns the classifier.
   *
   * @return the classifier
   */
  public MultiLabelClassifier getClassifier() {
    return m_Classifier;
  }

  /**
   * Returns the dataset.
   *
   * @return the dataset
   */
  public Instances getDataset() {
    return m_Dataset;
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/events/IterationNotificationListener.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* IterationNotificationListener.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.events;
/**
 * Interface for classes that listen to iteration events of an experiment.
 *
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision$
 */
public interface IterationNotificationListener {
  /**
   * Gets called if there is a new iteration (classifier/dataset combination) occurring in the experiment.
   *
   * @param e the event, holding the classifier and dataset of the iteration
   */
  public void nextIteration(IterationNotificationEvent e);
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/events/StatisticsNotificationEvent.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* StatisticsNotificationEvent.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.events;
import meka.experiment.Experiment;
import meka.experiment.evaluationstatistics.EvaluationStatistics;
import java.util.EventObject;
import java.util.List;
/**
* Event that gets sent by an experiment when new statistics become available.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class StatisticsNotificationEvent
	extends EventObject {

	private static final long serialVersionUID = 7732581989591408787L;

	/** the statistics that became available. */
	protected List<EvaluationStatistics> m_Statistics;

	/**
	 * Initializes the event with the experiment that produced the statistics.
	 *
	 * @param source the experiment that triggered the event
	 * @param stats the statistics that became available
	 */
	public StatisticsNotificationEvent(Experiment source, List<EvaluationStatistics> stats) {
		super(source);
		this.m_Statistics = stats;
	}

	/**
	 * Returns the experiment that fired this event.
	 *
	 * @return the experiment
	 */
	public Experiment getExperiment() {
		// the event source is always the experiment
		Object src = getSource();
		return (Experiment) src;
	}

	/**
	 * Returns the statistics carried by this event.
	 *
	 * @return the statistics
	 */
	public List<EvaluationStatistics> getStatistics() {
		return this.m_Statistics;
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/events/StatisticsNotificationListener.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* StatisticsNotificationListener.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.events;
/**
* Interface for classes that want to be notified whenever new statistics are available.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public interface StatisticsNotificationListener {

	/**
	 * Gets called if new statistics have become available.
	 *
	 * @param e the event carrying the newly available statistics
	 */
	public void statisticsAvailable(StatisticsNotificationEvent e);
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/filehandlers/AbstractExperimentFileHandler.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* AbstractExperimentFileHandler.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.filehandlers;
import meka.events.LogObject;
import meka.experiment.Experiment;
import weka.core.Option;
import java.io.File;
import java.util.Enumeration;
import java.util.Vector;
/**
* Ancestor for experiment file handler classes.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public abstract class AbstractExperimentFileHandler
	extends LogObject
	implements ExperimentFileHandler {

	private static final long serialVersionUID = -5374752688504488703L;

	/**
	 * Description to be displayed in the GUI.
	 *
	 * @return the description
	 */
	public abstract String globalInfo();

	/**
	 * Returns an enumeration of all the available options.
	 *
	 * @return an enumeration of all available options
	 */
	@Override
	public Enumeration<Option> listOptions() {
		// typed Vector instead of the raw type: avoids an unchecked warning
		return new Vector<Option>().elements();
	}

	/**
	 * Sets the options. This ancestor defines no options, hence a no-op.
	 *
	 * @param options the options
	 * @throws Exception never
	 */
	@Override
	public void setOptions(String[] options) throws Exception {
	}

	/**
	 * Returns the options. This ancestor defines no options.
	 *
	 * @return the options (always empty)
	 */
	@Override
	public String[] getOptions() {
		return new String[0];
	}

	/**
	 * Reads an experiment from disk.
	 *
	 * @param file the file to load
	 * @return the experiment, null if failed to load
	 */
	@Override
	public abstract Experiment read(File file);

	/**
	 * Writes an experiment to disk.
	 *
	 * @param exp the experiment to save
	 * @param file the file to save to
	 * @return null if successful, otherwise error message
	 */
	@Override
	public abstract String write(Experiment exp, File file);
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/filehandlers/CommandLineSetup.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* CommandLineSetup.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.filehandlers;
import meka.core.FileUtils;
import meka.core.OptionUtils;
import meka.experiment.Experiment;
import weka.core.Utils;
import java.io.*;
/**
* Stores the setup of the experiment as a commandline.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class CommandLineSetup
	extends AbstractExperimentFileHandler {

	private static final long serialVersionUID = -5374752688504488703L;

	/**
	 * Description to be displayed in the GUI.
	 *
	 * @return the description
	 */
	public String globalInfo() {
		return "Stores the setup of the experiment as a commandline.";
	}

	/**
	 * A description of the file format.
	 *
	 * @return the description
	 */
	@Override
	public String getFormatDescription() {
		return "Experiment setup";
	}

	/**
	 * Returns the format extensions.
	 *
	 * @return the extensions (incl dot)
	 */
	@Override
	public String[] getFormatExtensions() {
		return new String[]{".setup"};
	}

	/**
	 * Reads an experiment from disk. The setup is expected on the first line
	 * of the file as a commandline string.
	 *
	 * @param file the file to load
	 * @return the experiment, null if failed to load
	 */
	@Override
	public Experiment read(File file) {
		Experiment result = null;
		// try-with-resources guarantees the reader gets closed, even on error
		try (BufferedReader breader = new BufferedReader(new FileReader(file))) {
			String line = breader.readLine();
			if (line == null)
				throw new IllegalStateException("File contains no commandline: " + file);
			result = OptionUtils.fromCommandLine(Experiment.class, line);
		}
		catch (Exception e) {
			result = null;
			handleException("Failed to read experiment from: " + file, e);
		}
		return result;
	}

	/**
	 * Writes an experiment to disk as a single commandline string.
	 *
	 * @param exp the experiment to save
	 * @param file the file to save to
	 * @return null if successful, otherwise error message
	 */
	@Override
	public String write(Experiment exp, File file) {
		String result = null;
		// try-with-resources guarantees the writer gets flushed and closed
		try (BufferedWriter bwriter = new BufferedWriter(new FileWriter(file))) {
			bwriter.write(Utils.toCommandLine(exp));
			bwriter.newLine();
			bwriter.flush();
		}
		catch (Exception e) {
			result = handleException("Failed to write experiment to: " + file, e);
		}
		return result;
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/filehandlers/ExperimentFileHandler.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* ExperimentFileHandler.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.filehandlers;
import meka.experiment.Experiment;
import meka.events.LogSupporter;
import weka.core.OptionHandler;
import java.io.File;
import java.io.Serializable;
/**
* Interface for classes load/save experiments.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public interface ExperimentFileHandler
	extends OptionHandler, Serializable, LogSupporter {

	/**
	 * A description of the file format.
	 *
	 * @return the description
	 */
	public String getFormatDescription();

	/**
	 * Returns the format extensions.
	 *
	 * @return the extensions (incl dot)
	 */
	public String[] getFormatExtensions();

	/**
	 * Reads an experiment from disk.
	 *
	 * @param file the file to load
	 * @return the experiment, null if failed to load
	 */
	public Experiment read(File file);

	/**
	 * Writes an experiment to disk.
	 *
	 * @param exp the experiment to save
	 * @param file the file to save to
	 * @return null if successful, otherwise error message
	 */
	public String write(Experiment exp, File file);
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/filehandlers/SerializedExperiment.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* SerializedExperiment.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.filehandlers;
import meka.experiment.Experiment;
import weka.core.SerializationHelper;
import java.io.File;
/**
* Stores the experiment as a Java serialized object.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class SerializedExperiment
	extends AbstractExperimentFileHandler {

	private static final long serialVersionUID = -5374752688504488703L;

	/**
	 * Description to be displayed in the GUI.
	 *
	 * @return the description
	 */
	public String globalInfo() {
		return "Stores the experiment as a Java serialized object.";
	}

	/**
	 * A description of the file format.
	 *
	 * @return the description
	 */
	@Override
	public String getFormatDescription() {
		return "Serialized experiment";
	}

	/**
	 * Returns the format extensions.
	 *
	 * @return the extensions (incl dot)
	 */
	@Override
	public String[] getFormatExtensions() {
		return new String[]{".ser"};
	}

	/**
	 * Reads an experiment from disk via Java deserialization.
	 *
	 * @param file the file to load
	 * @return the experiment, null if failed to load
	 */
	@Override
	public Experiment read(File file) {
		Experiment result;
		try {
			result = (Experiment) SerializationHelper.read(file.getAbsolutePath());
		}
		catch (Exception e) {
			handleException("Failed to read experiment from: " + file, e);
			result = null;
		}
		return result;
	}

	/**
	 * Writes an experiment to disk via Java serialization.
	 *
	 * @param exp the experiment to save
	 * @param file the file to save to
	 * @return null if successful, otherwise error message
	 */
	@Override
	public String write(Experiment exp, File file) {
		try {
			SerializationHelper.write(file.getAbsolutePath(), exp);
			return null;
		}
		catch (Exception e) {
			return handleException("Failed to write experiment to: " + file, e);
		}
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/statisticsexporters/AbstractEvaluationStatisticsExporter.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* AbstractEvaluationStatisticsExporter.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.statisticsexporters;
import meka.events.LogObject;
import meka.experiment.evaluationstatistics.EvaluationStatistics;
import weka.core.Option;
import java.util.Enumeration;
import java.util.List;
import java.util.Vector;
/**
* Ancestor for statistics exporters.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public abstract class AbstractEvaluationStatisticsExporter
	extends LogObject
	implements EvaluationStatisticsExporter {

	private static final long serialVersionUID = 8950819250563958834L;

	/**
	 * Description to be displayed in the GUI.
	 *
	 * @return the description
	 */
	public abstract String globalInfo();

	/**
	 * Returns an enumeration of all the available options.
	 *
	 * @return an enumeration of all available options
	 */
	@Override
	public Enumeration<Option> listOptions() {
		// typed Vector instead of the raw type: avoids an unchecked warning
		return new Vector<Option>().elements();
	}

	/**
	 * Sets the options. This ancestor defines no options, hence a no-op.
	 *
	 * @param options the options
	 * @throws Exception never
	 */
	@Override
	public void setOptions(String[] options) throws Exception {
	}

	/**
	 * Returns the options. This ancestor defines no options.
	 *
	 * @return the options (always empty)
	 */
	@Override
	public String[] getOptions() {
		return new String[0];
	}

	/**
	 * Performs the actual export of the statistics.
	 *
	 * @param stats the statistics to export
	 * @return null if successfully exported, otherwise error message
	 */
	protected abstract String doExport(List<EvaluationStatistics> stats);

	/**
	 * Exports the statistics. Delegates to {@link #doExport(List)}.
	 *
	 * @param stats the statistics to export
	 * @return null if successfully exported, otherwise error message
	 */
	@Override
	public String export(List<EvaluationStatistics> stats) {
		return doExport(stats);
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/statisticsexporters/AbstractFileBasedEvaluationStatisticsExporter.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* AbstractFileBasedEvaluationStatisticsExporter.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.statisticsexporters;
import meka.core.OptionUtils;
import meka.experiment.evaluationstatistics.EvaluationStatistics;
import weka.core.Option;
import java.io.File;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.Vector;
/**
* Ancestor for file-based statistics exporters.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public abstract class AbstractFileBasedEvaluationStatisticsExporter
	extends AbstractEvaluationStatisticsExporter
	implements FileBasedEvaluationStatisticsExporter {

	private static final long serialVersionUID = -8039274378207154807L;

	/** the file to read from/write to. */
	protected File m_File = getDefaultFile();

	/**
	 * Returns the default file.
	 *
	 * @return the default
	 */
	protected File getDefaultFile() {
		return new File(".");
	}

	/**
	 * Sets the file to read from/write to.
	 *
	 * @param value the file
	 */
	public void setFile(File value) {
		m_File = value;
	}

	/**
	 * Returns the file to read from/write to.
	 *
	 * @return the file
	 */
	public File getFile() {
		return m_File;
	}

	/**
	 * Describes this property.
	 *
	 * @return the description
	 */
	public String fileTipText() {
		return "The file to read from/write to.";
	}

	/**
	 * Returns an enumeration of all the available options.
	 *
	 * @return an enumeration of all available options
	 */
	@Override
	public Enumeration<Option> listOptions() {
		// typed Vector instead of the raw type: avoids an unchecked warning
		Vector<Option> result = new Vector<>();
		OptionUtils.add(result, super.listOptions());
		OptionUtils.addOption(result, fileTipText(), "" + getDefaultFile(), 'F');
		return OptionUtils.toEnumeration(result);
	}

	/**
	 * Sets the options.
	 *
	 * @param options the options to parse
	 * @throws Exception if parsing fails
	 */
	@Override
	public void setOptions(String[] options) throws Exception {
		setFile(OptionUtils.parse(options, 'F', getDefaultFile()));
		super.setOptions(options);
	}

	/**
	 * Returns the options.
	 *
	 * @return the current options
	 */
	@Override
	public String[] getOptions() {
		List<String> result = new ArrayList<>();
		OptionUtils.add(result, super.getOptions());
		OptionUtils.add(result, 'F', getFile());
		return OptionUtils.toArray(result);
	}

	/**
	 * Exports the statistics, logging the target file first.
	 *
	 * @param stats the statistics to export
	 * @return null if successfully exported, otherwise error message
	 */
	@Override
	public String export(List<EvaluationStatistics> stats) {
		log("Exporting to: " + m_File);
		return super.export(stats);
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/statisticsexporters/AbstractMeasurementEvaluationStatisticsExporter.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* AbstractMeasurementEvaluationStatisticsExporter.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.statisticsexporters;
import meka.core.OptionUtils;
import meka.experiment.evaluationstatistics.EvaluationStatistics;
import weka.core.Option;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.Vector;
/**
* Ancestor for classes that export a single statistic.
* First column are datasets, first row are classifiers.
* Automatically aggregates the statistics and displays the "mean".
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public abstract class AbstractMeasurementEvaluationStatisticsExporter
	extends AbstractFileBasedEvaluationStatisticsExporter
	implements FileBasedMeasurementEvaluationStatisticsExporter {

	private static final long serialVersionUID = -2891664931765964612L;

	/** the measurement to output. */
	protected String m_Measurement = getDefaultMeasurement();

	/**
	 * Returns the default measurement to use.
	 *
	 * @return the default
	 */
	protected String getDefaultMeasurement() {
		return "Hamming loss";
	}

	/**
	 * Sets the measurement to use.
	 *
	 * @param value the measurement
	 */
	public void setMeasurement(String value) {
		m_Measurement = value;
	}

	/**
	 * Returns the measurement in use.
	 *
	 * @return the measurement
	 */
	public String getMeasurement() {
		return m_Measurement;
	}

	/**
	 * Describes this property.
	 *
	 * @return the description
	 */
	public String measurementTipText() {
		return "The measurement to output.";
	}

	/**
	 * Returns an enumeration of all the available options.
	 *
	 * @return an enumeration of all available options
	 */
	@Override
	public Enumeration<Option> listOptions() {
		// typed Vector instead of the raw type: avoids an unchecked warning
		Vector<Option> result = new Vector<>();
		OptionUtils.add(result, super.listOptions());
		OptionUtils.addOption(result, measurementTipText(), getDefaultMeasurement(), 'M');
		return OptionUtils.toEnumeration(result);
	}

	/**
	 * Sets the options.
	 *
	 * @param options the options to parse
	 * @throws Exception if parsing fails
	 */
	@Override
	public void setOptions(String[] options) throws Exception {
		setMeasurement(OptionUtils.parse(options, 'M', getDefaultMeasurement()));
		super.setOptions(options);
	}

	/**
	 * Returns the options.
	 *
	 * @return the current options
	 */
	@Override
	public String[] getOptions() {
		List<String> result = new ArrayList<>();
		OptionUtils.add(result, super.getOptions());
		OptionUtils.add(result, 'M', getMeasurement());
		return OptionUtils.toArray(result);
	}

	/**
	 * Aggregates the stats and returns the "mean".
	 *
	 * @param stats the stats to aggregate
	 * @return the aggregated stats
	 */
	protected List<EvaluationStatistics> aggregate(List<EvaluationStatistics> stats) {
		InMemory inmem;
		SimpleAggregate aggregate;

		// aggregate into an in-memory exporter, so the result can be retrieved
		inmem = new InMemory();
		aggregate = new SimpleAggregate();
		aggregate.setSuffixMean("");  // empty suffix: the "mean" keeps the plain statistic name
		aggregate.setExporter(inmem);
		aggregate.export(stats);

		return inmem.getStatistics();
	}

	/**
	 * Exports the aggregated statistics.
	 *
	 * @param stats the aggregated statistics to export
	 * @return null if successfully exported, otherwise error message
	 */
	protected abstract String doExportAggregated(List<EvaluationStatistics> stats);

	/**
	 * Exports the statistics, aggregating them first.
	 *
	 * @param stats the statistics to export
	 * @return null if successfully exported, otherwise error message
	 */
	@Override
	protected String doExport(List<EvaluationStatistics> stats) {
		return doExportAggregated(aggregate(stats));
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/statisticsexporters/AbstractMetaEvaluationStatisticsExporter.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* AbstractMetaEvaluationStatisticsExporter.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.statisticsexporters;
import meka.core.OptionUtils;
import meka.events.LogListener;
import weka.core.Option;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.Vector;
/**
* Wraps another exporter.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public abstract class AbstractMetaEvaluationStatisticsExporter
	extends AbstractEvaluationStatisticsExporter {

	private static final long serialVersionUID = 7974229256817823349L;

	/** the base exporter. */
	protected EvaluationStatisticsExporter m_Exporter = getDefaultExporter();

	/**
	 * Returns the default exporter to use.
	 *
	 * @return the default
	 */
	protected abstract EvaluationStatisticsExporter getDefaultExporter();

	/**
	 * Sets the exporter to use.
	 *
	 * @param value the exporter
	 */
	public void setExporter(EvaluationStatisticsExporter value) {
		m_Exporter = value;
	}

	/**
	 * Returns the exporter in use.
	 *
	 * @return the exporter
	 */
	public EvaluationStatisticsExporter getExporter() {
		return m_Exporter;
	}

	/**
	 * Describes this property.
	 *
	 * @return the description
	 */
	public String exporterTipText() {
		return "The base exporter to use.";
	}

	/**
	 * Returns an enumeration of all the available options.
	 *
	 * @return an enumeration of all available options
	 */
	@Override
	public Enumeration<Option> listOptions() {
		// typed Vector instead of the raw type: avoids an unchecked warning
		Vector<Option> result = new Vector<>();
		OptionUtils.add(result, super.listOptions());
		OptionUtils.addOption(result, exporterTipText(), getDefaultExporter().getClass().getName(), "base");
		return OptionUtils.toEnumeration(result);
	}

	/**
	 * Sets the options.
	 *
	 * @param options the options to parse
	 * @throws Exception if parsing fails
	 */
	@Override
	public void setOptions(String[] options) throws Exception {
		setExporter((EvaluationStatisticsExporter) OptionUtils.parse(options, "base", getDefaultExporter()));
		super.setOptions(options);
	}

	/**
	 * Returns the options.
	 *
	 * @return the current options
	 */
	@Override
	public String[] getOptions() {
		List<String> result = new ArrayList<>();
		OptionUtils.add(result, super.getOptions());
		OptionUtils.add(result, "base", getExporter());
		return OptionUtils.toArray(result);
	}

	/**
	 * Adds the log listener to use; also attaches it to the base exporter.
	 *
	 * @param l the listener
	 */
	@Override
	public void addLogListener(LogListener l) {
		super.addLogListener(l);
		m_Exporter.addLogListener(l);
	}

	/**
	 * Removes the log listener; also detaches it from the base exporter.
	 *
	 * @param l the listener
	 */
	@Override
	public void removeLogListener(LogListener l) {
		super.removeLogListener(l);
		m_Exporter.removeLogListener(l);
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/statisticsexporters/AggregatedEvaluationStatisticsExporter.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* AggregatedEvaluationStatisticsExporter.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.statisticsexporters;
import meka.experiment.evaluationstatistics.EvaluationStatistics;
import java.util.List;
/**
* Interface for statistics exporter that aggregate their data first.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public interface AggregatedEvaluationStatisticsExporter
	extends EvaluationStatisticsExporter {

	/**
	 * Sets the blank-separated list of keys to use for aggregating.
	 *
	 * @param keys the keys (blank-separated)
	 */
	public void setAggregationKeys(String keys);

	/**
	 * Returns the blank-separated list of keys used for aggregating.
	 *
	 * @return the keys (blank-separated)
	 */
	public String getAggregationKeys();

	/**
	 * Describes this property.
	 *
	 * @return the description
	 */
	public String aggregationKeysTipText();

	/**
	 * Aggregates the statistics and returns these.
	 *
	 * @param stats the statistics to aggregate
	 * @return the aggregated stats
	 */
	public List<EvaluationStatistics> aggregate(List<EvaluationStatistics> stats);
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/statisticsexporters/EvaluationStatisticsExporter.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* EvaluationStatisticsExport.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.statisticsexporters;
import meka.experiment.evaluationstatistics.EvaluationStatistics;
import meka.events.LogSupporter;
import weka.core.OptionHandler;
import java.io.Serializable;
import java.util.List;
/**
* Interface for classes that export statistics into other formats.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public interface EvaluationStatisticsExporter
	extends OptionHandler, Serializable, LogSupporter {

	/**
	 * Exports the statistics.
	 *
	 * @param stats the statistics to export
	 * @return null if successfully exported, otherwise error message
	 */
	public String export(List<EvaluationStatistics> stats);
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/statisticsexporters/FileBasedEvaluationStatisticsExporter.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* FileBasedEvaluationStatisticsHandler.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.statisticsexporters;
import meka.core.FileFormatSupporter;
import java.io.File;
/**
 * Interface for file-based statistics exporters.
 * Adds a file property plus format description/extensions used by
 * file choosers to filter suitable files.
 *
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision$
 */
public interface FileBasedEvaluationStatisticsExporter
  extends EvaluationStatisticsExporter, FileFormatSupporter {

  /**
   * Returns the format description.
   *
   * @return the file format
   */
  public String getFormatDescription();

  /**
   * Returns the format extension(s).
   *
   * @return the extension(s) (incl dot)
   */
  public String[] getFormatExtensions();

  /**
   * Sets the file to read from/write to.
   *
   * @param value the file
   */
  public void setFile(File value);

  /**
   * Returns the file to read from/write to.
   *
   * @return the file
   */
  public File getFile();

  /**
   * Describes this property.
   *
   * @return the description
   */
  public String fileTipText();
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/statisticsexporters/FileBasedMeasurementEvaluationStatisticsExporter.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* FileBasedMeasurementEvaluationStatisticsExporter.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.statisticsexporters;
/**
 * Interface for file-based statistics exporters for a single measurement.
 * The measurement is identified by its statistic name (e.g. a key present
 * in the {@code EvaluationStatistics} objects being exported).
 *
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision$
 */
public interface FileBasedMeasurementEvaluationStatisticsExporter
  extends FileBasedEvaluationStatisticsExporter {

  /**
   * Sets the measurement to use.
   *
   * @param value the measurement
   */
  public void setMeasurement(String value);

  /**
   * Returns the measurement in use.
   *
   * @return the measurement
   */
  public String getMeasurement();

  /**
   * Describes this property.
   *
   * @return the description
   */
  public String measurementTipText();
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/statisticsexporters/InMemory.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* InMemory.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.statisticsexporters;
import meka.experiment.evaluationstatistics.EvaluationStatistics;
import java.util.ArrayList;
import java.util.List;
/**
 * Just stores the statistics in memory.
 *
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision$
 */
public class InMemory
  extends AbstractEvaluationStatisticsExporter {

  private static final long serialVersionUID = 7694488908134978844L;

  /** the statistics collected by the most recent export. */
  protected List<EvaluationStatistics> m_Statistics = new ArrayList<>();

  /**
   * Description to be displayed in the GUI.
   *
   * @return the description
   */
  @Override
  public String globalInfo() {
    // fixed typo: "mmemory" -> "memory"
    return "Just stores the statistics in memory.";
  }

  /**
   * Exports the statistics. Replaces any previously stored statistics.
   *
   * @param stats the statistics to export
   * @return null if successfully exported, otherwise error message
   */
  @Override
  protected String doExport(List<EvaluationStatistics> stats) {
    m_Statistics.clear();
    m_Statistics.addAll(stats);
    return null;
  }

  /**
   * Returns the statistics.
   * NOTE: returns the live internal list, not a copy -- callers mutating it
   * will affect this exporter's state.
   *
   * @return the statistics
   */
  public List<EvaluationStatistics> getStatistics() {
    return m_Statistics;
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/statisticsexporters/LatexMeasurement.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* LatexMeasurement.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.statisticsexporters;
import meka.core.*;
import meka.experiment.evaluationstatistics.EvaluationStatistics;
import meka.experiment.evaluationstatistics.EvaluationStatisticsUtils;
import weka.core.Utils;
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.util.List;
/**
 * Exports a single statistic to a LaTeX file.
 * First column are datasets, first row are classifiers.
 * Automatically aggregates the statistics and displays the "mean".
 *
 * @author Jesse Read
 * @version $Revision$
 */
public class LatexMeasurement
  extends AbstractMeasurementEvaluationStatisticsExporter {

  private static final long serialVersionUID = -2891664931765964612L;

  /**
   * Description to be displayed in the GUI.
   *
   * @return the description
   */
  @Override
  public String globalInfo() {
    return "Exports a single statistic to a LaTex file.\n"
      + "First column are datasets, first row are classifiers.\n"
      + "Automatically aggregates the statistics and displays the 'mean'.";
  }

  /**
   * Returns the format description.
   *
   * @return the file format
   */
  @Override
  public String getFormatDescription() {
    return "LaTex statistic";
  }

  /**
   * Returns the format extension(s).
   *
   * @return the extension(s) (incl dot)
   */
  @Override
  public String[] getFormatExtensions() {
    return new String[]{".tex"};
  }

  /**
   * Exports the aggregated statistics as a LaTeX tabular environment,
   * one row per dataset, one column per classifier (value plus rank),
   * followed by an average-rank row and a commented-out classifier key.
   *
   * @param stats the aggregated statistics to export
   * @return null if successfully exported, otherwise error message
   */
  @Override
  protected String doExportAggregated(List<EvaluationStatistics> stats) {
    String result;
    List<String> classifiers;
    List<String> relations;
    List<Number> measurements;
    FileWriter fwriter;
    BufferedWriter bwriter;
    int i;

    result = null;
    classifiers = EvaluationStatisticsUtils.commandLines(stats, true);
    relations = EvaluationStatisticsUtils.relations(stats, true);
    fwriter = null;
    bwriter = null;
    try {
      fwriter = new FileWriter(m_File);
      bwriter = new BufferedWriter(fwriter);

      // output header
      // column spec: one 'l' for the dataset column plus one per classifier;
      // built in a loop (the former substring of a fixed 51-char literal threw
      // StringIndexOutOfBoundsException for more than 50 classifiers)
      StringBuilder tabularConfig = new StringBuilder();
      for (i = 0; i < classifiers.size() + 1; i++)
        tabularConfig.append('l');
      bwriter.write("% " + m_Measurement);
      bwriter.newLine();
      bwriter.write("\\begin{tabular}{" + tabularConfig + "}");
      bwriter.newLine();
      bwriter.write("\\hline");
      bwriter.newLine();
      bwriter.write(String.format("%12s ", "Datasets"));
      for (i = 0; i < classifiers.size(); i++) {
        String name = MLUtils.getShortMethodName(classifiers.get(i));
        name = LatexUtils.escape(name);
        bwriter.write(String.format("& [%d] %-6s", (i + 1), name));
      }
      bwriter.write("\\\\");
      bwriter.newLine();
      bwriter.write("\\hline");
      bwriter.newLine();
      bwriter.flush();

      // rank of each classifier on each dataset, used in the cells and the avg-rank row
      int[][] ranks = EvaluationStatisticsUtils.rankMatrix(stats, m_Measurement);

      // output statistics
      for (int d = 0; d < relations.size(); d++) {
        String relation = relations.get(d);
        String name = MLUtils.getRelationName(relation);
        name = LatexUtils.escape(name);
        bwriter.write(String.format("%12s ", name));
        for (i = 0; i < classifiers.size(); i++) {
          measurements = EvaluationStatisticsUtils.measurements(stats, classifiers.get(i), relation, m_Measurement);
          if (measurements.size() > 0) {
            // more than one measurement for a combination should not happen
            // after aggregation -- only the first one is output
            if (measurements.size() > 1)
              log("Found " + measurements.size() + " measurements for combination " + classifiers.get(i) + "/" + relation);
            String value = String.format("& %5.3f (%d) ", measurements.get(0), ranks[d][i]);
            bwriter.write(value);
          }
        }
        bwriter.write(" \\\\");
        bwriter.newLine();
        bwriter.flush();
      }
      bwriter.write("\\hline");
      bwriter.newLine();
      bwriter.write(String.format("%12s ", "Avg.\\ Rank"));
      for (i = 0; i < classifiers.size(); i++) {
        String value = String.format("& %5.3f ", Utils.mean(A.toDoubleArray(MatrixUtils.getCol(ranks, i))));
        bwriter.write(value);
      }
      bwriter.write(" \\\\");
      bwriter.newLine();
      bwriter.write("\\hline");
      bwriter.newLine();
      bwriter.write("\\end{tabular}");
      bwriter.newLine();
      bwriter.flush();

      // output key (as LaTeX comments, mapping column indices to command-lines)
      bwriter.newLine();
      bwriter.write("%Index");
      bwriter.write("\t");
      bwriter.write("Classifier");
      bwriter.newLine();
      for (i = 0; i < classifiers.size(); i++) {
        bwriter.write("%[" + (i + 1) + "]");
        bwriter.write("\t");
        bwriter.write(classifiers.get(i));
        bwriter.newLine();
      }
    }
    catch (Exception e) {
      result = handleException("Failed to export statistics to: " + m_File, e);
    }
    finally {
      FileUtils.closeQuietly(bwriter);
      FileUtils.closeQuietly(fwriter);
    }

    return result;
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/statisticsexporters/MultiExporter.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
 * MultiExporter.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.statisticsexporters;
import meka.core.OptionUtils;
import meka.experiment.evaluationstatistics.EvaluationStatistics;
import meka.events.LogListener;
import weka.core.Option;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.Vector;
/**
 * Exports the statistics using multiple exporters.
 * The exporters are invoked in order; export stops at the first failure.
 *
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision$
 */
public class MultiExporter
  extends AbstractEvaluationStatisticsExporter {

  private static final long serialVersionUID = 7974229256817823349L;

  /** the base exporters. */
  protected EvaluationStatisticsExporter[] m_Exporters = getDefaultExporters();

  /**
   * Description to be displayed in the GUI.
   *
   * @return the description
   */
  @Override
  public String globalInfo() {
    return "Exports the statistics using multiple exporters.";
  }

  /**
   * Returns the default exporters to use.
   *
   * @return the default (empty array)
   */
  protected EvaluationStatisticsExporter[] getDefaultExporters() {
    return new EvaluationStatisticsExporter[0];
  }

  /**
   * Sets the exporters to use.
   *
   * @param value the exporters
   */
  public void setExporters(EvaluationStatisticsExporter[] value) {
    m_Exporters = value;
  }

  /**
   * Returns the exporters in use.
   *
   * @return the exporters
   */
  public EvaluationStatisticsExporter[] getExporters() {
    return m_Exporters;
  }

  /**
   * Describes this property.
   *
   * @return the description
   */
  public String exportersTipText() {
    return "The base exporters to use.";
  }

  /**
   * Returns an enumeration of all the available options..
   *
   * @return an enumeration of all available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector result = new Vector();
    OptionUtils.add(result, super.listOptions());
    OptionUtils.addOption(result, exportersTipText(), "none", "exporter");
    return OptionUtils.toEnumeration(result);
  }

  /**
   * Sets the options.
   *
   * @param options the options to parse
   * @throws Exception if parsing fails
   */
  @Override
  public void setOptions(String[] options) throws Exception {
    setExporters(OptionUtils.parse(options, "exporter", EvaluationStatisticsExporter.class));
    super.setOptions(options);
  }

  /**
   * Returns the options.
   *
   * @return the current options
   */
  @Override
  public String[] getOptions() {
    List<String> result = new ArrayList<>();
    OptionUtils.add(result, super.getOptions());
    OptionUtils.add(result, "exporter", getExporters());
    return OptionUtils.toArray(result);
  }

  /**
   * Adds the log listener to use. Also registers it with all base exporters.
   *
   * @param l the listener
   */
  @Override
  public void addLogListener(LogListener l) {
    super.addLogListener(l);
    for (EvaluationStatisticsExporter exporter: m_Exporters)
      exporter.addLogListener(l);
  }

  /**
   * Removes the log listener. Also deregisters it from all base exporters.
   *
   * @param l the listener
   */
  @Override
  public void removeLogListener(LogListener l) {
    super.removeLogListener(l);
    for (EvaluationStatisticsExporter exporter: m_Exporters)
      exporter.removeLogListener(l);
  }

  /**
   * Exports the statistics with each base exporter in turn.
   * Stops at (and reports) the first exporter that returns an error.
   *
   * @param stats the statistics to export
   * @return null if successfully exported, otherwise error message
   */
  @Override
  protected String doExport(List<EvaluationStatistics> stats) {
    String result;
    int i;

    result = null;
    for (i = 0; i < m_Exporters.length; i++) {
      log("Exporter #" + (i+1) + ": " + m_Exporters[i].getClass().getName());
      result = m_Exporters[i].export(stats);
      if (result != null) {
        result = "Exporter #" + (i+1) + ": " + result;
        log(result);
        break;
      }
    }

    return result;
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/statisticsexporters/SimpleAggregate.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* SimpleAggregate.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.statisticsexporters;
import gnu.trove.list.array.TDoubleArrayList;
import meka.core.OptionUtils;
import meka.experiment.evaluationstatistics.EvaluationStatistics;
import meka.experiment.evaluationstatistics.EvaluationStatisticsComparator;
import meka.experiment.evaluationstatistics.EvaluationStatisticsUtils;
import weka.core.Option;
import weka.core.Utils;
import java.util.*;
/**
 * Simple aggregator of statistics.
 * Groups the incoming statistics by the configured aggregation keys and,
 * for each numeric statistic within a group, emits count/mean/stddev
 * columns (each of which can be suppressed individually), then forwards
 * the aggregated statistics to the base exporter.
 *
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision$
 */
public class SimpleAggregate
  extends AbstractMetaEvaluationStatisticsExporter
  implements AggregatedEvaluationStatisticsExporter {

  private static final long serialVersionUID = 45553920349638331L;

  /** the suffix for the count. */
  public final static String SUFFIX_COUNT = "-Count";

  /** the suffix for the mean. */
  public final static String SUFFIX_MEAN = "-Mean";

  /** the suffix for the stdev. */
  public final static String SUFFIX_STDEV = "-StdDev";

  /** the aggregation keys (blank-separated). */
  protected String m_AggregationKeys = getDefaultAggregationKeys();

  /** the count suffix. */
  protected String m_SuffixCount = getDefaultSuffixCount();

  /** the mean suffix. */
  protected String m_SuffixMean = getDefaultSuffixMean();

  /** the stdev suffix. */
  protected String m_SuffixStdDev = getDefaultSuffixStdDev();

  /** whether to skip the count. */
  protected boolean m_SkipCount = false;

  /** whether to skip the mean. */
  protected boolean m_SkipMean = false;

  /** whether to skip the stdev. */
  protected boolean m_SkipStdDev = false;

  /**
   * Description to be displayed in the GUI.
   *
   * @return the description
   */
  @Override
  public String globalInfo() {
    return "Simple aggregator of statistics.\n"
      + "For each numeric attribute the following attributes get generated:\n"
      + "- " + getDefaultSuffixCount() + ": the number of rows used to calculate this aggregate\n"
      + "- " + getDefaultSuffixMean() + ": the average/mean\n"
      + "- " + getDefaultSuffixStdDev() + ": the standard deviation";
  }

  /**
   * Returns the default exporter to use.
   *
   * @return the default
   */
  @Override
  protected EvaluationStatisticsExporter getDefaultExporter() {
    return new TabSeparated();
  }

  /**
   * Returns the default keys used for aggregation
   * (the comparator's default keys joined with blanks).
   *
   * @return the default
   */
  protected String getDefaultAggregationKeys() {
    StringBuilder result;

    result = new StringBuilder();
    for (String key: EvaluationStatisticsComparator.DEFAULT_KEYS) {
      if (result.length() > 0)
        result.append(" ");
      result.append(key);
    }

    return result.toString();
  }

  /**
   * Sets the blank-separated list of keys to use for aggregating.
   *
   * @param keys the keys (blank-separated)
   */
  @Override
  public void setAggregationKeys(String keys) {
    m_AggregationKeys = keys;
  }

  /**
   * Returns the blank-separated list of keys used for aggregating.
   *
   * @return the keys (blank-separated)
   */
  @Override
  public String getAggregationKeys() {
    return m_AggregationKeys;
  }

  /**
   * Describes this property.
   *
   * @return the description
   */
  @Override
  public String aggregationKeysTipText() {
    return "The keys to use for aggregating the statistics (blank-separated).";
  }

  /**
   * Returns the default suffix for the count.
   *
   * @return the default
   */
  protected String getDefaultSuffixCount() {
    return SUFFIX_COUNT;
  }

  /**
   * Sets the suffix for the count.
   *
   * @param value the suffix
   */
  public void setSuffixCount(String value) {
    m_SuffixCount = value;
  }

  /**
   * Returns the suffix for the count.
   *
   * @return the suffix
   */
  public String getSuffixCount() {
    return m_SuffixCount;
  }

  /**
   * Describes this property.
   *
   * @return the description
   */
  public String suffixCountTipText() {
    return "The suffix for the 'count' statistic.";
  }

  /**
   * Returns the default suffix for the mean.
   *
   * @return the default
   */
  protected String getDefaultSuffixMean() {
    return SUFFIX_MEAN;
  }

  /**
   * Sets the suffix for the mean.
   *
   * @param value the suffix
   */
  public void setSuffixMean(String value) {
    m_SuffixMean = value;
  }

  /**
   * Returns the suffix for the mean.
   *
   * @return the suffix
   */
  public String getSuffixMean() {
    return m_SuffixMean;
  }

  /**
   * Describes this property.
   *
   * @return the description
   */
  public String suffixMeanTipText() {
    return "The suffix for the 'mean' statistic.";
  }

  /**
   * Returns the default suffix for the stddev.
   *
   * @return the default
   */
  protected String getDefaultSuffixStdDev() {
    return SUFFIX_STDEV;
  }

  /**
   * Sets the suffix for the stddev.
   *
   * @param value the suffix
   */
  public void setSuffixStdDev(String value) {
    m_SuffixStdDev = value;
  }

  /**
   * Returns the suffix for the stddev.
   *
   * @return the suffix
   */
  public String getSuffixStdDev() {
    return m_SuffixStdDev;
  }

  /**
   * Describes this property.
   *
   * @return the description
   */
  public String suffixStdDevTipText() {
    return "The suffix for the 'stddev' statistic.";
  }

  /**
   * Sets whether to skip the count.
   *
   * @param value true if to skip
   */
  public void setSkipCount(boolean value) {
    m_SkipCount = value;
  }

  /**
   * Returns whether to skip the count.
   *
   * @return true if to skip
   */
  public boolean getSkipCount() {
    return m_SkipCount;
  }

  /**
   * Describes this property.
   *
   * @return the description
   */
  public String skipCountTipText() {
    return "If enabled, the count is skipped, ie not output.";
  }

  /**
   * Sets whether to skip the mean.
   *
   * @param value true if to skip
   */
  public void setSkipMean(boolean value) {
    m_SkipMean = value;
  }

  /**
   * Returns whether to skip the mean.
   *
   * @return true if to skip
   */
  public boolean getSkipMean() {
    return m_SkipMean;
  }

  /**
   * Describes this property.
   *
   * @return the description
   */
  public String skipMeanTipText() {
    return "If enabled, the mean is skipped, ie not output.";
  }

  /**
   * Sets whether to skip the stdDev.
   *
   * @param value true if to skip
   */
  public void setSkipStdDev(boolean value) {
    m_SkipStdDev = value;
  }

  /**
   * Returns whether to skip the stdDev.
   *
   * @return true if to skip
   */
  public boolean getSkipStdDev() {
    return m_SkipStdDev;
  }

  /**
   * Describes this property.
   *
   * @return the description
   */
  public String skipStdDevTipText() {
    return "If enabled, the standard deviation is skipped, ie not output.";
  }

  /**
   * Returns an enumeration of all the available options..
   *
   * @return an enumeration of all available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector result = new Vector();
    OptionUtils.add(result, super.listOptions());
    OptionUtils.addOption(result, aggregationKeysTipText(), getDefaultAggregationKeys(), "key");
    OptionUtils.addOption(result, suffixCountTipText(), getDefaultSuffixCount(), "suffix-count");
    OptionUtils.addOption(result, suffixMeanTipText(), getDefaultSuffixMean(), "suffix-mean");
    OptionUtils.addOption(result, suffixStdDevTipText(), getDefaultSuffixStdDev(), "suffix-stddev");
    OptionUtils.addOption(result, skipCountTipText(), "no", "skip-count");
    OptionUtils.addOption(result, skipMeanTipText(), "no", "skip-mean");
    OptionUtils.addOption(result, skipStdDevTipText(), "no", "skip-stddev");
    return OptionUtils.toEnumeration(result);
  }

  /**
   * Sets the options.
   *
   * @param options the options to parse
   * @throws Exception if parsing fails
   */
  @Override
  public void setOptions(String[] options) throws Exception {
    setAggregationKeys(OptionUtils.parse(options, "key", getDefaultAggregationKeys()));
    setSuffixCount(OptionUtils.parse(options, "suffix-count", getDefaultSuffixCount()));
    setSuffixMean(OptionUtils.parse(options, "suffix-mean", getDefaultSuffixMean()));
    setSuffixStdDev(OptionUtils.parse(options, "suffix-stddev", getDefaultSuffixStdDev()));
    setSkipCount(Utils.getFlag("skip-count", options));
    setSkipMean(Utils.getFlag("skip-mean", options));
    setSkipStdDev(Utils.getFlag("skip-stddev", options));
    super.setOptions(options);
  }

  /**
   * Returns the options.
   *
   * @return the current options
   */
  @Override
  public String[] getOptions() {
    List<String> result = new ArrayList<>();
    OptionUtils.add(result, super.getOptions());
    OptionUtils.add(result, "key", getAggregationKeys());
    OptionUtils.add(result, "suffix-count", getSuffixCount());
    OptionUtils.add(result, "suffix-mean", getSuffixMean());
    OptionUtils.add(result, "suffix-stddev", getSuffixStdDev());
    OptionUtils.add(result, "skip-count", getSkipCount());
    OptionUtils.add(result, "skip-mean", getSkipMean());
    OptionUtils.add(result, "skip-stddev", getSkipStdDev());
    return OptionUtils.toArray(result);
  }

  /**
   * Calculates the actual aggregates. For each numeric statistic, a mean, stdev and count column get generated.
   *
   * @param stats the statistics to aggregate; callers only pass in non-empty
   *              groups that compare equal on the aggregation keys
   * @return the aggregated values
   */
  protected EvaluationStatistics doAggregate(List<EvaluationStatistics> stats) {
    EvaluationStatistics result;
    List<String> keys;
    TDoubleArrayList values;

    // classifier/relation are representative for the group, taken from the first element
    result = new EvaluationStatistics(stats.get(0).getClassifier(), stats.get(0).getRelation(), null);

    // collect all stats
    keys = EvaluationStatisticsUtils.keys(stats, false);

    // collect values
    for (String key: keys) {
      values = new TDoubleArrayList();
      for (EvaluationStatistics stat: stats) {
        if (stat.containsKey(key))
          values.add(stat.get(key).doubleValue());
      }
      // only emit aggregate columns for keys that occurred at least once
      if (values.size() > 0) {
        if (!m_SkipCount)
          result.put(key + m_SuffixCount, values.size());
        if (!m_SkipMean)
          result.put(key + m_SuffixMean, Utils.mean(values.toArray()));
        if (!m_SkipStdDev)
          result.put(key + m_SuffixStdDev, Math.sqrt(Utils.variance(values.toArray())));
      }
    }

    return result;
  }

  /**
   * Aggregates the statistics and returns these.
   * On failure, logs the exception and returns the (sorted) input statistics.
   *
   * @param stats the statistics to aggregate
   * @return the aggregated stats
   */
  @Override
  public List<EvaluationStatistics> aggregate(List<EvaluationStatistics> stats) {
    List<EvaluationStatistics> result;
    List<EvaluationStatistics> temp;
    EvaluationStatisticsComparator comp;
    int i;

    try {
      // work on a copy so sorting does not disturb the caller's list
      stats = new ArrayList<>(stats);
      result = new ArrayList<>();
      comp = new EvaluationStatisticsComparator(Utils.splitOptions(m_AggregationKeys));
      // sort
      Collections.sort(stats, comp);
      // create groups and aggregate them
      // NB: when a new group starts, i is deliberately NOT advanced -- the
      // element is picked up on the next iteration once temp has been cleared
      i = 0;
      temp = new ArrayList<>();
      while (i < stats.size()) {
        if ((temp.size() == 0) || (comp.compare(temp.get(temp.size() - 1), stats.get(i)) == 0)) {
          temp.add(stats.get(i));
          i++;
        }
        else {
          result.add(doAggregate(temp));
          temp.clear();
        }
      }
      // flush the trailing group
      if (temp.size() > 0)
        result.add(doAggregate(temp));
    }
    catch (Exception e) {
      result = stats;
      handleException("Failed to aggregate!", e);
    }

    return result;
  }

  /**
   * Exports the statistics: aggregates them first, then delegates to the base exporter.
   *
   * @param stats the statistics to export
   * @return null if successfully exported, otherwise error message
   */
  @Override
  protected String doExport(List<EvaluationStatistics> stats) {
    return m_Exporter.export(aggregate(stats));
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/statisticsexporters/TabSeparated.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* TabSeparated.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.statisticsexporters;
import meka.core.ExceptionUtils;
import meka.core.FileUtils;
import meka.experiment.evaluationstatistics.EvaluationStatistics;
import meka.experiment.evaluationstatistics.EvaluationStatisticsUtils;
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.util.List;
/**
* Exports the statistics to a tab-separated file.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class TabSeparated
extends AbstractFileBasedEvaluationStatisticsExporter {
private static final long serialVersionUID = -2891664931765964612L;
/**
* Description to be displayed in the GUI.
*
* @return the description
*/
@Override
public String globalInfo() {
return "Exports the statistics to a tab-separated file.";
}
/**
* Returns the format description.
*
* @return the file format
*/
@Override
public String getFormatDescription() {
return "Tab-separated";
}
/**
* Returns the format extension(s).
*
* @return the extension(s) (incl dot)
*/
@Override
public String[] getFormatExtensions() {
return new String[]{".tsv"};
}
/**
* Exports the statistics.
*
* @param stats the statistics to export
* @return null if successfully exported, otherwise error message
*/
@Override
protected String doExport(List<EvaluationStatistics> stats) {
String result;
List<String> headers;
FileWriter fwriter;
BufferedWriter bwriter;
int i;
result = null;
headers = EvaluationStatisticsUtils.headers(stats, true, true);
fwriter = null;
bwriter = null;
try {
fwriter = new FileWriter(m_File);
bwriter = new BufferedWriter(fwriter);
// output header
for (i = 0; i < headers.size(); i++) {
if (i > 0)
bwriter.write("\t");
bwriter.write(headers.get(i));
}
bwriter.newLine();
bwriter.flush();
// output statistics
for (EvaluationStatistics stat: stats) {
for (i = 0; i < headers.size(); i++) {
if (i > 0)
bwriter.write("\t");
if (i == 0) {
bwriter.write(stat.getCommandLine());
}
else if (i == 1) {
bwriter.write(stat.getRelation());
}
else {
if (stat.containsKey(headers.get(i)))
bwriter.write("" + stat.get(headers.get(i)));
}
}
bwriter.newLine();
bwriter.flush();
}
}
catch (Exception e) {
result = handleException("Failed to export statistics to: " + m_File, e);
}
finally {
FileUtils.closeQuietly(bwriter);
FileUtils.closeQuietly(fwriter);
}
return result;
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/statisticsexporters/TabSeparatedMeasurement.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
 * TabSeparatedMeasurement.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.statisticsexporters;
import meka.core.FileUtils;
import meka.experiment.evaluationstatistics.EvaluationStatistics;
import meka.experiment.evaluationstatistics.EvaluationStatisticsUtils;
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.util.List;
/**
* Exports a single statistic to a tab-separated file.
* First column are datasets, first row are classifiers.
* Automatically aggregates the statistics and displays the "mean".
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class TabSeparatedMeasurement
extends AbstractMeasurementEvaluationStatisticsExporter {
private static final long serialVersionUID = -2891664931765964612L;
/**
* Description to be displayed in the GUI.
*
* @return the description
*/
@Override
public String globalInfo() {
return "Exports a single statistic to a tab-separated file.\n"
+ "First column are datasets, first row are classifiers.\n"
+ "Automatically aggregates the statistics and displays the 'mean'.";
}
/**
* Returns the format description.
*
* @return the file format
*/
@Override
public String getFormatDescription() {
return "Tab-separated statistic";
}
/**
* Returns the format extension(s).
*
* @return the extension(s) (incl dot)
*/
@Override
public String[] getFormatExtensions() {
return new String[]{".tsv"};
}
/**
 * Exports the aggregated statistics as a tab-separated matrix: one row per
 * dataset (relation), one column per classifier, followed by a key section
 * that maps the "[i]" column indices back to classifier command-lines.
 *
 * @param stats the aggregated statistics to export
 * @return null if successfully exported, otherwise error message
 */
@Override
protected String doExportAggregated(List<EvaluationStatistics> stats) {
  String result = null;
  List<String> classifiers = EvaluationStatisticsUtils.commandLines(stats, true);
  List<String> relations = EvaluationStatisticsUtils.relations(stats, true);

  // try-with-resources guarantees the writer is closed on every exit path;
  // the catch clause below also sees exceptions thrown while closing
  // (the previous finally/closeQuietly silently discarded close failures)
  try (BufferedWriter bwriter = new BufferedWriter(new FileWriter(m_File))) {
    // header row: measurement name, then one "[i]" placeholder per classifier
    bwriter.write(m_Measurement);
    for (int i = 0; i < classifiers.size(); i++) {
      bwriter.write("\t");
      bwriter.write("[" + (i + 1) + "]");
    }
    bwriter.newLine();
    bwriter.flush();

    // statistics: one row per dataset, first measurement per classifier/relation pair
    for (String relation : relations) {
      bwriter.write(relation);
      for (int i = 0; i < classifiers.size(); i++) {
        bwriter.write("\t");
        List<Number> measurements = EvaluationStatisticsUtils.measurements(stats, classifiers.get(i), relation, m_Measurement);
        if (measurements.size() > 0) {
          // aggregated statistics should yield a single value; log when they don't
          if (measurements.size() > 1)
            log("Found " + measurements.size() + " measurements for combination " + classifiers.get(i) + "/" + relation);
          bwriter.write("" + measurements.get(0));
        }
      }
      bwriter.newLine();
      bwriter.flush();
    }

    // key section: map the "[i]" indices back to classifier command-lines
    bwriter.newLine();
    bwriter.write("Index");
    bwriter.write("\t");
    bwriter.write("Classifier");
    bwriter.newLine();
    for (int i = 0; i < classifiers.size(); i++) {
      bwriter.write("[" + (i + 1) + "]");
      bwriter.write("\t");
      bwriter.write(classifiers.get(i));
      bwriter.newLine();
    }
  }
  catch (Exception e) {
    result = handleException("Failed to export statistics to: " + m_File, e);
  }

  return result;
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/statisticsexporters/WekaFilter.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* WekaFilter.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.statisticsexporters;
import meka.classifiers.multilabel.MultiLabelClassifier;
import meka.core.OptionUtils;
import meka.experiment.evaluationstatistics.EvaluationStatistics;
import meka.experiment.evaluationstatistics.EvaluationStatisticsUtils;
import weka.core.*;
import weka.filters.AllFilter;
import weka.filters.Filter;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.Vector;
/**
* Applies a Weka filter to the data.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class WekaFilter
  extends AbstractMetaEvaluationStatisticsExporter {

  private static final long serialVersionUID = 3442329448692418564L;

  /** the filter to apply to the data before passing it on to the base exporter. */
  protected Filter m_Filter = getDefaultFilter();

  /**
   * Description to be displayed in the GUI.
   *
   * @return the description
   */
  @Override
  public String globalInfo() {
    return
      "Applies the specified Weka filter to the statistics before passing them on to the base exporter.\n"
      + "This allows you to remove attributes, filter instances, etc.";
  }

  /**
   * Returns the default filter to use.
   *
   * @return the default
   */
  protected Filter getDefaultFilter() {
    return new AllFilter();
  }

  /**
   * Returns the default exporter to use.
   *
   * @return the default
   */
  @Override
  protected EvaluationStatisticsExporter getDefaultExporter() {
    return new TabSeparated();
  }

  /**
   * Sets the filter to use.
   *
   * @param value the filter
   */
  public void setFilter(Filter value) {
    m_Filter = value;
  }

  /**
   * Returns the filter to use.
   *
   * @return the filter
   */
  public Filter getFilter() {
    return m_Filter;
  }

  /**
   * Describes this property.
   *
   * @return the description
   */
  public String filterTipText() {
    return "The Weka filter to apply to the data.";
  }

  /**
   * Returns an enumeration of all the available options.
   *
   * @return an enumeration of all available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector result = new Vector();
    OptionUtils.add(result, super.listOptions());
    OptionUtils.addOption(result, filterTipText(), getDefaultFilter().getClass().getName(), "filter");
    return OptionUtils.toEnumeration(result);
  }

  /**
   * Sets the options.
   *
   * @param options the options to parse
   * @throws Exception if parsing fails
   */
  @Override
  public void setOptions(String[] options) throws Exception {
    setFilter((Filter) OptionUtils.parse(options, "filter", getDefaultFilter()));
    super.setOptions(options);
  }

  /**
   * Returns the options.
   *
   * @return the current options
   */
  @Override
  public String[] getOptions() {
    List<String> result = new ArrayList<>();
    OptionUtils.add(result, super.getOptions());
    OptionUtils.add(result, "filter", getFilter());
    return OptionUtils.toArray(result);
  }

  /**
   * Turns the statistics into Instances: one row per statistics object, one
   * column per header. The classifier command-line and the relation name
   * become string attributes; every other header becomes a numeric attribute.
   *
   * @param stats the statistics to convert
   * @return the generated data
   */
  protected Instances toInstances(List<EvaluationStatistics> stats) {
    Instances result;
    ArrayList<Attribute> atts;
    List<String> headers;
    Instance inst;
    double[] values;
    int i;

    // header
    headers = EvaluationStatisticsUtils.headers(stats, true, true);
    atts = new ArrayList<>();
    for (String header: headers) {
      if (header.equals(EvaluationStatistics.KEY_CLASSIFIER) || header.equals(EvaluationStatistics.KEY_RELATION))
        atts.add(new Attribute(header, (List) null));  // string attribute
      else
        atts.add(new Attribute(header));  // numeric attribute
    }
    result = new Instances("stats", atts, stats.size());

    // data
    for (EvaluationStatistics stat: stats) {
      values = new double[result.numAttributes()];
      for (i = 0; i < values.length; i++) {
        if (headers.get(i).equals(EvaluationStatistics.KEY_CLASSIFIER))
          values[i] = result.attribute(i).addStringValue(stat.getCommandLine());
        else if (headers.get(i).equals(EvaluationStatistics.KEY_RELATION))
          values[i] = result.attribute(i).addStringValue(stat.getRelation());
        else if (stat.containsKey(headers.get(i)))
          values[i] = stat.get(headers.get(i)).doubleValue();
        else
          values[i] = Utils.missingValue();  // statistic absent for this combination
      }
      inst = new DenseInstance(1.0, values);
      result.add(inst);
    }

    return result;
  }

  /**
   * Converts the Instances back into statistics; the inverse of
   * {@link #toInstances(List)}. Rows whose classifier command-line cannot be
   * parsed are skipped (an error gets logged); if either key attribute is
   * missing from the data, an empty list is returned.
   *
   * @param data the data to convert
   * @return the generated statistics
   */
  protected List<EvaluationStatistics> fromInstances(Instances data) {
    List<EvaluationStatistics> result;
    EvaluationStatistics stat;
    MultiLabelClassifier cls;
    String rel;
    int i;
    int n;
    Instance inst;

    result = new ArrayList<>();

    // both key attributes must have survived the filtering
    if (data.attribute(EvaluationStatistics.KEY_CLASSIFIER) == null) {
      log("Failed to locate attribute: " + EvaluationStatistics.KEY_CLASSIFIER);
      return result;
    }
    if (data.attribute(EvaluationStatistics.KEY_RELATION) == null) {
      log("Failed to locate attribute: " + EvaluationStatistics.KEY_RELATION);
      return result;
    }

    for (i = 0; i < data.numInstances(); i++) {
      inst = data.instance(i);
      try {
        cls = OptionUtils.fromCommandLine(MultiLabelClassifier.class, inst.stringValue(data.attribute(EvaluationStatistics.KEY_CLASSIFIER)));
        rel = inst.stringValue(data.attribute(EvaluationStatistics.KEY_RELATION));
        stat = new EvaluationStatistics(cls, rel, null);
        // only numeric, non-missing attributes carry statistic values
        for (n = 0; n < inst.numAttributes(); n++) {
          if (inst.attribute(n).isNumeric() && !inst.isMissing(n)) {
            stat.put(inst.attribute(n).name(), inst.value(n));
          }
        }
        result.add(stat);
      }
      catch (Exception e) {
        handleException("Failed to process instance: " + inst, e);
      }
    }

    return result;
  }

  /**
   * Filters the statistics using the specified filter: converts them to
   * Instances, applies a fresh copy of the configured filter, and converts
   * the result back. If anything fails, the error is logged and the
   * statistics are returned unfiltered.
   *
   * @param stats the stats to filter
   * @return the filtered stats
   */
  protected List<EvaluationStatistics> filter(List<EvaluationStatistics> stats) {
    Instances data;
    Instances filtered;
    Filter filter;

    try {
      data = toInstances(stats);
      // work on a copy so the configured template filter accumulates no state
      filter = Filter.makeCopy(m_Filter);
      filter.setInputFormat(data);
      filtered = Filter.useFilter(data, filter);
      stats = fromInstances(filtered);
    }
    catch (Exception e) {
      handleException("Failed to filter statistics!", e);
    }

    return stats;
  }

  /**
   * Exports the statistics.
   *
   * @param stats the statistics to export
   * @return null if successfully exported, otherwise error message
   */
  @Override
  protected String doExport(List<EvaluationStatistics> stats) {
    return m_Exporter.export(filter(stats));
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/filters
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/filters/multilabel/SuperNodeFilter.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.filters.multilabel;
import meka.core.SuperLabelUtils;
import weka.core.*;
import meka.core.MLUtils;
import meka.classifiers.multitarget.NSR;
import weka.filters.*;
import java.util.*;
import java.io.*; // for test routin main()
/**
* SuperNodeFilter.java - Super Class Filter.
*
* Input:<br>
* Data with label attributes, e.g., [0,1,2,3,4]<br>
* A desired partition of indices, e.g., [[1,3],[4],[0,2]], filter <br>
* Output:<br>
* New data with label attributes: [1+3,4,0+2]<br>
* (the values each attribute can take are pruned if necessary)<br>
*
* @author Jesse Read
* @version June 2012
*/
public class SuperNodeFilter extends SimpleBatchFilter {

  /** template instance in the merged output format (first instance after processing). */
  protected Instance x_template = null;

  /** pruning parameter p (rare value combinations) and subset parameter n. */
  protected int m_P = 0, m_N = 0;

  /** the label partition, e.g., [[1,3],[4],[0,2]]; each inner array is kept sorted. */
  protected int indices[][] = null;

  /**
   * Sets the label partition; each inner array is sorted in place.
   *
   * @param n the partition of label indices
   */
  public void setIndices(int n[][]) {
    for(int i = 0; i < n.length; i++) {
      Arrays.sort(n[i]); // always sorted!
    }
    this.indices = n;
  }

  /**
   * Sets the pruning value p.
   *
   * @param p the pruning value
   */
  public void setP(int p) {
    this.m_P = p;
  }

  /**
   * Returns the pruning value p.
   *
   * @return the pruning value
   */
  public int getP() {
    return this.m_P;
  }

  /**
   * Sets the subset value n.
   *
   * @param n the subset value
   */
  public void setN(int n) {
    this.m_N = n;
  }

  @Override
  public Instances determineOutputFormat(Instances D) throws Exception {
    //System.out.println("DETERMINE OUTPUT FORMAT = "+D.numInstances());
    Instances D_out = new Instances(D,0);
    int L = D.classIndex();
    // merging L labels into indices.length superlabels removes (L - indices.length) attributes
    for(int i = 0; i < L-indices.length; i++) {
      D_out.deleteAttributeAt(0);
    }
    return D_out;
  }

  /**
   * Returns the template instance (set by {@link #process(Instances)}).
   *
   * @return the template, null if not yet processed
   */
  public Instance getTemplate() {
    return x_template;
  }

  @Override
  public Instances process(Instances D) throws Exception {
    //System.out.println("PROCESS! = "+D.numInstances());
    int L = D.classIndex();
    D = new Instances(D); // D_
    // rename classes
    for(int j = 0; j < L; j++) {
      D.renameAttribute(j,encodeClass(j));
    }
    // merge labels
    D = mergeLabels(D,indices,m_P,m_N);
    // templates
    x_template = D.firstInstance();
    setOutputFormat(D);
    //System.out.println("PROCESS! => "+D);
    return D;
  }

  /** Joins the ints with the given delimiter, e.g., ([1,2],"+") -> "1+2". */
  private static String join(int objs[], final String delimiter) {
    if (objs == null || objs.length < 1)
      return "";
    StringBuffer buffer = new StringBuffer(String.valueOf(objs[0]));
    for(int j = 1; j < objs.length; j++) {
      buffer.append(delimiter).append(String.valueOf(objs[j]));
    }
    return buffer.toString();
  }

  /** (3,'_') -> "c_3" */
  public static String encodeClass(int j) {
    return "c_"+j;
  }

  /** ("c_3",'_') -> 3 */
  public static int decodeClass(String a) {
    //System.out.println(""+a);
    return Integer.parseInt(a.substring(a.indexOf('_')+1));
  }

  /** (["c_3","c_1"]) -> "c_3+1" */
  public static String encodeClass(String c_j, String c_k) {
    return "c_"+join(decodeClasses(c_j),"+")+"+"+join(decodeClasses(c_k),"+");
  }

  /** ([3,1]) -> "c_3+1" */
  public static String encodeClass(int c_[]) {
    String c = "c_";
    for(int j = 0; j < c_.length; j++) {
      c = c + c_[j] + "+";
    }
    c = c.substring(0,c.length()-1); // strip the trailing '+'
    return c;
  }

  /** ("c_3+1") -> [3,1] */
  public static int[] decodeClasses(String a) {
    String s[] = new String(a.substring(a.indexOf('_')+1)).split("\\+");
    int vals[] = new int[s.length];
    for(int j = 0; j < vals.length; j++) {
      vals[j] = Integer.parseInt(s[j]);
    }
    return vals;
  }

  /** (3,1) -> "3+1" */
  public static String encodeValue(String v_j, String v_k) {
    return String.valueOf(v_j)+"+"+String.valueOf(v_k);
  }

  /** (3,1,2) -> "3+1+2" */
  public static String encodeValue(Instance x, int indices[]) {
    String v = "";
    for(int j = 0; j < indices.length; j++) {
      v+=x.stringValue(indices[j])+"+";
    }
    v = v.substring(0,v.length()-1); // strip the trailing '+'
    return v;
  }

  /** "C+A+B" -> ["C","A","B"] */
  public static String[] decodeValue(String a) {
    return a.split("\\+");
  }

  /**
   * Return a set of all the combinations of attributes at 'indices' in 'D', pruned by 'p'; e.g., {00,01,11}.
   */
  public static Set<String> getValues(Instances D, int indices[], int p) {
    HashMap<String,Integer> count = getCounts(D, indices, p);
    return count.keySet();
  }

  /**
   * Return a set of all the combinations of attributes at 'indices' in 'D', pruned by 'p'; AND THEIR COUNTS, e.g., {(00:3),(01:8),(11:3))}.
   */
  public static HashMap<String,Integer> getCounts(Instances D, int indices[], int p) {
    HashMap<String,Integer> count = new HashMap<String,Integer>();
    for(int i = 0; i < D.numInstances(); i++) {
      String v = encodeValue(D.instance(i), indices);
      count.put(v, count.containsKey(v) ? count.get(v) + 1 : 1);
    }
    // prune rare combinations according to 'p' (see MLUtils.pruneCountHashMap)
    MLUtils.pruneCountHashMap(count,p);
    return count;
  }

  /**
   * Merge Labels - Make a new 'D', with labels made into superlabels, according to partition 'indices', and pruning values 'p' and 'n'.
   * @param D assume attributes in D labeled by original index
   * @return Instances with the K merged superlabel attributes at positions 0..K-1, with classIndex = K
   */
  public static Instances mergeLabels(Instances D, int indices[][], int p, int n) {

    int L = D.classIndex();
    int K = indices.length;
    ArrayList<String> values[] = new ArrayList[K];
    HashMap<String,Integer> counts[] = new HashMap[K];

    // create D_
    Instances D_ = new Instances(D);

    // clear D_ : remove all original label attributes (merged ones get inserted below)
    for(int j = 0; j < L; j++) {
      D_.deleteAttributeAt(0);
    }

    // create atts: one nominal attribute per partition, values = surviving combinations
    for(int j = 0; j < K; j++) {
      int att[] = indices[j];
      //int values[] = new int[2]; //getValues(indices,D,p);
      counts[j] = getCounts(D,att,p);
      Set<String> vals = counts[j].keySet(); //getValues(D,att,p);
      values[j] = new ArrayList(vals);
      D_.insertAttributeAt(new Attribute(encodeClass(att),new ArrayList(vals)),j);
    }

    // copy over values
    ArrayList<Integer> deleteList = new ArrayList<Integer>();
    for(int i = 0; i < D.numInstances(); i++) {
      Instance x = D.instance(i);
      for(int j = 0; j < K; j++) {
        String y = encodeValue(x,indices[j]);
        try {
          D_.instance(i).setValue(j,y); // y =
        } catch(Exception e) {
          // value not allowed (was pruned away above)
          deleteList.add(i); // mark it for deletion
          // replace the instance with weighted copies carrying the closest surviving subsets
          String y_close[] = NSR.getTopNSubsets(y,counts[j],n); // get N subsets
          for(int m = 0; m < y_close.length; m++) {
            //System.out.println("add "+y_close[m]+" "+counts[j]);
            Instance x_copy = (Instance)D_.instance(i).copy();
            x_copy.setValue(j,y_close[m]);
            x_copy.setWeight(1.0/y_close.length);
            D_.add(x_copy);
          }
        }
      }
    }

    // clean up: delete marked instances in descending index order so earlier
    // removals do not shift the indices still pending deletion
    // NOTE(review): if one instance fails for several superlabels it is added to
    // deleteList more than once, causing extra rows to be deleted here -- confirm intended
    Collections.sort(deleteList,Collections.reverseOrder());
    //System.out.println("Deleting "+deleteList.size()+" defunct instances.");
    for (int i : deleteList) {
      D_.delete(i);
    }

    // set class
    D_.setClassIndex(K);
    // done!
    D = null;
    return D_;
  }

  /**
   * Merge Labels.
   *
   * @param j index 1 (assume that <code>j < k</code>)
   * @param k index 2 (assume that <code>j < k</code>)
   * @param D Instances, with attributes in labeled by original index
   * @return Instances with attributes at j and k moved to position L as (j,k), with classIndex = L-1
   */
  public static Instances mergeLabels(Instances D, int j, int k, int p) {

    int L = D.classIndex();
    HashMap<String,Integer> count = new HashMap<String,Integer>();
    Set<String> values = new HashSet<String>();
    for(int i = 0; i < D.numInstances(); i++) {
      String v = encodeValue(D.instance(i).stringValue(j),D.instance(i).stringValue(k));
      String w = ""+(int)D.instance(i).value(j)+(int)D.instance(i).value(k); // debug only (see println below)
      //System.out.println("w = "+w);
      count.put(v,count.containsKey(v) ? count.get(v) + 1 : 1);
      values.add(encodeValue(D.instance(i).stringValue(j),D.instance(i).stringValue(k)));
    }
    //System.out.println("("+j+","+k+")"+values);
    System.out.print("pruned from "+count.size()+" to ");
    MLUtils.pruneCountHashMap(count,p);
    String y_max = (String)MLUtils.argmax(count); // @todo won't need this in the future
    System.out.println(""+count.size()+" with p = "+p);
    System.out.println(""+count);
    values = count.keySet();

    // Create and insert the new attribute
    D.insertAttributeAt(new Attribute(encodeClass(D.attribute(j).name(),D.attribute(k).name()),new ArrayList(values)),L);

    // Set values for the new attribute
    for(int i = 0; i < D.numInstances(); i++) {
      Instance x = D.instance(i);
      String y_jk = encodeValue(x.stringValue(j),x.stringValue(k));
      try {
        x.setValue(L,y_jk); // y_jk =
      } catch(Exception e) {
        //x.setMissing(L);
        //D.delete(i);
        //i--;
        String y_close[] = getNeighbours(y_jk,count,1); // A+B+NEG, A+C+NEG
        //System.out.println("OK, that value ("+y_jk+") didn't exist ... set the closests ones ...: "+Arrays.toString(y_close));
        int max_c = 0;
        for (String y_ : y_close) {
          int c = count.get(y_);
          if (c > max_c) {
            max_c = c;
            y_max = y_;
          }
        }
        //System.out.println("we actually found "+Arrays.toString(y_close)+" but will only set one for now (the one with the highest count) : "+y_max+" ...");
        x.setValue(L,y_max);
        // ok, that value didn't exist, set the maximum one (@TODO: set the nearest one)
      }
    }

    // Delete separate attributes (higher index first so the lower one stays valid)
    D.deleteAttributeAt(k > j ? k : j);
    D.deleteAttributeAt(k > j ? j : k);

    // Set class index
    D.setClassIndex(L-1);

    return D;
  }

  /**
   * GetNeighbours - return from set S, label-vectors closest to y, having no more different than 'n' bits different.
   */
  public static String[] getNeighbours(String y, ArrayList<String> S, int n) {
    String ya[] = decodeValue(y);
    ArrayList<String> Y = new ArrayList<String>();
    for(String y_ : S) {
      if(MLUtils.bitDifference(ya,decodeValue(y_)) <= n) {
        Y.add(y_);
      }
    }
    return (String[])Y.toArray(new String[Y.size()]);
  }

  /**
   * GetNeighbours - return from set S (the keySet of HashMap C), label-vectors closest to y, having no more different than 'n' bits different.
   */
  public static String[] getNeighbours(String y, HashMap <String,Integer>C, int n) {
    return getNeighbours(y,new ArrayList<String>(C.keySet()),n);
  }

  /** random seed. NOTE(review): not referenced anywhere in this class. */
  protected int m_Seed = 0;

  @Override
  public String globalInfo() {
    return "A SuperNode Filter";
  }

  /**
   * Command-line test routine: -i &lt;arff file&gt; -c &lt;class index&gt;.
   */
  public static void main(String[] argv) {
    try {
      String fname = Utils.getOption('i',argv);
      Instances D = new Instances(new BufferedReader(new FileReader(fname)));
      SuperNodeFilter f = new SuperNodeFilter();
      int c = Integer.parseInt(Utils.getOption('c',argv));
      D.setClassIndex(c);
      System.out.println(""+f.process(D));
      //runFilter(new SuperNodeFilter(), argv);
    } catch(Exception e) {
      System.err.println("");
      e.printStackTrace();
      //System.exit(1);
    }
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/filters/unsupervised
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/filters/unsupervised/attribute/MekaClassAttributes.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* MekaClassAttributes.java
* Copyright (C) 2012 University of Waikato, Hamilton, New Zealand
*/
package meka.filters.unsupervised.attribute;
import java.util.Enumeration;
import java.util.Vector;
import weka.filters.unsupervised.attribute.Reorder;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.Range;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.core.WekaException;
import weka.filters.SimpleStreamFilter;
/**
* Reorders attributes for MEKA to use as class attributes.
*
* @author fracpete (fracpete at waikato dot ac dot nz)
* @version $Revision: 66 $
*/
public class MekaClassAttributes
  extends SimpleStreamFilter {

  /** for serialization. */
  private static final long serialVersionUID = 6733841542030233313L;

  /** the range of attributes to use as class attributes. */
  protected Range m_AttributeIndices = new Range("last");

  /** for reordering the attributes (does the actual work). */
  protected Reorder m_Reorder = new Reorder();

  /**
   * Returns a string describing this filter.
   *
   * @return a description of the filter suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return
      "Reorders attributes for MEKA. Attribute range defines the "
      + "attributes to use as class attributes.";
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector newVector = new Vector();
    newVector.addElement(new Option(
      "\tSpecify list of columns to use as MEKA class attributes. 'first' and 'last' are valid\n"
      +"\tindexes. (default: last)",
      "R", 1, "-R <index1,index2-index4,...>"));
    return newVector.elements();
  }

  /**
   * Parses a given list of options.
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String orderList = Utils.getOption('R', options);
    if (orderList.length() != 0)
      setAttributeIndices(orderList);
    else
      setAttributeIndices("last"); // default when -R is absent
    // re-initialize with the new range if a format was already set
    if (getInputFormat() != null)
      setInputFormat(getInputFormat());
  }

  /**
   * Gets the current settings of the filter.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    String[] options = new String [2];
    int current = 0;
    if (!getAttributeIndices().equals("")) {
      options[current++] = "-R";
      options[current++] = getAttributeIndices();
    }
    // pad unused slots of the fixed-size array with empty strings
    while (current < options.length) {
      options[current++] = "";
    }
    return options;
  }

  /**
   * Set which attributes are to be used as MEKA class attributes.
   *
   * @param value a string representing the list of attributes. Since
   * the string will typically come from a user, attributes are indexed from
   * 1. <br>
   * eg: first-3,5,6-last<br>
   * Note: use this method before you call
   * <code>setInputFormat(Instances)</code>, since the output format is
   * determined in that method.
   * @throws Exception if an invalid range list is supplied
   */
  public void setAttributeIndices(String value) throws Exception {
    // simple test: only characters that can occur in a range expression are
    // allowed, i.e., digits, '-', ',' and the letters of "first"/"last"
    if (value.replaceAll("[afilrst0-9\\-,]*", "").length() != 0)
      throw new IllegalArgumentException("Not a valid range string!");
    m_AttributeIndices.setRanges(value);
  }

  /**
   * Get the current range selection
   *
   * @return a string containing a comma separated list of ranges
   */
  public String getAttributeIndices() {
    return m_AttributeIndices.getRanges();
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String attributeIndicesTipText() {
    return "Specify range of attributes to use as MEKA class attributes."
      + " This is a comma separated list of attribute indices, with"
      + " \"first\" and \"last\" valid values. Specify an inclusive"
      + " range with \"-\". E.g: \"first-3,5,6-10,last\".";
  }

  /**
   * Returns the Capabilities of this filter.
   *
   * @return the capabilities of this object
   * @see Capabilities
   */
  public Capabilities getCapabilities() {
    Capabilities result;

    // delegate to the internal Reorder filter, which does the actual work
    result = m_Reorder.getCapabilities();
    result.setOwner(this);

    return result;
  }

  /**
   * Determines the output format based on the input format and returns
   * this. In case the output format cannot be returned immediately, i.e.,
   * hasImmediateOutputFormat() returns false, then this method will called
   * from batchFinished() after the call of preprocess(Instances), in which,
   * e.g., statistics for the actual processing step can be gathered.
   *
   * @param inputFormat the input format to base the output format on
   * @return the output format
   * @throws Exception in case the determination goes wrong
   */
  protected Instances determineOutputFormat(Instances inputFormat) throws Exception {
    int i;
    int[] indices;
    StringBuilder order;
    Instances output;

    m_AttributeIndices.setUpper(inputFormat.numAttributes() - 1);

    order = new StringBuilder();
    indices = m_AttributeIndices.getSelection();
    if (indices.length == 0)
      throw new WekaException("No attributes defined as class attributes!");
    // selected class attributes come first (Reorder indices are 1-based)
    for (i = 0; i < indices.length; i++) {
      if (i > 0)
        order.append(",");
      order.append("" + (indices[i]+1));
    }
    // ... followed by all remaining attributes in their original order
    for (i = 0; i < inputFormat.numAttributes(); i++) {
      if (m_AttributeIndices.isInRange(i))
        continue;
      order.append(",");
      order.append("" + (i+1));
    }
    m_Reorder.setAttributeIndices(order.toString());
    m_Reorder.setInputFormat(inputFormat);

    output = m_Reorder.getOutputFormat();
    output.setClassIndex(indices.length);
    // MEKA convention: the relation name carries the number of class attributes
    output.setRelationName("-C " + indices.length);

    return output;
  }

  /**
   * processes the given instance (may change the provided instance) and
   * returns the modified version.
   *
   * @param instance the instance to process
   * @return the modified data
   * @throws Exception in case the processing goes wrong
   */
  protected Instance process(Instance instance) throws Exception {
    // push the instance through the configured Reorder filter
    m_Reorder.input(instance);
    m_Reorder.batchFinished();
    return m_Reorder.output();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 66 $");
  }

  /**
   * runs the filter with the given arguments.
   *
   * @param args the commandline arguments
   */
  public static void main(String[] args) {
    runFilter(new MekaClassAttributes(), args);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/mekaexamples
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/mekaexamples/classifiers/CrossValidate.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* CrossValidate.java
* Copyright (C) 2016 University of Waikato, Hamilton, NZ
*/
package mekaexamples.classifiers;
import meka.classifiers.multilabel.BR;
import meka.classifiers.multilabel.Evaluation;
import meka.core.MLUtils;
import meka.core.Result;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;
/**
* Cross-validates a BR Meka classifier on a dataset supplied by the user.
* <br>
* Expected parameters: <dataset>
* <br>
* Note: The dataset must have been prepared for Meka already.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class CrossValidate {

  /**
   * Cross-validates a BR classifier on the dataset given on the command-line.
   *
   * @param args expects exactly one element: the dataset file
   * @throws Exception if loading, preparation or evaluation fails
   */
  public static void main(String[] args) throws Exception {
    if (args.length != 1)
      throw new IllegalArgumentException("Required arguments: <dataset>");

    System.out.println("Loading data: " + args[0]);
    Instances dataset = DataSource.read(args[0]);
    MLUtils.prepareData(dataset);

    int numFolds = 10;
    System.out.println("Cross-validate BR classifier using " + numFolds + " folds");
    BR br = new BR();
    // configure the classifier further here if required
    String thresholdOption = "PCut1";
    String verbosityOption = "3";
    Result outcome = Evaluation.cvModel(br, dataset, numFolds, thresholdOption, verbosityOption);
    System.out.println(outcome);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/mekaexamples
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/mekaexamples/classifiers/ExportPredictionsOnTestSet.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* ExportPredictionsOnTestSet.java
* Copyright (C) 2016 University of Waikato, Hamilton, NZ
*/
package mekaexamples.classifiers;
import meka.classifiers.multilabel.BR;
import meka.classifiers.multilabel.Evaluation;
import meka.core.MLUtils;
import meka.core.Result;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSink;
import weka.core.converters.ConverterUtils.DataSource;
/**
* Builds and evaluates a BR Meka classifier on user supplied train/test datasets
* and outputs the predictions on the test to the specified file
* (ARFF or CSV, auto-detect based on extension).
* <br>
 * Expected parameters: &lt;train&gt; &lt;test&gt; &lt;output&gt;
* <br>
* Note: The datasets must have been prepared for Meka already and compatible.
* The format of the output file is determined by its extension
* (eg .arff or .csv).
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class ExportPredictionsOnTestSet {

  /**
   * Trains BR on the training set, evaluates it on the test set and writes
   * the test-set predictions to the output file (format chosen by extension).
   *
   * @param args expects three elements: train file, test file, output file
   * @throws Exception if loading, training, evaluation or saving fails
   */
  public static void main(String[] args) throws Exception {
    if (args.length != 3)
      throw new IllegalArgumentException("Required arguments: <train> <test> <output>");

    System.out.println("Loading train: " + args[0]);
    Instances train = DataSource.read(args[0]);
    MLUtils.prepareData(train);

    System.out.println("Loading test: " + args[1]);
    Instances test = DataSource.read(args[1]);
    MLUtils.prepareData(test);

    // the two datasets must share the same structure
    String incompatibility = train.equalHeadersMsg(test);
    if (incompatibility != null)
      throw new IllegalStateException(incompatibility);

    System.out.println("Build BR classifier on " + args[0]);
    BR br = new BR();
    // configure the classifier further here if required
    br.buildClassifier(train);

    System.out.println("Evaluate BR classifier on " + args[1]);
    String thresholdOption = "PCut1";
    String verbosityOption = "3";
    Result evaluation = Evaluation.evaluateModel(br, train, test, thresholdOption, verbosityOption);
    System.out.println(evaluation);

    System.out.println("Saving predictions test set to " + args[2]);
    Instances predictions = Result.getPredictionsAsInstances(evaluation);
    DataSink.write(args[2], predictions);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/mekaexamples
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/mekaexamples/classifiers/JustBuild.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* JustBuild.java
* Copyright (C) 2016 University of Waikato, Hamilton, NZ
*/
package mekaexamples.classifiers;
import meka.classifiers.multilabel.BR;
import meka.core.MLUtils;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;
/**
* Builds a BR Meka classifier on a dataset supplied by the user.
* <br>
* Expected parameters: <dataset>
* <br>
* Note: The dataset must have been prepared for Meka already.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class JustBuild {

  /**
   * Loads the dataset given on the command-line and trains a BR classifier on it.
   *
   * @param args expects exactly one element: the dataset file
   * @throws Exception if loading, preparation or training fails
   */
  public static void main(String[] args) throws Exception {
    if (args.length != 1)
      throw new IllegalArgumentException("Required arguments: <dataset>");

    System.out.println("Loading data: " + args[0]);
    Instances dataset = DataSource.read(args[0]);
    MLUtils.prepareData(dataset);

    System.out.println("Build BR classifier");
    BR br = new BR();
    // configure the classifier further here if required
    br.buildClassifier(dataset);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/mekaexamples
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/mekaexamples/classifiers/TrainAndPredict.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* TrainAndPredict.java
* Copyright (C) 2017 University of Waikato, Hamilton, NZ
*/
package mekaexamples.classifiers;
import meka.classifiers.multilabel.BR;
import meka.core.MLUtils;
import weka.core.Instances;
import weka.core.Utils;
import weka.core.converters.ConverterUtils.DataSource;
/**
* Builds a BR Meka classifier on user supplied train dataset and outputs
* predictions on a supplied dataset with missing class values.
* <br>
* Expected parameters: <train> <predict>
* <br>
* Note: The datasets must have been prepared for Meka already and compatible.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class TrainAndPredict {

  /**
   * Builds BR on the first dataset and prints the label distribution that
   * the model produces for every instance of the second dataset.
   *
   * @param args first: train set filename, second: prediction set filename
   * @throws Exception if loading, building or predicting fails
   */
  public static void main(String[] args) throws Exception {
    // two parameters: training set and the set to predict on
    if (args.length != 2)
      throw new IllegalArgumentException("Required arguments: <train> <predict>");

    System.out.println("Loading train: " + args[0]);
    Instances trainSet = DataSource.read(args[0]);
    MLUtils.prepareData(trainSet);

    System.out.println("Loading predict: " + args[1]);
    Instances predictSet = DataSource.read(args[1]);
    MLUtils.prepareData(predictSet);

    // both datasets must share the exact same structure
    String incompatibility = trainSet.equalHeadersMsg(predictSet);
    if (incompatibility != null)
      throw new IllegalStateException(incompatibility);

    System.out.println("Build BR classifier on " + args[0]);
    BR br = new BR();
    // further configuration of classifier
    br.buildClassifier(trainSet);

    System.out.println("Use BR classifier on " + args[1]);
    for (int index = 0; index < predictSet.numInstances(); index++) {
      // one probability per label, printed 1-based for readability
      double[] distribution = br.distributionForInstance(predictSet.instance(index));
      System.out.println((index + 1) + ": " + Utils.arrayToString(distribution));
    }
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/mekaexamples
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/mekaexamples/classifiers/TrainTestSet.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* TrainTestSet.java
* Copyright (C) 2016 University of Waikato, Hamilton, NZ
*/
package mekaexamples.classifiers;
import meka.classifiers.multilabel.BR;
import meka.classifiers.multilabel.Evaluation;
import meka.core.MLUtils;
import meka.core.Result;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;
/**
* Builds and evaluates a BR Meka classifier on user supplied train/test datasets.
* <br>
* Expected parameters: <train> <test>
* <br>
* Note: The datasets must have been prepared for Meka already and compatible.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class TrainTestSet {

  /**
   * Builds BR on the supplied train set and evaluates it on the supplied
   * test set, printing the resulting statistics.
   *
   * @param args first: train set filename, second: test set filename
   * @throws Exception if loading, building or evaluating fails
   */
  public static void main(String[] args) throws Exception {
    // two parameters: train and test set
    if (args.length != 2)
      throw new IllegalArgumentException("Required arguments: <train> <test>");

    System.out.println("Loading train: " + args[0]);
    Instances trainSet = DataSource.read(args[0]);
    MLUtils.prepareData(trainSet);

    System.out.println("Loading test: " + args[1]);
    Instances testSet = DataSource.read(args[1]);
    MLUtils.prepareData(testSet);

    // both datasets must share the exact same structure
    String incompatibility = trainSet.equalHeadersMsg(testSet);
    if (incompatibility != null)
      throw new IllegalStateException(incompatibility);

    System.out.println("Build BR classifier on " + args[0]);
    BR br = new BR();
    // further configuration of classifier
    br.buildClassifier(trainSet);

    System.out.println("Evaluate BR classifier on " + args[1]);
    String thresholdOption = "PCut1";
    String verbosityOption = "3";
    Result evaluation = Evaluation.evaluateModel(br, trainSet, testSet, thresholdOption, verbosityOption);
    System.out.println(evaluation);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/mekaexamples
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/mekaexamples/classifiers/TrainTestSplit.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* TrainTestSplit.java
* Copyright (C) 2016 University of Waikato, Hamilton, NZ
*/
package mekaexamples.classifiers;
import meka.classifiers.multilabel.BR;
import meka.classifiers.multilabel.Evaluation;
import meka.core.MLUtils;
import meka.core.Result;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;
/**
* Builds and evaluates a BR Meka classifier on a train/test split dataset supplied by the user.
* <br>
* Expected parameters: <dataset> <percentage>
* <br>
* Note: The dataset must have been prepared for Meka already.
* And the percentage must be between 0 and 100.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class TrainTestSplit {

  /**
   * Builds BR on the first &lt;percentage&gt;% of the dataset and evaluates
   * it on the remainder.
   *
   * @param args first: dataset filename (prepared for Meka),
   *          second: train percentage, must lie in (0, 100)
   * @throws Exception if loading, building or evaluating fails
   */
  public static void main(String[] args) throws Exception {
    if (args.length != 2)
      throw new IllegalArgumentException("Required arguments: <dataset> <percentage>");

    System.out.println("Loading data: " + args[0]);
    Instances data = DataSource.read(args[0]);
    MLUtils.prepareData(data);

    // fail early on a nonsensical split (javadoc requires 0..100) instead of
    // producing an empty train or test set further down
    double percentage = Double.parseDouble(args[1]);
    if ((percentage <= 0.0) || (percentage >= 100.0))
      throw new IllegalArgumentException("Percentage must be between 0 and 100 (exclusive), provided: " + percentage);

    // first trainSize instances train, the rest test
    int trainSize = (int) (data.numInstances() * percentage / 100.0);
    Instances train = new Instances(data, 0, trainSize);
    Instances test = new Instances(data, trainSize, data.numInstances() - trainSize);

    System.out.println("Build BR classifier on " + percentage + "%");
    BR classifier = new BR();
    // further configuration of classifier
    classifier.buildClassifier(train);

    System.out.println("Evaluate BR classifier on " + (100.0 - percentage) + "%");
    String top = "PCut1"; // threshold option
    String vop = "3";     // verbosity option
    Result result = Evaluation.evaluateModel(classifier, train, test, top, vop);
    System.out.println(result);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/mekaexamples
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/mekaexamples/experiment/ExperimentExample.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* ExperimentExample.java
* Copyright (C) 2015-2016 University of Waikato, Hamilton, NZ
*/
package mekaexamples.experiment;
import meka.classifiers.multilabel.BR;
import meka.classifiers.multilabel.CC;
import meka.classifiers.multilabel.MultiLabelClassifier;
import meka.core.OptionUtils;
import meka.events.LogEvent;
import meka.events.LogListener;
import meka.experiment.DefaultExperiment;
import meka.experiment.Experiment;
import meka.experiment.datasetproviders.DatasetProvider;
import meka.experiment.datasetproviders.LocalDatasetProvider;
import meka.experiment.datasetproviders.MultiDatasetProvider;
import meka.experiment.evaluationstatistics.KeyValuePairs;
import meka.experiment.evaluators.CrossValidation;
import meka.experiment.evaluators.RepeatedRuns;
import meka.experiment.events.ExecutionStageEvent;
import meka.experiment.events.ExecutionStageListener;
import meka.experiment.events.IterationNotificationEvent;
import meka.experiment.events.IterationNotificationListener;
import meka.experiment.events.StatisticsNotificationEvent;
import meka.experiment.events.StatisticsNotificationListener;
import meka.experiment.statisticsexporters.EvaluationStatisticsExporter;
import meka.experiment.statisticsexporters.MultiExporter;
import meka.experiment.statisticsexporters.SimpleAggregate;
import meka.experiment.statisticsexporters.TabSeparated;
import meka.experiment.statisticsexporters.TabSeparatedMeasurement;
import weka.core.Utils;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
/**
* Creates an experiment using BR and CC classifiers, evaluating them on the
* user-supplied datasets.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class ExperimentExample {

  /**
   * Assembles, runs and exports a BR-vs-CC experiment over the supplied
   * datasets (plus the bundled solar_flare dataset); all generated files go
   * into the JVM's temp directory.
   *
   * @param args the dataset filenames, at least one required
   * @throws Exception if the experiment setup or export fails
   */
  public static void main(String[] args) throws Exception {
    if (args.length == 0)
      // NOTE: message previously read "Requirement arguments" (typo)
      throw new IllegalArgumentException("Required arguments: <dataset1> [<dataset2> [...]]");
    String tmpDir = System.getProperty("java.io.tmpdir");
    System.out.println("Using tmp dir: " + tmpDir);
    Experiment exp = new DefaultExperiment();
    // classifiers
    exp.setClassifiers(new MultiLabelClassifier[]{
      new BR(),
      new CC()
    });
    // datasets: the user-supplied files plus a fixed example dataset,
    // combined through a MultiDatasetProvider
    List<File> files = new ArrayList<>();
    for (String f: args)
      files.add(new File(f));
    LocalDatasetProvider dp1 = new LocalDatasetProvider();
    dp1.setDatasets(files.toArray(new File[files.size()]));
    LocalDatasetProvider dp2 = new LocalDatasetProvider();
    dp2.setDatasets(new File[]{
      new File("src/main/data/solar_flare.arff"),
    });
    MultiDatasetProvider mdp = new MultiDatasetProvider();
    mdp.setProviders(new DatasetProvider[]{dp1, dp2});
    exp.setDatasetProvider(mdp);
    // output of metrics
    KeyValuePairs sh = new KeyValuePairs();
    sh.setFile(new File(tmpDir + "/mekaexp.txt"));
    sh.getFile().delete(); // remove old run
    exp.setStatisticsHandler(sh);
    // evaluation: repeated cross-validation runs
    RepeatedRuns eval = new RepeatedRuns();
    eval.setEvaluator(new CrossValidation());
    exp.setEvaluator(eval);
    // stage: report each lifecycle stage on stderr
    exp.addExecutionStageListener(new ExecutionStageListener() {
      @Override
      public void experimentStage(ExecutionStageEvent e) {
        System.err.println("[STAGE] " + e.getStage());
      }
    });
    // iterations: report each classifier/dataset combination
    exp.addIterationNotificationListener(new IterationNotificationListener() {
      @Override
      public void nextIteration(IterationNotificationEvent e) {
        System.err.println("[ITERATION] " + Utils.toCommandLine(e.getClassifier()) + " --> " + e.getDataset().relationName());
      }
    });
    // statistics: report how many statistics became available
    exp.addStatisticsNotificationListener(new StatisticsNotificationListener() {
      @Override
      public void statisticsAvailable(StatisticsNotificationEvent e) {
        System.err.println("[STATISTICS] #" + e.getStatistics().size());
      }
    });
    // log events
    exp.addLogListener(new LogListener() {
      @Override
      public void logMessage(LogEvent e) {
        System.err.println("[LOG] " + e.getSource().getClass().getName() + ": " + e.getMessage());
      }
    });
    // output options
    System.out.println("Setup:\n" + OptionUtils.toCommandLine(exp) + "\n");
    // execute: initialize -> run -> finish; each step returns null on success
    String msg = exp.initialize();
    System.out.println("initialize: " + msg);
    if (msg != null)
      return;
    msg = exp.run();
    System.out.println("run: " + msg);
    msg = exp.finish();
    System.out.println("finish: " + msg);
    // export them: aggregated stats, full stats and two single-measurement
    // files, all tab-separated, bundled into one MultiExporter
    TabSeparated tabsepAgg = new TabSeparated();
    tabsepAgg.setFile(new File(tmpDir + "/mekaexp-agg.tsv"));
    SimpleAggregate aggregate = new SimpleAggregate();
    aggregate.setSuffixMean("");
    aggregate.setSuffixStdDev(" (stdev)");
    aggregate.setSkipCount(true);
    aggregate.setSkipMean(false);
    aggregate.setSkipStdDev(false);
    aggregate.setExporter(tabsepAgg);
    TabSeparated tabsepFull = new TabSeparated();
    tabsepFull.setFile(new File(tmpDir + "/mekaexp-full.tsv"));
    TabSeparatedMeasurement tabsepHL = new TabSeparatedMeasurement();
    tabsepHL.setMeasurement("Hamming loss");
    tabsepHL.setFile(new File(tmpDir + "/mekaexp-HL.tsv"));
    TabSeparatedMeasurement tabsepZOL = new TabSeparatedMeasurement();
    tabsepZOL.setMeasurement("ZeroOne loss");
    tabsepZOL.setFile(new File(tmpDir + "/mekaexp-ZOL.tsv"));
    MultiExporter multiexp = new MultiExporter();
    multiexp.setExporters(new EvaluationStatisticsExporter[]{aggregate, tabsepFull, tabsepHL, tabsepZOL});
    multiexp.addLogListener(new LogListener() {
      @Override
      public void logMessage(LogEvent e) {
        System.err.println("[EXPORT] " + e.getSource().getClass().getName() + ": " + e.getMessage());
      }
    });
    System.out.println(OptionUtils.toCommandLine(multiexp));
    msg = multiexp.export(exp.getStatistics());
    System.out.println("export: " + msg);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/mekaexamples
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/mekaexamples/filter/PrepareClassAttributes.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* PrepareClassAttributes.java
* Copyright (C) 2016 University of Waikato, Hamilton, NZ
*/
package mekaexamples.filter;
import meka.filters.unsupervised.attribute.MekaClassAttributes;
import weka.core.Instances;
import weka.core.converters.ArffSaver;
import weka.core.converters.ConverterUtils.DataSink;
import weka.core.converters.ConverterUtils.DataSource;
import weka.filters.Filter;
import java.io.File;
/**
* Prepares a dataset for use in Meka, if it isn't already prepared properly
* (the relation name in an ARFF file used by Meka stores information on how
* many attributes from the left are used as class attributes).
* <br>
* Expects the following parameters: <input> <attribute_indices> <output>
* <br>
* The "input" parameter points to a dataset that Meka can read (eg CSV or ARFF).
* The "attribute_indices" parameter is a comma-separated list of 1-based indices
* of the attributes to use as class attributes in Meka.
* The "output" parameters is the filename where to store the generated output data (as ARFF).
*
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class PrepareClassAttributes {

  /**
   * Applies the MekaClassAttributes filter to the input dataset, marking the
   * given 1-based attribute indices as Meka class attributes, and writes the
   * result as ARFF.
   *
   * @param args first: input dataset, second: comma-separated 1-based
   *          attribute indices, third: output ARFF filename
   * @throws Exception if loading, filtering or saving fails
   */
  public static void main(String[] args) throws Exception {
    // exactly three parameters required
    if (args.length != 3)
      throw new IllegalArgumentException("Required parameters: <input> <attribute_indices> <output>");

    System.out.println("Loading input data: " + args[0]);
    Instances input = DataSource.read(args[0]);

    System.out.println("Applying filter using indices: " + args[1]);
    MekaClassAttributes filter = new MekaClassAttributes();
    filter.setAttributeIndices(args[1]);
    filter.setInputFormat(input);
    Instances filtered = Filter.useFilter(input, filter);

    System.out.println("Saving filtered data to: " + args[2]);
    ArffSaver saver = new ArffSaver();
    saver.setFile(new File(args[2]));
    DataSink.write(saver, filtered);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/mekaexamples/gui
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/mekaexamples/gui/classify/MacroCurve.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* MacroCurve.java
* Copyright (C) 2016 University of Waikato, Hamilton, NZ
*/
package mekaexamples.gui.classify;
import meka.classifiers.multilabel.BR;
import meka.classifiers.multilabel.Evaluation;
import meka.core.MLUtils;
import meka.core.Result;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;
import weka.gui.visualize.PlotData2D;
import weka.gui.visualize.ThresholdVisualizePanel;
import weka.gui.visualize.VisualizePanel;
import javax.swing.JDialog;
import javax.swing.JFrame;
import java.awt.BorderLayout;
/**
* Cross-validates a BR Meka classifier on a dataset supplied by the user
* and displays the macro curve.
* <br>
* Expected parameters: <dataset>
* <br>
* Note: The dataset must have been prepared for Meka already.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class MacroCurve {

  public static final String CURVE_DATA_MACRO = "Macro Curve Data";

  public static final String SAMPLES = "Samples";

  public static final String ACCURACY = "Accuracy";

  /**
   * Wraps the curve data in a visualization panel, connecting consecutive
   * points and pre-selecting the samples/accuracy axes when present.
   *
   * @param data the plot data
   * @return the panel
   * @throws Exception if plot generation fails
   */
  protected static VisualizePanel createPanel(Instances data) throws Exception {
    ThresholdVisualizePanel panel = new ThresholdVisualizePanel();
    PlotData2D curve = new PlotData2D(data);
    curve.setPlotName("Macro-averaged Performance");
    curve.m_displayAllPoints = true;
    // connect each point to its predecessor (the first point has none)
    boolean[] connect = new boolean[data.numInstances()];
    for (int i = 1; i < connect.length; i++)
      connect[i] = true;
    curve.setConnectPoints(connect);
    panel.addPlot(curve);
    if (data.attribute(SAMPLES) != null)
      panel.setXIndex(data.attribute(SAMPLES).index());
    if (data.attribute(ACCURACY) != null)
      panel.setYIndex(data.attribute(ACCURACY).index());
    return panel;
  }

  /**
   * Cross-validates BR on the supplied dataset and shows the macro curve
   * measurement in a frame.
   *
   * @param args expects exactly one element: the dataset filename
   * @throws Exception if loading or cross-validation fails
   */
  public static void main(String[] args) throws Exception {
    if (args.length != 1)
      throw new IllegalArgumentException("Required arguments: <dataset>");

    System.out.println("Loading data: " + args[0]);
    Instances data = DataSource.read(args[0]);
    MLUtils.prepareData(data);

    System.out.println("Cross-validate BR classifier");
    BR classifier = new BR();
    // further configuration of classifier
    String top = "PCut1";
    String vop = "3";
    Result result = Evaluation.cvModel(classifier, data, 10, top, vop);

    JFrame frame = new JFrame("Macro curve");
    // same constant value as JDialog.EXIT_ON_CLOSE, but the idiomatic source
    frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
    frame.getContentPane().setLayout(new BorderLayout());
    Instances performance = (Instances) result.getMeasurement(CURVE_DATA_MACRO);
    try {
      frame.getContentPane().add(createPanel(performance), BorderLayout.CENTER);
    }
    catch (Exception ex) {
      System.err.println("Failed to create plot!");
      ex.printStackTrace();
    }
    frame.setSize(800, 600);
    frame.setLocationRelativeTo(null);
    frame.setVisible(true);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/mekaexamples/gui
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/mekaexamples/gui/classify/MicroCurve.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* MicroCurve.java
* Copyright (C) 2016 University of Waikato, Hamilton, NZ
*/
package mekaexamples.gui.classify;
import meka.classifiers.multilabel.BR;
import meka.classifiers.multilabel.Evaluation;
import meka.core.MLUtils;
import meka.core.Result;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;
import weka.gui.visualize.PlotData2D;
import weka.gui.visualize.ThresholdVisualizePanel;
import weka.gui.visualize.VisualizePanel;
import javax.swing.JDialog;
import javax.swing.JFrame;
import java.awt.BorderLayout;
/**
* Cross-validates a BR Meka classifier on a dataset supplied by the user
 * and displays the micro curve.
* <br>
* Expected parameters: <dataset>
* <br>
* Note: The dataset must have been prepared for Meka already.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class MicroCurve {

  public static final String CURVE_DATA_MICRO = "Micro Curve Data";

  public static final String SAMPLES = "Samples";

  public static final String ACCURACY = "Accuracy";

  /**
   * Wraps the curve data in a visualization panel, connecting consecutive
   * points and pre-selecting the samples/accuracy axes when present.
   *
   * @param data the plot data
   * @return the panel
   * @throws Exception if plot generation fails
   */
  protected static VisualizePanel createPanel(Instances data) throws Exception {
    ThresholdVisualizePanel panel = new ThresholdVisualizePanel();
    PlotData2D curve = new PlotData2D(data);
    curve.setPlotName("Micro-averaged Performance");
    curve.m_displayAllPoints = true;
    // connect each point to its predecessor (the first point has none)
    boolean[] connect = new boolean[data.numInstances()];
    for (int i = 1; i < connect.length; i++)
      connect[i] = true;
    curve.setConnectPoints(connect);
    panel.addPlot(curve);
    if (data.attribute(SAMPLES) != null)
      panel.setXIndex(data.attribute(SAMPLES).index());
    if (data.attribute(ACCURACY) != null)
      panel.setYIndex(data.attribute(ACCURACY).index());
    return panel;
  }

  /**
   * Cross-validates BR on the supplied dataset and shows the micro curve
   * measurement in a frame.
   *
   * @param args expects exactly one element: the dataset filename
   * @throws Exception if loading or cross-validation fails
   */
  public static void main(String[] args) throws Exception {
    if (args.length != 1)
      throw new IllegalArgumentException("Required arguments: <dataset>");

    System.out.println("Loading data: " + args[0]);
    Instances data = DataSource.read(args[0]);
    MLUtils.prepareData(data);

    System.out.println("Cross-validate BR classifier");
    BR classifier = new BR();
    // further configuration of classifier
    String top = "PCut1";
    String vop = "3";
    Result result = Evaluation.cvModel(classifier, data, 10, top, vop);

    JFrame frame = new JFrame("Micro curve");
    // same constant value as JDialog.EXIT_ON_CLOSE, but the idiomatic source
    frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
    frame.getContentPane().setLayout(new BorderLayout());
    Instances performance = (Instances) result.getMeasurement(CURVE_DATA_MICRO);
    try {
      frame.getContentPane().add(createPanel(performance), BorderLayout.CENTER);
    }
    catch (Exception ex) {
      System.err.println("Failed to create plot!");
      ex.printStackTrace();
    }
    frame.setSize(800, 600);
    frame.setLocationRelativeTo(null);
    frame.setVisible(true);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/mekaexamples/gui
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/mekaexamples/gui/classify/PrecisionRecall.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* PrecisionRecall.java
* Copyright (C) 2016 University of Waikato, Hamilton, NZ
*/
package mekaexamples.gui.classify;
import meka.classifiers.multilabel.BR;
import meka.classifiers.multilabel.Evaluation;
import meka.core.MLUtils;
import meka.core.Result;
import weka.classifiers.evaluation.ThresholdCurve;
import weka.core.Instances;
import weka.core.Utils;
import weka.core.converters.ConverterUtils.DataSource;
import weka.gui.visualize.PlotData2D;
import weka.gui.visualize.ThresholdVisualizePanel;
import javax.swing.JDialog;
import javax.swing.JFrame;
import javax.swing.JTabbedPane;
import java.awt.BorderLayout;
/**
* Cross-validates a BR Meka classifier on a dataset supplied by the user
* and displays the precision recall curves per label.
* <br>
* Expected parameters: <dataset>
* <br>
* Note: The dataset must have been prepared for Meka already.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class PrecisionRecall {

  public static final String CURVE_DATA = "Curve Data";

  /**
   * Wraps a per-label curve in a visualization panel, connecting consecutive
   * points, showing the PRC area and pre-selecting the recall/precision axes.
   *
   * @param data the plot data
   * @param title the title
   * @return the panel
   * @throws Exception if plot generation fails
   */
  protected static ThresholdVisualizePanel createPanel(Instances data, String title) throws Exception {
    ThresholdVisualizePanel panel = new ThresholdVisualizePanel();
    PlotData2D curve = new PlotData2D(data);
    curve.setPlotName(title);
    curve.m_displayAllPoints = true;
    // connect each point to its predecessor (the first point has none)
    boolean[] connect = new boolean[data.numInstances()];
    for (int i = 1; i < connect.length; i++)
      connect[i] = true;
    curve.setConnectPoints(connect);
    panel.addPlot(curve);
    panel.setROCString("PRC area: " + Utils.doubleToString(ThresholdCurve.getPRCArea(data), 3));
    panel.setUpComboBoxes(panel.getInstances());
    if (data.attribute(ThresholdCurve.RECALL_NAME) != null)
      panel.setXIndex(data.attribute(ThresholdCurve.RECALL_NAME).index());
    if (data.attribute(ThresholdCurve.PRECISION_NAME) != null)
      panel.setYIndex(data.attribute(ThresholdCurve.PRECISION_NAME).index());
    return panel;
  }

  /**
   * Cross-validates BR on the supplied dataset and shows one
   * precision-recall tab per label.
   *
   * @param args expects exactly one element: the dataset filename
   * @throws Exception if loading or cross-validation fails
   */
  public static void main(String[] args) throws Exception {
    if (args.length != 1)
      throw new IllegalArgumentException("Required arguments: <dataset>");

    System.out.println("Loading data: " + args[0]);
    Instances data = DataSource.read(args[0]);
    MLUtils.prepareData(data);

    System.out.println("Cross-validate BR classifier");
    BR classifier = new BR();
    // further configuration of classifier
    String top = "PCut1";
    String vop = "3";
    Result result = Evaluation.cvModel(classifier, data, 10, top, vop);

    JFrame frame = new JFrame("Precision-recall");
    frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
    frame.getContentPane().setLayout(new BorderLayout());
    JTabbedPane tabbed = new JTabbedPane();
    frame.getContentPane().add(tabbed, BorderLayout.CENTER);
    // one curve per label; a failing plot only skips its tab
    Instances[] curves = (Instances[]) result.getMeasurement(CURVE_DATA);
    for (int label = 0; label < curves.length; label++) {
      try {
        tabbed.addTab("" + label, createPanel(curves[label], "Label " + label));
      }
      catch (Exception ex) {
        System.err.println("Failed to create plot for label " + label);
        ex.printStackTrace();
      }
    }
    frame.setSize(800, 600);
    frame.setLocationRelativeTo(null);
    frame.setVisible(true);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/mekaexamples/gui
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/mekaexamples/gui/classify/ROC.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* ROC.java
* Copyright (C) 2016 University of Waikato, Hamilton, NZ
*/
package mekaexamples.gui.classify;
import meka.classifiers.multilabel.BR;
import meka.classifiers.multilabel.Evaluation;
import meka.core.MLUtils;
import meka.core.Result;
import weka.classifiers.evaluation.ThresholdCurve;
import weka.core.Instances;
import weka.core.Utils;
import weka.core.converters.ConverterUtils.DataSource;
import weka.gui.visualize.PlotData2D;
import weka.gui.visualize.ThresholdVisualizePanel;
import javax.swing.JDialog;
import javax.swing.JFrame;
import javax.swing.JTabbedPane;
import java.awt.BorderLayout;
/**
* Cross-validates a BR Meka classifier on a dataset supplied by the user
* and displays the ROC curves per label.
* <br>
* Expected parameters: <dataset>
* <br>
* Note: The dataset must have been prepared for Meka already.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class ROC {

  public static final String CURVE_DATA = "Curve Data";

  /**
   * Wraps a per-label curve in a visualization panel, connecting consecutive
   * points, showing the AUC and pre-selecting the FP-rate/TP-rate axes.
   *
   * @param data the plot data
   * @param title the title
   * @return the panel
   * @throws Exception if plot generation fails
   */
  protected static ThresholdVisualizePanel createPanel(Instances data, String title) throws Exception {
    ThresholdVisualizePanel panel = new ThresholdVisualizePanel();
    PlotData2D curve = new PlotData2D(data);
    curve.setPlotName(title);
    curve.m_displayAllPoints = true;
    // connect each point to its predecessor (the first point has none)
    boolean[] connect = new boolean[data.numInstances()];
    for (int i = 1; i < connect.length; i++)
      connect[i] = true;
    curve.setConnectPoints(connect);
    panel.addPlot(curve);
    panel.setROCString("AUC: " + Utils.doubleToString(ThresholdCurve.getROCArea(data), 3));
    panel.setUpComboBoxes(panel.getInstances());
    if (data.attribute(ThresholdCurve.FP_RATE_NAME) != null)
      panel.setXIndex(data.attribute(ThresholdCurve.FP_RATE_NAME).index());
    if (data.attribute(ThresholdCurve.TP_RATE_NAME) != null)
      panel.setYIndex(data.attribute(ThresholdCurve.TP_RATE_NAME).index());
    return panel;
  }

  /**
   * Cross-validates BR on the supplied dataset and shows one ROC tab per
   * label.
   *
   * @param args expects exactly one element: the dataset filename
   * @throws Exception if loading or cross-validation fails
   */
  public static void main(String[] args) throws Exception {
    if (args.length != 1)
      throw new IllegalArgumentException("Required arguments: <dataset>");

    System.out.println("Loading data: " + args[0]);
    Instances data = DataSource.read(args[0]);
    MLUtils.prepareData(data);

    System.out.println("Cross-validate BR classifier");
    BR classifier = new BR();
    // further configuration of classifier
    String top = "PCut1";
    String vop = "3";
    Result result = Evaluation.cvModel(classifier, data, 10, top, vop);

    JFrame frame = new JFrame("ROC");
    frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
    frame.getContentPane().setLayout(new BorderLayout());
    JTabbedPane tabbed = new JTabbedPane();
    frame.getContentPane().add(tabbed, BorderLayout.CENTER);
    // one curve per label; a failing plot only skips its tab
    Instances[] curves = (Instances[]) result.getMeasurement(CURVE_DATA);
    for (int label = 0; label < curves.length; label++) {
      try {
        tabbed.addTab("" + label, createPanel(curves[label], "Label " + label));
      }
      catch (Exception ex) {
        System.err.println("Failed to create plot for label " + label);
        ex.printStackTrace();
      }
    }
    frame.setSize(800, 600);
    frame.setLocationRelativeTo(null);
    frame.setVisible(true);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/rbms/DBM.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package rbms;
import Jama.Matrix;
/**
* DBM - Stacked Restricted Boltzmann Machines.
*
* Like RBM, but with multiple layers (trained greedily). Default: N = 2 layers (both with H hidden
* units).
*
* <verbatim> ============== TRAINING ===================== DBM dbm = new DBM(); dbm.setOptions("-E
* 100 -H 10 -r 0.1 -m 0.8"); ... </verbatim>
*
* @see RBM
* @author Jesse Read
* @version April 2013
*/
public class DBM extends RBM {
protected RBM rbm[] = null;
protected int h[] = null; // all layers
  /**
   * DBM - Create a DBM with 'options' (we use WEKA-style option processing).
   *
   * @param options WEKA-style option array, e.g. {"-E", "100", "-H", "10", "-r", "0.1", "-m", "0.8"}
   * @throws Exception if the options cannot be parsed
   */
  public DBM(final String options[]) throws Exception {
    super.setOptions(options);
  }
public RBM[] getRBMs() {
return this.rbm;
}
@Override
public double[] prob_z(double z[]) {
if (this.rbm == null) {
return null;
}
for (int i = 0; i < this.h.length; i++) {
z = this.rbm[i].prob_z(z); // input = rbm(input)
}
return z;
}
@Override
public double[][] prob_Z(double X_[][]) {
if (this.rbm == null) {
return null;
}
for (int i = 0; i < this.h.length; i++) {
X_ = this.rbm[i].prob_Z(X_); // input = rbm(input)
}
return X_;
}
/*
* // forward propagation public double[][][] prop_Z_downs(double X_[][]) throws Exception {
*
* double Z[][][] = new double[m_N+1][][];
*
* Z[0] = X_; for(int i = 0; i < m_N; i++) { Z[i+1] = rbm[i].prop_Z_down(Z[i]); // input =
* rbm(input) } return Z; }
*/
/** Set hidden layers specification */
public void setH(final int h[]) {
this.h = h;
}
/**
* SetH - for a discriminative DBM (where the last h == L)
*
* @param H
* hidden layers
* @param L
* output linear layer
* @param N
* number of hidden layers
*/
public void setH(final int H, final int L, final int N) {
int h[] = new int[N];
for (int i = 0; i < N - 1; i++) {
h[i] = H;
}
h[N - 1] = L;
this.h = h;
}
/**
* SetH -
*
* @param H
* hidden layers
* @param N
* number of hidden layers
*/
public void setH(final int H, final int N) {
int h[] = new int[N];
for (int i = 0; i < N; i++) {
h[i] = H;
}
this.h = h;
}
@Override
public void setH(final int H) {
// default
this.setH(H, 2);
}
/*
* not sure if will use this? private int[] geth(int H, int N) { int h[] = new int[N]; if (H > 0) {
* // first layer is h h[0] = H; // each layer above is half as big for(int i = 1; i < N; i++) {
* h[i] = h[i-1] / 2; } } else { // output layer is -H h[N-1] = Math.abs(H); // each layer below is
* 4 times bigger for(int i = N-2; i>=0; i--) { h[i] = h[i+1] * 4; } } return h; }
*/
@Override
// should an RBM be a DBM of one layer?
public Matrix[] getWs() {
Matrix W[] = new Matrix[this.rbm.length];
for (int i = 0; i < W.length; i++) {
W[i] = this.rbm[i].getW();
}
return W;
}
@Override
public double train(final double X_[][]) throws Exception {
return this.train(X_, 0);
}
@Override
public double train(double X_[][], final int batchSize) throws Exception {
int N = this.h.length;
this.rbm = new RBM[N];
// Greedily train RBMs, get Z off the top
for (int i = 0; i < N; i++) {
this.rbm[i] = new RBM(this.getOptions()); // same options as this instantiation
this.rbm[i].setH(this.h[i]); // but different number of hidden units
if (batchSize == 0) {
this.rbm[i].train(X_);
} else {
this.rbm[i].train(X_, batchSize);
}
X_ = this.rbm[i].prob_Z(X_); // input = rbm(input)
}
return 1.0;
}
@Override
public void update(Matrix X) throws InterruptedException {
for (int i = 0; i < this.h.length; i++) {
this.rbm[i].update(X);
try {
X = this.rbm[i].prob_Z(X);
} catch (Exception e) {
System.err.println("AHH!!");
e.printStackTrace();
}
}
}
@Override
public void update(Matrix X, final double s) throws InterruptedException {
for (int i = 0; i < this.h.length; i++) {
this.rbm[i].update(X, s);
try {
X = this.rbm[i].prob_Z(X);
} catch (Exception e) {
System.err.println("AHH!!");
e.printStackTrace();
}
}
}
@Override
public void update(double X_[][]) throws InterruptedException {
for (int i = 0; i < this.h.length; i++) {
this.rbm[i].update(X_);
try {
X_ = this.rbm[i].prob_Z(X_); // input = rbm(input)
} catch (Exception e) {
System.err.println("AHH!!");
e.printStackTrace();
}
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/rbms/RBM.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package rbms;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Random;
import Jama.Matrix;
import meka.core.MatrixUtils;
import weka.core.Utils;
/**
* RBM.java - Restricted Boltzmann Machine. Using Contrastive Divergence.
*
* You have inputs X; You want to output to hidden units Z. To do this, you learn weight matrix W,
* where Z ~ sigma(W'X).
*
* <verbatim> ============== TRAINING (batches of 10) ===== RBM rbm = new RBM(); rbm.setOptions("-E
* 100 -H 10 -r 0.1 -m 0.8"); // to build 10 hidden units, over 100 epochs, with learning rate 0.1,
* momentum 0.8 rbm.train(X,10); // train in batches of 10 Z = rbm.getZ(X); // get output
* ============== UPDATING (one epoch) ========= rbm.update(xnew); ============== TESTING (single
* input) ======= z = rbm.getz(xnew); </verbatim> Note: should be binary for hidden states, can be
* probabilities for visible states.
*
* @author Jesse Read (jesse@tsc.uc3m.es)
* @version April 2013
*/
public class RBM {
protected double LEARNING_RATE = 0.1; // set between v small and 0.1 in [0,1]
protected double MOMENTUM = 0.1; // set between 0.1 and 0.9 in [0,1]
protected double COST = 0.0002 * this.LEARNING_RATE; // set between v small to ~ 0.001 * LEARNING_RATE The rate at which to degrade connection weights to
// penalize large weights
protected int m_E = 1000;
protected int m_H = 10;
private boolean m_V = false; // cut out of var(10) < 0.0001
private int batch_size = 0; // @todo implement this option (with CLI option)
protected Matrix W = null; // the weight matrix
protected Matrix dW_ = null; // used for momentum
protected Random m_R = new Random(0); // for random init. of matrices and sampling
/**
* RBM - Create an RBM with default options.
*/
public RBM() {
}
/**
* RBM - Create an RBM with 'options' (using WEKA-style option processing).
*/
public RBM(final String options[]) throws Exception {
this.setOptions(options);
}
/**
* Set Options - WEKA-style option processing.
*/
public void setOptions(final String[] options) throws Exception {
try {
this.setH(Integer.parseInt(Utils.getOption('H', options)));
this.setE(Integer.parseInt(Utils.getOption('E', options)));
this.setLearningRate(Double.parseDouble(Utils.getOption('r', options)));
this.setMomentum(Double.parseDouble(Utils.getOption('m', options)));
} catch (Exception e) {
System.err.println("Missing option!");
e.printStackTrace();
System.exit(1);
}
// super.setOptions(options);
}
/**
* GetOptions - WEKA-style option processing.
*/
public String[] getOptions() throws Exception {
ArrayList<String> result;
result = new ArrayList<>(); // Arrays.asList(super.getOptions()));
result.add("-r");
result.add(String.valueOf(this.LEARNING_RATE));
result.add("-m");
result.add(String.valueOf(this.MOMENTUM));
result.add("-E");
result.add(String.valueOf(this.getE()));
result.add("-H");
result.add(String.valueOf(this.getH()));
return result.toArray(new String[result.size()]);
}
/**
* Hidden Activation Probability - returns P(z|x) where p(z[i]==1|x) for each element. A Bias is
* added (and removed) automatically.
*
* @param x_
* x (without bias)
* @return z (without bias)
*/
public double[] prob_z(final double x_[]) {
Matrix x = new Matrix(MatrixUtils.addBias(x_), 1);
double z[] = MatrixUtils.sigma(x.times(this.W).getArray()[0]);
return MatrixUtils.removeBias(z);
}
/**
* Hidden Activation Probability - returns P(Z|X). A Bias column added (and removed) automatically.
*
* @param X_
* X (without bias)
* @return P(Z|X)
*/
public double[][] prob_Z(final double X_[][]) {
Matrix X = new Matrix(MatrixUtils.addBias(X_));
return MatrixUtils.removeBias(this.prob_Z(X).getArray());
}
/**
* Hidden Activation Probability - returns P(Z|X). A bias column is assumed to be included.
*
* @param X
* X (bias included)
* @return P(Z|X)
*/
public Matrix prob_Z(final Matrix X) {
Matrix P_Z = MatrixUtils.sigma(X.times(this.W)); // (this is the activation function)
MatrixUtils.fillCol(P_Z.getArray(), 0, 1.0); // fix bias ... set first col to 1.0
return P_Z;
}
/**
* Hidden Activation Value. A bias column added (and removed) automatically.
*
* @param X_
* X (without bias)
* @return 1 if P(Z|X) greater than 0.5
*/
public double[][] propUp(final double X_[][]) {
return MatrixUtils.threshold(this.prob_Z(X_), 0.5); // ... or just go down
}
/**
* Sample Hidden Value - returns Z ~ P(Z|X). A bias column is assumed to be included.
*
* @param X
* X (bias included)
* @return Z ~ P(Z|X)
*/
public Matrix sample_Z(final Matrix X) {
Matrix P_Z = this.prob_Z(X);
return MatrixUtils.sample(P_Z, this.m_R);
}
/**
* Sample Hidden Value - returns z[i] ~ p(z[i]==1|x) for each i-th element. A bias column added (and
* removed) automatically.
*
* @param x_
* x (without bias)
* @return z ~ P(z|x) (without bias)
*/
public double[] sample_z(final double x_[]) {
double p[] = this.prob_z(x_);
return MatrixUtils.sample(p, this.m_R);
}
/**
* Sample Visible - returns x[j] ~ p(x[j]==1|z) for each j-th element. A bias is added (and removed)
* automatically.
*
* @param z_
* z (without bias)
* @return x ~ P(x|z) (without bias)
*/
public double[] sample_x(final double z_[]) {
double p_x[] = this.prob_x(z_);
return MatrixUtils.sample(p_x, this.m_R);
}
/**
* Sample Visible - returns X ~ P(X|Z). A bias column is assumed to be included.
*
* @param Z
* Z (bias included)
* @return X ~ P(X|Z)
*/
public Matrix sample_X(final Matrix Z) {
Matrix P_X = this.prob_X(Z);
return MatrixUtils.sample(P_X, this.m_R);
}
/**
* Visible Activation Probability - returns P(x|z) where p(x[j]==1|z) for each j-th element. A bias
* is added (and removed) automatically.
*
* @param z_
* z (without bias)
* @return x (without bias)
*/
public double[] prob_x(final double z_[]) {
Matrix z = new Matrix(MatrixUtils.addBias(z_), 1);
double x[] = MatrixUtils.sigma(z.times(this.W.transpose()).getArray()[0]);
return MatrixUtils.removeBias(x);
}
/**
* Visible Activation Probability - returns P(X|Z). A bias column is assumed to be included.
*
* @param Z
* z (bias included)
* @return P(X|Z)
*/
public Matrix prob_X(final Matrix Z) {
Matrix X = new Matrix(MatrixUtils.sigma(Z.times(this.W.transpose()).getArray())); // (this is the activation function)
MatrixUtils.fillCol(X.getArray(), 0, 1.0); // fix bias - set first col to 1.0
return X;
}
/**
* Make W matrix of dimensions d+1 and h+1 (+1 for biases). Initialized from ~N(0,0.2) (seems to
* work better than ~N(0.0.01)) -- except biases (set to 0)
*
* @param d
* number of rows (visible units)
* @param h
* number of columns (hidden units)
* @param r
* for getting random rumbers
* @return W
*/
public static Matrix makeW(final int d, final int h, final Random r) {
double W_[][] = MatrixUtils.multiply(MatrixUtils.randn(d + 1, h + 1, r), 0.20); // ~ N(0.0,0.01)
MatrixUtils.fillRow(W_, 0, 0.0); // set the first row to 0 for bias
MatrixUtils.fillCol(W_, 0, 0.0); // set the first col to 0 for bias
return new Matrix(W_);
}
protected Matrix makeW(final int d, final int h) {
return makeW(d, h, this.m_R);
}
/**
* Initialize W, and make _dW (for momentum) of the same dimensions.
*
* @param X_
* X (only to know d = X_[0].length)
*/
private void initWeights(final double X_[][]) {
this.initWeights(X_[0].length, this.m_H);
}
/**
* Initialize W, and make _dW (for momentum) of the same dimensions.
*
* @param d
* number of visible units
* @param h
* number of hidden units
*/
private void initWeights(final int d, final int h) {
this.W = this.makeW(d, h);
this.dW_ = new Matrix(this.W.getRowDimension(), this.W.getColumnDimension()); // for momentum
}
/**
* Initialize W, and make _dW (for momentum) of the same dimensions.
*
* @param d
* number of visible units
*/
public void initWeights(final int d) {
this.initWeights(d, this.m_H);
}
/**
* Update - Carry out one epoch of CD, update W. We use dW_ to manage momentum. <br>
* TODO weight decay SHOULD NOT BE APPLIED TO BIASES
*
* @param X
* X
* @throws InterruptedException
*/
public void update(final Matrix X) throws InterruptedException {
Matrix CD = this.epoch(X);
Matrix dW = (CD.minusEquals(this.W.times(this.COST))).timesEquals(this.LEARNING_RATE); // with COST
this.W.plusEquals(dW.plus(this.dW_.timesEquals(this.MOMENTUM))); // with MOMENTUM.
this.dW_ = dW; // for the next update
}
/**
* Update - Carry out one epoch of CD, update W. <br>
* TODO combine with above fn.
*
* @param X
* X
* @param s
* multiply the gradient by this scalar
* @throws InterruptedException
*/
public void update(final Matrix X, final double s) throws InterruptedException {
Matrix CD = this.epoch(X);
Matrix dW = (CD.minusEquals(this.W.times(this.COST))).timesEquals(this.LEARNING_RATE); // with COST
dW = dW.times(s); // *scaling factor
this.W.plusEquals(dW.plus(this.dW_.timesEquals(this.MOMENTUM))); // with MOMENTUM.
this.dW_ = dW; // for the next update
}
/**
* Update - On raw data (with no bias column)
*
* @param X_
* raw double[][] data (with no bias column)
* @throws InterruptedException
*/
public void update(final double X_[][]) throws InterruptedException {
Matrix X = new Matrix(MatrixUtils.addBias(X_));
this.update(X);
}
/**
* Update - On raw data (with no bias column)
*
* @param x_
* raw double[] data (with no bias column)
* @throws InterruptedException
*/
public void update(final double x_[]) throws InterruptedException {
this.update(new double[][] { x_ });
}
/**
* Update - On raw data (with no bias column)
*
* @param x_
* raw double[] data (with no bias column)
* @param s
* multiply the gradient by this scalar
* @throws InterruptedException
*/
public void update(final double x_[], final double s) throws InterruptedException {
Matrix X = new Matrix(MatrixUtils.addBias(new double[][] { x_ }));
this.update(X, s);
}
/**
* Train - Setup and train the RBM on X, over m_E epochs.
*
* @param X_
* X
* @return the error (@TODO unnecessary)
*/
public double train(final double X_[][]) throws Exception {
this.initWeights(X_);
Matrix X = new Matrix(MatrixUtils.addBias(X_));
double _error = Double.MAX_VALUE; // prev error , necessary only when using m_V
// TRAIN FOR m_E EPOCHS.
for (int e = 0; e < this.m_E; e++) {
// BREAK OUT IF THE GRADIENT IS POSITIVE
if (this.m_V) {
double err_now = this.calculateError(X); // Retrieve error
if (_error < err_now) {
System.out.println("broken out @" + e);
break;
}
_error = err_now;
}
/*
* The update
*/
this.update(X);
}
return _error;
}
/**
* Train - Setup and batch-train the RBM on X. <br>
* TODO, above function train(X_) could really be trained with train(X_,N), so, should share code
* with train(X) <br>
* TODO, divide gradient by the size of the batch! (doing already? .. no)
*
* @param X_
* X
* @param batchSize
* the batch size
*/
public double train(double X_[][], final int batchSize) throws Exception {
this.initWeights(X_);
X_ = MatrixUtils.addBias(X_);
int N = X_.length; // N
if (batchSize == N) {
return this.train(X_);
}
int N_n = (int) Math.ceil(N * 1. / batchSize);// Number of batches
Matrix X_n[] = new Matrix[N_n];
for (int n = 0, i = 0; n < N; n += batchSize, i++) {
// @TODO, could save some small-time memory/speed here
X_n[i] = new Matrix(Arrays.copyOfRange(X_, n, Math.min(n + batchSize, N)));
}
for (int e = 0; e < this.m_E; e++) {
// @TODO could be random, see function below
for (Matrix X : X_n) {
this.update(X, 1. / N_n);
}
}
return 1.0;
}
/**
* Train - Setup and batch-train the RBM on X, with some random sampling involved. <br>
* TODO should share code with train(X)
*
* @param X_
* X
* @param batchSize
* the batch size
* @param r
* the randomness
*/
public double train(double X_[][], final int batchSize, final Random r) throws Exception {
this.initWeights(X_);
X_ = MatrixUtils.addBias(X_);
int N = X_.length; // N
int N_n = (int) Math.ceil(N * 1. / batchSize);// Number of batches
// @TODO select the batches randomly at each epoch
Matrix X_n[] = new Matrix[N_n];
for (int n = 0, i = 0; n < N; n += batchSize, i++) {
X_n[i] = new Matrix(Arrays.copyOfRange(X_, n, Math.min(n + batchSize, N)));
}
for (int e = 0; e < this.m_E; e++) {
for (int i = 0; i < N_n; i++) {
this.update(X_n[r.nextInt(N_n)]);
}
}
return 1.0;
}
/**
* Calculate the Error right now. <br>
* NOTE: this will take a few miliseconds longer than calculating directly in the epoch() loop
* (where we have to calculate X_down anyway). <br>
* TODO rename this function
*
* @param X
* X
* @return The error
*/
public double calculateError(final Matrix X) {
Matrix Z_up = this.prob_Z(X); // up @TODO replace with getZ(X,W), etc
Matrix X_down = this.prob_X(Z_up); // go down
// MSE
return MatrixUtils.meanSquaredError(X.getArray(), X_down.getArray()); // @note: this can take some milliseconds to calculate
}
/**
* Epoch - Run X through one epcho of CD of the RBM.
*
* @param X_0
* The input matrix (includes bias column).
* @return the contrastive divergence (CD) for this epoch.
*
* <verbatim> x_0 = x
*
* for k = 0,...,K-1 z_k = sample up x_k+1 = sample down
*
* e+ = pz|x_0 e- = pz|x_K
*
* CD = e+ - e- </verbatim>
*
* Note: should be binary for hidden states, can be probabilities for visible states.
* @throws InterruptedException
*/
public Matrix epoch(final Matrix X_0) throws InterruptedException {
if (Thread.currentThread().isInterrupted()) {
throw new InterruptedException("Thread got interrupted");
}
int N = X_0.getArray().length;
// POSITIVE
Matrix Z_0 = this.prob_Z(X_0); // sample up
Matrix E_pos = X_0.transpose().times(Z_0); // positive energy, H_1 * V_1
// NEGATIVE
Matrix X_1 = this.prob_X(Z_0); // go down -- can either sample down
// Matrix X_1 = Mat.threshold(prob_X(Z_0),0.5); // ... or just go down
Matrix pZ_1 = this.prob_Z(X_1); // go back up again
Matrix E_neg = X_1.transpose().times(pZ_1); // negative energy, P(Z_1) * X_1
// CALCULATE ERROR (Optional!)
// double _Err = Mat.meanSquaredError(X_0.getArray(),X_1.getArray()); // @note: this take some
// milliseconds to calculate
// System.out.println(""+_Err);
// CONTRASTIVE DIVERGENCE
Matrix CD = ((E_pos.minusEquals(E_neg)).times(1. / N)); // CD = difference between energies
return CD;
}
// SAME AS ABOVE, BUT USES SAMPLING INSTEAD OF RAW PROBABILITIES. DOESN'T SEEM TO WORK AS WELL.
public Matrix sample_epoch(final Matrix X_0) {
int N = X_0.getArray().length;
// POSITIVE
Matrix Z_0 = this.sample_Z(X_0); // sample up
Matrix E_pos = X_0.transpose().times(Z_0); // positive energy, H_1 * V_1
// NEGATIVE
Matrix X_1 = this.sample_X(Z_0); // go down -- can either sample down
// Matrix X_1 = Mat.threshold(prob_X(Z_0),0.5); // ... or just go down
Matrix pZ_1 = this.prob_Z(X_1); // go back up again
Matrix E_neg = X_1.transpose().times(pZ_1); // negative energy, P(Z_1) * X_1
// CALCULATE ERROR (Optional!)
double _Err = MatrixUtils.meanSquaredError(X_0.getArray(), X_1.getArray()); // @note: this take some milliseconds to calculate
System.out.println("" + _Err);
// CONTRASTIVE DIVERGENCE
Matrix CD = ((E_pos.minusEquals(E_neg)).times(1. / N)); // CD = difference between energies
return CD;
}
/*
* ********************************************************************************* Get / Set
* Parameters
**********************************************************************************/
public void setH(final int h) {
this.m_H = h;
}
public int getH() {
return this.m_H;
}
/**
* SetE - set the number of epochs (if n is negative, it means max epochs).
*/
public void setE(final int n) {
if (n < 0) {
this.m_V = true;
this.m_E = -n;
} else {
this.m_E = n;
}
}
public int getE() {
return this.m_E;
}
public void setLearningRate(final double r) {
this.LEARNING_RATE = r;
this.COST = 0.0002 * this.LEARNING_RATE;
}
public double getLearningRate() {
return this.LEARNING_RATE;
}
public void setMomentum(final double m) {
this.MOMENTUM = m;
}
public double getMomentum() {
return this.MOMENTUM;
}
public void setSeed(final int seed) {
this.m_R = new Random(seed);
}
/*
* ********************************************************************************* Get Weight
* Matrix(es)
**********************************************************************************/
public Matrix[] getWs() {
return new Matrix[] { this.W };
}
public Matrix getW() {
return this.W;
}
/**
* ToString - return a String representation of the weight Matrix defining this RBM.
*/
@Override
public String toString() {
Matrix W = this.getW();
return MatrixUtils.toString(W);
}
/**
* Main - do some test routines.
*/
public static void main(final String argv[]) throws Exception {
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/Run.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Run.java
* Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka;
import java.util.ArrayList;
import java.util.List;
import weka.core.Utils;
import weka.core.WekaPackageClassLoaderManager;
/**
* Helper class that executes Weka schemes from the command line. Performs
* Suffix matching on the scheme name entered by the user - e.g.<br>
* <br>
*
* java weka.Run NaiveBayes <br>
* <br>
*
* will prompt the user to choose among
* weka.classifiers.bayes.ComplementNaiveBayes,
* weka.classifiers.bayes.NaiveBayes,
* weka.classifiers.bayes.NaiveBayesMultinomial,
* weka.classifiers.bayes.NaiveBayesMultinomialUpdateable,
* weka.classifiers.bayes.NaiveBayesSimple,
* weka.classifiers.bayes.NaiveBayesUpdateable
*
* @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
* @version $Revision$
*
*/
public class Run {

	/** The kinds of scheme this runner knows how to execute. */
	public enum SchemeType {
		CLASSIFIER("classifier"), CLUSTERER("clusterer"), ASSOCIATOR(
			"association rules"), ATTRIBUTE_SELECTION("attribute selection"), FILTER(
			"filter"), LOADER("loader"), SAVER("saver"), DATAGENERATOR(
			"data generator"), COMMANDLINE("general commandline runnable");

		private final String m_stringVal;

		SchemeType(String name) {
			m_stringVal = name;
		}

		@Override
		public String toString() {
			return m_stringVal;
		}
	}

	/**
	 * Find a scheme that matches the supplied suffix
	 *
	 * @param classType matching schemes must be of this class type (null = any)
	 * @param schemeToFind the name of the scheme to find
	 * @param matchAnywhere if true, the name is matched anywhere in the
	 *          non-package part of candidate schemes
	 * @param notJustRunnables if true, do not restrict matches to runnable
	 *          scheme types
	 * @return a list of fully qualified matching scheme names
	 */
	public static List<String> findSchemeMatch(Class<?> classType,
		String schemeToFind, boolean matchAnywhere, boolean notJustRunnables) {
		weka.core.ClassDiscovery.clearCache();
		ArrayList<String> matches = weka.core.ClassDiscovery.find(schemeToFind);
		ArrayList<String> prunedMatches = new ArrayList<String>();
		// prune list for anything that isn't a runnable scheme
		for (int i = 0; i < matches.size(); i++) {
			if (matches.get(i).endsWith(schemeToFind) || matchAnywhere) {
				try {
					Object scheme = WekaPackageClassLoaderManager.objectForName(matches.get(i));
					if (classType == null
						|| classType.isAssignableFrom(scheme.getClass())) {
						if (notJustRunnables
							|| scheme instanceof weka.classifiers.Classifier
							|| scheme instanceof weka.clusterers.Clusterer
							|| scheme instanceof weka.associations.Associator
							|| scheme instanceof weka.attributeSelection.ASEvaluation
							|| scheme instanceof weka.filters.Filter
							|| scheme instanceof weka.core.converters.AbstractFileLoader
							|| scheme instanceof weka.core.converters.AbstractFileSaver
							|| scheme instanceof weka.datagenerators.DataGenerator
							|| scheme instanceof weka.core.CommandlineRunnable) {
							prunedMatches.add(matches.get(i));
						}
					}
				} catch (Exception ex) {
					// ignore any classes that we can't instantiate due to no no-arg
					// constructor
				}
			}
		}
		return prunedMatches;
	}

	/**
	 * Find a scheme that matches the supplied suffix
	 *
	 * @param schemeToFind the name of the scheme to find
	 * @param matchAnywhere if true, the name is matched anywhere in the
	 *          non-package part of candidate schemes
	 * @return a list of fully qualified matching scheme names
	 */
	public static List<String> findSchemeMatch(String schemeToFind,
		boolean matchAnywhere) {
		return findSchemeMatch(null, schemeToFind, matchAnywhere, false);
	}

	/**
	 * Main method for this class. -help or -h prints usage info.
	 *
	 * @param args command-line arguments: flags followed by a scheme name and
	 *          that scheme's own options
	 */
	public static void main(String[] args) {
		System.setProperty("apple.awt.UIElement", "true");
		try {
			if (args.length == 0 || args[0].equalsIgnoreCase("-h")
				|| args[0].equalsIgnoreCase("-help")) {
				System.err
					.println("Usage:\n\tweka.Run [-no-scan] [-no-load] [-match-anywhere] <scheme name [scheme options]>");
				return;
			}
			boolean noScan = false;
			boolean noLoad = false;
			boolean matchAnywhere = false;
			boolean dontPromptIfMultipleMatches = false;
			if (Utils.getFlag("list-packages", args)) {
				weka.core.WekaPackageManager.loadPackages(true, true, false);
				return;
			}
			int schemeIndex = 0;
			if (Utils.getFlag("no-load", args)) {
				noLoad = true;
				schemeIndex++;
			}
			if (Utils.getFlag("no-scan", args)) {
				noScan = true;
				schemeIndex++;
			}
			if (Utils.getFlag("match-anywhere", args)) {
				matchAnywhere = true;
				schemeIndex++;
			}
			if (Utils.getFlag("do-not-prompt-if-multiple-matches", args)) {
				dontPromptIfMultipleMatches = true;
				schemeIndex++;
			}
			if (!noLoad) {
				weka.core.WekaPackageManager.loadPackages(false, true, false);
			}
			String schemeToRun = null;
			String[] options = null;
			if (schemeIndex >= args.length) {
				System.err.println("No scheme name given.");
				return;
			}
			schemeToRun = args[schemeIndex];
			options = new String[args.length - schemeIndex - 1];
			if (options.length > 0) {
				System.arraycopy(args, schemeIndex + 1, options, 0, options.length);
			}
			if (!noScan) {
				List<String> prunedMatches =
					findSchemeMatch(schemeToRun, matchAnywhere);
				if (prunedMatches.size() == 0) {
					System.err.println("Can't find scheme " + schemeToRun
						+ ", or it is not runnable.");
					return;
				} else if (prunedMatches.size() > 1) {
					if (dontPromptIfMultipleMatches) {
						System.out.println("There are multiple matches:");
						for (int i = 0; i < prunedMatches.size(); i++) {
							System.out.println("\t" + (i + 1) + ") " + prunedMatches.get(i));
						}
						System.out.println("\nPlease make your scheme name more specific "
							+ "(i.e. qualify it with more of the package name).");
						return;
					}
					java.io.BufferedReader br =
						new java.io.BufferedReader(new java.io.InputStreamReader(System.in));
					boolean done = false;
					while (!done) {
						System.out.println("Select a scheme to run, or <return> to exit:");
						for (int i = 0; i < prunedMatches.size(); i++) {
							System.out.println("\t" + (i + 1) + ") " + prunedMatches.get(i));
						}
						System.out.print("\nEnter a number > ");
						String choice = null;
						int schemeNumber = 0;
						try {
							choice = br.readLine();
							// BUG FIX: readLine() returns null at end-of-stream; treat it like
							// an empty line (exit) rather than NPE-ing on choice.equals("")
							if (choice == null || choice.isEmpty()) {
								return;
							} else {
								schemeNumber = Integer.parseInt(choice);
								schemeNumber--;
								if (schemeNumber >= 0 && schemeNumber < prunedMatches.size()) {
									schemeToRun = prunedMatches.get(schemeNumber);
									done = true;
								}
							}
						} catch (java.io.IOException | NumberFormatException ex) {
							// BUG FIX: non-numeric input used to escape the loop via the outer
							// catch and abort; now we just re-prompt
						}
					}
				} else {
					schemeToRun = prunedMatches.get(0);
				}
			}
			Object scheme = null;
			try {
				scheme = WekaPackageClassLoaderManager.objectForName(schemeToRun);
			} catch (Exception ex) {
				System.err.println(schemeToRun + " is not runnable!\n"
					+ ex.getMessage());
				return;
			}
			// now see which interfaces/classes this scheme implements/extends
			ArrayList<SchemeType> types = new ArrayList<SchemeType>();
			if (scheme instanceof weka.core.CommandlineRunnable) {
				types.add(SchemeType.COMMANDLINE);
			} else {
				if (scheme instanceof weka.classifiers.Classifier) {
					types.add(SchemeType.CLASSIFIER);
				}
				if (scheme instanceof weka.clusterers.Clusterer) {
					types.add(SchemeType.CLUSTERER);
				}
				if (scheme instanceof weka.associations.Associator) {
					types.add(SchemeType.ASSOCIATOR);
				}
				if (scheme instanceof weka.attributeSelection.ASEvaluation) {
					types.add(SchemeType.ATTRIBUTE_SELECTION);
				}
				if (scheme instanceof weka.filters.Filter) {
					types.add(SchemeType.FILTER);
				}
				if (scheme instanceof weka.core.converters.AbstractFileLoader) {
					types.add(SchemeType.LOADER);
				}
				if (scheme instanceof weka.core.converters.AbstractFileSaver) {
					types.add(SchemeType.SAVER);
				}
				if (scheme instanceof weka.datagenerators.DataGenerator) {
					types.add(SchemeType.DATAGENERATOR);
				}
			}
			SchemeType selectedType = null;
			if (types.size() == 0) {
				System.err.println("" + schemeToRun + " is not runnable!");
				return;
			}
			if (types.size() == 1) {
				selectedType = types.get(0);
			} else {
				java.io.BufferedReader br =
					new java.io.BufferedReader(new java.io.InputStreamReader(System.in));
				boolean done = false;
				while (!done) {
					System.out.println("" + schemeToRun
						+ " can be executed as any of the following:");
					for (int i = 0; i < types.size(); i++) {
						System.out.println("\t" + (i + 1) + ") " + types.get(i));
					}
					System.out.print("\nEnter a number > ");
					String choice = null;
					int typeNumber = 0;
					try {
						choice = br.readLine();
						// BUG FIX: null-safe end-of-stream handling (see above)
						if (choice == null || choice.isEmpty()) {
							return;
						} else {
							typeNumber = Integer.parseInt(choice);
							typeNumber--;
							if (typeNumber >= 0 && typeNumber < types.size()) {
								selectedType = types.get(typeNumber);
								done = true;
							}
						}
					} catch (java.io.IOException | NumberFormatException ex) {
						// BUG FIX: re-prompt on bad input instead of aborting
					}
				}
			}
			if (selectedType == SchemeType.CLASSIFIER) {
				weka.classifiers.AbstractClassifier.runClassifier(
					(weka.classifiers.Classifier) scheme, options);
			} else if (selectedType == SchemeType.CLUSTERER) {
				weka.clusterers.AbstractClusterer.runClusterer(
					(weka.clusterers.Clusterer) scheme, options);
			} else if (selectedType == SchemeType.ATTRIBUTE_SELECTION) {
				weka.attributeSelection.ASEvaluation.runEvaluator(
					(weka.attributeSelection.ASEvaluation) scheme, options);
			} else if (selectedType == SchemeType.ASSOCIATOR) {
				weka.associations.AbstractAssociator.runAssociator(
					(weka.associations.Associator) scheme, options);
			} else if (selectedType == SchemeType.FILTER) {
				weka.filters.Filter.runFilter((weka.filters.Filter) scheme, options);
			} else if (selectedType == SchemeType.LOADER) {
				weka.core.converters.AbstractFileLoader.runFileLoader(
					(weka.core.converters.AbstractFileLoader) scheme, options);
			} else if (selectedType == SchemeType.SAVER) {
				weka.core.converters.AbstractFileSaver.runFileSaver(
					(weka.core.converters.AbstractFileSaver) scheme, options);
			} else if (selectedType == SchemeType.DATAGENERATOR) {
				weka.datagenerators.DataGenerator.runDataGenerator(
					(weka.datagenerators.DataGenerator) scheme, options);
			} else if (selectedType == SchemeType.COMMANDLINE) {
				((weka.core.CommandlineRunnable) scheme).run(scheme, options);
			}
		} catch (Exception e) {
			if (((e.getMessage() != null) && (e.getMessage().indexOf(
				"General options") == -1))
				|| (e.getMessage() == null)) {
				e.printStackTrace();
			} else {
				System.err.println(e.getMessage());
			}
		}
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/associations/AbstractAssociator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Associator.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.associations;
import weka.core.Capabilities;
import weka.core.CapabilitiesHandler;
import weka.core.CapabilitiesIgnorer;
import weka.core.CommandlineRunnable;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.SerializedObject;
import weka.core.Utils;
import java.io.Serializable;
import java.util.Enumeration;
import java.util.Vector;
/**
* Abstract scheme for learning associations. All schemes for learning
* associations implemement this class
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public abstract class AbstractAssociator
  implements Cloneable, Associator, Serializable, CapabilitiesHandler,
  CapabilitiesIgnorer, RevisionHandler, OptionHandler, CommandlineRunnable {
  /** for serialization */
  private static final long serialVersionUID = -3017644543382432070L;
  /** Whether capabilities should not be checked */
  protected boolean m_DoNotCheckCapabilities = false;
  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    // Collects options declared anywhere between the concrete class and
    // AbstractAssociator in the class hierarchy.
    Vector<Option> newVector = Option
      .listOptionsForClassHierarchy(this.getClass(), AbstractAssociator.class);
    return newVector.elements();
  }
  /**
   * Parses a given list of options.
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  @Override
  public void setOptions(String[] options) throws Exception {
    Option.setOptionsForHierarchy(options, this, AbstractAssociator.class);
  }
  /**
   * Gets the current settings of the associator
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    Vector<String> options = new Vector<String>();
    for (String s : Option.getOptionsForHierarchy(this,
      AbstractAssociator.class)) {
      options.add(s);
    }
    return options.toArray(new String[0]);
  }
  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String doNotCheckCapabilitiesTipText() {
    return "If set, associator capabilities are not checked before associator is built"
      + " (Use with caution to reduce runtime).";
  }
  /**
   * Set whether not to check capabilities.
   *
   * @param doNotCheckCapabilities true if capabilities are not to be checked.
   */
  public void setDoNotCheckCapabilities(boolean doNotCheckCapabilities) {
    m_DoNotCheckCapabilities = doNotCheckCapabilities;
  }
  /**
   * Get whether capabilities checking is turned off.
   *
   * @return true if capabilities checking is turned off.
   */
  public boolean getDoNotCheckCapabilities() {
    return m_DoNotCheckCapabilities;
  }
  /**
   * Creates a new instance of a associator given it's class name and (optional)
   * arguments to pass to it's setOptions method. If the associator implements
   * OptionHandler and the options parameter is non-null, the associator will
   * have it's options set.
   *
   * @param associatorName the fully qualified class name of the associator
   * @param options an array of options suitable for passing to setOptions. May
   *          be null.
   * @return the newly created associator, ready for use.
   * @exception Exception if the associator name is invalid, or the options
   *              supplied are not acceptable to the associator
   */
  public static Associator forName(String associatorName, String[] options)
    throws Exception {
    return (Associator) Utils.forName(Associator.class, associatorName,
      options);
  }
  /**
   * Creates a deep copy of the given associator using serialization.
   *
   * @param model the associator to copy
   * @return a deep copy of the associator
   * @exception Exception if an error occurs
   */
  public static Associator makeCopy(Associator model) throws Exception {
    // Serialization round-trip yields a deep copy of the whole object graph.
    return (Associator) new SerializedObject(model).getObject();
  }
  /**
   * Creates copies of the current associator. Note that this method now uses
   * Serialization to perform a deep copy, so the Associator object must be
   * fully Serializable. Any currently built model will now be copied as well.
   *
   * @param model an example associator to copy
   * @param num the number of associators copies to create.
   * @return an array of associators.
   * @exception Exception if an error occurs
   */
  public static Associator[] makeCopies(Associator model, int num)
    throws Exception {
    if (model == null) {
      throw new Exception("No model associator set");
    }
    Associator[] associators = new Associator[num];
    // Serialize once, deserialize num times - cheaper than num round-trips.
    SerializedObject so = new SerializedObject(model);
    for (int i = 0; i < associators.length; i++) {
      associators[i] = (Associator) so.getObject();
    }
    return associators;
  }
  /**
   * Returns the Capabilities of this associator. Maximally permissive
   * capabilities are allowed by default. Derived associators should override
   * this method and first disable all capabilities and then enable just those
   * capabilities that make sense for the scheme.
   *
   * @return the capabilities of this object
   * @see Capabilities
   */
  public Capabilities getCapabilities() {
    Capabilities defaultC = new Capabilities(this);
    defaultC.enableAll();
    return defaultC;
  }
  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
  /**
   * runs the associator with the given commandline options
   *
   * @param associator the associator to run
   * @param options the commandline options
   */
  public static void runAssociator(Associator associator, String[] options) {
    // NOTE(review): when invoked via run(Object, String[]) below, pre- and
    // postExecution() are triggered both there and here, i.e. twice for a
    // CommandlineRunnable associator - confirm this is intentional.
    try {
      if (associator instanceof CommandlineRunnable) {
        ((CommandlineRunnable)associator).preExecution();
      }
      System.out.println(AssociatorEvaluation.evaluate(associator, options));
    } catch (Exception e) {
      // A message containing "General options" is just the usage/help text,
      // so print it without a stack trace.
      if ((e.getMessage() != null)
        && (e.getMessage().indexOf("General options") == -1))
        e.printStackTrace();
      else
        System.err.println(e.getMessage());
    }
    try {
      if (associator instanceof CommandlineRunnable) {
        ((CommandlineRunnable) associator).postExecution();
      }
    } catch (Exception ex) {
      ex.printStackTrace();
    }
  }
  /**
   * Perform any setup stuff that might need to happen before commandline
   * execution. Subclasses should override if they need to do something here
   *
   * @throws Exception if a problem occurs during setup
   */
  @Override
  public void preExecution() throws Exception {
  }
  /**
   * Execute the supplied object. Subclasses need to override this method.
   *
   * @param toRun the object to execute
   * @param options any options to pass to the object
   * @throws Exception if a problem occurs
   */
  @Override
  public void run(Object toRun, String[] options) throws Exception {
    if (!(toRun instanceof Associator)) {
      throw new IllegalArgumentException(
        "Object to run is not an instance of Associator!");
    }
    // preExecution()/postExecution() are called on this instance; toRun is
    // typically the same object when driven from the command line.
    preExecution();
    runAssociator((Associator) toRun, options);
    postExecution();
  }
  /**
   * Perform any teardown stuff that might need to happen after execution.
   * Subclasses should override if they need to do something here
   *
   * @throws Exception if a problem occurs during teardown
   */
  @Override
  public void postExecution() throws Exception {
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/associations/Apriori.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Apriori.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.associations;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Hashtable;
import java.util.List;
import java.util.Vector;
import weka.core.AttributeStats;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.SelectedTag;
import weka.core.Tag;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.WekaEnumeration;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.Remove;
/**
* <!-- globalinfo-start --> Class implementing an Apriori-type algorithm.
* Iteratively reduces the minimum support until it finds the required number of
* rules with the given minimum confidence.<br/>
* The algorithm has an option to mine class association rules. It is adapted as
* explained in the second reference.<br/>
* <br/>
* For more information see:<br/>
* <br/>
* R. Agrawal, R. Srikant: Fast Algorithms for Mining Association Rules in Large
* Databases. In: 20th International Conference on Very Large Data Bases,
* 478-499, 1994.<br/>
* <br/>
* Bing Liu, Wynne Hsu, Yiming Ma: Integrating Classification and Association
* Rule Mining. In: Fourth International Conference on Knowledge Discovery and
* Data Mining, 80-86, 1998.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- technical-bibtex-start --> BibTeX:
*
* <pre>
* @inproceedings{Agrawal1994,
* author = {R. Agrawal and R. Srikant},
* booktitle = {20th International Conference on Very Large Data Bases},
* pages = {478-499},
* publisher = {Morgan Kaufmann, Los Altos, CA},
* title = {Fast Algorithms for Mining Association Rules in Large Databases},
* year = {1994}
* }
*
* @inproceedings{Liu1998,
* author = {Bing Liu and Wynne Hsu and Yiming Ma},
* booktitle = {Fourth International Conference on Knowledge Discovery and Data Mining},
* pages = {80-86},
* publisher = {AAAI Press},
* title = {Integrating Classification and Association Rule Mining},
* year = {1998}
* }
* </pre>
* <p/>
* <!-- technical-bibtex-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -N <required number of rules output>
* The required number of rules. (default = 10)
* </pre>
*
* <pre>
* -T <0=confidence | 1=lift | 2=leverage | 3=Conviction>
* The metric type by which to rank rules. (default = confidence)
* </pre>
*
* <pre>
* -C <minimum metric score of a rule>
* The minimum confidence of a rule. (default = 0.9)
* </pre>
*
* <pre>
* -D <delta for minimum support>
* The delta by which the minimum support is decreased in
* each iteration. (default = 0.05)
* </pre>
*
* <pre>
* -U <upper bound for minimum support>
* Upper bound for minimum support. (default = 1.0)
* </pre>
*
* <pre>
* -M <lower bound for minimum support>
* The lower bound for the minimum support. (default = 0.1)
* </pre>
*
* <pre>
* -S <significance level>
* If used, rules are tested for significance at
* the given level. Slower. (default = no significance testing)
* </pre>
*
* <pre>
* -I
* If set the itemsets found are also output. (default = no)
* </pre>
*
* <pre>
* -R
* Remove columns that contain all missing values (default = no)
* </pre>
*
* <pre>
* -V
* Report progress iteratively. (default = no)
* </pre>
*
* <pre>
* -A
* If set class association rules are mined. (default = no)
* </pre>
*
* <pre>
* -Z
* Treat zero (i.e. first value of nominal attributes) as missing
* </pre>
*
* <pre>
* -B <toString delimiters>
* If used, two characters to use as rule delimiters
* in the result of toString: the first to delimit fields,
* the second to delimit items within fields.
* (default = traditional toString result)
* </pre>
*
* <pre>
* -c <the class index>
* The class index. (default = last)
* </pre>
*
* <!-- options-end -->
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @author Stefan Mutter (mutter@cs.waikato.ac.nz)
* @version $Revision$
*/
public class Apriori extends AbstractAssociator implements OptionHandler,
AssociationRulesProducer, CARuleMiner, TechnicalInformationHandler {
/** for serialization */
static final long serialVersionUID = 3277498842319212687L;
/** The minimum support. */
protected double m_minSupport;
/** The upper bound on the support */
protected double m_upperBoundMinSupport;
/** The lower bound for the minimum support. */
protected double m_lowerBoundMinSupport;
/** Metric type: Confidence */
protected static final int CONFIDENCE = 0;
/** Metric type: Lift */
protected static final int LIFT = 1;
/** Metric type: Leverage */
protected static final int LEVERAGE = 2;
/** Metric type: Conviction */
protected static final int CONVICTION = 3;
/** Metric types. */
public static final Tag[] TAGS_SELECTION = {
new Tag(CONFIDENCE, "Confidence"), new Tag(LIFT, "Lift"),
new Tag(LEVERAGE, "Leverage"), new Tag(CONVICTION, "Conviction") };
/** The selected metric type. */
protected int m_metricType = CONFIDENCE;
/** The minimum metric score. */
protected double m_minMetric;
/** The maximum number of rules that are output. */
protected int m_numRules;
/** Delta by which m_minSupport is decreased in each iteration. */
protected double m_delta;
/** Significance level for optional significance test. */
protected double m_significanceLevel;
/** Number of cycles used before required number of rules was one. */
protected int m_cycles;
/** The set of all sets of itemsets L. */
protected ArrayList<ArrayList<Object>> m_Ls;
/** The same information stored in hash tables. */
protected ArrayList<Hashtable<ItemSet, Integer>> m_hashtables;
/** The list of all generated rules. */
protected ArrayList<Object>[] m_allTheRules;
/**
* The instances (transactions) to be used for generating the association
* rules.
*/
protected Instances m_instances;
/** Output itemsets found? */
protected boolean m_outputItemSets;
/** Remove columns with all missing values */
protected boolean m_removeMissingCols;
/** Report progress iteratively */
protected boolean m_verbose;
/** Only the class attribute of all Instances. */
protected Instances m_onlyClass;
/** The class index. */
protected int m_classIndex;
/** Flag indicating whether class association rules are mined. */
protected boolean m_car;
/**
* Treat zeros as missing (rather than a value in their own right)
*/
protected boolean m_treatZeroAsMissing = false;
/**
* ToString delimiters, if any
*/
protected String m_toStringDelimiters = null;
/**
* Returns a string describing this associator
*
* @return a description of the evaluator suitable for displaying in the
* explorer/experimenter gui
*/
public String globalInfo() {
return "Class implementing an Apriori-type algorithm. Iteratively reduces "
+ "the minimum support until it finds the required number of rules with "
+ "the given minimum confidence.\n"
+ "The algorithm has an option to mine class association rules. It is "
+ "adapted as explained in the second reference.\n\n"
+ "For more information see:\n\n" + getTechnicalInformation().toString();
}
/**
* Returns an instance of a TechnicalInformation object, containing detailed
* information about the technical background of this class, e.g., paper
* reference or book this class is based on.
*
* @return the technical information about this class
*/
@Override
public TechnicalInformation getTechnicalInformation() {
TechnicalInformation result;
TechnicalInformation additional;
result = new TechnicalInformation(Type.INPROCEEDINGS);
result.setValue(Field.AUTHOR, "R. Agrawal and R. Srikant");
result.setValue(Field.TITLE,
"Fast Algorithms for Mining Association Rules in Large Databases");
result.setValue(Field.BOOKTITLE,
"20th International Conference on Very Large Data Bases");
result.setValue(Field.YEAR, "1994");
result.setValue(Field.PAGES, "478-499");
result.setValue(Field.PUBLISHER, "Morgan Kaufmann, Los Altos, CA");
additional = result.add(Type.INPROCEEDINGS);
additional.setValue(Field.AUTHOR, "Bing Liu and Wynne Hsu and Yiming Ma");
additional.setValue(Field.TITLE,
"Integrating Classification and Association Rule Mining");
additional.setValue(Field.BOOKTITLE,
"Fourth International Conference on Knowledge Discovery and Data Mining");
additional.setValue(Field.YEAR, "1998");
additional.setValue(Field.PAGES, "80-86");
additional.setValue(Field.PUBLISHER, "AAAI Press");
return result;
}
/**
* Constructor that allows to sets default values for the minimum confidence
* and the maximum number of rules the minimum confidence.
*/
  public Apriori() {
    // All option fields receive their documented default values.
    resetOptions();
  }
/**
* Resets the options to the default values.
*/
public void resetOptions() {
m_removeMissingCols = false;
m_verbose = false;
m_delta = 0.05;
m_minMetric = 0.90;
m_numRules = 10;
m_lowerBoundMinSupport = 0.1;
m_upperBoundMinSupport = 1.0;
m_significanceLevel = -1;
m_outputItemSets = false;
m_car = false;
m_classIndex = -1;
m_treatZeroAsMissing = false;
m_metricType = CONFIDENCE;
}
/**
* Removes columns that are all missing from the data
*
* @param instances the instances
* @return a new set of instances with all missing columns removed
* @throws Exception if something goes wrong
*/
protected Instances removeMissingColumns(Instances instances)
throws Exception {
int numInstances = instances.numInstances();
StringBuffer deleteString = new StringBuffer();
int removeCount = 0;
boolean first = true;
int maxCount = 0;
for (int i = 0; i < instances.numAttributes(); i++) {
AttributeStats as = instances.attributeStats(i);
if (m_upperBoundMinSupport == 1.0 && maxCount != numInstances) {
// see if we can decrease this by looking for the most frequent value
int[] counts = as.nominalCounts;
if (counts[Utils.maxIndex(counts)] > maxCount) {
maxCount = counts[Utils.maxIndex(counts)];
}
}
if (as.missingCount == numInstances) {
if (first) {
deleteString.append((i + 1));
first = false;
} else {
deleteString.append("," + (i + 1));
}
removeCount++;
}
}
if (m_verbose) {
System.err.println("Removed : " + removeCount
+ " columns with all missing " + "values.");
}
if (m_upperBoundMinSupport == 1.0 && maxCount != numInstances) {
m_upperBoundMinSupport = (double) maxCount / (double) numInstances;
if (m_verbose) {
System.err.println("Setting upper bound min support to : "
+ m_upperBoundMinSupport);
}
}
if (deleteString.toString().length() > 0) {
Remove af = new Remove();
af.setAttributeIndices(deleteString.toString());
af.setInvertSelection(false);
af.setInputFormat(instances);
Instances newInst = Filter.useFilter(instances, af);
return newInst;
}
return instances;
}
/**
* Returns default capabilities of the classifier.
*
* @return the capabilities of this classifier
*/
@Override
public Capabilities getCapabilities() {
Capabilities result = super.getCapabilities();
result.disableAll();
// enable what we can handle
// attributes
result.enable(Capability.NOMINAL_ATTRIBUTES);
result.enable(Capability.MISSING_VALUES);
// class (can handle a nominal class if CAR rules are selected). This
result.enable(Capability.NO_CLASS);
result.enable(Capability.NOMINAL_CLASS);
result.enable(Capability.MISSING_CLASS_VALUES);
return result;
}
/**
* Method that generates all large itemsets with a minimum support, and from
* these all association rules with a minimum confidence.
*
* @param instances the instances to be used for generating the associations
* @throws Exception if rules can't be built successfully
*/
  @SuppressWarnings("unchecked")
  @Override
  public void buildAssociations(Instances instances) throws Exception {
    double[] confidences, supports;
    int[] indices;
    ArrayList<Object>[] sortedRuleSet;
    double necSupport = 0;
    // work on a copy so the caller's data is never modified
    instances = new Instances(instances);
    if (m_removeMissingCols) {
      instances = removeMissingColumns(instances);
    }
    if (m_car && m_metricType != CONFIDENCE) {
      throw new Exception("For CAR-Mining metric type has to be confidence!");
    }
    // only set class index if CAR is requested
    if (m_car) {
      if (m_classIndex == -1) {
        instances.setClassIndex(instances.numAttributes() - 1);
      } else if (m_classIndex <= instances.numAttributes() && m_classIndex > 0) {
        // m_classIndex is 1-based on input
        instances.setClassIndex(m_classIndex - 1);
      } else {
        throw new Exception("Invalid class index.");
      }
    }
    // can associator handle the data?
    getCapabilities().testWithFail(instances);
    m_cycles = 0;
    // make sure that the lower bound is equal to at least one instance
    double lowerBoundMinSupportToUse = (m_lowerBoundMinSupport
      * instances.numInstances() < 1.0) ? 1.0 / instances.numInstances()
      : m_lowerBoundMinSupport;
    if (m_car) {
      // m_instances does not contain the class attribute
      m_instances = LabeledItemSet.divide(instances, false);
      // m_onlyClass contains only the class attribute
      m_onlyClass = LabeledItemSet.divide(instances, true);
    } else {
      m_instances = instances;
    }
    if (m_car && m_numRules == Integer.MAX_VALUE) {
      // Set desired minimum support
      m_minSupport = lowerBoundMinSupportToUse;
    } else {
      // Decrease minimum support until desired number of rules found.
      // m_minSupport = m_upperBoundMinSupport - m_delta;
      m_minSupport = 1.0 - m_delta;
      m_minSupport = (m_minSupport < lowerBoundMinSupportToUse) ? lowerBoundMinSupportToUse
        : m_minSupport;
    }
    // Main loop: repeatedly mine at the current minimum support, lowering it
    // by m_delta each cycle until enough rules are found or the lower bound
    // is reached.
    do {
      // Reserve space for variables
      m_Ls = new ArrayList<ArrayList<Object>>();
      m_hashtables = new ArrayList<Hashtable<ItemSet, Integer>>();
      // slots 0..2: premise, consequence, primary metric;
      // slots 3..5: additional metrics (unused in CAR mode)
      m_allTheRules = new ArrayList[6];
      m_allTheRules[0] = new ArrayList<Object>();
      m_allTheRules[1] = new ArrayList<Object>();
      m_allTheRules[2] = new ArrayList<Object>();
      // if (m_metricType != CONFIDENCE || m_significanceLevel != -1) {
      m_allTheRules[3] = new ArrayList<Object>();
      m_allTheRules[4] = new ArrayList<Object>();
      m_allTheRules[5] = new ArrayList<Object>();
      // }
      sortedRuleSet = new ArrayList[6];
      sortedRuleSet[0] = new ArrayList<Object>();
      sortedRuleSet[1] = new ArrayList<Object>();
      sortedRuleSet[2] = new ArrayList<Object>();
      // if (m_metricType != CONFIDENCE || m_significanceLevel != -1) {
      sortedRuleSet[3] = new ArrayList<Object>();
      sortedRuleSet[4] = new ArrayList<Object>();
      sortedRuleSet[5] = new ArrayList<Object>();
      // }
      if (!m_car) {
        // Find large itemsets and rules
        findLargeItemSets();
        if (m_significanceLevel != -1 || m_metricType != CONFIDENCE) {
          findRulesBruteForce();
        } else {
          findRulesQuickly();
        }
      } else {
        findLargeCarItemSets();
        findCarRulesQuickly();
      }
      // prune rules for upper bound min support
      if (m_upperBoundMinSupport < 1.0) {
        pruneRulesForUpperBoundSupport();
      }
      // Sort rules according to their support
      /*
       * supports = new double[m_allTheRules[2].size()]; for (int i = 0; i <
       * m_allTheRules[2].size(); i++) supports[i] =
       * (double)((AprioriItemSet)m_allTheRules[1].elementAt(i)).support();
       * indices = Utils.stableSort(supports); for (int i = 0; i <
       * m_allTheRules[2].size(); i++) {
       * sortedRuleSet[0].add(m_allTheRules[0].get(indices[i]));
       * sortedRuleSet[1].add(m_allTheRules[1].get(indices[i]));
       * sortedRuleSet[2].add(m_allTheRules[2].get(indices[i])); if
       * (m_metricType != CONFIDENCE || m_significanceLevel != -1) {
       * sortedRuleSet[3].add(m_allTheRules[3].get(indices[i]));
       * sortedRuleSet[4].add(m_allTheRules[4].get(indices[i]));
       * sortedRuleSet[5].add(m_allTheRules[5].get(indices[i])); } }
       */
      // negated supports give a descending order from the ascending sort
      int j = m_allTheRules[2].size() - 1;
      supports = new double[m_allTheRules[2].size()];
      for (int i = 0; i < (j + 1); i++) {
        supports[j - i] = ((double) ((ItemSet) m_allTheRules[1].get(j - i))
          .support()) * (-1);
      }
      indices = Utils.stableSort(supports);
      for (int i = 0; i < (j + 1); i++) {
        sortedRuleSet[0].add(m_allTheRules[0].get(indices[j - i]));
        sortedRuleSet[1].add(m_allTheRules[1].get(indices[j - i]));
        sortedRuleSet[2].add(m_allTheRules[2].get(indices[j - i]));
        if (!m_car) {
          // if (m_metricType != CONFIDENCE || m_significanceLevel != -1) {
          sortedRuleSet[3].add(m_allTheRules[3].get(indices[j - i]));
          sortedRuleSet[4].add(m_allTheRules[4].get(indices[j - i]));
          sortedRuleSet[5].add(m_allTheRules[5].get(indices[j - i]));
        }
        // }
      }
      // Sort rules according to their confidence
      m_allTheRules[0].clear();
      m_allTheRules[1].clear();
      m_allTheRules[2].clear();
      // if (m_metricType != CONFIDENCE || m_significanceLevel != -1) {
      m_allTheRules[3].clear();
      m_allTheRules[4].clear();
      m_allTheRules[5].clear();
      // }
      confidences = new double[sortedRuleSet[2].size()];
      // sortType picks the column of the selected ranking metric
      int sortType = 2 + m_metricType;
      for (int i = 0; i < sortedRuleSet[2].size(); i++) {
        confidences[i] = ((Double) sortedRuleSet[sortType].get(i))
          .doubleValue();
      }
      indices = Utils.stableSort(confidences);
      // keep only the top m_numRules rules, best metric first
      for (int i = sortedRuleSet[0].size() - 1; (i >= (sortedRuleSet[0].size() - m_numRules))
        && (i >= 0); i--) {
        m_allTheRules[0].add(sortedRuleSet[0].get(indices[i]));
        m_allTheRules[1].add(sortedRuleSet[1].get(indices[i]));
        m_allTheRules[2].add(sortedRuleSet[2].get(indices[i]));
        // if (m_metricType != CONFIDENCE || m_significanceLevel != -1) {
        if (!m_car) {
          m_allTheRules[3].add(sortedRuleSet[3].get(indices[i]));
          m_allTheRules[4].add(sortedRuleSet[4].get(indices[i]));
          m_allTheRules[5].add(sortedRuleSet[5].get(indices[i]));
        }
        // }
      }
      if (m_verbose) {
        if (m_Ls.size() > 1) {
          System.out.println(toString());
        }
      }
      // lower the minimum support for the next cycle, clamping at the bound
      if (m_minSupport == lowerBoundMinSupportToUse
        || m_minSupport - m_delta > lowerBoundMinSupportToUse) {
        m_minSupport -= m_delta;
      } else {
        m_minSupport = lowerBoundMinSupportToUse;
      }
      necSupport = Math.rint(m_minSupport * m_instances.numInstances());
      m_cycles++;
    } while ((m_allTheRules[0].size() < m_numRules)
      && (Utils.grOrEq(m_minSupport, lowerBoundMinSupportToUse))
    /* (necSupport >= lowerBoundNumInstancesSupport) */
    /* (Utils.grOrEq(m_minSupport, m_lowerBoundMinSupport)) */&& (necSupport >= 1));
    // undo the final decrement so m_minSupport reflects the support actually used
    m_minSupport += m_delta;
  }
private void pruneRulesForUpperBoundSupport() {
int necMaxSupport = (int) (m_upperBoundMinSupport
* m_instances.numInstances() + 0.5);
@SuppressWarnings("unchecked")
ArrayList<Object>[] prunedRules = new ArrayList[6];
for (int i = 0; i < 6; i++) {
prunedRules[i] = new ArrayList<Object>();
}
for (int i = 0; i < m_allTheRules[0].size(); i++) {
if (((ItemSet) m_allTheRules[1].get(i)).support() <= necMaxSupport) {
prunedRules[0].add(m_allTheRules[0].get(i));
prunedRules[1].add(m_allTheRules[1].get(i));
prunedRules[2].add(m_allTheRules[2].get(i));
if (!m_car) {
prunedRules[3].add(m_allTheRules[3].get(i));
prunedRules[4].add(m_allTheRules[4].get(i));
prunedRules[5].add(m_allTheRules[5].get(i));
}
}
}
m_allTheRules[0] = prunedRules[0];
m_allTheRules[1] = prunedRules[1];
m_allTheRules[2] = prunedRules[2];
m_allTheRules[3] = prunedRules[3];
m_allTheRules[4] = prunedRules[4];
m_allTheRules[5] = prunedRules[5];
}
/**
* Method that mines all class association rules with minimum support and with
* a minimum confidence.
*
* @return an sorted array of FastVector (confidence depended) containing the
* rules and metric information
* @param data the instances for which class association rules should be mined
* @throws Exception if rules can't be built successfully
*/
  @Override
  public ArrayList<Object>[] mineCARs(Instances data) throws Exception {
    // force class-association-rule mode before building
    m_car = true;
    buildAssociations(data);
    // buildAssociations leaves the mined rules in m_allTheRules
    return m_allTheRules;
  }
/**
* Gets the instances without the class atrribute.
*
* @return the instances without the class attribute.
*/
  @Override
  public Instances getInstancesNoClass() {
    // populated by buildAssociations: in CAR mode these are the transactions
    // with the class attribute split off
    return m_instances;
  }
/**
* Gets only the class attribute of the instances.
*
* @return the class attribute of all instances.
*/
  @Override
  public Instances getInstancesOnlyClass() {
    // populated by buildAssociations in CAR mode; null otherwise - TODO confirm
    return m_onlyClass;
  }
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
*/
@Override
public Enumeration<Option> listOptions() {
String string1 = "\tThe required number of rules. (default = " + m_numRules
+ ")", string2 = "\tThe minimum confidence of a rule. (default = "
+ m_minMetric + ")", string3 = "\tThe delta by which the minimum support is decreased in\n", string4 = "\teach iteration. (default = "
+ m_delta + ")", string5 = "\tThe lower bound for the minimum support. (default = "
+ m_lowerBoundMinSupport + ")", string6 = "\tIf used, rules are tested for significance at\n", string7 = "\tthe given level. Slower. (default = no significance testing)", string8 = "\tIf set the itemsets found are also output. (default = no)", string9 = "\tIf set class association rules are mined. (default = no)", string10 = "\tThe class index. (default = last)", stringType = "\tThe metric type by which to rank rules. (default = "
+ "confidence)", stringZeroAsMissing = "\tTreat zero (i.e. first value of nominal attributes) as "
+ "missing", stringToStringDelimiters = "\tIf used, two characters to use as rule delimiters\n"
+ "\tin the result of toString: the first to delimit fields,\n"
+ "\tthe second to delimit items within fields.\n"
+ "\t(default = traditional toString result)";
Vector<Option> newVector = new Vector<Option>(14);
newVector.add(new Option(string1, "N", 1,
"-N <required number of rules output>"));
newVector.add(new Option(stringType, "T", 1, "-T <0=confidence | 1=lift | "
+ "2=leverage | 3=Conviction>"));
newVector.add(new Option(string2, "C", 1,
"-C <minimum metric score of a rule>"));
newVector.add(new Option(string3 + string4, "D", 1,
"-D <delta for minimum support>"));
newVector.add(new Option("\tUpper bound for minimum support. "
+ "(default = 1.0)", "U", 1, "-U <upper bound for minimum support>"));
newVector.add(new Option(string5, "M", 1,
"-M <lower bound for minimum support>"));
newVector.add(new Option(string6 + string7, "S", 1,
"-S <significance level>"));
newVector.add(new Option(string8, "I", 0, "-I"));
newVector.add(new Option("\tRemove columns that contain "
+ "all missing values (default = no)", "R", 0, "-R"));
newVector.add(new Option("\tReport progress iteratively. (default "
+ "= no)", "V", 0, "-V"));
newVector.add(new Option(string9, "A", 0, "-A"));
newVector.add(new Option(stringZeroAsMissing, "Z", 0, "-Z"));
newVector.add(new Option(stringToStringDelimiters, "B", 1,
"-B <toString delimiters>"));
newVector.add(new Option(string10, "c", 1, "-c <the class index>"));
return newVector.elements();
}
/**
 * Parses a given list of options.
 * <p/>
 *
 * <!-- options-start --> Valid options are:
 * <p/>
 *
 * <pre>
 * -N &lt;required number of rules output&gt;
 * The required number of rules. (default = 10)
 * </pre>
 *
 * <pre>
 * -T &lt;0=confidence | 1=lift | 2=leverage | 3=Conviction&gt;
 * The metric type by which to rank rules. (default = confidence)
 * </pre>
 *
 * <pre>
 * -C &lt;minimum metric score of a rule&gt;
 * The minimum confidence of a rule. (default = 0.9)
 * </pre>
 *
 * <pre>
 * -D &lt;delta for minimum support&gt;
 * The delta by which the minimum support is decreased in
 * each iteration. (default = 0.05)
 * </pre>
 *
 * <pre>
 * -U &lt;upper bound for minimum support&gt;
 * Upper bound for minimum support. (default = 1.0)
 * </pre>
 *
 * <pre>
 * -M &lt;lower bound for minimum support&gt;
 * The lower bound for the minimum support. (default = 0.1)
 * </pre>
 *
 * <pre>
 * -S &lt;significance level&gt;
 * If used, rules are tested for significance at
 * the given level. Slower. (default = no significance testing)
 * </pre>
 *
 * <pre>
 * -I
 * If set the itemsets found are also output. (default = no)
 * </pre>
 *
 * <pre>
 * -R
 * Remove columns that contain all missing values (default = no)
 * </pre>
 *
 * <pre>
 * -V
 * Report progress iteratively. (default = no)
 * </pre>
 *
 * <pre>
 * -A
 * If set class association rules are mined. (default = no)
 * </pre>
 *
 * <pre>
 * -Z
 * Treat zero (i.e. first value of nominal attributes) as missing
 * </pre>
 *
 * <pre>
 * -B &lt;toString delimiters&gt;
 * If used, two characters to use as rule delimiters
 * in the result of toString: the first to delimit fields,
 * the second to delimit items within fields.
 * (default = traditional toString result)
 * </pre>
 *
 * <pre>
 * -c &lt;the class index&gt;
 * The class index. (default = last)
 * </pre>
 *
 * <!-- options-end -->
 *
 * @param options the list of options as an array of strings
 * @throws Exception if an option is not supported
 */
@Override
public void setOptions(String[] options) throws Exception {
  resetOptions();
  // Each Utils.getOption call consumes its switch (and value) from the
  // options array; the order of reads below does not matter.
  String numRulesString = Utils.getOption('N', options);
  String minConfidenceString = Utils.getOption('C', options);
  String deltaString = Utils.getOption('D', options);
  String maxSupportString = Utils.getOption('U', options);
  String minSupportString = Utils.getOption('M', options);
  String significanceLevelString = Utils.getOption('S', options);
  String classIndexString = Utils.getOption('c', options);
  String toStringDelimitersString = Utils.getOption('B', options);
  String metricTypeString = Utils.getOption('T', options);
  // Apply the metric type first: setMetricType() resets the minimum
  // metric score to a type-specific default, which -C may override below.
  if (metricTypeString.length() != 0) {
    setMetricType(new SelectedTag(Integer.parseInt(metricTypeString),
      TAGS_SELECTION));
  }
  if (numRulesString.length() != 0) {
    m_numRules = Integer.parseInt(numRulesString);
  }
  // Class index accepts the symbolic values "first"/"last"; -1 means
  // "use the last attribute".
  if (classIndexString.length() != 0) {
    if (classIndexString.equalsIgnoreCase("last")) {
      m_classIndex = -1;
    } else if (classIndexString.equalsIgnoreCase("first")) {
      m_classIndex = 0;
    } else {
      m_classIndex = Integer.parseInt(classIndexString);
    }
  }
  if (minConfidenceString.length() != 0) {
    m_minMetric = Double.parseDouble(minConfidenceString);
  }
  if (deltaString.length() != 0) {
    m_delta = Double.parseDouble(deltaString);
  }
  if (maxSupportString.length() != 0) {
    setUpperBoundMinSupport(Double.parseDouble(maxSupportString));
  }
  if (minSupportString.length() != 0) {
    m_lowerBoundMinSupport = Double.parseDouble(minSupportString);
  }
  if (significanceLevelString.length() != 0) {
    m_significanceLevel = Double.parseDouble(significanceLevelString);
  }
  m_outputItemSets = Utils.getFlag('I', options);
  m_car = Utils.getFlag('A', options);
  m_verbose = Utils.getFlag('V', options);
  m_treatZeroAsMissing = Utils.getFlag('Z', options);
  setRemoveAllMissingCols(Utils.getFlag('R', options));
  // -B is only honoured when exactly two delimiter characters are given:
  // field delimiter followed by the item delimiter.
  if (toStringDelimitersString.length() == 2) {
    m_toStringDelimiters = toStringDelimitersString;
  }
}
/**
 * Gets the current settings of the Apriori object.
 *
 * @return an array of strings suitable for passing to setOptions
 */
@Override
public String[] getOptions() {
  // Collect options in a list instead of a hand-counted fixed-size array:
  // the old "new String[23]" pattern overflows silently whenever a new
  // option is added without bumping the constant. Option consumers
  // (Utils.joinOptions / setOptions) ignore empty trailing entries, so
  // dropping the "" padding is backward compatible.
  List<String> options = new ArrayList<String>();
  if (m_outputItemSets) {
    options.add("-I");
  }
  if (getRemoveAllMissingCols()) {
    options.add("-R");
  }
  options.add("-N");
  options.add("" + m_numRules);
  options.add("-T");
  options.add("" + m_metricType);
  options.add("-C");
  options.add("" + m_minMetric);
  options.add("-D");
  options.add("" + m_delta);
  options.add("-U");
  options.add("" + m_upperBoundMinSupport);
  options.add("-M");
  options.add("" + m_lowerBoundMinSupport);
  options.add("-S");
  options.add("" + m_significanceLevel);
  if (m_car) {
    options.add("-A");
  }
  if (m_verbose) {
    options.add("-V");
  }
  if (m_treatZeroAsMissing) {
    options.add("-Z");
  }
  options.add("-c");
  options.add("" + m_classIndex);
  // Delimiters are optional; emit -B only when they have been set.
  if (m_toStringDelimiters != null) {
    options.add("-B");
    options.add(m_toStringDelimiters);
  }
  return options.toArray(new String[0]);
}
/**
 * Outputs the size of all the generated sets of itemsets and the rules.
 *
 * @return a string representation of the model
 */
@Override
public String toString() {
  StringBuilder text = new StringBuilder();
  if (m_Ls.size() <= 1) {
    return "\nNo large itemsets and rules found!\n";
  }
  // Header: minimum support as a fraction and as a rounded instance count.
  text.append("\nApriori\n=======\n\n");
  text.append("Minimum support: " + Utils.doubleToString(m_minSupport, 2)
    + " (" + ((int) (m_minSupport * m_instances.numInstances() + 0.5))
    + " instances)" + '\n');
  text.append("Minimum metric <");
  switch (m_metricType) {
  case CONFIDENCE:
    text.append("confidence>: ");
    break;
  case LIFT:
    text.append("lift>: ");
    break;
  case LEVERAGE:
    text.append("leverage>: ");
    break;
  case CONVICTION:
    text.append("conviction>: ");
    break;
  }
  text.append(Utils.doubleToString(m_minMetric, 2) + '\n');
  if (m_significanceLevel != -1) {
    text.append("Significance level: "
      + Utils.doubleToString(m_significanceLevel, 2) + '\n');
  }
  text.append("Number of cycles performed: " + m_cycles + '\n');
  text.append("\nGenerated sets of large itemsets:\n");
  if (!m_car) {
    // --- (general) association rules ---
    for (int i = 0; i < m_Ls.size(); i++) {
      text.append("\nSize of set of large itemsets L(" + (i + 1) + "): "
        + (m_Ls.get(i)).size() + '\n');
      if (m_outputItemSets) {
        text.append("\nLarge Itemsets L(" + (i + 1) + "):\n");
        for (int j = 0; j < (m_Ls.get(i)).size(); j++) {
          text.append(((AprioriItemSet) (m_Ls.get(i)).get(j))
            .toString(m_instances) + "\n");
        }
      }
    }
    text.append("\nBest rules found:\n\n");
    if (m_toStringDelimiters != null) {
      text
        .append("Number,Premise,Premise Support,Consequence,Consequence Support,Confidence,Lift,Leverage,LeverageT,Conviction\n");
    }
    // The delimiter/decoration strings depend only on the configured
    // delimiters and the metric type, so compute them once instead of
    // once per rule (they were loop-invariant in the original code).
    String outerDelim;
    String innerDelim;
    String stop;
    String implies;
    String confOpen;
    String confClose;
    String liftOpen;
    String liftClose;
    String levOpen;
    String levInner;
    String levClose;
    String convOpen;
    String convClose;
    if (m_toStringDelimiters != null) {
      // Delimited (CSV-like) output: first char separates fields,
      // second char separates items within a field.
      outerDelim = m_toStringDelimiters.substring(0, 1);
      innerDelim = m_toStringDelimiters.substring(1, 2);
      stop = outerDelim;
      implies = outerDelim;
      confOpen = outerDelim;
      confClose = "";
      liftOpen = outerDelim;
      liftClose = "";
      levOpen = outerDelim;
      levInner = outerDelim;
      levClose = "";
      convOpen = outerDelim;
      convClose = "";
    } else {
      // Traditional human-readable output; the ranking metric is
      // highlighted with angle brackets.
      outerDelim = " ";
      innerDelim = " ";
      stop = ". ";
      implies = " ==> ";
      confOpen = " " + (m_metricType == CONFIDENCE ? "<" : "")
        + "conf:(";
      confClose = ")" + (m_metricType == CONFIDENCE ? ">" : "");
      liftOpen = (m_metricType == LIFT ? " <" : "") + " lift:(";
      liftClose = ")" + (m_metricType == LIFT ? ">" : "");
      levOpen = (m_metricType == LEVERAGE ? " <" : "") + " lev:(";
      levInner = ")" + " [";
      levClose = "]" + (m_metricType == LEVERAGE ? ">" : "");
      convOpen = (m_metricType == CONVICTION ? " <" : "") + " conv:(";
      convClose = ")" + (m_metricType == CONVICTION ? ">" : "");
    }
    char odc = outerDelim.charAt(0);
    char idc = innerDelim.charAt(0);
    for (int i = 0; i < m_allTheRules[0].size(); i++) {
      // Rule number, padded to the width of the rule count.
      String n = Utils.doubleToString((double) i + 1,
        (int) (Math.log(m_numRules) / Math.log(10) + 1), 0);
      String premise = ((AprioriItemSet) m_allTheRules[0].get(i)).toString(
        m_instances, odc, idc);
      String consequence = ((AprioriItemSet) m_allTheRules[1].get(i))
        .toString(m_instances, odc, idc);
      String confidence = Utils.doubleToString(
        ((Double) m_allTheRules[2].get(i)).doubleValue(), 2);
      String lift = Utils.doubleToString(
        ((Double) m_allTheRules[3].get(i)).doubleValue(), 2);
      String leverage = Utils.doubleToString(
        ((Double) m_allTheRules[4].get(i)).doubleValue(), 2);
      String conviction = Utils.doubleToString(
        ((Double) m_allTheRules[5].get(i)).doubleValue(), 2);
      // Leverage expressed as an absolute number of instances.
      int leverageT = (int) (((Double) m_allTheRules[4].get(i)).doubleValue() * m_instances
        .numInstances());
      text.append(n).append(stop);
      text.append(premise).append(implies).append(consequence);
      text.append(confOpen).append(confidence).append(confClose);
      text.append(liftOpen).append(lift).append(liftClose);
      text.append(levOpen).append(leverage).append(levInner)
        .append(leverageT).append(levClose);
      text.append(convOpen).append(conviction).append(convClose);
      text.append('\n');
    }
  } else {
    // --- class association rules ---
    for (int i = 0; i < m_Ls.size(); i++) {
      text.append("\nSize of set of large itemsets L(" + (i + 1) + "): "
        + (m_Ls.get(i)).size() + '\n');
      if (m_outputItemSets) {
        text.append("\nLarge Itemsets L(" + (i + 1) + "):\n");
        for (int j = 0; j < (m_Ls.get(i)).size(); j++) {
          text.append(((ItemSet) (m_Ls.get(i)).get(j)).toString(m_instances)
            + "\n");
          text.append(((LabeledItemSet) (m_Ls.get(i)).get(j)).m_classLabel
            + " ");
          text.append(((LabeledItemSet) (m_Ls.get(i)).get(j)).support()
            + "\n");
        }
      }
    }
    text.append("\nBest rules found:\n\n");
    if (m_toStringDelimiters != null) {
      text
        .append("Number,Premise,Premise Support,Consequence,Consequence Support,Confidence\n");
    }
    // Only the confidence metric is reported for class association
    // rules; delimiters are again loop-invariant and hoisted.
    String outerDelim;
    String innerDelim;
    String stop;
    String implies;
    String confOpen;
    String confClose;
    if (m_toStringDelimiters != null) {
      outerDelim = m_toStringDelimiters.substring(0, 1);
      innerDelim = m_toStringDelimiters.substring(1, 2);
      stop = outerDelim;
      implies = outerDelim;
      confOpen = outerDelim;
      confClose = "";
    } else {
      outerDelim = " ";
      innerDelim = " ";
      stop = ". ";
      implies = " ==> ";
      confOpen = " " + "conf:(";
      confClose = ")";
    }
    char odc = outerDelim.charAt(0);
    char idc = innerDelim.charAt(0);
    for (int i = 0; i < m_allTheRules[0].size(); i++) {
      String n = Utils.doubleToString((double) i + 1,
        (int) (Math.log(m_numRules) / Math.log(10) + 1), 0);
      String premise = ((ItemSet) m_allTheRules[0].get(i)).toString(
        m_instances, odc, idc);
      // The consequence is rendered against the class-only header.
      String consequence = ((ItemSet) m_allTheRules[1].get(i)).toString(
        m_onlyClass, odc, idc);
      String confidence = Utils.doubleToString(
        ((Double) m_allTheRules[2].get(i)).doubleValue(), 2);
      text.append(n).append(stop).append(premise).append(implies)
        .append(consequence).append(confOpen).append(confidence)
        .append(confClose);
      text.append('\n');
    }
  }
  return text.toString();
}
/**
 * Returns the metric string for the chosen metric type.
 *
 * @return a string describing the used metric for the interestingness of a
 *         class association rule
 */
@Override
public String metricString() {
  if (m_metricType == LIFT) {
    // NOTE(review): "lif" looks like a typo/abbreviation of "lift";
    // kept byte-identical for backward compatibility with consumers
    // that match on this string.
    return "lif";
  }
  if (m_metricType == LEVERAGE) {
    return "leverage";
  }
  if (m_metricType == CONVICTION) {
    return "conviction";
  }
  // Default (and CONFIDENCE): abbreviated confidence label.
  return "conf";
}
/**
 * Returns the tip text for the removeAllMissingCols property.
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String removeAllMissingColsTipText() {
  return "Remove columns with all missing values.";
}
/**
 * Sets whether columns containing only missing values are removed before
 * mining.
 *
 * @param r true if such columns are to be removed.
 */
public void setRemoveAllMissingCols(boolean r) {
  m_removeMissingCols = r;
}
/**
 * Returns whether columns containing only missing values are to be removed.
 *
 * @return true if such columns are to be removed.
 */
public boolean getRemoveAllMissingCols() {
  return m_removeMissingCols;
}
/**
 * Returns the tip text for the upperBoundMinSupport property.
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String upperBoundMinSupportTipText() {
  return "Upper bound for minimum support. Start iteratively decreasing "
    + "minimum support from this value.";
}
/**
 * Gets the upper bound for the minimum support (the starting point of the
 * iterative support reduction).
 *
 * @return Value of upperBoundMinSupport.
 */
public double getUpperBoundMinSupport() {
  return m_upperBoundMinSupport;
}
/**
 * Sets the upper bound for the minimum support.
 *
 * @param v Value to assign to upperBoundMinSupport.
 */
public void setUpperBoundMinSupport(double v) {
  m_upperBoundMinSupport = v;
}
/**
 * Sets the class index (used for class association rule mining; -1 selects
 * the last attribute).
 *
 * @param index the class index
 */
@Override
public void setClassIndex(int index) {
  m_classIndex = index;
}
/**
 * Gets the class index.
 *
 * @return the index of the class attribute (-1 means the last attribute)
 */
public int getClassIndex() {
  return m_classIndex;
}
/**
 * Returns the tip text for the classIndex property.
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String classIndexTipText() {
  return "Index of the class attribute. If set to -1, the last attribute is taken as class attribute.";
}
/**
 * Sets class association rule mining.
 *
 * @param flag true if class association rules are to be mined, false
 *          otherwise
 */
public void setCar(boolean flag) {
  m_car = flag;
}
/**
 * Gets whether class association rules are mined.
 *
 * @return true if class association rules are mined, false otherwise
 */
public boolean getCar() {
  return m_car;
}
/**
 * Returns the tip text for the car property.
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String carTipText() {
  return "If enabled class association rules are mined instead of (general) association rules.";
}
/**
 * Returns the tip text for the lowerBoundMinSupport property.
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String lowerBoundMinSupportTipText() {
  return "Lower bound for minimum support.";
}
/**
 * Gets the lower bound for the minimum support (the point at which the
 * iterative support reduction stops).
 *
 * @return Value of lowerBoundMinSupport.
 */
public double getLowerBoundMinSupport() {
  return m_lowerBoundMinSupport;
}
/**
 * Sets the lower bound for the minimum support.
 *
 * @param v Value to assign to lowerBoundMinSupport.
 */
public void setLowerBoundMinSupport(double v) {
  m_lowerBoundMinSupport = v;
}
/**
 * Gets the metric type used to rank rules, wrapped as a SelectedTag for GUI
 * consumption.
 *
 * @return the type of metric to use for ranking rules
 */
public SelectedTag getMetricType() {
  return new SelectedTag(m_metricType, TAGS_SELECTION);
}
/**
 * Returns the tip text for the metricType property.
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String metricTypeTipText() {
  return "Set the type of metric by which to rank rules. Confidence is "
    + "the proportion of the examples covered by the premise that are also "
    + "covered by the consequence (Class association rules can only be mined using confidence). Lift is confidence divided by the "
    + "proportion of all examples that are covered by the consequence. This "
    + "is a measure of the importance of the association that is independent "
    + "of support. Leverage is the proportion of additional examples covered "
    + "by both the premise and consequence above those expected if the "
    + "premise and consequence were independent of each other. The total "
    + "number of examples that this represents is presented in brackets "
    + "following the leverage. Conviction is "
    + "another measure of departure from independence. Conviction is given "
    + "by P(premise)P(!consequence) / P(premise, !consequence).";
}
/**
 * Sets the metric type for ranking rules. As a side effect the minimum
 * metric score is reset to a default that suits the chosen metric.
 *
 * @param d the type of metric
 */
public void setMetricType(SelectedTag d) {
  if (d.getTags() == TAGS_SELECTION) {
    m_metricType = d.getSelectedTag().getID();
  }
  // Reset the minimum metric score to a sensible default for the
  // (possibly unchanged) current metric type.
  switch (m_metricType) {
  case CONFIDENCE:
    setMinMetric(0.9);
    break;
  case LIFT:
  case CONVICTION:
    setMinMetric(1.1);
    break;
  case LEVERAGE:
    setMinMetric(0.1);
    break;
  }
}
/**
 * Returns the tip text for the minMetric property.
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String minMetricTipText() {
  return "Minimum metric score. Consider only rules with scores higher than "
    + "this value.";
}
/**
 * Gets the minimum metric score a rule must reach to be reported.
 *
 * @return Value of minConfidence.
 */
public double getMinMetric() {
  return m_minMetric;
}
/**
 * Sets the minimum metric score a rule must reach to be reported.
 *
 * @param v Value to assign to minConfidence.
 */
public void setMinMetric(double v) {
  m_minMetric = v;
}
/**
 * Returns the tip text for the numRules property.
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String numRulesTipText() {
  return "Number of rules to find.";
}
/**
 * Gets the required number of rules to output.
 *
 * @return Value of numRules.
 */
public int getNumRules() {
  return m_numRules;
}
/**
 * Sets the required number of rules to output.
 *
 * @param v Value to assign to numRules.
 */
public void setNumRules(int v) {
  m_numRules = v;
}
/**
 * Returns the tip text for the delta property.
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String deltaTipText() {
  return "Iteratively decrease support by this factor. Reduces support "
    + "until min support is reached or required number of rules has been "
    + "generated.";
}
/**
 * Gets the delta by which the minimum support is decreased per iteration.
 *
 * @return Value of delta.
 */
public double getDelta() {
  return m_delta;
}
/**
 * Sets the delta by which the minimum support is decreased per iteration.
 *
 * @param v Value to assign to delta.
 */
public void setDelta(double v) {
  m_delta = v;
}
/**
 * Returns the tip text for the significanceLevel property.
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String significanceLevelTipText() {
  return "Significance level. Significance test (confidence metric only).";
}
/**
 * Gets the significance level for rule testing (-1 means no significance
 * testing).
 *
 * @return Value of significanceLevel.
 */
public double getSignificanceLevel() {
  return m_significanceLevel;
}
/**
 * Sets the significance level for rule testing.
 *
 * @param v Value to assign to significanceLevel.
 */
public void setSignificanceLevel(double v) {
  m_significanceLevel = v;
}
/**
 * Sets whether the discovered itemsets are output in addition to the rules.
 *
 * @param flag true if itemsets are to be output as well
 */
public void setOutputItemSets(boolean flag) {
  m_outputItemSets = flag;
}
/**
 * Gets whether the discovered itemsets are output in addition to the rules.
 *
 * @return true if itemsets are output as well
 */
public boolean getOutputItemSets() {
  return m_outputItemSets;
}
/**
 * Returns the tip text for the outputItemSets property.
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String outputItemSetsTipText() {
  return "If enabled the itemsets are output as well.";
}
/**
 * Sets verbose mode (iterative progress reporting).
 *
 * @param flag true if algorithm should be run in verbose mode
 */
public void setVerbose(boolean flag) {
  m_verbose = flag;
}
/**
 * Gets whether the algorithm is run in verbose mode.
 *
 * @return true if algorithm is run in verbose mode
 */
public boolean getVerbose() {
  return m_verbose;
}
/**
 * Returns the tip text for the verbose property.
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String verboseTipText() {
  return "If enabled the algorithm will be run in verbose mode.";
}
/**
 * Returns the tip text for the treatZeroAsMissing property.
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String treatZeroAsMissingTipText() {
  return "If enabled, zero (that is, the first value of a nominal) is "
    + "treated in the same way as a missing value.";
}
/**
 * Sets whether zeros (i.e. the first value of a nominal attribute) should be
 * treated as missing values.
 *
 * @param z true if zeros should be treated as missing values.
 */
public void setTreatZeroAsMissing(boolean z) {
  m_treatZeroAsMissing = z;
}
/**
 * Gets whether zeros (i.e. the first value of a nominal attribute) are to be
 * treated in the same way as missing values.
 *
 * @return true if zeros are to be treated like missing values.
 */
public boolean getTreatZeroAsMissing() {
  return m_treatZeroAsMissing;
}
/**
 * Method that finds all large (frequent) itemsets for the given set of
 * instances using the classic level-wise Apriori search. Results are
 * accumulated in m_Ls (one list per itemset size) and the per-level
 * support hashtables in m_hashtables.
 *
 * @throws Exception if an attribute is numeric
 */
private void findLargeItemSets() throws Exception {
  ArrayList<Object> kMinusOneSets, kSets;
  Hashtable<ItemSet, Integer> hashtable;
  int necSupport, i = 0;
  // Find large itemsets
  // minimum support expressed as a rounded absolute instance count
  necSupport = (int) (m_minSupport * m_instances.numInstances() + 0.5);
  // candidate itemsets of size one, with support counted over the data
  kSets = AprioriItemSet.singletons(m_instances, m_treatZeroAsMissing);
  if (m_treatZeroAsMissing) {
    AprioriItemSet.upDateCountersTreatZeroAsMissing(kSets, m_instances);
  } else {
    AprioriItemSet.upDateCounters(kSets, m_instances);
  }
  kSets = AprioriItemSet.deleteItemSets(kSets, necSupport,
    m_instances.numInstances());
  if (kSets.size() == 0) {
    return;
  }
  // Level-wise loop: merge frequent (k-1)-itemsets into k-candidates,
  // prune candidates whose subsets are infrequent (via the hashtable),
  // count support, drop infrequent sets, and repeat until none survive.
  do {
    m_Ls.add(kSets);
    kMinusOneSets = kSets;
    kSets = AprioriItemSet.mergeAllItemSets(kMinusOneSets, i,
      m_instances.numInstances());
    hashtable = AprioriItemSet.getHashtable(kMinusOneSets,
      kMinusOneSets.size());
    m_hashtables.add(hashtable);
    kSets = AprioriItemSet.pruneItemSets(kSets, hashtable);
    if (m_treatZeroAsMissing) {
      AprioriItemSet.upDateCountersTreatZeroAsMissing(kSets, m_instances);
    } else {
      AprioriItemSet.upDateCounters(kSets, m_instances);
    }
    kSets = AprioriItemSet.deleteItemSets(kSets, necSupport,
      m_instances.numInstances());
    i++;
  } while (kSets.size() > 0);
}
/**
 * Method that finds all association rules and performs significance test.
 *
 * @throws Exception if an attribute is numeric
 */
private void findRulesBruteForce() throws Exception {
  // Rules can only come from itemsets of size >= 2, hence level starts at 1.
  for (int level = 1; level < m_Ls.size(); level++) {
    ArrayList<Object> itemSetsAtLevel = m_Ls.get(level);
    for (int s = 0; s < itemSetsAtLevel.size(); s++) {
      AprioriItemSet itemSet = (AprioriItemSet) itemSetsAtLevel.get(s);
      // Generate all rules (with significance testing) for this itemset.
      ArrayList<Object>[] rules = itemSet.generateRulesBruteForce(
        m_minMetric, m_metricType, m_hashtables, level + 1,
        m_instances.numInstances(), m_significanceLevel);
      // Append the parallel columns (premise, consequence, confidence,
      // lift, leverage, conviction) to the global rule arrays.
      for (int k = 0; k < rules[0].size(); k++) {
        m_allTheRules[0].add(rules[0].get(k));
        m_allTheRules[1].add(rules[1].get(k));
        m_allTheRules[2].add(rules[2].get(k));
        m_allTheRules[3].add(rules[3].get(k));
        m_allTheRules[4].add(rules[4].get(k));
        m_allTheRules[5].add(rules[5].get(k));
      }
    }
  }
}
/**
 * Method that finds all association rules.
 *
 * @throws Exception if an attribute is numeric
 */
private void findRulesQuickly() throws Exception {
  // Rules can only come from itemsets of size >= 2, hence level starts at 1.
  for (int level = 1; level < m_Ls.size(); level++) {
    ArrayList<Object> itemSetsAtLevel = m_Ls.get(level);
    for (int s = 0; s < itemSetsAtLevel.size(); s++) {
      AprioriItemSet itemSet = (AprioriItemSet) itemSetsAtLevel.get(s);
      ArrayList<Object>[] rules = itemSet.generateRules(m_minMetric,
        m_hashtables, level + 1);
      // Append the parallel rule columns; the extra metric columns
      // (lift/leverage/conviction) are only present when the generator
      // returned more than three columns.
      for (int k = 0; k < rules[0].size(); k++) {
        m_allTheRules[0].add(rules[0].get(k));
        m_allTheRules[1].add(rules[1].get(k));
        m_allTheRules[2].add(rules[2].get(k));
        if (rules.length > 3) {
          m_allTheRules[3].add(rules[3].get(k));
          m_allTheRules[4].add(rules[4].get(k));
          m_allTheRules[5].add(rules[5].get(k));
        }
      }
    }
  }
}
/**
 * Method that finds all large itemsets for class association rules for the
 * given set of instances. Results are accumulated in m_Ls, one list per
 * itemset size.
 *
 * @throws Exception if an attribute is numeric
 */
private void findLargeCarItemSets() throws Exception {
  ArrayList<Object> kMinusOneSets, kSets;
  Hashtable<ItemSet, Integer> hashtable;
  int necSupport, i = 0;
  // Find large itemsets
  // Minimum support as an absolute instance count: rounded up unless the
  // fraction already yields a whole number.
  // (The original code also computed an upper-bound count from
  // m_upperBoundMinSupport, but discarded the result — that dead code has
  // been removed; behavior is unchanged.)
  double nextMinSupport = m_minSupport * m_instances.numInstances();
  if (Math.rint(nextMinSupport) == nextMinSupport) {
    necSupport = (int) nextMinSupport;
  } else {
    necSupport = Math.round((float) (nextMinSupport + 0.5));
  }
  // find item sets of length one
  kSets = LabeledItemSet.singletons(m_instances, m_onlyClass);
  LabeledItemSet.upDateCounters(kSets, m_instances, m_onlyClass);
  // check if an item set of length one is frequent, if not delete it
  kSets = LabeledItemSet.deleteItemSets(kSets, necSupport,
    m_instances.numInstances());
  if (kSets.size() == 0) {
    return;
  }
  // Level-wise search analogous to findLargeItemSets(), but with class
  // labels carried alongside each itemset.
  do {
    m_Ls.add(kSets);
    kMinusOneSets = kSets;
    kSets = LabeledItemSet.mergeAllItemSets(kMinusOneSets, i,
      m_instances.numInstances());
    hashtable = LabeledItemSet.getHashtable(kMinusOneSets,
      kMinusOneSets.size());
    kSets = LabeledItemSet.pruneItemSets(kSets, hashtable);
    LabeledItemSet.upDateCounters(kSets, m_instances, m_onlyClass);
    kSets = LabeledItemSet.deleteItemSets(kSets, necSupport,
      m_instances.numInstances());
    i++;
  } while (kSets.size() > 0);
}
/**
 * Method that finds all class association rules.
 *
 * @throws Exception if an attribute is numeric
 */
private void findCarRulesQuickly() throws Exception {
  // Class association rules can come from itemsets of any size, so the
  // scan starts at level 0 (unlike the general-rule case).
  for (int level = 0; level < m_Ls.size(); level++) {
    ArrayList<Object> labeledSetsAtLevel = m_Ls.get(level);
    for (int s = 0; s < labeledSetsAtLevel.size(); s++) {
      LabeledItemSet labeledSet = (LabeledItemSet) labeledSetsAtLevel.get(s);
      ArrayList<Object>[] rules = labeledSet.generateRules(m_minMetric,
        false);
      // Append premise, consequence and confidence columns to the
      // global rule arrays.
      for (int k = 0; k < rules[0].size(); k++) {
        m_allTheRules[0].add(rules[0].get(k));
        m_allTheRules[1].add(rules[1].get(k));
        m_allTheRules[2].add(rules[2].get(k));
      }
    }
  }
}
/**
 * Returns all the rules found so far (parallel arrays: premise,
 * consequence, confidence, and — when available — lift, leverage,
 * conviction).
 *
 * NOTE(review): this returns the internal arrays directly; callers can
 * mutate the associator's state through the returned reference.
 *
 * @return all the rules
 * @see #m_allTheRules
 */
public ArrayList<Object>[] getAllTheRules() {
  return m_allTheRules;
}
/**
 * Converts the internal parallel rule arrays into a list of
 * DefaultAssociationRule objects wrapped in an AssociationRules container.
 * Only produces output for general (non-car) rules, where all six metric
 * columns are present (m_allTheRules.length > 3).
 */
@Override
public AssociationRules getAssociationRules() {
  List<AssociationRule> rules = new ArrayList<AssociationRule>();
  if (m_allTheRules != null && m_allTheRules.length > 3) {
    for (int i = 0; i < m_allTheRules[0].size(); i++) {
      // Construct the Lists for the premise and consequence
      List<Item> premise = new ArrayList<Item>();
      List<Item> consequence = new ArrayList<Item>();
      AprioriItemSet premiseSet = (AprioriItemSet) m_allTheRules[0].get(i);
      AprioriItemSet consequenceSet = (AprioriItemSet) m_allTheRules[1]
        .get(i);
      // An m_items entry of -1 means "attribute not part of this itemset".
      for (int j = 0; j < m_instances.numAttributes(); j++) {
        if (premiseSet.m_items[j] != -1) {
          try {
            Item newItem = new NominalItem(m_instances.attribute(j),
              premiseSet.m_items[j]);
            premise.add(newItem);
          } catch (Exception ex) {
            ex.printStackTrace();
          }
        }
        if (consequenceSet.m_items[j] != -1) {
          try {
            Item newItem = new NominalItem(m_instances.attribute(j),
              consequenceSet.m_items[j]);
            consequence.add(newItem);
          } catch (Exception ex) {
            ex.printStackTrace();
          }
        }
      }
      // get the constituents of the metrics
      // NOTE(review): totalSupport is read from consequenceSet.m_counter,
      // which presumably holds the joint (rule) support as set up by the
      // rule generator — confirm against AprioriItemSet.generateRules.
      int totalTrans = premiseSet.m_totalTransactions;
      int totalSupport = consequenceSet.m_counter;
      int premiseSupport = premiseSet.m_counter;
      int consequenceSupport = consequenceSet.m_secondaryCounter;
      // map the primary metric constant onto the enum used by the
      // DefaultAssociationRule API
      DefaultAssociationRule.METRIC_TYPE metric = null;
      switch (m_metricType) {
      case CONFIDENCE:
        metric = DefaultAssociationRule.METRIC_TYPE.CONFIDENCE;
        break;
      case LIFT:
        metric = DefaultAssociationRule.METRIC_TYPE.LIFT;
        break;
      case LEVERAGE:
        metric = DefaultAssociationRule.METRIC_TYPE.LEVERAGE;
        break;
      case CONVICTION:
        metric = DefaultAssociationRule.METRIC_TYPE.CONVICTION;
        break;
      }
      DefaultAssociationRule newRule = new DefaultAssociationRule(premise,
        consequence, metric, premiseSupport, consequenceSupport,
        totalSupport, totalTrans);
      rules.add(newRule);
    }
  }
  return new AssociationRules(rules, this);
}
/**
 * Gets a list of the names of the metrics output for each rule. This list
 * should be the same (in terms of the names and order thereof) as that
 * produced by AssociationRule.getMetricNamesForRule().
 *
 * @return an array of the names of the metrics available for each rule
 *         learned by this producer.
 */
@Override
public String[] getRuleMetricNames() {
  final int numMetrics = DefaultAssociationRule.TAGS_SELECTION.length;
  String[] metricNames = new String[numMetrics];
  int index = 0;
  // Read the human-readable name of every metric tag, in tag order.
  while (index < numMetrics) {
    metricNames[index] = DefaultAssociationRule.TAGS_SELECTION[index]
      .getReadable();
    index++;
  }
  return metricNames;
}
/**
 * Returns true if this AssociationRulesProducer can actually produce rules.
 * Most implementing classes will always return true from this method
 * (obviously :-)). However, an implementing class that actually acts as a
 * wrapper around things that may or may not implement
 * AssociationRulesProducer will want to return false if the thing they wrap
 * can't produce rules.
 *
 * @return true if this producer can produce rules in its current
 *         configuration
 */
@Override
public boolean canProduceRules() {
  // Apriori always produces rules, regardless of configuration.
  return true;
}
/**
 * Returns the revision string (substituted by the version control system).
 *
 * @return the revision
 */
@Override
public String getRevision() {
  return RevisionUtils.extract("$Revision$");
}
/**
 * Main method for running this associator from the command line.
 *
 * @param args the commandline options
 */
public static void main(String[] args) {
  // Delegates option parsing, training and output to the standard
  // associator runner.
  runAssociator(new Apriori(), args);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/associations/AprioriItemSet.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* AprioriItemSet.java
* Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.associations;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Hashtable;
import weka.core.ContingencyTables;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.WekaEnumeration;
/**
* Class for storing a set of items. Item sets are stored in a lexicographic
* order, which is determined by the header information of the set of instances
* used for generating the set of items. All methods in this class assume that
* item sets are stored in lexicographic order. The class provides methods that
* are used in the Apriori algorithm to construct association rules.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @author Stefan Mutter (mutter@cs.waikato.ac.nz)
* @version $Revision$
*/
public class AprioriItemSet extends ItemSet implements Serializable,
RevisionHandler {
/** for serialization */
static final long serialVersionUID = 7684467755712672058L;
/**
 * Constructor.
 *
 * @param totalTrans the total number of transactions (instances) in the
 *          data; used as the denominator for support-based metrics
 */
public AprioriItemSet(int totalTrans) {
  super(totalTrans);
}
/**
 * Outputs the confidence for a rule.
 *
 * NOTE(review): the ratio assumes consequence.m_counter holds the joint
 * support of premise-and-consequence while premise.m_counter holds the
 * premise support (as set up by the rule generator) — confirm against
 * generateRules before reusing elsewhere.
 *
 * @param premise the premise of the rule
 * @param consequence the consequence of the rule
 * @return the confidence on the training data
 */
public static double confidenceForRule(AprioriItemSet premise,
  AprioriItemSet consequence) {
  return (double) consequence.m_counter / (double) premise.m_counter;
}
/**
 * Outputs the lift for a rule. Lift is defined as:<br>
 * confidence / prob(consequence)
 *
 * @param premise the premise of the rule
 * @param consequence the consequence of the rule
 * @param consequenceCount how many times the consequence occurs independent
 *          of the premise
 * @return the lift on the training data
 */
public double liftForRule(AprioriItemSet premise, AprioriItemSet consequence,
  int consequenceCount) {
  // P(consequence) estimated from its independent occurrence count.
  double consequenceProb = (double) consequenceCount
    / (double) m_totalTransactions;
  return confidenceForRule(premise, consequence) / consequenceProb;
}
/**
 * Outputs the leverage for a rule. Leverage is defined as: <br>
 * prob(premise &amp; consequence) - (prob(premise) * prob(consequence))
 *
 * NOTE(review): the joint coverage is read from consequence.m_counter —
 * presumably the rule generator stores the joint support there; confirm
 * against generateRules. The premise parameter itself is unused here but
 * kept for signature symmetry with the other metric methods.
 *
 * @param premise the premise of the rule
 * @param consequence the consequence of the rule
 * @param premiseCount how many times the premise occurs independent of the
 *          consequent
 * @param consequenceCount how many times the consequence occurs independent
 *          of the premise
 * @return the leverage on the training data
 */
public double leverageForRule(AprioriItemSet premise,
  AprioriItemSet consequence, int premiseCount, int consequenceCount) {
  // observed joint coverage
  double coverageForItemSet = (double) consequence.m_counter
    / (double) m_totalTransactions;
  // expected joint coverage under independence
  double expectedCoverageIfIndependent = ((double) premiseCount / (double) m_totalTransactions)
    * ((double) consequenceCount / (double) m_totalTransactions);
  double lev = coverageForItemSet - expectedCoverageIfIndependent;
  return lev;
}
/**
 * Outputs the conviction for a rule. Conviction is defined as: <br>
 * prob(premise) * prob(!consequence) / prob(premise &amp; !consequence)
 *
 * @param premise the premise of the rule
 * @param consequence the consequence of the rule
 * @param premiseCount how many times the premise occurs independent of the
 *          consequent
 * @param consequenceCount how many times the consequence occurs independent
 *          of the premise
 * @return the conviction on the training data
 */
public double convictionForRule(AprioriItemSet premise,
  AprioriItemSet consequence, int premiseCount, int consequenceCount) {
  // numerator: support(premise) * support(!consequence) / total
  double numerator = (double) premiseCount
    * (double) (m_totalTransactions - consequenceCount) / m_totalTransactions;
  // denominator: count of (premise & !consequence), +1 smoothing to avoid
  // division by zero for perfect rules
  double denominator = ((premiseCount - consequence.m_counter) + 1);
  if (numerator < 0 || denominator < 0) {
    // diagnostic output for unexpected negative counts
    System.err.println("*** " + numerator + " " + denominator);
    System.err.println("premis count: " + premiseCount
      + " consequence count " + consequenceCount + " total trans "
      + m_totalTransactions);
  }
  return numerator / denominator;
}
/**
 * Generates all rules for an item set that reach the given minimum
 * confidence. Starts with single-item consequences and recursively grows
 * them via {@link #moreComplexRules}.
 *
 * @param minConfidence the minimum confidence the rules have to have
 * @param hashtables containing all(!) previously generated item sets
 * @param numItemsInSet the size of the item set for which the rules are to be
 *          generated
 * @return the rules as six parallel lists: premises, consequences, and the
 *         confidence, lift, leverage and conviction values
 */
public ArrayList<Object>[] generateRules(double minConfidence,
  ArrayList<Hashtable<ItemSet, Integer>> hashtables, int numItemsInSet) {
  ArrayList<Object> premises = new ArrayList<Object>();
  ArrayList<Object> consequences = new ArrayList<Object>();
  ArrayList<Object> conf = new ArrayList<Object>();
  ArrayList<Object> lift = new ArrayList<Object>();
  ArrayList<Object> lev = new ArrayList<Object>();
  ArrayList<Object> conv = new ArrayList<Object>();
  @SuppressWarnings("unchecked")
  ArrayList<Object>[] rules = new ArrayList[6];
  // support counts of the (k-1)-item sets, used to look up premise support
  Hashtable<ItemSet, Integer> hashtable = hashtables.get(numItemsInSet - 2);
  // Generate all rules with one item in the consequence.
  for (int i = 0; i < m_items.length; i++) {
    if (m_items[i] != -1) {
      AprioriItemSet premise = new AprioriItemSet(m_totalTransactions);
      AprioriItemSet consequence = new AprioriItemSet(m_totalTransactions);
      premise.m_items = new int[m_items.length];
      consequence.m_items = new int[m_items.length];
      // consequence's counter carries the support of the whole rule
      consequence.m_counter = m_counter;
      for (int j = 0; j < m_items.length; j++) {
        consequence.m_items[j] = -1;
      }
      System.arraycopy(m_items, 0, premise.m_items, 0, m_items.length);
      // move item i from the premise into the consequence
      premise.m_items[i] = -1;
      consequence.m_items[i] = m_items[i];
      premise.m_counter = hashtable.get(premise).intValue();
      // support of the consequence on its own (singleton table)
      Hashtable<ItemSet, Integer> hashtableForConsequence = hashtables.get(0);
      int consequenceUnconditionedCounter = hashtableForConsequence.get(
        consequence).intValue();
      consequence.m_secondaryCounter = consequenceUnconditionedCounter;
      premises.add(premise);
      consequences.add(consequence);
      // Double.valueOf replaces the deprecated Double(double) constructor
      conf.add(Double.valueOf(confidenceForRule(premise, consequence)));
      lift.add(Double.valueOf(liftForRule(premise, consequence,
        consequenceUnconditionedCounter)));
      lev.add(Double.valueOf(leverageForRule(premise, consequence,
        premise.m_counter, consequenceUnconditionedCounter)));
      conv.add(Double.valueOf(convictionForRule(premise, consequence,
        premise.m_counter, consequenceUnconditionedCounter)));
    }
  }
  rules[0] = premises;
  rules[1] = consequences;
  rules[2] = conf;
  rules[3] = lift;
  rules[4] = lev;
  rules[5] = conv;
  pruneRules(rules, minConfidence);
  // Generate all the other rules (consequences with more than one item).
  ArrayList<Object>[] moreResults = moreComplexRules(rules, numItemsInSet, 1,
    minConfidence, hashtables);
  if (moreResults != null) {
    for (int i = 0; i < moreResults[0].size(); i++) {
      for (int k = 0; k < 6; k++) {
        rules[k].add(moreResults[k].get(i));
      }
    }
  }
  return rules;
}
/**
 * Generates all significant rules for an item set by brute force: every
 * non-empty proper subset of the item set is tried as a premise (encoded by
 * the bits of a counter), and a rule is kept when the selected metric
 * reaches the minimum and, optionally, the chi-squared test passes.
 *
 * @param minMetric the minimum metric (confidence, lift, leverage,
 *          improvement) the rules have to have
 * @param metricType (confidence=0, lift, leverage, improvement)
 * @param hashtables containing all(!) previously generated item sets
 * @param numItemsInSet the size of the item set for which the rules are to be
 *          generated
 * @param numTransactions the total number of transactions in the data
 * @param significanceLevel the significance level for testing the rules; -1
 *          disables the test
 * @return all the rules with minimum metric for the given item set, as six
 *         parallel lists (premises, consequences, conf, lift, lev, conv)
 * @exception Exception if the metric type is unknown
 */
public final ArrayList<Object>[] generateRulesBruteForce(double minMetric,
  int metricType, ArrayList<Hashtable<ItemSet, Integer>> hashtables,
  int numItemsInSet, int numTransactions, double significanceLevel)
  throws Exception {
  ArrayList<Object> premises = new ArrayList<Object>();
  ArrayList<Object> consequences = new ArrayList<Object>();
  ArrayList<Object> conf = new ArrayList<Object>();
  ArrayList<Object> lift = new ArrayList<Object>();
  ArrayList<Object> lev = new ArrayList<Object>();
  ArrayList<Object> conv = new ArrayList<Object>();
  @SuppressWarnings("unchecked")
  ArrayList<Object>[] rules = new ArrayList[6];
  double[][] contingencyTable = new double[2][2];
  // Each j in [1, 2^k) encodes via its bits which items form the premise.
  int max = (int) Math.pow(2, numItemsInSet);
  for (int j = 1; j < max; j++) {
    // number of set bits == number of items assigned to the premise
    int numItemsInPremise = Integer.bitCount(j);
    if (numItemsInPremise < numItemsInSet) {
      Hashtable<ItemSet, Integer> hashtableForPremise = hashtables
        .get(numItemsInPremise - 1);
      Hashtable<ItemSet, Integer> hashtableForConsequence = hashtables
        .get(numItemsInSet - numItemsInPremise - 1);
      AprioriItemSet premise = new AprioriItemSet(m_totalTransactions);
      AprioriItemSet consequence = new AprioriItemSet(m_totalTransactions);
      premise.m_items = new int[m_items.length];
      consequence.m_items = new int[m_items.length];
      // consequence's counter carries the support of the whole rule
      consequence.m_counter = m_counter;
      // walk the bits of j over the non-missing items of this set
      int help = j;
      for (int i = 0; i < m_items.length; i++) {
        if (m_items[i] != -1) {
          if (help % 2 == 1) {
            premise.m_items[i] = m_items[i];
            consequence.m_items[i] = -1;
          } else {
            premise.m_items[i] = -1;
            consequence.m_items[i] = m_items[i];
          }
          help /= 2;
        } else {
          premise.m_items[i] = -1;
          consequence.m_items[i] = -1;
        }
      }
      premise.m_counter = hashtableForPremise.get(premise).intValue();
      int consequenceUnconditionedCounter = hashtableForConsequence.get(
        consequence).intValue();
      consequence.m_secondaryCounter = consequenceUnconditionedCounter;
      double chiSquared = 0;
      if (significanceLevel != -1) {
        // 2x2 contingency table: premise vs. consequence occurrence counts
        contingencyTable[0][0] = (consequence.m_counter);
        contingencyTable[0][1] = (premise.m_counter - consequence.m_counter);
        contingencyTable[1][0] = (consequenceUnconditionedCounter - consequence.m_counter);
        contingencyTable[1][1] = (numTransactions - premise.m_counter
          - consequenceUnconditionedCounter + consequence.m_counter);
        chiSquared = ContingencyTables.chiSquared(contingencyTable, false);
      }
      // Compute all four metrics once; select the filtering metric by type.
      double tempConf = confidenceForRule(premise, consequence);
      double tempLift = liftForRule(premise, consequence,
        consequenceUnconditionedCounter);
      double tempLev = leverageForRule(premise, consequence,
        premise.m_counter, consequenceUnconditionedCounter);
      double tempConv = convictionForRule(premise, consequence,
        premise.m_counter, consequenceUnconditionedCounter);
      double metric;
      switch (metricType) {
      case 0:
        metric = tempConf;
        break;
      case 1:
        metric = tempLift;
        break;
      case 2:
        metric = tempLev;
        break;
      case 3:
        metric = tempConv;
        break;
      default:
        throw new Exception("ItemSet: Unknown metric type!");
      }
      // keep the rule if the metric is high enough and the rule is
      // significant (or significance testing is disabled)
      if (!(metric < minMetric)
        && (significanceLevel == -1 || !(chiSquared > significanceLevel))) {
        premises.add(premise);
        consequences.add(consequence);
        conf.add(Double.valueOf(tempConf));
        lift.add(Double.valueOf(tempLift));
        lev.add(Double.valueOf(tempLev));
        conv.add(Double.valueOf(tempConv));
      }
    }
  }
  rules[0] = premises;
  rules[1] = consequences;
  rules[2] = conf;
  rules[3] = lift;
  rules[4] = lev;
  rules[5] = conv;
  return rules;
}
/**
 * Subtracts an item set from another one.
 *
 * @param toSubtract the item set to be subtracted from this one.
 * @return an item set that only contains items from this item set that are
 *         not contained by toSubtract
 */
public final AprioriItemSet subtract(AprioriItemSet toSubtract) {
  AprioriItemSet remainder = new AprioriItemSet(m_totalTransactions);
  remainder.m_items = new int[m_items.length];
  for (int i = 0; i < m_items.length; i++) {
    // keep an item only where toSubtract has no item at that position
    remainder.m_items[i] = (toSubtract.m_items[i] == -1) ? m_items[i] : -1;
  }
  remainder.m_counter = 0;
  return remainder;
}
/**
 * Generates rules with more than one item in the consequence, recursively
 * growing the consequence one item at a time.
 *
 * @param rules all the rules having (k-1)-item sets as consequences
 * @param numItemsInSet the size of the item set for which the rules are to be
 *          generated
 * @param numItemsInConsequence the value of (k-1)
 * @param minConfidence the minimum confidence a rule has to have
 * @param hashtables the hashtables containing all(!) previously generated
 *          item sets
 * @return all the rules having (k)-item sets as consequences, or null if the
 *         consequence cannot be grown any further
 */
@SuppressWarnings("unchecked")
private final ArrayList<Object>[] moreComplexRules(ArrayList<Object>[] rules,
  int numItemsInSet, int numItemsInConsequence, double minConfidence,
  ArrayList<Hashtable<ItemSet, Integer>> hashtables) {
  // stop once the premise would shrink to nothing
  if (numItemsInSet <= numItemsInConsequence + 1) {
    return null;
  }
  ArrayList<Object> newPremises = new ArrayList<Object>();
  ArrayList<Object> newConf = new ArrayList<Object>();
  ArrayList<Object> newLift = new ArrayList<Object>();
  ArrayList<Object> newLev = new ArrayList<Object>();
  ArrayList<Object> newConv = new ArrayList<Object>();
  // support counts for the premises remaining after growing the consequence
  Hashtable<ItemSet, Integer> hashtable = hashtables
    .get(numItemsInSet - numItemsInConsequence - 2);
  // candidate (k)-item consequences built by merging the (k-1)-item ones
  ArrayList<Object> newConsequences = mergeAllItemSets(rules[1],
    numItemsInConsequence - 1, m_totalTransactions);
  int newNumInConsequence = numItemsInConsequence + 1;
  Hashtable<ItemSet, Integer> hashtableForConsequence = hashtables
    .get(newNumInConsequence - 1);
  Enumeration<Object> enu = new WekaEnumeration<Object>(newConsequences);
  while (enu.hasMoreElements()) {
    AprioriItemSet current = (AprioriItemSet) enu.nextElement();
    // current's counter carries the support of the whole rule
    current.m_counter = m_counter;
    AprioriItemSet newPremise = subtract(current);
    newPremise.m_counter = hashtable.get(newPremise).intValue();
    newPremises.add(newPremise);
    // Double.valueOf replaces the deprecated Double(double) constructor
    newConf.add(Double.valueOf(confidenceForRule(newPremise, current)));
    int consequenceUnconditionedCounter = hashtableForConsequence.get(
      current).intValue();
    current.m_secondaryCounter = consequenceUnconditionedCounter;
    newLift.add(Double.valueOf(liftForRule(newPremise, current,
      consequenceUnconditionedCounter)));
    newLev.add(Double.valueOf(leverageForRule(newPremise, current,
      newPremise.m_counter, consequenceUnconditionedCounter)));
    newConv.add(Double.valueOf(convictionForRule(newPremise, current,
      newPremise.m_counter, consequenceUnconditionedCounter)));
  }
  ArrayList<Object>[] result = new ArrayList[rules.length];
  result[0] = newPremises;
  result[1] = newConsequences;
  result[2] = newConf;
  result[3] = newLift;
  result[4] = newLev;
  result[5] = newConv;
  pruneRules(result, minConfidence);
  // recurse with a consequence one item larger
  ArrayList<Object>[] moreResults = moreComplexRules(result, numItemsInSet,
    numItemsInConsequence + 1, minConfidence, hashtables);
  if (moreResults != null) {
    for (int i = 0; i < moreResults[0].size(); i++) {
      for (int k = 0; k < 6; k++) {
        result[k].add(moreResults[k].get(i));
      }
    }
  }
  return result;
}
/**
 * Returns the contents of an item set as a string.
 *
 * @param instances contains the relevant header information
 * @return string describing the item set
 */
@Override
public final String toString(Instances instances) {
  // Delegates entirely to the superclass; the override exists so the method
  // can be declared final at this level.
  return super.toString(instances);
}
/**
 * Converts the header info of the given set of instances into a set of item
 * sets (singletons). The ordering of values in the header file determines the
 * lexicographic order.
 *
 * @param instances the set of instances whose header info is to be used
 * @param treatZeroAsMissing if true, the first (index 0) value of every
 *          attribute is skipped
 * @return a set of item sets, each containing a single item
 * @exception Exception if singletons can't be generated successfully
 */
public static ArrayList<Object> singletons(Instances instances,
  boolean treatZeroAsMissing) throws Exception {
  ArrayList<Object> itemSets = new ArrayList<Object>();
  int numAttributes = instances.numAttributes();
  for (int attr = 0; attr < numAttributes; attr++) {
    if (instances.attribute(attr).isNumeric()) {
      throw new Exception("Can't handle numeric attributes!");
    }
    // optionally skip value 0, treating it as "missing"
    int firstValue = treatZeroAsMissing ? 1 : 0;
    for (int value = firstValue; value < instances.attribute(attr).numValues(); value++) {
      AprioriItemSet singleton = new AprioriItemSet(instances.numInstances());
      singleton.m_items = new int[numAttributes];
      for (int k = 0; k < numAttributes; k++) {
        singleton.m_items[k] = -1;
      }
      singleton.m_items[attr] = value;
      itemSets.add(singleton);
    }
  }
  return itemSets;
}
/**
 * Merges all item sets in the set of (k-1)-item sets to create the (k)-item
 * sets and updates the counters. Two item sets merge only when they share a
 * common prefix of (k-1) items and differ in exactly one trailing item each.
 *
 * @param itemSets the set of (k-1)-item sets
 * @param size the value of (k-1)
 * @param totalTrans the total number of transactions in the data
 * @return the generated (k)-item sets, with counters reset to 0
 */
public static ArrayList<Object> mergeAllItemSets(ArrayList<Object> itemSets,
  int size, int totalTrans) {
  ArrayList<Object> newVector = new ArrayList<Object>();
  AprioriItemSet result;
  int numFound, k;
  for (int i = 0; i < itemSets.size(); i++) {
    ItemSet first = (ItemSet) itemSets.get(i);
    // labeled loop: as soon as prefixes diverge, skip to the next 'second'
    out: for (int j = i + 1; j < itemSets.size(); j++) {
      ItemSet second = (ItemSet) itemSets.get(j);
      result = new AprioriItemSet(totalTrans);
      result.m_items = new int[first.m_items.length];
      // Find and copy common prefix of size 'size'
      numFound = 0;
      k = 0;
      while (numFound < size) {
        if (first.m_items[k] == second.m_items[k]) {
          if (first.m_items[k] != -1) {
            numFound++;
          }
          result.m_items[k] = first.m_items[k];
        } else {
          // prefixes differ: this pair cannot be merged
          break out;
        }
        k++;
      }
      // Check difference: past the prefix, the two sets must not both have
      // an item at the same position; copy whichever item is present.
      while (k < first.m_items.length) {
        if ((first.m_items[k] != -1) && (second.m_items[k] != -1)) {
          break;
        } else {
          if (first.m_items[k] != -1) {
            result.m_items[k] = first.m_items[k];
          } else {
            result.m_items[k] = second.m_items[k];
          }
        }
        k++;
      }
      // only accept the merge if the whole length was consumed cleanly
      if (k == first.m_items.length) {
        result.m_counter = 0;
        newVector.add(result);
      }
    }
  }
  return newVector;
}
/**
 * Returns the revision string.
 *
 * @return the revision extracted from the CVS/SVN keyword below
 */
@Override
public String getRevision() {
  return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/associations/AssociationRule.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* AssociationRule.java
* Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.associations;
import java.util.ArrayList;
import java.util.Collection;
/**
 * Abstract class for storing and manipulating an association rule.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com).
 * @version $Revision$
 */
public abstract class AssociationRule implements Comparable<AssociationRule> {
  /**
   * Get the premise of this rule.
   *
   * @return the premise of this rule.
   */
  public abstract Collection<Item> getPremise();

  /**
   * Get the consequence of this rule.
   *
   * @return the consequence of this rule.
   */
  public abstract Collection<Item> getConsequence();

  /**
   * Get the name of the primary metric of this rule (e.g. confidence).
   *
   * @return the name of the primary metric of this rule.
   */
  public abstract String getPrimaryMetricName();

  /**
   * Get the value of the metric for this rule.
   *
   * @return the value of the metric for this rule.
   */
  public abstract double getPrimaryMetricValue();

  /**
   * Get the value of the named metric for this rule
   *
   * @param metricName the metric to get the value for
   * @return the value of the named metric
   * @throws Exception if the requested metric is unknown for this rule
   */
  public abstract double getNamedMetricValue(String metricName)
    throws Exception;

  /**
   * Gets the number of metrics available for this rule.
   *
   * @return the number of metrics available for this rule
   */
  public abstract int getNumberOfMetricsForRule();

  /**
   * Return the names of the metrics available for this rule.
   *
   * @return the names of the metrics that are available for this rule.
   */
  public abstract String[] getMetricNamesForRule();

  /**
   * Get all the available metric values for this rule. Values are
   * returned in an array with entries that correspond to the metric
   * names returned by getMetricNamesForRule().
   *
   * @return all the available metrics for this rule.
   * @throws Exception if a metric can't be computed for some reason.
   */
  public abstract double[] getMetricValuesForRule() throws Exception;

  /**
   * Get the support for the premise.
   *
   * @return the support for the premise.
   */
  public abstract int getPremiseSupport();

  /**
   * Get the support for the consequence.
   *
   * @return the support for the consequence.
   */
  public abstract int getConsequenceSupport();

  /**
   * Get the total support for this rule (premise + consequence).
   *
   * @return the total support for this rule.
   */
  public abstract int getTotalSupport();

  /**
   * Get the total number of transactions in the data.
   *
   * @return the total number of transactions in the data.
   */
  public abstract int getTotalTransactions();

  /**
   * Compare this rule to the supplied rule. Rules sort in descending order
   * of their primary metric value.
   *
   * @param other the rule to compare to.
   * @return the result of the comparison.
   */
  @Override
  public int compareTo(AssociationRule other) {
    // negated so that higher metric values sort first
    return -Double.compare(getPrimaryMetricValue(), other.getPrimaryMetricValue());
  }

  /**
   * Return true if this rule is equal to the supplied one.
   *
   * @return true if this rule is the same as the supplied rule.
   */
  @Override
  public boolean equals(Object other) {
    if (!(other instanceof AssociationRule)) {
      return false;
    }
    AssociationRule otherRule = (AssociationRule) other;
    boolean result = getPremise().equals(otherRule.getPremise()) &&
      getConsequence().equals(otherRule.getConsequence()) &&
      (getPrimaryMetricValue() == otherRule.getPrimaryMetricValue());
    return result;
  }

  /**
   * Return a hash code consistent with equals(). Added because equals() was
   * overridden without hashCode(), which breaks hash-based collections.
   *
   * @return a hash code for this rule.
   */
  @Override
  public int hashCode() {
    // The metric value is deliberately excluded: equals() compares doubles
    // with ==, under which 0.0 == -0.0 although their bit patterns differ,
    // so hashing the metric could violate the equals/hashCode contract.
    return 31 * getPremise().hashCode() + getConsequence().hashCode();
  }

  /**
   * Checks whether this rule mentions the given items.
   *
   * @param items the items to look for in the premise and consequence
   * @param useOr if true, return true as soon as any one of the items is
   *          found; if false, return true only if all items are found
   * @return true if the rule contains the items as specified
   */
  public boolean containsItems(ArrayList<Item> items, boolean useOr) {
    int numItems = items.size();
    int count = 0;
    for (Item i : getPremise()) {
      if (items.contains(i)) {
        if (useOr) {
          return true; // can stop here
        } else {
          count++;
        }
      }
    }
    for (Item i : getConsequence()) {
      if (items.contains(i)) {
        if (useOr) {
          return true; // can stop here
        } else {
          count++;
        }
      }
    }
    if (!useOr) {
      if (count == numItems) {
        return true;
      }
    }
    return false;
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/associations/AssociationRules.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* AssociationRules.java
* Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.associations;
import java.io.Serializable;
import java.util.List;
import weka.core.OptionHandler;
import weka.core.Utils;
/**
 * Class encapsulating a list of association rules.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision$
 */
public class AssociationRules implements Serializable {

  /** For serialization */
  private static final long serialVersionUID = 8889198755948056749L;

  /** The scheme that produced these rules */
  protected String m_producer = "Unknown";

  /** The list of rules */
  protected List<AssociationRule> m_rules;

  /**
   * Constructs a new AssociationRules.
   *
   * @param rules the list of rules.
   * @param producer a string describing the scheme that produced these rules.
   */
  public AssociationRules(List<AssociationRule> rules, String producer) {
    m_rules = rules;
    m_producer = producer;
  }

  /**
   * Constructs a new AssociationRules, deriving the producer description
   * from the producing object's class name and (if available) its options.
   *
   * @param rules the list of rules.
   * @param producer the scheme that produced the rules.
   */
  public AssociationRules(List<AssociationRule> rules, Object producer) {
    String name = producer.getClass().getName();
    // strip the common package prefix for readability
    if (name.startsWith("weka.associations.")) {
      name = name.substring("weka.associations.".length());
    }
    // append the scheme's command-line options, if it exposes any
    if (producer instanceof OptionHandler) {
      String[] opts = ((OptionHandler) producer).getOptions();
      name += " " + Utils.joinOptions(opts);
    }
    m_rules = rules;
    m_producer = name;
  }

  /**
   * Constructs a new AssociationRules with an unknown producer.
   *
   * @param rules the list of rules.
   */
  public AssociationRules(List<AssociationRule> rules) {
    this(rules, "Unknown");
  }

  /**
   * Set the rules to use.
   *
   * @param rules the rules to use.
   */
  public void setRules(List<AssociationRule> rules) {
    m_rules = rules;
  }

  /**
   * Get the rules.
   *
   * @return the rules.
   */
  public List<AssociationRule> getRules() {
    return m_rules;
  }

  /**
   * Get the number of rules.
   *
   * @return the number of rules.
   */
  public int getNumRules() {
    return m_rules.size();
  }

  /**
   * Set a textual description of the scheme that produced these rules.
   *
   * @param producer a textual description of the scheme that produced these
   *          rules.
   */
  public void setProducer(String producer) {
    m_producer = producer;
  }

  /**
   * Get a string describing the scheme that produced these rules.
   *
   * @return the producer description
   */
  public String getProducer() {
    return m_producer;
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/associations/AssociationRulesProducer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* AssociationRulesProducer.java
* Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.associations;
/**
 * Interface to something that can provide a list of
 * AssociationRules.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision$
 */
public interface AssociationRulesProducer {
  /**
   * Gets the list of mined association rules.
   *
   * @return the list of association rules discovered during mining.
   *         Returns null if mining hasn't been performed yet.
   */
  AssociationRules getAssociationRules();

  /**
   * Gets a list of the names of the metrics output for
   * each rule. This list should be the same (in terms of
   * the names and order thereof) as that produced by
   * AssociationRule.getMetricNamesForRule().
   *
   * @return an array of the names of the metrics available
   *         for each rule learned by this producer.
   */
  String[] getRuleMetricNames();

  /**
   * Returns true if this AssociationRulesProducer can actually
   * produce rules. Most implementing classes will always return
   * true from this method (obviously :-)). However, an implementing
   * class that actually acts as a wrapper around things that may
   * or may not implement AssociationRulesProducer will want to
   * return false if the thing they wrap can't produce rules.
   *
   * @return true if this producer can produce rules in its current
   *         configuration
   */
  boolean canProduceRules();
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/associations/Associator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Associator.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.associations;
import weka.core.Capabilities;
import weka.core.Instances;
/**
 * Interface implemented by all association-rule learning schemes.
 */
public interface Associator {
  /**
   * Generates an associator. Must initialize all fields of the associator
   * that are not being set via options (ie. multiple calls of buildAssociator
   * must always lead to the same result). Must not change the dataset
   * in any way.
   *
   * @param data set of instances serving as training data
   * @exception Exception if the associator has not been
   *              generated successfully
   */
  void buildAssociations(Instances data) throws Exception;

  /**
   * Returns the Capabilities of this associator. Derived associators have to
   * override this method to enable capabilities.
   *
   * @return the capabilities of this object
   * @see Capabilities
   */
  Capabilities getCapabilities();
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/associations/AssociatorEvaluation.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* AssociatorEvaluation.java
* Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
*/
package weka.associations;
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.util.Enumeration;
import weka.core.Drawable;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.core.converters.ConverterUtils.DataSource;
/**
* Class for evaluating Associaters.
*
* @author fracpete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class AssociatorEvaluation implements RevisionHandler {
/** the result string built by the most recent call to evaluate(...) */
protected StringBuffer m_Result;

/**
 * default constructor; starts with an empty result buffer
 */
public AssociatorEvaluation() {
  super();
  m_Result = new StringBuffer();
}
/**
 * Generates an option string to output on the commandline, covering both
 * the general options and any scheme-specific ones.
 *
 * @param associator the associator to generate the string for
 * @return the option string
 */
protected static String makeOptionString(Associator associator) {
  StringBuilder text = new StringBuilder();
  // general options
  text.append("\nGeneral options:\n\n");
  text.append("-t <training file>\n");
  text.append("\tThe name of the training file.\n");
  text.append("-g <name of graph file>\n");
  text.append("\tOutputs the graph representation (if supported) of the associator to a file.\n");
  // associator specific options, if any
  if (associator instanceof OptionHandler) {
    text.append("\nOptions specific to "
      + associator.getClass().getName().replaceAll(".*\\.", "") + ":\n\n");
    Enumeration<Option> enm = ((OptionHandler) associator).listOptions();
    while (enm.hasMoreElements()) {
      Option option = enm.nextElement();
      text.append(option.synopsis()).append("\n");
      text.append(option.description()).append("\n");
    }
  }
  return text.toString();
}
/**
 * Evaluates an associator with the options given in an array of strings.
 *
 * @param associatorString class of associator as a string
 * @param options the array of string containing the options
 * @throws Exception if model could not be evaluated successfully
 * @return a string describing the results
 */
public static String evaluate(String associatorString, String[] options)
  throws Exception {
  Associator associator;
  // Create associator
  try {
    associator = (Associator) Class.forName(associatorString).newInstance();
  } catch (Exception e) {
    // chain the original exception so the root cause is not lost
    throw new Exception("Can't find class with name " + associatorString
      + '.', e);
  }
  return evaluate(associator, options);
}
/**
 * Evaluates the associator with the given commandline options and returns the
 * evaluation string.
 *
 * @param associator the Associator to evaluate
 * @param options the commandline options
 * @return the generated output string
 * @throws Exception if evaluation fails
 */
public static String evaluate(Associator associator, String[] options)
  throws Exception {
  String trainFileString = "";
  String graphFileName = "";
  AssociatorEvaluation eval;
  DataSource loader;
  // help?
  if (Utils.getFlag('h', options)) {
    throw new Exception("\nHelp requested.\n" + makeOptionString(associator));
  }
  try {
    // general options
    trainFileString = Utils.getOption('t', options);
    if (trainFileString.length() == 0) {
      throw new Exception("No training file given!");
    }
    loader = new DataSource(trainFileString);
    graphFileName = Utils.getOption('g', options);
    // associator specific options
    if (associator instanceof OptionHandler) {
      ((OptionHandler) associator).setOptions(options);
    }
    // left-over options?
    Utils.checkForRemainingOptions(options);
  } catch (Exception e) {
    // chain the original exception so its stack trace is preserved
    throw new Exception("\nWeka exception: " + e.getMessage() + "\n"
      + makeOptionString(associator), e);
  }
  // load file and build associations
  eval = new AssociatorEvaluation();
  String results = eval.evaluate(associator,
    new Instances(loader.getDataSet()));
  // If associator is drawable output string describing graph
  if ((associator instanceof Drawable) && (graphFileName.length() != 0)) {
    // try-with-resources guarantees the writer is closed even if writing
    // the graph fails (the original leaked the writer on exception)
    try (BufferedWriter writer =
      new BufferedWriter(new FileWriter(graphFileName))) {
      writer.write(((Drawable) associator).graph());
      writer.newLine();
      writer.flush();
    }
  }
  return results;
}
/**
 * Evaluates the associator on the supplied data and returns the
 * evaluation string (the associator's model plus the elapsed build time).
 *
 * @param associator the Associator to evaluate
 * @param data the data to run the associator with
 * @return the generated output string
 * @throws Exception if evaluation fails
 */
public String evaluate(Associator associator, Instances data)
  throws Exception {
  // build associations, timing how long the build takes
  final long start = System.currentTimeMillis();
  associator.buildAssociations(data);
  final long elapsed = System.currentTimeMillis() - start;

  // assemble the result: model description followed by the timing summary
  m_Result = new StringBuffer(associator.toString());
  m_Result.append("\n=== Evaluation ===\n\n");
  m_Result.append("Elapsed time: " + (((double) elapsed) / 1000) + "s");
  m_Result.append("\n");

  return m_Result.toString();
}
/**
 * Tests whether the current evaluation object is equal to another evaluation
 * object. The (run-dependent) "Elapsed time" line is stripped from both
 * results before comparison.
 *
 * @param obj the object to compare against
 * @return true if the two objects are equal
 */
@Override
public boolean equals(Object obj) {
  if ((obj == null) || !(obj.getClass().equals(this.getClass()))) {
    return false;
  }
  AssociatorEvaluation cmp = (AssociatorEvaluation) obj;

  // TODO: better comparison???
  String ours = m_Result.toString().replaceAll("Elapsed time.*", "");
  String theirs = cmp.m_Result.toString().replaceAll("Elapsed time.*", "");
  return ours.equals(theirs);
}
/**
 * Returns a summary string of the evaluation without a title.
 *
 * @return the summary string
 */
public String toSummaryString() {
  // delegate to the titled variant with an empty title
  return toSummaryString("");
}
/**
 * Returns a summary string of the evaluation, preceded by the given title.
 *
 * @param title the title to print before the result (may be empty, in which
 *          case no title line is emitted)
 * @return the summary string
 */
public String toSummaryString(String title) {
  StringBuffer summary = new StringBuffer(title);
  // only add a separating newline when a title was actually supplied
  if (title.length() != 0) {
    summary.append("\n");
  }
  return summary.append(m_Result).toString();
}
/**
 * Returns the currently stored result as an untitled summary string.
 *
 * @return the currently stored result
 * @see #toSummaryString()
 */
@Override
public String toString() {
  return toSummaryString();
}
/**
 * Returns the revision string extracted from the CVS/SVN keyword.
 *
 * @return the revision
 */
@Override
public String getRevision() {
  return RevisionUtils.extract("$Revision$");
}
/**
 * A test method for this class. Just extracts the first command line argument
 * as an associator class name and calls evaluate.
 *
 * @param args an array of command line arguments, the first of which must be
 *          the class name of an associator.
 */
public static void main(String[] args) {
  try {
    if (args.length == 0) {
      // fixed copy-paste error: the message previously said "kernel"
      throw new Exception(
        "The first argument must be the class name of an associator");
    }
    String associator = args[0];
    // blank out the class name so it is not treated as a scheme option
    args[0] = "";
    System.out.println(evaluate(associator, args));
  } catch (Exception ex) {
    ex.printStackTrace();
    System.err.println(ex.getMessage());
  }
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/associations/BinaryItem.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* BinaryItem.java
* Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.associations;
import java.io.Serializable;
import weka.core.Attribute;
/**
 * Class that encapsulates an item whose backing Attribute is
 * binary or unary.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision$
 */
public class BinaryItem extends NominalItem
  implements Serializable {

  /** For serialization */
  private static final long serialVersionUID = -3372941834914147669L;

  /**
   * Constructor.
   *
   * @param att the attribute that backs this item.
   * @param valueIndex the index of the value for this item.
   * @throws Exception if the backing attribute is not binary or unary.
   */
  public BinaryItem(Attribute att, int valueIndex) throws Exception {
    super(att, valueIndex);

    if (att.isNumeric() || (att.isNominal() && att.numValues() > 2)) {
      throw new Exception("BinaryItem must be constructed using a nominal attribute" +
          " with at most 2 values!");
    }
  }

  /**
   * Equals. Just compares attribute and valueIndex.
   *
   * @param compareTo the object to compare against
   * @return true if this BinaryItem is equal to the argument.
   */
  @Override
  public boolean equals(Object compareTo) {
    if (!(compareTo instanceof BinaryItem)) {
      return false;
    }

    BinaryItem b = (BinaryItem) compareTo;
    return m_attribute.equals(b.getAttribute())
        && m_valueIndex == b.getValueIndex();
  }

  /**
   * Hash code, consistent with equals(): based only on the attribute and the
   * value index. The previous implementation also multiplied by m_frequency,
   * which equals() does not compare, so two equal items could have different
   * hash codes — a violation of the equals/hashCode contract that breaks
   * hashed collections.
   *
   * @return the hash code
   */
  @Override
  public int hashCode() {
    return (m_attribute.name().hashCode() ^ m_attribute.numValues())
        * (m_valueIndex + 1);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/associations/CARuleMiner.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* CARuleMiner.java
* Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.associations;
import java.util.ArrayList;
import weka.core.Instances;
import weka.core.OptionHandler;
/**
 * Interface for learning class association rules. All schemes for learning
 * class association rules implement this interface.
 *
 * @author Stefan Mutter (mutter@cs.waikato.ac.nz)
 * @version $Revision$
 */
public interface CARuleMiner extends OptionHandler {
/**
 * Method for mining class association rules. Must initialize all fields of
 * the CARuleMiner that are not being set via options (ie. multiple calls of
 * mineCARs must always lead to the same result). Must not change the dataset
 * in any way.
 *
 * @param data the instances for which class association rules are mined
 * @throws Exception throws exception if class association rules cannot be
 * mined
 * @return class association rules and their scoring metric in a FastVector
 * array
 */
public ArrayList<Object>[] mineCARs(Instances data) throws Exception;
/**
 * Gets the instances without the class attribute
 *
 * @return the instances without the class attribute
 */
public Instances getInstancesNoClass();
/**
 * Gets the class attribute and its values for all instances
 *
 * @return the class attribute and its values for all instances
 */
public Instances getInstancesOnlyClass();
/**
 * Gets the name of the scoring metric used for class-association-rule mining
 *
 * @return string containing the name of the scoring metric
 */
public String metricString();
/**
 * Sets the class index for the class association rule miner
 *
 * @param index the class index
 */
public void setClassIndex(int index);
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/associations/CheckAssociator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* CheckAssociator.java
* Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.associations;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;
import weka.core.Attribute;
import weka.core.CheckScheme;
import weka.core.Instances;
import weka.core.MultiInstanceCapabilitiesHandler;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.SerializationHelper;
import weka.core.TestInstances;
import weka.core.Utils;
import weka.core.WeightedInstancesHandler;
/**
* Class for examining the capabilities and finding problems with associators.
* If you implement an associators using the WEKA.libraries, you should run the
* checks on it to ensure robustness and correct operation. Passing all the
* tests of this object does not mean bugs in the associators don't exist, but
* this will help find some common ones.
* <p/>
*
* Typical usage:
* <p/>
* <code>java weka.associations.CheckAssociator -W associator_name
* -- associator_options </code>
* <p/>
*
* CheckAssociator reports on the following:
* <ul>
* <li>Associator abilities
* <ul>
* <li>Possible command line options to the associators</li>
* <li>Whether the associators can predict nominal, numeric, string, date or
* relational class attributes.</li>
* <li>Whether the associators can handle numeric predictor attributes</li>
* <li>Whether the associators can handle nominal predictor attributes</li>
* <li>Whether the associators can handle string predictor attributes</li>
* <li>Whether the associators can handle date predictor attributes</li>
* <li>Whether the associators can handle relational predictor attributes</li>
* <li>Whether the associators can handle multi-instance data</li>
* <li>Whether the associators can handle missing predictor values</li>
* <li>Whether the associators can handle missing class values</li>
* <li>Whether a nominal associators only handles 2 class problems</li>
* <li>Whether the associators can handle instance weights</li>
* </ul>
* </li>
* <li>Correct functioning
* <ul>
* <li>Correct initialisation during buildAssociations (i.e. no result changes
* when buildAssociations called repeatedly)</li>
* <li>Whether the associators alters the data pased to it (number of instances,
* instance order, instance weights, etc)</li>
* </ul>
* </li>
* <li>Degenerate cases
* <ul>
* <li>building associators with zero training instances</li>
* <li>all but one predictor attribute values missing</li>
* <li>all predictor attribute values missing</li>
* <li>all but one class values missing</li>
* <li>all class values missing</li>
* </ul>
* </li>
* </ul>
* Running CheckAssociator with the debug option set will output the training
* dataset for any failed tests.
* <p/>
*
* The <code>weka.associations.AbstractAssociatorTest</code> uses this class to
* test all the associators. Any changes here, have to be checked in that
* abstract test class, too.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -D
* Turn on debugging output.
* </pre>
*
* <pre>
* -S
* Silent mode - prints nothing to stdout.
* </pre>
*
* <pre>
* -N <num>
* The number of instances in the datasets (default 20).
* </pre>
*
* <pre>
* -nominal <num>
* The number of nominal attributes (default 2).
* </pre>
*
* <pre>
* -nominal-values <num>
* The number of values for nominal attributes (default 1).
* </pre>
*
* <pre>
* -numeric <num>
* The number of numeric attributes (default 1).
* </pre>
*
* <pre>
* -string <num>
* The number of string attributes (default 1).
* </pre>
*
* <pre>
* -date <num>
* The number of date attributes (default 1).
* </pre>
*
* <pre>
* -relational <num>
* The number of relational attributes (default 1).
* </pre>
*
* <pre>
* -num-instances-relational <num>
* The number of instances in relational/bag attributes (default 10).
* </pre>
*
* <pre>
* -words <comma-separated-list>
* The words to use in string attributes.
* </pre>
*
* <pre>
* -word-separators <chars>
* The word separators to use in string attributes.
* </pre>
*
* <pre>
* -W
* Full name of the associator analysed.
* eg: weka.associations.Apriori
* (default weka.associations.Apriori)
* </pre>
*
* <pre>
* Options specific to associator weka.associations.Apriori:
* </pre>
*
* <pre>
* -N <required number of rules output>
* The required number of rules. (default = 10)
* </pre>
*
* <pre>
* -T <0=confidence | 1=lift | 2=leverage | 3=Conviction>
* The metric type by which to rank rules. (default = confidence)
* </pre>
*
* <pre>
* -C <minimum metric score of a rule>
* The minimum confidence of a rule. (default = 0.9)
* </pre>
*
* <pre>
* -D <delta for minimum support>
* The delta by which the minimum support is decreased in
* each iteration. (default = 0.05)
* </pre>
*
* <pre>
* -U <upper bound for minimum support>
* Upper bound for minimum support. (default = 1.0)
* </pre>
*
* <pre>
* -M <lower bound for minimum support>
* The lower bound for the minimum support. (default = 0.1)
* </pre>
*
* <pre>
* -S <significance level>
* If used, rules are tested for significance at
* the given level. Slower. (default = no significance testing)
* </pre>
*
* <pre>
* -I
* If set the itemsets found are also output. (default = no)
* </pre>
*
* <pre>
* -R
* Remove columns that contain all missing values (default = no)
* </pre>
*
* <pre>
* -V
* Report progress iteratively. (default = no)
* </pre>
*
* <pre>
* -A
* If set class association rules are mined. (default = no)
* </pre>
*
* <pre>
* -c <the class index>
* The class index. (default = last)
* </pre>
*
* <!-- options-end -->
*
* Options after -- are passed to the designated associator.
* <p/>
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
* @see TestInstances
*/
public class CheckAssociator extends CheckScheme implements RevisionHandler {
/*
* Note about test methods: - methods return array of booleans - first index:
* success or not - second index: acceptable or not (e.g., Exception is OK)
*
* FracPete (fracpete at waikato dot ac dot nz)
*/
/** a "dummy" class type, used by tests that run without any class attribute */
public final static int NO_CLASS = -1;
/** The associator to be examined (defaults to Apriori) */
protected Associator m_Associator = new weka.associations.Apriori();
/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 */
@Override
public Enumeration<Option> listOptions() {
  Vector<Option> options = new Vector<Option>();

  options.addAll(Collections.list(super.listOptions()));

  options.add(new Option("\tFull name of the associator analysed.\n"
    + "\teg: weka.associations.Apriori\n"
    + "\t(default weka.associations.Apriori)", "W", 1, "-W"));

  // append the scheme-specific options of the configured associator, if any
  // (instanceof is false for null, so no separate null check is needed)
  if (m_Associator instanceof OptionHandler) {
    options.add(new Option("", "", 0, "\nOptions specific to associator "
      + m_Associator.getClass().getName() + ":"));
    options.addAll(Collections.list(((OptionHandler) m_Associator)
      .listOptions()));
  }

  return options.elements();
}
/**
* Parses a given list of options.
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -D
* Turn on debugging output.
* </pre>
*
* <pre>
* -S
* Silent mode - prints nothing to stdout.
* </pre>
*
* <pre>
* -N <num>
* The number of instances in the datasets (default 20).
* </pre>
*
* <pre>
* -nominal <num>
* The number of nominal attributes (default 2).
* </pre>
*
* <pre>
* -nominal-values <num>
* The number of values for nominal attributes (default 1).
* </pre>
*
* <pre>
* -numeric <num>
* The number of numeric attributes (default 1).
* </pre>
*
* <pre>
* -string <num>
* The number of string attributes (default 1).
* </pre>
*
* <pre>
* -date <num>
* The number of date attributes (default 1).
* </pre>
*
* <pre>
* -relational <num>
* The number of relational attributes (default 1).
* </pre>
*
* <pre>
* -num-instances-relational <num>
* The number of instances in relational/bag attributes (default 10).
* </pre>
*
* <pre>
* -words <comma-separated-list>
* The words to use in string attributes.
* </pre>
*
* <pre>
* -word-separators <chars>
* The word separators to use in string attributes.
* </pre>
*
* <pre>
* -W
* Full name of the associator analysed.
* eg: weka.associations.Apriori
* (default weka.associations.Apriori)
* </pre>
*
* <pre>
* Options specific to associator weka.associations.Apriori:
* </pre>
*
* <pre>
* -N <required number of rules output>
* The required number of rules. (default = 10)
* </pre>
*
* <pre>
* -T <0=confidence | 1=lift | 2=leverage | 3=Conviction>
* The metric type by which to rank rules. (default = confidence)
* </pre>
*
* <pre>
* -C <minimum metric score of a rule>
* The minimum confidence of a rule. (default = 0.9)
* </pre>
*
* <pre>
* -D <delta for minimum support>
* The delta by which the minimum support is decreased in
* each iteration. (default = 0.05)
* </pre>
*
* <pre>
* -U <upper bound for minimum support>
* Upper bound for minimum support. (default = 1.0)
* </pre>
*
* <pre>
* -M <lower bound for minimum support>
* The lower bound for the minimum support. (default = 0.1)
* </pre>
*
* <pre>
* -S <significance level>
* If used, rules are tested for significance at
* the given level. Slower. (default = no significance testing)
* </pre>
*
* <pre>
* -I
* If set the itemsets found are also output. (default = no)
* </pre>
*
* <pre>
* -R
* Remove columns that contain all missing values (default = no)
* </pre>
*
* <pre>
* -V
* Report progress iteratively. (default = no)
* </pre>
*
* <pre>
* -A
* If set class association rules are mined. (default = no)
* </pre>
*
* <pre>
* -c <the class index>
* The class index. (default = last)
* </pre>
*
* <!-- options-end -->
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
  super.setOptions(options);

  // determine the associator class; fall back to Apriori when -W is absent
  String className = Utils.getOption('W', options);
  if (className.length() == 0) {
    className = weka.associations.Apriori.class.getName();
  }

  // everything after "--" is handed to the associator itself
  setAssociator((Associator) forName("weka.associations", Associator.class,
    className, Utils.partitionOptions(options)));
}
/**
 * Gets the current settings of the CheckAssociator.
 *
 * @return an array of strings suitable for passing to setOptions
 */
@Override
public String[] getOptions() {
  Vector<String> options = new Vector<String>();

  Collections.addAll(options, super.getOptions());

  if (getAssociator() != null) {
    options.add("-W");
    options.add(getAssociator().getClass().getName());
  }

  // scheme-specific options follow the "--" separator
  if ((m_Associator != null) && (m_Associator instanceof OptionHandler)) {
    String[] schemeOptions = ((OptionHandler) m_Associator).getOptions();
    if (schemeOptions.length > 0) {
      options.add("--");
      Collections.addAll(options, schemeOptions);
    }
  }

  return options.toArray(new String[options.size()]);
}
/**
 * Begin the tests, reporting results to System.out
 */
@Override
public void doTests() {
// nothing to test without an associator instance
if (getAssociator() == null) {
println("\n=== No associator set ===");
return;
}
println("\n=== Check on Associator: "
+ getAssociator().getClass().getName() + " ===\n");
// Start tests
m_ClasspathProblems = false;
println("--> Checking for interfaces");
canTakeOptions();
// capability flags gathered here steer the per-class-type batteries below
boolean weightedInstancesHandler = weightedInstancesHandler()[0];
boolean multiInstanceHandler = multiInstanceHandler()[0];
println("--> Associator tests");
declaresSerialVersionUID();
// first run the battery without any class attribute ...
println("--> no class attribute");
testsWithoutClass(weightedInstancesHandler, multiInstanceHandler);
// ... then once per class attribute type
println("--> with class attribute");
testsPerClassType(Attribute.NOMINAL, weightedInstancesHandler,
multiInstanceHandler);
testsPerClassType(Attribute.NUMERIC, weightedInstancesHandler,
multiInstanceHandler);
testsPerClassType(Attribute.DATE, weightedInstancesHandler,
multiInstanceHandler);
testsPerClassType(Attribute.STRING, weightedInstancesHandler,
multiInstanceHandler);
testsPerClassType(Attribute.RELATIONAL, weightedInstancesHandler,
multiInstanceHandler);
}
/**
 * Set the associator to test.
 *
 * @param newAssociator the Associator to use.
 */
public void setAssociator(Associator newAssociator) {
  m_Associator = newAssociator;
}
/**
 * Get the associator currently being tested.
 *
 * @return the associator being tested
 */
public Associator getAssociator() {
  return m_Associator;
}
/**
 * Run a battery of tests for a given class attribute type
 *
 * @param classType the type of the class attribute (NOMINAL, NUMERIC, etc.)
 * @param weighted true if the associator says it handles weights
 * @param multiInstance true if the associator is a multi-instance associator
 */
protected void testsPerClassType(int classType, boolean weighted,
boolean multiInstance) {
// first determine which predictor attribute types are supported at all;
// the remaining tests are only run for supported combinations
boolean PNom = canPredict(true, false, false, false, false, multiInstance,
classType)[0];
boolean PNum = canPredict(false, true, false, false, false, multiInstance,
classType)[0];
boolean PStr = canPredict(false, false, true, false, false, multiInstance,
classType)[0];
boolean PDat = canPredict(false, false, false, true, false, multiInstance,
classType)[0];
boolean PRel;
// relational predictors are only probed for non-multi-instance schemes
if (!multiInstance) {
PRel = canPredict(false, false, false, false, true, multiInstance,
classType)[0];
} else {
PRel = false;
}
if (PNom || PNum || PStr || PDat || PRel) {
if (weighted) {
instanceWeights(PNom, PNum, PStr, PDat, PRel, multiInstance, classType);
}
if (classType == Attribute.NOMINAL) {
canHandleNClasses(PNom, PNum, PStr, PDat, PRel, multiInstance, 4);
}
// class as first and as second attribute (index 0 and 1)
if (!multiInstance) {
canHandleClassAsNthAttribute(PNom, PNum, PStr, PDat, PRel,
multiInstance, classType, 0);
canHandleClassAsNthAttribute(PNom, PNum, PStr, PDat, PRel,
multiInstance, classType, 1);
}
canHandleZeroTraining(PNom, PNum, PStr, PDat, PRel, multiInstance,
classType);
// probe 20% missing first; only escalate to 100% when 20% succeeded
boolean handleMissingPredictors = canHandleMissing(PNom, PNum, PStr,
PDat, PRel, multiInstance, classType, true, false, 20)[0];
if (handleMissingPredictors) {
canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance,
classType, true, false, 100);
}
boolean handleMissingClass = canHandleMissing(PNom, PNum, PStr, PDat,
PRel, multiInstance, classType, false, true, 20)[0];
if (handleMissingClass) {
canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance,
classType, false, true, 100);
}
correctBuildInitialisation(PNom, PNum, PStr, PDat, PRel, multiInstance,
classType);
datasetIntegrity(PNom, PNum, PStr, PDat, PRel, multiInstance, classType,
handleMissingPredictors, handleMissingClass);
}
}
/**
 * Run a battery of tests without a class
 *
 * @param weighted true if the associator says it handles weights
 * @param multiInstance true if the associator is a multi-instance associator
 */
protected void testsWithoutClass(boolean weighted, boolean multiInstance) {
// first determine which predictor attribute types are supported at all;
// the remaining tests are only run for supported combinations
boolean PNom = canPredict(true, false, false, false, false, multiInstance,
NO_CLASS)[0];
boolean PNum = canPredict(false, true, false, false, false, multiInstance,
NO_CLASS)[0];
boolean PStr = canPredict(false, false, true, false, false, multiInstance,
NO_CLASS)[0];
boolean PDat = canPredict(false, false, false, true, false, multiInstance,
NO_CLASS)[0];
boolean PRel;
// relational predictors are only probed for non-multi-instance schemes
if (!multiInstance) {
PRel = canPredict(false, false, false, false, true, multiInstance,
NO_CLASS)[0];
} else {
PRel = false;
}
if (PNom || PNum || PStr || PDat || PRel) {
if (weighted) {
instanceWeights(PNom, PNum, PStr, PDat, PRel, multiInstance, NO_CLASS);
}
canHandleZeroTraining(PNom, PNum, PStr, PDat, PRel, multiInstance,
NO_CLASS);
// probe 20% missing first; only escalate to 100% when 20% succeeded
boolean handleMissingPredictors = canHandleMissing(PNom, PNum, PStr,
PDat, PRel, multiInstance, NO_CLASS, true, false, 20)[0];
if (handleMissingPredictors) {
canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance, NO_CLASS,
true, false, 100);
}
correctBuildInitialisation(PNom, PNum, PStr, PDat, PRel, multiInstance,
NO_CLASS);
datasetIntegrity(PNom, PNum, PStr, PDat, PRel, multiInstance, NO_CLASS,
handleMissingPredictors, false);
}
}
/**
 * Checks whether the scheme can take command line options.
 *
 * @return index 0 is true if the associator can take options
 */
protected boolean[] canTakeOptions() {
  boolean[] result = new boolean[2];

  print("options...");
  boolean handlesOptions = m_Associator instanceof OptionHandler;
  if (handlesOptions) {
    println("yes");
    // in debug mode, dump the full list of supported options
    if (m_Debug) {
      println("\n=== Full report ===");
      Enumeration<Option> enm = ((OptionHandler) m_Associator).listOptions();
      while (enm.hasMoreElements()) {
        Option option = enm.nextElement();
        print(option.synopsis() + "\n" + option.description() + "\n");
      }
      println("\n");
    }
  } else {
    println("no");
  }
  result[0] = handlesOptions;

  return result;
}
/**
 * Checks whether the scheme says it can handle instance weights.
 *
 * @return index 0 is true if the associator handles instance weights
 */
protected boolean[] weightedInstancesHandler() {
  boolean[] result = new boolean[2];

  print("weighted instances associator...");
  result[0] = m_Associator instanceof WeightedInstancesHandler;
  println(result[0] ? "yes" : "no");

  return result;
}
/**
 * Checks whether the scheme handles multi-instance data.
 *
 * @return index 0 is true if the associator handles multi-instance data
 */
protected boolean[] multiInstanceHandler() {
  boolean[] result = new boolean[2];

  print("multi-instance associator...");
  result[0] = m_Associator instanceof MultiInstanceCapabilitiesHandler;
  println(result[0] ? "yes" : "no");

  return result;
}
/**
 * Tests for a serialVersionUID. Fails in case the scheme doesn't declare a
 * UID.
 *
 * @return index 0 is true if the scheme declares a UID
 */
protected boolean[] declaresSerialVersionUID() {
  boolean[] result = new boolean[2];

  print("serialVersionUID...");
  result[0] = !SerializationHelper.needsUID(m_Associator.getClass());
  println(result[0] ? "yes" : "no");

  return result;
}
/**
 * Checks basic prediction of the scheme, for simple non-troublesome datasets.
 *
 * @param nominalPredictor if true use nominal predictor attributes
 * @param numericPredictor if true use numeric predictor attributes
 * @param stringPredictor if true use string predictor attributes
 * @param datePredictor if true use date predictor attributes
 * @param relationalPredictor if true use relational predictor attributes
 * @param multiInstance whether multi-instance is needed
 * @param classType the class type (NOMINAL, NUMERIC, etc.)
 * @return index 0 is true if the test was passed, index 1 is true if test was
 *         acceptable
 */
protected boolean[] canPredict(boolean nominalPredictor,
  boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
  boolean relationalPredictor, boolean multiInstance, int classType) {
  print("basic predict");
  printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
    datePredictor, relationalPredictor, multiInstance, classType);
  print("...");

  // failure messages containing any of these substrings count as acceptable
  ArrayList<String> acceptable = new ArrayList<String>();
  Collections.addAll(acceptable, "any", "unary", "binary", "nominal",
    "numeric", "string", "date", "relational", "multi-instance",
    "not in classpath");

  int numTrain = getNumInstances();
  int numClasses = 2;
  int missingLevel = 0;
  boolean predictorMissing = false;
  boolean classMissing = false;

  return runBasicTest(nominalPredictor, numericPredictor, stringPredictor,
    datePredictor, relationalPredictor, multiInstance, classType,
    missingLevel, predictorMissing, classMissing, numTrain, numClasses,
    acceptable);
}
/**
 * Checks whether nominal schemes can handle more than two classes. If a
 * scheme is only designed for two-class problems it should throw an
 * appropriate exception for multi-class problems.
 *
 * @param nominalPredictor if true use nominal predictor attributes
 * @param numericPredictor if true use numeric predictor attributes
 * @param stringPredictor if true use string predictor attributes
 * @param datePredictor if true use date predictor attributes
 * @param relationalPredictor if true use relational predictor attributes
 * @param multiInstance whether multi-instance is needed
 * @param numClasses the number of classes to test
 * @return index 0 is true if the test was passed, index 1 is true if test was
 *         acceptable
 */
protected boolean[] canHandleNClasses(boolean nominalPredictor,
  boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
  boolean relationalPredictor, boolean multiInstance, int numClasses) {
  print("more than two class problems");
  printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
    datePredictor, relationalPredictor, multiInstance, Attribute.NOMINAL);
  print("...");

  // failure messages mentioning the class count are deemed acceptable
  ArrayList<String> acceptable = new ArrayList<String>();
  Collections.addAll(acceptable, "number", "class");

  int numTrain = getNumInstances();
  int missingLevel = 0;
  boolean predictorMissing = false;
  boolean classMissing = false;

  return runBasicTest(nominalPredictor, numericPredictor, stringPredictor,
    datePredictor, relationalPredictor, multiInstance, Attribute.NOMINAL,
    missingLevel, predictorMissing, classMissing, numTrain, numClasses,
    acceptable);
}
/**
 * Checks whether the scheme can handle class attributes as Nth attribute.
 *
 * @param nominalPredictor if true use nominal predictor attributes
 * @param numericPredictor if true use numeric predictor attributes
 * @param stringPredictor if true use string predictor attributes
 * @param datePredictor if true use date predictor attributes
 * @param relationalPredictor if true use relational predictor attributes
 * @param multiInstance whether multi-instance is needed
 * @param classType the class type (NUMERIC, NOMINAL, etc.)
 * @param classIndex the index of the class attribute (0-based, -1 means last
 *          attribute)
 * @return index 0 is true if the test was passed, index 1 is true if test was
 *         acceptable
 * @see TestInstances#CLASS_IS_LAST
 */
protected boolean[] canHandleClassAsNthAttribute(boolean nominalPredictor,
  boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
  boolean relationalPredictor, boolean multiInstance, int classType,
  int classIndex) {
  // report which position the class attribute is placed at
  if (classIndex == TestInstances.CLASS_IS_LAST) {
    print("class attribute as last attribute");
  } else {
    print("class attribute as " + (classIndex + 1) + ". attribute");
  }
  printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
    datePredictor, relationalPredictor, multiInstance, classType);
  print("...");

  // no failure messages are acceptable for this test
  ArrayList<String> acceptable = new ArrayList<String>();

  int numTrain = getNumInstances();
  int numClasses = 2;
  int missingLevel = 0;
  boolean predictorMissing = false;
  boolean classMissing = false;

  return runBasicTest(nominalPredictor, numericPredictor, stringPredictor,
    datePredictor, relationalPredictor, multiInstance, classType, classIndex,
    missingLevel, predictorMissing, classMissing, numTrain, numClasses,
    acceptable);
}
/**
 * Checks whether the scheme can handle zero training instances.
 *
 * @param nominalPredictor if true use nominal predictor attributes
 * @param numericPredictor if true use numeric predictor attributes
 * @param stringPredictor if true use string predictor attributes
 * @param datePredictor if true use date predictor attributes
 * @param relationalPredictor if true use relational predictor attributes
 * @param multiInstance whether multi-instance is needed
 * @param classType the class type (NUMERIC, NOMINAL, etc.)
 * @return index 0 is true if the test was passed, index 1 is true if test was
 *         acceptable
 */
protected boolean[] canHandleZeroTraining(boolean nominalPredictor,
  boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
  boolean relationalPredictor, boolean multiInstance, int classType) {
  print("handle zero training instances");
  printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
    datePredictor, relationalPredictor, multiInstance, classType);
  print("...");

  // failure messages mentioning the training data are deemed acceptable
  ArrayList<String> acceptable = new ArrayList<String>();
  Collections.addAll(acceptable, "train", "value");

  // the whole point of this test: an empty training set
  int numTrain = 0;
  int numClasses = 2;
  int missingLevel = 0;
  boolean predictorMissing = false;
  boolean classMissing = false;

  return runBasicTest(nominalPredictor, numericPredictor, stringPredictor,
    datePredictor, relationalPredictor, multiInstance, classType,
    missingLevel, predictorMissing, classMissing, numTrain, numClasses,
    acceptable);
}
  /**
   * Checks whether the scheme correctly initialises models when
   * buildAssociations is called. This test calls buildAssociations with one
   * training dataset. buildAssociations is then called on a training set with
   * different structure, and then again with the original training set. If the
   * equals method of the AssociatorEvaluation class returns false, this is
   * noted as incorrect build initialisation.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @return index 0 is true if the test was passed
   */
  protected boolean[] correctBuildInitialisation(boolean nominalPredictor,
    boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
    boolean relationalPredictor, boolean multiInstance, int classType) {
    boolean[] result = new boolean[2];
    print("correct initialisation during buildAssociations");
    printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
      datePredictor, relationalPredictor, multiInstance, classType);
    print("...");
    int numTrain = getNumInstances(), numClasses = 2, missingLevel = 0;
    boolean predictorMissing = false, classMissing = false;
    Instances train1 = null;
    Instances train2 = null;
    Associator associator = null;
    // evaluation1A/1B: first and second build on train1; evaluation2: build on
    // the structurally different train2 in between
    AssociatorEvaluation evaluation1A = null;
    AssociatorEvaluation evaluation1B = null;
    AssociatorEvaluation evaluation2 = null;
    // tracks how far the test got, so the debug report can name the failing step
    int stage = 0;
    try {
      // Make two train sets with different numbers of attributes
      train1 = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal()
        : 0, numericPredictor ? getNumNumeric() : 0,
        stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0,
        relationalPredictor ? getNumRelational() : 0, numClasses, classType,
        multiInstance);
      // train2 uses a different seed and one extra attribute of each kind
      train2 = makeTestDataset(84, numTrain,
        nominalPredictor ? getNumNominal() + 1 : 0,
        numericPredictor ? getNumNumeric() + 1 : 0,
        stringPredictor ? getNumString() + 1 : 0,
        datePredictor ? getNumDate() + 1 : 0,
        relationalPredictor ? getNumRelational() + 1 : 0, numClasses,
        classType, multiInstance);
      // missingLevel is 0 here, so this branch is currently inert
      if (missingLevel > 0) {
        addMissing(train1, missingLevel, predictorMissing, classMissing);
        addMissing(train2, missingLevel, predictorMissing, classMissing);
      }
      associator = AbstractAssociator.makeCopies(getAssociator(), 1)[0];
      evaluation1A = new AssociatorEvaluation();
      evaluation1B = new AssociatorEvaluation();
      evaluation2 = new AssociatorEvaluation();
    } catch (Exception ex) {
      // failures here are a harness problem, not a scheme failure
      throw new Error("Error setting up for tests: " + ex.getMessage());
    }
    try {
      // build/evaluate on train1, then train2, then train1 again; the two
      // train1 evaluations must agree if the scheme re-initialises properly
      stage = 0;
      evaluation1A.evaluate(associator, train1);
      stage = 1;
      evaluation2.evaluate(associator, train2);
      stage = 2;
      evaluation1B.evaluate(associator, train1);
      stage = 3;
      if (!evaluation1A.equals(evaluation1B)) {
        if (m_Debug) {
          println("\n=== Full report ===\n"
            + evaluation1A.toSummaryString("\nFirst buildAssociations()")
            + "\n\n");
          println(evaluation1B.toSummaryString("\nSecond buildAssociations()")
            + "\n\n");
        }
        throw new Exception("Results differ between buildAssociations calls");
      }
      println("yes");
      result[0] = true;
    } catch (Exception ex) {
      println("no");
      result[0] = false;
      if (m_Debug) {
        println("\n=== Full Report ===");
        print("Problem during building");
        // report which stage failed, based on how far we got above
        switch (stage) {
        case 0:
          print(" of dataset 1");
          break;
        case 1:
          print(" of dataset 2");
          break;
        case 2:
          print(" of dataset 1 (2nd build)");
          break;
        case 3:
          print(", comparing results from builds of dataset 1");
          break;
        }
        println(": " + ex.getMessage() + "\n");
        println("here are the datasets:\n");
        println("=== Train1 Dataset ===\n" + train1.toString() + "\n");
        println("=== Train2 Dataset ===\n" + train2.toString() + "\n");
      }
    }
    return result;
  }
/**
* Checks basic missing value handling of the scheme. If the missing values
* cause an exception to be thrown by the scheme, this will be recorded.
*
* @param nominalPredictor if true use nominal predictor attributes
* @param numericPredictor if true use numeric predictor attributes
* @param stringPredictor if true use string predictor attributes
* @param datePredictor if true use date predictor attributes
* @param relationalPredictor if true use relational predictor attributes
* @param multiInstance whether multi-instance is needed
* @param classType the class type (NUMERIC, NOMINAL, etc.)
* @param predictorMissing true if the missing values may be in the predictors
* @param classMissing true if the missing values may be in the class
* @param missingLevel the percentage of missing values
* @return index 0 is true if the test was passed, index 1 is true if test was
* acceptable
*/
protected boolean[] canHandleMissing(boolean nominalPredictor,
boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
boolean relationalPredictor, boolean multiInstance, int classType,
boolean predictorMissing, boolean classMissing, int missingLevel) {
if (missingLevel == 100) {
print("100% ");
}
print("missing");
if (predictorMissing) {
print(" predictor");
if (classMissing) {
print(" and");
}
}
if (classMissing) {
print(" class");
}
print(" values");
printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
datePredictor, relationalPredictor, multiInstance, classType);
print("...");
ArrayList<String> accepts = new ArrayList<String>();
accepts.add("missing");
accepts.add("value");
accepts.add("train");
int numTrain = getNumInstances(), numClasses = 2;
return runBasicTest(nominalPredictor, numericPredictor, stringPredictor,
datePredictor, relationalPredictor, multiInstance, classType,
missingLevel, predictorMissing, classMissing, numTrain, numClasses,
accepts);
}
  /**
   * Checks whether the associator can handle instance weights. This test
   * compares the associator performance on two datasets that are identical
   * except for the training weights. If the results change, then the associator
   * must be using the weights. It may be possible to get a false positive from
   * this test if the weight changes aren't significant enough to induce a
   * change in associator performance (but the weights are chosen to minimize
   * the likelihood of this).
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @return index 0 true if the test was passed
   */
  protected boolean[] instanceWeights(boolean nominalPredictor,
    boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
    boolean relationalPredictor, boolean multiInstance, int classType) {
    print("associator uses instance weights");
    printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
      datePredictor, relationalPredictor, multiInstance, classType);
    print("...");
    // twice the usual number of instances, to make weight effects visible
    int numTrain = 2 * getNumInstances(), numClasses = 2, missingLevel = 0;
    boolean predictorMissing = false, classMissing = false;
    boolean[] result = new boolean[2];
    Instances train = null;
    Associator[] associators = null;
    AssociatorEvaluation evaluationB = null; // baseline: uniform weights
    AssociatorEvaluation evaluationI = null; // after reweighting instances
    // true if both evaluations came out equal (weights apparently ignored)
    boolean evalFail = false;
    try {
      train = makeTestDataset(42, numTrain,
        nominalPredictor ? getNumNominal() + 1 : 0,
        numericPredictor ? getNumNumeric() + 1 : 0,
        stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0,
        relationalPredictor ? getNumRelational() : 0, numClasses, classType,
        multiInstance);
      // missingLevel is 0 here, so this branch is currently inert
      if (missingLevel > 0) {
        addMissing(train, missingLevel, predictorMissing, classMissing);
      }
      associators = AbstractAssociator.makeCopies(getAssociator(), 2);
      evaluationB = new AssociatorEvaluation();
      evaluationI = new AssociatorEvaluation();
      // baseline evaluation with the original (unmodified) instance weights
      evaluationB.evaluate(associators[0], train);
    } catch (Exception ex) {
      // failures here are a harness problem, not a scheme failure
      throw new Error("Error setting up for tests: " + ex.getMessage());
    }
    try {
      // Now modify instance weights and re-built/test
      // zero out every weight, then give up to half the instances a random
      // weight in [1, 10] (the same instance may be picked more than once)
      for (int i = 0; i < train.numInstances(); i++) {
        train.instance(i).setWeight(0);
      }
      Random random = new Random(1);
      for (int i = 0; i < train.numInstances() / 2; i++) {
        int inst = random.nextInt(train.numInstances());
        int weight = random.nextInt(10) + 1;
        train.instance(inst).setWeight(weight);
      }
      evaluationI.evaluate(associators[1], train);
      // identical evaluations imply the weights had no effect
      if (evaluationB.equals(evaluationI)) {
        // println("no");
        evalFail = true;
        throw new Exception("evalFail");
      }
      println("yes");
      result[0] = true;
    } catch (Exception ex) {
      println("no");
      result[0] = false;
      if (m_Debug) {
        println("\n=== Full Report ===");
        if (evalFail) {
          println("Results don't differ between non-weighted and "
            + "weighted instance models.");
          println("Here are the results:\n");
          println(evaluationB.toSummaryString("\nboth methods\n"));
        } else {
          print("Problem during building");
          println(": " + ex.getMessage() + "\n");
        }
        println("Here is the dataset:\n");
        println("=== Train Dataset ===\n" + train.toString() + "\n");
        println("=== Train Weights ===\n");
        for (int i = 0; i < train.numInstances(); i++) {
          println(" " + (i + 1) + " " + train.instance(i).weight());
        }
      }
    }
    return result;
  }
/**
* Checks whether the scheme alters the training dataset during building. If
* the scheme needs to modify the data it should take a copy of the training
* data. Currently checks for changes to header structure, number of
* instances, order of instances, instance weights.
*
* @param nominalPredictor if true use nominal predictor attributes
* @param numericPredictor if true use numeric predictor attributes
* @param stringPredictor if true use string predictor attributes
* @param datePredictor if true use date predictor attributes
* @param relationalPredictor if true use relational predictor attributes
* @param multiInstance whether multi-instance is needed
* @param classType the class type (NUMERIC, NOMINAL, etc.)
* @param predictorMissing true if we know the associator can handle (at
* least) moderate missing predictor values
* @param classMissing true if we know the associator can handle (at least)
* moderate missing class values
* @return index 0 is true if the test was passed
*/
protected boolean[] datasetIntegrity(boolean nominalPredictor,
boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
boolean relationalPredictor, boolean multiInstance, int classType,
boolean predictorMissing, boolean classMissing) {
print("associator doesn't alter original datasets");
printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
datePredictor, relationalPredictor, multiInstance, classType);
print("...");
int numTrain = getNumInstances(), numClasses = 2, missingLevel = 20;
boolean[] result = new boolean[2];
Instances train = null;
Associator associator = null;
try {
train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal()
: 0, numericPredictor ? getNumNumeric() : 0,
stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0,
relationalPredictor ? getNumRelational() : 0, numClasses, classType,
multiInstance);
if (missingLevel > 0) {
addMissing(train, missingLevel, predictorMissing, classMissing);
}
associator = AbstractAssociator.makeCopies(getAssociator(), 1)[0];
} catch (Exception ex) {
throw new Error("Error setting up for tests: " + ex.getMessage());
}
try {
Instances trainCopy = new Instances(train);
associator.buildAssociations(trainCopy);
compareDatasets(train, trainCopy);
println("yes");
result[0] = true;
} catch (Exception ex) {
println("no");
result[0] = false;
if (m_Debug) {
println("\n=== Full Report ===");
print("Problem during building");
println(": " + ex.getMessage() + "\n");
println("Here is the dataset:\n");
println("=== Train Dataset ===\n" + train.toString() + "\n");
}
}
return result;
}
  /**
   * Runs a test on the datasets with the given characteristics. Convenience
   * overload that always places the class attribute last.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @param missingLevel the percentage of missing values
   * @param predictorMissing true if the missing values may be in the predictors
   * @param classMissing true if the missing values may be in the class
   * @param numTrain the number of instances in the training set
   * @param numClasses the number of classes
   * @param accepts the acceptable string in an exception
   * @return index 0 is true if the test was passed, index 1 is true if test was
   *         acceptable
   */
  protected boolean[] runBasicTest(boolean nominalPredictor,
    boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
    boolean relationalPredictor, boolean multiInstance, int classType,
    int missingLevel, boolean predictorMissing, boolean classMissing,
    int numTrain, int numClasses, ArrayList<String> accepts) {
    // delegate, defaulting the class attribute position to "last"
    return runBasicTest(nominalPredictor, numericPredictor, stringPredictor,
      datePredictor, relationalPredictor, multiInstance, classType,
      TestInstances.CLASS_IS_LAST, missingLevel, predictorMissing,
      classMissing, numTrain, numClasses, accepts);
  }
  /**
   * Runs a test on the datasets with the given characteristics.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @param classIndex the attribute index of the class
   * @param missingLevel the percentage of missing values
   * @param predictorMissing true if the missing values may be in the predictors
   * @param classMissing true if the missing values may be in the class
   * @param numTrain the number of instances in the training set
   * @param numClasses the number of classes
   * @param accepts the acceptable string in an exception
   * @return index 0 is true if the test was passed, index 1 is true if test was
   *         acceptable
   */
  protected boolean[] runBasicTest(boolean nominalPredictor,
    boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
    boolean relationalPredictor, boolean multiInstance, int classType,
    int classIndex, int missingLevel, boolean predictorMissing,
    boolean classMissing, int numTrain, int numClasses,
    ArrayList<String> accepts) {
    boolean[] result = new boolean[2];
    Instances train = null;
    Associator associator = null;
    try {
      // generate training data with the requested attribute characteristics
      train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal()
        : 0, numericPredictor ? getNumNumeric() : 0,
        stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0,
        relationalPredictor ? getNumRelational() : 0, numClasses, classType,
        classIndex, multiInstance);
      if (missingLevel > 0) {
        addMissing(train, missingLevel, predictorMissing, classMissing);
      }
      associator = AbstractAssociator.makeCopies(getAssociator(), 1)[0];
    } catch (Exception ex) {
      // failures here are a harness problem, not a scheme failure
      ex.printStackTrace();
      throw new Error("Error setting up for tests: " + ex.getMessage());
    }
    try {
      associator.buildAssociations(train);
      println("yes");
      result[0] = true;
    } catch (Exception ex) {
      boolean acceptable = false;
      String msg;
      // lower-case the message for case-insensitive substring matching below
      if (ex.getMessage() == null) {
        msg = "";
      } else {
        msg = ex.getMessage().toLowerCase();
      }
      // remember classpath problems so they can be reported separately
      if (msg.indexOf("not in classpath") > -1) {
        m_ClasspathProblems = true;
      }
      // the failure is "acceptable" if the message mentions any accepted word
      for (int i = 0; i < accepts.size(); i++) {
        if (msg.indexOf(accepts.get(i)) >= 0) {
          acceptable = true;
        }
      }
      println("no" + (acceptable ? " (OK error message)" : ""));
      result[1] = acceptable;
      if (m_Debug) {
        println("\n=== Full Report ===");
        print("Problem during building");
        println(": " + ex.getMessage() + "\n");
        if (!acceptable) {
          if (accepts.size() > 0) {
            print("Error message doesn't mention ");
            for (int i = 0; i < accepts.size(); i++) {
              if (i != 0) {
                print(" or ");
              }
              print('"' + accepts.get(i) + '"');
            }
          }
          println("here is the dataset:\n");
          println("=== Train Dataset ===\n" + train.toString() + "\n");
        }
      }
    }
    return result;
  }
  /**
   * Make a simple set of instances, which can later be modified for use in
   * specific tests.
   *
   * @param seed the random number seed
   * @param numInstances the number of instances to generate
   * @param numNominal the number of nominal attributes
   * @param numNumeric the number of numeric attributes
   * @param numString the number of string attributes
   * @param numDate the number of date attributes
   * @param numRelational the number of relational attributes
   * @param numClasses the number of classes (if nominal class)
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @param multiInstance whether the dataset should be a multi-instance dataset
   * @return the test dataset
   * @throws Exception if the dataset couldn't be generated
   * @see #process(Instances)
   */
  protected Instances makeTestDataset(int seed, int numInstances,
    int numNominal, int numNumeric, int numString, int numDate,
    int numRelational, int numClasses, int classType, boolean multiInstance)
    throws Exception {
    // delegate, defaulting the class attribute position to "last"
    return makeTestDataset(seed, numInstances, numNominal, numNumeric,
      numString, numDate, numRelational, numClasses, classType,
      TestInstances.CLASS_IS_LAST, multiInstance);
  }
/**
* Make a simple set of instances with variable position of the class
* attribute, which can later be modified for use in specific tests.
*
* @param seed the random number seed
* @param numInstances the number of instances to generate
* @param numNominal the number of nominal attributes
* @param numNumeric the number of numeric attributes
* @param numString the number of string attributes
* @param numDate the number of date attributes
* @param numRelational the number of relational attributes
* @param numClasses the number of classes (if nominal class)
* @param classType the class type (NUMERIC, NOMINAL, etc.)
* @param classIndex the index of the class (0-based, -1 as last)
* @param multiInstance whether the dataset should a multi-instance dataset
* @return the test dataset
* @throws Exception if the dataset couldn't be generated
* @see TestInstances#CLASS_IS_LAST
* @see #process(Instances)
*/
protected Instances makeTestDataset(int seed, int numInstances,
int numNominal, int numNumeric, int numString, int numDate,
int numRelational, int numClasses, int classType, int classIndex,
boolean multiInstance) throws Exception {
TestInstances dataset = new TestInstances();
dataset.setSeed(seed);
dataset.setNumInstances(numInstances);
dataset.setNumNominal(numNominal);
dataset.setNumNumeric(numNumeric);
dataset.setNumString(numString);
dataset.setNumDate(numDate);
dataset.setNumRelational(numRelational);
dataset.setNumClasses(numClasses);
if (classType == NO_CLASS) {
dataset.setClassType(Attribute.NOMINAL); // ignored
dataset.setClassIndex(TestInstances.NO_CLASS);
} else {
dataset.setClassType(classType);
dataset.setClassIndex(classIndex);
}
dataset.setNumClasses(numClasses);
dataset.setMultiInstance(multiInstance);
dataset.setWords(getWords());
dataset.setWordSeparators(getWordSeparators());
return process(dataset.generate());
}
/**
* Print out a short summary string for the dataset characteristics
*
* @param nominalPredictor true if nominal predictor attributes are present
* @param numericPredictor true if numeric predictor attributes are present
* @param stringPredictor true if string predictor attributes are present
* @param datePredictor true if date predictor attributes are present
* @param relationalPredictor true if relational predictor attributes are
* present
* @param multiInstance whether multi-instance is needed
* @param classType the class type (NUMERIC, NOMINAL, etc.)
*/
protected void printAttributeSummary(boolean nominalPredictor,
boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
boolean relationalPredictor, boolean multiInstance, int classType) {
String str = "";
if (numericPredictor) {
str += " numeric";
}
if (nominalPredictor) {
if (str.length() > 0) {
str += " &";
}
str += " nominal";
}
if (stringPredictor) {
if (str.length() > 0) {
str += " &";
}
str += " string";
}
if (datePredictor) {
if (str.length() > 0) {
str += " &";
}
str += " date";
}
if (relationalPredictor) {
if (str.length() > 0) {
str += " &";
}
str += " relational";
}
str += " predictors)";
switch (classType) {
case Attribute.NUMERIC:
str = " (numeric class," + str;
break;
case Attribute.NOMINAL:
str = " (nominal class," + str;
break;
case Attribute.STRING:
str = " (string class," + str;
break;
case Attribute.DATE:
str = " (date class," + str;
break;
case Attribute.RELATIONAL:
str = " (relational class," + str;
break;
case NO_CLASS:
str = " (no class," + str;
break;
}
print(str);
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Test method for this class
*
* @param args the commandline parameters
*/
public static void main(String[] args) {
runCheck(new CheckAssociator(), args);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/associations/DefaultAssociationRule.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* DefaultAssociationRule.java
* Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.associations;
import java.io.Serializable;
import java.util.Collection;
import weka.core.Tag;
import weka.core.Utils;
/**
* Class for storing and manipulating an association rule.
*
* @author Mark Hall (mhall{[at]}pentaho{[dot]}com).
*/
public class DefaultAssociationRule extends AssociationRule
  implements Serializable {
  /** For serialization */
  private static final long serialVersionUID = -661269018702294489L;
  /** Enum for holding different metric types */
  public static enum METRIC_TYPE {
    CONFIDENCE("conf") {
      // conf(A => B) = support(A u B) / support(A)
      double compute(int premiseSupport, int consequenceSupport,
        int totalSupport, int totalTransactions) {
        return (double)totalSupport / (double)premiseSupport;
      }
    },
    LIFT("lift") {
      // lift(A => B) = conf(A => B) / (support(B) / total transactions)
      double compute(int premiseSupport, int consequenceSupport,
        int totalSupport, int totalTransactions) {
        double confidence =
          METRIC_TYPE.CONFIDENCE.compute(premiseSupport, consequenceSupport,
            totalSupport, totalTransactions);
        return confidence / ((double)consequenceSupport /
          (double)totalTransactions);
      }
    },
    LEVERAGE("lev") {
      // lev(A => B) = observed coverage of A u B minus the coverage expected
      // if premise and consequence were independent
      double compute(int premiseSupport, int consequenceSupport,
        int totalSupport, int totalTransactions) {
        double coverageForItemSet = (double)totalSupport /
          (double)totalTransactions;
        double expectedCoverageIfIndependent =
          ((double)premiseSupport / (double)totalTransactions) *
          ((double)consequenceSupport / (double)totalTransactions);
        return coverageForItemSet - expectedCoverageIfIndependent;
      }
    },
    CONVICTION("conv") {
      double compute(int premiseSupport, int consequenceSupport,
        int totalSupport, int totalTransactions) {
        double num =
          (double)premiseSupport * (double)(totalTransactions - consequenceSupport) /
          (double)totalTransactions;
        // the + 1 keeps the denominator from hitting zero when the rule
        // always holds (premiseSupport == totalSupport)
        double denom = premiseSupport - totalSupport + 1;
        return num / denom;
      }
    };
    /** the short name of the metric, used in textual/XML output */
    private final String m_stringVal;
    METRIC_TYPE(String name) {
      m_stringVal = name;
    }
    /**
     * Computes this metric's value from the supplied support counts.
     *
     * @param premiseSupport support of the premise
     * @param consequenceSupport support of the consequence
     * @param totalSupport support of premise and consequence together
     * @param totalTransactions total number of transactions in the data
     * @return the metric value
     */
    abstract double compute(int premiseSupport, int consequenceSupport,
      int totalSupport, int totalTransactions);
    /**
     * Returns the short name of this metric.
     */
    public String toString() {
      return m_stringVal;
    }
    /**
     * Returns a textual rendering of this metric for the supplied support
     * counts, e.g. "conf:(0.95)".
     */
    public String toStringMetric(int premiseSupport, int consequenceSupport,
      int totalSupport, int totalTransactions) {
      return m_stringVal + ":(" + Utils.doubleToString(compute(premiseSupport, consequenceSupport,
        totalSupport, totalTransactions), 2) + ")";
    }
    /**
     * Returns an XML CRITERE element for this metric and the supplied support
     * counts.
     */
    public String toXML(int premiseSupport, int consequenceSupport,
      int totalSupport, int totalTransactions) {
      String result = "<CRITERE name=\"" + m_stringVal + "\" value=\" " +
        Utils.doubleToString(compute(premiseSupport, consequenceSupport,
          totalSupport, totalTransactions), 2) + "\"/>";
      return result;
    }
  }
  /**
   * Tags for display in the GUI.
   * NOTE: tag order must match the METRIC_TYPE declaration order — lookups
   * elsewhere in this class use METRIC_TYPE.ordinal() as the index.
   */
  public static final Tag[] TAGS_SELECTION = {
    new Tag(METRIC_TYPE.CONFIDENCE.ordinal(), "Confidence"),
    new Tag(METRIC_TYPE.LIFT.ordinal(), "Lift"),
    new Tag(METRIC_TYPE.LEVERAGE.ordinal(), "Leverage"),
    new Tag(METRIC_TYPE.CONVICTION.ordinal(), "Conviction")
  };
  /** The metric type for this rule */
  protected DefaultAssociationRule.METRIC_TYPE m_metricType = METRIC_TYPE.CONFIDENCE;
  /** The premise of the rule */
  protected Collection<Item> m_premise;
  /** The consequence of the rule */
  protected Collection<Item> m_consequence;
  /** The support for the premise */
  protected int m_premiseSupport;
  /** The support for the consequence */
  protected int m_consequenceSupport;
  /** The total support for the item set (premise + consequence) */
  protected int m_totalSupport;
  /** The total number of transactions in the data */
  protected int m_totalTransactions;
  /**
   * Construct a new default association rule.
   *
   * @param premise the premise of the rule
   * @param consequence the consequence of the rule
   * @param metric the metric for the rule
   * @param premiseSupport the support of the premise
   * @param consequenceSupport the support of the consequence
   * @param totalSupport the total support of the rule
   * @param totalTransactions the number of transactions in the data
   */
  public DefaultAssociationRule(Collection<Item> premise,
    Collection<Item> consequence, METRIC_TYPE metric,
    int premiseSupport, int consequenceSupport,
    int totalSupport, int totalTransactions) {
    m_premise = premise;
    m_consequence = consequence;
    m_metricType = metric;
    m_premiseSupport = premiseSupport;
    m_consequenceSupport = consequenceSupport;
    m_totalSupport = totalSupport;
    m_totalTransactions = totalTransactions;
  }
  /**
   * Get the premise of this rule.
   *
   * @return the premise of this rule
   */
  public Collection<Item> getPremise() {
    return m_premise;
  }
  /**
   * Get the consequence of this rule.
   *
   * @return the consequence of this rule
   */
  public Collection<Item> getConsequence() {
    return m_consequence;
  }
  /**
   * Get the readable name of the primary metric of this rule (looked up from
   * TAGS_SELECTION via the metric's ordinal).
   *
   * @return the name of the primary metric of this rule
   */
  public String getPrimaryMetricName() {
    return TAGS_SELECTION[m_metricType.ordinal()].getReadable();
  }
  /**
   * Get the value of the primary metric of this rule.
   *
   * @return the value of the primary metric of this rule
   */
  public double getPrimaryMetricValue() {
    return m_metricType.compute(m_premiseSupport, m_consequenceSupport,
      m_totalSupport, m_totalTransactions);
  }
  /**
   * Gets the value of the named metric for this rule.
   *
   * @param metricName the readable name of the metric (as in TAGS_SELECTION)
   * @return the value of the named metric
   * @throws Exception if the named metric is unknown
   */
  public double getNamedMetricValue(String metricName) throws Exception {
    DefaultAssociationRule.METRIC_TYPE requested = null;
    for (DefaultAssociationRule.METRIC_TYPE m : METRIC_TYPE.values()) {
      if (TAGS_SELECTION[m.ordinal()].getReadable().equals(metricName)) {
        requested = m;
      }
    }
    if (requested == null) {
      throw new Exception("[AssociationRule] Unknown metric: " + metricName);
    }
    return requested.compute(m_premiseSupport, m_consequenceSupport,
      m_totalSupport, m_totalTransactions);
  }
  /**
   * Gets the number of metrics available for this rule.
   *
   * @return the number of metrics available for this rule
   */
  public int getNumberOfMetricsForRule() {
    return METRIC_TYPE.values().length;
  }
  /**
   * Gets the readable names of all metrics available for this rule.
   *
   * @return the names of the metrics available for this rule
   */
  public String[] getMetricNamesForRule() {
    String[] metricNames = new String[TAGS_SELECTION.length];
    for (int i = 0; i < TAGS_SELECTION.length; i++) {
      metricNames[i] = TAGS_SELECTION[i].getReadable();
    }
    return metricNames;
  }
  /**
   * Gets the values of all metrics for this rule, in TAGS_SELECTION order.
   *
   * @return the metric values for this rule
   * @throws Exception if a metric value can't be computed
   */
  public double[] getMetricValuesForRule() throws Exception {
    double[] values = new double[TAGS_SELECTION.length];
    for (int i = 0; i < TAGS_SELECTION.length; i++) {
      values[i] = getNamedMetricValue(TAGS_SELECTION[i].getReadable());
    }
    return values;
  }
  /**
   * Get the support for the premise.
   *
   * @return the support for the premise
   */
  public int getPremiseSupport() {
    return m_premiseSupport;
  }
  /**
   * Get the support for the consequence.
   *
   * @return the support for the consequence
   */
  public int getConsequenceSupport() {
    return m_consequenceSupport;
  }
  /**
   * Get the total support for this rule (premise + consequence).
   *
   * @return the total support for this rule
   */
  public int getTotalSupport() {
    return m_totalSupport;
  }
  /**
   * Get the total number of transactions in the data.
   *
   * @return the total number of transactions in the data
   */
  public int getTotalTransactions() {
    return m_totalTransactions;
  }
  /**
   * Get a textual description of this rule. The rule's own (primary) metric is
   * enclosed in angle brackets; all other metrics are listed after it.
   *
   * @return a textual description of this rule.
   */
  public String toString() {
    StringBuffer result = new StringBuffer();
    result.append(m_premise.toString() + ": " + m_premiseSupport
      + " ==> " + m_consequence.toString() + ": " + m_totalSupport
      + "   ");
    for (DefaultAssociationRule.METRIC_TYPE m : METRIC_TYPE.values()) {
      if (m.equals(m_metricType)) {
        result.append("<" +
          m.toStringMetric(m_premiseSupport, m_consequenceSupport,
            m_totalSupport, m_totalTransactions) + "> ");
      } else {
        result.append("" +
          m.toStringMetric(m_premiseSupport, m_consequenceSupport,
            m_totalSupport, m_totalTransactions) + " ");
      }
    }
    return result.toString();
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/associations/FPGrowth.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* FPGrowth.java
* Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.associations;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Vector;
import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.SelectedTag;
import weka.core.SparseInstance;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
/**
* <!-- globalinfo-start --> Class implementing the FP-growth algorithm for
* finding large item sets without candidate generation. Iteratively reduces the
* minimum support until it finds the required number of rules with the given
* minimum metric. For more information see:<br/>
* <br/>
 * J. Han, J. Pei, Y. Yin: Mining frequent patterns without candidate generation.
 * In: Proceedings of the 2000 ACM SIGMOD International Conference on Management
 * of Data, 1-12, 2000.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- technical-bibtex-start --> BibTeX:
*
* <pre>
* @inproceedings{Han2000,
 * author = {J. Han and J. Pei and Y. Yin},
 * booktitle = {Proceedings of the 2000 ACM SIGMOD International Conference on Management of Data},
* pages = {1-12},
* title = {Mining frequent patterns without candidate generation},
* year = {2000}
* }
* </pre>
* <p/>
* <!-- technical-bibtex-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -P <attribute index of positive value>
* Set the index of the attribute value to consider as 'positive'
* for binary attributes in normal dense instances. Index 2 is always
* used for sparse instances. (default = 2)
* </pre>
*
* <pre>
* -I <max items>
* The maximum number of items to include in large items sets (and rules). (default = -1, i.e. no limit.)
* </pre>
*
* <pre>
* -N <require number of rules>
* The required number of rules. (default = 10)
* </pre>
*
* <pre>
* -T <0=confidence | 1=lift | 2=leverage | 3=Conviction>
* The metric by which to rank rules. (default = confidence)
* </pre>
*
* <pre>
* -C <minimum metric score of a rule>
* The minimum metric score of a rule. (default = 0.9)
* </pre>
*
* <pre>
* -U <upper bound for minimum support>
* Upper bound for minimum support. (default = 1.0)
* </pre>
*
* <pre>
* -M <lower bound for minimum support>
* The lower bound for the minimum support. (default = 0.1)
* </pre>
*
* <pre>
* -D <delta for minimum support>
* The delta by which the minimum support is decreased in
* each iteration. (default = 0.05)
* </pre>
*
* <pre>
* -S
* Find all rules that meet the lower bound on
* minimum support and the minimum metric constraint.
* Turning this mode on will disable the iterative support reduction
* procedure to find the specified number of rules.
* </pre>
*
* <pre>
* -transactions <comma separated list of attribute names>
* Only consider transactions that contain these items (default = no restriction)
* </pre>
*
* <pre>
* -rules <comma separated list of attribute names>
* Only print rules that contain these items. (default = no restriction)
* </pre>
*
* <pre>
* -use-or
* Use OR instead of AND for must contain list(s). Use in conjunction
* with -transactions and/or -rules
* </pre>
*
* <!-- options-end -->
*
* @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
* @version $Revision$
*/
public class FPGrowth extends AbstractAssociator implements
AssociationRulesProducer, OptionHandler, TechnicalInformationHandler {
/** For serialization */
private static final long serialVersionUID = 3620717108603442911L;
/**
 * Class for maintaining a frequent item set. The item list is kept sorted
 * (via BinaryItem's natural ordering) at all times.
 */
protected static class FrequentBinaryItemSet implements Serializable,
  Cloneable {

  /** For serialization */
  private static final long serialVersionUID = -6543815873565829448L;

  /** The list of items in the item set (maintained in sorted order) */
  protected ArrayList<BinaryItem> m_items = new ArrayList<BinaryItem>();

  /** the support of this item set **/
  protected int m_support;

  /**
   * Constructor. Takes ownership of (and sorts) the supplied list.
   *
   * @param items the items that make up the frequent item set.
   * @param support the support of this item set.
   */
  public FrequentBinaryItemSet(ArrayList<BinaryItem> items, int support) {
    m_items = items;
    m_support = support;
    Collections.sort(m_items);
  }

  /**
   * Add an item to this item set. The list is re-sorted after insertion.
   *
   * @param i the item to add.
   */
  public void addItem(BinaryItem i) {
    m_items.add(i);
    Collections.sort(m_items);
  }

  /**
   * Set the support for this item set.
   *
   * @param support the support for this item set.
   */
  public void setSupport(int support) {
    m_support = support;
  }

  /**
   * Get the support of this item set.
   *
   * @return the support of this item set.
   */
  public int getSupport() {
    return m_support;
  }

  /**
   * Get the items in this item set. Note: returns the internal (live) list,
   * not a copy.
   *
   * @return the items in this item set.
   */
  public Collection<BinaryItem> getItems() {
    return m_items;
  }

  /**
   * Get a particular item from this item set.
   *
   * @param index the index of the item to get.
   * @return the item.
   */
  public BinaryItem getItem(int index) {
    return m_items.get(index);
  }

  /**
   * Get the number of items in this item set.
   *
   * @return the number of items in this item set.
   */
  public int numberOfItems() {
    return m_items.size();
  }

  /**
   * Get a textual description of this item set: the items separated by
   * spaces, followed by ": <support>".
   *
   * @return a textual description of this item set.
   */
  @Override
  public String toString() {
    StringBuffer buff = new StringBuffer();
    Iterator<BinaryItem> i = m_items.iterator();

    while (i.hasNext()) {
      buff.append(i.next().toString() + " ");
    }
    buff.append(": " + m_support);
    return buff.toString();
  }

  /**
   * Make a copy of this item set. The item list is copied, but the
   * BinaryItem objects themselves are shared (shallow with respect to
   * items).
   *
   * @return a copy of this item set.
   */
  @Override
  public Object clone() {
    ArrayList<BinaryItem> items = new ArrayList<BinaryItem>(m_items);
    return new FrequentBinaryItemSet(items, m_support);
  }
}
/**
 * Maintains a list of frequent item sets.
 */
protected static class FrequentItemSets implements Serializable {

  /** For serialization */
  private static final long serialVersionUID = 4173606872363973588L;

  /** The list of frequent item sets */
  protected ArrayList<FrequentBinaryItemSet> m_sets = new ArrayList<FrequentBinaryItemSet>();

  /** The total number of transactions in the data */
  protected int m_numberOfTransactions;

  /**
   * Constructor.
   *
   * @param numTransactions the total number of transactions in the data.
   */
  public FrequentItemSets(int numTransactions) {
    m_numberOfTransactions = numTransactions;
  }

  /**
   * Get an item set.
   *
   * @param index the index of the item set to get.
   * @return an item set.
   */
  public FrequentBinaryItemSet getItemSet(int index) {
    return m_sets.get(index);
  }

  /**
   * Get an iterator that can be used to access all the item sets.
   *
   * @return an iterator.
   */
  public Iterator<FrequentBinaryItemSet> iterator() {
    return m_sets.iterator();
  }

  /**
   * Get the total number of transactions in the data that the item sets were
   * derived from.
   *
   * @return the total number of transactions in the data.
   */
  public int getNumberOfTransactions() {
    return m_numberOfTransactions;
  }

  /**
   * Add an item set.
   *
   * @param setToAdd the item set to add.
   */
  public void addItemSet(FrequentBinaryItemSet setToAdd) {
    m_sets.add(setToAdd);
  }

  /**
   * Sort the item sets according to the supplied comparator.
   *
   * @param comp the comparator to use.
   */
  public void sort(Comparator<FrequentBinaryItemSet> comp) {
    Collections.sort(m_sets, comp);
  }

  /**
   * Get the number of item sets.
   *
   * @return the number of item sets.
   */
  public int size() {
    return m_sets.size();
  }

  /**
   * Sort the item sets. Sorts by item set length (shorter sets first); ties
   * are broken by comparing the items in the two item sets element by
   * element.
   */
  public void sort() {
    Comparator<FrequentBinaryItemSet> compF = new Comparator<FrequentBinaryItemSet>() {
      @Override
      public int compare(FrequentBinaryItemSet one, FrequentBinaryItemSet two) {
        Collection<BinaryItem> compOne = one.getItems();
        Collection<BinaryItem> compTwo = two.getItems();

        // shorter item sets are listed before longer ones
        if (compOne.size() < compTwo.size()) {
          return -1;
        } else if (compOne.size() > compTwo.size()) {
          return 1;
        }

        // equal length - compare the items pairwise
        Iterator<BinaryItem> twoIterator = compTwo.iterator();
        for (BinaryItem oneI : compOne) {
          BinaryItem twoI = twoIterator.next();
          int result = oneI.compareTo(twoI);
          if (result != 0) {
            return result;
          }
        }
        return 0; // equal
      }
    };
    sort(compF);
  }

  /**
   * Get a textual description of this list of item sets.
   *
   * @param numSets the maximum number of item sets to display (a value
   *          <= 0 displays all of them).
   * @return a textual description of the item sets.
   */
  public String toString(int numSets) {
    if (m_sets.size() == 0) {
      return "No frequent items sets found!";
    }

    StringBuffer result = new StringBuffer();
    result.append("" + m_sets.size() + " frequent item sets found");
    if (numSets > 0) {
      result.append(" , displaying " + numSets);
    }
    result.append(":\n\n");

    int count = 0;
    for (FrequentBinaryItemSet i : m_sets) {
      // fixed off-by-one: the previous "count > numSets" test displayed
      // numSets + 1 item sets rather than the advertised numSets
      if (numSets > 0 && count >= numSets) {
        break;
      }
      result.append(i.toString() + "\n");
      count++;
    }

    return result.toString();
  }
}
/**
 * This class holds the counts for projected tree nodes and header lists.
 * The counts are stored per recursion level and manipulated stack-fashion:
 * push a new count, or bump the count currently on top.
 */
protected static class ShadowCounts implements Serializable {

  /** For serialization */
  private static final long serialVersionUID = 4435433714185969155L;

  /** Holds the counts at different recursion levels (index == level) */
  private final ArrayList<Integer> m_counts = new ArrayList<Integer>();

  /**
   * Get the count at the specified recursion depth.
   *
   * @param recursionLevel the depth of the recursion.
   * @return the count, or 0 if no count has been recorded at that depth.
   */
  public int getCount(int recursionLevel) {
    if (recursionLevel >= m_counts.size()) {
      return 0;
    } else {
      return m_counts.get(recursionLevel);
    }
  }

  /**
   * Increase the count at a given recursion level. Only the top of the
   * stack can be changed: a level one past the end pushes a new count, and
   * the current top level is incremented in place. Calls for any deeper
   * (non-top) level are silently ignored.
   *
   * @param recursionLevel the level at which to increase the count.
   * @param incr the amount by which to increase the count.
   */
  public void increaseCount(int recursionLevel, int incr) {
    // basically treat the list like a stack where we
    // can add a new element, or increment the element
    // at the top
    if (recursionLevel == m_counts.size()) {
      // new element
      m_counts.add(incr);
    } else if (recursionLevel == m_counts.size() - 1) {
      // otherwise increment the top
      int n = m_counts.get(recursionLevel).intValue();
      m_counts.set(recursionLevel, (n + incr));
    }
  }

  /**
   * Remove the count at the given recursion level. Out-of-range levels are
   * ignored.
   *
   * @param recursionLevel the level at which to remove the count.
   */
  public void removeCount(int recursionLevel) {
    if (recursionLevel < m_counts.size()) {
      m_counts.remove(recursionLevel);
    }
  }
}
/**
 * A node in the FP-tree. Each node stores one item, a link to its parent
 * and a map of children, plus per-recursion-level ("projected") counts.
 */
protected static class FPTreeNode implements Serializable {

  /** For serialization */
  private static final long serialVersionUID = 4396315323673737660L;

  /** link to another sibling at this level in the tree */
  protected FPTreeNode m_levelSibling;

  /** link to the parent node */
  protected FPTreeNode m_parent;

  /** item at this node */
  protected BinaryItem m_item;

  /** ID (for graphing the tree) */
  protected int m_ID;

  /**
   * the children of this node, keyed by item. Backed by a HashMap, so
   * iteration order over children is unspecified.
   */
  protected Map<BinaryItem, FPTreeNode> m_children = new HashMap<BinaryItem, FPTreeNode>();

  /** counts associated with projected versions of this node */
  protected ShadowCounts m_projectedCounts = new ShadowCounts();

  /**
   * Construct a new node with the given parent link and item.
   *
   * @param parent a pointer to the parent of this node.
   * @param item the item at this node.
   */
  public FPTreeNode(FPTreeNode parent, BinaryItem item) {
    m_parent = parent;
    m_item = item;
  }

  /**
   * Insert an item set into the tree at this node. Removes the first item
   * from the supplied item set and makes a recursive call to insert the
   * remaining items.
   *
   * NOTE: this method is destructive - each recursion step removes the
   * first item from the supplied collection, so the caller's item set is
   * consumed by the insertion.
   *
   * @param itemSet the item set to insert (consumed by this call).
   * @param headerTable the header table for the tree; entries are created
   *          and their node lists/counts updated as items are inserted.
   * @param incr the amount by which to increase counts.
   */
  public void addItemSet(Collection<BinaryItem> itemSet,
    Map<BinaryItem, FPTreeRoot.Header> headerTable, int incr) {
    Iterator<BinaryItem> i = itemSet.iterator();

    if (i.hasNext()) {
      BinaryItem first = i.next();

      FPTreeNode aChild;
      if (!m_children.containsKey(first)) {
        // not in the tree, so add it.
        aChild = new FPTreeNode(this, first);
        m_children.put(first, aChild);

        // update the header
        if (!headerTable.containsKey(first)) {
          headerTable.put(first, new FPTreeRoot.Header());
        }

        // append new node to header list
        headerTable.get(first).addToList(aChild);
      } else {
        // get the appropriate child node
        aChild = m_children.get(first);
      }

      // update counts in header table (level 0 = the unprojected tree)
      headerTable.get(first).getProjectedCounts().increaseCount(0, incr);

      // increase the child's count
      aChild.increaseProjectedCount(0, incr);

      // proceed recursively
      itemSet.remove(first);
      aChild.addItemSet(itemSet, headerTable, incr);
    }
  }

  /**
   * Increase the projected count at the given recursion level at this node
   *
   * @param recursionLevel the recursion level to increase the node count at.
   * @param incr the amount by which to increase the count.
   */
  public void increaseProjectedCount(int recursionLevel, int incr) {
    m_projectedCounts.increaseCount(recursionLevel, incr);
  }

  /**
   * Remove the projected count at the given recursion level for this node.
   *
   * @param recursionLevel the recursion level at which to remove the count.
   */
  public void removeProjectedCount(int recursionLevel) {
    m_projectedCounts.removeCount(recursionLevel);
  }

  /**
   * Get the projected count at the given recursion level for this node.
   *
   * @param recursionLevel the recursion level at which to get the count.
   * @return the count.
   */
  public int getProjectedCount(int recursionLevel) {
    return m_projectedCounts.getCount(recursionLevel);
  }

  /**
   * Get the parent node.
   *
   * @return the parent node.
   */
  public FPTreeNode getParent() {
    return m_parent;
  }

  /**
   * Get the item at this node.
   *
   * @return the item at this node.
   */
  public BinaryItem getItem() {
    return m_item;
  }

  /**
   * Return a textual description of this node for a given recursion level.
   *
   * @param recursionLevel the recursion depth to use.
   * @return a textual description of this node.
   */
  public String toString(int recursionLevel) {
    return toString("", recursionLevel);
  }

  /**
   * Return a textual description of this node (and, recursively, its
   * subtree) for a given recursion level.
   *
   * @param prefix a prefix string to prepend (used for indentation).
   * @param recursionLevel the recursion level to use.
   * @return a textual description of this node.
   */
  public String toString(String prefix, int recursionLevel) {
    StringBuffer buffer = new StringBuffer();
    buffer.append(prefix);
    buffer.append("|  ");
    buffer.append(m_item.toString());
    buffer.append(" (");
    buffer.append(m_projectedCounts.getCount(recursionLevel));
    buffer.append(")\n");

    for (FPTreeNode node : m_children.values()) {
      buffer.append(node.toString(prefix + "|  ", recursionLevel));
    }

    return buffer.toString();
  }

  /**
   * Assign sequential IDs to this node and its subtree (used when graphing
   * the tree).
   *
   * @param lastID the last ID that has been used so far.
   * @return the last ID assigned within this subtree.
   */
  protected int assignIDs(int lastID) {
    int currentLastID = lastID + 1;
    m_ID = currentLastID;

    if (m_children != null) {
      Collection<FPTreeNode> kids = m_children.values();
      for (FPTreeNode n : kids) {
        currentLastID = n.assignIDs(currentLastID);
      }
    }

    return currentLastID;
  }

  /**
   * Generate a dot graph description string for the tree. Emits node labels
   * (item + level-0 count) and parent->child edges; assignIDs() is assumed
   * to have been called beforehand.
   *
   * @param text a StringBuffer to store the graph description in.
   */
  public void graphFPTree(StringBuffer text) {
    if (m_children != null) {
      Collection<FPTreeNode> kids = m_children.values();
      for (FPTreeNode n : kids) {
        text.append("N" + n.m_ID);
        text.append(" [label=\"");
        text.append(n.getItem().toString() + " (" + n.getProjectedCount(0)
          + ")\\n");
        text.append("\"]\n");
        n.graphFPTree(text);
        text.append("N" + m_ID + "->" + "N" + n.m_ID + "\n");
      }
    }
  }
}
/**
 * Root of the FPTree. The root itself carries no item (constructed with a
 * null item and no parent) and owns the tree's header table.
 */
private static class FPTreeRoot extends FPTreeNode {

  /** For serialization */
  private static final long serialVersionUID = 632150939785333297L;

  /**
   * Stores a header entry for an FPTree: the list of all tree nodes that
   * hold a given item, plus the projected counts for that item.
   */
  protected static class Header implements Serializable {

    /** For serialization */
    private static final long serialVersionUID = -6583156284891368909L;

    /** The list of pointers into the tree structure */
    protected List<FPTreeNode> m_headerList = new LinkedList<FPTreeNode>();

    /** Projected header counts for this entry */
    protected ShadowCounts m_projectedHeaderCounts = new ShadowCounts();

    /**
     * Add a tree node into the list for this header entry.
     *
     * @param toAdd the node to add.
     */
    public void addToList(FPTreeNode toAdd) {
      m_headerList.add(toAdd);
    }

    /**
     * Get the list of nodes for this header entry.
     *
     * @return the list of nodes for this header entry.
     */
    public List<FPTreeNode> getHeaderList() {
      return m_headerList;
    }

    /**
     * Get the projected counts for this header entry.
     *
     * @return the projected counts for this header entry.
     */
    public ShadowCounts getProjectedCounts() {
      return m_projectedHeaderCounts;
    }
  }

  /** Stores the header table as mapped Header entries */
  protected Map<BinaryItem, Header> m_headerTable = new HashMap<BinaryItem, Header>();

  /**
   * Create a new FPTreeRoot. The root has no parent and no item.
   */
  public FPTreeRoot() {
    super(null, null);
  }

  /**
   * Insert an item set into the tree, updating this root's header table.
   * Note: the supplied collection is consumed (see FPTreeNode.addItemSet).
   *
   * @param itemSet the item set to insert into the tree.
   * @param incr the increment by which to increase counters.
   */
  public void addItemSet(Collection<BinaryItem> itemSet, int incr) {
    super.addItemSet(itemSet, m_headerTable, incr);
  }

  /**
   * Get the header table for this tree.
   *
   * @return the header table for this tree.
   */
  public Map<BinaryItem, Header> getHeaderTable() {
    return m_headerTable;
  }

  /**
   * Check whether the tree is empty at the given recursion (projection)
   * level, i.e. no immediate child carries a positive projected count.
   *
   * @param recursionLevel the projection level to check.
   * @return true if the tree is empty at that level.
   */
  public boolean isEmpty(int recursionLevel) {
    for (FPTreeNode c : m_children.values()) {
      if (c.getProjectedCount(recursionLevel) > 0) {
        return false;
      }
    }
    return true;
  }

  /**
   * Get a textual description of the tree at a given recursion (projection)
   * level.
   *
   * @param pad the string to use as a prefix for indenting nodes.
   * @param recursionLevel the recursion level (projection) to use.
   * @return the textual description of the tree.
   */
  @Override
  public String toString(String pad, int recursionLevel) {
    StringBuffer result = new StringBuffer();
    result.append(pad);
    result.append("+ ROOT\n");

    for (FPTreeNode node : m_children.values()) {
      result.append(node.toString(pad + "|  ", recursionLevel));
    }
    return result.toString();
  }
}
/**
 * Advance the subset indicator to the next subset, treating the boolean
 * array as a binary counter with the least-significant position first.
 * Modifies the array in place.
 *
 * @param subset the membership flags to advance.
 */
private static void nextSubset(boolean[] subset) {
  int pos = 0;
  // clear trailing set flags (carry propagation) ...
  while (pos < subset.length && subset[pos]) {
    subset[pos] = false;
    pos++;
  }
  // ... then set the first clear flag, if any remains
  if (pos < subset.length) {
    subset[pos] = true;
  }
}
/**
 * Extract the premise (items whose subset flag is set) from a frequent item
 * set. Returns null once every flag is set, i.e. when there is no item left
 * over for a consequence - this signals the end of subset enumeration.
 *
 * @param fis the frequent item set to draw items from.
 * @param subset membership flags, parallel to the item set's items.
 * @return the premise items, or null if all flags are set.
 */
private static Collection<Item> getPremise(FrequentBinaryItemSet fis,
  boolean[] subset) {

  boolean anyClear = false;
  for (boolean flag : subset) {
    if (!flag) {
      anyClear = true;
      break;
    }
  }
  if (!anyClear) {
    return null;
  }

  ArrayList<Item> items = new ArrayList<Item>(fis.getItems());
  List<Item> premise = new ArrayList<Item>();
  for (int i = 0; i < subset.length; i++) {
    if (subset[i]) {
      premise.add(items.get(i));
    }
  }
  return premise;
}
/**
 * Extract the consequence (items whose subset flag is clear) from a
 * frequent item set - the complement of the premise selected by the flags.
 *
 * @param fis the frequent item set to draw items from.
 * @param subset membership flags, parallel to the item set's items.
 * @return the consequence items.
 */
private static Collection<Item> getConsequence(FrequentBinaryItemSet fis,
  boolean[] subset) {

  ArrayList<Item> items = new ArrayList<Item>(fis.getItems());
  List<Item> consequence = new ArrayList<Item>();

  for (int i = 0; i < subset.length; i++) {
    if (!subset[i]) {
      consequence.add(items.get(i));
    }
  }
  return consequence;
}
/**
 * Generate all association rules, from the supplied frequent item sets, that
 * meet a given minimum metric threshold. Uses a brute force approach:
 * enumerates every premise/consequence split of each item set via a boolean
 * subset indicator.
 *
 * @param largeItemSets the set of frequent item sets
 * @param metricToUse the metric to use
 * @param metricThreshold the threshold value that a rule must meet
 * @param upperBoundMinSuppAsInstances the upper bound on the support in order
 *          to accept the rule
 * @param lowerBoundMinSuppAsInstances the lower bound on the support in order
 *          to accept the rule
 * @param totalTransactions the total number of transactions in the data
 * @return a list of association rules
 */
public static List<AssociationRule> generateRulesBruteForce(
  FrequentItemSets largeItemSets,
  DefaultAssociationRule.METRIC_TYPE metricToUse, double metricThreshold,
  int upperBoundMinSuppAsInstances, int lowerBoundMinSuppAsInstances,
  int totalTransactions) {

  List<AssociationRule> rules = new ArrayList<AssociationRule>();
  // sort shortest item sets first: every proper subset of an item set is
  // then guaranteed to have been entered into frequencyLookup before it is
  // needed as a premise/consequence below
  largeItemSets.sort();
  Map<Collection<BinaryItem>, Integer> frequencyLookup = new HashMap<Collection<BinaryItem>, Integer>();

  Iterator<FrequentBinaryItemSet> setI = largeItemSets.iterator();
  // process each large item set
  while (setI.hasNext()) {
    FrequentBinaryItemSet fis = setI.next();
    frequencyLookup.put(fis.getItems(), fis.getSupport());
    if (fis.getItems().size() > 1) {
      // generate all the possible subsets for the premise
      // (subset flags act as a binary counter over the items)
      boolean[] subset = new boolean[fis.getItems().size()];
      Collection<Item> premise = null;
      Collection<Item> consequence = null;
      while ((premise = getPremise(fis, subset)) != null) {
        // skip the empty premise and the full item set (empty consequence)
        if (premise.size() > 0 && premise.size() < fis.getItems().size()) {
          consequence = getConsequence(fis, subset);
          int totalSupport = fis.getSupport();
          int supportPremise = frequencyLookup.get(premise).intValue();
          int supportConsequence = frequencyLookup.get(consequence)
            .intValue();

          // a candidate rule
          DefaultAssociationRule candidate = new DefaultAssociationRule(
            premise, consequence, metricToUse, supportPremise,
            supportConsequence, totalSupport, totalTransactions);
          if (candidate.getPrimaryMetricValue() >= metricThreshold
            && candidate.getTotalSupport() >= lowerBoundMinSuppAsInstances
            && candidate.getTotalSupport() <= upperBoundMinSuppAsInstances) {
            // accept this rule
            rules.add(candidate);
          }
        }
        nextSubset(subset);
      }
    }
  }
  return rules;
}
/**
 * Keep only those rules that contain the specified items.
 *
 * @param rulesToPrune the candidate rules to filter.
 * @param itemsToConsider the items a rule must contain to be kept.
 * @param useOr if true a rule need only contain one of the items; otherwise
 *          it must contain all of them.
 * @return the rules that survive the filter.
 */
public static List<AssociationRule> pruneRules(
  List<AssociationRule> rulesToPrune, ArrayList<Item> itemsToConsider,
  boolean useOr) {

  ArrayList<AssociationRule> kept = new ArrayList<AssociationRule>();
  for (AssociationRule candidate : rulesToPrune) {
    if (candidate.containsItems(itemsToConsider, useOr)) {
      kept.add(candidate);
    }
  }

  return kept;
}
/** The number of rules to find */
protected int m_numRulesToFind = 10;

// protected double m_upperBoundMinSupport = 0.36;

/** The upper bound on the minimum support */
protected double m_upperBoundMinSupport = 1.0;

/** The lower bound on minimum support */
protected double m_lowerBoundMinSupport = 0.1;

/** The amount by which to decrease the support in each iteration */
protected double m_delta = 0.05;

/** The number of instances in the data */
protected int m_numInstances;

/**
 * When processing data off of disk report progress this frequently (number of
 * instances).
 */
protected int m_offDiskReportingFrequency = 10000;

/**
 * If true, all rules meeting the lower bound on the minimum support will
 * be found. The number of rules to find will be ignored and the iterative
 * reduction of support will not be done.
 */
protected boolean m_findAllRulesForSupportLevel = false;

// protected double m_lowerBoundMinSupport = 0.0;

/** The index (1 based) of binary attributes to treat as the positive value */
protected int m_positiveIndex = 2;

/** The metric by which rules are ranked (confidence by default) */
protected DefaultAssociationRule.METRIC_TYPE m_metric = DefaultAssociationRule.METRIC_TYPE.CONFIDENCE;

/** The minimum metric score a rule must reach to be accepted */
protected double m_metricThreshold = 0.9;

/** Holds the large item sets found */
protected FrequentItemSets m_largeItemSets;

/** Holds the rules */
protected List<AssociationRule> m_rules;

// maximum number of items in a large item set (the -I option documents
// -1 as "no limit"; presumably any value < 1 disables the limit)
protected int m_maxItems = -1;

/**
 * If set, limit the transactions (instances) input to the algorithm to those
 * that contain these items
 */
protected String m_transactionsMustContain = "";

/** Use OR rather than AND when considering must contain lists */
protected boolean m_mustContainOR = false;

/** If set, then only output rules containing these items */
protected String m_rulesMustContain = "";
/**
 * Returns default capabilities of the classifier.
 *
 * @return the capabilities of this classifier
 */
@Override
public Capabilities getCapabilities() {
  Capabilities result = super.getCapabilities();
  result.disableAll();

  // enable what we can handle

  // attributes: FP-growth works on binary/unary (market-basket style)
  // attributes only, and needs no class attribute
  result.enable(Capability.UNARY_ATTRIBUTES);
  result.enable(Capability.BINARY_ATTRIBUTES);
  result.enable(Capability.MISSING_VALUES);

  // class
  result.enable(Capability.NO_CLASS);

  return result;
}
/**
 * Returns a string describing this associator.
 *
 * @return a description of the evaluator suitable for displaying in the
 *         explorer/experimenter gui
 */
public String globalInfo() {
  StringBuilder description = new StringBuilder();
  description.append("Class implementing the FP-growth algorithm for finding");
  description.append(" large item sets without candidate generation. Iteratively");
  description.append(" reduces the minimum support until it finds the required");
  description.append(" number of rules with the given minimum metric.");
  description.append(" For more information see:\n\n");
  description.append(getTechnicalInformation().toString());
  return description.toString();
}
/**
 * Returns an instance of a TechnicalInformation object, containing detailed
 * information about the technical background of this class, e.g., paper
 * reference or book this class is based on.
 *
 * @return the technical information about this class
 */
@Override
public TechnicalInformation getTechnicalInformation() {
  TechnicalInformation result;

  result = new TechnicalInformation(Type.INPROCEEDINGS);
  // fixed citation typos: "J.Pei" -> "J. Pei" and "ACM-SIGMID" ->
  // "ACM SIGMOD" (the paper appeared at ACM SIGMOD 2000)
  result.setValue(Field.AUTHOR, "J. Han and J. Pei and Y. Yin");
  result.setValue(Field.TITLE,
    "Mining frequent patterns without candidate generation");
  result.setValue(Field.BOOKTITLE,
    "Proceedings of the 2000 ACM SIGMOD International"
      + " Conference on Management of Data");
  result.setValue(Field.YEAR, "2000");
  result.setValue(Field.PAGES, "1-12");

  return result;
}
/**
 * Check whether a transaction (instance) contains the user-specified
 * "must contain" items. With OR semantics a single matching item is enough;
 * with AND semantics the instance must contain every flagged item.
 *
 * @param inst the transaction to check.
 * @param transactionsMustContainIndexes flags (indexed by attribute) marking
 *          the items that must be present.
 * @param numInTransactionsMustContainList how many flags are set (the count
 *          an AND match must reach).
 * @return true if the instance satisfies the must-contain constraint.
 */
private boolean passesMustContain(Instance inst,
  boolean[] transactionsMustContainIndexes,
  int numInTransactionsMustContainList) {

  boolean result = false;

  if (inst instanceof SparseInstance) {
    // sparse case: an item is "present" if the attribute has a stored
    // value (index 2 / the non-zero value is always treated as positive
    // for sparse instances - see the -P option documentation)
    int containsCount = 0;
    for (int i = 0; i < inst.numValues(); i++) {
      int attIndex = inst.index(i);
      if (m_mustContainOR) {
        if (transactionsMustContainIndexes[attIndex]) {
          // break here since the operator is OR and this
          // instance contains at least one of the items
          return true;
        }
      } else {
        if (transactionsMustContainIndexes[attIndex]) {
          containsCount++;
        }
      }
    }

    if (!m_mustContainOR) {
      if (containsCount == numInTransactionsMustContainList) {
        return true;
      }
    }
  } else {
    // dense case: an item is "present" if the attribute's value equals
    // the configured positive index (1-based, hence the -1)
    int containsCount = 0;
    for (int i = 0; i < transactionsMustContainIndexes.length; i++) {
      if (transactionsMustContainIndexes[i]) {
        if ((int) inst.value(i) == m_positiveIndex - 1) {
          if (m_mustContainOR) {
            // break here since the operator is OR and
            // this instance contains at least one of the
            // requested items
            return true;
          } else {
            containsCount++;
          }
        }
      }
    }

    if (!m_mustContainOR) {
      if (containsCount == numInTransactionsMustContainList) {
        return true;
      }
    }
  }

  return result;
}
/**
 * Update the singleton frequency counts with one transaction. For sparse
 * instances every stored (non-zero) value counts as a positive occurrence;
 * for dense instances an attribute counts if it is unary or its value
 * equals the configured positive index. Missing values are skipped.
 *
 * @param current the instance (transaction) to count.
 * @param singletons one BinaryItem per attribute, indexed by attribute.
 * @throws Exception if counting fails for some reason
 */
private void processSingleton(Instance current,
  ArrayList<BinaryItem> singletons) throws Exception {

  if (current instanceof SparseInstance) {
    for (int j = 0; j < current.numValues(); j++) {
      int attIndex = current.index(j);
      singletons.get(attIndex).increaseFrequency();
    }
  } else {
    for (int j = 0; j < current.numAttributes(); j++) {
      if (!current.isMissing(j)) {
        if (current.attribute(j).numValues() == 1
          || current.value(j) == m_positiveIndex - 1) {
          singletons.get(j).increaseFrequency();
        }
      }
    }
  }
}
/**
 * Get the singleton items in the data.
 *
 * @param source the source of the data: either an Instances object or a
 *          weka.core.converters.ArffLoader (for incremental, off-disk
 *          processing).
 * @return a list of singleton item sets, one per attribute.
 * @throws IllegalArgumentException if the source is neither an Instances
 *           object nor an ArffLoader.
 * @throws Exception if the singletons can't be found for some reason
 */
protected ArrayList<BinaryItem> getSingletons(Object source) throws Exception {
  ArrayList<BinaryItem> singletons = new ArrayList<BinaryItem>();
  Instances data = null;

  if (source instanceof Instances) {
    data = (Instances) source;
  } else if (source instanceof weka.core.converters.ArffLoader) {
    data = ((weka.core.converters.ArffLoader) source).getStructure();
  } else {
    // previously an unsupported source fell through with data == null and
    // failed below with an uninformative NullPointerException
    throw new IllegalArgumentException(
      "Data source must be either an Instances object or an ArffLoader, "
        + "but was: "
        + (source == null ? "null" : source.getClass().getName()));
  }

  // one (initially zero-frequency) singleton per attribute
  for (int i = 0; i < data.numAttributes(); i++) {
    singletons.add(new BinaryItem(data.attribute(i), m_positiveIndex - 1));
  }

  if (source instanceof Instances) {
    // set the number of instances
    m_numInstances = data.numInstances();

    for (int i = 0; i < data.numInstances(); i++) {
      Instance current = data.instance(i);
      processSingleton(current, singletons);
    }
  } else {
    // stream the instances off disk
    weka.core.converters.ArffLoader loader = (weka.core.converters.ArffLoader) source;
    Instance current = null;
    int count = 0;
    while ((current = loader.getNextInstance(data)) != null) {
      processSingleton(current, singletons);
      count++;

      if (count % m_offDiskReportingFrequency == 0) {
        System.err.println("Singletons: done " + count);
      }
    }

    // set the number of instances
    m_numInstances = count;
    loader.reset();
  }

  return singletons;
}
/**
 * Get the singleton items in the data.
 *
 * @param data the Instances to process
 * @return a list of singleton item sets
 * @throws Exception if the singletons can't be found for some reason
 */
protected ArrayList<BinaryItem> getSingletons(Instances data)
  throws Exception {
  // delegate to the Object-based implementation (which also handles
  // ArffLoader sources); removed a large block of commented-out legacy code
  return getSingletons((Object) data);
}
/*
* protected ArrayList<BinaryItem> getFrequent(ArrayList<BinaryItem> items,
* int minSupport) { ArrayList<BinaryItem> frequent = new
* ArrayList<BinaryItem>(); for (BinaryItem b : items) { if (b.getFrequency()
* > minSupport) { frequent.add(b); } }
*
* // sort in descending order of support Collections.sort(frequent); return
* frequent; }
*/
/**
 * Inserts a single instance into the FPTree. Only items whose singleton
 * frequency meets the minimum support threshold are included in the
 * inserted transaction; the transaction is sorted before insertion.
 *
 * @param current the instance to insert
 * @param singletons the singleton item sets
 * @param tree the tree to insert into
 * @param minSupport the minimum support threshold
 */
private void insertInstance(Instance current,
  ArrayList<BinaryItem> singletons, FPTreeRoot tree, int minSupport) {

  ArrayList<BinaryItem> transaction = new ArrayList<BinaryItem>();

  if (current instanceof SparseInstance) {
    // sparse: every stored value is an item occurrence
    for (int j = 0; j < current.numValues(); j++) {
      int attIndex = current.index(j);
      if (singletons.get(attIndex).getFrequency() >= minSupport) {
        transaction.add(singletons.get(attIndex));
      }
    }
  } else {
    // dense: unary attributes, or values equal to the positive index,
    // count as item occurrences
    for (int j = 0; j < current.numAttributes(); j++) {
      if (!current.isMissing(j)
        && (current.attribute(j).numValues() == 1
          || current.value(j) == m_positiveIndex - 1)
        && singletons.get(j).getFrequency() >= minSupport) {
        transaction.add(singletons.get(j));
      }
    }
  }

  // common tail, hoisted out of the two branches above
  Collections.sort(transaction);
  tree.addItemSet(transaction, 1);
}
/**
 * Construct the frequent pattern tree by inserting each transaction in the
 * data into the tree. Only those items from each transaction that meet the
 * minimum support threshold are inserted.
 *
 * @param singletons the singleton item sets
 * @param dataSource the source of the transactions: either an Instances
 *          object or a weka.core.converters.ArffLoader (off-disk
 *          processing). NOTE(review): any other type leaves data == null
 *          and the method silently returns an empty tree - confirm callers
 *          only ever pass these two types.
 * @param minSupport the minimum support
 * @return the root of the tree
 * @throws Exception if the tree can't be built for some reason
 */
protected FPTreeRoot buildFPTree(ArrayList<BinaryItem> singletons,
  Object dataSource, int minSupport) throws Exception {

  FPTreeRoot tree = new FPTreeRoot();
  Instances data = null;
  if (dataSource instanceof Instances) {
    data = (Instances) dataSource;
  } else if (dataSource instanceof weka.core.converters.ArffLoader) {
    data = ((weka.core.converters.ArffLoader) dataSource).getStructure();
  }

  if (dataSource instanceof Instances) {
    for (int i = 0; i < data.numInstances(); i++) {
      insertInstance(data.instance(i), singletons, tree, minSupport);
    }
  } else if (dataSource instanceof weka.core.converters.ArffLoader) {
    // stream the instances off disk
    weka.core.converters.ArffLoader loader = (weka.core.converters.ArffLoader) dataSource;
    Instance current = null;
    int count = 0;
    while ((current = loader.getNextInstance(data)) != null) {
      insertInstance(current, singletons, tree, minSupport);
      count++;
      if (count % m_offDiskReportingFrequency == 0) {
        System.err.println("build tree done: " + count);
      }
    }
  }

  return tree;
}
/**
* Construct the frequent pattern tree by inserting each transaction in the
* data into the tree. Only those items from each transaction that meet the
* minimum support threshold are inserted.
*
* @param singletons the singleton item sets
* @param data the Instances containing the transactions
* @param minSupport the minimum support
* @return the root of the tree
*/
/*
* protected FPTreeRoot buildFPTree(ArrayList<BinaryItem> singletons,
* Instances data, int minSupport) {
*
* FPTreeRoot tree = new FPTreeRoot();
*
* for (int i = 0; i < data.numInstances(); i++) { Instance current =
* data.instance(i); ArrayList<BinaryItem> transaction = new
* ArrayList<BinaryItem>(); if (current instanceof SparseInstance) { for (int
* j = 0; j < current.numValues(); j++) { int attIndex = current.index(j); if
* (singletons.get(attIndex).getFrequency() >= minSupport) {
* transaction.add(singletons.get(attIndex)); } }
* Collections.sort(transaction); tree.addItemSet(transaction, 1); } else {
* for (int j = 0; j < data.numAttributes(); j++) { if (!current.isMissing(j))
* { if (current.attribute(j).numValues() == 1 || current.value(j) ==
* m_positiveIndex - 1) { if (singletons.get(j).getFrequency() >= minSupport)
* { transaction.add(singletons.get(j)); } } } }
* Collections.sort(transaction); tree.addItemSet(transaction, 1); } }
*
* return tree; }
*/
/**
* Find large item sets in the FP-tree.
*
* @param tree the root of the tree to mine
* @param largeItemSets holds the large item sets found
* @param recursionLevel the recursion level for the current projected counts
* @param conditionalItems the current set of items that the current
* (projected) tree is conditional on
* @param minSupport the minimum acceptable support
*/
  protected void mineTree(FPTreeRoot tree, FrequentItemSets largeItemSets,
    int recursionLevel, FrequentBinaryItemSet conditionalItems, int minSupport) {
    if (!tree.isEmpty(recursionLevel)) {
      if (m_maxItems > 0 && recursionLevel >= m_maxItems) {
        // don't mine any further - item sets would exceed the configured
        // maximum size (one item is added per recursion level)
        return;
      }
      Map<BinaryItem, FPTreeRoot.Header> headerTable = tree.getHeaderTable();
      Set<BinaryItem> keys = headerTable.keySet();
      // System.err.println("Number of freq item sets collected " +
      // largeItemSets.size());
      Iterator<BinaryItem> i = keys.iterator();
      while (i.hasNext()) {
        BinaryItem item = i.next();
        FPTreeRoot.Header itemHeader = headerTable.get(item);
        // check for minimum support at this level
        int support = itemHeader.getProjectedCounts().getCount(recursionLevel);
        if (support >= minSupport) {
          // process header list at this recursion level.
          // NOTE(review): conditional ("projected") trees appear to be
          // simulated via per-level counts on the shared tree rather than
          // physically constructed - see the increase/remove pairing below.
          for (FPTreeNode n : itemHeader.getHeaderList()) {
            // push count up path to root
            int currentCount = n.getProjectedCount(recursionLevel);
            if (currentCount > 0) {
              FPTreeNode temp = n.getParent();
              while (temp != tree) {
                // set/increase for the node
                temp.increaseProjectedCount(recursionLevel + 1, currentCount);
                // set/increase for the header table
                headerTable.get(temp.getItem()).getProjectedCounts()
                  .increaseCount(recursionLevel + 1, currentCount);
                temp = temp.getParent();
              }
            }
          }
          FrequentBinaryItemSet newConditional = (FrequentBinaryItemSet) conditionalItems
            .clone();
          // this item gets added to the conditional items
          newConditional.addItem(item);
          newConditional.setSupport(support);
          // now add this conditional item set to the list of large item sets
          largeItemSets.addItemSet(newConditional);
          // now recursively process the new tree
          mineTree(tree, largeItemSets, recursionLevel + 1, newConditional,
            minSupport);
          // reverse the propagated counts (the shared tree must be restored
          // before the next header-table item is processed)
          for (FPTreeNode n : itemHeader.getHeaderList()) {
            FPTreeNode temp = n.getParent();
            while (temp != tree) {
              temp.removeProjectedCount(recursionLevel + 1);
              temp = temp.getParent();
            }
          }
          // reverse the propagated counts in the header list
          // at this recursion level
          for (FPTreeRoot.Header h : headerTable.values()) {
            h.getProjectedCounts().removeCount(recursionLevel + 1);
          }
        }
      }
    }
  }
/**
* Construct a new FPGrowth object.
*/
  public FPGrowth() {
    // start from a clean set of default option values
    resetOptions();
  }
/**
* Reset all options to their default values.
*/
  public void resetOptions() {
    m_delta = 0.05; // step by which min support is lowered each iteration
    m_metricThreshold = 0.9; // minimum metric (e.g. confidence) for rules
    m_numRulesToFind = 10;
    m_lowerBoundMinSupport = 0.1;
    m_upperBoundMinSupport = 1.0;
    // m_minSupport = -1;
    m_positiveIndex = 2; // second value of binary attributes is "positive"
    m_transactionsMustContain = ""; // empty = no restriction
    m_rulesMustContain = ""; // empty = no restriction
    m_mustContainOR = false; // AND semantics for must-contain lists
  }
/**
* Tip text for this property suitable for displaying in the GUI.
*
* @return the tip text for this property.
*/
public String positiveIndexTipText() {
return "Set the index of binary valued attributes that is to be considered"
+ " the positive index. Has no effect for sparse data (in this case"
+ " the first index (i.e. non-zero values) is always treated as "
+ " positive. Also has no effect for unary valued attributes (i.e."
+ " when using the Weka Apriori-style format for market basket data,"
+ " which uses missing value \"?\" to indicate" + " absence of an item.";
}
  /**
   * Set the index of the attribute value to consider as positive for binary
   * attributes in normal dense instances. Index 1 is always used for sparse
   * instances.
   *
   * @param index the 1-based index to use for positive values in binary
   *          attributes (internally compared against the 0-based attribute
   *          value).
   */
  public void setPositiveIndex(int index) {
    m_positiveIndex = index;
  }
  /**
   * Get the index of the attribute value to consider as positive for binary
   * attributes in normal dense instances. Index 1 is always used for sparse
   * instances.
   *
   * @return the 1-based index to use for positive values in binary attributes.
   */
  public int getPositiveIndex() {
    return m_positiveIndex;
  }
  /**
   * Set the desired number of rules to find.
   *
   * @param numR the number of rules to find.
   */
  public void setNumRulesToFind(int numR) {
    m_numRulesToFind = numR;
  }
  /**
   * Get the number of rules to find.
   *
   * @return the number of rules to find.
   */
  public int getNumRulesToFind() {
    return m_numRulesToFind;
  }
  /**
   * Tip text for this property suitable for displaying in the GUI.
   *
   * @return the tip text for this property.
   */
  public String numRulesToFindTipText() {
    return "The number of rules to output";
  }
/**
* Set the metric type to use.
*
* @param d the metric type
*/
public void setMetricType(SelectedTag d) {
int ordinal = d.getSelectedTag().getID();
for (DefaultAssociationRule.METRIC_TYPE m : DefaultAssociationRule.METRIC_TYPE
.values()) {
if (m.ordinal() == ordinal) {
m_metric = m;
break;
}
}
}
  /**
   * Set the maximum number of items to include in large items sets.
   *
   * @param max the maximum number of items to include in large item sets;
   *          -1 (or any non-positive value) means no limit.
   */
  public void setMaxNumberOfItems(int max) {
    m_maxItems = max;
  }
  /**
   * Gets the maximum number of items to be included in large item sets.
   *
   * @return the maximum number of items to be included in large items sets;
   *         -1 means no limit.
   */
  public int getMaxNumberOfItems() {
    return m_maxItems;
  }
  /**
   * Tip text for this property suitable for displaying in the GUI.
   *
   * @return the tip text for this property.
   */
  public String maxNumberOfItemsTipText() {
    return "The maximum number of items to include in frequent item sets. -1 "
      + "means no limit.";
  }
  /**
   * Get the metric type to use.
   *
   * @return the metric type to use, wrapped in a SelectedTag.
   */
  public SelectedTag getMetricType() {
    return new SelectedTag(m_metric.ordinal(),
      DefaultAssociationRule.TAGS_SELECTION);
  }
  /**
   * Tip text for this property suitable for displaying in the GUI.
   *
   * @return the tip text for this property.
   */
  public String metricTypeTipText() {
    return "Set the type of metric by which to rank rules. Confidence is "
      + "the proportion of the examples covered by the premise that are also "
      + "covered by the consequence(Class association rules can only be mined using confidence). Lift is confidence divided by the "
      + "proportion of all examples that are covered by the consequence. This "
      + "is a measure of the importance of the association that is independent "
      + "of support. Leverage is the proportion of additional examples covered "
      + "by both the premise and consequence above those expected if the "
      + "premise and consequence were independent of each other. The total "
      + "number of examples that this represents is presented in brackets "
      + "following the leverage. Conviction is "
      + "another measure of departure from independence.";
  }
  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String minMetricTipText() {
    return "Minimum metric score. Consider only rules with scores higher than "
      + "this value.";
  }
  /**
   * Get the minimum metric threshold (e.g. minimum confidence).
   *
   * @return the minimum metric score a rule must achieve to be reported.
   */
  public double getMinMetric() {
    return m_metricThreshold;
  }
  /**
   * Set the minimum metric threshold (e.g. minimum confidence).
   *
   * @param v the minimum metric score a rule must achieve to be reported.
   */
  public void setMinMetric(double v) {
    m_metricThreshold = v;
  }
  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String transactionsMustContainTipText() {
    return "Limit input to FPGrowth to those transactions (instances)"
      + " that contain these items. Provide a comma separated"
      + " list of attribute names.";
  }
  /**
   * Set the comma separated list of items that transactions must contain in
   * order to be considered for large item sets and rules.
   *
   * @param list a comma separated list of items (empty string indicates no
   *          restriction on the transactions).
   */
  public void setTransactionsMustContain(String list) {
    m_transactionsMustContain = list;
  }
  /**
   * Gets the comma separated list of items that transactions must contain in
   * order to be considered for large item sets and rules.
   *
   * @return the comma separated list of items that transactions must contain
   *         (empty string indicates no restriction).
   */
  public String getTransactionsMustContain() {
    return m_transactionsMustContain;
  }
  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String rulesMustContainTipText() {
    return "Only print rules that contain these items. Provide "
      + "a comma separated list of attribute names.";
  }
  /**
   * Set the comma separated list of items that rules must contain in order to
   * be output.
   *
   * @param list a comma separated list of items (empty string indicates no
   *          restriction on the rules).
   */
  public void setRulesMustContain(String list) {
    m_rulesMustContain = list;
  }
  /**
   * Get the comma separated list of items that rules must contain in order to
   * be output.
   *
   * @return the comma separated list of items that rules must contain in order
   *         to be output (empty string indicates no restriction).
   */
  public String getRulesMustContain() {
    return m_rulesMustContain;
  }
  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String useORForMustContainListTipText() {
    return "Use OR instead of AND for transactions/rules must contain lists.";
  }
  /**
   * Set whether to use OR rather than AND when considering must contain lists.
   *
   * @param b true if OR should be used instead of AND when considering
   *          transaction and rules must contain lists.
   */
  public void setUseORForMustContainList(boolean b) {
    m_mustContainOR = b;
  }
  /**
   * Gets whether OR is to be used rather than AND when considering must contain
   * lists.
   *
   * @return true if OR is used instead of AND.
   */
  public boolean getUseORForMustContainList() {
    return m_mustContainOR;
  }
  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying, in the
   *         explorer/experimenter gui
   */
  public String deltaTipText() {
    return "Iteratively decrease support by this factor. Reduces support "
      + "until min support is reached or required number of rules has been "
      + "generated.";
  }
  /**
   * Get the value of delta.
   *
   * @return Value of delta. Values greater than 1 are interpreted as an
   *         absolute number of instances rather than a fraction.
   */
  public double getDelta() {
    return m_delta;
  }
  /**
   * Set the value of delta.
   *
   * @param v Value to assign to delta. Values greater than 1 are interpreted
   *          as an absolute number of instances rather than a fraction.
   */
  public void setDelta(double v) {
    m_delta = v;
  }
  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String lowerBoundMinSupportTipText() {
    return "Lower bound for minimum support as a fraction or number of instances.";
  }
  /**
   * Get the value of lowerBoundMinSupport.
   *
   * @return Value of lowerBoundMinSupport. Values greater than 1 are
   *         interpreted as an absolute number of instances.
   */
  public double getLowerBoundMinSupport() {
    return m_lowerBoundMinSupport;
  }
  /**
   * Set the value of lowerBoundMinSupport.
   *
   * @param v Value to assign to lowerBoundMinSupport. Values greater than 1
   *          are interpreted as an absolute number of instances.
   */
  public void setLowerBoundMinSupport(double v) {
    m_lowerBoundMinSupport = v;
  }
  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String upperBoundMinSupportTipText() {
    return "Upper bound for minimum support as a fraction or number of instances. "
      + "Start iteratively decreasing " + "minimum support from this value.";
  }
  /**
   * Get the value of upperBoundMinSupport.
   *
   * @return Value of upperBoundMinSupport. Values greater than 1 are
   *         interpreted as an absolute number of instances.
   */
  public double getUpperBoundMinSupport() {
    return m_upperBoundMinSupport;
  }
  /**
   * Set the value of upperBoundMinSupport.
   *
   * @param v Value to assign to upperBoundMinSupport. Values greater than 1
   *          are interpreted as an absolute number of instances.
   */
  public void setUpperBoundMinSupport(double v) {
    m_upperBoundMinSupport = v;
  }
  /**
   * Tip text for this property suitable for displaying in the GUI.
   *
   * @return the tip text for this property.
   */
  public String findAllRulesForSupportLevelTipText() {
    return "Find all rules that meet "
      + "the lower bound on minimum support and the minimum metric constraint. "
      + "Turning this mode on will disable the iterative support reduction "
      + "procedure to find the specified number of rules.";
  }
  /**
   * If true then turn off the iterative support reduction method of finding x
   * rules that meet the minimum support and metric thresholds and just return
   * all the rules that meet the lower bound on minimum support and the minimum
   * metric.
   *
   * @param s true if all rules meeting the lower bound on the support and
   *          minimum metric thresholds are to be found.
   */
  public void setFindAllRulesForSupportLevel(boolean s) {
    m_findAllRulesForSupportLevel = s;
  }
  /**
   * Get whether all rules meeting the lower bound on min support and the
   * minimum metric threshold are to be found.
   *
   * @return true if all rules meeting the lower bound on min support and the
   *         min metric threshold are to be found.
   */
  public boolean getFindAllRulesForSupportLevel() {
    return m_findAllRulesForSupportLevel;
  }
  /**
   * Set how often to report some progress when the data is being read
   * incrementally off of the disk rather than loaded into memory.
   *
   * @param freq the frequency (in number of instances read) at which to print
   *          progress.
   */
  public void setOffDiskReportingFrequency(int freq) {
    m_offDiskReportingFrequency = freq;
  }
/*
* public void setMinimumSupport(double minSupp) { m_minSupport = minSupp; }
*
* public double getMinimumSupport() { return m_minSupport; }
*/
/**
* Gets the list of mined association rules.
*
* @return the list of association rules discovered during mining. Returns
* null if mining hasn't been performed yet.
*/
@Override
public AssociationRules getAssociationRules() {
List<AssociationRule> rulesToReturn = new ArrayList<AssociationRule>();
int count = 0;
for (AssociationRule r : m_rules) {
rulesToReturn.add(r);
count++;
if (!m_findAllRulesForSupportLevel && count == m_numRulesToFind) {
break;
}
}
return new AssociationRules(rulesToReturn, this);
}
/**
* Gets a list of the names of the metrics output for each rule. This list
* should be the same (in terms of the names and order thereof) as that
* produced by AssociationRule.getMetricNamesForRule().
*
* @return an array of the names of the metrics available for each rule
* learned by this producer.
*/
@Override
public String[] getRuleMetricNames() {
String[] metricNames = new String[DefaultAssociationRule.TAGS_SELECTION.length];
for (int i = 0; i < DefaultAssociationRule.TAGS_SELECTION.length; i++) {
metricNames[i] = DefaultAssociationRule.TAGS_SELECTION[i].getReadable();
}
return metricNames;
}
/**
* Returns true if this AssociationRulesProducer can actually produce rules.
* Most implementing classes will always return true from this method
* (obviously :-)). However, an implementing class that actually acts as a
* wrapper around things that may or may not implement
* AssociationRulesProducer will want to return false if the thing they wrap
* can't produce rules.
*
* @return true if this producer can produce rules in its current
* configuration
*/
  @Override
  public boolean canProduceRules() {
    // FPGrowth always produces rules directly (it is not a wrapper)
    return true;
  }
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
*/
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> newVector = new Vector<Option>();
    // descriptions for each command line option; flags mirror setOptions()
    String string00 = "\tSet the index of the attribute value to consider as 'positive'\n\t"
      + "for binary attributes in normal dense instances. Index 2 is always\n\t"
      + "used for sparse instances. (default = 2)";
    String string0 = "\tThe maximum number of items to include "
      + "in large items sets (and rules). (default " + "= -1, i.e. no limit.)";
    String string1 = "\tThe required number of rules. (default = "
      + m_numRulesToFind + ")";
    String string2 = "\tThe minimum metric score of a rule. (default" + " = "
      + m_metricThreshold + ")";
    String string3 = "\tThe metric by which to rank rules. (default"
      + " = confidence)";
    String string4 = "\tThe lower bound for the minimum support as a fraction"
      + " or number of instances. (default = " + m_lowerBoundMinSupport + ")";
    String string5 = "\tUpper bound for minimum support as a fraction or number of instances. "
      + "(default = 1.0)";
    String string6 = "\tThe delta by which the minimum support is decreased in\n"
      + "\teach iteration as a fraction or number of instances. (default = "
      + m_delta + ")";
    String string7 = "\tFind all rules that meet the lower bound on\n\t"
      + "minimum support and the minimum metric constraint.\n\t"
      + "Turning this mode on will disable the iterative support reduction\n\t"
      + "procedure to find the specified number of rules.";
    String string8 = "\tOnly consider transactions that contain these items (default = no restriction)";
    String string9 = "\tOnly print rules that contain these items. (default = no restriction)";
    String string10 = "\tUse OR instead of AND for must contain list(s). Use in conjunction"
      + "\n\twith -transactions and/or -rules";
    newVector.add(new Option(string00, "P", 1,
      "-P <attribute index of positive value>"));
    newVector.add(new Option(string0, "I", 1, "-I <max items>"));
    newVector.add(new Option(string1, "N", 1, "-N <require number of rules>"));
    newVector.add(new Option(string3, "T", 1, "-T <0=confidence | 1=lift | "
      + "2=leverage | 3=Conviction>"));
    newVector.add(new Option(string2, "C", 1,
      "-C <minimum metric score of a rule>"));
    newVector.add(new Option(string5, "U", 1,
      "-U <upper bound for minimum support>"));
    newVector.add(new Option(string4, "M", 1,
      "-M <lower bound for minimum support>"));
    newVector
      .add(new Option(string6, "D", 1, "-D <delta for minimum support>"));
    newVector.add(new Option(string7, "S", 0, "-S"));
    newVector.add(new Option(string8, "transactions", 1,
      "-transactions <comma separated " + "list of attribute names>"));
    newVector.add(new Option(string9, "rules", 1,
      "-rules <comma separated list " + "of attribute names>"));
    newVector.add(new Option(string10, "use-or", 0, "-use-or"));
    return newVector.elements();
  }
/**
*
* Parses a given list of options.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -P <attribute index of positive value>
* Set the index of the attribute value to consider as 'positive'
* for binary attributes in normal dense instances. Index 2 is always
* used for sparse instances. (default = 2)
* </pre>
*
* <pre>
* -I <max items>
* The maximum number of items to include in large items sets (and rules). (default = -1, i.e. no limit.)
* </pre>
*
* <pre>
* -N <require number of rules>
* The required number of rules. (default = 10)
* </pre>
*
* <pre>
* -T <0=confidence | 1=lift | 2=leverage | 3=Conviction>
* The metric by which to rank rules. (default = confidence)
* </pre>
*
* <pre>
* -C <minimum metric score of a rule>
* The minimum metric score of a rule. (default = 0.9)
* </pre>
*
* <pre>
* -U <upper bound for minimum support>
* Upper bound for minimum support. (default = 1.0)
* </pre>
*
* <pre>
* -M <lower bound for minimum support>
* The lower bound for the minimum support. (default = 0.1)
* </pre>
*
* <pre>
* -D <delta for minimum support>
* The delta by which the minimum support is decreased in
* each iteration. (default = 0.05)
* </pre>
*
* <pre>
* -S
* Find all rules that meet the lower bound on
* minimum support and the minimum metric constraint.
* Turning this mode on will disable the iterative support reduction
* procedure to find the specified number of rules.
* </pre>
*
* <pre>
* -transactions <comma separated list of attribute names>
* Only consider transactions that contain these items (default = no restriction)
* </pre>
*
* <pre>
* -rules <comma separated list of attribute names>
* Only print rules that contain these items. (default = no restriction)
* </pre>
*
* <pre>
* -use-or
* Use OR instead of AND for must contain list(s). Use in conjunction
* with -transactions and/or -rules
* </pre>
*
* <!-- options-end -->
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
  @Override
  public void setOptions(String[] options) throws Exception {
    // start from the defaults so that options not supplied are reset
    resetOptions();
    String positiveIndexString = Utils.getOption('P', options);
    String maxItemsString = Utils.getOption('I', options);
    String numRulesString = Utils.getOption('N', options);
    String minMetricString = Utils.getOption('C', options);
    String metricTypeString = Utils.getOption("T", options);
    String lowerBoundSupportString = Utils.getOption("M", options);
    String upperBoundSupportString = Utils.getOption("U", options);
    String deltaString = Utils.getOption("D", options);
    String transactionsString = Utils.getOption("transactions", options);
    String rulesString = Utils.getOption("rules", options);
    // only override a default when the corresponding option was supplied
    if (positiveIndexString.length() != 0) {
      setPositiveIndex(Integer.parseInt(positiveIndexString));
    }
    if (maxItemsString.length() != 0) {
      setMaxNumberOfItems(Integer.parseInt(maxItemsString));
    }
    if (metricTypeString.length() != 0) {
      setMetricType(new SelectedTag(Integer.parseInt(metricTypeString),
        DefaultAssociationRule.TAGS_SELECTION));
    }
    if (numRulesString.length() != 0) {
      setNumRulesToFind(Integer.parseInt(numRulesString));
    }
    if (minMetricString.length() != 0) {
      setMinMetric(Double.parseDouble(minMetricString));
    }
    if (deltaString.length() != 0) {
      setDelta(Double.parseDouble(deltaString));
    }
    if (lowerBoundSupportString.length() != 0) {
      setLowerBoundMinSupport(Double.parseDouble(lowerBoundSupportString));
    }
    if (upperBoundSupportString.length() != 0) {
      setUpperBoundMinSupport(Double.parseDouble(upperBoundSupportString));
    }
    if (transactionsString.length() != 0) {
      setTransactionsMustContain(transactionsString);
    }
    if (rulesString.length() > 0) {
      setRulesMustContain(rulesString);
    }
    setUseORForMustContainList(Utils.getFlag("use-or", options));
    setFindAllRulesForSupportLevel(Utils.getFlag('S', options));
  }
/**
* Gets the current settings of the classifier.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
ArrayList<String> options = new ArrayList<String>();
options.add("-P");
options.add("" + getPositiveIndex());
options.add("-I");
options.add("" + getMaxNumberOfItems());
options.add("-N");
options.add("" + getNumRulesToFind());
options.add("-T");
options.add("" + getMetricType().getSelectedTag().getID());
options.add("-C");
options.add("" + getMinMetric());
options.add("-D");
options.add("" + getDelta());
options.add("-U");
options.add("" + getUpperBoundMinSupport());
options.add("-M");
options.add("" + getLowerBoundMinSupport());
if (getFindAllRulesForSupportLevel()) {
options.add("-S");
}
if (getTransactionsMustContain().length() > 0) {
options.add("-transactions");
options.add(getTransactionsMustContain());
}
if (getRulesMustContain().length() > 0) {
options.add("-rules");
options.add(getRulesMustContain());
}
if (getUseORForMustContainList()) {
options.add("-use-or");
}
return options.toArray(new String[1]);
}
private Instances parseTransactionsMustContain(Instances data) {
String[] split = m_transactionsMustContain.trim().split(",");
boolean[] transactionsMustContainIndexes = new boolean[data.numAttributes()];
int numInTransactionsMustContainList = split.length;
for (String element : split) {
String attName = element.trim();
Attribute att = data.attribute(attName);
if (att == null) {
System.err.println("[FPGrowth] : WARNING - can't find attribute "
+ attName + " in the data.");
numInTransactionsMustContainList--;
} else {
transactionsMustContainIndexes[att.index()] = true;
}
}
if (numInTransactionsMustContainList == 0) {
return data;
} else {
Instances newInsts = new Instances(data, 0);
for (int i = 0; i < data.numInstances(); i++) {
if (passesMustContain(data.instance(i), transactionsMustContainIndexes,
numInTransactionsMustContainList)) {
newInsts.add(data.instance(i));
}
}
newInsts.compactify();
return newInsts;
}
}
private ArrayList<Item> parseRulesMustContain(Instances data) {
ArrayList<Item> result = new ArrayList<Item>();
String[] split = m_rulesMustContain.trim().split(",");
for (String element : split) {
String attName = element.trim();
Attribute att = data.attribute(attName);
if (att == null) {
System.err.println("[FPGrowth] : WARNING - can't find attribute "
+ attName + " in the data.");
} else {
BinaryItem tempI = null;
try {
tempI = new BinaryItem(att, m_positiveIndex - 1);
} catch (Exception e) {
// this should never happen
e.printStackTrace();
}
result.add(tempI);
}
}
return result;
}
/**
* Method that generates all large item sets with a minimum support, and from
* these all association rules with a minimum metric (i.e. confidence, lift
* etc.).
*
* @param source the source of the data. May be an Instances object or an
* ArffLoader. In the case of the latter, the two passes over the
* data that FPGrowth requires will be done off of disk (i.e. only
* one instance will be in memory at any one time).
* @throws Exception if rules can't be built successfully
*/
  private void buildAssociations(Object source) throws Exception {
    Instances data = null;
    Capabilities capabilities = getCapabilities();
    boolean arffLoader = false;
    boolean breakOnNext = false;
    if (source instanceof weka.core.converters.ArffLoader) {
      // incremental (off-disk) mode: only the header is held in memory
      data = ((weka.core.converters.ArffLoader) source).getStructure();
      capabilities.setMinimumNumberInstances(0);
      arffLoader = true;
    } else {
      data = (Instances) source;
    }
    // can we handle the data?
    capabilities.testWithFail(data);
    // prune any instances that don't contain the requested items (if any)
    // can only do this if we are not reading the data incrementally
    if (m_transactionsMustContain.length() > 0 && (source instanceof Instances)) {
      data = parseTransactionsMustContain(data);
      getCapabilities().testWithFail(data);
    }
    ArrayList<Item> rulesMustContain = null;
    if (m_rulesMustContain.length() > 0) {
      rulesMustContain = parseRulesMustContain(data);
    }
    // collect the singleton items; their frequencies are used later for the
    // minimum support checks when building the FP-tree
    ArrayList<BinaryItem> singletons = getSingletons(source);
    // support settings > 1 are interpreted as absolute instance counts,
    // otherwise as a fraction of the number of instances
    int upperBoundMinSuppAsInstances = (m_upperBoundMinSupport > 1) ? (int) m_upperBoundMinSupport
      : (int) Math.ceil(m_upperBoundMinSupport * m_numInstances);
    int lowerBoundMinSuppAsInstances = (m_lowerBoundMinSupport > 1) ? (int) m_lowerBoundMinSupport
      : (int) Math.ceil(m_lowerBoundMinSupport * m_numInstances);
    double lowerBoundMinSuppAsFraction = (m_lowerBoundMinSupport > 1) ? m_lowerBoundMinSupport
      / m_numInstances
      : m_lowerBoundMinSupport;
    double deltaAsFraction = (m_delta > 1) ? m_delta / m_numInstances : m_delta;
    // double currentSupport = upperBoundMinSuppAsFraction;
    double currentSupport = 1.0;
    if (m_findAllRulesForSupportLevel) {
      currentSupport = lowerBoundMinSuppAsFraction;
    }
    do {
      if (arffLoader) {
        // rewind the loader for another pass over the data
        ((weka.core.converters.ArffLoader) source).reset();
      }
      int currentSupportAsInstances = (currentSupport > 1) ? (int) currentSupport
        : (int) Math.ceil(currentSupport * m_numInstances);
      // build the FPTree
      if (arffLoader) {
        System.err.println("Building FP-tree...");
      }
      FPTreeRoot tree = buildFPTree(singletons, source,
        currentSupportAsInstances);
      FrequentItemSets largeItemSets = new FrequentItemSets(m_numInstances);
      if (arffLoader) {
        System.err.println("Mining tree for min supp " + currentSupport);
      }
      // mine the tree
      FrequentBinaryItemSet conditionalItems = new FrequentBinaryItemSet(
        new ArrayList<BinaryItem>(), 0);
      mineTree(tree, largeItemSets, 0, conditionalItems,
        currentSupportAsInstances);
      m_largeItemSets = largeItemSets;
      if (arffLoader) {
        System.err.println("Number of large item sets: "
          + m_largeItemSets.size());
      }
      // save memory
      tree = null;
      m_rules = generateRulesBruteForce(m_largeItemSets, m_metric,
        m_metricThreshold, upperBoundMinSuppAsInstances,
        lowerBoundMinSuppAsInstances, m_numInstances);
      if (arffLoader) {
        System.err.println("Number of rules found " + m_rules.size());
      }
      if (rulesMustContain != null && rulesMustContain.size() > 0) {
        m_rules = pruneRules(m_rules, rulesMustContain, m_mustContainOR);
      }
      if (!m_findAllRulesForSupportLevel) {
        if (breakOnNext) {
          break;
        }
        // iteratively lower the minimum support until enough rules are found
        // or the lower bound is reached
        currentSupport -= deltaAsFraction;
        // System.err.println("currentSupport " + currentSupport +
        // " lowBoundAsFrac " + lowerBoundMinSuppAsFraction);
        if (currentSupport < lowerBoundMinSuppAsFraction) {
          if (currentSupport + deltaAsFraction > lowerBoundMinSuppAsFraction) {
            // ensure that the lower bound does get evaluated
            currentSupport = lowerBoundMinSuppAsFraction;
            breakOnNext = true;
          } else {
            break;
          }
        }
      } else {
        // just break out of the loop as we are just finding all rules
        // with a minimum support + metric
        break;
      }
    } while (m_rules.size() < m_numRulesToFind);
    Collections.sort(m_rules);
  }
/**
* Method that generates all large item sets with a minimum support, and from
* these all association rules with a minimum metric (i.e. confidence, lift
* etc.).
*
* @param data the instances to be used for generating the associations
* @throws Exception if rules can't be built successfully
*/
@Override
public void buildAssociations(Instances data) throws Exception {
buildAssociations((Object) data);
return;
}
/**
* Output the association rules.
*
* @return a string representation of the model.
*/
  @Override
  public String toString() {
    // return m_largeItemSets.toString(m_numItemSetsToFind);
    if (m_rules == null) {
      return "FPGrowth hasn't been trained yet!";
    }
    StringBuffer result = new StringBuffer();
    // number of rules that will actually be displayed
    int numRules = (m_rules.size() < m_numRulesToFind) ? m_rules.size()
      : m_numRulesToFind;
    if (m_rules.size() == 0) {
      return "No rules found!";
    } else {
      result.append("FPGrowth found " + m_rules.size() + " rules");
      if (!m_findAllRulesForSupportLevel) {
        result.append(" (displaying top " + numRules + ")");
      }
      if (m_transactionsMustContain.length() > 0
        || m_rulesMustContain.length() > 0) {
        result.append("\n");
        if (m_transactionsMustContain.length() > 0) {
          result.append("\nUsing only transactions that contain: "
            + m_transactionsMustContain);
        }
        if (m_rulesMustContain.length() > 0) {
          result.append("\nShowing only rules that contain: "
            + m_rulesMustContain);
        }
      }
      result.append("\n\n");
    }
    int count = 0;
    for (AssociationRule r : m_rules) {
      // the log10 expression computes the digit width needed to right-align
      // the rule numbers.
      // NOTE(review): if m_numRulesToFind were 0 while rules exist, numRules
      // would be 0 and Math.log(0) is -Infinity - appears unreachable with
      // sensible option values; confirm.
      result.append(Utils.doubleToString((double) count + 1,
        (int) (Math.log(numRules) / Math.log(10) + 1), 0)
        + ". ");
      result.append(r + "\n");
      count++;
      if (!m_findAllRulesForSupportLevel && count == m_numRulesToFind) {
        break;
      }
    }
    return result.toString();
  }
/**
 * Assemble a dot graph representation of the FP-tree.
 *
 * @param tree the root of the FP-tree
 * @return a graph representation as a String in dot format.
 */
public String graph(FPTreeRoot tree) {
  StringBuffer dotGraph = new StringBuffer();
  dotGraph.append("digraph FPTree {\n").append("N0 [label=\"ROOT\"]\n");
  tree.graphFPTree(dotGraph);
  dotGraph.append("}\n");
  return dotGraph.toString();
}
/**
 * Returns the revision string.
 *
 * @return the revision
 */
@Override
public String getRevision() {
  // The $Revision$ keyword is expanded by the version-control system.
  return RevisionUtils.extract("$Revision$");
}
/**
 * Main method for running FPGrowth from the command line.
 *
 * @param args the commandline options
 */
public static void main(String[] args) {
  try {
    // Peek at -h/-help on a copy so the check does not consume flags from
    // the array that is later handed on for the real option parsing.
    String[] argsCopy = args.clone();
    if (Utils.getFlag('h', argsCopy) || Utils.getFlag("help", argsCopy)) {
      // runAssociator prints the standard usage text; afterwards append
      // the description of the command-line-only -disk option.
      runAssociator(new FPGrowth(), args);
      System.out
        .println("-disk\n\tProcess data off of disk instead of loading\n\t"
          + "into main memory. This is a command line only option.");
      return;
    }
    if (!Utils.getFlag("disk", args)) {
      // Default path: load the data fully into main memory.
      runAssociator(new FPGrowth(), args);
    } else {
      // -disk: stream the training data from an ARFF file via a loader
      // instead of materializing it in memory.
      String filename;
      filename = Utils.getOption('t', args);
      weka.core.converters.ArffLoader loader = null;
      if (filename.length() != 0) {
        loader = new weka.core.converters.ArffLoader();
        loader.setFile(new java.io.File(filename));
      } else {
        throw new Exception("No training file specified!");
      }
      FPGrowth fpGrowth = new FPGrowth();
      fpGrowth.setOptions(args);
      Utils.checkForRemainingOptions(args);
      fpGrowth.buildAssociations(loader);
      System.out.print(fpGrowth.toString());
    }
  } catch (Exception ex) {
    ex.printStackTrace();
  }
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/associations/FilteredAssociationRules.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* FilteredAssociationRules.java
* Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.associations;
import java.util.List;
import weka.filters.Filter;
/**
 * Class encapsulating a list of association rules and the preprocessing filter
 * that was applied before they were generated. All rule-related calls are
 * delegated to the wrapped AssociationRules object.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision$
 *
 */
public class FilteredAssociationRules extends AssociationRules {

  /** For serialization */
  private static final long serialVersionUID = -4223408305476916955L;

  /** The filter that preprocessed the data the rules were learned from. */
  protected Filter m_filter;

  /** The wrapped rules; all rule accessors delegate to this object. */
  protected AssociationRules m_wrappedRules;

  /**
   * Constructs a new FilteredAssociationRules.
   *
   * @param producer a string describing the scheme that produced these rules.
   * @param filter the filter applied to preprocess the data used to learn the rules.
   * @param rules the wrapped AssociationRules object.
   */
  public FilteredAssociationRules(String producer, Filter filter, AssociationRules rules) {
    super(null, producer);
    m_filter = filter;
    m_wrappedRules = rules;
  }

  /**
   * Constructs a new FilteredAssociationRules.
   *
   * @param producer the scheme that produced the rules
   * @param filter the filter applied to preprocess the data used to learn the rules.
   * @param rules the wrapped AssociationRules object.
   */
  public FilteredAssociationRules(Object producer, Filter filter, AssociationRules rules) {
    super(null, producer);
    m_filter = filter;
    m_wrappedRules = rules;
  }

  /**
   * Constructs a new FilteredAssociationRules.
   *
   * @param filter the filter applied to preprocess the data used to learn the rules.
   * @param rules the wrapped AssociationRules object.
   */
  public FilteredAssociationRules(Filter filter, AssociationRules rules) {
    super(null);
    m_filter = filter;
    m_wrappedRules = rules;
  }

  /**
   * Set the preprocessing filter. Previously the filter could only be
   * supplied at construction time and was otherwise inaccessible; this
   * accessor mirrors setWrappedRules()/getWrappedRules().
   *
   * @param filter the filter applied to preprocess the data used to learn the rules.
   */
  public void setFilter(Filter filter) {
    m_filter = filter;
  }

  /**
   * Get the preprocessing filter that was applied to the data the rules were
   * learned from.
   *
   * @return the filter.
   */
  public Filter getFilter() {
    return m_filter;
  }

  /**
   * Set the rules to use. Passes them to the wrapped AssociationRules object.
   *
   * @param rules the rules to use.
   */
  public void setRules(List<AssociationRule> rules) {
    // delegate to our wrapped association rules
    m_wrappedRules.setRules(rules);
  }

  /**
   * Get the rules.
   *
   * @return the rules.
   */
  public List<AssociationRule> getRules() {
    // delegate to our wrapped association rules
    return m_wrappedRules.getRules();
  }

  /**
   * Get the number of rules.
   *
   * @return the number of rules.
   */
  public int getNumRules() {
    // delegate to our wrapped association rules
    return m_wrappedRules.getNumRules();
  }

  /**
   * Set the wrapped <code>AssociationRules</code> object to use.
   *
   * @param rules the <code>AssociationRules</code> object to wrap.
   */
  public void setWrappedRules(AssociationRules rules) {
    m_wrappedRules = rules;
  }

  /**
   * Get the wrapped <code>AssociationRules</code> object.
   *
   * @return the wrapped <code>AssociationRules</code> object.
   */
  public AssociationRules getWrappedRules() {
    return m_wrappedRules;
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/associations/FilteredAssociator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* FilteredAssociator.java
* Copyright (C) 2007-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.associations;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.filters.Filter;
import weka.filters.MultiFilter;
/**
* <!-- globalinfo-start --> Class for running an arbitrary associator on data
* that has been passed through an arbitrary filter. Like the associator, the
* structure of the filter is based exclusively on the training data and test
* instances will be processed by the filter without changing their structure.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -F <filter specification>
* Full class name of filter to use, followed
* by filter options.
* eg: "weka.filters.unsupervised.attribute.Remove -V -R 1,2"
* (default: weka.filters.MultiFilter with
* weka.filters.unsupervised.attribute.ReplaceMissingValues)
* </pre>
*
* <pre>
* -c <the class index>
* The class index.
* (default: -1, i.e. unset)
* </pre>
*
* <pre>
* -W
* Full name of base associator.
* (default: weka.associations.Apriori)
* </pre>
*
* <pre>
* Options specific to associator weka.associations.Apriori:
* </pre>
*
* <pre>
* -N <required number of rules output>
* The required number of rules. (default = 10)
* </pre>
*
* <pre>
* -T <0=confidence | 1=lift | 2=leverage | 3=Conviction>
* The metric type by which to rank rules. (default = confidence)
* </pre>
*
* <pre>
* -C <minimum metric score of a rule>
* The minimum confidence of a rule. (default = 0.9)
* </pre>
*
* <pre>
* -D <delta for minimum support>
* The delta by which the minimum support is decreased in
* each iteration. (default = 0.05)
* </pre>
*
* <pre>
* -U <upper bound for minimum support>
* Upper bound for minimum support. (default = 1.0)
* </pre>
*
* <pre>
* -M <lower bound for minimum support>
* The lower bound for the minimum support. (default = 0.1)
* </pre>
*
* <pre>
* -S <significance level>
* If used, rules are tested for significance at
* the given level. Slower. (default = no significance testing)
* </pre>
*
* <pre>
* -I
* If set the itemsets found are also output. (default = no)
* </pre>
*
* <pre>
* -R
* Remove columns that contain all missing values (default = no)
* </pre>
*
* <pre>
* -V
* Report progress iteratively. (default = no)
* </pre>
*
* <pre>
* -A
* If set class association rules are mined. (default = no)
* </pre>
*
* <pre>
* -c <the class index>
* The class index. (default = last)
* </pre>
*
* <!-- options-end -->
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class FilteredAssociator extends SingleAssociatorEnhancer implements
  AssociationRulesProducer {

  /** for serialization */
  static final long serialVersionUID = -4523450618538717400L;

  /** The filter applied to the training data before the base associator runs */
  protected Filter m_Filter;

  /** The (string-free) header of the filtered training instances */
  protected Instances m_FilteredInstances;

  /**
   * The class index as configured by the user: -1 = unset, 0 = last
   * attribute, otherwise a 1-based attribute index (see setOptions() and
   * buildAssociations() for the encoding).
   */
  protected int m_ClassIndex;

  /**
   * Default constructor: Apriori as base associator, and a MultiFilter
   * containing only ReplaceMissingValues as the preprocessing filter.
   */
  public FilteredAssociator() {
    m_Associator = new Apriori();
    m_Filter = new MultiFilter();
    ((MultiFilter) m_Filter)
      .setFilters(new Filter[] { new weka.filters.unsupervised.attribute.ReplaceMissingValues() });
    m_ClassIndex = -1;
  }

  /**
   * Returns a string describing this Associator
   *
   * @return a description of the Associator suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String globalInfo() {
    return "Class for running an arbitrary associator on data that has been passed "
      + "through an arbitrary filter. Like the associator, the structure of the filter "
      + "is based exclusively on the training data and test instances will be processed "
      + "by the filter without changing their structure.";
  }

  /**
   * String describing default associator.
   *
   * @return the default associator classname
   */
  @Override
  protected String defaultAssociatorString() {
    return Apriori.class.getName();
  }

  /**
   * Returns an enumeration describing the available options: -F (filter
   * specification), -c (class index), plus the options of the base
   * associator.
   *
   * @return an enumeration of all the available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> result = new Vector<Option>();
    result.addElement(new Option(
      "\tFull class name of filter to use, followed\n"
        + "\tby filter options.\n"
        + "\teg: \"weka.filters.unsupervised.attribute.Remove -V -R 1,2\"\n"
        + "\t(default: weka.filters.MultiFilter with\n"
        + "\tweka.filters.unsupervised.attribute.ReplaceMissingValues)", "F",
      1, "-F <filter specification>"));
    result.addElement(new Option("\tThe class index.\n"
      + "\t(default: -1, i.e. unset)", "c", 1, "-c <the class index>"));
    result.addAll(Collections.list(super.listOptions()));
    return result.elements();
  }

  /**
   * Parses a given list of options. Valid options are -F (a filter
   * specification: class name followed by filter options), -c (the class
   * index: 'first', 'last' or a 1-based number), plus all options accepted
   * by the base associator; see the class documentation for the full
   * auto-generated option list.
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  @Override
  public void setOptions(String[] options) throws Exception {
    String tmpStr;
    tmpStr = Utils.getOption('F', options);
    if (tmpStr.length() > 0) {
      String[] filterSpec = Utils.splitOptions(tmpStr);
      if (filterSpec.length == 0) {
        throw new IllegalArgumentException(
          "Invalid filter specification string");
      }
      String filterName = filterSpec[0];
      filterSpec[0] = "";
      setFilter((Filter) Utils.forName(Filter.class, filterName, filterSpec));
    } else {
      // NOTE(review): when -F is absent this installs a supervised
      // Discretize filter, whereas the constructor (and the documented
      // default) use MultiFilter + ReplaceMissingValues -- confirm this
      // inconsistency is intentional before relying on it.
      setFilter(new weka.filters.supervised.attribute.Discretize());
    }
    tmpStr = Utils.getOption('c', options);
    if (tmpStr.length() > 0) {
      if (tmpStr.equalsIgnoreCase("last")) {
        // 0 is the internal encoding for "last attribute"
        setClassIndex(0);
      } else if (tmpStr.equalsIgnoreCase("first")) {
        // indices supplied via -c are 1-based
        setClassIndex(1);
      } else {
        setClassIndex(Integer.parseInt(tmpStr));
      }
    } else {
      setClassIndex(-1);
    }
    super.setOptions(options);
  }

  /**
   * Gets the current settings of the Associator.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {
    Vector<String> result = new Vector<String>();
    result.add("-F");
    result.add("" + getFilterSpec());
    result.add("-c");
    result.add("" + getClassIndex());
    Collections.addAll(result, super.getOptions());
    return result.toArray(new String[result.size()]);
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String filterTipText() {
    return "The filter to be used.";
  }

  /**
   * Sets the filter
   *
   * @param value the filter with all options set.
   */
  public void setFilter(Filter value) {
    m_Filter = value;
  }

  /**
   * Gets the filter used.
   *
   * @return the current filter
   */
  public Filter getFilter() {
    return m_Filter;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String classIndexTipText() {
    return "Index of the class attribute. If set to -1, the last attribute is taken as class attribute.";
  }

  /**
   * Sets the class index
   *
   * @param value the class index
   */
  public void setClassIndex(int value) {
    m_ClassIndex = value;
  }

  /**
   * Gets the class index
   *
   * @return the index of the class attribute
   */
  public int getClassIndex() {
    return m_ClassIndex;
  }

  /**
   * Gets the filter specification string, which contains the class name of
   * the filter and any options to the filter
   *
   * @return the filter string.
   */
  protected String getFilterSpec() {
    Filter c = getFilter();
    if (c instanceof OptionHandler) {
      return c.getClass().getName() + " "
        + Utils.joinOptions(((OptionHandler) c).getOptions());
    } else {
      return c.getClass().getName();
    }
  }

  /**
   * Returns default capabilities of the associator: those of the configured
   * filter when one is set, otherwise all capabilities disabled.
   *
   * @return the capabilities of this associator
   */
  @Override
  public Capabilities getCapabilities() {
    Capabilities result;
    if (getFilter() == null) {
      result = super.getCapabilities();
      result.disableAll();
    } else {
      result = getFilter().getCapabilities();
    }
    result.enable(Capability.NO_CLASS);
    // set dependencies
    for (Capability cap : Capability.values()) {
      result.enableDependency(cap);
    }
    return result;
  }

  /**
   * Build the associator on the filtered data.
   *
   * @param data the training data
   * @throws Exception if the Associator could not be built successfully
   */
  @Override
  public void buildAssociations(Instances data) throws Exception {
    if (m_Associator == null) {
      throw new Exception("No base associator has been set!");
    }
    // create copy and set class-index
    data = new Instances(data);
    if (getClassIndex() == 0) {
      // 0 encodes "last attribute" (see setOptions)
      data.setClassIndex(data.numAttributes() - 1);
    } else {
      // m_ClassIndex is 1-based; Instances uses 0-based indices.
      // NOTE(review): when the class index is unset (-1) this passes -2 to
      // setClassIndex; negative values appear to act as "no class"
      // downstream -- confirm against weka.core.Instances.
      data.setClassIndex(getClassIndex() - 1);
    }
    if (getClassIndex() != -1) {
      // remove instances with missing class
      data.deleteWithMissingClass();
    }
    m_Filter.setInputFormat(data); // filter capabilities are checked here
    data = Filter.useFilter(data, m_Filter);
    // can associator handle the data?
    getAssociator().getCapabilities().testWithFail(data);
    // keep only the string-free header for later toString() output
    m_FilteredInstances = data.stringFreeStructure();
    m_Associator.buildAssociations(data);
  }

  /**
   * Gets the list of mined association rules, wrapped together with the
   * preprocessing filter.
   *
   * @return the list of association rules discovered during mining. Returns
   *         null if mining hasn't been performed yet or the base associator
   *         does not produce rules.
   */
  @Override
  public AssociationRules getAssociationRules() {
    if (m_Associator instanceof AssociationRulesProducer) {
      AssociationRules rules = ((AssociationRulesProducer) m_Associator)
        .getAssociationRules();
      // construct a new FilteredAssociationRules
      FilteredAssociationRules fRules = new FilteredAssociationRules(
        FilteredAssociator.this, m_Filter, rules);
      return fRules;
    }
    // return null if we don't wrap an association rules producer
    return null;
  }

  /**
   * Gets a list of the names of the metrics output for each rule. This list
   * should be the same (in terms of the names and order thereof) as that
   * produced by AssociationRule.getMetricNamesForRule().
   *
   * @return an array of the names of the metrics available for each rule
   *         learned by this producer (empty if the base associator is not a
   *         rules producer).
   */
  @Override
  public String[] getRuleMetricNames() {
    if (m_Associator instanceof AssociationRulesProducer) {
      return ((AssociationRulesProducer) m_Associator).getRuleMetricNames();
    }
    return new String[0];
  }

  /**
   * Returns true if this AssociationRulesProducer can actually produce rules,
   * i.e. if the wrapped base associator can.
   *
   * @return true if this producer can produce rules in its current
   *         configuration
   */
  @Override
  public boolean canProduceRules() {
    if (m_Associator instanceof AssociationRulesProducer) {
      return ((AssociationRulesProducer) m_Associator).canProduceRules();
    }
    return false;
  }

  /**
   * Output a representation of this associator
   *
   * @return a representation of this associator
   */
  @Override
  public String toString() {
    String result;
    if (m_FilteredInstances == null) {
      result = "FilteredAssociator: No model built yet.";
    } else {
      result = "FilteredAssociator using " + getAssociatorSpec()
        + " on data filtered through " + getFilterSpec()
        + "\n\nFiltered Header\n" + m_FilteredInstances.toString()
        + "\n\nAssociator Model\n" + m_Associator.toString();
    }
    return result;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }

  /**
   * Main method for running this class.
   *
   * @param args commandline arguments, use "-h" for full list
   */
  public static void main(String[] args) {
    runAssociator(new FilteredAssociator(), args);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/associations/Item.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Item.java
* Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.associations;
import java.io.Serializable;
import weka.core.Attribute;
/**
 * Class that encapsulates information about an individual item. An item
 * is a value of a nominal attribute, so this class has a backing Attribute.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com).
 * @version $Revision$
 */
public abstract class Item implements Serializable, Comparable<Item> {

  /** For serialization */
  private static final long serialVersionUID = -430198211081183575L;

  /** The frequency of this item (number of transactions containing it) */
  protected int m_frequency;

  /** The attribute that backs this item */
  protected Attribute m_attribute;

  /**
   * Constructor.
   *
   * @param att the attribute that backs this item.
   */
  public Item(Attribute att) {
    m_attribute = att;
  }

  /**
   * Increase the frequency of this item.
   *
   * @param f the amount to increase the frequency by.
   */
  public void increaseFrequency(int f) {
    m_frequency += f;
  }

  /**
   * Decrease the frequency of this item.
   *
   * @param f the amount by which to decrease the frequency.
   */
  public void decreaseFrequency(int f) {
    m_frequency -= f;
  }

  /**
   * Increment the frequency of this item by one.
   */
  public void increaseFrequency() {
    m_frequency++;
  }

  /**
   * Decrement the frequency of this item by one.
   */
  public void decreaseFrequency() {
    m_frequency--;
  }

  /**
   * Get the frequency of this item.
   *
   * @return the frequency.
   */
  public int getFrequency() {
    return m_frequency;
  }

  /**
   * Get the attribute that this item originates from.
   *
   * @return the corresponding attribute.
   */
  public Attribute getAttribute() {
    return m_attribute;
  }

  /**
   * Get this item's value as a String.
   *
   * @return this item's value as a String.
   */
  public abstract String getItemValueAsString();

  /**
   * Get this item's comparison operator as a String.
   *
   * @return this item's comparison operator as a String.
   */
  public abstract String getComparisonAsString();

  /**
   * A string representation of this item without frequency information.
   *
   * @return a string representation of this item.
   */
  @Override
  public String toString() {
    return toString(false);
  }

  /**
   * A string representation of this item. This default implementation just
   * prints the attribute name and (optionally) frequency information.
   *
   * @param freq true if the frequency should be included.
   * @return a string representation of this item.
   */
  public String toString(boolean freq) {
    String result = m_attribute.name();
    if (freq) {
      result += ":" + m_frequency;
    }
    return result;
  }

  /**
   * Ensures that items will be sorted in descending order of frequency.
   * Frequency ties are broken by attribute name (in reverse lexicographic
   * order, as the name comparison is negated). Note that this ordering never
   * returns 0 and is therefore deliberately NOT consistent with equals();
   * it is kept unchanged for backward compatibility.
   *
   * @param comp the Item to compare against.
   * @return a negative value if this item sorts before comp, a positive
   *         value otherwise.
   */
  @Override
  public int compareTo(Item comp) {
    if (m_frequency == comp.getFrequency()) {
      // sort by name (negated comparison)
      return -1 * m_attribute.name().compareTo(comp.getAttribute().name());
    }
    if (comp.getFrequency() < m_frequency) {
      return -1;
    }
    return 1;
  }

  /**
   * Equality is based solely on the backing attribute; the (mutable)
   * frequency is deliberately ignored.
   *
   * @param compareTo the object to compare against.
   * @return true if this Item is equal to the argument.
   */
  @Override
  public boolean equals(Object compareTo) {
    if (!(compareTo instanceof Item)) {
      return false;
    }
    Item b = (Item) compareTo;
    return m_attribute.equals(b.getAttribute());
  }

  /**
   * Hash code consistent with equals(). The previous implementation also
   * multiplied by the mutable m_frequency, so two equal items (same
   * attribute, different counts) could produce different hash codes -- a
   * violation of the Object.hashCode() contract that breaks hash-based
   * collections. The frequency term has therefore been dropped.
   *
   * @return a hash code based only on the backing attribute.
   */
  @Override
  public int hashCode() {
    return m_attribute.name().hashCode() ^ m_attribute.numValues();
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/associations/ItemSet.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ItemSet.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.associations;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Hashtable;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.WekaEnumeration;
/**
* Class for storing a set of items. Item sets are stored in a lexicographic
* order, which is determined by the header information of the set of instances
* used for generating the set of items. All methods in this class assume that
* item sets are stored in lexicographic order. The class provides the general
* methods used for item sets in class - and standard association rule mining.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public class ItemSet implements Serializable, RevisionHandler {
/** for serialization */
private static final long serialVersionUID = 2724000045282835791L;
/** The items stored as an array of of ints. */
protected int[] m_items;
/** Counter for how many transactions contain this item set. */
protected int m_counter;
/**
* Holds support of consequence only in the case where this ItemSet is
* a consequence of a rule (as m_counter in this case actually holds the
* support of the rule as a whole, i.e. premise and consequence)
*/
protected int m_secondaryCounter;
/** The total number of transactions */
protected int m_totalTransactions;
/**
 * Constructor.
 *
 * @param totalTrans the total number of transactions in the data
 */
public ItemSet(int totalTrans) {
  m_totalTransactions = totalTrans;
}
/**
 * Constructor. The item set starts out with a support count of 1.
 *
 * @param totalTrans the total number of transactions in the data
 * @param array the attribute values encoded in an int array, where -1 marks
 *          an attribute the item set places no constraint on
 */
public ItemSet(int totalTrans, int[] array) {
  m_totalTransactions = totalTrans;
  m_items = array;
  m_counter = 1;
}
/**
 * Constructor. Both the total transaction count and the support counter
 * start at 0.
 *
 * @param array the item set represented as an int array
 */
public ItemSet(int[] array) {
  m_items = array;
  m_counter = 0;
}
/**
 * Checks if an instance contains an item set, where a value of zero (the
 * first nominal value) is treated the same as a missing value.
 *
 * @param instance the instance to be tested
 * @return true if the given instance contains this item set
 */
public boolean containedByTreatZeroAsMissing(Instance instance) {
  if (instance instanceof weka.core.SparseInstance) {
    // Sparse case: walk the instance's stored (non-zero) values and the
    // item array in parallel, in attribute-index order.
    int numInstVals = instance.numValues();
    int numItemSetVals = m_items.length;
    for (int p1 = 0, p2 = 0; p1 < numInstVals || p2 < numItemSetVals;) {
      // Sentinel once the instance's stored values are exhausted.
      int instIndex = Integer.MAX_VALUE;
      if (p1 < numInstVals) {
        instIndex = instance.index(p1);
      }
      int itemIndex = p2;
      if (m_items[itemIndex] > -1) {
        // The item set constrains this attribute: the instance must have a
        // stored, non-missing value at exactly this index that matches.
        if (itemIndex != instIndex) {
          return false;
        } else {
          if (instance.isMissingSparse(p1)) {
            return false;
          }
          if (m_items[itemIndex] != (int) instance.valueSparse(p1)) {
            return false;
          }
        }
        p1++;
        p2++;
      } else {
        // Unconstrained attribute: advance whichever pointer lags behind.
        if (itemIndex < instIndex) {
          p2++;
        } else if (itemIndex == instIndex) {
          p2++;
          p1++;
        }
      }
    }
  } else {
    // Dense case: a zero value is rejected just like a missing one,
    // mirroring the sparse representation in which zeros are not stored.
    for (int i = 0; i < instance.numAttributes(); i++) {
      if (m_items[i] > -1) {
        if (instance.isMissing(i) || (int) instance.value(i) == 0) {
          return false;
        }
        if (m_items[i] != (int) instance.value(i)) {
          return false;
        }
      }
    }
  }
  return true;
}
/**
 * Checks if an instance contains an item set. An attribute matches when the
 * item set either places no constraint on it (-1) or the instance has the
 * exact required, non-missing value.
 *
 * @param instance the instance to be tested
 * @return true if the given instance contains this item set
 */
public boolean containedBy(Instance instance) {
  int numAtts = instance.numAttributes();
  for (int att = 0; att < numAtts; att++) {
    int required = m_items[att];
    if (required <= -1) {
      continue; // attribute unconstrained by this item set
    }
    if (instance.isMissing(att) || required != (int) instance.value(att)) {
      return false;
    }
  }
  return true;
}
/**
 * Deletes all item sets whose support count falls outside the given bounds.
 *
 * @param itemSets the set of item sets to be pruned
 * @param minSupport the minimum number of transactions to be covered
 * @param maxSupport the maximum number of transactions to be covered
 * @return the reduced set of item sets
 */
public static ArrayList<Object> deleteItemSets(ArrayList<Object> itemSets,
  int minSupport, int maxSupport) {
  ArrayList<Object> kept = new ArrayList<Object>(itemSets.size());
  for (Object element : itemSets) {
    ItemSet candidate = (ItemSet) element;
    boolean withinBounds = candidate.m_counter >= minSupport
      && candidate.m_counter <= maxSupport;
    if (withinBounds) {
      kept.add(candidate);
    }
  }
  return kept;
}
/**
 * Tests if two item sets are equal: same concrete class and identical item
 * arrays.
 *
 * @param itemSet another item set
 * @return true if this item set contains the same items as the given one
 */
@Override
public boolean equals(Object itemSet) {
  if (itemSet == null || !itemSet.getClass().equals(this.getClass())) {
    return false;
  }
  ItemSet other = (ItemSet) itemSet;
  if (other.m_items.length != m_items.length) {
    return false;
  }
  for (int idx = 0; idx < m_items.length; idx++) {
    if (other.m_items[idx] != m_items[idx]) {
      return false;
    }
  }
  return true;
}
/**
 * Return a hashtable filled with the given item sets, mapping each item set
 * to its support count.
 *
 * @param itemSets the set of item sets to be used for filling the hash table
 * @param initialSize the initial size of the hashtable
 * @return the generated hashtable
 */
public static Hashtable<ItemSet, Integer> getHashtable(
  ArrayList<Object> itemSets, int initialSize) {
  Hashtable<ItemSet, Integer> hashtable = new Hashtable<ItemSet, Integer>(
    initialSize);
  for (int i = 0; i < itemSets.size(); i++) {
    ItemSet current = (ItemSet) itemSets.get(i);
    // Integer.valueOf instead of the deprecated 'new Integer(...)':
    // identical value semantics, but small counts reuse cached instances.
    hashtable.put(current, Integer.valueOf(current.m_counter));
  }
  return hashtable;
}
/**
 * Produces a hash code for an item set by combining every item value with
 * its position, accumulating in a long before truncating to int.
 *
 * @return a hash code for a set of items
 */
@Override
public int hashCode() {
  long accumulated = 0;
  for (int position = m_items.length - 1; position >= 0; position--) {
    accumulated += (position * m_items[position]);
  }
  return (int) accumulated;
}
/**
 * Merges all item sets in the set of (k-1)-item sets to create the (k)-item
 * sets and updates the counters.
 *
 * @param itemSets the set of (k-1)-item sets
 * @param size the value of (k-1)
 * @param totalTrans the total number of transactions
 * @return the generated (k)-item sets
 */
public static ArrayList<Object> mergeAllItemSets(ArrayList<Object> itemSets,
  int size, int totalTrans) {
  ArrayList<Object> newVector = new ArrayList<Object>();
  ItemSet result;
  int numFound, k;
  for (int i = 0; i < itemSets.size(); i++) {
    ItemSet first = (ItemSet) itemSets.get(i);
    out: for (int j = i + 1; j < itemSets.size(); j++) {
      ItemSet second = (ItemSet) itemSets.get(j);
      result = new ItemSet(totalTrans);
      result.m_items = new int[first.m_items.length];
      // Find and copy common prefix of size 'size': both sets must agree on
      // their first 'size' constrained attributes, otherwise the pair cannot
      // merge and the labelled break skips to the next 'first'.
      numFound = 0;
      k = 0;
      while (numFound < size) {
        if (first.m_items[k] == second.m_items[k]) {
          if (first.m_items[k] != -1) {
            // only constrained attributes count toward the prefix
            numFound++;
          }
          result.m_items[k] = first.m_items[k];
        } else {
          break out;
        }
        k++;
      }
      // Check difference: beyond the shared prefix the two sets must not
      // constrain the same attribute; copy whichever value is set.
      while (k < first.m_items.length) {
        if ((first.m_items[k] != -1) && (second.m_items[k] != -1)) {
          break;
        } else {
          if (first.m_items[k] != -1) {
            result.m_items[k] = first.m_items[k];
          } else {
            result.m_items[k] = second.m_items[k];
          }
        }
        k++;
      }
      if (k == first.m_items.length) {
        // Reached the end without a clash: a valid merged (k)-item set
        // whose support still needs counting.
        result.m_counter = 0;
        newVector.add(result);
      }
    }
  }
  return newVector;
}
/**
 * Prunes a set of (k)-item sets using the given (k-1)-item sets: a candidate
 * is kept only if every one of its (k-1)-subsets is present in the hashtable
 * of (k-1)-item sets.
 *
 * @param toPrune the set of (k)-item sets to be pruned
 * @param kMinusOne the (k-1)-item sets to be used for pruning
 * @return the pruned set of item sets
 */
public static ArrayList<Object> pruneItemSets(ArrayList<Object> toPrune,
  Hashtable<ItemSet, Integer> kMinusOne) {
  ArrayList<Object> newVector = new ArrayList<Object>(toPrune.size());
  int help, j;
  for (int i = 0; i < toPrune.size(); i++) {
    ItemSet current = (ItemSet) toPrune.get(i);
    for (j = 0; j < current.m_items.length; j++) {
      if (current.m_items[j] != -1) {
        // Temporarily blank out item j to form a (k-1)-subset, look it up
        // (relies on ItemSet.equals()/hashCode()), then restore the item.
        help = current.m_items[j];
        current.m_items[j] = -1;
        if (kMinusOne.get(current) == null) {
          current.m_items[j] = help;
          // a (k-1)-subset is absent: prune this candidate
          break;
        } else {
          current.m_items[j] = help;
        }
      }
    }
    // j only reaches the array length when no subset lookup failed above.
    if (j == current.m_items.length) {
      newVector.add(current);
    }
  }
  return newVector;
}
/**
 * Prunes a set of rules in place, keeping only those whose confidence is at
 * least minConfidence. The optional parallel metric lists (lift, leverage,
 * conviction) are filtered in step with the rules.
 *
 * @param rules a two-dimensional array of lists of item sets. The first list
 *          of item sets contains the premises, the second one the
 *          consequences, the third the confidence values; lists 3-5, if
 *          present, hold lift, leverage and conviction.
 * @param minConfidence the minimum confidence the rules have to have
 */
public static void pruneRules(ArrayList<Object>[] rules, double minConfidence) {
  ArrayList<Object> newPremises = new ArrayList<Object>(rules[0].size()), newConsequences = new ArrayList<Object>(
    rules[1].size()), newConf = new ArrayList<Object>(rules[2].size());
  ArrayList<Object> newLift = null, newLev = null, newConv = null;
  if (rules.length > 3) {
    // Extra metric lists are present: prepare filtered copies for them too.
    newLift = new ArrayList<Object>(rules[3].size());
    newLev = new ArrayList<Object>(rules[4].size());
    newConv = new ArrayList<Object>(rules[5].size());
  }
  for (int i = 0; i < rules[0].size(); i++) {
    // Negated '<' keeps confidence >= minConfidence (and also NaN values).
    if (!(((Double) rules[2].get(i)).doubleValue() < minConfidence)) {
      newPremises.add(rules[0].get(i));
      newConsequences.add(rules[1].get(i));
      newConf.add(rules[2].get(i));
      if (rules.length > 3) {
        newLift.add(rules[3].get(i));
        newLev.add(rules[4].get(i));
        newConv.add(rules[5].get(i));
      }
    }
  }
  rules[0] = newPremises;
  rules[1] = newConsequences;
  rules[2] = newConf;
  if (rules.length > 3) {
    rules[3] = newLift;
    rules[4] = newLev;
    rules[5] = newConv;
  }
}
/**
 * Converts the header info of the given set of instances into a set of item
 * sets (singletons). The ordering of values in the header file determines the
 * lexicographic order.
 *
 * @param instances the set of instances whose header info is to be used
 * @return a set of item sets, each containing a single item
 * @exception Exception if singletons can't be generated successfully
 */
public static ArrayList<Object> singletons(Instances instances)
  throws Exception {

  ArrayList<Object> result = new ArrayList<Object>();
  int numAttributes = instances.numAttributes();

  for (int att = 0; att < numAttributes; att++) {
    if (instances.attribute(att).isNumeric()) {
      throw new Exception("Can't handle numeric attributes!");
    }
    // One singleton per value of this (nominal) attribute; all other
    // positions are marked -1 ("not part of the set").
    for (int val = 0; val < instances.attribute(att).numValues(); val++) {
      ItemSet singleton = new ItemSet(instances.numInstances());
      int[] items = new int[numAttributes];
      java.util.Arrays.fill(items, -1);
      items[att] = val;
      singleton.m_items = items;
      result.add(singleton);
    }
  }
  return result;
}
/**
 * Outputs the support for an item set.
 *
 * @return the support (value of the transaction counter)
 */
public int support() {
  return this.m_counter;
}
/**
 * Returns the contents of an item set as a string: each contained item is
 * printed as "name=value " and the counter is appended at the end.
 *
 * @param instances contains the relevant header information
 * @return string describing the item set
 */
public String toString(Instances instances) {
  StringBuilder text = new StringBuilder();

  for (int att = 0; att < instances.numAttributes(); att++) {
    if (m_items[att] != -1) {
      text.append(instances.attribute(att).name()).append('=');
      text.append(instances.attribute(att).value(m_items[att])).append(' ');
    }
  }
  text.append(m_counter);
  return text.toString();
}
/**
 * Returns the contents of an item set as a delimited string. Items are joined
 * with the inner delimiter; the trailing inner delimiter (or, for an empty
 * item set with non-space delimiters, a single outer delimiter) separates the
 * items from the counter.
 *
 * @param instances contains the relevant header information
 * @param outerDelim the outer delimiter
 * @param innerDelim the inner delimiter
 * @return string describing the item set
 */
public String toString(Instances instances, char outerDelim, char innerDelim) {
  StringBuilder buf = new StringBuilder();

  for (int att = 0; att < instances.numAttributes(); att++) {
    if (m_items[att] != -1) {
      buf.append(instances.attribute(att).name()).append('=')
        .append(instances.attribute(att).value(m_items[att]))
        .append(innerDelim);
    }
  }
  int len = buf.length();
  if (len > 0) {
    // Replace the trailing inner delimiter with the outer one.
    buf.setCharAt(len - 1, outerDelim);
  } else if (outerDelim != ' ' || innerDelim != ' ') {
    // Empty item set: still emit the outer delimiter unless both
    // delimiters are plain spaces.
    buf.append(outerDelim);
  }
  buf.append(m_counter);
  return buf.toString();
}
/**
 * Updates the counter of this item set with respect to the given transaction.
 *
 * @param instance the instance to be used for updating the counter
 */
public void upDateCounter(Instance instance) {
  if (!containedBy(instance)) {
    return;
  }
  m_counter++;
}
/**
 * Updates the counter of this item set with respect to the given transaction,
 * treating zero values as missing.
 *
 * @param instance the instance to be used for updating the counter
 */
public void updateCounterTreatZeroAsMissing(Instance instance) {
  if (!containedByTreatZeroAsMissing(instance)) {
    return;
  }
  m_counter++;
}
/**
 * Updates counters for a set of item sets and a set of instances.
 *
 * @param itemSets the set of item sets which are to be updated
 * @param instances the instances to be used for updating the counters
 */
public static void upDateCounters(ArrayList<Object> itemSets,
  Instances instances) {

  for (int i = 0; i < instances.numInstances(); i++) {
    Instance transaction = instances.instance(i);
    for (Object set : itemSets) {
      ((ItemSet) set).upDateCounter(transaction);
    }
  }
}
/**
 * Updates counters for a set of item sets and a set of instances, treating
 * zero values as missing.
 *
 * @param itemSets the set of item sets which are to be updated
 * @param instances the instances to be used for updating the counters
 */
public static void upDateCountersTreatZeroAsMissing(
  ArrayList<Object> itemSets, Instances instances) {

  for (int i = 0; i < instances.numInstances(); i++) {
    Instance transaction = instances.instance(i);
    for (Object set : itemSets) {
      ((ItemSet) set).updateCounterTreatZeroAsMissing(transaction);
    }
  }
}
/**
 * Gets the counter.
 *
 * @return the counter
 */
public int counter() {
  return this.m_counter;
}

/**
 * Gets the item set as an int array.
 *
 * @return int array representing the item set
 */
public int[] items() {
  return this.m_items;
}

/**
 * Gets the index of the value of the specified attribute.
 *
 * @param k the attribute index
 * @return the index of the attribute value
 */
public int itemAt(int k) {
  return this.m_items[k];
}

/**
 * Sets the counter.
 *
 * @param count the counter
 */
public void setCounter(int count) {
  this.m_counter = count;
}

/**
 * Sets the item set.
 *
 * @param items an int array representing the item set
 */
public void setItem(int[] items) {
  this.m_items = items;
}

/**
 * Sets the index of an attribute value.
 *
 * @param value the index of the attribute value
 * @param k the index of the attribute
 */
public void setItemAt(int value, int k) {
  this.m_items[k] = value;
}

/**
 * Gets the item array.
 *
 * @return the item array
 */
public int[] getItems() {
  return this.m_items;
}

/**
 * Gets the total number of transactions.
 *
 * @return the total number of transactions
 */
public int getTotalTransactions() {
  return this.m_totalTransactions;
}

/**
 * Returns the revision string.
 *
 * @return the revision
 */
@Override
public String getRevision() {
  return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/associations/LabeledItemSet.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* LabeledItemSet.java
* Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.associations;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Hashtable;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.WekaEnumeration;
/**
 * Class for storing a set of items together with a class label. Item sets are
 * stored in a lexicographic order, which is determined by the header
 * information of the set of instances used for generating the set of items. All
 * methods in this class assume that item sets are stored in lexicographic
 * order. The class provides the methods used for item sets in class association
 * rule mining. Because every item set knows its class label, the training set
 * can be split up virtually.
 *
 * @author Stefan Mutter (mutter@cs.waikato.ac.nz)
 * @version $Revision$
 */
public class LabeledItemSet extends ItemSet implements Serializable,
  RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = 4158771925518299903L;

  /** The class label. */
  protected int m_classLabel;

  /** The support of the rule. */
  protected int m_ruleSupCounter;

  // NOTE(review): equals() is overridden below but hashCode() is not, so
  // hash-based lookups use the inherited ItemSet hash, which cannot include
  // the class label. Confirm this is intentional before adding an override,
  // since getHashtable()/pruneItemSets() depend on the current behavior.

  /**
   * Constructor
   *
   * @param totalTrans the total number of transactions
   * @param classLabel the class label
   */
  public LabeledItemSet(int totalTrans, int classLabel) {

    super(totalTrans);
    m_classLabel = classLabel;
  }

  /**
   * Deletes all item sets that don't have minimum support or exceed maximum
   * support.
   *
   * @param itemSets the set of item sets to be pruned
   * @param minSupport the minimum number of transactions to be covered
   * @param maxSupport the maximum support
   * @return the reduced set of item sets
   */
  public static ArrayList<Object> deleteItemSets(ArrayList<Object> itemSets,
    int minSupport, int maxSupport) {

    ArrayList<Object> newVector = new ArrayList<Object>(itemSets.size());

    for (int i = 0; i < itemSets.size(); i++) {
      LabeledItemSet current = (LabeledItemSet) itemSets.get(i);
      if ((current.m_ruleSupCounter >= minSupport)
        && (current.m_ruleSupCounter <= maxSupport)) {
        newVector.add(current);
      }
    }
    return newVector;
  }

  /**
   * Tests if two item sets are equal.
   *
   * @param itemSet another item set
   * @return true if this item set contains the same items and the same class
   *         label as the given one
   */
  @Override
  public final boolean equals(Object itemSet) {

    if (!(this.equalCondset(itemSet))) {
      return false;
    }
    if (m_classLabel != ((LabeledItemSet) itemSet).m_classLabel) {
      return false;
    }
    return true;
  }

  /**
   * Compares two item sets on their items only (the class label is ignored).
   *
   * @param itemSet an item set
   * @return true if the item sets are equal, false otherwise
   */
  public final boolean equalCondset(Object itemSet) {

    if ((itemSet == null) || !(itemSet.getClass().equals(this.getClass()))) {
      return false;
    }
    if (m_items.length != ((ItemSet) itemSet).items().length) {
      return false;
    }
    for (int i = 0; i < m_items.length; i++) {
      if (m_items[i] != ((ItemSet) itemSet).itemAt(i)) {
        return false;
      }
    }
    return true;
  }

  /**
   * Return a hashtable filled with the given item sets, mapping each item set
   * to its class label.
   *
   * @param itemSets the set of item sets to be used for filling the hash table
   * @param initialSize the initial size of the hashtable
   * @return the generated hashtable
   */
  public static Hashtable<ItemSet, Integer> getHashtable(
    ArrayList<Object> itemSets, int initialSize) {

    Hashtable<ItemSet, Integer> hashtable = new Hashtable<ItemSet, Integer>(
      initialSize);

    for (int i = 0; i < itemSets.size(); i++) {
      LabeledItemSet current = (LabeledItemSet) itemSets.get(i);
      // Integer.valueOf instead of the deprecated Integer(int) constructor.
      hashtable.put(current, Integer.valueOf(current.m_classLabel));
    }
    return hashtable;
  }

  /**
   * Merges all item sets in the set of (k-1)-item sets to create the (k)-item
   * sets and updates the counters. Only item sets with the same class label
   * are merged.
   *
   * @param itemSets the set of (k-1)-item sets
   * @param size the value of (k-1)
   * @param totalTrans the total number of transactions
   * @return the generated (k)-item sets
   */
  public static ArrayList<Object> mergeAllItemSets(ArrayList<Object> itemSets,
    int size, int totalTrans) {

    ArrayList<Object> newVector = new ArrayList<Object>();
    LabeledItemSet result;
    int numFound, k;

    for (int i = 0; i < itemSets.size(); i++) {
      LabeledItemSet first = (LabeledItemSet) itemSets.get(i);
      out: for (int j = i + 1; j < itemSets.size(); j++) {
        LabeledItemSet second = (LabeledItemSet) itemSets.get(j);
        // Skip ahead until a partner with the same class label is found.
        while (first.m_classLabel != second.m_classLabel) {
          j++;
          if (j == itemSets.size()) {
            break out;
          }
          second = (LabeledItemSet) itemSets.get(j);
        }
        result = new LabeledItemSet(totalTrans, first.m_classLabel);
        result.m_items = new int[first.m_items.length];

        // Find and copy common prefix of size 'size'
        numFound = 0;
        k = 0;
        while (numFound < size) {
          if (first.m_items[k] == second.m_items[k]) {
            if (first.m_items[k] != -1) {
              numFound++;
            }
            result.m_items[k] = first.m_items[k];
          } else {
            break out;
          }
          k++;
        }

        // Check difference
        while (k < first.m_items.length) {
          if ((first.m_items[k] != -1) && (second.m_items[k] != -1)) {
            break;
          } else {
            if (first.m_items[k] != -1) {
              result.m_items[k] = first.m_items[k];
            } else {
              result.m_items[k] = second.m_items[k];
            }
          }
          k++;
        }
        if (k == first.m_items.length) {
          result.m_ruleSupCounter = 0;
          result.m_counter = 0;
          newVector.add(result);
        }
      }
    }
    return newVector;
  }

  /**
   * Splits the class attribute away. Depending on the invert flag, the
   * instances without class attribute or only the class attribute of all
   * instances is returned.
   *
   * @param instances the instances
   * @param invert flag; if true only the class attribute remains, otherwise
   *          the class attribute is the only attribute that is deleted.
   * @throws Exception exception if instances cannot be split
   * @return Instances without the class attribute or instances with only the
   *         class attribute
   */
  public static Instances divide(Instances instances, boolean invert)
    throws Exception {

    Instances newInstances = new Instances(instances);
    if (instances.classIndex() < 0) {
      throw new Exception(
        "For class association rule mining a class attribute has to be specified.");
    }
    if (invert) {
      for (int i = 0; i < newInstances.numAttributes(); i++) {
        if (i != newInstances.classIndex()) {
          newInstances.deleteAttributeAt(i);
          i--;
        }
      }
      return newInstances;
    } else {
      newInstances.setClassIndex(-1);
      newInstances.deleteAttributeAt(instances.classIndex());
      return newInstances;
    }
  }

  /**
   * Converts the header info of the given set of instances into a set of item
   * sets (singletons). The ordering of values in the header file determines the
   * lexicographic order. Each item set knows its class label.
   *
   * @param instancesNoClass instances without the class attribute
   * @param classes the values of the class attribute sorted according to
   *          instances
   * @return a set of item sets, each containing a single item
   * @exception Exception if singletons can't be generated successfully
   */
  public static ArrayList<Object> singletons(Instances instancesNoClass,
    Instances classes) throws Exception {

    ArrayList<Object> setOfItemSets = new ArrayList<Object>();
    LabeledItemSet current;

    // make singletons: one per attribute value per class label
    for (int i = 0; i < instancesNoClass.numAttributes(); i++) {
      if (instancesNoClass.attribute(i).isNumeric()) {
        throw new Exception("Can't handle numeric attributes!");
      }
      for (int j = 0; j < instancesNoClass.attribute(i).numValues(); j++) {
        for (int k = 0; k < (classes.attribute(0)).numValues(); k++) {
          current = new LabeledItemSet(instancesNoClass.numInstances(), k);
          current.m_items = new int[instancesNoClass.numAttributes()];
          for (int l = 0; l < instancesNoClass.numAttributes(); l++) {
            current.m_items[l] = -1;
          }
          current.m_items[i] = j;
          setOfItemSets.add(current);
        }
      }
    }
    return setOfItemSets;
  }

  /**
   * Prunes a set of (k)-item sets using the given (k-1)-item sets. A candidate
   * survives only if every (k-1)-subset is present with the same class label.
   *
   * @param toPrune the set of (k)-item sets to be pruned
   * @param kMinusOne the (k-1)-item sets to be used for pruning
   * @return the pruned set of item sets
   */
  public static ArrayList<Object> pruneItemSets(ArrayList<Object> toPrune,
    Hashtable<ItemSet, Integer> kMinusOne) {

    ArrayList<Object> newVector = new ArrayList<Object>(toPrune.size());
    int help, j;

    for (int i = 0; i < toPrune.size(); i++) {
      LabeledItemSet current = (LabeledItemSet) toPrune.get(i);
      for (j = 0; j < current.m_items.length; j++) {
        if (current.m_items[j] != -1) {
          // Temporarily blank item j to form a (k-1)-subset, then restore.
          help = current.m_items[j];
          current.m_items[j] = -1;
          if (kMinusOne.get(current) != null
            && (current.m_classLabel == (kMinusOne.get(current).intValue()))) {
            current.m_items[j] = help;
          } else {
            current.m_items[j] = help;
            break;
          }
        }
      }
      if (j == current.m_items.length) {
        newVector.add(current);
      }
    }
    return newVector;
  }

  /**
   * Outputs the support for an item set (the rule support counter, not the
   * plain transaction counter).
   *
   * @return the support
   */
  @Override
  public final int support() {
    return m_ruleSupCounter;
  }

  /**
   * Updates the counters of this item set with respect to the given
   * transaction.
   *
   * @param instanceNoClass instance without the class attribute
   * @param instanceClass the class value of the same instance
   */
  public final void upDateCounter(Instance instanceNoClass,
    Instance instanceClass) {

    if (containedBy(instanceNoClass)) {
      m_counter++;
      if (this.m_classLabel == instanceClass.value(0)) {
        m_ruleSupCounter++;
      }
    }
  }

  /**
   * Updates the counters of this item set with respect to the given
   * transaction, treating zero values as missing.
   *
   * @param instanceNoClass instance without the class attribute
   * @param instanceClass the class value of the same instance
   */
  public final void upDateCounterTreatZeroAsMissing(Instance instanceNoClass,
    Instance instanceClass) {

    if (containedByTreatZeroAsMissing(instanceNoClass)) {
      m_counter++;
      if (this.m_classLabel == instanceClass.value(0)) {
        m_ruleSupCounter++;
      }
    }
  }

  /**
   * Updates the counters for a set of item sets.
   *
   * @param itemSets the item sets
   * @param instancesNoClass instances without the class attribute
   * @param instancesClass the values of the class attribute sorted according
   *          to instances
   */
  public static void upDateCounters(ArrayList<Object> itemSets,
    Instances instancesNoClass, Instances instancesClass) {

    for (int i = 0; i < instancesNoClass.numInstances(); i++) {
      Enumeration<Object> enu = new WekaEnumeration<Object>(itemSets);
      while (enu.hasMoreElements()) {
        ((LabeledItemSet) enu.nextElement()).upDateCounter(
          instancesNoClass.instance(i), instancesClass.instance(i));
      }
    }
  }

  /**
   * Updates the counters for a set of item sets, treating zero values as
   * missing.
   *
   * @param itemSets the item sets
   * @param instancesNoClass instances without the class attribute
   * @param instancesClass the values of the class attribute sorted according
   *          to instances
   */
  public static void upDateCountersTreatZeroAsMissing(
    ArrayList<LabeledItemSet> itemSets, Instances instancesNoClass,
    Instances instancesClass) {

    for (int i = 0; i < instancesNoClass.numInstances(); i++) {
      Enumeration<LabeledItemSet> enu = new WekaEnumeration<LabeledItemSet>(
        itemSets);
      while (enu.hasMoreElements()) {
        enu.nextElement().upDateCounterTreatZeroAsMissing(
          instancesNoClass.instance(i), instancesClass.instance(i));
      }
    }
  }

  /**
   * Generates rules out of item sets. Every generated rule has the class
   * attribute in the consequence.
   *
   * @param minConfidence the minimum confidence
   * @param noPrune flag indicating whether the rules are pruned according to
   *          the minimum confidence value
   * @return a set of rules
   */
  public final ArrayList<Object>[] generateRules(double minConfidence,
    boolean noPrune) {

    ArrayList<Object> premises = new ArrayList<Object>();
    ArrayList<Object> consequences = new ArrayList<Object>();
    ArrayList<Object> conf = new ArrayList<Object>();
    @SuppressWarnings("unchecked")
    ArrayList<Object>[] rules = new ArrayList[3];
    ItemSet premise, consequence;

    // Generate all rules with class in the consequence.
    premise = new ItemSet(m_totalTransactions);
    consequence = new ItemSet(m_totalTransactions);
    int[] premiseItems = new int[m_items.length];
    int[] consequenceItems = new int[1];
    System.arraycopy(m_items, 0, premiseItems, 0, m_items.length);
    consequence.setItem(consequenceItems);
    premise.setItem(premiseItems);
    consequence.setItemAt(m_classLabel, 0);
    consequence.setCounter(this.m_ruleSupCounter);
    premise.setCounter(this.m_counter);
    premises.add(premise);
    consequences.add(consequence);
    // Double.valueOf instead of the deprecated Double(double) constructor.
    conf.add(Double.valueOf((double) this.m_ruleSupCounter
      / (double) this.m_counter));

    rules[0] = premises;
    rules[1] = consequences;
    rules[2] = conf;
    if (!noPrune) {
      pruneRules(rules, minConfidence);
    }

    return rules;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/associations/NominalItem.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* NominalItem.java
* Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.associations;
import java.io.Serializable;
import weka.core.Attribute;
/**
* Class that encapsulates a nominal item.
*
* @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
* @version $Revision$
*/
public class NominalItem extends Item implements Serializable {
/** For serialization */
private static final long serialVersionUID = 2182122099990462066L;
/** The index of the value considered to be positive */
protected int m_valueIndex;
/**
* Constructs a new NominalItem.
*
* @param att the attribute that backs the item.
* @param valueIndex the index of the value for this item.
* @throws Exception if the NominalItem can't be constructed.
*/
public NominalItem(Attribute att, int valueIndex) throws Exception {
super(att);
if (att.isNumeric()) {
throw new Exception("NominalItem must be constructed using a nominal attribute");
}
m_attribute = att;
if (m_attribute.numValues() == 1) {
m_valueIndex = 0; // unary attribute (? used to indicate absence from a basket)
} else {
m_valueIndex = valueIndex;
}
}
/**
* Get the value index for this item.
*
* @return the value index.
*/
public int getValueIndex() {
return m_valueIndex;
}
/**
* Get this item's value as a String.
*
* @return this item's value as a String.
*/
public String getItemValueAsString() {
return m_attribute.value(m_valueIndex);
}
/**
* Get this item's comparison operator as a String.
*
* @return this item's comparison operator as a String.
*/
public String getComparisonAsString() {
return "=";
}
/**
* A string representation of this item, (i.e.
* <attribute name> <comparison operator> <item value>).
* This default implementation just prints the attribute
* name and (optionally) frequency information.
*
* @param freq true if the frequency should be included.
* @return a string representation of this item.
*/
public String toString(boolean freq) {
String result = m_attribute.name() + "=" + m_attribute.value(m_valueIndex);
if (freq) {
result += ":" + m_frequency;
}
return result;
}
/**
* Equals. Just compares attribute and valueIndex.
* @return true if this NominalItem is equal to the argument.
*/
public boolean equals(Object compareTo) {
if (!(compareTo instanceof NominalItem)) {
return false;
}
NominalItem b = (NominalItem)compareTo;
if (m_attribute.equals(b.getAttribute()) &&
// m_frequency == b.getFrequency() &&
m_valueIndex == b.getValueIndex()) {
return true;
}
return false;
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/associations/NumericItem.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* NumericItem.java
* Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.associations;
import java.io.Serializable;
import weka.core.Attribute;
import weka.core.Utils;
/**
 * Class that encapsulates a numeric item.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision$
 */
public class NumericItem extends Item implements Serializable {

  /** For serialization */
  private static final long serialVersionUID = -7869433770765864800L;

  public static enum Comparison {
    NONE, EQUAL, LESS_THAN_OR_EQUAL_TO, GREATER_THAN;
  }

  /** The numeric test */
  protected double m_splitPoint;

  /** The comparison operator */
  protected Comparison m_comparison;

  // NOTE(review): equals() is overridden below but hashCode() is not, so the
  // equals/hashCode contract may be violated if instances are used as hash
  // keys. Confirm against the Item base class before adding an override.

  /**
   * Constructs a new <code>NumericItem</code>
   *
   * @param att the attribute that backs the item.
   * @param splitPoint the numeric test value.
   * @param comp the comparison operator.
   * @throws Exception if the NumericItem can't be constructed.
   */
  public NumericItem(Attribute att, double splitPoint, Comparison comp) throws Exception {
    super(att);

    if (!att.isNumeric()) {
      throw new Exception(
        "NumericItem must be constructed using a numeric attribute");
    }
    m_comparison = comp;
    m_splitPoint = splitPoint;
  }

  /**
   * Gets the numeric test.
   *
   * @return the numeric test value for this item.
   */
  public double getSplitPoint() {
    return m_splitPoint;
  }

  /**
   * Gets the comparison operator for this item.
   *
   * @return the comparison operator for this item.
   */
  public Comparison getComparison() {
    return m_comparison;
  }

  /**
   * Get this item's value as a String.
   *
   * @return this item's value as a String.
   */
  @Override
  public String getItemValueAsString() {
    return Utils.doubleToString(m_splitPoint, 3);
  }

  /**
   * Get this item's comparison operator as a String.
   *
   * NOTE(review): returns null when the comparison is NONE — callers must be
   * prepared for that; confirm whether an empty string would be safer.
   *
   * @return this item's comparison operator as a String.
   */
  @Override
  public String getComparisonAsString() {
    String result = null;

    switch (m_comparison) {
    case EQUAL:
      result = "=";
      break;
    case LESS_THAN_OR_EQUAL_TO:
      result = "<=";
      break;
    case GREATER_THAN:
      result = ">";
      break;
    default:
      break;
    }

    return result;
  }

  /**
   * A string representation of this item, (i.e. <attribute name> <comparison
   * operator> <item value>). This default implementation just prints the
   * attribute name and (optionally) frequency information.
   *
   * @param freq true if the frequency should be included.
   * @return a string representation of this item.
   */
  @Override
  public String toString(boolean freq) {
    StringBuffer result = new StringBuffer();
    result.append(m_attribute.name() + " ");
    switch (m_comparison) {
    case EQUAL:
      result.append("=");
      break;
    case LESS_THAN_OR_EQUAL_TO:
      result.append("<=");
      break;
    case GREATER_THAN:
      result.append(">");
      break;
    default:
      break;
    }
    result.append(" " + Utils.doubleToString(m_splitPoint, 4));

    if (freq) {
      result.append(":" + m_frequency);
    }

    return result.toString();
  }

  /**
   * Equals. Compares the attribute, numeric test and comparison operator.
   *
   * @param compareTo the object to compare against.
   * @return true if this NumericItem is equal to the argument.
   */
  @Override
  public boolean equals(Object compareTo) {
    if (!(compareTo instanceof NumericItem)) {
      return false;
    }

    NumericItem b = (NumericItem) compareTo;
    // Double.compare() == 0 is bit-equivalent to the previous
    // new Double(x).equals(new Double(y)) without the deprecated boxing
    // constructors (both use doubleToLongBits, so NaN == NaN here).
    if (m_attribute.equals(b.getAttribute())
      && m_comparison == b.getComparison()
      && Double.compare(m_splitPoint, b.getSplitPoint()) == 0) {
      return true;
    }

    return false;
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/associations/SingleAssociatorEnhancer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* SingleAssociatorEnhancer.java
* Copyright (C) 2007-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.associations;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.Utils;
/**
 * Abstract utility class for handling settings common to meta associators that
 * use a single base associator.
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision$
 */
public abstract class SingleAssociatorEnhancer extends AbstractAssociator
  implements OptionHandler {

  /** for serialization */
  private static final long serialVersionUID = -3665885256363525164L;

  /** The base associator to use */
  protected Associator m_Associator = new Apriori();

  /**
   * String describing default Associator.
   *
   * @return default classname
   */
  protected String defaultAssociatorString() {
    return Apriori.class.getName();
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> result = new Vector<Option>();

    result.addElement(new Option("\tFull name of base associator.\n"
      + "\t(default: " + defaultAssociatorString() + ")", "W", 1, "-W"));

    if (m_Associator instanceof OptionHandler) {
      result.addElement(new Option("", "", 0,
        "\nOptions specific to associator " + m_Associator.getClass().getName()
          + ":"));
      result.addAll(Collections.list(((OptionHandler) m_Associator)
        .listOptions()));
    }

    return result.elements();
  }

  /**
   * Parses a given list of options. Valid options are:
   * <p>
   *
   * -W classname <br>
   * Specify the full class name of the base associator.
   * <p>
   *
   * Options after -- are passed to the designated associator.
   * <p>
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  @Override
  public void setOptions(String[] options) throws Exception {
    String tmpStr;

    tmpStr = Utils.getOption('W', options);
    if (tmpStr.length() > 0) {
      // This is just to set the associator in case the option
      // parsing fails.
      setAssociator(AbstractAssociator.forName(tmpStr, null));
      setAssociator(AbstractAssociator.forName(tmpStr,
        Utils.partitionOptions(options)));
    } else {
      // This is just to set the associator in case the option
      // parsing fails.
      setAssociator(AbstractAssociator.forName(defaultAssociatorString(), null));
      setAssociator(AbstractAssociator.forName(defaultAssociatorString(),
        Utils.partitionOptions(options)));
    }
  }

  /**
   * Gets the current settings of the associator.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {
    int i;
    Vector<String> result;
    String[] options;

    result = new Vector<String>();

    result.add("-W");
    result.add(getAssociator().getClass().getName());

    if (getAssociator() instanceof OptionHandler) {
      options = ((OptionHandler) getAssociator()).getOptions();
      result.add("--");
      for (i = 0; i < options.length; i++) {
        result.add(options[i]);
      }
    }

    return result.toArray(new String[result.size()]);
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String associatorTipText() {
    return "The base associator to be used.";
  }

  /**
   * Set the base associator.
   *
   * @param value the associator to use.
   */
  public void setAssociator(Associator value) {
    m_Associator = value;
  }

  /**
   * Get the associator used as the base associator.
   *
   * @return the currently used associator
   */
  public Associator getAssociator() {
    return m_Associator;
  }

  /**
   * Gets the associator specification string, which contains the class name of
   * the associator and any options to the associator.
   *
   * @return the associator string
   */
  protected String getAssociatorSpec() {
    Associator c = getAssociator();
    // Guard the cast (as getOptions() and listOptions() already do) instead
    // of unconditionally casting, which threw a ClassCastException for
    // associators that don't implement OptionHandler.
    if (c instanceof OptionHandler) {
      return c.getClass().getName() + " "
        + Utils.joinOptions(((OptionHandler) c).getOptions());
    }
    return c.getClass().getName();
  }

  /**
   * Returns default capabilities of the base associator.
   *
   * @return the capabilities of the base associator
   */
  @Override
  public Capabilities getCapabilities() {
    Capabilities result;

    if (getAssociator() != null) {
      result = getAssociator().getCapabilities();
    } else {
      result = new Capabilities(this);
    }

    // set dependencies
    for (Capability cap : Capability.values()) {
      result.enableDependency(cap);
    }

    result.setOwner(this);

    return result;
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/ASEvaluation.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ASEvaluation.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.attributeSelection;
import weka.core.Capabilities;
import weka.core.CapabilitiesHandler;
import weka.core.CapabilitiesIgnorer;
import weka.core.CommandlineRunnable;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.SerializedObject;
import weka.core.Utils;
import java.io.Serializable;
/**
 * Abstract attribute selection evaluation class.
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision$
 */
public abstract class ASEvaluation implements Serializable, CapabilitiesHandler,
  CapabilitiesIgnorer, RevisionHandler, CommandlineRunnable {

  /** for serialization */
  private static final long serialVersionUID = 2091705669885950849L;

  /** Whether capabilities should not be checked before building */
  protected boolean m_DoNotCheckCapabilities = false;

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String doNotCheckCapabilitiesTipText() {
    return "If set, evaluator capabilities are not checked before evaluator is built"
      + " (Use with caution to reduce runtime).";
  }

  /**
   * Set whether not to check capabilities.
   *
   * @param doNotCheckCapabilities true if capabilities are not to be checked.
   */
  @Override
  public void setDoNotCheckCapabilities(boolean doNotCheckCapabilities) {
    m_DoNotCheckCapabilities = doNotCheckCapabilities;
  }

  /**
   * Get whether capabilities checking is turned off.
   *
   * @return true if capabilities checking is turned off.
   */
  @Override
  public boolean getDoNotCheckCapabilities() {
    return m_DoNotCheckCapabilities;
  }

  // ===============
  // Public methods.
  // ===============

  /**
   * Generates a attribute evaluator. Has to initialize all fields of the
   * evaluator that are not being set via options.
   *
   * @param data set of instances serving as training data
   * @exception Exception if the evaluator has not been generated successfully
   */
  public abstract void buildEvaluator(Instances data) throws Exception;

  /**
   * Provides a chance for a attribute evaluator to do any special post
   * processing of the selected attribute set. Can also be used to clean up any
   * data structures post attribute selection.
   *
   * @param attributeSet the set of attributes found by the search
   * @return a possibly ranked list of postprocessed attributes
   * @exception Exception if postprocessing fails for some reason
   */
  public int[] postProcess(int[] attributeSet) throws Exception {
    // default: pass the search result through unchanged
    return attributeSet;
  }

  /**
   * Creates a new instance of an attribute/subset evaluator given it's class
   * name and (optional) arguments to pass to it's setOptions method. If the
   * evaluator implements OptionHandler and the options parameter is non-null,
   * the evaluator will have it's options set.
   *
   * @param evaluatorName the fully qualified class name of the evaluator
   * @param options an array of options suitable for passing to setOptions. May
   *          be null.
   * @return the newly created evaluator, ready for use.
   * @exception Exception if the evaluator name is invalid, or the options
   *              supplied are not acceptable to the evaluator
   */
  public static ASEvaluation forName(String evaluatorName, String[] options)
    throws Exception {
    return (ASEvaluation) Utils.forName(ASEvaluation.class, evaluatorName,
      options);
  }

  /**
   * Creates copies of the current evaluator. Note that this method now uses
   * Serialization to perform a deep copy, so the evaluator object must be fully
   * Serializable. Any currently built model will now be copied as well.
   *
   * @param model an example evaluator to copy
   * @param num the number of evaluator copies to create.
   * @return an array of evaluators.
   * @exception Exception if an error occurs
   */
  public static ASEvaluation[] makeCopies(ASEvaluation model, int num)
    throws Exception {
    if (model == null) {
      throw new Exception("No model evaluator set");
    }
    ASEvaluation[] evaluators = new ASEvaluation[num];
    // serialize once, deserialize num times for deep copies
    SerializedObject so = new SerializedObject(model);
    for (int i = 0; i < evaluators.length; i++) {
      evaluators[i] = (ASEvaluation) so.getObject();
    }
    return evaluators;
  }

  /**
   * Returns the capabilities of this evaluator.
   *
   * @return the capabilities of this evaluator
   * @see Capabilities
   */
  @Override
  public Capabilities getCapabilities() {
    Capabilities result = new Capabilities(this);
    result.enableAll();
    return result;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }

  /**
   * Tells the evaluator that the attribute selection process is complete. It
   * can then clean up data structures, references to training data as necessary
   * in order to save memory
   */
  public void clean() {
    // subclasses to override
  }

  /**
   * runs the evaluator with the given commandline options. Invokes the
   * evaluator's preExecution()/postExecution() hooks around the run.
   *
   * @param evaluator the evaluator to run
   * @param options the commandline options
   */
  public static void runEvaluator(ASEvaluation evaluator, String[] options) {
    try {
      evaluator.preExecution();
      System.out
        .println(AttributeSelection.SelectAttributes(evaluator, options));
    } catch (Exception e) {
      // "help requested" / "no training file given" are expected usage
      // errors; suppress the stack trace for those, print it otherwise
      String msg = e.toString().toLowerCase();
      if ((msg.indexOf("help requested") == -1)
        && (msg.indexOf("no training file given") == -1)) {
        e.printStackTrace();
      }
      System.err.println(e.getMessage());
    }
    try {
      evaluator.postExecution();
    } catch (Exception ex) {
      ex.printStackTrace();
    }
  }

  /**
   * Perform any setup stuff that might need to happen before commandline
   * execution. Subclasses should override if they need to do something here
   *
   * @throws Exception if a problem occurs during setup
   */
  @Override
  public void preExecution() throws Exception {
  }

  /**
   * Execute the supplied object. Subclasses need to override this method.
   *
   * @param toRun the object to execute
   * @param options any options to pass to the object
   * @throws Exception if a problem occurs
   */
  @Override
  public void run(Object toRun, String[] options) throws Exception {
    if (!(toRun instanceof ASEvaluation)) {
      throw new IllegalArgumentException(
        "Object to run is not an instance of ASEvaluation!");
    }
    // BUGFIX: previously this method also called preExecution()/postExecution()
    // here, on 'this' rather than on 'toRun'. runEvaluator() already invokes
    // both hooks on the evaluator being run, so the extra calls executed the
    // hooks twice and on the wrong object.
    runEvaluator((ASEvaluation) toRun, options);
  }

  /**
   * Perform any teardown stuff that might need to happen after execution.
   * Subclasses should override if they need to do something here
   *
   * @throws Exception if a problem occurs during teardown
   */
  @Override
  public void postExecution() throws Exception {
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/ASSearch.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ASSearch.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.attributeSelection;
import java.io.Serializable;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.SerializedObject;
import weka.core.Utils;
/**
* Abstract attribute selection search class.
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
*/
public abstract class ASSearch
implements Serializable, RevisionHandler {
/** for serialization */
private static final long serialVersionUID = 7591673350342236548L;
// ===============
// Public methods.
// ===============
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Searches the attribute subset/ranking space.
*
* @param ASEvaluator the attribute evaluator to guide the search
* @param data the training instances.
* @return an array (not necessarily ordered) of selected attribute indexes
* @throws Exception if the search can't be completed
*/
public abstract int [] search(ASEvaluation ASEvaluator,
Instances data) throws Exception;
/**
* Creates a new instance of a search class given it's class name and
* (optional) arguments to pass to it's setOptions method. If the
* search method implements OptionHandler and the options parameter is
* non-null, the search method will have it's options set.
*
* @param searchName the fully qualified class name of the search class
* @param options an array of options suitable for passing to setOptions. May
* be null.
* @return the newly created search object, ready for use.
* @throws Exception if the search class name is invalid, or the options
* supplied are not acceptable to the search class.
*/
public static ASSearch forName(String searchName,
String [] options) throws Exception {
return (ASSearch)Utils.forName(ASSearch.class,
searchName,
options);
}
/**
* Creates copies of the current search scheme. Note that this method
* now uses Serialization to perform a deep copy, so the search
* object must be fully Serializable. Any currently built model will
* now be copied as well.
*
* @param model an example search scheme to copy
* @param num the number of search scheme copies to create.
* @return an array of search schemes.
* @throws Exception if an error occurs
*/
public static ASSearch[] makeCopies(ASSearch model, int num) throws Exception {
if (model == null)
throw new Exception("No model search scheme set");
ASSearch[] result = new ASSearch[num];
SerializedObject so = new SerializedObject(model);
for (int i = 0; i < result.length; i++)
result[i] = (ASSearch) so.getObject();
return result;
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/AttributeEvaluator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* AttributeEvaluator.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.attributeSelection;
/**
 * Interface for classes that evaluate attributes individually.
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision$
 */
public interface AttributeEvaluator {

  /**
   * Evaluates an individual attribute.
   *
   * @param attribute the index of the attribute to be evaluated
   * @return the "merit" of the attribute
   * @exception Exception if the attribute could not be evaluated
   */
  double evaluateAttribute(int attribute) throws Exception;
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/AttributeSelection.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* AttributeSelection.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.attributeSelection;
import java.beans.BeanInfo;
import java.beans.IntrospectionException;
import java.beans.Introspector;
import java.beans.PropertyDescriptor;
import java.io.Serializable;
import java.lang.reflect.Method;
import java.util.Enumeration;
import java.util.Random;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.core.converters.ConverterUtils.DataSource;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.Remove;
/**
* Attribute selection class. Takes the name of a search class and an evaluation
* class on the command line.
* <p/>
*
* Valid options are:
* <p/>
*
* -h <br/>
* Display help.
* <p/>
*
* -i <name of input file> <br/>
* Specify the training data file.
* <p/>
*
* -c <class index> <br/>
* The index of the attribute to use as the class.
* <p/>
*
* -s <search method> <br/>
* The full class name of the search method followed by search method options
* (if any).<br/>
* Eg. -s "weka.attributeSelection.BestFirst -N 10"
* <p/>
*
* -x <number of folds> <br/>
* Perform a cross validation.
* <p/>
*
* -n <random number seed> <br/>
* Specify a random number seed. Use in conjuction with -x. (Default = 1).
* <p/>
*
* ------------------------------------------------------------------------
* <p/>
*
* Example usage as the main of an attribute evaluator (called FunkyEvaluator):
*
* <pre>
* public static void main(String[] args) {
* runEvaluator(new FunkyEvaluator(), args);
* }
* </pre>
* <p/>
*
* ------------------------------------------------------------------------
* <p/>
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
*/
public class AttributeSelection implements Serializable, RevisionHandler {
/** for serialization */
static final long serialVersionUID = 4170171824147584330L;
/** the instances to select attributes from */
private Instances m_trainInstances;
/** the attribute/subset evaluator */
private ASEvaluation m_ASEvaluator;
/** the search method */
private ASSearch m_searchMethod;
/** the number of folds to use for cross validation */
private int m_numFolds;
/** holds a string describing the results of the attribute selection */
private final StringBuffer m_selectionResults;
/** rank features (if allowed by the search method) */
private boolean m_doRank;
/** do cross validation */
private boolean m_doXval;
/** seed used to randomly shuffle instances for cross validation */
private int m_seed;
/** number of attributes requested from ranked results */
private int m_numToSelect;
/** the selected attributes */
private int[] m_selectedAttributeSet;
/** the attribute indexes and associated merits if a ranking is produced */
private double[][] m_attributeRanking;
/** if a feature selection run involves an attribute transformer */
private AttributeTransformer m_transformer = null;
/**
* the attribute filter for processing instances with respect to the most
* recent feature selection run
*/
private Remove m_attributeFilter = null;
/**
* hold statistics for repeated feature selection, such as under cross
* validation
*/
private double[][] m_rankResults = null;
private double[] m_subsetResults = null;
/**
 * Return the number of attributes selected from the most recent run of
 * attribute selection.
 *
 * @return the number of attributes selected
 * @throws Exception if attribute selection has not been performed yet
 */
public int numberAttributesSelected() throws Exception {
  // NOTE(review): the "- 1" assumes the selected set carries one extra
  // index (presumably the class attribute) — confirm against callers.
  return this.selectedAttributes().length - 1;
}
/**
 * Get the final selected set of attributes.
 *
 * @return an array of attribute indexes
 * @exception Exception if attribute selection has not been performed yet
 */
public int[] selectedAttributes() throws Exception {
  int[] selected = this.m_selectedAttributeSet;
  if (selected == null) {
    throw new Exception("Attribute selection has not been performed yet!");
  }
  return selected;
}
/**
 * Get the final ranking of the attributes.
 *
 * @return a two dimensional array of ranked attribute indexes and their
 *         associated merit scores as doubles.
 * @exception Exception if a ranking has not been produced
 */
public double[][] rankedAttributes() throws Exception {
  double[][] ranking = this.m_attributeRanking;
  if (ranking == null) {
    throw new Exception("Ranking has not been performed");
  }
  return ranking;
}
/**
 * Set the attribute/subset evaluator to use.
 *
 * @param evaluator the evaluator to use
 */
public void setEvaluator(final ASEvaluation evaluator) {
  this.m_ASEvaluator = evaluator;
}
/**
 * Set the search method. If the search method can produce a ranking, the
 * ranking flag is synchronized with the search method's own setting.
 *
 * @param search the search method to use
 */
public void setSearch(final ASSearch search) {
  this.m_searchMethod = search;
  if (search instanceof RankedOutputSearch) {
    this.setRanking(((RankedOutputSearch) search).getGenerateRanking());
  }
}
/**
 * Set the number of folds used for cross validation.
 *
 * @param folds the number of folds
 */
public void setFolds(final int folds) {
  this.m_numFolds = folds;
}
/**
 * Request a ranking (honoured only if the configured search and evaluator
 * support it).
 *
 * @param r true if a ranking is to be produced
 */
public void setRanking(final boolean r) {
  this.m_doRank = r;
}
/**
 * Enable or disable cross validation.
 *
 * @param x true if a cross validation is to be performed
 */
public void setXval(final boolean x) {
  this.m_doXval = x;
}
/**
 * Set the random seed used when shuffling instances for cross validation.
 *
 * @param s the seed
 */
public void setSeed(final int s) {
  this.m_seed = s;
}
/**
 * Get a textual description of the most recent attribute selection run.
 *
 * @return a String describing the results of attribute selection
 */
public String toResultsString() {
  return this.m_selectionResults.toString();
}
/**
 * Reduce the dimensionality of a set of instances to include only those
 * attributes chosen by the last run of attribute selection. If the last run
 * used an attribute transformer, the instances are transformed first.
 *
 * @param in the instances to be reduced
 * @return a dimensionality reduced set of instances
 * @exception Exception if the instances can't be reduced
 */
public Instances reduceDimensionality(final Instances in) throws Exception {
  if (this.m_attributeFilter == null) {
    throw new Exception("No feature selection has been performed yet!");
  }
  // no transformer: simply filter to the selected attributes
  if (this.m_transformer == null) {
    return Filter.useFilter(in, this.m_attributeFilter);
  }
  // transformer present: transform each instance, then filter
  Instances transformed = new Instances(this.m_transformer.transformedHeader(), in.numInstances());
  for (int i = 0; i < in.numInstances(); i++) {
    // bail out promptly if this thread has been interrupted
    if (Thread.interrupted()) {
      throw new InterruptedException("Killed WEKA");
    }
    transformed.add(this.m_transformer.convertInstance(in.instance(i)));
  }
  return Filter.useFilter(transformed, this.m_attributeFilter);
}
/**
 * Reduce the dimensionality of a single instance to include only those
 * attributes chosen by the last run of attribute selection. If the last run
 * used an attribute transformer, the instance is transformed first.
 *
 * @param in the instance to be reduced
 * @return a dimensionality reduced instance
 * @exception Exception if the instance can't be reduced
 */
public Instance reduceDimensionality(Instance in) throws Exception {
  if (this.m_attributeFilter == null) {
    throw new Exception("No feature selection has been performed yet!");
  }
  Instance current = (this.m_transformer != null)
    ? this.m_transformer.convertInstance(in)
    : in;
  this.m_attributeFilter.input(current);
  this.m_attributeFilter.batchFinished();
  return this.m_attributeFilter.output();
}
/**
 * Constructor. Establishes the defaults: a CfsSubsetEval evaluator with a
 * GreedyStepwise search, 10 folds, seed 1, and neither ranking nor cross
 * validation enabled.
 */
public AttributeSelection() {
  this.m_selectionResults = new StringBuffer();
  this.m_selectedAttributeSet = null;
  this.m_attributeRanking = null;
  this.setFolds(10);
  this.setRanking(false);
  this.setXval(false);
  this.setSeed(1);
  this.setEvaluator(new CfsSubsetEval());
  this.setSearch(new GreedyStepwise());
}
/**
 * Perform attribute selection with a particular evaluator and a set of
 * options specifying search method and input file etc.
 *
 * @param ASEvaluator an evaluator object
 * @param options an array of options, not only for the evaluator but also the
 * search method (if any) and an input data file
 * @return the results of attribute selection as a String
 * @exception Exception if no training file is set
 */
public static String SelectAttributes(final ASEvaluation ASEvaluator, final String[] options) throws Exception {
String trainFileName, searchName;
Instances train = null;
ASSearch searchMethod = null;
// Work on a clone for -h/-s so that those options remain in 'options' for
// the three-argument SelectAttributes call below.
String[] optionsTmp = options.clone();
boolean helpRequested = false;
try {
// get basic options (options the same for all attribute selectors
// NOTE: -i is deliberately consumed from 'options' itself (not the clone)
// so the training-file option is not passed on a second time.
trainFileName = Utils.getOption('i', options);
helpRequested = Utils.getFlag('h', optionsTmp);
if (helpRequested || (trainFileName.length() == 0)) {
// Instantiate the search method (if one was named) purely so that
// makeOptionString can include its specific options in the help text.
searchName = Utils.getOption('s', optionsTmp);
if (searchName.length() != 0) {
String[] searchOptions = Utils.splitOptions(searchName);
searchMethod = (ASSearch) Class.forName(searchOptions[0]).newInstance();
}
if (helpRequested) {
throw new Exception("Help requested.");
} else {
throw new Exception("No training file given.");
}
}
} catch (Exception e) {
// Re-throw with the full usage/option summary appended to the message.
throw new Exception('\n' + e.getMessage() + makeOptionString(ASEvaluator, searchMethod));
}
// Load the training data and delegate to the three-argument overload.
DataSource source = new DataSource(trainFileName);
train = source.getDataSet();
return SelectAttributes(ASEvaluator, options, train);
}
/**
 * returns a string summarizing the results of repeated attribute selection
 * runs on splits of a dataset.
 *
 * @return a summary of attribute selection results
 * @exception Exception if no attribute selection has been performed.
 */
public String CVResultsString() throws Exception {
StringBuffer CvString = new StringBuffer();
if ((this.m_subsetResults == null && this.m_rankResults == null) || (this.m_trainInstances == null)) {
throw new Exception("Attribute selection has not been performed yet!");
}
// Field width for printing attribute indexes, based on attribute count.
int fieldWidth = (int) (Math.log(this.m_trainInstances.numAttributes()) + 1.0);
CvString.append("\n\n=== Attribute selection " + this.m_numFolds + " fold cross-validation ");
// Folds are stratified only for supervised evaluators with a nominal class.
if (!(this.m_ASEvaluator instanceof UnsupervisedSubsetEvaluator) && !(this.m_ASEvaluator instanceof UnsupervisedAttributeEvaluator) && (this.m_trainInstances.classAttribute().isNominal())) {
CvString.append("(stratified), seed: ");
CvString.append(this.m_seed + " ===\n\n");
} else {
CvString.append("seed: " + this.m_seed + " ===\n\n");
}
if ((this.m_searchMethod instanceof RankedOutputSearch) && (this.m_doRank == true)) {
// Ranking mode: m_rankResults rows hold, per attribute, the fold sums of
// [0] merit, [1] rank, [2] squared merit, [3] squared rank (accumulated
// in updateStatsForModelCVSplit). Convert sums to mean and std dev.
CvString.append("average merit average rank attribute\n");
// calcualte means and std devs
for (int i = 0; i < this.m_rankResults[0].length; i++) {
this.m_rankResults[0][i] /= this.m_numFolds; // mean merit
// var = (sum of squares - n*mean^2) / n; clamp tiny negatives from
// floating point round-off to zero, then store the std dev in row 2.
double var = this.m_rankResults[0][i] * this.m_rankResults[0][i] * this.m_numFolds;
var = (this.m_rankResults[2][i] - var);
var /= this.m_numFolds;
if (var <= 0.0) {
var = 0.0;
this.m_rankResults[2][i] = 0;
} else {
this.m_rankResults[2][i] = Math.sqrt(var);
}
this.m_rankResults[1][i] /= this.m_numFolds; // mean rank
// Same variance computation for ranks; std dev stored in row 3.
var = this.m_rankResults[1][i] * this.m_rankResults[1][i] * this.m_numFolds;
var = (this.m_rankResults[3][i] - var);
var /= this.m_numFolds;
if (var <= 0.0) {
var = 0.0;
this.m_rankResults[3][i] = 0;
} else {
this.m_rankResults[3][i] = Math.sqrt(var);
}
}
// now sort them by mean rank
int[] s = Utils.sort(this.m_rankResults[1]);
for (int element : s) {
// Attributes that never appeared in any fold ranking (mean rank 0)
// are omitted from the report.
if (this.m_rankResults[1][element] > 0) {
CvString.append(Utils.doubleToString(/*
* Math. abs(
*/this.m_rankResults[0][element]/* ) */, 6, 3) + " +-" + Utils.doubleToString(this.m_rankResults[2][element], 6, 3) + " "
+ Utils.doubleToString(this.m_rankResults[1][element], fieldWidth + 2, 1) + " +-" + Utils.doubleToString(this.m_rankResults[3][element], 5, 2) + " " + Utils.doubleToString((element + 1), fieldWidth, 0) + " "
+ this.m_trainInstances.attribute(element).name() + "\n");
}
}
} else {
// Subset mode: m_subsetResults[i] counts the folds that selected
// attribute i; report the count and its percentage of all folds.
CvString.append("number of folds (%) attribute\n");
for (int i = 0; i < this.m_subsetResults.length; i++) {
// The class attribute is skipped for supervised subset evaluators.
if ((this.m_ASEvaluator instanceof UnsupervisedSubsetEvaluator) || (i != this.m_trainInstances.classIndex())) {
CvString.append(Utils.doubleToString(this.m_subsetResults[i], 12, 0) + "(" + Utils.doubleToString((this.m_subsetResults[i] / this.m_numFolds * 100.0), 3, 0) + " %) " + Utils.doubleToString((i + 1), fieldWidth, 0) + " "
+ this.m_trainInstances.attribute(i).name() + "\n");
}
}
}
return CvString.toString();
}
/**
 * Select attributes for a split of the data. Calling this function updates
 * the statistics on attribute selection. CVResultsString() returns a string
 * summarizing the results of repeated calls to this function. Assumes that
 * splits are from the same dataset --- i.e. have the same number and types
 * of attributes as previous splits.
 *
 * @param split the instances to select attributes from
 * @exception Exception if an error occurs
 */
public void selectAttributesCVSplit(final Instances split) throws Exception {
  // build the evaluator on this fold, run the search, then let the
  // evaluator post-process the result before recording statistics
  this.m_ASEvaluator.buildEvaluator(split);
  int[] selected = this.m_searchMethod.search(this.m_ASEvaluator, split);
  selected = this.m_ASEvaluator.postProcess(selected);
  this.updateStatsForModelCVSplit(split, this.m_ASEvaluator, this.m_searchMethod, selected, this.m_doRank);
}
/**
 * Update the attribute selection stats for a cross-validation fold of the
 * data.
 *
 * @param split the instances in this split/fold of the data
 * @param evaluator the evaluator that was used
 * @param search the search that was used
 * @param attributeSet the final subset produced for the split
 * @param doRank whether to produce a ranking
 * @throws Exception if a problem occurs
 */
public void updateStatsForModelCVSplit(final Instances split, final ASEvaluation evaluator, final ASSearch search, final int[] attributeSet, final boolean doRank) throws Exception {
double[][] attributeRanking = null;
// if the train instances are null then set equal to this split.
// If this is the case then this function is more than likely being
// called from outside this class in order to obtain CV statistics
// and all we need m_trainIstances for is to get at attribute names
// and types etc.
if (this.m_trainInstances == null) {
this.m_trainInstances = split;
}
// create space to hold statistics
// m_subsetResults[i]: number of folds selecting attribute i.
// m_rankResults rows: [0] merit sum, [1] rank sum, [2] squared-merit sum,
// [3] squared-rank sum — later turned into means/std devs by
// CVResultsString().
if (this.m_rankResults == null && this.m_subsetResults == null) {
this.m_subsetResults = new double[split.numAttributes()];
this.m_rankResults = new double[4][split.numAttributes()];
}
if ((search instanceof RankedOutputSearch) && doRank) {
// attributeRanking[j][0] is the attribute index of the j-th ranked
// attribute; attributeRanking[j][1] is its merit.
attributeRanking = ((RankedOutputSearch) search).rankedAttributes();
// System.out.println(attributeRanking[0][1]);
for (int j = 0; j < attributeRanking.length; j++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
// merit
this.m_rankResults[0][(int) attributeRanking[j][0]] += attributeRanking[j][1];
// squared merit
this.m_rankResults[2][(int) attributeRanking[j][0]] += (attributeRanking[j][1] * attributeRanking[j][1]);
// rank (1-based position in the ranking)
this.m_rankResults[1][(int) attributeRanking[j][0]] += (j + 1);
// squared rank
this.m_rankResults[3][(int) attributeRanking[j][0]] += (j + 1) * (j + 1);
// += (attributeRanking[j][0] * attributeRanking[j][0]);
}
} else {
// subset mode: just count how often each attribute was selected
for (int j = 0; j < attributeSet.length; j++) {
this.m_subsetResults[attributeSet[j]]++;
}
}
}
/**
 * Perform a cross validation for attribute selection. With subset evaluators
 * the number of times each attribute is selected over the cross validation is
 * reported. For attribute evaluators, the average merit and average ranking +
 * std deviation is reported for each attribute.
 *
 * @return the results of cross validation as a String
 * @exception Exception if an error occurs during cross validation
 */
public String CrossValidateAttributes() throws Exception {
  Instances cvData = new Instances(this.m_trainInstances);
  Instances train;
  Random random = new Random(this.m_seed);
  cvData.randomize(random);
  // stratify only for supervised evaluators with a nominal class
  boolean supervised = !(this.m_ASEvaluator instanceof UnsupervisedSubsetEvaluator)
    && !(this.m_ASEvaluator instanceof UnsupervisedAttributeEvaluator);
  if (supervised && cvData.classAttribute().isNominal()) {
    cvData.stratify(this.m_numFolds);
  }
  for (int fold = 0; fold < this.m_numFolds; fold++) {
    // bail out promptly if this thread has been interrupted
    if (Thread.interrupted()) {
      throw new InterruptedException("Killed WEKA");
    }
    // Perform attribute selection on this fold's training data
    train = cvData.trainCV(this.m_numFolds, fold, random);
    this.selectAttributesCVSplit(train);
  }
  return this.CVResultsString();
}
/**
* Perform attribute selection on the supplied training instances.
*
* @param data the instances to select attributes from
* @exception Exception if there is a problem during selection
*/
public void SelectAttributes(final Instances data) throws Exception {
int[] attributeSet;
this.m_transformer = null;
this.m_attributeFilter = null;
this.m_trainInstances = data;
if (this.m_doXval == true && (this.m_ASEvaluator instanceof AttributeTransformer)) {
throw new Exception("Can't cross validate an attribute transformer.");
}
if (this.m_ASEvaluator instanceof SubsetEvaluator && this.m_searchMethod instanceof Ranker) {
throw new Exception(this.m_ASEvaluator.getClass().getName() + " must use a search method other than Ranker");
}
if (this.m_ASEvaluator instanceof AttributeEvaluator && !(this.m_searchMethod instanceof Ranker)) {
// System.err.println("AttributeEvaluators must use a Ranker search "
// +"method. Switching to Ranker...");
// m_searchMethod = new Ranker();
throw new Exception("AttributeEvaluators must use the Ranker search " + "method");
}
if (this.m_searchMethod instanceof RankedOutputSearch) {
this.m_doRank = ((RankedOutputSearch) this.m_searchMethod).getGenerateRanking();
}
if (this.m_ASEvaluator instanceof UnsupervisedAttributeEvaluator || this.m_ASEvaluator instanceof UnsupervisedSubsetEvaluator) {
// unset the class index
// m_trainInstances.setClassIndex(-1);
} else {
// check that a class index has been set
if (this.m_trainInstances.classIndex() < 0) {
this.m_trainInstances.setClassIndex(this.m_trainInstances.numAttributes() - 1);
}
}
// Initialize the attribute evaluator
this.m_ASEvaluator.buildEvaluator(this.m_trainInstances);
if (this.m_ASEvaluator instanceof AttributeTransformer) {
this.m_trainInstances = ((AttributeTransformer) this.m_ASEvaluator).transformedHeader();
this.m_transformer = (AttributeTransformer) this.m_ASEvaluator;
}
int fieldWidth = (int) (Math.log(this.m_trainInstances.numAttributes()) + 1.0);
// Do the search
attributeSet = this.m_searchMethod.search(this.m_ASEvaluator, this.m_trainInstances);
// try and determine if the search method uses an attribute transformer---
// this is a bit of a hack to make things work properly with RankSearch
// using PrincipalComponents as its attribute ranker
try {
BeanInfo bi = Introspector.getBeanInfo(this.m_searchMethod.getClass());
PropertyDescriptor properties[];
// methods = bi.getMethodDescriptors();
properties = bi.getPropertyDescriptors();
for (PropertyDescriptor propertie : properties) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
propertie.getDisplayName();
Method meth = propertie.getReadMethod();
Object retType = meth.getReturnType();
if (retType.equals(ASEvaluation.class)) {
Class<?> args[] = {};
ASEvaluation tempEval = (ASEvaluation) (meth.invoke(this.m_searchMethod, (Object[]) args));
if (tempEval instanceof AttributeTransformer) {
// grab the transformed data header
this.m_trainInstances = ((AttributeTransformer) tempEval).transformedHeader();
this.m_transformer = (AttributeTransformer) tempEval;
}
}
}
} catch (IntrospectionException ex) {
System.err.println("AttributeSelection: Couldn't " + "introspect");
}
// Do any postprocessing that a attribute selection method might require
attributeSet = this.m_ASEvaluator.postProcess(attributeSet);
// if (!this.m_doRank) {
// this.m_selectionResults.append(this.printSelectionResults());
// }
if ((this.m_searchMethod instanceof RankedOutputSearch) && this.m_doRank == true) {
try {
this.m_attributeRanking = ((RankedOutputSearch) this.m_searchMethod).rankedAttributes();
} catch (Exception ex) {
ex.printStackTrace();
throw ex;
}
// m_selectionResults.append(printSelectionResults());
// this.m_selectionResults.append("Ranked attributes:\n");
// retrieve the number of attributes to retain
this.m_numToSelect = ((RankedOutputSearch) this.m_searchMethod).getCalculatedNumToSelect();
// determine fieldwidth for merit
int f_p = 0;
int w_p = 0;
for (int i = 0; i < this.m_numToSelect; i++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
double precision = (Math.abs(this.m_attributeRanking[i][1]) - (int) (Math.abs(this.m_attributeRanking[i][1])));
double intPart = (int) (Math.abs(this.m_attributeRanking[i][1]));
if (precision > 0) {
precision = Math.abs((Math.log(Math.abs(precision)) / Math.log(10))) + 3;
}
if (precision > f_p) {
f_p = (int) precision;
}
if (intPart == 0) {
if (w_p < 2) {
w_p = 2;
}
} else if ((Math.abs((Math.log(Math.abs(this.m_attributeRanking[i][1])) / Math.log(10))) + 1) > w_p) {
if (this.m_attributeRanking[i][1] > 0) {
w_p = (int) Math.abs((Math.log(Math.abs(this.m_attributeRanking[i][1])) / Math.log(10))) + 1;
}
}
}
// for (int i = 0; i < this.m_numToSelect; i++) {
// this.m_selectionResults.append(Utils.doubleToString(this.m_attributeRanking[i][1], f_p + w_p + 1, f_p) + Utils.doubleToString((this.m_attributeRanking[i][0] + 1), fieldWidth + 1, 0) + " "
// + this.m_trainInstances.attribute((int) this.m_attributeRanking[i][0]).name() + "\n");
// }
// set up the selected attributes array - usable by a filter or
// whatever
if (this.m_trainInstances.classIndex() >= 0) {
if ((!(this.m_ASEvaluator instanceof UnsupervisedSubsetEvaluator) && !(this.m_ASEvaluator instanceof UnsupervisedAttributeEvaluator)) || this.m_ASEvaluator instanceof AttributeTransformer) {
// one more for the class
this.m_selectedAttributeSet = new int[this.m_numToSelect + 1];
this.m_selectedAttributeSet[this.m_numToSelect] = this.m_trainInstances.classIndex();
} else {
this.m_selectedAttributeSet = new int[this.m_numToSelect];
}
} else {
this.m_selectedAttributeSet = new int[this.m_numToSelect];
}
// this.m_selectionResults.append("\nSelected attributes: ");
for (int i = 0; i < this.m_numToSelect; i++) {
this.m_selectedAttributeSet[i] = (int) this.m_attributeRanking[i][0];
// if (i == this.m_numToSelect - 1) {
// this.m_selectionResults.append(((int) this.m_attributeRanking[i][0] + 1) + " : " + (i + 1) + "\n");
// } else {
// this.m_selectionResults.append(((int) this.m_attributeRanking[i][0] + 1));
// this.m_selectionResults.append(",");
// }
}
} else {
// set up the selected attributes array - usable by a filter or
// whatever
if ((!(this.m_ASEvaluator instanceof UnsupervisedSubsetEvaluator) && !(this.m_ASEvaluator instanceof UnsupervisedAttributeEvaluator)) || this.m_trainInstances.classIndex() >= 0)
// one more for the class
{
this.m_selectedAttributeSet = new int[attributeSet.length + 1];
this.m_selectedAttributeSet[attributeSet.length] = this.m_trainInstances.classIndex();
} else {
this.m_selectedAttributeSet = new int[attributeSet.length];
}
for (int i = 0; i < attributeSet.length; i++) {
this.m_selectedAttributeSet[i] = attributeSet[i];
}
// this.m_selectionResults.append("Selected attributes: ");
// for (int i = 0; i < attributeSet.length; i++) {
// if (i == (attributeSet.length - 1)) {
// this.m_selectionResults.append((attributeSet[i] + 1) + " : " + attributeSet.length + "\n");
// } else {
// this.m_selectionResults.append((attributeSet[i] + 1) + ",");
// }
// }
//
// for (int element : attributeSet) {
// this.m_selectionResults.append(" " + this.m_trainInstances.attribute(element).name() + "\n");
// }
}
// Cross validation should be called from here
// if (this.m_doXval == true) {
// this.m_selectionResults.append(this.CrossValidateAttributes());
// }
// set up the attribute filter with the selected attributes
if (this.m_selectedAttributeSet != null && !this.m_doXval) {
this.m_attributeFilter = new Remove();
this.m_attributeFilter.setAttributeIndicesArray(this.m_selectedAttributeSet);
this.m_attributeFilter.setInvertSelection(true);
this.m_attributeFilter.setInputFormat(this.m_trainInstances);
}
// Save space
this.m_trainInstances = new Instances(this.m_trainInstances, 0);
this.m_ASEvaluator.clean();
}
/**
 * Perform attribute selection with a particular evaluator and a set of
 * options specifying search method and options for the search method and
 * evaluator.
 *
 * @param ASEvaluator an evaluator object
 * @param options an array of options, not only for the evaluator but also the
 *          search method (if any) and an input data file
 * @param train the input instances
 * @return the results of attribute selection as a String
 * @exception Exception if incorrect options are supplied
 */
public static String SelectAttributes(final ASEvaluation ASEvaluator, final String[] options, final Instances train) throws Exception {
  int seed = 1, folds = 10;
  String foldsString, seedString, searchName;
  String classString;
  String searchClassName;
  String[] searchOptions = null;
  ASSearch searchMethod = null;
  boolean doCrossVal = false;
  int classIndex = -1;
  boolean helpRequested = false;
  AttributeSelection trainSelector = new AttributeSelection();
  try {
    if (Utils.getFlag('h', options)) {
      helpRequested = true;
    }
    // does the data already have a class attribute set?
    if (train.classIndex() != -1) {
      classIndex = train.classIndex() + 1;
    }
    // get basic options (options the same for all attribute selectors):
    // -c <class index>, either "first", "last" or a 1-based attribute number
    classString = Utils.getOption('c', options);
    if (classString.length() != 0) {
      if (classString.equals("first")) {
        classIndex = 1;
      } else if (classString.equals("last")) {
        classIndex = train.numAttributes();
      } else {
        classIndex = Integer.parseInt(classString);
      }
    }
    if ((classIndex != -1) && ((classIndex == 0) || (classIndex > train.numAttributes()))) {
      throw new Exception("Class index out of range.");
    }
    if (classIndex != -1) {
      train.setClassIndex(classIndex - 1);
    }
    // -x <folds>: presence of the option switches cross-validation on
    foldsString = Utils.getOption('x', options);
    if (foldsString.length() != 0) {
      folds = Integer.parseInt(foldsString);
      doCrossVal = true;
    }
    trainSelector.setFolds(folds);
    trainSelector.setXval(doCrossVal);
    // -n <seed>: random number seed, used in conjunction with -x
    seedString = Utils.getOption('n', options);
    if (seedString.length() != 0) {
      seed = Integer.parseInt(seedString);
    }
    trainSelector.setSeed(seed);
    // -s <search class [options]>: mandatory unless the evaluator ranks
    // single attributes, in which case Ranker is the implicit default
    searchName = Utils.getOption('s', options);
    if ((searchName.length() == 0) && (!(ASEvaluator instanceof AttributeEvaluator))) {
      throw new Exception("No search method given.");
    }
    if (searchName.length() != 0) {
      searchName = searchName.trim();
      // split off any search options that follow the class name
      int breakLoc = searchName.indexOf(' ');
      searchClassName = searchName;
      String searchOptionsString = "";
      if (breakLoc != -1) {
        searchClassName = searchName.substring(0, breakLoc);
        searchOptionsString = searchName.substring(breakLoc).trim();
        searchOptions = Utils.splitOptions(searchOptionsString);
      }
    } else {
      try {
        // no explicit search method: fall back to a plain Ranker
        searchClassName = "weka.attributeSelection.Ranker";
        searchMethod = (ASSearch) Class.forName(searchClassName).newInstance();
      } catch (Exception e) {
        throw new Exception("Can't create Ranker object");
      }
    }
    // if the evaluator is a subset evaluator,
    // create the search method and set its options (if any)
    if (searchMethod == null) {
      searchMethod = ASSearch.forName(searchClassName, searchOptions);
    }
    // set the search method
    trainSelector.setSearch(searchMethod);
  } catch (Exception e) {
    throw new Exception('\n' + e.getMessage() + makeOptionString(ASEvaluator, searchMethod));
  }
  try {
    // Set options for ASEvaluator; remaining options are consumed here
    if (ASEvaluator instanceof OptionHandler) {
      ((OptionHandler) ASEvaluator).setOptions(options);
    }
  } catch (Exception e) {
    throw new Exception("\n" + e.getMessage() + makeOptionString(ASEvaluator, searchMethod));
  }
  try {
    Utils.checkForRemainingOptions(options);
  } catch (Exception e) {
    throw new Exception('\n' + e.getMessage() + makeOptionString(ASEvaluator, searchMethod));
  }
  if (helpRequested) {
    // print the help text; the message-less exception aborts further work
    System.out.println(makeOptionString(ASEvaluator, searchMethod));
    throw new Exception();
  }
  // set the attribute evaluator
  trainSelector.setEvaluator(ASEvaluator);
  // do the attribute selection
  trainSelector.SelectAttributes(train);
  // return the results string
  return trainSelector.toResultsString();
}
/**
 * Assembles a text description of the attribute selection results.
 * Note: callers of this method are currently commented out elsewhere in
 * the class.
 *
 * @return a string describing the results of attribute selection.
 */
private String printSelectionResults() {
  final StringBuilder out = new StringBuilder();
  out.append("\n\n=== Attribute Selection on all input data ===\n\nSearch Method:\n");
  out.append(this.m_searchMethod.toString());
  out.append("\nAttribute ");
  // subset evaluators get a slightly different heading
  out.append(this.m_ASEvaluator instanceof SubsetEvaluator ? "Subset Evaluator (" : "Evaluator (");
  final boolean supervised = !(this.m_ASEvaluator instanceof UnsupervisedSubsetEvaluator)
      && !(this.m_ASEvaluator instanceof UnsupervisedAttributeEvaluator);
  if (supervised) {
    out.append("supervised, ");
    out.append("Class (");
    final int classIdx = this.m_trainInstances.classIndex();
    out.append(this.m_trainInstances.attribute(classIdx).isNumeric() ? "numeric): " : "nominal): ");
    out.append((classIdx + 1) + " " + this.m_trainInstances.attribute(classIdx).name() + "):\n");
  } else {
    out.append("unsupervised):\n");
  }
  out.append(this.m_ASEvaluator.toString() + "\n");
  return out.toString();
}
/**
 * Make up the help string giving all the command line options
 *
 * @param ASEvaluator the attribute evaluator to include options for
 * @param searchMethod the search method to include options for
 * @return a string detailing the valid command line options
 * @throws Exception if something goes wrong
 */
private static String makeOptionString(final ASEvaluation ASEvaluator, final ASSearch searchMethod) throws Exception {
  final StringBuilder optionsText = new StringBuilder();
  // General options
  optionsText.append("\n\nGeneral options:\n\n");
  optionsText.append("-h\n\tdisplay this help\n");
  optionsText.append("-i <name of input file>\n");
  optionsText.append("\tSets training file.\n");
  optionsText.append("-c <class index>\n");
  optionsText.append("\tSets the class index for supervised attribute\n");
  optionsText.append("\tselection. Default=last column.\n");
  optionsText.append("-s <class name>\n");
  optionsText.append("\tSets search method for subset evaluators.\n");
  optionsText.append("-x <number of folds>\n");
  optionsText.append("\tPerform a cross validation.\n");
  optionsText.append("-n <random number seed>\n");
  optionsText.append("\tUse in conjunction with -x.\n");
  // attribute evaluator-specific options
  if (ASEvaluator instanceof OptionHandler) {
    appendHandlerOptions(optionsText, (OptionHandler) ASEvaluator);
  }
  // search method-specific options (or a note that none was given)
  if (searchMethod != null) {
    if (searchMethod instanceof OptionHandler) {
      appendHandlerOptions(optionsText, (OptionHandler) searchMethod);
    }
  } else {
    if (ASEvaluator instanceof SubsetEvaluator) {
      System.out.println("No search method given.");
    }
  }
  return optionsText.toString();
}

/**
 * Appends the scheme-specific header and option list of the given option
 * handler to the supplied buffer.
 *
 * @param text the buffer to append to
 * @param handler the option handler whose options should be listed
 */
private static void appendHandlerOptions(final StringBuilder text, final OptionHandler handler) {
  text.append("\nOptions specific to " + handler.getClass().getName() + ":\n\n");
  final Enumeration<Option> enu = handler.listOptions();
  while (enu.hasMoreElements()) {
    final Option option = enu.nextElement();
    text.append(option.synopsis() + '\n');
    text.append(option.description() + "\n");
  }
}
/**
 * Main method for testing this class. The first argument names the
 * attribute/subset evaluator; any remaining arguments are passed on as
 * options. Errors are reported via their message on standard out.
 *
 * @param args the options
 */
public static void main(final String[] args) {
  try {
    if (args.length == 0) {
      throw new Exception("The first argument must be the name of an " + "attribute/subset evaluator");
    }
    final String evaluatorName = args[0];
    // blank out the evaluator name so it is not treated as an option
    args[0] = "";
    final ASEvaluation evaluator = ASEvaluation.forName(evaluatorName, null);
    System.out.println(SelectAttributes(evaluator, args));
  } catch (Exception e) {
    System.out.println(e.getMessage());
  }
}
/**
 * Returns the revision string.
 *
 * @return the revision
 */
@Override
public String getRevision() {
// the $Revision$ keyword is expanded by the version control system
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/AttributeSetEvaluator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* RELEASE INFORMATION (December 27, 2004)
*
* FCBF algorithm:
* Template obtained from Weka
* Developped for Weka by Zheng Alan Zhao
* December 27, 2004
*
* FCBF algorithm is a feature selection method based on Symmetrical Uncertainty
* Measurement for relevance redundancy analysis. The details of FCBF algorithm are
* in L. Yu and H. Liu. Feature selection for high-dimensional data: a fast
* correlation-based filter solution. In Proceedings of the twentieth International
* Conference on Machine Learning, pages 856--863, 2003.
*
*
* CONTACT INFORMATION
*
* For algorithm implementation:
* Zheng Zhao: zhaozheng at asu.edu
*
* For the algorithm:
* Lei Yu: leiyu at asu.edu
* Huan Liu: hliu at asu.edu
*
* Data Mining and Machine Learning Lab
* Computer Science and Engineering Department
* Fulton School of Engineering
* Arizona State University
* Tempe, AZ 85287
*
* AttributeSetEvaluator.java
*
* Copyright (C) 2004 Data Mining and Machine Learning Lab,
* Computer Science and Engineering Department,
* Fulton School of Engineering,
* Arizona State University
*
*/
package weka.attributeSelection;
/**
 * Abstract attribute set evaluator. Extends ASEvaluation with the ability
 * to score an individual attribute as well as a set of attributes against
 * a set of class attributes (used e.g. by the FCBF algorithm).
 *
 * @author Zheng Zhao: zhaozheng at asu.edu
 * @version $Revision$
 */
public abstract class AttributeSetEvaluator extends ASEvaluation {
/** for serialization */
private static final long serialVersionUID = -5744881009422257389L;
// ===============
// Public methods.
// ===============
/**
 * evaluates an individual attribute
 *
 * @param attribute the index of the attribute to be evaluated
 * @return the "merit" of the attribute
 * @exception Exception if the attribute could not be evaluated
 */
public abstract double evaluateAttribute(int attribute) throws Exception;
/**
 * Evaluates a set of attributes against a set of class attributes.
 *
 * @param attributes the indices of the attributes to be evaluated
 * @param classAttributes the indices of the class attributes
 * @return the "merit" of the attribute set
 * @exception Exception if an error occurs
 */
public abstract double evaluateAttribute(int[] attributes, int[] classAttributes)
throws Exception;
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/AttributeTransformer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* AttributeTransformer.java
* Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.attributeSelection;
import weka.core.Instance;
import weka.core.Instances;
/**
 * Interface for attribute transformers: evaluators that transform the
 * dataset into a new attribute space (e.g. principal components).
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision$
 */
public interface AttributeTransformer {
// ===============
// Public methods.
// ===============
/**
 * Returns just the header for the transformed data (ie. an empty
 * set of instances). This is so that AttributeSelection can
 * determine the structure of the transformed data without actually
 * having to get all the transformed data through getTransformedData().
 * @return the header of the transformed data.
 * @exception Exception if the header of the transformed data can't
 * be determined.
 */
Instances transformedHeader() throws Exception;
/**
 * Transform the supplied data set (assumed to be the same format
 * as the training data)
 * @param data the data set to transform
 * @return A set of instances representing the transformed data
 * @exception Exception if the attribute could not be evaluated
 */
Instances transformedData(Instances data) throws Exception;
/**
 * Transforms an instance in the format of the original data to the
 * transformed space
 * @param instance the instance to transform
 * @return a transformed instance
 * @exception Exception if the instance could not be transformed
 */
Instance convertInstance(Instance instance) throws Exception;
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/BestFirst.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* BestFirst.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.attributeSelection;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.BitSet;
import java.util.Enumeration;
import java.util.Hashtable;
import java.util.Vector;

import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.Range;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.SelectedTag;
import weka.core.Tag;
import weka.core.Utils;
/**
<!-- globalinfo-start -->
* BestFirst:<br/>
* <br/>
* Searches the space of attribute subsets by greedy hillclimbing augmented with
* a backtracking facility. Setting the number of consecutive non-improving
* nodes allowed controls the level of backtracking done. Best first may start
* with the empty set of attributes and search forward, or start with the full
* set of attributes and search backward, or start at any point and search in
* both directions (by considering all possible single attribute additions and
* deletions at a given point).<br/>
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -P <start set>
* Specify a starting set of attributes.
* Eg. 1,3,5-7.
* </pre>
*
* <pre>
* -D <0 = backward | 1 = forward | 2 = bi-directional>
* Direction of search. (default = 1).
* </pre>
*
* <pre>
* -N <num>
* Number of non-improving nodes to
* consider before terminating search.
* </pre>
*
* <pre>
* -S <num>
* Size of lookup cache for evaluated subsets.
* Expressed as a multiple of the number of
* attributes in the data set. (default = 1)
* </pre>
*
<!-- options-end -->
*
* @author Mark Hall (mhall@cs.waikato.ac.nz) Martin Guetlein (cashing merit of
* expanded nodes)
* @version $Revision$
*/
public class BestFirst extends ASSearch implements OptionHandler, StartSetHandler {
/** for serialization */
static final long serialVersionUID = 7841338689536821867L;
// Inner classes
/**
 * Class for a node in a linked list. Used in best first search. A node
 * stores an attribute-set descriptor together with the merit of that set.
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 **/
public class Link2 implements Serializable, RevisionHandler {

  /** for serialization */
  static final long serialVersionUID = -8236598311516351420L;

  /** the attribute set specification held by this node */
  Object[] m_data;

  /** the "merit" (evaluation score) of the attribute set */
  double m_merit;

  /**
   * Constructor
   *
   * @param data the attribute set specification
   * @param mer the merit of the attribute set
   */
  public Link2(final Object[] data, final double mer) {
    this.m_data = data;
    this.m_merit = mer;
  }

  /** Get a group */
  public Object[] getData() {
    return this.m_data;
  }

  @Override
  public String toString() {
    // Arrays.toString renders the element values; calling toString() on
    // the Object[] directly would only print the array's identity hash.
    return ("Node: " + Arrays.toString(this.m_data) + " " + this.m_merit);
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
/**
 * Bounded list of search nodes kept sorted by decreasing merit. Used in
 * best first search. Backed by an ArrayList.
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 **/
public class LinkedList2 extends ArrayList<Link2> {

  /** for serialization */
  static final long serialVersionUID = 3250538292330398929L;

  /** Max number of elements in the list */
  int m_MaxSize;

  // ================
  // Public methods
  // ================

  /**
   * Creates an empty list that retains at most the given number of nodes.
   *
   * @param sz the maximum list size
   */
  public LinkedList2(final int sz) {
    super();
    this.m_MaxSize = sz;
  }

  /**
   * removes an element (Link) at a specific index from the list.
   *
   * @param index the index of the element to be removed.
   **/
  public void removeLinkAt(final int index) throws Exception {
    if (index < 0 || index >= this.size()) {
      throw new Exception("index out of range (removeLinkAt)");
    }
    this.remove(index);
  }

  /**
   * returns the element (Link) at a specific index from the list.
   *
   * @param index the index of the element to be returned.
   **/
  public Link2 getLinkAt(final int index) throws Exception {
    if (this.isEmpty()) {
      throw new Exception("List is empty (getLinkAt)");
    }
    if (index < 0 || index >= this.size()) {
      throw new Exception("index out of range (getLinkAt)");
    }
    return this.get(index);
  }

  /**
   * Inserts a node, keeping the list sorted by descending merit and capped
   * at m_MaxSize elements. A node no better than the worst element of a
   * full list is discarded.
   *
   * @param data the attribute set specification
   * @param mer the "merit" of this attribute set
   **/
  public void addToList(final Object[] data, final double mer) throws Exception {
    final Link2 node = new Link2(data, mer);
    if (this.isEmpty()) {
      this.add(node);
      return;
    }
    if (mer > this.get(0).m_merit) {
      // new best node: evict the current worst if the list is full
      if (this.size() == this.m_MaxSize) {
        this.removeLinkAt(this.m_MaxSize - 1);
      }
      this.add(0, node);
      return;
    }
    final int count = this.size();
    if (count == this.m_MaxSize && mer <= this.get(count - 1).m_merit) {
      // full list and the new node is no better than the current worst
      return;
    }
    for (int pos = 0; pos < count; pos++) {
      if (mer > this.get(pos).m_merit) {
        if (count == this.m_MaxSize) {
          this.removeLinkAt(this.m_MaxSize - 1);
        }
        this.add(pos, node);
        return;
      }
      if (pos == count - 1) {
        // worse than everything present but there is still room: append
        this.add(node);
        return;
      }
    }
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
// member variables
/** maximum number of consecutive non-improving (stale) nodes before terminating search */
protected int m_maxStale;
/** 0 == backward search, 1 == forward search, 2 == bidirectional */
protected int m_searchDirection;
/** search direction: backward */
protected static final int SELECTION_BACKWARD = 0;
/** search direction: forward */
protected static final int SELECTION_FORWARD = 1;
/** search direction: bidirectional */
protected static final int SELECTION_BIDIRECTIONAL = 2;
/** search directions */
public static final Tag[] TAGS_SELECTION = { new Tag(SELECTION_BACKWARD, "Backward"), new Tag(SELECTION_FORWARD, "Forward"), new Tag(SELECTION_BIDIRECTIONAL, "Bi-directional"), };
/** holds an array of starting attributes (0-based indices) */
protected int[] m_starting;
/** holds the start set for the search as a Range */
protected Range m_startRange;
/** does the data have a class */
protected boolean m_hasClass;
/** holds the class index */
protected int m_classIndex;
/** number of attributes in the data */
protected int m_numAttribs;
/** total number of subsets evaluated during a search */
protected int m_totalEvals;
/** for debugging (enabled via the hidden -Z flag in setOptions) */
protected boolean m_debug;
/** holds the merit of the best subset found */
protected double m_bestMerit;
/** maximum size of the lookup cache for evaluated subsets, as a multiple of the number of attributes */
protected int m_cacheSize;
/**
 * Returns a string describing this search method
 *
 * @return a description of the search method suitable for displaying in the
 *         explorer/experimenter gui
 */
public String globalInfo() {
  // the description shown in the explorer/experimenter GUI
  final String info = "BestFirst:\n\n" + "Searches the space of attribute subsets by greedy hillclimbing " + "augmented with a backtracking facility. Setting the number of " + "consecutive non-improving nodes allowed controls the level of "
      + "backtracking done. Best first may start with the empty set of " + "attributes and search forward, or start with the full set of " + "attributes and search backward, or start at any point and search "
      + "in both directions (by considering all possible single attribute " + "additions and deletions at a given point).\n";
  return info;
}
/**
 * Constructor. Resets all options to their default values.
 */
public BestFirst() {
this.resetOptions();
}
/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 **/
@Override
public Enumeration<Option> listOptions() {
  final Vector<Option> opts = new Vector<Option>(4);
  opts.add(new Option("\tSpecify a starting set of attributes." + "\n\tEg. 1,3,5-7.", "P", 1, "-P <start set>"));
  opts.add(new Option("\tDirection of search. (default = 1).", "D", 1, "-D <0 = backward | 1 = forward " + "| 2 = bi-directional>"));
  opts.add(new Option("\tNumber of non-improving nodes to" + "\n\tconsider before terminating search.", "N", 1, "-N <num>"));
  opts.add(new Option("\tSize of lookup cache for evaluated subsets." + "\n\tExpressed as a multiple of the number of" + "\n\tattributes in the data set. (default = 1)", "S", 1, "-S <num>"));
  return opts.elements();
}
/**
* Parses a given list of options.
* <p/>
*
<!-- options-start -->
* Valid options are:
* <p/>
*
* <pre>
* -P <start set>
* Specify a starting set of attributes.
* Eg. 1,3,5-7.
* </pre>
*
* <pre>
* -D <0 = backward | 1 = forward | 2 = bi-directional>
* Direction of search. (default = 1).
* </pre>
*
* <pre>
* -N <num>
* Number of non-improving nodes to
* consider before terminating search.
* </pre>
*
* <pre>
* -S <num>
* Size of lookup cache for evaluated subsets.
* Expressed as a multiple of the number of
* attributes in the data set. (default = 1)
* </pre>
*
<!-- options-end -->
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*
**/
@Override
public void setOptions(final String[] options) throws Exception {
  // start from a clean slate; Utils.getOption consumes matched options
  this.resetOptions();

  String value = Utils.getOption('P', options);
  if (value.length() != 0) {
    this.setStartSet(value);
  }

  value = Utils.getOption('D', options);
  if (value.length() != 0) {
    this.setDirection(new SelectedTag(Integer.parseInt(value), TAGS_SELECTION));
  } else {
    // no -D given: default to a forward search
    this.setDirection(new SelectedTag(SELECTION_FORWARD, TAGS_SELECTION));
  }

  value = Utils.getOption('N', options);
  if (value.length() != 0) {
    this.setSearchTermination(Integer.parseInt(value));
  }

  value = Utils.getOption('S', options);
  if (value.length() != 0) {
    this.setLookupCacheSize(Integer.parseInt(value));
  }

  // hidden flag enabling debug output
  this.m_debug = Utils.getFlag('Z', options);
}
/**
 * Set the maximum size of the evaluated subset cache (hashtable). This is
 * expressed as a multiplier for the number of attributes in the data set.
 * (default = 1). Negative values are silently ignored.
 *
 * @param size the maximum size of the hashtable
 */
public void setLookupCacheSize(final int size) {
  if (size < 0) {
    return; // negative multipliers are rejected without error
  }
  this.m_cacheSize = size;
}
/**
 * Return the maximum size of the evaluated subset cache (expressed as a
 * multiplier for the number of attributes in a data set).
 *
 * @return the maximum size of the hashtable.
 */
public int getLookupCacheSize() {
  return m_cacheSize;
}
/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String lookupCacheSizeTipText() {
  final String tip = "Set the maximum size of the lookup cache of evaluated subsets. This is " + "expressed as a multiplier of the number of attributes in the data set. " + "(default = 1).";
  return tip;
}
/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String startSetTipText() {
  final String tip = "Set the start point for the search. This is specified as a comma " + "seperated list off attribute indexes starting at 1. It can include " + "ranges. Eg. 1,2,5-9,17.";
  return tip;
}
/**
 * Sets a starting set of attributes for the search. It is the search method's
 * responsibility to report this start set (if any) in its toString() method.
 *
 * @param startSet a string containing a list of attributes (and or ranges),
 *          eg. 1,2,6,10-15.
 * @throws Exception if start set can't be set.
 */
@Override
public void setStartSet(final String startSet) throws Exception {
  // parsing and validation of the range string is delegated to Range
  this.m_startRange.setRanges(startSet);
}
/**
 * Returns a list of attributes (and or attribute ranges) as a String
 *
 * @return a list of attributes (and or attribute ranges)
 */
@Override
public String getStartSet() {
  return m_startRange.getRanges();
}
/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String searchTerminationTipText() {
  final String tip = "Specify the number of consecutive non-improving nodes to allow " + "before terminating the search.";
  return tip;
}
/**
 * Set the number of non-improving nodes to consider before terminating
 * search.
 *
 * @param t the number of non-improving nodes
 * @throws Exception if t is less than 1
 */
public void setSearchTermination(final int t) throws Exception {
  if (t >= 1) {
    this.m_maxStale = t;
  } else {
    throw new Exception("Value of -N must be > 0.");
  }
}
/**
 * Get the termination criterion (number of non-improving nodes).
 *
 * @return the number of non-improving nodes
 */
public int getSearchTermination() {
  return m_maxStale;
}
/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String directionTipText() {
  final String tip = "Set the direction of the search.";
  return tip;
}
/**
 * Sets the search direction. The tag is only accepted when it belongs to
 * this class's TAGS_SELECTION group (standard Weka SelectedTag idiom).
 *
 * @param d the direction of the search
 */
public void setDirection(final SelectedTag d) {
  if (d.getTags() == TAGS_SELECTION) {
    this.m_searchDirection = d.getSelectedTag().getID();
  }
}
/**
 * Gets the search direction.
 *
 * @return the direction of the search wrapped in a SelectedTag
 */
public SelectedTag getDirection() {
  return new SelectedTag(this.m_searchDirection, TAGS_SELECTION);
}
/**
 * Gets the current settings of BestFirst.
 *
 * @return an array of strings suitable for passing to setOptions()
 */
@Override
public String[] getOptions() {
  Vector<String> options = new Vector<String>();
  // Only emit -P when an explicit start set has been configured.
  if (!this.getStartSet().equals("")) {
    options.add("-P");
    options.add(this.startSetToString());
  }
  options.add("-D");
  options.add(String.valueOf(this.m_searchDirection));
  options.add("-N");
  options.add(String.valueOf(this.m_maxStale));
  return options.toArray(new String[0]);
}
/**
 * Converts the array of starting attributes to a string. This is used by
 * getOptions to return the actual attributes specified as the starting set.
 * This is better than using m_startRanges.getRanges() as the same start set
 * can be specified in different ways from the command line---eg 1,2,3 == 1-3.
 * This is to ensure that stuff that is stored in a database is comparable.
 *
 * @return a comma separated list of individual attribute numbers as a String
 */
private String startSetToString() {
  // No explicit start set resolved yet; fall back to the raw range string.
  if (this.m_starting == null) {
    return this.getStartSet();
  }
  // StringBuilder instead of StringBuffer: no synchronisation needed here.
  StringBuilder result = new StringBuilder();
  for (int i = 0; i < this.m_starting.length; i++) {
    boolean didPrint = false;
    // NOTE(review): this compares the loop index (not m_starting[i]) with the
    // class index, mirroring the original behaviour -- confirm upstream intent.
    if (!this.m_hasClass || i != this.m_classIndex) {
      result.append(this.m_starting[i] + 1);
      didPrint = true;
    }
    // Separate printed entries with commas; the last position gets none
    // (the original appended an empty string there, a no-op now removed).
    if (didPrint && i != (this.m_starting.length - 1)) {
      result.append(",");
    }
  }
  return result.toString();
}
/**
 * Returns a description of the search as a String.
 *
 * @return a description of the search
 */
@Override
public String toString() {
  StringBuilder text = new StringBuilder();
  text.append("\tBest first.\n\tStart set: ");
  if (this.m_starting == null) {
    text.append("no attributes\n");
  } else {
    text.append(this.startSetToString() + "\n");
  }
  text.append("\tSearch direction: ");
  if (this.m_searchDirection == SELECTION_BACKWARD) {
    text.append("backward\n");
  } else if (this.m_searchDirection == SELECTION_FORWARD) {
    text.append("forward\n");
  } else {
    text.append("bi-directional\n");
  }
  text.append("\tStale search after " + this.m_maxStale + " node expansions\n");
  text.append("\tTotal number of subsets evaluated: " + this.m_totalEvals + "\n");
  text.append("\tMerit of best subset found: " + Utils.doubleToString(Math.abs(this.m_bestMerit), 8, 3) + "\n");
  return text.toString();
}
/**
 * Prints (to stdout) the 1-based indexes of the bits set in the given group.
 *
 * @param tt the bitset representing an attribute subset
 * @param numAttribs the number of attribute positions to scan
 */
protected void printGroup(final BitSet tt, final int numAttribs) {
  for (int index = 0; index < numAttribs; index++) {
    if (tt.get(index)) {
      System.out.print((index + 1) + " ");
    }
  }
  System.out.println();
}
/**
 * Searches the attribute subset space by best first search
 *
 * Maintains an open list of candidate subsets ordered by merit and a cache of
 * already-evaluated subsets; expands the best open node in the configured
 * direction(s) until m_maxStale consecutive expansions fail to improve.
 *
 * @param ASEval the attribute evaluator to guide the search
 * @param data the training instances.
 * @return an array (not necessarily ordered) of selected attribute indexes
 * @throws Exception if the search can't be completed
 */
@Override
public int[] search(final ASEvaluation ASEval, final Instances data) throws Exception {
this.m_totalEvals = 0;
// Best-first search requires an evaluator that scores whole subsets.
if (!(ASEval instanceof SubsetEvaluator)) {
throw new Exception(ASEval.getClass().getName() + " is not a " + "Subset evaluator!");
}
if (ASEval instanceof UnsupervisedSubsetEvaluator) {
this.m_hasClass = false;
} else {
this.m_hasClass = true;
this.m_classIndex = data.classIndex();
}
SubsetEvaluator ASEvaluator = (SubsetEvaluator) ASEval;
this.m_numAttribs = data.numAttributes();
int i, j;
int best_size = 0;
int size = 0;
int done;
int sd = this.m_searchDirection;
BitSet best_group, temp_group;
int stale;
double best_merit;
double merit;
boolean z;
boolean added;
Link2 tl;
// Cache of subset -> merit, keyed by the BitSet's string form; rebuilt from
// scratch once it holds more than m_cacheSize * m_numAttribs entries.
Hashtable<String, Double> lookup = new Hashtable<String, Double>(this.m_cacheSize * this.m_numAttribs);
int insertCount = 0;
// Merit-ordered open list, bounded by the stale-search limit.
LinkedList2 bfList = new LinkedList2(this.m_maxStale);
best_merit = -Double.MAX_VALUE;
stale = 0;
best_group = new BitSet(this.m_numAttribs);
this.m_startRange.setUpper(this.m_numAttribs - 1);
if (!(this.getStartSet().equals(""))) {
this.m_starting = this.m_startRange.getSelection();
}
// If a starting subset has been supplied, then initialise the bitset
if (this.m_starting != null) {
for (i = 0; i < this.m_starting.length; i++) {
if ((this.m_starting[i]) != this.m_classIndex) {
best_group.set(this.m_starting[i]);
}
}
best_size = this.m_starting.length;
this.m_totalEvals++;
} else {
// Backward search with no explicit start set begins from the full set.
if (this.m_searchDirection == SELECTION_BACKWARD) {
this.setStartSet("1-last");
this.m_starting = new int[this.m_numAttribs];
// init initial subset to all attributes
for (i = 0, j = 0; i < this.m_numAttribs; i++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
if (i != this.m_classIndex) {
best_group.set(i);
this.m_starting[j++] = i;
}
}
best_size = this.m_numAttribs - 1;
this.m_totalEvals++;
}
}
// evaluate the initial subset
best_merit = ASEvaluator.evaluateSubset(best_group);
// add the initial group to the list and the hash table
Object[] best = new Object[1];
best[0] = best_group.clone();
bfList.addToList(best, best_merit);
BitSet tt = (BitSet) best_group.clone();
String hashC = tt.toString();
lookup.put(hashC, new Double(best_merit));
// Main loop: expand the best open node until m_maxStale consecutive
// expansions produce no improvement.
while (stale < this.m_maxStale) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
added = false;
if (this.m_searchDirection == SELECTION_BIDIRECTIONAL) {
// bi-directional search
done = 2;
sd = SELECTION_FORWARD;
} else {
done = 1;
}
// finished search?
if (bfList.size() == 0) {
stale = this.m_maxStale;
break;
}
// copy the attribute set at the head of the list
tl = bfList.getLinkAt(0);
temp_group = (BitSet) (tl.getData()[0]);
temp_group = (BitSet) temp_group.clone();
// remove the head of the list
bfList.removeLinkAt(0);
// count the number of bits set (attributes)
int kk;
for (kk = 0, size = 0; kk < this.m_numAttribs; kk++) {
if (temp_group.get(kk)) {
size++;
}
}
// Expand once for forward/backward search, twice (one pass per
// direction) for bi-directional search.
do {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
for (i = 0; i < this.m_numAttribs; i++) {
if (sd == SELECTION_FORWARD) {
z = ((i != this.m_classIndex) && (!temp_group.get(i)));
} else {
z = ((i != this.m_classIndex) && (temp_group.get(i)));
}
if (z) {
// set the bit (attribute to add/delete)
if (sd == SELECTION_FORWARD) {
temp_group.set(i);
size++;
} else {
temp_group.clear(i);
size--;
}
/*
* if this subset has been seen before, then it is already in the
* list (or has been fully expanded)
*/
tt = (BitSet) temp_group.clone();
hashC = tt.toString();
if (lookup.containsKey(hashC) == false) {
merit = ASEvaluator.evaluateSubset(temp_group);
this.m_totalEvals++;
// insert this one in the hashtable
if (insertCount > this.m_cacheSize * this.m_numAttribs) {
lookup = new Hashtable<String, Double>(this.m_cacheSize * this.m_numAttribs);
insertCount = 0;
}
hashC = tt.toString();
lookup.put(hashC, new Double(merit));
insertCount++;
} else {
merit = lookup.get(hashC).doubleValue();
}
// insert this one in the list
Object[] add = new Object[1];
add[0] = tt.clone();
bfList.addToList(add, merit);
if (this.m_debug) {
System.out.print("Group: ");
this.printGroup(tt, this.m_numAttribs);
System.out.println("Merit: " + merit);
}
// is this better than the best?
if (sd == SELECTION_FORWARD) {
z = ((merit - best_merit) > 0.00001);
} else {
// Backward direction: on an exact merit tie prefer the smaller subset.
if (merit == best_merit) {
z = (size < best_size);
} else {
z = (merit > best_merit);
}
}
if (z) {
added = true;
stale = 0;
best_merit = merit;
// best_size = (size + best_size);
best_size = size;
best_group = (BitSet) (temp_group.clone());
}
// unset this addition(deletion)
if (sd == SELECTION_FORWARD) {
temp_group.clear(i);
size--;
} else {
temp_group.set(i);
size++;
}
}
}
if (done == 2) {
sd = SELECTION_BACKWARD;
}
done--;
} while (done > 0);
/*
* if we haven't added a new attribute subset then full expansion of this
* node hasen't resulted in anything better
*/
if (!added) {
stale++;
}
}
this.m_bestMerit = best_merit;
return this.attributeList(best_group);
}
/**
 * Resets all search options to their default values: forward search seeded
 * with no start set, terminating after 5 non-improving node expansions.
 */
protected void resetOptions() {
  this.m_maxStale = 5;
  this.m_searchDirection = SELECTION_FORWARD;
  this.m_starting = null;
  this.m_startRange = new Range();
  this.m_classIndex = -1;
  this.m_totalEvals = 0;
  this.m_cacheSize = 1;
  this.m_debug = false;
}
/**
 * Converts a BitSet into a list of attribute indexes.
 *
 * @param group the BitSet to convert
 * @return the indexes of the set bits, in ascending order
 **/
protected int[] attributeList(final BitSet group) {
  // First pass: determine how many attributes are selected.
  int selected = 0;
  for (int i = 0; i < this.m_numAttribs; i++) {
    if (group.get(i)) {
      selected++;
    }
  }
  // Second pass: record the index of each selected attribute.
  int[] list = new int[selected];
  int pos = 0;
  for (int i = 0; i < this.m_numAttribs; i++) {
    if (group.get(i)) {
      list[pos++] = i;
    }
  }
  return list;
}
/**
 * Returns the revision string of this class.
 *
 * @return the revision
 */
@Override
public String getRevision() {
  return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/CfsSubsetEval.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* CfsSubsetEval.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.attributeSelection;
import java.util.BitSet;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.Set;
import java.util.Vector;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicInteger;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.ContingencyTables;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.ThreadSafe;
import weka.core.Utils;
import weka.filters.Filter;
import weka.filters.supervised.attribute.Discretize;
/**
* <!-- globalinfo-start --> CfsSubsetEval :<br/>
* <br/>
* Evaluates the worth of a subset of attributes by considering the individual predictive ability of
* each feature along with the degree of redundancy between them.<br/>
* <br/>
* Subsets of features that are highly correlated with the class while having low intercorrelation
* are preferred.<br/>
* <br/>
* For more information see:<br/>
* <br/>
* M. A. Hall (1998). Correlation-based Feature Subset Selection for Machine Learning. Hamilton, New
* Zealand.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- technical-bibtex-start --> BibTeX:
*
* <pre>
* @phdthesis{Hall1998,
* address = {Hamilton, New Zealand},
* author = {M. A. Hall},
* school = {University of Waikato},
* title = {Correlation-based Feature Subset Selection for Machine Learning},
* year = {1998}
* }
* </pre>
* <p/>
* <!-- technical-bibtex-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -M
* Treat missing values as a separate value.
* </pre>
*
* <pre>
* -L
* Don't include locally predictive attributes.
* </pre>
*
* <pre>
* -Z
* Precompute the full correlation matrix at the outset, rather than compute correlations lazily (as needed) during the search. Use this in conjunction with parallel processing in order to speed up a backward search.
* </pre>
*
* <pre>
* -P <int>
* The size of the thread pool, for example, the number of cores in the CPU. (default 1)
* </pre>
*
* <pre>
* -E <int>
* The number of threads to use, which should be >= size of thread pool. (default 1)
* </pre>
*
* <pre>
* -D
* Output debugging info.
* </pre>
*
* <!-- options-end -->
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
* @see Discretize
*/
public class CfsSubsetEval extends ASEvaluation implements SubsetEvaluator, ThreadSafe, OptionHandler, TechnicalInformationHandler {
/** for serialization */
static final long serialVersionUID = 747878400813276317L;
/** The training instances */
private Instances m_trainInstances;
/** Discretise attributes when class in nominal */
private Discretize m_disTransform;
/** The class index */
private int m_classIndex;
/** Is the class numeric */
private boolean m_isNumeric;
/** Number of attributes in the training data */
private int m_numAttribs;
/** Number of instances in the training data */
private int m_numInstances;
/** Treat missing values as separate values */
private boolean m_missingSeparate;
/** Include locally predictive attributes */
private boolean m_locallyPredictive;
/** Holds the matrix of attribute correlations */
// Lower-triangular; entries of -999 mark correlations not yet computed.
// private Matrix m_corr_matrix;
private float[][] m_corr_matrix;
/** Standard deviations of attributes (when using pearsons correlation) */
private double[] m_std_devs;
/** Threshold for admitting locally predictive features */
private double m_c_Threshold;
/** Output debugging info */
protected boolean m_debug;
/** Number of entries in the correlation matrix */
protected int m_numEntries;
/** Number of correlations actually computed */
protected AtomicInteger m_numFilled;
/** Whether to precompute the full correlation matrix up front (-Z option) */
protected boolean m_preComputeCorrelationMatrix;
/**
 * The number of threads used to compute the correlation matrix. Used when correlation matrix is
 * precomputed
 */
protected int m_numThreads = 1;
/**
 * The size of the thread pool. Usually set equal to the number of CPUs or CPU cores available
 */
protected int m_poolSize = 1;
/** Thread pool */
protected transient ExecutorService m_pool = null;
/**
 * Returns a string describing this attribute evaluator.
 *
 * @return a description of the evaluator suitable for displaying in the explorer/experimenter gui
 */
public String globalInfo() {
  return "CfsSubsetEval :\n\nEvaluates the worth of a subset of attributes " + "by considering the individual predictive ability of each feature " + "along with the degree of redundancy between them.\n\n"
      + "Subsets of features that are highly correlated with the class " + "while having low intercorrelation are preferred.\n\n" + "For more information see:\n\n" + this.getTechnicalInformation().toString();
}
/**
 * Returns a TechnicalInformation object describing the publication this
 * evaluator is based on (Hall's 1998 PhD thesis on CFS).
 *
 * @return the technical information about this class
 */
@Override
public TechnicalInformation getTechnicalInformation() {
  TechnicalInformation result = new TechnicalInformation(Type.PHDTHESIS);
  result.setValue(Field.AUTHOR, "M. A. Hall");
  result.setValue(Field.YEAR, "1998");
  result.setValue(Field.TITLE, "Correlation-based Feature Subset Selection for Machine Learning");
  result.setValue(Field.SCHOOL, "University of Waikato");
  result.setValue(Field.ADDRESS, "Hamilton, New Zealand");
  return result;
}
/**
 * Constructor. Initialises all options to their default values.
 */
public CfsSubsetEval() {
  this.resetOptions();
}
/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options
 **/
@Override
public Enumeration<Option> listOptions() {
  Vector<Option> options = new Vector<>(6);
  options.addElement(new Option("\tTreat missing values as a separate " + "value.", "M", 0, "-M"));
  options.addElement(new Option("\tDon't include locally predictive attributes" + ".", "L", 0, "-L"));
  options.addElement(new Option("\t" + this.preComputeCorrelationMatrixTipText(), "Z", 0, "-Z"));
  options.addElement(new Option("\t" + this.poolSizeTipText() + " (default 1)\n", "P", 1, "-P <int>"));
  options.addElement(new Option("\t" + this.numThreadsTipText() + " (default 1)\n", "E", 1, "-E <int>"));
  options.addElement(new Option("\tOutput debugging info" + ".", "D", 0, "-D"));
  return options.elements();
}
/**
 * Parses and sets a given list of options. Supported options are:
 * -M (treat missing values as a separate value), -L (don't include locally
 * predictive attributes), -Z (precompute the full correlation matrix),
 * -P &lt;int&gt; (thread pool size, default 1), -E &lt;int&gt; (number of
 * threads, default 1) and -D (output debugging info).
 *
 * @param options
 *          the list of options as an array of strings
 * @throws Exception
 *           if an option is not supported
 **/
@Override
public void setOptions(final String[] options) throws Exception {
  this.resetOptions();
  this.setMissingSeparate(Utils.getFlag('M', options));
  this.setLocallyPredictive(!Utils.getFlag('L', options));
  this.setPreComputeCorrelationMatrix(Utils.getFlag('Z', options));
  // Numeric options default to 1 when absent from the option string.
  String poolSize = Utils.getOption('P', options);
  this.setPoolSize(poolSize.length() != 0 ? Integer.parseInt(poolSize) : 1);
  String numThreads = Utils.getOption('E', options);
  this.setNumThreads(numThreads.length() != 0 ? Integer.parseInt(numThreads) : 1);
  this.setDebug(Utils.getFlag('D', options));
}
/**
 * @return a string to describe the option
 */
public String preComputeCorrelationMatrixTipText() {
// Fixed typo in the user-facing text: "conjuction" -> "conjunction".
return "Precompute the full correlation matrix at the outset, " + "rather than compute correlations lazily (as needed) " + "during the search. Use this in conjunction with " + "parallel processing in order to speed up a backward "
+ "search.";
}
/**
 * Sets whether the full correlation matrix should be pre-computed at the
 * outset, instead of individual correlations being computed lazily (as
 * needed) during the search.
 *
 * @param p
 *          true to pre-compute the correlation matrix
 */
public void setPreComputeCorrelationMatrix(final boolean p) {
  this.m_preComputeCorrelationMatrix = p;
}
/**
 * Gets whether the full correlation matrix is pre-computed at the outset,
 * instead of individual correlations being computed lazily during the search.
 *
 * @return true if the correlation matrix is pre-computed
 */
public boolean getPreComputeCorrelationMatrix() {
  return this.m_preComputeCorrelationMatrix;
}
/**
 * @return a string to describe the option
 */
public String numThreadsTipText() {
  return "The number of threads to use, which should be >= size of thread pool.";
}
/**
 * Gets the number of threads used when pre-computing the correlation matrix.
 *
 * @return the number of threads
 */
public int getNumThreads() {
  return this.m_numThreads;
}
/**
 * Sets the number of threads used when pre-computing the correlation matrix.
 *
 * @param nT the number of threads
 */
public void setNumThreads(final int nT) {
  this.m_numThreads = nT;
}
/**
 * @return a string to describe the option
 */
public String poolSizeTipText() {
  return "The size of the thread pool, for example, the number of cores in the CPU.";
}
/**
 * Gets the size of the thread pool.
 *
 * @return the thread pool size
 */
public int getPoolSize() {
  return this.m_poolSize;
}
/**
 * Sets the size of the thread pool.
 *
 * @param nT the thread pool size
 */
public void setPoolSize(final int nT) {
  this.m_poolSize = nT;
}
/**
 * Returns the tip text for this property.
 *
 * @return tip text suitable for displaying in the explorer/experimenter gui
 */
public String locallyPredictiveTipText() {
  return "Identify locally predictive attributes. Iteratively adds " + "attributes with the highest correlation with the class as long " + "as there is not already an attribute in the subset that has a "
      + "higher correlation with the attribute in question";
}
/**
 * Sets whether locally predictive attributes should be included.
 *
 * @param b
 *          true to include locally predictive attributes
 */
public void setLocallyPredictive(final boolean b) {
  this.m_locallyPredictive = b;
}
/**
 * Gets whether locally predictive attributes are included.
 *
 * @return true if locally predictive attributes are to be used
 */
public boolean getLocallyPredictive() {
  return this.m_locallyPredictive;
}
/**
 * Returns the tip text for this property.
 *
 * @return tip text suitable for displaying in the explorer/experimenter gui
 */
public String missingSeparateTipText() {
  return "Treat missing as a separate value. Otherwise, counts for missing " + "values are distributed across other values in proportion to their " + "frequency.";
}
/**
 * Sets whether missing values are treated as a separate value.
 *
 * @param b
 *          true to treat missing as a separate value
 */
public void setMissingSeparate(final boolean b) {
  this.m_missingSeparate = b;
}
/**
 * Gets whether missing values are treated as a separate value.
 *
 * @return true if missing is treated as a separate value
 */
public boolean getMissingSeparate() {
  return this.m_missingSeparate;
}
/**
 * Sets whether debugging info is output.
 *
 * @param d
 *          true to output debugging info
 */
public void setDebug(final boolean d) {
  this.m_debug = d;
}
/**
 * Gets whether debugging info is output.
 *
 * @return true if debugging info is to be output
 */
public boolean getDebug() {
  return this.m_debug;
}
/**
 * Returns the tip text for this property.
 *
 * @return tip text suitable for displaying in the explorer/experimenter gui
 */
public String debugTipText() {
  return "Output debugging info";
}
/**
 * Gets the current settings of CfsSubsetEval.
 *
 * @return an array of strings suitable for passing to setOptions()
 */
@Override
public String[] getOptions() {
  Vector<String> options = new Vector<>();
  if (this.getMissingSeparate()) {
    options.add("-M");
  }
  if (!this.getLocallyPredictive()) {
    options.add("-L");
  }
  if (this.getPreComputeCorrelationMatrix()) {
    options.add("-Z");
  }
  // Pool size and thread count are always emitted.
  options.add("-P");
  options.add(String.valueOf(this.getPoolSize()));
  options.add("-E");
  options.add(String.valueOf(this.getNumThreads()));
  if (this.getDebug()) {
    options.add("-D");
  }
  return options.toArray(new String[0]);
}
/**
 * Returns the capabilities of this evaluator: nominal, numeric and date
 * attributes/classes, with missing values permitted in both.
 *
 * @return the capabilities of this evaluator
 * @see Capabilities
 */
@Override
public Capabilities getCapabilities() {
  Capabilities result = super.getCapabilities();
  result.disableAll();
  // attribute capabilities
  result.enable(Capability.NOMINAL_ATTRIBUTES);
  result.enable(Capability.NUMERIC_ATTRIBUTES);
  result.enable(Capability.DATE_ATTRIBUTES);
  result.enable(Capability.MISSING_VALUES);
  // class capabilities
  result.enable(Capability.NOMINAL_CLASS);
  result.enable(Capability.NUMERIC_CLASS);
  result.enable(Capability.DATE_CLASS);
  result.enable(Capability.MISSING_CLASS_VALUES);
  return result;
}
/**
 * Generates an attribute evaluator. Has to initialize all fields of the evaluator that are not being
 * set via options.
 *
 * CFS also discretises attributes (if necessary) and initializes the correlation matrix.
 *
 * @param data
 * set of instances serving as training data
 * @throws Exception
 * if the evaluator has not been generated successfully
 */
@Override
public void buildEvaluator(final Instances data) throws Exception {
// can evaluator handle data?
this.getCapabilities().testWithFail(data);
this.m_numEntries = 0;
this.m_numFilled = new AtomicInteger();
this.m_trainInstances = new Instances(data);
this.m_trainInstances.deleteWithMissingClass();
this.m_classIndex = this.m_trainInstances.classIndex();
this.m_numAttribs = this.m_trainInstances.numAttributes();
this.m_numInstances = this.m_trainInstances.numInstances();
this.m_isNumeric = this.m_trainInstances.attribute(this.m_classIndex).isNumeric();
// A nominal class means symmetric uncertainty is used later, which needs
// discretised attributes.
if (!this.m_isNumeric) {
this.m_disTransform = new Discretize();
this.m_disTransform.setUseBetterEncoding(true);
this.m_disTransform.setInputFormat(this.m_trainInstances);
this.m_trainInstances = Filter.useFilter(this.m_trainInstances, this.m_disTransform);
if (this.m_debug) {
System.err.println("Finished discretizing input data");
}
}
this.m_std_devs = new double[this.m_numAttribs];
// Allocate a lower-triangular correlation matrix: row i has i + 1 columns.
this.m_corr_matrix = new float[this.m_numAttribs][];
for (int i = 0; i < this.m_numAttribs; i++) {
this.m_corr_matrix[i] = new float[i + 1];
this.m_numEntries += (i + 1);
}
// Exclude the diagonal from the entry count.
this.m_numEntries -= this.m_numAttribs;
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
// Diagonal entries: an attribute correlates perfectly with itself.
for (int i = 0; i < this.m_corr_matrix.length; i++) {
this.m_corr_matrix[i][i] = 1.0f;
this.m_std_devs[i] = 1.0;
}
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
// Off-diagonal entries start at the -999 sentinel ("not yet computed").
for (int i = 0; i < this.m_numAttribs; i++) {
for (int j = 0; j < this.m_corr_matrix[i].length - 1; j++) {
this.m_corr_matrix[i][j] = -999;
}
}
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
// Optionally fill the whole matrix now, splitting the entries into
// contiguous ranges handed to a fixed-size thread pool.
if (this.m_preComputeCorrelationMatrix && this.m_poolSize > 1) {
this.m_pool = Executors.newFixedThreadPool(this.m_poolSize);
Set<Future<Void>> results = new HashSet<>();
int numEntriesPerThread = (this.m_numEntries + this.m_numAttribs) / this.m_numThreads;
numEntriesPerThread = numEntriesPerThread < 1 ? 1 : numEntriesPerThread;
int startRow = 0;
int startCol = 0;
int count = 0;
for (int i = 0; i < this.m_corr_matrix.length; i++) {
for (int j = 0; j < this.m_corr_matrix[i].length; j++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
count++;
// Submit a task once a full chunk is accumulated, or at the last entry.
if (count == numEntriesPerThread || (i == this.m_corr_matrix.length - 1 && j == this.m_corr_matrix[i].length - 1)) {
final int sR = startRow;
final int sC = startCol;
final int eR = i;
final int eC = j;
startRow = i;
startCol = j;
count = 0;
Future<Void> future = this.m_pool.submit(new Callable<Void>() {
@Override
public Void call() throws Exception {
if (CfsSubsetEval.this.m_debug) {
System.err.println("Starting correlation computation task...");
}
// Fill the assigned (sR,sC)..(eR,eC) range of the triangular matrix.
for (int i = sR; i <= eR; i++) {
for (int j = (i == sR ? sC : 0); j < (i == eR ? eC : CfsSubsetEval.this.m_corr_matrix[i].length); j++) {
if (CfsSubsetEval.this.m_corr_matrix[i][j] == -999) {
float corr = CfsSubsetEval.this.correlate(i, j);
CfsSubsetEval.this.m_corr_matrix[i][j] = corr;
}
}
}
if (CfsSubsetEval.this.m_debug) {
System.err.println("Percentage of correlation matrix computed: " + Utils.doubleToString(((double) CfsSubsetEval.this.m_numFilled.get() / CfsSubsetEval.this.m_numEntries * 100.0), 2) + "%");
}
return null;
}
});
results.add(future);
}
}
}
// Wait for all tasks to finish; get() propagates any task exception.
for (Future<Void> f : results) {
f.get();
}
// shut down the thread pool
this.m_pool.shutdown();
}
}
/**
 * evaluates a subset of attributes
 *
 * Computes the CFS merit num / sqrt(denom), where num accumulates the
 * (weighted) attribute-class correlations of the subset's members and denom
 * their pairwise inter-attribute correlations. Correlations are looked up in
 * the triangular matrix and computed lazily where still marked -999.
 *
 * @param subset
 * a bitset representing the attribute subset to be evaluated
 * @return the merit
 * @throws Exception
 * if the subset could not be evaluated
 */
@Override
public double evaluateSubset(final BitSet subset) throws Exception {
double num = 0.0;
double denom = 0.0;
float corr;
int larger, smaller;
// do numerator
for (int i = 0; i < this.m_numAttribs; i++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
if (i != this.m_classIndex) {
if (subset.get(i)) {
// The matrix is lower-triangular: always index with [larger][smaller].
if (i > this.m_classIndex) {
larger = i;
smaller = this.m_classIndex;
} else {
smaller = i;
larger = this.m_classIndex;
}
/*
* int larger = (i > m_classIndex ? i : m_classIndex); int smaller = (i > m_classIndex ?
* m_classIndex : i);
*/
// -999 marks a correlation not yet computed; fill it lazily.
if (this.m_corr_matrix[larger][smaller] == -999) {
corr = this.correlate(i, this.m_classIndex);
this.m_corr_matrix[larger][smaller] = corr;
num += (this.m_std_devs[i] * corr);
} else {
num += (this.m_std_devs[i] * this.m_corr_matrix[larger][smaller]);
}
}
}
}
// do denominator
for (int i = 0; i < this.m_numAttribs; i++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
if (i != this.m_classIndex) {
if (subset.get(i)) {
denom += (1.0 * this.m_std_devs[i] * this.m_std_devs[i]);
for (int j = 0; j < this.m_corr_matrix[i].length - 1; j++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
if (subset.get(j)) {
if (this.m_corr_matrix[i][j] == -999) {
corr = this.correlate(i, j);
this.m_corr_matrix[i][j] = corr;
denom += (2.0 * this.m_std_devs[i] * this.m_std_devs[j] * corr);
} else {
denom += (2.0 * this.m_std_devs[i] * this.m_std_devs[j] * this.m_corr_matrix[i][j]);
}
}
}
}
}
}
// Guard against a negative or zero denominator before taking the root.
if (denom < 0.0) {
denom *= -1.0;
}
if (denom == 0.0) {
return (0.0);
}
double merit = (num / Math.sqrt(denom));
if (merit < 0.0) {
merit *= -1.0;
}
return merit;
}
/**
 * Computes the correlation between two attributes, dispatching to the
 * measure appropriate for the class type and the two attribute types.
 *
 * @param att1 index of the first attribute
 * @param att2 index of the second attribute
 * @return the correlation between att1 and att2
 * @throws InterruptedException if the thread is interrupted
 */
private float correlate(final int att1, final int att2) throws InterruptedException {
  this.m_numFilled.incrementAndGet();
  // Nominal class: attributes were discretised, use symmetrical uncertainty.
  if (!this.m_isNumeric) {
    return (float) this.symmUncertCorr(att1, att2);
  }
  final boolean att1Numeric = this.m_trainInstances.attribute(att1).isNumeric();
  final boolean att2Numeric = this.m_trainInstances.attribute(att2).isNumeric();
  if (att1Numeric && att2Numeric) {
    return (float) this.num_num(att1, att2);
  }
  // Mixed types: num_nom2 expects the nominal attribute first.
  if (att2Numeric) {
    return (float) this.num_nom2(att1, att2);
  }
  if (att1Numeric) {
    return (float) this.num_nom2(att2, att1);
  }
  return (float) this.nom_nom(att1, att2);
}
/**
 * Computes the symmetrical uncertainty between two (discretised) attributes
 * from their contingency table, optionally redistributing missing-value
 * counts across the observed values in proportion to their frequency.
 *
 * @param att1 index of the first attribute
 * @param att2 index of the second attribute
 * @return the symmetrical uncertainty correlation measure
 * @throws InterruptedException if the thread is interrupted
 */
private double symmUncertCorr(final int att1, final int att2) throws InterruptedException {
int i, j, ii, jj;
int ni, nj;
double sum = 0.0;
double sumi[], sumj[];
double counts[][];
Instance inst;
double corr_measure;
boolean flag = false;
double temp = 0.0;
// Remember whether the class is involved -- affects the zero-measure result below.
if (att1 == this.m_classIndex || att2 == this.m_classIndex) {
flag = true;
}
// One extra row/column collects counts for missing values.
ni = this.m_trainInstances.attribute(att1).numValues() + 1;
nj = this.m_trainInstances.attribute(att2).numValues() + 1;
counts = new double[ni][nj];
sumi = new double[ni];
sumj = new double[nj];
for (i = 0; i < ni; i++) {
sumi[i] = 0.0;
for (j = 0; j < nj; j++) {
sumj[j] = 0.0;
counts[i][j] = 0.0;
}
}
// Fill the contingency table
for (i = 0; i < this.m_numInstances; i++) {
// XXX kill weka execution
if (Thread.interrupted()) {
throw new InterruptedException("Thread got interrupted, thus, kill WEKA.");
}
inst = this.m_trainInstances.instance(i);
if (inst.isMissing(att1)) {
ii = ni - 1;
} else {
ii = (int) inst.value(att1);
}
if (inst.isMissing(att2)) {
jj = nj - 1;
} else {
jj = (int) inst.value(att2);
}
counts[ii][jj]++;
}
// get the row totals
for (i = 0; i < ni; i++) {
sumi[i] = 0.0;
for (j = 0; j < nj; j++) {
sumi[i] += counts[i][j];
sum += counts[i][j];
}
}
// get the column totals
for (j = 0; j < nj; j++) {
sumj[j] = 0.0;
for (i = 0; i < ni; i++) {
sumj[j] += counts[i][j];
}
}
// distribute missing counts
// (only when missing is not a separate value and not every value is missing)
if (!this.m_missingSeparate && (sumi[ni - 1] < this.m_numInstances) && (sumj[nj - 1] < this.m_numInstances)) {
double[] i_copy = new double[sumi.length];
double[] j_copy = new double[sumj.length];
double[][] counts_copy = new double[sumi.length][sumj.length];
for (i = 0; i < ni; i++) {
System.arraycopy(counts[i], 0, counts_copy[i], 0, sumj.length);
}
System.arraycopy(sumi, 0, i_copy, 0, sumi.length);
System.arraycopy(sumj, 0, j_copy, 0, sumj.length);
double total_missing = (sumi[ni - 1] + sumj[nj - 1] - counts[ni - 1][nj - 1]);
// do the missing i's
if (sumi[ni - 1] > 0.0) {
for (j = 0; j < nj - 1; j++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
if (counts[ni - 1][j] > 0.0) {
for (i = 0; i < ni - 1; i++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
// Redistribute proportionally to the observed (non-missing) row totals.
temp = ((i_copy[i] / (sum - i_copy[ni - 1])) * counts[ni - 1][j]);
counts[i][j] += temp;
sumi[i] += temp;
}
counts[ni - 1][j] = 0.0;
}
}
}
sumi[ni - 1] = 0.0;
// do the missing j's
if (sumj[nj - 1] > 0.0) {
for (i = 0; i < ni - 1; i++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
if (counts[i][nj - 1] > 0.0) {
for (j = 0; j < nj - 1; j++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
temp = ((j_copy[j] / (sum - j_copy[nj - 1])) * counts[i][nj - 1]);
counts[i][j] += temp;
sumj[j] += temp;
}
counts[i][nj - 1] = 0.0;
}
}
}
sumj[nj - 1] = 0.0;
// do the both missing
if (counts[ni - 1][nj - 1] > 0.0 && total_missing != sum) {
for (i = 0; i < ni - 1; i++) {
for (j = 0; j < nj - 1; j++) {
temp = (counts_copy[i][j] / (sum - total_missing)) * counts_copy[ni - 1][nj - 1];
counts[i][j] += temp;
sumi[i] += temp;
sumj[j] += temp;
}
}
counts[ni - 1][nj - 1] = 0.0;
}
}
corr_measure = ContingencyTables.symmetricalUncertainty(counts);
// A zero measure only means "uncorrelated" against the class; between two
// ordinary attributes it is treated as perfect correlation (1.0) instead.
if (Utils.eq(corr_measure, 0.0)) {
if (flag == true) {
return (0.0);
} else {
return (1.0);
}
} else {
return (corr_measure);
}
}
/**
 * Computes the absolute Pearson correlation between two numeric attributes.
 * An instance that is missing a value contributes a zero deviation for that
 * attribute. Side effect: caches the standard deviation of each attribute in
 * m_std_devs, but only if the slot still holds 1.0 (presumably the
 * initialisation value -- confirm where m_std_devs is initialised).
 *
 * @param att1 index of the first numeric attribute
 * @param att2 index of the second numeric attribute
 * @return the absolute correlation |r|; if either attribute has zero
 *         variance, 1.0 when neither attribute is the class (maximally bad),
 *         otherwise 0.0
 * @throws InterruptedException if the executing thread has been interrupted
 */
private double num_num(final int att1, final int att2) throws InterruptedException {
int i;
Instance inst;
double r, diff1, diff2, num = 0.0, sx = 0.0, sy = 0.0;
double mx = this.m_trainInstances.meanOrMode(this.m_trainInstances.attribute(att1));
double my = this.m_trainInstances.meanOrMode(this.m_trainInstances.attribute(att2));
for (i = 0; i < this.m_numInstances; i++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
inst = this.m_trainInstances.instance(i);
diff1 = (inst.isMissing(att1)) ? 0.0 : (inst.value(att1) - mx);
diff2 = (inst.isMissing(att2)) ? 0.0 : (inst.value(att2) - my);
// covariance numerator and the two sums of squared deviations
num += (diff1 * diff2);
sx += (diff1 * diff1);
sy += (diff2 * diff2);
}
// lazily cache the standard deviations for later use
if (sx != 0.0) {
if (this.m_std_devs[att1] == 1.0) {
this.m_std_devs[att1] = Math.sqrt((sx / this.m_numInstances));
}
}
if (sy != 0.0) {
if (this.m_std_devs[att2] == 1.0) {
this.m_std_devs[att2] = Math.sqrt((sy / this.m_numInstances));
}
}
if ((sx * sy) > 0.0) {
r = (num / (Math.sqrt(sx * sy)));
return ((r < 0.0) ? -r : r);
} else {
// zero variance: treat as maximally correlated (bad) unless the class
// attribute is involved, in which case treat as uncorrelated
if (att1 != this.m_classIndex && att2 != this.m_classIndex) {
return 1.0;
} else {
return 0.0;
}
}
}
/**
 * Computes a weighted correlation between a nominal attribute (att1) and a
 * numeric attribute (att2). Each value of the nominal attribute is treated
 * as a 0/1 indicator variable; the absolute correlation between each
 * indicator and the numeric attribute is weighted by the prior frequency of
 * that nominal value and summed. Missing nominal values are either mapped to
 * the modal value or, when m_missingSeparate is set, to an extra value.
 * Side effect: lazily fills m_std_devs for both attributes (only slots still
 * holding 1.0 are written).
 *
 * @param att1 index of the nominal attribute
 * @param att2 index of the numeric attribute
 * @return the weighted correlation; 1.0 is substituted when the result is
 *         zero and neither attribute is the class
 * @throws InterruptedException if the executing thread has been interrupted
 */
private double num_nom2(final int att1, final int att2) throws InterruptedException {
int i, ii, k;
double temp;
Instance inst;
int mx = (int) this.m_trainInstances.meanOrMode(this.m_trainInstances.attribute(att1));
double my = this.m_trainInstances.meanOrMode(this.m_trainInstances.attribute(att2));
double stdv_num = 0.0;
double diff1, diff2;
double r = 0.0, rr;
int nx = (!this.m_missingSeparate) ? this.m_trainInstances.attribute(att1).numValues() : this.m_trainInstances.attribute(att1).numValues() + 1;
double[] prior_nom = new double[nx];
double[] stdvs_nom = new double[nx];
double[] covs = new double[nx];
for (i = 0; i < nx; i++) {
stdvs_nom[i] = covs[i] = prior_nom[i] = 0.0;
}
// calculate frequencies (and means) of the values of the nominal
// attribute
for (i = 0; i < this.m_numInstances; i++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
inst = this.m_trainInstances.instance(i);
if (inst.isMissing(att1)) {
if (!this.m_missingSeparate) {
ii = mx;
} else {
ii = nx - 1;
}
} else {
ii = (int) inst.value(att1);
}
// increment freq for nominal
prior_nom[ii]++;
}
// accumulate, per nominal value, the indicator-variable deviations,
// their squares, and their covariance with the numeric deviations
for (k = 0; k < this.m_numInstances; k++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
inst = this.m_trainInstances.instance(k);
// std dev of numeric attribute
diff2 = (inst.isMissing(att2)) ? 0.0 : (inst.value(att2) - my);
stdv_num += (diff2 * diff2);
//
for (i = 0; i < nx; i++) {
if (inst.isMissing(att1)) {
if (!this.m_missingSeparate) {
temp = (i == mx) ? 1.0 : 0.0;
} else {
temp = (i == (nx - 1)) ? 1.0 : 0.0;
}
} else {
temp = (i == inst.value(att1)) ? 1.0 : 0.0;
}
diff1 = (temp - (prior_nom[i] / this.m_numInstances));
stdvs_nom[i] += (diff1 * diff1);
covs[i] += (diff1 * diff2);
}
}
// calculate weighted correlation
for (i = 0, temp = 0.0; i < nx; i++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
// calculate the weighted variance of the nominal
temp += ((prior_nom[i] / this.m_numInstances) * (stdvs_nom[i] / this.m_numInstances));
if ((stdvs_nom[i] * stdv_num) > 0.0) {
// System.out.println("Stdv :"+stdvs_nom[i]);
rr = (covs[i] / (Math.sqrt(stdvs_nom[i] * stdv_num)));
if (rr < 0.0) {
rr = -rr;
}
r += ((prior_nom[i] / this.m_numInstances) * rr);
}
/*
* if there is zero variance for the numeric att at a specific level of the categorical att then if
* neither is the class then make this correlation at this level maximally bad i.e. 1.0. If either
* is the class then maximally bad correlation is 0.0
*/
else {
if (att1 != this.m_classIndex && att2 != this.m_classIndex) {
r += ((prior_nom[i] / this.m_numInstances) * 1.0);
}
}
}
// set the standard deviations for these attributes if necessary
// if ((att1 != classIndex) && (att2 != classIndex)) // =============
if (temp != 0.0) {
if (this.m_std_devs[att1] == 1.0) {
this.m_std_devs[att1] = Math.sqrt(temp);
}
}
if (stdv_num != 0.0) {
if (this.m_std_devs[att2] == 1.0) {
this.m_std_devs[att2] = Math.sqrt((stdv_num / this.m_numInstances));
}
}
if (r == 0.0) {
if (att1 != this.m_classIndex && att2 != this.m_classIndex) {
r = 1.0;
}
}
return r;
}
/**
 * Computes a weighted correlation between two nominal attributes. Every
 * (value-of-att1, value-of-att2) pair is treated as a pair of 0/1 indicator
 * variables; the absolute correlation between the two indicators is weighted
 * by the joint prior frequency of that value pair and summed. Missing values
 * map to the modal value, or to an extra value when m_missingSeparate is
 * set. Side effect: lazily fills m_std_devs for both attributes (only slots
 * still holding 1.0 are written).
 *
 * @param att1 index of the first nominal attribute
 * @param att2 index of the second nominal attribute
 * @return the weighted correlation; 1.0 is substituted when the result is
 *         zero and neither attribute is the class
 * @throws InterruptedException if the executing thread has been interrupted
 */
private double nom_nom(final int att1, final int att2) throws InterruptedException {
int i, j, ii, jj, z;
double temp1, temp2;
Instance inst;
int mx = (int) this.m_trainInstances.meanOrMode(this.m_trainInstances.attribute(att1));
int my = (int) this.m_trainInstances.meanOrMode(this.m_trainInstances.attribute(att2));
double diff1, diff2;
double r = 0.0, rr;
int nx = (!this.m_missingSeparate) ? this.m_trainInstances.attribute(att1).numValues() : this.m_trainInstances.attribute(att1).numValues() + 1;
int ny = (!this.m_missingSeparate) ? this.m_trainInstances.attribute(att2).numValues() : this.m_trainInstances.attribute(att2).numValues() + 1;
double[][] prior_nom = new double[nx][ny];
double[] sumx = new double[nx];
double[] sumy = new double[ny];
double[] stdvsx = new double[nx];
double[] stdvsy = new double[ny];
double[][] covs = new double[nx][ny];
for (i = 0; i < nx; i++) {
sumx[i] = stdvsx[i] = 0.0;
}
for (j = 0; j < ny; j++) {
sumy[j] = stdvsy[j] = 0.0;
}
for (i = 0; i < nx; i++) {
for (j = 0; j < ny; j++) {
covs[i][j] = prior_nom[i][j] = 0.0;
}
}
// calculate frequencies (and means) of the values of the nominal
// attribute
for (i = 0; i < this.m_numInstances; i++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
inst = this.m_trainInstances.instance(i);
if (inst.isMissing(att1)) {
if (!this.m_missingSeparate) {
ii = mx;
} else {
ii = nx - 1;
}
} else {
ii = (int) inst.value(att1);
}
if (inst.isMissing(att2)) {
if (!this.m_missingSeparate) {
jj = my;
} else {
jj = ny - 1;
}
} else {
jj = (int) inst.value(att2);
}
// increment freq for nominal
prior_nom[ii][jj]++;
sumx[ii]++;
sumy[jj]++;
}
// accumulate indicator-variable deviations, their squares, and the
// pairwise covariances between the indicators of the two attributes
for (z = 0; z < this.m_numInstances; z++) {
inst = this.m_trainInstances.instance(z);
for (j = 0; j < ny; j++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
if (inst.isMissing(att2)) {
if (!this.m_missingSeparate) {
temp2 = (j == my) ? 1.0 : 0.0;
} else {
temp2 = (j == (ny - 1)) ? 1.0 : 0.0;
}
} else {
temp2 = (j == inst.value(att2)) ? 1.0 : 0.0;
}
diff2 = (temp2 - (sumy[j] / this.m_numInstances));
stdvsy[j] += (diff2 * diff2);
}
//
for (i = 0; i < nx; i++) {
if (inst.isMissing(att1)) {
if (!this.m_missingSeparate) {
temp1 = (i == mx) ? 1.0 : 0.0;
} else {
temp1 = (i == (nx - 1)) ? 1.0 : 0.0;
}
} else {
temp1 = (i == inst.value(att1)) ? 1.0 : 0.0;
}
diff1 = (temp1 - (sumx[i] / this.m_numInstances));
stdvsx[i] += (diff1 * diff1);
for (j = 0; j < ny; j++) {
if (inst.isMissing(att2)) {
if (!this.m_missingSeparate) {
temp2 = (j == my) ? 1.0 : 0.0;
} else {
temp2 = (j == (ny - 1)) ? 1.0 : 0.0;
}
} else {
temp2 = (j == inst.value(att2)) ? 1.0 : 0.0;
}
diff2 = (temp2 - (sumy[j] / this.m_numInstances));
covs[i][j] += (diff1 * diff2);
}
}
}
// calculate weighted correlation
for (i = 0; i < nx; i++) {
for (j = 0; j < ny; j++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
if ((stdvsx[i] * stdvsy[j]) > 0.0) {
// System.out.println("Stdv :"+stdvs_nom[i]);
rr = (covs[i][j] / (Math.sqrt(stdvsx[i] * stdvsy[j])));
if (rr < 0.0) {
rr = -rr;
}
r += ((prior_nom[i][j] / this.m_numInstances) * rr);
}
// if there is zero variance for either of the categorical atts then if
// neither is the class then make this
// correlation at this level maximally bad i.e. 1.0. If either is
// the class then maximally bad correlation is 0.0
else {
if (att1 != this.m_classIndex && att2 != this.m_classIndex) {
r += ((prior_nom[i][j] / this.m_numInstances) * 1.0);
}
}
}
}
// calculate weighted standard deviations for these attributes
// (if necessary)
for (i = 0, temp1 = 0.0; i < nx; i++) {
temp1 += ((sumx[i] / this.m_numInstances) * (stdvsx[i] / this.m_numInstances));
}
if (temp1 != 0.0) {
if (this.m_std_devs[att1] == 1.0) {
this.m_std_devs[att1] = Math.sqrt(temp1);
}
}
for (j = 0, temp2 = 0.0; j < ny; j++) {
temp2 += ((sumy[j] / this.m_numInstances) * (stdvsy[j] / this.m_numInstances));
}
if (temp2 != 0.0) {
if (this.m_std_devs[att2] == 1.0) {
this.m_std_devs[att2] = Math.sqrt(temp2);
}
}
if (r == 0.0) {
if (att1 != this.m_classIndex && att2 != this.m_classIndex) {
r = 1.0;
}
}
return r;
}
/**
* returns a string describing CFS
*
* @return the description as a string
*/
@Override
public String toString() {
  // StringBuilder instead of the legacy StringBuffer: the buffer is a
  // method-local used by a single thread, so StringBuffer's per-call
  // synchronization bought nothing. Output is byte-for-byte identical.
  StringBuilder text = new StringBuilder();
  if (this.m_trainInstances == null) {
    text.append("CFS subset evaluator has not been built yet\n");
  } else {
    text.append("\tCFS Subset Evaluator\n");
    if (this.m_missingSeparate) {
      text.append("\tTreating missing values as a separate value\n");
    }
    if (this.m_locallyPredictive) {
      text.append("\tIncluding locally predictive attributes\n");
    }
  }
  return text.toString();
}
/**
 * Grows the supplied attribute set with "locally predictive" attributes.
 * Repeatedly picks the not-yet-considered attribute with the highest
 * correlation to the class and adds it to best_group, unless its correlation
 * with some attribute already in best_group exceeds that class correlation
 * minus m_c_Threshold. Correlations are read from m_corr_matrix and computed
 * lazily via correlate() (-999 marks a not-yet-computed entry). A rejected
 * candidate is still marked as considered (in temp_group), so the loop
 * terminates once every attribute has been examined.
 *
 * @param best_group the attribute set to extend in place
 * @throws InterruptedException if the executing thread has been interrupted
 */
private void addLocallyPredictive(final BitSet best_group) throws InterruptedException {
int i, j;
boolean done = false;
boolean ok = true;
double temp_best = -1.0;
float corr;
j = 0;
BitSet temp_group = (BitSet) best_group.clone();
int larger, smaller;
while (!done) {
temp_best = -1.0;
// find best not already in group
for (i = 0; i < this.m_numAttribs; i++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
// m_corr_matrix is lower-triangular: index [larger][smaller]
if (i > this.m_classIndex) {
larger = i;
smaller = this.m_classIndex;
} else {
smaller = i;
larger = this.m_classIndex;
}
/*
* int larger = (i > m_classIndex ? i : m_classIndex); int smaller = (i > m_classIndex ?
* m_classIndex : i);
*/
if ((!temp_group.get(i)) && (i != this.m_classIndex)) {
if (this.m_corr_matrix[larger][smaller] == -999) {
corr = this.correlate(i, this.m_classIndex);
this.m_corr_matrix[larger][smaller] = corr;
}
if (this.m_corr_matrix[larger][smaller] > temp_best) {
temp_best = this.m_corr_matrix[larger][smaller];
j = i;
}
}
}
if (temp_best == -1.0) {
done = true;
} else {
ok = true;
// mark as considered whether or not it ends up accepted
temp_group.set(j);
// check the best against correlations with others already
// in group
for (i = 0; i < this.m_numAttribs; i++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
if (i > j) {
larger = i;
smaller = j;
} else {
larger = j;
smaller = i;
}
/*
* int larger = (i > j ? i : j); int smaller = (i > j ? j : i);
*/
if (best_group.get(i)) {
if (this.m_corr_matrix[larger][smaller] == -999) {
corr = this.correlate(i, j);
this.m_corr_matrix[larger][smaller] = corr;
}
if (this.m_corr_matrix[larger][smaller] > temp_best - this.m_c_Threshold) {
ok = false;
break;
}
}
}
// if ok then add to best_group
if (ok) {
best_group.set(j);
}
}
}
}
/**
* Calls locallyPredictive in order to include locally predictive attributes (if requested).
*
* @param attributeSet
* the set of attributes found by the search
* @return a possibly ranked list of postprocessed attributes
* @throws Exception
* if postprocessing fails for some reason
*/
@Override
public int[] postProcess(final int[] attributeSet) throws Exception {
  if (this.m_debug) {
    System.err.println("Percentage of correlation matrix computed " + "over the search: " + Utils.doubleToString(((double) this.m_numFilled.get() / this.m_numEntries * 100.0), 2) + "%");
  }
  if (!this.m_locallyPredictive) {
    return attributeSet;
  }
  BitSet bestGroup = new BitSet(this.m_numAttribs);
  for (int element : attributeSet) {
    // XXX thread interrupted; throw exception
    if (Thread.interrupted()) {
      throw new InterruptedException("Killed WEKA");
    }
    bestGroup.set(element);
  }
  this.addLocallyPredictive(bestGroup);
  // XXX thread interrupted; throw exception
  if (Thread.interrupted()) {
    throw new InterruptedException("Killed WEKA");
  }
  // BitSet.cardinality() replaces the former manual counting loop, and
  // nextSetBit() walks only the set bits instead of scanning every index.
  int[] newSet = new int[bestGroup.cardinality()];
  int j = 0;
  for (int i = bestGroup.nextSetBit(0); i >= 0; i = bestGroup.nextSetBit(i + 1)) {
    newSet[j++] = i;
  }
  return newSet;
}
@Override
public void clean() {
  // Nothing to release if the evaluator was never built.
  if (this.m_trainInstances == null) {
    return;
  }
  // Free memory by replacing the training data with an empty (capacity 0)
  // copy of itself.
  this.m_trainInstances = new Instances(this.m_trainInstances, 0);
}
/**
 * Resets all options to their default values: no training data, missing
 * values folded into existing values, locally predictive attributes
 * included, and a zero correlation threshold.
 */
protected void resetOptions() {
this.m_trainInstances = null;
this.m_missingSeparate = false;
this.m_locallyPredictive = true;
this.m_c_Threshold = 0.0;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
// the $Revision$ keyword is expanded by the version-control system
return RevisionUtils.extract("$Revision$");
}
/**
* Main method for testing this class.
*
* @param args
* the options
*/
public static void main(final String[] args) {
// delegate option parsing and evaluation to the ASEvaluation runner
runEvaluator(new CfsSubsetEval(), args);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/CheckAttributeSelection.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* CheckAttributeSelection.java
* Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.attributeSelection;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;
import weka.core.Attribute;
import weka.core.CheckScheme;
import weka.core.Instances;
import weka.core.MultiInstanceCapabilitiesHandler;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.SerializationHelper;
import weka.core.SerializedObject;
import weka.core.TestInstances;
import weka.core.Utils;
import weka.core.WeightedInstancesHandler;
/**
* Class for examining the capabilities and finding problems with attribute
* selection schemes. If you implement an attribute selection using the
 * WEKA libraries, you should run the checks on it to ensure robustness and
* correct operation. Passing all the tests of this object does not mean bugs in
* the attribute selection don't exist, but this will help find some common
* ones.
* <p/>
*
* Typical usage:
* <p/>
* <code>java weka.attributeSelection.CheckAttributeSelection -W ASscheme_name
* -- ASscheme_options </code>
* <p/>
*
* CheckAttributeSelection reports on the following:
* <ul>
* <li>Scheme abilities
* <ul>
* <li>Possible command line options to the scheme</li>
* <li>Whether the scheme can predict nominal, numeric, string, date or
* relational class attributes.</li>
* <li>Whether the scheme can handle numeric predictor attributes</li>
* <li>Whether the scheme can handle nominal predictor attributes</li>
* <li>Whether the scheme can handle string predictor attributes</li>
* <li>Whether the scheme can handle date predictor attributes</li>
* <li>Whether the scheme can handle relational predictor attributes</li>
* <li>Whether the scheme can handle multi-instance data</li>
* <li>Whether the scheme can handle missing predictor values</li>
* <li>Whether the scheme can handle missing class values</li>
* <li>Whether a nominal scheme only handles 2 class problems</li>
* <li>Whether the scheme can handle instance weights</li>
* </ul>
* </li>
* <li>Correct functioning
* <ul>
* <li>Correct initialisation during search (i.e. no result changes when search
* is performed repeatedly)</li>
 * <li>Whether the scheme alters the data passed to it (number of instances,
* instance order, instance weights, etc)</li>
* </ul>
* </li>
* <li>Degenerate cases
* <ul>
* <li>building scheme with zero instances</li>
* <li>all but one predictor attribute values missing</li>
* <li>all predictor attribute values missing</li>
* <li>all but one class values missing</li>
* <li>all class values missing</li>
* </ul>
* </li>
* </ul>
* Running CheckAttributeSelection with the debug option set will output the
* training dataset for any failed tests.
* <p/>
*
* The <code>weka.attributeSelection.AbstractAttributeSelectionTest</code> uses
* this class to test all the schemes. Any changes here, have to be checked in
* that abstract test class, too.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -D
* Turn on debugging output.
* </pre>
*
* <pre>
* -S
* Silent mode - prints nothing to stdout.
* </pre>
*
* <pre>
* -N <num>
* The number of instances in the datasets (default 20).
* </pre>
*
* <pre>
* -nominal <num>
* The number of nominal attributes (default 2).
* </pre>
*
* <pre>
* -nominal-values <num>
* The number of values for nominal attributes (default 1).
* </pre>
*
* <pre>
* -numeric <num>
* The number of numeric attributes (default 1).
* </pre>
*
* <pre>
* -string <num>
* The number of string attributes (default 1).
* </pre>
*
* <pre>
* -date <num>
* The number of date attributes (default 1).
* </pre>
*
* <pre>
* -relational <num>
* The number of relational attributes (default 1).
* </pre>
*
* <pre>
* -num-instances-relational <num>
* The number of instances in relational/bag attributes (default 10).
* </pre>
*
* <pre>
* -words <comma-separated-list>
* The words to use in string attributes.
* </pre>
*
* <pre>
* -word-separators <chars>
* The word separators to use in string attributes.
* </pre>
*
* <pre>
* -eval name [options]
* Full name and options of the evaluator analyzed.
* eg: weka.attributeSelection.CfsSubsetEval
* </pre>
*
* <pre>
* -search name [options]
* Full name and options of the search method analyzed.
* eg: weka.attributeSelection.Ranker
* </pre>
*
* <pre>
* -test <eval|search>
* The scheme to test, either the evaluator or the search method.
* (Default: eval)
* </pre>
*
* <pre>
* Options specific to evaluator weka.attributeSelection.CfsSubsetEval:
* </pre>
*
* <pre>
* -M
* Treat missing values as a seperate value.
* </pre>
*
* <pre>
* -L
* Don't include locally predictive attributes.
* </pre>
*
* <pre>
* Options specific to search method weka.attributeSelection.Ranker:
* </pre>
*
* <pre>
* -P <start set>
* Specify a starting set of attributes.
* Eg. 1,3,5-7.
* Any starting attributes specified are
* ignored during the ranking.
* </pre>
*
* <pre>
* -T <threshold>
* Specify a theshold by which attributes
* may be discarded from the ranking.
* </pre>
*
* <pre>
* -N <num to select>
* Specify number of attributes to select
* </pre>
*
* <!-- options-end -->
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
* @see TestInstances
*/
public class CheckAttributeSelection extends CheckScheme {
/*
* Note about test methods: - methods return array of booleans - first index:
* success or not - second index: acceptable or not (e.g., Exception is OK)
*
* FracPete (fracpete at waikato dot ac dot nz)
*/
/*** The evaluator to be examined */
protected ASEvaluation m_Evaluator = new CfsSubsetEval();
/*** The search method to be used */
protected ASSearch m_Search = new Ranker();
/** whether to test the evaluator (default) or the search method */
protected boolean m_TestEvaluator = true;
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
*/
@Override
public Enumeration<Option> listOptions() {
  Vector<Option> result = new Vector<Option>();

  result.add(new Option(
    "\tFull name and options of the evaluator analyzed.\n"
      + "\teg: weka.attributeSelection.CfsSubsetEval", "eval", 1,
    "-eval name [options]"));
  result.add(new Option(
    "\tFull name and options of the search method analyzed.\n"
      + "\teg: weka.attributeSelection.Ranker", "search", 1,
    "-search name [options]"));
  result.add(new Option(
    "\tThe scheme to test, either the evaluator or the search method.\n"
      + "\t(Default: eval)", "test", 1, "-test <eval|search>"));

  result.addAll(Collections.list(super.listOptions()));

  // instanceof already yields false for null, so the previous explicit
  // null checks before these tests were redundant and have been removed.
  if (m_Evaluator instanceof OptionHandler) {
    result.add(new Option("", "", 0, "\nOptions specific to evaluator "
      + m_Evaluator.getClass().getName() + ":"));
    result.addAll(Collections.list(((OptionHandler) m_Evaluator)
      .listOptions()));
  }
  if (m_Search instanceof OptionHandler) {
    result.add(new Option("", "", 0, "\nOptions specific to search method "
      + m_Search.getClass().getName() + ":"));
    result.addAll(Collections.list(((OptionHandler) m_Search).listOptions()));
  }
  return result.elements();
}
/**
* Parses a given list of options.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -D
* Turn on debugging output.
* </pre>
*
* <pre>
* -S
* Silent mode - prints nothing to stdout.
* </pre>
*
* <pre>
* -N <num>
* The number of instances in the datasets (default 20).
* </pre>
*
* <pre>
* -nominal <num>
* The number of nominal attributes (default 2).
* </pre>
*
* <pre>
* -nominal-values <num>
* The number of values for nominal attributes (default 1).
* </pre>
*
* <pre>
* -numeric <num>
* The number of numeric attributes (default 1).
* </pre>
*
* <pre>
* -string <num>
* The number of string attributes (default 1).
* </pre>
*
* <pre>
* -date <num>
* The number of date attributes (default 1).
* </pre>
*
* <pre>
* -relational <num>
* The number of relational attributes (default 1).
* </pre>
*
* <pre>
* -num-instances-relational <num>
* The number of instances in relational/bag attributes (default 10).
* </pre>
*
* <pre>
* -words <comma-separated-list>
* The words to use in string attributes.
* </pre>
*
* <pre>
* -word-separators <chars>
* The word separators to use in string attributes.
* </pre>
*
* <pre>
* -eval name [options]
* Full name and options of the evaluator analyzed.
* eg: weka.attributeSelection.CfsSubsetEval
* </pre>
*
* <pre>
* -search name [options]
* Full name and options of the search method analyzed.
* eg: weka.attributeSelection.Ranker
* </pre>
*
* <pre>
* -test <eval|search>
* The scheme to test, either the evaluator or the search method.
* (Default: eval)
* </pre>
*
* <pre>
* Options specific to evaluator weka.attributeSelection.CfsSubsetEval:
* </pre>
*
* <pre>
* -M
* Treat missing values as a seperate value.
* </pre>
*
* <pre>
* -L
* Don't include locally predictive attributes.
* </pre>
*
* <pre>
* Options specific to search method weka.attributeSelection.Ranker:
* </pre>
*
* <pre>
* -P <start set>
* Specify a starting set of attributes.
* Eg. 1,3,5-7.
* Any starting attributes specified are
* ignored during the ranking.
* </pre>
*
* <pre>
* -T <threshold>
* Specify a theshold by which attributes
* may be discarded from the ranking.
* </pre>
*
* <pre>
* -N <num to select>
* Specify number of attributes to select
* </pre>
*
* <!-- options-end -->
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
  super.setOptions(options);

  // -eval: first token is the evaluator class name, the rest its options
  String[] evalSpec = Utils.splitOptions(Utils.getOption("eval", options));
  if (evalSpec.length != 0) {
    String evalName = evalSpec[0];
    evalSpec[0] = "";
    setEvaluator((ASEvaluation) forName("weka.attributeSelection",
      ASEvaluation.class, evalName, evalSpec));
  }

  // -search: same layout as -eval, but for the search method
  String[] searchSpec = Utils.splitOptions(Utils.getOption("search", options));
  if (searchSpec.length != 0) {
    String searchName = searchSpec[0];
    searchSpec[0] = "";
    setSearch((ASSearch) forName("weka.attributeSelection", ASSearch.class,
      searchName, searchSpec));
  }

  // -test: any value other than "search" selects the evaluator for testing
  setTestEvaluator(!Utils.getOption("test", options).equalsIgnoreCase("search"));
}
/**
* Gets the current settings of the CheckAttributeSelection.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
  Vector<String> result = new Vector<String>();
  Collections.addAll(result, super.getOptions());

  // evaluator spec: class name plus its own options when it has any
  result.add("-eval");
  ASEvaluation evaluator = getEvaluator();
  if (evaluator instanceof OptionHandler) {
    result.add(evaluator.getClass().getName() + " "
      + Utils.joinOptions(((OptionHandler) evaluator).getOptions()));
  } else {
    result.add(evaluator.getClass().getName());
  }

  // search method spec: same layout as the evaluator spec
  result.add("-search");
  ASSearch searchMethod = getSearch();
  if (searchMethod instanceof OptionHandler) {
    result.add(searchMethod.getClass().getName() + " "
      + Utils.joinOptions(((OptionHandler) searchMethod).getOptions()));
  } else {
    result.add(searchMethod.getClass().getName());
  }

  result.add("-test");
  result.add(getTestEvaluator() ? "eval" : "search");

  return result.toArray(new String[result.size()]);
}
/**
* Begin the tests, reporting results to System.out
*/
@Override
public void doTests() {
// bail out early when neither an evaluator nor a search method is set
if (getTestObject() == null) {
println("\n=== No scheme set ===");
return;
}
println("\n=== Check on scheme: " + getTestObject().getClass().getName()
+ " ===\n");
// Start tests
m_ClasspathProblems = false;
println("--> Checking for interfaces");
canTakeOptions();
// capability probes feed the per-class-type batteries below
boolean weightedInstancesHandler = weightedInstancesHandler()[0];
boolean multiInstanceHandler = multiInstanceHandler()[0];
println("--> Scheme tests");
declaresSerialVersionUID();
// run the full battery once per supported class attribute type
testsPerClassType(Attribute.NOMINAL, weightedInstancesHandler,
multiInstanceHandler);
testsPerClassType(Attribute.NUMERIC, weightedInstancesHandler,
multiInstanceHandler);
testsPerClassType(Attribute.DATE, weightedInstancesHandler,
multiInstanceHandler);
testsPerClassType(Attribute.STRING, weightedInstancesHandler,
multiInstanceHandler);
testsPerClassType(Attribute.RELATIONAL, weightedInstancesHandler,
multiInstanceHandler);
}
/**
 * Set the evaluator to test.
 *
 * @param value the evaluator to use.
 */
public void setEvaluator(ASEvaluation value) {
m_Evaluator = value;
}
/**
 * Get the current evaluator
 *
 * @return the current evaluator
 */
public ASEvaluation getEvaluator() {
return m_Evaluator;
}
/**
 * Set the search method to test.
 *
 * @param value the search method to use.
 */
public void setSearch(ASSearch value) {
m_Search = value;
}
/**
 * Get the current search method
 *
 * @return the current search method
 */
public ASSearch getSearch() {
return m_Search;
}
/**
 * Sets whether the evaluator or the search method is being tested.
 *
 * @param value if true then the evaluator will be tested, otherwise the
 *          search method
 */
public void setTestEvaluator(boolean value) {
m_TestEvaluator = value;
}
/**
 * Gets whether the evaluator is being tested or the search method.
 *
 * @return true if the evaluator is being tested
 */
public boolean getTestEvaluator() {
return m_TestEvaluator;
}
/**
 * returns either the evaluator or the search method, depending on which one
 * was selected for testing via {@link #setTestEvaluator(boolean)}.
 *
 * @return the object to be tested
 * @see #m_TestEvaluator
 */
protected Object getTestObject() {
if (getTestEvaluator()) {
return getEvaluator();
} else {
return getSearch();
}
}
/**
* returns deep copies of the given object
*
* @param obj the object to copy
* @param num the number of copies
* @return the deep copies
* @throws Exception if copying fails
*/
protected Object[] makeCopies(Object obj, int num) throws Exception {
  if (obj == null) {
    throw new Exception("No object set");
  }
  // Serialize once up front, then deserialize one fresh deep copy per slot.
  SerializedObject serialized = new SerializedObject(obj);
  Object[] copies = new Object[num];
  for (int i = 0; i < num; i++) {
    copies[i] = serialized.getObject();
  }
  return copies;
}
/**
* Performs a attribute selection with the given search and evaluation scheme
* on the provided data. The generated AttributeSelection object is returned.
*
* @param search the search scheme to use
* @param eval the evaluator to use
* @param data the data to work on
* @return the used attribute selection object
* @throws Exception if the attribute selection fails
*/
protected AttributeSelection search(ASSearch search, ASEvaluation eval,
  Instances data) throws Exception {
  AttributeSelection selector = new AttributeSelection();
  selector.setSeed(42); // fixed seed keeps repeated runs comparable
  selector.setSearch(search);
  selector.setEvaluator(eval);
  selector.SelectAttributes(data);
  return selector;
}
/**
* Run a battery of tests for a given class attribute type
*
* @param classType true if the class attribute should be numeric
* @param weighted true if the scheme says it handles weights
* @param multiInstance true if the scheme handles multi-instance data
*/
protected void testsPerClassType(int classType, boolean weighted,
boolean multiInstance) {
// probe which predictor attribute types the scheme accepts for this
// class type: nominal, numeric, string, date, relational
boolean PNom = canPredict(true, false, false, false, false, multiInstance,
classType)[0];
boolean PNum = canPredict(false, true, false, false, false, multiInstance,
classType)[0];
boolean PStr = canPredict(false, false, true, false, false, multiInstance,
classType)[0];
boolean PDat = canPredict(false, false, false, true, false, multiInstance,
classType)[0];
boolean PRel;
if (!multiInstance) {
PRel = canPredict(false, false, false, false, true, multiInstance,
classType)[0];
} else {
PRel = false;
}
// only run the remaining battery if at least one predictor type works
if (PNom || PNum || PStr || PDat || PRel) {
if (weighted) {
instanceWeights(PNom, PNum, PStr, PDat, PRel, multiInstance, classType);
}
if (classType == Attribute.NOMINAL) {
canHandleNClasses(PNom, PNum, PStr, PDat, PRel, multiInstance, 4);
}
if (!multiInstance) {
canHandleClassAsNthAttribute(PNom, PNum, PStr, PDat, PRel,
multiInstance, classType, 0);
canHandleClassAsNthAttribute(PNom, PNum, PStr, PDat, PRel,
multiInstance, classType, 1);
}
canHandleZeroTraining(PNom, PNum, PStr, PDat, PRel, multiInstance,
classType);
// escalate missing-value tests to 100% only if 20% already passes
boolean handleMissingPredictors = canHandleMissing(PNom, PNum, PStr,
PDat, PRel, multiInstance, classType, true, false, 20)[0];
if (handleMissingPredictors) {
canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance,
classType, true, false, 100);
}
boolean handleMissingClass = canHandleMissing(PNom, PNum, PStr, PDat,
PRel, multiInstance, classType, false, true, 20)[0];
if (handleMissingClass) {
canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance,
classType, false, true, 100);
}
correctSearchInitialisation(PNom, PNum, PStr, PDat, PRel, multiInstance,
classType);
datasetIntegrity(PNom, PNum, PStr, PDat, PRel, multiInstance, classType,
handleMissingPredictors, handleMissingClass);
}
}
/**
* Checks whether the scheme can take command line options.
*
* @return index 0 is true if the scheme can take options
*/
protected boolean[] canTakeOptions() {
boolean[] result = new boolean[2];
print("options...");
if (getTestObject() instanceof OptionHandler) {
println("yes");
if (m_Debug) {
println("\n=== Full report ===");
Enumeration<Option> enu = ((OptionHandler) getTestObject())
.listOptions();
while (enu.hasMoreElements()) {
Option option = enu.nextElement();
print(option.synopsis() + "\n" + option.description() + "\n");
}
println("\n");
}
result[0] = true;
} else {
println("no");
result[0] = false;
}
return result;
}
/**
* Checks whether the scheme says it can handle instance weights.
*
* @return true if the scheme handles instance weights
*/
protected boolean[] weightedInstancesHandler() {
boolean[] result = new boolean[2];
print("weighted instances scheme...");
if (getTestObject() instanceof WeightedInstancesHandler) {
println("yes");
result[0] = true;
} else {
println("no");
result[0] = false;
}
return result;
}
/**
* Checks whether the scheme handles multi-instance data.
*
* @return true if the scheme handles multi-instance data
*/
protected boolean[] multiInstanceHandler() {
boolean[] result = new boolean[2];
print("multi-instance scheme...");
if (getTestObject() instanceof MultiInstanceCapabilitiesHandler) {
println("yes");
result[0] = true;
} else {
println("no");
result[0] = false;
}
return result;
}
/**
* tests for a serialVersionUID. Fails in case the schemes don't declare a UID
* (both must!).
*
* @return index 0 is true if the scheme declares a UID
*/
protected boolean[] declaresSerialVersionUID() {
boolean[] result = new boolean[2];
boolean eval;
boolean search;
print("serialVersionUID...");
eval = !SerializationHelper.needsUID(m_Evaluator.getClass());
search = !SerializationHelper.needsUID(m_Search.getClass());
result[0] = eval && search;
if (result[0]) {
println("yes");
} else {
println("no");
}
return result;
}
/**
* Checks basic prediction of the scheme, for simple non-troublesome datasets.
*
* @param nominalPredictor if true use nominal predictor attributes
* @param numericPredictor if true use numeric predictor attributes
* @param stringPredictor if true use string predictor attributes
* @param datePredictor if true use date predictor attributes
* @param relationalPredictor if true use relational predictor attributes
* @param multiInstance whether multi-instance is needed
* @param classType the class type (NOMINAL, NUMERIC, etc.)
* @return index 0 is true if the test was passed, index 1 is true if test was
* acceptable
*/
protected boolean[] canPredict(boolean nominalPredictor,
boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
boolean relationalPredictor, boolean multiInstance, int classType) {
print("basic predict");
printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
datePredictor, relationalPredictor, multiInstance, classType);
print("...");
ArrayList<String> accepts = new ArrayList<String>();
accepts.add("unary");
accepts.add("binary");
accepts.add("nominal");
accepts.add("numeric");
accepts.add("string");
accepts.add("date");
accepts.add("relational");
accepts.add("multi-instance");
accepts.add("not in classpath");
int numTrain = getNumInstances(), numClasses = 2, missingLevel = 0;
boolean predictorMissing = false, classMissing = false;
return runBasicTest(nominalPredictor, numericPredictor, stringPredictor,
datePredictor, relationalPredictor, multiInstance, classType,
missingLevel, predictorMissing, classMissing, numTrain, numClasses,
accepts);
}
/**
* Checks whether nominal schemes can handle more than two classes. If a
* scheme is only designed for two-class problems it should throw an
* appropriate exception for multi-class problems.
*
* @param nominalPredictor if true use nominal predictor attributes
* @param numericPredictor if true use numeric predictor attributes
* @param stringPredictor if true use string predictor attributes
* @param datePredictor if true use date predictor attributes
* @param relationalPredictor if true use relational predictor attributes
* @param multiInstance whether multi-instance is needed
* @param numClasses the number of classes to test
* @return index 0 is true if the test was passed, index 1 is true if test was
* acceptable
*/
protected boolean[] canHandleNClasses(boolean nominalPredictor,
boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
boolean relationalPredictor, boolean multiInstance, int numClasses) {
print("more than two class problems");
printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
datePredictor, relationalPredictor, multiInstance, Attribute.NOMINAL);
print("...");
ArrayList<String> accepts = new ArrayList<String>();
accepts.add("number");
accepts.add("class");
int numTrain = getNumInstances(), missingLevel = 0;
boolean predictorMissing = false, classMissing = false;
return runBasicTest(nominalPredictor, numericPredictor, stringPredictor,
datePredictor, relationalPredictor, multiInstance, Attribute.NOMINAL,
missingLevel, predictorMissing, classMissing, numTrain, numClasses,
accepts);
}
/**
* Checks whether the scheme can handle class attributes as Nth attribute.
*
* @param nominalPredictor if true use nominal predictor attributes
* @param numericPredictor if true use numeric predictor attributes
* @param stringPredictor if true use string predictor attributes
* @param datePredictor if true use date predictor attributes
* @param relationalPredictor if true use relational predictor attributes
* @param multiInstance whether multi-instance is needed
* @param classType the class type (NUMERIC, NOMINAL, etc.)
* @param classIndex the index of the class attribute (0-based, -1 means last
* attribute)
* @return index 0 is true if the test was passed, index 1 is true if test was
* acceptable
* @see TestInstances#CLASS_IS_LAST
*/
protected boolean[] canHandleClassAsNthAttribute(boolean nominalPredictor,
boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
boolean relationalPredictor, boolean multiInstance, int classType,
int classIndex) {
if (classIndex == TestInstances.CLASS_IS_LAST) {
print("class attribute as last attribute");
} else {
print("class attribute as " + (classIndex + 1) + ". attribute");
}
printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
datePredictor, relationalPredictor, multiInstance, classType);
print("...");
ArrayList<String> accepts = new ArrayList<String>();
int numTrain = getNumInstances(), numClasses = 2, missingLevel = 0;
boolean predictorMissing = false, classMissing = false;
return runBasicTest(nominalPredictor, numericPredictor, stringPredictor,
datePredictor, relationalPredictor, multiInstance, classType, classIndex,
missingLevel, predictorMissing, classMissing, numTrain, numClasses,
accepts);
}
/**
* Checks whether the scheme can handle zero training instances.
*
* @param nominalPredictor if true use nominal predictor attributes
* @param numericPredictor if true use numeric predictor attributes
* @param stringPredictor if true use string predictor attributes
* @param datePredictor if true use date predictor attributes
* @param relationalPredictor if true use relational predictor attributes
* @param multiInstance whether multi-instance is needed
* @param classType the class type (NUMERIC, NOMINAL, etc.)
* @return index 0 is true if the test was passed, index 1 is true if test was
* acceptable
*/
protected boolean[] canHandleZeroTraining(boolean nominalPredictor,
boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
boolean relationalPredictor, boolean multiInstance, int classType) {
print("handle zero training instances");
printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
datePredictor, relationalPredictor, multiInstance, classType);
print("...");
ArrayList<String> accepts = new ArrayList<String>();
accepts.add("train");
accepts.add("value");
int numTrain = 0, numClasses = 2, missingLevel = 0;
boolean predictorMissing = false, classMissing = false;
return runBasicTest(nominalPredictor, numericPredictor, stringPredictor,
datePredictor, relationalPredictor, multiInstance, classType,
missingLevel, predictorMissing, classMissing, numTrain, numClasses,
accepts);
}
  /**
   * Checks whether the scheme correctly initialises models when ASSearch.search
   * is called. This test calls search with one training dataset. ASSearch is
   * then called on a training set with different structure, and then again with
   * the original training set. If the equals method of the ASEvaluation class
   * returns false, this is noted as incorrect search initialisation.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @return index 0 is true if the test was passed, index 1 is always false
   */
  protected boolean[] correctSearchInitialisation(boolean nominalPredictor,
    boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
    boolean relationalPredictor, boolean multiInstance, int classType) {
    boolean[] result = new boolean[2];
    print("correct initialisation during search");
    printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
      datePredictor, relationalPredictor, multiInstance, classType);
    print("...");
    int numTrain = getNumInstances(), numClasses = 2, missingLevel = 0;
    boolean predictorMissing = false, classMissing = false;
    Instances train1 = null;
    Instances train2 = null;
    ASSearch search = null;
    ASEvaluation evaluation1A = null;
    ASEvaluation evaluation1B = null;
    ASEvaluation evaluation2 = null;
    AttributeSelection attsel1A = null;
    AttributeSelection attsel1B = null;
    // tracks which phase failed, so the debug report below can say where
    int stage = 0;
    try {
      // Make two train sets with different numbers of attributes
      train1 = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal()
        : 0, numericPredictor ? getNumNumeric() : 0,
        stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0,
        relationalPredictor ? getNumRelational() : 0, numClasses, classType,
        multiInstance);
      train2 = makeTestDataset(84, numTrain,
        nominalPredictor ? getNumNominal() + 1 : 0,
        numericPredictor ? getNumNumeric() + 1 : 0,
        stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0,
        relationalPredictor ? getNumRelational() : 0, numClasses, classType,
        multiInstance);
      if (missingLevel > 0) {
        addMissing(train1, missingLevel, predictorMissing, classMissing);
        addMissing(train2, missingLevel, predictorMissing, classMissing);
      }
      search = ASSearch.makeCopies(getSearch(), 1)[0];
      evaluation1A = ASEvaluation.makeCopies(getEvaluator(), 1)[0];
      evaluation1B = ASEvaluation.makeCopies(getEvaluator(), 1)[0];
      evaluation2 = ASEvaluation.makeCopies(getEvaluator(), 1)[0];
    } catch (Exception ex) {
      throw new Error("Error setting up for tests: " + ex.getMessage());
    }
    try {
      // search train1, then train2 (different structure), then train1 again;
      // runs 1 and 3 must give identical results if initialisation is correct
      stage = 0;
      attsel1A = search(search, evaluation1A, train1);
      stage = 1;
      search(search, evaluation2, train2);
      stage = 2;
      attsel1B = search(search, evaluation1B, train1);
      stage = 3;
      if (!attsel1A.toResultsString().equals(attsel1B.toResultsString())) {
        if (m_Debug) {
          println("\n=== Full report ===\n" + "\nFirst search\n"
            + attsel1A.toResultsString() + "\n\n");
          println("\nSecond search\n" + attsel1B.toResultsString() + "\n\n");
        }
        throw new Exception("Results differ between search calls");
      }
      println("yes");
      result[0] = true;
    } catch (Exception ex) {
      println("no");
      result[0] = false;
      if (m_Debug) {
        println("\n=== Full Report ===");
        print("Problem during training");
        // report which phase the failure occurred in
        switch (stage) {
        case 0:
          print(" of dataset 1");
          break;
        case 1:
          print(" of dataset 2");
          break;
        case 2:
          print(" of dataset 1 (2nd build)");
          break;
        case 3:
          print(", comparing results from builds of dataset 1");
          break;
        }
        println(": " + ex.getMessage() + "\n");
        println("here are the datasets:\n");
        println("=== Train1 Dataset ===\n" + train1.toString() + "\n");
        println("=== Train2 Dataset ===\n" + train2.toString() + "\n");
      }
    }
    return result;
  }
/**
* Checks basic missing value handling of the scheme. If the missing values
* cause an exception to be thrown by the scheme, this will be recorded.
*
* @param nominalPredictor if true use nominal predictor attributes
* @param numericPredictor if true use numeric predictor attributes
* @param stringPredictor if true use string predictor attributes
* @param datePredictor if true use date predictor attributes
* @param relationalPredictor if true use relational predictor attributes
* @param multiInstance whether multi-instance is needed
* @param classType the class type (NUMERIC, NOMINAL, etc.)
* @param predictorMissing true if the missing values may be in the predictors
* @param classMissing true if the missing values may be in the class
* @param missingLevel the percentage of missing values
* @return index 0 is true if the test was passed, index 1 is true if test was
* acceptable
*/
protected boolean[] canHandleMissing(boolean nominalPredictor,
boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
boolean relationalPredictor, boolean multiInstance, int classType,
boolean predictorMissing, boolean classMissing, int missingLevel) {
if (missingLevel == 100) {
print("100% ");
}
print("missing");
if (predictorMissing) {
print(" predictor");
if (classMissing) {
print(" and");
}
}
if (classMissing) {
print(" class");
}
print(" values");
printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
datePredictor, relationalPredictor, multiInstance, classType);
print("...");
ArrayList<String> accepts = new ArrayList<String>();
accepts.add("missing");
accepts.add("value");
accepts.add("train");
accepts.add("no attributes");
int numTrain = getNumInstances(), numClasses = 2;
return runBasicTest(nominalPredictor, numericPredictor, stringPredictor,
datePredictor, relationalPredictor, multiInstance, classType,
missingLevel, predictorMissing, classMissing, numTrain, numClasses,
accepts);
}
  /**
   * Checks whether the scheme can handle instance weights. This test compares
   * the scheme performance on two datasets that are identical except for the
   * training weights. If the results change, then the scheme must be using the
   * weights. It may be possible to get a false positive from this test if the
   * weight changes aren't significant enough to induce a change in scheme
   * performance (but the weights are chosen to minimize the likelihood of
   * this).
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @return index 0 true if the test was passed
   */
  protected boolean[] instanceWeights(boolean nominalPredictor,
    boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
    boolean relationalPredictor, boolean multiInstance, int classType) {
    print("scheme uses instance weights");
    printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
      datePredictor, relationalPredictor, multiInstance, classType);
    print("...");
    // use twice the usual number of instances so that re-weighting half of
    // them below still leaves a reasonably sized effective training set
    int numTrain = 2 * getNumInstances(), numClasses = 2, missingLevel = 0;
    boolean predictorMissing = false, classMissing = false;
    boolean[] result = new boolean[2];
    Instances train = null;
    ASSearch[] search = null;
    ASEvaluation evaluationB = null;
    ASEvaluation evaluationI = null;
    AttributeSelection attselB = null;
    AttributeSelection attselI = null;
    // set when the weighted and unweighted runs produce identical results
    boolean evalFail = false;
    try {
      train = makeTestDataset(42, numTrain,
        nominalPredictor ? getNumNominal() + 1 : 0,
        numericPredictor ? getNumNumeric() + 1 : 0,
        stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0,
        relationalPredictor ? getNumRelational() : 0, numClasses, classType,
        multiInstance);
      if (missingLevel > 0) {
        addMissing(train, missingLevel, predictorMissing, classMissing);
      }
      search = ASSearch.makeCopies(getSearch(), 2);
      evaluationB = ASEvaluation.makeCopies(getEvaluator(), 1)[0];
      evaluationI = ASEvaluation.makeCopies(getEvaluator(), 1)[0];
      // baseline run with the original (uniform) weights
      attselB = search(search[0], evaluationB, train);
    } catch (Exception ex) {
      throw new Error("Error setting up for tests: " + ex.getMessage());
    }
    try {
      // Now modify instance weights and re-built/test
      for (int i = 0; i < train.numInstances(); i++) {
        train.instance(i).setWeight(0);
      }
      // give roughly half the instances a random weight in [1, 10]; the rest
      // keep weight 0 (fixed seed keeps the test deterministic)
      Random random = new Random(1);
      for (int i = 0; i < train.numInstances() / 2; i++) {
        int inst = random.nextInt(train.numInstances());
        int weight = random.nextInt(10) + 1;
        train.instance(inst).setWeight(weight);
      }
      attselI = search(search[1], evaluationI, train);
      // identical results mean the scheme ignored the weights
      if (attselB.toResultsString().equals(attselI.toResultsString())) {
        // println("no");
        evalFail = true;
        throw new Exception("evalFail");
      }
      println("yes");
      result[0] = true;
    } catch (Exception ex) {
      println("no");
      result[0] = false;
      if (m_Debug) {
        println("\n=== Full Report ===");
        if (evalFail) {
          println("Results don't differ between non-weighted and "
            + "weighted instance models.");
          println("Here are the results:\n");
          println("\nboth methods\n");
          println(evaluationB.toString());
        } else {
          print("Problem during training");
          println(": " + ex.getMessage() + "\n");
        }
        println("Here is the dataset:\n");
        println("=== Train Dataset ===\n" + train.toString() + "\n");
        println("=== Train Weights ===\n");
        for (int i = 0; i < train.numInstances(); i++) {
          println(" " + (i + 1) + " " + train.instance(i).weight());
        }
      }
    }
    return result;
  }
  /**
   * Checks whether the scheme alters the training dataset during training. If
   * the scheme needs to modify the training data it should take a copy of the
   * training data. Currently checks for changes to header structure, number of
   * instances, order of instances, instance weights.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @param predictorMissing true if we know the scheme can handle (at least)
   *          moderate missing predictor values
   * @param classMissing true if we know the scheme can handle (at least)
   *          moderate missing class values
   * @return index 0 is true if the test was passed
   */
  protected boolean[] datasetIntegrity(boolean nominalPredictor,
    boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
    boolean relationalPredictor, boolean multiInstance, int classType,
    boolean predictorMissing, boolean classMissing) {
    print("scheme doesn't alter original datasets");
    printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
      datePredictor, relationalPredictor, multiInstance, classType);
    print("...");
    int numTrain = getNumInstances(), numClasses = 2, missingLevel = 20;
    boolean[] result = new boolean[2];
    Instances train = null;
    Instances trainCopy = null;
    ASSearch search = null;
    ASEvaluation evaluation = null;
    try {
      train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal()
        : 0, numericPredictor ? getNumNumeric() : 0,
        stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0,
        relationalPredictor ? getNumRelational() : 0, numClasses, classType,
        multiInstance);
      if (missingLevel > 0) {
        addMissing(train, missingLevel, predictorMissing, classMissing);
      }
      search = ASSearch.makeCopies(getSearch(), 1)[0];
      evaluation = ASEvaluation.makeCopies(getEvaluator(), 1)[0];
      // keep an untouched snapshot of the data for the later comparison
      trainCopy = new Instances(train);
    } catch (Exception ex) {
      throw new Error("Error setting up for tests: " + ex.getMessage());
    }
    try {
      // run the search on the copy, then verify it still equals the original
      search(search, evaluation, trainCopy);
      compareDatasets(train, trainCopy);
      println("yes");
      result[0] = true;
    } catch (Exception ex) {
      println("no");
      result[0] = false;
      if (m_Debug) {
        println("\n=== Full Report ===");
        print("Problem during training");
        println(": " + ex.getMessage() + "\n");
        println("Here are the datasets:\n");
        println("=== Train Dataset (original) ===\n" + trainCopy.toString()
          + "\n");
        println("=== Train Dataset ===\n" + train.toString() + "\n");
      }
    }
    return result;
  }
  /**
   * Runs a test on the datasets with the given characteristics.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @param missingLevel the percentage of missing values
   * @param predictorMissing true if the missing values may be in the predictors
   * @param classMissing true if the missing values may be in the class
   * @param numTrain the number of instances in the training set
   * @param numClasses the number of classes
   * @param accepts the acceptable string in an exception
   * @return index 0 is true if the test was passed, index 1 is true if test was
   *         acceptable
   */
  protected boolean[] runBasicTest(boolean nominalPredictor,
    boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
    boolean relationalPredictor, boolean multiInstance, int classType,
    int missingLevel, boolean predictorMissing, boolean classMissing,
    int numTrain, int numClasses, ArrayList<String> accepts) {
    // convenience overload: the class attribute is placed last
    return runBasicTest(nominalPredictor, numericPredictor, stringPredictor,
      datePredictor, relationalPredictor, multiInstance, classType,
      TestInstances.CLASS_IS_LAST, missingLevel, predictorMissing,
      classMissing, numTrain, numClasses, accepts);
  }
  /**
   * Runs a test on the datasets with the given characteristics.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @param classIndex the attribute index of the class
   * @param missingLevel the percentage of missing values
   * @param predictorMissing true if the missing values may be in the predictors
   * @param classMissing true if the missing values may be in the class
   * @param numTrain the number of instances in the training set
   * @param numClasses the number of classes
   * @param accepts the acceptable string in an exception
   * @return index 0 is true if the test was passed, index 1 is true if test was
   *         acceptable
   */
  protected boolean[] runBasicTest(boolean nominalPredictor,
    boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
    boolean relationalPredictor, boolean multiInstance, int classType,
    int classIndex, int missingLevel, boolean predictorMissing,
    boolean classMissing, int numTrain, int numClasses,
    ArrayList<String> accepts) {
    boolean[] result = new boolean[2];
    Instances train = null;
    ASSearch search = null;
    ASEvaluation evaluation = null;
    try {
      train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal()
        : 0, numericPredictor ? getNumNumeric() : 0,
        stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0,
        relationalPredictor ? getNumRelational() : 0, numClasses, classType,
        classIndex, multiInstance);
      if (missingLevel > 0) {
        addMissing(train, missingLevel, predictorMissing, classMissing);
      }
      search = ASSearch.makeCopies(getSearch(), 1)[0];
      evaluation = ASEvaluation.makeCopies(getEvaluator(), 1)[0];
    } catch (Exception ex) {
      ex.printStackTrace();
      throw new Error("Error setting up for tests: " + ex.getMessage());
    }
    try {
      search(search, evaluation, train);
      println("yes");
      result[0] = true;
    } catch (Exception ex) {
      // the test failed; decide whether the failure was "acceptable", i.e.
      // the exception message mentions one of the expected terms
      boolean acceptable = false;
      String msg;
      if (ex.getMessage() == null) {
        msg = "";
      } else {
        msg = ex.getMessage().toLowerCase();
      }
      // remember classpath problems for the overall report
      if (msg.indexOf("not in classpath") > -1) {
        m_ClasspathProblems = true;
      }
      for (int i = 0; i < accepts.size(); i++) {
        if (msg.indexOf(accepts.get(i)) >= 0) {
          acceptable = true;
        }
      }
      println("no" + (acceptable ? " (OK error message)" : ""));
      result[1] = acceptable;
      if (m_Debug) {
        println("\n=== Full Report ===");
        print("Problem during training");
        println(": " + ex.getMessage() + "\n");
        if (!acceptable) {
          if (accepts.size() > 0) {
            print("Error message doesn't mention ");
            for (int i = 0; i < accepts.size(); i++) {
              if (i != 0) {
                print(" or ");
              }
              print('"' + accepts.get(i) + '"');
            }
          }
          println("here is the dataset:\n");
          println("=== Train Dataset ===\n" + train.toString() + "\n");
        }
      }
    }
    return result;
  }
  /**
   * Make a simple set of instances, which can later be modified for use in
   * specific tests.
   *
   * @param seed the random number seed
   * @param numInstances the number of instances to generate
   * @param numNominal the number of nominal attributes
   * @param numNumeric the number of numeric attributes
   * @param numString the number of string attributes
   * @param numDate the number of date attributes
   * @param numRelational the number of relational attributes
   * @param numClasses the number of classes (if nominal class)
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @param multiInstance whether the dataset should be a multi-instance dataset
   * @return the test dataset
   * @throws Exception if the dataset couldn't be generated
   * @see #process(Instances)
   */
  protected Instances makeTestDataset(int seed, int numInstances,
    int numNominal, int numNumeric, int numString, int numDate,
    int numRelational, int numClasses, int classType, boolean multiInstance)
    throws Exception {
    // delegate to the full version, placing the class attribute last
    return makeTestDataset(seed, numInstances, numNominal, numNumeric,
      numString, numDate, numRelational, numClasses, classType,
      TestInstances.CLASS_IS_LAST, multiInstance);
  }
/**
* Make a simple set of instances with variable position of the class
* attribute, which can later be modified for use in specific tests.
*
* @param seed the random number seed
* @param numInstances the number of instances to generate
* @param numNominal the number of nominal attributes
* @param numNumeric the number of numeric attributes
* @param numString the number of string attributes
* @param numDate the number of date attributes
* @param numRelational the number of relational attributes
* @param numClasses the number of classes (if nominal class)
* @param classType the class type (NUMERIC, NOMINAL, etc.)
* @param classIndex the index of the class (0-based, -1 as last)
* @param multiInstance whether the dataset should a multi-instance dataset
* @return the test dataset
* @throws Exception if the dataset couldn't be generated
* @see TestInstances#CLASS_IS_LAST
* @see #process(Instances)
*/
protected Instances makeTestDataset(int seed, int numInstances,
int numNominal, int numNumeric, int numString, int numDate,
int numRelational, int numClasses, int classType, int classIndex,
boolean multiInstance) throws Exception {
TestInstances dataset = new TestInstances();
dataset.setSeed(seed);
dataset.setNumInstances(numInstances);
dataset.setNumNominal(numNominal);
dataset.setNumNumeric(numNumeric);
dataset.setNumString(numString);
dataset.setNumDate(numDate);
dataset.setNumRelational(numRelational);
dataset.setNumClasses(numClasses);
dataset.setClassType(classType);
dataset.setClassIndex(classIndex);
dataset.setNumClasses(numClasses);
dataset.setMultiInstance(multiInstance);
dataset.setWords(getWords());
dataset.setWordSeparators(getWordSeparators());
return process(dataset.generate());
}
/**
* Print out a short summary string for the dataset characteristics
*
* @param nominalPredictor true if nominal predictor attributes are present
* @param numericPredictor true if numeric predictor attributes are present
* @param stringPredictor true if string predictor attributes are present
* @param datePredictor true if date predictor attributes are present
* @param relationalPredictor true if relational predictor attributes are
* present
* @param multiInstance whether multi-instance is needed
* @param classType the class type (NUMERIC, NOMINAL, etc.)
*/
protected void printAttributeSummary(boolean nominalPredictor,
boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
boolean relationalPredictor, boolean multiInstance, int classType) {
String str = "";
if (numericPredictor) {
str += " numeric";
}
if (nominalPredictor) {
if (str.length() > 0) {
str += " &";
}
str += " nominal";
}
if (stringPredictor) {
if (str.length() > 0) {
str += " &";
}
str += " string";
}
if (datePredictor) {
if (str.length() > 0) {
str += " &";
}
str += " date";
}
if (relationalPredictor) {
if (str.length() > 0) {
str += " &";
}
str += " relational";
}
str += " predictors)";
switch (classType) {
case Attribute.NUMERIC:
str = " (numeric class," + str;
break;
case Attribute.NOMINAL:
str = " (nominal class," + str;
break;
case Attribute.STRING:
str = " (string class," + str;
break;
case Attribute.DATE:
str = " (date class," + str;
break;
case Attribute.RELATIONAL:
str = " (relational class," + str;
break;
}
print(str);
}
  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    // the $Revision$ keyword is expanded by the version control system
    return RevisionUtils.extract("$Revision$");
  }
  /**
   * Test method for this class
   *
   * @param args the commandline parameters
   */
  public static void main(String[] args) {
    // delegate to the generic check runner with a fresh instance of this check
    runCheck(new CheckAttributeSelection(), args);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/ClassifierAttributeEval.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ClassifierAttributeEval.java
* Copyright (C) 2009 University of Waikato, Hamilton, New Zealand
*
*/
package weka.attributeSelection;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.Set;
import java.util.Vector;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import weka.classifiers.Classifier;
import weka.core.Capabilities;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.SelectedTag;
import weka.core.Utils;
/**
<!-- globalinfo-start -->
* ClassifierAttributeEval :<br/>
* <br/>
* Evaluates the worth of an attribute by using a user-specified classifier.<br/>
* <p/>
<!-- globalinfo-end -->
*
<!-- options-start -->
* Valid options are: <p/>
* <pre> -L
* Evaluate an attribute by measuring the impact of leaving it out
* from the full set instead of considering its worth in isolation</pre>
*
* <pre> -B <base learner>
* class name of base learner to use for accuracy estimation.
* Place any classifier options LAST on the command line
* following a "--". eg.:
* -B weka.classifiers.bayes.NaiveBayes ... -- -K
* (default: weka.classifiers.rules.ZeroR)</pre>
*
* <pre> -F <num>
* number of cross validation folds to use for estimating accuracy.
* (default=5)</pre>
*
* <pre> -R <seed>
 * Seed for cross validation accuracy estimation.
* (default = 1)</pre>
*
* <pre> -T <num>
* threshold by which to execute another cross validation
* (standard deviation---expressed as a percentage of the mean).
* (default: 0.01 (1%))</pre>
*
* <pre> -E <acc | rmse | mae | f-meas | auc | auprc>
* Performance evaluation measure to use for selecting attributes.
* (Default = accuracy for discrete class and rmse for numeric class)</pre>
*
* <pre> -IRclass <label | index>
* Optional class value (label or 1-based index) to use in conjunction with
* IR statistics (f-meas, auc or auprc). Omitting this option will use
* the class-weighted average.</pre>
*
* <pre>
* Options specific to scheme weka.classifiers.rules.ZeroR:
* </pre>
*
* <pre> -output-debug-info
* If set, classifier is run in debug mode and
* may output additional info to the console</pre>
*
* <pre> -do-not-check-capabilities
* If set, classifier capabilities are not checked before classifier is built
* (use with caution).</pre>
*
* <pre> -execution-slots <integer>
* Number of attributes to evaluate in parallel.
* Default = 1 (i.e. no parallelism)</pre>
*
<!-- options-end -->
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision: 14195 $
*/
public class ClassifierAttributeEval extends ASEvaluation implements AttributeEvaluator, OptionHandler {

  /** for serialization. */
  private static final long serialVersionUID = 2442390690522602284L;

  /** The training instances (header only is retained). */
  protected Instances m_trainInstances;

  /** Holds the merit scores for each attribute, indexed by attribute index. */
  protected double[] m_merit;

  /** The configured underlying Wrapper instance to use for evaluation. */
  protected WrapperSubsetEval m_wrapperTemplate = new WrapperSubsetEval();

  /** Holds toString() info for the wrapper. */
  protected String m_wrapperSetup = "";

  /**
   * Whether to leave each attribute out in turn and evaluate rather than just
   * evaluate on each attribute.
   */
  protected boolean m_leaveOneOut;

  /** Executor service for multi-threading. */
  protected transient ExecutorService m_pool;

  /** The number of attributes to evaluate in parallel. */
  protected int m_executionSlots = 1;

  /**
   * Constructor.
   */
  public ClassifierAttributeEval() {
    this.resetOptions();
  }

  /**
   * Returns a string describing this attribute evaluator.
   *
   * @return a description of the evaluator suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String globalInfo() {
    return "ClassifierAttributeEval :\n\nEvaluates the worth of an attribute by " + "using a user-specified classifier.\n";
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> result = new Vector<Option>();
    // expose the options of the underlying wrapper evaluator as well
    Enumeration<Option> wrapperOpts = this.m_wrapperTemplate.listOptions();
    while (wrapperOpts.hasMoreElements()) {
      result.addElement(wrapperOpts.nextElement());
    }
    result.addElement(new Option("\tEvaluate an attribute by measuring the impact of leaving it out\n\t" + "from the full set instead of considering its worth in isolation", "L", 0, "-L"));
    result.addElement(new Option("\tNumber of attributes to evaluate in parallel.\n\t" + "Default = 1 (i.e. no parallelism)", "execution-slots", 1, "-execution-slots <integer>"));
    return result.elements();
  }

  /**
   * Parses a given list of options.
   * <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -B &lt;base learner&gt;
   *  class name of base learner to use for accuracy estimation.
   *  Place any classifier options LAST on the command line
   *  following a "--". eg.:
   *   -B weka.classifiers.bayes.NaiveBayes ... -- -K
   *  (default: weka.classifiers.rules.ZeroR)</pre>
   *
   * <pre> -F &lt;num&gt;
   *  number of cross validation folds to use for estimating accuracy.
   *  (default=5)</pre>
   *
   * <pre> -R &lt;seed&gt;
   *  Seed for cross validation accuracy estimation.
   *  (default = 1)</pre>
   *
   * <pre> -T &lt;num&gt;
   *  threshold by which to execute another cross validation
   *  (standard deviation---expressed as a percentage of the mean).
   *  (default: 0.01 (1%))</pre>
   *
   * <pre> -E &lt;acc | rmse | mae | f-meas | auc | auprc&gt;
   *  Performance evaluation measure to use for selecting attributes.
   *  (Default = accuracy for discrete class and rmse for numeric class)</pre>
   *
   * <pre> -IRclass &lt;label | index&gt;
   *  Optional class value (label or 1-based index) to use in conjunction with
   *  IR statistics (f-meas, auc or auprc). Omitting this option will use
   *  the class-weighted average.</pre>
   *
   * <pre>
   * Options specific to scheme weka.classifiers.rules.ZeroR:
   * </pre>
   *
   * <pre> -output-debug-info
   *  If set, classifier is run in debug mode and
   *  may output additional info to the console</pre>
   *
   * <pre> -do-not-check-capabilities
   *  If set, classifier capabilities are not checked before classifier is built
   *  (use with caution).</pre>
   *
   * <pre> -L
   *  Evaluate an attribute by measuring the impact of leaving it out
   *  from the full set instead of considering its worth in isolation</pre>
   *
   * <pre> -execution-slots &lt;integer&gt;
   *  Number of attributes to evaluate in parallel.
   *  Default = 1 (i.e. no parallelism)</pre>
   *
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  @Override
  public void setOptions(final String[] options) throws Exception {
    this.resetOptions();
    this.m_leaveOneOut = Utils.getFlag('L', options);
    String slots = Utils.getOption("execution-slots", options);
    if (slots.length() > 0) {
      this.m_executionSlots = Integer.parseInt(slots);
    }
    // remaining options are handed on to the wrapper template
    this.m_wrapperTemplate.setOptions(options);
    Utils.checkForRemainingOptions(options);
  }

  /**
   * returns the current setup.
   *
   * @return the options of the current setup
   */
  @Override
  public String[] getOptions() {
    ArrayList<String> result;
    result = new ArrayList<String>();
    if (this.m_leaveOneOut) {
      result.add("-L");
    }
    result.add("-execution-slots");
    result.add("" + this.m_executionSlots);
    for (String o : this.m_wrapperTemplate.getOptions()) {
      result.add(o);
    }
    return result.toArray(new String[result.size()]);
  }

  /**
   * Tip text for this property
   *
   * @return the tip text for this property
   */
  public String leaveOneAttributeOutTipText() {
    return "Evaluate an attribute by measuring the impact of leaving it " + "out from the full set instead of considering its worth in isolation.";
  }

  /**
   * Set whether to evaluate the merit of an attribute based on the impact of
   * leaving it out from the full set instead of considering its worth in
   * isolation
   *
   * @param l true if each attribute should be evaluated by measuring the impact
   *          of leaving it out from the full set
   */
  public void setLeaveOneAttributeOut(final boolean l) {
    this.m_leaveOneOut = l;
  }

  /**
   * Get whether to evaluate the merit of an attribute based on the impact of
   * leaving it out from the full set instead of considering its worth in
   * isolation
   *
   * @return true if each attribute should be evaluated by measuring the impact
   *         of leaving it out from the full set
   */
  public boolean getLeaveOneAttributeOut() {
    return this.m_leaveOneOut;
  }

  /**
   * Tip text for this property.
   *
   * @return the tip text for this property
   */
  public String numToEvaluateInParallelTipText() {
    return "The number of attributes to evaluate in parallel";
  }

  /**
   * Set the number of attributes to evaluate in parallel
   *
   * @param n the number of attributes to evaluate in parallel
   */
  public void setNumToEvaluateInParallel(final int n) {
    this.m_executionSlots = n;
  }

  /**
   * Get the number of attributes to evaluate in parallel
   *
   * @return the number of attributes to evaluate in parallel
   */
  public int getNumToEvaluateInParallel() {
    return this.m_executionSlots;
  }

  /**
   * Set the class value (label or index) to use with IR metric evaluation of
   * subsets. Leaving this unset will result in the class weighted average for
   * the IR metric being used.
   *
   * @param val the class label or 1-based index of the class label to use when
   *          evaluating subsets with an IR metric
   */
  public void setIRClassValue(final String val) {
    this.m_wrapperTemplate.setIRClassValue(val);
  }

  /**
   * Get the class value (label or index) to use with IR metric evaluation of
   * subsets. Leaving this unset will result in the class weighted average for
   * the IR metric being used.
   *
   * @return the class label or 1-based index of the class label to use when
   *         evaluating subsets with an IR metric
   */
  public String getIRClassValue() {
    return this.m_wrapperTemplate.getIRClassValue();
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String IRClassValueTipText() {
    return "The class label, or 1-based index of the class label, to use " + "when evaluating subsets with an IR metric (such as f-measure " + "or AUC. Leaving this unset will result in the class frequency "
        + "weighted average of the metric being used.";
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String evaluationMeasureTipText() {
    return "The measure used to evaluate the performance of attribute combinations.";
  }

  /**
   * Gets the currently set performance evaluation measure used for selecting
   * attributes for the decision table
   *
   * @return the performance evaluation measure
   */
  public SelectedTag getEvaluationMeasure() {
    return this.m_wrapperTemplate.getEvaluationMeasure();
  }

  /**
   * Sets the performance evaluation measure to use for selecting attributes for
   * the decision table
   *
   * @param newMethod the new performance evaluation metric to use
   */
  public void setEvaluationMeasure(final SelectedTag newMethod) {
    this.m_wrapperTemplate.setEvaluationMeasure(newMethod);
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String thresholdTipText() {
    return this.m_wrapperTemplate.thresholdTipText();
  }

  /**
   * Set the value of the threshold for repeating cross validation
   *
   * @param t the value of the threshold
   */
  public void setThreshold(final double t) {
    this.m_wrapperTemplate.setThreshold(t);
  }

  /**
   * Get the value of the threshold
   *
   * @return the threshold as a double
   */
  public double getThreshold() {
    return this.m_wrapperTemplate.getThreshold();
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String foldsTipText() {
    return this.m_wrapperTemplate.foldsTipText();
  }

  /**
   * Set the number of folds to use for accuracy estimation
   *
   * @param f the number of folds
   */
  public void setFolds(final int f) {
    this.m_wrapperTemplate.setFolds(f);
  }

  /**
   * Get the number of folds used for accuracy estimation
   *
   * @return the number of folds
   */
  public int getFolds() {
    return this.m_wrapperTemplate.getFolds();
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String seedTipText() {
    return this.m_wrapperTemplate.seedTipText();
  }

  /**
   * Set the seed to use for cross validation
   *
   * @param s the seed
   */
  public void setSeed(final int s) {
    this.m_wrapperTemplate.setSeed(s);
  }

  /**
   * Get the random number seed used for cross validation
   *
   * @return the seed
   */
  public int getSeed() {
    return this.m_wrapperTemplate.getSeed();
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String classifierTipText() {
    return this.m_wrapperTemplate.classifierTipText();
  }

  /**
   * Set the classifier to use for accuracy estimation
   *
   * @param newClassifier the Classifier to use.
   */
  public void setClassifier(final Classifier newClassifier) {
    this.m_wrapperTemplate.setClassifier(newClassifier);
  }

  /**
   * Get the classifier used as the base learner.
   *
   * @return the classifier used as the classifier
   */
  public Classifier getClassifier() {
    return this.m_wrapperTemplate.getClassifier();
  }

  /**
   * Returns the capabilities of this evaluator.
   *
   * @return the capabilities of this evaluator
   * @see Capabilities
   */
  @Override
  public Capabilities getCapabilities() {
    Capabilities result;
    result = this.m_wrapperTemplate.getClassifier().getCapabilities();
    result.setOwner(this);
    return result;
  }

  /**
   * Initializes a ClassifierAttribute attribute evaluator. Each attribute
   * (plus the "no attributes" baseline, index -1) is evaluated by a separate
   * WrapperSubsetEval task submitted to a fixed-size thread pool; merits are
   * then stored relative to the baseline.
   *
   * @param data set of instances serving as training data
   * @throws Exception if the evaluator has not been generated successfully
   * @throws InterruptedException if the calling thread is interrupted
   */
  @Override
  public void buildEvaluator(final Instances data) throws Exception {
    // can evaluator handle data?
    this.getCapabilities().testWithFail(data);
    this.m_trainInstances = new Instances(data, 0);
    double baseMerit = 0;
    this.m_merit = new double[data.numAttributes()];
    // guard against a non-positive slot count, which would make
    // newFixedThreadPool throw IllegalArgumentException
    this.m_pool = Executors.newFixedThreadPool(Math.max(1, this.m_executionSlots));
    try {
      Set<Future<double[]>> results = new HashSet<Future<double[]>>();
      // i == -1 evaluates the baseline (empty set / full set, depending on
      // the leave-one-out mode)
      for (int i = -1; i < data.numAttributes(); i++) {
        // thread interrupted; throw exception
        if (Thread.interrupted()) {
          throw new InterruptedException("Killed WEKA");
        }
        if (i != data.classIndex()) {
          final int attIndex = i;
          Future<double[]> futureEval = this.m_pool.submit(new Callable<double[]>() {
            @Override
            public double[] call() throws Exception {
              double[] eval = new double[2];
              eval[0] = attIndex;
              WrapperSubsetEval evaluator = new WrapperSubsetEval();
              evaluator.setOptions(ClassifierAttributeEval.this.m_wrapperTemplate.getOptions());
              evaluator.buildEvaluator(data);
              if (ClassifierAttributeEval.this.m_wrapperSetup.length() == 0) {
                ClassifierAttributeEval.this.m_wrapperSetup = evaluator.toString();
              }
              BitSet b = new BitSet(data.numAttributes());
              if (ClassifierAttributeEval.this.m_leaveOneOut) {
                b.set(0, data.numAttributes());
                b.set(data.classIndex(), false);
              }
              if (attIndex >= 0) {
                b.set(attIndex, !ClassifierAttributeEval.this.m_leaveOneOut);
              }
              eval[1] = evaluator.evaluateSubset(b);
              return eval;
            }
          });
          results.add(futureEval);
        }
      }
      for (Future<double[]> f : results) {
        // thread interrupted; throw exception
        if (Thread.interrupted()) {
          throw new InterruptedException("Killed WEKA");
        }
        // fetch the result once instead of blocking on get() repeatedly
        double[] eval = f.get();
        if (eval[0] != -1) {
          this.m_merit[(int) eval[0]] = eval[1];
        } else {
          baseMerit = eval[1];
        }
      }
      for (int i = 0; i < data.numAttributes(); i++) {
        this.m_merit[i] = this.m_leaveOneOut ? baseMerit - this.m_merit[i] : this.m_merit[i] - baseMerit;
      }
    } finally {
      // previously the pool was only shut down on the success path, leaking
      // live threads whenever a task failed or the build was interrupted;
      // shutdownNow() also cancels any still-pending evaluations
      this.m_pool.shutdownNow();
    }
    this.m_trainInstances = new Instances(this.m_trainInstances, 0);
  }

  /**
   * Resets to defaults.
   */
  protected void resetOptions() {
    this.m_trainInstances = null;
    this.m_wrapperTemplate = new WrapperSubsetEval();
    this.m_wrapperSetup = "";
  }

  /**
   * Evaluates an individual attribute by measuring the amount of information
   * gained about the class given the attribute.
   *
   * @param attribute the index of the attribute to be evaluated
   * @return the evaluation
   * @throws Exception if the attribute could not be evaluated
   */
  @Override
  public double evaluateAttribute(final int attribute) throws Exception {
    return this.m_merit[attribute];
  }

  /**
   * Return a description of the evaluator.
   *
   * @return description as a string
   */
  @Override
  public String toString() {
    StringBuffer text = new StringBuffer();
    if (this.m_trainInstances == null) {
      text.append("\tClassifier feature evaluator has not been built yet");
    } else {
      text.append("\tClassifier feature evaluator " + (this.m_leaveOneOut ? "(leave one out)" : "") + "\n\n");
      text.append("\tUsing ");
      text.append(this.m_wrapperSetup);
    }
    text.append("\n");
    return text.toString();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 14195 $");
  }

  /**
   * Main method for executing this class.
   *
   * @param args the options
   */
  public static void main(final String[] args) {
    runEvaluator(new ClassifierAttributeEval(), args);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/ClassifierSubsetEval.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ClassifierSubsetEval.java
* Copyright (C) 2000 University of Waikato, Hamilton, New Zealand
*
*/
package weka.attributeSelection;
import java.io.File;
import java.util.BitSet;
import java.util.Collections;
import java.util.Enumeration;
import java.util.List;
import java.util.Random;
import java.util.Vector;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.classifiers.evaluation.AbstractEvaluationMetric;
import weka.classifiers.evaluation.InformationRetrievalEvaluationMetric;
import weka.classifiers.rules.ZeroR;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.SelectedTag;
import weka.core.Tag;
import weka.core.Utils;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.Remove;
/**
<!-- globalinfo-start -->
* Classifier subset evaluator:<br>
* <br>
 * Evaluates attribute subsets on training data or a separate hold out testing set. Uses a classifier to estimate the 'merit' of a set of attributes.
* <br><br>
<!-- globalinfo-end -->
*
<!-- options-start -->
* Valid options are: <p>
*
* <pre> -B <classifier>
* class name of the classifier to use for accuracy estimation.
* Place any classifier options LAST on the command line
* following a "--". eg.:
* -B weka.classifiers.bayes.NaiveBayes ... -- -K
* (default: weka.classifiers.rules.ZeroR)</pre>
*
* <pre> -T
* Use the training data to estimate accuracy.</pre>
*
* <pre> -H <filename>
* Name of the hold out/test set to
* estimate accuracy on.</pre>
*
* <pre> -percentage-split
* Perform a percentage split on the training data.
* Use in conjunction with -T.</pre>
*
* <pre> -P
* Split percentage to use (default = 90).</pre>
*
* <pre> -S
* Random seed for percentage split (default = 1).</pre>
*
* <pre> -E <DEFAULT|ACC|RMSE|MAE|F-MEAS|AUC|AUPRC|CORR-COEFF>
* Performance evaluation measure to use for selecting attributes.
* (Default = default: accuracy for discrete class and rmse for numeric class)</pre>
*
* <pre> -IRclass <label | index>
* Optional class value (label or 1-based index) to use in conjunction with
* IR statistics (f-meas, auc or auprc). Omitting this option will use
* the class-weighted average.</pre>
*
* <pre>
* Options specific to scheme weka.classifiers.rules.ZeroR:
* </pre>
*
* <pre> -output-debug-info
* If set, classifier is run in debug mode and
* may output additional info to the console</pre>
*
* <pre> -do-not-check-capabilities
* If set, classifier capabilities are not checked before classifier is built
* (use with caution).</pre>
*
* <pre> -num-decimal-places
* The number of decimal places for the output of numbers in the model (default 2).</pre>
*
* <pre> -batch-size
* The desired batch size for batch prediction (default 100).</pre>
*
<!-- options-end -->
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision: 10332 $
*/
public class ClassifierSubsetEval extends HoldOutSubsetEvaluator implements OptionHandler, ErrorBasedMeritEvaluator {
/** for serialization */
static final long serialVersionUID = 7532217899385278710L;
/** training instances */
private Instances m_trainingInstances;
/** class index of the training instances */
private int m_classIndex;
/** number of attributes in the training data */
private int m_numAttribs;
/** number of training instances */
// private int m_numInstances; NOT USED
/** holds the template classifier to use for error estimates */
private Classifier m_ClassifierTemplate = new ZeroR();
/**
 * Holds the classifier used when evaluating single hold-out instances - this
 * is used by RaceSearch and the trained classifier may need to persist
 * between calls to that particular method.
 */
private Classifier m_Classifier = new ZeroR();
/** the file that contains hold out/test instances */
private File m_holdOutFile = new File("Click to set hold out or " + "test instances");
/** the instances to test on */
private Instances m_holdOutInstances;
/** evaluate on training data rather than separate hold out/test set */
private boolean m_useTraining = true;
/** Whether to hold out a percentage of the training data */
protected boolean m_usePercentageSplit;
/** Seed for randomizing prior to splitting training data */
protected int m_seed = 1;
/** The split to use if doing a percentage split */
protected String m_splitPercent = "90";
// numeric IDs for the supported evaluation measures; plugin metrics are
// appended after the built-in ones (see static initializer below)
public static final int EVAL_DEFAULT = 1;
public static final int EVAL_ACCURACY = 2;
public static final int EVAL_RMSE = 3;
public static final int EVAL_MAE = 4;
public static final int EVAL_FMEASURE = 5;
public static final int EVAL_AUC = 6;
public static final int EVAL_AUPRC = 7;
public static final int EVAL_CORRELATION = 8;
public static final int EVAL_PLUGIN = 9;
/** plugin metrics discovered at class-load time (may be null) */
protected static List<AbstractEvaluationMetric> PLUGIN_METRICS = AbstractEvaluationMetric.getPluginMetrics();
/** Holds all tags for metrics */
public static final Tag[] TAGS_EVALUATION;
// builds TAGS_EVALUATION: 8 built-in measures followed by one tag per
// statistic of each discovered plugin metric
static {
int totalPluginCount = 0;
if (PLUGIN_METRICS != null) {
for (AbstractEvaluationMetric m : PLUGIN_METRICS) {
totalPluginCount += m.getStatisticNames().size();
}
}
TAGS_EVALUATION = new Tag[8 + totalPluginCount];
TAGS_EVALUATION[0] = new Tag(EVAL_DEFAULT, "default", "Default: accuracy (discrete class); RMSE (numeric class)");
TAGS_EVALUATION[1] = new Tag(EVAL_ACCURACY, "acc", "Accuracy (discrete class only)");
TAGS_EVALUATION[2] = new Tag(EVAL_RMSE, "rmse", "RMSE (of the class probabilities for discrete class)");
TAGS_EVALUATION[3] = new Tag(EVAL_MAE, "mae", "MAE (of the class probabilities for discrete class)");
TAGS_EVALUATION[4] = new Tag(EVAL_FMEASURE, "f-meas", "F-measure (discrete class only)");
TAGS_EVALUATION[5] = new Tag(EVAL_AUC, "auc", "AUC (area under the ROC curve - discrete class only)");
TAGS_EVALUATION[6] = new Tag(EVAL_AUPRC, "auprc", "AUPRC (area under the precision-recall curve - discrete class only)");
TAGS_EVALUATION[7] = new Tag(EVAL_CORRELATION, "corr-coeff", "Correlation coefficient - numeric class only");
if (PLUGIN_METRICS != null) {
int index = 8;
for (AbstractEvaluationMetric m : PLUGIN_METRICS) {
for (String stat : m.getStatisticNames()) {
// NOTE(review): the array slot uses the pre-increment value of index
// while the tag id argument is evaluated AFTER the increment, so the
// id is (slot + 2). This mirrors WrapperSubsetEval upstream, but looks
// suspicious — confirm the ids are intended to skip a value.
TAGS_EVALUATION[index++] = new WrapperSubsetEval.PluginTag(index + 1, m, stat);
}
}
}
}
/** The evaluation measure to use */
protected Tag m_evaluationMeasure = TAGS_EVALUATION[0];
/**
 * If >= 0, and an IR metric is being used, then evaluate with respect to this
 * class value (0-based index)
 */
protected int m_IRClassVal = -1;
/** User supplied option for IR class value (either name or 1-based index) */
protected String m_IRClassValS = "";
/**
 * Returns a string describing this attribute evaluator.
 *
 * @return a description of the evaluator suitable for displaying in the
 *         explorer/experimenter gui
 */
public String globalInfo() {
  // fixed typo in the user-facing description: "seperate" -> "separate"
  return "Classifier subset evaluator:\n\nEvaluates attribute subsets on training data or a separate " + "hold out testing set. Uses a classifier to estimate the 'merit' of a set of attributes.";
}
/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 **/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> newVector = new Vector<Option>(8);
newVector.addElement(new Option("\tclass name of the classifier to use for accuracy estimation.\n" + "\tPlace any classifier options LAST on the command line\n" + "\tfollowing a \"--\". eg.:\n"
+ "\t\t-B weka.classifiers.bayes.NaiveBayes ... -- -K\n" + "\t(default: weka.classifiers.rules.ZeroR)", "B", 1, "-B <classifier>"));
newVector.addElement(new Option("\tUse the training data to estimate" + " accuracy.", "T", 0, "-T"));
newVector.addElement(new Option("\tName of the hold out/test set to " + "\n\testimate accuracy on.", "H", 1, "-H <filename>"));
newVector.addElement(new Option("\tPerform a percentage split on the " + "training data.\n\tUse in conjunction with -T.", "percentage-split", 0, "-percentage-split"));
newVector.addElement(new Option("\tSplit percentage to use (default = 90).", "P", 1, "-P"));
newVector.addElement(new Option("\tRandom seed for percentage split (default = 1).", "S", 1, "-S"));
newVector.addElement(
new Option("\tPerformance evaluation measure to use for selecting attributes.\n" + "\t(Default = default: accuracy for discrete class and rmse for " + "numeric class)", "E", 1, "-E " + Tag.toOptionList(TAGS_EVALUATION)));
newVector.addElement(new Option("\tOptional class value (label or 1-based index) to use in conjunction with\n" + "\tIR statistics (f-meas, auc or auprc). Omitting this option will use\n" + "\tthe class-weighted average.", "IRclass",
1, "-IRclass <label | index>"));
// also expose the options of the configured base classifier, if any
if ((this.m_ClassifierTemplate != null) && (this.m_ClassifierTemplate instanceof OptionHandler)) {
newVector.addElement(new Option("", "", 0, "\nOptions specific to " + "scheme " + this.m_ClassifierTemplate.getClass().getName() + ":"));
newVector.addAll(Collections.list(((OptionHandler) this.m_ClassifierTemplate).listOptions()));
}
return newVector.elements();
}
/**
 * Parses a given list of options.
 * <p/>
 *
 <!-- options-start -->
 * Valid options are: <p>
 *
 * <pre> -B &lt;classifier&gt;
 *  class name of the classifier to use for accuracy estimation.
 *  Place any classifier options LAST on the command line
 *  following a "--". eg.:
 *   -B weka.classifiers.bayes.NaiveBayes ... -- -K
 *  (default: weka.classifiers.rules.ZeroR)</pre>
 *
 * <pre> -T
 *  Use the training data to estimate accuracy.</pre>
 *
 * <pre> -H &lt;filename&gt;
 *  Name of the hold out/test set to
 *  estimate accuracy on.</pre>
 *
 * <pre> -percentage-split
 *  Perform a percentage split on the training data.
 *  Use in conjunction with -T.</pre>
 *
 * <pre> -P
 *  Split percentage to use (default = 90).</pre>
 *
 * <pre> -S
 *  Random seed for percentage split (default = 1).</pre>
 *
 * <pre> -E &lt;DEFAULT|ACC|RMSE|MAE|F-MEAS|AUC|AUPRC|CORR-COEFF&gt;
 *  Performance evaluation measure to use for selecting attributes.
 *  (Default = default: accuracy for discrete class and rmse for numeric class)</pre>
 *
 * <pre> -IRclass &lt;label | index&gt;
 *  Optional class value (label or 1-based index) to use in conjunction with
 *  IR statistics (f-meas, auc or auprc). Omitting this option will use
 *  the class-weighted average.</pre>
 *
 * <pre>
 * Options specific to scheme weka.classifiers.rules.ZeroR:
 * </pre>
 *
 * <pre> -output-debug-info
 *  If set, classifier is run in debug mode and
 *  may output additional info to the console</pre>
 *
 * <pre> -do-not-check-capabilities
 *  If set, classifier capabilities are not checked before classifier is built
 *  (use with caution).</pre>
 *
 * <pre> -num-decimal-places
 *  The number of decimal places for the output of numbers in the model (default 2).</pre>
 *
 * <pre> -batch-size
 *  The desired batch size for batch prediction (default 100).</pre>
 *
 <!-- options-end -->
 *
 * @param options the list of options as an array of strings
 * @throws Exception if an option is not supported
 */
@Override
public void setOptions(final String[] options) throws Exception {
  String optionString;
  this.resetOptions();
  optionString = Utils.getOption('B', options);
  if (optionString.length() == 0) {
    optionString = ZeroR.class.getName();
  }
  this.setClassifier(AbstractClassifier.forName(optionString, Utils.partitionOptions(options)));
  optionString = Utils.getOption('H', options);
  if (optionString.length() != 0) {
    this.setHoldOutFile(new File(optionString));
  }
  this.setUsePercentageSplit(Utils.getFlag("percentage-split", options));
  optionString = Utils.getOption('P', options);
  if (optionString.length() > 0) {
    this.setSplitPercent(optionString);
  }
  this.setUseTraining(Utils.getFlag('T', options));
  optionString = Utils.getOption('E', options);
  if (optionString.length() != 0) {
    for (Tag t : TAGS_EVALUATION) {
      if (t.getIDStr().equalsIgnoreCase(optionString)) {
        this.setEvaluationMeasure(new SelectedTag(t.getIDStr(), TAGS_EVALUATION));
        break;
      }
    }
  }
  // bug fix: the option is declared and documented as "IRclass" (see
  // listOptions), but was parsed here as "IRClass". Utils.getOption matches
  // flags case-sensitively, so "-IRclass" was silently ignored.
  optionString = Utils.getOption("IRclass", options);
  if (optionString.length() > 0) {
    this.setIRClassValue(optionString);
  }
  optionString = Utils.getOption("S", options);
  if (optionString.length() > 0) {
    this.setSeed(Integer.parseInt(optionString));
  }
}
/**
 * Tip text for the seed property, shown in the explorer/experimenter gui.
 *
 * @return the tip text
 */
public String seedTipText() {
  return "The random seed to use for randomizing the training data prior to performing a percentage split";
}
/**
 * Sets the random seed used to shuffle the data before a percentage split.
 *
 * @param s the seed to use
 */
public void setSeed(final int s) {
  m_seed = s;
}
/**
 * Gets the random seed used to shuffle the data before a percentage split.
 *
 * @return the seed in use
 */
public int getSeed() {
  return m_seed;
}
/**
 * Tip text for the percentage-split property, shown in the
 * explorer/experimenter gui.
 *
 * @return the tip text
 */
public String usePercentageSplitTipText() {
  return "Evaluate using a percentage split on the training data";
}
/**
 * Sets whether evaluation should use a percentage split of the training data.
 *
 * @param p true to evaluate on a percentage split
 */
public void setUsePercentageSplit(final boolean p) {
  m_usePercentageSplit = p;
}
/**
 * Gets whether evaluation uses a percentage split of the training data.
 *
 * @return true if a percentage split is performed
 */
public boolean getUsePercentageSplit() {
  return m_usePercentageSplit;
}
/**
 * Tip text for the split-percentage property, shown in the
 * explorer/experimenter gui.
 *
 * @return the tip text
 */
public String splitPercentTipText() {
  return "The percentage split to use";
}
/**
 * Sets the split percentage to use.
 *
 * @param sp the split percentage
 */
public void setSplitPercent(final String sp) {
  m_splitPercent = sp;
}
/**
 * Gets the split percentage in use.
 *
 * @return the split percentage
 */
public String getSplitPercent() {
  return m_splitPercent;
}
/**
 * Sets the class value (label or 1-based index) to use with IR metric
 * evaluation of subsets. Leaving this unset makes the class-weighted
 * average of the IR metric be used instead.
 *
 * @param val the class label or 1-based index to evaluate against
 */
public void setIRClassValue(final String val) {
  m_IRClassValS = val;
}
/**
 * Gets the class value (label or 1-based index) used with IR metric
 * evaluation of subsets; empty means the class-weighted average is used.
 *
 * @return the class label or 1-based index in use
 */
public String getIRClassValue() {
  return m_IRClassValS;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String IRClassValueTipText() {
return "The class label, or 1-based index of the class label, to use " + "when evaluating subsets with an IR metric (such as f-measure " + "or AUC. Leaving this unset will result in the class frequency "
+ "weighted average of the metric being used.";
}
	/**
	 * Returns the tip text for this property.
	 *
	 * @return tip text for this property suitable for displaying in the
	 *         explorer/experimenter gui
	 */
	public String evaluationMeasureTipText() {
		return "The measure used to evaluate the performance of attribute combinations.";
	}
	/**
	 * Gets the currently set performance evaluation measure used for selecting
	 * attributes for the decision table.
	 *
	 * @return the performance evaluation measure, wrapped in a fresh
	 *         SelectedTag resolved against TAGS_EVALUATION by its ID string
	 */
	public SelectedTag getEvaluationMeasure() {
		return new SelectedTag(this.m_evaluationMeasure.getIDStr(), TAGS_EVALUATION);
	}
	/**
	 * Sets the performance evaluation measure to use for selecting attributes
	 * for the decision table. Note: silently ignored when the supplied tag does
	 * not belong to the TAGS_EVALUATION group.
	 *
	 * @param newMethod the new performance evaluation metric to use
	 */
	public void setEvaluationMeasure(final SelectedTag newMethod) {
		if (newMethod.getTags() == TAGS_EVALUATION) {
			this.m_evaluationMeasure = newMethod.getSelectedTag();
		}
	}
	/**
	 * Returns the tip text for this property.
	 *
	 * @return tip text for this property suitable for displaying in the
	 *         explorer/experimenter gui
	 */
	public String classifierTipText() {
		return "Classifier to use for estimating the accuracy of subsets";
	}
	/**
	 * Sets the classifier to use for accuracy estimation. Both the template
	 * (copied for per-subset evaluation) and the current classifier are
	 * replaced.
	 *
	 * @param newClassifier the Classifier to use.
	 */
	public void setClassifier(final Classifier newClassifier) {
		this.m_ClassifierTemplate = newClassifier;
		this.m_Classifier = newClassifier;
	}
	/**
	 * Gets the classifier used as the base learner (the template, not the
	 * per-evaluation copy).
	 *
	 * @return the classifier used as the classifier
	 */
	public Classifier getClassifier() {
		return this.m_ClassifierTemplate;
	}
	/**
	 * Returns the tip text for this property.
	 *
	 * @return tip text for this property suitable for displaying in the
	 *         explorer/experimenter gui
	 */
	public String holdOutFileTipText() {
		return "File containing hold out/test instances.";
	}
	/**
	 * Gets the file that holds hold out/test instances.
	 *
	 * @return File that contains hold out instances (a "Click to set" placeholder
	 *         path when none has been chosen)
	 */
	public File getHoldOutFile() {
		return this.m_holdOutFile;
	}
	/**
	 * Sets the file that contains hold out/test instances; loaded lazily when
	 * the evaluator is built.
	 *
	 * @param h the hold out file
	 */
	public void setHoldOutFile(final File h) {
		this.m_holdOutFile = h;
	}
	/**
	 * Returns the tip text for this property.
	 *
	 * @return tip text for this property suitable for displaying in the
	 *         explorer/experimenter gui
	 */
	public String useTrainingTipText() {
		return "Use training data instead of hold out/test instances.";
	}
	/**
	 * Gets whether training data is to be used instead of hold out/test data.
	 *
	 * @return true if training data is to be used instead of hold out data
	 */
	public boolean getUseTraining() {
		return this.m_useTraining;
	}
	/**
	 * Sets whether training data is to be used instead of hold out/test data.
	 *
	 * @param t true if training data is to be used instead of hold out data
	 */
	public void setUseTraining(final boolean t) {
		this.m_useTraining = t;
	}
/**
* Gets the current settings of ClassifierSubsetEval
*
* @return an array of strings suitable for passing to setOptions()
*/
@Override
public String[] getOptions() {
Vector<String> options = new Vector<String>();
if (this.getClassifier() != null) {
options.add("-B");
options.add(this.getClassifier().getClass().getName());
}
if (this.getUseTraining()) {
options.add("-T");
}
options.add("-H");
options.add(this.getHoldOutFile().getPath());
if (this.getUsePercentageSplit()) {
options.add("-percentage-split");
options.add("-P");
options.add(this.m_splitPercent);
options.add("-S");
options.add("" + this.getSeed());
}
options.add("-E");
options.add(this.m_evaluationMeasure.getIDStr());
if (this.m_IRClassValS != null && this.m_IRClassValS.length() > 0) {
options.add("-IRClass");
options.add(this.m_IRClassValS);
}
if ((this.m_ClassifierTemplate != null) && (this.m_ClassifierTemplate instanceof OptionHandler)) {
String[] classifierOptions = ((OptionHandler) this.m_ClassifierTemplate).getOptions();
if (classifierOptions.length > 0) {
options.add("--");
Collections.addAll(options, classifierOptions);
}
}
return options.toArray(new String[0]);
}
/**
* Returns the capabilities of this evaluator.
*
* @return the capabilities of this evaluator
* @see Capabilities
*/
@Override
public Capabilities getCapabilities() {
Capabilities result;
if (this.getClassifier() == null) {
result = super.getCapabilities();
result.disableAll();
} else {
result = this.getClassifier().getCapabilities();
}
// set dependencies
for (Capability cap : Capability.values()) {
result.enableDependency(cap);
}
return result;
}
/**
* Generates a attribute evaluator. Has to initialize all fields of the
* evaluator that are not being set via options.
*
* @param data set of instances serving as training data
* @throws Exception if the evaluator has not been generated successfully
*/
@Override
public void buildEvaluator(final Instances data) throws Exception {
// can evaluator handle data?
this.getCapabilities().testWithFail(data);
this.m_trainingInstances = new Instances(data);
this.m_classIndex = this.m_trainingInstances.classIndex();
this.m_numAttribs = this.m_trainingInstances.numAttributes();
// m_numInstances = m_trainingInstances.numInstances(); NOT USED
// load the testing data
if (!this.m_useTraining && (!this.getHoldOutFile().getPath().startsWith("Click to set"))) {
java.io.Reader r = new java.io.BufferedReader(new java.io.FileReader(this.getHoldOutFile().getPath()));
this.m_holdOutInstances = new Instances(r);
this.m_holdOutInstances.setClassIndex(this.m_trainingInstances.classIndex());
if (this.m_trainingInstances.equalHeaders(this.m_holdOutInstances) == false) {
throw new Exception("Hold out/test set is not compatable with " + "training data.\n" + this.m_trainingInstances.equalHeadersMsg(this.m_holdOutInstances));
}
} else if (this.m_usePercentageSplit) {
int splitPercentage = 90; // default
try {
splitPercentage = Integer.parseInt(this.m_splitPercent);
} catch (NumberFormatException n) {
}
this.m_trainingInstances.randomize(new Random(this.m_seed));
int trainSize = Math.round(this.m_trainingInstances.numInstances() * splitPercentage / 100);
int testSize = this.m_trainingInstances.numInstances() - trainSize;
this.m_holdOutInstances = new Instances(this.m_trainingInstances, trainSize, testSize);
this.m_trainingInstances = new Instances(this.m_trainingInstances, 0, trainSize);
}
if (this.m_IRClassValS != null && this.m_IRClassValS.length() > 0) {
// try to parse as a number first
try {
this.m_IRClassVal = Integer.parseInt(this.m_IRClassValS);
// make zero-based
this.m_IRClassVal--;
} catch (NumberFormatException e) {
// now try as a named class label
this.m_IRClassVal = this.m_trainingInstances.classAttribute().indexOfValue(this.m_IRClassValS);
}
}
}
/**
* Evaluates a subset of attributes
*
* @param subset a bitset representing the attribute subset to be evaluated
* @return the error rate
* @throws Exception if the subset could not be evaluated
*/
@Override
public double evaluateSubset(final BitSet subset) throws Exception {
int i, j;
double evalMetric = 0;
int numAttributes = 0;
Instances trainCopy = null;
Instances testCopy = null;
String[] cOpts = null;
Evaluation evaluation = null;
if (this.m_ClassifierTemplate instanceof OptionHandler) {
cOpts = ((OptionHandler) this.m_ClassifierTemplate).getOptions();
}
Classifier classifier = AbstractClassifier.forName(this.m_ClassifierTemplate.getClass().getName(), cOpts);
Remove delTransform = new Remove();
delTransform.setInvertSelection(true);
// copy the training instances
trainCopy = new Instances(this.m_trainingInstances);
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
if (!this.m_useTraining) {
if (this.m_holdOutInstances == null) {
throw new Exception("Must specify a set of hold out/test instances " + "with -H");
}
// copy the test instances
testCopy = new Instances(this.m_holdOutInstances);
} else if (this.m_usePercentageSplit) {
testCopy = new Instances(this.m_holdOutInstances);
}
// count attributes set in the BitSet
for (i = 0; i < this.m_numAttribs; i++) {
if (subset.get(i)) {
numAttributes++;
}
}
// set up an array of attribute indexes for the filter (+1 for the class)
int[] featArray = new int[numAttributes + 1];
for (i = 0, j = 0; i < this.m_numAttribs; i++) {
if (subset.get(i)) {
featArray[j++] = i;
}
}
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
featArray[j] = this.m_classIndex;
delTransform.setAttributeIndicesArray(featArray);
delTransform.setInputFormat(trainCopy);
trainCopy = Filter.useFilter(trainCopy, delTransform);
if (!this.m_useTraining || this.m_usePercentageSplit) {
testCopy = Filter.useFilter(testCopy, delTransform);
}
// build the classifier
classifier.buildClassifier(trainCopy);
evaluation = new Evaluation(trainCopy);
if (!this.m_useTraining || this.m_usePercentageSplit) {
evaluation.evaluateModel(classifier, testCopy);
} else {
evaluation.evaluateModel(classifier, trainCopy);
}
String metricName = null;
String statName = null;
AbstractEvaluationMetric pluginMetric = null;
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
switch (this.m_evaluationMeasure.getID()) {
case EVAL_DEFAULT:
evalMetric = evaluation.errorRate();
break;
case EVAL_ACCURACY:
evalMetric = evaluation.errorRate();
break;
case EVAL_RMSE:
evalMetric = evaluation.rootMeanSquaredError();
break;
case EVAL_MAE:
evalMetric = evaluation.meanAbsoluteError();
break;
case EVAL_FMEASURE:
if (this.m_IRClassVal < 0) {
evalMetric = evaluation.weightedFMeasure();
} else {
evalMetric = evaluation.fMeasure(this.m_IRClassVal);
}
break;
case EVAL_AUC:
if (this.m_IRClassVal < 0) {
evalMetric = evaluation.weightedAreaUnderROC();
} else {
evalMetric = evaluation.areaUnderROC(this.m_IRClassVal);
}
break;
case EVAL_AUPRC:
if (this.m_IRClassVal < 0) {
evalMetric = evaluation.weightedAreaUnderPRC();
} else {
evalMetric = evaluation.areaUnderPRC(this.m_IRClassVal);
}
case EVAL_CORRELATION:
evalMetric = evaluation.correlationCoefficient();
break;
default:
if (this.m_evaluationMeasure.getID() >= EVAL_PLUGIN) {
metricName = ((WrapperSubsetEval.PluginTag) this.m_evaluationMeasure).getMetricName();
statName = ((WrapperSubsetEval.PluginTag) this.m_evaluationMeasure).getStatisticName();
statName = ((WrapperSubsetEval.PluginTag) this.m_evaluationMeasure).getStatisticName();
pluginMetric = evaluation.getPluginMetric(metricName);
if (pluginMetric == null) {
throw new Exception("Metric " + metricName + " does not seem to be " + "available");
}
}
if (pluginMetric instanceof InformationRetrievalEvaluationMetric) {
if (this.m_IRClassVal < 0) {
evalMetric = ((InformationRetrievalEvaluationMetric) pluginMetric).getClassWeightedAverageStatistic(statName);
} else {
evalMetric = ((InformationRetrievalEvaluationMetric) pluginMetric).getStatistic(statName, this.m_IRClassVal);
}
} else {
evalMetric = pluginMetric.getStatistic(statName);
}
break;
}
switch (this.m_evaluationMeasure.getID()) {
case EVAL_DEFAULT:
case EVAL_ACCURACY:
case EVAL_RMSE:
case EVAL_MAE:
if (this.m_trainingInstances.classAttribute().isNominal() && (this.m_evaluationMeasure.getID() == EVAL_DEFAULT || this.m_evaluationMeasure.getID() == EVAL_ACCURACY)) {
evalMetric = 1 - evalMetric;
} else {
evalMetric = -evalMetric; // maximize
}
break;
default:
if (pluginMetric != null && !pluginMetric.statisticIsMaximisable(statName)) {
evalMetric = -evalMetric; // maximize
}
}
return evalMetric;
}
/**
* Evaluates a subset of attributes with respect to a set of instances.
* Calling this function overrides any test/hold out instances set from
* setHoldOutFile.
*
* @param subset a bitset representing the attribute subset to be evaluated
* @param holdOut a set of instances (possibly separate and distinct from
* those use to build/train the evaluator) with which to evaluate the
* merit of the subset
* @return the "merit" of the subset on the holdOut data
* @throws Exception if the subset cannot be evaluated
*/
@Override
public double evaluateSubset(final BitSet subset, final Instances holdOut) throws Exception {
int i, j;
double evalMetric = 0;
int numAttributes = 0;
Instances trainCopy = null;
Instances testCopy = null;
String[] cOpts = null;
Evaluation evaluation = null;
if (this.m_ClassifierTemplate instanceof OptionHandler) {
cOpts = ((OptionHandler) this.m_ClassifierTemplate).getOptions();
}
Classifier classifier = AbstractClassifier.forName(this.m_ClassifierTemplate.getClass().getName(), cOpts);
if (this.m_trainingInstances.equalHeaders(holdOut) == false) {
throw new Exception("evaluateSubset : Incompatable instance types.\n" + this.m_trainingInstances.equalHeadersMsg(holdOut));
}
Remove delTransform = new Remove();
delTransform.setInvertSelection(true);
// copy the training instances
trainCopy = new Instances(this.m_trainingInstances);
testCopy = new Instances(holdOut);
// count attributes set in the BitSet
for (i = 0; i < this.m_numAttribs; i++) {
if (subset.get(i)) {
numAttributes++;
}
}
// set up an array of attribute indexes for the filter (+1 for the class)
int[] featArray = new int[numAttributes + 1];
for (i = 0, j = 0; i < this.m_numAttribs; i++) {
if (subset.get(i)) {
featArray[j++] = i;
}
}
featArray[j] = this.m_classIndex;
delTransform.setAttributeIndicesArray(featArray);
delTransform.setInputFormat(trainCopy);
trainCopy = Filter.useFilter(trainCopy, delTransform);
testCopy = Filter.useFilter(testCopy, delTransform);
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
// build the classifier
classifier.buildClassifier(trainCopy);
evaluation = new Evaluation(trainCopy);
evaluation.evaluateModel(classifier, testCopy);
String metricName = null;
String statName = null;
AbstractEvaluationMetric pluginMetric = null;
switch (this.m_evaluationMeasure.getID()) {
case EVAL_DEFAULT:
evalMetric = evaluation.errorRate();
break;
case EVAL_ACCURACY:
evalMetric = evaluation.errorRate();
break;
case EVAL_RMSE:
evalMetric = evaluation.rootMeanSquaredError();
break;
case EVAL_MAE:
evalMetric = evaluation.meanAbsoluteError();
break;
case EVAL_FMEASURE:
if (this.m_IRClassVal < 0) {
evalMetric = evaluation.weightedFMeasure();
} else {
evalMetric = evaluation.fMeasure(this.m_IRClassVal);
}
break;
case EVAL_AUC:
if (this.m_IRClassVal < 0) {
evalMetric = evaluation.weightedAreaUnderROC();
} else {
evalMetric = evaluation.areaUnderROC(this.m_IRClassVal);
}
break;
case EVAL_AUPRC:
if (this.m_IRClassVal < 0) {
evalMetric = evaluation.weightedAreaUnderPRC();
} else {
evalMetric = evaluation.areaUnderPRC(this.m_IRClassVal);
}
case EVAL_CORRELATION:
evalMetric = evaluation.correlationCoefficient();
break;
default:
if (this.m_evaluationMeasure.getID() >= EVAL_PLUGIN) {
metricName = ((WrapperSubsetEval.PluginTag) this.m_evaluationMeasure).getMetricName();
statName = ((WrapperSubsetEval.PluginTag) this.m_evaluationMeasure).getStatisticName();
statName = ((WrapperSubsetEval.PluginTag) this.m_evaluationMeasure).getStatisticName();
pluginMetric = evaluation.getPluginMetric(metricName);
if (pluginMetric == null) {
throw new Exception("Metric " + metricName + " does not seem to be " + "available");
}
}
if (pluginMetric instanceof InformationRetrievalEvaluationMetric) {
if (this.m_IRClassVal < 0) {
evalMetric = ((InformationRetrievalEvaluationMetric) pluginMetric).getClassWeightedAverageStatistic(statName);
} else {
evalMetric = ((InformationRetrievalEvaluationMetric) pluginMetric).getStatistic(statName, this.m_IRClassVal);
}
} else {
evalMetric = pluginMetric.getStatistic(statName);
}
break;
}
switch (this.m_evaluationMeasure.getID()) {
case EVAL_DEFAULT:
case EVAL_ACCURACY:
case EVAL_RMSE:
case EVAL_MAE:
if (this.m_trainingInstances.classAttribute().isNominal() && (this.m_evaluationMeasure.getID() == EVAL_DEFAULT || this.m_evaluationMeasure.getID() == EVAL_ACCURACY)) {
evalMetric = 1 - evalMetric;
} else {
evalMetric = -evalMetric; // maximize
}
break;
default:
if (pluginMetric != null && !pluginMetric.statisticIsMaximisable(statName)) {
evalMetric = -evalMetric; // maximize
}
}
return evalMetric;
}
	/**
	 * Evaluates a subset of attributes with respect to a single instance. Calling
	 * this function overrides any hold out/test instances set through
	 * setHoldOutFile. Only the default (error-based) evaluation measure is
	 * supported by this overload.
	 *
	 * @param subset a bitset representing the attribute subset to be evaluated
	 * @param holdOut a single instance (possibly not one of those used to
	 *          build/train the evaluator) with which to evaluate the merit of the
	 *          subset
	 * @param retrain true if the classifier should be retrained with respect to
	 *          the new subset before testing on the holdOut instance.
	 * @return the "merit" of the subset on the holdOut instance (the negated
	 *         error, so larger is better)
	 * @throws Exception if the subset cannot be evaluated
	 */
	@Override
	public double evaluateSubset(final BitSet subset, final Instance holdOut, final boolean retrain) throws Exception {
		// this overload only supports the default measure
		if (this.m_evaluationMeasure.getID() != EVAL_DEFAULT) {
			throw new Exception("Can only use default evaluation measure in the method");
		}
		int i, j;
		double error;
		int numAttributes = 0;
		Instances trainCopy = null;
		Instance testCopy = null;
		if (this.m_trainingInstances.equalHeaders(holdOut.dataset()) == false) {
			throw new Exception("evaluateSubset : Incompatable instance types.\n" + this.m_trainingInstances.equalHeadersMsg(holdOut.dataset()));
		}
		Remove delTransform = new Remove();
		delTransform.setInvertSelection(true);
		// copy the training instances
		trainCopy = new Instances(this.m_trainingInstances);
		testCopy = (Instance) holdOut.copy();
		// count attributes set in the BitSet
		for (i = 0; i < this.m_numAttribs; i++) {
			if (subset.get(i)) {
				numAttributes++;
			}
		}
		// XXX thread interrupted; throw exception
		if (Thread.interrupted()) {
			throw new InterruptedException("Killed WEKA");
		}
		// set up an array of attribute indexes for the filter (+1 for the class)
		int[] featArray = new int[numAttributes + 1];
		for (i = 0, j = 0; i < this.m_numAttribs; i++) {
			if (subset.get(i)) {
				featArray[j++] = i;
			}
		}
		featArray[j] = this.m_classIndex;
		delTransform.setAttributeIndicesArray(featArray);
		delTransform.setInputFormat(trainCopy);
		if (retrain) {
			trainCopy = Filter.useFilter(trainCopy, delTransform);
			// build the classifier
			this.m_Classifier.buildClassifier(trainCopy);
		}
		// push the single hold-out instance through the same attribute filter
		delTransform.input(testCopy);
		testCopy = delTransform.output();
		double pred;
		double[] distrib;
		distrib = this.m_Classifier.distributionForInstance(testCopy);
		if (this.m_trainingInstances.classAttribute().isNominal()) {
			// probability assigned to the actual class of the hold-out instance
			pred = distrib[(int) testCopy.classValue()];
		} else {
			// numeric class: the prediction occupies slot 0 of the distribution
			pred = distrib[0];
		}
		if (this.m_trainingInstances.classAttribute().isNominal()) {
			error = 1.0 - pred;
		} else {
			// NOTE(review): signed residual, not an absolute error -- confirm this
			// is the intended behaviour for numeric classes
			error = testCopy.classValue() - pred;
		}
		// return the negative of the error as search methods need to
		// maximize something
		return -error;
	}
/**
* Returns a string describing classifierSubsetEval
*
* @return the description as a string
*/
@Override
public String toString() {
StringBuffer text = new StringBuffer();
if (this.m_trainingInstances == null) {
text.append("\tClassifier subset evaluator has not been built yet\n");
} else {
text.append("\tClassifier Subset Evaluator\n");
text.append("\tLearning scheme: " + this.getClassifier().getClass().getName() + "\n");
text.append("\tScheme options: ");
String[] classifierOptions = new String[0];
if (this.m_ClassifierTemplate instanceof OptionHandler) {
classifierOptions = ((OptionHandler) this.m_ClassifierTemplate).getOptions();
for (String classifierOption : classifierOptions) {
text.append(classifierOption + " ");
}
}
text.append("\n");
text.append("\tHold out/test set: ");
if (!this.m_useTraining) {
if (this.getHoldOutFile().getPath().startsWith("Click to set")) {
text.append("none\n");
} else {
text.append(this.getHoldOutFile().getPath() + '\n');
}
} else {
if (this.m_usePercentageSplit) {
text.append("Percentage split: " + this.m_splitPercent + "\n");
} else {
text.append("Training data\n");
}
}
String IRClassL = "";
if (this.m_IRClassVal >= 0) {
IRClassL = "(class value: " + this.m_trainingInstances.classAttribute().value(this.m_IRClassVal) + ")";
}
switch (this.m_evaluationMeasure.getID()) {
case EVAL_DEFAULT:
case EVAL_ACCURACY:
if (this.m_trainingInstances.attribute(this.m_classIndex).isNumeric()) {
text.append("\tSubset evaluation: RMSE\n");
} else {
text.append("\tSubset evaluation: classification error\n");
}
break;
case EVAL_RMSE:
if (this.m_trainingInstances.attribute(this.m_classIndex).isNumeric()) {
text.append("\tSubset evaluation: RMSE\n");
} else {
text.append("\tSubset evaluation: RMSE (probability estimates)\n");
}
break;
case EVAL_MAE:
if (this.m_trainingInstances.attribute(this.m_classIndex).isNumeric()) {
text.append("\tSubset evaluation: MAE\n");
} else {
text.append("\tSubset evaluation: MAE (probability estimates)\n");
}
break;
case EVAL_FMEASURE:
text.append("\tSubset evaluation: F-measure " + (this.m_IRClassVal >= 0 ? IRClassL : "") + "\n");
break;
case EVAL_AUC:
text.append("\tSubset evaluation: area under the ROC curve " + (this.m_IRClassVal >= 0 ? IRClassL : "") + "\n");
break;
case EVAL_AUPRC:
text.append("\tSubset evalation: area under the precision-recal curve " + (this.m_IRClassVal >= 0 ? IRClassL : "") + "\n");
break;
case EVAL_CORRELATION:
text.append("\tSubset evaluation: correlation coefficient\n");
break;
default:
text.append("\tSubset evaluation: " + this.m_evaluationMeasure.getReadable());
if (((WrapperSubsetEval.PluginTag) this.m_evaluationMeasure).getMetric() instanceof InformationRetrievalEvaluationMetric) {
text.append(" " + (this.m_IRClassVal > 0 ? IRClassL : ""));
}
text.append("\n");
break;
}
}
return text.toString();
}
	/**
	 * Resets the evaluator's options to their default values.
	 */
	protected void resetOptions() {
		this.m_trainingInstances = null;
		this.m_ClassifierTemplate = new ZeroR();
		this.m_holdOutFile = new File("Click to set hold out or test instances");
		this.m_holdOutInstances = null;
		this.m_useTraining = false;
		this.m_splitPercent = "90";
		this.m_usePercentageSplit = false;
		this.m_evaluationMeasure = TAGS_EVALUATION[0];
		this.m_IRClassVal = -1;
		// NOTE(review): m_seed, m_IRClassValS and m_Classifier are not reset
		// here -- confirm whether that is intentional
	}
	/**
	 * Returns the revision string.
	 *
	 * @return the revision
	 */
	@Override
	public String getRevision() {
		// revision number is substituted by VCS keyword expansion
		return RevisionUtils.extract("$Revision: 10332 $");
	}
	/**
	 * Main method for testing this class from the command line.
	 *
	 * @param args the options
	 */
	public static void main(final String[] args) {
		runEvaluator(new ClassifierSubsetEval(), args);
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/CorrelationAttributeEval.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* CorrelationAttributeEval.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.attributeSelection;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.List;
import java.util.Vector;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.ReplaceMissingValues;
/**
* <!-- globalinfo-start --> CorrelationAttributeEval :<br/>
* <br/>
* Evaluates the worth of an attribute by measuring the correlation (Pearson's)
* between it and the class.<br/>
* <br/>
* Nominal attributes are considered on a value by value basis by treating each
* value as an indicator. An overall correlation for a nominal attribute is
* arrived at via a weighted average.<br/>
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -D
* Output detailed info for nominal attributes
* </pre>
*
* <!-- options-end -->
*
* @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
* @version $Revision$
*/
public class CorrelationAttributeEval extends ASEvaluation implements AttributeEvaluator, OptionHandler {
/** For serialization */
private static final long serialVersionUID = -4931946995055872438L;
/** The correlation for each attribute */
protected double[] m_correlations;
/** Whether to output detailed (per value) correlation for nominal attributes */
protected boolean m_detailedOutput = false;
/** Holds the detailed output info */
protected StringBuffer m_detailedOutputBuff;
	/**
	 * Returns a string describing this attribute evaluator, shown in the
	 * explorer/experimenter gui.
	 *
	 * @return a description of the evaluator suitable for displaying in the
	 *         explorer/experimenter gui
	 */
	public String globalInfo() {
		return "CorrelationAttributeEval :\n\nEvaluates the worth of an attribute " + "by measuring the correlation (Pearson's) between it and the class.\n\n" + "Nominal attributes are considered on a value by "
				+ "value basis by treating each value as an indicator. An overall " + "correlation for a nominal attribute is arrived at via a weighted average.\n";
	}
/**
* Returns the capabilities of this evaluator.
*
* @return the capabilities of this evaluator
* @see Capabilities
*/
@Override
public Capabilities getCapabilities() {
Capabilities result = super.getCapabilities();
result.disableAll();
// attributes
result.enable(Capability.NOMINAL_ATTRIBUTES);
result.enable(Capability.NUMERIC_ATTRIBUTES);
result.enable(Capability.DATE_ATTRIBUTES);
result.enable(Capability.MISSING_VALUES);
// class
result.enable(Capability.NOMINAL_CLASS);
result.enable(Capability.NUMERIC_CLASS);
result.enable(Capability.MISSING_CLASS_VALUES);
return result;
}
	/**
	 * Returns an enumeration describing the available options (only -D, the
	 * detailed-output flag).
	 *
	 * @return an enumeration of all the available options.
	 **/
	@Override
	public Enumeration<Option> listOptions() {
		Vector<Option> newVector = new Vector<Option>();
		newVector.addElement(new Option("\tOutput detailed info for nominal attributes", "D", 0, "-D"));
		return newVector.elements();
	}
	/**
	 * Parses a given list of options.
	 * <p/>
	 *
	 * <!-- options-start --> Valid options are:
	 * <p/>
	 *
	 * <pre>
	 * -D
	 *  Output detailed info for nominal attributes
	 * </pre>
	 *
	 * <!-- options-end -->
	 *
	 * @param options the list of options as an array of strings
	 * @throws Exception if an option is not supported
	 */
	@Override
	public void setOptions(final String[] options) throws Exception {
		// -D is the only recognized option; anything else is ignored here
		this.setOutputDetailedInfo(Utils.getFlag('D', options));
	}
/**
* Gets the current settings of WrapperSubsetEval.
*
* @return an array of strings suitable for passing to setOptions()
*/
@Override
public String[] getOptions() {
String[] options = new String[1];
if (this.getOutputDetailedInfo()) {
options[0] = "-D";
} else {
options[0] = "";
}
return options;
}
	/**
	 * Returns the tip text for this property.
	 *
	 * @return tip text for this property suitable for displaying in the
	 *         explorer/experimenter gui
	 */
	public String outputDetailedInfoTipText() {
		return "Output per value correlation for nominal attributes";
	}
	/**
	 * Sets whether to output per-value correlation for nominal attributes.
	 *
	 * @param d true if detailed (per-value) correlation is to be output for
	 *          nominal attributes
	 */
	public void setOutputDetailedInfo(final boolean d) {
		this.m_detailedOutput = d;
	}
	/**
	 * Gets whether to output per-value correlation for nominal attributes.
	 *
	 * @return true if detailed (per-value) correlation is to be output for
	 *         nominal attributes
	 */
	public boolean getOutputDetailedInfo() {
		return this.m_detailedOutput;
	}
	/**
	 * Evaluates an individual attribute by measuring the correlation (Pearson's)
	 * between it and the class. Nominal attributes are considered on a value by
	 * value basis by treating each value as an indicator. An overall correlation
	 * for a nominal attribute is arrived at via a weighted average.
	 *
	 * @param attribute the index of the attribute to be evaluated
	 * @return the correlation
	 * @throws Exception if the attribute could not be evaluated
	 */
	@Override
	public double evaluateAttribute(final int attribute) throws Exception {
		// simply looks up the value precomputed by buildEvaluator(); calling this
		// before the evaluator is built yields a NullPointerException
		return this.m_correlations[attribute];
	}
/**
* Describe the attribute evaluator
*
* @return a description of the attribute evaluator as a String
*/
@Override
public String toString() {
StringBuffer buff = new StringBuffer();
if (this.m_correlations == null) {
buff.append("Correlation attribute evaluator has not been built yet.");
} else {
buff.append("\tCorrelation Ranking Filter");
if (this.m_detailedOutput && this.m_detailedOutputBuff.length() > 0) {
buff.append("\n\tDetailed output for nominal attributes");
buff.append(this.m_detailedOutputBuff);
}
}
return buff.toString();
}
/**
* Initializes an information gain attribute evaluator. Replaces missing
* values with means/modes; Deletes instances with missing class values.
*
* @param data set of instances serving as training data
* @throws Exception if the evaluator has not been generated successfully
*/
@Override
public void buildEvaluator(Instances data) throws Exception {
data = new Instances(data);
data.deleteWithMissingClass();
ReplaceMissingValues rmv = new ReplaceMissingValues();
rmv.setInputFormat(data);
data = Filter.useFilter(data, rmv);
int numClasses = data.classAttribute().numValues();
int classIndex = data.classIndex();
int numInstances = data.numInstances();
this.m_correlations = new double[data.numAttributes()];
/*
* boolean hasNominals = false; boolean hasNumerics = false;
*/
List<Integer> numericIndexes = new ArrayList<Integer>();
List<Integer> nominalIndexes = new ArrayList<Integer>();
if (this.m_detailedOutput) {
this.m_detailedOutputBuff = new StringBuffer();
}
// TODO for instance weights (folded into computing weighted correlations)
// add another dimension just before the last [2] (0 for 0/1 binary vector
// and
// 1 for corresponding instance weights for the 1's)
double[][][] nomAtts = new double[data.numAttributes()][][];
for (int i = 0; i < data.numAttributes(); i++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
if (data.attribute(i).isNominal() && i != classIndex) {
nomAtts[i] = new double[data.attribute(i).numValues()][data.numInstances()];
Arrays.fill(nomAtts[i][0], 1.0); // set zero index for this att to all
// 1's
nominalIndexes.add(i);
} else if (data.attribute(i).isNumeric() && i != classIndex) {
numericIndexes.add(i);
}
}
// do the nominal attributes
if (nominalIndexes.size() > 0) {
for (int i = 0; i < data.numInstances(); i++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
Instance current = data.instance(i);
for (int j = 0; j < current.numValues(); j++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
if (current.attribute(current.index(j)).isNominal() && current.index(j) != classIndex) {
// Will need to check for zero in case this isn't a sparse
// instance (unless we add 1 and subtract 1)
nomAtts[current.index(j)][(int) current.valueSparse(j)][i] += 1;
nomAtts[current.index(j)][0][i] -= 1;
}
}
}
}
if (data.classAttribute().isNumeric()) {
double[] classVals = data.attributeToDoubleArray(classIndex);
// do the numeric attributes
for (Integer i : numericIndexes) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
double[] numAttVals = data.attributeToDoubleArray(i);
this.m_correlations[i] = Utils.correlation(numAttVals, classVals, numAttVals.length);
if (this.m_correlations[i] == 1.0) {
// check for zero variance (useless numeric attribute)
if (Utils.variance(numAttVals) == 0) {
this.m_correlations[i] = 0;
}
}
}
// do the nominal attributes
if (nominalIndexes.size() > 0) {
// now compute the correlations for the binarized nominal attributes
for (Integer i : nominalIndexes) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
double sum = 0;
double corr = 0;
double sumCorr = 0;
double sumForValue = 0;
if (this.m_detailedOutput) {
this.m_detailedOutputBuff.append("\n\n").append(data.attribute(i).name());
}
for (int j = 0; j < data.attribute(i).numValues(); j++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
sumForValue = Utils.sum(nomAtts[i][j]);
corr = Utils.correlation(nomAtts[i][j], classVals, classVals.length);
// useless attribute - all instances have the same value
if (sumForValue == numInstances || sumForValue == 0) {
corr = 0;
}
if (corr < 0.0) {
corr = -corr;
}
sumCorr += sumForValue * corr;
sum += sumForValue;
if (this.m_detailedOutput) {
this.m_detailedOutputBuff.append("\n\t").append(data.attribute(i).value(j)).append(": ");
this.m_detailedOutputBuff.append(Utils.doubleToString(corr, 6));
}
}
this.m_correlations[i] = (sum > 0) ? sumCorr / sum : 0;
}
}
} else {
// class is nominal
// TODO extra dimension for storing instance weights too
double[][] binarizedClasses = new double[data.classAttribute().numValues()][data.numInstances()];
// this is equal to the number of instances for all inst weights = 1
double[] classValCounts = new double[data.classAttribute().numValues()];
for (int i = 0; i < data.numInstances(); i++) {
Instance current = data.instance(i);
binarizedClasses[(int) current.classValue()][i] = 1;
}
for (int i = 0; i < data.classAttribute().numValues(); i++) {
classValCounts[i] = Utils.sum(binarizedClasses[i]);
}
double sumClass = Utils.sum(classValCounts);
// do numeric attributes first
if (numericIndexes.size() > 0) {
for (Integer i : numericIndexes) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
double[] numAttVals = data.attributeToDoubleArray(i);
double corr = 0;
double sumCorr = 0;
for (int j = 0; j < data.classAttribute().numValues(); j++) {
corr = Utils.correlation(numAttVals, binarizedClasses[j], numAttVals.length);
if (corr < 0.0) {
corr = -corr;
}
if (corr == 1.0) {
// check for zero variance (useless numeric attribute)
if (Utils.variance(numAttVals) == 0) {
corr = 0;
}
}
sumCorr += classValCounts[j] * corr;
}
this.m_correlations[i] = sumCorr / sumClass;
}
}
if (nominalIndexes.size() > 0) {
for (Integer i : nominalIndexes) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
if (this.m_detailedOutput) {
this.m_detailedOutputBuff.append("\n\n").append(data.attribute(i).name());
}
double sumForAtt = 0;
double corrForAtt = 0;
for (int j = 0; j < data.attribute(i).numValues(); j++) {
double sumForValue = Utils.sum(nomAtts[i][j]);
double corr = 0;
double sumCorr = 0;
double avgCorrForValue = 0;
sumForAtt += sumForValue;
for (int k = 0; k < numClasses; k++) {
// corr between value j and class k
corr = Utils.correlation(nomAtts[i][j], binarizedClasses[k], binarizedClasses[k].length);
// useless attribute - all instances have the same value
if (sumForValue == numInstances || sumForValue == 0) {
corr = 0;
}
if (corr < 0.0) {
corr = -corr;
}
sumCorr += classValCounts[k] * corr;
}
avgCorrForValue = sumCorr / sumClass;
corrForAtt += sumForValue * avgCorrForValue;
if (this.m_detailedOutput) {
this.m_detailedOutputBuff.append("\n\t").append(data.attribute(i).value(j)).append(": ");
this.m_detailedOutputBuff.append(Utils.doubleToString(avgCorrForValue, 6));
}
}
// the weighted average corr for att i as
// a whole (wighted by value frequencies)
this.m_correlations[i] = (sumForAtt > 0) ? corrForAtt / sumForAtt : 0;
}
}
}
if (this.m_detailedOutputBuff != null && this.m_detailedOutputBuff.length() > 0) {
this.m_detailedOutputBuff.append("\n");
}
}
/**
 * Returns the revision string of this evaluator.
 *
 * @return the revision
 */
@Override
public String getRevision() {
    final String revision = RevisionUtils.extract("$Revision$");
    return revision;
}
/**
 * Main method for testing this class from the command line.
 *
 * @param args the options
 */
public static void main(final String[] args) {
    // Delegate to the standard WEKA evaluator runner.
    final CorrelationAttributeEval evaluator = new CorrelationAttributeEval();
    runEvaluator(evaluator, args);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/ErrorBasedMeritEvaluator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ErrorBasedMeritEvaluator.java
* Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.attributeSelection;
/**
* Interface for evaluators that calculate the "merit" of attributes/subsets
* as the error of a learning scheme
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
*/
public interface ErrorBasedMeritEvaluator {
// Marker interface (no methods): per the class javadoc, implemented by
// evaluators whose "merit" is the error of a learning scheme.
// NOTE(review): presumably consumers treat lower merit as better for these
// evaluators — confirm against the call sites that check this interface.
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/GainRatioAttributeEval.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* GainRatioAttributeEval.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.attributeSelection;
import java.util.Enumeration;
import java.util.Vector;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.ContingencyTables;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.filters.Filter;
import weka.filters.supervised.attribute.Discretize;
/**
* <!-- globalinfo-start --> GainRatioAttributeEval :<br/>
* <br/>
* Evaluates the worth of an attribute by measuring the gain ratio with respect
* to the class.<br/>
* <br/>
* GainR(Class, Attribute) = (H(Class) - H(Class | Attribute)) / H(Attribute).<br/>
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -M
* treat missing values as a separate value.
* </pre>
*
* <!-- options-end -->
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
* @see Discretize
*/
public class GainRatioAttributeEval extends ASEvaluation implements AttributeEvaluator, OptionHandler {

    /** for serialization */
    static final long serialVersionUID = -8504656625598579926L;

    /** The training instances (replaced by a discretized copy in buildEvaluator). */
    private Instances m_trainInstances;

    /** Index of the class attribute. */
    private int m_classIndex;

    /** The number of training instances. */
    private int m_numInstances;

    /** The number of class values (taken after discretization). */
    private int m_numClasses;

    /** If true, counts for missing values are distributed across observed values. */
    private boolean m_missing_merge;

    /**
     * Returns a string describing this attribute evaluator.
     *
     * @return a description of the evaluator suitable for displaying in the
     *         explorer/experimenter gui
     */
    public String globalInfo() {
        return "GainRatioAttributeEval :\n\nEvaluates the worth of an attribute "
                + "by measuring the gain ratio with respect to the class.\n\n"
                + "GainR(Class, Attribute) = (H(Class) - H(Class | Attribute)) / "
                + "H(Attribute).\n";
    }

    /**
     * Constructor. Resets all options to their defaults.
     */
    public GainRatioAttributeEval() {
        this.resetOptions();
    }

    /**
     * Returns an enumeration describing the available options.
     *
     * @return an enumeration of all the available options.
     **/
    @Override
    public Enumeration<Option> listOptions() {
        Vector<Option> newVector = new Vector<Option>(1);
        // typo fixed in help text: "seperate" -> "separate"
        newVector.addElement(new Option("\ttreat missing values as a separate " + "value.", "M", 0, "-M"));
        return newVector.elements();
    }

    /**
     * Parses a given list of options.
     * <p/>
     *
     * <!-- options-start --> Valid options are:
     * <p/>
     *
     * <pre>
     * -M
     * treat missing values as a separate value.
     * </pre>
     *
     * <!-- options-end -->
     *
     * @param options the list of options as an array of strings
     * @throws Exception if an option is not supported
     **/
    @Override
    public void setOptions(final String[] options) throws Exception {
        this.resetOptions();
        // -M present => treat missing as its own value, i.e. do NOT merge counts
        this.setMissingMerge(!(Utils.getFlag('M', options)));
    }

    /**
     * Returns the tip text for this property.
     *
     * @return tip text for this property suitable for displaying in the
     *         explorer/experimenter gui
     */
    public String missingMergeTipText() {
        return "Distribute counts for missing values. Counts are distributed " + "across other values in proportion to their frequency. Otherwise, " + "missing is treated as a separate value.";
    }

    /**
     * Sets whether counts for missing values are distributed across observed values.
     *
     * @param b true=distribute missing values.
     */
    public void setMissingMerge(final boolean b) {
        this.m_missing_merge = b;
    }

    /**
     * Gets whether missing values are being distributed or not.
     *
     * @return true if missing values are being distributed.
     */
    public boolean getMissingMerge() {
        return this.m_missing_merge;
    }

    /**
     * Gets the current settings of the evaluator.
     *
     * @return an array of strings suitable for passing to setOptions()
     */
    @Override
    public String[] getOptions() {
        String[] options = new String[1];
        // An empty string (rather than a shorter array) preserves the historical
        // output shape expected by option-handling callers.
        options[0] = this.getMissingMerge() ? "" : "-M";
        return options;
    }

    /**
     * Returns the capabilities of this evaluator.
     *
     * @return the capabilities of this evaluator
     * @see Capabilities
     */
    @Override
    public Capabilities getCapabilities() {
        Capabilities result = super.getCapabilities();
        result.disableAll();
        // attributes
        result.enable(Capability.NOMINAL_ATTRIBUTES);
        result.enable(Capability.NUMERIC_ATTRIBUTES);
        result.enable(Capability.DATE_ATTRIBUTES);
        result.enable(Capability.MISSING_VALUES);
        // class
        result.enable(Capability.NOMINAL_CLASS);
        result.enable(Capability.MISSING_CLASS_VALUES);
        return result;
    }

    /**
     * Initializes a gain ratio attribute evaluator. Discretizes all attributes
     * that are numeric.
     *
     * @param data set of instances serving as training data
     * @throws Exception if the evaluator has not been generated successfully
     */
    @Override
    public void buildEvaluator(final Instances data) throws Exception {
        // can evaluator handle data?
        this.getCapabilities().testWithFail(data);
        this.m_trainInstances = data;
        this.m_classIndex = this.m_trainInstances.classIndex();
        this.m_numInstances = this.m_trainInstances.numInstances();
        // Supervised discretization of numeric attributes (better encoding on).
        Discretize disTransform = new Discretize();
        disTransform.setUseBetterEncoding(true);
        disTransform.setInputFormat(this.m_trainInstances);
        this.m_trainInstances = Filter.useFilter(this.m_trainInstances, disTransform);
        this.m_numClasses = this.m_trainInstances.attribute(this.m_classIndex).numValues();
    }

    /**
     * Resets options to their default values.
     */
    protected void resetOptions() {
        this.m_trainInstances = null;
        this.m_missing_merge = true;
    }

    /**
     * Evaluates an individual attribute by measuring the gain ratio of the class
     * given the attribute.
     *
     * @param attribute the index of the attribute to be evaluated
     * @return the gain ratio
     * @throws Exception if the attribute could not be evaluated (including an
     *           InterruptedException when the calling thread is interrupted)
     */
    @Override
    public double evaluateAttribute(final int attribute) throws Exception {
        // One extra row/column collects counts for missing attribute/class values.
        final int ni = this.m_trainInstances.attribute(attribute).numValues() + 1;
        final int nj = this.m_numClasses + 1;
        // Java zero-initializes these arrays; the original code allocated
        // sumi/sumj twice and cleared everything in a loop — both redundant.
        final double[][] counts = new double[ni][nj];
        final double[] sumi = new double[ni];
        final double[] sumj = new double[nj];
        double sum = 0.0;
        // XXX thread interrupted; throw exception
        if (Thread.interrupted()) {
            throw new InterruptedException("Killed WEKA");
        }
        // Fill the (attribute value x class value) contingency table of weights.
        for (int i = 0; i < this.m_numInstances; i++) {
            // XXX thread interrupted; throw exception
            if (Thread.interrupted()) {
                throw new InterruptedException("Killed WEKA");
            }
            final Instance inst = this.m_trainInstances.instance(i);
            final int ii = inst.isMissing(attribute) ? ni - 1 : (int) inst.value(attribute);
            final int jj = inst.isMissing(this.m_classIndex) ? nj - 1 : (int) inst.value(this.m_classIndex);
            counts[ii][jj] += inst.weight();
        }
        // Row totals and grand total.
        for (int i = 0; i < ni; i++) {
            for (int j = 0; j < nj; j++) {
                sumi[i] += counts[i][j];
                sum += counts[i][j];
            }
        }
        // XXX thread interrupted; throw exception
        if (Thread.interrupted()) {
            throw new InterruptedException("Killed WEKA");
        }
        // Column totals.
        for (int j = 0; j < nj; j++) {
            for (int i = 0; i < ni; i++) {
                sumj[j] += counts[i][j];
            }
        }
        // Distribute the counts collected in the "missing" row/column across the
        // observed values, in proportion to the observed value frequencies.
        if (this.m_missing_merge && (sumi[ni - 1] < sum) && (sumj[nj - 1] < sum)) {
            final double[] i_copy = new double[sumi.length];
            final double[] j_copy = new double[sumj.length];
            final double[][] counts_copy = new double[sumi.length][sumj.length];
            for (int i = 0; i < ni; i++) {
                System.arraycopy(counts[i], 0, counts_copy[i], 0, sumj.length);
            }
            System.arraycopy(sumi, 0, i_copy, 0, sumi.length);
            System.arraycopy(sumj, 0, j_copy, 0, sumj.length);
            final double total_missing = (sumi[ni - 1] + sumj[nj - 1] - counts[ni - 1][nj - 1]);
            double temp;
            // Instances with a missing attribute value but a known class.
            if (sumi[ni - 1] > 0.0) {
                for (int j = 0; j < nj - 1; j++) {
                    if (counts[ni - 1][j] > 0.0) {
                        for (int i = 0; i < ni - 1; i++) {
                            temp = ((i_copy[i] / (sum - i_copy[ni - 1])) * counts[ni - 1][j]);
                            counts[i][j] += temp;
                            sumi[i] += temp;
                        }
                        counts[ni - 1][j] = 0.0;
                    }
                }
            }
            sumi[ni - 1] = 0.0;
            // Instances with a known attribute value but a missing class.
            if (sumj[nj - 1] > 0.0) {
                for (int i = 0; i < ni - 1; i++) {
                    if (counts[i][nj - 1] > 0.0) {
                        for (int j = 0; j < nj - 1; j++) {
                            temp = ((j_copy[j] / (sum - j_copy[nj - 1])) * counts[i][nj - 1]);
                            counts[i][j] += temp;
                            sumj[j] += temp;
                        }
                        counts[i][nj - 1] = 0.0;
                    }
                }
            }
            sumj[nj - 1] = 0.0;
            // Instances missing both the attribute value and the class.
            if (counts[ni - 1][nj - 1] > 0.0 && total_missing < sum) {
                for (int i = 0; i < ni - 1; i++) {
                    for (int j = 0; j < nj - 1; j++) {
                        temp = (counts_copy[i][j] / (sum - total_missing)) * counts_copy[ni - 1][nj - 1];
                        counts[i][j] += temp;
                        sumi[i] += temp;
                        sumj[j] += temp;
                    }
                }
                counts[ni - 1][nj - 1] = 0.0;
            }
        }
        return ContingencyTables.gainRatio(counts);
    }

    /**
     * Returns a description of the evaluator.
     *
     * @return description as a string
     */
    @Override
    public String toString() {
        // StringBuilder: single-threaded local use, no need for StringBuffer.
        StringBuilder text = new StringBuilder();
        if (this.m_trainInstances == null) {
            text.append("\tGain Ratio evaluator has not been built");
        } else {
            text.append("\tGain Ratio feature evaluator");
            if (!this.m_missing_merge) {
                // typo fixed in output text: "seperate" -> "separate"
                text.append("\n\tMissing values treated as separate");
            }
        }
        text.append("\n");
        return text.toString();
    }

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    @Override
    public String getRevision() {
        return RevisionUtils.extract("$Revision$");
    }

    /**
     * Frees the bulk of the training data once selection is finished,
     * keeping only the (empty) header for later reference.
     *
     * @param attributeSet the selected attribute set (returned unchanged)
     * @return the selected attribute set
     */
    @Override
    public int[] postProcess(final int[] attributeSet) {
        // save memory
        this.m_trainInstances = new Instances(this.m_trainInstances, 0);
        return attributeSet;
    }

    /**
     * Main method.
     *
     * @param args the options -t training file
     */
    public static void main(final String[] args) {
        runEvaluator(new GainRatioAttributeEval(), args);
    }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/GreedyStepwise.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* GreedyStepwise.java
* Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.attributeSelection;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.Enumeration;
import java.util.List;
import java.util.Vector;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.Range;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* <!-- globalinfo-start --> GreedyStepwise :<br/>
* <br/>
* Performs a greedy forward or backward search through the space of attribute
* subsets. May start with no/all attributes or from an arbitrary point in the
* space. Stops when the addition/deletion of any remaining attributes results
* in a decrease in evaluation. Can also produce a ranked list of attributes by
* traversing the space from one side to the other and recording the order that
* attributes are selected.<br/>
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -C
* Use conservative forward search
* </pre>
*
* <pre>
* -B
* Use a backward search instead of a
* forward one.
* </pre>
*
* <pre>
* -P <start set>
* Specify a starting set of attributes.
* Eg. 1,3,5-7.
* </pre>
*
* <pre>
* -R
* Produce a ranked list of attributes.
* </pre>
*
* <pre>
* -T <threshold>
* Specify a threshold by which attributes
* may be discarded from the ranking.
* Use in conjunction with -R
* </pre>
*
* <pre>
* -N <num to select>
* Specify number of attributes to select
* </pre>
*
* <pre>
* -num-slots <int>
* The number of execution slots, for example, the number of cores in the CPU. (default 1)
* </pre>
*
* <pre>
* -D
* Print debugging output
* </pre>
*
* <!-- options-end -->
*
* @author Mark Hall
* @version $Revision$
*/
public class GreedyStepwise extends ASSearch implements RankedOutputSearch, StartSetHandler, OptionHandler {
/** for serialization */
static final long serialVersionUID = -6312951970168325471L;
/** does the data have a class */
protected boolean m_hasClass;
/** holds the class index */
protected int m_classIndex;
/** number of attributes in the data */
protected int m_numAttribs;
/** true if the user has requested a ranked list of attributes */
protected boolean m_rankingRequested;
/**
* go from one side of the search space to the other in order to generate a
* ranking
*/
protected boolean m_doRank;
/** used to indicate whether or not ranking has been performed */
protected boolean m_doneRanking;
/**
* A threshold by which to discard attributes---used by the AttributeSelection
* module
*/
protected double m_threshold;
/**
* The number of attributes to select. -1 indicates that all attributes are to
* be retained. Has precedence over m_threshold
*/
protected int m_numToSelect = -1;
protected int m_calculatedNumToSelect;
/** the merit of the best subset found */
protected double m_bestMerit;
/** a ranked list of attribute indexes */
protected double[][] m_rankedAtts;
protected int m_rankedSoFar;
/** the best subset found */
protected BitSet m_best_group;
protected ASEvaluation m_ASEval;
protected Instances m_Instances;
/** holds the start set for the search as a Range */
protected Range m_startRange;
/** holds an array of starting attributes */
protected int[] m_starting;
/** Use a backwards search instead of a forwards one */
protected boolean m_backward = false;
/**
* If set then attributes will continue to be added during a forward search as
* long as the merit does not degrade
*/
protected boolean m_conservativeSelection = false;
/** Print debugging output */
protected boolean m_debug = false;
protected int m_poolSize = 1;
/** Thread pool */
protected transient ExecutorService m_pool = null;
/**
 * Constructor. Establishes the default search state (no start set, ranking
 * not yet performed, no discard threshold) and then resets all options.
 */
public GreedyStepwise() {
    m_startRange = new Range();
    m_starting = null;
    m_doneRanking = false;
    m_threshold = -Double.MAX_VALUE;
    // resetOptions() runs last so it may override the defaults above.
    resetOptions();
}
/**
 * Returns a string describing this search method.
 *
 * @return a description of the search suitable for displaying in the
 *         explorer/experimenter gui
 */
public String globalInfo() {
    return "GreedyStepwise :\n\nPerforms a greedy forward or backward search through "
            + "the space of attribute subsets. May start with no/all attributes or from "
            + "an arbitrary point in the space. Stops when the addition/deletion of any "
            + "remaining attributes results in a decrease in evaluation. "
            + "Can also produce a ranked list of attributes by traversing the space "
            + "from one side to the other and recording the order that attributes are selected.\n";
}
/**
 * Returns the tip text for the searchBackwards property (GUI display).
 *
 * @return tip text for this property
 */
public String searchBackwardsTipText() {
    final String tip = "Search backwards rather than forwards.";
    return tip;
}
/**
 * Sets whether to search backwards instead of forwards.
 *
 * @param back true to search backwards
 */
public void setSearchBackwards(final boolean back) {
    m_backward = back;
    if (back) {
        // ranked output is not produced for backward searches
        setGenerateRanking(false);
    }
}
/**
 * Gets whether the search proceeds backwards.
 *
 * @return true if the search will proceed backwards
 */
public boolean getSearchBackwards() {
    return m_backward;
}
/**
 * Returns the tip text for the threshold property (GUI display).
 *
 * @return tip text for this property
 */
public String thresholdTipText() {
    return "Set threshold by which attributes can be discarded. Default value results in no attributes being discarded. Use in conjunction with generateRanking";
}
/**
 * Sets the threshold by which the AttributeSelection module can discard
 * attributes.
 *
 * @param threshold the threshold.
 */
@Override
public void setThreshold(final double threshold) {
    m_threshold = threshold;
}
/**
 * Returns the threshold so that the AttributeSelection module can discard
 * attributes from the ranking.
 *
 * @return the discard threshold
 */
@Override
public double getThreshold() {
    return m_threshold;
}
/**
 * Returns the tip text for the numToSelect property (GUI display).
 *
 * @return tip text for this property
 */
public String numToSelectTipText() {
    return "Specify the number of attributes to retain. The default value (-1) indicates that all attributes are to be retained. Use either this option or a threshold to reduce the attribute set.";
}
/**
 * Specifies the number of attributes to select from the ranked list (if
 * generating a ranking). -1 indicates that all attributes are to be retained.
 *
 * @param n the number of attributes to retain
 */
@Override
public void setNumToSelect(final int n) {
    m_numToSelect = n;
}
/**
 * Gets the number of attributes to be retained.
 *
 * @return the number of attributes to retain
 */
@Override
public int getNumToSelect() {
    return m_numToSelect;
}
/**
 * Gets the calculated number of attributes to retain. Equals getNumToSelect()
 * when the user specified a non-negative number; otherwise it is whatever was
 * previously stored in m_calculatedNumToSelect (set elsewhere from the data).
 *
 * @return the actual number of attributes to retain
 */
@Override
public int getCalculatedNumToSelect() {
    final int requested = m_numToSelect;
    if (requested >= 0) {
        m_calculatedNumToSelect = requested;
    }
    return m_calculatedNumToSelect;
}
/**
 * Returns the tip text for the generateRanking property (GUI display).
 *
 * @return tip text for this property
 */
public String generateRankingTipText() {
    final String tip = "Set to true if a ranked list is required.";
    return tip;
}
/**
 * Records whether the user has requested a ranked list of attributes.
 *
 * @param doRank true if ranking is requested
 */
@Override
public void setGenerateRanking(final boolean doRank) {
    m_rankingRequested = doRank;
}
/**
 * Gets whether ranking has been requested. Used by the AttributeSelection
 * module to determine if rankedAttributes() should be called.
 *
 * @return true if ranking has been requested.
 */
@Override
public boolean getGenerateRanking() {
    return m_rankingRequested;
}
/**
 * Returns the tip text for the startSet property (GUI display).
 *
 * @return tip text for this property
 */
public String startSetTipText() {
    // typos fixed in user-facing text: "seperated" -> "separated", "off" -> "of"
    return "Set the start point for the search. This is specified as a comma " + "separated list of attribute indexes starting at 1. It can include " + "ranges. Eg. 1,2,5-9,17.";
}
/**
 * Sets a starting set of attributes for the search. It is the search method's
 * responsibility to report this start set (if any) in its toString() method.
 *
 * @param startSet a string containing a list of attributes (and or ranges),
 *          eg. 1,2,6,10-15.
 * @throws Exception if start set can't be set.
 */
@Override
public void setStartSet(final String startSet) throws Exception {
    m_startRange.setRanges(startSet);
}
/**
 * Returns a list of attributes (and or attribute ranges) as a String.
 *
 * @return a list of attributes (and or attribute ranges)
 */
@Override
public String getStartSet() {
    return m_startRange.getRanges();
}
/**
 * Returns the tip text for the conservativeForwardSelection property (GUI display).
 *
 * @return tip text for this property
 */
public String conservativeForwardSelectionTipText() {
    return "If true (and forward search is selected) then attributes will continue to be added to the best subset as long as merit does not degrade.";
}
/**
 * Sets whether attributes should continue to be added during a forward search
 * as long as merit does not decrease.
 *
 * @param c true if attributes should continue to be added
 */
public void setConservativeForwardSelection(final boolean c) {
    m_conservativeSelection = c;
}
/**
 * Gets whether conservative selection has been enabled.
 *
 * @return true if conservative forward selection is enabled
 */
public boolean getConservativeForwardSelection() {
    return m_conservativeSelection;
}
/**
 * Returns the tip text for the debuggingOutput property (GUI display).
 *
 * @return tip text for this property
 */
public String debuggingOutputTipText() {
    final String tip = "Output debugging information to the console";
    return tip;
}
/**
 * Sets whether to output debugging info to the console.
 *
 * @param d true if debugging info is to be output
 */
public void setDebuggingOutput(final boolean d) {
    m_debug = d;
}
/**
 * Gets whether debugging info is output to the console.
 *
 * @return true if debugging info is to be output
 */
public boolean getDebuggingOutput() {
    return m_debug;
}
/**
 * Returns the tip text for the numExecutionSlots property (GUI display).
 *
 * @return a string to describe the option
 */
public String numExecutionSlotsTipText() {
    final String tip = "The number of execution slots, for example, the number of cores in the CPU.";
    return tip;
}
/**
 * Gets the number of threads used for subset evaluation.
 *
 * @return the thread pool size
 */
public int getNumExecutionSlots() {
    return m_poolSize;
}
/**
 * Sets the number of threads used for subset evaluation.
 *
 * @param nT the thread pool size
 */
public void setNumExecutionSlots(final int nT) {
    m_poolSize = nT;
}
/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 **/
@Override
public Enumeration<Option> listOptions() {
    Vector<Option> newVector = new Vector<Option>(8);
    // Option names fixed: "C", "B" and "num-slots" previously carried a leading
    // dash ("-C", "-B", "-num-slots"), unlike every other option in this list.
    newVector.addElement(new Option("\tUse conservative forward search", "C", 0, "-C"));
    newVector.addElement(new Option("\tUse a backward search instead of a" + "\n\tforward one.", "B", 0, "-B"));
    newVector.addElement(new Option("\tSpecify a starting set of attributes." + "\n\tEg. 1,3,5-7.", "P", 1, "-P <start set>"));
    newVector.addElement(new Option("\tProduce a ranked list of attributes.", "R", 0, "-R"));
    // typos fixed in help text: "theshold" -> "threshold", "conjuction" -> "conjunction"
    newVector.addElement(new Option("\tSpecify a threshold by which attributes" + "\n\tmay be discarded from the ranking." + "\n\tUse in conjunction with -R", "T", 1, "-T <threshold>"));
    newVector.addElement(new Option("\tSpecify number of attributes to select", "N", 1, "-N <num to select>"));
    newVector.addElement(new Option("\t" + this.numExecutionSlotsTipText() + " (default 1)\n", "num-slots", 1, "-num-slots <int>"));
    newVector.addElement(new Option("\tPrint debugging output", "D", 0, "-D"));
    return newVector.elements();
}
/**
* Parses a given list of options.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -C
* Use conservative forward search
* </pre>
*
* <pre>
* -B
* Use a backward search instead of a
* forward one.
* </pre>
*
* <pre>
* -P <start set>
* Specify a starting set of attributes.
* Eg. 1,3,5-7.
* </pre>
*
* <pre>
* -R
* Produce a ranked list of attributes.
* </pre>
*
* <pre>
* -T <threshold>
* Specify a threshold by which attributes
* may be discarded from the ranking.
* Use in conjunction with -R
* </pre>
*
* <pre>
* -N <num to select>
* Specify number of attributes to select
* </pre>
*
* <pre>
* -num-slots <int>
* The number of execution slots, for example, the number of cores in the CPU. (default 1)
* </pre>
*
* <pre>
* -D
* Print debugging output
* </pre>
*
* <!-- options-end -->
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(final String[] options) throws Exception {
    resetOptions();
    // Flags first; Utils.getFlag/getOption consume the matched entries.
    setSearchBackwards(Utils.getFlag('B', options));
    setConservativeForwardSelection(Utils.getFlag('C', options));
    final String startSet = Utils.getOption('P', options);
    if (!startSet.isEmpty()) {
        setStartSet(startSet);
    }
    setGenerateRanking(Utils.getFlag('R', options));
    final String threshold = Utils.getOption('T', options);
    if (!threshold.isEmpty()) {
        setThreshold(Double.parseDouble(threshold));
    }
    final String numToSelect = Utils.getOption('N', options);
    if (!numToSelect.isEmpty()) {
        setNumToSelect(Integer.parseInt(numToSelect));
    }
    final String slots = Utils.getOption("num-slots", options);
    if (!slots.isEmpty()) {
        setNumExecutionSlots(Integer.parseInt(slots));
    }
    setDebuggingOutput(Utils.getFlag('D', options));
}
/**
 * Gets the current settings of the search.
 *
 * @return an array of strings suitable for passing to setOptions()
 */
@Override
public String[] getOptions() {
    final List<String> opts = new ArrayList<String>();
    if (getSearchBackwards()) {
        opts.add("-B");
    }
    if (getConservativeForwardSelection()) {
        opts.add("-C");
    }
    if (!getStartSet().isEmpty()) {
        opts.add("-P");
        opts.add(startSetToString());
    }
    if (getGenerateRanking()) {
        opts.add("-R");
    }
    opts.add("-T");
    opts.add(Double.toString(getThreshold()));
    opts.add("-N");
    opts.add(Integer.toString(getNumToSelect()));
    opts.add("-num-slots");
    opts.add(Integer.toString(getNumExecutionSlots()));
    if (getDebuggingOutput()) {
        opts.add("-D");
    }
    return opts.toArray(new String[0]);
}
/**
* converts the array of starting attributes to a string. This is used by
* getOptions to return the actual attributes specified as the starting set.
* This is better than using m_startRanges.getRanges() as the same start set
* can be specified in different ways from the command line---eg 1,2,3 == 1-3.
* This is to ensure that stuff that is stored in a database is comparable.
*
* @return a comma separated list of individual attribute numbers as a String
*/
protected String startSetToString() {
StringBuffer FString = new StringBuffer();
boolean didPrint;
if (this.m_starting == null) {
return this.getStartSet();
}
for (int i = 0; i < this.m_starting.length; i++) {
didPrint = false;
if ((this.m_hasClass == false) || (this.m_hasClass == true && i != this.m_classIndex)) {
FString.append((this.m_starting[i] + 1));
didPrint = true;
}
if (i == (this.m_starting.length - 1)) {
FString.append("");
} else {
if (didPrint) {
FString.append(",");
}
}
}
return FString.toString();
}
/**
 * returns a description of the search.
 *
 * @return a description of the search as a String.
 */
@Override
public String toString() {
StringBuffer FString = new StringBuffer();
// report search direction and the start set (or the implicit default)
FString.append("\tGreedy Stepwise (" + ((this.m_backward) ? "backwards)" : "forwards)") + ".\n\tStart set: ");
if (this.m_starting == null) {
// backward elimination implicitly starts from the full attribute set,
// forward selection from the empty one
if (this.m_backward) {
FString.append("all attributes\n");
} else {
FString.append("no attributes\n");
}
} else {
FString.append(this.startSetToString() + "\n");
}
// before a ranking has been produced, report the best subset's merit;
// afterwards, explain how the ranking column is to be read
if (!this.m_doneRanking) {
FString.append("\tMerit of best subset found: " + Utils.doubleToString(Math.abs(this.m_bestMerit), 8, 3) + "\n");
} else {
if (this.m_backward) {
FString.append("\n\tRanking is the order that attributes were removed, " + "starting \n\twith all attributes. The merit scores in the left" + "\n\tcolumn are the goodness of the remaining attributes in the"
+ "\n\tsubset after removing the corresponding in the right column" + "\n\tattribute from the subset.\n");
} else {
FString.append("\n\tRanking is the order that attributes were added, starting " + "\n\twith no attributes. The merit scores in the left column" + "\n\tare the goodness of the subset after the adding the"
+ "\n\tcorresponding attribute in the right column to the subset.\n");
}
}
// -Double.MAX_VALUE is the sentinel for "no threshold set"
if ((this.m_threshold != -Double.MAX_VALUE) && (this.m_doneRanking)) {
FString.append("\tThreshold for discarding attributes: " + Utils.doubleToString(this.m_threshold, 8, 4) + "\n");
}
return FString.toString();
}
/**
 * Searches the attribute subset space by greedy stepwise selection: forward
 * selection by default, or backward elimination when configured. When more
 * than one execution slot is configured, candidate subsets of each step are
 * evaluated in parallel on a fixed thread pool.
 *
 * @param ASEval the attribute evaluator to guide the search (must be a
 *          SubsetEvaluator)
 * @param data the training instances; null means "continue a previous run"
 *          (used internally when completing a ranking)
 * @return an array (not necessarily ordered) of selected attribute indexes
 * @throws Exception if the search can't be completed
 */
@Override
public int[] search(final ASEvaluation ASEval, final Instances data) throws Exception {
    int i;
    double best_merit = -Double.MAX_VALUE;
    double temp_best, temp_merit;
    int temp_index = 0;
    BitSet temp_group;
    boolean parallel = (this.m_poolSize > 1);
    if (parallel) {
        this.m_pool = Executors.newFixedThreadPool(this.m_poolSize);
    }
    // FIX: the pool is shut down in a finally block so the executor threads
    // are released even when the search aborts with an exception (e.g. the
    // InterruptedException thrown below); previously shutdown() was only
    // reached on the normal exit path and the pool leaked on abort.
    try {
        if (data != null) { // this is a fresh run so reset
            this.resetOptions();
            this.m_Instances = new Instances(data, 0);
        }
        this.m_ASEval = ASEval;
        this.m_numAttribs = this.m_Instances.numAttributes();
        if (this.m_best_group == null) {
            this.m_best_group = new BitSet(this.m_numAttribs);
        }
        if (!(this.m_ASEval instanceof SubsetEvaluator)) {
            throw new Exception(this.m_ASEval.getClass().getName() + " is not a " + "Subset evaluator!");
        }
        this.m_startRange.setUpper(this.m_numAttribs - 1);
        if (!(this.getStartSet().equals(""))) {
            this.m_starting = this.m_startRange.getSelection();
        }
        if (this.m_ASEval instanceof UnsupervisedSubsetEvaluator) {
            this.m_hasClass = false;
            this.m_classIndex = -1;
        } else {
            this.m_hasClass = true;
            this.m_classIndex = this.m_Instances.classIndex();
        }
        final SubsetEvaluator ASEvaluator = (SubsetEvaluator) this.m_ASEval;
        if (this.m_rankedAtts == null) {
            this.m_rankedAtts = new double[this.m_numAttribs][2];
            this.m_rankedSoFar = 0;
        }
        // If a starting subset has been supplied, then initialise the bitset
        if (this.m_starting != null && this.m_rankedSoFar <= 0) {
            for (i = 0; i < this.m_starting.length; i++) {
                if ((this.m_starting[i]) != this.m_classIndex) {
                    this.m_best_group.set(this.m_starting[i]);
                }
            }
        } else {
            // backward elimination starts from the full set of attributes
            if (this.m_backward && this.m_rankedSoFar <= 0) {
                for (i = 0; i < this.m_numAttribs; i++) {
                    if (i != this.m_classIndex) {
                        this.m_best_group.set(i);
                    }
                }
            }
        }
        // Evaluate the initial subset
        best_merit = ASEvaluator.evaluateSubset(this.m_best_group);
        // main search loop: each iteration adds (or removes) the single
        // attribute that most improves the merit, until no change helps
        boolean done = false;
        boolean addone = false;
        boolean z;
        if (this.m_debug && parallel) {
            System.err.println("Evaluating subsets in parallel...");
        }
        while (!done) {
            // abort promptly when the executing thread has been interrupted
            if (Thread.interrupted()) {
                throw new InterruptedException("Killed WEKA");
            }
            List<Future<Double[]>> results = new ArrayList<Future<Double[]>>();
            temp_group = (BitSet) this.m_best_group.clone();
            temp_best = best_merit;
            if (this.m_doRank) {
                // in ranking mode always take the best single change this step
                temp_best = -Double.MAX_VALUE;
            }
            done = true;
            addone = false;
            for (i = 0; i < this.m_numAttribs; i++) {
                if (Thread.interrupted()) {
                    throw new InterruptedException("Killed WEKA");
                }
                // candidate check: backward = attribute currently in the set,
                // forward = attribute currently out of the set
                if (this.m_backward) {
                    z = ((i != this.m_classIndex) && (temp_group.get(i)));
                } else {
                    z = ((i != this.m_classIndex) && (!temp_group.get(i)));
                }
                if (z) {
                    // set/unset the bit
                    if (this.m_backward) {
                        temp_group.clear(i);
                    } else {
                        temp_group.set(i);
                    }
                    if (parallel) {
                        final BitSet tempCopy = (BitSet) temp_group.clone();
                        final int attBeingEvaluated = i;
                        // make a copy if the evaluator is not thread safe
                        final SubsetEvaluator theEvaluator = (ASEvaluator instanceof weka.core.ThreadSafe) ? ASEvaluator : (SubsetEvaluator) ASEvaluation.makeCopies(this.m_ASEval, 1)[0];
                        Future<Double[]> future = this.m_pool.submit(new Callable<Double[]>() {
                            @Override
                            public Double[] call() throws Exception {
                                // result pair: [attribute index, merit]
                                Double[] r = new Double[2];
                                double e = theEvaluator.evaluateSubset(tempCopy);
                                r[0] = Double.valueOf(attBeingEvaluated); // FIX: deprecated new Double(...)
                                r[1] = e;
                                return r;
                            }
                        });
                        results.add(future);
                    } else {
                        temp_merit = ASEvaluator.evaluateSubset(temp_group);
                        if (this.m_backward) {
                            z = (temp_merit >= temp_best);
                        } else {
                            if (this.m_conservativeSelection) {
                                z = (temp_merit >= temp_best);
                            } else {
                                z = (temp_merit > temp_best);
                            }
                        }
                        if (z) {
                            temp_best = temp_merit;
                            temp_index = i;
                            addone = true;
                            done = false;
                        }
                    }
                    // unset this addition/deletion
                    if (this.m_backward) {
                        temp_group.set(i);
                    } else {
                        temp_group.clear(i);
                    }
                    if (this.m_doRank) {
                        done = false;
                    }
                }
            }
            // collect the parallel evaluations of this step and pick the best
            if (parallel) {
                for (int j = 0; j < results.size(); j++) {
                    Future<Double[]> f = results.get(j);
                    int index = f.get()[0].intValue();
                    temp_merit = f.get()[1].doubleValue();
                    if (this.m_backward) {
                        z = (temp_merit >= temp_best);
                    } else {
                        if (this.m_conservativeSelection) {
                            z = (temp_merit >= temp_best);
                        } else {
                            z = (temp_merit > temp_best);
                        }
                    }
                    if (z) {
                        temp_best = temp_merit;
                        temp_index = index;
                        addone = true;
                        done = false;
                    }
                }
            }
            // commit the winning addition/deletion and record it for ranking
            if (addone) {
                if (this.m_backward) {
                    this.m_best_group.clear(temp_index);
                } else {
                    this.m_best_group.set(temp_index);
                }
                best_merit = temp_best;
                if (this.m_debug) {
                    System.err.print("Best subset found so far: ");
                    int[] atts = this.attributeList(this.m_best_group);
                    for (int a : atts) {
                        System.err.print("" + (a + 1) + " ");
                    }
                    System.err.println("\nMerit: " + best_merit);
                }
                this.m_rankedAtts[this.m_rankedSoFar][0] = temp_index;
                this.m_rankedAtts[this.m_rankedSoFar][1] = best_merit;
                this.m_rankedSoFar++;
            }
        }
        this.m_bestMerit = best_merit;
        return this.attributeList(this.m_best_group);
    } finally {
        // always release the thread pool, even on an exceptional exit
        if (parallel) {
            this.m_pool.shutdown();
        }
    }
}
/**
 * Produces a ranked list of attributes. A search must have been performed
 * before calling this method; search() is invoked again here (in ranking
 * mode) to finish traversing the search space. Attributes are ranked in the
 * order they were added during forward selection; each merit value is the
 * merit of the subset after adding that attribute, so merits may rise and
 * then fall as the best subset is "passed by" on the way to the far side of
 * the search space.
 *
 * @return an array of attribute indexes and associated merit values
 * @throws Exception if something goes wrong.
 */
@Override
public double[][] rankedAttributes() throws Exception {
    if (this.m_rankedAtts == null || this.m_rankedSoFar == -1) {
        throw new Exception("Search must be performed before attributes " + "can be ranked.");
    }
    // complete the traversal of the search space in ranking mode
    this.m_doRank = true;
    this.search(this.m_ASEval, null);
    // snapshot the ranked (index, merit) pairs accumulated so far
    double[][] ranking = new double[this.m_rankedSoFar][2];
    for (int row = 0; row < this.m_rankedSoFar; row++) {
        System.arraycopy(this.m_rankedAtts[row], 0, ranking[row], 0, 2);
    }
    // abort promptly when the executing thread has been interrupted
    if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA");
    }
    this.resetOptions();
    this.m_doneRanking = true;
    if (this.m_numToSelect > ranking.length) {
        throw new Exception("More attributes requested than exist in the data");
    }
    // derive how many attributes to keep when the user didn't say explicitly
    if (this.m_numToSelect <= 0) {
        if (this.m_threshold == -Double.MAX_VALUE) {
            this.m_calculatedNumToSelect = ranking.length;
        } else {
            this.determineNumToSelectFromThreshold(ranking);
        }
    }
    return ranking;
}
/**
 * Counts how many ranked attributes have a merit strictly greater than the
 * configured threshold and stores that count in m_calculatedNumToSelect.
 *
 * @param ranking the ranked attributes as (index, merit) pairs
 */
private void determineNumToSelectFromThreshold(final double[][] ranking) {
    int aboveThreshold = 0;
    for (int i = 0; i < ranking.length; i++) {
        // column 1 holds the merit value
        if (ranking[i][1] > this.m_threshold) {
            aboveThreshold++;
        }
    }
    this.m_calculatedNumToSelect = aboveThreshold;
}
/**
 * Converts a BitSet into a list of attribute indexes, considering only bits
 * below the number of attributes.
 *
 * @param group the BitSet to convert
 * @return an array of attribute indexes (set-bit positions, ascending)
 **/
protected int[] attributeList(final BitSet group) {
    // first pass: size the result array
    int selected = 0;
    for (int bit = group.nextSetBit(0); bit >= 0 && bit < this.m_numAttribs; bit = group.nextSetBit(bit + 1)) {
        selected++;
    }
    // second pass: collect the indexes in ascending order
    int[] list = new int[selected];
    int pos = 0;
    for (int bit = group.nextSetBit(0); bit >= 0 && bit < this.m_numAttribs; bit = group.nextSetBit(bit + 1)) {
        list[pos++] = bit;
    }
    return list;
}
/**
 * Resets the search state so a subsequent run starts fresh.
 */
protected void resetOptions() {
this.m_doRank = false;
this.m_best_group = null;
this.m_ASEval = null;
this.m_Instances = null;
// -1 marks "no search performed yet" (checked by rankedAttributes())
this.m_rankedSoFar = -1;
this.m_rankedAtts = null;
}
/**
 * Returns the revision string.
 *
 * @return the revision
 */
@Override
public String getRevision() {
// the $Revision$ keyword is expanded by the version control system
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/HoldOutSubsetEvaluator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* HoldOutSubsetEvaluator.java
* Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.attributeSelection;
import java.util.BitSet;
import weka.core.Instance;
import weka.core.Instances;
/**
* Abstract attribute subset evaluator capable of evaluating subsets with
* respect to a data set that is distinct from that used to initialize/
* train the subset evaluator.
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
*/
public abstract class HoldOutSubsetEvaluator
extends ASEvaluation
implements SubsetEvaluator {
/** for serialization */
private static final long serialVersionUID = 8280529785412054174L;
/**
 * Evaluates a subset of attributes with respect to a set of instances.
 * @param subset a bitset representing the attribute subset to be
 * evaluated
 * @param holdOut a set of instances (possibly separate and distinct
 * from those used to build/train the evaluator) with which to
 * evaluate the merit of the subset
 * @return the "merit" of the subset on the holdOut data
 * @exception Exception if the subset cannot be evaluated
 */
public abstract double evaluateSubset(BitSet subset, Instances holdOut)
throws Exception;
/**
 * Evaluates a subset of attributes with respect to a single instance.
 * @param subset a bitset representing the attribute subset to be
 * evaluated
 * @param holdOut a single instance (possibly not one of those used to
 * build/train the evaluator) with which to evaluate the merit of the subset
 * @param retrain true if the classifier should be retrained with respect
 * to the new subset before testing on the holdOut instance.
 * @return the "merit" of the subset on the holdOut instance
 * @exception Exception if the subset cannot be evaluated
 */
public abstract double evaluateSubset(BitSet subset,
Instance holdOut,
boolean retrain)
throws Exception;
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/InfoGainAttributeEval.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* InfoGainAttributeEval.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.attributeSelection;
import java.util.Enumeration;
import java.util.Vector;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.ContingencyTables;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.filters.Filter;
import weka.filters.supervised.attribute.Discretize;
import weka.filters.unsupervised.attribute.NumericToBinary;
/**
* <!-- globalinfo-start --> InfoGainAttributeEval :<br/>
* <br/>
* Evaluates the worth of an attribute by measuring the information gain with
* respect to the class.<br/>
* <br/>
* InfoGain(Class,Attribute) = H(Class) - H(Class | Attribute).<br/>
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -M
* treat missing values as a seperate value.
* </pre>
*
* <pre>
* -B
* just binarize numeric attributes instead
* of properly discretizing them.
* </pre>
*
* <!-- options-end -->
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
* @see Discretize
* @see NumericToBinary
*/
public class InfoGainAttributeEval extends ASEvaluation implements AttributeEvaluator, OptionHandler {
/** for serialization */
static final long serialVersionUID = -1949849512589218930L;
/** If true, distribute counts for missing values across observed values; if false (-M flag given), treat missing as a separate value */
private boolean m_missing_merge;
/** Just binarize numeric attributes instead of discretizing them (-B flag) */
private boolean m_Binarize;
/** The info gain for each attribute, indexed by attribute; filled by buildEvaluator() */
private double[] m_InfoGains;
/**
 * Returns a string describing this attribute evaluator
 *
 * @return a description of the evaluator suitable for displaying in the
 * explorer/experimenter gui
 */
public String globalInfo() {
return "InfoGainAttributeEval :\n\nEvaluates the worth of an attribute " + "by measuring the information gain with respect to the class.\n\n" + "InfoGain(Class,Attribute) = H(Class) - H(Class | Attribute).\n";
}
/**
 * Constructor. Initialises the options to their defaults.
 */
public InfoGainAttributeEval() {
this.resetOptions();
}
/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 **/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> newVector = new Vector<Option>(2);
newVector.addElement(new Option("\ttreat missing values as a seperate " + "value.", "M", 0, "-M"));
newVector.addElement(new Option("\tjust binarize numeric attributes instead \n" + "\tof properly discretizing them.", "B", 0, "-B"));
return newVector.elements();
}
/**
 * Parses a given list of options.
 * <p/>
 *
 * <!-- options-start --> Valid options are:
 * <p/>
 *
 * <pre>
 * -M
 * treat missing values as a seperate value.
 * </pre>
 *
 * <pre>
 * -B
 * just binarize numeric attributes instead
 * of properly discretizing them.
 * </pre>
 *
 * <!-- options-end -->
 *
 * @param options the list of options as an array of strings
 * @throws Exception if an option is not supported
 */
@Override
public void setOptions(final String[] options) throws Exception {
this.resetOptions();
// note the negation: the -M flag DISABLES merging of missing-value counts
this.setMissingMerge(!(Utils.getFlag('M', options)));
this.setBinarizeNumericAttributes(Utils.getFlag('B', options));
}
/**
 * Gets the current settings of this evaluator.
 *
 * @return an array of strings suitable for passing to setOptions()
 */
@Override
public String[] getOptions() {
String[] options = new String[2];
int current = 0;
if (!this.getMissingMerge()) {
options[current++] = "-M";
}
if (this.getBinarizeNumericAttributes()) {
options[current++] = "-B";
}
// pad unused slots with empty strings (fixed-size array convention)
while (current < options.length) {
options[current++] = "";
}
return options;
}
/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 * explorer/experimenter gui
 */
public String binarizeNumericAttributesTipText() {
return "Just binarize numeric attributes instead of properly discretizing them.";
}
/**
 * Binarize numeric attributes.
 *
 * @param b true=binarize numeric attributes
 */
public void setBinarizeNumericAttributes(final boolean b) {
this.m_Binarize = b;
}
/**
 * get whether numeric attributes are just being binarized.
 *
 * @return true if numeric attributes are just binarized instead of discretized
 */
public boolean getBinarizeNumericAttributes() {
return this.m_Binarize;
}
/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 * explorer/experimenter gui
 */
public String missingMergeTipText() {
return "Distribute counts for missing values. Counts are distributed " + "across other values in proportion to their frequency. Otherwise, " + "missing is treated as a separate value.";
}
/**
 * distribute the counts for missing values across observed values
 *
 * @param b true=distribute missing values.
 */
public void setMissingMerge(final boolean b) {
this.m_missing_merge = b;
}
/**
 * get whether missing values are being distributed or not
 *
 * @return true if missing values are being distributed.
 */
public boolean getMissingMerge() {
return this.m_missing_merge;
}
/**
 * Returns the capabilities of this evaluator.
 *
 * @return the capabilities of this evaluator
 * @see Capabilities
 */
@Override
public Capabilities getCapabilities() {
Capabilities result = super.getCapabilities();
result.disableAll();
// attributes
result.enable(Capability.NOMINAL_ATTRIBUTES);
result.enable(Capability.NUMERIC_ATTRIBUTES);
result.enable(Capability.DATE_ATTRIBUTES);
result.enable(Capability.MISSING_VALUES);
// class
result.enable(Capability.NOMINAL_CLASS);
result.enable(Capability.MISSING_CLASS_VALUES);
return result;
}
/**
 * Initializes an information gain attribute evaluator. Discretizes all
 * attributes that are numeric.
 *
 * @param data set of instances serving as training data
 * @throws Exception if the evaluator has not been generated successfully
 */
@Override
public void buildEvaluator(Instances data) throws Exception {
// can evaluator handle data?
this.getCapabilities().testWithFail(data);
int classIndex = data.classIndex();
int numInstances = data.numInstances();
// numeric attributes are either properly discretized or just binarized
if (!this.m_Binarize) {
Discretize disTransform = new Discretize();
disTransform.setUseBetterEncoding(true);
disTransform.setInputFormat(data);
data = Filter.useFilter(data, disTransform);
} else {
NumericToBinary binTransform = new NumericToBinary();
binTransform.setInputFormat(data);
data = Filter.useFilter(data, binTransform);
}
int numClasses = data.attribute(classIndex).numValues();
// Reserve space and initialize counters.
// Layout: counts[attribute][value][class]; the extra row (index numValues)
// and extra column (index numClasses) accumulate missing-value counts.
double[][][] counts = new double[data.numAttributes()][][];
for (int k = 0; k < data.numAttributes(); k++) {
if (k != classIndex) {
int numValues = data.attribute(k).numValues();
counts[k] = new double[numValues + 1][numClasses + 1];
}
}
// Initialize counters: total class distribution over all instances
double[] temp = new double[numClasses + 1];
for (int k = 0; k < numInstances; k++) {
// abort promptly when the executing thread has been interrupted
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
Instance inst = data.instance(k);
if (inst.classIsMissing()) {
temp[numClasses] += inst.weight();
} else {
temp[(int) inst.classValue()] += inst.weight();
}
}
// seed row 0 of every attribute with the total class distribution; the
// loop below moves weight from row 0 to the actually observed value rows
for (int k = 0; k < counts.length; k++) {
if (k != classIndex) {
for (int i = 0; i < temp.length; i++) {
counts[k][0][i] = temp[i];
}
}
}
// Get counts: iterate only explicitly stored (sparse) values; weight for
// value 0 remains in row 0 by construction above
for (int k = 0; k < numInstances; k++) {
Instance inst = data.instance(k);
for (int i = 0; i < inst.numValues(); i++) {
// abort promptly when the executing thread has been interrupted
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
if (inst.index(i) != classIndex) {
if (inst.isMissingSparse(i) || inst.classIsMissing()) {
if (!inst.isMissingSparse(i)) {
counts[inst.index(i)][(int) inst.valueSparse(i)][numClasses] += inst.weight();
counts[inst.index(i)][0][numClasses] -= inst.weight();
} else if (!inst.classIsMissing()) {
counts[inst.index(i)][data.attribute(inst.index(i)).numValues()][(int) inst.classValue()] += inst.weight();
counts[inst.index(i)][0][(int) inst.classValue()] -= inst.weight();
} else {
counts[inst.index(i)][data.attribute(inst.index(i)).numValues()][numClasses] += inst.weight();
counts[inst.index(i)][0][numClasses] -= inst.weight();
}
} else {
counts[inst.index(i)][(int) inst.valueSparse(i)][(int) inst.classValue()] += inst.weight();
counts[inst.index(i)][0][(int) inst.classValue()] -= inst.weight();
}
}
}
}
// distribute missing counts if required: spread them over the observed
// cells in proportion to row/column/cell frequencies
if (this.m_missing_merge) {
for (int k = 0; k < data.numAttributes(); k++) {
// abort promptly when the executing thread has been interrupted
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
if (k != classIndex) {
int numValues = data.attribute(k).numValues();
// Compute marginals
double[] rowSums = new double[numValues];
double[] columnSums = new double[numClasses];
double sum = 0;
for (int i = 0; i < numValues; i++) {
for (int j = 0; j < numClasses; j++) {
rowSums[i] += counts[k][i][j];
columnSums[j] += counts[k][i][j];
}
sum += rowSums[i];
}
if (Utils.gr(sum, 0)) {
double[][] additions = new double[numValues][numClasses];
// Compute what needs to be added to each row
for (int i = 0; i < numValues; i++) {
for (int j = 0; j < numClasses; j++) {
additions[i][j] = (rowSums[i] / sum) * counts[k][numValues][j];
}
}
// Compute what needs to be added to each column
for (int i = 0; i < numClasses; i++) {
for (int j = 0; j < numValues; j++) {
additions[j][i] += (columnSums[i] / sum) * counts[k][j][numClasses];
}
}
// Compute what needs to be added to each cell
for (int i = 0; i < numClasses; i++) {
for (int j = 0; j < numValues; j++) {
additions[j][i] += (counts[k][j][i] / sum) * counts[k][numValues][numClasses];
}
}
// Make new contingency table (drops the missing row/column)
double[][] newTable = new double[numValues][numClasses];
for (int i = 0; i < numValues; i++) {
for (int j = 0; j < numClasses; j++) {
newTable[i][j] = counts[k][i][j] + additions[i][j];
}
}
counts[k] = newTable;
}
}
}
}
// Compute info gains: H(Class) - H(Class | Attribute)
this.m_InfoGains = new double[data.numAttributes()];
for (int i = 0; i < data.numAttributes(); i++) {
if (i != classIndex) {
this.m_InfoGains[i] = (ContingencyTables.entropyOverColumns(counts[i]) - ContingencyTables.entropyConditionedOnRows(counts[i]));
}
}
}
/**
 * Reset options to their default values
 */
protected void resetOptions() {
this.m_InfoGains = null;
this.m_missing_merge = true;
this.m_Binarize = false;
}
/**
 * evaluates an individual attribute by measuring the amount of information
 * gained about the class given the attribute.
 *
 * @param attribute the index of the attribute to be evaluated
 * @return the info gain
 * @throws Exception if the attribute could not be evaluated
 */
@Override
public double evaluateAttribute(final int attribute) throws Exception {
// buildEvaluator() must have been called first; otherwise m_InfoGains is null
return this.m_InfoGains[attribute];
}
/**
 * Describe the attribute evaluator
 *
 * @return a description of the attribute evaluator as a string
 */
@Override
public String toString() {
StringBuffer text = new StringBuffer();
if (this.m_InfoGains == null) {
text.append("Information Gain attribute evaluator has not been built");
} else {
text.append("\tInformation Gain Ranking Filter");
if (!this.m_missing_merge) {
text.append("\n\tMissing values treated as seperate");
}
if (this.m_Binarize) {
text.append("\n\tNumeric attributes are just binarized");
}
}
text.append("\n");
return text.toString();
}
/**
 * Returns the revision string.
 *
 * @return the revision
 */
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
// ============
// Test method.
// ============
/**
 * Main method for testing this class.
 *
 * @param args the options
 */
public static void main(final String[] args) {
runEvaluator(new InfoGainAttributeEval(), args);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/OneRAttributeEval.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* OneRAttributeEval.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.attributeSelection;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.Remove;
/**
* <!-- globalinfo-start --> OneRAttributeEval :<br/>
* <br/>
* Evaluates the worth of an attribute by using the OneR classifier.<br/>
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -S <seed>
* Random number seed for cross validation
* (default = 1)
* </pre>
*
* <pre>
* -F <folds>
* Number of folds for cross validation
* (default = 10)
* </pre>
*
* <pre>
* -D
* Use training data for evaluation rather than cross validaton
* </pre>
*
* <pre>
* -B <minimum bucket size>
* Minimum number of objects in a bucket
* (passed on to OneR, default = 6)
* </pre>
*
* <!-- options-end -->
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
*/
public class OneRAttributeEval extends ASEvaluation implements AttributeEvaluator, OptionHandler {
/** for serialization */
static final long serialVersionUID = 4386514823886856980L;
/** The training instances */
private Instances m_trainInstances;
/** Random number seed */
private int m_randomSeed;
/** Number of folds for cross validation */
private int m_folds;
/** Use training data to evaluate merit rather than x-val */
private boolean m_evalUsingTrainingData;
/** Passed on to OneR */
private int m_minBucketSize;
/**
 * Returns a string describing this attribute evaluator.
 *
 * @return a description of the evaluator suitable for displaying in the
 * explorer/experimenter gui
 */
public String globalInfo() {
return "OneRAttributeEval :\n\nEvaluates the worth of an attribute by " + "using the OneR classifier.\n";
}
/**
 * Returns a string for this option suitable for display in the gui as a tip
 * text.
 *
 * @return a string describing this option
 */
public String seedTipText() {
return "Set the seed for use in cross validation.";
}
/**
 * Set the random number seed used for cross validation.
 *
 * @param seed the seed to use
 */
public void setSeed(final int seed) {
this.m_randomSeed = seed;
}
/**
 * Get the random number seed used for cross validation.
 *
 * @return the current seed value
 */
public int getSeed() {
return this.m_randomSeed;
}
/**
 * Returns a string for this option suitable for display in the gui as a tip
 * text.
 *
 * @return a string describing this option
 */
public String foldsTipText() {
return "Set the number of folds for cross validation.";
}
/**
 * Set the number of folds to use for cross validation. Values below 2 are
 * clamped to 2, the minimum meaningful number of folds.
 *
 * @param folds the number of folds
 */
public void setFolds(final int folds) {
    // clamp to at least two folds
    this.m_folds = Math.max(folds, 2);
}
/**
 * Get the number of folds used for cross validation.
 *
 * @return the number of folds
 */
public int getFolds() {
return this.m_folds;
}
/**
 * Returns a string for this option suitable for display in the gui as a tip
 * text.
 *
 * @return a string describing this option
 */
public String evalUsingTrainingDataTipText() {
return "Use the training data to evaluate attributes rather than " + "cross validation.";
}
/**
 * Use the training data to evaluate attributes rather than cross validation.
 *
 * @param e true if training data is to be used for evaluation
 */
public void setEvalUsingTrainingData(final boolean e) {
this.m_evalUsingTrainingData = e;
}
/**
 * Returns a string for this option suitable for display in the gui as a tip
 * text.
 *
 * @return a string describing this option
 */
public String minimumBucketSizeTipText() {
return "The minimum number of objects in a bucket " + "(passed to OneR).";
}
/**
 * Set the minimum bucket size used by OneR.
 *
 * @param minB the minimum bucket size to use
 */
public void setMinimumBucketSize(final int minB) {
this.m_minBucketSize = minB;
}
/**
 * Get the minimum bucket size used by OneR.
 *
 * @return the minimum bucket size used
 */
public int getMinimumBucketSize() {
return this.m_minBucketSize;
}
/**
 * Returns true if the training data is to be used for evaluation.
 *
 * @return true if training data is to be used for evaluation
 */
public boolean getEvalUsingTrainingData() {
return this.m_evalUsingTrainingData;
}
/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 */
@Override
public Enumeration<Option> listOptions() {
// one Option entry per command-line flag (-S, -F, -D, -B)
Vector<Option> newVector = new Vector<Option>(4);
newVector.addElement(new Option("\tRandom number seed for cross validation\n" + "\t(default = 1)", "S", 1, "-S <seed>"));
newVector.addElement(new Option("\tNumber of folds for cross validation\n" + "\t(default = 10)", "F", 1, "-F <folds>"));
newVector.addElement(new Option("\tUse training data for evaluation rather than cross validaton", "D", 0, "-D"));
newVector.addElement(new Option("\tMinimum number of objects in a bucket\n" + "\t(passed on to " + "OneR, default = 6)", "B", 1, "-B <minimum bucket size>"));
return newVector.elements();
}
/**
* Parses a given list of options.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -S <seed>
* Random number seed for cross validation
* (default = 1)
* </pre>
*
* <pre>
* -F <folds>
* Number of folds for cross validation
* (default = 10)
* </pre>
*
* <pre>
* -D
* Use training data for evaluation rather than cross validaton
* </pre>
*
* <pre>
* -B <minimum bucket size>
* Minimum number of objects in a bucket
* (passed on to OneR, default = 6)
* </pre>
*
* <!-- options-end -->
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(final String[] options) throws Exception {
    // -S: random number seed for cross validation
    String value = Utils.getOption('S', options);
    if (!value.isEmpty()) {
        this.setSeed(Integer.parseInt(value));
    }
    // -F: number of folds for cross validation
    value = Utils.getOption('F', options);
    if (!value.isEmpty()) {
        this.setFolds(Integer.parseInt(value));
    }
    // -B: minimum bucket size passed on to OneR
    value = Utils.getOption('B', options);
    if (!value.isEmpty()) {
        this.setMinimumBucketSize(Integer.parseInt(value));
    }
    // -D: evaluate on the training data instead of cross validation
    this.setEvalUsingTrainingData(Utils.getFlag('D', options));
    Utils.checkForRemainingOptions(options);
}
/**
 * Returns the current setup as an option array, suitable for feeding back
 * into {@link #setOptions(String[])}.
 *
 * @return the options of the current setup
 */
@Override
public String[] getOptions() {
  Vector<String> result = new Vector<String>();
  if (this.getEvalUsingTrainingData()) {
    result.add("-D");
  }
  result.add("-S");
  result.add(String.valueOf(this.getSeed()));
  result.add("-F");
  result.add(String.valueOf(this.getFolds()));
  result.add("-B");
  result.add(String.valueOf(this.getMinimumBucketSize()));
  return result.toArray(new String[result.size()]);
}
/**
 * Constructor. Initializes all options to their default values via
 * {@link #resetOptions()} (seed 1, 10 folds, bucket size 6, cross
 * validation rather than training-data evaluation).
 */
public OneRAttributeEval() {
  this.resetOptions();
}
/**
 * Returns the capabilities of this evaluator: nominal, numeric and date
 * attributes (missing values allowed) with a nominal class.
 *
 * @return the capabilities of this evaluator
 * @see Capabilities
 */
@Override
public Capabilities getCapabilities() {
  Capabilities caps = super.getCapabilities();
  caps.disableAll();
  // supported attribute types
  caps.enable(Capability.NOMINAL_ATTRIBUTES);
  caps.enable(Capability.NUMERIC_ATTRIBUTES);
  caps.enable(Capability.DATE_ATTRIBUTES);
  caps.enable(Capability.MISSING_VALUES);
  // supported class types
  caps.enable(Capability.NOMINAL_CLASS);
  caps.enable(Capability.MISSING_CLASS_VALUES);
  return caps;
}
/**
 * Initializes the OneR attribute evaluator by storing the training data.
 *
 * @param data set of instances serving as training data
 * @throws Exception if the evaluator has not been generated successfully
 */
@Override
public void buildEvaluator(final Instances data) throws Exception {
  // reject data this evaluator cannot handle before storing it
  this.getCapabilities().testWithFail(data);
  this.m_trainInstances = data;
}
/**
 * Resets all options to their default values.
 */
protected void resetOptions() {
  this.m_trainInstances = null;
  this.m_evalUsingTrainingData = false;
  this.m_randomSeed = 1;
  this.m_folds = 10;
  // default bucket size used by OneR itself
  this.m_minBucketSize = 6;
}
/**
 * Evaluates an individual attribute by building a OneR classifier on just
 * that attribute (plus the class) and measuring its accuracy, either on the
 * training data or via cross validation.
 *
 * @param attribute the index of the attribute to be evaluated
 * @return the accuracy (in percent) of OneR on this single attribute
 * @throws Exception if the attribute could not be evaluated
 */
@Override
public double evaluateAttribute(final int attribute) throws Exception {
  // work on a copy so the stored training data is left untouched
  Instances working = new Instances(this.m_trainInstances);
  // keep only the attribute under evaluation and the class column
  int[] keep = new int[] { attribute, working.classIndex() };
  Remove keepFilter = new Remove();
  keepFilter.setInvertSelection(true);
  keepFilter.setAttributeIndicesArray(keep);
  keepFilter.setInputFormat(working);
  working = Filter.useFilter(working, keepFilter);
  Evaluation evaluation = new Evaluation(working);
  String[] oneROpts = { "-B", String.valueOf(this.getMinimumBucketSize()) };
  Classifier oneR = AbstractClassifier.forName("weka.classifiers.rules.OneR", oneROpts);
  if (this.m_evalUsingTrainingData) {
    oneR.buildClassifier(working);
    evaluation.evaluateModel(oneR, working);
  } else {
    evaluation.crossValidateModel(oneR, working, this.m_folds, new Random(this.m_randomSeed));
  }
  return (1 - evaluation.errorRate()) * 100.0;
}
/**
 * Returns a textual description of the evaluator and its current settings.
 *
 * @return description as a string
 */
@Override
public String toString() {
  StringBuilder sb = new StringBuilder();
  if (this.m_trainInstances == null) {
    sb.append("\tOneR feature evaluator has not been built yet");
  } else {
    sb.append("\tOneR feature evaluator.\n\n");
    sb.append("\tUsing ");
    if (this.m_evalUsingTrainingData) {
      sb.append("training data for evaluation of attributes.");
    } else {
      sb.append("" + this.getFolds() + " fold cross validation for evaluating " + "attributes.");
    }
    sb.append("\n\tMinimum bucket size for OneR: " + this.getMinimumBucketSize());
  }
  sb.append("\n");
  return sb.toString();
}
/**
 * Returns the revision string of this class.
 *
 * @return the revision
 */
@Override
public String getRevision() {
  return RevisionUtils.extract("$Revision$");
}
/**
 * Post-processes the selected attribute set; also drops the stored training
 * instances (keeping only the header) to free memory.
 *
 * @param attributeSet the selected attribute indices (returned unchanged)
 * @return the unmodified attribute set
 */
@Override
public int[] postProcess(final int[] attributeSet) {
  // keep only the header structure to save memory
  this.m_trainInstances = new Instances(this.m_trainInstances, 0);
  return attributeSet;
}
// ============
// Test method.
// ============
/**
 * Main method for testing this class from the command line.
 *
 * @param args the options
 */
public static void main(final String[] args) {
  runEvaluator(new OneRAttributeEval(), args);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/PrincipalComponents.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* PrincipalComponents.java
* Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.attributeSelection;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Vector;
import no.uib.cipr.matrix.Matrices;
import no.uib.cipr.matrix.SymmDenseEVD;
import no.uib.cipr.matrix.UpperSymmDenseMatrix;
import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.SparseInstance;
import weka.core.Utils;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.Center;
import weka.filters.unsupervised.attribute.NominalToBinary;
import weka.filters.unsupervised.attribute.Remove;
import weka.filters.unsupervised.attribute.ReplaceMissingValues;
import weka.filters.unsupervised.attribute.Standardize;
/**
* <!-- globalinfo-start --> Performs a principal components analysis and
* transformation of the data. Use in conjunction with a Ranker search.
* Dimensionality reduction is accomplished by choosing enough eigenvectors to
* account for some percentage of the variance in the original data---default
* 0.95 (95%). Attribute noise can be filtered by transforming to the PC space,
* eliminating some of the worst eigenvectors, and then transforming back to the
* original space.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -C
* Center (rather than standardize) the
* data and compute PCA using the covariance (rather
* than the correlation) matrix.
* </pre>
*
* <pre>
* -R
* Retain enough PC attributes to account
* for this proportion of variance in the original data.
* (default = 0.95)
* </pre>
*
* <pre>
* -O
* Transform through the PC space and
* back to the original space.
* </pre>
*
* <pre>
* -A
* Maximum number of attributes to include in
* transformed attribute names. (-1 = include all)
* </pre>
*
* <!-- options-end -->
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @author Gabi Schmidberger (gabi@cs.waikato.ac.nz)
* @version $Revision$
*/
public class PrincipalComponents extends UnsupervisedAttributeEvaluator implements AttributeTransformer, OptionHandler {
/** for serialization */
private static final long serialVersionUID = -3675307197777734007L;
/** The data to analyse/transform */
private Instances m_trainInstances;
/** Keep a copy for the class attribute (if set) */
private Instances m_trainHeader;
/** The header for the transformed data format */
private Instances m_transformedFormat;
/** The header for data transformed back to the original space */
private Instances m_originalSpaceFormat;
/** Data has a class set */
private boolean m_hasClass;
/** Class index */
private int m_classIndex;
/** Number of attributes */
private int m_numAttribs;
/** Number of instances */
private int m_numInstances;
/** Correlation/covariance matrix for the original data */
private UpperSymmDenseMatrix m_correlation;
/** Per-attribute means of the (filtered) training data */
private double[] m_means;
/** Per-attribute standard deviations of the (filtered) training data */
private double[] m_stdDevs;
/**
 * If true, center (rather than standardize) the data and compute PCA from
 * covariance (rather than correlation) matrix.
 */
private boolean m_center = false;
/**
 * Will hold the unordered linear transformations of the (normalized) original
 * data
 */
private double[][] m_eigenvectors;
/** Eigenvalues for the corresponding eigenvectors */
private double[] m_eigenvalues = null;
/** Sorted eigenvalues */
private int[] m_sortedEigens;
/** sum of the eigenvalues */
private double m_sumOfEigenValues = 0.0;
/** Filter that replaces missing values in the original data */
private ReplaceMissingValues m_replaceMissingFilter;
/** Filter that binarizes nominal attributes of the original data */
private NominalToBinary m_nominalToBinFilter;
/** Removes useless columns (and the class column) from the original data */
private Remove m_attributeFilter;
/** Centers the data when PCA is computed from the covariance matrix */
private Center m_centerFilter;
/** Standardizes the data when PCA is computed from the correlation matrix */
private Standardize m_standardizeFilter;
/** The number of attributes in the pc transformed data */
private int m_outputNumAtts = -1;
/**
 * the amount of variance to cover in the original data when retaining the
 * best n PC's
 */
private double m_coverVariance = 0.95;
/**
 * transform the data through the pc space and back to the original space ?
 */
private boolean m_transBackToOriginal = false;
/** maximum number of attributes in the transformed attribute name */
private int m_maxAttrsInName = 5;
/**
 * holds the transposed eigenvectors for converting back to the original space
 */
private double[][] m_eTranspose;
/**
 * Describes this attribute transformer for display in the
 * explorer/experimenter GUI.
 *
 * @return a description of the evaluator
 */
public String globalInfo() {
  return "Performs a principal components analysis and transformation of " + "the data. Use in conjunction with a Ranker search. Dimensionality " + "reduction is accomplished by choosing enough eigenvectors to "
      + "account for some percentage of the variance in the original data---" + "default 0.95 (95%). Attribute noise can be filtered by transforming " + "to the PC space, eliminating some of the worst eigenvectors, and "
      + "then transforming back to the original space.";
}
/**
 * Enumerates the command-line options understood by this transformer:
 * center-data (-C), variance covered (-R), back transformation (-O) and
 * maximum attributes in names (-A).
 *
 * @return an enumeration of all the available options.
 **/
@Override
public Enumeration<Option> listOptions() {
  Vector<Option> opts = new Vector<Option>(4);
  opts.addElement(new Option("\tCenter (rather than standardize) the" + "\n\tdata and compute PCA using the covariance (rather" + "\n\t than the correlation) matrix.", "C", 0, "-C"));
  opts.addElement(new Option("\tRetain enough PC attributes to account " + "\n\tfor this proportion of variance in " + "the original data.\n" + "\t(default = 0.95)", "R", 1, "-R"));
  opts.addElement(new Option("\tTransform through the PC space and " + "\n\tback to the original space.", "O", 0, "-O"));
  opts.addElement(new Option("\tMaximum number of attributes to include in " + "\n\ttransformed attribute names. (-1 = include all)", "A", 1, "-A"));
  return opts.elements();
}
/**
 * Parses a given list of options. Recognized options are:
 * <ul>
 * <li>-C — center (rather than standardize) the data; PCA is then computed
 * from the covariance rather than the correlation matrix</li>
 * <li>-R &lt;proportion&gt; — retain enough PCs to cover this proportion of
 * the variance (default 0.95)</li>
 * <li>-O — transform through the PC space and back to the original space</li>
 * <li>-A &lt;num&gt; — maximum number of attributes in transformed attribute
 * names, -1 for all</li>
 * </ul>
 *
 * @param options the list of options as an array of strings
 * @throws Exception if an option is not supported
 */
@Override
public void setOptions(final String[] options) throws Exception {
  this.resetOptions();
  String value = Utils.getOption('R', options);
  if (!value.isEmpty()) {
    this.setVarianceCovered(Double.parseDouble(value));
  }
  value = Utils.getOption('A', options);
  if (!value.isEmpty()) {
    this.setMaximumAttributeNames(Integer.parseInt(value));
  }
  this.setTransformBackToOriginal(Utils.getFlag('O', options));
  this.setCenterData(Utils.getFlag('C', options));
}
/**
 * Reset to defaults.
 * <p>
 * NOTE(review): only the variance threshold, the eigenvalue sum and the
 * back-transform flag are reset here; {@code m_center} and
 * {@code m_maxAttrsInName} keep their previous values — confirm this is
 * intentional (setOptions re-reads both from the option array afterwards).
 */
private void resetOptions() {
  this.m_coverVariance = 0.95;
  this.m_sumOfEigenValues = 0.0;
  this.m_transBackToOriginal = false;
}
/**
 * Tip text for the "centerData" property, shown in the
 * explorer/experimenter GUI.
 *
 * @return a short description of this property
 */
public String centerDataTipText() {
  return "Center (rather than standardize) the data. PCA will " + "be computed from the covariance (rather than correlation) " + "matrix";
}
/**
 * Sets whether to center (rather than standardize) the data. When true, PCA
 * is computed from the covariance rather than the correlation matrix.
 *
 * @param center true to center instead of standardize
 */
public void setCenterData(final boolean center) {
  this.m_center = center;
}
/**
 * Returns whether the data is centered (rather than standardized) before the
 * analysis, i.e. whether PCA uses the covariance matrix.
 *
 * @return true if the data is centered rather than standardized
 */
public boolean getCenterData() {
  return this.m_center;
}
/**
 * Tip text for the "varianceCovered" property, shown in the
 * explorer/experimenter GUI.
 *
 * @return a short description of this property
 */
public String varianceCoveredTipText() {
  return "Retain enough PC attributes to account for this proportion of " + "variance.";
}
/**
 * Sets the proportion of total variance that retained principal components
 * must account for.
 *
 * @param vc the proportion of total variance to account for
 */
public void setVarianceCovered(final double vc) {
  this.m_coverVariance = vc;
}
/**
 * Returns the proportion of total variance that retained principal
 * components must account for.
 *
 * @return the proportion of variance to account for
 */
public double getVarianceCovered() {
  return this.m_coverVariance;
}
/**
 * Tip text for the "maximumAttributeNames" property, shown in the
 * explorer/experimenter GUI.
 *
 * @return a short description of this property
 */
public String maximumAttributeNamesTipText() {
  return "The maximum number of attributes to include in transformed attribute names.";
}
/**
 * Sets the maximum number of original attributes mentioned in a transformed
 * attribute's name.
 *
 * @param m the maximum number of attributes
 */
public void setMaximumAttributeNames(final int m) {
  this.m_maxAttrsInName = m;
}
/**
 * Returns the maximum number of original attributes mentioned in a
 * transformed attribute's name.
 *
 * @return the maximum number of attributes
 */
public int getMaximumAttributeNames() {
  return this.m_maxAttrsInName;
}
/**
 * Tip text for the "transformBackToOriginal" property, shown in the
 * explorer/experimenter GUI.
 *
 * @return a short description of this property
 */
public String transformBackToOriginalTipText() {
  return "Transform through the PC space and back to the original space. " + "If only the best n PCs are retained (by setting varianceCovered < 1) " + "then this option will give a dataset in the original space but with "
      + "less attribute noise.";
}
/**
 * Sets whether the transformed data should be mapped back to the original
 * attribute space.
 *
 * @param b true to transform back to the original space
 */
public void setTransformBackToOriginal(final boolean b) {
  this.m_transBackToOriginal = b;
}
/**
 * Returns whether the transformed data is mapped back to the original
 * attribute space.
 *
 * @return true if the data is transformed back to the original space
 */
public boolean getTransformBackToOriginal() {
  return this.m_transBackToOriginal;
}
/**
 * Returns the current setup as an option array, suitable for feeding back
 * into {@link #setOptions(String[])}.
 *
 * @return an array of strings suitable for passing to setOptions()
 */
@Override
public String[] getOptions() {
  Vector<String> result = new Vector<String>();
  if (this.getCenterData()) {
    result.add("-C");
  }
  result.add("-R");
  result.add(String.valueOf(this.getVarianceCovered()));
  result.add("-A");
  result.add(String.valueOf(this.getMaximumAttributeNames()));
  if (this.getTransformBackToOriginal()) {
    result.add("-O");
  }
  return result.toArray(new String[result.size()]);
}
/**
 * Returns the capabilities of this evaluator: nominal, numeric and date
 * attributes (missing values allowed) with any class type, including no
 * class at all.
 *
 * @return the capabilities of this evaluator
 * @see Capabilities
 */
@Override
public Capabilities getCapabilities() {
  Capabilities caps = super.getCapabilities();
  caps.disableAll();
  // supported attribute types
  caps.enable(Capability.NOMINAL_ATTRIBUTES);
  caps.enable(Capability.NUMERIC_ATTRIBUTES);
  caps.enable(Capability.DATE_ATTRIBUTES);
  caps.enable(Capability.MISSING_VALUES);
  // supported class types
  caps.enable(Capability.NOMINAL_CLASS);
  caps.enable(Capability.UNARY_CLASS);
  caps.enable(Capability.NUMERIC_CLASS);
  caps.enable(Capability.DATE_CLASS);
  caps.enable(Capability.MISSING_CLASS_VALUES);
  caps.enable(Capability.NO_CLASS);
  return caps;
}
/**
 * Initializes principal components and performs the analysis.
 *
 * @param data the instances to analyse/transform
 * @throws Exception if analysis fails
 */
@Override
public void buildEvaluator(final Instances data) throws Exception {
  // reject data this evaluator cannot handle before starting the analysis
  this.getCapabilities().testWithFail(data);
  this.buildAttributeConstructor(data);
}
/**
 * Performs the whole PCA pipeline: resets state, filters the data (replace
 * missing values, binarize nominals, drop single-valued columns and the
 * class column), computes the correlation/covariance matrix, factorizes it
 * and, if requested, prepares the transposed eigenvector matrix needed to
 * map instances back to the original space.
 * <p>
 * Throws {@link InterruptedException} at loop checkpoints when the current
 * thread is interrupted (cooperative cancellation).
 *
 * @param data the instances to analyse
 * @throws Exception if any filtering step or the decomposition fails
 */
private void buildAttributeConstructor(final Instances data) throws Exception {
  // reset state from any previous run
  this.m_eigenvalues = null;
  this.m_outputNumAtts = -1;
  this.m_attributeFilter = null;
  this.m_nominalToBinFilter = null;
  this.m_sumOfEigenValues = 0.0;
  this.m_trainInstances = new Instances(data);
  // make a copy of the training data so that we can get the class
  // column to append to the transformed data (if necessary)
  this.m_trainHeader = new Instances(this.m_trainInstances, 0);
  this.m_replaceMissingFilter = new ReplaceMissingValues();
  this.m_replaceMissingFilter.setInputFormat(this.m_trainInstances);
  this.m_trainInstances = Filter.useFilter(this.m_trainInstances, this.m_replaceMissingFilter);
  this.m_nominalToBinFilter = new NominalToBinary();
  this.m_nominalToBinFilter.setInputFormat(this.m_trainInstances);
  this.m_trainInstances = Filter.useFilter(this.m_trainInstances, this.m_nominalToBinFilter);
  // delete any attributes with only one distinct value or are all missing
  Vector<Integer> deleteCols = new Vector<Integer>();
  for (int i = 0; i < this.m_trainInstances.numAttributes(); i++) {
    // cooperative cancellation point
    if (Thread.interrupted()) {
      throw new InterruptedException("Killed WEKA");
    }
    if (this.m_trainInstances.numDistinctValues(i) <= 1) {
      // Integer.valueOf instead of the deprecated Integer(int) constructor
      deleteCols.addElement(Integer.valueOf(i));
    }
  }
  if (this.m_trainInstances.classIndex() >= 0) {
    // get rid of the class column
    this.m_hasClass = true;
    this.m_classIndex = this.m_trainInstances.classIndex();
    deleteCols.addElement(Integer.valueOf(this.m_classIndex));
  }
  // remove columns from the data if necessary
  if (deleteCols.size() > 0) {
    this.m_attributeFilter = new Remove();
    int[] todelete = new int[deleteCols.size()];
    for (int i = 0; i < deleteCols.size(); i++) {
      todelete[i] = (deleteCols.elementAt(i)).intValue();
    }
    this.m_attributeFilter.setAttributeIndicesArray(todelete);
    this.m_attributeFilter.setInvertSelection(false);
    this.m_attributeFilter.setInputFormat(this.m_trainInstances);
    this.m_trainInstances = Filter.useFilter(this.m_trainInstances, this.m_attributeFilter);
  }
  // can evaluator handle the processed data ? e.g., enough attributes?
  this.getCapabilities().testWithFail(this.m_trainInstances);
  this.m_numInstances = this.m_trainInstances.numInstances();
  this.m_numAttribs = this.m_trainInstances.numAttributes();
  this.fillCovariance();
  // eigen decomposition of the symmetric correlation/covariance matrix
  SymmDenseEVD evd = SymmDenseEVD.factorize(this.m_correlation);
  this.m_eigenvectors = Matrices.getArray(evd.getEigenvectors());
  this.m_eigenvalues = evd.getEigenvalues();
  // any eigenvalues less than 0 are not worth anything --- change to 0
  for (int i = 0; i < this.m_eigenvalues.length; i++) {
    if (this.m_eigenvalues[i] < 0) {
      // cooperative cancellation point
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA");
      }
      this.m_eigenvalues[i] = 0.0;
    }
  }
  this.m_sortedEigens = Utils.sort(this.m_eigenvalues);
  this.m_sumOfEigenValues = Utils.sum(this.m_eigenvalues);
  this.m_transformedFormat = this.setOutputFormat();
  if (this.m_transBackToOriginal) {
    this.m_originalSpaceFormat = this.setOutputFormatOriginal();
    // new ordered eigenvector matrix
    int numVectors = (this.m_transformedFormat.classIndex() < 0) ? this.m_transformedFormat.numAttributes() : this.m_transformedFormat.numAttributes() - 1;
    double[][] orderedVectors = new double[this.m_eigenvectors.length][numVectors + 1];
    // gather the retained eigenvectors in decreasing eigenvalue order
    for (int i = this.m_numAttribs - 1; i > (this.m_numAttribs - numVectors - 1); i--) {
      for (int j = 0; j < this.m_numAttribs; j++) {
        // cooperative cancellation point
        if (Thread.interrupted()) {
          throw new InterruptedException("Killed WEKA");
        }
        orderedVectors[j][this.m_numAttribs - i] = this.m_eigenvectors[j][this.m_sortedEigens[i]];
      }
    }
    // transpose the matrix for the back transformation
    int nr = orderedVectors.length;
    int nc = orderedVectors[0].length;
    this.m_eTranspose = new double[nc][nr];
    for (int i = 0; i < nc; i++) {
      for (int j = 0; j < nr; j++) {
        // cooperative cancellation point
        if (Thread.interrupted()) {
          throw new InterruptedException("Killed WEKA");
        }
        this.m_eTranspose[i][j] = orderedVectors[j][i];
      }
    }
  }
}
/**
 * Returns just the header of the transformed data (an empty set of
 * instances), so that AttributeSelection can determine the structure of the
 * transformed data without materializing it via transformedData().
 *
 * @return the header of the transformed data.
 * @throws Exception if the header of the transformed data can't be
 *           determined.
 */
@Override
public Instances transformedHeader() throws Exception {
  if (this.m_eigenvalues == null) {
    throw new Exception("Principal components hasn't been built yet");
  }
  return this.m_transBackToOriginal ? this.m_originalSpaceFormat : this.m_transformedFormat;
}
/**
 * Returns the header of the training data after all filtering (missing
 * value replacement and nominal-to-binary conversion).
 *
 * @return the header of the fully filtered training data
 */
public Instances getFilteredInputFormat() {
  return new Instances(this.m_trainInstances, 0);
}
/**
 * Returns the correlation/covariance matrix of the analysed data as a dense
 * 2-D array.
 *
 * @return the correlation or covariance matrix
 */
public double[][] getCorrelationMatrix() {
  return Matrices.getArray(this.m_correlation);
}
/**
 * Returns the eigenvectors as computed by the decomposition, i.e. without
 * sorting by eigenvalue.
 *
 * @return the unsorted eigenvectors
 */
public double[][] getUnsortedEigenVectors() {
  return this.m_eigenvectors;
}
/**
 * Returns the eigenvalues corresponding to the (unsorted) eigenvectors.
 *
 * @return the eigenvalues
 */
public double[] getEigenValues() {
  return this.m_eigenvalues;
}
/**
 * Transforms the given data into the PC space (or back to the original
 * space if configured to do so).
 *
 * @param data the instances to transform
 * @return the transformed data
 * @throws Exception if transformed data can't be returned
 */
@Override
public Instances transformedData(final Instances data) throws Exception {
  if (this.m_eigenvalues == null) {
    throw new Exception("Principal components hasn't been built yet");
  }
  // pick the target header based on the back-transform setting
  Instances result = this.m_transBackToOriginal
      ? new Instances(this.m_originalSpaceFormat)
      : new Instances(this.m_transformedFormat);
  for (int i = 0; i < data.numInstances(); i++) {
    // cooperative cancellation point
    if (Thread.interrupted()) {
      throw new InterruptedException("Killed WEKA");
    }
    result.add(this.convertInstance(data.instance(i)));
  }
  return result;
}
/**
 * Evaluates the merit of a transformed attribute. This is defined to be 1
 * minus the cumulative variance explained. Merit can't be meaningfully
 * evaluated if the data is to be transformed back to the original space.
 *
 * @param att the attribute to be evaluated
 * @return the merit of a transformed attribute
 * @throws Exception if attribute can't be evaluated
 */
@Override
public double evaluateAttribute(final int att) throws Exception {
  if (this.m_eigenvalues == null) {
    throw new Exception("Principal components hasn't been built yet!");
  }
  if (this.m_transBackToOriginal) {
    return 1.0; // can't evaluate back in the original space!
  }
  // return 1-cumulative variance explained for this transformed att
  // m_sortedEigens holds indices in ascending eigenvalue order, so we walk
  // from the largest eigenvalue (last index) down to the one for 'att'
  double cumulative = 0.0;
  for (int i = this.m_numAttribs - 1; i >= this.m_numAttribs - att - 1; i--) {
    cumulative += this.m_eigenvalues[this.m_sortedEigens[i]];
  }
  return 1.0 - cumulative / this.m_sumOfEigenValues;
}
/**
 * Computes the correlation (or, when centering, covariance) matrix of the
 * filtered training data and stores it in {@code m_correlation}. Also
 * records per-attribute means and standard deviations, which are needed
 * later for the back transformation.
 *
 * @throws Exception if filtering fails or the thread is interrupted
 */
private void fillCovariance() throws Exception {
  // first store the means and std devs; must happen BEFORE the data is
  // centered/standardized below, otherwise they would all be ~0/1
  this.m_means = new double[this.m_trainInstances.numAttributes()];
  this.m_stdDevs = new double[this.m_trainInstances.numAttributes()];
  for (int i = 0; i < this.m_trainInstances.numAttributes(); i++) {
    // cooperative cancellation point
    if (Thread.interrupted()) {
      throw new InterruptedException("Killed WEKA");
    }
    this.m_means[i] = this.m_trainInstances.meanOrMode(i);
    this.m_stdDevs[i] = Math.sqrt(Utils.variance(this.m_trainInstances.attributeToDoubleArray(i)));
  }
  // just center the data or standardize it?
  if (this.m_center) {
    this.m_centerFilter = new Center();
    this.m_centerFilter.setInputFormat(this.m_trainInstances);
    this.m_trainInstances = Filter.useFilter(this.m_trainInstances, this.m_centerFilter);
  } else {
    this.m_standardizeFilter = new Standardize();
    this.m_standardizeFilter.setInputFormat(this.m_trainInstances);
    this.m_trainInstances = Filter.useFilter(this.m_trainInstances, this.m_standardizeFilter);
  }
  // now compute the covariance matrix of the centered/standardized data
  // (for standardized data this equals the correlation matrix); only the
  // upper triangle is filled since the matrix is symmetric
  this.m_correlation = new UpperSymmDenseMatrix(this.m_numAttribs);
  for (int i = 0; i < this.m_numAttribs; i++) {
    for (int j = i; j < this.m_numAttribs; j++) {
      // cooperative cancellation point
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA");
      }
      double cov = 0;
      for (Instance inst : this.m_trainInstances) {
        cov += inst.value(i) * inst.value(j);
      }
      cov /= this.m_trainInstances.numInstances() - 1;
      this.m_correlation.set(i, j, cov);
    }
  }
}
/**
 * Builds a human-readable summary of the analysis: the correlation or
 * covariance matrix, the retained eigenvalues with their individual and
 * cumulative proportion of variance, and the eigenvectors.
 *
 * @return a summary of the analysis.
 */
private String principalComponentsSummary() {
  StringBuffer result = new StringBuffer();
  double cumulative = 0.0;
  Instances output = null;
  int numVectors = 0;
  try {
    output = this.setOutputFormat();
    numVectors = (output.classIndex() < 0) ? output.numAttributes() : output.numAttributes() - 1;
  } catch (Exception ignored) {
    // handled by the null guard below; previously this fell through and
    // caused an NPE when 'output' was dereferenced in the loop
  }
  if (output == null) {
    return "Unable to construct the transformed data format for the summary.";
  }
  String corrCov = (this.m_center) ? "Covariance " : "Correlation ";
  result.append(corrCov + "matrix\n" + matrixToString(Matrices.getArray(this.m_correlation)) + "\n\n");
  result.append("eigenvalue\tproportion\tcumulative\n");
  // eigenvalues in decreasing order (m_sortedEigens is ascending)
  for (int i = this.m_numAttribs - 1; i > (this.m_numAttribs - numVectors - 1); i--) {
    cumulative += this.m_eigenvalues[this.m_sortedEigens[i]];
    result.append(Utils.doubleToString(this.m_eigenvalues[this.m_sortedEigens[i]], 9, 5) + "\t" + Utils.doubleToString((this.m_eigenvalues[this.m_sortedEigens[i]] / this.m_sumOfEigenValues), 9, 5) + "\t"
        + Utils.doubleToString((cumulative / this.m_sumOfEigenValues), 9, 5) + "\t" + output.attribute(this.m_numAttribs - i - 1).name() + "\n");
  }
  result.append("\nEigenvectors\n");
  for (int j = 1; j <= numVectors; j++) {
    result.append(" V" + j + '\t');
  }
  result.append("\n");
  for (int j = 0; j < this.m_numAttribs; j++) {
    for (int i = this.m_numAttribs - 1; i > (this.m_numAttribs - numVectors - 1); i--) {
      result.append(Utils.doubleToString(this.m_eigenvectors[j][this.m_sortedEigens[i]], 7, 4) + "\t");
    }
    result.append(this.m_trainInstances.attribute(j).name() + '\n');
  }
  if (this.m_transBackToOriginal) {
    result.append("\nPC space transformed back to original space.\n" + "(Note: can't evaluate attributes in the original " + "space)\n");
  }
  return result.toString();
}
/**
 * Returns a description of this attribute transformer, including the
 * analysis summary once it has been built.
 *
 * @return a String describing this attribute transformer
 */
@Override
public String toString() {
  if (this.m_eigenvalues == null) {
    return "Principal components hasn't been built yet!";
  }
  return "\tPrincipal Components Attribute Transformer\n\n" + this.principalComponentsSummary();
}
/**
 * Renders a square matrix as a String, one row per line, each value
 * formatted to width 6 with 2 decimals.
 *
 * @param matrix the matrix to describe
 * @return a String describing the matrix
 */
public static String matrixToString(final double[][] matrix) {
  StringBuilder sb = new StringBuilder();
  int n = matrix.length;
  for (int row = 0; row < n; row++) {
    for (int col = 0; col < n; col++) {
      sb.append(Utils.doubleToString(matrix[row][col], 6, 2)).append(' ');
      if (col == n - 1) {
        sb.append('\n');
      }
    }
  }
  return sb.toString();
}
/**
 * Convert a pc transformed instance back to the original space.
 *
 * @param inst the instance to convert (in PC space; when a class is set,
 *          its value occupies the instance's last attribute)
 * @return the processed instance in the original attribute space
 * @throws Exception if something goes wrong
 */
private Instance convertInstanceToOriginal(final Instance inst) throws Exception {
  double[] newVals = null;
  // reserve one extra slot for the class value when a class is set
  if (this.m_hasClass) {
    newVals = new double[this.m_numAttribs + 1];
  } else {
    newVals = new double[this.m_numAttribs];
  }
  if (this.m_hasClass) {
    // class is always appended as the last attribute
    newVals[this.m_numAttribs] = inst.value(inst.numAttributes() - 1);
  }
  // multiply by the transposed eigenvector matrix; row 0 of m_eTranspose is
  // skipped (j starts at 1), matching how it was filled in
  // buildAttributeConstructor (column 0 of orderedVectors is unused)
  for (int i = 0; i < this.m_eTranspose[0].length; i++) {
    double tempval = 0.0;
    for (int j = 1; j < this.m_eTranspose.length; j++) {
      tempval += (this.m_eTranspose[j][i] * inst.value(j - 1));
    }
    newVals[i] = tempval;
    // undo the standardization (scale) only when the data was standardized
    if (!this.m_center) {
      newVals[i] *= this.m_stdDevs[i];
    }
    // undo the centering
    newVals[i] += this.m_means[i];
  }
  // preserve the sparse/dense representation of the input
  if (inst instanceof SparseInstance) {
    return new SparseInstance(inst.weight(), newVals);
  } else {
    return new DenseInstance(inst.weight(), newVals);
  }
}
/**
 * Transform an instance in original (unormalized) format. Convert back to the
 * original space if requested.
 *
 * @param instance an instance in the original (unormalized) format
 * @return a transformed instance
 * @throws Exception if instance cant be transformed
 */
@Override
public Instance convertInstance(final Instance instance) throws Exception {
  if (this.m_eigenvalues == null) {
    throw new Exception("convertInstance: Principal components not " + "built yet");
  }
  double[] newVals = new double[this.m_outputNumAtts];
  Instance tempInst = (Instance) instance.copy();
  if (!instance.dataset().equalHeaders(this.m_trainHeader)) {
    throw new Exception("Can't convert instance: header's don't match: " + "PrincipalComponents\n" + instance.dataset().equalHeadersMsg(this.m_trainHeader));
  }
  // apply the same filter pipeline, in the same order, that the training
  // data went through: replace missing -> nominal to binary -> remove
  // useless/class columns -> standardize or center
  this.m_replaceMissingFilter.input(tempInst);
  this.m_replaceMissingFilter.batchFinished();
  tempInst = this.m_replaceMissingFilter.output();
  this.m_nominalToBinFilter.input(tempInst);
  this.m_nominalToBinFilter.batchFinished();
  tempInst = this.m_nominalToBinFilter.output();
  // the attribute filter only exists when columns were actually removed
  if (this.m_attributeFilter != null) {
    this.m_attributeFilter.input(tempInst);
    this.m_attributeFilter.batchFinished();
    tempInst = this.m_attributeFilter.output();
  }
  if (!this.m_center) {
    this.m_standardizeFilter.input(tempInst);
    this.m_standardizeFilter.batchFinished();
    tempInst = this.m_standardizeFilter.output();
  } else {
    this.m_centerFilter.input(tempInst);
    this.m_centerFilter.batchFinished();
    tempInst = this.m_centerFilter.output();
  }
  if (this.m_hasClass) {
    // the class value is carried over as the last output attribute
    newVals[this.m_outputNumAtts - 1] = instance.value(instance.classIndex());
  }
  // project onto the principal components in decreasing eigenvalue order
  // (m_sortedEigens is ascending, hence the downward loop), stopping once
  // the requested proportion of variance is covered
  double cumulative = 0;
  for (int i = this.m_numAttribs - 1; i >= 0; i--) {
    // cooperative cancellation point
    if (Thread.interrupted()) {
      throw new InterruptedException("Killed WEKA");
    }
    double tempval = 0.0;
    for (int j = 0; j < this.m_numAttribs; j++) {
      // cooperative cancellation point
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA");
      }
      tempval += (this.m_eigenvectors[j][this.m_sortedEigens[i]] * tempInst.value(j));
    }
    newVals[this.m_numAttribs - i - 1] = tempval;
    cumulative += this.m_eigenvalues[this.m_sortedEigens[i]];
    if ((cumulative / this.m_sumOfEigenValues) >= this.m_coverVariance) {
      break;
    }
  }
  // preserve the sparse/dense representation of the input; optionally map
  // the result back to the original attribute space
  if (!this.m_transBackToOriginal) {
    if (instance instanceof SparseInstance) {
      return new SparseInstance(instance.weight(), newVals);
    } else {
      return new DenseInstance(instance.weight(), newVals);
    }
  } else {
    if (instance instanceof SparseInstance) {
      return this.convertInstanceToOriginal(new SparseInstance(instance.weight(), newVals));
    } else {
      return this.convertInstanceToOriginal(new DenseInstance(instance.weight(), newVals));
    }
  }
}
/**
* Set up the header for the PC->original space dataset
*
* @return the output format
* @throws Exception if something goes wrong
*/
private Instances setOutputFormatOriginal() throws Exception {
  // One numeric attribute per original input attribute, named identically.
  ArrayList<Attribute> atts = new ArrayList<Attribute>(this.m_numAttribs);
  for (int index = 0; index < this.m_numAttribs; index++) {
    atts.add(new Attribute(this.m_trainInstances.attribute(index).name()));
  }
  // Carry the class attribute over (as a copy) when the data is supervised.
  if (this.m_hasClass) {
    atts.add((Attribute) this.m_trainHeader.classAttribute().copy());
  }
  Instances format = new Instances(this.m_trainHeader.relationName() + "->PC->original space", atts, 0);
  // The class, when present, is always the last attribute of the new header.
  if (this.m_hasClass) {
    format.setClassIndex(format.numAttributes() - 1);
  }
  return format;
}
/**
* Set the format for the transformed data
*
* @return a set of empty Instances (header only) in the new format
* @throws Exception if the output format can't be set
*/
private Instances setOutputFormat() throws Exception {
  // Nothing to describe before the eigen-decomposition has been computed.
  if (this.m_eigenvalues == null) {
    return null;
  }
  double cumulative = 0.0;
  ArrayList<Attribute> attributes = new ArrayList<Attribute>();
  // Walk the eigenvalues from largest (end of the sorted index array) to
  // smallest, creating one synthetic attribute per retained component.
  for (int i = this.m_numAttribs - 1; i >= 0; i--) {
    // XXX thread interrupted; throw exception
    if (Thread.interrupted()) {
      throw new InterruptedException("Killed WEKA");
    }
    StringBuffer attName = new StringBuffer();
    // build array of coefficients (negated magnitudes so Utils.sort yields
    // descending order of magnitude)
    double[] coeff_mags = new double[this.m_numAttribs];
    for (int j = 0; j < this.m_numAttribs; j++) {
      coeff_mags[j] = -Math.abs(this.m_eigenvectors[j][this.m_sortedEigens[i]]);
    }
    int num_attrs = (this.m_maxAttrsInName > 0) ? Math.min(this.m_numAttribs, this.m_maxAttrsInName) : this.m_numAttribs;
    // this array contains the sorted indices of the coefficients
    int[] coeff_inds;
    // BUGFIX: this previously tested m_numAttribs > 0, which is always true,
    // so the else-branch was unreachable; the intent (per the comments and
    // upstream WEKA) is to switch on m_maxAttrsInName.
    if (this.m_maxAttrsInName > 0) {
      // if m_maxAttrsInName > 0, sort coefficients by decreasing magnitude
      coeff_inds = Utils.sort(coeff_mags);
    } else {
      // if m_maxAttrsInName <= 0, use all coeffs in original order
      coeff_inds = new int[this.m_numAttribs];
      for (int j = 0; j < this.m_numAttribs; j++) {
        coeff_inds[j] = j;
      }
    }
    // build final attName string, e.g. "0.544attr1+0.4attr2-0.3attr3..."
    for (int j = 0; j < num_attrs; j++) {
      double coeff_value = this.m_eigenvectors[coeff_inds[j]][this.m_sortedEigens[i]];
      if (j > 0 && coeff_value >= 0) {
        attName.append("+");
      }
      attName.append(Utils.doubleToString(coeff_value, 5, 3) + this.m_trainInstances.attribute(coeff_inds[j]).name());
    }
    if (num_attrs < this.m_numAttribs) {
      attName.append("...");
    }
    attributes.add(new Attribute(attName.toString()));
    cumulative += this.m_eigenvalues[this.m_sortedEigens[i]];
    // Stop adding components once the requested variance has been covered.
    if ((cumulative / this.m_sumOfEigenValues) >= this.m_coverVariance) {
      break;
    }
  }
  if (this.m_hasClass) {
    attributes.add((Attribute) this.m_trainHeader.classAttribute().copy());
  }
  Instances outputFormat = new Instances(this.m_trainInstances.relationName() + "_principal components", attributes, 0);
  // set the class to be the last attribute if necessary
  if (this.m_hasClass) {
    outputFormat.setClassIndex(outputFormat.numAttributes() - 1);
  }
  this.m_outputNumAtts = outputFormat.numAttributes();
  return outputFormat;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
  // The keyword is expanded by version control at release time.
  final String revisionKeyword = "$Revision$";
  return RevisionUtils.extract(revisionKeyword);
}
/**
* Main method for testing this class
*
* @param argv should contain the command line arguments to the
* evaluator/transformer (see AttributeSelection)
*/
public static void main(final String[] argv) {
  // Delegate option parsing and execution to the standard evaluator runner.
  final PrincipalComponents evaluator = new PrincipalComponents();
  runEvaluator(evaluator, argv);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/RankedOutputSearch.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* RankedOutputSearch.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.attributeSelection;
/**
* Interface for search methods capable of producing a
* ranked list of attributes.
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
*/
public interface RankedOutputSearch {

  // ===============
  // Public methods.
  // ===============

  /**
   * Returns an X by 2 array of attribute indexes and corresponding
   * evaluations, ordered from best (highest merit) to worst.
   *
   * @return the ranked list of attribute indexes in an array of ints
   * @exception Exception if the ranking can't be produced
   */
  double[][] rankedAttributes() throws Exception;

  /**
   * Sets a threshold by which attributes can be discarded from the
   * ranking. This threshold is used by the AttributeSelection module
   * which does the actual discarding of attributes---the implementer
   * of this method needs only to provide a variable in which to store the
   * supplied threshold. -Double.MAX_VALUE is reserved to mean no threshold,
   * i.e. retain all attributes.
   *
   * @param threshold the threshold.
   */
  void setThreshold(double threshold);

  /**
   * Gets the threshold by which attributes can be discarded. Discarding
   * of attributes is done by the AttributeSelection module using the
   * threshold returned by this method.
   *
   * @return a threshold by which to discard attributes
   */
  double getThreshold();

  /**
   * Specify the number of attributes to select from the ranked list. A value
   * &lt; 0 indicates that all attributes are to be retained. NumToSelect has
   * precedence over threshold, i.e. if there is a non -1 value for NumToSelect
   * then this will take precedence over any threshold value.
   *
   * @param numToSelect the number of attributes to retain
   */
  void setNumToSelect(int numToSelect);

  /**
   * Gets the user specified number of attributes to be retained.
   *
   * @return the number of attributes to retain
   */
  int getNumToSelect();

  /**
   * Gets the calculated number of attributes to retain. This is the
   * actual number of attributes to retain. This is the same as
   * getNumToSelect if the user specifies a number which is not less
   * than zero. Otherwise it should be the number of attributes in the
   * (potentially transformed) data.
   *
   * @return the calculated number of attributes to retain
   */
  int getCalculatedNumToSelect();

  /**
   * Sets whether or not ranking is to be performed.
   * When a search method is capable of producing a ranked list
   * of attributes, the user has the choice of seeing the results of a
   * normal search or seeing a ranked list.
   *
   * @param doRanking true if ranked list is to be produced
   */
  void setGenerateRanking(boolean doRanking);

  /**
   * Gets whether the user has opted to see a ranked list of
   * attributes rather than the normal result of the search.
   *
   * @return true if a ranked list has been requested.
   */
  boolean getGenerateRanking();
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/Ranker.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Ranker.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.attributeSelection;
import java.util.Enumeration;
import java.util.Vector;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.Range;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* <!-- globalinfo-start --> Ranker : <br/>
* <br/>
* Ranks attributes by their individual evaluations. Use in conjunction with
* attribute evaluators (ReliefF, GainRatio, Entropy etc).<br/>
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -P <start set>
* Specify a starting set of attributes.
* Eg. 1,3,5-7.
* Any starting attributes specified are
* ignored during the ranking.
* </pre>
*
* <pre>
* -T <threshold>
* Specify a theshold by which attributes
* may be discarded from the ranking.
* </pre>
*
* <pre>
* -N <num to select>
* Specify number of attributes to select
* </pre>
*
* <!-- options-end -->
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
*/
public class Ranker extends ASSearch implements RankedOutputSearch, StartSetHandler, OptionHandler {

  /** for serialization */
  static final long serialVersionUID = -9086714848510751934L;

  /** Holds the starting set as an array of attributes */
  private int[] m_starting;

  /** Holds the start set for the search as a range */
  private Range m_startRange;

  /** Holds the ordered list of attributes */
  private int[] m_attributeList;

  /** Holds the list of attribute merit scores */
  private double[] m_attributeMerit;

  /** Data has class attribute---if unsupervised evaluator then no class */
  private boolean m_hasClass;

  /** Class index of the data if supervised evaluator */
  private int m_classIndex;

  /** The number of attributes */
  private int m_numAttribs;

  /**
   * A threshold by which to discard attributes---used by the AttributeSelection
   * module
   */
  private double m_threshold;

  /**
   * The number of attributes to select. -1 indicates that all attributes are to
   * be retained. Has precedence over m_threshold
   */
  private int m_numToSelect = -1;

  /** Used to compute the number to select */
  private int m_calculatedNumToSelect = -1;

  /**
   * Returns a string describing this search method
   *
   * @return a description of the search suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String globalInfo() {
    return "Ranker : \n\nRanks attributes by their individual evaluations. " + "Use in conjunction with attribute evaluators (ReliefF, GainRatio, " + "Entropy etc).\n";
  }

  /**
   * Constructor
   */
  public Ranker() {
    this.resetOptions();
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String numToSelectTipText() {
    return "Specify the number of attributes to retain. The default value " + "(-1) indicates that all attributes are to be retained. Use either " + "this option or a threshold to reduce the attribute set.";
  }

  /**
   * Specify the number of attributes to select from the ranked list. -1
   * indicates that all attributes are to be retained.
   *
   * @param n the number of attributes to retain
   */
  @Override
  public void setNumToSelect(final int n) {
    this.m_numToSelect = n;
  }

  /**
   * Gets the number of attributes to be retained.
   *
   * @return the number of attributes to retain
   */
  @Override
  public int getNumToSelect() {
    return this.m_numToSelect;
  }

  /**
   * Gets the calculated number to select. This might be computed from a
   * threshold, or if a value &lt; 0 is set as the number to select then it is
   * set to the number of attributes in the (transformed) data.
   *
   * @return the calculated number of attributes to select
   */
  @Override
  public int getCalculatedNumToSelect() {
    if (this.m_numToSelect >= 0) {
      // never ask for more attributes than were actually ranked
      this.m_calculatedNumToSelect = Math.min(this.m_numToSelect, this.m_attributeMerit.length);
    }
    return this.m_calculatedNumToSelect;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String thresholdTipText() {
    return "Set threshold by which attributes can be discarded. Default value " + "results in no attributes being discarded. Use either this option or " + "numToSelect to reduce the attribute set.";
  }

  /**
   * Set the threshold by which the AttributeSelection module can discard
   * attributes.
   *
   * @param threshold the threshold.
   */
  @Override
  public void setThreshold(final double threshold) {
    this.m_threshold = threshold;
  }

  /**
   * Returns the threshold so that the AttributeSelection module can discard
   * attributes from the ranking.
   *
   * @return the threshold below which attributes are discarded
   */
  @Override
  public double getThreshold() {
    return this.m_threshold;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String generateRankingTipText() {
    return "A constant option. Ranker is only capable of generating " + " attribute rankings.";
  }

  /**
   * This is a dummy set method---Ranker is ONLY capable of producing a ranked
   * list of attributes for attribute evaluators.
   *
   * @param doRank this parameter is N/A and is ignored
   */
  @Override
  public void setGenerateRanking(final boolean doRank) {
  }

  /**
   * This is a dummy method. Ranker can ONLY be used with attribute evaluators
   * and as such can only produce a ranked list of attributes
   *
   * @return true all the time.
   */
  @Override
  public boolean getGenerateRanking() {
    return true;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String startSetTipText() {
    return "Specify a set of attributes to ignore. " + " When generating the ranking, Ranker will not evaluate the attributes " + " in this list. " + "This is specified as a comma "
        + "seperated list off attribute indexes starting at 1. It can include " + "ranges. Eg. 1,2,5-9,17.";
  }

  /**
   * Sets a starting set of attributes for the search. It is the search method's
   * responsibility to report this start set (if any) in its toString() method.
   *
   * @param startSet a string containing a list of attributes (and or ranges),
   *          eg. 1,2,6,10-15.
   * @throws Exception if start set can't be set.
   */
  @Override
  public void setStartSet(final String startSet) throws Exception {
    this.m_startRange.setRanges(startSet);
  }

  /**
   * Returns a list of attributes (and or attribute ranges) as a String
   *
   * @return a list of attributes (and or attribute ranges)
   */
  @Override
  public String getStartSet() {
    return this.m_startRange.getRanges();
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   **/
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> newVector = new Vector<Option>(3);
    newVector.addElement(new Option("\tSpecify a starting set of attributes.\n" + "\tEg. 1,3,5-7.\n" + "\tAny starting attributes specified are\n" + "\tignored during the ranking.", "P", 1, "-P <start set>"));
    newVector.addElement(new Option("\tSpecify a theshold by which attributes\n" + "\tmay be discarded from the ranking.", "T", 1, "-T <threshold>"));
    newVector.addElement(new Option("\tSpecify number of attributes to select", "N", 1, "-N <num to select>"));
    return newVector.elements();
  }

  /**
   * Parses a given list of options.
   * <p/>
   *
   * <!-- options-start --> Valid options are:
   * <p/>
   *
   * <pre>
   * -P &lt;start set&gt;
   *  Specify a starting set of attributes.
   *  Eg. 1,3,5-7.
   *  Any starting attributes specified are
   *  ignored during the ranking.
   * </pre>
   *
   * <pre>
   * -T &lt;threshold&gt;
   *  Specify a theshold by which attributes
   *  may be discarded from the ranking.
   * </pre>
   *
   * <pre>
   * -N &lt;num to select&gt;
   *  Specify number of attributes to select
   * </pre>
   *
   * <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  @Override
  public void setOptions(final String[] options) throws Exception {
    String optionString;
    this.resetOptions();
    optionString = Utils.getOption('P', options);
    if (optionString.length() != 0) {
      this.setStartSet(optionString);
    }
    optionString = Utils.getOption('T', options);
    if (optionString.length() != 0) {
      this.setThreshold(Double.parseDouble(optionString));
    }
    optionString = Utils.getOption('N', options);
    if (optionString.length() != 0) {
      this.setNumToSelect(Integer.parseInt(optionString));
    }
  }

  /**
   * Gets the current settings of Ranker.
   *
   * @return an array of strings suitable for passing to setOptions()
   */
  @Override
  public String[] getOptions() {
    Vector<String> options = new Vector<String>();
    if (!(this.getStartSet().equals(""))) {
      options.add("-P");
      options.add("" + this.startSetToString());
    }
    options.add("-T");
    options.add("" + this.getThreshold());
    options.add("-N");
    options.add("" + this.getNumToSelect());
    return options.toArray(new String[0]);
  }

  /**
   * converts the array of starting attributes to a string. This is used by
   * getOptions to return the actual attributes specified as the starting set.
   * This is better than using m_startRanges.getRanges() as the same start set
   * can be specified in different ways from the command line---eg 1,2,3 == 1-3.
   * This is to ensure that stuff that is stored in a database is comparable.
   *
   * @return a comma seperated list of individual attribute numbers as a String
   */
  private String startSetToString() {
    StringBuffer FString = new StringBuffer();
    boolean didPrint;
    if (this.m_starting == null) {
      return this.getStartSet();
    }
    for (int i = 0; i < this.m_starting.length; i++) {
      didPrint = false;
      // omit the class attribute's entry from the printed list
      if (!this.m_hasClass || i != this.m_classIndex) {
        FString.append((this.m_starting[i] + 1));
        didPrint = true;
      }
      // separate printed entries with commas (no trailing comma)
      if (i != (this.m_starting.length - 1) && didPrint) {
        FString.append(",");
      }
    }
    return FString.toString();
  }

  /**
   * Kind of a dummy search algorithm. Calls a Attribute evaluator to evaluate
   * each attribute not included in the startSet and then sorts them to produce
   * a ranked list of attributes.
   *
   * @param ASEval the attribute evaluator to guide the search
   * @param data the training instances.
   * @return an array (not necessarily ordered) of selected attribute indexes
   * @throws Exception if the search can't be completed
   */
  @Override
  public int[] search(final ASEvaluation ASEval, Instances data) throws Exception {
    int i, j;
    if (!(ASEval instanceof AttributeEvaluator)) {
      // BUGFIX: message previously rendered as "... is not aAttribute evaluator!"
      throw new Exception(ASEval.getClass().getName() + " is not an Attribute evaluator!");
    }
    this.m_numAttribs = data.numAttributes();
    if (ASEval instanceof UnsupervisedAttributeEvaluator) {
      this.m_hasClass = false;
    } else {
      this.m_classIndex = data.classIndex();
      if (this.m_classIndex >= 0) {
        this.m_hasClass = true;
      } else {
        this.m_hasClass = false;
      }
    }
    // get the transformed data and check to see if the transformer
    // preserves a class index
    if (ASEval instanceof AttributeTransformer) {
      data = ((AttributeTransformer) ASEval).transformedHeader();
      if (this.m_classIndex >= 0 && data.classIndex() >= 0) {
        this.m_classIndex = data.classIndex();
        this.m_hasClass = true;
      }
    }
    this.m_startRange.setUpper(this.m_numAttribs - 1);
    if (!(this.getStartSet().equals(""))) {
      this.m_starting = this.m_startRange.getSelection();
    }
    // sl = number of attributes excluded from the ranking (start set + class)
    int sl = 0;
    if (this.m_starting != null) {
      sl = this.m_starting.length;
    }
    if ((this.m_starting != null) && (this.m_hasClass == true)) {
      // see if the supplied list contains the class index
      boolean ok = false;
      for (i = 0; i < sl; i++) {
        if (this.m_starting[i] == this.m_classIndex) {
          ok = true;
          break;
        }
      }
      if (ok == false) {
        sl++;
      }
    } else {
      if (this.m_hasClass == true) {
        sl++;
      }
    }
    this.m_attributeList = new int[this.m_numAttribs - sl];
    this.m_attributeMerit = new double[this.m_numAttribs - sl];
    // add in those attributes not in the starting (omit list)
    for (i = 0, j = 0; i < this.m_numAttribs; i++) {
      // XXX thread interrupted; throw exception
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA");
      }
      if (!this.inStarting(i)) {
        this.m_attributeList[j++] = i;
      }
    }
    AttributeEvaluator ASEvaluator = (AttributeEvaluator) ASEval;
    for (i = 0; i < this.m_attributeList.length; i++) {
      this.m_attributeMerit[i] = ASEvaluator.evaluateAttribute(this.m_attributeList[i]);
    }
    // XXX thread interrupted; throw exception
    if (Thread.interrupted()) {
      throw new InterruptedException("Killed WEKA");
    }
    double[][] tempRanked = this.rankedAttributes();
    int[] rankedAttributes = new int[this.m_attributeList.length];
    for (i = 0; i < this.m_attributeList.length; i++) {
      rankedAttributes[i] = (int) tempRanked[i][0];
    }
    return rankedAttributes;
  }

  /**
   * Sorts the evaluated attribute list
   *
   * @return an array of sorted (highest eval to lowest) attribute indexes
   * @throws Exception if sorting can't be done.
   */
  @Override
  public double[][] rankedAttributes() throws Exception {
    int i, j;
    if (this.m_attributeList == null || this.m_attributeMerit == null) {
      throw new Exception("Search must be performed before a ranked " + "attribute list can be obtained");
    }
    int[] ranked = Utils.sort(this.m_attributeMerit);
    // reverse the order of the ranked indexes (Utils.sort is ascending)
    double[][] bestToWorst = new double[ranked.length][2];
    for (i = ranked.length - 1, j = 0; i >= 0; i--) {
      bestToWorst[j++][0] = ranked[i];
    }
    // XXX thread interrupted; throw exception
    if (Thread.interrupted()) {
      throw new InterruptedException("Killed WEKA");
    }
    // convert the indexes to attribute indexes
    for (i = 0; i < bestToWorst.length; i++) {
      int temp = ((int) bestToWorst[i][0]);
      bestToWorst[i][0] = this.m_attributeList[temp];
      bestToWorst[i][1] = this.m_attributeMerit[temp];
    }
    // XXX thread interrupted; throw exception
    if (Thread.interrupted()) {
      throw new InterruptedException("Killed WEKA");
    }
    // if (m_numToSelect > bestToWorst.length) {
    // throw new Exception("More attributes requested than exist in the data");
    // }
    if (this.m_numToSelect <= 0) {
      if (this.m_threshold == -Double.MAX_VALUE) {
        this.m_calculatedNumToSelect = bestToWorst.length;
      } else {
        this.determineNumToSelectFromThreshold(bestToWorst);
      }
    }
    /*
     * if (m_numToSelect > 0) { determineThreshFromNumToSelect(bestToWorst); }
     */
    return bestToWorst;
  }

  /** Counts the ranked attributes whose merit strictly exceeds the threshold. */
  private void determineNumToSelectFromThreshold(final double[][] ranking) {
    int count = 0;
    for (double[] element : ranking) {
      if (element[1] > this.m_threshold) {
        count++;
      }
    }
    this.m_calculatedNumToSelect = count;
  }

  /**
   * returns a description of the search as a String
   *
   * @return a description of the search
   */
  @Override
  public String toString() {
    StringBuffer BfString = new StringBuffer();
    BfString.append("\tAttribute ranking.\n");
    if (this.m_starting != null) {
      BfString.append("\tIgnored attributes: ");
      BfString.append(this.startSetToString());
      BfString.append("\n");
    }
    if (this.m_threshold != -Double.MAX_VALUE) {
      BfString.append("\tThreshold for discarding attributes: " + Utils.doubleToString(this.m_threshold, 8, 4) + "\n");
    }
    return BfString.toString();
  }

  /**
   * Resets stuff to default values
   */
  protected void resetOptions() {
    this.m_starting = null;
    this.m_startRange = new Range();
    this.m_attributeList = null;
    this.m_attributeMerit = null;
    this.m_threshold = -Double.MAX_VALUE;
  }

  /**
   * Checks whether the given attribute index should be excluded from the
   * ranking (i.e. it is the class attribute or part of the start set).
   *
   * @param feat the attribute index to check
   * @return true if the attribute is to be omitted
   */
  private boolean inStarting(final int feat) {
    // omit the class from the evaluation
    if (this.m_hasClass && (feat == this.m_classIndex)) {
      return true;
    }
    if (this.m_starting == null) {
      return false;
    }
    for (int element : this.m_starting) {
      if (element == feat) {
        return true;
      }
    }
    return false;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/ReliefFAttributeEval.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ReliefFAttributeEval.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.attributeSelection;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;
import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
/**
* <!-- globalinfo-start --> ReliefFAttributeEval :<br/>
* <br/>
* Evaluates the worth of an attribute by repeatedly sampling an instance and
* considering the value of the given attribute for the nearest instance of the
* same and different class. Can operate on both discrete and continuous class
* data.<br/>
* <br/>
* For more information see:<br/>
* <br/>
* Kenji Kira, Larry A. Rendell: A Practical Approach to Feature Selection. In:
* Ninth International Workshop on Machine Learning, 249-256, 1992.<br/>
* <br/>
* Igor Kononenko: Estimating Attributes: Analysis and Extensions of RELIEF. In:
* European Conference on Machine Learning, 171-182, 1994.<br/>
* <br/>
* Marko Robnik-Sikonja, Igor Kononenko: An adaptation of Relief for attribute
* estimation in regression. In: Fourteenth International Conference on Machine
* Learning, 296-304, 1997.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- technical-bibtex-start --> BibTeX:
*
* <pre>
* @inproceedings{Kira1992,
* author = {Kenji Kira and Larry A. Rendell},
* booktitle = {Ninth International Workshop on Machine Learning},
* editor = {Derek H. Sleeman and Peter Edwards},
* pages = {249-256},
* publisher = {Morgan Kaufmann},
* title = {A Practical Approach to Feature Selection},
* year = {1992}
* }
*
* @inproceedings{Kononenko1994,
* author = {Igor Kononenko},
* booktitle = {European Conference on Machine Learning},
* editor = {Francesco Bergadano and Luc De Raedt},
* pages = {171-182},
* publisher = {Springer},
* title = {Estimating Attributes: Analysis and Extensions of RELIEF},
* year = {1994}
* }
*
* @inproceedings{Robnik-Sikonja1997,
* author = {Marko Robnik-Sikonja and Igor Kononenko},
* booktitle = {Fourteenth International Conference on Machine Learning},
* editor = {Douglas H. Fisher},
* pages = {296-304},
* publisher = {Morgan Kaufmann},
* title = {An adaptation of Relief for attribute estimation in regression},
* year = {1997}
* }
* </pre>
* <p/>
* <!-- technical-bibtex-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -M <num instances>
* Specify the number of instances to
* sample when estimating attributes.
* If not specified, then all instances
* will be used.
* </pre>
*
* <pre>
* -D <seed>
* Seed for randomly sampling instances.
* (Default = 1)
* </pre>
*
* <pre>
* -K <number of neighbours>
* Number of nearest neighbours (k) used
* to estimate attribute relevances
* (Default = 10).
* </pre>
*
* <pre>
* -W
* Weight nearest neighbours by distance
* </pre>
*
* <pre>
* -A <num>
* Specify sigma value (used in an exp
* function to control how quickly
* weights for more distant instances
* decrease. Use in conjunction with -W.
* Sensible value=1/5 to 1/10 of the
* number of nearest neighbours.
* (Default = 2)
* </pre>
*
* <!-- options-end -->
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
*/
public class ReliefFAttributeEval extends ASEvaluation implements AttributeEvaluator, OptionHandler, TechnicalInformationHandler {
/** for serialization */
static final long serialVersionUID = -8422186665795839379L;
/** The training instances */
private Instances m_trainInstances;
/** The class index */
private int m_classIndex;
/** The number of attributes */
private int m_numAttribs;
/** The number of instances */
private int m_numInstances;
/** Numeric class */
private boolean m_numericClass;
/** The number of classes if class is nominal */
private int m_numClasses;
/**
* Used to hold the probability of a different class val given nearest
* instances (numeric class)
*/
private double m_ndc;
/**
* Used to hold the prob of different value of an attribute given nearest
* instances (numeric class case)
*/
private double[] m_nda;
/**
* Used to hold the prob of a different class val and different att val given
* nearest instances (numeric class case)
*/
private double[] m_ndcda;
/** Holds the weights that relief assigns to attributes */
private double[] m_weights;
/** Prior class probabilities (discrete class case) */
private double[] m_classProbs;
/**
* The number of instances to sample when estimating attributes default == -1,
* use all instances
*/
private int m_sampleM;
/** The number of nearest hits/misses */
private int m_Knn;
/** k nearest scores + instance indexes for n classes */
private double[][][] m_karray;
/** Upper bound for numeric attributes */
private double[] m_maxArray;
/** Lower bound for numeric attributes */
private double[] m_minArray;
/** Keep track of the farthest instance for each class */
private double[] m_worst;
/** Index in the m_karray of the farthest instance for each class */
private int[] m_index;
/** Number of nearest neighbours stored of each class */
private int[] m_stored;
/** Random number seed used for sampling instances */
private int m_seed;
/**
* used to (optionally) weight nearest neighbours by their distance from the
* instance in question. Each entry holds exp(-((rank(r_i, i_j)/sigma)^2))
* where rank(r_i,i_j) is the rank of instance i_j in a sequence of instances
* ordered by the distance from r_i. sigma is a user defined parameter,
* default=20
**/
private double[] m_weightsByRank;
private int m_sigma;
/** Weight by distance rather than equal weights */
private boolean m_weightByDistance;
/**
* Constructor
*/
public ReliefFAttributeEval() {
  // Initialise all options via resetOptions().
  this.resetOptions();
}
/**
* Returns a string describing this attribute evaluator
*
* @return a description of the evaluator suitable for displaying in the
* explorer/experimenter gui
*/
public String globalInfo() {
  // Assemble the GUI description; the citation list is generated from the
  // technical-information metadata so the two cannot drift apart.
  StringBuilder info = new StringBuilder();
  info.append("ReliefFAttributeEval :\n\n");
  info.append("Evaluates the worth of an attribute by ");
  info.append("repeatedly sampling an instance and considering the value of the ");
  info.append("given attribute for the nearest instance of the same and different ");
  info.append("class. Can operate on both discrete and continuous class data.\n\n");
  info.append("For more information see:\n\n");
  info.append(this.getTechnicalInformation().toString());
  return info.toString();
}
/**
 * Returns an instance of a TechnicalInformation object, containing detailed
 * information about the technical background of this class, e.g., paper
 * reference or book this class is based on.
 *
 * @return the technical information about this class
 */
@Override
public TechnicalInformation getTechnicalInformation() {
  // primary reference: the original RELIEF algorithm
  final TechnicalInformation result = new TechnicalInformation(Type.INPROCEEDINGS);
  result.setValue(Field.AUTHOR, "Kenji Kira and Larry A. Rendell");
  result.setValue(Field.TITLE, "A Practical Approach to Feature Selection");
  result.setValue(Field.BOOKTITLE, "Ninth International Workshop on Machine Learning");
  result.setValue(Field.EDITOR, "Derek H. Sleeman and Peter Edwards");
  result.setValue(Field.YEAR, "1992");
  result.setValue(Field.PAGES, "249-256");
  result.setValue(Field.PUBLISHER, "Morgan Kaufmann");
  // extension of RELIEF to multi-class and noisy data
  TechnicalInformation extra = result.add(Type.INPROCEEDINGS);
  extra.setValue(Field.AUTHOR, "Igor Kononenko");
  extra.setValue(Field.TITLE, "Estimating Attributes: Analysis and Extensions of RELIEF");
  extra.setValue(Field.BOOKTITLE, "European Conference on Machine Learning");
  extra.setValue(Field.EDITOR, "Francesco Bergadano and Luc De Raedt");
  extra.setValue(Field.YEAR, "1994");
  extra.setValue(Field.PAGES, "171-182");
  extra.setValue(Field.PUBLISHER, "Springer");
  // adaptation of ReliefF to regression (numeric class)
  extra = result.add(Type.INPROCEEDINGS);
  extra.setValue(Field.AUTHOR, "Marko Robnik-Sikonja and Igor Kononenko");
  extra.setValue(Field.TITLE, "An adaptation of Relief for attribute estimation in regression");
  extra.setValue(Field.BOOKTITLE, "Fourteenth International Conference on Machine Learning");
  extra.setValue(Field.EDITOR, "Douglas H. Fisher");
  extra.setValue(Field.YEAR, "1997");
  extra.setValue(Field.PAGES, "296-304");
  extra.setValue(Field.PUBLISHER, "Morgan Kaufmann");
  return result;
}
/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 **/
@Override
public Enumeration<Option> listOptions() {
  // five options are published below, so presize the vector accordingly
  // (was presized to 4, forcing an unnecessary internal grow)
  Vector<Option> newVector = new Vector<Option>(5);
  newVector.addElement(new Option("\tSpecify the number of instances to\n" + "\tsample when estimating attributes.\n" + "\tIf not specified, then all instances\n" + "\twill be used.", "M", 1, "-M <num instances>"));
  newVector.addElement(new Option("\tSeed for randomly sampling instances.\n" + "\t(Default = 1)", "D", 1, "-D <seed>"));
  newVector.addElement(new Option("\tNumber of nearest neighbours (k) used\n" + "\tto estimate attribute relevances\n" + "\t(Default = 10).", "K", 1, "-K <number of neighbours>"));
  newVector.addElement(new Option("\tWeight nearest neighbours by distance", "W", 0, "-W"));
  newVector.addElement(new Option("\tSpecify sigma value (used in an exp\n" + "\tfunction to control how quickly\n" + "\tweights for more distant instances\n" + "\tdecrease. Use in conjunction with -W.\n"
      + "\tSensible value=1/5 to 1/10 of the\n" + "\tnumber of nearest neighbours.\n" + "\t(Default = 2)", "A", 1, "-A <num>"));
  return newVector.elements();
}
/**
 * Parses a given list of options.
 * <p/>
 *
 * <!-- options-start --> Valid options are:
 * <p/>
 *
 * <pre>
 * -M &lt;num instances&gt;
 *  Specify the number of instances to
 *  sample when estimating attributes.
 *  If not specified, then all instances
 *  will be used.
 * </pre>
 *
 * <pre>
 * -D &lt;seed&gt;
 *  Seed for randomly sampling instances.
 *  (Default = 1)
 * </pre>
 *
 * <pre>
 * -K &lt;number of neighbours&gt;
 *  Number of nearest neighbours (k) used
 *  to estimate attribute relevances
 *  (Default = 10).
 * </pre>
 *
 * <pre>
 * -W
 *  Weight nearest neighbours by distance
 * </pre>
 *
 * <pre>
 * -A &lt;num&gt;
 *  Specify sigma value (used in an exp
 *  function to control how quickly
 *  weights for more distant instances
 *  decrease. Use in conjunction with -W.
 *  Sensible value=1/5 to 1/10 of the
 *  number of nearest neighbours.
 *  (Default = 2)
 * </pre>
 *
 * <!-- options-end -->
 *
 * @param options the list of options as an array of strings
 * @throws Exception if an option is not supported
 */
@Override
public void setOptions(final String[] options) throws Exception {
String optionString;
// start from defaults so any option that is absent keeps its default value
this.resetOptions();
this.setWeightByDistance(Utils.getFlag('W', options));
optionString = Utils.getOption('M', options);
if (optionString.length() != 0) {
this.setSampleSize(Integer.parseInt(optionString));
}
optionString = Utils.getOption('D', options);
if (optionString.length() != 0) {
this.setSeed(Integer.parseInt(optionString));
}
optionString = Utils.getOption('K', options);
if (optionString.length() != 0) {
this.setNumNeighbours(Integer.parseInt(optionString));
}
// note: supplying -A implies -W, even when -W itself was not given
optionString = Utils.getOption('A', options);
if (optionString.length() != 0) {
this.setWeightByDistance(true); // turn on weighting by distance
this.setSigma(Integer.parseInt(optionString));
}
}
/**
 * Returns the tip text for the sigma property.
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String sigmaTipText() {
  return "Set influence of nearest neighbours. Used in an exp function to "
      + "control how quickly weights decrease for more distant instances. "
      + "Use in conjunction with weightByDistance. Sensible values = 1/5 to "
      + "1/10 the number of nearest neighbours.";
}

/**
 * Sets the sigma value used when weighting neighbours by distance.
 *
 * @param s the value of sigma (must be &gt; 0)
 * @throws Exception if s is not positive
 */
public void setSigma(final int s) throws Exception {
  if (s > 0) {
    this.m_sigma = s;
    return;
  }
  throw new Exception("value of sigma must be > 0!");
}

/**
 * Gets the current value of sigma.
 *
 * @return the sigma value.
 */
public int getSigma() {
  return this.m_sigma;
}
/**
 * Returns the tip text for the numNeighbours property.
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String numNeighboursTipText() {
  return "Number of nearest neighbours for attribute estimation.";
}

/**
 * Sets the number of nearest neighbours (k) to consider.
 *
 * @param n the number of nearest neighbours.
 */
public void setNumNeighbours(final int n) {
  this.m_Knn = n;
}

/**
 * Gets the number of nearest neighbours (k) currently in use.
 *
 * @return the number of nearest neighbours
 */
public int getNumNeighbours() {
  return this.m_Knn;
}
/**
 * Returns the tip text for the seed property.
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String seedTipText() {
  return "Random seed for sampling instances.";
}

/**
 * Sets the random number seed used when sampling instances.
 *
 * @param s the random number seed.
 */
public void setSeed(final int s) {
  this.m_seed = s;
}

/**
 * Gets the random number seed used when sampling instances.
 *
 * @return the random number seed.
 */
public int getSeed() {
  return this.m_seed;
}
/**
 * Returns the tip text for the sampleSize property.
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String sampleSizeTipText() {
  return "Number of instances to sample. Default (-1) indicates that all "
      + "instances will be used for attribute estimation.";
}

/**
 * Sets the number of instances to sample for attribute estimation.
 *
 * @param s the number of instances to sample.
 */
public void setSampleSize(final int s) {
  this.m_sampleM = s;
}

/**
 * Gets the number of instances used for estimating attributes.
 *
 * @return the number of instances.
 */
public int getSampleSize() {
  return this.m_sampleM;
}
/**
 * Returns the tip text for the weightByDistance property.
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String weightByDistanceTipText() {
  return "Weight nearest neighbours by their distance.";
}

/**
 * Sets the nearest-neighbour weighting method.
 *
 * @param b true if nearest neighbours are to be weighted by distance.
 */
public void setWeightByDistance(final boolean b) {
  this.m_weightByDistance = b;
}

/**
 * Gets whether nearest neighbours are being weighted by distance.
 *
 * @return true if neighbours are weighted by distance
 */
public boolean getWeightByDistance() {
  return this.m_weightByDistance;
}
/**
 * Gets the current settings of ReliefFAttributeEval.
 *
 * @return an array of strings suitable for passing to setOptions()
 */
@Override
public String[] getOptions() {
  final Vector<String> opts = new Vector<String>();
  final boolean byDistance = this.getWeightByDistance();
  if (byDistance) {
    opts.add("-W");
  }
  opts.add("-M");
  opts.add(String.valueOf(this.getSampleSize()));
  opts.add("-D");
  opts.add(String.valueOf(this.getSeed()));
  opts.add("-K");
  opts.add(String.valueOf(this.getNumNeighbours()));
  // sigma is only meaningful in combination with distance weighting
  if (byDistance) {
    opts.add("-A");
    opts.add(String.valueOf(this.getSigma()));
  }
  return opts.toArray(new String[0]);
}
/**
 * Return a description of the ReliefF attribute evaluator.
 *
 * @return a description of the evaluator as a String.
 */
@Override
public String toString() {
  if (this.m_trainInstances == null) {
    return "ReliefF feature evaluator has not been built yet\n";
  }
  final StringBuilder buf = new StringBuilder();
  buf.append("\tReliefF Ranking Filter");
  buf.append("\n\tInstances sampled: ");
  // -1 means "use every training instance"
  buf.append(this.m_sampleM == -1 ? "all\n" : this.m_sampleM + "\n");
  buf.append("\tNumber of nearest neighbours (k): " + this.m_Knn + "\n");
  if (this.m_weightByDistance) {
    buf.append("\tExponentially decreasing (with distance) " + "influence for\n" + "\tnearest neighbours. Sigma: " + this.m_sigma + "\n");
  } else {
    buf.append("\tEqual influence nearest neighbours\n");
  }
  return buf.toString();
}
/**
 * Returns the capabilities of this evaluator.
 *
 * @return the capabilities of this evaluator
 * @see Capabilities
 */
@Override
public Capabilities getCapabilities() {
  final Capabilities result = super.getCapabilities();
  result.disableAll();
  // attribute capabilities
  for (final Capability cap : new Capability[] { Capability.NOMINAL_ATTRIBUTES, Capability.NUMERIC_ATTRIBUTES, Capability.DATE_ATTRIBUTES, Capability.MISSING_VALUES }) {
    result.enable(cap);
  }
  // class capabilities
  for (final Capability cap : new Capability[] { Capability.NOMINAL_CLASS, Capability.NUMERIC_CLASS, Capability.DATE_CLASS, Capability.MISSING_CLASS_VALUES }) {
    result.enable(cap);
  }
  return result;
}
/**
 * Initializes a ReliefF attribute evaluator. Computes the final attribute
 * weights (m_weights) by repeatedly sampling instances, finding their
 * nearest hits/misses and updating the per-attribute statistics.
 *
 * @param data set of instances serving as training data
 * @throws Exception if the evaluator has not been generated successfully
 */
@Override
public void buildEvaluator(final Instances data) throws Exception {
int z, totalInstances;
Random r = new Random(this.m_seed);
// can evaluator handle data?
this.getCapabilities().testWithFail(data);
this.m_trainInstances = data;
this.m_classIndex = this.m_trainInstances.classIndex();
this.m_numAttribs = this.m_trainInstances.numAttributes();
this.m_numInstances = this.m_trainInstances.numInstances();
if (this.m_trainInstances.attribute(this.m_classIndex).isNumeric()) {
this.m_numericClass = true;
} else {
this.m_numericClass = false;
}
// a numeric class is treated as one pseudo-class (regression variant of ReliefF)
if (!this.m_numericClass) {
this.m_numClasses = this.m_trainInstances.attribute(this.m_classIndex).numValues();
} else {
this.m_ndc = 0;
this.m_numClasses = 1;
this.m_nda = new double[this.m_numAttribs];
this.m_ndcda = new double[this.m_numAttribs];
}
if (this.m_weightByDistance) // set up the rank based weights
{
// weight for rank i is exp(-(i/sigma)^2): closer ranks count more
this.m_weightsByRank = new double[this.m_Knn];
for (int i = 0; i < this.m_Knn; i++) {
this.m_weightsByRank[i] = Math.exp(-((i / (double) this.m_sigma) * (i / (double) this.m_sigma)));
}
}
// the final attribute weights
this.m_weights = new double[this.m_numAttribs];
// num classes (1 for numeric class) knn neighbours,
// and 0 = distance, 1 = instance index
this.m_karray = new double[this.m_numClasses][this.m_Knn][2];
if (!this.m_numericClass) {
// estimate prior class probabilities from the non-missing class values
this.m_classProbs = new double[this.m_numClasses];
for (int i = 0; i < this.m_numInstances; i++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
if (!this.m_trainInstances.instance(i).classIsMissing()) {
this.m_classProbs[(int) this.m_trainInstances.instance(i).value(this.m_classIndex)]++;
}
}
for (int i = 0; i < this.m_numClasses; i++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
this.m_classProbs[i] /= this.m_numInstances;
}
}
this.m_worst = new double[this.m_numClasses];
this.m_index = new int[this.m_numClasses];
this.m_stored = new int[this.m_numClasses];
this.m_minArray = new double[this.m_numAttribs];
this.m_maxArray = new double[this.m_numAttribs];
// NaN marks "no value seen yet" for the numeric attribute bounds
for (int i = 0; i < this.m_numAttribs; i++) {
this.m_minArray[i] = this.m_maxArray[i] = Double.NaN;
}
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
for (int i = 0; i < this.m_numInstances; i++) {
this.updateMinMax(this.m_trainInstances.instance(i));
}
// sample m_sampleM instances, or all of them if m_sampleM is negative or too large
if ((this.m_sampleM > this.m_numInstances) || (this.m_sampleM < 0)) {
totalInstances = this.m_numInstances;
} else {
totalInstances = this.m_sampleM;
}
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
// process each instance, updating attribute weights
for (int i = 0; i < totalInstances; i++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
// sequential pass when using all instances, otherwise random sampling
if (totalInstances == this.m_numInstances) {
z = i;
} else {
z = r.nextInt() % this.m_numInstances;
}
if (z < 0) {
z *= -1;
}
// skip instances with a missing class value
if (!(this.m_trainInstances.instance(z).isMissing(this.m_classIndex))) {
// first clear the knn and worst index stuff for the classes
for (int j = 0; j < this.m_numClasses; j++) {
this.m_index[j] = this.m_stored[j] = 0;
for (int k = 0; k < this.m_Knn; k++) {
this.m_karray[j][k][0] = this.m_karray[j][k][1] = 0;
}
}
this.findKHitMiss(z);
if (this.m_numericClass) {
this.updateWeightsNumericClass(z);
} else {
this.updateWeightsDiscreteClass(z);
}
}
}
// now scale weights by 1/m_numInstances (nominal class) or
// calculate weights numeric class
// System.out.println("num inst:"+m_numInstances+" r_ndc:"+r_ndc);
for (int i = 0; i < this.m_numAttribs; i++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
if (i != this.m_classIndex) {
if (this.m_numericClass) {
this.m_weights[i] = this.m_ndcda[i] / this.m_ndc - ((this.m_nda[i] - this.m_ndcda[i]) / (totalInstances - this.m_ndc));
} else {
this.m_weights[i] *= (1.0 / totalInstances);
}
// System.out.println(r_weights[i]);
}
}
}
/**
 * Evaluates an individual attribute using ReliefF's instance based approach.
 * The actual work is done by buildEvaluator which evaluates all features;
 * this method simply returns the precomputed weight.
 *
 * @param attribute the index of the attribute to be evaluated
 * @return the weight computed for the attribute by buildEvaluator
 * @throws Exception if the attribute could not be evaluated
 */
@Override
public double evaluateAttribute(final int attribute) throws Exception {
return this.m_weights[attribute];
}
/**
 * Resets all options to their default values.
 */
protected void resetOptions() {
  this.m_trainInstances = null;
  this.m_seed = 1;
  this.m_sampleM = -1; // -1 => use every training instance
  this.m_Knn = 10;
  this.m_sigma = 2;
  this.m_weightByDistance = false;
}
/**
 * Normalizes a given value of a numeric attribute into [0, 1] using the
 * observed min/max bounds.
 *
 * @param x the value to be normalized
 * @param i the attribute's index
 * @return the normalized value (0 when no values have been seen yet or
 *         when the attribute's range is empty)
 */
private double norm(final double x, final int i) {
  final double lo = this.m_minArray[i];
  final double hi = this.m_maxArray[i];
  // NaN means no value was ever observed; equal bounds mean a zero range
  if (Double.isNaN(lo) || Utils.eq(hi, lo)) {
    return 0;
  }
  return (x - lo) / (hi - lo);
}
/**
 * Updates the minimum and maximum values for all the attributes based on a
 * new instance. Only numeric, non-missing values are considered; bounds
 * still equal to NaN are initialised on the first value seen.
 *
 * Fix: the cancellation InterruptedException used to be swallowed by the
 * blanket catch (Exception) below, defeating the interrupt mechanism used
 * throughout this class; it is now rethrown.
 *
 * @param instance the new instance
 * @throws InterruptedException if the thread has been interrupted
 */
private void updateMinMax(final Instance instance) throws InterruptedException {
  // for (int j = 0; j < m_numAttribs; j++) {
  try {
    for (int j = 0; j < instance.numValues(); j++) {
      // XXX thread interrupted; throw exception
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA");
      }
      if ((instance.attributeSparse(j).isNumeric()) && (!instance.isMissingSparse(j))) {
        if (Double.isNaN(this.m_minArray[instance.index(j)])) {
          // first observed value initialises both bounds
          this.m_minArray[instance.index(j)] = instance.valueSparse(j);
          this.m_maxArray[instance.index(j)] = instance.valueSparse(j);
        } else {
          if (instance.valueSparse(j) < this.m_minArray[instance.index(j)]) {
            this.m_minArray[instance.index(j)] = instance.valueSparse(j);
          } else {
            if (instance.valueSparse(j) > this.m_maxArray[instance.index(j)]) {
              this.m_maxArray[instance.index(j)] = instance.valueSparse(j);
            }
          }
        }
      }
    }
  } catch (InterruptedException ie) {
    // propagate cancellation instead of logging it as an ordinary error
    throw ie;
  } catch (Exception ex) {
    // best-effort: report the problem but do not abort evaluator construction
    System.err.println(ex);
    ex.printStackTrace();
  }
}
/**
 * Computes the difference between two given attribute values.
 *
 * @param index the index of the attribute the values belong to
 * @param val1 the first value
 * @param val2 the second value
 * @return the difference, normalized to [0, 1] for numeric attributes;
 *         0 for attribute types other than nominal/numeric
 */
private double difference(final int index, final double val1, final double val2) {
switch (this.m_trainInstances.attribute(index).type()) {
case Attribute.NOMINAL:
// If attribute is nominal
if (Utils.isMissingValue(val1) || Utils.isMissingValue(val2)) {
// a missing value differs from anything with probability 1 - 1/numValues
return (1.0 - (1.0 / (this.m_trainInstances.attribute(index).numValues())));
} else if ((int) val1 != (int) val2) {
return 1;
} else {
return 0;
}
case Attribute.NUMERIC:
// If attribute is numeric
if (Utils.isMissingValue(val1) || Utils.isMissingValue(val2)) {
if (Utils.isMissingValue(val1) && Utils.isMissingValue(val2)) {
// both missing: assume maximal difference
return 1;
} else {
double diff;
if (Utils.isMissingValue(val2)) {
diff = this.norm(val1, index);
} else {
diff = this.norm(val2, index);
}
// pessimistic estimate: flip so the result is always >= 0.5
if (diff < 0.5) {
diff = 1.0 - diff;
}
return diff;
}
} else {
return Math.abs(this.norm(val1, index) - this.norm(val2, index));
}
default:
return 0;
}
}
/**
 * Calculates the distance between two instances by summing the
 * per-attribute differences. Both instances are walked in parallel over
 * their (sorted) sparse index arrays; an attribute present in only one of
 * them is compared against 0. The class attribute is skipped.
 *
 * @param first the first instance
 * @param second the second instance
 * @return the distance between the two given instances, between 0 and 1
 * @throws InterruptedException if the thread has been interrupted
 */
private double distance(final Instance first, final Instance second) throws InterruptedException {
double distance = 0;
int firstI, secondI;
for (int p1 = 0, p2 = 0; p1 < first.numValues() || p2 < second.numValues();) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
// numAttributes() acts as a sentinel once one sparse list is exhausted
if (p1 >= first.numValues()) {
firstI = this.m_trainInstances.numAttributes();
} else {
firstI = first.index(p1);
}
if (p2 >= second.numValues()) {
secondI = this.m_trainInstances.numAttributes();
} else {
secondI = second.index(p2);
}
if (firstI == this.m_trainInstances.classIndex()) {
p1++;
continue;
}
if (secondI == this.m_trainInstances.classIndex()) {
p2++;
continue;
}
double diff;
if (firstI == secondI) {
diff = this.difference(firstI, first.valueSparse(p1), second.valueSparse(p2));
p1++;
p2++;
} else if (firstI > secondI) {
// attribute only stored in 'second': compare against implicit 0
diff = this.difference(secondI, 0, second.valueSparse(p2));
p2++;
} else {
// attribute only stored in 'first': compare against implicit 0
diff = this.difference(firstI, first.valueSparse(p1), 0);
p1++;
}
// distance += diff * diff;
distance += diff;
}
// return Math.sqrt(distance / m_NumAttributesUsed);
return distance;
}
/**
 * Updates the attribute weight statistics (m_ndc, m_nda, m_ndcda) given an
 * instance when the class is numeric. Neighbours contribute either equally
 * or weighted by their distance rank, depending on m_weightByDistance.
 *
 * @param instNum the index of the instance to use when updating weights
 * @throws InterruptedException if the thread has been interrupted
 */
private void updateWeightsNumericClass(final int instNum) throws InterruptedException {
int i, j;
double temp, temp2;
int[] tempSorted = null;
double[] tempDist = null;
double distNorm = 1.0;
int firstI, secondI;
Instance inst = this.m_trainInstances.instance(instNum);
// sort nearest neighbours and set up normalization variable
if (this.m_weightByDistance) {
tempDist = new double[this.m_stored[0]];
for (j = 0, distNorm = 0; j < this.m_stored[0]; j++) {
// copy the distances
tempDist[j] = this.m_karray[0][j][0];
// sum normalizer
distNorm += this.m_weightsByRank[j];
}
tempSorted = Utils.sort(tempDist);
}
for (i = 0; i < this.m_stored[0]; i++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
// P diff prediction (class) given nearest instances
if (this.m_weightByDistance) {
temp = this.difference(this.m_classIndex, inst.value(this.m_classIndex), this.m_trainInstances.instance((int) this.m_karray[0][tempSorted[i]][1]).value(this.m_classIndex));
temp *= (this.m_weightsByRank[i] / distNorm);
} else {
temp = this.difference(this.m_classIndex, inst.value(this.m_classIndex), this.m_trainInstances.instance((int) this.m_karray[0][i][1]).value(this.m_classIndex));
temp *= (1.0 / this.m_stored[0]); // equal influence
}
this.m_ndc += temp;
Instance cmp;
// pick the i-th neighbour, in rank order when weighting by distance
cmp = (this.m_weightByDistance) ? this.m_trainInstances.instance((int) this.m_karray[0][tempSorted[i]][1]) : this.m_trainInstances.instance((int) this.m_karray[0][i][1]);
double temp_diffP_diffA_givNearest = this.difference(this.m_classIndex, inst.value(this.m_classIndex), cmp.value(this.m_classIndex));
// now the attributes: merge-walk the two sparse index lists
for (int p1 = 0, p2 = 0; p1 < inst.numValues() || p2 < cmp.numValues();) {
if (p1 >= inst.numValues()) {
firstI = this.m_trainInstances.numAttributes();
} else {
firstI = inst.index(p1);
}
if (p2 >= cmp.numValues()) {
secondI = this.m_trainInstances.numAttributes();
} else {
secondI = cmp.index(p2);
}
if (firstI == this.m_trainInstances.classIndex()) {
p1++;
continue;
}
if (secondI == this.m_trainInstances.classIndex()) {
p2++;
continue;
}
temp = 0.0;
temp2 = 0.0;
if (firstI == secondI) {
j = firstI;
temp = this.difference(j, inst.valueSparse(p1), cmp.valueSparse(p2));
p1++;
p2++;
} else if (firstI > secondI) {
j = secondI;
temp = this.difference(j, 0, cmp.valueSparse(p2));
p2++;
} else {
j = firstI;
temp = this.difference(j, inst.valueSparse(p1), 0);
p1++;
}
temp2 = temp_diffP_diffA_givNearest * temp;
// P of different prediction and different att value given
// nearest instances
if (this.m_weightByDistance) {
temp2 *= (this.m_weightsByRank[i] / distNorm);
} else {
temp2 *= (1.0 / this.m_stored[0]); // equal influence
}
this.m_ndcda[j] += temp2;
// P of different attribute val given nearest instances
if (this.m_weightByDistance) {
temp *= (this.m_weightsByRank[i] / distNorm);
} else {
temp *= (1.0 / this.m_stored[0]); // equal influence
}
this.m_nda[j] += temp;
}
}
}
/**
 * Updates the attribute weights (m_weights) given an instance when the
 * class is discrete: weights are decreased for differences to the k
 * nearest hits (same class) and increased for differences to the k nearest
 * misses of every other class, the latter scaled by the other class's
 * prior probability when there are more than two classes.
 *
 * @param instNum the index of the instance to use when updating weights
 * @throws InterruptedException if the thread has been interrupted
 */
private void updateWeightsDiscreteClass(final int instNum) throws InterruptedException {
int i, j, k;
int cl;
double temp_diff, w_norm = 1.0;
double[] tempDistClass;
int[] tempSortedClass = null;
double distNormClass = 1.0;
double[] tempDistAtt;
int[][] tempSortedAtt = null;
double[] distNormAtt = null;
int firstI, secondI;
// store the indexes (sparse instances) of non-zero elements
Instance inst = this.m_trainInstances.instance(instNum);
// get the class of this instance
cl = (int) this.m_trainInstances.instance(instNum).value(this.m_classIndex);
// sort nearest neighbours and set up normalization variables
if (this.m_weightByDistance) {
// do class (hits) first
// sort the distances
tempDistClass = new double[this.m_stored[cl]];
for (j = 0, distNormClass = 0; j < this.m_stored[cl]; j++) {
// copy the distances
tempDistClass[j] = this.m_karray[cl][j][0];
// sum normalizer
distNormClass += this.m_weightsByRank[j];
}
tempSortedClass = Utils.sort(tempDistClass);
// do misses (other classes)
tempSortedAtt = new int[this.m_numClasses][1];
distNormAtt = new double[this.m_numClasses];
for (k = 0; k < this.m_numClasses; k++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
if (k != cl) // already done cl
{
// sort the distances
tempDistAtt = new double[this.m_stored[k]];
for (j = 0, distNormAtt[k] = 0; j < this.m_stored[k]; j++) {
// copy the distances
tempDistAtt[j] = this.m_karray[k][j][0];
// sum normalizer
distNormAtt[k] += this.m_weightsByRank[j];
}
tempSortedAtt[k] = Utils.sort(tempDistAtt);
}
}
}
if (this.m_numClasses > 2) {
// the amount of probability space left after removing the
// probability of this instance's class value
w_norm = (1.0 - this.m_classProbs[cl]);
}
// do the k nearest hits of the same class
for (j = 0, temp_diff = 0.0; j < this.m_stored[cl]; j++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
Instance cmp;
cmp = (this.m_weightByDistance) ? this.m_trainInstances.instance((int) this.m_karray[cl][tempSortedClass[j]][1]) : this.m_trainInstances.instance((int) this.m_karray[cl][j][1]);
// merge-walk the two sparse index lists; absent values compare against 0
for (int p1 = 0, p2 = 0; p1 < inst.numValues() || p2 < cmp.numValues();) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
if (p1 >= inst.numValues()) {
firstI = this.m_trainInstances.numAttributes();
} else {
firstI = inst.index(p1);
}
if (p2 >= cmp.numValues()) {
secondI = this.m_trainInstances.numAttributes();
} else {
secondI = cmp.index(p2);
}
if (firstI == this.m_trainInstances.classIndex()) {
p1++;
continue;
}
if (secondI == this.m_trainInstances.classIndex()) {
p2++;
continue;
}
if (firstI == secondI) {
i = firstI;
temp_diff = this.difference(i, inst.valueSparse(p1), cmp.valueSparse(p2));
p1++;
p2++;
} else if (firstI > secondI) {
i = secondI;
temp_diff = this.difference(i, 0, cmp.valueSparse(p2));
p2++;
} else {
i = firstI;
temp_diff = this.difference(i, inst.valueSparse(p1), 0);
p1++;
}
if (this.m_weightByDistance) {
temp_diff *= (this.m_weightsByRank[j] / distNormClass);
} else {
if (this.m_stored[cl] > 0) {
temp_diff /= this.m_stored[cl];
}
}
// hits with a different attribute value decrease the weight
this.m_weights[i] -= temp_diff;
}
}
// now do k nearest misses from each of the other classes
temp_diff = 0.0;
for (k = 0; k < this.m_numClasses; k++) {
if (k != cl) // already done cl
{
for (j = 0; j < this.m_stored[k]; j++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
Instance cmp;
cmp = (this.m_weightByDistance) ? this.m_trainInstances.instance((int) this.m_karray[k][tempSortedAtt[k][j]][1]) : this.m_trainInstances.instance((int) this.m_karray[k][j][1]);
for (int p1 = 0, p2 = 0; p1 < inst.numValues() || p2 < cmp.numValues();) {
if (p1 >= inst.numValues()) {
firstI = this.m_trainInstances.numAttributes();
} else {
firstI = inst.index(p1);
}
if (p2 >= cmp.numValues()) {
secondI = this.m_trainInstances.numAttributes();
} else {
secondI = cmp.index(p2);
}
if (firstI == this.m_trainInstances.classIndex()) {
p1++;
continue;
}
if (secondI == this.m_trainInstances.classIndex()) {
p2++;
continue;
}
if (firstI == secondI) {
i = firstI;
temp_diff = this.difference(i, inst.valueSparse(p1), cmp.valueSparse(p2));
p1++;
p2++;
} else if (firstI > secondI) {
i = secondI;
temp_diff = this.difference(i, 0, cmp.valueSparse(p2));
p2++;
} else {
i = firstI;
temp_diff = this.difference(i, inst.valueSparse(p1), 0);
p1++;
}
if (this.m_weightByDistance) {
temp_diff *= (this.m_weightsByRank[j] / distNormAtt[k]);
} else {
if (this.m_stored[k] > 0) {
temp_diff /= this.m_stored[k];
}
}
// misses with a different attribute value increase the weight;
// scale by the miss class's prior when more than two classes
if (this.m_numClasses > 2) {
this.m_weights[i] += ((this.m_classProbs[k] / w_norm) * temp_diff);
} else {
this.m_weights[i] += temp_diff;
}
}
}
}
}
}
/**
 * Find the K nearest instances to supplied instance if the class is numeric,
 * or the K nearest Hits (same class) and Misses (K from each of the other
 * classes) if the class is discrete. Results are stored in m_karray,
 * with m_stored / m_index / m_worst tracking the fill level and the
 * current worst (farthest) entry per class.
 *
 * @param instNum the index of the instance to find nearest neighbours of
 * @throws InterruptedException if the thread has been interrupted
 */
private void findKHitMiss(final int instNum) throws InterruptedException {
int i, j;
int cl;
double ww;
double temp_diff = 0.0;
Instance thisInst = this.m_trainInstances.instance(instNum);
for (i = 0; i < this.m_numInstances; i++) {
// XXX thread interrupted; throw exception
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA");
}
if (i != instNum) {
Instance cmpInst = this.m_trainInstances.instance(i);
temp_diff = this.distance(cmpInst, thisInst);
// class of this training instance or 0 if numeric
if (this.m_numericClass) {
cl = 0;
} else {
if (this.m_trainInstances.instance(i).classIsMissing()) {
// skip instances with missing class values in the nominal class case
continue;
}
cl = (int) this.m_trainInstances.instance(i).value(this.m_classIndex);
}
// add this diff to the list for the class of this instance
if (this.m_stored[cl] < this.m_Knn) {
this.m_karray[cl][this.m_stored[cl]][0] = temp_diff;
this.m_karray[cl][this.m_stored[cl]][1] = i;
this.m_stored[cl]++;
// note the worst diff for this class
for (j = 0, ww = -1.0; j < this.m_stored[cl]; j++) {
if (this.m_karray[cl][j][0] > ww) {
ww = this.m_karray[cl][j][0];
this.m_index[cl] = j;
}
}
this.m_worst[cl] = ww;
} else
/*
 * if we already have stored knn for this class then check to see if
 * this instance is better than the worst
 */
{
if (temp_diff < this.m_karray[cl][this.m_index[cl]][0]) {
// replace the current worst entry, then rescan for the new worst
this.m_karray[cl][this.m_index[cl]][0] = temp_diff;
this.m_karray[cl][this.m_index[cl]][1] = i;
for (j = 0, ww = -1.0; j < this.m_stored[cl]; j++) {
if (this.m_karray[cl][j][0] > ww) {
ww = this.m_karray[cl][j][0];
this.m_index[cl] = j;
}
}
this.m_worst[cl] = ww;
}
}
}
}
}
/**
 * Returns the revision string (extracted from the version-control keyword).
 *
 * @return the revision
 */
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
 * Post-processing hook: frees the training data (keeping only the dataset
 * header) to save memory and returns the attribute set unchanged.
 *
 * @param attributeSet the attribute set produced by the search
 * @return the unmodified attribute set
 */
@Override
public int[] postProcess(final int[] attributeSet) {
// save memory
this.m_trainInstances = new Instances(this.m_trainInstances, 0);
return attributeSet;
}
// ============
// Test method.
// ============
/**
 * Main method for testing this class from the command line.
 *
 * @param args the options
 */
public static void main(final String[] args) {
runEvaluator(new ReliefFAttributeEval(), args);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/StartSetHandler.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* StartSetHandler.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.attributeSelection;
/**
 * Interface for search methods capable of doing something sensible
 * given a starting set of attributes.
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision$
 */
public interface StartSetHandler {
/**
 * Sets a starting set of attributes for the search. It is the
 * search method's responsibility to report this start set (if any)
 * in its toString() method.
 * @param startSet a string containing a list of attributes (and or ranges),
 * eg. 1,2,6,10-15.
 * @exception Exception if the start set can't be set.
 */
void setStartSet (String startSet) throws Exception;
/**
 * Returns the current list of attributes (and or attribute ranges) as a String.
 * @return a list of attributes (and or attribute ranges)
 */
String getStartSet ();
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/SubsetEvaluator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* SubsetEvaluator.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.attributeSelection;
import java.util.BitSet;
/**
 * Interface for attribute subset evaluators.
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision$
 */
public interface SubsetEvaluator {
/**
 * Evaluates a subset of attributes.
 *
 * @param subset a bitset representing the attribute subset to be
 * evaluated
 * @return the "merit" of the subset
 * @exception Exception if the subset could not be evaluated
 */
double evaluateSubset(BitSet subset) throws Exception;
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/SymmetricalUncertAttributeEval.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* SymmetricalUncertAttributeEval.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.attributeSelection;
import java.util.Enumeration;
import java.util.Vector;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.ContingencyTables;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.filters.Filter;
import weka.filters.supervised.attribute.Discretize;
/**
* <!-- globalinfo-start --> SymmetricalUncertAttributeEval :<br/>
* <br/>
* Evaluates the worth of an attribute by measuring the symmetrical uncertainty
* with respect to the class. <br/>
* <br/>
* SymmU(Class, Attribute) = 2 * (H(Class) - H(Class | Attribute)) / H(Class) +
* H(Attribute).<br/>
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -M
* treat missing values as a seperate value.
* </pre>
*
* <!-- options-end -->
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
* @see Discretize
*/
public class SymmetricalUncertAttributeEval extends ASEvaluation implements AttributeEvaluator, OptionHandler {

  /** for serialization */
  static final long serialVersionUID = -8096505776132296416L;

  /** The training instances (replaced by a discretized copy in buildEvaluator) */
  private Instances m_trainInstances;

  /** The class index */
  private int m_classIndex;

  /** The number of instances */
  private int m_numInstances;

  /** The number of classes */
  private int m_numClasses;

  /**
   * If true, counts for missing values are distributed across observed
   * values in proportion to their frequency; if false, missing is treated
   * as a separate value.
   */
  private boolean m_missing_merge;

  /**
   * Returns a string describing this attribute evaluator
   *
   * @return a description of the evaluator suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String globalInfo() {
    return "SymmetricalUncertAttributeEval :\n\nEvaluates the worth of an attribute " + "by measuring the symmetrical uncertainty with respect to the class. " + "\n\n SymmU(Class, Attribute) = 2 * (H(Class) - H(Class | Attribute)) "
        + "/ H(Class) + H(Attribute).\n";
  }

  /**
   * Constructor. Resets all options to their defaults.
   */
  public SymmetricalUncertAttributeEval() {
    this.resetOptions();
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   **/
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> newVector = new Vector<Option>(1);
    newVector.addElement(new Option("\ttreat missing values as a seperate " + "value.", "M", 0, "-M"));
    return newVector.elements();
  }

  /**
   * Parses a given list of options.
   * <p/>
   *
   * <!-- options-start --> Valid options are:
   * <p/>
   *
   * <pre>
   * -M
   *  treat missing values as a seperate value.
   * </pre>
   *
   * <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   **/
  @Override
  public void setOptions(final String[] options) throws Exception {
    this.resetOptions();
    // -M means "treat missing as separate", i.e. do NOT merge/distribute
    this.setMissingMerge(!(Utils.getFlag('M', options)));
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String missingMergeTipText() {
    return "Distribute counts for missing values. Counts are distributed " + "across other values in proportion to their frequency. Otherwise, " + "missing is treated as a separate value.";
  }

  /**
   * distribute the counts for missing values across observed values
   *
   * @param b true=distribute missing values.
   */
  public void setMissingMerge(final boolean b) {
    this.m_missing_merge = b;
  }

  /**
   * get whether missing values are being distributed or not
   *
   * @return true if missing values are being distributed.
   */
  public boolean getMissingMerge() {
    return this.m_missing_merge;
  }

  /**
   * Gets the current settings of the evaluator.
   *
   * @return an array of strings suitable for passing to setOptions()
   */
  @Override
  public String[] getOptions() {
    String[] options = new String[1];
    int current = 0;
    if (!this.getMissingMerge()) {
      options[current++] = "-M";
    }
    // pad unused slots with empty strings so the array has no nulls
    while (current < options.length) {
      options[current++] = "";
    }
    return options;
  }

  /**
   * Returns the capabilities of this evaluator.
   *
   * @return the capabilities of this evaluator
   * @see Capabilities
   */
  @Override
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();
    // attributes
    result.enable(Capability.NOMINAL_ATTRIBUTES);
    result.enable(Capability.NUMERIC_ATTRIBUTES);
    result.enable(Capability.DATE_ATTRIBUTES);
    result.enable(Capability.MISSING_VALUES);
    // class
    result.enable(Capability.NOMINAL_CLASS);
    result.enable(Capability.MISSING_CLASS_VALUES);
    return result;
  }

  /**
   * Initializes a symmetrical uncertainty attribute evaluator. Discretizes all
   * attributes that are numeric.
   *
   * @param data set of instances serving as training data
   * @throws Exception if the evaluator has not been generated successfully
   */
  @Override
  public void buildEvaluator(final Instances data) throws Exception {
    // can evaluator handle data?
    this.getCapabilities().testWithFail(data);
    this.m_trainInstances = data;
    // capture class index and size before discretization
    this.m_classIndex = this.m_trainInstances.classIndex();
    this.m_numInstances = this.m_trainInstances.numInstances();
    Discretize disTransform = new Discretize();
    disTransform.setUseBetterEncoding(true);
    disTransform.setInputFormat(this.m_trainInstances);
    this.m_trainInstances = Filter.useFilter(this.m_trainInstances, disTransform);
    this.m_numClasses = this.m_trainInstances.attribute(this.m_classIndex).numValues();
  }

  /**
   * set options to default values
   */
  protected void resetOptions() {
    this.m_trainInstances = null;
    this.m_missing_merge = true;
  }

  /**
   * evaluates an individual attribute by measuring the symmetrical uncertainty
   * between it and the class.
   *
   * @param attribute the index of the attribute to be evaluated
   * @return the uncertainty
   * @throws Exception if the attribute could not be evaluated (including an
   *           InterruptedException if the calling thread was interrupted)
   */
  @Override
  public double evaluateAttribute(final int attribute) throws Exception {
    // +1 extra row/column to collect counts for missing values
    final int ni = this.m_trainInstances.attribute(attribute).numValues() + 1;
    final int nj = this.m_numClasses + 1;
    double sum = 0.0;
    double temp;
    Instance inst;
    // Java zero-initializes arrays, so no explicit clearing is required.
    // (The original code allocated sumi/sumj twice and then zeroed them
    // manually; both were redundant.)
    double[] sumi = new double[ni];
    double[] sumj = new double[nj];
    double[][] counts = new double[ni][nj];

    // Fill the contingency table; the last row/column hold missing values.
    for (int i = 0; i < this.m_numInstances; i++) {
      // XXX thread interrupted; throw exception
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA");
      }
      inst = this.m_trainInstances.instance(i);
      int ii;
      if (inst.isMissing(attribute)) {
        ii = ni - 1;
      } else {
        ii = (int) inst.value(attribute);
      }
      int jj;
      if (inst.isMissing(this.m_classIndex)) {
        jj = nj - 1;
      } else {
        jj = (int) inst.value(this.m_classIndex);
      }
      counts[ii][jj]++;
    }

    // get the row totals (and the grand total)
    for (int i = 0; i < ni; i++) {
      for (int j = 0; j < nj; j++) {
        sumi[i] += counts[i][j];
        sum += counts[i][j];
      }
    }
    // get the column totals
    for (int j = 0; j < nj; j++) {
      for (int i = 0; i < ni; i++) {
        sumj[j] += counts[i][j];
      }
    }
    // XXX thread interrupted; throw exception
    if (Thread.interrupted()) {
      throw new InterruptedException("Killed WEKA");
    }

    // distribute missing counts across observed values, proportionally to
    // their frequency (only when merging is enabled and not ALL values are
    // missing in either dimension)
    if (this.m_missing_merge && (sumi[ni - 1] < this.m_numInstances) && (sumj[nj - 1] < this.m_numInstances)) {
      double[] i_copy = new double[sumi.length];
      double[] j_copy = new double[sumj.length];
      double[][] counts_copy = new double[sumi.length][sumj.length];
      for (int i = 0; i < ni; i++) {
        System.arraycopy(counts[i], 0, counts_copy[i], 0, sumj.length);
      }
      System.arraycopy(sumi, 0, i_copy, 0, sumi.length);
      System.arraycopy(sumj, 0, j_copy, 0, sumj.length);
      double total_missing = (sumi[ni - 1] + sumj[nj - 1] - counts[ni - 1][nj - 1]);
      // do the missing i's: spread the missing-attribute row over the rows
      if (sumi[ni - 1] > 0.0) {
        for (int j = 0; j < nj - 1; j++) {
          if (counts[ni - 1][j] > 0.0) {
            for (int i = 0; i < ni - 1; i++) {
              temp = ((i_copy[i] / (sum - i_copy[ni - 1])) * counts[ni - 1][j]);
              counts[i][j] += temp;
              sumi[i] += temp;
            }
            counts[ni - 1][j] = 0.0;
          }
        }
      }
      sumi[ni - 1] = 0.0;
      // do the missing j's: spread the missing-class column over the columns
      if (sumj[nj - 1] > 0.0) {
        for (int i = 0; i < ni - 1; i++) {
          if (counts[i][nj - 1] > 0.0) {
            for (int j = 0; j < nj - 1; j++) {
              temp = ((j_copy[j] / (sum - j_copy[nj - 1])) * counts[i][nj - 1]);
              counts[i][j] += temp;
              sumj[j] += temp;
            }
            counts[i][nj - 1] = 0.0;
          }
        }
      }
      sumj[nj - 1] = 0.0;
      // do the both missing: spread the corner cell over all observed cells
      if (counts[ni - 1][nj - 1] > 0.0 && total_missing != sum) {
        for (int i = 0; i < ni - 1; i++) {
          for (int j = 0; j < nj - 1; j++) {
            temp = (counts_copy[i][j] / (sum - total_missing)) * counts_copy[ni - 1][nj - 1];
            counts[i][j] += temp;
            sumi[i] += temp;
            sumj[j] += temp;
          }
        }
        counts[ni - 1][nj - 1] = 0.0;
      }
    }
    return ContingencyTables.symmetricalUncertainty(counts);
  }

  /**
   * Return a description of the evaluator
   *
   * @return description as a string
   */
  @Override
  public String toString() {
    StringBuffer text = new StringBuffer();
    if (this.m_trainInstances == null) {
      text.append("\tSymmetrical Uncertainty evaluator has not been built");
    } else {
      text.append("\tSymmetrical Uncertainty Ranking Filter");
      if (!this.m_missing_merge) {
        text.append("\n\tMissing values treated as seperate");
      }
    }
    text.append("\n");
    return text.toString();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }

  // ============
  // Test method.
  // ============
  /**
   * Main method for testing this class.
   *
   * @param argv should contain the following arguments: -t training file
   */
  public static void main(final String[] argv) {
    runEvaluator(new SymmetricalUncertAttributeEval(), argv);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/attributeSelection/UnsupervisedAttributeEvaluator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* UnsupervisedAttributeEvaluator.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.attributeSelection;
/**
* Abstract unsupervised attribute evaluator.
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
*/
public abstract class UnsupervisedAttributeEvaluator
extends ASEvaluation
implements AttributeEvaluator {
/** for serialization (keep stable across releases) */
private static final long serialVersionUID = -4100897318675336178L;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.