index
int64 | repo_id
string | file_path
string | content
string |
|---|---|---|---|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/ht/NBNode.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* NBNode.java
* Copyright (C) 2013 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.ht;
import java.io.Serializable;
import weka.classifiers.bayes.NaiveBayesUpdateable;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
/**
* Implements a LearningNode that uses a naive Bayes model
*
* @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
* @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
* @version $Revision$
*/
public class NBNode extends ActiveHNode implements LearningNode, Serializable {

  /** For serialization */
  private static final long serialVersionUID = -1872415764817690961L;

  /** The naive Bayes model maintained at this node */
  protected NaiveBayesUpdateable m_bayes;

  /**
   * The weight of instances that need to be seen by this node before allowing
   * naive Bayes to make predictions
   */
  protected double m_nbWeightThreshold;

  /**
   * Construct a new NBNode
   *
   * @param header the instances structure of the data we're learning from
   * @param nbWeightThreshold the weight mass to see before allowing naive
   *          Bayes to predict
   * @throws Exception if a problem occurs
   */
  public NBNode(Instances header, double nbWeightThreshold) throws Exception {
    m_bayes = new NaiveBayesUpdateable();
    m_bayes.buildClassifier(header);
    m_nbWeightThreshold = nbWeightThreshold;
  }

  /**
   * Update this node (and its naive Bayes model) with the supplied instance.
   *
   * @param inst the instance to train on
   * @throws Exception if a problem occurs in the superclass update
   */
  @Override
  public void updateNode(Instance inst) throws Exception {
    super.updateNode(inst);
    try {
      m_bayes.updateClassifier(inst);
    } catch (Exception e) {
      // best effort: a failed NB update does not invalidate the node itself
      e.printStackTrace();
    }
  }

  /**
   * Get the superclass's (majority class) distribution, skipping the naive
   * Bayes model entirely.
   *
   * @param inst the instance to predict for
   * @param classAtt the class attribute
   * @return the class distribution from the superclass
   * @throws Exception if a problem occurs
   */
  protected double[] bypassNB(Instance inst, Attribute classAtt)
      throws Exception {
    return super.getDistribution(inst, classAtt);
  }

  /**
   * Predicted class distribution: naive Bayes once enough weight has been
   * seen since the last split evaluation, otherwise the superclass's
   * distribution.
   *
   * @param inst the instance to predict for
   * @param classAtt the class attribute
   * @return the predicted class distribution
   * @throws Exception if a problem occurs
   */
  @Override
  public double[] getDistribution(Instance inst, Attribute classAtt)
      throws Exception {
    // totalWeight() - m_weightSeenAtLastSplitEval is the weight mass
    // observed by this node's NB model
    boolean useBayes = m_nbWeightThreshold == 0
        || totalWeight() - m_weightSeenAtLastSplitEval > m_nbWeightThreshold;
    return useBayes ? m_bayes.distributionForInstance(inst)
        : super.getDistribution(inst, classAtt);
  }

  @Override
  protected int dumpTree(int depth, int leafCount, StringBuffer buff) {
    int count = super.dumpTree(depth, leafCount, buff);
    buff.append(" NB").append(m_leafNum);
    return count;
  }

  @Override
  protected void printLeafModels(StringBuffer buff) {
    buff.append("NB").append(m_leafNum).append("\n")
        .append(m_bayes.toString());
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/ht/NBNodeAdaptive.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* NBNodeAdaptive.java
* Copyright (C) 2013 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.ht;
import java.io.Serializable;
import java.util.Map;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
/**
* Implements a LearningNode that chooses between using majority class or naive
* Bayes for prediction
*
* @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
* @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
* @version $Revision$
*/
public class NBNodeAdaptive extends NBNode implements LearningNode,
    Serializable {

  /** For serialization */
  private static final long serialVersionUID = -4509802312019989686L;

  /** Weight of instances correctly predicted by the majority class */
  protected double m_majClassCorrectWeight = 0;

  /** Weight of instances correctly predicted by naive Bayes */
  protected double m_nbCorrectWeight = 0;

  /**
   * Constructor
   *
   * @param header the structure of the instances we're training from
   * @param nbWeightThreshold the weight mass to see before allowing naive
   *          Bayes to predict
   * @throws Exception if a problem occurs
   */
  public NBNodeAdaptive(Instances header, double nbWeightThreshold)
      throws Exception {
    super(header, nbWeightThreshold);
  }

  /**
   * The class value with the greatest weight in this node's class
   * distribution (empty string when the distribution is empty).
   *
   * @return the name of the majority class
   */
  protected String majorityClass() {
    String best = "";
    double bestWeight = -1;
    for (Map.Entry<String, WeightMass> entry : m_classDistribution.entrySet()) {
      double w = entry.getValue().m_weight;
      if (w > bestWeight) {
        bestWeight = w;
        best = entry.getKey();
      }
    }
    return best;
  }

  /**
   * Score both predictors (majority class and naive Bayes) against the true
   * class of the incoming instance, then train on it.
   *
   * @param inst the instance to train on
   * @throws Exception if a problem occurs
   */
  @Override
  public void updateNode(Instance inst) throws Exception {
    int trueClassIndex = (int) inst.classValue();
    String trueClass = inst.classAttribute().value(trueClassIndex);

    // evaluate before updating so the instance acts as unseen test data
    if (majorityClass().equals(trueClass)) {
      m_majClassCorrectWeight += inst.weight();
    }
    if (m_bayes.classifyInstance(inst) == trueClassIndex) {
      m_nbCorrectWeight += inst.weight();
    }
    super.updateNode(inst);
  }

  /**
   * Predict with whichever strategy has been more accurate at this leaf so
   * far; ties go to naive Bayes.
   *
   * @param inst the instance to predict for
   * @param classAtt the class attribute
   * @return the predicted class distribution
   * @throws Exception if a problem occurs
   */
  @Override
  public double[] getDistribution(Instance inst, Attribute classAtt)
      throws Exception {
    if (m_majClassCorrectWeight > m_nbCorrectWeight) {
      // bypassNB is inherited (not overridden here), so this is the
      // majority-class distribution from ActiveHNode
      return bypassNB(inst, classAtt);
    }
    return super.getDistribution(inst, classAtt);
  }

  @Override
  protected int dumpTree(int depth, int leafCount, StringBuffer buff) {
    int count = super.dumpTree(depth, leafCount, buff);
    buff.append(" NB adaptive").append(m_leafNum);
    return count;
  }

  @Override
  protected void printLeafModels(StringBuffer buff) {
    buff.append("NB adaptive").append(m_leafNum).append("\n")
        .append(m_bayes.toString());
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/ht/NominalConditionalSufficientStats.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* NominalConditionalSufficientStats.java
* Copyright (C) 2013 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.ht;
import java.io.Serializable;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import weka.core.Utils;
/**
* Maintains sufficient stats for the distribution of a nominal attribute
*
* @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
* @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
* @version $Revision$
*/
public class NominalConditionalSufficientStats extends
    ConditionalSufficientStats implements Serializable {

  /**
   * For serialization
   */
  private static final long serialVersionUID = -669902060601313488L;

  /**
   * Inner class that implements a discrete distribution over the value
   * indexes of a nominal attribute
   *
   * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
   */
  protected class ValueDistribution implements Serializable {

    /**
     * For serialization
     */
    private static final long serialVersionUID = -61711544350888154L;

    /** Maps attribute value index to the weight observed for that value */
    protected final Map<Integer, WeightMass> m_dist = new LinkedHashMap<Integer, WeightMass>();

    /** Total weight over all values in this distribution */
    private double m_sum;

    /**
     * Add weight for the given attribute value index.
     *
     * NOTE(review): a previously unseen value is initialised with weight 1.0
     * before {@code weight} is added, so its first recorded weight is
     * 1.0 + weight — confirm this smoothing-style initialisation is
     * intentional.
     *
     * @param val the attribute value index
     * @param weight the weight to add
     */
    public void add(int val, double weight) {
      WeightMass count = m_dist.get(val);
      if (count == null) {
        count = new WeightMass();
        count.m_weight = 1.0;
        m_sum += 1.0;
        m_dist.put(val, count);
      }
      count.m_weight += weight;
      m_sum += weight;
    }

    /**
     * Subtract weight for the given attribute value index. Does nothing if
     * the value has never been seen.
     *
     * @param val the attribute value index
     * @param weight the weight to remove
     */
    public void delete(int val, double weight) {
      WeightMass count = m_dist.get(val);
      if (count != null) {
        count.m_weight -= weight;
        m_sum -= weight;
      }
    }

    /**
     * Get the weight recorded for an attribute value index.
     *
     * @param val the attribute value index
     * @return the weight, or 0.0 if the value has never been seen
     */
    public double getWeight(int val) {
      WeightMass count = m_dist.get(val);
      if (count != null) {
        return count.m_weight;
      }
      return 0.0;
    }

    /**
     * @return the total weight in this distribution
     */
    public double sum() {
      return m_sum;
    }
  }

  /** Total weight seen, including instances with a missing attribute value */
  protected double m_totalWeight;

  /** Weight of instances whose value for this attribute was missing */
  protected double m_missingWeight;

  /**
   * Update the sufficient statistics with one observation.
   *
   * @param attVal the attribute value (may be a missing value)
   * @param classVal the class value
   * @param weight the weight of the observation
   */
  @Override
  public void update(double attVal, String classVal, double weight) {
    if (Utils.isMissingValue(attVal)) {
      m_missingWeight += weight;
    } else {
      // (removed a dead "new Integer((int) attVal)" statement that boxed the
      // value and discarded it)
      ValueDistribution valDist = (ValueDistribution) m_classLookup
          .get(classVal);
      if (valDist == null) {
        valDist = new ValueDistribution();
        m_classLookup.put(classVal, valDist);
      }
      valDist.add((int) attVal, weight);
    }
    m_totalWeight += weight;
  }

  /**
   * Estimate P(attVal | classVal) from the per-class value distribution.
   *
   * @param attVal the attribute value index
   * @param classVal the class value
   * @return the conditional probability, or 0 if the class has not been seen
   */
  @Override
  public double probabilityOfAttValConditionedOnClass(double attVal,
      String classVal) {
    ValueDistribution valDist = (ValueDistribution) m_classLookup.get(classVal);
    if (valDist != null) {
      return valDist.getWeight((int) attVal) / valDist.sum();
    }
    return 0;
  }

  /**
   * Compute the class distributions that would result from a multiway split
   * on this attribute — one class distribution per attribute value.
   *
   * @return a list of post-split class distributions
   */
  protected List<Map<String, WeightMass>> classDistsAfterSplit() {
    // attribute value index -> class distribution for that branch
    Map<Integer, Map<String, WeightMass>> splitDists = new HashMap<Integer, Map<String, WeightMass>>();
    for (Map.Entry<String, Object> cls : m_classLookup.entrySet()) {
      String classVal = cls.getKey();
      ValueDistribution attDist = (ValueDistribution) cls.getValue();
      for (Map.Entry<Integer, WeightMass> att : attDist.m_dist.entrySet()) {
        Integer attVal = att.getKey();
        WeightMass attCount = att.getValue();
        Map<String, WeightMass> clsDist = splitDists.get(attVal);
        if (clsDist == null) {
          clsDist = new HashMap<String, WeightMass>();
          splitDists.put(attVal, clsDist);
        }
        WeightMass clsCount = clsDist.get(classVal);
        if (clsCount == null) {
          clsCount = new WeightMass();
          clsDist.put(classVal, clsCount);
        }
        clsCount.m_weight += attCount.m_weight;
      }
    }
    return new LinkedList<Map<String, WeightMass>>(splitDists.values());
  }

  /**
   * Return a candidate multiway split on this attribute, scored with the
   * supplied metric.
   *
   * @param splitMetric the metric used to evaluate the split
   * @param preSplitDist the class distribution before splitting
   * @param attName the name of the attribute
   * @return the candidate split
   */
  @Override
  public SplitCandidate bestSplit(SplitMetric splitMetric,
      Map<String, WeightMass> preSplitDist, String attName) {
    List<Map<String, WeightMass>> postSplitDists = classDistsAfterSplit();
    double merit = splitMetric.evaluateSplit(preSplitDist, postSplitDists);
    return new SplitCandidate(new UnivariateNominalMultiwaySplit(attName),
        postSplitDists, merit);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/ht/Split.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Split.java
* Copyright (C) 2013 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.ht;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import weka.core.Instance;
import java.io.Serializable;
/**
* Base class for different split types
*
* @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
* @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
* @version $Revision$
*/
public abstract class Split implements Serializable {

  /**
   * For serialization
   */
  private static final long serialVersionUID = 5390368487675958092L;

  /** name(s) of attribute(s) involved in the split */
  protected List<String> m_splitAttNames = new ArrayList<String>();

  /**
   * Returns the name of the branch that the supplied instance would go down
   *
   * @param inst the instance to find the branch for
   * @return the name of the branch that the instance would go down
   */
  public abstract String branchForInstance(Instance inst);

  /**
   * Returns the condition for the supplied branch name
   *
   * @param branch the name of the branch to get the condition for
   * @return the condition (test) that corresponds to the named branch
   */
  public abstract String conditionForBranch(String branch);

  /**
   * Get the name(s) of the attribute(s) involved in this split.
   *
   * Note that the returned list is the internal (mutable) list, not a copy.
   *
   * @return the list of attribute names tested by this split
   */
  public List<String> splitAttributes() {
    return m_splitAttNames;
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/ht/SplitCandidate.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* SplitCandidate.java
* Copyright (C) 2013 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.ht;
import java.util.List;
import java.util.Map;
/**
* Encapsulates a candidate split
*
* @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
* @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
* @version $Revision$
*/
public class SplitCandidate implements Comparable<SplitCandidate> {

  /** The test that defines the split */
  public Split m_splitTest;

  /**
   * list of class distributions resulting from a split - 2 entries in the
   * outer list for numeric splits and n for nominal splits
   */
  public List<Map<String, WeightMass>> m_postSplitClassDistributions;

  /** The merit of the split */
  public double m_splitMerit;

  /**
   * Constructor
   *
   * @param splitTest the splitting test
   * @param postSplitDists the distributions resulting from the split
   * @param merit the merit of the split
   */
  public SplitCandidate(Split splitTest,
      List<Map<String, WeightMass>> postSplitDists, double merit) {
    m_splitMerit = merit;
    m_postSplitClassDistributions = postSplitDists;
    m_splitTest = splitTest;
  }

  /**
   * Number of branches resulting from the split
   *
   * @return the number of subsets of instances resulting from the split
   */
  public int numSplits() {
    return m_postSplitClassDistributions.size();
  }

  /**
   * Orders candidates by ascending merit.
   */
  @Override
  public int compareTo(SplitCandidate other) {
    return Double.compare(this.m_splitMerit, other.m_splitMerit);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/ht/SplitMetric.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* SplitMetric.java
* Copyright (C) 2013 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.ht;
import java.io.Serializable;
import java.util.List;
import java.util.Map;
/**
* Base class for split metrics
*
* @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
* @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
* @version $Revision$
*/
public abstract class SplitMetric implements Serializable {

  /** For serialization */
  private static final long serialVersionUID = 2891555018707080818L;

  /**
   * Utility method to return the sum of instance weight in a distribution
   *
   * @param dist the distribution
   * @return the sum of the weights contained in a distribution
   */
  public static double sum(Map<String, WeightMass> dist) {
    double total = 0;
    for (WeightMass mass : dist.values()) {
      total += mass.m_weight;
    }
    return total;
  }

  /**
   * Evaluate the merit of a split
   *
   * @param preDist the class distribution before the split
   * @param postDist the class distributions after the split
   * @return the merit of the split
   */
  public abstract double evaluateSplit(Map<String, WeightMass> preDist,
      List<Map<String, WeightMass>> postDist);

  /**
   * Get the range of the splitting metric
   *
   * @param preDist the pre-split class distribution
   * @return the range of the splitting metric
   */
  public abstract double getMetricRange(Map<String, WeightMass> preDist);
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/ht/SplitNode.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* SplitNode.java
* Copyright (C) 2013 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.ht;
import java.util.LinkedHashMap;
import java.util.Map;
import weka.core.Instance;
/**
* Class for a node that splits the data in a Hoeffding tree
*
* @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
* @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
* @version $Revision$
*/
public class SplitNode extends HNode {

  /**
   * For serialization
   */
  private static final long serialVersionUID = 1558033628618451073L;

  /** The split itself */
  protected Split m_split;

  /** Child nodes, keyed by branch name */
  protected Map<String, HNode> m_children = new LinkedHashMap<String, HNode>();

  /**
   * Construct a new SplitNode
   *
   * @param classDistrib the class distribution
   * @param split the split
   */
  public SplitNode(Map<String, WeightMass> classDistrib, Split split) {
    super(classDistrib);
    m_split = split;
  }

  /**
   * Return the branch that the supplied instance goes down
   *
   * @param inst the instance to find the branch for
   * @return the branch that the supplied instance goes down (null when the
   *         split attribute's value is missing)
   */
  public String branchForInstance(Instance inst) {
    return m_split.branchForInstance(inst);
  }

  @Override
  public boolean isLeaf() {
    // a split node is by definition an interior node
    return false;
  }

  /**
   * Number of child nodes.
   *
   * NOTE(review): the method name contains a typo ("Childred"); kept as-is
   * since external callers may depend on it.
   *
   * @return the number of child nodes
   */
  public int numChildred() {
    return m_children.size();
  }

  /**
   * Add a child
   *
   * @param branch the branch for the child
   * @param child the child itself
   */
  public void setChild(String branch, HNode child) {
    m_children.put(branch, child);
  }

  @Override
  public LeafNode leafForInstance(Instance inst, SplitNode parent,
      String parentBranch) {
    String branch = branchForInstance(inst);
    if (branch != null) {
      HNode child = m_children.get(branch);
      if (child != null) {
        // recurse down the matching branch
        return child.leafForInstance(inst, this, branch);
      }
      // branch exists but no child has been grown for it yet
      return new LeafNode(null, this, branch);
    }
    // missing split-attribute value: treat this node itself as the leaf
    return new LeafNode(this, parent, parentBranch);
  }

  @Override
  public void updateNode(Instance inst) {
    // don't update the distribution
  }

  @Override
  protected int dumpTree(int depth, int leafCount, StringBuffer buff) {
    // print each branch condition followed by its subtree, one level deeper
    for (Map.Entry<String, HNode> e : m_children.entrySet()) {
      HNode child = e.getValue();
      String branch = e.getKey();
      if (child != null) {
        buff.append("\n");
        for (int i = 0; i < depth; i++) {
          buff.append("| ");
        }
        buff.append(m_split.conditionForBranch(branch).trim());
        buff.append(": ");
        leafCount = child.dumpTree(depth + 1, leafCount, buff);
      }
    }
    return leafCount;
  }

  @Override
  public int installNodeNums(int nodeNum) {
    // number this node first, then all non-null children in branch order
    nodeNum = super.installNodeNums(nodeNum);
    for (Map.Entry<String, HNode> e : m_children.entrySet()) {
      HNode child = e.getValue();
      if (child != null) {
        nodeNum = child.installNodeNums(nodeNum);
      }
    }
    return nodeNum;
  }

  @Override
  public void graphTree(StringBuffer buff) {
    // Emits graphviz "dot" syntax: a label for this node plus one edge per
    // branch, then recurses into the children.
    boolean first = true;
    for (Map.Entry<String, HNode> e : m_children.entrySet()) {
      HNode child = e.getValue();
      String branch = e.getKey();
      if (child != null) {
        String conditionForBranch = m_split.conditionForBranch(branch);
        if (first) {
          // derive the tested attribute's name from the first branch
          // condition: the text before "=" (nominal) or "<" (numeric)
          String testAttName = null;
          if (conditionForBranch.indexOf("<=") < 0) {
            testAttName = conditionForBranch.substring(0,
                conditionForBranch.indexOf("=")).trim();
          } else {
            testAttName = conditionForBranch.substring(0,
                conditionForBranch.indexOf("<")).trim();
          }
          first = false;
          buff.append("N" + m_nodeNum + " [label=\"" + testAttName + "\"]\n");
        }
        // strip the attribute name off the condition, keeping the operator
        // and value as the edge label (startIndex lands on the character
        // just before the operator; trim() removes the leftover space)
        int startIndex = 0;
        if (conditionForBranch.indexOf("<=") > 0) {
          startIndex = conditionForBranch.indexOf("<") - 1;
        } else if (conditionForBranch.indexOf("=") > 0) {
          startIndex = conditionForBranch.indexOf("=") - 1;
        } else {
          startIndex = conditionForBranch.indexOf(">") - 1;
        }
        conditionForBranch = conditionForBranch.substring(startIndex,
            conditionForBranch.length()).trim();
        buff.append(
            "N" + m_nodeNum + "->" + "N" + child.m_nodeNum + "[label=\""
                + conditionForBranch + "\"]\n").append("\n");
      }
    }
    // second pass: emit the subtrees
    for (Map.Entry<String, HNode> e : m_children.entrySet()) {
      HNode child = e.getValue();
      if (child != null) {
        child.graphTree(buff);
      }
    }
  }

  @Override
  protected void printLeafModels(StringBuffer buff) {
    // delegate to the children; an interior node has no leaf model itself
    for (Map.Entry<String, HNode> e : m_children.entrySet()) {
      HNode child = e.getValue();
      if (child != null) {
        child.printLeafModels(buff);
      }
    }
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/ht/UnivariateNominalMultiwaySplit.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* UnivariateNominalMultiwaySplit.java
* Copyright (C) 2013 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.ht;
import java.io.Serializable;
import weka.core.Attribute;
import weka.core.Instance;
/**
* A multiway split based on a single nominal attribute
*
* @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
* @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
* @version $Revision$
*/
public class UnivariateNominalMultiwaySplit extends Split implements
    Serializable {

  /** For serialization */
  private static final long serialVersionUID = -9094590488097956665L;

  /**
   * Constructor
   *
   * @param attName the name of the attribute to split on
   */
  public UnivariateNominalMultiwaySplit(String attName) {
    m_splitAttNames.add(attName);
  }

  /**
   * The branch for an instance is simply the instance's value of the split
   * attribute.
   *
   * @param inst the instance to find the branch for
   * @return the attribute value name, or null if the attribute is absent
   *         from the instance's dataset or its value is missing
   */
  @Override
  public String branchForInstance(Instance inst) {
    Attribute splitAtt = inst.dataset().attribute(m_splitAttNames.get(0));
    if (splitAtt == null || inst.isMissing(splitAtt)) {
      return null;
    }
    int valueIndex = (int) inst.value(splitAtt);
    return splitAtt.value(valueIndex);
  }

  /**
   * The condition for a branch, e.g. "colour = red".
   *
   * @param branch the name of the branch
   * @return the condition for the branch
   */
  @Override
  public String conditionForBranch(String branch) {
    return m_splitAttNames.get(0) + " = " + branch;
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/ht/UnivariateNumericBinarySplit.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* UnivariateNumericBinarySplit.java
* Copyright (C) 2013 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.ht;
import java.io.Serializable;
import weka.core.Attribute;
import weka.core.Instance;
/**
* A binary split based on a single numeric attribute
*
* @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
* @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
* @version $Revision$
*/
public class UnivariateNumericBinarySplit extends Split implements Serializable {

  /**
   * For serialization
   */
  private static final long serialVersionUID = -7392204582942741097L;

  /** The split point */
  protected double m_splitPoint;

  /**
   * Constructor
   *
   * @param attName the name of the attribute to split on
   * @param splitPoint the split point
   */
  public UnivariateNumericBinarySplit(String attName, double splitPoint) {
    m_splitAttNames.add(attName);
    m_splitPoint = splitPoint;
  }

  /**
   * Returns "left" when the instance's value for the split attribute is
   * {@code <=} the split point and "right" otherwise.
   *
   * @param inst the instance to find the branch for
   * @return "left"/"right", or null if the attribute is absent from the
   *         instance's dataset or its value is missing
   */
  @Override
  public String branchForInstance(Instance inst) {
    Attribute att = inst.dataset().attribute(m_splitAttNames.get(0));
    if (att == null || inst.isMissing(att)) {
      // TODO -------------
      return null;
    }
    if (inst.value(att) <= m_splitPoint) {
      return "left";
    }
    return "right";
  }

  /**
   * The condition for a branch, e.g. "petallength <= 2.450".
   *
   * @param branch "left" (case-insensitive) for the {@code <=} test; any
   *          other name yields the {@code >} test
   * @return the condition for the branch
   */
  @Override
  public String conditionForBranch(String branch) {
    String result = m_splitAttNames.get(0);
    if (branch.equalsIgnoreCase("left")) {
      result += " <= ";
    } else {
      result += " > ";
    }
    // Pin the locale so the decimal separator is always '.', regardless of
    // the default JVM locale (some locales would render e.g. "1,500",
    // producing inconsistent model dumps and graphviz output).
    result += String.format(java.util.Locale.ROOT, "%-9.3f", m_splitPoint);
    return result;
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/ht/WeightMass.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* WeightMass.java
* Copyright (C) 2013 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.ht;
import java.io.Serializable;
/**
* Simple container for a weight
*
* @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
* @version $Revision$
*/
public class WeightMass implements Serializable {

  /**
   * For serialization
   */
  private static final long serialVersionUID = 6794839107050779425L;

  /** The weight itself; public and mutable — clients update it in place */
  public double m_weight;
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/j48/BinC45ModelSelection.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* BinC45ModelSelection.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.j48;
import java.util.Enumeration;
import weka.core.Attribute;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* Class for selecting a C4.5-like binary (!) split for a given dataset.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public class BinC45ModelSelection extends ModelSelection {
/** for serialization */
private static final long serialVersionUID = 179170923545122001L;
/** Minimum number of instances in interval. */
protected final int m_minNoObj;
/** Use MDL correction? */
protected final boolean m_useMDLcorrection;
/** The FULL training dataset. */
protected Instances m_allData;
/** Do not relocate split point to actual data value */
protected final boolean m_doNotMakeSplitPointActualValue;
/**
 * Initializes the split selection method with the given parameters.
 *
 * @param minNoObj minimum number of instances that have to occur in at least
 *          two subsets induced by split
 * @param allData FULL training dataset (necessary for selection of split
 *          points).
 * @param useMDLcorrection whether to use MDL adjustment when finding splits
 *          on numeric attributes
 * @param doNotMakeSplitPointActualValue if true, do not relocate the split
 *          point to an actual data value
 */
public BinC45ModelSelection(int minNoObj, Instances allData,
    boolean useMDLcorrection, boolean doNotMakeSplitPointActualValue) {
  // all four fields are final and simply capture the configuration
  m_doNotMakeSplitPointActualValue = doNotMakeSplitPointActualValue;
  m_useMDLcorrection = useMDLcorrection;
  m_allData = allData;
  m_minNoObj = minNoObj;
}
/**
* Sets reference to training data to null.
*/
public void cleanup() {
  // drop the reference to the full training data so it can be garbage
  // collected once split selection is no longer needed
  m_allData = null;
}
/**
* Selects C4.5-type split for the given dataset.
*/
@Override
public final ClassifierSplitModel selectModel(Instances data) {
  double minResult;
  BinC45Split[] currentModel;
  BinC45Split bestModel = null;
  NoSplit noSplitModel = null;
  double averageInfoGain = 0;
  int validModels = 0;
  // true while every attribute looks nominal-with-many-values
  boolean multiVal = true;
  Distribution checkDistribution;
  double sumOfWeights;
  int i;
  try {
    // Check if all Instances belong to one class or if not
    // enough Instances to split.
    checkDistribution = new Distribution(data);
    noSplitModel = new NoSplit(checkDistribution);
    if (Utils.sm(checkDistribution.total(), 2 * m_minNoObj)
      || Utils.eq(checkDistribution.total(),
        checkDistribution.perClass(checkDistribution.maxClass()))) {
      return noSplitModel;
    }
    // Check if all attributes are nominal and have a
    // lot of values ("a lot" = at least 30% of the full dataset size).
    Enumeration<Attribute> enu = data.enumerateAttributes();
    while (enu.hasMoreElements()) {
      Attribute attribute = enu.nextElement();
      if ((attribute.isNumeric())
        || (Utils.sm(attribute.numValues(), (0.3 * m_allData.numInstances())))) {
        multiVal = false;
        break;
      }
    }
    currentModel = new BinC45Split[data.numAttributes()];
    sumOfWeights = data.sumOfWeights();
    // For each attribute.
    for (i = 0; i < data.numAttributes(); i++) {
      // Apart from class attribute.
      if (i != (data).classIndex()) {
        // Get models for current attribute.
        currentModel[i] = new BinC45Split(i, m_minNoObj, sumOfWeights,
          m_useMDLcorrection);
        currentModel[i].buildClassifier(data);
        // Check if useful split for current attribute
        // exists and check for enumerated attributes with
        // a lot of values.
        if (currentModel[i].checkModel()) {
          if ((data.attribute(i).isNumeric())
            || (multiVal || Utils.sm(data.attribute(i).numValues(),
              (0.3 * m_allData.numInstances())))) {
            averageInfoGain = averageInfoGain + currentModel[i].infoGain();
            validModels++;
          }
        }
      } else {
        // slot for the class attribute stays empty
        currentModel[i] = null;
      }
    }
    // Check if any useful split was found.
    if (validModels == 0) {
      return noSplitModel;
    }
    averageInfoGain = averageInfoGain / validModels;
    // Find "best" attribute to split on: highest gain ratio among models
    // with at least (roughly) average info gain.
    minResult = 0;
    for (i = 0; i < data.numAttributes(); i++) {
      if ((i != (data).classIndex()) && (currentModel[i].checkModel())) {
        // Use 1E-3 here to get a closer approximation to the original
        // implementation.
        if ((currentModel[i].infoGain() >= (averageInfoGain - 1E-3))
          && Utils.gr(currentModel[i].gainRatio(), minResult)) {
          bestModel = currentModel[i];
          minResult = currentModel[i].gainRatio();
        }
      }
    }
    // Check if useful split was found.
    if (Utils.eq(minResult, 0)) {
      return noSplitModel;
    }
    // Add all Instances with unknown values for the corresponding
    // attribute to the distribution for the model, so that
    // the complete distribution is stored with the model.
    bestModel.distribution().addInstWithUnknown(data, bestModel.attIndex());
    // Set the split point analogue to C45 if attribute numeric.
    if (!m_doNotMakeSplitPointActualValue) {
      bestModel.setSplitPoint(m_allData);
    }
    return bestModel;
  } catch (Exception e) {
    // NOTE(review): any exception during split selection is swallowed here
    // and null is returned — callers must be prepared for a null model
    e.printStackTrace();
  }
  return null;
}
/**
* Selects C4.5-type split for the given dataset.
*/
@Override
public final ClassifierSplitModel selectModel(Instances train, Instances test) {
  // The test data plays no role in C4.5-style split selection; delegate to
  // the single-argument variant.
  return this.selectModel(train);
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
// The $Revision$ keyword is expanded by the version control system.
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/j48/BinC45Split.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* BinC45Split.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.j48;
import java.util.Enumeration;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* Class implementing a binary C4.5-like split on an attribute.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public class BinC45Split extends ClassifierSplitModel {

  /** for serialization */
  private static final long serialVersionUID = -1278776919563022474L;

  /** Attribute to split on. */
  protected final int m_attIndex;

  /** Minimum number of objects in a split. */
  protected final int m_minNoObj;

  /** Use MDL correction? */
  protected final boolean m_useMDLcorrection;

  /** Value of split point: for a nominal attribute the (double-coded) index
   * of the value tested against; for a numeric attribute the threshold. */
  protected double m_splitPoint;

  /** InfoGain of split. */
  protected double m_infoGain;

  /** GainRatio of split. */
  protected double m_gainRatio;

  /** The sum of the weights of the instances. */
  protected final double m_sumOfWeights;

  /** Static reference to splitting criterion (information gain). */
  protected static InfoGainSplitCrit m_infoGainCrit = new InfoGainSplitCrit();

  /** Static reference to splitting criterion (gain ratio). */
  protected static GainRatioSplitCrit m_gainRatioCrit = new GainRatioSplitCrit();

  /**
   * Initializes the split model.
   *
   * @param attIndex index of the attribute to split on
   * @param minNoObj minimum number of objects required in a split
   * @param sumOfWeights sum of the weights of the training instances
   * @param useMDLcorrection whether to apply the MDL correction when
   *          evaluating splits on numeric attributes
   */
  public BinC45Split(int attIndex, int minNoObj, double sumOfWeights,
    boolean useMDLcorrection) {

    // Get index of attribute to split on.
    m_attIndex = attIndex;

    // Set minimum number of objects.
    m_minNoObj = minNoObj;

    // Set sum of weights;
    m_sumOfWeights = sumOfWeights;

    // Whether to use the MDL correction for numeric attributes
    m_useMDLcorrection = useMDLcorrection;
  }

  /**
   * Creates a C4.5-type split on the given data. Note that for a numeric
   * attribute the training data is sorted in place on that attribute.
   *
   * @exception Exception if something goes wrong
   */
  @Override
  public void buildClassifier(Instances trainInstances) throws Exception {

    // Initialize the remaining instance variables.
    m_numSubsets = 0;
    m_splitPoint = Double.MAX_VALUE;
    m_infoGain = 0;
    m_gainRatio = 0;

    // Different treatment for enumerated and numeric
    // attributes.
    if (trainInstances.attribute(m_attIndex).isNominal()) {
      handleEnumeratedAttribute(trainInstances);
    } else {
      // Sorting makes candidate thresholds the midpoints of adjacent values.
      trainInstances.sort(trainInstances.attribute(m_attIndex));
      handleNumericAttribute(trainInstances);
    }
  }

  /**
   * Returns index of attribute for which split was generated.
   */
  public final int attIndex() {
    return m_attIndex;
  }

  /**
   * Returns the split point (numeric attribute only).
   *
   * @return the split point used for a test on a numeric attribute
   */
  public double splitPoint() {
    return m_splitPoint;
  }

  /**
   * Returns (C4.5-type) gain ratio for the generated split.
   */
  public final double gainRatio() {
    return m_gainRatio;
  }

  /**
   * Gets class probability for instance.
   *
   * @param classIndex the class to compute the probability for
   * @param instance the instance to compute the probability for
   * @param theSubset index of the subset the instance falls into, or a
   *          negative value if the instance is split across subsets
   * @exception Exception if something goes wrong
   */
  @Override
  public final double classProb(int classIndex, Instance instance, int theSubset)
    throws Exception {

    if (theSubset <= -1) {
      // Instance is distributed across subsets (missing split value):
      // average per-subset probabilities, weighted by subset mass.
      double[] weights = weights(instance);
      if (weights == null) {
        return m_distribution.prob(classIndex);
      } else {
        double prob = 0;
        for (int i = 0; i < weights.length; i++) {
          prob += weights[i] * m_distribution.prob(classIndex, i);
        }
        return prob;
      }
    } else {
      if (Utils.gr(m_distribution.perBag(theSubset), 0)) {
        return m_distribution.prob(classIndex, theSubset);
      } else {
        // Empty subset: fall back to the overall class distribution.
        return m_distribution.prob(classIndex);
      }
    }
  }

  /**
   * Creates split on enumerated attribute: tries each attribute value as the
   * "equals" branch of a binary split and keeps the one with the best gain
   * ratio.
   *
   * @exception Exception if something goes wrong
   */
  private void handleEnumeratedAttribute(Instances trainInstances)
    throws Exception {

    Distribution newDistribution, secondDistribution;
    int numAttValues;
    double currIG, currGR;
    Instance instance;
    int i;

    numAttValues = trainInstances.attribute(m_attIndex).numValues();
    newDistribution = new Distribution(numAttValues,
      trainInstances.numClasses());

    // Only Instances with known values are relevant.
    Enumeration<Instance> enu = trainInstances.enumerateInstances();
    while (enu.hasMoreElements()) {
      instance = enu.nextElement();
      if (!instance.isMissing(m_attIndex)) {
        newDistribution.add((int) instance.value(m_attIndex), instance);
      }
    }
    m_distribution = newDistribution;

    // For all values
    for (i = 0; i < numAttValues; i++) {

      if (Utils.grOrEq(newDistribution.perBag(i), m_minNoObj)) {
        // Binary distribution: bag for value i vs. bag for all other values.
        secondDistribution = new Distribution(newDistribution, i);

        // Check if minimum number of Instances in the two
        // subsets.
        if (secondDistribution.check(m_minNoObj)) {
          m_numSubsets = 2;
          currIG = m_infoGainCrit.splitCritValue(secondDistribution,
            m_sumOfWeights);
          currGR = m_gainRatioCrit.splitCritValue(secondDistribution,
            m_sumOfWeights, currIG);
          if ((i == 0) || Utils.gr(currGR, m_gainRatio)) {
            m_gainRatio = currGR;
            m_infoGain = currIG;
            m_splitPoint = i;
            m_distribution = secondDistribution;
          }
        }
      }
    }
  }

  /**
   * Creates split on numeric attribute. Assumes the instances have been
   * sorted on the split attribute, with missing values at the end.
   *
   * @exception Exception if something goes wrong
   */
  private void handleNumericAttribute(Instances trainInstances)
    throws Exception {

    int firstMiss;
    int next = 1;
    int last = 0;
    int index = 0;
    int splitIndex = -1;
    double currentInfoGain;
    double defaultEnt;
    double minSplit;
    Instance instance;
    int i;

    // Current attribute is a numeric attribute.
    m_distribution = new Distribution(2, trainInstances.numClasses());

    // Only Instances with known values are relevant.
    Enumeration<Instance> enu = trainInstances.enumerateInstances();
    i = 0;
    while (enu.hasMoreElements()) {
      instance = enu.nextElement();
      if (instance.isMissing(m_attIndex)) {
        // Data is sorted, so the first missing value ends the known range.
        break;
      }
      m_distribution.add(1, instance);
      i++;
    }
    firstMiss = i;

    // Compute minimum number of Instances required in each
    // subset.
    minSplit = 0.1 * (m_distribution.total()) / (trainInstances.numClasses());
    if (Utils.smOrEq(minSplit, m_minNoObj)) {
      minSplit = m_minNoObj;
    } else if (Utils.gr(minSplit, 25)) {
      minSplit = 25;
    }

    // Enough Instances with known values?
    if (Utils.sm(firstMiss, 2 * minSplit)) {
      return;
    }

    // Compute values of criteria for all possible split
    // indices.
    defaultEnt = m_infoGainCrit.oldEnt(m_distribution);
    while (next < firstMiss) {

      // Only consider a boundary where adjacent values actually differ.
      if (trainInstances.instance(next - 1).value(m_attIndex) + 1e-5 < trainInstances
        .instance(next).value(m_attIndex)) {

        // Move class values for all Instances up to next
        // possible split point.
        m_distribution.shiftRange(1, 0, trainInstances, last, next);

        // Check if enough Instances in each subset and compute
        // values for criteria.
        if (Utils.grOrEq(m_distribution.perBag(0), minSplit)
          && Utils.grOrEq(m_distribution.perBag(1), minSplit)) {
          currentInfoGain = m_infoGainCrit.splitCritValue(m_distribution,
            m_sumOfWeights, defaultEnt);
          if (Utils.gr(currentInfoGain, m_infoGain)) {
            m_infoGain = currentInfoGain;
            splitIndex = next - 1;
          }
          index++;
        }
        last = next;
      }
      next++;
    }

    // Was there any useful split?
    if (index == 0) {
      return;
    }

    // Compute modified information gain for best split:
    // MDL penalty for having chosen among 'index' candidate split points.
    if (m_useMDLcorrection) {
      m_infoGain = m_infoGain - (Utils.log2(index) / m_sumOfWeights);
    }
    if (Utils.smOrEq(m_infoGain, 0)) {
      return;
    }

    // Set instance variables' values to values for
    // best split: threshold is the midpoint of the adjacent values.
    m_numSubsets = 2;
    m_splitPoint = (trainInstances.instance(splitIndex + 1).value(m_attIndex) + trainInstances
      .instance(splitIndex).value(m_attIndex)) / 2;

    // In case we have a numerical precision problem we need to choose the
    // smaller value
    if (m_splitPoint == trainInstances.instance(splitIndex + 1).value(
      m_attIndex)) {
      m_splitPoint = trainInstances.instance(splitIndex).value(m_attIndex);
    }

    // Restore distribution for best split.
    m_distribution = new Distribution(2, trainInstances.numClasses());
    m_distribution.addRange(0, trainInstances, 0, splitIndex + 1);
    m_distribution.addRange(1, trainInstances, splitIndex + 1, firstMiss);

    // Compute modified gain ratio for best split.
    m_gainRatio = m_gainRatioCrit.splitCritValue(m_distribution,
      m_sumOfWeights, m_infoGain);
  }

  /**
   * Returns (C4.5-type) information gain for the generated split.
   */
  public final double infoGain() {
    return m_infoGain;
  }

  /**
   * Prints left side of condition.
   *
   * @param data the data to get the attribute name from.
   * @return the attribute name
   */
  @Override
  public final String leftSide(Instances data) {
    return data.attribute(m_attIndex).name();
  }

  /**
   * Prints the condition satisfied by instances in a subset.
   *
   * @param index of subset and training set.
   */
  @Override
  public final String rightSide(int index, Instances data) {
    StringBuffer text;

    text = new StringBuffer();
    if (data.attribute(m_attIndex).isNominal()) {
      if (index == 0) {
        text.append(" = "
          + data.attribute(m_attIndex).value((int) m_splitPoint));
      } else {
        text.append(" != "
          + data.attribute(m_attIndex).value((int) m_splitPoint));
      }
    } else if (index == 0) {
      text.append(" <= " + m_splitPoint);
    } else {
      text.append(" > " + m_splitPoint);
    }
    return text.toString();
  }

  /**
   * Returns a string containing java source code equivalent to the test made at
   * this node. The instance being tested is called "i".
   *
   * @param index index of the nominal value tested
   * @param data the data containing instance structure info
   * @return a value of type 'String'
   */
  @Override
  public final String sourceExpression(int index, Instances data) {
    StringBuffer expr = null;

    if (index < 0) {
      return "i[" + m_attIndex + "] == null";
    }
    if (data.attribute(m_attIndex).isNominal()) {
      if (index == 0) {
        expr = new StringBuffer("i[");
      } else {
        expr = new StringBuffer("!i[");
      }
      expr.append(m_attIndex).append("]");
      expr.append(".equals(\"")
        .append(data.attribute(m_attIndex).value((int) m_splitPoint))
        .append("\")");
    } else {
      expr = new StringBuffer("((Double) i[");
      expr.append(m_attIndex).append("])");
      if (index == 0) {
        expr.append(".doubleValue() <= ").append(m_splitPoint);
      } else {
        expr.append(".doubleValue() > ").append(m_splitPoint);
      }
    }
    return expr.toString();
  }

  /**
   * Sets split point to greatest value in given data smaller or equal to old
   * split point. (C4.5 does this for some strange reason.)
   */
  public final void setSplitPoint(Instances allInstances) {
    double newSplitPoint = -Double.MAX_VALUE;
    double tempValue;
    Instance instance;

    // Only meaningful for numeric attributes with an actual binary split.
    if ((!allInstances.attribute(m_attIndex).isNominal()) && (m_numSubsets > 1)) {
      Enumeration<Instance> enu = allInstances.enumerateInstances();
      while (enu.hasMoreElements()) {
        instance = enu.nextElement();
        if (!instance.isMissing(m_attIndex)) {
          tempValue = instance.value(m_attIndex);
          if (Utils.gr(tempValue, newSplitPoint)
            && Utils.smOrEq(tempValue, m_splitPoint)) {
            newSplitPoint = tempValue;
          }
        }
      }
      m_splitPoint = newSplitPoint;
    }
  }

  /**
   * Sets distribution associated with model.
   *
   * @exception Exception if something goes wrong
   */
  @Override
  public void resetDistribution(Instances data) throws Exception {

    // Rebuild the distribution from instances that fall into exactly one
    // subset, then spread instances with unknown values over the bags.
    Instances insts = new Instances(data, data.numInstances());
    for (int i = 0; i < data.numInstances(); i++) {
      if (whichSubset(data.instance(i)) > -1) {
        insts.add(data.instance(i));
      }
    }
    Distribution newD = new Distribution(insts, this);
    newD.addInstWithUnknown(data, m_attIndex);
    m_distribution = newD;
  }

  /**
   * Returns weights if instance is assigned to more than one subset. Returns
   * null if instance is only assigned to one subset.
   */
  @Override
  public final double[] weights(Instance instance) {
    double[] weights;
    int i;

    if (instance.isMissing(m_attIndex)) {
      // Distribute the instance over subsets in proportion to subset mass.
      weights = new double[m_numSubsets];
      for (i = 0; i < m_numSubsets; i++) {
        weights[i] = m_distribution.perBag(i) / m_distribution.total();
      }
      return weights;
    } else {
      return null;
    }
  }

  /**
   * Returns index of subset instance is assigned to. Returns -1 if instance is
   * assigned to more than one subset.
   *
   * @exception Exception if something goes wrong
   */
  @Override
  public final int whichSubset(Instance instance) throws Exception {
    if (instance.isMissing(m_attIndex)) {
      return -1;
    } else {
      if (instance.attribute(m_attIndex).isNominal()) {
        // Subset 0 holds the tested value, subset 1 every other value.
        if ((int) m_splitPoint == (int) instance.value(m_attIndex)) {
          return 0;
        } else {
          return 1;
        }
      } else if (Utils.smOrEq(instance.value(m_attIndex), m_splitPoint)) {
        return 0;
      } else {
        return 1;
      }
    }
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/j48/C45ModelSelection.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* C45ModelSelection.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.j48;
import java.util.Enumeration;
import weka.core.Attribute;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* Class for selecting a C4.5-type split for a given dataset.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public class C45ModelSelection extends ModelSelection {

  /** for serialization */
  private static final long serialVersionUID = 3372204862440821989L;

  /** Minimum number of objects in interval. */
  protected final int m_minNoObj;

  /** Use MDL correction? */
  protected final boolean m_useMDLcorrection;

  /** All the training data; may be null after {@link #cleanup()}. */
  protected Instances m_allData;

  /** Do not relocate split point to actual data value */
  protected final boolean m_doNotMakeSplitPointActualValue;

  /**
   * Initializes the split selection method with the given parameters.
   *
   * @param minNoObj
   *          minimum number of instances that have to occur in at least two subsets induced by split
   * @param allData
   *          FULL training dataset (necessary for selection of split points).
   * @param useMDLcorrection
   *          whether to use MDL adjustment when finding splits on numeric attributes
   * @param doNotMakeSplitPointActualValue
   *          if true, split point is not relocated by scanning the entire dataset for the closest
   *          data value
   */
  public C45ModelSelection(final int minNoObj, final Instances allData, final boolean useMDLcorrection, final boolean doNotMakeSplitPointActualValue) {
    this.m_minNoObj = minNoObj;
    this.m_allData = allData;
    this.m_useMDLcorrection = useMDLcorrection;
    this.m_doNotMakeSplitPointActualValue = doNotMakeSplitPointActualValue;
  }

  /**
   * Sets reference to training data to null, allowing it to be
   * garbage-collected once the tree has been built.
   */
  public void cleanup() {
    this.m_allData = null;
  }

  /**
   * Selects C4.5-type split for the given dataset: the attribute with the
   * highest gain ratio among those with at least average information gain,
   * or a NoSplit model when no useful split exists. Responds to thread
   * interruption (interruptible-weka extension).
   *
   * @throws InterruptedException if the current thread is interrupted
   */
  @Override
  public final ClassifierSplitModel selectModel(final Instances data) throws InterruptedException {
    double minResult;
    C45Split[] currentModel;
    C45Split bestModel = null;
    NoSplit noSplitModel = null;
    double averageInfoGain = 0;
    int validModels = 0;
    boolean multiVal = true;
    Distribution checkDistribution;
    Attribute attribute;
    double sumOfWeights;
    int i;

    try {
      // Check if all Instances belong to one class or if not
      // enough Instances to split.
      checkDistribution = new Distribution(data);
      noSplitModel = new NoSplit(checkDistribution);
      if (Utils.sm(checkDistribution.total(), 2 * this.m_minNoObj) || Utils.eq(checkDistribution.total(), checkDistribution.perClass(checkDistribution.maxClass()))) {
        return noSplitModel;
      }

      // Check if all attributes are nominal and have a
      // lot of values. Only possible while the full dataset is available.
      if (this.m_allData != null) {
        Enumeration<Attribute> enu = data.enumerateAttributes();
        while (enu.hasMoreElements()) {
          attribute = enu.nextElement();
          if ((attribute.isNumeric()) || (Utils.sm(attribute.numValues(), (0.3 * this.m_allData.numInstances())))) {
            multiVal = false;
            break;
          }
        }
      }

      currentModel = new C45Split[data.numAttributes()];
      sumOfWeights = data.sumOfWeights();

      // For each attribute.
      for (i = 0; i < data.numAttributes(); i++) {
        // Abort promptly if this thread was asked to stop.
        if (Thread.interrupted()) {
          throw new InterruptedException("Killed WEKA!");
        }

        // Apart from class attribute.
        if (i != (data).classIndex()) {

          // Get models for current attribute.
          currentModel[i] = new C45Split(i, this.m_minNoObj, sumOfWeights, this.m_useMDLcorrection);
          currentModel[i].buildClassifier(data);

          // Check if useful split for current attribute
          // exists and check for enumerated attributes with
          // a lot of values.
          if (currentModel[i].checkModel()) {
            if (this.m_allData != null) {
              if ((data.attribute(i).isNumeric()) || (multiVal || Utils.sm(data.attribute(i).numValues(), (0.3 * this.m_allData.numInstances())))) {
                averageInfoGain = averageInfoGain + currentModel[i].infoGain();
                validModels++;
              }
            } else {
              averageInfoGain = averageInfoGain + currentModel[i].infoGain();
              validModels++;
            }
          }
        } else {
          currentModel[i] = null;
        }
      }

      // Check if any useful split was found.
      if (validModels == 0) {
        return noSplitModel;
      }
      averageInfoGain = averageInfoGain / validModels;

      // Find "best" attribute to split on.
      minResult = 0;
      for (i = 0; i < data.numAttributes(); i++) {
        if ((i != (data).classIndex()) && (currentModel[i].checkModel())) {
          // Use 1E-3 here to get a closer approximation to the original
          // implementation.
          if ((currentModel[i].infoGain() >= (averageInfoGain - 1E-3)) && Utils.gr(currentModel[i].gainRatio(), minResult)) {
            bestModel = currentModel[i];
            minResult = currentModel[i].gainRatio();
          }
        }
      }

      // Check if useful split was found.
      if (Utils.eq(minResult, 0)) {
        return noSplitModel;
      }

      // Add all Instances with unknown values for the corresponding
      // attribute to the distribution for the model, so that
      // the complete distribution is stored with the model.
      bestModel.distribution().addInstWithUnknown(data, bestModel.attIndex());

      // Set the split point analogue to C45 if attribute numeric.
      if ((this.m_allData != null) && (!this.m_doNotMakeSplitPointActualValue)) {
        bestModel.setSplitPoint(this.m_allData);
      }
      return bestModel;
    } catch (Exception e) {
      // Re-throw interruptions so callers can stop cleanly; any other
      // exception is reported and signalled via a null return.
      if (e instanceof InterruptedException) {
        throw (InterruptedException) e;
      }
      e.printStackTrace();
    }
    return null;
  }

  /**
   * Selects C4.5-type split for the given dataset. The test data is ignored.
   *
   * @throws InterruptedException if the current thread is interrupted
   */
  @Override
  public final ClassifierSplitModel selectModel(final Instances train, final Instances test) throws InterruptedException {
    return this.selectModel(train);
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/j48/C45PruneableClassifierTree.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* C45PruneableClassifierTree.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.j48;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* Class for handling a tree structure that can
* be pruned using C4.5 procedures.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public class C45PruneableClassifierTree
  extends ClassifierTree {

  /** for serialization */
  static final long serialVersionUID = -4813820170260388194L;

  /** True if the tree is to be pruned. */
  protected boolean m_pruneTheTree = false;

  /** True if the tree is to be collapsed. */
  protected boolean m_collapseTheTree = false;

  /** The confidence factor for pruning. */
  protected float m_CF = 0.25f;

  /** Is subtree raising to be performed? */
  protected boolean m_subtreeRaising = true;

  /** Cleanup after the tree has been built. */
  protected boolean m_cleanup = true;

  /**
   * Constructor for pruneable tree structure. Stores reference
   * to associated training data at each node.
   *
   * @param toSelectLocModel selection method for local splitting model
   * @param pruneTree true if the tree is to be pruned
   * @param cf the confidence factor for pruning
   * @param raiseTree true if subtree raising is to be performed
   * @param cleanup true to release training data after building
   * @param collapseTree true to collapse the tree before pruning
   * @throws Exception if something goes wrong
   */
  public C45PruneableClassifierTree(ModelSelection toSelectLocModel,
    boolean pruneTree, float cf,
    boolean raiseTree,
    boolean cleanup,
    boolean collapseTree)
    throws Exception {

    super(toSelectLocModel);

    m_pruneTheTree = pruneTree;
    m_CF = cf;
    m_subtreeRaising = raiseTree;
    m_cleanup = cleanup;
    m_collapseTheTree = collapseTree;
  }

  /**
   * Method for building a pruneable classifier tree: grows the tree, then
   * optionally collapses, prunes, and releases the training data.
   *
   * @param data the data for building the tree
   * @throws Exception if something goes wrong
   */
  public void buildClassifier(Instances data) throws Exception {

    // remove instances with missing class
    data = new Instances(data);
    data.deleteWithMissingClass();

    // Training data must be kept at the nodes when subtree raising is on
    // or cleanup is off.
    buildTree(data, m_subtreeRaising || !m_cleanup);
    if (m_collapseTheTree) {
      collapse();
    }
    if (m_pruneTheTree) {
      prune();
    }
    if (m_cleanup) {
      cleanup(new Instances(data, 0));
    }
  }

  /**
   * Collapses a tree to a node if training error doesn't increase.
   */
  public final void collapse(){

    double errorsOfSubtree;
    double errorsOfTree;
    int i;

    if (!m_isLeaf){
      errorsOfSubtree = getTrainingErrors();
      errorsOfTree = localModel().distribution().numIncorrect();
      // 1E-3 tolerance: collapse when the subtree is no better than a leaf.
      if (errorsOfSubtree >= errorsOfTree-1E-3){

        // Free adjacent trees
        m_sons = null;
        m_isLeaf = true;

        // Get NoSplit Model for tree.
        m_localModel = new NoSplit(localModel().distribution());
      }else
        for (i=0;i<m_sons.length;i++)
          son(i).collapse();
    }
  }

  /**
   * Prunes a tree using C4.5's pruning procedure: bottom-up, each node is
   * replaced by a leaf or by its largest branch if the pessimistic error
   * estimate does not get worse.
   *
   * @throws Exception if something goes wrong
   */
  public void prune() throws Exception {

    double errorsLargestBranch;
    double errorsLeaf;
    double errorsTree;
    int indexOfLargestBranch;
    C45PruneableClassifierTree largestBranch;
    int i;

    if (!m_isLeaf){

      // Prune all subtrees.
      for (i=0;i<m_sons.length;i++)
        son(i).prune();

      // Compute error for largest branch
      indexOfLargestBranch = localModel().distribution().maxBag();
      if (m_subtreeRaising) {
        errorsLargestBranch = son(indexOfLargestBranch).
          getEstimatedErrorsForBranch((Instances)m_train);
      } else {
        // Subtree raising disabled: make the largest branch never win.
        errorsLargestBranch = Double.MAX_VALUE;
      }

      // Compute error if this Tree would be leaf
      errorsLeaf =
        getEstimatedErrorsForDistribution(localModel().distribution());

      // Compute error for the whole subtree
      errorsTree = getEstimatedErrors();

      // Decide if leaf is best choice.
      if (Utils.smOrEq(errorsLeaf,errorsTree+0.1) &&
          Utils.smOrEq(errorsLeaf,errorsLargestBranch+0.1)){

        // Free son Trees
        m_sons = null;
        m_isLeaf = true;

        // Get NoSplit Model for node.
        m_localModel = new NoSplit(localModel().distribution());
        return;
      }

      // Decide if largest branch is better choice
      // than whole subtree.
      if (Utils.smOrEq(errorsLargestBranch,errorsTree+0.1)){
        // Subtree raising: replace this node by its largest branch,
        // redistribute the training data, and re-prune the raised subtree.
        largestBranch = son(indexOfLargestBranch);
        m_sons = largestBranch.m_sons;
        m_localModel = largestBranch.localModel();
        m_isLeaf = largestBranch.m_isLeaf;
        newDistribution(m_train);
        prune();
      }
    }
  }

  /**
   * Returns a newly created tree.
   *
   * @param data the data to work with
   * @return the new tree
   * @throws Exception if something goes wrong
   */
  protected ClassifierTree getNewTree(Instances data) throws Exception {

    C45PruneableClassifierTree newTree =
      new C45PruneableClassifierTree(m_toSelectModel, m_pruneTheTree, m_CF,
        m_subtreeRaising, m_cleanup, m_collapseTheTree);
    newTree.buildTree((Instances)data, m_subtreeRaising || !m_cleanup);

    return newTree;
  }

  /**
   * Computes estimated (pessimistic) errors for tree by summing over leaves.
   *
   * @return the estimated errors
   */
  private double getEstimatedErrors(){

    double errors = 0;
    int i;

    if (m_isLeaf)
      return getEstimatedErrorsForDistribution(localModel().distribution());
    else{
      for (i=0;i<m_sons.length;i++)
        errors = errors+son(i).getEstimatedErrors();
      return errors;
    }
  }

  /**
   * Computes estimated errors for one branch, re-splitting the given data
   * down the branch's structure.
   *
   * @param data the data to work with
   * @return the estimated errors
   * @throws Exception if something goes wrong
   */
  private double getEstimatedErrorsForBranch(Instances data)
    throws Exception {

    Instances [] localInstances;
    double errors = 0;
    int i;

    if (m_isLeaf)
      return getEstimatedErrorsForDistribution(new Distribution(data));
    else{
      // Temporarily reset the local distribution to split 'data',
      // restoring the saved distribution afterwards.
      Distribution savedDist = localModel().m_distribution;
      localModel().resetDistribution(data);
      localInstances = (Instances[])localModel().split(data);
      localModel().m_distribution = savedDist;
      for (i=0;i<m_sons.length;i++)
        errors = errors+
          son(i).getEstimatedErrorsForBranch(localInstances[i]);
      return errors;
    }
  }

  /**
   * Computes estimated errors for leaf: observed errors plus the
   * confidence-based pessimistic correction.
   *
   * @param theDistribution the distribution to use
   * @return the estimated errors
   */
  private double getEstimatedErrorsForDistribution(Distribution
                                                   theDistribution){

    if (Utils.eq(theDistribution.total(),0))
      return 0;
    else
      return theDistribution.numIncorrect()+
        Stats.addErrs(theDistribution.total(),
                      theDistribution.numIncorrect(),m_CF);
  }

  /**
   * Computes errors of tree on training data.
   *
   * @return the training errors
   */
  private double getTrainingErrors(){

    double errors = 0;
    int i;

    if (m_isLeaf)
      return localModel().distribution().numIncorrect();
    else{
      for (i=0;i<m_sons.length;i++)
        errors = errors+son(i).getTrainingErrors();
      return errors;
    }
  }

  /**
   * Method just exists to make program easier to read.
   *
   * @return the local split model
   */
  private ClassifierSplitModel localModel(){

    return (ClassifierSplitModel)m_localModel;
  }

  /**
   * Computes new distributions of instances for nodes
   * in tree (used after subtree raising).
   *
   * @param data the data to compute the distributions for
   * @throws Exception if something goes wrong
   */
  private void newDistribution(Instances data) throws Exception {

    Instances [] localInstances;

    localModel().resetDistribution(data);
    m_train = data;
    if (!m_isLeaf){
      localInstances =
        (Instances [])localModel().split(data);
      for (int i = 0; i < m_sons.length; i++)
        son(i).newDistribution(localInstances[i]);
    } else {

      // Check whether there are some instances at the leaf now!
      if (!Utils.eq(data.sumOfWeights(), 0)) {
        m_isEmpty = false;
      }
    }
  }

  /**
   * Method just exists to make program easier to read.
   */
  private C45PruneableClassifierTree son(int index){

    return (C45PruneableClassifierTree)m_sons[index];
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/j48/C45Split.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* C45Split.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.j48;
import java.util.Enumeration;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* Class implementing a C4.5-type split on an attribute.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public class C45Split extends ClassifierSplitModel {

  /** for serialization */
  private static final long serialVersionUID = 3064079330067903161L;

  /** Desired number of branches. */
  protected int m_complexityIndex;

  /** Attribute to split on. */
  protected final int m_attIndex;

  /** Minimum number of objects in a split. */
  protected final int m_minNoObj;

  /** Use MDL correction? */
  protected final boolean m_useMDLcorrection;

  /** Value of split point. */
  protected double m_splitPoint;

  /** InfoGain of split. */
  protected double m_infoGain;

  /** GainRatio of split. */
  protected double m_gainRatio;

  /** The sum of the weights of the instances. */
  protected final double m_sumOfWeights;

  /** Number of split points (used for the MDL correction and coding cost). */
  protected int m_index;

  /** Static reference to splitting criterion. */
  protected static InfoGainSplitCrit infoGainCrit = new InfoGainSplitCrit();

  /** Static reference to splitting criterion. */
  protected static GainRatioSplitCrit gainRatioCrit = new GainRatioSplitCrit();

  /**
   * Initializes the split model.
   *
   * @param attIndex index of the attribute to split on
   * @param minNoObj minimum number of instances required in each subset
   * @param sumOfWeights sum of the weights of the training instances
   * @param useMDLcorrection whether to apply the MDL correction to the info
   *          gain of splits on numeric attributes
   */
  public C45Split(final int attIndex, final int minNoObj, final double sumOfWeights, final boolean useMDLcorrection) {
    // Get index of attribute to split on.
    this.m_attIndex = attIndex;
    // Set minimum number of objects.
    this.m_minNoObj = minNoObj;
    // Set the sum of the weights
    this.m_sumOfWeights = sumOfWeights;
    // Whether to use the MDL correction for numeric attributes
    this.m_useMDLcorrection = useMDLcorrection;
  }

  /**
   * Creates a C4.5-type split on the given data. Assumes that none of the class values is missing.
   *
   * @param trainInstances the data to evaluate the split on
   * @exception Exception
   *              if something goes wrong
   */
  @Override
  public void buildClassifier(final Instances trainInstances) throws Exception {
    // Initialize the remaining instance variables.
    this.m_numSubsets = 0;
    this.m_splitPoint = Double.MAX_VALUE;
    this.m_infoGain = 0;
    this.m_gainRatio = 0;
    // Different treatment for enumerated and numeric
    // attributes.
    if (trainInstances.attribute(this.m_attIndex).isNominal()) {
      // Nominal attribute: one branch per attribute value.
      this.m_complexityIndex = trainInstances.attribute(this.m_attIndex).numValues();
      this.m_index = this.m_complexityIndex;
      this.handleEnumeratedAttribute(trainInstances);
    } else {
      // Numeric attribute: binary split (<= splitPoint / > splitPoint).
      this.m_complexityIndex = 2;
      this.m_index = 0;
      // Honor thread interruption so a long-running build can be cancelled
      // (this is the "interruptible" fork of WEKA).
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA!");
      }
      // handleNumericAttribute relies on the data being sorted by this attribute.
      trainInstances.sort(trainInstances.attribute(this.m_attIndex));
      this.handleNumericAttribute(trainInstances);
    }
  }

  /**
   * Returns index of attribute for which split was generated.
   *
   * @return the index of the split attribute
   */
  public final int attIndex() {
    return this.m_attIndex;
  }

  /**
   * Returns the split point (numeric attribute only).
   *
   * @return the split point used for a test on a numeric attribute
   */
  public double splitPoint() {
    return this.m_splitPoint;
  }

  /**
   * Gets class probability for instance.
   *
   * @param classIndex the index of the class to get the probability for
   * @param instance the instance to compute the probability for
   * @param theSubset the subset the instance falls into; a value <= -1 means
   *          the instance could not be assigned to a single subset (e.g. its
   *          split-attribute value is missing)
   * @exception Exception
   *              if something goes wrong
   */
  @Override
  public final double classProb(final int classIndex, final Instance instance, final int theSubset) throws Exception {
    if (theSubset <= -1) {
      double[] weights = this.weights(instance);
      if (weights == null) {
        // No per-subset weights available: use the overall class distribution.
        return this.m_distribution.prob(classIndex);
      } else {
        // Mix the per-subset probabilities, weighted by relative subset size.
        double prob = 0;
        for (int i = 0; i < weights.length; i++) {
          prob += weights[i] * this.m_distribution.prob(classIndex, i);
        }
        return prob;
      }
    } else {
      if (Utils.gr(this.m_distribution.perBag(theSubset), 0)) {
        return this.m_distribution.prob(classIndex, theSubset);
      } else {
        // Empty subset: fall back to the overall class distribution.
        return this.m_distribution.prob(classIndex);
      }
    }
  }

  /**
   * Returns coding cost for split (used in rule learner).
   * Computed as log2 of the number of possible split points.
   */
  @Override
  public final double codingCost() {
    return Utils.log2(this.m_index);
  }

  /**
   * Returns (C4.5-type) gain ratio for the generated split.
   */
  public final double gainRatio() {
    return this.m_gainRatio;
  }

  /**
   * Creates split on enumerated attribute.
   *
   * @param trainInstances the data to create the split from
   * @exception Exception
   *              if something goes wrong
   */
  private void handleEnumeratedAttribute(final Instances trainInstances) throws Exception {
    Instance instance;
    // One bag per attribute value.
    this.m_distribution = new Distribution(this.m_complexityIndex, trainInstances.numClasses());
    // Only Instances with known values are relevant.
    Enumeration<Instance> enu = trainInstances.enumerateInstances();
    while (enu.hasMoreElements()) {
      instance = enu.nextElement();
      if (!instance.isMissing(this.m_attIndex)) {
        this.m_distribution.add((int) instance.value(this.m_attIndex), instance);
      }
    }
    // Check if minimum number of Instances in at least two
    // subsets.
    if (this.m_distribution.check(this.m_minNoObj)) {
      this.m_numSubsets = this.m_complexityIndex;
      this.m_infoGain = infoGainCrit.splitCritValue(this.m_distribution, this.m_sumOfWeights);
      this.m_gainRatio = gainRatioCrit.splitCritValue(this.m_distribution, this.m_sumOfWeights, this.m_infoGain);
    }
  }

  /**
   * Creates split on numeric attribute. Expects the instances to be sorted by
   * the split attribute (see {@link #buildClassifier(Instances)}).
   *
   * @param trainInstances the (sorted) data to create the split from
   * @exception Exception
   *              if something goes wrong
   */
  private void handleNumericAttribute(final Instances trainInstances) throws Exception {
    int firstMiss;
    int next = 1;
    int last = 0;
    int splitIndex = -1;
    double currentInfoGain;
    double defaultEnt;
    double minSplit;
    Instance instance;
    int i;
    // Current attribute is a numeric attribute.
    this.m_distribution = new Distribution(2, trainInstances.numClasses());
    // Only Instances with known values are relevant.
    // Start with everything in bag 1 (the "> splitPoint" side); candidates
    // are shifted into bag 0 as the scan advances.
    Enumeration<Instance> enu = trainInstances.enumerateInstances();
    i = 0;
    while (enu.hasMoreElements()) {
      // XXX kill weka execution
      if (Thread.interrupted()) {
        throw new InterruptedException("Thread got interrupted, thus, kill WEKA.");
      }
      instance = enu.nextElement();
      if (instance.isMissing(this.m_attIndex)) {
        // Sorted data: all missing values are at the end.
        break;
      }
      this.m_distribution.add(1, instance);
      i++;
    }
    firstMiss = i;
    // Compute minimum number of Instances required in each
    // subset.
    minSplit = 0.1 * (this.m_distribution.total()) / (trainInstances.numClasses());
    if (Utils.smOrEq(minSplit, this.m_minNoObj)) {
      minSplit = this.m_minNoObj;
    } else if (Utils.gr(minSplit, 25)) {
      minSplit = 25;
    }
    // Enough Instances with known values?
    if (Utils.sm(firstMiss, 2 * minSplit)) {
      return;
    }
    // Compute values of criteria for all possible split
    // indices.
    defaultEnt = infoGainCrit.oldEnt(this.m_distribution);
    while (next < firstMiss) {
      // Only consider a split between two adjacent, distinct values
      // (difference greater than 1e-5).
      if (trainInstances.instance(next - 1).value(this.m_attIndex) + 1e-5 < trainInstances.instance(next).value(this.m_attIndex)) {
        // Move class values for all Instances up to next
        // possible split point.
        this.m_distribution.shiftRange(1, 0, trainInstances, last, next);
        // Check if enough Instances in each subset and compute
        // values for criteria.
        if (Utils.grOrEq(this.m_distribution.perBag(0), minSplit) && Utils.grOrEq(this.m_distribution.perBag(1), minSplit)) {
          currentInfoGain = infoGainCrit.splitCritValue(this.m_distribution, this.m_sumOfWeights, defaultEnt);
          if (Utils.gr(currentInfoGain, this.m_infoGain)) {
            this.m_infoGain = currentInfoGain;
            splitIndex = next - 1;
          }
          // Count admissible split points for the MDL correction below.
          this.m_index++;
        }
        last = next;
      }
      next++;
    }
    // Was there any useful split?
    if (this.m_index == 0) {
      return;
    }
    // Compute modified information gain for best split.
    if (this.m_useMDLcorrection) {
      this.m_infoGain = this.m_infoGain - (Utils.log2(this.m_index) / this.m_sumOfWeights);
    }
    if (Utils.smOrEq(this.m_infoGain, 0)) {
      return;
    }
    // Set instance variables' values to values for
    // best split.
    this.m_numSubsets = 2;
    this.m_splitPoint = (trainInstances.instance(splitIndex + 1).value(this.m_attIndex) + trainInstances.instance(splitIndex).value(this.m_attIndex)) / 2;
    // In case we have a numerical precision problem we need to choose the
    // smaller value
    if (this.m_splitPoint == trainInstances.instance(splitIndex + 1).value(this.m_attIndex)) {
      this.m_splitPoint = trainInstances.instance(splitIndex).value(this.m_attIndex);
    }
    // Restore distribution for best split.
    this.m_distribution = new Distribution(2, trainInstances.numClasses());
    this.m_distribution.addRange(0, trainInstances, 0, splitIndex + 1);
    this.m_distribution.addRange(1, trainInstances, splitIndex + 1, firstMiss);
    // Compute modified gain ratio for best split.
    this.m_gainRatio = gainRatioCrit.splitCritValue(this.m_distribution, this.m_sumOfWeights, this.m_infoGain);
  }

  /**
   * Returns (C4.5-type) information gain for the generated split.
   */
  public final double infoGain() {
    return this.m_infoGain;
  }

  /**
   * Prints left side of condition..
   *
   * @param data
   *          training set.
   */
  @Override
  public final String leftSide(final Instances data) {
    return data.attribute(this.m_attIndex).name();
  }

  /**
   * Prints the condition satisfied by instances in a subset.
   *
   * @param index
   *          of subset
   * @param data
   *          training set.
   */
  @Override
  public final String rightSide(final int index, final Instances data) {
    StringBuffer text;
    text = new StringBuffer();
    if (data.attribute(this.m_attIndex).isNominal()) {
      text.append(" = " + data.attribute(this.m_attIndex).value(index));
    } else if (index == 0) {
      text.append(" <= " + Utils.doubleToString(this.m_splitPoint, 6));
    } else {
      text.append(" > " + Utils.doubleToString(this.m_splitPoint, 6));
    }
    return text.toString();
  }

  /**
   * Returns a string containing java source code equivalent to the test made at this node. The
   * instance being tested is called "i".
   *
   * @param index
   *          index of the nominal value tested; a negative index produces the
   *          missing-value test
   * @param data
   *          the data containing instance structure info
   * @return a value of type 'String'
   */
  @Override
  public final String sourceExpression(final int index, final Instances data) {
    StringBuffer expr = null;
    if (index < 0) {
      return "i[" + this.m_attIndex + "] == null";
    }
    if (data.attribute(this.m_attIndex).isNominal()) {
      expr = new StringBuffer("i[");
      expr.append(this.m_attIndex).append("]");
      expr.append(".equals(\"").append(data.attribute(this.m_attIndex).value(index)).append("\")");
    } else {
      expr = new StringBuffer("((Double) i[");
      expr.append(this.m_attIndex).append("])");
      if (index == 0) {
        expr.append(".doubleValue() <= ").append(this.m_splitPoint);
      } else {
        expr.append(".doubleValue() > ").append(this.m_splitPoint);
      }
    }
    return expr.toString();
  }

  /**
   * Sets split point to greatest value in given data smaller or equal to old split point. (C4.5 does
   * this for some strange reason).
   *
   * @param allInstances the data to look up attribute values in
   */
  public final void setSplitPoint(final Instances allInstances) {
    double newSplitPoint = -Double.MAX_VALUE;
    double tempValue;
    Instance instance;
    if ((allInstances.attribute(this.m_attIndex).isNumeric()) && (this.m_numSubsets > 1)) {
      Enumeration<Instance> enu = allInstances.enumerateInstances();
      while (enu.hasMoreElements()) {
        instance = enu.nextElement();
        if (!instance.isMissing(this.m_attIndex)) {
          tempValue = instance.value(this.m_attIndex);
          if (Utils.gr(tempValue, newSplitPoint) && Utils.smOrEq(tempValue, this.m_splitPoint)) {
            newSplitPoint = tempValue;
          }
        }
      }
      this.m_splitPoint = newSplitPoint;
    }
  }

  /**
   * Returns the minsAndMaxs of the index.th subset.
   *
   * @param data the data (for the attribute structure)
   * @param minsAndMaxs the current per-attribute [min, max] ranges
   * @param index the index of the subset
   * @return a copy of the ranges, narrowed for the split attribute
   */
  public final double[][] minsAndMaxs(final Instances data, final double[][] minsAndMaxs, final int index) {
    double[][] newMinsAndMaxs = new double[data.numAttributes()][2];
    for (int i = 0; i < data.numAttributes(); i++) {
      newMinsAndMaxs[i][0] = minsAndMaxs[i][0];
      newMinsAndMaxs[i][1] = minsAndMaxs[i][1];
      if (i == this.m_attIndex) {
        if (data.attribute(this.m_attIndex).isNominal()) {
          newMinsAndMaxs[this.m_attIndex][1] = 1;
        } else {
          // Subset 0 ("<=") gets its max set to the split point;
          // subset 1 (">") gets its min set to the split point.
          newMinsAndMaxs[this.m_attIndex][1 - index] = this.m_splitPoint;
        }
      }
    }
    return newMinsAndMaxs;
  }

  /**
   * Sets distribution associated with model. Instances with a missing split
   * attribute are added with their class distributed over the subsets.
   */
  @Override
  public void resetDistribution(final Instances data) throws Exception {
    // Collect the instances that can be assigned to a single subset.
    Instances insts = new Instances(data, data.numInstances());
    for (int i = 0; i < data.numInstances(); i++) {
      if (this.whichSubset(data.instance(i)) > -1) {
        insts.add(data.instance(i));
      }
    }
    Distribution newD = new Distribution(insts, this);
    newD.addInstWithUnknown(data, this.m_attIndex);
    this.m_distribution = newD;
  }

  /**
   * Returns weights if instance is assigned to more than one subset. Returns null if instance is only
   * assigned to one subset.
   */
  @Override
  public final double[] weights(final Instance instance) {
    double[] weights;
    int i;
    if (instance.isMissing(this.m_attIndex)) {
      // Weight each subset by its relative size.
      weights = new double[this.m_numSubsets];
      for (i = 0; i < this.m_numSubsets; i++) {
        weights[i] = this.m_distribution.perBag(i) / this.m_distribution.total();
      }
      return weights;
    } else {
      return null;
    }
  }

  /**
   * Returns index of subset instance is assigned to. Returns -1 if instance is assigned to more than
   * one subset.
   *
   * @exception Exception
   *              if something goes wrong
   */
  @Override
  public final int whichSubset(final Instance instance) throws Exception {
    if (instance.isMissing(this.m_attIndex)) {
      return -1;
    } else {
      if (instance.attribute(this.m_attIndex).isNominal()) {
        return (int) instance.value(this.m_attIndex);
      } else if (Utils.smOrEq(instance.value(this.m_attIndex), this.m_splitPoint)) {
        return 0;
      } else {
        return 1;
      }
    }
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/j48/ClassifierSplitModel.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ClassifierSplitModel.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.j48;
import java.io.Serializable;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.Utils;
/**
* Abstract class for classification models that can be used
* recursively to split the data.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public abstract class ClassifierSplitModel implements Cloneable, Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = 4280730118393457457L;

  /** Distribution of class values. */
  protected Distribution m_distribution;

  /** Number of created subsets. */
  protected int m_numSubsets;

  /**
   * Allows to clone a model (shallow copy).
   *
   * @return a shallow copy of this model
   */
  @Override
  public Object clone() {
    Object clone = null;
    try {
      clone = super.clone();
    } catch (CloneNotSupportedException ignored) {
      // Cannot happen: this class implements Cloneable.
    }
    return clone;
  }

  /**
   * Builds the classifier split model for the given set of instances.
   *
   * @exception Exception if something goes wrong
   */
  public abstract void buildClassifier(Instances instances) throws Exception;

  /**
   * Checks if generated model is valid, i.e. whether the split produced at
   * least one subset.
   *
   * @return true if the model is valid
   */
  public final boolean checkModel() {
    return this.m_numSubsets > 0;
  }

  /**
   * Classifies a given instance. If the instance cannot be assigned to a
   * single subset, the majority class of the overall distribution is used.
   *
   * @exception Exception if something goes wrong
   */
  public final double classifyInstance(final Instance instance) throws Exception {
    int theSubset;
    theSubset = this.whichSubset(instance);
    if (theSubset > -1) {
      return this.m_distribution.maxClass(theSubset);
    } else {
      return this.m_distribution.maxClass();
    }
  }

  /**
   * Gets class probability for instance. When the instance cannot be assigned
   * to a single subset, the per-subset probabilities are mixed according to
   * the weights returned by {@link #weights(Instance)}.
   *
   * @exception Exception if something goes wrong
   */
  public double classProb(final int classIndex, final Instance instance, final int theSubset) throws Exception {
    if (theSubset > -1) {
      return this.m_distribution.prob(classIndex, theSubset);
    } else {
      double[] weights = this.weights(instance);
      if (weights == null) {
        return this.m_distribution.prob(classIndex);
      } else {
        double prob = 0;
        for (int i = 0; i < weights.length; i++) {
          prob += weights[i] * this.m_distribution.prob(classIndex, i);
        }
        return prob;
      }
    }
  }

  /**
   * Gets class probability for instance, using the Laplace-corrected
   * distribution.
   *
   * @exception Exception if something goes wrong
   */
  public double classProbLaplace(final int classIndex, final Instance instance, final int theSubset) throws Exception {
    if (theSubset > -1) {
      return this.m_distribution.laplaceProb(classIndex, theSubset);
    } else {
      double[] weights = this.weights(instance);
      if (weights == null) {
        return this.m_distribution.laplaceProb(classIndex);
      } else {
        double prob = 0;
        for (int i = 0; i < weights.length; i++) {
          prob += weights[i] * this.m_distribution.laplaceProb(classIndex, i);
        }
        return prob;
      }
    }
  }

  /**
   * Returns coding costs of model. Returns 0 if not overwritten.
   */
  public double codingCost() {
    return 0;
  }

  /**
   * Returns the distribution of class values induced by the model.
   */
  public final Distribution distribution() {
    return this.m_distribution;
  }

  /**
   * Prints left side of condition satisfied by instances.
   *
   * @param data the data.
   */
  public abstract String leftSide(Instances data);

  /**
   * Prints left side of condition satisfied by instances in subset index.
   */
  public abstract String rightSide(int index, Instances data);

  /**
   * Prints label for subset index of instances (eg class).
   *
   * @exception Exception if something goes wrong
   */
  public final String dumpLabel(final int index, final Instances data) throws Exception {
    StringBuffer text;
    text = new StringBuffer();
    text.append(data.classAttribute().value(this.m_distribution.maxClass(index)));
    // Append "(total / misclassified)" weights for the subset; the
    // misclassified part is only shown when it is non-zero.
    text.append(" (" + Utils.roundDouble(this.m_distribution.perBag(index), 2));
    if (Utils.gr(this.m_distribution.numIncorrect(index), 0)) {
      text.append("/" + Utils.roundDouble(this.m_distribution.numIncorrect(index), 2));
    }
    text.append(")");
    return text.toString();
  }

  /**
   * Returns the index of the majority class of the given subset as a string
   * (used when generating java source code).
   *
   * @param index the index of the subset
   * @param data the training data (unused)
   * @return the majority class index of the subset, as a string
   * @exception Exception if something goes wrong
   */
  public final String sourceClass(final int index, final Instances data) throws Exception {
    // Bug fix: the previous implementation passed the class index to
    // new StringBuffer(int), which is the CAPACITY constructor, so the
    // method always returned an empty string. Also removed a stray
    // System.err debug print.
    return Integer.toString(this.m_distribution.maxClass(index));
  }

  public abstract String sourceExpression(int index, Instances data);

  /**
   * Prints the split model.
   *
   * @exception Exception if something goes wrong
   */
  public final String dumpModel(final Instances data) throws Exception {
    StringBuffer text;
    int i;
    text = new StringBuffer();
    for (i = 0; i < this.m_numSubsets; i++) {
      text.append(this.leftSide(data) + this.rightSide(i, data) + ": ");
      text.append(this.dumpLabel(i, data) + "\n");
    }
    return text.toString();
  }

  /**
   * Returns the number of created subsets for the split.
   */
  public final int numSubsets() {
    return this.m_numSubsets;
  }

  /**
   * Sets distribution associated with model.
   */
  public void resetDistribution(final Instances data) throws Exception {
    this.m_distribution = new Distribution(data, this);
  }

  /**
   * Sets the distribution associated with model.
   *
   * @param dist the distribution to use
   */
  public void setDistribution(final Distribution dist) {
    this.m_distribution = dist;
  }

  /**
   * Splits the given set of instances into subsets. An instance that cannot
   * be assigned to a single subset is added (with a reduced weight) to every
   * subset with a positive weight.
   *
   * @exception Exception if something goes wrong
   */
  public final Instances[] split(final Instances data) throws Exception {
    // Find size and constitution of subsets
    int[] subsetSize = new int[this.m_numSubsets];
    for (Instance instance : data) {
      // Honor thread interruption so a long-running split can be cancelled.
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA!");
      }
      int subset = this.whichSubset(instance);
      if (subset > -1) {
        subsetSize[subset]++;
      } else {
        double[] weights = this.weights(instance);
        for (int j = 0; j < this.m_numSubsets; j++) {
          if (Utils.gr(weights[j], 0)) {
            subsetSize[j]++;
          }
        }
      }
    }
    // Create subsets
    Instances[] instances = new Instances[this.m_numSubsets];
    for (int j = 0; j < this.m_numSubsets; j++) {
      instances[j] = new Instances(data, subsetSize[j]);
    }
    for (Instance instance : data) {
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA!");
      }
      int subset = this.whichSubset(instance);
      if (subset > -1) {
        instances[subset].add(instance);
      } else {
        double[] weights = this.weights(instance);
        for (int j = 0; j < this.m_numSubsets; j++) {
          if (Utils.gr(weights[j], 0)) {
            instances[j].add(instance);
            // Scale the copied instance's weight by its subset membership.
            instances[j].lastInstance().setWeight(weights[j] * instance.weight());
          }
        }
      }
    }
    return instances;
  }

  /**
   * Returns weights if instance is assigned to more than one subset.
   * Returns null if instance is only assigned to one subset.
   */
  public abstract double[] weights(Instance instance);

  /**
   * Returns index of subset instance is assigned to.
   * Returns -1 if instance is assigned to more than one subset.
   *
   * @exception Exception if something goes wrong
   */
  public abstract int whichSubset(Instance instance) throws Exception;
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/j48/ClassifierTree.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ClassifierTree.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.j48;
import java.io.Serializable;
import java.util.LinkedList;
import java.util.Queue;
import weka.core.Capabilities;
import weka.core.CapabilitiesHandler;
import weka.core.Drawable;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* Class for handling a tree structure used for classification.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public class ClassifierTree implements Drawable, Serializable, RevisionHandler, CapabilitiesHandler {
/** for serialization */
static final long serialVersionUID = -8722249377542734193L;

/** The model selection method. */
protected ModelSelection m_toSelectModel;

/** Local model at node. */
protected ClassifierSplitModel m_localModel;

/** References to sons (null for a leaf). */
protected ClassifierTree[] m_sons;

/** True if node is leaf. */
protected boolean m_isLeaf;

/** True if node is empty (total instance weight is zero). */
protected boolean m_isEmpty;

/** The training instances. */
protected Instances m_train;

/** The pruning instances. */
protected Distribution m_test;

/** The id for the node. */
protected int m_id;

/**
 * For getting a unique ID when outputting the tree (hashcode isn't guaranteed unique).
 * NOTE(review): this is an unsynchronized shared counter (see nextID()/resetID()),
 * so IDs are only unique when printing from a single thread.
 */
private static long PRINTED_NODES = 0;
/**
 * Returns the split model used at this node.
 *
 * @return the local classifier split model
 */
public ClassifierSplitModel getLocalModel() {
  return this.m_localModel;
}

/**
 * Returns the subtrees hanging off this node.
 *
 * @return the child trees, or null if this node is a leaf
 */
public ClassifierTree[] getSons() {
  return this.m_sons;
}

/**
 * Returns whether this node is a leaf.
 *
 * @return true if this node is a leaf
 */
public boolean isLeaf() {
  return this.m_isLeaf;
}

/**
 * Returns the training instances stored at this node (may be null or just
 * header information if the data was not kept or was cleaned up).
 *
 * @return the training data
 */
public Instances getTrainingData() {
  return this.m_train;
}
/**
 * Gets the next unique node ID.
 *
 * NOTE(review): PRINTED_NODES is a plain static counter with no
 * synchronization, so IDs are only unique when printing from a single thread.
 *
 * @return the next unique node ID.
 */
protected static long nextID() {
  return PRINTED_NODES++;
}

/**
 * Resets the unique node ID counter (e.g. between repeated separate print types)
 */
protected static void resetID() {
  PRINTED_NODES = 0;
}
/**
 * Returns default capabilities of the classifier tree: everything is
 * enabled.
 *
 * @return the capabilities of this classifier tree
 */
@Override
public Capabilities getCapabilities() {
  final Capabilities caps = new Capabilities(this);
  caps.enableAll();
  return caps;
}
/**
 * Constructor.
 *
 * @param toSelectLocModel the model selection method used to pick the split
 *          model at each node
 */
public ClassifierTree(final ModelSelection toSelectLocModel) {
  this.m_toSelectModel = toSelectLocModel;
}
/**
 * Method for building a classifier tree.
 *
 * @param data
 *          the data to build the tree from
 * @throws Exception
 *           if something goes wrong
 */
public void buildClassifier(Instances data) throws Exception {
  // Work on a copy so the caller's dataset is left untouched, and drop
  // instances whose class value is missing before growing the tree.
  final Instances cleaned = new Instances(data);
  cleaned.deleteWithMissingClass();
  this.buildTree(cleaned, false);
}
/**
 * Builds the tree structure.
 *
 * @param data
 *          the data for which the tree structure is to be generated.
 * @param keepData
 *          is training data to be kept?
 * @throws Exception
 *           if something goes wrong
 */
public void buildTree(Instances data, final boolean keepData) throws Exception {
  // XXX kill weka execution
  if (Thread.interrupted()) {
    throw new InterruptedException("Thread got interrupted, thus, kill WEKA.");
  }
  Instances[] localInstances;
  if (keepData) {
    this.m_train = data;
  }
  this.m_test = null;
  this.m_isLeaf = false;
  this.m_isEmpty = false;
  this.m_sons = null;
  // Choose the split model for this node.
  this.m_localModel = this.m_toSelectModel.selectModel(data);
  if (this.m_localModel.numSubsets() > 1) {
    // Real split: partition the data and grow one subtree per subset.
    localInstances = this.m_localModel.split(data);
    data = null; // release the full dataset for garbage collection
    this.m_sons = new ClassifierTree[this.m_localModel.numSubsets()];
    for (int i = 0; i < this.m_sons.length; i++) {
      this.m_sons[i] = this.getNewTree(localInstances[i]);
      localInstances[i] = null;
    }
  } else {
    // No useful split found: this node becomes a leaf.
    this.m_isLeaf = true;
    if (Utils.eq(data.sumOfWeights(), 0)) {
      this.m_isEmpty = true;
    }
    data = null;
  }
}
/**
 * Builds the tree structure with hold out set
 *
 * @param train
 *          the data for which the tree structure is to be generated.
 * @param test
 *          the test data for potential pruning
 * @param keepData
 *          is training Data to be kept?
 * @throws Exception
 *           if something goes wrong
 */
public void buildTree(Instances train, Instances test, final boolean keepData) throws Exception {
  // XXX kill weka execution — added for consistency with
  // buildTree(Instances, boolean), so growing with a hold-out set is
  // interruptible as well.
  if (Thread.interrupted()) {
    throw new InterruptedException("Thread got interrupted, thus, kill WEKA.");
  }
  Instances[] localTrain, localTest;
  int i;
  if (keepData) {
    this.m_train = train;
  }
  this.m_isLeaf = false;
  this.m_isEmpty = false;
  this.m_sons = null;
  // Choose the split model on the training data, and record the hold-out
  // distribution for pruning.
  this.m_localModel = this.m_toSelectModel.selectModel(train, test);
  this.m_test = new Distribution(test, this.m_localModel);
  if (this.m_localModel.numSubsets() > 1) {
    // Real split: partition both the training and hold-out data.
    localTrain = this.m_localModel.split(train);
    localTest = this.m_localModel.split(test);
    train = null; // release for garbage collection
    test = null;
    this.m_sons = new ClassifierTree[this.m_localModel.numSubsets()];
    for (i = 0; i < this.m_sons.length; i++) {
      this.m_sons[i] = this.getNewTree(localTrain[i], localTest[i]);
      localTrain[i] = null;
      localTest[i] = null;
    }
  } else {
    // No useful split found: this node becomes a leaf.
    this.m_isLeaf = true;
    if (Utils.eq(train.sumOfWeights(), 0)) {
      this.m_isEmpty = true;
    }
    train = null;
    test = null;
  }
}
/**
 * Classifies an instance by returning the class index with the highest
 * probability.
 *
 * @param instance
 *          the instance to classify
 * @return the classification (index of the most probable class)
 * @throws Exception
 *           if something goes wrong
 */
public double classifyInstance(final Instance instance) throws Exception {
  int bestIndex = 0;
  double bestProb = -1;
  for (int classIdx = 0; classIdx < instance.numClasses(); classIdx++) {
    final double prob = this.getProbs(classIdx, instance, 1);
    if (Utils.gr(prob, bestProb)) {
      bestIndex = classIdx;
      bestProb = prob;
    }
  }
  return bestIndex;
}
/**
 * Cleanup in order to save memory: replaces the stored training data with
 * header-only information and drops the pruning distribution, recursively
 * for all subtrees.
 *
 * @param justHeaderInfo the header-only dataset to keep
 */
public final void cleanup(final Instances justHeaderInfo) {
  this.m_train = justHeaderInfo;
  this.m_test = null;
  if (!this.m_isLeaf) {
    for (final ClassifierTree child : this.m_sons) {
      child.cleanup(justHeaderInfo);
    }
  }
}
/**
 * Returns class probabilities for a weighted instance.
 *
 * @param instance
 *          the instance to get the distribution for
 * @param useLaplace
 *          whether to use laplace or not
 * @return the distribution (one probability per class)
 * @throws Exception
 *           if something goes wrong
 */
public final double[] distributionForInstance(final Instance instance, final boolean useLaplace) throws Exception {
  double[] doubles = new double[instance.numClasses()];
  for (int i = 0; i < doubles.length; i++) {
    // XXX kill weka execution
    if (Thread.interrupted()) {
      throw new InterruptedException("Thread got interrupted, thus, kill WEKA.");
    }
    if (!useLaplace) {
      doubles[i] = this.getProbs(i, instance, 1);
    } else {
      doubles[i] = this.getProbsLaplace(i, instance, 1);
    }
  }
  return doubles;
}
/**
 * Assigns a unique id to every node in the tree (pre-order numbering).
 *
 * @param lastID
 *          the last ID that was assigned
 * @return the new current ID
 */
public int assignIDs(final int lastID) {
  int nextId = lastID + 1;
  this.m_id = nextId;
  if (this.m_sons != null) {
    for (final ClassifierTree child : this.m_sons) {
      nextId = child.assignIDs(nextId);
    }
  }
  return nextId;
}
/**
 * Returns the type of graph this classifier represents.
 *
 * @return Drawable.TREE (this classifier is drawn as a tree)
 */
@Override
public int graphType() {
  return Drawable.TREE;
}
/**
 * Returns graph describing the tree, in GraphViz "dot" format.
 *
 * @throws Exception
 *           if something goes wrong
 * @return the tree as graph
 */
@Override
public String graph() throws Exception {
  StringBuffer text = new StringBuffer();
  // Number all nodes so each gets a unique "N<id>" label.
  this.assignIDs(-1);
  text.append("digraph J48Tree {\n");
  if (this.m_isLeaf) {
    // A lone leaf: emit a single box node.
    text.append("N" + this.m_id + " [label=\"" + Utils.backQuoteChars(this.m_localModel.dumpLabel(0, this.m_train)) + "\" " + "shape=box style=filled ");
    if (this.m_train != null && this.m_train.numInstances() > 0) {
      // Embed the training data in the node (used by tree visualizers).
      text.append("data =\n" + this.m_train + "\n");
      text.append(",\n");
    }
    text.append("]\n");
  } else {
    // Internal node: emit this node, then the edges/subtrees recursively.
    text.append("N" + this.m_id + " [label=\"" + Utils.backQuoteChars(this.m_localModel.leftSide(this.m_train)) + "\" ");
    if (this.m_train != null && this.m_train.numInstances() > 0) {
      text.append("data =\n" + this.m_train + "\n");
      text.append(",\n");
    }
    text.append("]\n");
    this.graphTree(text);
  }
  return text.toString() + "}\n";
}
/**
 * Returns tree in prefix order.
 *
 * @throws Exception
 *           if something goes wrong
 * @return the prefix order
 */
public String prefix() throws Exception {
  final StringBuffer text = new StringBuffer();
  if (this.m_isLeaf) {
    // A lone leaf is just its label in brackets.
    text.append("[" + this.m_localModel.dumpLabel(0, this.m_train) + "]");
  } else {
    this.prefixTree(text);
  }
  return text.toString();
}
/**
 * Returns source code for the tree as an if-then statement. The class is assigned to variable "p",
 * and assumes the tested instance is named "i". The results are returned as two stringbuffers: a
 * section of code for assignment of the class, and a section of code containing support code (eg:
 * other support methods).
 *
 * @param className
 *          the classname that this static classifier has
 * @return an array containing two stringbuffers, the first string containing assignment code, and
 *         the second containing source for support code.
 * @throws Exception
 *           if something goes wrong
 */
public StringBuffer[] toSource(final String className) throws Exception {
  StringBuffer[] result = new StringBuffer[2];
  if (this.m_isLeaf) {
    // Leaf: just assign the majority class; no support method needed.
    result[0] = new StringBuffer(" p = " + this.m_localModel.distribution().maxClass(0) + ";\n");
    result[1] = new StringBuffer("");
  } else {
    StringBuffer text = new StringBuffer();
    StringBuffer atEnd = new StringBuffer();
    // Method name combines the model's hashcode with a global counter,
    // since the hashcode alone isn't guaranteed unique.
    long printID = ClassifierTree.nextID();
    text.append(" static double N").append(Integer.toHexString(this.m_localModel.hashCode()) + printID).append("(Object []i) {\n").append(" double p = Double.NaN;\n");
    // First branch handles the missing-value test (index -1).
    text.append(" if (").append(this.m_localModel.sourceExpression(-1, this.m_train)).append(") {\n");
    text.append(" p = ").append(this.m_localModel.distribution().maxClass(0)).append(";\n");
    text.append(" } ");
    for (int i = 0; i < this.m_sons.length; i++) {
      text.append("else if (" + this.m_localModel.sourceExpression(i, this.m_train) + ") {\n");
      if (this.m_sons[i].m_isLeaf) {
        text.append(" p = " + this.m_localModel.distribution().maxClass(i) + ";\n");
      } else {
        // Non-leaf child: inline a call to its generated method and collect
        // the child's support code.
        StringBuffer[] sub = this.m_sons[i].toSource(className);
        text.append(sub[0]);
        atEnd.append(sub[1]);
      }
      text.append(" } ");
      if (i == this.m_sons.length - 1) {
        text.append('\n');
      }
    }
    text.append(" return p;\n }\n");
    result[0] = new StringBuffer(" p = " + className + ".N");
    result[0].append(Integer.toHexString(this.m_localModel.hashCode()) + printID).append("(i);\n");
    result[1] = text.append(atEnd);
  }
  return result;
}
/**
 * Returns number of leaves in tree structure.
 *
 * @return the number of leaves
 */
public int numLeaves() {
  if (this.m_isLeaf) {
    return 1;
  }
  int count = 0;
  for (final ClassifierTree child : this.m_sons) {
    count += child.numLeaves();
  }
  return count;
}
/**
 * Returns number of nodes in tree structure (this node plus all
 * descendants).
 *
 * @return the number of nodes
 */
public int numNodes() {
  int count = 1;
  if (!this.m_isLeaf) {
    for (final ClassifierTree child : this.m_sons) {
      count += child.numNodes();
    }
  }
  return count;
}
/**
 * Prints tree structure.
 *
 * @return the tree structure, or a fixed error message if printing fails
 */
@Override
public String toString() {
  try {
    StringBuffer text = new StringBuffer();
    if (this.m_isLeaf) {
      text.append(": ");
      text.append(this.m_localModel.dumpLabel(0, this.m_train));
    } else {
      this.dumpTree(0, text);
    }
    text.append("\n\nNumber of Leaves : \t" + this.numLeaves() + "\n");
    text.append("\nSize of the tree : \t" + this.numNodes() + "\n");
    return text.toString();
  } catch (Exception e) {
    // toString() must not throw, so any failure while dumping the tree is
    // converted into a fixed message (the exception is intentionally
    // dropped here).
    return "Can't print classification tree.";
  }
}
/**
 * Returns a newly created tree.
 *
 * @param data
 *          the training data
 * @return the generated tree
 * @throws Exception
 *           if something goes wrong
 */
protected ClassifierTree getNewTree(final Instances data) throws Exception {
  // XXX kill weka execution
  if (Thread.interrupted()) {
    throw new InterruptedException("Thread got interrupted, thus, kill WEKA.");
  }
  // Child nodes never keep their training data (keepData == false).
  ClassifierTree newTree = new ClassifierTree(this.m_toSelectModel);
  newTree.buildTree(data, false);
  return newTree;
}
/**
 * Returns a newly created tree.
 *
 * @param train
 *          the training data
 * @param test
 *          the pruning data.
 * @return the generated tree
 * @throws Exception
 *           if something goes wrong
 */
protected ClassifierTree getNewTree(final Instances train, final Instances test) throws Exception {
  // XXX kill weka execution — added for consistency with
  // getNewTree(Instances), so both tree-growing paths are interruptible.
  if (Thread.interrupted()) {
    throw new InterruptedException("Thread got interrupted, thus, kill WEKA.");
  }
  // Child nodes never keep their training data (keepData == false).
  ClassifierTree newTree = new ClassifierTree(this.m_toSelectModel);
  newTree.buildTree(train, test, false);
  return newTree;
}
/**
 * Help method for printing tree structure. Appends one line per branch,
 * indented with "| " according to depth, recursing into non-leaf children.
 *
 * @param depth
 *          the current depth
 * @param text
 *          for outputting the structure
 * @throws Exception
 *           if something goes wrong
 */
private void dumpTree(final int depth, final StringBuffer text) throws Exception {
  int i, j;
  for (i = 0; i < this.m_sons.length; i++) {
    // Removed a stray empty statement (';' on its own line) that was
    // left behind here.
    text.append("\n");
    for (j = 0; j < depth; j++) {
      text.append("| ");
    }
    text.append(this.m_localModel.leftSide(this.m_train));
    text.append(this.m_localModel.rightSide(i, this.m_train));
    if (this.m_sons[i].m_isLeaf) {
      text.append(": ");
      text.append(this.m_localModel.dumpLabel(i, this.m_train));
    } else {
      this.m_sons[i].dumpTree(depth + 1, text);
    }
  }
}
/**
 * Helper for printing the tree structure in GraphViz "dot" syntax.
 *
 * @param text
 *          buffer receiving the graph description
 * @throws Exception
 *           if something goes wrong
 */
private void graphTree(final StringBuffer text) throws Exception {
for (int i = 0; i < this.m_sons.length; i++) {
// Edge from this node to child i, labelled with the split value.
text.append("N" + this.m_id + "->" + "N" + this.m_sons[i].m_id + " [label=\"" + Utils.backQuoteChars(this.m_localModel.rightSide(i, this.m_train).trim()) + "\"]\n");
if (this.m_sons[i].m_isLeaf) {
// Leaf child: filled box labelled with its class label.
text.append("N" + this.m_sons[i].m_id + " [label=\"" + Utils.backQuoteChars(this.m_localModel.dumpLabel(i, this.m_train)) + "\" " + "shape=box style=filled ");
if (this.m_train != null && this.m_train.numInstances() > 0) {
// Embed the child's retained training data, if any.
text.append("data =\n" + this.m_sons[i].m_train + "\n");
text.append(",\n");
}
text.append("]\n");
} else {
// Internal child: labelled with its own split's left-hand side.
text.append("N" + this.m_sons[i].m_id + " [label=\"" + Utils.backQuoteChars(this.m_sons[i].m_localModel.leftSide(this.m_train)) + "\" ");
if (this.m_train != null && this.m_train.numInstances() > 0) {
text.append("data =\n" + this.m_sons[i].m_train + "\n");
text.append(",\n");
}
text.append("]\n");
// Recurse into the child's subtree.
this.m_sons[i].graphTree(text);
}
}
}
/**
 * Prints the tree in prefix form: "[condition1,\ncondition2,...[label]...]"
 * with one bracketed group per node.
 *
 * @param text
 *          the buffer to output the prefix form to
 * @throws Exception
 *           if something goes wrong
 */
private void prefixTree(final StringBuffer text) throws Exception {
text.append("[");
text.append(this.m_localModel.leftSide(this.m_train) + ":");
// First list every branch condition of this split...
for (int i = 0; i < this.m_sons.length; i++) {
if (i > 0) {
text.append(",\n");
}
text.append(this.m_localModel.rightSide(i, this.m_train));
}
// ...then recurse into (or label) each child in the same order.
for (int i = 0; i < this.m_sons.length; i++) {
if (this.m_sons[i].m_isLeaf) {
text.append("[");
text.append(this.m_localModel.dumpLabel(i, this.m_train));
text.append("]");
} else {
this.m_sons[i].prefixTree(text);
}
}
text.append("]");
}
/**
 * Helper for computing the Laplace-corrected class probability of a given
 * instance, recursing down the tree and weighting by branch membership.
 *
 * @param classIndex
 *          the class index
 * @param instance
 *          the instance to compute the probability for
 * @param weight
 *          the weight (branch membership mass) accumulated so far
 * @return the weighted Laplace probability
 * @throws Exception
 *           if something goes wrong
 */
private double getProbsLaplace(final int classIndex, final Instance instance, final double weight) throws Exception {
double prob = 0;
if (this.m_isLeaf) {
// Leaf: the local model supplies the Laplace-corrected class probability.
return weight * this.localModel().classProbLaplace(classIndex, instance, -1);
} else {
int treeIndex = this.localModel().whichSubset(instance);
if (treeIndex == -1) {
// Split value missing: distribute the instance over all non-empty
// children according to the local model's weights.
double[] weights = this.localModel().weights(instance);
for (int i = 0; i < this.m_sons.length; i++) {
if (!this.son(i).m_isEmpty) {
prob += this.son(i).getProbsLaplace(classIndex, instance, weights[i] * weight);
}
}
return prob;
} else {
if (this.son(treeIndex).m_isEmpty) {
// Empty branch: fall back to the local model's estimate for it.
return weight * this.localModel().classProbLaplace(classIndex, instance, treeIndex);
} else {
return this.son(treeIndex).getProbsLaplace(classIndex, instance, weight);
}
}
}
}
/**
 * Helper for computing the (relative-frequency) class probability of a given
 * instance. Mirrors getProbsLaplace but without the Laplace correction.
 *
 * @param classIndex
 *          the class index
 * @param instance
 *          the instance to compute the probability for
 * @param weight
 *          the weight (branch membership mass) accumulated so far
 * @return the weighted class probability
 * @throws Exception
 *           if something goes wrong
 */
private double getProbs(final int classIndex, final Instance instance, final double weight) throws Exception {
double prob = 0;
if (this.m_isLeaf) {
// Leaf: the local model supplies the class probability directly.
return weight * this.localModel().classProb(classIndex, instance, -1);
} else {
int treeIndex = this.localModel().whichSubset(instance);
if (treeIndex == -1) {
// Split value missing: distribute the instance over all non-empty
// children according to the local model's weights.
double[] weights = this.localModel().weights(instance);
for (int i = 0; i < this.m_sons.length; i++) {
if (!this.son(i).m_isEmpty) {
prob += this.son(i).getProbs(classIndex, instance, weights[i] * weight);
}
}
return prob;
} else {
if (this.son(treeIndex).m_isEmpty) {
// Empty branch: fall back to the local model's estimate for it.
return weight * this.localModel().classProb(classIndex, instance, treeIndex);
} else {
return this.son(treeIndex).getProbs(classIndex, instance, weight);
}
}
}
}
/**
 * Readability accessor for this node's split model.
 *
 * @return the local split model
 */
private ClassifierSplitModel localModel() {
return this.m_localModel;
}
/**
 * Readability accessor for the child subtree at the given branch index.
 *
 * @param index the branch index
 * @return the child subtree
 */
private ClassifierTree son(final int index) {
return this.m_sons[index];
}
/**
 * Computes a list of node-membership weights for the given instance: one
 * entry per tree node, filled in breadth-first order starting at this node.
 *
 * @param instance
 *          the instance to trace through the tree
 * @return an array of length numNodes() with the membership weight per node
 * @throws Exception
 *           if something goes wrong
 */
public double[] getMembershipValues(final Instance instance) throws Exception {
// Set up array for membership values, one slot per node (BFS order).
double[] a = new double[this.numNodes()];
// Parallel queues: pending nodes and the weight reaching each of them.
Queue<Double> queueOfWeights = new LinkedList<>();
Queue<ClassifierTree> queueOfNodes = new LinkedList<>();
queueOfWeights.add(instance.weight());
queueOfNodes.add(this);
int index = 0;
// While the queue is not empty
while (!queueOfNodes.isEmpty()) {
a[index++] = queueOfWeights.poll();
ClassifierTree node = queueOfNodes.poll();
// Leaves have no children to enqueue.
if (node.m_isLeaf) {
continue;
}
// Which subset does the instance fall into (-1 = missing split value)?
int treeIndex = node.localModel().whichSubset(instance);
// Space for weight distribution
double[] weights = new double[node.m_sons.length];
// Missing value: split the weight per the model; otherwise all of it
// goes down the single matching branch.
if (treeIndex == -1) {
weights = node.localModel().weights(instance);
} else {
weights[treeIndex] = 1.0;
}
for (int i = 0; i < node.m_sons.length; i++) {
queueOfNodes.add(node.son(i));
queueOfWeights.add(a[index - 1] * weights[i]);
}
}
return a;
}
/**
 * Returns the revision string (extracted from the version-control keyword).
 *
 * @return the revision
 */
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/j48/Distribution.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Distribution.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.j48;
import java.io.Serializable;
import java.util.Enumeration;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* Class for handling a distribution of class values.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public class Distribution implements Cloneable, Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = 8526859638230806576L;

  /** Weight of instances per class per bag. */
  protected final double m_perClassPerBag[][];

  /** Weight of instances per bag. */
  protected final double m_perBag[];

  /** Weight of instances per class. */
  protected final double m_perClass[];

  /** Total weight of instances. */
  protected double totaL;

  /**
   * Creates and initializes a new distribution with the given number of bags
   * and classes. All counts start at zero.
   */
  public Distribution(int numBags, int numClasses) {
    m_perClassPerBag = new double[numBags][0];
    m_perBag = new double[numBags];
    m_perClass = new double[numClasses];
    for (int i = 0; i < numBags; i++) {
      m_perClassPerBag[i] = new double[numClasses];
    }
    totaL = 0;
  }

  /**
   * Creates and initializes a new distribution using the given array. WARNING:
   * it just copies a reference to this array; the caller must not mutate it
   * afterwards.
   */
  public Distribution(double[][] table) {
    m_perClassPerBag = table;
    m_perBag = new double[table.length];
    m_perClass = new double[table[0].length];
    for (int i = 0; i < table.length; i++) {
      for (int j = 0; j < table[i].length; j++) {
        m_perBag[i] += table[i][j];
        m_perClass[j] += table[i][j];
        totaL += table[i][j];
      }
    }
  }

  /**
   * Creates a distribution with only one bag according to instances in source.
   *
   * @exception Exception if something goes wrong
   */
  public Distribution(Instances source) throws Exception {
    m_perClassPerBag = new double[1][0];
    m_perBag = new double[1];
    totaL = 0;
    m_perClass = new double[source.numClasses()];
    m_perClassPerBag[0] = new double[source.numClasses()];
    Enumeration<Instance> enu = source.enumerateInstances();
    while (enu.hasMoreElements()) {
      add(0, enu.nextElement());
    }
  }

  /**
   * Creates a distribution according to given instances and split model.
   * Instances whose subset cannot be determined (whichSubset returns -1) are
   * spread over all bags according to the model's weights.
   *
   * @exception Exception if something goes wrong
   */
  public Distribution(Instances source, ClassifierSplitModel modelToUse) throws Exception {
    m_perClassPerBag = new double[modelToUse.numSubsets()][0];
    m_perBag = new double[modelToUse.numSubsets()];
    totaL = 0;
    m_perClass = new double[source.numClasses()];
    for (int i = 0; i < modelToUse.numSubsets(); i++) {
      m_perClassPerBag[i] = new double[source.numClasses()];
    }
    Enumeration<Instance> enu = source.enumerateInstances();
    while (enu.hasMoreElements()) {
      Instance instance = enu.nextElement();
      int index = modelToUse.whichSubset(instance);
      if (index != -1) {
        add(index, instance);
      } else {
        // Undetermined subset: distribute the instance across all bags.
        addWeights(instance, modelToUse.weights(instance));
      }
    }
  }

  /**
   * Creates distribution with only one bag by merging all bags of given
   * distribution.
   */
  public Distribution(Distribution toMerge) {
    totaL = toMerge.totaL;
    m_perClass = new double[toMerge.numClasses()];
    System.arraycopy(toMerge.m_perClass, 0, m_perClass, 0, toMerge.numClasses());
    m_perClassPerBag = new double[1][0];
    m_perClassPerBag[0] = new double[toMerge.numClasses()];
    System.arraycopy(toMerge.m_perClass, 0, m_perClassPerBag[0], 0, toMerge.numClasses());
    m_perBag = new double[1];
    m_perBag[0] = totaL;
  }

  /**
   * Creates distribution with two bags by merging all bags apart from the
   * indicated one: bag 0 is the indicated bag, bag 1 holds everything else.
   */
  public Distribution(Distribution toMerge, int index) {
    totaL = toMerge.totaL;
    m_perClass = new double[toMerge.numClasses()];
    System.arraycopy(toMerge.m_perClass, 0, m_perClass, 0, toMerge.numClasses());
    m_perClassPerBag = new double[2][0];
    m_perClassPerBag[0] = new double[toMerge.numClasses()];
    System.arraycopy(toMerge.m_perClassPerBag[index], 0, m_perClassPerBag[0], 0, toMerge.numClasses());
    m_perClassPerBag[1] = new double[toMerge.numClasses()];
    for (int i = 0; i < toMerge.numClasses(); i++) {
      m_perClassPerBag[1][i] = toMerge.m_perClass[i] - m_perClassPerBag[0][i];
    }
    m_perBag = new double[2];
    m_perBag[0] = toMerge.m_perBag[index];
    m_perBag[1] = totaL - m_perBag[0];
  }

  /**
   * Returns number of non-empty bags of distribution.
   */
  public final int actualNumBags() {
    int returnValue = 0;
    for (int i = 0; i < m_perBag.length; i++) {
      if (Utils.gr(m_perBag[i], 0)) {
        returnValue++;
      }
    }
    return returnValue;
  }

  /**
   * Returns number of classes actually occurring in distribution.
   */
  public final int actualNumClasses() {
    int returnValue = 0;
    for (int i = 0; i < m_perClass.length; i++) {
      if (Utils.gr(m_perClass[i], 0)) {
        returnValue++;
      }
    }
    return returnValue;
  }

  /**
   * Returns number of classes actually occurring in given bag.
   */
  public final int actualNumClasses(int bagIndex) {
    int returnValue = 0;
    for (int i = 0; i < m_perClass.length; i++) {
      if (Utils.gr(m_perClassPerBag[bagIndex][i], 0)) {
        returnValue++;
      }
    }
    return returnValue;
  }

  /**
   * Adds given instance to given bag.
   *
   * @exception Exception if something goes wrong
   */
  public final void add(int bagIndex, Instance instance) throws Exception {
    int classIndex = (int) instance.classValue();
    double weight = instance.weight();
    m_perClassPerBag[bagIndex][classIndex] += weight;
    m_perBag[bagIndex] += weight;
    m_perClass[classIndex] += weight;
    totaL += weight;
  }

  /**
   * Subtracts given instance from given bag.
   *
   * @exception Exception if something goes wrong
   */
  public final void sub(int bagIndex, Instance instance) throws Exception {
    int classIndex = (int) instance.classValue();
    double weight = instance.weight();
    m_perClassPerBag[bagIndex][classIndex] -= weight;
    m_perBag[bagIndex] -= weight;
    m_perClass[classIndex] -= weight;
    totaL -= weight;
  }

  /**
   * Adds counts to given bag.
   */
  public final void add(int bagIndex, double[] counts) {
    double sum = Utils.sum(counts);
    for (int i = 0; i < counts.length; i++) {
      m_perClassPerBag[bagIndex][i] += counts[i];
      m_perClass[i] += counts[i];
    }
    m_perBag[bagIndex] += sum;
    totaL += sum;
  }

  /**
   * Adds all instances with unknown values for given attribute, weighted
   * according to frequency of instances in each bag.
   *
   * @exception Exception if something goes wrong
   */
  public final void addInstWithUnknown(Instances source, int attIndex) throws Exception {
    // Bag probabilities are computed once, from the state BEFORE any of the
    // unknown-valued instances are added.
    double[] probs = new double[m_perBag.length];
    for (int j = 0; j < m_perBag.length; j++) {
      if (Utils.eq(totaL, 0)) {
        // Empty distribution: fall back to a uniform split over the bags.
        probs[j] = 1.0 / probs.length;
      } else {
        probs[j] = m_perBag[j] / totaL;
      }
    }
    Enumeration<Instance> enu = source.enumerateInstances();
    while (enu.hasMoreElements()) {
      Instance instance = enu.nextElement();
      if (instance.isMissing(attIndex)) {
        int classIndex = (int) instance.classValue();
        double weight = instance.weight();
        m_perClass[classIndex] += weight;
        totaL += weight;
        for (int j = 0; j < m_perBag.length; j++) {
          double newWeight = probs[j] * weight;
          m_perClassPerBag[j][classIndex] += newWeight;
          m_perBag[j] += newWeight;
        }
      }
    }
  }

  /**
   * Adds all instances in given range to given bag.
   *
   * @exception Exception if something goes wrong
   */
  public final void addRange(int bagIndex, Instances source, int startIndex, int lastPlusOne) throws Exception {
    double sumOfWeights = 0;
    for (int i = startIndex; i < lastPlusOne; i++) {
      Instance instance = source.instance(i);
      int classIndex = (int) instance.classValue();
      sumOfWeights += instance.weight();
      m_perClassPerBag[bagIndex][classIndex] += instance.weight();
      m_perClass[classIndex] += instance.weight();
    }
    m_perBag[bagIndex] += sumOfWeights;
    totaL += sumOfWeights;
  }

  /**
   * Adds given instance to all bags weighting it according to given weights.
   *
   * @exception Exception if something goes wrong
   */
  public final void addWeights(Instance instance, double[] weights) throws Exception {
    int classIndex = (int) instance.classValue();
    for (int i = 0; i < m_perBag.length; i++) {
      double weight = instance.weight() * weights[i];
      m_perClassPerBag[i][classIndex] += weight;
      m_perBag[i] += weight;
      m_perClass[classIndex] += weight;
      totaL += weight;
    }
  }

  /**
   * Checks if at least two bags contain a minimum number of instances.
   */
  public final boolean check(double minNoObj) {
    int counter = 0;
    for (int i = 0; i < m_perBag.length; i++) {
      if (Utils.grOrEq(m_perBag[i], minNoObj)) {
        counter++;
      }
    }
    return counter > 1;
  }

  /**
   * Clones distribution (deep copy of all count arrays).
   */
  @Override
  public final Object clone() {
    Distribution newDistribution = new Distribution(m_perBag.length, m_perClass.length);
    for (int i = 0; i < m_perBag.length; i++) {
      newDistribution.m_perBag[i] = m_perBag[i];
      System.arraycopy(m_perClassPerBag[i], 0, newDistribution.m_perClassPerBag[i], 0, m_perClass.length);
    }
    System.arraycopy(m_perClass, 0, newDistribution.m_perClass, 0, m_perClass.length);
    newDistribution.totaL = totaL;
    return newDistribution;
  }

  /**
   * Deletes given instance from given bag. Behaves exactly like
   * {@link #sub(int, Instance)}; kept for backward compatibility.
   *
   * @exception Exception if something goes wrong
   */
  public final void del(int bagIndex, Instance instance) throws Exception {
    // Was a verbatim duplicate of sub(); delegate instead.
    sub(bagIndex, instance);
  }

  /**
   * Deletes all instances in given range from given bag.
   *
   * @exception Exception if something goes wrong
   */
  public final void delRange(int bagIndex, Instances source, int startIndex, int lastPlusOne) throws Exception {
    double sumOfWeights = 0;
    for (int i = startIndex; i < lastPlusOne; i++) {
      Instance instance = source.instance(i);
      int classIndex = (int) instance.classValue();
      sumOfWeights += instance.weight();
      m_perClassPerBag[bagIndex][classIndex] -= instance.weight();
      m_perClass[classIndex] -= instance.weight();
    }
    m_perBag[bagIndex] -= sumOfWeights;
    totaL -= sumOfWeights;
  }

  /**
   * Prints distribution, one line per (bag, class) cell.
   */
  public final String dumpDistribution() {
    StringBuilder text = new StringBuilder();
    for (int i = 0; i < m_perBag.length; i++) {
      text.append("Bag num " + i + "\n");
      for (int j = 0; j < m_perClass.length; j++) {
        text.append("Class num " + j + " " + m_perClassPerBag[i][j] + "\n");
      }
    }
    return text.toString();
  }

  /**
   * Sets all counts to zero.
   */
  public final void initialize() {
    for (int i = 0; i < m_perClass.length; i++) {
      m_perClass[i] = 0;
    }
    for (int i = 0; i < m_perBag.length; i++) {
      m_perBag[i] = 0;
      for (int j = 0; j < m_perClass.length; j++) {
        m_perClassPerBag[i][j] = 0;
      }
    }
    totaL = 0;
  }

  /**
   * Returns matrix with distribution of class values. Note: this is a direct
   * reference to the internal array, not a copy.
   */
  public final double[][] matrix() {
    return m_perClassPerBag;
  }

  /**
   * Returns index of bag containing maximum number of instances. Because the
   * comparison is greater-or-equal, ties resolve to the highest index.
   */
  public final int maxBag() {
    double max = 0;
    int maxIndex = -1;
    for (int i = 0; i < m_perBag.length; i++) {
      if (Utils.grOrEq(m_perBag[i], max)) {
        max = m_perBag[i];
        maxIndex = i;
      }
    }
    return maxIndex;
  }

  /**
   * Returns class with highest frequency over all bags.
   */
  public final int maxClass() {
    double maxCount = 0;
    int maxIndex = 0;
    for (int i = 0; i < m_perClass.length; i++) {
      if (Utils.gr(m_perClass[i], maxCount)) {
        maxCount = m_perClass[i];
        maxIndex = i;
      }
    }
    return maxIndex;
  }

  /**
   * Returns class with highest frequency for given bag; falls back to the
   * overall majority class if the bag is empty.
   */
  public final int maxClass(int index) {
    if (!Utils.gr(m_perBag[index], 0)) {
      return maxClass();
    }
    double maxCount = 0;
    int maxIndex = 0;
    for (int i = 0; i < m_perClass.length; i++) {
      if (Utils.gr(m_perClassPerBag[index][i], maxCount)) {
        maxCount = m_perClassPerBag[index][i];
        maxIndex = i;
      }
    }
    return maxIndex;
  }

  /**
   * Returns number of bags.
   */
  public final int numBags() {
    return m_perBag.length;
  }

  /**
   * Returns number of classes.
   */
  public final int numClasses() {
    return m_perClass.length;
  }

  /**
   * Returns perClass(maxClass()).
   */
  public final double numCorrect() {
    return m_perClass[maxClass()];
  }

  /**
   * Returns perClassPerBag(index, maxClass(index)).
   */
  public final double numCorrect(int index) {
    return m_perClassPerBag[index][maxClass(index)];
  }

  /**
   * Returns total-numCorrect().
   */
  public final double numIncorrect() {
    return totaL - numCorrect();
  }

  /**
   * Returns perBag(index)-numCorrect(index).
   */
  public final double numIncorrect(int index) {
    return m_perBag[index] - numCorrect(index);
  }

  /**
   * Returns number of (possibly fractional) instances of given class in given
   * bag.
   */
  public final double perClassPerBag(int bagIndex, int classIndex) {
    return m_perClassPerBag[bagIndex][classIndex];
  }

  /**
   * Returns number of (possibly fractional) instances in given bag.
   */
  public final double perBag(int bagIndex) {
    return m_perBag[bagIndex];
  }

  /**
   * Returns number of (possibly fractional) instances of given class.
   */
  public final double perClass(int classIndex) {
    return m_perClass[classIndex];
  }

  /**
   * Returns relative frequency of class over all bags with Laplace correction.
   */
  public final double laplaceProb(int classIndex) {
    return (m_perClass[classIndex] + 1) / (totaL + m_perClass.length);
  }

  /**
   * Returns Laplace-corrected relative frequency of class for given bag;
   * falls back to the overall Laplace estimate for an empty bag.
   */
  public final double laplaceProb(int classIndex, int intIndex) {
    if (Utils.gr(m_perBag[intIndex], 0)) {
      return (m_perClassPerBag[intIndex][classIndex] + 1.0) / (m_perBag[intIndex] + m_perClass.length);
    }
    return laplaceProb(classIndex);
  }

  /**
   * Returns relative frequency of class over all bags (0 for an empty
   * distribution).
   */
  public final double prob(int classIndex) {
    if (!Utils.eq(totaL, 0)) {
      return m_perClass[classIndex] / totaL;
    }
    return 0;
  }

  /**
   * Returns relative frequency of class for given bag; falls back to the
   * overall frequency for an empty bag.
   */
  public final double prob(int classIndex, int intIndex) {
    if (Utils.gr(m_perBag[intIndex], 0)) {
      return m_perClassPerBag[intIndex][classIndex] / m_perBag[intIndex];
    }
    return prob(classIndex);
  }

  /**
   * Subtracts the given distribution from this one. The result has only one
   * bag.
   */
  public final Distribution subtract(Distribution toSubstract) {
    Distribution newDist = new Distribution(1, m_perClass.length);
    newDist.m_perBag[0] = totaL - toSubstract.totaL;
    newDist.totaL = newDist.m_perBag[0];
    for (int i = 0; i < m_perClass.length; i++) {
      newDist.m_perClassPerBag[0][i] = m_perClass[i] - toSubstract.m_perClass[i];
      newDist.m_perClass[i] = newDist.m_perClassPerBag[0][i];
    }
    return newDist;
  }

  /**
   * Returns total number of (possibly fractional) instances.
   */
  public final double total() {
    return totaL;
  }

  /**
   * Shifts given instance from one bag to another one. Per-class totals and
   * the overall total are unchanged (the instance stays in the distribution).
   *
   * @exception Exception if something goes wrong
   */
  public final void shift(int from, int to, Instance instance) throws Exception {
    int classIndex = (int) instance.classValue();
    double weight = instance.weight();
    m_perClassPerBag[from][classIndex] -= weight;
    m_perClassPerBag[to][classIndex] += weight;
    m_perBag[from] -= weight;
    m_perBag[to] += weight;
  }

  /**
   * Shifts all instances in given range from one bag to another one.
   *
   * @exception Exception if something goes wrong
   */
  public final void shiftRange(int from, int to, Instances source, int startIndex, int lastPlusOne) throws Exception {
    // Was an inlined copy of shift()'s body; delegate per instance instead.
    for (int i = startIndex; i < lastPlusOne; i++) {
      shift(from, to, source.instance(i));
    }
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/j48/EntropyBasedSplitCrit.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* EntropyBasedSplitCrit.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.j48;
import weka.core.ContingencyTables;
/**
* "Abstract" class for computing splitting criteria
* based on the entropy of a class distribution.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public abstract class EntropyBasedSplitCrit extends SplitCriterion {

  /** for serialization */
  private static final long serialVersionUID = -2618691439791653056L;

  /**
   * Helper for entropy computation: x * ln(x), treating weights below 1e-6
   * as zero.
   */
  public final double lnFunc(double num) {
    // Constant hard coded for efficiency reasons
    if (num < 1e-6) {
      return 0;
    }
    return ContingencyTables.lnFunc(num);
  }

  /**
   * Computes the entropy of the class distribution before splitting.
   */
  public final double oldEnt(Distribution bags) {
    double sum = 0;
    for (int classIdx = 0; classIdx < bags.numClasses(); classIdx++) {
      sum += lnFunc(bags.perClass(classIdx));
    }
    return (lnFunc(bags.total()) - sum) / ContingencyTables.log2;
  }

  /**
   * Computes the entropy of the class distribution after splitting.
   */
  public final double newEnt(Distribution bags) {
    double sum = 0;
    for (int bag = 0; bag < bags.numBags(); bag++) {
      for (int classIdx = 0; classIdx < bags.numClasses(); classIdx++) {
        sum += lnFunc(bags.perClassPerBag(bag, classIdx));
      }
      sum -= lnFunc(bags.perBag(bag));
    }
    return -(sum / ContingencyTables.log2);
  }

  /**
   * Computes the entropy of the split itself, i.e. without considering the
   * class values.
   */
  public final double splitEnt(Distribution bags) {
    double sum = 0;
    for (int bag = 0; bag < bags.numBags(); bag++) {
      sum += lnFunc(bags.perBag(bag));
    }
    return (lnFunc(bags.total()) - sum) / ContingencyTables.log2;
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/j48/EntropySplitCrit.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* EntropySplitCrit.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.j48;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.core.ContingencyTables;
/**
* Class for computing the entropy for a given distribution.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public final class EntropySplitCrit
extends EntropyBasedSplitCrit {
/** for serialization */
private static final long serialVersionUID = 5986252682266803935L;
/**
* Computes entropy for given distribution.
*/
public final double splitCritValue(Distribution bags) {
return newEnt(bags);
}
/**
* Computes entropy of test distribution with respect to training distribution.
*/
public final double splitCritValue(Distribution train, Distribution test) {
double result = 0;
int numClasses = 0;
int i, j;
// Find out relevant number of classes
for (j = 0; j < test.numClasses(); j++)
if (Utils.gr(train.perClass(j), 0) || Utils.gr(test.perClass(j), 0))
numClasses++;
// Compute entropy of test data with respect to training data
for (i = 0; i < test.numBags(); i++)
if (Utils.gr(test.perBag(i),0)) {
for (j = 0; j < test.numClasses(); j++)
if (Utils.gr(test.perClassPerBag(i, j), 0))
result -= test.perClassPerBag(i, j)*
Math.log(train.perClassPerBag(i, j) + 1);
result += test.perBag(i) * Math.log(train.perBag(i) + numClasses);
}
return result / ContingencyTables.log2;
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/j48/GainRatioSplitCrit.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* GainRatioSplitCrit.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.j48;
import weka.core.ContingencyTables;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* Class for computing the gain ratio for a given distribution.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public final class GainRatioSplitCrit extends EntropyBasedSplitCrit {
/** for serialization */
private static final long serialVersionUID = -433336694718670930L;
/**
 * Straightforward implementation of the gain ratio criterion for the given
 * distribution. Returns split-info / info-gain (the reciprocal of the usual
 * gain ratio), so that smaller values are better.
 */
@Override
public final double splitCritValue(Distribution bags) {
double numerator;
double denumerator;
numerator = oldEnt(bags) - newEnt(bags);
// Splits with no gain are useless.
if (Utils.eq(numerator, 0)) {
return Double.MAX_VALUE;
}
denumerator = splitEnt(bags);
// Test if split is trivial.
if (Utils.eq(denumerator, 0)) {
return Double.MAX_VALUE;
}
// We take the reciprocal value because we want to minimize the
// splitting criterion's value.
return denumerator / numerator;
}
/**
 * Computes the gain ratio in the same way C4.5 does, accounting for
 * instances with unknown (missing) split values.
 *
 * @param bags the distribution
 * @param totalnoInst the weight of ALL instances (including missing-valued ones)
 * @param numerator the info gain
 * @return the gain ratio, or 0 for a trivial split
 */
public final double splitCritValue(Distribution bags, double totalnoInst,
double numerator) {
double denumerator;
// Compute split info.
denumerator = splitEnt(bags, totalnoInst);
// Test if split is trivial.
if (Utils.eq(denumerator, 0)) {
return 0;
}
denumerator = denumerator / totalnoInst;
return numerator / denumerator;
}
/**
 * Helper for computing the split entropy, treating the missing-value mass
 * (totalnoInst - bags.total()) as an extra pseudo-bag.
 */
private final double splitEnt(Distribution bags, double totalnoInst) {
double returnValue = 0;
double noUnknown;
int i;
// Weight of instances with a missing split value.
noUnknown = totalnoInst - bags.total();
if (Utils.gr(bags.total(), 0)) {
for (i = 0; i < bags.numBags(); i++) {
returnValue = returnValue - lnFunc(bags.perBag(i));
}
returnValue = returnValue - lnFunc(noUnknown);
returnValue = returnValue + lnFunc(totalnoInst);
}
return returnValue / ContingencyTables.log2;
}
/**
 * Returns the revision string.
 *
 * @return the revision
 */
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/j48/InfoGainSplitCrit.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* InfoGainSplitCrit.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.j48;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* Class for computing the information gain for a given distribution.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public final class InfoGainSplitCrit extends EntropyBasedSplitCrit {

  /** for serialization */
  private static final long serialVersionUID = 4892105020180728499L;

  /**
   * This method is a straightforward implementation of the information gain
   * criterion for the given distribution.
   *
   * @param bags the distribution induced by the split
   * @return the reciprocal of the gain (smaller is better), or
   *         Double.MAX_VALUE if the split yields no gain
   */
  @Override
  public final double splitCritValue(Distribution bags) {

    double numerator = oldEnt(bags) - newEnt(bags);

    // Splits with no gain are useless.
    if (Utils.eq(numerator, 0)) {
      return Double.MAX_VALUE;
    }

    // We take the reciprocal value because we want to minimize the
    // splitting criterion's value.
    return bags.total() / numerator;
  }

  /**
   * This method computes the information gain in the same way C4.5 does.
   *
   * @param bags the distribution
   * @param totalNoInst weight of ALL instances (including the ones with missing
   *          values).
   * @return the information gain per unit of weight, or 0 for a useless split
   */
  public final double splitCritValue(Distribution bags, double totalNoInst) {
    // Delegate to the three-argument overload instead of duplicating its
    // computation; the "no-split" entropy is derived from the distribution.
    return splitCritValue(bags, totalNoInst, oldEnt(bags));
  }

  /**
   * This method computes the information gain in the same way C4.5 does.
   *
   * @param bags the distribution
   * @param totalNoInst weight of ALL instances
   * @param oldEnt entropy with respect to "no-split"-model.
   * @return the information gain per unit of weight, or 0 for a useless split
   */
  public final double splitCritValue(Distribution bags, double totalNoInst,
    double oldEnt) {

    // The gain is discounted by the fraction of weight whose split-attribute
    // value is unknown.
    double noUnknown = totalNoInst - bags.total();
    double unknownRate = noUnknown / totalNoInst;
    double numerator = (1 - unknownRate) * (oldEnt - newEnt(bags));

    // Splits with no gain are useless.
    if (Utils.eq(numerator, 0)) {
      return 0;
    }

    return numerator / bags.total();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/j48/ModelSelection.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ModelSelection.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.j48;
import java.io.Serializable;
import weka.core.Instances;
import weka.core.RevisionHandler;
/**
* Abstract class for model selection criteria.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public abstract class ModelSelection
  implements Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = -4850147125096133642L;

  /**
   * Selects a model for the given dataset.
   *
   * @param data the data to choose a split model for
   * @return the chosen split model
   * @exception Exception if model can't be selected
   */
  public abstract ClassifierSplitModel selectModel(Instances data) throws Exception;

  /**
   * Selects a model for the given train data using the given test data.
   * Subclasses that support a separate pruning/test set override this;
   * the default implementation always throws.
   *
   * @param train the training data
   * @param test the test data
   * @return the chosen split model
   * @exception Exception if model can't be selected
   */
  public ClassifierSplitModel selectModel(Instances train, Instances test)
    throws Exception {
    throw new Exception("Model selection method not implemented");
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/j48/NBTreeClassifierTree.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* NBTreeClassifierTree.java
* Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.j48;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* Class for handling a naive bayes tree structure used for classification.
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
*/
public class NBTreeClassifierTree extends ClassifierTree {

  /** for serialization */
  private static final long serialVersionUID = -4472639447877404786L;

  /**
   * Constructs a classifier tree using the supplied model selection method.
   *
   * @param toSelectLocModel the split model selection method to use
   */
  public NBTreeClassifierTree(ModelSelection toSelectLocModel) {
    super(toSelectLocModel);
  }

  /**
   * Method for building a naive bayes classifier tree
   *
   * @param data the training data
   * @exception Exception if something goes wrong
   */
  @Override
  public void buildClassifier(Instances data) throws Exception {
    super.buildClassifier(data);
    // Drop references to the training data, keeping only the structure,
    // then number the nodes so leaves can be referenced in the output.
    cleanup(new Instances(data, 0));
    assignIDs(-1);
  }

  /**
   * Returns a newly created tree.
   *
   * @param data the training data
   * @return the new subtree
   * @exception Exception if something goes wrong
   */
  @Override
  protected ClassifierTree getNewTree(Instances data) throws Exception {

    ClassifierTree newTree = new NBTreeClassifierTree(m_toSelectModel);
    newTree.buildTree(data, false);

    return newTree;
  }

  /**
   * Returns a newly created tree.
   *
   * @param train the training data
   * @param test the pruning data.
   * @return the new subtree
   * @exception Exception if something goes wrong
   */
  @Override
  protected ClassifierTree getNewTree(Instances train, Instances test)
    throws Exception {

    ClassifierTree newTree = new NBTreeClassifierTree(m_toSelectModel);
    newTree.buildTree(train, test, false);

    return newTree;
  }

  /**
   * Print the models at the leaves
   *
   * @return textual description of the leaf models
   */
  public String printLeafModels() {
    StringBuffer text = new StringBuffer();

    if (m_isLeaf) {
      text.append("\nLeaf number: " + m_id + " ");
      text.append(m_localModel.toString());
      text.append("\n");
    } else {
      // Recurse into all subtrees and concatenate their leaf descriptions.
      for (ClassifierTree m_son : m_sons) {
        text.append(((NBTreeClassifierTree) m_son).printLeafModels());
      }
    }
    return text.toString();
  }

  /**
   * Prints tree structure.
   *
   * @return a textual rendering of the tree, its leaf models and its size
   */
  @Override
  public String toString() {

    try {
      StringBuffer text = new StringBuffer();

      if (m_isLeaf) {
        text.append(": NB");
        text.append(m_id);
      } else {
        dumpTreeNB(0, text);
      }

      text.append("\n" + printLeafModels());
      text.append("\n\nNumber of Leaves  : \t" + numLeaves() + "\n");
      text.append("\nSize of the tree : \t" + numNodes() + "\n");

      return text.toString();
    } catch (Exception e) {
      e.printStackTrace();
      return "Can't print nb tree.";
    }
  }

  /**
   * Help method for printing tree structure.
   *
   * @param depth the current nesting depth (controls "| " indentation)
   * @param text the buffer the description is appended to
   * @exception Exception if something goes wrong
   */
  private void dumpTreeNB(int depth, StringBuffer text) throws Exception {

    int i, j;

    for (i = 0; i < m_sons.length; i++) {
      text.append("\n");
      for (j = 0; j < depth; j++) {
        text.append("| ");
      }
      text.append(m_localModel.leftSide(m_train));
      text.append(m_localModel.rightSide(i, m_train));
      if (m_sons[i].m_isLeaf) {
        text.append(": NB ");
        text.append(m_sons[i].m_id);
      } else {
        ((NBTreeClassifierTree) m_sons[i]).dumpTreeNB(depth + 1, text);
      }
    }
  }

  /**
   * Returns graph describing the tree.
   *
   * @return the tree in GraphViz "dot" format
   * @exception Exception if something goes wrong
   */
  @Override
  public String graph() throws Exception {

    StringBuffer text = new StringBuffer();

    text.append("digraph J48Tree {\n");
    if (m_isLeaf) {
      text.append("N" + m_id + " [label=\"" + "NB model" + "\" "
        + "shape=box style=filled ");
      if (m_train != null && m_train.numInstances() > 0) {
        text.append("data =\n" + m_train + "\n");
        text.append(",\n");
      }
      text.append("]\n");
    } else {
      text.append("N" + m_id + " [label=\""
        + Utils.backQuoteChars(m_localModel.leftSide(m_train)) + "\" ");
      if (m_train != null && m_train.numInstances() > 0) {
        text.append("data =\n" + m_train + "\n");
        text.append(",\n");
      }
      text.append("]\n");
      graphTree(text);
    }

    return text.toString() + "}\n";
  }

  /**
   * Help method for printing tree structure as a graph.
   *
   * @param text the buffer the "dot" description is appended to
   * @exception Exception if something goes wrong
   */
  private void graphTree(StringBuffer text) throws Exception {

    for (int i = 0; i < m_sons.length; i++) {
      text.append("N" + m_id + "->" + "N" + m_sons[i].m_id + " [label=\""
        + Utils.backQuoteChars(m_localModel.rightSide(i, m_train).trim())
        + "\"]\n");
      if (m_sons[i].m_isLeaf) {
        text.append("N" + m_sons[i].m_id + " [label=\"" + "NB Model" + "\" "
          + "shape=box style=filled ");
        if (m_train != null && m_train.numInstances() > 0) {
          text.append("data =\n" + m_sons[i].m_train + "\n");
          text.append(",\n");
        }
        text.append("]\n");
      } else {
        text.append("N" + m_sons[i].m_id + " [label=\""
          + Utils.backQuoteChars(m_sons[i].m_localModel.leftSide(m_train))
          + "\" ");
        if (m_train != null && m_train.numInstances() > 0) {
          text.append("data =\n" + m_sons[i].m_train + "\n");
          text.append(",\n");
        }
        text.append("]\n");
        ((NBTreeClassifierTree) m_sons[i]).graphTree(text);
      }
    }
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/j48/NBTreeModelSelection.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* NBTreeModelSelection.java
* Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.j48;
import java.util.Enumeration;
import weka.core.Attribute;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* Class for selecting a NB tree split.
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
*/
public class NBTreeModelSelection extends ModelSelection {

  /** for serialization */
  private static final long serialVersionUID = 990097748931976704L;

  /** Minimum number of objects in interval. */
  protected final int m_minNoObj;

  /** All the training data */
  protected Instances m_allData;

  /**
   * Initializes the split selection method with the given parameters.
   *
   * @param minNoObj minimum number of instances that have to occur in at least
   *          two subsets induced by split
   * @param allData FULL training dataset (necessary for selection of split
   *          points).
   */
  public NBTreeModelSelection(int minNoObj, Instances allData) {
    m_minNoObj = minNoObj;
    m_allData = allData;
  }

  /**
   * Sets reference to training data to null.
   */
  public void cleanup() {
    m_allData = null;
  }

  /**
   * Selects NBTree-type split for the given dataset: compares the
   * cross-validated errors of a single naive Bayes model at this node
   * against a naive Bayes split on every attribute, and only splits if
   * that reduces the error by at least 5%.
   *
   * NOTE(review): on an internal failure the exception is printed and
   * {@code null} is returned rather than rethrown — confirm callers
   * tolerate a null model.
   */
  @Override
  public final ClassifierSplitModel selectModel(Instances data) {
    double globalErrors = 0;

    double minResult;
    NBTreeSplit[] currentModel;
    NBTreeSplit bestModel = null;
    NBTreeNoSplit noSplitModel = null;
    int validModels = 0;
    Distribution checkDistribution;
    Attribute attribute;
    double sumOfWeights;
    int i;

    try {
      // build the global model at this node
      noSplitModel = new NBTreeNoSplit();
      noSplitModel.buildClassifier(data);
      // Too little data to consider splitting at all.
      if (data.numInstances() < 5) {
        return noSplitModel;
      }

      // evaluate it; a perfect global model cannot be improved by splitting
      globalErrors = noSplitModel.getErrors();
      if (globalErrors == 0) {
        return noSplitModel;
      }

      // Check if all Instances belong to one class or if not
      // enough Instances to split.
      checkDistribution = new Distribution(data);
      if (Utils.sm(checkDistribution.total(), m_minNoObj)
        || Utils.eq(checkDistribution.total(),
          checkDistribution.perClass(checkDistribution.maxClass()))) {
        return noSplitModel;
      }

      // Check if all attributes are nominal and have a
      // lot of values.
      // NOTE(review): this loop computes nothing and its early break has no
      // effect — it appears to be a vestige of the analogous multi-valued
      // attribute check in C45ModelSelection; confirm before relying on it.
      if (m_allData != null) {
        Enumeration<Attribute> enu = data.enumerateAttributes();
        while (enu.hasMoreElements()) {
          attribute = enu.nextElement();
          if ((attribute.isNumeric())
            || (Utils.sm(attribute.numValues(),
              (0.3 * m_allData.numInstances())))) {
            break;
          }
        }
      }

      currentModel = new NBTreeSplit[data.numAttributes()];
      sumOfWeights = data.sumOfWeights();

      // For each attribute.
      for (i = 0; i < data.numAttributes(); i++) {

        // Apart from class attribute.
        if (i != (data).classIndex()) {

          // Get models for current attribute.
          currentModel[i] = new NBTreeSplit(i, m_minNoObj, sumOfWeights);
          currentModel[i].setGlobalModel(noSplitModel);
          currentModel[i].buildClassifier(data);

          // Check if useful split for current attribute
          // exists and check for enumerated attributes with
          // a lot of values.
          if (currentModel[i].checkModel()) {
            validModels++;
          }
        } else {
          currentModel[i] = null;
        }
      }

      // Check if any useful split was found.
      if (validModels == 0) {
        return noSplitModel;
      }

      // Find "best" attribute to split on: the one whose per-branch naive
      // Bayes models make the fewest cross-validated errors.
      minResult = globalErrors;
      for (i = 0; i < data.numAttributes(); i++) {
        if ((i != (data).classIndex()) && (currentModel[i].checkModel())) {
          if (currentModel[i].getErrors() < minResult) {
            bestModel = currentModel[i];
            minResult = currentModel[i].getErrors();
          }
        }
      }

      // Check if useful split was found: require at least a 5% relative
      // error reduction over the global model, otherwise keep the leaf.
      if (((globalErrors - minResult) / globalErrors) < 0.05) {
        return noSplitModel;
      }

      return bestModel;
    } catch (Exception e) {
      e.printStackTrace();
    }
    return null;
  }

  /**
   * Selects NBTree-type split for the given dataset. The test data is
   * ignored; selection is based on the training data only.
   */
  @Override
  public final ClassifierSplitModel selectModel(Instances train, Instances test) {
    return selectModel(train);
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/j48/NBTreeNoSplit.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* NBTreeNoSplit.java
* Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.j48;
import java.util.Random;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.classifiers.bayes.NaiveBayesUpdateable;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.filters.Filter;
import weka.filters.supervised.attribute.Discretize;
/**
* Class implementing a "no-split"-split (leaf node) for naive bayes
* trees.
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
*/
public final class NBTreeNoSplit
  extends ClassifierSplitModel {

  /** for serialization */
  private static final long serialVersionUID = 7824804381545259618L;

  /** the naive bayes classifier */
  protected NaiveBayesUpdateable m_nb;

  /** the discretizer used */
  protected Discretize m_disc;

  /** errors on the training data at this node */
  protected double m_errors;

  public NBTreeNoSplit() {
    m_numSubsets = 1;
  }

  /**
   * Build the no-split node: discretize the data with a supervised
   * discretizer, train a naive Bayes model on it and estimate its error
   * by 5-fold cross-validation.
   *
   * @param instances an <code>Instances</code> value
   * @exception Exception if an error occurs
   */
  public final void buildClassifier(Instances instances) throws Exception {
    m_nb = new NaiveBayesUpdateable();
    m_disc = new Discretize();
    m_disc.setInputFormat(instances);
    Instances temp = Filter.useFilter(instances, m_disc);
    m_nb.buildClassifier(temp);
    // With fewer than 5 instances no cross-validation is run and
    // m_errors keeps its default value of 0.
    if (temp.numInstances() >= 5) {
      m_errors = crossValidate(m_nb, temp, new Random(1));
    }
    m_numSubsets = 1;
  }

  /**
   * Return the errors made by the naive bayes model at this node
   *
   * @return the number of errors made
   */
  public double getErrors() {
    return m_errors;
  }

  /**
   * Return the discretizer used at this node
   *
   * @return a <code>Discretize</code> value
   */
  public Discretize getDiscretizer() {
    return m_disc;
  }

  /**
   * Get the naive bayes model at this node
   *
   * @return a <code>NaiveBayesUpdateable</code> value
   */
  public NaiveBayesUpdateable getNaiveBayesModel() {
    return m_nb;
  }

  /**
   * Always returns 0 because there is only one subset.
   */
  public final int whichSubset(Instance instance){
    return 0;
  }

  /**
   * Always returns null because there is only one subset.
   */
  public final double [] weights(Instance instance){
    return null;
  }

  /**
   * Does nothing because no condition has to be satisfied.
   */
  public final String leftSide(Instances instances){
    return "";
  }

  /**
   * Does nothing because no condition has to be satisfied.
   */
  public final String rightSide(int index, Instances instances){
    return "";
  }

  /**
   * Returns a string containing java source code equivalent to the test
   * made at this node. The instance being tested is called "i".
   *
   * @param index index of the nominal value tested
   * @param data the data containing instance structure info
   * @return a value of type 'String'
   */
  public final String sourceExpression(int index, Instances data) {
    return "true"; // or should this be false??
  }

  /**
   * Return the probability for a class value. The instance is pushed
   * through the node's discretizer before being handed to naive Bayes.
   *
   * @param classIndex the index of the class value
   * @param instance the instance to generate a probability for
   * @param theSubset the subset to consider
   * @return a probability
   * @exception Exception if an error occurs
   */
  public double classProb(int classIndex, Instance instance, int theSubset)
    throws Exception {
    m_disc.input(instance);
    Instance temp = m_disc.output();
    return m_nb.distributionForInstance(temp)[classIndex];
  }

  /**
   * Return a textual description of the node
   *
   * @return a <code>String</code> value
   */
  public String toString() {
    return m_nb.toString();
  }

  /**
   * Utility method for fast 5-fold cross validation of a naive bayes
   * model. Instead of training 5 models from scratch, each fold's model
   * is derived from the full model by "unlearning" the test fold:
   * updating with negatively-weighted copies of its instances.
   *
   * @param fullModel a <code>NaiveBayesUpdateable</code> value
   * @param trainingSet an <code>Instances</code> value
   * @param r a <code>Random</code> value
   * @return a <code>double</code> value — the total weight of
   *         misclassified instances over the 5 folds
   * @exception Exception if an error occurs
   */
  public static double crossValidate(NaiveBayesUpdateable fullModel,
    Instances trainingSet,
    Random r) throws Exception {
    // make some copies for fast evaluation of 5-fold xval
    Classifier [] copies = AbstractClassifier.makeCopies(fullModel, 5);
    Evaluation eval = new Evaluation(trainingSet);
    // make some splits
    for (int j = 0; j < 5; j++) {
      Instances test = trainingSet.testCV(5, j);
      // unlearn these test instances
      for (int k = 0; k < test.numInstances(); k++) {
        test.instance(k).setWeight(-test.instance(k).weight());
        ((NaiveBayesUpdateable)copies[j]).updateClassifier(test.instance(k));
        // reset the weight back to its original value
        test.instance(k).setWeight(-test.instance(k).weight());
      }
      eval.evaluateModel(copies[j], test);
    }
    return eval.incorrect();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/j48/NBTreeSplit.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* NBTreeSplit.java
* Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.j48;
import java.util.Random;
import weka.classifiers.bayes.NaiveBayesUpdateable;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.filters.Filter;
import weka.filters.supervised.attribute.Discretize;
/**
* Class implementing a NBTree split on an attribute.
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
*/
public class NBTreeSplit extends ClassifierSplitModel {

  /** for serialization */
  private static final long serialVersionUID = 8922627123884975070L;

  /** Desired number of branches. */
  protected int m_complexityIndex;

  /** Attribute to split on. */
  protected final int m_attIndex;

  /** The sum of the weights of the instances. */
  protected final double m_sumOfWeights;

  /**
   * The weight of the instances incorrectly classified by the naive bayes
   * models arising from this split
   */
  protected double m_errors;

  /** C4.5-style split used to decide subset membership. */
  protected C45Split m_c45S;

  /** The global naive bayes model for this node */
  NBTreeNoSplit m_globalNB;

  /**
   * Initializes the split model.
   *
   * @param attIndex index of the attribute to split on
   * @param minNoObj minimum number of objects per subset
   *          (NOTE(review): this parameter is accepted but never used)
   * @param sumOfWeights the sum of the weights of the instances
   */
  public NBTreeSplit(final int attIndex, final int minNoObj, final double sumOfWeights) {

    // Get index of attribute to split on.
    this.m_attIndex = attIndex;

    // Set the sum of the weights
    this.m_sumOfWeights = sumOfWeights;
  }

  /**
   * Creates a NBTree-type split on the given data. Assumes that none of the
   * class values is missing.
   *
   * @param trainInstances the data to build the split from
   * @exception Exception if something goes wrong
   */
  @Override
  public void buildClassifier(final Instances trainInstances) throws Exception {

    // Initialize the remaining instance variables.
    this.m_numSubsets = 0;
    this.m_errors = 0;
    // Start from the global model's error so an unusable split is never
    // preferred over the no-split model.
    if (this.m_globalNB != null) {
      this.m_errors = this.m_globalNB.getErrors();
    }

    // Different treatment for enumerated and numeric
    // attributes.
    if (trainInstances.attribute(this.m_attIndex).isNominal()) {
      this.m_complexityIndex = trainInstances.attribute(this.m_attIndex).numValues();
      this.handleEnumeratedAttribute(trainInstances);
    } else {
      this.m_complexityIndex = 2;
      trainInstances.sort(trainInstances.attribute(this.m_attIndex));
      this.handleNumericAttribute(trainInstances);
    }
  }

  /**
   * Returns index of attribute for which split was generated.
   */
  public final int attIndex() {
    return this.m_attIndex;
  }

  /**
   * Creates split on enumerated attribute: partitions the data per attribute
   * value (fractionally for missing values), then cross-validates a naive
   * Bayes model on each sufficiently large subset and accumulates the errors.
   *
   * @param trainInstances the data to split
   * @exception Exception if something goes wrong
   */
  private void handleEnumeratedAttribute(final Instances trainInstances) throws Exception {

    this.m_c45S = new C45Split(this.m_attIndex, 2, this.m_sumOfWeights, true);
    this.m_c45S.buildClassifier(trainInstances);
    if (this.m_c45S.numSubsets() == 0) {
      return;
    }
    this.m_errors = 0;
    Instance instance;

    Instances[] trainingSets = new Instances[this.m_complexityIndex];
    for (int i = 0; i < this.m_complexityIndex; i++) {
      trainingSets[i] = new Instances(trainInstances, 0);
    }

    /*
     * m_distribution = new Distribution(m_complexityIndex,
     * trainInstances.numClasses());
     */
    int subset;
    for (int i = 0; i < trainInstances.numInstances(); i++) {
      instance = trainInstances.instance(i);
      subset = this.m_c45S.whichSubset(instance);
      if (subset > -1) {
        trainingSets[subset].add((Instance) instance.copy());
      } else {
        // Missing split value: distribute a fractionally-weighted copy of
        // the instance over every subset.
        double[] weights = this.m_c45S.weights(instance);
        for (int j = 0; j < this.m_complexityIndex; j++) {
          try {
            Instance temp = (Instance) instance.copy();
            if (weights.length == this.m_complexityIndex) {
              temp.setWeight(temp.weight() * weights[j]);
            } else {
              temp.setWeight(temp.weight() / this.m_complexityIndex);
            }
            trainingSets[j].add(temp);
          } catch (Exception ex) {
            System.err.println("*** " + this.m_complexityIndex);
            System.err.println(weights.length);
            throw ex;
          }
        }
      }
    }

    /*
     * // compute weights (weights of instances per subset m_weights = new
     * double [m_complexityIndex]; for (int i = 0; i < m_complexityIndex; i++) {
     * m_weights[i] = trainingSets[i].sumOfWeights(); }
     * Utils.normalize(m_weights);
     */

    /*
     * // Only Instances with known values are relevant. Enumeration enu =
     * trainInstances.enumerateInstances(); while (enu.hasMoreElements()) {
     * instance = (Instance) enu.nextElement(); if
     * (!instance.isMissing(m_attIndex)) { //
     * m_distribution.add((int)instance.value(m_attIndex),instance);
     * trainingSets[(int)instances.value(m_attIndex)].add(instance); } else { //
     * add these to the error count m_errors += instance.weight(); } }
     */

    Random r = new Random(1);
    int minNumCount = 0;
    for (int i = 0; i < this.m_complexityIndex; i++) {
      // NOTE(review): this branch uses >= 5 while handleNumericAttribute
      // uses > 5 — confirm the asymmetry is intentional.
      if (trainingSets[i].numInstances() >= 5) {
        minNumCount++;
        // Discretize the sets
        Discretize disc = new Discretize();
        disc.setInputFormat(trainingSets[i]);
        trainingSets[i] = Filter.useFilter(trainingSets[i], disc);

        trainingSets[i].randomize(r);
        trainingSets[i].stratify(5);
        NaiveBayesUpdateable fullModel = new NaiveBayesUpdateable();
        fullModel.buildClassifier(trainingSets[i]);

        // add the errors for this branch of the split
        this.m_errors += NBTreeNoSplit.crossValidate(fullModel, trainingSets[i], r);
      } else {
        // if fewer than min obj then just count them as errors
        for (int j = 0; j < trainingSets[i].numInstances(); j++) {
          this.m_errors += trainingSets[i].instance(j).weight();
        }
      }
    }

    // Check if there are at least five instances in at least two of the
    // subsets.
    if (minNumCount > 1) {
      this.m_numSubsets = this.m_complexityIndex;
    }
  }

  /**
   * Creates split on numeric attribute: lets the C4.5 split pick a binary
   * split point, then cross-validates a naive Bayes model on each side.
   *
   * @param trainInstances the data to split (sorted on the attribute)
   * @exception Exception if something goes wrong
   */
  private void handleNumericAttribute(final Instances trainInstances) throws Exception {

    this.m_c45S = new C45Split(this.m_attIndex, 2, this.m_sumOfWeights, true);
    this.m_c45S.buildClassifier(trainInstances);
    if (this.m_c45S.numSubsets() == 0) {
      return;
    }
    this.m_errors = 0;

    Instances[] trainingSets = new Instances[this.m_complexityIndex];
    trainingSets[0] = new Instances(trainInstances, 0);
    trainingSets[1] = new Instances(trainInstances, 0);
    int subset = -1;

    // populate the subsets
    for (int i = 0; i < trainInstances.numInstances(); i++) {
      Instance instance = trainInstances.instance(i);
      subset = this.m_c45S.whichSubset(instance);
      if (subset != -1) {
        trainingSets[subset].add((Instance) instance.copy());
      } else {
        // Missing split value: distribute a fractionally-weighted copy of
        // the instance over both subsets.
        double[] weights = this.m_c45S.weights(instance);
        for (int j = 0; j < this.m_complexityIndex; j++) {
          Instance temp = (Instance) instance.copy();
          if (weights.length == this.m_complexityIndex) {
            temp.setWeight(temp.weight() * weights[j]);
          } else {
            temp.setWeight(temp.weight() / this.m_complexityIndex);
          }
          trainingSets[j].add(temp);
        }
      }
    }

    /*
     * // compute weights (weights of instances per subset m_weights = new
     * double [m_complexityIndex]; for (int i = 0; i < m_complexityIndex; i++) {
     * m_weights[i] = trainingSets[i].sumOfWeights(); }
     * Utils.normalize(m_weights);
     */

    Random r = new Random(1);
    int minNumCount = 0;
    for (int i = 0; i < this.m_complexityIndex; i++) {
      // NOTE(review): uses > 5 here but >= 5 in handleEnumeratedAttribute —
      // confirm the asymmetry is intentional.
      if (trainingSets[i].numInstances() > 5) {
        minNumCount++;
        // Discretize the sets
        Discretize disc = new Discretize();
        disc.setInputFormat(trainingSets[i]);
        trainingSets[i] = Filter.useFilter(trainingSets[i], disc);

        trainingSets[i].randomize(r);
        trainingSets[i].stratify(5);
        NaiveBayesUpdateable fullModel = new NaiveBayesUpdateable();
        fullModel.buildClassifier(trainingSets[i]);

        // add the errors for this branch of the split
        this.m_errors += NBTreeNoSplit.crossValidate(fullModel, trainingSets[i], r);
      } else {
        // if fewer than min obj then just count them as errors
        for (int j = 0; j < trainingSets[i].numInstances(); j++) {
          this.m_errors += trainingSets[i].instance(j).weight();
        }
      }
    }

    // Check if minimum number of Instances in at least two
    // subsets.
    if (minNumCount > 1) {
      this.m_numSubsets = this.m_complexityIndex;
    }
  }

  /**
   * Returns index of subset instance is assigned to. Returns -1 if instance is
   * assigned to more than one subset.
   *
   * @exception Exception if something goes wrong
   */
  @Override
  public final int whichSubset(final Instance instance) throws Exception {
    return this.m_c45S.whichSubset(instance);
  }

  /**
   * Returns weights if instance is assigned to more than one subset. Returns
   * null if instance is only assigned to one subset.
   */
  @Override
  public final double[] weights(final Instance instance) {
    return this.m_c45S.weights(instance);
  }

  /**
   * Returns a string containing java source code equivalent to the test made at
   * this node. The instance being tested is called "i".
   *
   * @param index index of the nominal value tested
   * @param data the data containing instance structure info
   * @return a value of type 'String'
   */
  @Override
  public final String sourceExpression(final int index, final Instances data) {
    return this.m_c45S.sourceExpression(index, data);
  }

  /**
   * Prints the condition satisfied by instances in a subset.
   *
   * @param index of subset
   * @param data training set.
   */
  @Override
  public final String rightSide(final int index, final Instances data) {
    return this.m_c45S.rightSide(index, data);
  }

  /**
   * Prints left side of condition..
   *
   * @param data training set.
   */
  @Override
  public final String leftSide(final Instances data) {
    return this.m_c45S.leftSide(data);
  }

  /**
   * Return the probability for a class value. Predictions are made with the
   * global naive Bayes model of this node, not the per-branch models.
   *
   * @param classIndex the index of the class value
   * @param instance the instance to generate a probability for
   * @param theSubset the subset to consider
   * @return a probability
   * @exception Exception if an error occurs
   */
  @Override
  public double classProb(final int classIndex, final Instance instance, final int theSubset) throws Exception {
    // use the global naive bayes model
    if (theSubset > -1) {
      return this.m_globalNB.classProb(classIndex, instance, theSubset);
    } else {
      throw new Exception("This shouldn't happen!!!");
    }
  }

  /**
   * Return the global naive bayes model for this node
   *
   * @return a <code>NBTreeNoSplit</code> value
   */
  public NBTreeNoSplit getGlobalModel() {
    return this.m_globalNB;
  }

  /**
   * Set the global naive bayes model for this node
   *
   * @param global a <code>NBTreeNoSplit</code> value
   */
  public void setGlobalModel(final NBTreeNoSplit global) {
    this.m_globalNB = global;
  }

  /**
   * Return the errors made by the naive bayes models arising from this split.
   *
   * @return a <code>double</code> value
   */
  public double getErrors() {
    return this.m_errors;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/j48/NoSplit.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* NoSplit.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.j48;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;
/**
* Class implementing a "no-split"-split.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public final class NoSplit
extends ClassifierSplitModel{
/** for serialization */
private static final long serialVersionUID = -1292620749331337546L;
/**
* Creates "no-split"-split for given distribution.
*/
public NoSplit(Distribution distribution){
m_distribution = new Distribution(distribution);
m_numSubsets = 1;
}
/**
* Creates a "no-split"-split for a given set of instances.
*
* @exception Exception if split can't be built successfully
*/
public final void buildClassifier(Instances instances)
throws Exception {
m_distribution = new Distribution(instances);
m_numSubsets = 1;
}
/**
* Always returns 0 because only there is only one subset.
*/
public final int whichSubset(Instance instance){
return 0;
}
/**
* Always returns null because there is only one subset.
*/
public final double [] weights(Instance instance){
return null;
}
/**
* Does nothing because no condition has to be satisfied.
*/
public final String leftSide(Instances instances){
return "";
}
/**
* Does nothing because no condition has to be satisfied.
*/
public final String rightSide(int index, Instances instances){
return "";
}
/**
* Returns a string containing java source code equivalent to the test
* made at this node. The instance being tested is called "i".
*
* @param index index of the nominal value tested
* @param data the data containing instance structure info
* @return a value of type 'String'
*/
public final String sourceExpression(int index, Instances data) {
return "true"; // or should this be false??
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/j48/PruneableClassifierTree.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* PruneableClassifierTree.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.j48;
import java.util.Random;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* Class for handling a tree structure that can
* be pruned using a pruning set.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public class PruneableClassifierTree
  extends ClassifierTree {

  /** for serialization */
  static final long serialVersionUID = -555775736857600201L;

  /** True if the tree is to be pruned. */
  protected boolean pruneTheTree = false;

  /** How many subsets of equal size? One used for pruning, the rest for training. */
  protected int numSets = 3;

  /** Cleanup after the tree has been built. */
  protected boolean m_cleanup = true;

  /** The random number seed. */
  protected int m_seed = 1;

  /**
   * Constructor for pruneable tree structure. Stores reference
   * to associated training data at each node.
   *
   * @param toSelectLocModel selection method for local splitting model
   * @param pruneTree true if the tree is to be pruned
   * @param num number of subsets of equal size
   * @param cleanup whether to discard training data after building
   * @param seed the seed value to use
   * @throws Exception if something goes wrong
   */
  public PruneableClassifierTree(ModelSelection toSelectLocModel,
      boolean pruneTree, int num, boolean cleanup, int seed) throws Exception {
    super(toSelectLocModel);
    this.pruneTheTree = pruneTree;
    this.numSets = num;
    this.m_cleanup = cleanup;
    this.m_seed = seed;
  }

  /**
   * Method for building a pruneable classifier tree.
   *
   * @param data the data to build the tree from
   * @throws Exception if tree can't be built successfully
   */
  public void buildClassifier(Instances data) throws Exception {
    // work on a copy with missing-class instances removed
    data = new Instances(data);
    data.deleteWithMissingClass();

    Random rand = new Random(this.m_seed);
    data.stratify(this.numSets);

    // the last fold is held out for pruning, the rest is used for growing
    buildTree(data.trainCV(this.numSets, this.numSets - 1, rand),
        data.testCV(this.numSets, this.numSets - 1), !this.m_cleanup);

    if (this.pruneTheTree) {
      prune();
    }
    if (this.m_cleanup) {
      cleanup(new Instances(data, 0));
    }
  }

  /**
   * Prunes a tree (bottom-up, using the held-out pruning data).
   *
   * @throws Exception if tree can't be pruned successfully
   */
  public void prune() throws Exception {
    if (m_isLeaf) {
      return;
    }

    // prune all subtrees first
    for (int i = 0; i < m_sons.length; i++) {
      son(i).prune();
    }

    // collapse to a leaf if that is at least as accurate as the subtree
    if (Utils.smOrEq(errorsForLeaf(), errorsForTree())) {
      m_sons = null;
      m_isLeaf = true;
      m_localModel = new NoSplit(localModel().distribution());
    }
  }

  /**
   * Returns a newly created tree.
   *
   * @param train the training data
   * @param test the test data
   * @return the generated tree
   * @throws Exception if something goes wrong
   */
  protected ClassifierTree getNewTree(Instances train, Instances test)
      throws Exception {
    PruneableClassifierTree tree = new PruneableClassifierTree(
        m_toSelectModel, this.pruneTheTree, this.numSets, this.m_cleanup, this.m_seed);
    tree.buildTree(train, test, !this.m_cleanup);
    return tree;
  }

  /**
   * Computes estimated errors for tree.
   *
   * @return the estimated errors
   * @throws Exception if error estimate can't be computed
   */
  private double errorsForTree() throws Exception {
    if (m_isLeaf) {
      return errorsForLeaf();
    }

    double total = 0;
    for (int i = 0; i < m_sons.length; i++) {
      if (Utils.eq(localModel().distribution().perBag(i), 0)) {
        // empty training bag: charge the errors a majority-class leaf
        // would make on the pruning data routed to this branch
        total += m_test.perBag(i)
            - m_test.perClassPerBag(i, localModel().distribution().maxClass());
      } else {
        total += son(i).errorsForTree();
      }
    }
    return total;
  }

  /**
   * Computes estimated errors for leaf.
   *
   * @return the estimated errors
   * @throws Exception if error estimate can't be computed
   */
  private double errorsForLeaf() throws Exception {
    return m_test.total()
        - m_test.perClass(localModel().distribution().maxClass());
  }

  /** Method just exists to make program easier to read. */
  private ClassifierSplitModel localModel() {
    return (ClassifierSplitModel) m_localModel;
  }

  /** Method just exists to make program easier to read. */
  private PruneableClassifierTree son(int index) {
    return (PruneableClassifierTree) m_sons[index];
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/j48/SplitCriterion.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* SplitCriterion.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.j48;
import java.io.Serializable;
import weka.core.RevisionHandler;
/**
* Abstract class for computing splitting criteria
* with respect to distributions of class values.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public abstract class SplitCriterion implements Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = 5490996638027101259L;

  /**
   * Computes result of splitting criterion for given distribution.
   *
   * @param bags the class distribution over the bags of the split
   * @return value of splitting criterion. 0 by default
   */
  public double splitCritValue(Distribution bags) {
    return 0;
  }

  /**
   * Computes result of splitting criterion for given training and
   * test distributions.
   *
   * @param train the distribution on the training data
   * @param test the distribution on the test data
   * @return value of splitting criterion. 0 by default
   */
  public double splitCritValue(Distribution train, Distribution test) {
    return 0;
  }

  /**
   * Computes result of splitting criterion for given training and
   * test distributions and given number of classes.
   *
   * @param train the distribution on the training data
   * @param test the distribution on the test data
   * @param noClassesDefault the default number of classes
   * @return value of splitting criterion. 0 by default
   */
  public double splitCritValue(Distribution train, Distribution test, int noClassesDefault) {
    return 0;
  }

  /**
   * Computes result of splitting criterion for given training and
   * test distributions and given default distribution.
   *
   * @param train the distribution on the training data
   * @param test the distribution on the test data
   * @param defC the default distribution
   * @return value of splitting criterion. 0 by default
   */
  public double splitCritValue(Distribution train, Distribution test, Distribution defC) {
    return 0;
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/j48/Stats.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Stats.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.j48;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Statistics;
/**
* Class implementing a statistical routine needed by J48 to
* compute its error estimate.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public class Stats implements RevisionHandler {

  /**
   * Computes estimated extra error for given total number of instances
   * and error using normal approximation to binomial distribution
   * (and continuity correction).
   *
   * @param N number of instances
   * @param e observed error
   * @param CF confidence value
   * @return the estimated extra error
   */
  public static double addErrs(final double N, final double e, final float CF) {
    // CF values above 0.5 make no sense for this estimate
    if (CF > 0.5) {
      return 0;
    }

    // Low end: the normal approximation breaks down. Use the closed form
    // for e == 0 (documenta Geigy Scientific Tables, 6th edition, page 185)
    // and interpolate linearly towards e == 1, like C4.5 does.
    if (e < 1) {
      double zeroErrs = N * (1 - Math.pow(CF, 1 / N));
      return (e == 0) ? zeroErrs : zeroErrs + e * (addErrs(N, 1, CF) - zeroErrs);
    }

    // High end: interpolate between N - 0.5 and N because of the
    // continuity correction; never return anything below zero.
    if (e + 0.5 >= N) {
      return Math.max(N - e, 0);
    }

    // General case: upper limit of the confidence interval from the
    // normal approximation, using the z-score for the given CF.
    double z = Statistics.normalInverse(1 - CF);
    double f = (e + 0.5) / N;
    double numerator = f + (z * z) / (2 * N)
        + z * Math.sqrt((f / N) - (f * f / N) + (z * z / (4 * N * N)));
    double r = numerator / (1 + (z * z) / N);
    return (r * N) - e;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/lmt/LMTNode.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* LMTNode.java
* Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.lmt;
import java.util.Collections;
import java.util.Comparator;
import java.util.Vector;
import weka.classifiers.Evaluation;
import weka.classifiers.trees.j48.ClassifierSplitModel;
import weka.classifiers.trees.j48.ModelSelection;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.filters.Filter;
import weka.filters.supervised.attribute.NominalToBinary;
/**
* Auxiliary class for list of LMTNodes
*/
class CompareNode implements Comparator<LMTNode>, RevisionHandler {

  /**
   * Orders two nodes by their m_alpha pruning value.
   *
   * Note: intentionally a hand-rolled comparison rather than
   * Double.compare, which would order NaN values differently.
   *
   * @param o1 first node
   * @param o2 second node
   * @return a negative integer, zero, or a positive integer as the first
   *         node's alpha is less than, equal to, or greater than the second's
   */
  @Override
  public int compare(final LMTNode o1, final LMTNode o2) {
    if (o1.m_alpha < o2.m_alpha) {
      return -1;
    }
    return (o1.m_alpha > o2.m_alpha) ? 1 : 0;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
/**
* Class for logistic model tree structure.
*
*
* @author Niels Landwehr
* @author Marc Sumner
* @version $Revision$
*/
public class LMTNode extends LogisticBase {
/** for serialization */
static final long serialVersionUID = 1862737145870398755L;
/** Total number of training instances. */
protected double m_totalInstanceWeight;
/** Node id */
protected int m_id;
/** ID of logistic model at leaf */
protected int m_leafModelNum;
/** Alpha-value (for pruning) at the node */
public double m_alpha;
/**
* Weighted number of training examples currently misclassified by the logistic model at the node
*/
public double m_numIncorrectModel;
/**
* Weighted number of training examples currently misclassified by the subtree rooted at the node
*/
public double m_numIncorrectTree;
/** minimum number of instances at which a node is considered for splitting */
protected int m_minNumInstances;
/** ModelSelection object (for splitting) */
protected ModelSelection m_modelSelection;
/** Filter to convert nominal attributes to binary */
protected NominalToBinary m_nominalToBinary;
/** Number of folds for CART pruning */
protected static int m_numFoldsPruning = 5;
/**
* Use heuristic that determines the number of LogitBoost iterations only once in the beginning?
*/
protected boolean m_fastRegression;
/** Number of instances at the node */
protected int m_numInstances;
/** The ClassifierSplitModel (for splitting) */
protected ClassifierSplitModel m_localModel;
/** Array of children of the node */
protected LMTNode[] m_sons;
/** True if node is leaf */
protected boolean m_isLeaf;
/**
* Constructor for logistic model tree node.
*
* @param modelSelection
* selection method for local splitting model
* @param numBoostingIterations
* sets the numBoostingIterations parameter
* @param fastRegression
* sets the fastRegression parameter
* @param errorOnProbabilities
* Use error on probabilities for stopping criterion of LogitBoost?
* @param minNumInstances
* minimum number of instances at which a node is considered for splitting
*/
public LMTNode(final ModelSelection modelSelection, final int numBoostingIterations, final boolean fastRegression, final boolean errorOnProbabilities, final int minNumInstances, final double weightTrimBeta, final boolean useAIC,
    final NominalToBinary ntb, final int numDecimalPlaces) {
  // splitting / stopping configuration
  this.m_modelSelection = modelSelection;
  this.m_minNumInstances = minNumInstances;
  this.m_nominalToBinary = ntb;
  // LogitBoost configuration
  this.m_fixedNumIterations = numBoostingIterations;
  this.m_fastRegression = fastRegression;
  this.m_errorOnProbabilities = errorOnProbabilities;
  this.m_maxIterations = 200; // hard cap on boosting iterations
  this.setWeightTrimBeta(weightTrimBeta);
  this.setUseAIC(useAIC);
  // output formatting
  this.m_numDecimalPlaces = numDecimalPlaces;
}
/**
* Method for building a logistic model tree (only called for the root node). Grows an initial
* logistic model tree and prunes it back using the CART pruning scheme.
*
* @param data
* the data to train with
* @throws Exception
* if something goes wrong
*/
@Override
public void buildClassifier(final Instances data) throws Exception {
  // heuristic to avoid cross-validating the number of LogitBoost iterations
  // at every node: build standalone logistic model and take its optimum
  // number of iterations everywhere in the tree.
  if (this.m_fastRegression && (this.m_fixedNumIterations < 0)) {
    this.m_fixedNumIterations = this.tryLogistic(data);
  }
  // Need to cross-validate alpha-parameter for CART-pruning
  Instances cvData = new Instances(data);
  cvData.stratify(m_numFoldsPruning);
  // per-fold sequences of alpha values and matching test-set error rates
  double[][] alphas = new double[m_numFoldsPruning][];
  double[][] errors = new double[m_numFoldsPruning][];
  for (int i = 0; i < m_numFoldsPruning; i++) {
    // XXX kill weka execution: honor interruption so long runs can be aborted
    if (Thread.interrupted()) {
      throw new InterruptedException("Thread got interrupted, thus, kill WEKA.");
    }
    // for every fold, grow tree on training set...
    Instances train = cvData.trainCV(m_numFoldsPruning, i);
    Instances test = cvData.testCV(m_numFoldsPruning, i);
    this.buildTree(train, null, train.numInstances(), 0, null);
    int numNodes = this.getNumInnerNodes();
    // numNodes + 2: one slot per prune step plus the unpruned tree
    // (alpha = 0) and a closing sentinel (alpha = 1)
    alphas[i] = new double[numNodes + 2];
    errors[i] = new double[numNodes + 2];
    // ... then prune back and log alpha-values and errors on test set
    this.prune(alphas[i], errors[i], test);
  }
  // don't need CV data anymore
  cvData = null;
  // build tree using all the data
  this.buildTree(data, null, data.numInstances(), 0, null);
  int numNodes = this.getNumInnerNodes();
  double[] treeAlphas = new double[numNodes + 2];
  // prune back and log alpha-values
  int iterations = this.prune(treeAlphas, null, null);
  double[] treeErrors = new double[numNodes + 2];
  for (int i = 0; i <= iterations; i++) {
    // XXX kill weka execution
    if (Thread.interrupted()) {
      throw new InterruptedException("Thread got interrupted, thus, kill WEKA.");
    }
    // compute midpoint alphas (geometric mean of consecutive alpha values)
    double alpha = Math.sqrt(treeAlphas[i] * treeAlphas[i + 1]);
    double error = 0;
    // compute error estimate for final trees from the midpoint-alphas and the
    // error estimates gotten in the cross-validation
    for (int k = 0; k < m_numFoldsPruning; k++) {
      int l = 0;
      // advance to the first fold-alpha exceeding the midpoint alpha;
      // the tree just before it is the one matching this alpha
      while (alphas[k][l] <= alpha) {
        l++;
      }
      error += errors[k][l - 1];
    }
    treeErrors[i] = error;
  }
  // find best alpha; iterating downwards means ties favour the smaller tree
  int best = -1;
  double bestError = Double.MAX_VALUE;
  for (int i = iterations; i >= 0; i--) {
    // XXX kill weka execution
    if (Thread.interrupted()) {
      throw new InterruptedException("Thread got interrupted, thus, kill WEKA.");
    }
    if (treeErrors[i] < bestError) {
      bestError = treeErrors[i];
      best = i;
    }
  }
  double bestAlpha = Math.sqrt(treeAlphas[best] * treeAlphas[best + 1]);
  // "unprune" final tree (faster than regrowing it)
  this.unprune();
  // CART-prune it with best alpha
  this.prune(bestAlpha);
}
/**
* Method for building the tree structure. Builds a logistic model, splits the node and recursively
* builds tree for child nodes.
*
* @param data
* the training data passed on to this node
* @param higherRegressions
* An array of regression functions produced by LogitBoost at higher levels in the tree.
* They represent a logistic regression model that is refined locally at this node.
* @param totalInstanceWeight
* the total number of training examples
* @param higherNumParameters
* effective number of parameters in the logistic regression model built in parent nodes
* @throws Exception
* if something goes wrong
*/
public void buildTree(final Instances data, final SimpleLinearRegression[][] higherRegressions, final double totalInstanceWeight, final double higherNumParameters, final Instances numericDataHeader) throws Exception {
  // save some stuff
  this.m_totalInstanceWeight = totalInstanceWeight;
  this.m_train = data; // no need to copy the data here
  this.m_isLeaf = true;
  this.m_sons = null;
  this.m_numInstances = this.m_train.numInstances();
  this.m_numClasses = this.m_train.numClasses();
  // init: numeric (binarized) view of the training data for LogitBoost
  this.m_numericDataHeader = numericDataHeader;
  this.m_numericData = this.getNumericData(this.m_train);
  // start from the parent's regression functions if any were passed down,
  // otherwise from an empty model
  if (higherRegressions == null) {
    this.m_regressions = this.initRegressions();
  } else {
    this.m_regressions = higherRegressions;
  }
  this.m_numParameters = higherNumParameters;
  this.m_numRegressions = 0;
  // build logistic model (only if enough instances for internal CV folds)
  if (this.m_numInstances >= m_numFoldsBoosting) {
    if (this.m_fixedNumIterations > 0) {
      this.performBoosting(this.m_fixedNumIterations);
    } else if (this.getUseAIC()) {
      this.performBoostingInfCriterion();
    } else {
      this.performBoostingCV();
    }
  }
  this.m_numParameters += this.m_numRegressions;
  // store performance of model at this node (training error, used later
  // for computing pruning alphas)
  Evaluation eval = new Evaluation(this.m_train);
  eval.evaluateModel(this, this.m_train);
  this.m_numIncorrectModel = eval.incorrect();
  boolean grow;
  // split node if more than minNumInstances...
  if (this.m_numInstances > this.m_minNumInstances) {
    // split node: either splitting on class value (a la C4.5) or splitting on
    // residuals
    if (this.m_modelSelection instanceof ResidualModelSelection) {
      // need ps/Ys/Zs/weights
      double[][] probs = this.getProbs(this.getFs(this.m_numericData));
      double[][] trainYs = this.getYs(this.m_train);
      double[][] dataZs = this.getZs(probs, trainYs);
      double[][] dataWs = this.getWs(probs, trainYs);
      this.m_localModel = ((ResidualModelSelection) this.m_modelSelection).selectModel(this.m_train, dataZs, dataWs);
    } else {
      this.m_localModel = this.m_modelSelection.selectModel(this.m_train);
    }
    // ... and valid split found
    grow = (this.m_localModel.numSubsets() > 1);
  } else {
    grow = false;
  }
  if (grow) {
    // create and build children of node
    this.m_isLeaf = false;
    Instances[] localInstances = this.m_localModel.split(this.m_train);
    // don't need data anymore, so clean up
    this.cleanup();
    this.m_sons = new LMTNode[this.m_localModel.numSubsets()];
    for (int i = 0; i < this.m_sons.length; i++) {
      this.m_sons[i] = new LMTNode(this.m_modelSelection, this.m_fixedNumIterations, this.m_fastRegression, this.m_errorOnProbabilities, this.m_minNumInstances, this.getWeightTrimBeta(), this.getUseAIC(), this.m_nominalToBinary,
          this.m_numDecimalPlaces);
      // children receive a copy of this node's regressions so they can
      // refine the logistic model locally
      this.m_sons[i].buildTree(localInstances[i], this.copyRegressions(this.m_regressions), this.m_totalInstanceWeight, this.m_numParameters, this.m_numericDataHeader);
      localInstances[i] = null; // release subset data eagerly
    }
  } else {
    this.cleanup();
  }
}
/**
* Prunes a logistic model tree using the CART pruning scheme, given a cost-complexity parameter
* alpha.
*
* @param alpha
* the cost-complexity measure
* @throws Exception
* if something goes wrong
*/
public void prune(final double alpha) throws Exception {
  Vector<LMTNode> nodeList;
  CompareNode comparator = new CompareNode();
  // determine training error of logistic models and subtrees, and calculate
  // alpha-values from them
  this.treeErrors();
  this.calculateAlphas();
  // get list of all inner nodes in the tree
  nodeList = this.getNodes();
  boolean prune = (nodeList.size() > 0);
  while (prune) {
    // select node with minimum alpha
    LMTNode nodeToPrune = Collections.min(nodeList, comparator);
    // stop once the weakest link's alpha exceeds the requested alpha
    if (nodeToPrune.m_alpha > alpha) {
      break;
    }
    nodeToPrune.m_isLeaf = true;
    nodeToPrune.m_sons = null;
    // update tree errors and alphas
    this.treeErrors();
    this.calculateAlphas();
    nodeList = this.getNodes();
    prune = (nodeList.size() > 0);
  }
  // discard references to models at internal nodes because they are not
  // needed.
  // BUGFIX: null out each inner node's regressions (lnode.m_regressions),
  // not the root's — the previous code assigned this.m_regressions inside
  // the loop, leaving the children's model references alive.
  for (LMTNode lnode : this.getNodes()) {
    if (!lnode.m_isLeaf) {
      lnode.m_regressions = null;
    }
  }
}
/**
* Method for performing one fold in the cross-validation of the cost-complexity parameter.
* Generates a sequence of alpha-values with error estimates for the corresponding (partially
* pruned) trees, given the test set of that fold.
*
* @param alphas
* array to hold the generated alpha-values
* @param errors
* array to hold the corresponding error estimates
* @param test
* test set of that fold (to obtain error estimates)
* @throws Exception
* if something goes wrong
*/
public int prune(final double[] alphas, final double[] errors, final Instances test) throws Exception {
  Vector<LMTNode> nodeList;
  CompareNode comparator = new CompareNode();
  // determine training error of logistic models and subtrees, and calculate
  // alpha-values from them
  this.treeErrors();
  this.calculateAlphas();
  // get list of all inner nodes in the tree
  nodeList = this.getNodes();
  boolean prune = (nodeList.size() > 0);
  // alpha_0 is always zero (unpruned tree)
  alphas[0] = 0;
  Evaluation eval;
  // error of unpruned tree (only logged when an errors array was supplied)
  if (errors != null) {
    eval = new Evaluation(test);
    eval.evaluateModel(this, test);
    errors[0] = eval.errorRate();
  }
  int iteration = 0;
  while (prune) {
    iteration++;
    // get node with minimum alpha (the "weakest link")
    LMTNode nodeToPrune = Collections.min(nodeList, comparator);
    nodeToPrune.m_isLeaf = true;
    // Do not set m_sons null, want to unprune
    // get alpha-value of node
    alphas[iteration] = nodeToPrune.m_alpha;
    // log error of the partially pruned tree on the test set
    if (errors != null) {
      eval = new Evaluation(test);
      eval.evaluateModel(this, test);
      errors[iteration] = eval.errorRate();
    }
    // update errors/alphas
    this.treeErrors();
    this.calculateAlphas();
    nodeList = this.getNodes();
    prune = (nodeList.size() > 0);
  }
  // set last alpha 1 to indicate end
  alphas[iteration + 1] = 1.0;
  // return the number of prune steps performed
  return iteration;
}
/**
* Method to "unprune" a logistic model tree. Sets all leaf-fields to false. Faster than re-growing
* the tree because the logistic models do not have to be fit again.
*/
protected void unprune() {
  // nothing to restore below a node that was never split
  if (this.m_sons == null) {
    return;
  }
  this.m_isLeaf = false;
  for (LMTNode son : this.m_sons) {
    son.unprune();
  }
}
/**
* Determines the optimum number of LogitBoost iterations to perform by building a standalone
* logistic regression function on the training data. Used for the heuristic that avoids
* cross-validating this number again at every node.
*
* @param data
* training instances for the logistic model
* @throws Exception
* if something goes wrong
*/
protected int tryLogistic(final Instances data) throws Exception {
  // convert nominal attributes to binary ones first
  Instances filtered = Filter.useFilter(data, this.m_nominalToBinary);

  LogisticBase logistic = new LogisticBase(0, true, this.m_errorOnProbabilities);
  logistic.setMaxIterations(200); // limit LogitBoost to 200 iterations (speed)
  logistic.setWeightTrimBeta(this.getWeightTrimBeta());
  logistic.setUseAIC(this.getUseAIC());
  logistic.buildClassifier(filtered);

  // the standalone model's optimum iteration count is reused in the tree
  return logistic.getNumRegressions();
}
/**
* Method to count the number of inner nodes in the tree
*
* @return the number of inner nodes
*/
public int getNumInnerNodes() {
  if (this.m_isLeaf) {
    return 0;
  }
  // count this node plus all inner nodes in the subtrees
  int count = 1;
  for (LMTNode son : this.m_sons) {
    count += son.getNumInnerNodes();
  }
  return count;
}
/**
* Returns the number of leaves in the tree. Leaves are only counted if their logistic model has
* changed compared to the one of the parent node.
*
* @return the number of leaves
*/
public int getNumLeaves() {
  if (this.m_isLeaf) {
    return 1;
  }
  int numLeaves = 0;
  int numEmptyLeaves = 0;
  for (LMTNode son : this.m_sons) {
    numLeaves += son.getNumLeaves();
    // leaves whose logistic model did not change w.r.t. the parent
    if (son.m_isLeaf && !son.hasModels()) {
      numEmptyLeaves++;
    }
  }
  // several model-less leaves under one parent count as a single leaf
  if (numEmptyLeaves > 1) {
    numLeaves -= (numEmptyLeaves - 1);
  }
  return numLeaves;
}
/**
* Updates the numIncorrectTree field for all nodes. This is needed for calculating the
* alpha-values.
*/
public void treeErrors() {
  if (this.m_isLeaf) {
    // a leaf's subtree error is just its model's error
    this.m_numIncorrectTree = this.m_numIncorrectModel;
    return;
  }
  // inner node: sum of the subtree errors of all children
  double sum = 0;
  for (LMTNode son : this.m_sons) {
    son.treeErrors();
    sum += son.m_numIncorrectTree;
  }
  this.m_numIncorrectTree = sum;
}
/**
* Updates the alpha field for all nodes.
*/
public void calculateAlphas() throws Exception {
  if (this.m_isLeaf) {
    // alpha = infinite for leaves (do not want to prune)
    this.m_alpha = Double.MAX_VALUE;
    return;
  }
  double errorDiff = this.m_numIncorrectModel - this.m_numIncorrectTree;
  if (errorDiff <= 0) {
    // split increases training error (should not normally happen):
    // prune it instantly
    this.m_isLeaf = true;
    this.m_sons = null;
    this.m_alpha = Double.MAX_VALUE;
    return;
  }
  // cost-complexity: error reduction per extra leaf
  errorDiff /= this.m_totalInstanceWeight;
  this.m_alpha = errorDiff / (this.getNumLeaves() - 1);
  for (LMTNode son : this.m_sons) {
    son.calculateAlphas();
  }
}
/**
* Return a list of all inner nodes in the tree
*
* @return the list of nodes
*/
public Vector<LMTNode> getNodes() {
  // collect all inner nodes via the accumulating overload
  Vector<LMTNode> result = new Vector<>();
  this.getNodes(result);
  return result;
}
/**
* Fills a list with all inner nodes in the tree
*
* @param nodeList
* the list to be filled
*/
public void getNodes(final Vector<LMTNode> nodeList) {
  // leaves are not inner nodes and have no children to visit
  if (this.m_isLeaf) {
    return;
  }
  nodeList.add(this);
  for (LMTNode son : this.m_sons) {
    son.getNodes(nodeList);
  }
}
/**
* Returns a numeric version of a set of instances. All nominal attributes are replaced by binary
* ones, and the class variable is replaced by a pseudo-class variable that is used by LogitBoost.
*/
@Override
protected Instances getNumericData(final Instances train) throws Exception {
  // binarize nominal attributes, then delegate to the base implementation
  return super.getNumericData(Filter.useFilter(train, this.m_nominalToBinary));
}
/**
* Returns true if the logistic regression model at this node has changed compared to the one at the
* parent node.
*
* @return whether it has changed
*/
public boolean hasModels() {
  // true iff LogitBoost added at least one regression at this node
  return this.m_numRegressions > 0;
}
/**
* Returns the class probabilities for an instance according to the logistic model at the node.
*
* @param instance
* the instance
* @return the array of probabilities
*/
public double[] modelDistributionForInstance(Instance instance) throws Exception {
  // make copy and convert nominal attributes to binary ones
  this.m_nominalToBinary.input(instance);
  Instance filtered = this.m_nominalToBinary.output();
  // set numeric pseudo-class header so the logistic model can be applied
  filtered.setDataset(this.m_numericDataHeader);
  return this.probs(this.getFs(filtered));
}
/**
 * Returns the class probabilities for an instance as predicted by the logistic model
 * tree: the instance is routed down the tree and the logistic model at the reached leaf
 * produces the distribution.
 *
 * @param instance
 *          the instance to classify
 * @return the array of class probabilities
 * @throws Exception if the distribution cannot be computed
 */
@Override
public double[] distributionForInstance(final Instance instance) throws Exception {
  if (this.m_isLeaf) {
    // leaf: the local logistic model provides the distribution
    return this.modelDistributionForInstance(instance);
  }
  // inner node: delegate to the child the instance is sorted into
  final int branch = this.m_localModel.whichSubset(instance);
  return this.m_sons[branch].distributionForInstance(instance);
}
/**
 * Counts the leaves of the subtree rooted at this node.
 *
 * @return the number of leaves
 */
public int numLeaves() {
  if (this.m_isLeaf) {
    return 1;
  }
  int leafCount = 0;
  for (LMTNode child : this.m_sons) {
    leafCount += child.numLeaves();
  }
  return leafCount;
}
/**
 * Counts all nodes (inner nodes and leaves) of the subtree rooted at this node.
 *
 * @return the number of nodes
 */
public int numNodes() {
  int total = 1; // count this node itself
  if (!this.m_isLeaf) {
    for (LMTNode child : this.m_sons) {
      total += child.numNodes();
    }
  }
  return total;
}
/**
 * Returns a description of the logistic model tree (tree structure and logistic models).
 * Leaf models are numbered first so the structure dump and the model listing use
 * consistent "LM_n" labels.
 *
 * @return describing string, or a fixed error message if printing fails
 */
@Override
public String toString() {
// assign numbers to logistic regression functions at leaves
this.assignLeafModelNumbers(0);
try {
StringBuffer text = new StringBuffer();
if (this.m_isLeaf) {
// whole tree is a single leaf: print its model reference directly
text.append(": ");
text.append("LM_" + this.m_leafModelNum + ":" + this.getModelParameters());
} else {
this.dumpTree(0, text);
}
text.append("\n\nNumber of Leaves : \t" + this.numLeaves() + "\n");
text.append("\nSize of the Tree : \t" + this.numNodes() + "\n");
// This prints logistic models after the tree, comment out if only tree
// should be printed
text.append(this.modelsToString());
return text.toString();
} catch (Exception e) {
// printing is best-effort; toString() must not propagate exceptions
return "Can't print logistic model tree";
}
}
/**
 * Returns a string describing the number of LogitBoost iterations performed at this
 * node, the total number of LogitBoost iterations performed (including iterations at
 * higher levels in the tree), and the number of training examples at this node.
 *
 * @return the describing string
 */
public String getModelParameters() {
  final int totalIterations = (int) this.m_numParameters;
  return this.m_numRegressions + "/" + totalIterations + " (" + this.m_numInstances + ")";
}
/**
 * Help method for printing tree structure. Appends one line per child: the split
 * condition followed by either the child's leaf-model reference or, recursively, the
 * child's own subtree.
 *
 * @param depth the current depth, controls the "| " indentation
 * @param text the buffer the description is appended to
 * @throws Exception
 * if something goes wrong
 */
protected void dumpTree(final int depth, final StringBuffer text) throws Exception {
for (int i = 0; i < this.m_sons.length; i++) {
text.append("\n");
for (int j = 0; j < depth; j++) {
text.append("| ");
}
text.append(this.m_localModel.leftSide(this.m_train));
text.append(this.m_localModel.rightSide(i, this.m_train));
if (this.m_sons[i].m_isLeaf) {
text.append(": ");
text.append("LM_" + this.m_sons[i].m_leafModelNum + ":" + this.m_sons[i].getModelParameters());
} else {
this.m_sons[i].dumpTree(depth + 1, text);
}
}
}
/**
 * Assigns unique IDs to all nodes in the subtree, numbering them consecutively
 * starting at lastID + 1.
 *
 * @param lastID
 *          the last ID handed out so far
 * @return the highest ID assigned within this subtree
 */
public int assignIDs(final int lastID) {
  int nextID = lastID + 1;
  this.m_id = nextID;
  if (this.m_sons != null) {
    for (LMTNode child : this.m_sons) {
      nextID = child.assignIDs(nextID);
    }
  }
  return nextID;
}
/**
 * Numbers the logistic regression models at the leaves consecutively, starting at
 * leafCounter + 1. Inner nodes receive model number 0.
 *
 * @param leafCounter
 *          the number of leaves already numbered
 * @return the updated leaf count
 */
public int assignLeafModelNumbers(int leafCounter) {
  if (this.m_isLeaf) {
    this.m_leafModelNum = ++leafCounter;
  } else {
    this.m_leafModelNum = 0;
    for (LMTNode child : this.m_sons) {
      leafCounter = child.assignLeafModelNumbers(leafCounter);
    }
  }
  return leafCounter;
}
/**
 * Returns a textual description of the logistic regression functions at the leaves of
 * this subtree.
 *
 * @return the describing string
 */
public String modelsToString() {
  final StringBuffer description = new StringBuffer();
  if (this.m_isLeaf) {
    description.append("LM_").append(this.m_leafModelNum).append(":").append(super.toString());
  } else {
    for (LMTNode child : this.m_sons) {
      description.append("\n").append(child.modelsToString());
    }
  }
  return description.toString();
}
/**
 * Returns graph describing the tree in GraphViz "dot" format. Node IDs and leaf model
 * numbers are (re)assigned before the graph is emitted.
 *
 * @return the dot representation of the tree
 * @throws Exception
 * if something goes wrong
 */
public String graph() throws Exception {
StringBuffer text = new StringBuffer();
this.assignIDs(-1);
this.assignLeafModelNumbers(0);
text.append("digraph LMTree {\n");
if (this.m_isLeaf) {
// single-leaf tree: one box node labelled with its model reference
text.append("N" + this.m_id + " [label=\"LM_" + this.m_leafModelNum + ":" + this.getModelParameters() + "\" " + "shape=box style=filled");
text.append("]\n");
} else {
// root node labelled with its split attribute; children emitted recursively
text.append("N" + this.m_id + " [label=\"" + Utils.backQuoteChars(this.m_localModel.leftSide(this.m_train)) + "\" ");
text.append("]\n");
this.graphTree(text);
}
return text.toString() + "}\n";
}
/**
 * Helper function for graph description of tree: emits one edge per child (labelled
 * with the split condition) plus the child's node declaration, recursing into
 * non-leaf children.
 *
 * @param text the buffer the dot fragments are appended to
 * @throws Exception
 * if something goes wrong
 */
private void graphTree(final StringBuffer text) throws Exception {
for (int i = 0; i < this.m_sons.length; i++) {
text.append("N" + this.m_id + "->" + "N" + this.m_sons[i].m_id + " [label=\"" + Utils.backQuoteChars(this.m_localModel.rightSide(i, this.m_train).trim()) + "\"]\n");
if (this.m_sons[i].m_isLeaf) {
// leaf child: filled box labelled with its model reference
text.append("N" + this.m_sons[i].m_id + " [label=\"LM_" + this.m_sons[i].m_leafModelNum + ":" + this.m_sons[i].getModelParameters() + "\" " + "shape=box style=filled");
text.append("]\n");
} else {
text.append("N" + this.m_sons[i].m_id + " [label=\"" + Utils.backQuoteChars(this.m_sons[i].m_localModel.leftSide(this.m_train)) + "\" ");
text.append("]\n");
this.m_sons[i].graphTree(text);
}
}
}
/**
 * Returns the revision string.
 *
 * @return the revision, extracted from the version-control keyword
 */
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/lmt/LogisticBase.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* LogisticBase.java
* Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.lmt;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Evaluation;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.core.WeightedInstancesHandler;
/**
* Base/helper class for building logistic regression models with the LogitBoost algorithm. Used for building logistic model trees (weka.classifiers.trees.lmt.LMT) and standalone logistic regression
* (weka.classifiers.functions.SimpleLogistic).
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
* </pre>
*
* <!-- options-end -->
*
* @author Niels Landwehr
* @author Marc Sumner
* @version $Revision$
*/
public class LogisticBase extends AbstractClassifier implements WeightedInstancesHandler {
/** for serialization */
static final long serialVersionUID = 168765678097825064L;
/** Header-only version of the numeric version of the training data */
protected Instances m_numericDataHeader;
/**
 * Numeric version of the training data. Original class is replaced by a numeric pseudo-class.
 */
protected Instances m_numericData;
/** Training data */
protected Instances m_train;
/** Use cross-validation to determine best number of LogitBoost iterations ? */
protected boolean m_useCrossValidation;
/** Use error on probabilities for stopping criterion of LogitBoost? */
protected boolean m_errorOnProbabilities;
/**
 * Use fixed number of iterations for LogitBoost? (if negative, cross-validate number of iterations)
 */
protected int m_fixedNumIterations;
/**
 * Use heuristic to stop performing LogitBoost iterations earlier? If enabled, LogitBoost is stopped if the current (local) minimum of the error on a test set as a function of the number of iterations has not changed for m_heuristicStop
 * iterations.
 */
protected int m_heuristicStop = 50;
/** The number of LogitBoost iterations performed. */
protected int m_numRegressions = 0;
/** The maximum number of LogitBoost iterations */
protected int m_maxIterations;
/** The number of different classes */
protected int m_numClasses;
/** Array holding the simple regression functions fit by LogitBoost, indexed [class][attribute] */
protected SimpleLinearRegression[][] m_regressions;
/** Number of folds for cross-validating number of LogitBoost iterations */
protected static int m_numFoldsBoosting = 5;
/** Threshold on the Z-value for LogitBoost; responses are clipped to [-Z_MAX, Z_MAX] */
protected static final double Z_MAX = 3;
/** If true, the AIC is used to choose the best iteration */
private boolean m_useAIC = false;
/** Effective number of parameters used for AIC / BIC automatic stopping */
protected double m_numParameters = 0;
/**
 * Threshold for trimming weights. Instances with a weight lower than this (as a percentage of total weights) are not included in the regression fit.
 **/
protected double m_weightTrimBeta = 0;
/**
 * Constructor that creates a LogisticBase object with the standard options: no fixed
 * number of LogitBoost iterations (negative sentinel), cross-validation of the
 * iteration count enabled, and stopping on misclassification error rather than error
 * on probabilities.
 */
public LogisticBase() {
  // Delegate to the parameterized constructor instead of duplicating the default
  // field assignments; it sets exactly the same values (m_maxIterations = 500,
  // m_useAIC = false, m_numParameters = 0, m_numDecimalPlaces = 2).
  this(-1, true, false);
}
/**
 * Constructor to create LogisticBase object.
 *
 * @param numBoostingIterations
 * fixed number of iterations for LogitBoost (if negative, use cross-validation or stopping criterion on the training data).
 * @param useCrossValidation
 * cross-validate number of LogitBoost iterations (if false, use stopping criterion on the training data).
 * @param errorOnProbabilities
 * if true, use error on probabilities instead of misclassification for stopping criterion of LogitBoost
 */
public LogisticBase(final int numBoostingIterations, final boolean useCrossValidation, final boolean errorOnProbabilities) {
this.m_fixedNumIterations = numBoostingIterations;
this.m_useCrossValidation = useCrossValidation;
this.m_errorOnProbabilities = errorOnProbabilities;
// the remaining options keep their standard values
this.m_maxIterations = 500;
this.m_useAIC = false;
this.m_numParameters = 0;
this.m_numDecimalPlaces = 2;
}
/**
 * Builds the logistic regression model using LogitBoost. The number of boosting
 * iterations is chosen by (in order of precedence): a fixed count, the AIC criterion,
 * cross-validation, or the minimum error on the training set.
 *
 * @param data
 * the training data
 * @throws Exception
 * if something goes wrong
 */
@Override
public void buildClassifier(final Instances data) throws Exception {
this.m_train = new Instances(data);
this.m_numClasses = this.m_train.numClasses();
// get numeric version of the training data (class variable replaced by
// numeric pseudo-class)
this.m_numericData = this.getNumericData(this.m_train);
// init the array of simple regression functions
this.m_regressions = this.initRegressions();
this.m_numRegressions = 0;
if (this.m_fixedNumIterations > 0) {
// run LogitBoost for fixed number of iterations
this.performBoosting(this.m_fixedNumIterations);
} else if (this.m_useAIC) { // Marc had this after the test for
// m_useCrossValidation. Changed by Eibe.
// run LogitBoost using information criterion for stopping
this.performBoostingInfCriterion();
} else if (this.m_useCrossValidation) {
// cross-validate number of LogitBoost iterations
this.performBoostingCV();
} else {
// run LogitBoost with number of iterations that minimizes error on the
// training set
this.performBoosting();
}
// clean up
this.cleanup();
}
/**
 * Runs LogitBoost, determining the best number of iterations by cross-validation.
 * The error array accumulates the per-iteration error over all folds; the iteration
 * with the smallest accumulated error (among those completed by every fold) is then
 * used to rebuild the model on the full training set.
 *
 * @throws Exception
 * if something goes wrong
 */
protected void performBoostingCV() throws Exception {
// completed iteration keeps track of the number of iterations that have
// been
// performed in every fold (some might stop earlier than others).
// Best iteration is selected only from these.
int completedIterations = this.m_maxIterations;
Instances allData = new Instances(this.m_train);
allData.stratify(m_numFoldsBoosting);
double[] error = new double[this.m_maxIterations + 1];
// keep the initial (empty) regressions so each fold can start from scratch
SimpleLinearRegression[][] backup = this.m_regressions;
for (int i = 0; i < m_numFoldsBoosting; i++) {
// XXX Interrupt added
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA!");
}
// split into training/test data in fold
Instances train = allData.trainCV(m_numFoldsBoosting, i);
Instances test = allData.testCV(m_numFoldsBoosting, i);
// initialize LogitBoost
this.m_numRegressions = 0;
this.m_regressions = this.copyRegressions(backup);
// run LogitBoost iterations
int iterations = this.performBoosting(train, test, error, completedIterations);
if (iterations < completedIterations) {
completedIterations = iterations;
}
}
// determine iteration with minimum error over the folds
int bestIteration = this.getBestIteration(error, completedIterations);
// rebuild model on all of the training data
this.m_numRegressions = 0;
this.m_regressions = backup;
this.performBoosting(bestIteration);
}
/**
 * Deep-copies the given array of simple linear regression functions. The slot at the
 * class index is skipped (initRegressions() leaves it empty).
 *
 * @param a
 *          the array to copy
 * @return the new array
 * @throws Exception if the fresh array cannot be initialized
 */
protected SimpleLinearRegression[][] copyRegressions(final SimpleLinearRegression[][] a) throws Exception {
  final SimpleLinearRegression[][] result = this.initRegressions();
  final int classIndex = this.m_numericDataHeader.classIndex();
  for (int classIdx = 0; classIdx < a.length; classIdx++) {
    for (int attIdx = 0; attIdx < a[classIdx].length; attIdx++) {
      if (attIdx != classIndex) {
        result[classIdx][attIdx].addModel(a[classIdx][attIdx]);
      }
    }
  }
  return result;
}
/**
 * Runs LogitBoost, determining the best number of iterations by an information criterion (currently AIC).
 * The criterion is 2 * negative log-likelihood + 2 * (effective number of parameters
 * + iteration); boosting stops early if the best value has not improved for
 * m_heuristicStop iterations, and the model is then rebuilt with the best iteration count.
 *
 * @throws Exception
 * if something goes wrong
 */
protected void performBoostingInfCriterion() throws Exception {
double bestCriterion = Double.MAX_VALUE;
int bestIteration = 0;
int noMin = 0;
// Variable to keep track of criterion values (AIC)
double criterionValue = Double.MAX_VALUE;
// initialize Ys/Fs/ps
double[][] trainYs = this.getYs(this.m_train);
double[][] trainFs = this.getFs(this.m_numericData);
double[][] probs = this.getProbs(trainFs);
int iteration = 0;
while (iteration < this.m_maxIterations) {
// XXX interrupt weka
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA!");
}
// perform single LogitBoost iteration
boolean foundAttribute = this.performIteration(iteration, trainYs, trainFs, probs, this.m_numericData);
if (foundAttribute) {
iteration++;
this.m_numRegressions = iteration;
} else {
// could not fit simple linear regression: stop LogitBoost
break;
}
double numberOfAttributes = this.m_numParameters + iteration;
// Fill criterion array values
criterionValue = 2.0 * this.negativeLogLikelihood(trainYs, probs) + 2.0 * numberOfAttributes;
// heuristic: stop LogitBoost if the current minimum has not changed for
// <m_heuristicStop> iterations
if (noMin > this.m_heuristicStop) {
break;
}
if (criterionValue < bestCriterion) {
bestCriterion = criterionValue;
bestIteration = iteration;
noMin = 0;
} else {
noMin++;
}
}
// rebuild the model from scratch with the best iteration count
this.m_numRegressions = 0;
this.m_regressions = this.initRegressions();
this.performBoosting(bestIteration);
}
/**
 * Runs LogitBoost on a training set and monitors the error on a test set. Used for running one fold when cross-validating the number of LogitBoost iterations.
 * Note that error values are accumulated (+=) into the shared array so the same array
 * can collect errors across all folds.
 *
 * @param train
 * the training set
 * @param test
 * the test set
 * @param error
 * array to hold the logged error values
 * @param maxIterations
 * the maximum number of LogitBoost iterations to run
 * @return the number of completed LogitBoost iterations (can be smaller than maxIterations if the heuristic for early stopping is active or there is a problem while fitting the regressions in LogitBoost).
 * @throws Exception
 * if something goes wrong
 */
protected int performBoosting(final Instances train, final Instances test, final double[] error, final int maxIterations) throws Exception {
// get numeric version of the (sub)set of training instances
Instances numericTrain = this.getNumericData(train);
// initialize Ys/Fs/ps
double[][] trainYs = this.getYs(train);
double[][] trainFs = this.getFs(numericTrain);
double[][] probs = this.getProbs(trainFs);
int iteration = 0;
int noMin = 0;
double lastMin = Double.MAX_VALUE;
// log the error of the model before any iteration is performed
if (this.m_errorOnProbabilities) {
error[0] += this.getMeanAbsoluteError(test);
} else {
error[0] += this.getErrorRate(test);
}
while (iteration < maxIterations) {
// XXX kill weka execution
if (Thread.interrupted()) {
throw new InterruptedException("Thread got interrupted, thus, kill WEKA.");
}
// perform single LogitBoost iteration
boolean foundAttribute = this.performIteration(iteration, trainYs, trainFs, probs, numericTrain);
if (foundAttribute) {
iteration++;
this.m_numRegressions = iteration;
} else {
// could not fit simple linear regression: stop LogitBoost
break;
}
if (this.m_errorOnProbabilities) {
error[iteration] += this.getMeanAbsoluteError(test);
} else {
error[iteration] += this.getErrorRate(test);
}
// heuristic: stop LogitBoost if the current minimum has not changed for
// <m_heuristicStop> iterations
if (noMin > this.m_heuristicStop) {
break;
}
if (error[iteration] < lastMin) {
lastMin = error[iteration];
noMin = 0;
} else {
noMin++;
}
}
return iteration;
}
/**
 * Runs LogitBoost with a fixed number of iterations.
 *
 * @param numIterations
 * the number of iterations to run
 * @throws Exception
 * if something goes wrong
 */
protected void performBoosting(final int numIterations) throws Exception {
// initialize Ys/Fs/ps
double[][] trainYs = this.getYs(this.m_train);
double[][] trainFs = this.getFs(this.m_numericData);
double[][] probs = this.getProbs(trainFs);
int iteration = 0;
// run iterations
while (iteration < numIterations) {
// XXX kill weka execution
if (Thread.interrupted()) {
throw new InterruptedException("Thread got interrupted, thus, kill WEKA.");
}
boolean foundAttribute = this.performIteration(iteration, trainYs, trainFs, probs, this.m_numericData);
if (foundAttribute) {
iteration++;
} else {
// no useful attribute found: stop early
break;
}
}
// record how many iterations were actually completed
this.m_numRegressions = iteration;
}
/**
 * Runs LogitBoost using the stopping criterion on the training set. The number of iterations is used that gives the lowest error on the training set, either misclassification or error on probabilities (depending on the
 * errorOnProbabilities option).
 *
 * @throws Exception
 * if something goes wrong
 */
protected void performBoosting() throws Exception {
// initialize Ys/Fs/ps
double[][] trainYs = this.getYs(this.m_train);
double[][] trainFs = this.getFs(this.m_numericData);
double[][] probs = this.getProbs(trainFs);
int iteration = 0;
double[] trainErrors = new double[this.m_maxIterations + 1];
// error of the model before any boosting iteration
trainErrors[0] = this.getErrorRate(this.m_train);
int noMin = 0;
double lastMin = Double.MAX_VALUE;
while (iteration < this.m_maxIterations) {
// XXX kill weka execution
if (Thread.interrupted()) {
throw new InterruptedException("Thread got interrupted, thus, kill WEKA.");
}
boolean foundAttribute = this.performIteration(iteration, trainYs, trainFs, probs, this.m_numericData);
if (foundAttribute) {
iteration++;
this.m_numRegressions = iteration;
} else {
// could not fit simple regression
break;
}
trainErrors[iteration] = this.getErrorRate(this.m_train);
// heuristic: stop LogitBoost if the current minimum has not changed for
// <m_heuristicStop> iterations
if (noMin > this.m_heuristicStop) {
break;
}
if (trainErrors[iteration] < lastMin) {
lastMin = trainErrors[iteration];
noMin = 0;
} else {
noMin++;
}
}
// find iteration with best error
int bestIteration = this.getBestIteration(trainErrors, iteration);
// rebuild the model from scratch with the best iteration count
this.m_numRegressions = 0;
this.m_regressions = this.initRegressions();
this.performBoosting(bestIteration);
}
/**
 * Computes the misclassification rate of the current model on the given instances.
 *
 * @param data
 *          the set of instances to evaluate on
 * @return the error rate
 * @throws Exception if evaluation fails
 */
protected double getErrorRate(final Instances data) throws Exception {
  final Evaluation evaluation = new Evaluation(data);
  evaluation.evaluateModel(this, data);
  return evaluation.errorRate();
}
/**
 * Computes the mean absolute error of the current model's probability estimates on
 * the given instances.
 *
 * @param data
 *          the set of instances to evaluate on
 * @return the mean absolute error
 * @throws Exception if evaluation fails
 */
protected double getMeanAbsoluteError(final Instances data) throws Exception {
  final Evaluation evaluation = new Evaluation(data);
  evaluation.evaluateModel(this, data);
  return evaluation.meanAbsoluteError();
}
/**
 * Finds the iteration with the smallest logged error. Ties are resolved in favour of
 * the earliest iteration (strict comparison).
 *
 * @param errors
 *          array containing one error value per iteration
 * @param maxIteration
 *          the largest iteration index to consider (inclusive)
 * @return the index of the minimum in errors[0..maxIteration]
 */
protected int getBestIteration(final double[] errors, final int maxIteration) {
  int best = 0;
  for (int i = 1; i <= maxIteration; i++) {
    if (errors[i] < errors[best]) {
      best = i;
    }
  }
  return best;
}
/**
 * Performs a single iteration of LogitBoost, and updates the model accordingly. A simple regression function is fit to the response and added to the m_regressions array.
 * Instance weights of trainNumeric are temporarily overwritten while fitting and are
 * restored before the method returns (on both the success and failure paths).
 *
 * @param iteration
 * the current iteration
 * @param trainYs
 * the y-values (see description of LogitBoost) for the model trained so far
 * @param trainFs
 * the F-values (see description of LogitBoost) for the model trained so far; updated in place
 * @param probs
 * the p-values (see description of LogitBoost) for the model trained so far; updated in place
 * @param trainNumeric
 * numeric version of the training data
 * @return returns true if iteration performed successfully, false if no simple regression function could be fitted.
 * @throws Exception
 * if something goes wrong
 */
protected boolean performIteration(final int iteration, final double[][] trainYs, final double[][] trainFs, final double[][] probs, final Instances trainNumeric) throws Exception {
SimpleLinearRegression[] linearRegressionForEachClass = new SimpleLinearRegression[this.m_numClasses];
// Store weights
double[] oldWeights = new double[trainNumeric.numInstances()];
for (int i = 0; i < oldWeights.length; i++) {
oldWeights[i] = trainNumeric.instance(i).weight();
}
for (int j = 0; j < this.m_numClasses; j++) {
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA!");
}
// Keep track of sum of weights
double weightSum = 0.0;
for (int i = 0; i < trainNumeric.numInstances(); i++) {
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA!");
}
// compute response and weight
double p = probs[i][j];
double actual = trainYs[i][j];
double z = this.getZ(actual, p);
double w = (actual - p) / z;
// set values for instance: response becomes the pseudo-class value
Instance current = trainNumeric.instance(i);
current.setValue(trainNumeric.classIndex(), z);
current.setWeight(oldWeights[i] * w);
weightSum += current.weight();
}
Instances instancesCopy = trainNumeric;
if (weightSum > 0) {
// Only the (1-beta)th quantile of instances are sent to the base
// classifier
if (this.m_weightTrimBeta > 0) {
// Need to make an empty dataset
instancesCopy = new Instances(trainNumeric, trainNumeric.numInstances());
// Get weights
double[] weights = new double[oldWeights.length];
for (int i = 0; i < oldWeights.length; i++) {
weights[i] = trainNumeric.instance(i).weight();
}
double weightPercentage = 0.0;
// take the heaviest instances until (1 - beta) of the weight mass is covered
int[] weightsOrder = Utils.sort(weights);
for (int i = weightsOrder.length - 1; (i >= 0) && (weightPercentage < (1 - this.m_weightTrimBeta)); i--) {
instancesCopy.add(trainNumeric.instance(weightsOrder[i]));
weightPercentage += (weights[weightsOrder[i]] / weightSum);
}
// Update the sum of weights
weightSum = instancesCopy.sumOfWeights();
}
// Scale the weights so they sum to the number of instances
double multiplier = instancesCopy.numInstances() / weightSum;
for (Instance current : instancesCopy) {
current.setWeight(current.weight() * multiplier);
}
}
// fit simple regression function
linearRegressionForEachClass[j] = new SimpleLinearRegression();
linearRegressionForEachClass[j].buildClassifier(instancesCopy);
boolean foundAttribute = linearRegressionForEachClass[j].foundUsefulAttribute();
if (!foundAttribute) {
// could not fit simple regression function
// Restore weights
for (int i = 0; i < oldWeights.length; i++) {
trainNumeric.instance(i).setWeight(oldWeights[i]);
}
return false;
}
}
// Add each linear regression model to the sum
for (int i = 0; i < this.m_numClasses; i++) {
this.m_regressions[i][linearRegressionForEachClass[i].getAttributeIndex()].addModel(linearRegressionForEachClass[i]);
}
// Evaluate / increment trainFs from the classifier
for (int i = 0; i < trainFs.length; i++) {
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA!");
}
double[] pred = new double[this.m_numClasses];
double predSum = 0;
for (int j = 0; j < this.m_numClasses; j++) {
pred[j] = linearRegressionForEachClass[j].classifyInstance(trainNumeric.instance(i));
predSum += pred[j];
}
predSum /= this.m_numClasses;
// centered update scaled by (K-1)/K as in the LogitBoost algorithm
for (int j = 0; j < this.m_numClasses; j++) {
trainFs[i][j] += (pred[j] - predSum) * (this.m_numClasses - 1) / this.m_numClasses;
}
}
// Compute the current probability estimates
for (int i = 0; i < trainYs.length; i++) {
probs[i] = this.probs(trainFs[i]);
}
// Restore weights
for (int i = 0; i < oldWeights.length; i++) {
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA!");
}
trainNumeric.instance(i).setWeight(oldWeights[i]);
}
return true;
}
/**
 * Creates the array of simple regression functions: one per (class, attribute) pair.
 * The slot at the class index is left null since no regression is fit on the
 * pseudo-class attribute.
 *
 * @return the freshly initialized array
 * @throws Exception if a regression object cannot be created
 */
protected SimpleLinearRegression[][] initRegressions() throws Exception {
  final int numAttributes = this.m_numericDataHeader.numAttributes();
  final int classIndex = this.m_numericDataHeader.classIndex();
  final SimpleLinearRegression[][] classifiers = new SimpleLinearRegression[this.m_numClasses][numAttributes];
  for (int classIdx = 0; classIdx < this.m_numClasses; classIdx++) {
    for (int attIdx = 0; attIdx < numAttributes; attIdx++) {
      if (attIdx != classIndex) {
        classifiers[classIdx][attIdx] = new SimpleLinearRegression(attIdx, 0, 0);
      }
    }
  }
  return classifiers;
}
/**
 * Private class implementing a DenseInstance with an unsafe setValue() operation.
 * "Unsafe" means setValue() writes directly into the shared attribute array and
 * copy() returns this instance itself — presumably to avoid per-update array copies
 * when LogitBoost repeatedly overwrites the pseudo-class value (TODO confirm against
 * DenseInstance's copy-on-write behavior).
 */
private class UnsafeInstance extends DenseInstance {
/**
 * Added ID to avoid warning
 */
private static final long serialVersionUID = 3210674215118962869L;
/**
 * The constructor. Copies the attribute values and weight of the given instance.
 *
 * @param vals
 * The instance whose value we want to copy.
 */
public UnsafeInstance(final Instance vals) {
super(vals.numAttributes());
for (int i = 0; i < vals.numAttributes(); i++) {
this.m_AttValues[i] = vals.value(i);
}
this.m_Weight = vals.weight();
}
/**
 * Unsafe setValue() method: writes the value directly into the attribute array.
 */
@Override
public void setValue(final int attIndex, final double value) {
this.m_AttValues[attIndex] = value;
}
/**
 * We need a copy method that doesn't do anything...
 */
@Override
public Object copy() {
return this;
}
}
/**
 * Converts training data to numeric version. The class variable is replaced by a pseudo-class used by LogitBoost.
 * The header with the replaced class attribute is built only once and cached in
 * m_numericDataHeader; subsequent calls reuse it.
 *
 * @param data
 * the data to convert
 * @return the converted data
 * @throws Exception
 * if something goes wrong
 */
protected Instances getNumericData(final Instances data) throws Exception {
if (this.m_numericDataHeader == null) {
this.m_numericDataHeader = new Instances(data, 0);
int classIndex = this.m_numericDataHeader.classIndex();
// temporarily unset the class so the class attribute itself can be replaced
this.m_numericDataHeader.setClassIndex(-1);
this.m_numericDataHeader.replaceAttributeAt(new Attribute("'pseudo class'"), classIndex);
this.m_numericDataHeader.setClassIndex(classIndex);
}
Instances numericData = new Instances(this.m_numericDataHeader, data.numInstances());
for (Instance inst : data) {
// XXX Interrupt added
if (Thread.interrupted()) {
throw new InterruptedException("Killed WEKA!");
}
// wrap in UnsafeInstance so the pseudo-class can later be overwritten in place
numericData.add(new UnsafeInstance(inst));
}
return numericData;
}
/**
 * Computes the LogitBoost response variable from y/p values (actual/estimated class
 * probabilities). The result is clipped to [-Z_MAX, Z_MAX] to keep responses bounded.
 *
 * @param actual
 *          the actual class probability
 * @param p
 *          the estimated class probability
 * @return the (thresholded) LogitBoost response
 */
protected double getZ(final double actual, final double p) {
  if (actual == 1) {
    // positive case: z = 1/p, capped at Z_MAX
    final double z = 1.0 / p;
    return (z > Z_MAX) ? Z_MAX : z;
  }
  // negative case: z = -1/(1-p), floored at -Z_MAX
  final double z = -1.0 / (1.0 - p);
  return (z < -Z_MAX) ? -Z_MAX : z;
}
/**
 * Computes the LogitBoost response for an array of y/p values (actual/estimated class
 * probabilities).
 *
 * @param probs
 *          the estimated class probabilities
 * @param dataYs
 *          the actual class probabilities
 * @return the LogitBoost responses
 * @throws InterruptedException if the executing thread was interrupted
 */
protected double[][] getZs(final double[][] probs, final double[][] dataYs) throws InterruptedException {
  final double[][] dataZs = new double[probs.length][this.m_numClasses];
  for (int classIdx = 0; classIdx < this.m_numClasses; classIdx++) {
    // XXX Interrupt added
    if (Thread.interrupted()) {
      throw new InterruptedException("Killed WEKA!");
    }
    for (int row = 0; row < probs.length; row++) {
      dataZs[row][classIdx] = this.getZ(dataYs[row][classIdx], probs[row][classIdx]);
    }
  }
  return dataZs;
}
/**
 * Computes the LogitBoost weights from an array of y/p values (actual/estimated class
 * probabilities): w = (y - p) / z.
 *
 * @param probs
 *          the estimated class probabilities
 * @param dataYs
 *          the actual class probabilities
 * @return the LogitBoost weights
 */
protected double[][] getWs(final double[][] probs, final double[][] dataYs) {
  final double[][] dataWs = new double[probs.length][this.m_numClasses];
  for (int classIdx = 0; classIdx < this.m_numClasses; classIdx++) {
    for (int row = 0; row < probs.length; row++) {
      final double z = this.getZ(dataYs[row][classIdx], probs[row][classIdx]);
      dataWs[row][classIdx] = (dataYs[row][classIdx] - probs[row][classIdx]) / z;
    }
  }
  return dataWs;
}
/**
 * Computes the p-values (class probabilities) from the F-values of the logistic model
 * via exponentiation and normalization; the maximum F is subtracted first for
 * numerical stability.
 *
 * @param Fs
 *          the F-values
 * @return the p-values
 */
protected double[] probs(final double[] Fs) {
  double maxF = -Double.MAX_VALUE;
  for (double f : Fs) {
    if (f > maxF) {
      maxF = f;
    }
  }
  final double[] result = new double[Fs.length];
  double total = 0;
  for (int i = 0; i < Fs.length; i++) {
    result[i] = Math.exp(Fs[i] - maxF);
    total += result[i];
  }
  Utils.normalize(result, total);
  return result;
}
/**
 * Computes the Y-values (actual class probabilities) for a set of instances: a 0/1
 * indicator matrix with one row per instance and one column per class.
 *
 * @param data
 *          the data to compute the Y-values from
 * @return the Y-values
 */
protected double[][] getYs(final Instances data) {
  final int numInstances = data.numInstances();
  final double[][] dataYs = new double[numInstances][this.m_numClasses];
  for (int row = 0; row < numInstances; row++) {
    final double actualClass = data.instance(row).classValue();
    for (int classIdx = 0; classIdx < this.m_numClasses; classIdx++) {
      dataYs[row][classIdx] = (actualClass == classIdx) ? 1.0 : 0.0;
    }
  }
  return dataYs;
}
/**
 * Computes the F-values for a single instance by summing the (centered) predictions
 * of the simple regression functions over all non-class attributes.
 *
 * @param instance
 * the instance to compute the F-values for
 * @return the F-values
 * @throws Exception
 * if something goes wrong
 */
protected double[] getFs(final Instance instance) throws Exception {
double[] pred = new double[this.m_numClasses];
double[] instanceFs = new double[this.m_numClasses];
// add up the predictions from the simple regression functions
for (int i = 0; i < this.m_numericDataHeader.numAttributes(); i++) {
// XXX kill weka execution
if (Thread.interrupted()) {
throw new InterruptedException("Thread got interrupted, thus, kill WEKA.");
}
if (i != this.m_numericDataHeader.classIndex()) {
double predSum = 0;
for (int j = 0; j < this.m_numClasses; j++) {
pred[j] = this.m_regressions[j][i].classifyInstance(instance);
predSum += pred[j];
}
predSum /= this.m_numClasses;
// centered update scaled by (K-1)/K, matching the training-time update
for (int j = 0; j < this.m_numClasses; j++) {
instanceFs[j] += (pred[j] - predSum) * (this.m_numClasses - 1) / this.m_numClasses;
}
}
}
return instanceFs;
}
/**
 * Computes the F-values for a set of instances, one row per instance.
 *
 * @param data
 *          the data to work on
 * @return the F-values
 * @throws Exception if something goes wrong
 */
protected double[][] getFs(final Instances data) throws Exception {
  final int numInstances = data.numInstances();
  final double[][] dataFs = new double[numInstances][];
  for (int row = 0; row < numInstances; row++) {
    // XXX Interrupt added
    if (Thread.interrupted()) {
      throw new InterruptedException("Killed WEKA!");
    }
    dataFs[row] = this.getFs(data.instance(row));
  }
  return dataFs;
}
/**
 * Computes the p-values (probabilities for the different classes) from the F-values
 * for a set of instances, row by row.
 *
 * @param dataFs
 *          the F-values
 * @return the p-values
 */
protected double[][] getProbs(final double[][] dataFs) {
  final double[][] result = new double[dataFs.length][];
  for (int row = 0; row < dataFs.length; row++) {
    result[row] = this.probs(dataFs[row]);
  }
  return result;
}
/**
 * Returns the negative loglikelihood of the Y-values (actual class
 * memberships) given the p-values (current probability estimates). Only the
 * true class of each instance (the column where Y == 1.0) contributes.
 *
 * @param dataYs
 *            the Y-values (one-hot class indicators)
 * @param probs
 *            the p-values (probability estimates)
 * @return the negative loglikelihood
 * @throws InterruptedException
 *             if the executing thread has been interrupted
 */
protected double negativeLogLikelihood(final double[][] dataYs, final double[][] probs) throws InterruptedException {
  double negLogLikelihood = 0;
  for (int i = 0; i < dataYs.length; i++) {
    for (int j = 0; j < this.m_numClasses; j++) {
      // XXX Interrupt added
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA!");
      }
      if (dataYs[i][j] == 1.0) {
        negLogLikelihood -= Math.log(probs[i][j]);
      }
    }
  }
  return negLogLikelihood;
}
/**
 * Returns an array of the indices of the attributes used in the logistic model. The first dimension is the class, the second dimension holds a list of attribute indices. Attribute indices start at zero.
 *
 * @return the array of attribute indices, one row per class
 */
public int[][] getUsedAttributes() {
  final int numAtts = this.m_numericDataHeader.numAttributes();
  int[][] usedAttributes = new int[this.m_numClasses][];
  // first extract coefficients
  double[][] coefficients = this.getCoefficients();
  for (int j = 0; j < this.m_numClasses; j++) {
    // flag attributes with a non-zero coefficient and count them in one pass
    // (coefficients are shifted by one because of the constant term at index 0)
    boolean[] attributes = new boolean[numAtts];
    int numAttributes = 0;
    for (int i = 0; i < numAtts; i++) {
      if (!Utils.eq(coefficients[j][i + 1], 0)) {
        attributes[i] = true;
        numAttributes++;
      }
    }
    // "collect" all flagged attributes into an array of indices
    int[] usedAttributesClass = new int[numAttributes];
    int count = 0;
    for (int i = 0; i < numAtts; i++) {
      if (attributes[i]) {
        usedAttributesClass[count] = i;
        count++;
      }
    }
    usedAttributes[j] = usedAttributesClass;
  }
  return usedAttributes;
}
/**
 * The number of LogitBoost iterations performed (= the number of simple
 * regression functions fit).
 *
 * @return the number of LogitBoost iterations performed
 */
public int getNumRegressions() {
  return this.m_numRegressions;
}
/**
 * Get the value of the weightTrimBeta option used by LogitBoost.
 *
 * @return the current value of weightTrimBeta
 */
public double getWeightTrimBeta() {
  return this.m_weightTrimBeta;
}
/**
 * Get the value of the useAIC flag.
 *
 * @return true if the AIC criterion is used
 */
public boolean getUseAIC() {
  return this.m_useAIC;
}
/**
 * Sets the parameter "maxIterations" (the maximum number of LogitBoost
 * iterations to perform).
 *
 * @param maxIterations
 *            the maximum iterations
 */
public void setMaxIterations(final int maxIterations) {
  this.m_maxIterations = maxIterations;
}
/**
 * Sets the option "heuristicStop".
 *
 * @param heuristicStop
 *            the heuristic stop to use
 */
public void setHeuristicStop(final int heuristicStop) {
  this.m_heuristicStop = heuristicStop;
}
/**
 * Sets the option "weightTrimBeta".
 *
 * @param w
 *            the weight trimming threshold to use
 */
public void setWeightTrimBeta(final double w) {
  this.m_weightTrimBeta = w;
}
/**
 * Set the value of the useAIC flag.
 *
 * @param c
 *            Value to assign to useAIC.
 */
public void setUseAIC(final boolean c) {
  this.m_useAIC = c;
}
/**
 * Returns the maxIterations parameter.
 *
 * @return the maximum number of LogitBoost iterations
 */
public int getMaxIterations() {
  return this.m_maxIterations;
}
/**
 * Returns an array holding the coefficients of the logistic model. First
 * dimension is the class; the second one holds a list of coefficients: at
 * position zero the constant term of the model, then the coefficients for
 * the attributes in ascending order.
 *
 * @return the array of coefficients
 */
protected double[][] getCoefficients() {
  final int numAtts = this.m_numericDataHeader.numAttributes();
  final int classIndex = this.m_numericDataHeader.classIndex();
  // (J-1)/J scaling factor required by the multi-class LogitBoost model
  final double scale = (double) (this.m_numClasses - 1) / (double) this.m_numClasses;
  double[][] coefficients = new double[this.m_numClasses][numAtts + 1];
  for (int j = 0; j < this.m_numClasses; j++) {
    // fold each simple regression function into the overall model: its
    // intercept goes into the constant term, its slope into the slot of
    // the attribute it was built on
    for (int i = 0; i < numAtts; i++) {
      if (i == classIndex) {
        continue;
      }
      coefficients[j][0] += this.m_regressions[j][i].getIntercept();
      int attribute = this.m_regressions[j][i].getAttributeIndex();
      coefficients[j][attribute + 1] += this.m_regressions[j][i].getSlope();
    }
    // apply the (J-1)/J scaling to every coefficient of this class
    for (int i = 0; i < numAtts + 1; i++) {
      coefficients[j][i] *= scale;
    }
  }
  return coefficients;
}
/**
 * Returns the fraction of all attributes in the data that are used in the
 * logistic model (in percent). An attribute is used in the model if it is
 * used in any of the models for the different classes.
 *
 * @return the fraction of all attributes that are used, in percent
 */
public double percentAttributesUsed() {
  final int numAtts = this.m_numericDataHeader.numAttributes();
  boolean[] used = new boolean[numAtts];
  double[][] coefficients = this.getCoefficients();
  // an attribute counts as used if any class model has a non-zero
  // coefficient for it; coefficients are shifted by one because of the
  // constant term at index 0
  for (int j = 0; j < this.m_numClasses; j++) {
    for (int i = 1; i < numAtts + 1; i++) {
      if (!Utils.eq(coefficients[j][i], 0)) {
        used[i - 1] = true;
      }
    }
  }
  // count the used attributes (the class attribute is excluded from the
  // denominator)
  double count = 0;
  for (boolean u : used) {
    if (u) {
      count++;
    }
  }
  return count / (numAtts - 1) * 100.0;
}
/**
 * Returns a description of the logistic model (i.e., attributes and coefficients).
 *
 * @return the description of the model
 */
@Override
public String toString() {
  // StringBuilder instead of StringBuffer: no synchronization is needed here
  StringBuilder s = new StringBuilder();
  // get used attributes
  int[][] attributes = this.getUsedAttributes();
  // get coefficients
  double[][] coefficients = this.getCoefficients();
  for (int j = 0; j < this.m_numClasses; j++) {
    s.append("\nClass " + this.m_train.classAttribute().value(j) + " :\n");
    // constant term
    s.append(Utils.doubleToString(coefficients[j][0], 2 + this.m_numDecimalPlaces, this.m_numDecimalPlaces) + " + \n");
    for (int i = 0; i < attributes[j].length; i++) {
      // attribute/coefficient pairs
      s.append("[" + this.m_numericDataHeader.attribute(attributes[j][i]).name() + "]");
      s.append(" * " + Utils.doubleToString(coefficients[j][attributes[j][i] + 1], 2 + this.m_numDecimalPlaces, this.m_numDecimalPlaces));
      if (i != attributes[j].length - 1) {
        s.append(" +");
      }
      s.append("\n");
    }
  }
  return s.toString();
}
/**
 * Returns class probabilities for an instance.
 *
 * @param instance
 *            the instance to compute the distribution for
 * @return the class probabilities
 * @throws Exception
 *             if the distribution can't be computed successfully
 */
@Override
public double[] distributionForInstance(Instance instance) throws Exception {
  // work on a copy so the caller's instance is not modified
  Instance copy = (Instance) instance.copy();
  // map the copy to the numeric pseudo-class header
  copy.setDataset(this.m_numericDataHeader);
  // probabilities are derived from the F-values of the boosted model
  return this.probs(this.getFs(copy));
}
/**
 * Cleanup in order to save memory: keeps only the header (structure) of the
 * training data and drops the numeric version of the data entirely.
 */
public void cleanup() {
  // save just header info
  this.m_train = new Instances(this.m_train, 0);
  this.m_numericData = null;
}
/**
 * Returns the revision string of this class.
 *
 * @return the revision
 */
@Override
public String getRevision() {
  return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/lmt/ResidualModelSelection.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ResidualModelSelection.java
* Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.lmt;
import weka.classifiers.trees.j48.ClassifierSplitModel;
import weka.classifiers.trees.j48.Distribution;
import weka.classifiers.trees.j48.ModelSelection;
import weka.classifiers.trees.j48.NoSplit;
import weka.core.Instances;
import weka.core.RevisionUtils;
/**
* Helper class for logistic model trees (weka.classifiers.trees.lmt.LMT) to implement the
* splitting criterion based on residuals.
*
* @author Niels Landwehr
* @version $Revision$
*/
public class ResidualModelSelection
extends ModelSelection {
/** for serialization */
private static final long serialVersionUID = -293098783159385148L;
/** Minimum number of instances for leaves*/
protected int m_minNumInstances;
/** Minimum information gain for split*/
protected double m_minInfoGain;
/**
* Constructor to create ResidualModelSelection object.
* @param minNumInstances minimum number of instances for leaves
*/
public ResidualModelSelection(int minNumInstances) {
m_minNumInstances = minNumInstances;
m_minInfoGain = 1.0E-4;
}
/**Method not in use*/
public void cleanup() {
//method not in use
}
/**
* Selects split based on residuals for the given dataset.
*/
public final ClassifierSplitModel selectModel(Instances data,
double[][] dataZs, double[][] dataWs) throws Exception{
int numAttributes = data.numAttributes();
if (numAttributes < 2) throw new Exception("Can't select Model without non-class attribute");
if (data.numInstances() < m_minNumInstances) return new NoSplit(new Distribution(data));
double bestGain = -Double.MAX_VALUE;
int bestAttribute = -1;
//try split on every attribute
for (int i = 0; i < numAttributes; i++) {
if (i != data.classIndex()) {
//build split
ResidualSplit split = new ResidualSplit(i);
split.buildClassifier(data, dataZs, dataWs);
if (split.checkModel(m_minNumInstances)){
//evaluate split
double gain = split.entropyGain();
if (gain > bestGain) {
bestGain = gain;
bestAttribute = i;
}
}
}
}
if (bestGain >= m_minInfoGain){
//return best split
ResidualSplit split = new ResidualSplit(bestAttribute);
split.buildClassifier(data, dataZs, dataWs);
return split;
} else {
//could not find any split with enough information gain
return new NoSplit(new Distribution(data));
}
}
/**Method not in use*/
public final ClassifierSplitModel selectModel(Instances train) {
//method not in use
return null;
}
/**Method not in use*/
public final ClassifierSplitModel selectModel(Instances train, Instances test) {
//method not in use
return null;
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/lmt/ResidualSplit.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ResidualSplit.java
* Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.lmt;
import weka.classifiers.trees.j48.ClassifierSplitModel;
import weka.classifiers.trees.j48.Distribution;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* Helper class for logistic model trees (weka.classifiers.trees.lmt.LMT) to implement the
* splitting criterion based on residuals of the LogitBoost algorithm.
*
* @author Niels Landwehr
* @version $Revision$
*/
public class ResidualSplit
  extends ClassifierSplitModel{

  /** for serialization */
  private static final long serialVersionUID = -5055883734183713525L;

  /** The attribute selected for the split */
  protected Attribute m_attribute;

  /** The index of the attribute selected for the split */
  protected int m_attIndex;

  /** Number of instances in the set */
  protected int m_numInstances;

  /** Number of classes */
  protected int m_numClasses;

  /** The set of instances */
  protected Instances m_data;

  /** The Z-values (LogitBoost response) for the set of instances */
  protected double[][] m_dataZs;

  /** The LogitBoost-weights for the set of instances */
  protected double[][] m_dataWs;

  /** The split point (for numeric attributes) */
  protected double m_splitPoint;

  /**
   * Creates a split object.
   *
   * @param attIndex the index of the attribute to split on
   */
  public ResidualSplit(int attIndex) {
    m_attIndex = attIndex;
  }

  /**
   * Builds the split. Needs the Z/W values of LogitBoost for the set of
   * instances. Nominal attributes get one subset per value; numeric
   * attributes get a binary split at the best split point.
   *
   * @param data the instances to split
   * @param dataZs the LogitBoost Z-values, one row per instance
   * @param dataWs the LogitBoost weights, one row per instance
   * @throws Exception if the set of instances is empty
   */
  public void buildClassifier(Instances data, double[][] dataZs, double[][] dataWs)
    throws Exception {
    m_numClasses = data.numClasses();
    m_numInstances = data.numInstances();
    if (m_numInstances == 0) throw new Exception("Can't build split on 0 instances");
    //save data/Zs/Ws
    m_data = data;
    m_dataZs = dataZs;
    m_dataWs = dataWs;
    m_attribute = data.attribute(m_attIndex);
    //determine number of subsets and split point for numeric attributes
    if (m_attribute.isNominal()) {
      m_splitPoint = 0.0;
      m_numSubsets = m_attribute.numValues();
    } else {
      // NOTE(review): the boolean result of getSplitPoint() is ignored; if no
      // candidate split point exists, m_splitPoint keeps its previous value
      // (0.0 for a fresh object) — confirm this is intended.
      getSplitPoint();
      m_numSubsets = 2;
    }
    //create distribution for data
    m_distribution = new Distribution(data, this);
  }

  /**
   * Selects the split point for a numeric attribute: tries the midpoints
   * between consecutive distinct sorted values and keeps the one with the
   * highest entropy gain.
   *
   * @return true if a split point was found, false if the attribute has
   *         only one distinct value
   * @throws Exception if the entropy gain cannot be computed
   */
  protected boolean getSplitPoint() throws Exception{
    //compute possible split points: midpoints between distinct sorted values
    double[] splitPoints = new double[m_numInstances];
    int numSplitPoints = 0;
    Instances sortedData = new Instances(m_data);
    sortedData.sort(sortedData.attribute(m_attIndex));
    double last, current;
    last = sortedData.instance(0).value(m_attIndex);
    for (int i = 0; i < m_numInstances - 1; i++) {
      current = sortedData.instance(i+1).value(m_attIndex);
      if (!Utils.eq(current, last)){
        splitPoints[numSplitPoints++] = (last + current) / 2.0;
      }
      last = current;
    }
    //compute entropy for all split points
    //(m_splitPoint is temporarily set so entropyGain() evaluates the candidate)
    double[] entropyGain = new double[numSplitPoints];
    for (int i = 0; i < numSplitPoints; i++) {
      m_splitPoint = splitPoints[i];
      entropyGain[i] = entropyGain();
    }
    //get best entropy gain
    int bestSplit = -1;
    double bestGain = -Double.MAX_VALUE;
    for (int i = 0; i < numSplitPoints; i++) {
      if (entropyGain[i] > bestGain) {
        bestGain = entropyGain[i];
        bestSplit = i;
      }
    }
    if (bestSplit < 0) return false;
    m_splitPoint = splitPoints[bestSplit];
    return true;
  }

  /**
   * Computes the entropy gain for the current split: the impurity of the
   * full set minus the summed impurity of the subsets induced by the split.
   *
   * @return the entropy gain
   * @throws Exception if an instance has a missing value for the split attribute
   */
  public double entropyGain() throws Exception{
    int numSubsets;
    if (m_attribute.isNominal()) {
      numSubsets = m_attribute.numValues();
    } else {
      numSubsets = 2;
    }
    double[][][] splitDataZs = new double[numSubsets][][];
    double[][][] splitDataWs = new double[numSubsets][][];
    //determine size of the subsets
    int[] subsetSize = new int[numSubsets];
    for (int i = 0; i < m_numInstances; i++) {
      int subset = whichSubset(m_data.instance(i));
      if (subset < 0) throw new Exception("ResidualSplit: no support for splits on missing values");
      subsetSize[subset]++;
    }
    for (int i = 0; i < numSubsets; i++) {
      splitDataZs[i] = new double[subsetSize[i]][];
      splitDataWs[i] = new double[subsetSize[i]][];
    }
    int[] subsetCount = new int[numSubsets];
    //sort Zs/Ws into subsets (rows are shared with m_dataZs/m_dataWs, not copied)
    for (int i = 0; i < m_numInstances; i++) {
      int subset = whichSubset(m_data.instance(i));
      splitDataZs[subset][subsetCount[subset]] = m_dataZs[i];
      splitDataWs[subset][subsetCount[subset]] = m_dataWs[i];
      subsetCount[subset]++;
    }
    //calculate entropy gain
    double entropyOrig = entropy(m_dataZs, m_dataWs);
    double entropySplit = 0.0;
    for (int i = 0; i < numSubsets; i++) {
      entropySplit += entropy(splitDataZs[i], splitDataWs[i]);
    }
    return entropyOrig - entropySplit;
  }

  /**
   * Helper function to compute the impurity ("entropy") from Z/W values:
   * the weighted sum of squared deviations of the Z-values from their
   * weighted per-class mean.
   */
  protected double entropy(double[][] dataZs, double[][] dataWs){
    //method returns entropy * sumOfWeights
    double entropy = 0.0;
    int numInstances = dataZs.length;
    for (int j = 0; j < m_numClasses; j++) {
      //compute weighted mean of the Z-values for class j
      double m = 0.0;
      double sum = 0.0;
      for (int i = 0; i < numInstances; i++) {
        m += dataZs[i][j] * dataWs[i][j];
        sum += dataWs[i][j];
      }
      m /= sum;
      //sum up entropy for class
      for (int i = 0; i < numInstances; i++) {
        entropy += dataWs[i][j] * Math.pow(dataZs[i][j] - m,2);
      }
    }
    return entropy;
  }

  /**
   * Checks if there are at least 2 subsets that contain >= minNumInstances.
   *
   * @param minNumInstances the minimum subset size
   * @return true if the split produces at least two sufficiently large subsets
   */
  public boolean checkModel(int minNumInstances){
    //checks if there are at least 2 subsets that contain >= minNumInstances
    int count = 0;
    for (int i = 0; i < m_distribution.numBags(); i++) {
      if (m_distribution.perBag(i) >= minNumInstances) count++;
    }
    return (count >= 2);
  }

  /**
   * Returns the name of the splitting attribute (left side of condition).
   */
  public final String leftSide(Instances data) {
    return data.attribute(m_attIndex).name();
  }

  /**
   * Prints the condition satisfied by instances in a subset:
   * "= value" for nominal attributes, "<= point" / "> point" for numeric ones.
   */
  public final String rightSide(int index,Instances data) {
    StringBuffer text;
    text = new StringBuffer();
    if (data.attribute(m_attIndex).isNominal())
      text.append(" = "+
        data.attribute(m_attIndex).value(index));
    else
      if (index == 0)
        text.append(" <= "+
          Utils.doubleToString(m_splitPoint,6));
      else
        text.append(" > "+
          Utils.doubleToString(m_splitPoint,6));
    return text.toString();
  }

  /**
   * Returns the index of the subset the given instance falls into:
   * the attribute's value index for nominal attributes, 0/1 (below/above the
   * split point) for numeric ones, and -1 if the value is missing.
   */
  public final int whichSubset(Instance instance)
    throws Exception {
    if (instance.isMissing(m_attIndex))
      return -1;
    else{
      if (instance.attribute(m_attIndex).isNominal())
        return (int)instance.value(m_attIndex);
      else
        if (Utils.smOrEq(instance.value(m_attIndex),m_splitPoint))
          return 0;
        else
          return 1;
    }
  }

  /** Method not in use */
  public void buildClassifier(Instances data) {
    //method not in use
  }

  /** Method not in use */
  public final double [] weights(Instance instance){
    //method not in use
    return null;
  }

  /** Method not in use */
  public final String sourceExpression(int index, Instances data) {
    //method not in use
    return "";
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/lmt/SimpleLinearRegression.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* SimpleLinearRegression.java
* Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.lmt;
import java.io.Serializable;
import weka.core.Instance;
import weka.core.Instances;
/**
* Stripped down version of SimpleLinearRegression. Assumes that there are no missing class values.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision: 10169 $
*/
public class SimpleLinearRegression implements Serializable {

  /** for serialization */
  static final long serialVersionUID = 1779336022895414137L;

  /** The index of the chosen attribute (-1 while no attribute has been chosen) */
  private int m_attributeIndex = -1;

  /** The slope */
  private double m_slope = Double.NaN;

  /** The intercept */
  private double m_intercept = Double.NaN;

  /**
   * Default constructor.
   */
  public SimpleLinearRegression() {
  }

  /**
   * Construct a simple linear regression model based on the given info.
   *
   * @param attIndex the index of the attribute the model is built on
   * @param slope the slope of the regression function
   * @param intercept the intercept of the regression function
   */
  public SimpleLinearRegression(final int attIndex, final double slope, final double intercept) {
    this.m_attributeIndex = attIndex;
    this.m_slope = slope;
    this.m_intercept = intercept;
  }

  /**
   * Takes the given simple linear regression model and adds it to this one:
   * if this model is uninitialized it adopts the given model; if both models
   * are built on the same attribute their coefficients are summed; otherwise
   * nothing happens. Assumes the given model has been initialized.
   *
   * @param slr the model to add to this one
   * @throws Exception declared for API compatibility
   */
  public void addModel(final SimpleLinearRegression slr) throws Exception {
    // BUGFIX: the previous code overwrote m_attributeIndex with slr's index
    // BEFORE testing it against -1, so the test always looked at the given
    // model. Adding into the NaN-initialized slope/intercept of a fresh model
    // poisoned them with NaN, and the documented "does nothing on a different
    // attribute" contract was never enforced.
    if (this.m_attributeIndex == -1) {
      // this model is still uninitialized: adopt the given model wholesale
      this.m_attributeIndex = slr.m_attributeIndex;
      this.m_slope = slr.m_slope;
      this.m_intercept = slr.m_intercept;
    } else if (this.m_attributeIndex == slr.m_attributeIndex) {
      // both models are built on the same attribute: coefficients add up
      this.m_slope += slr.m_slope;
      this.m_intercept += slr.m_intercept;
    }
    // different attribute: do nothing, as documented
  }

  /**
   * Generate a prediction for the supplied instance.
   *
   * @param inst
   *            the instance to predict.
   * @return the prediction
   */
  public double classifyInstance(final Instance inst) {
    return this.m_intercept + this.m_slope * inst.value(this.m_attributeIndex);
  }

  /**
   * Computes the weighted means of all attributes.
   *
   * @param insts the instances to compute the means from
   * @return the weighted mean of each attribute (0.0 where the total weight is 0)
   * @throws InterruptedException if the executing thread has been interrupted
   */
  protected double[] computeMeans(final Instances insts) throws InterruptedException {
    // We can assume that all the attributes are numeric and that
    // we don't have any missing attribute values (including the class)
    double[] means = new double[insts.numAttributes()];
    double[] counts = new double[insts.numAttributes()];
    for (int j = 0; j < insts.numInstances(); j++) {
      // XXX interrupt weka
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA!");
      }
      Instance inst = insts.instance(j);
      for (int i = 0; i < insts.numAttributes(); i++) {
        means[i] += inst.weight() * inst.value(i);
        counts[i] += inst.weight();
      }
    }
    for (int i = 0; i < insts.numAttributes(); i++) {
      // XXX interrupt weka
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA!");
      }
      if (counts[i] > 0) {
        means[i] /= counts[i];
      } else {
        means[i] = 0.0;
      }
    }
    return means;
  }

  /**
   * Builds a simple linear regression model given the supplied training data:
   * fits one weighted least-squares regression per attribute and keeps the
   * attribute with the smallest sum of squared errors.
   *
   * @param insts
   *            the training data.
   * @throws InterruptedException if the executing thread has been interrupted
   */
  public void buildClassifier(final Instances insts) throws InterruptedException {
    // Compute relevant statistics
    double[] means = this.computeMeans(insts);
    double[] slopes = new double[insts.numAttributes()];
    double[] sumWeightedDiffsSquared = new double[insts.numAttributes()];
    int classIndex = insts.classIndex();

    // For all instances: accumulate the weighted cross-products and squared
    // deviations needed for the per-attribute regressions
    for (int j = 0; j < insts.numInstances(); j++) {
      // XXX interrupt weka
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA!");
      }
      Instance inst = insts.instance(j);
      double yDiff = inst.value(classIndex) - means[classIndex];
      double weightedYDiff = inst.weight() * yDiff;
      // For all attributes
      for (int i = 0; i < insts.numAttributes(); i++) {
        double diff = inst.value(i) - means[i];
        double weightedDiff = inst.weight() * diff;
        // Doesn't matter if we compute this for the class
        slopes[i] += weightedYDiff * diff;
        // We need this for the class as well
        sumWeightedDiffsSquared[i] += weightedDiff * diff;
      }
    }

    // Pick the best attribute (smallest sum of squared errors)
    double minSSE = Double.MAX_VALUE;
    this.m_attributeIndex = -1;
    for (int i = 0; i < insts.numAttributes(); i++) {
      // XXX interrupt weka
      if (Thread.interrupted()) {
        throw new InterruptedException("Killed WEKA!");
      }
      // Skip the class attribute and constant attributes
      if ((i == classIndex) || (sumWeightedDiffsSquared[i] == 0)) {
        continue;
      }
      // Compute final slope and intercept
      double numerator = slopes[i];
      slopes[i] /= sumWeightedDiffsSquared[i];
      double intercept = means[classIndex] - slopes[i] * means[i];
      // Compute sum of squared errors
      double sse = sumWeightedDiffsSquared[classIndex] - slopes[i] * numerator;
      // Check whether this is the best attribute
      if (sse < minSSE) {
        minSSE = sse;
        this.m_attributeIndex = i;
        this.m_slope = slopes[i];
        this.m_intercept = intercept;
      }
    }
  }

  /**
   * Returns true if a usable attribute was found.
   *
   * @return true if a usable attribute was found.
   */
  public boolean foundUsefulAttribute() {
    return (this.m_attributeIndex != -1);
  }

  /**
   * Returns the index of the attribute used in the regression.
   *
   * @return the index of the attribute.
   */
  public int getAttributeIndex() {
    return this.m_attributeIndex;
  }

  /**
   * Returns the slope of the function.
   *
   * @return the slope.
   */
  public double getSlope() {
    return this.m_slope;
  }

  /**
   * Returns the intercept of the function.
   *
   * @return the intercept.
   */
  public double getIntercept() {
    return this.m_intercept;
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/m5/CorrelationSplitInfo.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* CorrelationSplitInfo.java
* Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.m5;
import java.io.Serializable;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.experiment.PairedStats;
/**
* Finds split points using correlation.
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
*/
public final class CorrelationSplitInfo implements Cloneable, Serializable,
  SplitEvaluate, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = 4212734895125452770L;

  /** the position of the split in the sorted values; -1 if no split was found */
  private int m_position;

  /**
   * the maximum impurity reduction
   */
  private double m_maxImpurity;

  /**
   * the attribute being tested
   */
  private int m_splitAttr;

  /**
   * the best value on which to split
   */
  private double m_splitValue;

  /**
   * the number of instances
   */
  private int m_number;

  /**
   * Constructs an object which contains the split information
   *
   * @param low the index of the first instance
   * @param high the index of the last instance
   * @param attr an attribute
   */
  public CorrelationSplitInfo(int low, int high, int attr) {
    initialize(low, high, attr);
  }

  /**
   * Makes a (shallow) copy of this CorrelationSplitInfo object
   */
  @Override
  public final SplitEvaluate copy() throws Exception {
    CorrelationSplitInfo s = (CorrelationSplitInfo) this.clone();
    return s;
  }

  /**
   * Resets the object of split information
   *
   * @param low the index of the first instance
   * @param high the index of the last instance
   * @param attr the attribute
   */
  public final void initialize(int low, int high, int attr) {
    m_number = high - low + 1;
    m_position = -1;
    m_maxImpurity = -Double.MAX_VALUE;
    m_splitAttr = attr;
    m_splitValue = 0.0;
  }

  /**
   * Finds the best splitting point for an attribute in the instances by
   * sliding a window over the (assumed sorted) instances and scoring each
   * candidate boundary by the reduction in standard deviation of the class
   * values.
   *
   * @param attr the splitting attribute
   * @param inst the instances
   * @exception Exception if something goes wrong
   */
  @Override
  public final void attrSplit(int attr, Instances inst) throws Exception {
    int i;
    int len;
    int low = 0;
    int high = inst.numInstances() - 1;
    PairedStats full = new PairedStats(0.01);
    PairedStats leftSubset = new PairedStats(0.01);
    PairedStats rightSubset = new PairedStats(0.01);
    int classIndex = inst.classIndex();
    double leftCorr, rightCorr;
    double leftVar, rightVar, allVar;
    // order = 2: impurity terms are the square root of the variance (std dev)
    double order = 2.0;

    initialize(low, high, attr);

    // refuse to split fewer than 4 instances
    if (m_number < 4) {
      return;
    }

    // minimum subset size kept at each end: a fifth of the range, at least 1
    len = ((high - low + 1) < 5) ? 1 : (high - low + 1) / 5;
    m_position = low;

    // prime the subsets: the first len instances start in the left subset,
    // the remaining instances in the right subset (low is always 0 here)
    for (i = low; i < len; i++) {
      full
        .add(inst.instance(i).value(attr), inst.instance(i).value(classIndex));
      leftSubset.add(inst.instance(i).value(attr),
        inst.instance(i).value(classIndex));
    }

    for (i = len; i < inst.numInstances(); i++) {
      full
        .add(inst.instance(i).value(attr), inst.instance(i).value(classIndex));
      rightSubset.add(inst.instance(i).value(attr),
        inst.instance(i).value(classIndex));
    }

    full.calculateDerived();

    allVar = (full.yStats.stdDev * full.yStats.stdDev);
    allVar = Math.abs(allVar);
    allVar = Math.pow(allVar, (1.0 / order));

    // slide the boundary one instance at a time, moving instance i from the
    // right subset into the left subset, and score each candidate boundary
    for (i = low + len; i < high - len - 1; i++) {
      rightSubset.subtract(inst.instance(i).value(attr), inst.instance(i)
        .value(classIndex));
      leftSubset.add(inst.instance(i).value(attr),
        inst.instance(i).value(classIndex));

      // only consider a split between distinct attribute values
      if (!Utils.eq(inst.instance(i + 1).value(attr),
        inst.instance(i).value(attr))) {
        leftSubset.calculateDerived();
        rightSubset.calculateDerived();

        leftCorr = Math.abs(leftSubset.correlation);
        rightCorr = Math.abs(rightSubset.correlation);
        leftVar = (leftSubset.yStats.stdDev * leftSubset.yStats.stdDev);
        leftVar = Math.abs(leftVar);
        leftVar = Math.pow(leftVar, (1.0 / order));
        rightVar = (rightSubset.yStats.stdDev * rightSubset.yStats.stdDev);
        rightVar = Math.abs(rightVar);
        rightVar = Math.pow(rightVar, (1.0 / order));

        // score: std-dev reduction achieved by the split
        double score = allVar - ((leftSubset.count / full.count) * leftVar)
          - ((rightSubset.count / full.count) * rightVar);

        // score /= allVar;
        // NOTE(review): the weighted correlation terms below are computed but
        // never enter the score — presumably a remnant of an earlier scoring
        // scheme; confirm before removing.
        leftCorr = (leftSubset.count / full.count) * leftCorr;
        rightCorr = (rightSubset.count / full.count) * rightCorr;

        // c_score += score;
        if (!Utils.eq(score, 0.0)) {
          if (score > m_maxImpurity) {
            m_maxImpurity = score;
            // split value: midpoint between the two distinct attribute values
            m_splitValue = (inst.instance(i).value(attr) + inst.instance(i + 1)
              .value(attr)) * 0.5;
            m_position = i;
          }
        }
      }
    }
  }

  /**
   * Returns the impurity of this split
   *
   * @return the impurity of this split
   */
  @Override
  public double maxImpurity() {
    return m_maxImpurity;
  }

  /**
   * Returns the attribute used in this split
   *
   * @return the attribute used in this split
   */
  @Override
  public int splitAttr() {
    return m_splitAttr;
  }

  /**
   * Returns the position of the split in the sorted values. -1 indicates that a
   * split could not be found.
   *
   * @return an <code>int</code> value
   */
  @Override
  public int position() {
    return m_position;
  }

  /**
   * Returns the split value
   *
   * @return the split value
   */
  @Override
  public double splitValue() {
    return m_splitValue;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/m5/Impurity.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Impurity.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.m5;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
/**
* Class for handling the impurity values when spliting the instances
*
* @author Yong Wang (yongwang@cs.waikato.ac.nz)
* @version $Revision$
*/
public final class Impurity implements RevisionHandler {

  double n; // number of total instances
  int attr; // splitting attribute
  double nl; // number of instances in the left group
  double nr; // number of instances in the right group
  double sl; // sum of the left group
  double sr; // sum of the right group
  double s2l; // squared sum of the left group
  double s2r; // squared sum of the right group
  double sdl; // standard deviation of the left group
  double sdr; // standard deviation of the right group
  double vl; // variance of the left group
  double vr; // variance of the right group
  double sd; // overall standard deviation
  double va; // overall variance
  double impurity; // impurity value
  int order; // order = 1, variance; order = 2, standard deviation; order = 3, the cubic root of the variance;
  // order = k, the k-th order root of the variance

  /**
   * Constructs an Impurity object containing the impurity values of partitioning the instances using
   * an attribute.
   *
   * @param partition
   *          the index of the last instance in the left subset
   * @param attribute
   *          the attribute used in partitioning
   * @param inst
   *          instances
   * @param k
   *          the order of the impurity; =1, the variance; =2, the standard deviation; =k, the k-th
   *          order root of the variance
   * @throws InterruptedException if the executing thread was interrupted
   */
  public Impurity(final int partition, final int attribute, final Instances inst, final int k) throws InterruptedException {
    // Overall statistics over all instances.
    Values values = new Values(0, inst.numInstances() - 1, inst.classIndex(), inst);
    this.attr = attribute;
    this.n = inst.numInstances();
    this.sd = values.sd;
    this.va = values.va;
    // Statistics for the left subset [0, partition].
    values = new Values(0, partition, inst.classIndex(), inst);
    this.nl = partition + 1;
    this.sl = values.sum;
    this.s2l = values.sqrSum;
    // Statistics for the right subset [partition + 1, numInstances - 1].
    values = new Values(partition + 1, inst.numInstances() - 1, inst.classIndex(), inst);
    this.nr = inst.numInstances() - partition - 1;
    this.sr = values.sum;
    this.s2r = values.sqrSum;
    this.order = k;
    // Derive variances, standard deviations and the impurity value once.
    this.incremental(0, 0);
  }

  /**
   * Converts an Impurity object to a string.
   *
   * @return the converted string
   */
  @Override
  public final String toString() {
    StringBuilder text = new StringBuilder();
    text.append("Print impurity values:\n");
    text.append("    Number of total instances:\t" + this.n + "\n");
    text.append("    Splitting attribute:\t\t" + this.attr + "\n");
    text.append("    Number of the instances in the left:\t" + this.nl + "\n");
    text.append("    Number of the instances in the right:\t" + this.nr + "\n");
    text.append("    Sum of the left:\t\t\t" + this.sl + "\n");
    text.append("    Sum of the right:\t\t\t" + this.sr + "\n");
    text.append("    Squared sum of the left:\t\t" + this.s2l + "\n");
    text.append("    Squared sum of the right:\t\t" + this.s2r + "\n");
    text.append("    Standard deviation of the left:\t" + this.sdl + "\n");
    text.append("    Standard deviation of the right:\t" + this.sdr + "\n");
    // BUGFIX: the left-variance line previously printed this.vr (copy-paste error).
    text.append("    Variance of the left:\t\t" + this.vl + "\n");
    text.append("    Variance of the right:\t\t" + this.vr + "\n");
    text.append("    Overall standard deviation:\t\t" + this.sd + "\n");
    text.append("    Overall variance:\t\t\t" + this.va + "\n");
    text.append("    Impurity (order " + this.order + "):\t\t" + this.impurity + "\n");
    return text.toString();
  }

  /**
   * Incrementally computes the impurity values as one instance's class value is
   * moved between the left and right subsets.
   *
   * @param value
   *          the incremental value
   * @param type
   *          if type=1, value will be added to the left subset; type=-1, to the right subset; type=0,
   *          initializes
   * @throws InterruptedException if the executing thread was interrupted
   */
  public final void incremental(final double value, final int type) throws InterruptedException {
    // XXX kill weka execution
    if (Thread.interrupted()) {
      throw new InterruptedException("Thread got interrupted, thus, kill WEKA.");
    }
    double y = 0., yl = 0., yr = 0.;
    switch (type) {
    case 1:
      // Move one value from the right subset to the left subset.
      this.nl += 1;
      this.nr -= 1;
      this.sl += value;
      this.sr -= value;
      this.s2l += value * value;
      this.s2r -= value * value;
      break;
    case -1:
      // Move one value from the left subset to the right subset.
      this.nl -= 1;
      this.nr += 1;
      this.sl -= value;
      this.sr += value;
      this.s2l -= value * value;
      this.s2r += value * value;
      break;
    case 0:
      // Initialization only: recompute the derived statistics below.
      break;
    default:
      System.err.println("wrong type in Impurity.incremental().");
    }
    // Population variance of the left subset: (n*sum(x^2) - sum(x)^2) / n^2.
    if (this.nl <= 0.0) {
      this.vl = 0.0;
      this.sdl = 0.0;
    } else {
      this.vl = (this.nl * this.s2l - this.sl * this.sl) / (this.nl * (this.nl));
      this.vl = Math.abs(this.vl); // guard against tiny negative values from rounding
      this.sdl = Math.sqrt(this.vl);
    }
    if (this.nr <= 0.0) {
      this.vr = 0.0;
      this.sdr = 0.0;
    } else {
      this.vr = (this.nr * this.s2r - this.sr * this.sr) / (this.nr * (this.nr));
      this.vr = Math.abs(this.vr);
      this.sdr = Math.sqrt(this.vr);
    }
    // Apply the impurity order: 1 = variance, k = k-th root of the variance.
    if (this.order <= 0) {
      System.err.println("Impurity order less than zero in Impurity.incremental()");
    } else if (this.order == 1) {
      y = this.va;
      yl = this.vl;
      yr = this.vr;
    } else {
      y = Math.pow(this.va, 1. / this.order);
      yl = Math.pow(this.vl, 1. / this.order);
      yr = Math.pow(this.vr, 1. / this.order);
    }
    // Impurity reduction: overall impurity minus the weighted subset impurities.
    if (this.nl <= 0.0 || this.nr <= 0.0) {
      this.impurity = 0.0;
    } else {
      this.impurity = y - (this.nl / this.n) * yl - (this.nr / this.n) * yr;
    }
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/m5/M5Base.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* M5Base.java
* Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.m5;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.functions.LinearRegression;
import weka.core.AdditionalMeasureProducer;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.filters.Filter;
import weka.filters.supervised.attribute.NominalToBinary;
import weka.filters.unsupervised.attribute.RemoveUseless;
import weka.filters.unsupervised.attribute.ReplaceMissingValues;
/**
* M5Base. Implements base routines for generating M5 Model trees and rules.
* <p>
*
* The original algorithm M5 was invented by Quinlan: <br/>
*
* Quinlan J. R. (1992). Learning with continuous classes. Proceedings of the
* Australian Joint Conference on Artificial Intelligence. 343--348. World
* Scientific, Singapore.
* <p/>
*
* Yong Wang made improvements and created M5': <br/>
*
* Wang, Y and Witten, I. H. (1997). Induction of model trees for predicting
* continuous classes. Proceedings of the poster papers of the European
* Conference on Machine Learning. University of Economics, Faculty of
* Informatics and Statistics, Prague.
* <p/>
*
* Valid options are:
* <p>
*
* -U <br>
* Use unsmoothed predictions.
* <p>
*
* -R <br>
* Build regression tree/rule rather than model tree/rule
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
*/
public abstract class M5Base extends AbstractClassifier implements
  AdditionalMeasureProducer, TechnicalInformationHandler {

  /** for serialization */
  private static final long serialVersionUID = -4022221950191647679L;

  /** the instances covered by the tree/rules */
  private Instances m_instances;

  /** the rule set */
  protected ArrayList<Rule> m_ruleSet;

  /** generate a decision list instead of a single tree. */
  private boolean m_generateRules;

  /** use unsmoothed predictions */
  private boolean m_unsmoothedPredictions;

  /** filter to fill in missing values */
  private ReplaceMissingValues m_replaceMissing;

  /** filter to convert nominal attributes to binary */
  private NominalToBinary m_nominalToBinary;

  /** for removing useless attributes */
  private RemoveUseless m_removeUseless;

  /** Save instances at each node in an M5 tree for visualization purposes. */
  protected boolean m_saveInstances = false;

  /** Make a regression tree/rule instead of a model tree/rule */
  protected boolean m_regressionTree;

  /** Do not prune tree/rules */
  protected boolean m_useUnpruned = false;

  /** The minimum number of instances to allow at a leaf node */
  protected double m_minNumInstances = 4;

  /**
   * Constructor. Defaults: single tree (no rules), smoothed predictions,
   * pruning enabled, minimum of 4 instances per leaf.
   */
  public M5Base() {
    m_generateRules = false;
    m_unsmoothedPredictions = false;
    m_useUnpruned = false;
    m_minNumInstances = 4;
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing detailed
   * information about the technical background of this class, e.g., paper
   * reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  @Override
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;
    TechnicalInformation additional;

    result = new TechnicalInformation(Type.INPROCEEDINGS);
    result.setValue(Field.AUTHOR, "Ross J. Quinlan");
    result.setValue(Field.TITLE, "Learning with Continuous Classes");
    result.setValue(Field.BOOKTITLE,
      "5th Australian Joint Conference on Artificial Intelligence");
    result.setValue(Field.YEAR, "1992");
    result.setValue(Field.PAGES, "343-348");
    result.setValue(Field.PUBLISHER, "World Scientific");
    result.setValue(Field.ADDRESS, "Singapore");

    additional = result.add(Type.INPROCEEDINGS);
    additional.setValue(Field.AUTHOR, "Y. Wang and I. H. Witten");
    additional.setValue(Field.TITLE,
      "Induction of model trees for predicting continuous classes");
    additional.setValue(Field.BOOKTITLE,
      "Poster papers of the 9th European Conference on Machine Learning");
    additional.setValue(Field.YEAR, "1997");
    additional.setValue(Field.PUBLISHER, "Springer");

    return result;
  }

  /**
   * Returns an enumeration describing the available options
   *
   * @return an enumeration of all the available options
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> newVector = new Vector<Option>(4);

    newVector.add(new Option("\tUse unpruned tree/rules", "N", 0, "-N"));
    newVector.add(new Option("\tUse unsmoothed predictions", "U", 0, "-U"));
    newVector.add(new Option("\tBuild regression tree/rule rather "
      + "than a model tree/rule", "R", 0, "-R"));
    newVector.add(new Option("\tSet minimum number of instances "
      + "per leaf\n\t(default 4)", "M", 1, "-M <minimum number of instances>"));
    newVector.addAll(Collections.list(super.listOptions()));

    return newVector.elements();
  }

  /**
   * Parses a given list of options.
   * <p/>
   *
   * Valid options are:
   * <p>
   *
   * -U <br>
   * Use unsmoothed predictions.
   * <p>
   *
   * -R <br>
   * Build a regression tree rather than a model tree.
   * <p>
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  @Override
  public void setOptions(String[] options) throws Exception {
    setUnpruned(Utils.getFlag('N', options));
    setUseUnsmoothed(Utils.getFlag('U', options));
    setBuildRegressionTree(Utils.getFlag('R', options));
    String optionString = Utils.getOption('M', options);
    if (optionString.length() != 0) {
      // parseDouble avoids the deprecated Double(String) boxing constructor
      setMinNumInstances(Double.parseDouble(optionString));
    }

    super.setOptions(options);

    Utils.checkForRemainingOptions(options);
  }

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {
    Vector<String> result = new Vector<String>();

    if (getUnpruned()) {
      result.add("-N");
    }
    if (getUseUnsmoothed()) {
      result.add("-U");
    }
    if (getBuildRegressionTree()) {
      result.add("-R");
    }
    result.add("-M");
    result.add("" + getMinNumInstances());

    Collections.addAll(result, super.getOptions());

    return result.toArray(new String[result.size()]);
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String unprunedTipText() {
    return "Whether unpruned tree/rules are to be generated.";
  }

  /**
   * Use unpruned tree/rules
   *
   * @param unpruned true if unpruned tree/rules are to be generated
   */
  public void setUnpruned(boolean unpruned) {
    m_useUnpruned = unpruned;
  }

  /**
   * Get whether unpruned tree/rules are being generated
   *
   * @return true if unpruned tree/rules are to be generated
   */
  public boolean getUnpruned() {
    return m_useUnpruned;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String generateRulesTipText() {
    return "Whether to generate rules (decision list) rather than a tree.";
  }

  /**
   * Generate rules (decision list) rather than a tree
   *
   * @param u true if rules are to be generated
   */
  protected void setGenerateRules(boolean u) {
    m_generateRules = u;
  }

  /**
   * get whether rules are being generated rather than a tree
   *
   * @return true if rules are to be generated
   */
  protected boolean getGenerateRules() {
    return m_generateRules;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String useUnsmoothedTipText() {
    return "Whether to use unsmoothed predictions.";
  }

  /**
   * Use unsmoothed predictions
   *
   * @param s true if unsmoothed predictions are to be used
   */
  public void setUseUnsmoothed(boolean s) {
    m_unsmoothedPredictions = s;
  }

  /**
   * Get whether or not smoothing is being used
   *
   * @return true if unsmoothed predictions are to be used
   */
  public boolean getUseUnsmoothed() {
    return m_unsmoothedPredictions;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String buildRegressionTreeTipText() {
    return "Whether to generate a regression tree/rule instead of a model tree/rule.";
  }

  /**
   * Get the value of regressionTree.
   *
   * @return Value of regressionTree.
   */
  public boolean getBuildRegressionTree() {
    return m_regressionTree;
  }

  /**
   * Set the value of regressionTree.
   *
   * @param newregressionTree Value to assign to regressionTree.
   */
  public void setBuildRegressionTree(boolean newregressionTree) {
    m_regressionTree = newregressionTree;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String minNumInstancesTipText() {
    return "The minimum number of instances to allow at a leaf node.";
  }

  /**
   * Set the minimum number of instances to allow at a leaf node
   *
   * @param minNum the minimum number of instances
   */
  public void setMinNumInstances(double minNum) {
    m_minNumInstances = minNum;
  }

  /**
   * Get the minimum number of instances to allow at a leaf node
   *
   * @return a <code>double</code> value
   */
  public double getMinNumInstances() {
    return m_minNumInstances;
  }

  /**
   * Returns default capabilities of the classifier, i.e., of LinearRegression.
   *
   * @return the capabilities of this classifier
   */
  @Override
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    // attributes
    result.enable(Capabilities.Capability.NOMINAL_ATTRIBUTES);
    result.enable(Capabilities.Capability.NUMERIC_ATTRIBUTES);
    result.enable(Capabilities.Capability.DATE_ATTRIBUTES);
    result.enable(Capabilities.Capability.MISSING_VALUES);

    // class
    result.enable(Capabilities.Capability.NUMERIC_CLASS);
    result.enable(Capabilities.Capability.DATE_CLASS);
    result.enable(Capabilities.Capability.MISSING_CLASS_VALUES);

    return result;
  }

  /**
   * Generates the classifier.
   *
   * @param data set of instances serving as training data
   * @throws Exception if the classifier has not been generated successfully
   */
  @Override
  public void buildClassifier(Instances data) throws Exception {
    // can classifier handle the data?
    getCapabilities().testWithFail(data);

    // remove instances with missing class
    data = new Instances(data);
    data.deleteWithMissingClass();

    // preprocessing: impute missing values, binarize nominals, drop useless attributes
    m_instances = new Instances(data);
    m_replaceMissing = new ReplaceMissingValues();
    m_replaceMissing.setInputFormat(m_instances);
    m_instances = Filter.useFilter(m_instances, m_replaceMissing);

    m_nominalToBinary = new NominalToBinary();
    m_nominalToBinary.setInputFormat(m_instances);
    m_instances = Filter.useFilter(m_instances, m_nominalToBinary);

    m_removeUseless = new RemoveUseless();
    m_removeUseless.setInputFormat(m_instances);
    m_instances = Filter.useFilter(m_instances, m_removeUseless);

    // fixed seed keeps the build deterministic
    m_instances.randomize(new Random(1));

    m_ruleSet = new ArrayList<Rule>();
    Rule tempRule;

    if (m_generateRules) {
      // decision list: repeatedly build a rule and recurse on uncovered instances
      Instances tempInst = m_instances;
      do {
        tempRule = new Rule();
        tempRule.setSmoothing(!m_unsmoothedPredictions);
        tempRule.setRegressionTree(m_regressionTree);
        tempRule.setUnpruned(m_useUnpruned);
        tempRule.setSaveInstances(false);
        tempRule.setMinNumInstances(m_minNumInstances);
        tempRule.buildClassifier(tempInst);
        m_ruleSet.add(tempRule);
        // System.err.println("Built rule : "+tempRule.toString());
        tempInst = tempRule.notCoveredInstances();
        tempRule.freeNotCoveredInstances();
      } while (tempInst.numInstances() > 0);
    } else {
      // just build a single tree
      tempRule = new Rule();

      tempRule.setUseTree(true);
      // tempRule.setGrowFullTree(true);
      tempRule.setSmoothing(!m_unsmoothedPredictions);
      tempRule.setSaveInstances(m_saveInstances);
      tempRule.setRegressionTree(m_regressionTree);
      tempRule.setUnpruned(m_useUnpruned);
      tempRule.setMinNumInstances(m_minNumInstances);

      Instances temp_train;

      temp_train = m_instances;

      tempRule.buildClassifier(temp_train);

      m_ruleSet.add(tempRule);

      // System.err.print(tempRule.m_topOfTree.treeToString(0));
    }
    // save space
    m_instances = new Instances(m_instances, 0);
  }

  /**
   * Calculates a prediction for an instance using a set of rules or an M5 model
   * tree
   *
   * @param inst the instance whos class value is to be predicted
   * @return the prediction
   * @throws Exception if a prediction can't be made.
   */
  @Override
  public double classifyInstance(Instance inst) throws Exception {
    Rule temp;
    double prediction = 0;
    boolean success = false;

    // apply the same preprocessing filters that were used during training
    m_replaceMissing.input(inst);
    inst = m_replaceMissing.output();
    m_nominalToBinary.input(inst);
    inst = m_nominalToBinary.output();
    m_removeUseless.input(inst);
    inst = m_removeUseless.output();

    if (m_ruleSet == null) {
      throw new Exception("Classifier has not been built yet!");
    }

    if (!m_generateRules) {
      temp = m_ruleSet.get(0);
      return temp.classifyInstance(inst);
    }

    // decision list: the first rule whose conditions cover the instance predicts
    boolean cont;
    int i;

    for (i = 0; i < m_ruleSet.size(); i++) {
      cont = false;
      temp = m_ruleSet.get(i);

      try {
        prediction = temp.classifyInstance(inst);
        success = true;
      } catch (Exception e) {
        // rule does not cover this instance; try the next one
        cont = true;
      }

      if (!cont) {
        break;
      }
    }

    if (!success) {
      System.out.println("Error in predicting (DecList)");
    }
    return prediction;
  }

  /**
   * Returns a description of the classifier
   *
   * @return a description of the classifier as a String
   */
  @Override
  public String toString() {
    StringBuilder text = new StringBuilder();
    Rule temp;

    if (m_ruleSet == null) {
      return "Classifier hasn't been built yet!";
    }

    if (m_generateRules) {
      text.append("M5 " + ((m_useUnpruned == true) ? "unpruned " : "pruned ")
        + ((m_regressionTree == true) ? "regression " : "model ") + "rules ");

      if (!m_unsmoothedPredictions) {
        text.append("\n(using smoothed linear models) ");
      }

      text.append(":\n");

      text.append("Number of Rules : " + m_ruleSet.size() + "\n\n");

      for (int j = 0; j < m_ruleSet.size(); j++) {
        temp = m_ruleSet.get(j);

        text.append("Rule: " + (j + 1) + "\n");
        text.append(temp.toString());
      }
    } else {
      temp = m_ruleSet.get(0);
      text.append(temp.toString());
    }
    return text.toString();
  }

  /**
   * Returns an enumeration of the additional measure names
   *
   * @return an enumeration of the measure names
   */
  @Override
  public Enumeration<String> enumerateMeasures() {
    Vector<String> newVector = new Vector<String>(1);
    newVector.add("measureNumRules");
    return newVector.elements();
  }

  /**
   * Returns the value of the named measure
   *
   * @param additionalMeasureName the name of the measure to query for its value
   * @return the value of the named measure
   * @throws Exception if the named measure is not supported
   */
  @Override
  public double getMeasure(String additionalMeasureName) {
    if (additionalMeasureName.compareToIgnoreCase("measureNumRules") == 0) {
      return measureNumRules();
    } else {
      throw new IllegalArgumentException(additionalMeasureName
        + " not supported (M5)");
    }
  }

  /**
   * return the number of rules
   *
   * @return the number of rules (same as # linear models &amp; # leaves in the
   *         tree)
   */
  public double measureNumRules() {
    if (m_generateRules) {
      return m_ruleSet.size();
    }
    return m_ruleSet.get(0).m_topOfTree.numberOfLinearModels();
  }

  /**
   * Returns the root node of the M5 tree (only meaningful when a single tree,
   * rather than a rule set, was built).
   *
   * @return the root node of the tree of the first rule
   */
  public RuleNode getM5RootNode() {
    Rule temp = m_ruleSet.get(0);
    return temp.getM5RootNode();
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/m5/PreConstructedLinearModel.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* RuleNode.java
* Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.m5;
import java.io.Serializable;
import weka.classifiers.AbstractClassifier;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* This class encapsulates a linear regression function. It is a classifier but does not learn the function itself, instead it is constructed with coefficients and intercept obtained elsewhere. The buildClassifier method must still be
* called however as this stores a copy of the training data's header for use in printing the model to the console.
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
*/
public class PreConstructedLinearModel extends AbstractClassifier implements Serializable {

  /** for serialization */
  static final long serialVersionUID = 2030974097051713247L;

  /** The coefficients */
  private double[] m_coefficients;

  /** The intercept */
  private double m_intercept;

  /** Holds the instances header for printing the model */
  private Instances m_instancesHeader;

  /** number of coefficients in the model (non-zero entries only) */
  private int m_numParameters;

  /**
   * Constructor
   *
   * @param coeffs
   *          an array of coefficients
   * @param intercept
   *          the intercept
   */
  public PreConstructedLinearModel(final double[] coeffs, final double intercept) {
    this.m_coefficients = coeffs;
    this.m_intercept = intercept;
    // only coefficients that are actually non-zero count as parameters
    int count = 0;
    for (int i = 0; i < coeffs.length; i++) {
      if (coeffs[i] != 0) {
        count++;
      }
    }
    this.m_numParameters = count;
  }

  /**
   * Builds the classifier. In this case all that is done is that a copy of the training instances
   * header is saved (needed by toString for attribute names).
   *
   * @param instances
   *          an <code>Instances</code> value
   * @exception Exception
   *              if an error occurs
   */
  @Override
  public void buildClassifier(final Instances instances) throws Exception {
    this.m_instancesHeader = new Instances(instances, 0);
  }

  /**
   * Predicts the class of the supplied instance using the linear model.
   * The class attribute and missing values are skipped in the weighted sum.
   *
   * @param inst
   *          the instance to make a prediction for
   * @return the prediction
   * @exception Exception
   *              if an error occurs
   */
  @Override
  public double classifyInstance(final Instance inst) throws Exception {
    double result = 0;
    for (int i = 0; i < this.m_coefficients.length; i++) {
      if (i != inst.classIndex() && !inst.isMissing(i)) {
        result += this.m_coefficients[i] * inst.value(i);
      }
    }
    result += this.m_intercept;
    return result;
  }

  /**
   * Return the number of parameters (coefficients) in the linear model
   *
   * @return the number of parameters
   */
  public int numParameters() {
    return this.m_numParameters;
  }

  /**
   * Return the array of coefficients
   *
   * @return the coefficients
   */
  public double[] coefficients() {
    return this.m_coefficients;
  }

  /**
   * Return the intercept
   *
   * @return the intercept
   */
  public double intercept() {
    return this.m_intercept;
  }

  /**
   * Returns a textual description of this linear model
   *
   * @return String containing a description of this linear model
   */
  @Override
  public String toString() {
    // StringBuilder: local, single-threaded use needs no synchronization
    StringBuilder b = new StringBuilder();
    b.append("\n" + this.m_instancesHeader.classAttribute().name() + " = ");
    boolean first = true;
    for (int i = 0; i < this.m_coefficients.length; i++) {
      if (this.m_coefficients[i] != 0.0) {
        double c = this.m_coefficients[i];
        if (first) {
          // the first term is printed without a leading sign
          b.append("\n\t" + Utils.doubleToString(c, 12, 4).trim() + " * " + this.m_instancesHeader.attribute(i).name() + " ");
          first = false;
        } else {
          b.append("\n\t" + ((this.m_coefficients[i] < 0) ? "- " + Utils.doubleToString(Math.abs(c), 12, 4).trim() : "+ " + Utils.doubleToString(Math.abs(c), 12, 4).trim()) + " * " + this.m_instancesHeader.attribute(i).name()
            + " ");
        }
      }
    }
    b.append("\n\t" + ((this.m_intercept < 0) ? "- " : "+ ") + Utils.doubleToString(Math.abs(this.m_intercept), 12, 4).trim());
    return b.toString();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/m5/Rule.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Rule.java
* Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.m5;
import java.io.Serializable;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* Generates a single m5 tree or rule
*
* @author Mark Hall
* @version $Revision$
*/
public class Rule implements Serializable, RevisionHandler {
/** for serialization */
private static final long serialVersionUID = -4458627451682483204L;
protected static int LEFT = 0;
protected static int RIGHT = 1;
/**
* the instances covered by this rule
*/
private Instances m_instances;
/**
* the class index
*/
private int m_classIndex;
/**
* the number of instances in the dataset
*/
private int m_numInstances;
/**
* the indexes of the attributes used to split on for this rule
*/
private int[] m_splitAtts;
/**
* the corresponding values of the split points
*/
private double[] m_splitVals;
/**
* the corresponding internal nodes. Used for smoothing rules.
*/
private RuleNode[] m_internalNodes;
/**
* the corresponding relational operators (0 = "<=", 1 = ">")
*/
private int[] m_relOps;
/**
* the leaf encapsulating the linear model for this rule
*/
private RuleNode m_ruleModel;
/**
* the top of the m5 tree for this rule
*/
protected RuleNode m_topOfTree;
/**
* the standard deviation of the class for all the instances
*/
private double m_globalStdDev;
/**
* the absolute deviation of the class for all the instances
*/
private double m_globalAbsDev;
/**
* the instances covered by this rule
*/
private Instances m_covered;
/**
* the number of instances covered by this rule
*/
private int m_numCovered;
/**
* the instances not covered by this rule
*/
private Instances m_notCovered;
/**
* use a pruned m5 tree rather than make a rule
*/
private boolean m_useTree;
/**
* use the original m5 smoothing procedure
*/
private boolean m_smoothPredictions;
/**
* Save instances at each node in an M5 tree for visualization purposes.
*/
private boolean m_saveInstances;
/**
* Make a regression tree instead of a model tree
*/
private boolean m_regressionTree;
/**
* Build unpruned tree/rule
*/
private boolean m_useUnpruned;
/**
* The minimum number of instances to allow at a leaf node
*/
private double m_minNumInstances;
/**
 * Constructs a Rule with default settings: generate a rule rather than a
 * tree, no smoothing, pruning enabled, and a minimum of 4 instances per
 * leaf node.
 */
public Rule() {
this.m_useTree = false;
this.m_smoothPredictions = false;
this.m_useUnpruned = false;
this.m_minNumInstances = 4;
}
/**
 * Generates a single rule or m5 model tree: grows a full M5 tree over the
 * data, prunes (or installs linear models directly if unpruned), optionally
 * smooths, and — when a rule rather than a tree is requested — extracts the
 * single best rule from the tree.
 *
 * @param data
 *          set of instances serving as training data
 * @exception Exception
 *              if the rule has not been generated successfully
 */
public void buildClassifier(final Instances data) throws Exception {
// reset all state from a previous build
this.m_instances = null;
this.m_topOfTree = null;
this.m_covered = null;
this.m_notCovered = null;
this.m_ruleModel = null;
this.m_splitAtts = null;
this.m_splitVals = null;
this.m_relOps = null;
this.m_internalNodes = null;
this.m_instances = data;
this.m_classIndex = this.m_instances.classIndex();
this.m_numInstances = this.m_instances.numInstances();
// first calculate global deviation of class attribute
// (static helpers stdDev/absDev are defined elsewhere in this class)
this.m_globalStdDev = Rule.stdDev(this.m_classIndex, this.m_instances);
this.m_globalAbsDev = Rule.absDev(this.m_classIndex, this.m_instances);
// grow the M5 tree from the root
this.m_topOfTree = new RuleNode(this.m_globalStdDev, this.m_globalAbsDev, null);
this.m_topOfTree.setSaveInstances(this.m_saveInstances);
this.m_topOfTree.setRegressionTree(this.m_regressionTree);
this.m_topOfTree.setMinNumInstances(this.m_minNumInstances);
this.m_topOfTree.buildClassifier(this.m_instances);
if (!this.m_useUnpruned) {
this.m_topOfTree.prune();
} else {
// unpruned trees still need linear models installed at the leaves
this.m_topOfTree.installLinearModels();
}
if (this.m_smoothPredictions) {
this.m_topOfTree.installSmoothedModels();
}
// m_topOfTree.printAllModels();
this.m_topOfTree.numLeaves(0); // number the leaves for printing
if (!this.m_useTree) {
this.makeRule();
// save space
// m_topOfTree = null;
}
// save space: keep only the header of the training data
this.m_instances = new Instances(this.m_instances, 0);
}
/**
 * Calculates a prediction for an instance using this rule or M5 model tree.
 * In rule mode the instance must satisfy every condition of the rule,
 * otherwise an exception is thrown (callers use this to fall through to the
 * next rule in a decision list).
 *
 * @param instance
 *          the instance whos class value is to be predicted
 * @return the prediction
 * @exception Exception
 *              if a prediction can't be made (rule does not cover the instance).
 */
public double classifyInstance(final Instance instance) throws Exception {
if (this.m_useTree) {
// tree mode: delegate to the model tree
return this.m_topOfTree.classifyInstance(instance);
}
// does the instance pass the rule's conditions?
if (this.m_splitAtts.length > 0) {
for (int i = 0; i < this.m_relOps.length; i++) {
if (this.m_relOps[i] == LEFT) // left ("<=") branch condition
{
if (instance.value(this.m_splitAtts[i]) > this.m_splitVals[i]) {
throw new Exception("Rule does not classify instance");
}
} else {
// right (">") branch condition
if (instance.value(this.m_splitAtts[i]) <= this.m_splitVals[i]) {
throw new Exception("Rule does not classify instance");
}
}
}
}
// the linear model's prediction for this rule
return this.m_ruleModel.classifyInstance(instance);
}
/**
 * Returns the top of the tree.
 *
 * @return the root node of the M5 tree built for this rule (may be null
 *         before buildClassifier has been called)
 */
public RuleNode topOfTree() {
return this.m_topOfTree;
}
/**
 * Make the single best rule from a pruned m5 model tree: finds the best
 * leaf, records the sequence of split conditions on the path from that leaf
 * back to the root, and tallies which training instances the rule covers.
 * NOTE: this destructively detaches the sibling branch at every internal
 * node on the path (sets m_left/m_right to null) to save space.
 *
 * @exception Exception
 *              if something goes wrong.
 */
private void makeRule() throws Exception {
RuleNode[] best_leaf = new RuleNode[1];
double[] best_cov = new double[1];
RuleNode temp;
this.m_notCovered = new Instances(this.m_instances, 0);
this.m_covered = new Instances(this.m_instances, 0);
best_cov[0] = -1;
best_leaf[0] = null;
// single-element arrays act as out-parameters for the recursive search
this.m_topOfTree.findBestLeaf(best_cov, best_leaf);
temp = best_leaf[0];
if (temp == null) {
throw new Exception("Unable to generate rule!");
}
// save the linear model for this rule
this.m_ruleModel = temp;
// count the depth of the best leaf to size the condition arrays
int count = 0;
while (temp.parentNode() != null) {
count++;
temp = temp.parentNode();
}
temp = best_leaf[0];
this.m_relOps = new int[count];
this.m_splitAtts = new int[count];
this.m_splitVals = new double[count];
if (this.m_smoothPredictions) {
// internal nodes are kept so smoothing can walk the path later
this.m_internalNodes = new RuleNode[count];
}
// trace back to the root, recording each split and dropping the branch
// not on the path (the sibling subtree is no longer needed)
int i = 0;
while (temp.parentNode() != null) {
this.m_splitAtts[i] = temp.parentNode().splitAtt();
this.m_splitVals[i] = temp.parentNode().splitVal();
if (temp.parentNode().leftNode() == temp) {
this.m_relOps[i] = LEFT;
temp.parentNode().m_right = null;
} else {
this.m_relOps[i] = RIGHT;
temp.parentNode().m_left = null;
}
if (this.m_smoothPredictions) {
this.m_internalNodes[i] = temp.parentNode();
}
temp = temp.parentNode();
i++;
}
// now assemble the covered and uncovered instances
// (covered instances are only counted, not stored — see commented add below)
boolean ok;
for (i = 0; i < this.m_numInstances; i++) {
ok = true;
for (int j = 0; j < this.m_relOps.length; j++) {
if (this.m_relOps[j] == LEFT) {
if (this.m_instances.instance(i).value(this.m_splitAtts[j]) > this.m_splitVals[j]) {
this.m_notCovered.add(this.m_instances.instance(i));
ok = false;
break;
}
} else {
if (this.m_instances.instance(i).value(this.m_splitAtts[j]) <= this.m_splitVals[j]) {
this.m_notCovered.add(this.m_instances.instance(i));
ok = false;
break;
}
}
}
if (ok) {
this.m_numCovered++;
// m_covered.add(m_instances.instance(i));
}
}
}
/**
 * Return a description of the m5 tree or rule.
 *
 * @return a description of the m5 tree or rule as a String
 */
@Override
public String toString() {
  // delegate to whichever textual form matches the current mode
  return this.m_useTree ? this.treeToString() : this.ruleToString();
}
/**
 * Return a description of the m5 tree.
 *
 * @return a description of the m5 tree as a String
 */
private String treeToString() {
  if (this.m_topOfTree == null) {
    return "Tree/Rule has not been built yet!";
  }
  final StringBuilder buf = new StringBuilder();
  // header line: pruning mode and tree type
  buf.append("M5 ").append(this.m_useUnpruned ? "unpruned " : "pruned ").append(this.m_regressionTree ? "regression " : "model ").append("tree:\n");
  if (this.m_smoothPredictions) {
    buf.append("(using smoothed linear models)\n");
  }
  // recursive structure dump followed by the leaf models themselves
  buf.append(this.m_topOfTree.treeToString(0));
  buf.append(this.m_topOfTree.printLeafModels());
  buf.append("\nNumber of Rules : ").append(this.m_topOfTree.numberOfLinearModels());
  return buf.toString();
}
/**
 * Return a description of the rule: an IF block listing the split
 * conditions (root first), then the linear model and coverage statistics.
 *
 * @return a description of the rule as a String
 */
private String ruleToString() {
  StringBuffer text = new StringBuffer();
  if (this.m_splitAtts.length > 0) {
    text.append("IF\n");
    // conditions were recorded leaf-to-root, so print them in reverse
    for (int i = this.m_splitAtts.length - 1; i >= 0; i--) {
      text.append("\t" + this.m_covered.attribute(this.m_splitAtts[i]).name() + " ");
      // use the LEFT constant instead of the magic literal 0, for consistency
      // with classifyInstance/makeRule; LEFT means "value <= split"
      if (this.m_relOps[i] == LEFT) {
        text.append("<= ");
      } else {
        text.append("> ");
      }
      text.append(Utils.doubleToString(this.m_splitVals[i], 1, 3) + "\n");
    }
    text.append("THEN\n");
  }
  if (this.m_ruleModel != null) {
    try {
      text.append(this.m_ruleModel.printNodeLinearModel());
      text.append(" [" + this.m_numCovered/* m_covered.numInstances() */);
      // NOTE(review): the guard tests m_globalAbsDev but the divisor is
      // m_globalStdDev -- looks inconsistent; confirm against upstream weka
      if (this.m_globalAbsDev > 0.0) {
        // relative RMSE as a percentage of the global standard deviation
        text.append("/" + Utils.doubleToString((100 * this.m_ruleModel.rootMeanSquaredError() / this.m_globalStdDev), 1, 3) + "%]\n\n");
      } else {
        text.append("]\n\n");
      }
    } catch (Exception e) {
      return "Can't print rule";
    }
  }
  return text.toString();
}
/**
 * Use unpruned tree/rules.
 *
 * @param unpruned
 *          true if unpruned tree/rules are to be generated
 */
public void setUnpruned(final boolean unpruned) {
  this.m_useUnpruned = unpruned;
}
/**
 * Get whether unpruned tree/rules are being generated.
 *
 * @return true if unpruned tree/rules are to be generated
 */
public boolean getUnpruned() {
  return this.m_useUnpruned;
}
/**
 * Use an m5 tree rather than generate rules.
 *
 * @param u
 *          true if an m5 tree is to be used
 */
public void setUseTree(final boolean u) {
  this.m_useTree = u;
}
/**
 * Get whether an m5 tree is being used rather than rules.
 *
 * @return true if an m5 tree is being used.
 */
public boolean getUseTree() {
  return this.m_useTree;
}
/**
 * Smooth predictions.
 *
 * @param s
 *          true if smoothing is to be used
 */
public void setSmoothing(final boolean s) {
  this.m_smoothPredictions = s;
}
/**
 * Get whether or not smoothing has been turned on.
 *
 * @return true if smoothing is being used
 */
public boolean getSmoothing() {
  return this.m_smoothPredictions;
}
/**
 * Get the instances not covered by this rule (populated by makeRule).
 *
 * @return the instances not covered
 */
public Instances notCoveredInstances() {
  return this.m_notCovered;
}
/**
 * Free up memory consumed by the set of instances not covered by this rule.
 * After this call notCoveredInstances() returns null.
 */
public void freeNotCoveredInstances() {
  this.m_notCovered = null;
}
// /**
// * Get the instances covered by this rule
// *
// * @return the instances covered by this rule
// */
// public Instances coveredInstances() {
// return m_covered;
// }
/**
 * Returns the standard deviation of the values of the supplied attribute
 * over the given instances. Uses the biased (population) estimate, matching
 * the original M5 code.
 *
 * @param attr
 *          an attribute index
 * @param inst
 *          the instances
 * @return the standard deviation value (0 if fewer than two instances)
 */
protected static final double stdDev(final int attr, final Instances inst) {
  // the original kept a hand-incremented counter that simply mirrored
  // numInstances(); use the count directly instead
  final int count = inst.numInstances();
  if (count <= 1) {
    return 0.0;
  }
  double sum = 0.0, sqrSum = 0.0;
  for (int i = 0; i < count; i++) {
    final double value = inst.instance(i).value(attr);
    sum += value;
    sqrSum += value * value;
  }
  // variance = E[x^2] - E[x]^2; Math.abs guards against tiny negative
  // values produced by floating-point round-off
  final double va = Math.abs((sqrSum - sum * sum / count) / count);
  return Math.sqrt(va);
}
/**
 * Returns the mean absolute deviation of the values of the supplied
 * attribute over the given instances.
 *
 * @param attr
 *          an attribute index
 * @param inst
 *          the instances
 * @return the absolute deviation value (0 if fewer than two instances)
 */
protected static final double absDev(final int attr, final Instances inst) {
  final int n = inst.numInstances();
  if (n <= 1) {
    return 0.0;
  }
  // first pass: mean of the attribute values
  double mean = 0.0;
  for (int i = 0; i < n; i++) {
    mean += inst.instance(i).value(attr);
  }
  mean /= n;
  // second pass: mean absolute difference from the mean
  double absdiff = 0.0;
  for (int i = 0; i < n; i++) {
    absdiff += Math.abs(inst.instance(i).value(attr) - mean);
  }
  return absdiff / n;
}
/**
 * Sets whether instances at each node in an M5 tree should be saved for
 * visualization purposes. Default is to save memory (not save them).
 *
 * @param save
 *          a <code>boolean</code> value
 */
protected void setSaveInstances(final boolean save) {
  this.m_saveInstances = save;
}
/**
 * Get the value of regressionTree.
 *
 * @return Value of regressionTree.
 */
public boolean getRegressionTree() {
  return this.m_regressionTree;
}
/**
 * Set the value of regressionTree.
 *
 * @param newregressionTree
 *          Value to assign to regressionTree.
 */
public void setRegressionTree(final boolean newregressionTree) {
  this.m_regressionTree = newregressionTree;
}
/**
 * Set the minimum number of instances to allow at a leaf node.
 *
 * @param minNum
 *          the minimum number of instances
 */
public void setMinNumInstances(final double minNum) {
  this.m_minNumInstances = minNum;
}
/**
 * Get the minimum number of instances to allow at a leaf node.
 *
 * @return a <code>double</code> value
 */
public double getMinNumInstances() {
  return this.m_minNumInstances;
}
/**
 * Get the root node of the M5 tree.
 *
 * @return the root node
 */
public RuleNode getM5RootNode() {
  return this.m_topOfTree;
}
/**
 * Returns the revision string.
 *
 * @return the revision
 */
@Override
public String getRevision() {
  return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/m5/RuleNode.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* RuleNode.java
* Copyright (C) 2000 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.m5;
import java.util.ArrayList;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Evaluation;
import weka.classifiers.functions.LinearRegression;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.Remove;
/**
* Constructs a node for use in an m5 tree or rule
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
*/
public class RuleNode extends AbstractClassifier {
/** for serialization */
static final long serialVersionUID = 1979807611124337144L;
/**
* instances reaching this node
*/
private Instances m_instances;
/**
* the class index
*/
private int m_classIndex;
/**
* the number of instances reaching this node
*/
protected int m_numInstances;
/**
* the number of attributes
*/
private int m_numAttributes;
/**
* Node is a leaf
*/
private boolean m_isLeaf;
/**
* attribute this node splits on
*/
private int m_splitAtt;
/**
* the value of the split attribute
*/
private double m_splitValue;
/**
* the linear model at this node
*/
private PreConstructedLinearModel m_nodeModel;
/**
* the number of parameters in the chosen model for this node---either the subtree model or the linear model. The constant term is counted as a parameter---this is for pruning purposes
*/
public int m_numParameters;
/**
* the mean squared error of the model at this node (either linear or subtree)
*/
private double m_rootMeanSquaredError;
/**
* left child node
*/
protected RuleNode m_left;
/**
* right child node
*/
protected RuleNode m_right;
/**
* the parent of this node
*/
private final RuleNode m_parent;
/**
* a node will not be split if it contains less than m_splitNum instances
*/
private double m_splitNum = 4;
/**
* a node will not be split if its class standard deviation is less than 5% of the class standard deviation of all the instances
*/
private final double m_devFraction = 0.05;
private final double m_pruningMultiplier = 2;
/**
* the number assigned to the linear model if this node is a leaf. = 0 if this node is not a leaf
*/
private int m_leafModelNum;
/**
* a node will not be split if the class deviation of its instances is less than m_devFraction of the deviation of the global class
*/
private final double m_globalDeviation;
/**
* the absolute deviation of the global class
*/
private final double m_globalAbsDeviation;
/**
* Indices of the attributes to be used in generating a linear model at this node
*/
private int[] m_indices;
/**
* Constant used in original m5 smoothing calculation
*/
private static final double SMOOTHING_CONSTANT = 15.0;
/**
* Node id.
*/
private int m_id;
/**
* Save the instances at each node (for visualizing in the Explorer's tree visualizer).
*/
private boolean m_saveInstances = false;
/**
* Make a regression tree instead of a model tree
*/
private boolean m_regressionTree;
/**
 * Creates a new <code>RuleNode</code> instance. The node starts with no
 * model and no children; these are created by buildClassifier/split.
 *
 * @param globalDev
 *          the global standard deviation of the class
 * @param globalAbsDev
 *          the global absolute deviation of the class
 * @param parent
 *          the parent of this node (null for the root)
 */
public RuleNode(final double globalDev, final double globalAbsDev, final RuleNode parent) {
  this.m_nodeModel = null;
  this.m_right = null;
  this.m_left = null;
  this.m_parent = parent;
  this.m_globalDeviation = globalDev;
  this.m_globalAbsDeviation = globalAbsDev;
}
/**
 * Build this node (find an attribute and split point) and recursively build
 * child nodes via split().
 *
 * @param data
 *          the instances on which to build this node
 * @throws Exception
 *           if an error occurs
 */
@Override
public void buildClassifier(final Instances data) throws Exception {
  this.m_rootMeanSquaredError = Double.MAX_VALUE;
  // m_instances = new Instances(data);
  // NOTE: keeps a reference to the caller's data rather than copying (saves memory)
  this.m_instances = data;
  this.m_classIndex = this.m_instances.classIndex();
  this.m_numInstances = this.m_instances.numInstances();
  this.m_numAttributes = this.m_instances.numAttributes();
  this.m_nodeModel = null;
  this.m_right = null;
  this.m_left = null;
  // stop splitting when too few instances remain, or the class deviation here
  // is already below m_devFraction of the global deviation
  if ((this.m_numInstances < this.m_splitNum) || (Rule.stdDev(this.m_classIndex, this.m_instances) < (this.m_globalDeviation * this.m_devFraction))) {
    this.m_isLeaf = true;
  } else {
    this.m_isLeaf = false;
  }
  this.split();
}
/**
 * Classify an instance using this node. Recursively calls classifyInstance
 * on the child matching the split test.
 *
 * @param inst
 *          the instance to classify
 * @return the prediction for this instance
 * @throws Exception
 *           if an error occurs
 */
@Override
public double classifyInstance(final Instance inst) throws Exception {
  if (this.m_isLeaf) {
    if (this.m_nodeModel == null) {
      throw new Exception("Classifier has not been built correctly.");
    }
    // leaf: the installed linear model makes the prediction
    return this.m_nodeModel.classifyInstance(inst);
  }
  // interior node: route to the child matching the split test
  final RuleNode branch = (inst.value(this.m_splitAtt) <= this.m_splitValue) ? this.m_left : this.m_right;
  return branch.classifyInstance(inst);
}
/**
 * Applies the m5 smoothing procedure to a prediction: a weighted average of
 * the subtree prediction and this node's model prediction, with the subtree
 * weighted by the number of instances and the node by SMOOTHING_CONSTANT.
 *
 * @param n
 *          number of instances in selected child of this node
 * @param pred
 *          the prediction so far
 * @param supportPred
 *          the prediction of the linear model at this node
 * @return the smoothed prediction
 * @throws Exception
 *           if an error occurs
 */
protected static double smoothingOriginal(final double n, final double pred, final double supportPred) throws Exception {
  return (n * pred + SMOOTHING_CONSTANT * supportPred) / (n + SMOOTHING_CONSTANT);
}
/**
 * Finds the best attribute and split point for this node, partitions the
 * data, and recursively builds the left and right subtrees. If no acceptable
 * split exists the node becomes a leaf. Also records the attribute indices
 * to be used when building the linear model at this node.
 *
 * @throws Exception
 *           if an error occurs
 */
public void split() throws Exception {
  int i;
  Instances leftSubset, rightSubset;
  SplitEvaluate bestSplit, currentSplit;
  boolean[] attsBelow;
  if (!this.m_isLeaf) {
    bestSplit = new YongSplitInfo(0, this.m_numInstances - 1, -1);
    currentSplit = new YongSplitInfo(0, this.m_numInstances - 1, -1);
    // find the best attribute to split on
    for (i = 0; i < this.m_numAttributes; i++) {
      if (i != this.m_classIndex) {
        // sort the instances by this attribute
        this.m_instances.sort(i);
        currentSplit.attrSplit(i, this.m_instances);
        // keep this split only if it beats the best by more than a small
        // tolerance (guards against floating-point noise)
        if ((Math.abs(currentSplit.maxImpurity() - bestSplit.maxImpurity()) > 1.e-6) && (currentSplit.maxImpurity() > bestSplit.maxImpurity() + 1.e-6)) {
          bestSplit = currentSplit.copy();
        }
      }
    }
    // cant find a good split or split point?
    if (bestSplit.splitAttr() < 0 || bestSplit.position() < 1 || bestSplit.position() > this.m_numInstances - 1) {
      this.m_isLeaf = true;
    } else {
      this.m_splitAtt = bestSplit.splitAttr();
      this.m_splitValue = bestSplit.splitValue();
      // partition the instances on the chosen split
      leftSubset = new Instances(this.m_instances, this.m_numInstances);
      rightSubset = new Instances(this.m_instances, this.m_numInstances);
      for (i = 0; i < this.m_numInstances; i++) {
        if (this.m_instances.instance(i).value(this.m_splitAtt) <= this.m_splitValue) {
          leftSubset.add(this.m_instances.instance(i));
        } else {
          rightSubset.add(this.m_instances.instance(i));
        }
      }
      leftSubset.compactify();
      rightSubset.compactify();
      // build left and right nodes
      this.m_left = new RuleNode(this.m_globalDeviation, this.m_globalAbsDeviation, this);
      this.m_left.setMinNumInstances(this.m_splitNum);
      this.m_left.setRegressionTree(this.m_regressionTree);
      this.m_left.setSaveInstances(this.m_saveInstances);
      this.m_left.buildClassifier(leftSubset);
      this.m_right = new RuleNode(this.m_globalDeviation, this.m_globalAbsDeviation, this);
      this.m_right.setMinNumInstances(this.m_splitNum);
      this.m_right.setRegressionTree(this.m_regressionTree);
      this.m_right.setSaveInstances(this.m_saveInstances);
      this.m_right.buildClassifier(rightSubset);
      // now find out what attributes are tested in the left and right
      // subtrees and use them to learn a linear model for this node
      if (!this.m_regressionTree) {
        attsBelow = this.attsTestedBelow();
        attsBelow[this.m_classIndex] = true;
        int count = 0, j;
        for (j = 0; j < this.m_numAttributes; j++) {
          if (attsBelow[j]) {
            count++;
          }
        }
        int[] indices = new int[count];
        count = 0;
        for (j = 0; j < this.m_numAttributes; j++) {
          if (attsBelow[j] && (j != this.m_classIndex)) {
            indices[count++] = j;
          }
        }
        // the class index always goes in the last slot
        indices[count] = this.m_classIndex;
        this.m_indices = indices;
      } else {
        // regression tree: the "model" is a constant, so only the class attribute
        this.m_indices = new int[1];
        this.m_indices[0] = this.m_classIndex;
        this.m_numParameters = 1;
      }
    }
  }
  // note: m_isLeaf may have just been set above when no split was found
  if (this.m_isLeaf) {
    int[] indices = new int[1];
    indices[0] = this.m_classIndex;
    this.m_indices = indices;
    this.m_numParameters = 1;
    // need to evaluate the model here if want correct stats for unpruned
    // tree
  }
}
/**
 * Build a linear model for this node using those attributes specified in
 * indices, and install it as m_nodeModel.
 *
 * @param indices
 *          an array of attribute indices to include in the linear model
 * @throws Exception
 *           if something goes wrong
 */
private void buildLinearModel(final int[] indices) throws Exception {
  // copy the training instances and remove all but the tested
  // attributes (invert selection keeps only those listed in indices)
  Instances reducedInst = new Instances(this.m_instances);
  Remove attributeFilter = new Remove();
  attributeFilter.setInvertSelection(true);
  attributeFilter.setAttributeIndicesArray(indices);
  attributeFilter.setInputFormat(reducedInst);
  reducedInst = Filter.useFilter(reducedInst, attributeFilter);
  // build a linear regression for the training data using the
  // tested attributes
  LinearRegression temp = new LinearRegression();
  temp.setDoNotCheckCapabilities(true);
  temp.setMinimal(true);
  temp.buildClassifier(reducedInst);
  // map the regression coefficients (in reduced-attribute space) back to
  // the full attribute space; the last element of lmCoeffs is the intercept
  double[] lmCoeffs = temp.coefficients();
  double[] coeffs = new double[this.m_instances.numAttributes()];
  for (int i = 0; i < lmCoeffs.length - 1; i++) {
    if (indices[i] != this.m_classIndex) {
      coeffs[indices[i]] = lmCoeffs[i];
    }
  }
  this.m_nodeModel = new PreConstructedLinearModel(coeffs, lmCoeffs[lmCoeffs.length - 1]);
  this.m_nodeModel.buildClassifier(this.m_instances);
}
/**
 * Returns flags indicating which attributes are used in split tests at or
 * below this node (one boolean per attribute, true if tested).
 *
 * @return a boolean array indexed by attribute
 */
private boolean[] attsTestedBelow() {
  final boolean[] attsBelow = new boolean[this.m_numAttributes];
  // merge the attribute sets of both subtrees (logical OR per attribute)
  for (final RuleNode child : new RuleNode[] { this.m_left, this.m_right }) {
    if (child != null) {
      final boolean[] childAtts = child.attsTestedBelow();
      for (int i = 0; i < this.m_numAttributes; i++) {
        attsBelow[i] |= childAtts[i];
      }
    }
  }
  // an interior node also tests its own split attribute
  if (!this.m_isLeaf) {
    attsBelow[this.m_splitAtt] = true;
  }
  return attsBelow;
}
/**
 * Sets the leaves' numbers: each leaf receives the next sequential model
 * number; interior nodes get 0.
 *
 * @param leafCounter
 *          the number of leaves counted so far
 * @return the number of the total leaves under the node
 */
public int numLeaves(int leafCounter) {
  if (this.m_isLeaf) {
    // leaf: take the next sequential model number
    leafCounter++;
    this.m_leafModelNum = leafCounter;
    return leafCounter;
  }
  // interior node: no model number; number the subtrees left-to-right
  this.m_leafModelNum = 0;
  if (this.m_left != null) {
    leafCounter = this.m_left.numLeaves(leafCounter);
  }
  if (this.m_right != null) {
    leafCounter = this.m_right.numLeaves(leafCounter);
  }
  return leafCounter;
}
/**
 * Print the linear model at this node.
 *
 * @return the textual form of this node's linear model
 */
@Override
public String toString() {
  return this.printNodeLinearModel();
}
/**
 * Print the linear model at this node.
 *
 * @return the linear model at this node
 */
public String printNodeLinearModel() {
  // NOTE(review): assumes a model has been installed (m_nodeModel non-null)
  return this.m_nodeModel.toString();
}
/**
 * Print all leaf models in this subtree, each preceded by its LM number.
 *
 * @return the leaf models as a String
 */
public String printLeafModels() {
  if (!this.m_isLeaf) {
    // interior node: concatenate the leaf models of both subtrees
    return this.m_left.printLeafModels() + this.m_right.printLeafModels();
  }
  return "\nLM num: " + this.m_leafModelNum + this.m_nodeModel.toString() + "\n";
}
/**
 * Returns a description of this node (debugging purposes).
 *
 * @return a string describing this node and, recursively, its children
 */
public String nodeToString() {
  StringBuffer text = new StringBuffer();
  text.append("Node:\n\tnum inst: " + this.m_numInstances);
  if (this.m_isLeaf) {
    text.append("\n\tleaf");
  } else {
    text.append("\tnode");
  }
  // NOTE(review): split attribute/value are printed even for leaves, where
  // they may be stale defaults -- debug output only
  text.append("\n\tSplit att: " + this.m_instances.attribute(this.m_splitAtt).name());
  text.append("\n\tSplit val: " + Utils.doubleToString(this.m_splitValue, 1, 3));
  text.append("\n\tLM num: " + this.m_leafModelNum);
  text.append("\n\tLinear model\n" + this.m_nodeModel.toString());
  text.append("\n\n");
  if (this.m_left != null) {
    text.append(this.m_left.nodeToString());
  }
  if (this.m_right != null) {
    text.append(this.m_right.nodeToString());
  }
  return text.toString();
}
/**
 * Recursively builds a textual description of the tree, one "| "-indented
 * line per split branch and an LM reference per leaf.
 *
 * @param level
 *          the level of this node (indentation depth)
 * @return string describing the tree
 */
public String treeToString(final int level) {
  int i;
  StringBuffer text = new StringBuffer();
  if (!this.m_isLeaf) {
    text.append("\n");
    for (i = 1; i <= level; i++) {
      text.append("|   ");
    }
    // attribute names starting with '[' appear to denote synthetic boolean
    // tests, printed as false/true branches -- TODO confirm the convention
    if (this.m_instances.attribute(this.m_splitAtt).name().charAt(0) != '[') {
      text.append(this.m_instances.attribute(this.m_splitAtt).name() + " <= " + Utils.doubleToString(this.m_splitValue, 1, 3) + " : ");
    } else {
      text.append(this.m_instances.attribute(this.m_splitAtt).name() + " false : ");
    }
    if (this.m_left != null) {
      text.append(this.m_left.treeToString(level + 1));
    } else {
      text.append("NULL\n");
    }
    for (i = 1; i <= level; i++) {
      text.append("|   ");
    }
    if (this.m_instances.attribute(this.m_splitAtt).name().charAt(0) != '[') {
      text.append(this.m_instances.attribute(this.m_splitAtt).name() + " >  " + Utils.doubleToString(this.m_splitValue, 1, 3) + " : ");
    } else {
      text.append(this.m_instances.attribute(this.m_splitAtt).name() + " true : ");
    }
    if (this.m_right != null) {
      text.append(this.m_right.treeToString(level + 1));
    } else {
      text.append("NULL\n");
    }
  } else {
    // leaf: LM number plus coverage and (when defined) relative RMSE
    text.append("LM" + this.m_leafModelNum);
    if (this.m_globalDeviation > 0.0) {
      text.append(" (" + this.m_numInstances + "/" + Utils.doubleToString((100.0 * this.m_rootMeanSquaredError / this.m_globalDeviation), 1, 3) + "%)\n");
    } else {
      text.append(" (" + this.m_numInstances + ")\n");
    }
  }
  return text.toString();
}
/**
 * Traverses the tree and installs linear models at each node. This method
 * must be called if pruning is not to be performed.
 *
 * @throws Exception
 *           if an error occurs
 */
public void installLinearModels() throws Exception {
  Evaluation nodeModelEval;
  if (this.m_isLeaf) {
    this.buildLinearModel(this.m_indices);
  } else {
    // install models bottom-up: children first, then this node
    if (this.m_left != null) {
      this.m_left.installLinearModels();
    }
    if (this.m_right != null) {
      this.m_right.installLinearModels();
    }
    this.buildLinearModel(this.m_indices);
  }
  // record the training-set RMSE of the freshly installed model
  nodeModelEval = new Evaluation(this.m_instances);
  nodeModelEval.evaluateModel(this.m_nodeModel, this.m_instances);
  this.m_rootMeanSquaredError = nodeModelEval.rootMeanSquaredError();
  // save space
  if (!this.m_saveInstances) {
    this.m_instances = new Instances(this.m_instances, 0);
  }
}
/**
 * Replaces each leaf's linear model with a smoothed model: the leaf's
 * coefficients and intercept are repeatedly blended with those of every
 * ancestor's model, weighting the model from below by the ancestor's
 * instance count n and the ancestor's own model by SMOOTHING_CONSTANT.
 *
 * @throws Exception
 *           if an error occurs
 */
public void installSmoothedModels() throws Exception {
  if (this.m_isLeaf) {
    double[] coefficients = new double[this.m_numAttributes];
    double intercept;
    double[] coeffsUsedByLinearModel = this.m_nodeModel.coefficients();
    RuleNode current = this;
    // prime array with leaf node coefficients
    for (int i = 0; i < coeffsUsedByLinearModel.length; i++) {
      if (i != this.m_classIndex) {
        coefficients[i] = coeffsUsedByLinearModel[i];
      }
    }
    // intercept
    intercept = this.m_nodeModel.intercept();
    // walk up the tree, smoothing against each ancestor in turn
    do {
      if (current.m_parent != null) {
        double n = current.m_numInstances;
        // contribution of the model below: down-weight by n / (n + k)
        for (int i = 0; i < coefficients.length; i++) {
          coefficients[i] = ((coefficients[i] * n) / (n + SMOOTHING_CONSTANT));
        }
        intercept = ((intercept * n) / (n + SMOOTHING_CONSTANT));
        // contribution of this model: weight by k / (n + k)
        coeffsUsedByLinearModel = current.m_parent.getModel().coefficients();
        for (int i = 0; i < coeffsUsedByLinearModel.length; i++) {
          if (i != this.m_classIndex) {
            // smooth in these coefficients (at this node)
            coefficients[i] += ((SMOOTHING_CONSTANT * coeffsUsedByLinearModel[i]) / (n + SMOOTHING_CONSTANT));
          }
        }
        // smooth in the intercept
        intercept += ((SMOOTHING_CONSTANT * current.m_parent.getModel().intercept()) / (n + SMOOTHING_CONSTANT));
        current = current.m_parent;
      }
    } while (current.m_parent != null);
    // install the fully smoothed model in place of the leaf's original one
    this.m_nodeModel = new PreConstructedLinearModel(coefficients, intercept);
    this.m_nodeModel.buildClassifier(this.m_instances);
  }
  if (this.m_left != null) {
    this.m_left.installSmoothedModels();
  }
  if (this.m_right != null) {
    this.m_right.installSmoothedModels();
  }
}
/**
 * Recursively prune the tree: each subtree is replaced by a single linear
 * model whenever the model's complexity-adjusted error is no worse than the
 * subtree's.
 *
 * @throws Exception
 *           if an error occurs
 */
public void prune() throws Exception {
  Evaluation nodeModelEval = null;
  if (this.m_isLeaf) {
    this.buildLinearModel(this.m_indices);
    nodeModelEval = new Evaluation(this.m_instances);
    // count the constant term as a parameter for a leaf
    // Evaluate the model
    nodeModelEval.evaluateModel(this.m_nodeModel, this.m_instances);
    this.m_rootMeanSquaredError = nodeModelEval.rootMeanSquaredError();
  } else {
    // Prune the left and right subtrees
    if (this.m_left != null) {
      this.m_left.prune();
    }
    if (this.m_right != null) {
      this.m_right.prune();
    }
    // candidate 1: a single linear model at this node
    this.buildLinearModel(this.m_indices);
    nodeModelEval = new Evaluation(this.m_instances);
    double rmsModel;
    double adjustedErrorModel;
    nodeModelEval.evaluateModel(this.m_nodeModel, this.m_instances);
    rmsModel = nodeModelEval.rootMeanSquaredError();
    // penalize the RMSE by the model's parameter count (plus the constant term)
    adjustedErrorModel = rmsModel * this.pruningFactor(this.m_numInstances, this.m_nodeModel.numParameters() + 1);
    // Evaluate this node (ie its left and right subtrees)
    Evaluation nodeEval = new Evaluation(this.m_instances);
    double rmsSubTree;
    double adjustedErrorNode;
    int l_params = 0, r_params = 0;
    nodeEval.evaluateModel(this, this.m_instances);
    rmsSubTree = nodeEval.rootMeanSquaredError();
    if (this.m_left != null) {
      l_params = this.m_left.numParameters();
    }
    if (this.m_right != null) {
      r_params = this.m_right.numParameters();
    }
    adjustedErrorNode = rmsSubTree * this.pruningFactor(this.m_numInstances, (l_params + r_params + 1));
    if ((adjustedErrorModel <= adjustedErrorNode) || (adjustedErrorModel < (this.m_globalDeviation * 0.00001))) {
      // Choose linear model for this node rather than subtree model
      this.m_isLeaf = true;
      this.m_right = null;
      this.m_left = null;
      this.m_numParameters = this.m_nodeModel.numParameters() + 1;
      this.m_rootMeanSquaredError = rmsModel;
    } else {
      this.m_numParameters = (l_params + r_params + 1);
      this.m_rootMeanSquaredError = rmsSubTree;
    }
  }
  // save space
  if (!this.m_saveInstances) {
    this.m_instances = new Instances(this.m_instances, 0);
  }
}
/**
 * Compute the pruning factor used to penalize a model's error by its
 * complexity relative to the number of training instances.
 *
 * @param num_instances
 *          number of instances
 * @param num_params
 *          number of parameters in the model
 * @return the pruning factor
 */
private double pruningFactor(final int num_instances, final int num_params) {
  return (num_instances <= num_params)
      ? 10.0 // Caution says Yong in his code
      : (num_instances + this.m_pruningMultiplier * num_params) / (num_instances - num_params);
}
/**
 * Find the leaf with greatest coverage in this subtree.
 *
 * @param maxCoverage
 *          single-element array holding the greatest coverage found so far
 * @param bestLeaf
 *          single-element array holding the leaf with the greatest coverage
 */
public void findBestLeaf(final double[] maxCoverage, final RuleNode[] bestLeaf) {
  if (this.m_isLeaf) {
    // leaf: record it if it covers more instances than the best so far
    if (this.m_numInstances > maxCoverage[0]) {
      maxCoverage[0] = this.m_numInstances;
      bestLeaf[0] = this;
    }
    return;
  }
  // interior node: recurse into both subtrees
  if (this.m_left != null) {
    this.m_left.findBestLeaf(maxCoverage, bestLeaf);
  }
  if (this.m_right != null) {
    this.m_right.findBestLeaf(maxCoverage, bestLeaf);
  }
}
/**
 * Collect every leaf in this subtree into the supplied list.
 *
 * @param v
 *          a single element array containing a list of leaves
 */
public void returnLeaves(final ArrayList<RuleNode>[] v) {
  if (this.m_isLeaf) {
    v[0].add(this);
    return;
  }
  if (this.m_left != null) {
    this.m_left.returnLeaves(v);
  }
  if (this.m_right != null) {
    this.m_right.returnLeaves(v);
  }
}
/**
 * Get the parent of this node (null for the root).
 *
 * @return the parent of this node
 */
public RuleNode parentNode() {
  return this.m_parent;
}
/**
 * Get the left child of this node (null for a leaf).
 *
 * @return the left child of this node
 */
public RuleNode leftNode() {
  return this.m_left;
}
/**
 * Get the right child of this node (null for a leaf).
 *
 * @return the right child of this node
 */
public RuleNode rightNode() {
  return this.m_right;
}
/**
 * Get the index of the splitting attribute for this node.
 *
 * @return the index of the splitting attribute
 */
public int splitAtt() {
  return this.m_splitAtt;
}
/**
 * Get the split point for this node.
 *
 * @return the split point for this node
 */
public double splitVal() {
  return this.m_splitValue;
}
/**
 * Get the number of linear models in the tree (one per leaf).
 *
 * @return the number of linear models
 */
public int numberOfLinearModels() {
  if (this.m_isLeaf) {
    return 1;
  }
  return this.m_left.numberOfLinearModels() + this.m_right.numberOfLinearModels();
}
/**
 * Return true if this node is a leaf.
 *
 * @return true if this node is a leaf
 */
public boolean isLeaf() {
  return this.m_isLeaf;
}
/**
 * Get the root mean squared error at this node (set during pruning or when
 * linear models are installed).
 *
 * @return the root mean squared error
 */
protected double rootMeanSquaredError() {
  return this.m_rootMeanSquaredError;
}
/**
 * Get the linear model at this node.
 *
 * @return the linear model at this node
 */
public PreConstructedLinearModel getModel() {
  return this.m_nodeModel;
}
/**
 * Return the number of instances that reach this node.
 *
 * @return the number of instances at this node.
 */
public int getNumInstances() {
  return this.m_numInstances;
}
/**
 * Get the number of parameters in the model chosen for this node (used by
 * the pruning calculation).
 *
 * @return the number of parameters in the model at this node
 */
private int numParameters() {
  return this.m_numParameters;
}
/**
 * Get the value of regressionTree.
 *
 * @return Value of regressionTree.
 */
public boolean getRegressionTree() {
  return this.m_regressionTree;
}
/**
 * Set the minimum number of instances to allow at a leaf node.
 *
 * @param minNum
 *          the minimum number of instances
 */
public void setMinNumInstances(final double minNum) {
  this.m_splitNum = minNum;
}
/**
 * Get the minimum number of instances to allow at a leaf node.
 *
 * @return a <code>double</code> value
 */
public double getMinNumInstances() {
  return this.m_splitNum;
}
/**
 * Set the value of regressionTree.
 *
 * @param newregressionTree
 *          Value to assign to regressionTree.
 */
public void setRegressionTree(final boolean newregressionTree) {
  this.m_regressionTree = newregressionTree;
}
/**
 * Print the linear model at every node in this subtree to standard output
 * (debugging purposes).
 */
public void printAllModels() {
  // every node prints its own model first, then interior nodes recurse
  System.out.println(this.m_nodeModel.toString());
  if (!this.m_isLeaf) {
    this.m_left.printAllModels();
    this.m_right.printAllModels();
  }
}
/**
 * Assigns a unique identifier to each node in the tree (pre-order: this
 * node, then left subtree, then right subtree).
 *
 * @param lastID
 *          last id number used
 * @return ID after processing child nodes
 */
protected int assignIDs(final int lastID) {
  int currLastID = lastID + 1;
  this.m_id = currLastID;
  // number the subtrees left-to-right after this node
  for (final RuleNode child : new RuleNode[] { this.m_left, this.m_right }) {
    if (child != null) {
      currLastID = child.assignIDs(currLastID);
    }
  }
  return currLastID;
}
/**
 * Assign a unique identifier to each node in the tree and then calls
 * graphTree to produce the dotty description.
 *
 * @param text
 *          a <code>StringBuffer</code> value to receive the description
 */
public void graph(final StringBuffer text) {
  // start at -1 so the root receives id 0 (assignIDs increments first)
  this.assignIDs(-1);
  this.graphTree(text);
}
/**
 * Return a dotty style string describing the tree: one node statement per
 * tree node (leaves as filled boxes labelled with their LM number) and one
 * edge per split branch labelled with the split condition.
 *
 * @param text
 *          a <code>StringBuffer</code> value receiving the description
 */
protected void graphTree(final StringBuffer text) {
  // node statement: leaves show "LM n (coverage/relative-RMSE%)",
  // interior nodes show the split attribute name
  text.append("N" + this.m_id + (this.m_isLeaf ? " [label=\"LM " + this.m_leafModelNum : " [label=\"" + Utils.backQuoteChars(this.m_instances.attribute(this.m_splitAtt).name()))
      + (this.m_isLeaf
          ? " (" + ((this.m_globalDeviation > 0.0) ? this.m_numInstances + "/" + Utils.doubleToString((100.0 * this.m_rootMeanSquaredError / this.m_globalDeviation), 1, 3) + "%)" : this.m_numInstances + ")")
          + "\" shape=box style=filled "
          : "\"")
      + (this.m_saveInstances ? "data=\n" + this.m_instances + "\n,\n" : "") + "]\n");
  // edges to the children, labelled with the branch condition
  if (this.m_left != null) {
    text.append("N" + this.m_id + "->" + "N" + this.m_left.m_id + " [label=\"<=" + Utils.doubleToString(this.m_splitValue, 1, 3) + "\"]\n");
    this.m_left.graphTree(text);
  }
  if (this.m_right != null) {
    text.append("N" + this.m_id + "->" + "N" + this.m_right.m_id + " [label=\">" + Utils.doubleToString(this.m_splitValue, 1, 3) + "\"]\n");
    this.m_right.graphTree(text);
  }
}
/**
 * Set whether to save instances for visualization purposes. Default is to save memory
 * (i.e. not retain the instances).
 *
 * @param save
 *          a <code>boolean</code> value
 */
protected void setSaveInstances(final boolean save) {
  this.m_saveInstances = save;
}
/**
 * Returns the revision string.
 *
 * @return the revision
 */
@Override
public String getRevision() {
  return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/m5/SplitEvaluate.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* SplitEvaluate.java
* Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.m5;
import weka.core.Instances;
/**
 * Interface for objects that determine a split point on an attribute
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision$
 */
public interface SplitEvaluate {

  /**
   * makes a copy of the SplitEvaluate object
   *
   * @return a copy of the object
   * @throws Exception if the copy cannot be made
   */
  SplitEvaluate copy () throws Exception;

  /**
   * Finds the best splitting point for an attribute in the instances.
   * The result is retrieved afterwards via {@link #maxImpurity()},
   * {@link #position()}, {@link #splitAttr()} and {@link #splitValue()}.
   *
   * @param attr the splitting attribute
   * @param inst the instances
   * @exception Exception if something goes wrong
   */
  void attrSplit (int attr, Instances inst) throws Exception;

  /**
   * Returns the impurity of this split
   *
   * @return the impurity of this split
   */
  double maxImpurity();

  /**
   * Returns the position of the split in the sorted values. -1 indicates that
   * a split could not be found.
   *
   * @return an <code>int</code> value
   */
  int position();

  /**
   * Returns the attribute used in this split
   *
   * @return the attribute used in this split
   */
  int splitAttr();

  /**
   * Returns the split value
   *
   * @return the split value
   */
  double splitValue();
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/m5/Values.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Values.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.m5;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
/**
* Stores some statistics.
* @author Yong Wang (yongwang@cs.waikato.ac.nz)
* @version $Revision$
*/
public final class Values
implements RevisionHandler {
int numInstances; // number of the instances
int missingInstances; // number of the instances with missing values
int first; // index of the first instance
int last; // index of the last instance
int attr; // attribute
double sum; // sum of the instances for attribute
double sqrSum; // squared sum of the instances for attribute
double va; // variance
double sd; // standard deviation
/**
* Constructs an object which stores some statistics of the instances such
* as sum, squared sum, variance, standard deviation
* @param low the index of the first instance
* @param high the index of the last instance
* @param attribute the attribute
* @param inst the instances
*/
public Values(int low,int high,int attribute,Instances inst){
int i,count=0;
double value;
numInstances = high-low+1;
missingInstances = 0;
first = low;
last = high;
attr = attribute;
sum=0.0;
sqrSum=0.0;
for(i=first;i<=last;i++){
if(inst.instance(i).isMissing(attr)==false){
count++;
value = inst.instance(i).value(attr);
sum += value;
sqrSum += value * value;
}
if(count >1){
va = (sqrSum - sum * sum/count)/count;
va = Math.abs(va);
sd = Math.sqrt(va);
}
else {va = 0.0; sd = 0.0;}
}
}
/**
* Converts the stats to a string
* @return the converted string
*/
public final String toString(){
StringBuffer text = new StringBuffer();
text.append("Print statistic values of instances (" + first + "-" + last +
"\n");
text.append(" Number of instances:\t" + numInstances + "\n");
text.append(" NUmber of instances with unknowns:\t" + missingInstances +
"\n");
text.append(" Attribute:\t\t\t:" + attr + "\n");
text.append(" Sum:\t\t\t" + sum + "\n");
text.append(" Squared sum:\t\t" + sqrSum + "\n");
text.append(" Stanard Deviation:\t\t" + sd + "\n");
return text.toString();
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/trees/m5/YongSplitInfo.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* YongSplitInfo.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees.m5;
import java.io.Serializable;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
 * Stores split information.
 *
 * @author Yong Wang (yongwang@cs.waikato.ac.nz)
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision$
 */
public final class YongSplitInfo implements Cloneable, Serializable,
  SplitEvaluate, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = 1864267581079767881L;

  private int number;         // number of total instances
  private int first;          // first instance index
  private int last;           // last instance index
  private int position;       // position of maximum impurity reduction
  private double maxImpurity; // maximum impurity reduction
  private double leftAve;     // left average class value
  private double rightAve;    // right average class value
  private int splitAttr;      // spliting attribute
  private double splitValue;  // splitting value

  /**
   * Constructs an object which contains the split information
   *
   * @param low the index of the first instance
   * @param high the index of the last instance
   * @param attr an attribute
   */
  public YongSplitInfo(int low, int high, int attr) {
    // Delegate to initialize, which sets exactly the same state.
    this.initialize(low, high, attr);
  }

  /**
   * Makes a copy of this SplitInfo object
   *
   * @return a shallow copy of this object
   * @throws Exception if cloning fails
   */
  @Override
  public final SplitEvaluate copy() throws Exception {
    return (YongSplitInfo) this.clone();
  }

  /**
   * Resets the object of split information
   *
   * @param low the index of the first instance
   * @param high the index of the last instance
   * @param attr the attribute
   */
  public final void initialize(int low, int high, int attr) {
    number = high - low + 1;
    first = low;
    last = high;
    position = -1;            // -1 marks "no split found yet"
    maxImpurity = -1.e20;     // sentinel smaller than any real impurity
    splitAttr = attr;         // attr < 0 is an empty object
    splitValue = 0.0;
  }

  /**
   * Converts the spliting information to string
   *
   * @param inst the instances
   * @return a human-readable description of this split
   */
  public final String toString(Instances inst) {
    StringBuffer buf = new StringBuffer();
    buf.append("Print SplitInfo:\n");
    buf.append("    Instances:\t\t" + number + " (" + first + "-" + position
      + "," + (position + 1) + "-" + last + ")\n");
    buf.append("    Maximum Impurity Reduction:\t"
      + Utils.doubleToString(maxImpurity, 1, 4) + "\n");
    buf.append("    Left average:\t" + leftAve + "\n");
    buf.append("    Right average:\t" + rightAve + "\n");
    if (maxImpurity > 0.0) {
      buf.append("    Splitting function:\t"
        + inst.attribute(splitAttr).name() + " = " + splitValue + "\n");
    } else {
      buf.append("    Splitting function:\tnull\n");
    }
    return buf.toString();
  }

  /**
   * Finds the best splitting point for an attribute in the instances
   *
   * @param attr the splitting attribute
   * @param inst the instances
   * @exception Exception if something goes wrong
   */
  @Override
  public final void attrSplit(int attr, Instances inst) throws Exception {
    int low = 0;
    int high = inst.numInstances() - 1;
    this.initialize(low, high, attr);
    // Too few instances to consider splitting at all.
    if (number < 4) {
      return;
    }
    // Keep at least a fifth of the instances on each side of any candidate
    // split (minimum of one instance).
    int margin = ((high - low + 1) < 5) ? 1 : (high - low + 1) / 5;
    position = low;
    Impurity imp = new Impurity(low + margin - 1, attr, inst, 5);
    for (int i = low + margin; i <= high - margin - 1; i++) {
      imp.incremental(inst.instance(i).classValue(), 1);
      // Only evaluate a split between two distinct attribute values.
      boolean onBoundary = Utils.eq(inst.instance(i + 1).value(attr),
        inst.instance(i).value(attr)) == false;
      if (onBoundary && imp.impurity > maxImpurity) {
        maxImpurity = imp.impurity;
        splitValue = (inst.instance(i).value(attr) + inst.instance(i + 1)
          .value(attr)) * 0.5;
        leftAve = imp.sl / imp.nl;
        rightAve = imp.sr / imp.nr;
        position = i;
      }
    }
  }

  /**
   * Returns the impurity of this split
   *
   * @return the impurity of this split
   */
  @Override
  public double maxImpurity() {
    return maxImpurity;
  }

  /**
   * Returns the attribute used in this split
   *
   * @return the attribute used in this split
   */
  @Override
  public int splitAttr() {
    return splitAttr;
  }

  /**
   * Returns the position of the split in the sorted values. -1 indicates that a
   * split could not be found.
   *
   * @return an <code>int</code> value
   */
  @Override
  public int position() {
    return position;
  }

  /**
   * Returns the split value
   *
   * @return the split value
   */
  @Override
  public double splitValue() {
    return splitValue;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/classifiers/xml/XMLClassifier.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* XMLClassifier.java
* Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
*/
package weka.classifiers.xml;
import weka.core.RevisionUtils;
import weka.core.xml.XMLBasicSerialization;
/**
 * This class serializes and deserializes a Classifier instance to and
 * from XML.<br>
 *
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision$
 */
public class XMLClassifier
  extends XMLBasicSerialization {

  /**
   * initializes the serialization
   *
   * @throws Exception if initialization fails
   */
  public XMLClassifier() throws Exception {
    super();
  }

  /**
   * generates internally a new XML document and clears also the IgnoreList and
   * the mappings for the Read/Write-Methods
   *
   * @throws Exception if clearing fails
   */
  public void clear() throws Exception {
    super.clear();

    // allow: only these Classifier properties are serialized explicitly
    m_Properties.addAllowed(weka.classifiers.Classifier.class, "debug");
    m_Properties.addAllowed(weka.classifiers.Classifier.class, "options");
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/clusterers/AbstractClusterer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* AbstractClusterer.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.clusterers;
import weka.core.Capabilities;
import weka.core.CapabilitiesHandler;
import weka.core.CapabilitiesIgnorer;
import weka.core.CommandlineRunnable;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.SerializedObject;
import weka.core.Utils;
import java.io.Serializable;
import java.util.Enumeration;
import java.util.Vector;
/**
 * Abstract clusterer.
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision$
 */
public abstract class AbstractClusterer
  implements Clusterer, Cloneable, Serializable, CapabilitiesHandler,
  RevisionHandler, OptionHandler, CapabilitiesIgnorer, CommandlineRunnable {

  /** for serialization */
  private static final long serialVersionUID = -6099962589663877632L;

  /** Whether the clusterer is run in debug mode. */
  protected boolean m_Debug = false;

  /** Whether capabilities should not be checked before clusterer is built. */
  protected boolean m_DoNotCheckCapabilities = false;

  // ===============
  // Public methods.
  // ===============

  /**
   * Generates a clusterer. Has to initialize all fields of the clusterer that
   * are not being set via options.
   *
   * @param data set of instances serving as training data
   * @exception Exception if the clusterer has not been generated successfully
   */
  @Override
  public abstract void buildClusterer(Instances data) throws Exception;

  /**
   * Classifies a given instance. Either this or distributionForInstance() needs
   * to be implemented by subclasses.
   *
   * @param instance the instance to be assigned to a cluster
   * @return the number of the assigned cluster as an integer
   * @exception Exception if instance could not be clustered successfully
   */
  @Override
  public int clusterInstance(Instance instance) throws Exception {

    double[] dist = distributionForInstance(instance);

    if (dist == null) {
      throw new Exception("Null distribution predicted");
    }

    if (Utils.sum(dist) <= 0) {
      throw new Exception("Unable to cluster instance");
    }
    return Utils.maxIndex(dist);
  }

  /**
   * Predicts the cluster memberships for a given instance. Either this or
   * clusterInstance() needs to be implemented by subclasses.
   *
   * @param instance the instance to be assigned a cluster.
   * @return an array containing the estimated membership probabilities of the
   *         test instance in each cluster (this should sum to at most 1)
   * @exception Exception if distribution could not be computed successfully
   */
  @Override
  public double[] distributionForInstance(Instance instance) throws Exception {

    // Default implementation: a point distribution on the predicted cluster.
    double[] d = new double[numberOfClusters()];

    d[clusterInstance(instance)] = 1.0;

    return d;
  }

  /**
   * Returns the number of clusters.
   *
   * @return the number of clusters generated for a training dataset.
   * @exception Exception if number of clusters could not be returned
   *              successfully
   */
  @Override
  public abstract int numberOfClusters() throws Exception;

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> newVector = Option
      .listOptionsForClassHierarchy(this.getClass(), AbstractClusterer.class);
    newVector.addElement(new Option(
      "\tIf set, clusterer is run in debug mode and\n"
        + "\tmay output additional info to the console",
      "output-debug-info", 0, "-output-debug-info"));
    // Fix: the option name must not carry the leading dash ("do-not-..."),
    // consistent with "output-debug-info" above and with the flag name
    // looked up via Utils.getFlag in setOptions; only the synopsis (last
    // argument) carries the dash.
    newVector.addElement(new Option(
      "\tIf set, clusterer capabilities are not checked before clusterer is built\n"
        + "\t(use with caution).",
      "do-not-check-capabilities", 0, "-do-not-check-capabilities"));

    return newVector.elements();
  }

  /**
   * Parses a given list of options. Valid options are:
   * <p>
   *
   * -D <br>
   * If set, clusterer is run in debug mode and may output additional info to
   * the console.
   * <p>
   *
   * -do-not-check-capabilities <br>
   * If set, clusterer capabilities are not checked before clusterer is built
   * (use with caution).
   * <p>
   *
   * @param options the list of options as an array of strings
   * @exception Exception if an option is not supported
   */
  @Override
  public void setOptions(String[] options) throws Exception {
    Option.setOptionsForHierarchy(options, this, AbstractClusterer.class);
    setDebug(Utils.getFlag("output-debug-info", options));
    setDoNotCheckCapabilities(
      Utils.getFlag("do-not-check-capabilities", options));
  }

  /**
   * Set debugging mode.
   *
   * @param debug true if debug output should be printed
   */
  public void setDebug(boolean debug) {
    m_Debug = debug;
  }

  /**
   * Get whether debugging is turned on.
   *
   * @return true if debugging output is on
   */
  public boolean getDebug() {
    return m_Debug;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String debugTipText() {
    return "If set to true, clusterer may output additional info to "
      + "the console.";
  }

  /**
   * Set whether not to check capabilities.
   *
   * @param doNotCheckCapabilities true if capabilities are not to be checked.
   */
  public void setDoNotCheckCapabilities(boolean doNotCheckCapabilities) {
    m_DoNotCheckCapabilities = doNotCheckCapabilities;
  }

  /**
   * Get whether capabilities checking is turned off.
   *
   * @return true if capabilities checking is turned off.
   */
  public boolean getDoNotCheckCapabilities() {
    return m_DoNotCheckCapabilities;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String doNotCheckCapabilitiesTipText() {
    return "If set, clusterer capabilities are not checked before clusterer is built"
      + " (Use with caution to reduce runtime).";
  }

  /**
   * Gets the current settings of the clusterer.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {
    Vector<String> options = new Vector<String>();
    for (String s : Option.getOptionsForHierarchy(this,
      AbstractClusterer.class)) {
      options.add(s);
    }

    if (getDebug()) {
      options.add("-output-debug-info");
    }
    if (getDoNotCheckCapabilities()) {
      options.add("-do-not-check-capabilities");
    }

    return options.toArray(new String[0]);
  }

  /**
   * Creates a new instance of a clusterer given it's class name and (optional)
   * arguments to pass to it's setOptions method. If the clusterer implements
   * OptionHandler and the options parameter is non-null, the clusterer will
   * have it's options set.
   *
   * @param clustererName the fully qualified class name of the clusterer
   * @param options an array of options suitable for passing to setOptions. May
   *          be null.
   * @return the newly created search object, ready for use.
   * @exception Exception if the clusterer class name is invalid, or the options
   *              supplied are not acceptable to the clusterer.
   */
  public static Clusterer forName(String clustererName, String[] options)
    throws Exception {
    return (Clusterer) Utils.forName(Clusterer.class, clustererName, options);
  }

  /**
   * Creates a deep copy of the given clusterer using serialization.
   *
   * @param model the clusterer to copy
   * @return a deep copy of the clusterer
   * @exception Exception if an error occurs
   */
  public static Clusterer makeCopy(Clusterer model) throws Exception {
    return (Clusterer) new SerializedObject(model).getObject();
  }

  /**
   * Creates copies of the current clusterer. Note that this method now uses
   * Serialization to perform a deep copy, so the Clusterer object must be fully
   * Serializable. Any currently built model will now be copied as well.
   *
   * @param model an example clusterer to copy
   * @param num the number of clusterer copies to create.
   * @return an array of clusterers.
   * @exception Exception if an error occurs
   */
  public static Clusterer[] makeCopies(Clusterer model, int num)
    throws Exception {
    if (model == null) {
      throw new Exception("No model clusterer set");
    }
    Clusterer[] clusterers = new Clusterer[num];
    // Serialize once, deserialize num times.
    SerializedObject so = new SerializedObject(model);
    for (int i = 0; i < clusterers.length; i++) {
      clusterers[i] = (Clusterer) so.getObject();
    }
    return clusterers;
  }

  /**
   * Returns the Capabilities of this clusterer. Derived clusterers have to
   * override this method to enable capabilities.
   *
   * @return the capabilities of this object
   * @see Capabilities
   */
  @Override
  public Capabilities getCapabilities() {
    Capabilities result;

    result = new Capabilities(this);
    result.enableAll();

    return result;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }

  /**
   * runs the clusterer instance with the given options.
   *
   * @param clusterer the clusterer to run
   * @param options the commandline options
   */
  public static void runClusterer(Clusterer clusterer, String[] options) {
    try {
      if (clusterer instanceof CommandlineRunnable) {
        ((CommandlineRunnable) clusterer).preExecution();
      }
      System.out
        .println(ClusterEvaluation.evaluateClusterer(clusterer, options));
    } catch (Exception e) {
      // A message containing "General options" signals a usage/help request,
      // which is printed without a stack trace.
      if ((e.getMessage() == null) || ((e.getMessage() != null)
        && (e.getMessage().indexOf("General options") == -1))) {
        e.printStackTrace();
      } else {
        System.err.println(e.getMessage());
      }
    }
    try {
      if (clusterer instanceof CommandlineRunnable) {
        ((CommandlineRunnable) clusterer).postExecution();
      }
    } catch (Exception ex) {
      ex.printStackTrace();
    }
  }

  /**
   * Perform any setup stuff that might need to happen before commandline
   * execution. Subclasses should override if they need to do something here
   *
   * @throws Exception if a problem occurs during setup
   */
  @Override
  public void preExecution() throws Exception {
  }

  /**
   * Execute the supplied object. Subclasses need to override this method.
   *
   * @param toRun the object to execute
   * @param options any options to pass to the object
   * @throws Exception if the object if a problem occurs
   */
  @Override
  public void run(Object toRun, String[] options) throws Exception {
    if (!(toRun instanceof Clusterer)) {
      throw new IllegalArgumentException(
        "Object to execute is not a Clusterer!");
    }
    runClusterer((Clusterer) toRun, options);
  }

  /**
   * Perform any teardown stuff that might need to happen after execution.
   * Subclasses should override if they need to do something here
   *
   * @throws Exception if a problem occurs during teardown
   */
  @Override
  public void postExecution() throws Exception {
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/clusterers/AbstractDensityBasedClusterer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* AbstractDensityBasedClusterer.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.clusterers;
import weka.core.Instance;
import weka.core.SerializedObject;
import weka.core.Utils;
/**
* Abstract clustering model that produces (for each test instance)
* an estimate of the membership in each cluster
* (ie. a probability distribution).
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public abstract class AbstractDensityBasedClusterer
extends AbstractClusterer implements DensityBasedClusterer {
/** for serialization. */
private static final long serialVersionUID = -5950728041704213845L;
// ===============
// Public methods.
// ===============
/**
* Returns the prior probability of each cluster.
*
* @return the prior probability for each cluster
* @exception Exception if priors could not be
* returned successfully
*/
public abstract double[] clusterPriors()
throws Exception;
/**
* Computes the log of the conditional density (per cluster) for a given instance.
*
* @param instance the instance to compute the density for
* @return an array containing the estimated densities
* @exception Exception if the density could not be computed
* successfully
*/
public abstract double[] logDensityPerClusterForInstance(Instance instance)
throws Exception;
/**
* Computes the density for a given instance.
*
* @param instance the instance to compute the density for
* @return the density.
* @exception Exception if the density could not be computed successfully
*/
public double logDensityForInstance(Instance instance) throws Exception {
double[] a = logJointDensitiesForInstance(instance);
double max = a[Utils.maxIndex(a)];
double sum = 0.0;
for(int i = 0; i < a.length; i++) {
sum += Math.exp(a[i] - max);
}
return max + Math.log(sum);
}
/**
* Returns the cluster probability distribution for an instance.
*
* @param instance the instance to be clustered
* @return the probability distribution
* @throws Exception if computation fails
*/
public double[] distributionForInstance(Instance instance) throws Exception {
return Utils.logs2probs(logJointDensitiesForInstance(instance));
}
/**
* Returns the logs of the joint densities for a given instance.
*
* @param inst the instance
* @return the array of values
* @exception Exception if values could not be computed
*/
public double[] logJointDensitiesForInstance(Instance inst)
throws Exception {
double[] weights = logDensityPerClusterForInstance(inst);
double[] priors = clusterPriors();
for (int i = 0; i < weights.length; i++) {
if (priors[i] > 0) {
weights[i] += Math.log(priors[i]);
} else {
throw new IllegalArgumentException("Cluster empty!");
}
}
return weights;
}
/**
* Creates copies of the current clusterer. Note that this method
* now uses Serialization to perform a deep copy, so the Clusterer
* object must be fully Serializable. Any currently built model will
* now be copied as well.
*
* @param model an example clusterer to copy
* @param num the number of clusterer copies to create.
* @return an array of clusterers.
* @exception Exception if an error occurs
*/
public static DensityBasedClusterer [] makeCopies(DensityBasedClusterer model,
int num) throws Exception {
if (model == null) {
throw new Exception("No model clusterer set");
}
DensityBasedClusterer [] clusterers = new DensityBasedClusterer [num];
SerializedObject so = new SerializedObject(model);
for(int i = 0; i < clusterers.length; i++) {
clusterers[i] = (DensityBasedClusterer) so.getObject();
}
return clusterers;
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/clusterers/Canopy.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Canopy.java
* Copyright (C) 2014 University of Waikato, Hamilton, New Zealand
*
*/
package weka.clusterers;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Random;
import java.util.Vector;
import weka.classifiers.rules.DecisionTableHashKey;
import weka.core.AttributeStats;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.DenseInstance;
import weka.core.EuclideanDistance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.NormalizableDistance;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.SparseInstance;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.ReplaceMissingValues;
/**
<!-- globalinfo-start -->
 * Cluster data using the canopy clustering algorithm, which requires just one pass over the data. Can run in either batch or incremental mode. Results are generally not as good when running incrementally as the min/max for each numeric attribute is not known in advance. Has a heuristic (based on attribute std. deviations), that can be used in batch mode, for setting the T2 distance. The T2 distance determines how many canopies (clusters) are formed. When the user specifies a specific number (N) of clusters to generate, the algorithm will return the top N canopies (as determined by T2 density) when N < number of canopies (this applies to both batch and incremental learning); when N > number of canopies, the difference is made up by selecting training instances randomly (this can only be done when batch training). For more information see:<br/>
* <br/>
* A. McCallum, K. Nigam, L.H. Ungar: Efficient Clustering of High Dimensional Data Sets with Application to Reference Matching. In: Proceedings of the sixth ACM SIGKDD internation conference on knowledge discovery and data mining ACM-SIAM symposium on Discrete algorithms, 169-178, 2000.
* <p/>
<!-- globalinfo-end -->
*
<!-- technical-bibtex-start -->
* BibTeX:
* <pre>
* @inproceedings{McCallum2000,
* author = {A. McCallum and K. Nigam and L.H. Ungar},
* booktitle = {Proceedings of the sixth ACM SIGKDD internation conference on knowledge discovery and data mining ACM-SIAM symposium on Discrete algorithms},
* pages = {169-178},
* title = {Efficient Clustering of High Dimensional Data Sets with Application to Reference Matching},
* year = {2000}
* }
* </pre>
* <p/>
<!-- technical-bibtex-end -->
*
<!-- options-start -->
* Valid options are: <p/>
*
* <pre> -N <num>
* Number of clusters.
* (default 2).</pre>
*
* <pre> -max-candidates <num>
* Maximum number of candidate canopies to retain in memory
* at any one time. T2 distance plus, data characteristics,
* will determine how many candidate canopies are formed before
* periodic and final pruning are performed, which might result
* in exceess memory consumption. This setting avoids large numbers
* of candidate canopies consuming memory. (default = 100)</pre>
*
* <pre> -periodic-pruning <num>
* How often to prune low density canopies.
* (default = every 10,000 training instances)</pre>
*
* <pre> -min-density
* Minimum canopy density, below which a canopy will be pruned
* during periodic pruning. (default = 2 instances)</pre>
*
* <pre> -t2
* The T2 distance to use. Values < 0 indicate that
* a heuristic based on attribute std. deviation should be used to set this.
* Note that this heuristic can only be used when batch training
* (default = -1.0)</pre>
*
* <pre> -t1
* The T1 distance to use. A value < 0 is taken as a
* positive multiplier for T2. (default = -1.5)</pre>
*
* <pre> -M
* Don't replace missing values with mean/mode when running in batch mode.
* </pre>
*
* <pre> -S <num>
* Random number seed.
* (default 1)</pre>
*
* <pre> -output-debug-info
* If set, clusterer is run in debug mode and
* may output additional info to the console</pre>
*
* <pre> -do-not-check-capabilities
* If set, clusterer capabilities are not checked before clusterer is built
* (use with caution).</pre>
*
<!-- options-end -->
*
* @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
* @version $Revision$
*/
public class Canopy extends RandomizableClusterer implements UpdateableClusterer, NumberOfClustersRequestable, OptionHandler, TechnicalInformationHandler {
/** For serialization */
private static final long serialVersionUID = 2067574593448223334L;
/** The canopy centers */
protected Instances m_canopies;
/** The T2 density of each canopy */
protected List<double[]> m_canopyT2Density;
protected List<double[][]> m_canopyCenters;
protected List<double[]> m_canopyNumMissingForNumerics;
/**
* The list of canopies that each canopy is a member of (according to the T1
* radius, which can overlap). Each bit position in the long values
* corresponds to one canopy. Outer list order corresponds to the order of the
* instances that store the actual canopy centers
*/
protected List<long[]> m_clusterCanopies;
public static final double DEFAULT_T2 = -1.0;
public static final double DEFAULT_T1 = -1.25;
/** < 0 means use the heuristic based on std. dev. to set the t2 radius */
protected double m_userT2 = DEFAULT_T2;
/**
* < 0 indicates the multiplier to use for T2 when setting T1, otherwise the
* value is take as is
*/
protected double m_userT1 = DEFAULT_T1;
/** Outer radius */
protected double m_t1 = this.m_userT1;
/** Inner radius */
protected double m_t2 = this.m_userT2;
/**
* Prune low-density candidate canopies after every x instances have been seen
*/
protected int m_periodicPruningRate = 10000;
/**
* The minimum cluster density (according to T2 distance) allowed. Used when
* periodically pruning candidate canopies
*/
protected double m_minClusterDensity = 2;
/** The maximum number of candidate canopies to hold in memory at any one time */
protected int m_maxCanopyCandidates = 100;
/**
* True if the pruning operation did remove at least one low density canopy
* the last time it was invoked
*/
protected boolean m_didPruneLastTime = true;
/** Number of training instances seen so far */
protected int m_instanceCount;
/**
* Default is to let the t2 radius determine how many canopies/clusters are
* formed
*/
protected int m_numClustersRequested = -1;
/**
* If not null, then this is expected to be a filter that can replace missing
* values immediately (at training and testing time)
*/
protected Filter m_missingValuesReplacer;
/**
* Replace missing values globally when running in batch mode?
*/
protected boolean m_dontReplaceMissing = false;
/** The distance function to use */
protected NormalizableDistance m_distanceFunction = new EuclideanDistance();
/**
* Used to pad out number of cluster centers if fewer canopies are generated
* than the number of requested clusters and we are running in batch mode.
*/
protected Instances m_trainingData;
/**
* Returns a string describing this clusterer.
*
* @return a description of the evaluator suitable for displaying in the
* explorer/experimenter gui
*/
public String globalInfo() {
return "Cluster data using the capopy clustering algorithm, which requires just " + "one pass over the data. Can run in either" + "batch or incremental mode. Results are generally not as good when "
+ "running incrementally as the min/max for each numeric attribute is not " + "known in advance. Has a heuristic (based on attribute std. deviations), "
+ "that can be used in batch mode, for setting the T2 distance. The T2 distance " + "determines how many canopies (clusters) are formed. When the user specifies "
+ "a specific number (N) of clusters to generate, the algorithm will return the " + "top N canopies (as determined by T2 density) when N < number of canopies " + "(this applies to both batch and incremental learning); "
+ "when N > number of canopies, the difference is made up by selecting training " + "instances randomly (this can only be done when batch training). For more " + "information see:\n\n"
+ this.getTechnicalInformation().toString();
}
@Override
public TechnicalInformation getTechnicalInformation() {
TechnicalInformation result;
result = new TechnicalInformation(Type.INPROCEEDINGS);
result.setValue(Field.AUTHOR, "A. McCallum and K. Nigam and L.H. Ungar");
result.setValue(Field.TITLE, "Efficient Clustering of High Dimensional Data Sets with Application to Reference Matching");
result.setValue(Field.BOOKTITLE, "Proceedings of the sixth ACM SIGKDD internation conference on " + "knowledge discovery and data mining " + "ACM-SIAM symposium on Discrete algorithms");
result.setValue(Field.YEAR, "2000");
result.setValue(Field.PAGES, "169-178");
return result;
}
/**
* Returns default capabilities of the clusterer.
*
* @return the capabilities of this clusterer
*/
@Override
public Capabilities getCapabilities() {
Capabilities result = super.getCapabilities();
result.disableAll();
result.enable(Capability.NO_CLASS);
// attributes
result.enable(Capability.NOMINAL_ATTRIBUTES);
result.enable(Capability.NUMERIC_ATTRIBUTES);
result.enable(Capability.MISSING_VALUES);
return result;
}
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> result = new Vector<Option>();
result.addElement(new Option("\tNumber of clusters.\n" + "\t(default 2).", "N", 1, "-N <num>"));
result.addElement(new Option(
"\tMaximum number of candidate canopies to retain in memory\n\t" + "at any one time. T2 distance plus, data characteristics,\n\t" + "will determine how many candidate canopies are formed before\n\t"
+ "periodic and final pruning are performed, which might result\n\t" + "in exceess memory consumption. This setting avoids large numbers\n\t" + "of candidate canopies consuming memory. (default = 100)",
"-max-candidates", 1, "-max-candidates <num>"));
result.addElement(new Option("\tHow often to prune low density canopies. \n\t" + "(default = every 10,000 training instances)", "periodic-pruning", 1, "-periodic-pruning <num>"));
result.addElement(new Option("\tMinimum canopy density, below which a canopy will be pruned\n\t" + "during periodic pruning. (default = 2 instances)", "min-density", 1, "-min-density"));
result.addElement(new Option("\tThe T2 distance to use. Values < 0 indicate that\n\t" + "a heuristic based on attribute std. deviation should be used to set this.\n\t"
+ "Note that this heuristic can only be used when batch training\n\t" + "(default = -1.0)", "t2", 1, "-t2"));
result.addElement(new Option("\tThe T1 distance to use. A value < 0 is taken as a\n\t" + "positive multiplier for T2. (default = -1.5)", "t1", 1, "-t1"));
result.addElement(new Option("\tDon't replace missing values with mean/mode when " + "running in batch mode.\n", "M", 0, "-M"));
result.addAll(Collections.list(super.listOptions()));
return result.elements();
}
/**
* Parses a given list of options.
* <p/>
*
<!-- options-start -->
* Valid options are: <p/>
*
* <pre> -N <num>
* Number of clusters.
* (default 2).</pre>
*
* <pre> -max-candidates <num>
* Maximum number of candidate canopies to retain in memory
* at any one time. T2 distance plus, data characteristics,
* will determine how many candidate canopies are formed before
* periodic and final pruning are performed, which might result
* in exceess memory consumption. This setting avoids large numbers
* of candidate canopies consuming memory. (default = 100)</pre>
*
* <pre> -periodic-pruning <num>
* How often to prune low density canopies.
* (default = every 10,000 training instances)</pre>
*
* <pre> -min-density
* Minimum canopy density, below which a canopy will be pruned
* during periodic pruning. (default = 2 instances)</pre>
*
* <pre> -t2
* The T2 distance to use. Values < 0 indicate that
* a heuristic based on attribute std. deviation should be used to set this.
* Note that this heuristic can only be used when batch training
* (default = -1.0)</pre>
*
* <pre> -t1
* The T1 distance to use. A value < 0 is taken as a
* positive multiplier for T2. (default = -1.5)</pre>
*
* <pre> -M
* Don't replace missing values with mean/mode when running in batch mode.
* </pre>
*
* <pre> -S <num>
* Random number seed.
* (default 1)</pre>
*
* <pre> -output-debug-info
* If set, clusterer is run in debug mode and
* may output additional info to the console</pre>
*
* <pre> -do-not-check-capabilities
* If set, clusterer capabilities are not checked before clusterer is built
* (use with caution).</pre>
*
<!-- options-end -->
*
* @param options the list of options as an array of strings throws Exception
* if an option is not supported
*/
@Override
public void setOptions(final String[] options) throws Exception {
String temp = Utils.getOption('N', options);
if (temp.length() > 0) {
this.setNumClusters(Integer.parseInt(temp));
}
temp = Utils.getOption("max-candidates", options);
if (temp.length() > 0) {
this.setMaxNumCandidateCanopiesToHoldInMemory(Integer.parseInt(temp));
}
temp = Utils.getOption("periodic-pruning", options);
if (temp.length() > 0) {
this.setPeriodicPruningRate(Integer.parseInt(temp));
}
temp = Utils.getOption("min-density", options);
if (temp.length() > 0) {
this.setMinimumCanopyDensity(Double.parseDouble(temp));
}
temp = Utils.getOption("t2", options);
if (temp.length() > 0) {
this.setT2(Double.parseDouble(temp));
}
temp = Utils.getOption("t1", options);
if (temp.length() > 0) {
this.setT1(Double.parseDouble(temp));
}
this.setDontReplaceMissingValues(Utils.getFlag('M', options));
super.setOptions(options);
}
/**
* Gets the current settings of Canopy.
*
* @return an array of strings suitable for passing to setOptions()
*/
@Override
public String[] getOptions() {
Vector<String> result = new Vector<String>();
result.add("-N");
result.add("" + this.getNumClusters());
result.add("-max-candidates");
result.add("" + this.getMaxNumCandidateCanopiesToHoldInMemory());
result.add("-periodic-pruning");
result.add("" + this.getPeriodicPruningRate());
result.add("-min-density");
result.add("" + this.getMinimumCanopyDensity());
result.add("-t2");
result.add("" + this.getT2());
result.add("-t1");
result.add("" + this.getT1());
if (this.getDontReplaceMissingValues()) {
result.add("-M");
}
Collections.addAll(result, super.getOptions());
return result.toArray(new String[result.size()]);
}
/**
* Tests if two sets of canopies have a non-empty intersection
*
* @param first the first canopy set
* @param second the second canopy set
* @return true if the intersection is non-empty
* @throws Exception if a problem occurs
*/
public static boolean nonEmptyCanopySetIntersection(final long[] first, final long[] second) throws Exception {
if (first.length != second.length) {
throw new Exception("Canopy lists need to be the same length");
}
if (first.length == 0 || second.length == 0) {
return false;
}
for (int i = 0; i < first.length; i++) {
long firstBlock = first[i];
long secondBlock = second[i];
if ((firstBlock & secondBlock) != 0L) {
return true;
}
}
return false;
}
private static void updateCanopyAssignment(final long[] assigned, final int toAssign) {
int whichLong = toAssign / 64;
int whichBitPosition = toAssign % 64;
long mask = 1L << whichBitPosition;
assigned[whichLong] |= mask;
}
/**
* Uses T1 distance to assign canopies to the supplied instance. If the
* instance does not fall within T1 distance of any canopies then the instance
* has the closest canopy assigned to it.
*
* @param inst the instance to find covering canopies for
* @return a set of canopies that contain this instance according to T1
* distance
* @throws Exception if a problem occurs
*/
public long[] assignCanopies(Instance inst) throws Exception {
if (this.m_missingValuesReplacer != null) {
this.m_missingValuesReplacer.input(inst);
inst = this.m_missingValuesReplacer.output();
}
int numLongs = this.m_canopies.size() / 64 + 1;
long[] assigned = new long[numLongs];
double minDist = Double.MAX_VALUE;
double bitsSet = 0;
int index = -1;
for (int i = 0; i < this.m_canopies.numInstances(); i++) {
double dist = this.m_distanceFunction.distance(inst, this.m_canopies.instance(i));
if (dist < minDist) {
minDist = dist;
index = i;
}
if (dist < this.m_t1) {
updateCanopyAssignment(assigned, i);
bitsSet++;
// assigned.add(i);
}
}
// this won't be necessary (for the training data) unless canopies have been
// pruned due to the user requesting fewer canopies than are created
// naturally according to the t2 radius
if (bitsSet == 0) {
// add the closest canopy
updateCanopyAssignment(assigned, index);
}
return assigned;
}
protected void updateCanopyCenter(final Instance newInstance, final double[][] center, final double[] numMissingNumerics) {
for (int i = 0; i < newInstance.numAttributes(); i++) {
if (newInstance.attribute(i).isNumeric()) {
if (center[i].length == 0) {
center[i] = new double[1];
}
if (!newInstance.isMissing(i)) {
center[i][0] += newInstance.value(i);
} else {
numMissingNumerics[i]++;
}
} else if (newInstance.attribute(i).isNominal()) {
if (center[i].length == 0) {
// +1 for missing
center[i] = new double[newInstance.attribute(i).numValues() + 1];
}
if (newInstance.isMissing(i)) {
center[i][center[i].length - 1]++;
} else {
center[i][(int) newInstance.value(i)]++;
}
}
}
}
@Override
public void updateClusterer(Instance newInstance) throws Exception {
if (this.m_instanceCount > 0) {
if (this.m_instanceCount % this.m_periodicPruningRate == 0) {
this.pruneCandidateCanopies();
}
}
this.m_instanceCount++;
if (this.m_missingValuesReplacer != null) {
this.m_missingValuesReplacer.input(newInstance);
newInstance = this.m_missingValuesReplacer.output();
}
this.m_distanceFunction.update(newInstance);
boolean addPoint = true;
for (int i = 0; i < this.m_canopies.numInstances(); i++) {
if (this.m_distanceFunction.distance(newInstance, this.m_canopies.instance(i)) < this.m_t2) {
double[] density = this.m_canopyT2Density.get(i);
density[0]++;
addPoint = false;
double[][] center = this.m_canopyCenters.get(i);
double[] numMissingNumerics = this.m_canopyNumMissingForNumerics.get(i);
this.updateCanopyCenter(newInstance, center, numMissingNumerics);
break;
}
}
if (addPoint && this.m_canopies.numInstances() < this.m_maxCanopyCandidates) {
this.m_canopies.add(newInstance);
double[] density = new double[1];
density[0] = 1.0;
this.m_canopyT2Density.add(density);
double[][] center = new double[newInstance.numAttributes()][0];
double[] numMissingNumerics = new double[newInstance.numAttributes()];
this.updateCanopyCenter(newInstance, center, numMissingNumerics);
this.m_canopyCenters.add(center);
this.m_canopyNumMissingForNumerics.add(numMissingNumerics);
}
}
/**
* Prune low density candidate canopies
*/
protected void pruneCandidateCanopies() {
if (this.m_didPruneLastTime == false && this.m_canopies.size() == this.m_maxCanopyCandidates) {
return;
}
this.m_didPruneLastTime = false;
for (int i = this.m_canopies.numInstances() - 1; i >= 0; i--) {
double dens = this.m_canopyT2Density.get(i)[0];
if (dens < this.m_minClusterDensity) {
double[] tempDens = this.m_canopyT2Density.remove(this.m_canopyT2Density.size() - 1);
if (i < this.m_canopyT2Density.size()) {
this.m_canopyT2Density.set(i, tempDens);
}
if (this.getDebug()) {
System.err.println("Pruning a candidate canopy with density: " + dens);
}
this.m_didPruneLastTime = true;
double[][] tempCenter = this.m_canopyCenters.remove(this.m_canopyCenters.size() - 1);
if (i < this.m_canopyCenters.size()) {
this.m_canopyCenters.set(i, tempCenter);
}
double[] tempNumMissingNumerics = this.m_canopyNumMissingForNumerics.remove(this.m_canopyNumMissingForNumerics.size() - 1);
if (i < this.m_canopyNumMissingForNumerics.size()) {
this.m_canopyNumMissingForNumerics.set(i, tempNumMissingNumerics);
}
if (i != this.m_canopies.numInstances() - 1) {
this.m_canopies.swap(i, this.m_canopies.numInstances() - 1);
}
this.m_canopies.delete(this.m_canopies.numInstances() - 1);
}
}
}
@Override
public double[] distributionForInstance(Instance instance) throws Exception {
if (this.m_canopies == null || this.m_canopies.size() == 0) {
throw new Exception("No canopies available to cluster with!");
}
double[] d = new double[this.numberOfClusters()];
if (this.m_missingValuesReplacer != null) {
this.m_missingValuesReplacer.input(instance);
instance = this.m_missingValuesReplacer.output();
}
for (int i = 0; i < this.m_canopies.numInstances(); i++) {
double distance = this.m_distanceFunction.distance(instance, this.m_canopies.instance(i));
d[i] = 1.0 / (1.0 + distance);
}
Utils.normalize(d);
return d;
}
private void assignCanopiesToCanopyCenters() {
// assign canopies to each canopy center
this.m_clusterCanopies = new ArrayList<long[]>();
for (int i = 0; i < this.m_canopies.size(); i++) {
Instance inst = this.m_canopies.instance(i);
try {
long[] assignments = this.assignCanopies(inst);
this.m_clusterCanopies.add(assignments);
} catch (Exception ex) {
ex.printStackTrace();
}
}
}
/**
* Adjust the final number of canopies to match the user-requested number (if
* possible)
*
* @param densities the density of each of the canopies
* @throws InterruptedException
*/
protected void adjustCanopies(final double[] densities) throws InterruptedException {
if (this.m_numClustersRequested < 0) {
this.assignCanopiesToCanopyCenters();
this.m_trainingData = new Instances(this.m_canopies, 0);
return;
}
// more canopies than requested?
if (this.m_canopies.numInstances() > this.m_numClustersRequested) {
int[] sortedIndexes = Utils.stableSort(densities);
Instances finalCanopies = new Instances(this.m_canopies, 0);
int count = 0;
for (int i = sortedIndexes.length - 1; count < this.m_numClustersRequested; i--) {
finalCanopies.add(this.m_canopies.instance(sortedIndexes[i]));
count++;
}
this.m_canopies = finalCanopies;
List<double[][]> tempCanopyCenters = new ArrayList<double[][]>();
List<double[]> tempT2Dists = new ArrayList<double[]>();
List<double[]> tempMissings = new ArrayList<double[]>();
// make sure that the center sums, densities and missing counts are
// aligned with the new canopy list
count = 0;
for (int i = sortedIndexes.length - 1; count < finalCanopies.numInstances(); i--) {
tempCanopyCenters.add(this.m_canopyCenters.get(sortedIndexes[i]));
tempT2Dists.add(this.m_canopyT2Density.get(sortedIndexes[i]));
tempMissings.add(this.m_canopyNumMissingForNumerics.get(sortedIndexes[i]));
count++;
}
this.m_canopyCenters = tempCanopyCenters;
this.m_canopyT2Density = tempT2Dists;
this.m_canopyNumMissingForNumerics = tempMissings;
} else if (this.m_canopies.numInstances() < this.m_numClustersRequested && this.m_trainingData != null && this.m_trainingData.numInstances() > 0) {
// make up the difference with randomly selected instances (if possible)
Random r = new Random(this.getSeed());
for (int i = 0; i < 10; i++) {
r.nextInt();
}
HashMap<DecisionTableHashKey, Integer> initC = new HashMap<DecisionTableHashKey, Integer>();
DecisionTableHashKey hk = null;
// put the existing canopies in the lookup
for (int i = 0; i < this.m_canopies.numInstances(); i++) {
try {
hk = new DecisionTableHashKey(this.m_canopies.instance(i), this.m_canopies.numAttributes(), true);
initC.put(hk, null);
} catch (Exception e) {
e.printStackTrace();
}
}
for (int j = this.m_trainingData.numInstances() - 1; j >= 0; j--) {
int instIndex = r.nextInt(j + 1);
try {
hk = new DecisionTableHashKey(this.m_trainingData.instance(instIndex), this.m_trainingData.numAttributes(), true);
} catch (Exception e) {
e.printStackTrace();
}
if (!initC.containsKey(hk)) {
Instance newInstance = this.m_trainingData.instance(instIndex);
this.m_canopies.add(newInstance);
double[] density = new double[1];
density[0] = 1.0;
this.m_canopyT2Density.add(density);
double[][] center = new double[newInstance.numAttributes()][0];
double[] numMissingNumerics = new double[newInstance.numAttributes()];
this.updateCanopyCenter(newInstance, center, numMissingNumerics);
this.m_canopyCenters.add(center);
this.m_canopyNumMissingForNumerics.add(numMissingNumerics);
initC.put(hk, null);
}
this.m_trainingData.swap(j, instIndex);
if (this.m_canopies.numInstances() == this.m_numClustersRequested) {
break;
}
}
}
this.assignCanopiesToCanopyCenters();
// save memory
this.m_trainingData = new Instances(this.m_canopies, 0);
}
@Override
public void updateFinished() throws InterruptedException {
if (this.m_canopies == null || this.m_canopies.numInstances() == 0) {
return;
}
this.pruneCandidateCanopies();
// set the final canopy centers and weights
double[] densities = new double[this.m_canopies.size()];
for (int i = 0; i < this.m_canopies.numInstances(); i++) {
double[] density = this.m_canopyT2Density.get(i);
double[][] centerSums = this.m_canopyCenters.get(i);
double[] numMissingForNumerics = this.m_canopyNumMissingForNumerics.get(i);
double[] finalCenter = new double[this.m_canopies.numAttributes()];
for (int j = 0; j < this.m_canopies.numAttributes(); j++) {
if (this.m_canopies.attribute(j).isNumeric()) {
if (numMissingForNumerics[j] == density[0]) {
finalCenter[j] = Utils.missingValue();
} else {
finalCenter[j] = centerSums[j][0] / (density[0] - numMissingForNumerics[j]);
}
} else if (this.m_canopies.attribute(j).isNominal()) {
int mode = Utils.maxIndex(centerSums[j]);
if (mode == centerSums[j].length - 1) {
finalCenter[j] = Utils.missingValue();
} else {
finalCenter[j] = mode;
}
}
}
Instance finalCenterInst = this.m_canopies.instance(i) instanceof SparseInstance ? new SparseInstance(1.0, finalCenter) : new DenseInstance(1.0, finalCenter);
this.m_canopies.set(i, finalCenterInst);
this.m_canopies.instance(i).setWeight(density[0]);
densities[i] = density[0];
}
this.adjustCanopies(densities);
}
/**
* Initialize the distance function (i.e set min/max values for numeric
* attributes) with the supplied instances.
*
* @param init the instances to initialize with
* @throws Exception if a problem occurs
*/
public void initializeDistanceFunction(Instances init) throws Exception {
if (this.m_missingValuesReplacer != null) {
init = Filter.useFilter(init, this.m_missingValuesReplacer);
}
this.m_distanceFunction.setInstances(init);
}
/**
* Pretty hokey heuristic to try and set t2 distance automatically based on
* standard deviation
*
* @param trainingBatch the training instances
* @throws Exception if a problem occurs
*/
protected void setT2T1BasedOnStdDev(final Instances trainingBatch) throws Exception {
double normalizedStdDevSum = 0;
for (int i = 0; i < trainingBatch.numAttributes(); i++) {
if (trainingBatch.attribute(i).isNominal()) {
normalizedStdDevSum += 0.25;
} else if (trainingBatch.attribute(i).isNumeric()) {
AttributeStats stats = trainingBatch.attributeStats(i);
if (trainingBatch.numInstances() - stats.missingCount > 2) {
double stdDev = stats.numericStats.stdDev;
double min = stats.numericStats.min;
double max = stats.numericStats.max;
if (!Utils.isMissingValue(stdDev) && max - min > 0) {
stdDev = 0.5 * stdDev / (max - min);
normalizedStdDevSum += stdDev;
}
}
}
}
normalizedStdDevSum = Math.sqrt(normalizedStdDevSum);
if (normalizedStdDevSum > 0) {
this.m_t2 = normalizedStdDevSum;
}
}
@Override
public void buildClusterer(Instances data) throws Exception {
this.m_t1 = this.m_userT1;
this.m_t2 = this.m_userT2;
if (data.numInstances() == 0 && this.m_userT2 < 0) {
System.err.println("The heuristic for setting T2 based on std. dev. can't be used when " + "running in incremental mode. Using default of 1.0.");
this.m_t2 = 1.0;
}
this.m_canopyT2Density = new ArrayList<double[]>();
this.m_canopyCenters = new ArrayList<double[][]>();
this.m_canopyNumMissingForNumerics = new ArrayList<double[]>();
if (data.numInstances() > 0) {
if (!this.m_dontReplaceMissing) {
this.m_missingValuesReplacer = new ReplaceMissingValues();
this.m_missingValuesReplacer.setInputFormat(data);
data = Filter.useFilter(data, this.m_missingValuesReplacer);
}
Random r = new Random(this.getSeed());
for (int i = 0; i < 10; i++) {
r.nextInt();
}
data.randomize(r);
if (this.m_userT2 < 0) {
this.setT2T1BasedOnStdDev(data);
}
}
this.m_t1 = this.m_userT1 > 0 ? this.m_userT1 : -this.m_userT1 * this.m_t2;
// if (m_t1 < m_t2) {
// throw new Exception("T1 can't be less than T2. Computed T2 as " + m_t2
// + " T1 is requested to be " + m_t1);
// }
this.m_distanceFunction.setInstances(data);
this.m_canopies = new Instances(data, 0);
if (data.numInstances() > 0) {
this.m_trainingData = new Instances(data);
}
for (int i = 0; i < data.numInstances(); i++) {
if (this.getDebug() && i % this.m_periodicPruningRate == 0) {
System.err.println("Processed: " + i);
}
this.updateClusterer(data.instance(i));
}
this.updateFinished();
}
@Override
public int numberOfClusters() throws Exception {
return this.m_canopies.numInstances();
}
/**
* Set a ready-to-use missing values replacement filter
*
* @param missingReplacer the missing values replacement filter to use
*/
public void setMissingValuesReplacer(final Filter missingReplacer) {
this.m_missingValuesReplacer = missingReplacer;
}
/**
* Get the canopies (cluster centers).
*
* @return the canopies
*/
public Instances getCanopies() {
return this.m_canopies;
}
/**
* Set the canopies to use (replaces any learned by this clusterer already)
*
* @param canopies the canopies to use
*/
public void setCanopies(final Instances canopies) {
this.m_canopies = canopies;
}
/**
* Get the canopies that each canopy (cluster center) is within T1 distance of
*
* @return a list of canopies for each cluster center
*/
public List<long[]> getClusterCanopyAssignments() {
return this.m_clusterCanopies;
}
/**
* Set the canopies that each canopy (cluster center) is within T1 distance of
*
* @param clusterCanopies the list canopies for each cluster center
*/
public void setClusterCanopyAssignments(final List<long[]> clusterCanopies) {
this.m_clusterCanopies = clusterCanopies;
}
/**
* Get the actual value of T2 (which may be different from the initial value
* if the heuristic is used)
*
* @return the actual value of T2
*/
public double getActualT2() {
return this.m_t2;
}
/**
* Get the actual value of T1 (which may be different from the initial value
* if the heuristic is used)
*
* @return the actual value of T1
*/
public double getActualT1() {
return this.m_t1;
}
/**
* Tip text for this property
*
* @return the tip text for this property
*/
public String t1TipText() {
return "The T1 distance to use. Values < 0 are taken as a positive " + "multiplier for the T2 distance";
}
/**
* Set the T1 distance. Values < 0 are taken as a positive multiplier for the
* T2 distance - e.g. T1_actual = Math.abs(t1) * t2;
*
* @param t1 the T1 distance to use
*/
public void setT1(final double t1) {
this.m_userT1 = t1;
}
/**
* Get the T1 distance. Values < 0 are taken as a positive multiplier for the
* T2 distance - e.g. T1_actual = Math.abs(t1) * t2;
*
* @return the T1 distance to use
*/
public double getT1() {
return this.m_userT1;
}
/**
* Tip text for this property
*
* @return the tip text for this property
*/
public String t2TipText() {
return "The T2 distance to use. Values < 0 indicate that this should be set using " + "a heuristic based on attribute standard deviation (note that this only" + "works when batch training)";
}
/**
* Set the T2 distance to use. Values < 0 indicate that a heuristic based on
* attribute standard deviation should be used to set this (note that the
* heuristic is only applicable when batch training).
*
* @param t2 the T2 distance to use
*/
public void setT2(final double t2) {
this.m_userT2 = t2;
}
/**
* Get the T2 distance to use. Values < 0 indicate that a heuristic based on
* attribute standard deviation should be used to set this (note that the
* heuristic is only applicable when batch training).
*
* @return the T2 distance to use
*/
public double getT2() {
return this.m_userT2;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String numClustersTipText() {
return "Set number of clusters. -1 means number of clusters is determined by " + "T2 distance";
}
@Override
public void setNumClusters(final int numClusters) throws Exception {
this.m_numClustersRequested = numClusters;
}
/**
* Get the number of clusters to generate
*
* @return the number of clusters to generate
*/
public int getNumClusters() {
return this.m_numClustersRequested;
}
/**
 * Returns the tip text for the periodicPruningRate property.
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String periodicPruningRateTipText() {
return "How often to prune low density canopies during training";
}
/**
 * Set how often to prune low density canopies during training.
 *
 * @param p how often (every p instances) to prune low density canopies
 */
public void setPeriodicPruningRate(final int p) {
this.m_periodicPruningRate = p;
}
/**
 * Get how often to prune low density canopies during training.
 *
 * @return how often (every p instances) to prune low density canopies
 */
public int getPeriodicPruningRate() {
return this.m_periodicPruningRate;
}
/**
 * Returns the tip text for the minimumCanopyDensity property.
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String minimumCanopyDensityTipText() {
return "The minimum T2-based density below which a canopy will be pruned during periodic pruning";
}
/**
 * Set the minimum T2-based density below which a canopy will be pruned during
 * periodic pruning.
 *
 * @param dens the minimum canopy density
 */
public void setMinimumCanopyDensity(final double dens) {
this.m_minClusterDensity = dens;
}
/**
 * Get the minimum T2-based density below which a canopy will be pruned during
 * periodic pruning.
 *
 * @return the minimum canopy density
 */
public double getMinimumCanopyDensity() {
return this.m_minClusterDensity;
}
/**
 * Returns the tip text for the maxNumCandidateCanopiesToHoldInMemory
 * property.
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String maxNumCandidateCanopiesToHoldInMemoryTipText() {
  return "The maximum number of candidate canopies to retain in main memory during training. "
      + "T2 distance and data characteristics determine how many candidate "
      + "canopies are formed before periodic and final pruning are performed. There "
      + "may not be enough memory available if T2 is set too low.";
}

/**
 * Returns the tip text for this property.
 * <p>
 * Retained for backward compatibility: Weka's GUI property sheets discover
 * tool tips via the {@code <propertyName>TipText} naming convention, which
 * this method's name does not follow, so the properly named
 * {@link #maxNumCandidateCanopiesToHoldInMemoryTipText()} was added and this
 * method now simply delegates to it.
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String maxNumCandidateCanopiesToHoldInMemory() {
  return this.maxNumCandidateCanopiesToHoldInMemoryTipText();
}
/**
 * Set the maximum number of candidate canopies to retain in memory during
 * training. T2 distance and data characteristics determine how many candidate
 * canopies are formed before periodic and final pruning are performed. There
 * may not be enough memory available if T2 is set too low.
 *
 * @param max the maximum number of candidate canopies to retain in memory
 *          during training
 */
public void setMaxNumCandidateCanopiesToHoldInMemory(final int max) {
this.m_maxCanopyCandidates = max;
}
/**
 * Get the maximum number of candidate canopies to retain in memory during
 * training. T2 distance and data characteristics determine how many candidate
 * canopies are formed before periodic and final pruning are performed. There
 * may not be enough memory available if T2 is set too low.
 *
 * @return the maximum number of candidate canopies to retain in memory during
 *         training
 */
public int getMaxNumCandidateCanopiesToHoldInMemory() {
return this.m_maxCanopyCandidates;
}
/**
 * Returns the tip text for the dontReplaceMissingValues property.
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String dontReplaceMissingValuesTipText() {
// NOTE(review): the text describes the replacement behaviour that is
// *disabled* when this flag is set — wording reads inverted relative to the
// property name; consider rewording upstream.
return "Replace missing values globally with mean/mode.";
}
/**
 * Sets whether missing values are to be left as-is (i.e. NOT replaced
 * globally with mean/mode).
 *
 * @param r true if missing values should not be replaced
 */
public void setDontReplaceMissingValues(final boolean r) {
this.m_dontReplaceMissing = r;
}
/**
 * Gets whether missing values are to be left as-is (i.e. NOT replaced
 * globally with mean/mode).
 *
 * @return true if missing values are not replaced
 */
public boolean getDontReplaceMissingValues() {
return this.m_dontReplaceMissing;
}
/**
 * Render a single canopy-assignment bit set as a comma-separated list of the
 * indexes of the set bits, e.g. " <0,2,64>". Each long holds 64 canopy
 * slots, least-significant bit first.
 *
 * @param assignments the assignment bit set to render
 * @return the formatted assignment string
 */
public static String printSingleAssignment(final long[] assignments) {
  StringBuilder buff = new StringBuilder(" <");
  String sep = "";
  for (int word = 0; word < assignments.length; word++) {
    for (int bit = 0; bit < 64; bit++) {
      if ((assignments[word] & (1L << bit)) != 0L) {
        buff.append(sep).append(word * 64 + bit);
        sep = ",";
      }
    }
  }
  return buff.append(">").toString();
}
/**
 * Print the supplied instances and their canopies, one instance per line in
 * the form "Cluster i: <instance> <assignments>".
 *
 * @param dataPoints the instances to print
 * @param canopyAssignments the canopy assignments, one assignment array per
 *          instance (ignored if null or of mismatched size)
 * @return a string containing the printed assignments
 */
public static String printCanopyAssignments(final Instances dataPoints, final List<long[]> canopyAssignments) {
  StringBuilder buff = new StringBuilder();
  boolean haveAssignments =
      canopyAssignments != null && canopyAssignments.size() == dataPoints.size();
  for (int i = 0; i < dataPoints.size(); i++) {
    buff.append("Cluster ").append(i).append(": ").append(dataPoints.instance(i));
    if (haveAssignments) {
      buff.append(printSingleAssignment(canopyAssignments.get(i)));
    }
    buff.append("\n");
  }
  return buff.toString();
}
/**
 * Return a textual description of this clusterer: optionally a header banner
 * with the canopy count, then the T2/T1 radii and the canopy assignments.
 *
 * @param header true if the header should be printed
 * @return a string describing the result of the clustering, or
 *         "No clusterer built yet" if no canopies exist
 */
public String toString(final boolean header) {
  if (this.m_canopies == null) {
    return "No clusterer built yet";
  }
  // StringBuilder instead of StringBuffer: the buffer is method-local, so
  // StringBuffer's synchronization is pure overhead.
  StringBuilder temp = new StringBuilder();
  if (header) {
    temp.append("\nCanopy clustering\n=================\n");
    temp.append("\nNumber of canopies (cluster centers) found: " + this.m_canopies.numInstances());
  }
  temp.append("\nT2 radius: " + String.format("%-10.3f", this.m_t2));
  temp.append("\nT1 radius: " + String.format("%-10.3f", this.m_t1));
  temp.append("\n\n");
  temp.append(printCanopyAssignments(this.m_canopies, this.m_clusterCanopies));
  temp.append("\n");
  return temp.toString();
}
/**
 * Returns the full textual description of this clusterer, header included.
 *
 * @return the description produced by {@code toString(true)}
 */
@Override
public String toString() {
return this.toString(true);
}
/**
 * Save memory by discarding the per-canopy training statistics (running
 * centre sums, missing-numeric counts and T2 densities). The canopies
 * themselves (m_canopies) are retained.
 * <p>
 * NOTE: aggregateCanopies() reads these lists, so do not call this on a
 * clusterer that will still be aggregated.
 */
public void cleanUp() {
this.m_canopyNumMissingForNumerics = null;
this.m_canopyT2Density = null;
this.m_canopyCenters = null;
}
/**
 * Aggregate the canopies from a list of Canopy clusterers together into one
 * final model. Pools every canopy from every supplied clusterer, then
 * greedily merges any canopy that falls within aggregationT2 of an
 * already-retained one.
 *
 * @param canopies the list of Canopy clusterers to aggregate (must be
 *          non-empty; the first entry supplies the dataset structure)
 * @param aggregationT1 the T1 distance to use for the aggregated classifier
 * @param aggregationT2 the T2 distance to use when aggregating canopies
 * @param finalDistanceFunction the distance function to use with the final
 *          Canopy clusterer
 * @param missingValuesReplacer the missing value replacement filter to use
 *          with the final clusterer (can be null for no missing value
 *          replacement)
 * @param finalNumCanopies the final number of canopies
 * @return a Canopy clusterer that aggregates all the canopies
 * @throws InterruptedException declared for the interruptible variant of
 *           this library; nothing in this method throws it directly
 */
public static Canopy aggregateCanopies(final List<Canopy> canopies, final double aggregationT1, final double aggregationT2, final NormalizableDistance finalDistanceFunction, final Filter missingValuesReplacer,
final int finalNumCanopies) throws InterruptedException {
// Phase 1: pool the canopies (and their index-aligned per-canopy statistics)
// from all clusterers into flat collections.
Instances collectedCanopies = new Instances(canopies.get(0).getCanopies(), 0);
Instances finalCanopies = new Instances(collectedCanopies, 0);
List<double[][]> finalCenters = new ArrayList<double[][]>();
List<double[]> finalMissingNumerics = new ArrayList<double[]>();
List<double[]> finalT2Densities = new ArrayList<double[]>();
List<Instance> finalCanopiesList = new ArrayList<Instance>();
List<double[][]> centersForEachCanopy = new ArrayList<double[][]>();
List<double[]> numMissingNumericsForEachCanopy = new ArrayList<double[]>();
for (Canopy c : canopies) {
Instances tempC = c.getCanopies();
// System.err.println("A canopy clusterer:\n " + c.toString());
for (int i = 0; i < tempC.numInstances(); i++) {
collectedCanopies.add(tempC.instance(i));
centersForEachCanopy.add(c.m_canopyCenters.get(i));
numMissingNumericsForEachCanopy.add(c.m_canopyNumMissingForNumerics.get(i));
}
}
// Phase 2: greedy merge. Each pooled canopy is absorbed into the FIRST
// retained canopy closer than aggregationT2; otherwise it is retained as a
// new canopy. Order of the input list therefore affects the result.
for (int i = 0; i < collectedCanopies.numInstances(); i++) {
boolean addPoint = true;
Instance candidate = collectedCanopies.instance(i);
double[][] candidateCenter = centersForEachCanopy.get(i);
double[] candidateMissingNumerics = numMissingNumericsForEachCanopy.get(i);
for (int j = 0; j < finalCanopiesList.size(); j++) {
Instance fc = finalCanopiesList.get(j);
if (finalDistanceFunction.distance(candidate, fc) < aggregationT2) {
addPoint = false;
// now absorb candidate into fc
double[][] center = finalCenters.get(j);
double[] missingNumerics = finalMissingNumerics.get(j);
// double newDensity = fc.weight() + candidate.weight();
finalT2Densities.get(j)[0] += candidate.weight();
// Accumulate the running centre sums and missing counts. Assumes the
// parallel arrays hold one entry per attribute — TODO confirm against
// the (not shown) code that builds m_canopyCenters.
for (int k = 0; k < candidate.numAttributes(); k++) {
missingNumerics[k] += candidateMissingNumerics[k];
for (int l = 0; l < center[k].length; l++) {
center[k][l] += candidateCenter[k][l];
}
}
break;
}
}
if (addPoint) {
// No retained canopy within T2 — keep candidate (and its stats) as-is.
finalCanopiesList.add(candidate);
finalCanopies.add(candidate);
finalCenters.add(candidateCenter);
finalMissingNumerics.add(candidateMissingNumerics);
double[] dens = new double[1];
dens[0] = candidate.weight();
finalT2Densities.add(dens);
}
}
// now construct a new Canopy encapsulating the final set of canopies
// System.err.println(finalCanopies);
Canopy finalC = new Canopy();
finalC.setCanopies(finalCanopies);
finalC.setMissingValuesReplacer(missingValuesReplacer);
finalC.m_distanceFunction = finalDistanceFunction;
finalC.m_canopyCenters = finalCenters;
finalC.m_canopyNumMissingForNumerics = finalMissingNumerics;
finalC.m_canopyT2Density = finalT2Densities;
finalC.m_t2 = aggregationT2;
finalC.m_t1 = aggregationT1;
try {
finalC.setNumClusters(finalNumCanopies);
} catch (Exception e) {
// can safely ignore as Canopy does not generate an exception
}
finalC.updateFinished();
return finalC;
}
/**
 * Main method for executing this clusterer from the command line.
 *
 * @param args command line options (passed through to runClusterer)
 */
public static void main(final String[] args) {
runClusterer(new Canopy(), args);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/clusterers/CheckClusterer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* CheckClusterer.java
* Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.clusterers;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;
import weka.core.CheckScheme;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.MultiInstanceCapabilitiesHandler;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.SerializationHelper;
import weka.core.TestInstances;
import weka.core.Utils;
import weka.core.WeightedInstancesHandler;
/**
* Class for examining the capabilities and finding problems with clusterers. If
 * you implement a clusterer using the WEKA class libraries, you should run the checks
* on it to ensure robustness and correct operation. Passing all the tests of
* this object does not mean bugs in the clusterer don't exist, but this will
* help find some common ones.
* <p/>
*
* Typical usage:
* <p/>
* <code>java weka.clusterers.CheckClusterer -W clusterer_name
* -- clusterer_options </code>
* <p/>
*
* CheckClusterer reports on the following:
* <ul>
* <li>Clusterer abilities
* <ul>
* <li>Possible command line options to the clusterer</li>
* <li>Whether the clusterer can predict nominal, numeric, string, date or
* relational class attributes.</li>
* <li>Whether the clusterer can handle numeric predictor attributes</li>
* <li>Whether the clusterer can handle nominal predictor attributes</li>
* <li>Whether the clusterer can handle string predictor attributes</li>
* <li>Whether the clusterer can handle date predictor attributes</li>
* <li>Whether the clusterer can handle relational predictor attributes</li>
* <li>Whether the clusterer can handle multi-instance data</li>
* <li>Whether the clusterer can handle missing predictor values</li>
* <li>Whether the clusterer can handle instance weights</li>
* </ul>
* </li>
* <li>Correct functioning
* <ul>
* <li>Correct initialisation during buildClusterer (i.e. no result changes when
* buildClusterer called repeatedly)</li>
 * <li>Whether the clusterer alters the data passed to it (number of instances,
* instance order, instance weights, etc)</li>
* </ul>
* </li>
* <li>Degenerate cases
* <ul>
* <li>building clusterer with zero training instances</li>
* <li>all but one predictor attribute values missing</li>
* <li>all predictor attribute values missing</li>
* <li>all but one class values missing</li>
* <li>all class values missing</li>
* </ul>
* </li>
* </ul>
* Running CheckClusterer with the debug option set will output the training
* dataset for any failed tests.
* <p/>
*
* The <code>weka.clusterers.AbstractClustererTest</code> uses this class to
* test all the clusterers. Any changes here, have to be checked in that
* abstract test class, too.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -D
* Turn on debugging output.
* </pre>
*
* <pre>
* -S
* Silent mode - prints nothing to stdout.
* </pre>
*
* <pre>
* -N <num>
* The number of instances in the datasets (default 20).
* </pre>
*
* <pre>
* -nominal <num>
* The number of nominal attributes (default 2).
* </pre>
*
* <pre>
* -nominal-values <num>
* The number of values for nominal attributes (default 1).
* </pre>
*
* <pre>
* -numeric <num>
* The number of numeric attributes (default 1).
* </pre>
*
* <pre>
* -string <num>
* The number of string attributes (default 1).
* </pre>
*
* <pre>
* -date <num>
* The number of date attributes (default 1).
* </pre>
*
* <pre>
* -relational <num>
* The number of relational attributes (default 1).
* </pre>
*
* <pre>
* -num-instances-relational <num>
* The number of instances in relational/bag attributes (default 10).
* </pre>
*
* <pre>
* -words <comma-separated-list>
* The words to use in string attributes.
* </pre>
*
* <pre>
* -word-separators <chars>
* The word separators to use in string attributes.
* </pre>
*
* <pre>
* -W
* Full name of the clusterer analyzed.
* eg: weka.clusterers.SimpleKMeans
* (default weka.clusterers.SimpleKMeans)
* </pre>
*
* <pre>
* Options specific to clusterer weka.clusterers.SimpleKMeans:
* </pre>
*
* <pre>
* -N <num>
* number of clusters.
* (default 2).
* </pre>
*
* <pre>
* -V
* Display std. deviations for centroids.
* </pre>
*
* <pre>
* -M
* Replace missing values with mean/mode.
* </pre>
*
* <pre>
* -S <num>
* Random number seed.
* (default 10)
* </pre>
*
* <!-- options-end -->
*
* Options after -- are passed to the designated clusterer.
* <p/>
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
* @see TestInstances
*/
public class CheckClusterer extends CheckScheme {
/*
* Note about test methods: - methods return array of booleans - first index:
* success or not - second index: acceptable or not (e.g., Exception is OK)
*
* FracPete (fracpete at waikato dot ac dot nz)
*/
/** The clusterer to be examined. */
protected Clusterer m_Clusterer = new SimpleKMeans();
/**
 * Default constructor. Raises the test-dataset size to 40 instances
 * (overriding the CheckScheme default).
 */
public CheckClusterer() {
super();
setNumInstances(40);
}
/**
 * Returns an enumeration describing the available options: the -W option
 * first, then the CheckScheme options, then (if the current clusterer is an
 * OptionHandler) its own options under a separator pseudo-option.
 *
 * @return an enumeration of all the available options.
 */
@Override
public Enumeration<Option> listOptions() {
Vector<Option> result = new Vector<Option>();
result.addElement(new Option("\tFull name of the clusterer analyzed.\n"
+ "\teg: weka.clusterers.SimpleKMeans\n"
+ "\t(default weka.clusterers.SimpleKMeans)", "W", 1, "-W"));
result.addAll(Collections.list(super.listOptions()));
// Append the options of the clusterer under test, headed by a dummy
// zero-argument Option that serves as a section separator in usage output.
if ((m_Clusterer != null) && (m_Clusterer instanceof OptionHandler)) {
result.addElement(new Option("", "", 0,
"\nOptions specific to clusterer " + m_Clusterer.getClass().getName()
+ ":"));
result.addAll(Collections.list(((OptionHandler) m_Clusterer)
.listOptions()));
}
return result.elements();
}
/**
* Parses a given list of options.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -D
* Turn on debugging output.
* </pre>
*
* <pre>
* -S
* Silent mode - prints nothing to stdout.
* </pre>
*
* <pre>
* -N <num>
* The number of instances in the datasets (default 20).
* </pre>
*
* <pre>
* -nominal <num>
* The number of nominal attributes (default 2).
* </pre>
*
* <pre>
* -nominal-values <num>
* The number of values for nominal attributes (default 1).
* </pre>
*
* <pre>
* -numeric <num>
* The number of numeric attributes (default 1).
* </pre>
*
* <pre>
* -string <num>
* The number of string attributes (default 1).
* </pre>
*
* <pre>
* -date <num>
* The number of date attributes (default 1).
* </pre>
*
* <pre>
* -relational <num>
* The number of relational attributes (default 1).
* </pre>
*
* <pre>
* -num-instances-relational <num>
* The number of instances in relational/bag attributes (default 10).
* </pre>
*
* <pre>
* -words <comma-separated-list>
* The words to use in string attributes.
* </pre>
*
* <pre>
* -word-separators <chars>
* The word separators to use in string attributes.
* </pre>
*
* <pre>
* -W
* Full name of the clusterer analyzed.
* eg: weka.clusterers.SimpleKMeans
* (default weka.clusterers.SimpleKMeans)
* </pre>
*
* <pre>
* Options specific to clusterer weka.clusterers.SimpleKMeans:
* </pre>
*
* <pre>
* -N <num>
* number of clusters.
* (default 2).
* </pre>
*
* <pre>
* -V
* Display std. deviations for centroids.
* </pre>
*
* <pre>
* -M
* Replace missing values with mean/mode.
* </pre>
*
* <pre>
* -S <num>
* Random number seed.
* (default 10)
* </pre>
*
* <!-- options-end -->
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String tmpStr;
// Consume -N here, before super.setOptions(), so this class's default of
// 40 instances is applied when -N is absent (Utils.getOption removes the
// option from the array as a side effect).
tmpStr = Utils.getOption('N', options);
if (tmpStr.length() != 0) {
setNumInstances(Integer.parseInt(tmpStr));
} else {
setNumInstances(40);
}
super.setOptions(options);
// -W names the clusterer to analyze; everything after "--" (extracted by
// Utils.partitionOptions) is forwarded to that clusterer.
tmpStr = Utils.getOption('W', options);
if (tmpStr.length() == 0) {
tmpStr = weka.clusterers.SimpleKMeans.class.getName();
}
setClusterer((Clusterer) forName("weka.clusterers", Clusterer.class,
tmpStr, Utils.partitionOptions(options)));
Utils.checkForRemainingOptions(options);
}
/**
 * Gets the current settings of the CheckClusterer: -W and the clusterer's
 * class name, the CheckScheme options, then the clusterer's own options
 * after a "--" separator (mirroring how setOptions consumes them).
 *
 * @return an array of strings suitable for passing to setOptions
 */
@Override
public String[] getOptions() {
Vector<String> result = new Vector<String>();
if (getClusterer() != null) {
result.add("-W");
result.add(getClusterer().getClass().getName());
}
Collections.addAll(result, super.getOptions());
if ((m_Clusterer != null) && (m_Clusterer instanceof OptionHandler)) {
String[] options = ((OptionHandler) m_Clusterer).getOptions();
if (options.length > 0) {
result.add("--");
Collections.addAll(result, options);
}
}
return result.toArray(new String[result.size()]);
}
/**
 * Begin the tests, reporting results to System.out. Interface probes run
 * first; their results parameterize the behavioural tests in runTests().
 */
@Override
public void doTests() {
if (getClusterer() == null) {
println("\n=== No clusterer set ===");
return;
}
println("\n=== Check on Clusterer: " + getClusterer().getClass().getName()
+ " ===\n");
// Start tests
println("--> Checking for interfaces");
canTakeOptions();
boolean updateable = updateableClusterer()[0];
boolean weightedInstancesHandler = weightedInstancesHandler()[0];
boolean multiInstanceHandler = multiInstanceHandler()[0];
println("--> Clusterer tests");
declaresSerialVersionUID();
runTests(weightedInstancesHandler, multiInstanceHandler, updateable);
}
/**
 * Set the clusterer for testing.
 *
 * @param newClusterer the Clusterer to use.
 */
public void setClusterer(Clusterer newClusterer) {
m_Clusterer = newClusterer;
}
/**
 * Get the clusterer being tested.
 *
 * @return the clusterer under test
 */
public Clusterer getClusterer() {
return m_Clusterer;
}
/**
 * Run a battery of tests
 *
 * @param weighted true if the clusterer says it handles weights
 * @param multiInstance true if the clusterer is a multi-instance clusterer
 * @param updateable true if the clusterer is updateable
 */
protected void runTests(boolean weighted, boolean multiInstance,
boolean updateable) {
// Probe which predictor attribute types the scheme can handle at all; each
// flag gates the corresponding attribute type in the later tests.
boolean PNom = canPredict(true, false, false, false, false, multiInstance)[0];
boolean PNum = canPredict(false, true, false, false, false, multiInstance)[0];
boolean PStr = canPredict(false, false, true, false, false, multiInstance)[0];
boolean PDat = canPredict(false, false, false, true, false, multiInstance)[0];
boolean PRel;
if (!multiInstance) {
PRel = canPredict(false, false, false, false, true, multiInstance)[0];
} else {
// relational attributes are not probed separately for multi-instance data
PRel = false;
}
if (PNom || PNum || PStr || PDat || PRel) {
if (weighted) {
instanceWeights(PNom, PNum, PStr, PDat, PRel, multiInstance);
}
canHandleZeroTraining(PNom, PNum, PStr, PDat, PRel, multiInstance);
// Only escalate to the 100%-missing test if 20% missing already works.
boolean handleMissingPredictors = canHandleMissing(PNom, PNum, PStr,
PDat, PRel, multiInstance, true, 20)[0];
if (handleMissingPredictors) {
canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance, true, 100);
}
correctBuildInitialisation(PNom, PNum, PStr, PDat, PRel, multiInstance);
datasetIntegrity(PNom, PNum, PStr, PDat, PRel, multiInstance,
handleMissingPredictors);
if (updateable) {
updatingEquality(PNom, PNum, PStr, PDat, PRel, multiInstance);
}
}
}
/**
 * Checks whether the scheme can take command line options, and in debug mode
 * prints the full option list.
 *
 * @return index 0 is true if the clusterer can take options
 */
protected boolean[] canTakeOptions() {
  boolean[] result = new boolean[2];
  print("options...");
  boolean handler = m_Clusterer instanceof OptionHandler;
  if (handler) {
    println("yes");
    if (m_Debug) {
      println("\n=== Full report ===");
      Enumeration<Option> opts = ((OptionHandler) m_Clusterer).listOptions();
      while (opts.hasMoreElements()) {
        Option opt = opts.nextElement();
        print(opt.synopsis() + "\n" + opt.description() + "\n");
      }
      println("\n");
    }
  } else {
    println("no");
  }
  result[0] = handler;
  return result;
}
/**
 * Checks whether the scheme can build models incrementally.
 *
 * @return index 0 is true if the clusterer can train incrementally
 */
protected boolean[] updateableClusterer() {
  boolean[] result = new boolean[2];
  print("updateable clusterer...");
  result[0] = m_Clusterer instanceof UpdateableClusterer;
  println(result[0] ? "yes" : "no");
  return result;
}
/**
 * Checks whether the scheme says it can handle instance weights.
 *
 * @return index 0 is true if the clusterer handles instance weights
 */
protected boolean[] weightedInstancesHandler() {
  boolean[] result = new boolean[2];
  print("weighted instances clusterer...");
  result[0] = m_Clusterer instanceof WeightedInstancesHandler;
  println(result[0] ? "yes" : "no");
  return result;
}
/**
 * Checks whether the scheme handles multi-instance data.
 *
 * @return index 0 is true if the clusterer handles multi-instance data
 */
protected boolean[] multiInstanceHandler() {
  boolean[] result = new boolean[2];
  print("multi-instance clusterer...");
  result[0] = m_Clusterer instanceof MultiInstanceCapabilitiesHandler;
  println(result[0] ? "yes" : "no");
  return result;
}
/**
 * Tests for a serialVersionUID. Fails in case the scheme doesn't declare a
 * UID.
 *
 * @return index 0 is true if the scheme declares a UID
 */
protected boolean[] declaresSerialVersionUID() {
  boolean[] result = new boolean[2];
  print("serialVersionUID...");
  result[0] = !SerializationHelper.needsUID(m_Clusterer.getClass());
  println(result[0] ? "yes" : "no");
  return result;
}
/**
 * Checks basic prediction of the scheme, for simple non-troublesome datasets.
 *
 * @param nominalPredictor if true use nominal predictor attributes
 * @param numericPredictor if true use numeric predictor attributes
 * @param stringPredictor if true use string predictor attributes
 * @param datePredictor if true use date predictor attributes
 * @param relationalPredictor if true use relational predictor attributes
 * @param multiInstance whether multi-instance is needed
 * @return index 0 is true if the test was passed, index 1 is true if test was
 *         acceptable
 */
protected boolean[] canPredict(boolean nominalPredictor,
    boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
    boolean relationalPredictor, boolean multiInstance) {
  print("basic predict");
  printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
      datePredictor, relationalPredictor, multiInstance);
  print("...");
  // Keywords that make a failure count as "acceptable" — presumably matched
  // against the failure message inside runBasicTest (confirm in CheckScheme).
  Vector<String> accepts = new Vector<String>();
  Collections.addAll(accepts, "unary", "binary", "nominal", "numeric",
      "string", "date", "relational", "multi-instance", "not in classpath");
  int missingLevel = 0;
  int numTrain = getNumInstances();
  boolean predictorMissing = false;
  return runBasicTest(nominalPredictor, numericPredictor, stringPredictor,
      datePredictor, relationalPredictor, multiInstance, missingLevel,
      predictorMissing, numTrain, accepts);
}
/**
 * Checks whether the scheme can handle zero training instances.
 *
 * @param nominalPredictor if true use nominal predictor attributes
 * @param numericPredictor if true use numeric predictor attributes
 * @param stringPredictor if true use string predictor attributes
 * @param datePredictor if true use date predictor attributes
 * @param relationalPredictor if true use relational predictor attributes
 * @param multiInstance whether multi-instance is needed
 * @return index 0 is true if the test was passed, index 1 is true if test was
 *         acceptable
 */
protected boolean[] canHandleZeroTraining(boolean nominalPredictor,
    boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
    boolean relationalPredictor, boolean multiInstance) {
  print("handle zero training instances");
  printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
      datePredictor, relationalPredictor, multiInstance);
  print("...");
  Vector<String> accepts = new Vector<String>();
  Collections.addAll(accepts, "train", "value");
  // An empty training set is the whole point of this test.
  int numTrain = 0;
  int missingLevel = 0;
  boolean predictorMissing = false;
  return runBasicTest(nominalPredictor, numericPredictor, stringPredictor,
      datePredictor, relationalPredictor, multiInstance, missingLevel,
      predictorMissing, numTrain, accepts);
}
/**
* Checks whether the scheme correctly initialises models when buildClusterer
* is called. This test calls buildClusterer with one training dataset.
* buildClusterer is then called on a training set with different structure,
* and then again with the original training set. If the equals method of the
* ClusterEvaluation class returns false, this is noted as incorrect build
* initialisation.
*
* @param nominalPredictor if true use nominal predictor attributes
* @param numericPredictor if true use numeric predictor attributes
* @param stringPredictor if true use string predictor attributes
* @param datePredictor if true use date predictor attributes
* @param relationalPredictor if true use relational predictor attributes
* @param multiInstance whether multi-instance is needed
* @return index 0 is true if the test was passed
*/
protected boolean[] correctBuildInitialisation(boolean nominalPredictor,
boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
boolean relationalPredictor, boolean multiInstance) {
boolean[] result = new boolean[2];
print("correct initialisation during buildClusterer");
printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
datePredictor, relationalPredictor, multiInstance);
print("...");
int numTrain = getNumInstances(), missingLevel = 0;
boolean predictorMissing = false;
Instances train1 = null;
Instances train2 = null;
Clusterer clusterer = null;
ClusterEvaluation evaluation1A = null;
ClusterEvaluation evaluation1B = null;
ClusterEvaluation evaluation2 = null;
// built: tracks whether buildClusterer succeeded for the current stage, so
// the debug report can say whether training or evaluation failed.
boolean built = false;
// stage: 0 = first build on train1, 1 = build on train2, 2 = rebuild on
// train1, 3 = comparing the two train1 evaluations.
int stage = 0;
try {
// Make two train sets with different numbers of attributes
train1 = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal()
: 0, numericPredictor ? getNumNumeric() : 0,
stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0,
relationalPredictor ? getNumRelational() : 0, multiInstance);
train2 = makeTestDataset(84, numTrain,
nominalPredictor ? getNumNominal() + 1 : 0,
numericPredictor ? getNumNumeric() + 1 : 0,
stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0,
relationalPredictor ? getNumRelational() : 0, multiInstance);
if (nominalPredictor && !multiInstance) {
train1.deleteAttributeAt(0);
train2.deleteAttributeAt(0);
}
// missingLevel is fixed at 0 above, so this branch is currently dead —
// kept for symmetry with the other tests.
if (missingLevel > 0) {
addMissing(train1, missingLevel, predictorMissing);
addMissing(train2, missingLevel, predictorMissing);
}
clusterer = AbstractClusterer.makeCopies(getClusterer(), 1)[0];
evaluation1A = new ClusterEvaluation();
evaluation1B = new ClusterEvaluation();
evaluation2 = new ClusterEvaluation();
} catch (Exception ex) {
throw new Error("Error setting up for tests: " + ex.getMessage());
}
try {
// Build on train1, then train2, then train1 again; if the clusterer
// re-initialises correctly the two train1 evaluations must be equal.
stage = 0;
clusterer.buildClusterer(train1);
built = true;
evaluation1A.setClusterer(clusterer);
evaluation1A.evaluateClusterer(train1);
stage = 1;
built = false;
clusterer.buildClusterer(train2);
built = true;
evaluation2.setClusterer(clusterer);
evaluation2.evaluateClusterer(train2);
stage = 2;
built = false;
clusterer.buildClusterer(train1);
built = true;
evaluation1B.setClusterer(clusterer);
evaluation1B.evaluateClusterer(train1);
stage = 3;
if (!evaluation1A.equals(evaluation1B)) {
if (m_Debug) {
println("\n=== Full report ===\n");
println("First buildClusterer()");
println(evaluation1A.clusterResultsToString() + "\n\n");
println("Second buildClusterer()");
println(evaluation1B.clusterResultsToString() + "\n\n");
}
throw new Exception("Results differ between buildClusterer calls");
}
println("yes");
result[0] = true;
} catch (Exception ex) {
println("no");
result[0] = false;
if (m_Debug) {
println("\n=== Full Report ===");
print("Problem during");
if (built) {
print(" testing");
} else {
print(" training");
}
switch (stage) {
case 0:
print(" of dataset 1");
break;
case 1:
print(" of dataset 2");
break;
case 2:
print(" of dataset 1 (2nd build)");
break;
case 3:
print(", comparing results from builds of dataset 1");
break;
}
println(": " + ex.getMessage() + "\n");
println("here are the datasets:\n");
println("=== Train1 Dataset ===\n" + train1.toString() + "\n");
println("=== Train2 Dataset ===\n" + train2.toString() + "\n");
}
}
return result;
}
/**
 * Checks basic missing value handling of the scheme. If the missing values
 * cause an exception to be thrown by the scheme, this will be recorded.
 *
 * @param nominalPredictor if true use nominal predictor attributes
 * @param numericPredictor if true use numeric predictor attributes
 * @param stringPredictor if true use string predictor attributes
 * @param datePredictor if true use date predictor attributes
 * @param relationalPredictor if true use relational predictor attributes
 * @param multiInstance whether multi-instance is needed
 * @param predictorMissing true if the missing values may be in the predictors
 * @param missingLevel the percentage of missing values
 * @return index 0 is true if the test was passed, index 1 is true if test was
 *         acceptable
 */
protected boolean[] canHandleMissing(boolean nominalPredictor,
    boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
    boolean relationalPredictor, boolean multiInstance,
    boolean predictorMissing, int missingLevel) {
  // Assemble the progress label in one buffer rather than piecemeal prints.
  StringBuilder label = new StringBuilder();
  if (missingLevel == 100) {
    label.append("100% ");
  }
  label.append("missing");
  if (predictorMissing) {
    label.append(" predictor");
  }
  label.append(" values");
  print(label.toString());
  printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
      datePredictor, relationalPredictor, multiInstance);
  print("...");
  Vector<String> accepts = new Vector<String>();
  Collections.addAll(accepts, "missing", "value", "train");
  int numTrain = getNumInstances();
  return runBasicTest(nominalPredictor, numericPredictor, stringPredictor,
      datePredictor, relationalPredictor, multiInstance, missingLevel,
      predictorMissing, numTrain, accepts);
}
/**
* Checks whether the clusterer can handle instance weights. This test
* compares the clusterer performance on two datasets that are identical
* except for the training weights. If the results change, then the clusterer
* must be using the weights. It may be possible to get a false positive from
* this test if the weight changes aren't significant enough to induce a
* change in clusterer performance (but the weights are chosen to minimize the
* likelihood of this).
*
* @param nominalPredictor if true use nominal predictor attributes
* @param numericPredictor if true use numeric predictor attributes
* @param stringPredictor if true use string predictor attributes
* @param datePredictor if true use date predictor attributes
* @param relationalPredictor if true use relational predictor attributes
* @param multiInstance whether multi-instance is needed
* @return index 0 true if the test was passed
*/
/**
 * Checks whether the clusterer can handle instance weights. This test
 * compares the clusterer performance on two datasets that are identical
 * except for the training weights. If the results change, then the clusterer
 * must be using the weights. It may be possible to get a false positive from
 * this test if the weight changes aren't significant enough to induce a
 * change in clusterer performance (but the weights are chosen to minimize the
 * likelihood of this).
 *
 * @param nominalPredictor if true use nominal predictor attributes
 * @param numericPredictor if true use numeric predictor attributes
 * @param stringPredictor if true use string predictor attributes
 * @param datePredictor if true use date predictor attributes
 * @param relationalPredictor if true use relational predictor attributes
 * @param multiInstance whether multi-instance is needed
 * @return index 0 true if the test was passed
 */
protected boolean[] instanceWeights(boolean nominalPredictor,
    boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
    boolean relationalPredictor, boolean multiInstance) {

  print("clusterer uses instance weights");
  printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
      datePredictor, relationalPredictor, multiInstance);
  print("...");

  // double-sized dataset so that re-weighting half of it is likely to have
  // a visible effect on the model
  int numTrain = 2 * getNumInstances(), missingLevel = 0;
  boolean predictorMissing = false;

  boolean[] result = new boolean[2];
  Instances train = null;
  Clusterer[] clusterers = null;
  ClusterEvaluation evaluationB = null; // evaluation with original weights
  ClusterEvaluation evaluationI = null; // evaluation with modified weights
  boolean built = false;
  // set when the two evaluations are identical (weights apparently ignored)
  boolean evalFail = false;
  try {
    // note: one extra nominal/numeric attribute is requested here compared
    // to the other tests in this class
    train = makeTestDataset(42, numTrain,
        nominalPredictor ? getNumNominal() + 1 : 0,
        numericPredictor ? getNumNumeric() + 1 : 0,
        stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0,
        relationalPredictor ? getNumRelational() : 0, multiInstance);
    if (nominalPredictor && !multiInstance) {
      train.deleteAttributeAt(0);
    }
    if (missingLevel > 0) {
      // missingLevel is 0 above, so this branch is currently dead
      addMissing(train, missingLevel, predictorMissing);
    }
    clusterers = AbstractClusterer.makeCopies(getClusterer(), 2);
    evaluationB = new ClusterEvaluation();
    evaluationI = new ClusterEvaluation();
    // baseline: build and evaluate with uniform (default) weights
    clusterers[0].buildClusterer(train);
    evaluationB.setClusterer(clusterers[0]);
    evaluationB.evaluateClusterer(train);
  } catch (Exception ex) {
    throw new Error("Error setting up for tests: " + ex.getMessage());
  }
  try {
    // Now modify instance weights and re-built/test
    for (int i = 0; i < train.numInstances(); i++) {
      train.instance(i).setWeight(0);
    }
    // fixed seed makes the weight assignment reproducible; roughly half the
    // instances get a random weight in [1, 10], the rest keep weight 0
    Random random = new Random(1);
    for (int i = 0; i < train.numInstances() / 2; i++) {
      int inst = random.nextInt(train.numInstances());
      int weight = random.nextInt(10) + 1;
      train.instance(inst).setWeight(weight);
    }
    clusterers[1].buildClusterer(train);
    built = true;
    evaluationI.setClusterer(clusterers[1]);
    evaluationI.evaluateClusterer(train);
    // identical evaluations => weights were ignored; treated as a failure
    if (evaluationB.equals(evaluationI)) {
      // println("no");
      evalFail = true;
      throw new Exception("evalFail");
    }

    println("yes");
    result[0] = true;
  } catch (Exception ex) {
    println("no");
    result[0] = false;

    if (m_Debug) {
      println("\n=== Full Report ===");

      if (evalFail) {
        println("Results don't differ between non-weighted and "
            + "weighted instance models.");
        println("Here are the results:\n");
        println("\nboth methods\n");
        println(evaluationB.clusterResultsToString());
      } else {
        print("Problem during");
        if (built) {
          print(" testing");
        } else {
          print(" training");
        }
        println(": " + ex.getMessage() + "\n");
      }
      println("Here is the dataset:\n");
      println("=== Train Dataset ===\n" + train.toString() + "\n");
      println("=== Train Weights ===\n");
      for (int i = 0; i < train.numInstances(); i++) {
        println(" " + (i + 1) + "    " + train.instance(i).weight());
      }
    }
  }

  return result;
}
/**
* Checks whether the scheme alters the training dataset during training. If
* the scheme needs to modify the training data it should take a copy of the
* training data. Currently checks for changes to header structure, number of
* instances, order of instances, instance weights.
*
* @param nominalPredictor if true use nominal predictor attributes
* @param numericPredictor if true use numeric predictor attributes
* @param stringPredictor if true use string predictor attributes
* @param datePredictor if true use date predictor attributes
* @param relationalPredictor if true use relational predictor attributes
* @param multiInstance whether multi-instance is needed
* @param predictorMissing true if we know the clusterer can handle (at least)
* moderate missing predictor values
* @return index 0 is true if the test was passed
*/
/**
 * Checks whether the scheme alters the training dataset during training. If
 * the scheme needs to modify the training data it should take a copy of the
 * training data. Currently checks for changes to header structure, number of
 * instances, order of instances, instance weights.
 *
 * @param nominalPredictor if true use nominal predictor attributes
 * @param numericPredictor if true use numeric predictor attributes
 * @param stringPredictor if true use string predictor attributes
 * @param datePredictor if true use date predictor attributes
 * @param relationalPredictor if true use relational predictor attributes
 * @param multiInstance whether multi-instance is needed
 * @param predictorMissing true if we know the clusterer can handle (at least)
 *          moderate missing predictor values
 * @return index 0 is true if the test was passed (index 1 is never set here
 *         and remains false)
 */
protected boolean[] datasetIntegrity(boolean nominalPredictor,
    boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
    boolean relationalPredictor, boolean multiInstance, boolean predictorMissing) {

  print("clusterer doesn't alter original datasets");
  printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
      datePredictor, relationalPredictor, multiInstance);
  print("...");

  // 20% missing values are injected so that missing-value code paths in the
  // scheme are exercised as well
  int numTrain = getNumInstances(), missingLevel = 20;

  boolean[] result = new boolean[2];
  Instances train = null;
  Clusterer clusterer = null;
  try {
    train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal()
        : 0, numericPredictor ? getNumNumeric() : 0,
        stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0,
        relationalPredictor ? getNumRelational() : 0, multiInstance);
    if (nominalPredictor && !multiInstance) {
      train.deleteAttributeAt(0);
    }
    if (missingLevel > 0) {
      addMissing(train, missingLevel, predictorMissing);
    }
    clusterer = AbstractClusterer.makeCopies(getClusterer(), 1)[0];
  } catch (Exception ex) {
    throw new Error("Error setting up for tests: " + ex.getMessage());
  }
  try {
    // train on a copy, then verify the copy is still identical to the
    // untouched original
    Instances trainCopy = new Instances(train);
    clusterer.buildClusterer(trainCopy);
    compareDatasets(train, trainCopy);

    println("yes");
    result[0] = true;
  } catch (Exception ex) {
    println("no");
    result[0] = false;

    if (m_Debug) {
      println("\n=== Full Report ===");
      print("Problem during training");
      println(": " + ex.getMessage() + "\n");
      println("Here is the dataset:\n");
      println("=== Train Dataset ===\n" + train.toString() + "\n");
    }
  }

  return result;
}
/**
* Checks whether an updateable scheme produces the same model when trained
* incrementally as when batch trained. The model itself cannot be compared,
* so we compare the evaluation on test data for both models. It is possible
* to get a false positive on this test (likelihood depends on the
* classifier).
*
* @param nominalPredictor if true use nominal predictor attributes
* @param numericPredictor if true use numeric predictor attributes
* @param stringPredictor if true use string predictor attributes
* @param datePredictor if true use date predictor attributes
* @param relationalPredictor if true use relational predictor attributes
* @param multiInstance whether multi-instance is needed
* @return index 0 is true if the test was passed
*/
/**
 * Checks whether an updateable scheme produces the same model when trained
 * incrementally as when batch trained. The model itself cannot be compared,
 * so we compare the evaluation on test data for both models. It is possible
 * to get a false positive on this test (likelihood depends on the
 * classifier).
 *
 * @param nominalPredictor if true use nominal predictor attributes
 * @param numericPredictor if true use numeric predictor attributes
 * @param stringPredictor if true use string predictor attributes
 * @param datePredictor if true use date predictor attributes
 * @param relationalPredictor if true use relational predictor attributes
 * @param multiInstance whether multi-instance is needed
 * @return index 0 is true if the test was passed
 */
protected boolean[] updatingEquality(boolean nominalPredictor,
    boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
    boolean relationalPredictor, boolean multiInstance) {

  print("incremental training produces the same results"
      + " as batch training");
  printAttributeSummary(nominalPredictor, numericPredictor, stringPredictor,
      datePredictor, relationalPredictor, multiInstance);
  print("...");

  int numTrain = getNumInstances(), missingLevel = 0;
  boolean predictorMissing = false, classMissing = false;

  boolean[] result = new boolean[2];
  Instances train = null;
  Clusterer[] clusterers = null;
  ClusterEvaluation evaluationB = null; // evaluation of the batch-built model
  ClusterEvaluation evaluationI = null; // evaluation of the incremental model
  boolean built = false;
  try {
    train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal()
        : 0, numericPredictor ? getNumNumeric() : 0,
        stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0,
        relationalPredictor ? getNumRelational() : 0, multiInstance);
    if (missingLevel > 0) {
      // missingLevel is 0 above, so this branch is currently dead
      addMissing(train, missingLevel, predictorMissing, classMissing);
    }

    clusterers = AbstractClusterer.makeCopies(getClusterer(), 2);
    evaluationB = new ClusterEvaluation();
    evaluationI = new ClusterEvaluation();
    // baseline: batch build on the full dataset
    clusterers[0].buildClusterer(train);
    evaluationB.setClusterer(clusterers[0]);
    evaluationB.evaluateClusterer(train);
  } catch (Exception ex) {
    throw new Error("Error setting up for tests: " + ex.getMessage());
  }
  try {
    // incremental: initialise on an empty header, then feed one instance
    // at a time through the UpdateableClusterer interface
    clusterers[1].buildClusterer(new Instances(train, 0));
    for (int i = 0; i < train.numInstances(); i++) {
      ((UpdateableClusterer) clusterers[1])
          .updateClusterer(train.instance(i));
    }
    built = true;
    evaluationI.setClusterer(clusterers[1]);
    evaluationI.evaluateClusterer(train);
    if (!evaluationB.equals(evaluationI)) {
      println("no");
      result[0] = false;

      if (m_Debug) {
        println("\n=== Full Report ===");
        println("Results differ between batch and "
            + "incrementally built models.\n"
            + "Depending on the classifier, this may be OK");
        println("Here are the results:\n");
        println("\nbatch built results\n"
            + evaluationB.clusterResultsToString());
        println("\nincrementally built results\n"
            + evaluationI.clusterResultsToString());
        println("Here are the datasets:\n");
        println("=== Train Dataset ===\n" + train.toString() + "\n");
      }
    } else {
      println("yes");
      result[0] = true;
    }
  } catch (Exception ex) {
    result[0] = false;

    print("Problem during");
    if (built) {
      print(" testing");
    } else {
      print(" training");
    }
    println(": " + ex.getMessage() + "\n");
  }

  return result;
}
/**
 * Runs a test on the datasets with the given characteristics.
*
* @param nominalPredictor if true use nominal predictor attributes
* @param numericPredictor if true use numeric predictor attributes
* @param stringPredictor if true use string predictor attributes
* @param datePredictor if true use date predictor attributes
* @param relationalPredictor if true use relational predictor attributes
* @param multiInstance whether multi-instance is needed
* @param missingLevel the percentage of missing values
* @param predictorMissing true if the missing values may be in the predictors
* @param numTrain the number of instances in the training set
* @param accepts the acceptable string in an exception
* @return index 0 is true if the test was passed, index 1 is true if test was
* acceptable
*/
/**
 * Runs a basic build test on a dataset with the given characteristics and
 * records whether the scheme could be trained on it.
 *
 * @param nominalPredictor if true use nominal predictor attributes
 * @param numericPredictor if true use numeric predictor attributes
 * @param stringPredictor if true use string predictor attributes
 * @param datePredictor if true use date predictor attributes
 * @param relationalPredictor if true use relational predictor attributes
 * @param multiInstance whether multi-instance is needed
 * @param missingLevel the percentage of missing values
 * @param predictorMissing true if the missing values may be in the predictors
 * @param numTrain the number of instances in the training set
 * @param accepts the acceptable strings in an exception
 * @return index 0 is true if the test was passed, index 1 is true if test was
 *         acceptable (i.e. it failed but with a sensible error message)
 */
protected boolean[] runBasicTest(boolean nominalPredictor,
    boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
    boolean relationalPredictor, boolean multiInstance, int missingLevel,
    boolean predictorMissing, int numTrain, Vector<String> accepts) {

  boolean[] result = new boolean[2];
  Instances train = null;
  Clusterer clusterer = null;
  try {
    train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal()
        : 0, numericPredictor ? getNumNumeric() : 0,
        stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0,
        relationalPredictor ? getNumRelational() : 0, multiInstance);
    if (nominalPredictor && !multiInstance) {
      train.deleteAttributeAt(0);
    }
    if (missingLevel > 0) {
      addMissing(train, missingLevel, predictorMissing);
    }
    clusterer = AbstractClusterer.makeCopies(getClusterer(), 1)[0];
  } catch (Exception ex) {
    ex.printStackTrace();
    throw new Error("Error setting up for tests: " + ex.getMessage());
  }
  try {
    clusterer.buildClusterer(train);
    println("yes");
    result[0] = true;
  } catch (Exception ex) {
    boolean acceptable = false;
    // FIX: getMessage() can be null (e.g. for a NullPointerException thrown
    // by the scheme); guard so the checker itself doesn't NPE here
    String msg =
        (ex.getMessage() == null) ? "" : ex.getMessage().toLowerCase();
    for (int i = 0; i < accepts.size(); i++) {
      if (msg.contains(accepts.elementAt(i))) {
        acceptable = true;
      }
    }

    println("no" + (acceptable ? " (OK error message)" : ""));
    result[1] = acceptable;

    if (m_Debug) {
      println("\n=== Full Report ===");
      print("Problem during training");
      println(": " + ex.getMessage() + "\n");
      if (!acceptable) {
        if (accepts.size() > 0) {
          print("Error message doesn't mention ");
          for (int i = 0; i < accepts.size(); i++) {
            if (i != 0) {
              print(" or ");
            }
            print('"' + accepts.elementAt(i) + '"');
          }
        }
        println("here is the dataset:\n");
        println("=== Train Dataset ===\n" + train.toString() + "\n");
      }
    }
  }

  return result;
}
/**
* Add missing values to a dataset.
*
* @param data the instances to add missing values to
* @param level the level of missing values to add (if positive, this is the
* probability that a value will be set to missing, if negative all
* but one value will be set to missing (not yet implemented))
* @param predictorMissing if true, predictor attributes will be modified
*/
/**
 * Add missing values to a dataset.
 *
 * @param data the instances to add missing values to
 * @param level the level of missing values to add (if positive, this is the
 *          probability that a value will be set to missing, if negative all
 *          but one value will be set to missing (not yet implemented))
 * @param predictorMissing if true, predictor attributes will be modified
 */
protected void addMissing(Instances data, int level, boolean predictorMissing) {
  // fixed seed so the corrupted dataset is reproducible across runs
  Random rnd = new Random(1);

  // nothing to do unless predictor values may be set to missing
  if (!predictorMissing) {
    return;
  }

  for (int instIdx = 0; instIdx < data.numInstances(); instIdx++) {
    Instance inst = data.instance(instIdx);
    for (int attIdx = 0; attIdx < data.numAttributes(); attIdx++) {
      // each value is independently set to missing with probability level%
      if (rnd.nextInt(100) < level) {
        inst.setMissing(attIdx);
      }
    }
  }
}
/**
* Make a simple set of instances with variable position of the class
* attribute, which can later be modified for use in specific tests.
*
* @param seed the random number seed
* @param numInstances the number of instances to generate
* @param numNominal the number of nominal attributes
* @param numNumeric the number of numeric attributes
* @param numString the number of string attributes
* @param numDate the number of date attributes
* @param numRelational the number of relational attributes
 * @param multiInstance whether the dataset should be a multi-instance dataset
* @return the test dataset
* @throws Exception if the dataset couldn't be generated
* @see TestInstances#CLASS_IS_LAST
*/
/**
 * Make a simple set of instances, with no class attribute set, which can
 * later be modified for use in specific tests.
 *
 * @param seed the random number seed
 * @param numInstances the number of instances to generate
 * @param numNominal the number of nominal attributes
 * @param numNumeric the number of numeric attributes
 * @param numString the number of string attributes
 * @param numDate the number of date attributes
 * @param numRelational the number of relational attributes
 * @param multiInstance whether the dataset should be a multi-instance dataset
 * @return the test dataset
 * @throws Exception if the dataset couldn't be generated
 * @see TestInstances#CLASS_IS_LAST
 */
protected Instances makeTestDataset(int seed, int numInstances,
    int numNominal, int numNumeric, int numString, int numDate,
    int numRelational, boolean multiInstance) throws Exception {
  TestInstances generator = new TestInstances();

  // basic generation parameters
  generator.setSeed(seed);
  generator.setNumInstances(numInstances);
  generator.setMultiInstance(multiInstance);
  // clustering is unsupervised, so no class attribute is generated
  generator.setClassIndex(TestInstances.NO_CLASS);

  // attribute mix
  generator.setNumNominal(numNominal);
  generator.setNumNumeric(numNumeric);
  generator.setNumString(numString);
  generator.setNumDate(numDate);
  generator.setNumRelational(numRelational);

  return generator.generate();
}
/**
* Print out a short summary string for the dataset characteristics
*
* @param nominalPredictor true if nominal predictor attributes are present
* @param numericPredictor true if numeric predictor attributes are present
* @param stringPredictor true if string predictor attributes are present
* @param datePredictor true if date predictor attributes are present
* @param relationalPredictor true if relational predictor attributes are
* present
* @param multiInstance whether multi-instance is needed
*/
/**
 * Print out a short summary string for the dataset characteristics.
 *
 * @param nominalPredictor true if nominal predictor attributes are present
 * @param numericPredictor true if numeric predictor attributes are present
 * @param stringPredictor true if string predictor attributes are present
 * @param datePredictor true if date predictor attributes are present
 * @param relationalPredictor true if relational predictor attributes are
 *          present
 * @param multiInstance whether multi-instance is needed (currently unused in
 *          the summary text)
 */
protected void printAttributeSummary(boolean nominalPredictor,
    boolean numericPredictor, boolean stringPredictor, boolean datePredictor,
    boolean relationalPredictor, boolean multiInstance) {
  // table-driven version of the original if-chain; order matters to keep
  // the output identical (numeric is listed first)
  boolean[] present = { numericPredictor, nominalPredictor, stringPredictor,
      datePredictor, relationalPredictor };
  String[] label = { "numeric", "nominal", "string", "date", "relational" };

  StringBuilder summary = new StringBuilder();
  for (int i = 0; i < present.length; i++) {
    if (present[i]) {
      if (summary.length() > 0) {
        summary.append(" & ");
      }
      summary.append(label[i]);
    }
  }

  print(" (" + summary.toString() + " predictors)");
}
/**
* Returns the revision string.
*
* @return the revision
*/
/**
 * Returns the revision string extracted from the version-control keyword.
 *
 * @return the revision
 */
@Override
public String getRevision() {
  final String revisionKeyword = "$Revision$";
  return RevisionUtils.extract(revisionKeyword);
}
/**
* Test method for this class
*
* @param args the commandline options
*/
/**
 * Test method for this class.
 *
 * @param args the commandline options
 */
public static void main(String[] args) {
  CheckClusterer check = new CheckClusterer();
  runCheck(check, args);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/clusterers/ClusterEvaluation.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ClusterEvaluation.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.clusterers;
import java.beans.BeanInfo;
import java.beans.Introspector;
import java.beans.MethodDescriptor;
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.Serializable;
import java.lang.reflect.Method;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;
import weka.core.BatchPredictor;
import weka.core.Drawable;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.Range;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.core.converters.ConverterUtils.DataSource;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.Remove;
/**
* Class for evaluating clustering models.
* <p/>
*
* Valid options are:
* <p/>
*
* -t name of the training file <br/>
* Specify the training file.
* <p/>
*
* -T name of the test file <br/>
* Specify the test file to apply clusterer to.
* <p/>
*
* -force-batch-training <br/>
* Always train the clusterer in batch mode, never incrementally.
* <p/>
*
* -d name of file to save clustering model to <br/>
* Specify output file.
* <p/>
*
* -l name of file to load clustering model from <br/>
 * Specify input file.
* <p/>
*
* -p attribute range <br/>
* Output predictions. Predictions are for the training file if only the
* training file is specified, otherwise they are for the test file. The range
* specifies attribute values to be output with the predictions. Use '-p 0' for
* none.
* <p/>
*
* -x num folds <br/>
* Set the number of folds for a cross validation of the training data. Cross
* validation can only be done for distribution clusterers and will be performed
* if the test file is missing.
* <p/>
*
* -s num <br/>
* Sets the seed for randomizing the data for cross-validation.
* <p/>
*
* -c class <br/>
* Set the class attribute. If set, then class based evaluation of clustering is
* performed.
* <p/>
*
* -g name of graph file <br/>
* Outputs the graph representation of the clusterer to the file. Only for
* clusterer that implemented the <code>weka.core.Drawable</code> interface.
* <p/>
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
* @see weka.core.Drawable
*/
public class ClusterEvaluation implements Serializable, RevisionHandler {

  /** for serialization */
  static final long serialVersionUID = -830188327319128005L;

  /** the clusterer under evaluation */
  private Clusterer m_Clusterer;

  /** holds a string describing the results of clustering the training data */
  private final StringBuffer m_clusteringResults;

  /** holds the number of clusters found by the clusterer */
  private int m_numClusters;

  /**
   * holds the assignments of instances to clusters for a particular testing
   * dataset; an entry of -1 marks an instance the clusterer failed to cluster
   */
  private double[] m_clusterAssignments;

  /**
   * holds the average log likelihood for a particular testing dataset if the
   * clusterer is a DensityBasedClusterer
   */
  private double m_logL;

  /**
   * will hold the mapping of classes to clusters (for class based evaluation)
   */
  private int[] m_classToCluster = null;
/**
* set the clusterer
*
* @param clusterer the clusterer to use
*/
/**
 * Sets the clusterer to be evaluated.
 *
 * @param clusterer the clusterer to use
 */
public void setClusterer(Clusterer clusterer) {
  this.m_Clusterer = clusterer;
}
/**
* return the results of clustering.
*
* @return a string detailing the results of clustering a data set
*/
/**
 * Returns the results of clustering.
 *
 * @return a string detailing the results of clustering a data set
 */
public String clusterResultsToString() {
  return this.m_clusteringResults.toString();
}
/**
* Return the number of clusters found for the most recent call to
* evaluateClusterer
*
* @return the number of clusters found
*/
/**
 * Returns the number of clusters found for the most recent call to
 * evaluateClusterer.
 *
 * @return the number of clusters found
 */
public int getNumClusters() {
  return this.m_numClusters;
}
/**
* Return an array of cluster assignments corresponding to the most recent set
* of instances clustered.
*
* @return an array of cluster assignments
*/
/**
 * Returns an array of cluster assignments corresponding to the most recent
 * set of instances clustered. Note that this is the internal array, not a
 * copy, so callers should not modify it.
 *
 * @return an array of cluster assignments
 */
public double[] getClusterAssignments() {
  return this.m_clusterAssignments;
}
/**
* Return the array (ordered by cluster number) of minimum error class to
* cluster mappings
*
* @return an array of class to cluster mappings
*/
/**
 * Returns the array (ordered by cluster number) of minimum error class to
 * cluster mappings.
 *
 * @return an array of class to cluster mappings
 */
public int[] getClassesToClusters() {
  return this.m_classToCluster;
}
/**
* Return the log likelihood corresponding to the most recent set of instances
* clustered.
*
* @return a <code>double</code> value
*/
/**
 * Returns the log likelihood corresponding to the most recent set of
 * instances clustered.
 *
 * @return a <code>double</code> value
 */
public double getLogLikelihood() {
  return this.m_logL;
}
/**
 * Constructor. Sets defaults for each member variable. Default Clusterer is
 * SimpleKMeans.
*/
public ClusterEvaluation() {
  // initialise result holders first, then install the default clusterer
  m_clusteringResults = new StringBuffer();
  m_clusterAssignments = null;
  setClusterer(new SimpleKMeans());
}
/**
* Evaluate the clusterer on a set of instances. Calculates clustering
 * statistics and stores cluster assignments for the instances in
* m_clusterAssignments
*
* @param test the set of instances to cluster
* @throws Exception if something goes wrong
*/
/**
 * Evaluates the clusterer on a set of instances. Delegates to the two-arg
 * overload with no test file name.
 *
 * @param test the set of instances to cluster
 * @throws Exception if something goes wrong
 */
public void evaluateClusterer(Instances test) throws Exception {
  this.evaluateClusterer(test, "");
}
/**
* Evaluate the clusterer on a set of instances. Calculates clustering
 * statistics and stores cluster assignments for the instances in
* m_clusterAssignments
*
* @param test the set of instances to cluster
* @param testFileName the name of the test file for incremental testing, if
* "" or null then not used
*
* @throws Exception if something goes wrong
*/
/**
 * Evaluates the clusterer on a set of instances. Delegates to the three-arg
 * overload with model output enabled.
 *
 * @param test the set of instances to cluster
 * @param testFileName the name of the test file for incremental testing, if
 *          "" or null then not used
 * @throws Exception if something goes wrong
 */
public void evaluateClusterer(Instances test, String testFileName)
    throws Exception {
  this.evaluateClusterer(test, testFileName, true);
}
/**
* Evaluate the clusterer on a set of instances. Calculates clustering
 * statistics and stores cluster assignments for the instances in
* m_clusterAssignments
*
* @param test the set of instances to cluster
* @param testFileName the name of the test file for incremental testing, if
* "" or null then not used
* @param outputModel true if the clustering model is to be output as well as
* the stats
*
* @throws Exception if something goes wrong
*/
/**
 * Evaluate the clusterer on a set of instances. Calculates clustering
 * statistics and stores cluster assignments for the instances in
 * m_clusterAssignments.
 *
 * @param test the set of instances to cluster
 * @param testFileName the name of the test file for incremental testing, if
 *          "" or null then not used
 * @param outputModel true if the clustering model is to be output as well as
 *          the stats
 *
 * @throws Exception if something goes wrong
 */
public void evaluateClusterer(Instances test, String testFileName,
    boolean outputModel) throws Exception {
  int i = 0;
  int cnum;
  double loglk = 0.0;
  int cc = m_Clusterer.numberOfClusters();
  m_numClusters = cc;
  double[] instanceStats = new double[cc]; // instance count per cluster
  Instances testRaw = null;
  boolean hasClass = (test.classIndex() >= 0);
  int unclusteredInstances = 0;
  Vector<Double> clusterAssignments = new Vector<Double>();
  Filter filter = null;
  DataSource source = null;
  Instance inst;

  if (testFileName == null) {
    testFileName = "";
  }

  // load data: stream from the file if one was given, else from the
  // supplied Instances object
  if (testFileName.length() != 0) {
    source = new DataSource(testFileName);
  } else {
    source = new DataSource(test);
  }
  testRaw = source.getStructure(test.classIndex());

  // If class is set then do class based evaluation as well
  if (hasClass) {
    if (testRaw.classAttribute().isNumeric()) {
      throw new Exception("ClusterEvaluation: Class must be nominal!");
    }

    // remove the class attribute before passing instances to the clusterer
    filter = new Remove();
    ((Remove) filter).setAttributeIndices("" + (testRaw.classIndex() + 1));
    ((Remove) filter).setInvertSelection(false);
    filter.setInputFormat(testRaw);
  }

  // buffer used only when the clusterer supports efficient batch prediction
  Instances forBatchPredictors =
      filter != null ? new Instances(filter.getOutputFormat(), 0)
          : new Instances(source.getStructure(), 0);

  i = 0;
  while (source.hasMoreElements(testRaw)) {
    // next instance
    inst = source.nextElement(testRaw);
    if (filter != null) {
      filter.input(inst);
      filter.batchFinished();
      inst = filter.output();
    }

    if (m_Clusterer instanceof BatchPredictor
        && ((BatchPredictor) m_Clusterer)
            .implementsMoreEfficientBatchPrediction()) {
      // defer clustering: collect instances for a single batch call below
      forBatchPredictors.add(inst);
    } else {
      cnum = -1;
      try {
        if (m_Clusterer instanceof DensityBasedClusterer) {
          // accumulate log density for the average log likelihood
          loglk +=
              ((DensityBasedClusterer) m_Clusterer).logDensityForInstance(inst);
          cnum = m_Clusterer.clusterInstance(inst);
          clusterAssignments.add((double) cnum);
        } else {
          cnum = m_Clusterer.clusterInstance(inst);
          clusterAssignments.add((double) cnum);
        }
      } catch (Exception e) {
        // instance could not be clustered: record it as assignment -1
        clusterAssignments.add(-1.0);
        unclusteredInstances++;
      }

      if (cnum != -1) {
        instanceStats[cnum]++;
      }
    }
  }

  // batch route: one prediction call for all buffered instances; the
  // assigned cluster is the index of the largest distribution entry
  if (m_Clusterer instanceof BatchPredictor
      && ((BatchPredictor) m_Clusterer)
          .implementsMoreEfficientBatchPrediction()) {
    double[][] dists =
        ((BatchPredictor) m_Clusterer)
            .distributionsForInstances(forBatchPredictors);
    for (double[] d : dists) {
      cnum = Utils.maxIndex(d);
      clusterAssignments.add((double) cnum);
      instanceStats[cnum]++;
    }
  }

  // NOTE(review): if every instance failed to cluster, sum is 0 and the
  // average log likelihood below becomes NaN -- confirm this is acceptable
  double sum = Utils.sum(instanceStats);
  loglk /= sum;
  m_logL = loglk;

  m_clusterAssignments = new double[clusterAssignments.size()];
  for (i = 0; i < clusterAssignments.size(); i++) {
    m_clusterAssignments[i] = clusterAssignments.get(i);
  }
  // field width: number of digits in the instance count
  int numInstFieldWidth =
      (int) ((Math.log(clusterAssignments.size()) / Math.log(10)) + 1);

  if (outputModel) {
    m_clusteringResults.append(m_Clusterer.toString());
  }
  m_clusteringResults.append("Clustered Instances\n\n");
  int clustFieldWidth = (int) ((Math.log(cc) / Math.log(10)) + 1);
  for (i = 0; i < cc; i++) {
    // only non-empty clusters are listed
    if (instanceStats[i] > 0) {
      m_clusteringResults.append(Utils.doubleToString(i, clustFieldWidth, 0)
          + "      "
          + Utils.doubleToString(instanceStats[i], numInstFieldWidth, 0) + " ("
          + Utils.doubleToString((instanceStats[i] / sum * 100.0), 3, 0)
          + "%)\n");
    }
  }

  if (unclusteredInstances > 0) {
    m_clusteringResults.append("\nUnclustered instances : "
        + unclusteredInstances);
  }

  if (m_Clusterer instanceof DensityBasedClusterer) {
    m_clusteringResults.append("\n\nLog likelihood: "
        + Utils.doubleToString(loglk, 1, 5) + "\n");
  }

  if (hasClass) {
    evaluateClustersWithRespectToClass(test, testFileName);
  }
}
/**
* Evaluates cluster assignments with respect to actual class labels. Assumes
* that m_Clusterer has been trained and tested on inst (minus the class).
*
* @param inst the instances (including class) to evaluate with respect to
* @param fileName the name of the test file for incremental testing, if "" or
* null then not used
* @throws Exception if something goes wrong
*/
/**
 * Evaluates cluster assignments with respect to actual class labels. Assumes
 * that m_Clusterer has been trained and tested on inst (minus the class).
 *
 * @param inst the instances (including class) to evaluate with respect to
 * @param fileName the name of the test file for incremental testing, if "" or
 *          null then not used
 * @throws Exception if something goes wrong
 */
private void evaluateClustersWithRespectToClass(Instances inst,
    String fileName) throws Exception {
  int numClasses = inst.classAttribute().numValues();
  // counts[cluster][class]: class frequencies within each cluster
  int[][] counts = new int[m_numClusters][numClasses];
  int[] clusterTotals = new int[m_numClusters];
  // work/best arrays for the class-to-cluster search; the extra slot at
  // index m_numClusters holds the error of the assignment
  double[] best = new double[m_numClusters + 1];
  double[] current = new double[m_numClusters + 1];

  DataSource source = null;
  Instances instances = null;
  Instance instance = null;

  int i;
  int numInstances;

  if (fileName == null) {
    fileName = "";
  }

  // re-read the data (from file if incremental testing was used)
  if (fileName.length() != 0) {
    source = new DataSource(fileName);
  } else {
    source = new DataSource(inst);
  }
  instances = source.getStructure(inst.classIndex());

  i = 0;
  while (source.hasMoreElements(instances)) {
    instance = source.nextElement(instances);

    // skip instances the clusterer could not cluster (assignment -1) and
    // instances whose class value is missing
    if (m_clusterAssignments[i] >= 0) {
      if (!instance.classIsMissing()) {
        counts[(int) m_clusterAssignments[i]][(int) instance.classValue()]++;
        clusterTotals[(int) m_clusterAssignments[i]]++;
      }
    }
    i++;
  }
  numInstances = i;

  // exhaustive search for the minimum-error class-to-cluster mapping
  best[m_numClusters] = Double.MAX_VALUE;
  mapClasses(m_numClusters, 0, counts, clusterTotals, current, best, 0);

  m_clusteringResults.append("\n\nClass attribute: "
      + inst.classAttribute().name() + "\n");
  m_clusteringResults.append("Classes to Clusters:\n");
  String matrixString =
      toMatrixString(counts, clusterTotals, new Instances(inst, 0));
  m_clusteringResults.append(matrixString).append("\n");

  int Cwidth = 1 + (int) (Math.log(m_numClusters) / Math.log(10));
  // add the minimum error assignment
  for (i = 0; i < m_numClusters; i++) {
    if (clusterTotals[i] > 0) {
      m_clusteringResults.append("Cluster "
          + Utils.doubleToString(i, Cwidth, 0));
      m_clusteringResults.append(" <-- ");

      // best[i] < 0 means this cluster was assigned no class
      if (best[i] < 0) {
        m_clusteringResults.append("No class\n");
      } else {
        m_clusteringResults
            .append(inst.classAttribute().value((int) best[i])).append("\n");
      }
    }
  }

  m_clusteringResults.append("\nIncorrectly clustered instances :\t"
      + best[m_numClusters]
      + "\t"
      + (Utils.doubleToString((best[m_numClusters] / numInstances * 100.0), 8,
          4)) + " %\n");

  // copy the class assignments
  m_classToCluster = new int[m_numClusters];
  for (i = 0; i < m_numClusters; i++) {
    m_classToCluster[i] = (int) best[i];
  }
}
/**
* Returns a "confusion" style matrix of classes to clusters assignments
*
* @param counts the counts of classes for each cluster
* @param clusterTotals total number of examples in each cluster
* @param inst the training instances (with class)
* @return the "confusion" style matrix as string
* @throws Exception if matrix can't be generated
*/
/**
 * Returns a "confusion" style matrix of classes to clusters assignments.
 * Rows are classes, columns are the non-empty clusters.
 *
 * @param counts the counts of classes for each cluster
 * @param clusterTotals total number of examples in each cluster
 * @param inst the training instances (with class); only header information
 *          is read here
 * @return the "confusion" style matrix as string
 * @throws Exception if matrix can't be generated
 */
private String toMatrixString(int[][] counts, int[] clusterTotals,
    Instances inst) throws Exception {
  StringBuffer ms = new StringBuffer();

  // find the largest cell value, used to size the matrix columns
  int maxval = 0;
  for (int i = 0; i < m_numClusters; i++) {
    for (int j = 0; j < counts[i].length; j++) {
      if (counts[i][j] > maxval) {
        maxval = counts[i][j];
      }
    }
  }
  // column width: enough digits for the largest count or cluster index
  int Cwidth =
      1 + Math.max((int) (Math.log(maxval) / Math.log(10)),
          (int) (Math.log(m_numClusters) / Math.log(10)));

  ms.append("\n");

  // header row: indices of the non-empty clusters
  for (int i = 0; i < m_numClusters; i++) {
    if (clusterTotals[i] > 0) {
      ms.append("  ").append(Utils.doubleToString(i, Cwidth, 0));
    }
  }
  ms.append("  <-- assigned to cluster\n");

  // one row per class, one column per non-empty cluster
  for (int i = 0; i < counts[0].length; i++) {
    for (int j = 0; j < m_numClusters; j++) {
      if (clusterTotals[j] > 0) {
        ms.append(" ").append(Utils.doubleToString(counts[j][i], Cwidth, 0));
      }
    }
    ms.append(" | ").append(inst.classAttribute().value(i)).append("\n");
  }

  return ms.toString();
}
/**
* Finds the minimum error mapping of classes to clusters. Recursively
* considers all possible class to cluster assignments.
*
* @param numClusters the number of clusters
* @param lev the cluster being processed
* @param counts the counts of classes in clusters
* @param clusterTotals the total number of examples in each cluster
* @param current the current path through the class to cluster assignment
* tree
* @param best the best assignment path seen
* @param error accumulates the error for a particular path
*/
/**
 * Finds the minimum error mapping of classes to clusters. Recursively
 * considers all possible class to cluster assignments.
 *
 * @param numClusters the number of clusters
 * @param lev the cluster being processed
 * @param counts the counts of classes in clusters
 * @param clusterTotals the total number of examples in each cluster
 * @param current the current path through the class to cluster assignment
 *          tree (current[i] is the class assigned to cluster i, or -1)
 * @param best the best assignment path seen; best[numClusters] holds its
 *          error
 * @param error accumulates the error for a particular path
 */
public static void mapClasses(int numClusters, int lev, int[][] counts,
    int[] clusterTotals, double[] current, double[] best, int error) {
  // leaf: a complete assignment has been built -- keep it if its error is
  // the lowest seen so far
  if (lev == numClusters) {
    if (error < best[numClusters]) {
      best[numClusters] = error;
      for (int i = 0; i < numClusters; i++) {
        best[i] = current[i];
      }
    }
  } else {
    // empty cluster -- ignore
    if (clusterTotals[lev] == 0) {
      current[lev] = -1; // cluster ignored
      mapClasses(numClusters, lev + 1, counts, clusterTotals, current, best,
          error);
    } else {
      // first try no class assignment to this cluster
      current[lev] = -1; // cluster assigned no class (ie all errors)
      mapClasses(numClusters, lev + 1, counts, clusterTotals, current, best,
          error + clusterTotals[lev]);
      // now loop through the classes in this cluster
      for (int i = 0; i < counts[0].length; i++) {
        if (counts[lev][i] > 0) {
          boolean ok = true;
          // check to see if this class has already been assigned
          for (int j = 0; j < lev; j++) {
            if ((int) current[j] == i) {
              ok = false;
              break;
            }
          }
          if (ok) {
            current[lev] = i;
            // instances in this cluster not of class i count as errors
            mapClasses(numClusters, lev + 1, counts, clusterTotals, current,
                best, (error + (clusterTotals[lev] - counts[lev][i])));
          }
        }
      }
    }
  }
}
/**
* Evaluates a clusterer with the options given in an array of strings. It
* takes the string indicated by "-t" as training file, the string indicated
* by "-T" as test file. If the test file is missing, a stratified ten-fold
* cross-validation is performed (distribution clusterers only). Using "-x"
* you can change the number of folds to be used, and using "-s" the random
* seed. If the "-p" option is present it outputs the classification for each
* test instance. If you provide the name of an object file using "-l", a
* clusterer will be loaded from the given file. If you provide the name of an
* object file using "-d", the clusterer built from the training data will be
* saved to the given file.
*
* @param clusterer machine learning clusterer
* @param options the array of string containing the options
* @throws Exception if model could not be evaluated successfully
* @return a string describing the results
*/
public static String evaluateClusterer(Clusterer clusterer, String[] options)
  throws Exception {
  int seed = 1, folds = 10;
  boolean doXval = false;
  Instances train = null;
  Random random;
  String trainFileName, testFileName, seedString, foldsString;
  String objectInputFileName, objectOutputFileName, attributeRangeString;
  String graphFileName;
  String[] savedOptions = null;
  boolean printClusterAssignments = false;
  Range attributesToOutput = null;
  StringBuffer text = new StringBuffer();
  int theClass = -1; // class based evaluation of clustering
  // Incremental training is used only when the clusterer is updateable and
  // batch mode has not been explicitly forced on the command line.
  boolean forceBatch = Utils.getFlag("force-batch-training", options);
  boolean updateable =
    (clusterer instanceof UpdateableClusterer) && !forceBatch;
  DataSource source = null;
  Instance inst;
  // Help request is signalled by throwing: the usage text travels in the
  // exception message.
  if (Utils.getFlag('h', options) || Utils.getFlag("help", options)) {
    // global info requested as well?
    boolean globalInfo =
      Utils.getFlag("synopsis", options) || Utils.getFlag("info", options);
    throw new Exception("Help requested."
      + makeOptionString(clusterer, globalInfo));
  }
  // ---- Phase 1: parse the general (scheme-independent) options ----
  try {
    // Get basic options (options the same for all clusterers
    // printClusterAssignments = Utils.getFlag('p', options);
    objectInputFileName = Utils.getOption('l', options);
    objectOutputFileName = Utils.getOption('d', options);
    trainFileName = Utils.getOption('t', options);
    testFileName = Utils.getOption('T', options);
    graphFileName = Utils.getOption('g', options);
    // Check -p option
    try {
      attributeRangeString = Utils.getOption('p', options);
    } catch (Exception e) {
      throw new Exception(e.getMessage()
        + "\nNOTE: the -p option has changed. "
        + "It now expects a parameter specifying a range of attributes "
        + "to list with the predictions. Use '-p 0' for none.");
    }
    if (attributeRangeString.length() != 0) {
      printClusterAssignments = true;
      // "-p 0" means: print assignments but no attribute values
      if (!attributeRangeString.equals("0")) {
        attributesToOutput = new Range(attributeRangeString);
      }
    }
    // Validate the file-option combinations
    if (trainFileName.length() == 0) {
      if (objectInputFileName.length() == 0) {
        throw new Exception("No training file and no object "
          + "input file given.");
      }
      if (testFileName.length() == 0) {
        throw new Exception("No training file and no test file given.");
      }
    } else {
      if ((objectInputFileName.length() != 0)
        && (printClusterAssignments == false)) {
        throw new Exception("Can't use both train and model file "
          + "unless -p specified.");
      }
    }
    seedString = Utils.getOption('s', options);
    if (seedString.length() != 0) {
      seed = Integer.parseInt(seedString);
    }
    foldsString = Utils.getOption('x', options);
    if (foldsString.length() != 0) {
      folds = Integer.parseInt(foldsString);
      doXval = true;
    }
  } catch (Exception e) {
    // Re-throw with the usage text appended so the user sees valid options
    throw new Exception('\n' + e.getMessage()
      + makeOptionString(clusterer, false));
  }
  // ---- Phase 2: resolve the class attribute (classes-to-clusters mode) ----
  try {
    if (trainFileName.length() != 0) {
      source = new DataSource(trainFileName);
      train = source.getStructure();
      String classString = Utils.getOption('c', options);
      if (classString.length() != 0) {
        // theClass is 1-based throughout this method
        if (classString.compareTo("last") == 0) {
          theClass = train.numAttributes();
        } else if (classString.compareTo("first") == 0) {
          theClass = 1;
        } else {
          theClass = Integer.parseInt(classString);
        }
        // Class-based evaluation is only valid on the training data and is
        // incompatible with loading or saving a serialized model
        if (theClass != -1) {
          if (doXval || testFileName.length() != 0) {
            throw new Exception("Can only do class based evaluation on the "
              + "training data");
          }
          if (objectInputFileName.length() != 0) {
            throw new Exception("Can't load a clusterer and do class based "
              + "evaluation");
          }
          if (objectOutputFileName.length() != 0) {
            throw new Exception(
              "Can't do class based evaluation and save clusterer");
          }
        }
      } else {
        // if the dataset defines a class attribute, use it
        if (train.classIndex() != -1) {
          theClass = train.classIndex() + 1;
          System.err
            .println("Note: using class attribute from dataset, i.e., attribute #"
              + theClass);
        }
      }
      if (theClass != -1) {
        if (theClass < 1 || theClass > train.numAttributes()) {
          throw new Exception("Class is out of range!");
        }
        if (!train.attribute(theClass - 1).isNominal()) {
          throw new Exception("Class must be nominal!");
        }
        train.setClassIndex(theClass - 1);
      }
    }
  } catch (Exception e) {
    throw new Exception("ClusterEvaluation: " + e.getMessage() + '.');
  }
  // Save options (a copy is needed because setOptions consumes them)
  if (options != null) {
    savedOptions = new String[options.length];
    System.arraycopy(options, 0, savedOptions, 0, options.length);
  }
  if (objectInputFileName.length() != 0) {
    // a loaded model takes no scheme-specific options
    Utils.checkForRemainingOptions(options);
  }
  // Set options for clusterer
  if (clusterer instanceof OptionHandler) {
    ((OptionHandler) clusterer).setOptions(options);
  }
  Utils.checkForRemainingOptions(options);
  Instances trainHeader = train;
  // ---- Phase 3: obtain the model (deserialize or train) ----
  if (objectInputFileName.length() != 0) {
    // Load the clusterer from file
    // clusterer = (Clusterer) SerializationHelper.read(objectInputFileName);
    java.io.ObjectInputStream ois =
      new java.io.ObjectInputStream(new java.io.BufferedInputStream(
        new java.io.FileInputStream(objectInputFileName)));
    clusterer = (Clusterer) ois.readObject();
    // try and get the training header; older model files may not contain one
    try {
      trainHeader = (Instances) ois.readObject();
    } catch (Exception ex) {
      // don't moan if we cant
    }
    ois.close();
  } else {
    // Build the clusterer if no object file provided
    if (theClass == -1) {
      if (updateable) {
        // incremental training: initialize on the structure, then feed
        // instances one at a time
        clusterer.buildClusterer(source.getStructure());
        while (source.hasMoreElements(train)) {
          inst = source.nextElement(train);
          ((UpdateableClusterer) clusterer).updateClusterer(inst);
        }
        ((UpdateableClusterer) clusterer).updateFinished();
      } else {
        clusterer.buildClusterer(source.getDataSet());
      }
    } else {
      // classes-to-clusters evaluation: the class attribute is removed
      // before clustering and only used afterwards for evaluation
      Remove removeClass = new Remove();
      removeClass.setAttributeIndices("" + theClass);
      removeClass.setInvertSelection(false);
      removeClass.setInputFormat(train);
      if (updateable) {
        Instances clusterTrain = Filter.useFilter(train, removeClass);
        clusterer.buildClusterer(clusterTrain);
        trainHeader = clusterTrain;
        while (source.hasMoreElements(train)) {
          inst = source.nextElement(train);
          removeClass.input(inst);
          removeClass.batchFinished();
          Instance clusterTrainInst = removeClass.output();
          ((UpdateableClusterer) clusterer).updateClusterer(clusterTrainInst);
        }
        ((UpdateableClusterer) clusterer).updateFinished();
      } else {
        Instances clusterTrain =
          Filter.useFilter(source.getDataSet(), removeClass);
        clusterer.buildClusterer(clusterTrain);
        trainHeader = clusterTrain;
      }
      ClusterEvaluation ce = new ClusterEvaluation();
      ce.setClusterer(clusterer);
      ce.evaluateClusterer(train, trainFileName);
      // If classifier is drawable output string describing graph
      if ((clusterer instanceof Drawable) && (graphFileName.length() != 0)) {
        BufferedWriter writer = new BufferedWriter(new FileWriter(graphFileName));
        writer.write(((Drawable) clusterer).graph());
        writer.newLine();
        writer.flush();
        writer.close();
      }
      // class-based evaluation returns here; the generic reporting below
      // is skipped (the -d/-x incompatibility was enforced in phase 2)
      return "\n\n=== Clustering stats for training data ===\n\n"
        + ce.clusterResultsToString();
    }
  }
  // ---- Phase 4: reporting ----
  /*
   * Output cluster predictions only (for the test data if specified,
   * otherwise for the training data
   */
  if (printClusterAssignments) {
    return printClusterings(clusterer, trainFileName, testFileName,
      attributesToOutput);
  }
  text.append(clusterer.toString());
  text.append("\n\n=== Clustering stats for training data ===\n\n"
    + printClusterStats(clusterer, trainFileName));
  if (testFileName.length() != 0) {
    // check header compatibility
    DataSource test = new DataSource(testFileName);
    Instances testStructure = test.getStructure();
    if (!trainHeader.equalHeaders(testStructure)) {
      throw new Exception("Training and testing data are not compatible\n"
        + trainHeader.equalHeadersMsg(testStructure));
    }
    text.append("\n\n=== Clustering stats for testing data ===\n\n"
      + printClusterStats(clusterer, testFileName));
  }
  if ((clusterer instanceof DensityBasedClusterer) && (doXval == true)
    && (testFileName.length() == 0) && (objectInputFileName.length() == 0)) {
    // cross validate the log likelihood on the training data
    random = new Random(seed);
    random.setSeed(seed);
    train = source.getDataSet();
    train.randomize(random);
    text.append(crossValidateModel(clusterer.getClass().getName(), train,
      folds, savedOptions, random));
  }
  // Save the clusterer if an object output file is provided
  if (objectOutputFileName.length() != 0) {
    // SerializationHelper.write(objectOutputFileName, clusterer);
    saveClusterer(objectOutputFileName, clusterer, trainHeader);
  }
  // If classifier is drawable output string describing graph
  if ((clusterer instanceof Drawable) && (graphFileName.length() != 0)) {
    BufferedWriter writer = new BufferedWriter(new FileWriter(graphFileName));
    writer.write(((Drawable) clusterer).graph());
    writer.newLine();
    writer.flush();
    writer.close();
  }
  return text.toString();
}
/**
 * Serializes the clusterer (followed by the header of the training data,
 * if supplied) to the named file.
 *
 * @param fileName the file to write the model to
 * @param clusterer the clusterer to serialize
 * @param header the header of the training data; written after the
 *          clusterer when non-null so it can be recovered on load
 * @throws Exception if the model cannot be written
 */
private static void saveClusterer(String fileName, Clusterer clusterer,
  Instances header) throws Exception {
  java.io.ObjectOutputStream oos =
    new java.io.ObjectOutputStream(new java.io.BufferedOutputStream(
      new java.io.FileOutputStream(fileName)));
  // close the stream even if serialization fails part-way through;
  // the original code leaked the file handle on exception
  try {
    oos.writeObject(clusterer);
    if (header != null) {
      oos.writeObject(header);
    }
    oos.flush();
  } finally {
    oos.close();
  }
}
/**
 * Perform a cross-validation for DensityBasedClusterer on a set of instances.
 *
 * @param clusterer the clusterer to use
 * @param data the training data
 * @param numFolds number of folds of cross validation to perform
 * @param random random number generator used to shuffle the data and to
 *          stratify the folds
 * @return the cross-validated log-likelihood, averaged over all instances
 * @throws Exception if an error occurs
 */
public static double crossValidateModel(DensityBasedClusterer clusterer,
  Instances data, int numFolds, Random random) throws Exception {
  Instances train, test;
  double foldAv = 0;
  // work on a shuffled copy so the caller's dataset is left untouched
  data = new Instances(data);
  data.randomize(random);
  for (int i = 0; i < numFolds; i++) {
    // Build and test clusterer
    train = data.trainCV(numFolds, i, random);
    clusterer.buildClusterer(train);
    test = data.testCV(numFolds, i);
    for (int j = 0; j < test.numInstances(); j++) {
      try {
        foldAv += clusterer.logDensityForInstance(test.instance(j));
      } catch (Exception ex) {
        // instances that cannot be clustered simply contribute nothing
        // to the sum (deliberate best-effort behaviour)
      }
    }
  }
  // average log-likelihood per instance across all folds
  return foldAv / data.numInstances();
}
/**
 * Performs a cross-validation for a DensityBasedClusterer clusterer on a set
 * of instances.
 *
 * @param clustererString a string naming the class of the clusterer
 * @param data the data on which the cross-validation is to be performed
 * @param numFolds the number of folds for the cross-validation
 * @param options the options to the clusterer (consumed on a copy; the
 *          caller's array is left intact)
 * @param random a random number generator
 * @return a string containing the cross validated log likelihood
 * @throws Exception if a clusterer could not be generated
 */
public static String crossValidateModel(String clustererString,
  Instances data, int numFolds, String[] options, Random random)
  throws Exception {
  Clusterer clusterer = null;
  String[] savedOptions = null;
  double CvAv = 0.0;
  StringBuffer CvString = new StringBuffer();
  if (options != null) {
    savedOptions = new String[options.length];
  }
  // copy so the caller's dataset is not shuffled by the CV below
  data = new Instances(data);
  // create clusterer
  try {
    clusterer = (Clusterer) Class.forName(clustererString).newInstance();
  } catch (Exception e) {
    throw new Exception("Can't find class with name " + clustererString + '.');
  }
  if (!(clusterer instanceof DensityBasedClusterer)) {
    throw new Exception(clustererString + " must be a distribution "
      + "clusterer.");
  }
  // Save options
  if (options != null) {
    System.arraycopy(options, 0, savedOptions, 0, options.length);
  }
  // Parse options
  if (clusterer instanceof OptionHandler) {
    try {
      ((OptionHandler) clusterer).setOptions(savedOptions);
      Utils.checkForRemainingOptions(savedOptions);
    } catch (Exception e) {
      // include the underlying message so the user can see what was wrong
      throw new Exception("Can't parse given options in "
        + "cross-validation: " + e.getMessage());
    }
  }
  CvAv =
    crossValidateModel((DensityBasedClusterer) clusterer, data, numFolds,
      random);
  CvString.append("\n" + numFolds + " fold CV Log Likelihood: "
    + Utils.doubleToString(CvAv, 6, 4) + "\n");
  return CvString.toString();
}
// ===============
// Private methods
// ===============
/**
 * Print the cluster statistics for either the training or the testing data.
 * Instances are read incrementally from the file; if the clusterer supports
 * efficient batch prediction they are collected and predicted in one call
 * instead.
 *
 * @param clusterer the clusterer to use for generating statistics.
 * @param fileName the file to load (an empty string yields an empty result)
 * @return a string containing cluster statistics.
 * @throws Exception if statistics can't be generated.
 */
private static String printClusterStats(Clusterer clusterer, String fileName)
  throws Exception {
  StringBuffer text = new StringBuffer();
  int i = 0; // number of instances processed (incremental path only)
  int cnum;
  double loglk = 0.0;
  int cc = clusterer.numberOfClusters();
  double[] instanceStats = new double[cc]; // per-cluster instance counts
  int unclusteredInstances = 0;
  if (fileName.length() != 0) {
    DataSource source = new DataSource(fileName);
    Instances structure = source.getStructure();
    // non-null only when the clusterer prefers one batch prediction call
    Instances forBatchPredictors =
      (clusterer instanceof BatchPredictor && ((BatchPredictor) clusterer)
        .implementsMoreEfficientBatchPrediction()) ? new Instances(
        source.getStructure(), 0) : null;
    Instance inst;
    while (source.hasMoreElements(structure)) {
      inst = source.nextElement(structure);
      if (forBatchPredictors != null) {
        // defer prediction: just collect the instance
        forBatchPredictors.add(inst);
      } else {
        try {
          cnum = clusterer.clusterInstance(inst);
          if (clusterer instanceof DensityBasedClusterer) {
            loglk +=
              ((DensityBasedClusterer) clusterer).logDensityForInstance(inst);
          }
          instanceStats[cnum]++;
        } catch (Exception e) {
          // instance could not be assigned to any cluster
          unclusteredInstances++;
        }
        i++;
      }
    }
    if (forBatchPredictors != null) {
      // assign each instance to the cluster with the highest membership
      double[][] dists =
        ((BatchPredictor) clusterer)
          .distributionsForInstances(forBatchPredictors);
      for (double[] d : dists) {
        cnum = Utils.maxIndex(d);
        instanceStats[cnum]++;
      }
    }
    // NOTE(review): on the batch path 'i' is never incremented, so
    // numInstFieldWidth below is computed from log(0) — presumably benign
    // for the formatting routine, but worth confirming.
    int clustFieldWidth = (int) ((Math.log(cc) / Math.log(10)) + 1);
    int numInstFieldWidth = (int) ((Math.log(i) / Math.log(10)) + 1);
    double sum = Utils.sum(instanceStats);
    loglk /= sum;
    text.append("Clustered Instances\n");
    // one line per non-empty cluster: index, count and percentage
    for (i = 0; i < cc; i++) {
      if (instanceStats[i] > 0) {
        text.append(Utils.doubleToString(i, clustFieldWidth, 0) + " "
          + Utils.doubleToString(instanceStats[i], numInstFieldWidth, 0)
          + " ("
          + Utils.doubleToString((instanceStats[i] / sum * 100.0), 3, 0)
          + "%)\n");
      }
    }
    if (unclusteredInstances > 0) {
      text.append("\nUnclustered Instances : " + unclusteredInstances);
    }
    if (clusterer instanceof DensityBasedClusterer) {
      text.append("\n\nLog likelihood: " + Utils.doubleToString(loglk, 1, 5)
        + "\n");
    }
  }
  return text.toString();
}
/**
 * Print the cluster assignments for either the training or the testing data.
 * One line per instance is produced: the instance index, the assigned
 * cluster (or "Unclustered") and optionally a list of attribute values.
 *
 * @param clusterer the clusterer to use for cluster assignments
 * @param trainFileName the train file
 * @param testFileName an optional test file; when non-empty it is used
 *          instead of the training file
 * @param attributesToOutput the attributes to print (may be null for none)
 * @return a string containing the instance indexes and cluster assigns.
 * @throws Exception if cluster assignments can't be printed
 */
private static String printClusterings(Clusterer clusterer,
  String trainFileName, String testFileName, Range attributesToOutput)
  throws Exception {
  StringBuffer text = new StringBuffer();
  int i = 0; // running instance index used in the output
  int cnum;
  DataSource source = null;
  Instance inst;
  Instances structure;
  // predictions are for the test file if one was given
  if (testFileName.length() != 0) {
    source = new DataSource(testFileName);
  } else {
    source = new DataSource(trainFileName);
  }
  structure = source.getStructure();
  // non-null only when the clusterer prefers one batch prediction call;
  // in that case instances are collected first and predicted below
  Instances forBatchPredictors =
    (clusterer instanceof BatchPredictor && ((BatchPredictor) clusterer)
      .implementsMoreEfficientBatchPrediction()) ? new Instances(
      source.getStructure(), 0) : null;
  while (source.hasMoreElements(structure)) {
    inst = source.nextElement(structure);
    if (forBatchPredictors != null) {
      forBatchPredictors.add(inst);
    } else {
      try {
        cnum = clusterer.clusterInstance(inst);
        text.append(i + " " + cnum + " "
          + attributeValuesString(inst, attributesToOutput) + "\n");
      } catch (Exception e) {
        // instance could not be assigned; report it rather than aborting
        /*
         * throw new Exception('\n' + "Unable to cluster instance\n" +
         * e.getMessage());
         */
        text.append(i + " Unclustered "
          + attributeValuesString(inst, attributesToOutput) + "\n");
      }
      i++;
    }
  }
  if (forBatchPredictors != null) {
    double[][] dists =
      ((BatchPredictor) clusterer)
        .distributionsForInstances(forBatchPredictors);
    // 'i' is still 0 here (the incremental path above was not taken), so
    // it correctly indexes into forBatchPredictors
    for (double[] d : dists) {
      cnum = Utils.maxIndex(d);
      text.append(i
        + " "
        + cnum
        + " "
        + attributeValuesString(forBatchPredictors.instance(i),
          attributesToOutput) + "\n");
      i++;
    }
  }
  return text.toString();
}
/**
 * Builds a string listing the attribute values in a specified range of
 * indices, separated by commas and enclosed in brackets. Returns the empty
 * string when no range is supplied or no attribute falls inside it.
 *
 * @param instance the instance to print the values from
 * @param attRange the range of the attributes to list (may be null)
 * @return a string listing values of the attributes in the range
 */
private static String
  attributeValuesString(Instance instance, Range attRange) {
  StringBuffer result = new StringBuffer();
  if (attRange == null) {
    return result.toString();
  }
  // clamp the range's upper bound to the last attribute index
  attRange.setUpper(instance.numAttributes() - 1);
  boolean anyWritten = false;
  for (int index = 0; index < instance.numAttributes(); index++) {
    if (!attRange.isInRange(index)) {
      continue;
    }
    // opening bracket before the first value, comma before the rest
    result.append(anyWritten ? "," : "(");
    result.append(instance.toString(index));
    anyWritten = true;
  }
  if (anyWritten) {
    result.append(")");
  }
  return result.toString();
}
/**
 * Make up the help string giving all the command line options.
 *
 * @param clusterer the clusterer to include scheme-specific options for
 * @param globalInfo whether to append the clusterer's synopsis (globalInfo)
 * @return a string detailing the valid command line options
 */
private static String
  makeOptionString(Clusterer clusterer, boolean globalInfo) {
  StringBuffer optionsText = new StringBuffer("");
  // General options
  optionsText.append("\n\nGeneral options:\n\n");
  optionsText.append("-h or -help\n");
  optionsText.append("\tOutput help information.\n");
  optionsText.append("-synopsis or -info\n");
  optionsText.append("\tOutput synopsis for clusterer (use in conjunction "
    + " with -h)\n");
  optionsText.append("-t <name of training file>\n");
  optionsText.append("\tSets training file.\n");
  optionsText.append("-T <name of test file>\n");
  optionsText.append("\tSets test file.\n");
  optionsText.append("-force-batch-training\n");
  optionsText
    .append("\tAlways train the clusterer in batch mode, never incrementally.\n");
  optionsText.append("-l <name of input file>\n");
  optionsText.append("\tSets model input file.\n");
  optionsText.append("-d <name of output file>\n");
  optionsText.append("\tSets model output file.\n");
  optionsText.append("-p <attribute range>\n");
  optionsText.append("\tOutput predictions. Predictions are for "
    + "training file" + "\n\tif only training file is specified,"
    + "\n\totherwise predictions are for the test file."
    + "\n\tThe range specifies attribute values to be output"
    + "\n\twith the predictions. Use '-p 0' for none.\n");
  optionsText.append("-x <number of folds>\n");
  optionsText
    .append("\tOnly Distribution Clusterers can be cross validated.\n");
  optionsText.append("-s <random number seed>\n");
  optionsText
    .append("\tSets the seed for randomizing the data in cross-validation\n");
  optionsText.append("-c <class index>\n");
  optionsText.append("\tSet class attribute. If supplied, class is ignored");
  optionsText.append("\n\tduring clustering but is used in a classes to");
  optionsText.append("\n\tclusters evaluation.\n");
  // -g only applies to clusterers that can render themselves as a graph
  if (clusterer instanceof Drawable) {
    optionsText.append("-g <name of graph file>\n");
    optionsText
      .append("\tOutputs the graph representation of the clusterer to the file.\n");
  }
  // Get scheme-specific options
  if (clusterer instanceof OptionHandler) {
    optionsText.append("\nOptions specific to "
      + clusterer.getClass().getName() + ":\n\n");
    Enumeration<Option> enu = ((OptionHandler) clusterer).listOptions();
    while (enu.hasMoreElements()) {
      Option option = enu.nextElement();
      optionsText.append(option.synopsis() + '\n');
      optionsText.append(option.description() + "\n");
    }
  }
  // Get global information (if available)
  if (globalInfo) {
    try {
      String gi = getGlobalInfo(clusterer);
      optionsText.append(gi);
    } catch (Exception ex) {
      // quietly ignore: help output should never fail because a
      // clusterer's globalInfo could not be retrieved via reflection
    }
  }
  return optionsText.toString();
}
/**
 * Return the global info (if it exists) for the supplied clusterer. The
 * clusterer's bean methods are inspected via reflection and the result of
 * its {@code globalInfo()} method, if present, is appended to the synopsis
 * header.
 *
 * @param clusterer the clusterer to get the global info for
 * @return the global info (synopsis) for the clusterer
 * @throws Exception if there is a problem reflecting on the clusterer
 */
protected static String getGlobalInfo(Clusterer clusterer) throws Exception {
  BeanInfo beanInfo = Introspector.getBeanInfo(clusterer.getClass());
  MethodDescriptor[] descriptors = beanInfo.getMethodDescriptors();
  StringBuilder result = new StringBuilder("\nSynopsis for ");
  result.append(clusterer.getClass().getName()).append(":\n\n");
  for (MethodDescriptor descriptor : descriptors) {
    // only the first method named "globalInfo" is consulted
    if ("globalInfo".equals(descriptor.getDisplayName())) {
      Method target = descriptor.getMethod();
      result.append((String) target.invoke(clusterer, new Object[0]));
      break;
    }
  }
  return result.toString();
}
/**
 * Tests whether the current evaluation object is equal to another evaluation
 * object. Two evaluations are equal when their class-to-cluster mappings,
 * cluster assignments, log likelihoods, cluster counts and (timing-stripped)
 * result strings all match.
 *
 * NOTE(review): hashCode() is not overridden alongside equals(), so equal
 * instances may produce different hash codes — confirm whether evaluations
 * are ever used as hash-based collection keys.
 *
 * @param obj the object to compare against
 * @return true if the two objects are equal
 */
@Override
public boolean equals(Object obj) {
  // exact-class comparison (not instanceof), so subclasses never compare
  // equal to instances of this class
  if ((obj == null) || !(obj.getClass().equals(this.getClass()))) {
    return false;
  }
  ClusterEvaluation cmp = (ClusterEvaluation) obj;
  // both must be null or both non-null
  if ((m_classToCluster != null) != (cmp.m_classToCluster != null)) {
    return false;
  }
  if (m_classToCluster != null) {
    for (int i = 0; i < m_classToCluster.length; i++) {
      if (m_classToCluster[i] != cmp.m_classToCluster[i]) {
        return false;
      }
    }
  }
  if ((m_clusterAssignments != null) != (cmp.m_clusterAssignments != null)) {
    return false;
  }
  if (m_clusterAssignments != null) {
    // element-wise != comparison: any NaN assignment makes the objects
    // unequal (deliberately stricter than Arrays.equals for doubles)
    for (int i = 0; i < m_clusterAssignments.length; i++) {
      if (m_clusterAssignments[i] != cmp.m_clusterAssignments[i]) {
        return false;
      }
    }
  }
  // NaN log likelihoods are treated as equal to each other
  if (Double.isNaN(m_logL) != Double.isNaN(cmp.m_logL)) {
    return false;
  }
  if (!Double.isNaN(m_logL)) {
    if (m_logL != cmp.m_logL) {
      return false;
    }
  }
  if (m_numClusters != cmp.m_numClusters) {
    return false;
  }
  // TODO: better comparison? via members?
  // compare the textual results with the (run-dependent) elapsed-time
  // lines stripped out
  String clusteringResults1 =
    m_clusteringResults.toString().replaceAll("Elapsed time.*", "");
  String clusteringResults2 =
    cmp.m_clusteringResults.toString().replaceAll("Elapsed time.*", "");
  if (!clusteringResults1.equals(clusteringResults2)) {
    return false;
  }
  return true;
}
/**
 * Returns the revision string.
 *
 * @return the revision (extracted from the version-control keyword below)
 */
@Override
public String getRevision() {
  return RevisionUtils.extract("$Revision$");
}
/**
 * Main method for testing this class.
 *
 * @param args the options; the first argument must be the class name of the
 *          clusterer to evaluate, followed by its options
 */
public static void main(String[] args) {
  try {
    if (args.length == 0) {
      throw new Exception("The first argument must be the name of a "
        + "clusterer");
    }
    String ClustererString = args[0];
    // blank out the scheme name so evaluateClusterer only sees options
    args[0] = "";
    Clusterer newClusterer = AbstractClusterer.forName(ClustererString, null);
    System.out.println(evaluateClusterer(newClusterer, args));
  } catch (Exception e) {
    // fall back to toString() so exceptions without a message (e.g. NPEs)
    // still produce usable diagnostic output instead of the literal "null"
    String msg = e.getMessage();
    System.out.println(msg != null ? msg : e.toString());
  }
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/clusterers/Clusterer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Clusterer.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.clusterers;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
/**
 * Interface for clusterers. Clients will typically extend either
 * AbstractClusterer or AbstractDensityBasedClusterer.
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision$
 */
public interface Clusterer {
  /**
   * Generates a clusterer. Has to initialize all fields of the clusterer
   * that are not being set via options.
   *
   * @param data set of instances serving as training data
   * @exception Exception if the clusterer has not been
   * generated successfully
   */
  void buildClusterer(Instances data) throws Exception;
  /**
   * Classifies a given instance. Either this or distributionForInstance()
   * needs to be implemented by subclasses.
   *
   * @param instance the instance to be assigned to a cluster
   * @return the number of the assigned cluster as an integer
   * @exception Exception if instance could not be clustered
   * successfully
   */
  int clusterInstance(Instance instance) throws Exception;
  /**
   * Predicts the cluster memberships for a given instance. Either
   * this or clusterInstance() needs to be implemented by subclasses.
   *
   * @param instance the instance to be assigned a cluster.
   * @return an array containing the estimated membership
   * probabilities of the test instance in each cluster (this
   * should sum to at most 1)
   * @exception Exception if distribution could not be
   * computed successfully
   */
  double[] distributionForInstance(Instance instance) throws Exception;
  /**
   * Returns the number of clusters.
   *
   * @return the number of clusters generated for a training dataset.
   * @exception Exception if number of clusters could not be returned
   * successfully
   */
  int numberOfClusters() throws Exception;
  /**
   * Returns the Capabilities of this clusterer. Derived classifiers have to
   * override this method to enable capabilities.
   *
   * @return the capabilities of this object
   * @see Capabilities
   */
  Capabilities getCapabilities();
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/clusterers/Cobweb.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Cobweb.java
* Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.clusterers;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;
import weka.core.AttributeStats;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Drawable;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.experiment.Stats;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.Add;
/**
* <!-- globalinfo-start -->
* Class implementing the Cobweb and Classit clustering algorithms.<br/>
* <br/>
* Note: the application of node operators (merging, splitting etc.) in terms of ordering and priority differs (and is somewhat ambiguous) between the original Cobweb and Classit papers. This algorithm always compares the best host, adding a new leaf, merging the two best hosts, and splitting the best host when considering where to place a new instance.<br/>
* <br/>
* For more information see:<br/>
* <br/>
* D. Fisher (1987). Knowledge acquisition via incremental conceptual clustering. Machine Learning. 2(2):139-172.<br/>
* <br/>
* J. H. Gennari, P. Langley, D. Fisher (1990). Models of incremental concept formation. Artificial Intelligence. 40:11-61.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- technical-bibtex-start -->
* BibTeX:
* <pre>
* @article{Fisher1987,
* author = {D. Fisher},
* journal = {Machine Learning},
* number = {2},
* pages = {139-172},
* title = {Knowledge acquisition via incremental conceptual clustering},
* volume = {2},
* year = {1987}
* }
*
* @article{Gennari1990,
* author = {J. H. Gennari and P. Langley and D. Fisher},
* journal = {Artificial Intelligence},
* pages = {11-61},
* title = {Models of incremental concept formation},
* volume = {40},
* year = {1990}
* }
* </pre>
* <p/>
* <!-- technical-bibtex-end -->
*
* <!-- options-start -->
* Valid options are: <p/>
*
* <pre> -A <acuity>
* Acuity.
* (default=1.0)</pre>
*
* <pre> -C <cutoff>
* Cutoff.
* (default=0.002)</pre>
*
* <pre> -save-data
* Save instance data.</pre>
*
* <pre> -S <num>
* Random number seed.
* (default 42)</pre>
*
* <pre> -output-debug-info
* If set, clusterer is run in debug mode and
* may output additional info to the console</pre>
*
* <pre> -do-not-check-capabilities
* If set, clusterer capabilities are not checked before clusterer is built
* (use with caution).</pre>
*
* <!-- options-end -->
*
* @author <a href="mailto:mhall@cs.waikato.ac.nz">Mark Hall</a>
* @version $Revision$
* @see RandomizableClusterer
* @see Drawable
*/
public class Cobweb extends RandomizableClusterer implements Drawable,
TechnicalInformationHandler, UpdateableClusterer {
/** for serialization */
static final long serialVersionUID = 928406656495092318L;
/**
* Inner class handling node operations for Cobweb.
*
* @see Serializable
*/
public class CNode implements Serializable, RevisionHandler {
/** for serialization */
static final long serialVersionUID = 3452097436933325631L;
/**
 * Within cluster attribute statistics (one entry per attribute)
 */
private AttributeStats[] m_attStats;
/**
 * Number of attributes in the data
 */
private final int m_numAttributes;
/**
 * Instances at this node (null until the first instance is added)
 */
protected Instances m_clusterInstances = null;
/**
 * Children of this node (null while this node is a leaf)
 */
private ArrayList<CNode> m_children = null;
/**
 * Total (weighted) number of instances at this node
 */
private double m_totalInstances = 0.0;
/**
 * Cluster number of this node (-1 until numbering is assigned)
 */
private int m_clusterNum = -1;
/**
 * Creates an empty <code>CNode</code> instance.
 *
 * @param numAttributes the number of attributes in the data
 */
public CNode(int numAttributes) {
  m_numAttributes = numAttributes;
}
/**
 * Creates a new leaf <code>CNode</code> instance holding a single instance.
 *
 * @param numAttributes the number of attributes in the data
 * @param leafInstance the instance to store at this leaf
 */
public CNode(int numAttributes, Instance leafInstance) {
  this(numAttributes);
  // the delegated constructor leaves m_clusterInstances null, so this
  // check always passes; kept for safety
  if (m_clusterInstances == null) {
    m_clusterInstances = new Instances(leafInstance.dataset(), 1);
  }
  m_clusterInstances.add(leafInstance);
  updateStats(leafInstance, false);
}
/**
 * Adds an instance to this cluster. An empty node simply stores the
 * instance; a leaf with existing instances is split into two children
 * (subject to the cutoff); otherwise the instance is passed recursively
 * to the best host among the children.
 *
 * @param newInstance the instance to add
 * @throws Exception if an error occurs
 */
protected void addInstance(Instance newInstance) throws Exception {
  // Add the instance to this cluster
  if (m_clusterInstances == null) {
    // first instance ever seen by this node
    m_clusterInstances = new Instances(newInstance.dataset(), 1);
    m_clusterInstances.add(newInstance);
    updateStats(newInstance, false);
    return;
  } else if (m_children == null) {
    /*
     * we are a leaf, so make our existing instance(s) into a child and then
     * add the new instance as a child
     */
    CNode tempSubCluster = new CNode(m_numAttributes,
      m_clusterInstances.instance(0));
    for (int i = 1; i < m_clusterInstances.numInstances(); i++) {
      tempSubCluster.m_clusterInstances.add(m_clusterInstances.instance(i));
      tempSubCluster.updateStats(m_clusterInstances.instance(i), false);
    }
    // the original code allocated m_children twice here; the first
    // allocation was dead and has been removed
    m_children = new ArrayList<CNode>();
    m_children.add(tempSubCluster);
    m_children.add(new CNode(m_numAttributes, newInstance));
    m_clusterInstances.add(newInstance);
    updateStats(newInstance, false);
    // here is where we check against cutoff (also check cutoff
    // in findHost)
    if (categoryUtility() < m_cutoff) {
      // the split was not worthwhile: collapse back into a leaf
      m_children = null;
    }
    return;
  }
  // otherwise, find the best host for this instance
  CNode bestHost = findHost(newInstance, false);
  if (bestHost != null) {
    // now add to the best host
    bestHost.addInstance(newInstance);
  }
}
/**
 * Temporarily adds a new instance to each of this node's children in
 * turn and records the category utility that would result.
 *
 * @param newInstance the new instance to evaluate
 * @return an array of category utility values, one per child, each the
 *         result of considering that child as a host for the new instance
 * @throws Exception if an error occurs
 */
private double[] cuScoresForChildren(Instance newInstance) throws Exception {
  final int numChildren = m_children.size();
  double[] scores = new double[numChildren];
  for (int c = 0; c < numChildren; c++) {
    CNode candidate = m_children.get(c);
    // tentatively absorb the instance into this child's statistics
    candidate.updateStats(newInstance, false);
    scores[c] = categoryUtility();
    // roll the tentative update back
    candidate.updateStats(newInstance, true);
  }
  return scores;
}
/**
 * Computes the category utility that would result from merging the best
 * and second-best hosts (a and b) into the supplied empty node, with the
 * new instance's statistics included. This node's child list is mutated
 * during the evaluation and restored before returning; note that a and b
 * are re-appended, so child order may differ afterwards.
 *
 * @param merged an empty node that receives a and b as children
 * @param a the best host
 * @param b the second-best host
 * @param newInstance the instance being placed
 * @return the category utility of the merged configuration
 * @throws Exception if an error occurs
 */
private double cuScoreForBestTwoMerged(CNode merged, CNode a, CNode b,
Instance newInstance) throws Exception {
double mergedCU = -Double.MAX_VALUE;
// consider merging the best and second
// best.
merged.m_clusterInstances = new Instances(m_clusterInstances, 1);
merged.addChildNode(a);
merged.addChildNode(b);
merged.updateStats(newInstance, false); // add new instance to stats
// remove the best and second best nodes
m_children.remove(m_children.indexOf(a));
m_children.remove(m_children.indexOf(b));
m_children.add(merged);
mergedCU = categoryUtility();
// restore the status quo
merged.updateStats(newInstance, true);
m_children.remove(m_children.indexOf(merged));
m_children.add(a);
m_children.add(b);
return mergedCU;
}
/**
 * Finds a host for the new instance in this nodes children. Also considers
 * merging the two best hosts and splitting the best host. The winning
 * operation (existing child, new leaf, merge, or split) is the one with
 * the highest category utility; the cutoff may prune all children.
 *
 * @param newInstance the instance to find a host for
 * @param structureFrozen true if the instance is not to be added to the
 * tree and instead the best potential host is to be returned
 * @return the best host, or null when this node itself is the host (or
 * when children were pruned by the cutoff)
 * @throws Exception if an error occurs
 */
private CNode findHost(Instance newInstance, boolean structureFrozen)
throws Exception {
if (!structureFrozen) {
updateStats(newInstance, false);
}
// look for a host in existing children and also consider as a new leaf
double[] categoryUtils = cuScoresForChildren(newInstance);
// make a temporary new leaf for this instance and get CU
CNode newLeaf = new CNode(m_numAttributes, newInstance);
m_children.add(newLeaf);
double bestHostCU = categoryUtility();
CNode finalBestHost = newLeaf;
// remove new leaf when searching for best and second best nodes to
// consider for merging and splitting
m_children.remove(m_children.size() - 1);
// now determine the best host (and the second best)
int best = 0;
int secondBest = 0;
for (int i = 0; i < categoryUtils.length; i++) {
if (categoryUtils[i] > categoryUtils[secondBest]) {
if (categoryUtils[i] > categoryUtils[best]) {
secondBest = best;
best = i;
} else {
secondBest = i;
}
}
}
CNode a = m_children.get(best);
CNode b = m_children.get(secondBest);
if (categoryUtils[best] > bestHostCU) {
bestHostCU = categoryUtils[best];
finalBestHost = a;
// System.out.println("Node is best");
}
// frozen structure (classification): report the best host, or null
// when no existing child beats treating this node as the host
if (structureFrozen) {
if (finalBestHost == newLeaf) {
return null; // *this* node is the best host
} else {
return finalBestHost;
}
}
// consider merging the two best hosts (only if they are distinct)
double mergedCU = -Double.MAX_VALUE;
CNode merged = new CNode(m_numAttributes);
if (a != b) {
mergedCU = cuScoreForBestTwoMerged(merged, a, b, newInstance);
if (mergedCU > bestHostCU) {
bestHostCU = mergedCU;
finalBestHost = merged;
}
}
// Consider splitting the best
double splitCU = -Double.MAX_VALUE;
double splitBestChildCU = -Double.MAX_VALUE;
double splitPlusNewLeafCU = -Double.MAX_VALUE;
double splitPlusMergeBestTwoCU = -Double.MAX_VALUE;
if (a.m_children != null) {
// promote a's children into this node's child list (without a itself)
ArrayList<CNode> tempChildren = new ArrayList<CNode>();
for (int i = 0; i < m_children.size(); i++) {
CNode existingChild = m_children.get(i);
if (existingChild != a) {
tempChildren.add(existingChild);
}
}
for (int i = 0; i < a.m_children.size(); i++) {
CNode promotedChild = a.m_children.get(i);
tempChildren.add(promotedChild);
}
// also add the new leaf
tempChildren.add(newLeaf);
ArrayList<CNode> saveStatusQuo = m_children;
m_children = tempChildren;
splitPlusNewLeafCU = categoryUtility(); // split + new leaf
// remove the new leaf
tempChildren.remove(tempChildren.size() - 1);
// now look for best and second best
categoryUtils = cuScoresForChildren(newInstance);
// now determine the best host (and the second best)
best = 0;
secondBest = 0;
for (int i = 0; i < categoryUtils.length; i++) {
if (categoryUtils[i] > categoryUtils[secondBest]) {
if (categoryUtils[i] > categoryUtils[best]) {
secondBest = best;
best = i;
} else {
secondBest = i;
}
}
}
CNode sa = m_children.get(best);
CNode sb = m_children.get(secondBest);
splitBestChildCU = categoryUtils[best];
// now merge best and second best
CNode mergedSplitChildren = new CNode(m_numAttributes);
if (sa != sb) {
splitPlusMergeBestTwoCU = cuScoreForBestTwoMerged(
mergedSplitChildren, sa, sb, newInstance);
}
// best CU achievable under the split configuration
splitCU = (splitBestChildCU > splitPlusNewLeafCU) ? splitBestChildCU
: splitPlusNewLeafCU;
splitCU = (splitCU > splitPlusMergeBestTwoCU) ? splitCU
: splitPlusMergeBestTwoCU;
if (splitCU > bestHostCU) {
bestHostCU = splitCU;
finalBestHost = this;
// tempChildren.remove(tempChildren.size()-1);
} else {
// restore the status quo
m_children = saveStatusQuo;
}
}
if (finalBestHost != this) {
// can commit the instance to the set of instances at this node
m_clusterInstances.add(newInstance);
} else {
m_numberSplits++;
}
// commit a merge: replace a and b by the merged node
if (finalBestHost == merged) {
m_numberMerges++;
m_children.remove(m_children.indexOf(a));
m_children.remove(m_children.indexOf(b));
m_children.add(merged);
}
// commit a new leaf: replace the temporary leaf with an empty node that
// the recursive addInstance() call will populate
if (finalBestHost == newLeaf) {
finalBestHost = new CNode(m_numAttributes);
m_children.add(finalBestHost);
}
if (bestHostCU < m_cutoff) {
if (finalBestHost == this) {
// splitting was the best, but since we are cutting all children
// recursion is aborted and we still need to add the instance
// to the set of instances at this node
m_clusterInstances.add(newInstance);
}
m_children = null;
finalBestHost = null;
}
if (finalBestHost == this) {
// splitting is still the best, so downdate the stats as
// we'll be recursively calling on this node
updateStats(newInstance, true);
}
return finalBestHost;
}
/**
 * Adds the supplied node as a child of this node. All of the child's
 * instances are also absorbed into this node's instances and statistics.
 *
 * @param child the child to add
 */
protected void addChildNode(CNode child) {
  Instances childInsts = child.m_clusterInstances;
  for (int i = 0; i < childInsts.numInstances(); i++) {
    Instance inst = childInsts.instance(i);
    m_clusterInstances.add(inst);
    updateStats(inst, false);
  }
  // lazily create the child list on first use
  if (m_children == null) {
    m_children = new ArrayList<CNode>();
  }
  m_children.add(child);
}
/**
 * Computes the category utility of all children with respect to this node:
 * the mean of the per-child utilities.
 *
 * @return the category utility of the children with respect to this node
 * @throws Exception if there are no children
 */
protected double categoryUtility() throws Exception {
  if (m_children == null) {
    throw new Exception("categoryUtility: No children!");
  }
  double total = 0;
  for (CNode child : m_children) {
    total += categoryUtilityChild(child);
  }
  return total / m_children.size();
}
/**
 * Computes the utility of a single child with respect to this node,
 * weighted by the child's share of this node's instance mass.
 *
 * @param child the child for which to compute the utility
 * @return the utility of the child with respect to this node
 * @throws Exception if something goes wrong
 */
protected double categoryUtilityChild(CNode child) throws Exception {
  double sum = 0;
  for (int att = 0; att < m_numAttributes; att++) {
    if (m_clusterInstances.attribute(att).isNominal()) {
      int numVals = m_clusterInstances.attribute(att).numValues();
      for (int v = 0; v < numVals; v++) {
        double childProb = child.getProbability(att, v);
        double parentProb = getProbability(att, v);
        // childProb^2 - parentProb^2, in factored form
        sum += (childProb - parentProb) * (childProb + parentProb);
      }
    } else {
      // numeric attribute: Classit formulation via standard deviations
      sum += (m_normal / child.getStandardDev(att))
        - (m_normal / getStandardDev(att));
    }
  }
  return (child.m_totalInstances / m_totalInstances) * sum;
}
/**
 * Returns the relative frequency of a value of a nominal attribute at
 * this node.
 *
 * @param attIndex the index of the attribute
 * @param valueIndex the index of the value of the attribute
 * @return the probability (0 when no values have been counted yet)
 * @throws Exception if the requested attribute is not nominal
 */
protected double getProbability(int attIndex, int valueIndex)
  throws Exception {
  if (!m_clusterInstances.attribute(attIndex).isNominal()) {
    throw new Exception("getProbability: attribute is not nominal");
  }
  // guard against an empty (or fully downdated) count
  double total = m_attStats[attIndex].totalCount;
  if (total <= 0) {
    return 0;
  }
  return m_attStats[attIndex].nominalCounts[valueIndex] / total;
}
/**
 * Returns the standard deviation of a numeric attribute at this node,
 * bounded below by the acuity.
 *
 * @param attIndex the index of the attribute
 * @return the standard deviation (never smaller than m_acuity)
 * @throws Exception if the attribute is not numeric
 */
protected double getStandardDev(int attIndex) throws Exception {
  if (!m_clusterInstances.attribute(attIndex).isNumeric()) {
    throw new Exception("getStandardDev: attribute is not numeric");
  }
  m_attStats[attIndex].numericStats.calculateDerived();
  double stdDev = m_attStats[attIndex].numericStats.stdDev;
  // fall back to the acuity when the deviation is undefined
  if (Double.isNaN(stdDev) || Double.isInfinite(stdDev)) {
    return m_acuity;
  }
  return Math.max(m_acuity, stdDev);
}
/**
 * Update attribute stats using the supplied instance. Lazily allocates
 * the per-attribute statistics on first use.
 *
 * @param updateInstance the instance for updating
 * @param delete true if the values of the supplied instance are to be
 * removed from the statistics
 */
protected void updateStats(Instance updateInstance, boolean delete) {
if (m_attStats == null) {
// first update: allocate nominal count arrays / numeric stats objects
m_attStats = new AttributeStats[m_numAttributes];
for (int i = 0; i < m_numAttributes; i++) {
m_attStats[i] = new AttributeStats();
if (m_clusterInstances.attribute(i).isNominal()) {
m_attStats[i].nominalCounts = new int[m_clusterInstances.attribute(
i).numValues()];
} else {
m_attStats[i].numericStats = new Stats();
}
}
}
// missing values are simply skipped
for (int i = 0; i < m_numAttributes; i++) {
if (!updateInstance.isMissing(i)) {
double value = updateInstance.value(i);
if (m_clusterInstances.attribute(i).isNominal()) {
// NOTE: nominalCounts is an int[]; the compound assignment
// narrows the (double) instance weight to int, so fractional
// weights are truncated here
m_attStats[i].nominalCounts[(int) value] += (delete) ? (-1.0 * updateInstance
.weight()) : updateInstance.weight();
m_attStats[i].totalCount += (delete) ? (-1.0 * updateInstance
.weight()) : updateInstance.weight();
} else {
if (delete) {
m_attStats[i].numericStats.subtract(value,
updateInstance.weight());
} else {
m_attStats[i].numericStats.add(value, updateInstance.weight());
}
}
}
}
// total weight at this node tracks adds and removes symmetrically
m_totalInstances += (delete) ? (-1.0 * updateInstance.weight())
: (updateInstance.weight());
}
/**
 * Recursively assigns numbers to the nodes in the tree, in pre-order
 * (a node is numbered before its subtrees).
 *
 * @param cl_num a single-element counter holding the next number to assign
 * @throws Exception if a node has fewer than two children
 */
private void assignClusterNums(int[] cl_num) throws Exception {
  // a correctly built internal node always has at least two children
  if (m_children != null && m_children.size() < 2) {
    throw new Exception("assignClusterNums: tree not built correctly!");
  }
  m_clusterNum = cl_num[0]++;
  if (m_children != null) {
    for (CNode child : m_children) {
      child.assignClusterNums(cl_num);
    }
  }
}
/**
 * Recursively build a string representation of the Cobweb tree.
 *
 * @param depth depth of this node in the tree
 * @param text holds the string representation
 */
protected void dumpTree(int depth, StringBuffer text) {
  if (depth == 0) {
    // make sure cluster numbers have been assigned before printing
    determineNumberOfClusters();
  }
  // indentation prefix for this depth
  StringBuffer pad = new StringBuffer();
  for (int j = 0; j < depth; j++) {
    pad.append("| ");
  }
  if (m_children == null) {
    text.append("\n").append(pad);
    text.append("leaf " + m_clusterNum + " ["
      + m_clusterInstances.numInstances() + "]");
  } else {
    for (int i = 0; i < m_children.size(); i++) {
      text.append("\n").append(pad);
      text.append("node " + m_clusterNum + " ["
        + m_clusterInstances.numInstances() + "]");
      m_children.get(i).dumpTree(depth + 1, text);
    }
  }
}
/**
 * Returns the instances at this node as a string. Appends the cluster
 * number of the child that each instance belongs to.
 *
 * @return a <code>String</code> value
 * @throws Exception if an error occurs
 */
protected String dumpData() throws Exception {
if (m_children == null) {
return m_clusterInstances.toString();
}
// construct instances string with cluster numbers attached
// collect the children's instances in child order via a throwaway node
CNode tempNode = new CNode(m_numAttributes);
tempNode.m_clusterInstances = new Instances(m_clusterInstances, 1);
for (int i = 0; i < m_children.size(); i++) {
tempNode.addChildNode(m_children.get(i));
}
Instances tempInst = tempNode.m_clusterInstances;
tempNode = null;
// append a nominal "Cluster" attribute whose labels are the children's
// cluster numbers, in child order
Add af = new Add();
af.setAttributeName("Cluster");
String labels = "";
for (int i = 0; i < m_children.size(); i++) {
CNode temp = m_children.get(i);
labels += ("C" + temp.m_clusterNum);
if (i < m_children.size() - 1) {
labels += ",";
}
}
af.setNominalLabels(labels);
af.setInputFormat(tempInst);
tempInst = Filter.useFilter(tempInst, af);
tempInst.setRelationName("Cluster " + m_clusterNum);
// tag each instance with its child's label index; instances appear in
// the same child order used to build the labels above
int z = 0;
for (int i = 0; i < m_children.size(); i++) {
CNode temp = m_children.get(i);
for (int j = 0; j < temp.m_clusterInstances.numInstances(); j++) {
tempInst.instance(z).setValue(m_numAttributes, i);
z++;
}
}
return tempInst.toString();
}
/**
 * Recursively generate the graph (DOT) string for the Cobweb tree.
 *
 * @param text holds the graph string
 * @throws Exception if generation fails
 */
protected void graphTree(StringBuffer text) throws Exception {
  boolean isLeaf = (m_children == null);
  // node declaration (leaves are drawn as filled boxes)
  text.append("N" + m_clusterNum + " [label=\""
    + (isLeaf ? "leaf " : "node ") + m_clusterNum + " "
    + " (" + m_clusterInstances.numInstances() + ")\" "
    + (isLeaf ? "shape=box style=filled " : "")
    + (m_saveInstances ? "data =\n" + dumpData() + "\n,\n" : "") + "]\n");
  if (!isLeaf) {
    // emit all edges out of this node first...
    for (CNode child : m_children) {
      text.append("N" + m_clusterNum + "->" + "N" + child.m_clusterNum
        + "\n");
    }
    // ...then recurse into each subtree
    for (CNode child : m_children) {
      child.graphTree(text);
    }
  }
}
/**
 * Returns the revision string (filled in by version control keyword
 * expansion).
 *
 * @return the revision
 */
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
/**
 * Normal constant: 1/(2*sqrt(pi)), used in the numeric-attribute term of
 * the category utility calculation.
 */
protected static final double m_normal = 1.0 / (2 * Math.sqrt(Math.PI));
/**
 * Acuity (minimum standard deviation for numeric attributes).
 */
protected double m_acuity = 1.0;
/**
 * Cutoff (minimum category utility); defaults to 0.01 * m_normal,
 * i.e. roughly 0.0028.
 */
protected double m_cutoff = 0.01 * Cobweb.m_normal;
/**
 * Holds the root of the Cobweb tree.
 */
protected CNode m_cobwebTree = null;
/**
 * Number of clusters (nodes in the tree). Must never be queried directly,
 * only via the method numberOfClusters(). Otherwise it's not guaranteed that
 * it contains the correct value.
 *
 * @see #numberOfClusters()
 * @see #m_numberOfClustersDetermined
 */
protected int m_numberOfClusters = -1;
/** whether the number of clusters was already determined */
protected boolean m_numberOfClustersDetermined = false;
/** the number of splits that happened during training */
protected int m_numberSplits;
/** the number of merges that happened during training */
protected int m_numberMerges;
/**
 * Output instances in graph representation of Cobweb tree (Allows instances
 * at nodes in the tree to be visualized in the Explorer).
 */
protected boolean m_saveInstances = false;
/**
 * Default constructor. Note the seed default of 42; a negative seed
 * disables randomization in buildClusterer().
 */
public Cobweb() {
super();
m_SeedDefault = 42;
setSeed(m_SeedDefault);
}
/**
 * Returns a string describing this clusterer.
 *
 * @return a description of the evaluator suitable for displaying in the
 *         explorer/experimenter gui
 */
public String globalInfo() {
return "Class implementing the Cobweb and Classit clustering algorithms.\n\n"
+ "Note: the application of node operators (merging, splitting etc.) in "
+ "terms of ordering and priority differs (and is somewhat ambiguous) "
+ "between the original Cobweb and Classit papers. This algorithm always "
+ "compares the best host, adding a new leaf, merging the two best hosts, "
+ "and splitting the best host when considering where to place a new "
+ "instance.\n\n"
+ "For more information see:\n\n"
+ getTechnicalInformation().toString();
}
/**
 * Returns an instance of a TechnicalInformation object, containing detailed
 * information about the technical background of this class, e.g., paper
 * reference or book this class is based on.
 *
 * @return the technical information about this class
 */
@Override
public TechnicalInformation getTechnicalInformation() {
TechnicalInformation result;
TechnicalInformation additional;
// primary reference: Fisher's original Cobweb paper
result = new TechnicalInformation(Type.ARTICLE);
result.setValue(Field.AUTHOR, "D. Fisher");
result.setValue(Field.YEAR, "1987");
result.setValue(Field.TITLE,
"Knowledge acquisition via incremental conceptual clustering");
result.setValue(Field.JOURNAL, "Machine Learning");
result.setValue(Field.VOLUME, "2");
result.setValue(Field.NUMBER, "2");
result.setValue(Field.PAGES, "139-172");
// additional reference: the Classit paper
additional = result.add(Type.ARTICLE);
additional.setValue(Field.AUTHOR,
"J. H. Gennari and P. Langley and D. Fisher");
additional.setValue(Field.YEAR, "1990");
additional.setValue(Field.TITLE, "Models of incremental concept formation");
additional.setValue(Field.JOURNAL, "Artificial Intelligence");
additional.setValue(Field.VOLUME, "40");
additional.setValue(Field.PAGES, "11-61");
return result;
}
/**
 * Returns default capabilities of the clusterer.
 *
 * @return the capabilities of this clusterer
 */
@Override
public Capabilities getCapabilities() {
  Capabilities result = super.getCapabilities();
  result.disableAll();
  // unsupervised: no class attribute; nominal/numeric/date attributes
  // and missing values are all handled
  for (Capability capability : new Capability[] { Capability.NO_CLASS,
      Capability.NOMINAL_ATTRIBUTES, Capability.NUMERIC_ATTRIBUTES,
      Capability.DATE_ATTRIBUTES, Capability.MISSING_VALUES }) {
    result.enable(capability);
  }
  // other
  result.setMinimumNumberInstances(0);
  return result;
}
/**
 * Builds the clusterer by feeding the (optionally shuffled) training
 * instances to the incremental updater one at a time.
 *
 * @param data the training instances.
 * @throws Exception if something goes wrong.
 */
@Override
public void buildClusterer(Instances data) throws Exception {
  // reset model state
  m_numberOfClusters = -1;
  m_cobwebTree = null;
  m_numberSplits = 0;
  m_numberMerges = 0;
  // can clusterer handle the data?
  getCapabilities().testWithFail(data);
  // work on a copy; a negative seed disables randomization
  Instances training = new Instances(data);
  if (getSeed() >= 0) {
    training.randomize(new Random(getSeed()));
  }
  int numInsts = training.numInstances();
  for (int i = 0; i < numInsts; i++) {
    updateClusterer(training.instance(i));
  }
  updateFinished();
}
/**
 * Signals the end of the updating: assigns cluster numbers and fixes the
 * cluster count.
 */
@Override
public void updateFinished() {
determineNumberOfClusters();
}
/**
 * Classifies a given instance by walking down the tree, following the
 * best host at each level (with the tree structure frozen), until a leaf
 * is reached or the current node is itself the best host.
 *
 * @param instance the instance to be assigned to a cluster
 * @return the number of the assigned cluster as an integer if the class is
 *         enumerated, otherwise the predicted value
 * @throws Exception if instance could not be classified successfully
 */
@Override
public int clusterInstance(Instance instance) throws Exception {
  CNode host = m_cobwebTree;
  determineNumberOfClusters();
  while (host.m_children != null) {
    // structureFrozen == true: no stats are modified; null means the
    // current node is the best host
    CNode next = host.findHost(instance, true);
    if (next == null) {
      break;
    }
    host = next;
  }
  return host.m_clusterNum;
}
/**
 * Determines the number of clusters if necessary, numbering the tree's
 * nodes as a side effect.
 *
 * @see #m_numberOfClusters
 * @see #m_numberOfClustersDetermined
 */
protected void determineNumberOfClusters() {
  if (m_numberOfClustersDetermined || m_cobwebTree == null) {
    return;
  }
  int[] counter = new int[] { 0 };
  try {
    m_cobwebTree.assignClusterNums(counter);
  } catch (Exception e) {
    e.printStackTrace();
    counter[0] = 0;
  }
  m_numberOfClusters = counter[0];
  m_numberOfClustersDetermined = true;
}
/**
 * Returns the number of clusters (nodes in the tree), computing it first
 * if necessary.
 *
 * @return the number of clusters
 */
@Override
public int numberOfClusters() {
determineNumberOfClusters();
return m_numberOfClusters;
}
/**
 * Get the root of the tree (null until the first instance has been seen).
 *
 * @return the root of the tree.
 */
public CNode getTreeRoot() {
return m_cobwebTree;
}
/**
 * Adds an instance to the clusterer, invalidating the cached cluster
 * count.
 *
 * @param newInstance the instance to be added
 * @throws Exception if something goes wrong
 */
@Override
public void updateClusterer(Instance newInstance) throws Exception {
  m_numberOfClustersDetermined = false;
  if (m_cobwebTree != null) {
    m_cobwebTree.addInstance(newInstance);
  } else {
    // the first instance seeds the tree
    m_cobwebTree = new CNode(newInstance.numAttributes(), newInstance);
  }
}
/**
 * Adds an instance to the Cobweb tree. Simply delegates to
 * updateClusterer(Instance).
 *
 * @param newInstance the instance to be added
 * @throws Exception if something goes wrong
 * @deprecated updateClusterer(Instance) should be used instead
 * @see #updateClusterer(Instance)
 */
@Deprecated
public void addInstance(Instance newInstance) throws Exception {
updateClusterer(newInstance);
}
/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 **/
@Override
public Enumeration<Option> listOptions() {
  Vector<Option> options = new Vector<Option>();
  options.add(new Option("\tAcuity.\n" + "\t(default=1.0)", "A", 1,
    "-A <acuity>"));
  options.add(new Option("\tCutoff.\n" + "\t(default=0.002)", "C", 1,
    "-C <cutoff>"));
  options.add(new Option("\tSave instance data.", "save-data", 0,
    "-save-data"));
  // append the superclass options (e.g. seed, debug flags)
  options.addAll(Collections.list(super.listOptions()));
  return options.elements();
}
/**
 * Parses a given list of options.
 * <p/>
 *
 * <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -A &lt;acuity&gt;
 * Acuity.
 * (default=1.0)</pre>
 *
 * <pre> -C &lt;cutoff&gt;
 * Cutoff.
 * (default=0.002)</pre>
 *
 * <pre> -save-data
 * Save instance data.</pre>
 *
 * <pre> -S &lt;num&gt;
 * Random number seed.
 * (default 42)</pre>
 *
 * <pre> -output-debug-info
 * If set, clusterer is run in debug mode and
 * may output additional info to the console</pre>
 *
 * <pre> -do-not-check-capabilities
 * If set, clusterer capabilities are not checked before clusterer is built
 * (use with caution).</pre>
 *
 * <!-- options-end -->
 *
 * @param options the list of options as an array of strings
 * @throws Exception if an option is not supported (or a numeric option
 *           value cannot be parsed)
 */
@Override
public void setOptions(String[] options) throws Exception {
  String optionString;
  // -A: acuity, reset to the default (1.0) when absent
  optionString = Utils.getOption('A', options);
  if (optionString.length() != 0) {
    // Double.parseDouble avoids the deprecated Double(String) constructor
    setAcuity(Double.parseDouble(optionString));
  } else {
    m_acuity = 1.0;
  }
  // -C: cutoff, reset to the default (0.01 * m_normal) when absent
  optionString = Utils.getOption('C', options);
  if (optionString.length() != 0) {
    setCutoff(Double.parseDouble(optionString));
  } else {
    m_cutoff = 0.01 * Cobweb.m_normal;
  }
  setSaveInstanceData(Utils.getFlag("save-data", options));
  super.setOptions(options);
  Utils.checkForRemainingOptions(options);
}
/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String acuityTipText() {
  return "set the minimum standard deviation for numeric attributes";
}
/**
 * Sets the acuity (minimum standard deviation for numeric attributes).
 *
 * @param a the acuity value
 */
public void setAcuity(double a) {
  this.m_acuity = a;
}
/**
 * Gets the acuity value.
 *
 * @return the acuity
 */
public double getAcuity() {
  return this.m_acuity;
}
/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String cutoffTipText() {
  return "set the category utility threshold by which to prune nodes";
}
/**
 * Sets the cutoff (minimum category utility).
 *
 * @param c the cutoff
 */
public void setCutoff(double c) {
  this.m_cutoff = c;
}
/**
 * Gets the cutoff.
 *
 * @return the cutoff
 */
public double getCutoff() {
  return this.m_cutoff;
}
/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String saveInstanceDataTipText() {
  return "save instance information for visualization purposes";
}
/**
 * Gets whether instance data is saved for later visualization.
 *
 * @return Value of saveInstances.
 */
public boolean getSaveInstanceData() {
  return this.m_saveInstances;
}
/**
 * Sets whether instance data is saved for later visualization.
 *
 * @param newsaveInstances Value to assign to saveInstances.
 */
public void setSaveInstanceData(boolean newsaveInstances) {
  this.m_saveInstances = newsaveInstances;
}
/**
 * Gets the current settings of Cobweb.
 *
 * @return an array of strings suitable for passing to setOptions()
 */
@Override
public String[] getOptions() {
  Vector<String> options = new Vector<String>();
  options.add("-A");
  options.add(Double.toString(m_acuity));
  options.add("-C");
  options.add(Double.toString(m_cutoff));
  if (getSaveInstanceData()) {
    options.add("-save-data");
  }
  // append the superclass options (e.g. seed, debug flags)
  Collections.addAll(options, super.getOptions());
  return options.toArray(new String[0]);
}
/**
 * Returns a description of the clusterer as a string: merge/split/cluster
 * counts followed by a textual dump of the tree.
 *
 * @return a string describing the clusterer.
 */
@Override
public String toString() {
StringBuffer text = new StringBuffer();
if (m_cobwebTree == null) {
return "Cobweb hasn't been built yet!";
} else {
// depth 0 triggers cluster numbering inside dumpTree
m_cobwebTree.dumpTree(0, text);
return "Number of merges: " + m_numberMerges + "\nNumber of splits: "
+ m_numberSplits + "\nNumber of clusters: " + numberOfClusters() + "\n"
+ text.toString() + "\n\n";
}
}
/**
 * Returns the type of graphs this class represents.
 *
 * @return Drawable.TREE
 */
@Override
public int graphType() {
return Drawable.TREE;
}
/**
 * Generates the graph (DOT format) string of the Cobweb tree.
 *
 * @return a <code>String</code> value
 * @throws Exception if an error occurs
 */
@Override
public String graph() throws Exception {
  StringBuffer graphText = new StringBuffer("digraph CobwebTree {\n");
  m_cobwebTree.graphTree(graphText);
  return graphText.append("}\n").toString();
}
/**
 * Returns the revision string (filled in by version control keyword
 * expansion).
 *
 * @return the revision
 */
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
 * Returns the tip text for the seed property, extending the superclass
 * text with Cobweb's special-case for negative seeds.
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
@Override
public String seedTipText() {
  return super.seedTipText() + " Use -1 for no randomization.";
}
/**
 * Main method for running this clusterer from the command line.
 *
 * @param argv the commandline options
 */
public static void main(String[] argv) {
  Cobweb clusterer = new Cobweb();
  runClusterer(clusterer, argv);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/clusterers/DensityBasedClusterer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* DensityBasedClusterer.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.clusterers;
import weka.core.Instance;
/**
* Interface for clusterers that can estimate the density for a given instance.
* Implementations will typically extend AbstractDensityBasedClusterer.
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public interface DensityBasedClusterer extends Clusterer {
/**
 * Returns the prior probability of each cluster.
 *
 * @return the prior probability for each cluster
 * @exception Exception if priors could not be returned successfully
 */
double[] clusterPriors() throws Exception;
/**
 * Computes the log of the conditional density (per cluster) for a given
 * instance.
 *
 * @param instance the instance to compute the density for
 * @return an array containing the estimated densities, in log space
 * @exception Exception if the density could not be computed successfully
 */
double[] logDensityPerClusterForInstance(Instance instance) throws Exception;
/**
 * Computes the density for a given instance, in log space.
 *
 * @param instance the instance to compute the density for
 * @return the density.
 * @exception Exception if the density could not be computed successfully
 */
double logDensityForInstance(Instance instance) throws Exception;
/**
 * Returns the logs of the joint densities (cluster prior times
 * conditional density) for a given instance.
 *
 * @param inst the instance
 * @return the array of values, one per cluster
 * @exception Exception if values could not be computed
 */
double[] logJointDensitiesForInstance(Instance inst) throws Exception;
/**
 * Returns the cluster membership probability distribution for an instance.
 *
 * @param instance the instance to be clustered
 * @return the probability distribution
 * @throws Exception if computation fails
 */
@Override
double[] distributionForInstance(Instance instance) throws Exception;
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/clusterers/EM.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* EM.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.clusterers;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.List;
import java.util.Random;
import java.util.Vector;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.core.WeightedInstancesHandler;
import weka.estimators.DiscreteEstimator;
import weka.estimators.Estimator;
import weka.filters.unsupervised.attribute.ReplaceMissingValues;
/**
* <!-- globalinfo-start -->
* Simple EM (expectation maximisation) class.<br/>
* <br/>
* EM assigns a probability distribution to each instance which indicates the probability of it belonging to each of the clusters. EM can decide how many clusters to create by cross validation, or you may specify apriori how many clusters to generate.<br/>
* <br/>
* The cross validation performed to determine the number of clusters is done in the following steps:<br/>
* 1. the number of clusters is set to 1<br/>
* 2. the training set is split randomly into 10 folds.<br/>
* 3. EM is performed 10 times using the 10 folds the usual CV way.<br/>
* 4. the loglikelihood is averaged over all 10 results.<br/>
* 5. if loglikelihood has increased the number of clusters is increased by 1 and the program continues at step 2. <br/>
* <br/>
* The number of folds is fixed to 10, as long as the number of instances in the training set is not smaller 10. If this is the case the number of folds is set equal to the number of instances.<br/>
* <br/>
* Missing values are globally replaced with ReplaceMissingValues.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start -->
* Valid options are: <p/>
*
* <pre> -N <num>
* number of clusters. If omitted or -1 specified, then
* cross validation is used to select the number of clusters.</pre>
*
* <pre> -X <num>
* Number of folds to use when cross-validating to find the best number of clusters.</pre>
*
* <pre> -K <num>
* Number of runs of k-means to perform.
* (default 10)</pre>
*
* <pre> -max <num>
* Maximum number of clusters to consider during cross-validation. If omitted or -1 specified, then
* there is no upper limit on the number of clusters.</pre>
*
* <pre> -ll-cv <num>
* Minimum improvement in cross-validated log likelihood required
* to consider increasing the number of clusters.
* (default 1e-6)</pre>
*
* <pre> -I <num>
* max iterations.
* (default 100)</pre>
*
* <pre> -ll-iter <num>
* Minimum improvement in log likelihood required
* to perform another iteration of the E and M steps.
* (default 1e-6)</pre>
*
* <pre> -V
* verbose.</pre>
*
* <pre> -M <num>
* minimum allowable standard deviation for normal density
* computation
* (default 1e-6)</pre>
*
* <pre> -O
* Display model in old format (good when there are many clusters)
* </pre>
*
* <pre> -num-slots <num>
* Number of execution slots.
* (default 1 - i.e. no parallelism)</pre>
*
* <pre> -S <num>
* Random number seed.
* (default 100)</pre>
*
* <pre> -output-debug-info
* If set, clusterer is run in debug mode and
* may output additional info to the console</pre>
*
* <pre> -do-not-check-capabilities
* If set, clusterer capabilities are not checked before clusterer is built
* (use with caution).</pre>
*
* <!-- options-end -->
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public class EM extends RandomizableDensityBasedClusterer implements
NumberOfClustersRequestable, WeightedInstancesHandler {
/** for serialization */
static final long serialVersionUID = 8348181483812829475L;
/** discrete estimators saved from the previous EM iteration */
private Estimator m_modelPrev[][];
/** numeric (normal) estimator parameters saved from the previous EM iteration */
private double[][][] m_modelNormalPrev;
/** cluster priors saved from the previous EM iteration */
private double[] m_priorsPrev;
/** hold the discrete estimators for each cluster */
private Estimator m_model[][];
/** hold the normal estimators for each cluster */
private double m_modelNormal[][][];
/** default minimum standard deviation */
private double m_minStdDev = 1e-6;
/** optional per-attribute minimum standard deviations; when non-null these
 * override m_minStdDev for the corresponding attribute */
private double[] m_minStdDevPerAtt;
/** hold the weights of each instance for each cluster */
private double m_weights[][];
/** the prior probabilities for clusters */
private double m_priors[];
/** full training instances */
private Instances m_theInstances = null;
/** number of clusters selected by the user or cross validation */
private int m_num_clusters;
/**
 * the initial number of clusters requested by the user--- -1 if xval is to be
 * used to find the number of clusters
 */
private int m_initialNumClusters;
/** Don't consider more clusters than this under CV (-1 means no upper bound) */
private int m_upperBoundNumClustersCV = -1;
/** number of attributes */
private int m_num_attribs;
/** number of training instances */
private int m_num_instances;
/** maximum iterations to perform */
private int m_max_iterations;
/** attribute min values */
private double[] m_minValues;
/** attribute max values */
private double[] m_maxValues;
/** random number generator */
private Random m_rr;
/** Verbose? */
private boolean m_verbose;
/** globally replace missing values */
private ReplaceMissingValues m_replaceMissing;
/** display model output in old-style format */
private boolean m_displayModelInOldFormat;
/** Number of threads to use for E and M steps */
protected int m_executionSlots = 1;
/** For parallel execution mode */
protected transient ExecutorService m_executorPool;
/** False once training has completed */
protected boolean m_training;
/** The actual number of iterations performed */
protected int m_iterationsPerformed;
/** Minimum improvement in log likelihood when iterating */
protected double m_minLogLikelihoodImprovementIterating = 1e-6;
/** Minimum improvement to increase number of clusters when cross-validating */
protected double m_minLogLikelihoodImprovementCV = 1e-6;
/** The number of folds to use for cross-validation */
protected int m_cvFolds = 10;
/** The number of runs of k-means to perform */
protected int m_NumKMeansRuns = 10;
/**
 * Returns a string describing this clusterer.
 *
 * @return a description of the evaluator suitable for displaying in the
 *         explorer/experimenter gui
 */
public String globalInfo() {
  return "Simple EM (expectation maximisation) class.\n\n"
    + "EM assigns a probability distribution to each instance which "
    + "indicates the probability of it belonging to each of the clusters. "
    + "EM can decide how many clusters to create by cross validation, or you "
    + "may specify apriori how many clusters to generate.\n\n"
    + "The cross validation performed to determine the number of clusters "
    + "is done in the following steps:\n"
    + "1. the number of clusters is set to 1\n"
    + "2. the training set is split randomly into 10 folds.\n"
    + "3. EM is performed 10 times using the 10 folds the usual CV way.\n"
    + "4. the loglikelihood is averaged over all 10 results.\n"
    + "5. if loglikelihood has increased the number of clusters is increased "
    + "by 1 and the program continues at step 2. \n\n"
    + "The number of folds is fixed to 10, as long as the number of "
    + "instances in the training set is not smaller 10. If this is the case "
    + "the number of folds is set equal to the number of instances.\n\n"
    + "Missing values are globally replaced with ReplaceMissingValues.";
}
/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 */
@Override
public Enumeration<Option> listOptions() {
  Vector<Option> result = new Vector<Option>();
  // cluster-count selection options
  result.addElement(new Option(
    "\tnumber of clusters. If omitted or -1 specified, then \n"
      + "\tcross validation is used to select the number of clusters.", "N",
    1, "-N <num>"));
  result
    .addElement(new Option(
      "\tNumber of folds to use when cross-validating to find the best number of clusters.",
      "X", 1, "-X <num>"));
  result.addElement(new Option("\tNumber of runs of k-means to perform." + "\n\t(default 10)",
    "K", 1, "-K <num>"));
  result
    .addElement(new Option(
      "\tMaximum number of clusters to consider during cross-validation. If omitted or -1 specified, then \n"
        + "\tthere is no upper limit on the number of clusters.", "max", 1,
      "-max <num>"));
  // convergence thresholds
  result.addElement(new Option(
    "\tMinimum improvement in cross-validated log likelihood required"
      + "\n\tto consider increasing the number of clusters."
      + "\n\t(default 1e-6)", "ll-cv", 1, "-ll-cv <num>"));
  result.addElement(new Option("\tmax iterations." + "\n\t(default 100)",
    "I", 1, "-I <num>"));
  result.addElement(new Option(
    "\tMinimum improvement in log likelihood required"
      + "\n\tto perform another iteration of the E and M steps."
      + "\n\t(default 1e-6)", "ll-iter", 1, "-ll-iter <num>"));
  result.addElement(new Option("\tverbose.", "V", 0, "-V"));
  result.addElement(new Option(
    "\tminimum allowable standard deviation for normal density\n"
      + "\tcomputation\n" + "\t(default 1e-6)", "M", 1, "-M <num>"));
  // output / execution options
  result.addElement(new Option(
    "\tDisplay model in old format (good when there are "
      + "many clusters)\n", "O", 0, "-O"));
  result.addElement(new Option("\tNumber of execution slots.\n"
    + "\t(default 1 - i.e. no parallelism)", "num-slots", 1,
    "-num-slots <num>"));
  result.addAll(Collections.list(super.listOptions()));
  return result.elements();
}
/**
* Parses a given list of options.
* <p/>
*
* <!-- options-start -->
* Valid options are: <p/>
*
* <pre> -N <num>
* number of clusters. If omitted or -1 specified, then
* cross validation is used to select the number of clusters.</pre>
*
* <pre> -X <num>
* Number of folds to use when cross-validating to find the best number of clusters.</pre>
*
* <pre> -K <num>
* Number of runs of k-means to perform.
* (default 10)</pre>
*
* <pre> -max <num>
* Maximum number of clusters to consider during cross-validation. If omitted or -1 specified, then
* there is no upper limit on the number of clusters.</pre>
*
* <pre> -ll-cv <num>
* Minimum improvement in cross-validated log likelihood required
* to consider increasing the number of clusters.
* (default 1e-6)</pre>
*
* <pre> -I <num>
* max iterations.
* (default 100)</pre>
*
* <pre> -ll-iter <num>
* Minimum improvement in log likelihood required
* to perform another iteration of the E and M steps.
* (default 1e-6)</pre>
*
* <pre> -V
* verbose.</pre>
*
* <pre> -M <num>
* minimum allowable standard deviation for normal density
* computation
* (default 1e-6)</pre>
*
* <pre> -O
* Display model in old format (good when there are many clusters)
* </pre>
*
* <pre> -num-slots <num>
* Number of execution slots.
* (default 1 - i.e. no parallelism)</pre>
*
* <pre> -S <num>
* Random number seed.
* (default 100)</pre>
*
* <pre> -output-debug-info
* If set, clusterer is run in debug mode and
* may output additional info to the console</pre>
*
* <pre> -do-not-check-capabilities
* If set, clusterer capabilities are not checked before clusterer is built
* (use with caution).</pre>
*
* <!-- options-end -->
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
  resetOptions();
  setDebug(Utils.getFlag('V', options));
  String optionString = Utils.getOption('I', options);
  if (optionString.length() != 0) {
    setMaxIterations(Integer.parseInt(optionString));
  }
  optionString = Utils.getOption('X', options);
  if (optionString.length() > 0) {
    setNumFolds(Integer.parseInt(optionString));
  }
  optionString = Utils.getOption("ll-iter", options);
  if (optionString.length() > 0) {
    setMinLogLikelihoodImprovementIterating(Double.parseDouble(optionString));
  }
  optionString = Utils.getOption("ll-cv", options);
  if (optionString.length() > 0) {
    setMinLogLikelihoodImprovementCV(Double.parseDouble(optionString));
  }
  optionString = Utils.getOption('N', options);
  if (optionString.length() != 0) {
    setNumClusters(Integer.parseInt(optionString));
  }
  optionString = Utils.getOption("max", options);
  if (optionString.length() > 0) {
    setMaximumNumberOfClusters(Integer.parseInt(optionString));
  }
  optionString = Utils.getOption('M', options);
  if (optionString.length() != 0) {
    // parse directly rather than via the deprecated Double(String) constructor
    setMinStdDev(Double.parseDouble(optionString));
  }
  optionString = Utils.getOption('K', options);
  if (optionString.length() != 0) {
    // parse directly rather than via the deprecated Integer(String) constructor
    setNumKMeansRuns(Integer.parseInt(optionString));
  }
  setDisplayModelInOldFormat(Utils.getFlag('O', options));
  String slotsS = Utils.getOption("num-slots", options);
  if (slotsS.length() > 0) {
    setNumExecutionSlots(Integer.parseInt(slotsS));
  }
  super.setOptions(options);
  Utils.checkForRemainingOptions(options);
}
/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String numKMeansRunsTipText() {
  return "The number of runs of k-means to perform.";
}
/**
 * Returns the number of runs of k-means to perform.
 *
 * @return the number of runs
 */
public int getNumKMeansRuns() {
  return m_NumKMeansRuns;
}
/**
 * Set the number of runs of SimpleKMeans to perform.
 *
 * @param intValue the number of k-means runs to perform (default 10)
 */
public void setNumKMeansRuns(int intValue) {
  m_NumKMeansRuns = intValue;
}
/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String numFoldsTipText() {
  return "The number of folds to use when cross-validating to find the "
    + "best number of clusters (default = 10)";
}
/**
 * Set the number of folds to use when cross-validating to find the best
 * number of clusters.
 *
 * @param folds the number of folds to use
 */
public void setNumFolds(int folds) {
  m_cvFolds = folds;
}
/**
 * Get the number of folds to use when cross-validating to find the best
 * number of clusters.
 *
 * @return the number of folds to use
 */
public int getNumFolds() {
  return m_cvFolds;
}
/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String minLogLikelihoodImprovementCVTipText() {
  // fixed typo in the user-visible text: "cross-validiting" -> "cross-validating"
  return "The minimum improvement in cross-validated log likelihood required "
    + "in order to consider increasing the number of clusters "
    + "when cross-validating to find the best number of clusters";
}
/**
 * Set the minimum improvement in cross-validated log likelihood required to
 * consider increasing the number of clusters when cross-validating to find
 * the best number of clusters
 *
 * @param min the minimum improvement in log likelihood (default 1e-6)
 */
public void setMinLogLikelihoodImprovementCV(double min) {
  m_minLogLikelihoodImprovementCV = min;
}
/**
 * Get the minimum improvement in cross-validated log likelihood required to
 * consider increasing the number of clusters when cross-validating to find
 * the best number of clusters
 *
 * @return the minimum improvement in log likelihood
 */
public double getMinLogLikelihoodImprovementCV() {
  return m_minLogLikelihoodImprovementCV;
}
/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String minLogLikelihoodImprovementIteratingTipText() {
  return "The minimum improvement in log likelihood required to "
    + "perform another iteration of the E and M steps";
}
/**
 * Set the minimum improvement in log likelihood necessary to perform another
 * iteration of the E and M steps.
 *
 * @param min the minimum improvement in log likelihood (default 1e-6)
 */
public void setMinLogLikelihoodImprovementIterating(double min) {
  m_minLogLikelihoodImprovementIterating = min;
}
/**
 * Get the minimum improvement in log likelihood necessary to perform another
 * iteration of the E and M steps.
 *
 * @return the minimum improvement in log likelihood
 */
public double getMinLogLikelihoodImprovementIterating() {
  return m_minLogLikelihoodImprovementIterating;
}
/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String numExecutionSlotsTipText() {
  return "The number of execution slots (threads) to use. "
    + "Set equal to the number of available cpu/cores";
}
/**
 * Set the degree of parallelism to use.
 *
 * @param slots the number of tasks to run in parallel when performing the E
 *          and M steps (default 1, i.e. no parallelism)
 */
public void setNumExecutionSlots(int slots) {
  m_executionSlots = slots;
}
/**
 * Get the degree of parallelism to use.
 *
 * @return the number of tasks to run in parallel when performing the E and M
 *         steps
 */
public int getNumExecutionSlots() {
  return m_executionSlots;
}
/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String displayModelInOldFormatTipText() {
  return "Use old format for model output. The old format is "
    + "better when there are many clusters. The new format "
    + "is better when there are fewer clusters and many attributes.";
}
/**
 * Set whether to display model output in the old, original format.
 *
 * @param d true if model output is to be shown in the old format
 */
public void setDisplayModelInOldFormat(boolean d) {
  m_displayModelInOldFormat = d;
}
/**
 * Get whether to display model output in the old, original format.
 *
 * @return true if model output is to be shown in the old format
 */
public boolean getDisplayModelInOldFormat() {
  return m_displayModelInOldFormat;
}
/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String minStdDevTipText() {
  return "set minimum allowable standard deviation";
}
/**
 * Set the minimum value for standard deviation when calculating normal
 * density. Reducing this value can help prevent arithmetic overflow resulting
 * from multiplying large densities (arising from small standard deviations)
 * when there are many singleton or near singleton values.
 *
 * @param m minimum value for standard deviation
 */
public void setMinStdDev(double m) {
  m_minStdDev = m;
}
/**
 * Set per-attribute minimum allowable standard deviations. When supplied,
 * these override the global minimum for the corresponding attribute.
 *
 * @param m an array of minimum standard deviations, one entry per attribute
 */
public void setMinStdDevPerAtt(double[] m) {
  m_minStdDevPerAtt = m;
}
/**
 * Get the minimum allowable standard deviation.
 *
 * @return the minimum allowable standard deviation
 */
public double getMinStdDev() {
  return m_minStdDev;
}
/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String numClustersTipText() {
  return "set number of clusters. -1 to select number of clusters "
    + "automatically by cross validation.";
}
/**
 * Set the number of clusters to generate (-1 to select the number by cross
 * validation).
 *
 * @param n the number of clusters
 * @throws Exception if n is 0
 */
@Override
public void setNumClusters(int n) throws Exception {
  if (n == 0) {
    throw new Exception("Number of clusters must be > 0. (or -1 to "
      + "select by cross validation).");
  }
  // any negative request is treated as "select by cross validation" (-1)
  m_num_clusters = (n < 0) ? -1 : n;
  m_initialNumClusters = m_num_clusters;
}
/**
 * Get the number of clusters
 *
 * @return the number of clusters requested by the user, or -1 if the number
 *         is being selected by cross validation
 */
public int getNumClusters() {
  return m_initialNumClusters;
}
/**
 * Set the maximum number of clusters to consider when cross-validating
 *
 * @param n the maximum number of clusters to consider (-1 for no upper bound)
 */
public void setMaximumNumberOfClusters(int n) {
  m_upperBoundNumClustersCV = n;
}
/**
 * Get the maximum number of clusters to consider when cross-validating
 *
 * @return the maximum number of clusters to consider (-1 means no upper
 *         bound)
 */
public int getMaximumNumberOfClusters() {
  return m_upperBoundNumClustersCV;
}
/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String maximumNumberOfClustersTipText() {
  return "The maximum number of clusters to consider during cross-validation "
    + "to select the best number of clusters";
}
/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String maxIterationsTipText() {
  return "maximum number of iterations";
}
/**
 * Set the maximum number of iterations to perform
 *
 * @param i the number of iterations (default 100)
 * @throws Exception if i is less than 1
 */
public void setMaxIterations(int i) throws Exception {
  if (i < 1) {
    throw new Exception("Maximum number of iterations must be > 0!");
  }
  m_max_iterations = i;
}
/**
 * Get the maximum number of iterations
 *
 * @return the number of iterations
 */
public int getMaxIterations() {
  return m_max_iterations;
}
/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
@Override
public String debugTipText() {
  return "If set to true, clusterer may output additional info to "
    + "the console.";
}
/**
 * Set debug mode - verbose output
 *
 * @param v true for verbose output
 */
@Override
public void setDebug(boolean v) {
  // debug and verbose are the same switch in this clusterer
  m_verbose = v;
}
/**
 * Get debug mode
 *
 * @return true if debug mode is set
 */
@Override
public boolean getDebug() {
  return m_verbose;
}
/**
 * Gets the current settings of EM as a flat array of CLI-style flags and
 * values.
 *
 * @return an array of strings suitable for passing to setOptions()
 */
@Override
public String[] getOptions() {
  List<String> opts = new ArrayList<String>();
  Collections.addAll(opts, "-I", "" + m_max_iterations);
  Collections.addAll(opts, "-N", "" + getNumClusters());
  Collections.addAll(opts, "-X", "" + getNumFolds());
  Collections.addAll(opts, "-max", "" + getMaximumNumberOfClusters());
  Collections.addAll(opts, "-ll-cv", "" + getMinLogLikelihoodImprovementCV());
  Collections.addAll(opts, "-ll-iter",
    "" + getMinLogLikelihoodImprovementIterating());
  Collections.addAll(opts, "-M", "" + getMinStdDev());
  Collections.addAll(opts, "-K", "" + getNumKMeansRuns());
  if (m_displayModelInOldFormat) {
    opts.add("-O");
  }
  Collections.addAll(opts, "-num-slots", "" + getNumExecutionSlots());
  Collections.addAll(opts, super.getOptions());
  return opts.toArray(new String[opts.size()]);
}
/**
 * Initialize the global aggregated estimators and storage by running
 * SimpleKMeans several times and seeding the model from the best run.
 *
 * @param inst the instances
 * @throws Exception if initialization fails
 **/
private void EM_Init(Instances inst) throws Exception {
  int i, j, k;
  // run k means a user-specified number of times and choose best solution
  SimpleKMeans bestK = null;
  double bestSqE = Double.MAX_VALUE;
  for (i = 0; i < m_NumKMeansRuns; i++) {
    SimpleKMeans sk = new SimpleKMeans();
    sk.setSeed(m_rr.nextInt());
    sk.setNumClusters(m_num_clusters);
    sk.setNumExecutionSlots(m_executionSlots);
    sk.setDisplayStdDevs(true);
    sk.setDoNotCheckCapabilities(true);
    sk.setDontReplaceMissingValues(true);
    sk.buildClusterer(inst);
    if (sk.getSquaredError() < bestSqE) {
      bestSqE = sk.getSquaredError();
      bestK = sk;
    }
  }
  // initialize with best k-means solution
  // NOTE(review): assumes m_NumKMeansRuns >= 1, otherwise bestK is null here
  m_num_clusters = bestK.numberOfClusters();
  // allocate current and previous-iteration model storage
  m_weights = new double[inst.numInstances()][m_num_clusters];
  m_model = new DiscreteEstimator[m_num_clusters][m_num_attribs];
  m_modelNormal = new double[m_num_clusters][m_num_attribs][3];
  m_priors = new double[m_num_clusters];
  m_modelPrev = new DiscreteEstimator[m_num_clusters][m_num_attribs];
  m_modelNormalPrev = new double[m_num_clusters][m_num_attribs][3];
  m_priorsPrev = new double[m_num_clusters];
  Instances centers = bestK.getClusterCentroids();
  Instances stdD = bestK.getClusterStandardDevs();
  double[][][] nominalCounts = bestK.getClusterNominalCounts();
  double[] clusterSizes = bestK.getClusterSizes();
  for (i = 0; i < m_num_clusters; i++) {
    Instance center = centers.instance(i);
    for (j = 0; j < m_num_attribs; j++) {
      if (inst.attribute(j).isNominal()) {
        // seed the discrete estimator with the k-means nominal counts
        m_model[i][j] = new DiscreteEstimator(m_theInstances.attribute(j)
          .numValues(), true);
        for (k = 0; k < inst.attribute(j).numValues(); k++) {
          m_model[i][j].addValue(k, nominalCounts[i][j][k]);
        }
      } else {
        double minStdD = (m_minStdDevPerAtt != null) ? m_minStdDevPerAtt[j]
          : m_minStdDev;
        // mean comes from the k-means centroid
        m_modelNormal[i][j][0] = center.value(j);
        double stdv = stdD.instance(i).value(j);
        if (stdv < minStdD) {
          // too small: fall back to the global attribute std. dev., then to
          // the allowed minimum
          stdv = Math.sqrt(inst.variance(j));
          if (Double.isInfinite(stdv)) {
            stdv = minStdD;
          }
          if (stdv < minStdD) {
            stdv = minStdD;
          }
        }
        if ((stdv <= 0) || Double.isNaN(stdv)) {
          stdv = m_minStdDev;
        }
        m_modelNormal[i][j][1] = stdv;
        m_modelNormal[i][j][2] = 1.0;
      }
    }
  }
  // initial priors are proportional to the k-means cluster sizes
  for (j = 0; j < m_num_clusters; j++) {
    // m_priors[j] += 1.0;
    m_priors[j] = clusterSizes[j];
  }
  Utils.normalize(m_priors);
}
/**
 * Re-estimate the cluster prior probabilities from the current membership
 * weights, saving the old priors in m_priorsPrev first.
 *
 * @param inst the instances
 * @throws Exception if priors can't be calculated
 **/
private void estimate_priors(Instances inst) throws Exception {
  // back up the current priors, then reset the accumulators
  for (int cluster = 0; cluster < m_num_clusters; cluster++) {
    m_priorsPrev[cluster] = m_priors[cluster];
    m_priors[cluster] = 0.0;
  }
  // add up the weighted membership mass contributed by every instance
  int numInsts = inst.numInstances();
  for (int idx = 0; idx < numInsts; idx++) {
    double instWeight = inst.instance(idx).weight();
    for (int cluster = 0; cluster < m_num_clusters; cluster++) {
      m_priors[cluster] += instWeight * m_weights[idx][cluster];
    }
  }
  Utils.normalize(m_priors);
}
/** Constant for normal distribution: log(sqrt(2 * pi)). Declared final since
 * it is a constant and is never reassigned. */
private static final double m_normConst = Math.log(Math.sqrt(2 * Math.PI));
/**
 * Log of the normal density function evaluated at a point.
 *
 * @param x input value
 * @param mean mean of distribution
 * @param stdDev standard deviation of distribution
 * @return the log of the density value at x
 */
private double logNormalDens(double x, double mean, double stdDev) {
  double residual = x - mean;
  double exponent = residual * residual / (2 * stdDev * stdDev);
  return -exponent - m_normConst - Math.log(stdDev);
}
/**
 * Prepare fresh probability estimators for the next iteration, saving the
 * current ones into the *Prev structures.
 */
private void new_estimators() {
  for (int cluster = 0; cluster < m_num_clusters; cluster++) {
    for (int att = 0; att < m_num_attribs; att++) {
      if (m_theInstances.attribute(att).isNominal()) {
        // keep the old discrete estimator and start a fresh one
        m_modelPrev[cluster][att] = m_model[cluster][att];
        m_model[cluster][att] = new DiscreteEstimator(m_theInstances
          .attribute(att).numValues(), true);
      } else {
        // copy the three numeric accumulators, then zero them for
        // re-accumulation
        for (int stat = 0; stat < 3; stat++) {
          m_modelNormalPrev[cluster][att][stat] = m_modelNormal[cluster][att][stat];
          m_modelNormal[cluster][att][stat] = 0.0;
        }
      }
    }
  }
}
/**
 * Start the pool of execution threads used for the parallel E and M steps.
 */
protected void startExecutorPool() {
  // discard any pool left over from a previous run before creating a new one
  if (m_executorPool != null) {
    m_executorPool.shutdownNow();
  }
  m_executorPool = Executors.newFixedThreadPool(m_executionSlots);
}
/**
 * Callable performing (part of) the E step over a contiguous range of
 * instances. Returns a two-element array: {sum of weighted log densities,
 * sum of instance weights} for the range [m_lowNum, m_highNum).
 */
private class ETask implements Callable<double[]> {
  /** index of the first instance (inclusive) handled by this task */
  protected int m_lowNum;
  /** index one past the last instance handled by this task */
  protected int m_highNum;
  /** whether to update m_weights with new membership distributions */
  protected boolean m_changeWeights;
  /** the instances to process */
  protected Instances m_eData;
  public ETask(Instances data, int lowInstNum, int highInstNum,
    boolean changeWeights) {
    m_eData = data;
    m_lowNum = lowInstNum;
    m_highNum = highInstNum;
    m_changeWeights = changeWeights;
  }
  @Override
  public double[] call() {
    double[] llk = new double[2];
    double loglk = 0.0, sOW = 0.0;
    try {
      for (int i = m_lowNum; i < m_highNum; i++) {
        Instance in = m_eData.instance(i);
        loglk += in.weight() * EM.this.logDensityForInstance(in);
        sOW += in.weight();
        if (m_changeWeights) {
          m_weights[i] = distributionForInstance(in);
        }
      }
      // completedETask(loglk, sOW, true);
    } catch (Exception ex) {
      // NOTE(review): the exception is swallowed here and the task silently
      // returns the partial sums accumulated so far -- confirm this is the
      // intended best-effort behaviour
      // completedETask(0, 0, false);
    }
    llk[0] = loglk;
    llk[1] = sOW;
    return llk;
  }
}
/**
 * Callable accumulating (part of) the M step sufficient statistics over a
 * contiguous range of instances into per-task estimators supplied by the
 * caller, which aggregates the tasks' results afterwards.
 */
private class MTask implements Callable<MTask> {
  // protected Instances m_dataChunk;
  /** index of the first instance (inclusive) handled by this task */
  protected int m_start;
  /** index one past the last instance handled by this task */
  protected int m_end;
  /** the instances to process */
  protected Instances m_inst;
  /** per-task discrete estimators, indexed [cluster][attribute] */
  protected DiscreteEstimator[][] m_taskModel;
  /** per-task numeric accumulators, indexed [cluster][attribute][statistic] */
  double[][][] m_taskModelNormal;
  public MTask(Instances inst, int start, int end,
    DiscreteEstimator[][] discEst, double[][][] numericEst) {
    // m_dataChunk = chunk;
    m_start = start;
    m_end = end;
    m_inst = inst;
    m_taskModel = discEst;
    m_taskModelNormal = numericEst;
  }
  @Override
  public MTask call() {
    for (int l = m_start; l < m_end; l++) {
      Instance in = m_inst.instance(l);
      for (int i = 0; i < m_num_clusters; i++) {
        for (int j = 0; j < m_num_attribs; j++) {
          if (m_inst.attribute(j).isNominal()) {
            m_taskModel[i][j].addValue(in.value(j), in.weight()
              * m_weights[l][i]);
          } else {
            // accumulate weighted sum ([0]), weighted sum of squares ([1])
            // and weight mass ([2]) for the numeric attribute
            m_taskModelNormal[i][j][0] += (in.value(j) * in.weight() * m_weights[l][i]);
            m_taskModelNormal[i][j][2] += in.weight() * m_weights[l][i];
            m_taskModelNormal[i][j][1] += (in.value(j) * in.value(j)
              * in.weight() * m_weights[l][i]);
          }
        }
      }
    }
    // completedMTask(this, true);
    return this;
  }
}
/**
 * Converts the accumulated numeric sufficient statistics in m_modelNormal
 * ([0]=weighted sum, [1]=weighted sum of squares, [2]=weight mass) into
 * distribution parameters ([0]=mean, [1]=std. deviation, [2]=weight mass)
 * for each numeric attribute in each cluster.
 *
 * @param inst the training instances (used for fallback variances)
 */
private void M_reEstimate(Instances inst) {
  // calcualte mean and std deviation for numeric attributes
  for (int i = 0; i < m_num_clusters; i++) {
    for (int j = 0; j < m_num_attribs; j++) {
      if (!inst.attribute(j).isNominal()) {
        if (m_modelNormal[i][j][2] <= 0) {
          // no weight mass assigned to this cluster/attribute
          m_modelNormal[i][j][1] = Double.MAX_VALUE;
          // m_modelNormal[i][j][0] = 0;
          m_modelNormal[i][j][0] = m_minStdDev;
        } else {
          // variance
          m_modelNormal[i][j][1] = (m_modelNormal[i][j][1] - (m_modelNormal[i][j][0]
            * m_modelNormal[i][j][0] / m_modelNormal[i][j][2]))
            / (m_modelNormal[i][j][2]);
          if (m_modelNormal[i][j][1] < 0) {
            // guard against a slightly negative variance caused by rounding
            m_modelNormal[i][j][1] = 0;
          }
          // std dev
          double minStdD = (m_minStdDevPerAtt != null) ? m_minStdDevPerAtt[j]
            : m_minStdDev;
          m_modelNormal[i][j][1] = Math.sqrt(m_modelNormal[i][j][1]);
          // if too small, fall back to the global attribute std. dev., then
          // to the allowed minimum
          if ((m_modelNormal[i][j][1] <= minStdD)) {
            m_modelNormal[i][j][1] = Math.sqrt(inst.variance(j));
            if ((m_modelNormal[i][j][1] <= minStdD)) {
              m_modelNormal[i][j][1] = minStdD;
            }
          }
          if ((m_modelNormal[i][j][1] <= 0)) {
            m_modelNormal[i][j][1] = m_minStdDev;
          }
          if (Double.isInfinite(m_modelNormal[i][j][1])) {
            m_modelNormal[i][j][1] = m_minStdDev;
          }
          // mean
          m_modelNormal[i][j][0] /= m_modelNormal[i][j][2];
        }
      }
    }
  }
}
/**
 * The M step of the EM algorithm. Re-estimates the cluster priors and the
 * per-cluster attribute distributions from the current membership weights.
 *
 * @param inst the training instances
 * @throws Exception if something goes wrong
 */
private void M(Instances inst) throws Exception {
  int i, j, l;
  new_estimators();
  estimate_priors(inst);
  // sum
  for (l = 0; l < inst.numInstances(); l++) {
    Instance in = inst.instance(l);
    for (i = 0; i < m_num_clusters; i++) {
      for (j = 0; j < m_num_attribs; j++) {
        if (inst.attribute(j).isNominal()) {
          m_model[i][j]
            .addValue(in.value(j), in.weight() * m_weights[l][i]);
        } else {
          // accumulate weighted sum ([0]), weighted sum of squares ([1]) and
          // weight mass ([2]); converted to mean/std. dev. in M_reEstimate
          m_modelNormal[i][j][0] += (in.value(j) * in.weight() * m_weights[l][i]);
          m_modelNormal[i][j][2] += in.weight() * m_weights[l][i];
          m_modelNormal[i][j][1] += (in.value(j) * in.value(j)
            * in.weight() * m_weights[l][i]);
        }
      }
    }
  }
  // re-estimate Gaussian parameters
  M_reEstimate(inst);
}
/**
 * The E step of the EM algorithm. Estimate cluster membership probabilities.
 *
 * @param inst the training instances
 * @param change_weights whether to update m_weights with the new membership
 *          distributions
 * @return the weighted average log likelihood per instance (0 if the total
 *         instance weight is not positive)
 * @throws Exception if computation fails
 */
private double E(Instances inst, boolean change_weights) throws Exception {
  double loglk = 0.0, sOW = 0.0;
  for (int l = 0; l < inst.numInstances(); l++) {
    Instance in = inst.instance(l);
    loglk += in.weight() * logDensityForInstance(in);
    sOW += in.weight();
    if (change_weights) {
      m_weights[l] = distributionForInstance(in);
    }
  }
  if (sOW <= 0) { // In case all weights are zero
    return 0;
  }
  // reestimate priors
  /*
   * if (change_weights) { estimate_priors(inst); }
   */
  return loglk / sOW;
}
/**
 * Constructor. Sets the default random seed (100) before resetting all
 * other options to their defaults.
 *
 **/
public EM() {
  super();
  // must be set before resetOptions(), which copies it into m_Seed
  m_SeedDefault = 100;
  resetOptions();
}
/**
 * Reset to default options
 */
protected void resetOptions() {
  m_minStdDev = 1e-6;
  m_max_iterations = 100;
  m_Seed = m_SeedDefault;
  m_num_clusters = -1;
  m_initialNumClusters = -1;
  m_verbose = false;
  m_minLogLikelihoodImprovementIterating = 1e-6;
  m_minLogLikelihoodImprovementCV = 1e-6;
  m_executionSlots = 1;
  m_cvFolds = 10;
  // NOTE(review): m_NumKMeansRuns, m_upperBoundNumClustersCV and
  // m_displayModelInOldFormat are not reset here; they keep whatever values
  // were set previously (their field initializers supply the defaults)
}
/**
 * Return the normal distributions for the cluster models
 *
 * @return a <code>double[][][]</code> value indexed by cluster and then
 *         attribute; for numeric attributes the innermost array holds
 *         [mean, standard deviation, weight mass]
 */
public double[][][] getClusterModelsNumericAtts() {
  return m_modelNormal;
}
/**
 * Return the priors for the clusters
 *
 * @return a <code>double[]</code> value holding one prior probability per
 *         cluster, normalized to sum to one
 */
public double[] getClusterPriors() {
  return m_priors;
}
/**
 * Outputs the generated clusters into a string. Builds a fixed-width table
 * with one column per cluster; numeric attributes show mean/std. dev.,
 * nominal attributes show per-value counts and a total.
 *
 * @return the clusterer in string representation
 */
@Override
public String toString() {
  // honour the legacy display option
  if (m_displayModelInOldFormat) {
    return toStringOriginal();
  }
  if (m_priors == null) {
    return "No clusterer built yet!";
  }
  StringBuffer temp = new StringBuffer();
  temp.append("\nEM\n==\n");
  if (m_initialNumClusters == -1) {
    temp.append("\nNumber of clusters selected by cross validation: "
      + m_num_clusters + "\n");
  } else {
    temp.append("\nNumber of clusters: " + m_num_clusters + "\n");
  }
  temp.append("Number of iterations performed: " + m_iterationsPerformed
    + "\n");
  // First pass: work out column widths so the table lines up
  int maxWidth = 0;
  int maxAttWidth = 0;
  // set up max widths
  // attributes
  for (int i = 0; i < m_num_attribs; i++) {
    Attribute a = m_theInstances.attribute(i);
    if (a.name().length() > maxAttWidth) {
      maxAttWidth = m_theInstances.attribute(i).name().length();
    }
    if (a.isNominal()) {
      // check values
      for (int j = 0; j < a.numValues(); j++) {
        String val = a.value(j) + " ";
        if (val.length() > maxAttWidth) {
          maxAttWidth = val.length();
        }
      }
    }
  }
  for (int i = 0; i < m_num_clusters; i++) {
    for (int j = 0; j < m_num_attribs; j++) {
      if (m_theInstances.attribute(j).isNumeric()) {
        // check mean and std. dev. against maxWidth
        // log10 of the magnitude approximates the digits needed
        double mean = Math.log(Math.abs(m_modelNormal[i][j][0]))
          / Math.log(10.0);
        double stdD = Math.log(Math.abs(m_modelNormal[i][j][1]))
          / Math.log(10.0);
        double width = (mean > stdD) ? mean : stdD;
        if (width < 0) {
          width = 1;
        }
        // decimal + # decimal places + 1
        width += 6.0;
        if ((int) width > maxWidth) {
          maxWidth = (int) width;
        }
      } else {
        // nominal distributions
        DiscreteEstimator d = (DiscreteEstimator) m_model[i][j];
        for (int k = 0; k < d.getNumSymbols(); k++) {
          String size = Utils.doubleToString(d.getCount(k), maxWidth, 4)
            .trim();
          if (size.length() > maxWidth) {
            maxWidth = size.length();
          }
        }
        int sum = Utils.doubleToString(d.getSumOfCounts(), maxWidth, 4)
          .trim().length();
        if (sum > maxWidth) {
          maxWidth = sum;
        }
      }
    }
  }
  if (maxAttWidth < "Attribute".length()) {
    maxAttWidth = "Attribute".length();
  }
  maxAttWidth += 2;
  // Second pass: emit header row, priors row, separator and attribute rows
  temp.append("\n\n");
  temp.append(pad("Cluster", " ",
    (maxAttWidth + maxWidth + 1) - "Cluster".length(), true));
  temp.append("\n");
  temp
    .append(pad("Attribute", " ", maxAttWidth - "Attribute".length(), false));
  // cluster #'s
  for (int i = 0; i < m_num_clusters; i++) {
    String classL = "" + i;
    temp.append(pad(classL, " ", maxWidth + 1 - classL.length(), true));
  }
  temp.append("\n");
  // cluster priors
  temp.append(pad("", " ", maxAttWidth, true));
  for (int i = 0; i < m_num_clusters; i++) {
    String priorP = Utils.doubleToString(m_priors[i], maxWidth, 2).trim();
    priorP = "(" + priorP + ")";
    temp.append(pad(priorP, " ", maxWidth + 1 - priorP.length(), true));
  }
  temp.append("\n");
  temp.append(pad("", "=", maxAttWidth + (maxWidth * m_num_clusters)
    + m_num_clusters + 1, true));
  temp.append("\n");
  for (int i = 0; i < m_num_attribs; i++) {
    String attName = m_theInstances.attribute(i).name();
    temp.append(attName + "\n");
    if (m_theInstances.attribute(i).isNumeric()) {
      String meanL = " mean";
      temp.append(pad(meanL, " ", maxAttWidth + 1 - meanL.length(), false));
      for (int j = 0; j < m_num_clusters; j++) {
        // means
        String mean = Utils.doubleToString(m_modelNormal[j][i][0], maxWidth,
          4).trim();
        temp.append(pad(mean, " ", maxWidth + 1 - mean.length(), true));
      }
      temp.append("\n");
      // now do std deviations
      String stdDevL = " std. dev.";
      temp
        .append(pad(stdDevL, " ", maxAttWidth + 1 - stdDevL.length(), false));
      for (int j = 0; j < m_num_clusters; j++) {
        String stdDev = Utils.doubleToString(m_modelNormal[j][i][1],
          maxWidth, 4).trim();
        temp.append(pad(stdDev, " ", maxWidth + 1 - stdDev.length(), true));
      }
      temp.append("\n\n");
    } else {
      Attribute a = m_theInstances.attribute(i);
      for (int j = 0; j < a.numValues(); j++) {
        String val = " " + a.value(j);
        temp.append(pad(val, " ", maxAttWidth + 1 - val.length(), false));
        for (int k = 0; k < m_num_clusters; k++) {
          DiscreteEstimator d = (DiscreteEstimator) m_model[k][i];
          String count = Utils.doubleToString(d.getCount(j), maxWidth, 4)
            .trim();
          temp.append(pad(count, " ", maxWidth + 1 - count.length(), true));
        }
        temp.append("\n");
      }
      // do the totals
      String total = " [total]";
      temp.append(pad(total, " ", maxAttWidth + 1 - total.length(), false));
      for (int k = 0; k < m_num_clusters; k++) {
        DiscreteEstimator d = (DiscreteEstimator) m_model[k][i];
        String count = Utils.doubleToString(d.getSumOfCounts(), maxWidth, 4)
          .trim();
        temp.append(pad(count, " ", maxWidth + 1 - count.length(), true));
      }
      temp.append("\n");
    }
  }
  return temp.toString();
}
/**
 * Pads a string with copies of a padding string.
 *
 * @param source the string to pad
 * @param padChar the padding string (typically a single character)
 * @param length the number of copies of padChar to add; values &lt;= 0 add
 *          nothing (callers pass targetWidth - source.length())
 * @param leftPad true to prepend the padding, false to append it
 * @return the padded string
 */
private String pad(String source, String padChar, int length, boolean leftPad) {
  // StringBuilder instead of StringBuffer: this is a private helper used
  // only on the caller's thread, so the synchronized buffer is pure overhead
  StringBuilder temp = new StringBuilder();
  if (leftPad) {
    for (int i = 0; i < length; i++) {
      temp.append(padChar);
    }
    temp.append(source);
  } else {
    temp.append(source);
    for (int i = 0; i < length; i++) {
      temp.append(padChar);
    }
  }
  return temp.toString();
}
/**
 * Outputs the generated clusters into a string using the legacy (pre-table)
 * format: one section per cluster listing each attribute's distribution.
 *
 * @return the clusterer in string representation
 */
protected String toStringOriginal() {
  if (m_priors == null) {
    return "No clusterer built yet!";
  }
  StringBuilder buf = new StringBuilder();
  buf.append("\nEM\n==\n");
  if (m_initialNumClusters == -1) {
    buf.append("\nNumber of clusters selected by cross validation: "
      + m_num_clusters + "\n");
  } else {
    buf.append("\nNumber of clusters: " + m_num_clusters + "\n");
  }
  for (int cluster = 0; cluster < m_num_clusters; cluster++) {
    buf.append("\nCluster: " + cluster + " Prior probability: "
      + Utils.doubleToString(m_priors[cluster], 4) + "\n\n");
    for (int att = 0; att < m_num_attribs; att++) {
      buf.append("Attribute: " + m_theInstances.attribute(att).name() + "\n");
      if (m_theInstances.attribute(att).isNominal()) {
        // nominal: delegate to the discrete estimator's own rendering
        if (m_model[cluster][att] != null) {
          buf.append(m_model[cluster][att].toString());
        }
      } else {
        // numeric: single Gaussian per cluster/attribute
        buf.append("Normal Distribution. Mean = "
          + Utils.doubleToString(m_modelNormal[cluster][att][0], 4)
          + " StdDev = "
          + Utils.doubleToString(m_modelNormal[cluster][att][1], 4) + "\n");
      }
    }
  }
  return buf.toString();
}
/**
 * verbose output for debugging: dumps the full model parameters and every
 * instance's cluster membership weights to stdout.
 *
 * @param inst the training instances
 */
private void EM_Report(Instances inst) {
  int i, j, l, m;
  System.out.println("======================================");
  // per-cluster, per-attribute model parameters
  for (j = 0; j < m_num_clusters; j++) {
    for (i = 0; i < m_num_attribs; i++) {
      System.out.println("Clust: " + j + " att: " + i + "\n");
      if (m_theInstances.attribute(i).isNominal()) {
        if (m_model[j][i] != null) {
          System.out.println(m_model[j][i].toString());
        }
      } else {
        System.out.println("Normal Distribution. Mean = "
          + Utils.doubleToString(m_modelNormal[j][i][0], 8, 4)
          + " StandardDev = "
          + Utils.doubleToString(m_modelNormal[j][i][1], 8, 4)
          + " WeightSum = "
          + Utils.doubleToString(m_modelNormal[j][i][2], 8, 4));
      }
    }
  }
  // per-instance membership weights; the "Class" shown is the cluster with
  // the largest weight
  for (l = 0; l < inst.numInstances(); l++) {
    m = Utils.maxIndex(m_weights[l]);
    System.out.print("Inst " + Utils.doubleToString(l, 5, 0) + " Class " + m
      + "\t");
    for (j = 0; j < m_num_clusters; j++) {
      System.out.print(Utils.doubleToString(m_weights[l][j], 7, 5) + " ");
    }
    System.out.println();
  }
}
/**
 * estimate the number of clusters by cross validation on the training data.
 * Starts at one cluster and keeps incrementing while the mean CV
 * log-likelihood improves by more than m_minLogLikelihoodImprovementCV;
 * the search is capped by m_upperBoundNumClustersCV (if positive).
 *
 * @throws Exception if something goes wrong
 */
private void CVClusters() throws Exception {
  double CVLogLikely = -Double.MAX_VALUE;
  double templl, tll;
  boolean CVincreased = true;
  m_num_clusters = 1;
  int upperBoundMaxClusters = (m_upperBoundNumClustersCV > 0) ? m_upperBoundNumClustersCV
    : Integer.MAX_VALUE;
  int num_clusters = m_num_clusters;
  int i;
  Random cvr;
  Instances trainCopy;
  // never use more folds than there are instances
  int numFolds = (m_theInstances.numInstances() < m_cvFolds) ? m_theInstances
    .numInstances() : m_cvFolds;
  boolean ok = true;
  int seed = getSeed();
  int restartCount = 0;
  // NOTE(review): once a failure sets ok = false it is never reset to true,
  // so after the first failed fold the improvement check below is skipped,
  // CVincreased stays false and the whole search terminates despite the
  // seed++/restartCount retry machinery — confirm this is intended.
  CLUSTER_SEARCH: while (CVincreased) {
    if (num_clusters > upperBoundMaxClusters) {
      break CLUSTER_SEARCH;
    }
    // theInstances.stratify(10);
    CVincreased = false;
    cvr = new Random(getSeed());
    trainCopy = new Instances(m_theInstances);
    trainCopy.randomize(cvr);
    templl = 0.0;
    for (i = 0; i < numFolds; i++) {
      Instances cvTrain = trainCopy.trainCV(numFolds, i, cvr);
      // no point trying more clusters than training instances in a fold
      if (num_clusters > cvTrain.numInstances()) {
        break CLUSTER_SEARCH;
      }
      Instances cvTest = trainCopy.testCV(numFolds, i);
      m_rr = new Random(seed);
      // burn a few values so similar seeds don't yield similar streams
      for (int z = 0; z < 10; z++) {
        m_rr.nextDouble();
      }
      m_num_clusters = num_clusters;
      EM_Init(cvTrain);
      try {
        iterate(cvTrain, false);
      } catch (Exception ex) {
        // catch any problems - i.e. empty clusters occurring
        ex.printStackTrace();
        // System.err.println("Restarting after CV training failure ("+num_clusters+" clusters");
        seed++;
        restartCount++;
        ok = false;
        if (restartCount > 5) {
          break CLUSTER_SEARCH;
        }
        break;
      }
      try {
        // evaluate held-out log-likelihood without changing the weights
        tll = E(cvTest, false);
      } catch (Exception ex) {
        // catch any problems - i.e. empty clusters occurring
        // ex.printStackTrace();
        ex.printStackTrace();
        // System.err.println("Restarting after CV testing failure ("+num_clusters+" clusters");
        // throw new Exception(ex);
        seed++;
        restartCount++;
        ok = false;
        if (restartCount > 5) {
          break CLUSTER_SEARCH;
        }
        break;
      }
      if (m_verbose) {
        System.out.println("# clust: " + num_clusters + " Fold: " + i
          + " Loglikely: " + tll);
      }
      templl += tll;
    }
    if (ok) {
      restartCount = 0;
      seed = getSeed();
      templl /= numFolds;
      if (m_verbose) {
        System.out.println("==================================="
          + "==============\n# clust: " + num_clusters + " Mean Loglikely: "
          + templl + "\n================================"
          + "=================");
      }
      // if (templl > CVLogLikely) {
      if (templl - CVLogLikely > m_minLogLikelihoodImprovementCV) {
        CVLogLikely = templl;
        CVincreased = true;
        num_clusters++;
      }
    }
  }
  if (m_verbose) {
    System.out.println("Number of clusters: " + (num_clusters - 1));
  }
  // num_clusters was incremented once past the last improving count
  m_num_clusters = num_clusters - 1;
}
/**
 * Returns the number of clusters.
 *
 * @return the number of clusters generated for a training dataset.
 * @throws Exception if number of clusters could not be returned successfully
 */
@Override
public int numberOfClusters() throws Exception {
  // -1 means neither buildClusterer() nor an explicit setting has run yet
  if (m_num_clusters == -1) {
    throw new Exception("Haven't generated any clusters!");
  }
  return m_num_clusters;
}
/**
 * Updates the minimum and maximum values for all the attributes based on a
 * new instance.
 * <p>
 * Bug fix: the original used an if/else chain, so a value that lowered the
 * minimum could never update the maximum in the same call. Since the
 * bounds start at Double.MAX_VALUE / -Double.MAX_VALUE (see
 * buildClusterer), a strictly decreasing attribute left the maximum stuck
 * at -Double.MAX_VALUE. The two checks are independent and are now applied
 * independently (the FarthestFirst clusterer in this library already
 * handles the first observation correctly).
 *
 * @param instance the new instance
 */
private void updateMinMax(Instance instance) {
  for (int j = 0; j < m_theInstances.numAttributes(); j++) {
    double value = instance.value(j);
    if (value < m_minValues[j]) {
      m_minValues[j] = value;
    }
    if (value > m_maxValues[j]) {
      m_maxValues[j] = value;
    }
  }
}
/**
 * Returns default capabilities of the clusterer (i.e., the ones of
 * SimpleKMeans).
 *
 * @return the capabilities of this clusterer
 */
@Override
public Capabilities getCapabilities() {
  // EM accepts exactly the same data as SimpleKMeans, so borrow its
  // capability set instead of duplicating the list here
  Capabilities result = new SimpleKMeans().getCapabilities();
  result.setOwner(this);
  return result;
}
/**
 * Generates a clusterer. Has to initialize all fields of the clusterer that
 * are not being set via options.
 *
 * @param data set of instances serving as training data
 * @throws Exception if the clusterer has not been generated successfully
 */
@Override
public void buildClusterer(Instances data) throws Exception {
  m_training = true;
  // can clusterer handle the data?
  getCapabilities().testWithFail(data);
  // EM cannot handle missing values directly, so impute them up front; the
  // same filter is reused at prediction time (see
  // logDensityPerClusterForInstance)
  m_replaceMissing = new ReplaceMissingValues();
  Instances instances = new Instances(data);
  instances.setClassIndex(-1);
  m_replaceMissing.setInputFormat(instances);
  data = weka.filters.Filter.useFilter(instances, m_replaceMissing);
  instances = null;
  m_theInstances = data;
  // calculate min and max values for attributes
  m_minValues = new double[m_theInstances.numAttributes()];
  m_maxValues = new double[m_theInstances.numAttributes()];
  for (int i = 0; i < m_theInstances.numAttributes(); i++) {
    m_minValues[i] = Double.MAX_VALUE;
    m_maxValues[i] = -Double.MAX_VALUE;
  }
  for (int i = 0; i < m_theInstances.numInstances(); i++) {
    updateMinMax(m_theInstances.instance(i));
  }
  doEM();
  // save memory: keep only the dataset header
  m_theInstances = new Instances(m_theInstances, 0);
  m_training = false;
}
/**
 * Returns the cluster priors.
 *
 * @return a fresh copy of the cluster prior probabilities
 */
@Override
public double[] clusterPriors() {
  // clone() on a primitive array yields an independent copy, so callers
  // cannot mutate the internal model state
  return m_priors.clone();
}
/**
 * Computes the log of the conditional density (per cluster) for a given
 * instance: for each cluster, the sum of per-attribute log densities
 * (discrete estimator for nominal attributes, Gaussian for numeric).
 *
 * @param inst the instance to compute the density for
 * @return an array containing the estimated densities
 * @throws Exception if the density could not be computed successfully
 */
@Override
public double[] logDensityPerClusterForInstance(Instance inst)
  throws Exception {
  int i, j;
  double logprob;
  double[] wghts = new double[m_num_clusters];
  // training data has already been through the missing-value filter; at
  // prediction time the instance must be filtered here
  if (!m_training) {
    m_replaceMissing.input(inst);
    inst = m_replaceMissing.output();
  }
  for (i = 0; i < m_num_clusters; i++) {
    // System.err.println("Cluster : "+i);
    logprob = 0.0;
    for (j = 0; j < m_num_attribs; j++) {
      if (inst.attribute(j).isNominal()) {
        logprob += Math.log(m_model[i][j].getProbability(inst.value(j)));
      } else { // numeric attribute
        logprob += logNormalDens(inst.value(j), m_modelNormal[i][j][0],
          m_modelNormal[i][j][1]);
        /*
         * System.err.println(logNormalDens(inst.value(j),
         * m_modelNormal[i][j][0], m_modelNormal[i][j][1]) + " ");
         */
      }
    }
    // System.err.println("");
    wghts[i] = logprob;
  }
  return wghts;
}
/**
 * Perform the EM algorithm: optionally select the number of clusters by
 * cross-validation, then fit the final model on the full training set.
 *
 * @throws Exception if something goes wrong
 */
private void doEM() throws Exception {
  if (m_verbose) {
    System.out.println("Seed: " + getSeed());
  }
  m_rr = new Random(getSeed());
  // throw away numbers to avoid problem of similar initial numbers
  // from a similar seed
  for (int i = 0; i < 10; i++) {
    m_rr.nextDouble();
  }
  m_num_instances = m_theInstances.numInstances();
  m_num_attribs = m_theInstances.numAttributes();
  if (m_verbose) {
    System.out.println("Number of instances: " + m_num_instances
      + "\nNumber of atts: " + m_num_attribs + "\n");
  }
  startExecutorPool();
  // setDefaultStdDevs(theInstances);
  // cross validate to determine number of clusters?
  if (m_initialNumClusters == -1) {
    // need at least 10 instances for the default 10-fold CV
    if (m_theInstances.numInstances() > 9) {
      CVClusters();
      // re-seed the RNG so the final fit does not depend on how much
      // randomness CVClusters consumed
      m_rr = new Random(getSeed());
      for (int i = 0; i < 10; i++) {
        m_rr.nextDouble();
      }
    } else {
      m_num_clusters = 1;
    }
  }
  // fit full training set
  EM_Init(m_theInstances);
  double loglikely = iterate(m_theInstances, m_verbose);
  if (m_Debug) {
    System.err.println("Current log-likelihood: " + loglikely);
  }
  m_executorPool.shutdown();
}
/**
 * Launch E step tasks: splits the data into m_executionSlots contiguous
 * ranges, evaluates them in parallel and combines the per-range results
 * into a single weighted mean log likelihood.
 *
 * @param inst the instances to be clustered
 * @return the log likelihood from this E step
 * @throws Exception if a problem occurs
 */
protected double launchESteps(Instances inst) throws Exception {
  int numPerTask = inst.numInstances() / m_executionSlots;
  double eStepLogL = 0;
  double eStepSow = 0;
  // single-threaded fallback when parallelism is off or data is too small
  // to be worth splitting
  if (m_executionSlots <= 1 || inst.numInstances() < 2 * m_executionSlots) {
    return E(inst, true);
  }
  List<Future<double[]>> results = new ArrayList<Future<double[]>>();
  for (int i = 0; i < m_executionSlots; i++) {
    int start = i * numPerTask;
    int end = start + numPerTask;
    // last slot absorbs the division remainder
    if (i == m_executionSlots - 1) {
      end = inst.numInstances();
    }
    ETask newTask = new ETask(inst, start, end, true);
    Future<double[]> futureE = m_executorPool.submit(newTask);
    results.add(futureE);
    // m_executorPool.execute(newTask);
    // et[i] = newTask;
    // newTask.run();
  }
  // r[0] holds the weighted log-likelihood sum, r[1] the sum of weights
  for (int i = 0; i < results.size(); i++) {
    double[] r = results.get(i).get();
    eStepLogL += r[0];
    eStepSow += r[1];
  }
  eStepLogL /= eStepSow;
  return eStepLogL;
}
/**
 * Launch the M step tasks: each worker accumulates partial sufficient
 * statistics over its instance range; the partials are then summed into
 * the shared model and the Gaussian parameters re-estimated.
 *
 * @param inst the instances to be clustered
 * @throws Exception if a problem occurs
 */
protected void launchMSteps(Instances inst) throws Exception {
  // single-threaded fallback
  if (m_executionSlots <= 1 || inst.numInstances() < 2 * m_executionSlots) {
    M(inst);
    return;
  }
  // aggregated estimators
  new_estimators();
  estimate_priors(inst);
  int numPerTask = inst.numInstances() / m_executionSlots;
  List<Future<MTask>> results = new ArrayList<Future<MTask>>();
  for (int i = 0; i < m_executionSlots; i++) {
    int start = i * numPerTask;
    int end = start + numPerTask;
    // last slot absorbs the division remainder
    if (i == m_executionSlots - 1) {
      end = inst.numInstances();
    }
    // each task gets its own partial estimators to fill in, avoiding
    // contention on the shared model
    DiscreteEstimator[][] model = new DiscreteEstimator[m_num_clusters][m_num_attribs];
    double[][][] normal = new double[m_num_clusters][m_num_attribs][3];
    for (int ii = 0; ii < m_num_clusters; ii++) {
      for (int j = 0; j < m_num_attribs; j++) {
        if (m_theInstances.attribute(j).isNominal()) {
          model[ii][j] = new DiscreteEstimator(m_theInstances.attribute(j)
            .numValues(), false);
        } else {
          normal[ii][j][0] = normal[ii][j][1] = normal[ii][j][2] = 0.0;
        }
      }
    }
    MTask newTask = new MTask(inst, start, end, model, normal);
    Future<MTask> futureM = m_executorPool.submit(newTask);
    results.add(futureM);
    // newTask.run();
  }
  for (Future<MTask> t : results) {
    MTask m = t.get();
    // aggregate
    for (int i = 0; i < m_num_clusters; i++) {
      for (int j = 0; j < m_num_attribs; j++) {
        if (m_theInstances.attribute(j).isNominal()) {
          for (int k = 0; k < m_theInstances.attribute(j).numValues(); k++) {
            m_model[i][j].addValue(k, m.m_taskModel[i][j].getCount(k));
          }
        } else {
          m_modelNormal[i][j][0] += m.m_taskModelNormal[i][j][0];
          m_modelNormal[i][j][2] += m.m_taskModelNormal[i][j][2];
          m_modelNormal[i][j][1] += m.m_taskModelNormal[i][j][1];
        }
      }
    }
  }
  // re-estimate Gaussian parameters
  M_reEstimate(inst);
}
/**
 * iterates the E and M steps until the log likelihood of the data converges.
 *
 * @param inst the training instances.
 * @param report be verbose.
 * @return the log likelihood of the data
 * @throws Exception if something goes wrong
 */
private double iterate(Instances inst, boolean report) throws Exception {
  int i;
  double llkold = 0.0;
  double llk = 0.0;
  if (report) {
    EM_Report(inst);
  }
  boolean ok = false;
  int seed = getSeed();
  int restartCount = 0;
  m_iterationsPerformed = -1;
  // keep restarting with a fresh seed until a full EM run completes;
  // after 5 failed restarts, drop one cluster and try again
  while (!ok) {
    try {
      for (i = 0; i < m_max_iterations; i++) {
        llkold = llk;
        llk = launchESteps(inst);
        if (report) {
          System.out.println("Loglikely: " + llk);
        }
        if (i > 0) {
          if ((llk - llkold) < m_minLogLikelihoodImprovementIterating) {
            if (llk - llkold < 0) {
              // decrease in log likelihood - revert to the model from the
              // previous iteration
              m_modelNormal = m_modelNormalPrev;
              m_model = m_modelPrev;
              m_priors = m_priorsPrev;
              m_iterationsPerformed = i - 1;
            } else {
              m_iterationsPerformed = i;
            }
            break;
          }
        }
        launchMSteps(inst);
      }
      ok = true;
    } catch (Exception ex) {
      // System.err.println("Restarting after training failure");
      ex.printStackTrace();
      seed++;
      restartCount++;
      m_rr = new Random(seed);
      // burn some values so similar seeds don't produce similar streams
      for (int z = 0; z < 10; z++) {
        m_rr.nextDouble();
        m_rr.nextInt();
      }
      if (restartCount > 5) {
        // System.err.println("Reducing the number of clusters");
        m_num_clusters--;
        restartCount = 0;
      }
      EM_Init(m_theInstances);
      startExecutorPool();
    }
  }
  // -1 means the loop above ran to the iteration cap without converging
  if (m_iterationsPerformed == -1) {
    m_iterationsPerformed = m_max_iterations;
  }
  if (m_verbose) {
    System.out.println("# iterations performed: " + m_iterationsPerformed);
  }
  if (report) {
    EM_Report(inst);
  }
  return llk;
}
/**
 * Returns the revision string.
 *
 * @return the revision
 */
@Override
public String getRevision() {
  // the $Revision$ keyword is expanded by the version-control system
  return RevisionUtils.extract("$Revision$");
}
// ============
// Test method.
// ============
/**
 * Main method for testing this class.
 *
 * @param argv should contain the following arguments:
 *          <p>
 *          -t training file [-T test file] [-N number of clusters] [-S random
 *          seed]
 */
public static void main(String[] argv) {
  // standard weka command-line entry point for clusterers
  runClusterer(new EM(), argv);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/clusterers/FarthestFirst.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* FarthestFirst.java
* Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.clusterers;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;
import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.ReplaceMissingValues;
/**
<!-- globalinfo-start -->
* Cluster data using the FarthestFirst algorithm.<br/>
* <br/>
* For more information see:<br/>
* <br/>
* Hochbaum, Shmoys (1985). A best possible heuristic for the k-center problem. Mathematics of Operations Research. 10(2):180-184.<br/>
* <br/>
* Sanjoy Dasgupta: Performance Guarantees for Hierarchical Clustering. In: 15th Annual Conference on Computational Learning Theory, 351-363, 2002.<br/>
* <br/>
* Notes:<br/>
* - works as a fast simple approximate clusterer<br/>
* - modelled after SimpleKMeans, might be a useful initializer for it
* <p/>
<!-- globalinfo-end -->
*
<!-- technical-bibtex-start -->
* BibTeX:
* <pre>
* @article{Hochbaum1985,
* author = {Hochbaum and Shmoys},
* journal = {Mathematics of Operations Research},
* number = {2},
* pages = {180-184},
* title = {A best possible heuristic for the k-center problem},
* volume = {10},
* year = {1985}
* }
*
* @inproceedings{Dasgupta2002,
* author = {Sanjoy Dasgupta},
* booktitle = {15th Annual Conference on Computational Learning Theory},
* pages = {351-363},
* publisher = {Springer},
* title = {Performance Guarantees for Hierarchical Clustering},
* year = {2002}
* }
* </pre>
* <p/>
<!-- technical-bibtex-end -->
*
<!-- options-start -->
* Valid options are: <p/>
*
* <pre> -N <num>
* number of clusters. (default = 2).</pre>
*
* <pre> -S <num>
* Random number seed.
* (default 1)</pre>
*
<!-- options-end -->
*
* @author Bernhard Pfahringer (bernhard@cs.waikato.ac.nz)
* @version $Revision$
* @see RandomizableClusterer
*/
public class FarthestFirst extends RandomizableClusterer implements
TechnicalInformationHandler {
// Todo: rewrite to be fully incremental
// cleanup, like deleting m_instances
/** for serialization */
static final long serialVersionUID = 7499838100631329509L;
/**
* training instances, not necessary to keep, could be replaced by
* m_ClusterCentroids where needed for header info
*/
protected Instances m_instances;
/**
* replace missing values in training instances
*/
protected ReplaceMissingValues m_ReplaceMissingFilter;
/**
* number of clusters to generate
*/
protected int m_NumClusters = 2;
/**
* holds the cluster centroids
*/
protected Instances m_ClusterCentroids;
/**
* attribute min values
*/
private double[] m_Min;
/**
* attribute max values
*/
private double[] m_Max;
/**
 * Returns a string describing this clusterer
 *
 * @return a description of the evaluator suitable for displaying in the
 *         explorer/experimenter gui
 */
public String globalInfo() {
  return "Cluster data using the FarthestFirst algorithm.\n\n"
    + "For more information see:\n\n" + getTechnicalInformation().toString()
    + "\n\n" + "Notes:\n"
    + "- works as a fast simple approximate clusterer\n"
    + "- modelled after SimpleKMeans, might be a useful initializer for it";
}

/**
 * Returns an instance of a TechnicalInformation object, containing detailed
 * information about the technical background of this class, e.g., paper
 * reference or book this class is based on.
 *
 * @return the technical information about this class
 */
@Override
public TechnicalInformation getTechnicalInformation() {
  TechnicalInformation result;
  TechnicalInformation additional;
  // primary reference: the k-center heuristic
  result = new TechnicalInformation(Type.ARTICLE);
  result.setValue(Field.AUTHOR, "Hochbaum and Shmoys");
  result.setValue(Field.YEAR, "1985");
  result.setValue(Field.TITLE,
    "A best possible heuristic for the k-center problem");
  result.setValue(Field.JOURNAL, "Mathematics of Operations Research");
  result.setValue(Field.VOLUME, "10");
  result.setValue(Field.NUMBER, "2");
  result.setValue(Field.PAGES, "180-184");
  // secondary reference: use of farthest-first in hierarchical clustering
  additional = result.add(Type.INPROCEEDINGS);
  additional.setValue(Field.AUTHOR, "Sanjoy Dasgupta");
  additional.setValue(Field.TITLE,
    "Performance Guarantees for Hierarchical Clustering");
  additional.setValue(Field.BOOKTITLE,
    "15th Annual Conference on Computational Learning Theory");
  additional.setValue(Field.YEAR, "2002");
  additional.setValue(Field.PAGES, "351-363");
  additional.setValue(Field.PUBLISHER, "Springer");
  return result;
}
/**
 * Returns default capabilities of the clusterer.
 *
 * @return the capabilities of this clusterer
 */
@Override
public Capabilities getCapabilities() {
  Capabilities result = super.getCapabilities();
  result.disableAll();
  // clustering: no class attribute required
  result.enable(Capability.NO_CLASS);
  // attributes
  result.enable(Capability.NOMINAL_ATTRIBUTES);
  result.enable(Capability.NUMERIC_ATTRIBUTES);
  result.enable(Capability.DATE_ATTRIBUTES);
  result.enable(Capability.MISSING_VALUES);
  return result;
}
/**
 * Generates a clusterer. Has to initialize all fields of the clusterer that
 * are not being set via options. Implements farthest-first traversal: pick
 * one centre at random, then repeatedly add the instance farthest from all
 * centres chosen so far.
 *
 * @param data set of instances serving as training data
 * @throws Exception if the clusterer has not been generated successfully
 */
@Override
public void buildClusterer(Instances data) throws Exception {
  // can clusterer handle the data?
  getCapabilities().testWithFail(data);
  // long start = System.currentTimeMillis();
  m_ReplaceMissingFilter = new ReplaceMissingValues();
  m_ReplaceMissingFilter.setInputFormat(data);
  m_instances = Filter.useFilter(data, m_ReplaceMissingFilter);
  initMinMax(m_instances);
  m_ClusterCentroids = new Instances(m_instances, m_NumClusters);
  int n = m_instances.numInstances();
  Random r = new Random(getSeed());
  boolean[] selected = new boolean[n];
  // minDistance[i] = distance from instance i to its nearest chosen centre
  double[] minDistance = new double[n];
  for (int i = 0; i < n; i++) {
    minDistance[i] = Double.MAX_VALUE;
  }
  // first centre is chosen uniformly at random
  int firstI = r.nextInt(n);
  m_ClusterCentroids.add(m_instances.instance(firstI));
  selected[firstI] = true;
  updateMinDistance(minDistance, selected, m_instances,
    m_instances.instance(firstI));
  // cannot have more centres than instances
  if (m_NumClusters > n) {
    m_NumClusters = n;
  }
  // greedily add the instance farthest from all chosen centres
  for (int i = 1; i < m_NumClusters; i++) {
    int nextI = farthestAway(minDistance, selected);
    m_ClusterCentroids.add(m_instances.instance(nextI));
    selected[nextI] = true;
    updateMinDistance(minDistance, selected, m_instances,
      m_instances.instance(nextI));
  }
  // keep only the dataset header (distance() still needs attribute info)
  m_instances = new Instances(m_instances, 0);
  // long end = System.currentTimeMillis();
  // System.out.println("Clustering Time = " + (end-start));
}
/**
 * Lowers each unselected instance's distance-to-nearest-centre after a new
 * centre has been chosen.
 *
 * @param minDistance per-instance distance to the closest selected centre
 * @param selected flags marking instances already chosen as centres
 * @param data the instances
 * @param center the newly selected centre
 */
protected void updateMinDistance(double[] minDistance, boolean[] selected,
  Instances data, Instance center) {
  for (int idx = 0; idx < selected.length; idx++) {
    if (selected[idx]) {
      continue; // centres themselves need no distance bookkeeping
    }
    double dist = distance(center, data.instance(idx));
    if (dist < minDistance[idx]) {
      minDistance[idx] = dist;
    }
  }
}
/**
 * Finds the unselected instance farthest from its nearest selected centre.
 *
 * @param minDistance per-instance distance to the closest selected centre
 * @param selected flags marking instances already chosen as centres
 * @return the index of the farthest unselected instance, or -1 if every
 *         instance is already selected
 */
protected int farthestAway(double[] minDistance, boolean[] selected) {
  int best = -1;
  double bestDistance = -1.0;
  for (int idx = 0; idx < selected.length; idx++) {
    // only unselected instances compete, and only strict improvements win
    if (!selected[idx] && bestDistance < minDistance[idx]) {
      bestDistance = minDistance[idx];
      best = idx;
    }
  }
  return best;
}
/**
 * Initialises the per-attribute min/max bookkeeping: marks every attribute
 * as "unseen" (NaN) and then folds in each training instance.
 *
 * @param data the training instances
 */
protected void initMinMax(Instances data) {
  int numAtts = data.numAttributes();
  m_Min = new double[numAtts];
  m_Max = new double[numAtts];
  // NaN marks "no value observed yet" (see updateMinMax)
  for (int att = 0; att < numAtts; att++) {
    m_Min[att] = Double.NaN;
    m_Max[att] = Double.NaN;
  }
  for (int inst = 0; inst < data.numInstances(); inst++) {
    updateMinMax(data.instance(inst));
  }
}
/**
 * Updates the minimum and maximum values for all the attributes based on a
 * new instance. The first value seen for an attribute (min still NaN)
 * initialises both bounds.
 *
 * @param instance the new instance
 */
private void updateMinMax(Instance instance) {
  for (int att = 0; att < instance.numAttributes(); att++) {
    double value = instance.value(att);
    if (Double.isNaN(m_Min[att])) {
      // first observation for this attribute
      m_Min[att] = value;
      m_Max[att] = value;
    } else if (value < m_Min[att]) {
      m_Min[att] = value;
    } else if (value > m_Max[att]) {
      m_Max[att] = value;
    }
  }
}
/**
 * clusters an instance that has been through the filters
 *
 * @param instance the instance to assign a cluster to
 * @return a cluster number
 */
protected int clusterProcessedInstance(Instance instance) {
  double minDist = Double.MAX_VALUE;
  int bestCluster = 0;
  // assign to the nearest centroid
  for (int i = 0; i < m_NumClusters; i++) {
    double dist = distance(instance, m_ClusterCentroids.instance(i));
    if (dist < minDist) {
      minDist = dist;
      bestCluster = i;
    }
  }
  return bestCluster;
}
/**
 * Classifies a given instance.
 *
 * @param instance the instance to be assigned to a cluster
 * @return the number of the assigned cluster as an integer if the class is
 *         enumerated, otherwise the predicted value
 * @throws Exception if instance could not be classified successfully
 */
@Override
public int clusterInstance(Instance instance) throws Exception {
  // push the instance through the same missing-value filter that was
  // applied at training time
  m_ReplaceMissingFilter.input(instance);
  m_ReplaceMissingFilter.batchFinished();
  Instance inst = m_ReplaceMissingFilter.output();
  return clusterProcessedInstance(inst);
}
/**
 * Calculates the distance between two instances: a normalised Euclidean
 * distance computed directly on the (possibly sparse) value arrays via a
 * merge-walk in attribute-index order.
 *
 * @param first the first instance
 * @param second the second instance
 * @return the distance between the two given instances, between 0 and 1
 */
protected double distance(Instance first, Instance second) {
  double distance = 0;
  int firstI, secondI;
  // walk both sparse value lists simultaneously, advancing the side with
  // the smaller attribute index
  for (int p1 = 0, p2 = 0; p1 < first.numValues() || p2 < second.numValues();) {
    // an exhausted list contributes a sentinel index past the last attribute
    if (p1 >= first.numValues()) {
      firstI = m_instances.numAttributes();
    } else {
      firstI = first.index(p1);
    }
    if (p2 >= second.numValues()) {
      secondI = m_instances.numAttributes();
    } else {
      secondI = second.index(p2);
    }
    // skip the class attribute on either side
    if (firstI == m_instances.classIndex()) {
      p1++;
      continue;
    }
    if (secondI == m_instances.classIndex()) {
      p2++;
      continue;
    }
    double diff;
    if (firstI == secondI) {
      // both instances store this attribute explicitly
      diff = difference(firstI, first.valueSparse(p1), second.valueSparse(p2));
      p1++;
      p2++;
    } else if (firstI > secondI) {
      // first instance has an implicit zero at secondI
      diff = difference(secondI, 0, second.valueSparse(p2));
      p2++;
    } else {
      // second instance has an implicit zero at firstI
      diff = difference(firstI, first.valueSparse(p1), 0);
      p1++;
    }
    distance += diff * diff;
  }
  return Math.sqrt(distance / m_instances.numAttributes());
}
/**
 * Computes the difference between two given attribute values, normalised to
 * lie in [0, 1] (nominal: 0/1 match; numeric: difference of min/max-scaled
 * values, with pessimistic handling of missing values).
 */
protected double difference(int index, double val1, double val2) {
  switch (m_instances.attribute(index).type()) {
  case Attribute.NOMINAL:
    // If attribute is nominal: 0 iff both present and equal, else 1
    if (Utils.isMissingValue(val1) || Utils.isMissingValue(val2)
      || ((int) val1 != (int) val2)) {
      return 1;
    } else {
      return 0;
    }
  case Attribute.NUMERIC:
    // If attribute is numeric
    if (Utils.isMissingValue(val1) || Utils.isMissingValue(val2)) {
      if (Utils.isMissingValue(val1) && Utils.isMissingValue(val2)) {
        // both missing: assume maximal difference
        return 1;
      } else {
        double diff;
        if (Utils.isMissingValue(val2)) {
          diff = norm(val1, index);
        } else {
          diff = norm(val2, index);
        }
        // one missing: assume the worst-case distance from the known value
        if (diff < 0.5) {
          diff = 1.0 - diff;
        }
        return diff;
      }
    } else {
      return norm(val1, index) - norm(val2, index);
    }
  default:
    return 0;
  }
}
/**
 * Normalizes a given value of a numeric attribute to [0, 1] using the
 * observed min/max range.
 *
 * @param x the value to be normalized
 * @param i the attribute's index
 * @return the normalized value (0 when the attribute was never observed or
 *         has a zero-width range)
 */
protected double norm(double x, int i) {
  // unseen attribute or degenerate range: nothing sensible to scale by
  if (Double.isNaN(m_Min[i]) || Utils.eq(m_Max[i], m_Min[i])) {
    return 0;
  }
  return (x - m_Min[i]) / (m_Max[i] - m_Min[i]);
}
/**
 * Returns the number of clusters.
 *
 * @return the number of clusters generated for a training dataset.
 * @throws Exception if number of clusters could not be returned successfully
 */
@Override
public int numberOfClusters() throws Exception {
  return m_NumClusters;
}

/**
 * Get the centroids found by FarthestFirst
 *
 * @return the centroids found by FarthestFirst (the internal dataset, not a
 *         copy)
 */
public Instances getClusterCentroids() {
  return m_ClusterCentroids;
}
/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 */
@Override
public Enumeration<Option> listOptions() {
  Vector<Option> result = new Vector<Option>();
  result.addElement(new Option("\tnumber of clusters. (default = 2).", "N",
    1, "-N <num>"));
  // include the superclass options (e.g. the -S random seed)
  result.addAll(Collections.list(super.listOptions()));
  return result.elements();
}

/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String numClustersTipText() {
  return "set number of clusters";
}
/**
 * set the number of clusters to generate
 *
 * @param n the number of clusters to generate
 * @throws Exception if the number of clusters is not positive
 */
public void setNumClusters(int n) throws Exception {
  // Bug fix: the original tested n < 0, contradicting its own error message
  // ("must be > 0") and silently admitting 0 clusters, which would make
  // buildClusterer produce an empty centroid set. Reject non-positive
  // values as the message documents.
  if (n <= 0) {
    throw new Exception("Number of clusters must be > 0");
  }
  m_NumClusters = n;
}
/**
 * gets the number of clusters to generate
 *
 * @return the number of clusters to generate
 */
public int getNumClusters() {
  return m_NumClusters;
}
/**
 * Parses a given list of options.
 * <p/>
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -N <num>
 * number of clusters. (default = 2).</pre>
 *
 * <pre> -S <num>
 * Random number seed.
 * (default 1)</pre>
 *
 <!-- options-end -->
 *
 * @param options the list of options as an array of strings
 * @throws Exception if an option is not supported
 */
@Override
public void setOptions(String[] options) throws Exception {
  String optionString = Utils.getOption('N', options);
  if (optionString.length() != 0) {
    setNumClusters(Integer.parseInt(optionString));
  }
  // let the superclass consume its options (e.g. -S seed), then fail on
  // anything left over
  super.setOptions(options);
  Utils.checkForRemainingOptions(options);
}
/**
 * Gets the current settings of FarthestFirst.
 *
 * @return an array of strings suitable for passing to setOptions()
 */
@Override
public String[] getOptions() {
  Vector<String> opts = new Vector<String>();
  // Our own option first, then everything the superclass reports.
  opts.add("-N");
  opts.add(String.valueOf(getNumClusters()));
  Collections.addAll(opts, super.getOptions());
  return opts.toArray(new String[opts.size()]);
}
/**
 * Return a string describing this clusterer: a header followed by the
 * attribute values of every cluster centroid (nominal attributes are shown
 * by label, numeric ones by value).
 *
 * @return a description of the clusterer as a string
 */
@Override
public String toString() {
  StringBuilder buf = new StringBuilder();
  buf.append("\nFarthestFirst\n==============\n");
  buf.append("\nCluster centroids:\n");
  for (int cluster = 0; cluster < m_NumClusters; cluster++) {
    buf.append("\nCluster ").append(cluster).append("\n\t");
    for (int att = 0; att < m_ClusterCentroids.numAttributes(); att++) {
      if (m_ClusterCentroids.attribute(att).isNominal()) {
        // Nominal: print the label corresponding to the stored value index.
        buf.append(" ").append(
          m_ClusterCentroids.attribute(att).value(
            (int) m_ClusterCentroids.instance(cluster).value(att)));
      } else {
        buf.append(" ").append(m_ClusterCentroids.instance(cluster).value(att));
      }
    }
  }
  buf.append("\n\n");
  return buf.toString();
}
/**
 * Returns the revision string.
 *
 * @return the revision
 */
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
 * Main method for testing this class.
 *
 * @param argv should contain the following arguments:
 *          <p>
 *          -t training file [-N number of clusters]
 */
public static void main(String[] argv) {
// Delegates to Weka's standard clusterer command-line driver.
runClusterer(new FarthestFirst(), argv);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/clusterers/FilteredClusterer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* FilteredClusterer.java
* Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.clusterers;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;
import weka.core.*;
import weka.core.Capabilities.Capability;
import weka.filters.Filter;
import weka.filters.SupervisedFilter;
/**
* <!-- globalinfo-start --> Class for running an arbitrary clusterer on data
* that has been passed through an arbitrary filter. Like the clusterer, the
* structure of the filter is based exclusively on the training data and test
* instances will be processed by the filter without changing their structure.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -F <filter specification>
* Full class name of filter to use, followed
* by filter options.
* eg: "weka.filters.unsupervised.attribute.Remove -V -R 1,2"
* (default: weka.filters.AllFilter)
* </pre>
*
* <pre>
* -W
* Full name of base clusterer.
* (default: weka.clusterers.SimpleKMeans)
* </pre>
*
* <pre>
* Options specific to clusterer weka.clusterers.SimpleKMeans:
* </pre>
*
* <pre>
* -N <num>
* number of clusters.
* (default 2).
* </pre>
*
* <pre>
* -V
* Display std. deviations for centroids.
* </pre>
*
* <pre>
* -M
* Replace missing values with mean/mode.
* </pre>
*
* <pre>
* -S <num>
* Random number seed.
* (default 10)
* </pre>
*
* <!-- options-end -->
*
* Based on code from the FilteredClassifier by Len Trigg.
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
* @see weka.classifiers.meta.FilteredClassifier
*/
public class FilteredClusterer extends SingleClustererEnhancer implements Drawable {
/** for serialization. */
private static final long serialVersionUID = 1420005943163412943L;
/** The filter. */
protected Filter m_Filter;
/** The instance structure of the filtered instances. */
protected Instances m_FilteredInstances;
/**
 * Default constructor: SimpleKMeans behind an AllFilter (i.e. no-op filter).
 */
public FilteredClusterer() {
m_Clusterer = new SimpleKMeans();
m_Filter = new weka.filters.AllFilter();
}
/**
 * Returns a string describing this clusterer.
 *
 * @return a description of the clusterer suitable for displaying in the
 *         explorer/experimenter gui
 */
public String globalInfo() {
return "Class for running an arbitrary clusterer on data that has been passed "
+ "through an arbitrary filter. Like the clusterer, the structure of the filter "
+ "is based exclusively on the training data and test instances will be processed "
+ "by the filter without changing their structure.";
}
/**
 * String describing default filter.
 *
 * @return the default filter classname
 */
protected String defaultFilterString() {
return weka.filters.AllFilter.class.getName();
}
/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 */
@Override
public Enumeration<Option> listOptions() {
Vector<Option> result = new Vector<Option>();
result.addElement(new Option(
"\tFull class name of filter to use, followed\n"
+ "\tby filter options.\n"
+ "\teg: \"weka.filters.unsupervised.attribute.Remove -V -R 1,2\"\n"
+ "(default: " + defaultFilterString() + ")", "F", 1,
"-F <filter specification>"));
result.addAll(Collections.list(super.listOptions()));
return result.elements();
}
/**
 * Parses a given list of options.
 * <p/>
 *
 * <!-- options-start --> Valid options are:
 * <p/>
 *
 * <pre>
 * -F &lt;filter specification&gt;
 *  Full class name of filter to use, followed
 *  by filter options.
 *  eg: "weka.filters.unsupervised.attribute.Remove -V -R 1,2"
 * (default: weka.filters.AllFilter)
 * </pre>
 *
 * <pre>
 * -W
 *  Full name of base clusterer.
 *  (default: weka.clusterers.SimpleKMeans)
 * </pre>
 *
 * <pre>
 * Options specific to clusterer weka.clusterers.SimpleKMeans:
 * </pre>
 *
 * <pre>
 * -N &lt;num&gt;
 *  number of clusters.
 *  (default 2).
 * </pre>
 *
 * <pre>
 * -V
 *  Display std. deviations for centroids.
 * </pre>
 *
 * <pre>
 * -M
 *  Replace missing values with mean/mode.
 * </pre>
 *
 * <pre>
 * -S &lt;num&gt;
 *  Random number seed.
 *  (default 10)
 * </pre>
 *
 * <!-- options-end -->
 *
 * @param options the list of options as an array of strings
 * @throws Exception if an option is not supported
 */
@Override
public void setOptions(String[] options) throws Exception {
String tmpStr;
String[] tmpOptions;
tmpStr = Utils.getOption('F', options);
if (tmpStr.length() > 0) {
// The -F value is itself "classname options..."; split it, then blank out
// the classname slot so only the filter's own options are passed along.
tmpOptions = Utils.splitOptions(tmpStr);
if (tmpOptions.length == 0) {
throw new IllegalArgumentException(
"Invalid filter specification string");
}
tmpStr = tmpOptions[0];
tmpOptions[0] = "";
setFilter((Filter) Utils.forName(Filter.class, tmpStr, tmpOptions));
} else {
setFilter(new weka.filters.AllFilter());
}
super.setOptions(options);
Utils.checkForRemainingOptions(options);
}
/**
 * Gets the current settings of the clusterer.
 *
 * @return an array of strings suitable for passing to setOptions
 */
@Override
public String[] getOptions() {
Vector<String> result = new Vector<String>();
result.addElement("-F");
result.addElement(getFilterSpec());
Collections.addAll(result, super.getOptions());
return result.toArray(new String[result.size()]);
}
/**
 * Returns the tip text for this property.
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String filterTipText() {
return "The filter to be used.";
}
/**
 * Sets the filter.
 *
 * @param filter the filter with all options set.
 */
public void setFilter(Filter filter) {
m_Filter = filter;
if (m_Filter instanceof SupervisedFilter) {
// NOTE(review): this warning is written to stdout; most Weka code reports
// such warnings on System.err — confirm whether stdout is intended here.
System.out
.println("WARNING: you are using a supervised filter, which will leak "
+ "information about the class attribute!");
}
}
/**
 * Gets the filter used.
 *
 * @return the filter
 */
public Filter getFilter() {
return m_Filter;
}
/**
 * Gets the filter specification string, which contains the class name of the
 * filter and any options to the filter.
 *
 * @return the filter string.
 */
protected String getFilterSpec() {
String result;
Filter filter;
filter = getFilter();
result = filter.getClass().getName();
if (filter instanceof OptionHandler) {
result += " " + Utils.joinOptions(((OptionHandler) filter).getOptions());
}
return result;
}
/**
 * Returns default capabilities of the clusterer: with a filter set, the
 * filter's capabilities apply (the filter sees the raw data first); without
 * one, the base clusterer's capabilities restricted to class-less data.
 *
 * @return the capabilities of this clusterer
 */
@Override
public Capabilities getCapabilities() {
Capabilities result;
if (getFilter() == null) {
result = super.getCapabilities();
result.disableAll();
result.enable(Capability.NO_CLASS);
} else {
result = getFilter().getCapabilities();
}
// set dependencies
for (Capability cap : Capability.values()) {
result.enableDependency(cap);
}
return result;
}
/**
 * Build the clusterer on the filtered data.
 *
 * @param data the training data
 * @throws Exception if the clusterer could not be built successfully
 */
@Override
public void buildClusterer(Instances data) throws Exception {
if (m_Clusterer == null) {
throw new Exception("No base clusterer has been set!");
}
// remove instances with missing class
if (data.classIndex() > -1) {
// Copy first so the caller's dataset is not modified.
data = new Instances(data);
data.deleteWithMissingClass();
}
m_Filter.setInputFormat(data); // filter capabilities are checked here
data = Filter.useFilter(data, m_Filter);
// can clusterer handle the data?
getClusterer().getCapabilities().testWithFail(data);
// Remember the filtered header (without string values) for toString().
m_FilteredInstances = data.stringFreeStructure();
m_Clusterer.buildClusterer(data);
}
/**
 * Classifies a given instance after filtering.
 *
 * @param instance the instance to be classified
 * @return the class distribution for the given instance
 * @throws Exception if instance could not be classified successfully
 */
@Override
public double[] distributionForInstance(Instance instance) throws Exception {
if (m_Filter.numPendingOutput() > 0) {
throw new Exception("Filter output queue not empty!");
}
// Push the single instance through the trained filter; it must come out
// immediately since the filter structure was fixed at training time.
if (!m_Filter.input(instance)) {
throw new Exception(
"Filter didn't make the test instance immediately available!");
}
m_Filter.batchFinished();
Instance newInstance = m_Filter.output();
return m_Clusterer.distributionForInstance(newInstance);
}
/**
 * Output a representation of this clusterer.
 *
 * @return a representation of this clusterer
 */
@Override
public String toString() {
String result;
if (m_FilteredInstances == null) {
result = "FilteredClusterer: No model built yet.";
} else {
result = "FilteredClusterer using " + getClustererSpec()
+ " on data filtered through " + getFilterSpec()
+ "\n\nFiltered Header\n" + m_FilteredInstances.toString()
+ "\n\nClusterer Model\n" + m_Clusterer.toString();
}
return result;
}
/**
 * Returns the type of graph this clusterer represents, delegating to the
 * base clusterer when it is drawable.
 *
 * @return the graph type of this clusterer
 */
public int graphType() {
if (m_Clusterer instanceof Drawable)
return ((Drawable) m_Clusterer).graphType();
else
return Drawable.NOT_DRAWABLE;
}
/**
 * Returns graph describing the clusterer (if possible).
 *
 * @return the graph of the clusterer in dotty format
 * @throws Exception if the clusterer cannot be graphed
 */
public String graph() throws Exception {
if (m_Clusterer instanceof Drawable)
return ((Drawable) m_Clusterer).graph();
else
throw new Exception(
"Clusterer: " + getClustererSpec() + " cannot be graphed");
}
/**
 * Returns the revision string.
 *
 * @return the revision
 */
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
 * Main method for testing this class.
 *
 * @param args the commandline options, use "-h" for help
 */
public static void main(String[] args) {
runClusterer(new FilteredClusterer(), args);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/clusterers/HierarchicalClusterer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* HierarchicalClusterer.java
* Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand
*/
package weka.clusterers;
import java.io.Serializable;
import java.text.DecimalFormat;
import java.text.NumberFormat;
import java.util.Locale;
import java.util.Collections;
import java.util.Comparator;
import java.util.Enumeration;
import java.util.PriorityQueue;
import java.util.Vector;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.DistanceFunction;
import weka.core.Drawable;
import weka.core.EuclideanDistance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.SelectedTag;
import weka.core.Tag;
import weka.core.Utils;
/**
* <!-- globalinfo-start --> Hierarchical clustering class. Implements a number
* of classic hierarchical clustering methods. <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -N
* number of clusters
* </pre>
*
*
* <pre>
* -L
* Link type (Single, Complete, Average, Mean, Centroid, Ward, Adjusted complete, Neighbor Joining)
* [SINGLE|COMPLETE|AVERAGE|MEAN|CENTROID|WARD|ADJCOMPLETE|NEIGHBOR_JOINING]
* </pre>
*
* <pre>
* -A
* Distance function to use. (default: weka.core.EuclideanDistance)
* </pre>
*
* <pre>
* -P
* Print hierarchy in Newick format, which can be used for display in other programs.
* </pre>
*
* <pre>
* -D
* If set, classifier is run in debug mode and may output additional info to the console.
* </pre>
*
* <pre>
* -B
* \If set, distance is interpreted as branch length, otherwise it is node height.
* </pre>
*
* <!-- options-end -->
*
*
* @author Remco Bouckaert (rrb@xm.co.nz, remco@cs.waikato.ac.nz)
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public class HierarchicalClusterer extends AbstractClusterer implements
OptionHandler, Drawable {
private static final long serialVersionUID = 1L;
/**
 * Whether the distance represent node height (if false) or branch length (if
 * true).
 */
protected boolean m_bDistanceIsBranchLength = false;
/** training data **/
Instances m_instances;
/** number of clusters desired in clustering **/
int m_nNumClusters = 2;
/**
 * Sets the desired number of clusters; values below 1 are clamped to 1.
 *
 * @param nClusters the desired number of clusters
 */
public void setNumClusters(int nClusters) {
m_nNumClusters = Math.max(1, nClusters);
}
/**
 * Gets the desired number of clusters.
 *
 * @return the number of clusters
 */
public int getNumClusters() {
return m_nNumClusters;
}
/** distance function used for comparing members of a cluster **/
protected DistanceFunction m_DistanceFunction = new EuclideanDistance();
/**
 * Gets the distance function used to compare cluster members.
 *
 * @return the distance function
 */
public DistanceFunction getDistanceFunction() {
return m_DistanceFunction;
}
/**
 * Sets the distance function used to compare cluster members.
 *
 * @param distanceFunction the distance function to use
 */
public void setDistanceFunction(DistanceFunction distanceFunction) {
m_DistanceFunction = distanceFunction;
}
/**
* used for priority queue for efficient retrieval of pair of clusters to
* merge
**/
/**
 * Entry in the merge priority queue: a candidate pair of clusters together
 * with their distance and their sizes at the time the entry was queued (used
 * to detect stale entries after later merges).
 */
class Tuple {
  /** distance between the two clusters when this entry was created */
  double m_fDist;
  /** index of the first cluster */
  int m_iCluster1;
  /** index of the second cluster */
  int m_iCluster2;
  /** size of the first cluster when this entry was created */
  int m_nClusterSize1;
  /** size of the second cluster when this entry was created */
  int m_nClusterSize2;

  public Tuple(double d, int i, int j, int nSize1, int nSize2) {
    m_fDist = d;
    m_iCluster1 = i;
    m_iCluster2 = j;
    m_nClusterSize1 = nSize1;
    m_nClusterSize2 = nSize2;
  }
}
/** comparator used by priority queue **/
/**
 * Orders merge candidates by ascending cluster distance so the priority
 * queue always yields the closest pair first.
 */
class TupleComparator implements Comparator<Tuple> {

  /**
   * Compares two candidate pairs by their recorded distance.
   *
   * @param o1 the first candidate pair
   * @param o2 the second candidate pair
   * @return a negative value, zero, or a positive value as o1's distance is
   *         less than, equal to, or greater than o2's
   */
  @Override
  public int compare(Tuple o1, Tuple o2) {
    // Bug fix: the hand-rolled comparison returned 1 for BOTH orderings when
    // a distance was NaN, violating the Comparator contract PriorityQueue
    // relies on. Double.compare provides a consistent total order.
    return Double.compare(o1.m_fDist, o2.m_fDist);
  }
}
/** the various link types */
final static int SINGLE = 0;
final static int COMPLETE = 1;
final static int AVERAGE = 2;
final static int MEAN = 3;
final static int CENTROID = 4;
final static int WARD = 5;
final static int ADJCOMPLETE = 6;
final static int NEIGHBOR_JOINING = 7;
/** tags exposing the link types to option handling and the GUI */
public static final Tag[] TAGS_LINK_TYPE = { new Tag(SINGLE, "SINGLE"),
new Tag(COMPLETE, "COMPLETE"), new Tag(AVERAGE, "AVERAGE"),
new Tag(MEAN, "MEAN"), new Tag(CENTROID, "CENTROID"),
new Tag(WARD, "WARD"), new Tag(ADJCOMPLETE, "ADJCOMPLETE"),
new Tag(NEIGHBOR_JOINING, "NEIGHBOR_JOINING") };
/**
 * Holds the Link type used calculate distance between clusters
 */
int m_nLinkType = SINGLE;
/** whether to print the hierarchy in Newick format */
boolean m_bPrintNewick = true; // cleanup: stray second semicolon removed
/**
 * Gets whether the hierarchy is printed in Newick format.
 *
 * @return true if Newick output is enabled
 */
public boolean getPrintNewick() {
return m_bPrintNewick;
}
/**
 * Sets whether the hierarchy is printed in Newick format.
 *
 * @param bPrintNewick true to enable Newick output
 */
public void setPrintNewick(boolean bPrintNewick) {
m_bPrintNewick = bPrintNewick;
}
/**
 * Sets the link type used to compute inter-cluster distances. Silently
 * ignored unless the tag belongs to TAGS_LINK_TYPE.
 *
 * @param newLinkType the selected link type tag
 */
public void setLinkType(SelectedTag newLinkType) {
if (newLinkType.getTags() == TAGS_LINK_TYPE) {
m_nLinkType = newLinkType.getSelectedTag().getID();
}
}
/**
 * Gets the link type used to compute inter-cluster distances.
 *
 * @return the current link type as a SelectedTag
 */
public SelectedTag getLinkType() {
return new SelectedTag(m_nLinkType, TAGS_LINK_TYPE);
}
/** class representing node in cluster hierarchy **/
// Binary tree node of the cluster hierarchy. A null child means the branch
// ends directly in a training instance (referenced by index); a non-null
// child is a nested sub-cluster. toString/toString2 render the subtree in
// Newick format, labelling leaves by a nominal attribute value resp. by the
// numeric attribute value.
class Node implements Serializable {
/** ID added to avoid warning */
private static final long serialVersionUID = 7639483515789717908L;
Node m_left;
Node m_right;
Node m_parent;
// Instance indices used as leaf labels when the corresponding child is null.
int m_iLeftInstance;
int m_iRightInstance;
// Branch lengths to the two children, and the height of this node.
double m_fLeftLength = 0;
double m_fRightLength = 0;
double m_fHeight = 0;
public String toString(int attIndex) {
// Fixed en-US formatter so the Newick output uses '.' as decimal separator.
NumberFormat nf = NumberFormat.getNumberInstance(new Locale("en","US"));
DecimalFormat myFormatter = (DecimalFormat)nf;
myFormatter.applyPattern("#.#####");
// Four cases: each child is either a leaf (null) or a subtree, rendered
// recursively as "(left:len,right:len)".
if (m_left == null) {
if (m_right == null) {
return "("
+ m_instances.instance(m_iLeftInstance).stringValue(attIndex) + ":"
+ myFormatter.format(m_fLeftLength) + ","
+ m_instances.instance(m_iRightInstance).stringValue(attIndex)
+ ":" + myFormatter.format(m_fRightLength) + ")";
} else {
return "("
+ m_instances.instance(m_iLeftInstance).stringValue(attIndex) + ":"
+ myFormatter.format(m_fLeftLength) + ","
+ m_right.toString(attIndex) + ":"
+ myFormatter.format(m_fRightLength) + ")";
}
} else {
if (m_right == null) {
return "(" + m_left.toString(attIndex) + ":"
+ myFormatter.format(m_fLeftLength) + ","
+ m_instances.instance(m_iRightInstance).stringValue(attIndex)
+ ":" + myFormatter.format(m_fRightLength) + ")";
} else {
return "(" + m_left.toString(attIndex) + ":"
+ myFormatter.format(m_fLeftLength) + ","
+ m_right.toString(attIndex) + ":"
+ myFormatter.format(m_fRightLength) + ")";
}
}
}
// Same structure as toString, but labels leaves with the raw numeric value
// of attIndex instead of its string value.
public String toString2(int attIndex) {
NumberFormat nf = NumberFormat.getNumberInstance(new Locale("en","US"));
DecimalFormat myFormatter = (DecimalFormat)nf;
myFormatter.applyPattern("#.#####");
if (m_left == null) {
if (m_right == null) {
return "(" + m_instances.instance(m_iLeftInstance).value(attIndex)
+ ":" + myFormatter.format(m_fLeftLength) + ","
+ m_instances.instance(m_iRightInstance).value(attIndex) + ":"
+ myFormatter.format(m_fRightLength) + ")";
} else {
return "(" + m_instances.instance(m_iLeftInstance).value(attIndex)
+ ":" + myFormatter.format(m_fLeftLength) + ","
+ m_right.toString2(attIndex) + ":"
+ myFormatter.format(m_fRightLength) + ")";
}
} else {
if (m_right == null) {
return "(" + m_left.toString2(attIndex) + ":"
+ myFormatter.format(m_fLeftLength) + ","
+ m_instances.instance(m_iRightInstance).value(attIndex) + ":"
+ myFormatter.format(m_fRightLength) + ")";
} else {
return "(" + m_left.toString2(attIndex) + ":"
+ myFormatter.format(m_fLeftLength) + ","
+ m_right.toString2(attIndex) + ":"
+ myFormatter.format(m_fRightLength) + ")";
}
}
}
// Interpret the two distances as node heights: branch lengths are the
// difference between this node's height and each child's height (or the
// full height for a leaf child).
void setHeight(double fHeight1, double fHeight2) {
m_fHeight = fHeight1;
if (m_left == null) {
m_fLeftLength = fHeight1;
} else {
m_fLeftLength = fHeight1 - m_left.m_fHeight;
}
if (m_right == null) {
m_fRightLength = fHeight2;
} else {
m_fRightLength = fHeight2 - m_right.m_fHeight;
}
}
// Interpret the two distances directly as branch lengths; the node height
// is then the left branch length plus the left child's height (if any).
void setLength(double fLength1, double fLength2) {
m_fLeftLength = fLength1;
m_fRightLength = fLength2;
m_fHeight = fLength1;
if (m_left != null) {
m_fHeight += m_left.m_fHeight;
}
}
}
protected Node[] m_clusters;
int[] m_nClusterNr;
@Override
public void buildClusterer(Instances data) throws Exception {
// /System.err.println("Method " + m_nLinkType);
m_instances = data;
int nInstances = m_instances.numInstances();
if (nInstances == 0) {
return;
}
m_DistanceFunction.setInstances(m_instances);
// use array of integer vectors to store cluster indices,
// starting with one cluster per instance
@SuppressWarnings("unchecked")
Vector<Integer>[] nClusterID = new Vector[data.numInstances()];
for (int i = 0; i < data.numInstances(); i++) {
nClusterID[i] = new Vector<Integer>();
nClusterID[i].add(i);
}
// calculate distance matrix
int nClusters = data.numInstances();
// used for keeping track of hierarchy
Node[] clusterNodes = new Node[nInstances];
// Neighbor joining has its own agglomeration scheme; every other link type
// goes through the generic priority-queue based clustering.
if (m_nLinkType == NEIGHBOR_JOINING) {
neighborJoining(nClusters, nClusterID, clusterNodes);
} else {
doLinkClustering(nClusters, nClusterID, clusterNodes);
}
// move all clusters in m_nClusterID array
// & collect hierarchy
// After clustering, non-empty slots of nClusterID hold the surviving
// clusters; renumber them consecutively and record each instance's cluster.
int iCurrent = 0;
m_clusters = new Node[m_nNumClusters];
m_nClusterNr = new int[nInstances];
for (int i = 0; i < nInstances; i++) {
if (nClusterID[i].size() > 0) {
for (int j = 0; j < nClusterID[i].size(); j++) {
m_nClusterNr[nClusterID[i].elementAt(j)] = iCurrent;
}
m_clusters[iCurrent] = clusterNodes[i];
iCurrent++;
}
}
} // buildClusterer
/**
 * use neighbor joining algorithm for clustering This is roughly based on the
 * RapidNJ simple implementation and runs at O(n^3) More efficient
 * implementations exist, see RapidNJ (or my GPU implementation :-))
 *
 * @param nClusters the initial number of clusters (one per instance)
 * @param nClusterID per-slot lists of instance indices; emptied slots mean
 *          the cluster was merged away
 * @param clusterNodes per-slot hierarchy nodes built up during merging
 */
void neighborJoining(int nClusters, Vector<Integer>[] nClusterID,
Node[] clusterNodes) {
int n = m_instances.numInstances();
// Full symmetric distance matrix between the initial singleton clusters.
double[][] fDist = new double[nClusters][nClusters];
for (int i = 0; i < nClusters; i++) {
fDist[i][i] = 0;
for (int j = i + 1; j < nClusters; j++) {
fDist[i][j] = getDistance0(nClusterID[i], nClusterID[j]);
fDist[j][i] = fDist[i][j];
}
}
double[] fSeparationSums = new double[n];
double[] fSeparations = new double[n];
// nNextActive implements a skip list over still-active (non-empty) rows.
int[] nNextActive = new int[n];
// calculate initial separation rows
for (int i = 0; i < n; i++) {
double fSum = 0;
for (int j = 0; j < n; j++) {
fSum += fDist[i][j];
}
fSeparationSums[i] = fSum;
fSeparations[i] = fSum / (nClusters - 2);
nNextActive[i] = i + 1;
}
while (nClusters > 2) {
// find minimum of the NJ criterion d(i,j) - sep(i) - sep(j)
int iMin1 = -1;
int iMin2 = -1;
double fMin = Double.MAX_VALUE;
if (m_Debug) {
// Plain O(n^2) scan over all pairs (reference implementation).
for (int i = 0; i < n; i++) {
if (nClusterID[i].size() > 0) {
double[] fRow = fDist[i];
double fSep1 = fSeparations[i];
for (int j = 0; j < n; j++) {
if (nClusterID[j].size() > 0 && i != j) {
double fSep2 = fSeparations[j];
double fVal = fRow[j] - fSep1 - fSep2;
if (fVal < fMin) {
// new minimum
iMin1 = i;
iMin2 = j;
fMin = fVal;
}
}
}
}
}
} else {
// Same scan, but skipping dead rows/columns via nNextActive.
int i = 0;
while (i < n) {
double fSep1 = fSeparations[i];
double[] fRow = fDist[i];
int j = nNextActive[i];
while (j < n) {
double fSep2 = fSeparations[j];
double fVal = fRow[j] - fSep1 - fSep2;
if (fVal < fMin) {
// new minimum
iMin1 = i;
iMin2 = j;
fMin = fVal;
}
j = nNextActive[j];
}
i = nNextActive[i];
}
}
// record distance
double fMinDistance = fDist[iMin1][iMin2];
nClusters--;
// Standard NJ branch lengths for the two merged clusters.
double fSep1 = fSeparations[iMin1];
double fSep2 = fSeparations[iMin2];
double fDist1 = (0.5 * fMinDistance) + (0.5 * (fSep1 - fSep2));
double fDist2 = (0.5 * fMinDistance) + (0.5 * (fSep2 - fSep1));
if (nClusters > 2) {
// update separations & distance
double fNewSeparationSum = 0;
double fMutualDistance = fDist[iMin1][iMin2];
double[] fRow1 = fDist[iMin1];
double[] fRow2 = fDist[iMin2];
for (int i = 0; i < n; i++) {
if (i == iMin1 || i == iMin2 || nClusterID[i].size() == 0) {
fRow1[i] = 0;
} else {
double fVal1 = fRow1[i];
double fVal2 = fRow2[i];
// NJ distance of every remaining cluster to the merged cluster.
double fDistance = (fVal1 + fVal2 - fMutualDistance) / 2.0;
fNewSeparationSum += fDistance;
// update the separationsum of cluster i.
fSeparationSums[i] += (fDistance - fVal1 - fVal2);
fSeparations[i] = fSeparationSums[i] / (nClusters - 2);
fRow1[i] = fDistance;
fDist[i][iMin1] = fDistance;
}
}
fSeparationSums[iMin1] = fNewSeparationSum;
fSeparations[iMin1] = fNewSeparationSum / (nClusters - 2);
fSeparationSums[iMin2] = 0;
merge(iMin1, iMin2, fDist1, fDist2, nClusterID, clusterNodes);
int iPrev = iMin2;
// since iMin1 < iMin2 we havenActiveRows[0] >= 0, so the next loop
// should be save
while (nClusterID[iPrev].size() == 0) {
iPrev--;
}
nNextActive[iPrev] = nNextActive[iMin2];
} else {
merge(iMin1, iMin2, fDist1, fDist2, nClusterID, clusterNodes);
break;
}
}
// Join the last two surviving clusters, splitting the remaining distance.
for (int i = 0; i < n; i++) {
if (nClusterID[i].size() > 0) {
for (int j = i + 1; j < n; j++) {
if (nClusterID[j].size() > 0) {
double fDist1 = fDist[i][j];
if (nClusterID[i].size() == 1) {
merge(i, j, fDist1, 0, nClusterID, clusterNodes);
} else if (nClusterID[j].size() == 1) {
merge(i, j, 0, fDist1, nClusterID, clusterNodes);
} else {
merge(i, j, fDist1 / 2.0, fDist1 / 2.0, nClusterID, clusterNodes);
}
break;
}
}
}
}
} // neighborJoining
/**
 * Perform clustering using a link method This implementation uses a priority
 * queue resulting in a O(n^2 log(n)) algorithm
 *
 * @param nClusters number of clusters
 * @param nClusterID per-slot lists of instance indices; emptied slots mean
 *          the cluster was merged away
 * @param clusterNodes per-slot hierarchy nodes built up during merging
 */
void doLinkClustering(int nClusters, Vector<Integer>[] nClusterID,
Node[] clusterNodes) {
int nInstances = m_instances.numInstances();
PriorityQueue<Tuple> queue = new PriorityQueue<Tuple>(nClusters * nClusters
/ 2, new TupleComparator());
// Pairwise distances between the ORIGINAL singleton clusters; link-type
// distances between merged clusters are derived from this matrix.
double[][] fDistance0 = new double[nClusters][nClusters];
double[][] fClusterDistance = null;
if (m_Debug) {
fClusterDistance = new double[nClusters][nClusters];
}
for (int i = 0; i < nClusters; i++) {
fDistance0[i][i] = 0;
for (int j = i + 1; j < nClusters; j++) {
fDistance0[i][j] = getDistance0(nClusterID[i], nClusterID[j]);
fDistance0[j][i] = fDistance0[i][j];
queue.add(new Tuple(fDistance0[i][j], i, j, 1, 1));
if (m_Debug) {
fClusterDistance[i][j] = fDistance0[i][j];
fClusterDistance[j][i] = fDistance0[i][j];
}
}
}
while (nClusters > m_nNumClusters) {
int iMin1 = -1;
int iMin2 = -1;
// find closest two clusters
if (m_Debug) {
/* simple but inefficient implementation */
double fMinDistance = Double.MAX_VALUE;
for (int i = 0; i < nInstances; i++) {
if (nClusterID[i].size() > 0) {
for (int j = i + 1; j < nInstances; j++) {
if (nClusterID[j].size() > 0) {
double fDist = fClusterDistance[i][j];
if (fDist < fMinDistance) {
fMinDistance = fDist;
iMin1 = i;
iMin2 = j;
}
}
}
}
}
merge(iMin1, iMin2, fMinDistance, fMinDistance, nClusterID,
clusterNodes);
} else {
// use priority queue to find next best pair to cluster
// Entries whose recorded cluster sizes no longer match are stale
// (one of the clusters was merged since the entry was queued) and
// are discarded.
Tuple t;
do {
t = queue.poll();
} while (t != null
&& (nClusterID[t.m_iCluster1].size() != t.m_nClusterSize1 || nClusterID[t.m_iCluster2]
.size() != t.m_nClusterSize2));
// NOTE(review): if the queue drains completely, t is null here and the
// next line throws NPE; in normal operation fresh pairs are re-queued
// below so the queue cannot run dry — confirm.
iMin1 = t.m_iCluster1;
iMin2 = t.m_iCluster2;
merge(iMin1, iMin2, t.m_fDist, t.m_fDist, nClusterID, clusterNodes);
}
// merge clusters
// update distances & queue
// Re-queue the distance from every surviving cluster to the merged one.
for (int i = 0; i < nInstances; i++) {
if (i != iMin1 && nClusterID[i].size() != 0) {
int i1 = Math.min(iMin1, i);
int i2 = Math.max(iMin1, i);
double fDistance = getDistance(fDistance0, nClusterID[i1],
nClusterID[i2]);
if (m_Debug) {
fClusterDistance[i1][i2] = fDistance;
fClusterDistance[i2][i1] = fDistance;
}
queue.add(new Tuple(fDistance, i1, i2, nClusterID[i1].size(),
nClusterID[i2].size()));
}
}
nClusters--;
}
} // doLinkClustering
/**
 * Merges two clusters and records the merge in the hierarchy. The merged
 * cluster always ends up at the smaller of the two indices; the slot at the
 * larger index is emptied.
 *
 * @param iMin1 index of the first cluster
 * @param iMin2 index of the second cluster
 * @param fDist1 height/branch length towards the first cluster
 * @param fDist2 height/branch length towards the second cluster
 * @param nClusterID per-slot lists of instance indices
 * @param clusterNodes per-slot hierarchy nodes
 */
void merge(int iMin1, int iMin2, double fDist1, double fDist2,
  Vector<Integer>[] nClusterID, Node[] clusterNodes) {
  if (m_Debug) {
    System.err.println("Merging " + iMin1 + " " + iMin2 + " " + fDist1 + " "
      + fDist2);
  }
  // Normalize so the surviving cluster lives at the smaller index, keeping
  // each distance attached to its cluster.
  if (iMin1 > iMin2) {
    int tmpIdx = iMin1;
    iMin1 = iMin2;
    iMin2 = tmpIdx;
    double tmpDist = fDist1;
    fDist1 = fDist2;
    fDist2 = tmpDist;
  }
  nClusterID[iMin1].addAll(nClusterID[iMin2]);
  nClusterID[iMin2].removeAllElements();
  // track hierarchy: a null slot means the branch ends in a single instance.
  Node merged = new Node();
  if (clusterNodes[iMin1] == null) {
    merged.m_iLeftInstance = iMin1;
  } else {
    merged.m_left = clusterNodes[iMin1];
    clusterNodes[iMin1].m_parent = merged;
  }
  if (clusterNodes[iMin2] == null) {
    merged.m_iRightInstance = iMin2;
  } else {
    merged.m_right = clusterNodes[iMin2];
    clusterNodes[iMin2].m_parent = merged;
  }
  if (m_bDistanceIsBranchLength) {
    merged.setLength(fDist1, fDist2);
  } else {
    merged.setHeight(fDist1, fDist2);
  }
  clusterNodes[iMin1] = merged;
} // merge
/**
 * Calculate distance the first time when setting up the distance matrix.
 * At this point every cluster is a singleton, so for all instance-based link
 * types the distance is simply the distance between the two (first and only)
 * member instances; only WARD needs the full error-sum-of-squares treatment.
 *
 * @param cluster1 list of instance indices in the first cluster
 * @param cluster2 list of instance indices in the second cluster
 * @return the initial inter-cluster distance for the configured link type
 */
double getDistance0(Vector<Integer> cluster1, Vector<Integer> cluster2) {
double fBestDist = Double.MAX_VALUE;
switch (m_nLinkType) {
// Deliberate fall-through: all these link types coincide for singletons.
case SINGLE:
case NEIGHBOR_JOINING:
case CENTROID:
case COMPLETE:
case ADJCOMPLETE:
case AVERAGE:
case MEAN:
// set up two instances for distance function
Instance instance1 = (Instance) m_instances.instance(
cluster1.elementAt(0)).copy();
Instance instance2 = (Instance) m_instances.instance(
cluster2.elementAt(0)).copy();
fBestDist = m_DistanceFunction.distance(instance1, instance2);
break;
case WARD: {
// finds the distance of the change in caused by merging the cluster.
// The information of a cluster is calculated as the error sum of squares
// of the
// centroids of the cluster and its members.
double ESS1 = calcESS(cluster1);
double ESS2 = calcESS(cluster2);
Vector<Integer> merged = new Vector<Integer>();
merged.addAll(cluster1);
merged.addAll(cluster2);
double ESS = calcESS(merged);
fBestDist = ESS * merged.size() - ESS1 * cluster1.size() - ESS2
* cluster2.size();
}
break;
}
return fBestDist;
} // getDistance0
/**
 * calculate the distance between two clusters
 *
 * @param fDistance matrix of pairwise distances between original instances
 * @param cluster1 list of indices of instances in the first cluster
 * @param cluster2 ditto for second cluster
 * @return distance between clusters based on link type
 */
double getDistance(double[][] fDistance, Vector<Integer> cluster1,
Vector<Integer> cluster2) {
double fBestDist = Double.MAX_VALUE;
switch (m_nLinkType) {
case SINGLE:
// find single link distance aka minimum link, which is the closest
// distance between
// any item in cluster1 and any item in cluster2
fBestDist = Double.MAX_VALUE;
for (int i = 0; i < cluster1.size(); i++) {
int i1 = cluster1.elementAt(i);
for (int j = 0; j < cluster2.size(); j++) {
int i2 = cluster2.elementAt(j);
double fDist = fDistance[i1][i2];
if (fBestDist > fDist) {
fBestDist = fDist;
}
}
}
break;
// COMPLETE falls through into ADJCOMPLETE's maximum-link computation and
// breaks out early; ADJCOMPLETE additionally subtracts the largest
// within-cluster distance as an adjustment.
case COMPLETE:
case ADJCOMPLETE:
// find complete link distance aka maximum link, which is the largest
// distance between
// any item in cluster1 and any item in cluster2
fBestDist = 0;
for (int i = 0; i < cluster1.size(); i++) {
int i1 = cluster1.elementAt(i);
for (int j = 0; j < cluster2.size(); j++) {
int i2 = cluster2.elementAt(j);
double fDist = fDistance[i1][i2];
if (fBestDist < fDist) {
fBestDist = fDist;
}
}
}
if (m_nLinkType == COMPLETE) {
break;
}
// calculate adjustment, which is the largest within cluster distance
double fMaxDist = 0;
for (int i = 0; i < cluster1.size(); i++) {
int i1 = cluster1.elementAt(i);
for (int j = i + 1; j < cluster1.size(); j++) {
int i2 = cluster1.elementAt(j);
double fDist = fDistance[i1][i2];
if (fMaxDist < fDist) {
fMaxDist = fDist;
}
}
}
for (int i = 0; i < cluster2.size(); i++) {
int i1 = cluster2.elementAt(i);
for (int j = i + 1; j < cluster2.size(); j++) {
int i2 = cluster2.elementAt(j);
double fDist = fDistance[i1][i2];
if (fMaxDist < fDist) {
fMaxDist = fDist;
}
}
}
fBestDist -= fMaxDist;
break;
case AVERAGE:
// finds average distance between the elements of the two clusters
fBestDist = 0;
for (int i = 0; i < cluster1.size(); i++) {
int i1 = cluster1.elementAt(i);
for (int j = 0; j < cluster2.size(); j++) {
int i2 = cluster2.elementAt(j);
fBestDist += fDistance[i1][i2];
}
}
fBestDist /= (cluster1.size() * cluster2.size());
break;
case MEAN: {
// calculates the mean distance of a merged cluster (akak Group-average
// agglomerative clustering)
Vector<Integer> merged = new Vector<Integer>();
merged.addAll(cluster1);
merged.addAll(cluster2);
fBestDist = 0;
for (int i = 0; i < merged.size(); i++) {
int i1 = merged.elementAt(i);
for (int j = i + 1; j < merged.size(); j++) {
int i2 = merged.elementAt(j);
fBestDist += fDistance[i1][i2];
}
}
int n = merged.size();
// Average over all n*(n-1)/2 unordered pairs of the merged cluster.
fBestDist /= (n * (n - 1.0) / 2.0);
}
break;
case CENTROID:
// finds the distance of the centroids of the clusters
double[] fValues1 = new double[m_instances.numAttributes()];
for (int i = 0; i < cluster1.size(); i++) {
Instance instance = m_instances.instance(cluster1.elementAt(i));
for (int j = 0; j < m_instances.numAttributes(); j++) {
fValues1[j] += instance.value(j);
}
}
double[] fValues2 = new double[m_instances.numAttributes()];
for (int i = 0; i < cluster2.size(); i++) {
Instance instance = m_instances.instance(cluster2.elementAt(i));
for (int j = 0; j < m_instances.numAttributes(); j++) {
fValues2[j] += instance.value(j);
}
}
for (int j = 0; j < m_instances.numAttributes(); j++) {
fValues1[j] /= cluster1.size();
fValues2[j] /= cluster2.size();
}
// Wrap the centroid value arrays in copies of an existing instance so the
// configured distance function can be applied to them.
fBestDist = m_DistanceFunction.distance(m_instances.instance(0).copy(fValues1),
m_instances.instance(0).copy(fValues2));
break;
case WARD: {
// finds the distance of the change in caused by merging the cluster.
// The information of a cluster is calculated as the error sum of squares
// of the
// centroids of the cluster and its members.
double ESS1 = calcESS(cluster1);
double ESS2 = calcESS(cluster2);
Vector<Integer> merged = new Vector<Integer>();
merged.addAll(cluster1);
merged.addAll(cluster2);
double ESS = calcESS(merged);
fBestDist = ESS * merged.size() - ESS1 * cluster1.size() - ESS2
* cluster2.size();
}
break;
}
return fBestDist;
} // getDistance
/**
 * Computes the error sum-of-squares of a cluster with respect to its
 * centroid: the centroid is the attribute-wise mean of the cluster's
 * members, and the result is the mean distance (per the configured
 * distance function) of each member to that centroid.
 */
double calcESS(Vector<Integer> cluster) {
  int nAttributes = m_instances.numAttributes();
  int nMembers = cluster.size();
  // accumulate attribute sums, then normalise to obtain the centroid values
  double[] centroidValues = new double[nAttributes];
  for (int member : cluster) {
    Instance instance = m_instances.instance(member);
    for (int att = 0; att < nAttributes; att++) {
      centroidValues[att] += instance.value(att);
    }
  }
  for (int att = 0; att < nAttributes; att++) {
    centroidValues[att] /= nMembers;
  }
  // reuse an existing member instance as a template for the centroid instance
  Instance centroid = m_instances.instance(cluster.elementAt(0)).copy(centroidValues);
  double total = 0;
  for (int member : cluster) {
    total += m_DistanceFunction.distance(centroid, m_instances.instance(member));
  }
  return total / nMembers;
} // calcESS
/**
 * Instances are assigned a cluster by locating the training instance that
 * is nearest (according to the distance function) to the instance being
 * clustered; the cluster index of that training point is returned.
 */
@Override
public int clusterInstance(Instance instance) throws Exception {
  if (m_instances.numInstances() == 0) {
    return 0;
  }
  int nearest = -1;
  double nearestDist = Double.MAX_VALUE;
  // linear scan over the training data for the closest point
  for (int i = 0; i < m_instances.numInstances(); i++) {
    double dist = m_DistanceFunction.distance(instance, m_instances.instance(i));
    if (dist < nearestDist) {
      nearestDist = dist;
      nearest = i;
    }
  }
  return m_nClusterNr[nearest];
}
/**
 * Creates a distribution in which every cluster has zero probability except
 * the single cluster the instance is assigned to, which gets probability 1.
 */
@Override
public double[] distributionForInstance(Instance instance) throws Exception {
  int nClusters = numberOfClusters();
  if (nClusters == 0) {
    // degenerate case: a single pseudo-cluster with full probability
    return new double[] { 1 };
  }
  double[] dist = new double[nClusters];
  dist[clusterInstance(instance)] = 1.0;
  return dist;
}
/**
 * Returns the capabilities of this clusterer: no class attribute required;
 * nominal, numeric, date and string attributes with missing values allowed;
 * no minimum number of training instances.
 *
 * @return the capabilities of this clusterer
 */
@Override
public Capabilities getCapabilities() {
  Capabilities result = new Capabilities(this);
  result.disableAll();
  result.enable(Capability.NO_CLASS);
  // attributes
  result.enable(Capability.NOMINAL_ATTRIBUTES);
  result.enable(Capability.NUMERIC_ATTRIBUTES);
  result.enable(Capability.DATE_ATTRIBUTES);
  result.enable(Capability.MISSING_VALUES);
  result.enable(Capability.STRING_ATTRIBUTES);
  // other
  result.setMinimumNumberInstances(0);
  return result;
}
/**
 * Returns the number of clusters: the configured cluster count, capped by
 * the number of training instances.
 *
 * @return the effective number of clusters
 * @throws Exception declared by the interface; never thrown here
 */
@Override
public int numberOfClusters() throws Exception {
  int nInstances = m_instances.numInstances();
  return (m_nNumClusters < nInstances) ? m_nNumClusters : nInstances;
}
/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 */
@Override
public Enumeration<Option> listOptions() {
  Vector<Option> options = new Vector<Option>(5);
  options.add(new Option(
      "\tIf set, distance is interpreted as branch length\n"
          + "\totherwise it is node height.", "B", 0, "-B"));
  options.add(new Option("\tnumber of clusters", "N", 1,
      "-N <Nr Of Clusters>"));
  options.add(new Option(
      "\tFlag to indicate the cluster should be printed in Newick format.",
      "P", 0, "-P"));
  options.add(new Option(
      "Link type (Single, Complete, Average, Mean, Centroid, Ward, Adjusted complete, Neighbor joining)",
      "L", 1,
      "-L [SINGLE|COMPLETE|AVERAGE|MEAN|CENTROID|WARD|ADJCOMPLETE|NEIGHBOR_JOINING]"));
  options.add(new Option("\tDistance function to use.\n"
      + "\t(default: weka.core.EuclideanDistance)", "A", 1,
      "-A <classname and options>"));
  // append the options of the superclass
  options.addAll(Collections.list(super.listOptions()));
  return options.elements();
}
/**
 * Parses a given list of options.
 * <p/>
 *
 * <!-- options-start --> Valid options are:
 * <p/>
 *
 * <!-- options-end -->
 *
 * @param options the list of options as an array of strings
 * @throws Exception if an option is not supported
 */
@Override
public void setOptions(String[] options) throws Exception {
  m_bPrintNewick = Utils.getFlag('P', options);

  String optionString = Utils.getOption('N', options);
  if (optionString.length() != 0) {
    // Integer.parseInt avoids the deprecated Integer(String) constructor
    setNumClusters(Integer.parseInt(optionString));
  } else {
    setNumClusters(2);
  }

  setDistanceIsBranchLength(Utils.getFlag('B', options));

  // the link type values are mutually exclusive, so chain with else-if
  String sLinkType = Utils.getOption('L', options);
  if (sLinkType.compareTo("SINGLE") == 0) {
    setLinkType(new SelectedTag(SINGLE, TAGS_LINK_TYPE));
  } else if (sLinkType.compareTo("COMPLETE") == 0) {
    setLinkType(new SelectedTag(COMPLETE, TAGS_LINK_TYPE));
  } else if (sLinkType.compareTo("AVERAGE") == 0) {
    setLinkType(new SelectedTag(AVERAGE, TAGS_LINK_TYPE));
  } else if (sLinkType.compareTo("MEAN") == 0) {
    setLinkType(new SelectedTag(MEAN, TAGS_LINK_TYPE));
  } else if (sLinkType.compareTo("CENTROID") == 0) {
    setLinkType(new SelectedTag(CENTROID, TAGS_LINK_TYPE));
  } else if (sLinkType.compareTo("WARD") == 0) {
    setLinkType(new SelectedTag(WARD, TAGS_LINK_TYPE));
  } else if (sLinkType.compareTo("ADJCOMPLETE") == 0) {
    setLinkType(new SelectedTag(ADJCOMPLETE, TAGS_LINK_TYPE));
  } else if (sLinkType.compareTo("NEIGHBOR_JOINING") == 0) {
    setLinkType(new SelectedTag(NEIGHBOR_JOINING, TAGS_LINK_TYPE));
  }

  // distance function: class name plus its own options
  String nnSearchClass = Utils.getOption('A', options);
  if (nnSearchClass.length() != 0) {
    String nnSearchClassSpec[] = Utils.splitOptions(nnSearchClass);
    if (nnSearchClassSpec.length == 0) {
      throw new Exception("Invalid DistanceFunction specification string.");
    }
    String className = nnSearchClassSpec[0];
    nnSearchClassSpec[0] = "";
    setDistanceFunction((DistanceFunction) Utils.forName(
        DistanceFunction.class, className, nnSearchClassSpec));
  } else {
    setDistanceFunction(new EuclideanDistance());
  }

  super.setOptions(options);
  Utils.checkForRemainingOptions(options);
}
/**
 * Gets the current settings of the clusterer.
 *
 * @return an array of strings suitable for passing to setOptions()
 */
@Override
public String[] getOptions() {
  Vector<String> result = new Vector<String>();

  result.add("-N");
  result.add("" + getNumClusters());

  // emit the symbolic name of the configured link type
  result.add("-L");
  switch (m_nLinkType) {
  case SINGLE:
    result.add("SINGLE");
    break;
  case COMPLETE:
    result.add("COMPLETE");
    break;
  case AVERAGE:
    result.add("AVERAGE");
    break;
  case MEAN:
    result.add("MEAN");
    break;
  case CENTROID:
    result.add("CENTROID");
    break;
  case WARD:
    result.add("WARD");
    break;
  case ADJCOMPLETE:
    result.add("ADJCOMPLETE");
    break;
  case NEIGHBOR_JOINING:
    result.add("NEIGHBOR_JOINING");
    break;
  }

  if (m_bPrintNewick) {
    result.add("-P");
  }
  if (getDistanceIsBranchLength()) {
    result.add("-B");
  }

  result.add("-A");
  result.add((m_DistanceFunction.getClass().getName() + " "
      + Utils.joinOptions(m_DistanceFunction.getOptions())).trim());

  Collections.addAll(result, super.getOptions());

  return result.toArray(new String[0]);
}
@Override
/**
 * Returns a textual description of the clusters, one section per cluster.
 * Produces output only when Newick printing is enabled and at least one
 * cluster exists; otherwise returns an empty string.
 */
public String toString() {
  StringBuffer buf = new StringBuffer();
  // label instances by the class attribute if set; otherwise prefer the
  // first string attribute, falling back to the last attribute
  int attIndex = m_instances.classIndex();
  if (attIndex < 0) {
    // try find a string, or last attribute otherwise
    attIndex = 0;
    while (attIndex < m_instances.numAttributes() - 1) {
      if (m_instances.attribute(attIndex).isString()) {
        break;
      }
      attIndex++;
    }
  }
  try {
    if (m_bPrintNewick && (numberOfClusters() > 0)) {
      for (int i = 0; i < m_clusters.length; i++) {
        if (m_clusters[i] != null) {
          buf.append("Cluster " + i + "\n");
          // string attributes use label rendering, others value rendering
          if (m_instances.attribute(attIndex).isString()) {
            buf.append(m_clusters[i].toString(attIndex));
          } else {
            buf.append(m_clusters[i].toString2(attIndex));
          }
          buf.append("\n\n");
        }
      }
    }
  } catch (Exception e) {
    // printing is best-effort: report the problem but return what we have
    e.printStackTrace();
  }
  return buf.toString();
}
/**
 * Gets whether the distance between clusters is interpreted as branch
 * length (rather than node height).
 *
 * @return true if distance is treated as branch length
 */
public boolean getDistanceIsBranchLength() {
  return m_bDistanceIsBranchLength;
}
/**
 * Sets whether the distance between clusters is interpreted as branch
 * length (rather than node height).
 *
 * @param bDistanceIsHeight true to treat distance as branch length
 */
public void setDistanceIsBranchLength(boolean bDistanceIsHeight) {
  m_bDistanceIsBranchLength = bDistanceIsHeight;
}
/**
 * Returns the tip text for the distanceIsBranchLength property.
 *
 * @return tip text suitable for displaying in the explorer/experimenter gui
 */
public String distanceIsBranchLengthTipText() {
  return "If set to false, the distance between clusters is interpreted "
      + "as the height of the node linking the clusters. This is appropriate for "
      + "example for single link clustering. However, for neighbor joining, the "
      + "distance is better interpreted as branch length. Set this flag to "
      + "get the latter interpretation.";
}
/**
 * Returns the tip text for the numClusters property.
 *
 * @return a string to describe the NumClusters
 */
public String numClustersTipText() {
  return "Sets the number of clusters. "
      + "If a single hierarchy is desired, set this to 1.";
}
/**
 * Returns the tip text for the printNewick property.
 *
 * @return a string to describe the print Newick flag
 */
public String printNewickTipText() {
  // fixed grammar in the user-visible text: "should be printed", and the
  // nuisance remark was inverted ("may not be" -> "may be")
  return "Flag to indicate whether the cluster should be printed in Newick format."
      + " This can be useful for display in other programs. However, for large datasets"
      + " a lot of text may be produced, which may be a nuisance when the Newick format"
      + " is not required";
}
/**
 * Returns the tip text for the distanceFunction property.
 *
 * @return a string to describe the distance function
 */
public String distanceFunctionTipText() {
  // fixed misplaced period ("individual. instances") and the missing
  // separator between "cluster" and "depending" in the user-visible text
  return "Sets the distance function, which measures the distance between two individual "
      + "instances (or possibly the distance between an instance and the centroid of a cluster, "
      + "depending on the Link type).";
}
/**
 * Returns the tip text for the linkType property.
 *
 * @return a string to describe the Link type
 */
public String linkTypeTipText() {
  // fixed typos in the user-visible text: "akak" -> "aka" and the garbled
  // WARD sentence "distance of the change in caused"
  return "Sets the method used to measure the distance between two clusters.\n"
      + "SINGLE:\n"
      + " find single link distance aka minimum link, which is the closest distance between"
      + " any item in cluster1 and any item in cluster2\n"
      + "COMPLETE:\n"
      + " find complete link distance aka maximum link, which is the largest distance between"
      + " any item in cluster1 and any item in cluster2\n"
      + "ADJCOMPLETE:\n"
      + " as COMPLETE, but with adjustment, which is the largest within cluster distance\n"
      + "AVERAGE:\n"
      + " finds average distance between the elements of the two clusters\n"
      + "MEAN: \n"
      + " calculates the mean distance of a merged cluster (aka Group-average agglomerative clustering)\n"
      + "CENTROID:\n"
      + " finds the distance of the centroids of the clusters\n"
      + "WARD:\n"
      + " finds the distance of the change caused by merging the cluster."
      + " The information of a cluster is calculated as the error sum of squares of the"
      + " centroids of the cluster and its members.\n"
      + "NEIGHBOR_JOINING\n"
      + " use neighbor joining algorithm.";
}
/**
 * Returns a string describing this clusterer for display in the
 * explorer/experimenter gui.
 *
 * @return a description of the clusterer
 */
public String globalInfo() {
  return "Hierarchical clustering class.\n"
      + "Implements a number of classic agglomerative (i.e., bottom up) hierarchical clustering methods.";
}
/**
 * Main method for running this clusterer from the command line.
 *
 * @param argv the command-line options
 */
public static void main(String[] argv) {
  runClusterer(new HierarchicalClusterer(), argv);
}
@Override
/**
 * Returns the dendrogram in Newick format, prefixed with "Newick:".
 *
 * @return the Newick representation of the top-level cluster, or a
 *         placeholder when no clusters exist
 * @throws Exception if the number of clusters cannot be determined
 */
public String graph() throws Exception {
  if (numberOfClusters() == 0) {
    return "Newick:(no,clusters)";
  }
  // label instances by the class attribute if set; otherwise prefer the
  // first string attribute, falling back to the last attribute
  int attIndex = m_instances.classIndex();
  if (attIndex < 0) {
    // try find a string, or last attribute otherwise
    attIndex = 0;
    while (attIndex < m_instances.numAttributes() - 1) {
      if (m_instances.attribute(attIndex).isString()) {
        break;
      }
      attIndex++;
    }
  }
  String sNewick = null;
  if (m_instances.attribute(attIndex).isString()) {
    sNewick = m_clusters[0].toString(attIndex);
  } else {
    sNewick = m_clusters[0].toString2(attIndex);
  }
  return "Newick:" + sNewick;
}
@Override
/** Returns the type of graph this clusterer draws: a Newick tree. */
public int graphType() {
  return Drawable.Newick;
}
/**
 * Returns the revision string extracted from the CVS/SVN keyword.
 *
 * @return the revision
 */
@Override
public String getRevision() {
  return RevisionUtils.extract("$Revision$");
}
} // class HierarchicalClusterer
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/clusterers/MakeDensityBasedClusterer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* MakeDensityBasedClusterer.java
* Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.clusterers;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.core.WeightedInstancesHandler;
import weka.estimators.DiscreteEstimator;
import weka.filters.unsupervised.attribute.ReplaceMissingValues;
/**
* <!-- globalinfo-start --> Class for wrapping a Clusterer to make it return a
* distribution and density. Fits normal distributions and discrete
* distributions within each cluster produced by the wrapped clusterer. Supports
* the NumberOfClustersRequestable interface only if the wrapped Clusterer does.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -M <num>
* minimum allowable standard deviation for normal density computation
* (default 1e-6)
* </pre>
*
* <pre>
* -W <clusterer name>
* Clusterer to wrap.
* (default weka.clusterers.SimpleKMeans)
* </pre>
*
* <pre>
* Options specific to clusterer weka.clusterers.SimpleKMeans:
* </pre>
*
* <pre>
* -N <num>
* number of clusters.
* (default 2).
* </pre>
*
* <pre>
* -V
* Display std. deviations for centroids.
* </pre>
*
* <pre>
* -M
* Replace missing values with mean/mode.
* </pre>
*
* <pre>
* -S <num>
* Random number seed.
* (default 10)
* </pre>
*
* <!-- options-end -->
*
* Options after "--" are passed on to the base clusterer.
*
* @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public class MakeDensityBasedClusterer extends AbstractDensityBasedClusterer
  implements NumberOfClustersRequestable, OptionHandler,
  WeightedInstancesHandler {

  /** for serialization */
  static final long serialVersionUID = -5643302427972186631L;

  /** holds training instances header information */
  private Instances m_theInstances;

  /** prior probabilities for the fitted clusters */
  private double[] m_priors;

  /** normal distributions fitted to each numeric attribute in each cluster */
  private double[][][] m_modelNormal;

  /** discrete distributions fitted to each discrete attribute in each cluster */
  private DiscreteEstimator[][] m_model;

  /** default minimum standard deviation */
  private double m_minStdDev = 1e-6;

  /** The clusterer being wrapped */
  private Clusterer m_wrappedClusterer = new weka.clusterers.SimpleKMeans();

  /** globally replace missing values */
  private ReplaceMissingValues m_replaceMissing;

  /**
   * Default constructor.
   */
  public MakeDensityBasedClusterer() {
    super();
  }

  /**
   * Constructs a MakeDensityBasedClusterer wrapping a given Clusterer.
   *
   * @param toWrap the clusterer to wrap around
   */
  public MakeDensityBasedClusterer(Clusterer toWrap) {
    setClusterer(toWrap);
  }

  /**
   * Returns a string describing classifier
   *
   * @return a description suitable for displaying in the explorer/experimenter
   *         gui
   */
  public String globalInfo() {
    return "Class for wrapping a Clusterer to make it return a distribution "
      + "and density. Fits normal distributions and discrete distributions "
      + "within each cluster produced by the wrapped clusterer. Supports the "
      + "NumberOfClustersRequestable interface only if the wrapped Clusterer "
      + "does.";
  }

  /**
   * String describing default clusterer.
   *
   * @return the default clusterer classname
   */
  protected String defaultClustererString() {
    return SimpleKMeans.class.getName();
  }

  /**
   * Set the number of clusters to generate.
   *
   * @param n the number of clusters to generate
   * @throws Exception if the wrapped clusterer has not been set, or if the
   *           wrapped clusterer does not implement this facility.
   */
  @Override
  public void setNumClusters(int n) throws Exception {
    if (m_wrappedClusterer == null) {
      throw new Exception("Can't set the number of clusters to generate - "
        + "no clusterer has been set yet.");
    }
    if (!(m_wrappedClusterer instanceof NumberOfClustersRequestable)) {
      throw new Exception("Can't set the number of clusters to generate - "
        + "wrapped clusterer does not support this facility.");
    }
    ((NumberOfClustersRequestable) m_wrappedClusterer).setNumClusters(n);
  }

  /**
   * Returns default capabilities of the clusterer (i.e., of the wrapper
   * clusterer).
   *
   * @return the capabilities of this clusterer
   */
  @Override
  public Capabilities getCapabilities() {
    if (m_wrappedClusterer != null) {
      return m_wrappedClusterer.getCapabilities();
    }
    Capabilities result = super.getCapabilities();
    result.disableAll();
    result.enable(Capability.NO_CLASS);
    return result;
  }

  /**
   * Builds a clusterer for a set of instances.
   *
   * @param data the instances to train the clusterer with
   * @throws Exception if the clusterer hasn't been set or something goes wrong
   */
  @Override
  public void buildClusterer(Instances data) throws Exception {
    // can clusterer handle the data?
    getCapabilities().testWithFail(data);

    m_replaceMissing = new ReplaceMissingValues();
    m_replaceMissing.setInputFormat(data);
    data = weka.filters.Filter.useFilter(data, m_replaceMissing);

    m_theInstances = new Instances(data, 0);
    if (m_wrappedClusterer == null) {
      throw new Exception("No clusterer has been set");
    }
    m_wrappedClusterer.buildClusterer(data);
    m_model = new DiscreteEstimator[m_wrappedClusterer.numberOfClusters()][data
      .numAttributes()];
    m_modelNormal = new double[m_wrappedClusterer.numberOfClusters()][data
      .numAttributes()][2];
    double[][] weights = new double[m_wrappedClusterer.numberOfClusters()][data
      .numAttributes()];
    m_priors = new double[m_wrappedClusterer.numberOfClusters()];
    for (int i = 0; i < m_wrappedClusterer.numberOfClusters(); i++) {
      m_priors[i] = 1.0; // laplace correction
      for (int j = 0; j < data.numAttributes(); j++) {
        if (data.attribute(j).isNominal()) {
          m_model[i][j] = new DiscreteEstimator(data.attribute(j).numValues(),
            true);
        }
      }
    }

    Instance inst = null;

    // Compute mean, etc.
    int[] clusterIndex = new int[data.numInstances()];
    for (int i = 0; i < data.numInstances(); i++) {
      inst = data.instance(i);
      int cluster = m_wrappedClusterer.clusterInstance(inst);
      m_priors[cluster] += inst.weight();
      for (int j = 0; j < data.numAttributes(); j++) {
        if (!inst.isMissing(j)) {
          if (data.attribute(j).isNominal()) {
            m_model[cluster][j].addValue(inst.value(j), inst.weight());
          } else {
            m_modelNormal[cluster][j][0] += inst.weight() * inst.value(j);
            weights[cluster][j] += inst.weight();
          }
        }
      }
      clusterIndex[i] = cluster;
    }

    for (int j = 0; j < data.numAttributes(); j++) {
      if (data.attribute(j).isNumeric()) {
        for (int i = 0; i < m_wrappedClusterer.numberOfClusters(); i++) {
          if (weights[i][j] > 0) {
            m_modelNormal[i][j][0] /= weights[i][j];
          }
        }
      }
    }

    // Compute standard deviations
    for (int i = 0; i < data.numInstances(); i++) {
      inst = data.instance(i);
      for (int j = 0; j < data.numAttributes(); j++) {
        if (!inst.isMissing(j)) {
          if (data.attribute(j).isNumeric()) {
            double diff = m_modelNormal[clusterIndex[i]][j][0] - inst.value(j);
            m_modelNormal[clusterIndex[i]][j][1] += inst.weight() * diff * diff;
          }
        }
      }
    }

    for (int j = 0; j < data.numAttributes(); j++) {
      if (data.attribute(j).isNumeric()) {
        for (int i = 0; i < m_wrappedClusterer.numberOfClusters(); i++) {
          if (weights[i][j] > 0) {
            m_modelNormal[i][j][1] = Math.sqrt(m_modelNormal[i][j][1]
              / weights[i][j]);
          } else if (weights[i][j] <= 0) {
            m_modelNormal[i][j][1] = Double.MAX_VALUE;
          }
          // guard against degenerate (near-zero) standard deviations
          if (m_modelNormal[i][j][1] <= m_minStdDev) {
            m_modelNormal[i][j][1] = data.attributeStats(j).numericStats.stdDev;
            if (m_modelNormal[i][j][1] <= m_minStdDev) {
              m_modelNormal[i][j][1] = m_minStdDev;
            }
          }
        }
      }
    }

    Utils.normalize(m_priors);
  }

  /**
   * Returns the cluster priors.
   *
   * @return the cluster priors
   */
  @Override
  public double[] clusterPriors() {
    double[] n = new double[m_priors.length];
    System.arraycopy(m_priors, 0, n, 0, n.length);
    return n;
  }

  /**
   * Computes the log of the conditional density (per cluster) for a given
   * instance.
   *
   * @param inst the instance to compute the density for
   * @return an array containing the estimated densities
   * @throws Exception if the density could not be computed successfully
   */
  @Override
  public double[] logDensityPerClusterForInstance(Instance inst)
    throws Exception {
    int i, j;
    double logprob;
    double[] wghts = new double[m_wrappedClusterer.numberOfClusters()];

    m_replaceMissing.input(inst);
    inst = m_replaceMissing.output();

    for (i = 0; i < m_wrappedClusterer.numberOfClusters(); i++) {
      logprob = 0;
      for (j = 0; j < inst.numAttributes(); j++) {
        if (!inst.isMissing(j)) {
          if (inst.attribute(j).isNominal()) {
            logprob += Math.log(m_model[i][j].getProbability(inst.value(j)));
          } else { // numeric attribute
            logprob += logNormalDens(inst.value(j), m_modelNormal[i][j][0],
              m_modelNormal[i][j][1]);
          }
        }
      }
      wghts[i] = logprob;
    }
    return wghts;
  }

  /** Constant for normal distribution. */
  private static double m_normConst = 0.5 * Math.log(2 * Math.PI);

  /**
   * Density function of normal distribution.
   *
   * @param x input value
   * @param mean mean of distribution
   * @param stdDev standard deviation of distribution
   * @return the density
   */
  private double logNormalDens(double x, double mean, double stdDev) {
    double diff = x - mean;
    return -(diff * diff / (2 * stdDev * stdDev)) - m_normConst
      - Math.log(stdDev);
  }

  /**
   * Returns the number of clusters.
   *
   * @return the number of clusters generated for a training dataset.
   * @throws Exception if number of clusters could not be returned successfully
   */
  @Override
  public int numberOfClusters() throws Exception {
    return m_wrappedClusterer.numberOfClusters();
  }

  /**
   * Returns a description of the clusterer.
   *
   * @return a string containing a description of the clusterer
   */
  @Override
  public String toString() {
    if (m_priors == null) {
      return "No clusterer built yet!";
    }

    StringBuffer text = new StringBuffer();
    text.append("MakeDensityBasedClusterer: \n\nWrapped clusterer: "
      + m_wrappedClusterer.toString());
    text.append("\nFitted estimators (with ML estimates of variance):\n");
    for (int j = 0; j < m_priors.length; j++) {
      text.append("\nCluster: " + j + " Prior probability: "
        + Utils.doubleToString(m_priors[j], 4) + "\n\n");
      for (int i = 0; i < m_model[0].length; i++) {
        text.append("Attribute: " + m_theInstances.attribute(i).name() + "\n");
        if (m_theInstances.attribute(i).isNominal()) {
          if (m_model[j][i] != null) {
            text.append(m_model[j][i].toString());
          }
        } else {
          text.append("Normal Distribution. Mean = "
            + Utils.doubleToString(m_modelNormal[j][i][0], 4) + " StdDev = "
            + Utils.doubleToString(m_modelNormal[j][i][1], 4) + "\n");
        }
      }
    }
    return text.toString();
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String clustererTipText() {
    return "the clusterer to wrap";
  }

  /**
   * Sets the clusterer to wrap.
   *
   * @param toWrap the clusterer
   */
  public void setClusterer(Clusterer toWrap) {
    m_wrappedClusterer = toWrap;
  }

  /**
   * Gets the clusterer being wrapped.
   *
   * @return the clusterer
   */
  public Clusterer getClusterer() {
    return m_wrappedClusterer;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String minStdDevTipText() {
    return "set minimum allowable standard deviation";
  }

  /**
   * Set the minimum value for standard deviation when calculating normal
   * density. Reducing this value can help prevent arithmetic overflow resulting
   * from multiplying large densities (arising from small standard deviations)
   * when there are many singleton or near singleton values.
   *
   * @param m minimum value for standard deviation
   */
  public void setMinStdDev(double m) {
    m_minStdDev = m;
  }

  /**
   * Get the minimum allowable standard deviation.
   *
   * @return the minumum allowable standard deviation
   */
  public double getMinStdDev() {
    return m_minStdDev;
  }

  /**
   * Returns an enumeration describing the available options..
   *
   * @return an enumeration of all the available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> result = new Vector<Option>();

    result.addElement(new Option(
      "\tminimum allowable standard deviation for normal density computation "
        + "\n\t(default 1e-6)", "M", 1, "-M <num>"));
    result.addElement(new Option("\tClusterer to wrap.\n" + "\t(default "
      + defaultClustererString() + ")", "W", 1, "-W <clusterer name>"));

    result.addAll(Collections.list(super.listOptions()));

    if ((m_wrappedClusterer != null)
      && (m_wrappedClusterer instanceof OptionHandler)) {
      result.addElement(new Option("", "", 0,
        "\nOptions specific to clusterer "
          + m_wrappedClusterer.getClass().getName() + ":"));
      result.addAll(Collections.list(((OptionHandler) m_wrappedClusterer)
        .listOptions()));
    }

    return result.elements();
  }

  /**
   * Parses a given list of options.
   * <p/>
   *
   * <!-- options-start --> Valid options are:
   * <p/>
   *
   * <pre>
   * -M <num>
   *  minimum allowable standard deviation for normal density computation
   *  (default 1e-6)
   * </pre>
   *
   * <pre>
   * -W <clusterer name>
   *  Clusterer to wrap.
   *  (default weka.clusterers.SimpleKMeans)
   * </pre>
   *
   * <pre>
   * Options specific to clusterer weka.clusterers.SimpleKMeans:
   * </pre>
   *
   * <pre>
   * -N <num>
   *  number of clusters.
   *  (default 2).
   * </pre>
   *
   * <pre>
   * -V
   *  Display std. deviations for centroids.
   * </pre>
   *
   * <pre>
   * -M
   *  Replace missing values with mean/mode.
   * </pre>
   *
   * <pre>
   * -S <num>
   *  Random number seed.
   *  (default 10)
   * </pre>
   *
   * <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  @Override
  public void setOptions(String[] options) throws Exception {
    String optionString = Utils.getOption('M', options);
    if (optionString.length() != 0) {
      // Double.parseDouble avoids the deprecated Double(String) constructor
      setMinStdDev(Double.parseDouble(optionString));
    } else {
      setMinStdDev(1e-6);
    }

    String wString = Utils.getOption('W', options);
    if (wString.length() == 0) {
      wString = defaultClustererString();
    }
    setClusterer(AbstractClusterer.forName(wString,
      Utils.partitionOptions(options)));

    super.setOptions(options);
    Utils.checkForRemainingOptions(options);
  }

  /**
   * Gets the current settings of the clusterer.
   *
   * @return an array of strings suitable for passing to setOptions()
   */
  @Override
  public String[] getOptions() {
    Vector<String> options = new Vector<String>();

    options.add("-M");
    options.add("" + getMinStdDev());

    if (getClusterer() != null) {
      options.add("-W");
      options.add(getClusterer().getClass().getName());
      if (m_wrappedClusterer instanceof OptionHandler) {
        String[] clustererOptions = ((OptionHandler) m_wrappedClusterer)
          .getOptions();
        if (clustererOptions.length > 0) {
          options.add("--");
          Collections.addAll(options, clustererOptions);
        }
      }
    }

    Collections.addAll(options, super.getOptions());

    return options.toArray(new String[0]);
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }

  /**
   * Main method for testing this class.
   *
   * @param argv the options
   */
  public static void main(String[] argv) {
    runClusterer(new MakeDensityBasedClusterer(), argv);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/clusterers/NumberOfClustersRequestable.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* NumberOfClustersRequestable.java
* Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.clusterers;
/**
* Interface to a clusterer that can generate a requested number of
* clusters
*
* @author Mark Hall
* @version $Revision$
*/
public interface NumberOfClustersRequestable {

  /**
   * Set the number of clusters to generate.
   *
   * @param numClusters the number of clusters to generate
   * @exception Exception if the requested number of
   *              clusters is inappropriate
   */
  void setNumClusters(int numClusters) throws Exception;
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/clusterers/RandomizableClusterer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* RandomizableClusterer.java
* Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.clusterers;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.Randomizable;
import weka.core.Utils;
/**
* Abstract utility class for handling settings common to randomizable
* clusterers.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public abstract class RandomizableClusterer extends AbstractClusterer implements
  OptionHandler, Randomizable {

  /** for serialization */
  private static final long serialVersionUID = -4819590778152242745L;

  /** the default seed value */
  protected int m_SeedDefault = 1;

  /** The random number seed. */
  protected int m_Seed = m_SeedDefault;

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> options = new Vector<Option>();
    options.add(new Option("\tRandom number seed.\n" + "\t(default "
      + m_SeedDefault + ")", "S", 1, "-S <num>"));
    // append the options of the superclass
    options.addAll(Collections.list(super.listOptions()));
    return options.elements();
  }

  /**
   * Parses the given list of options: recognises -S for the random seed and
   * hands every remaining option to the superclass.
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  @Override
  public void setOptions(String[] options) throws Exception {
    String seedString = Utils.getOption('S', options);
    if (seedString.length() == 0) {
      setSeed(m_SeedDefault);
    } else {
      setSeed(Integer.parseInt(seedString));
    }
    super.setOptions(options);
  }

  /**
   * Gets the current settings of the clusterer.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {
    Vector<String> options = new Vector<String>();
    options.add("-S");
    options.add("" + getSeed());
    Collections.addAll(options, super.getOptions());
    return options.toArray(new String[options.size()]);
  }

  /**
   * Returns the tip text for the seed property.
   *
   * @return tip text suitable for displaying in the explorer/experimenter gui
   */
  public String seedTipText() {
    return "The random number seed to be used.";
  }

  /**
   * Set the seed for random number generation.
   *
   * @param value the seed to use
   */
  @Override
  public void setSeed(int value) {
    m_Seed = value;
  }

  /**
   * Gets the seed used for random number generation.
   *
   * @return the seed for the random number generation
   */
  @Override
  public int getSeed() {
    return m_Seed;
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/clusterers/RandomizableDensityBasedClusterer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* RandomizableDensityBasedClusterer.java
* Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.clusterers;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.Randomizable;
import weka.core.Utils;
/**
* Abstract utility class for handling settings common to randomizable
* clusterers.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public abstract class RandomizableDensityBasedClusterer extends
AbstractDensityBasedClusterer implements OptionHandler, Randomizable {
/** for serialization */
private static final long serialVersionUID = -5325270357918932849L;
/** the default seed value */
protected int m_SeedDefault = 1;
/** The random number seed. */
protected int m_Seed = m_SeedDefault;
/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 */
@Override
public Enumeration<Option> listOptions() {
  Vector<Option> options = new Vector<Option>();
  options.add(new Option("\tRandom number seed.\n" + "\t(default "
    + m_SeedDefault + ")", "S", 1, "-S <num>"));
  options.addAll(Collections.list(super.listOptions()));
  return options.elements();
}
/**
 * Parses a given list of options. Valid options are:
 * <p>
 *
 * @param options the list of options as an array of strings
 * @throws Exception if an option is not supported
 */
@Override
public void setOptions(String[] options) throws Exception {
  String seedString = Utils.getOption('S', options);
  if (seedString.isEmpty()) {
    setSeed(m_SeedDefault);
  } else {
    setSeed(Integer.parseInt(seedString));
  }
  super.setOptions(options);
}
/**
 * Gets the current settings of the clusterer.
 *
 * @return an array of strings suitable for passing to setOptions
 */
@Override
public String[] getOptions() {
  Vector<String> options = new Vector<String>();
  options.add("-S");
  options.add(Integer.toString(getSeed()));
  Collections.addAll(options, super.getOptions());
  return options.toArray(new String[0]);
}
/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String seedTipText() {
  return "The random number seed to be used.";
}
/**
 * Sets the seed used for random number generation.
 *
 * @param seed the seed to use
 */
@Override
public void setSeed(int seed) {
  m_Seed = seed;
}
/**
 * Gets the seed used for random number generation.
 *
 * @return the seed for the random number generation
 */
@Override
public int getSeed() {
  return m_Seed;
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/clusterers/RandomizableSingleClustererEnhancer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* RandomizableSingleClustererEnhancer.java
* Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.clusterers;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.Randomizable;
import weka.core.Utils;
/**
* Abstract utility class for handling settings common to randomizable
* clusterers.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public abstract class RandomizableSingleClustererEnhancer extends
AbstractClusterer implements OptionHandler, Randomizable {
/** for serialization */
private static final long serialVersionUID = -644847037106316249L;
/** the default seed value */
protected int m_SeedDefault = 1;
/** The random number seed. */
protected int m_Seed = m_SeedDefault;
/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 */
@Override
public Enumeration<Option> listOptions() {
  Vector<Option> options = new Vector<Option>();
  options.add(new Option("\tRandom number seed.\n" + "\t(default "
    + m_SeedDefault + ")", "S", 1, "-S <num>"));
  options.addAll(Collections.list(super.listOptions()));
  return options.elements();
}
/**
 * Parses a given list of options. Valid options are:
 * <p>
 *
 * @param options the list of options as an array of strings
 * @throws Exception if an option is not supported
 */
@Override
public void setOptions(String[] options) throws Exception {
  String seedString = Utils.getOption('S', options);
  if (seedString.isEmpty()) {
    setSeed(m_SeedDefault);
  } else {
    setSeed(Integer.parseInt(seedString));
  }
  super.setOptions(options);
}
/**
 * Gets the current settings of the clusterer.
 *
 * @return an array of strings suitable for passing to setOptions
 */
@Override
public String[] getOptions() {
  Vector<String> options = new Vector<String>();
  options.add("-S");
  options.add(Integer.toString(getSeed()));
  Collections.addAll(options, super.getOptions());
  return options.toArray(new String[0]);
}
/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String seedTipText() {
  return "The random number seed to be used.";
}
/**
 * Sets the seed used for random number generation.
 *
 * @param seed the seed to use
 */
@Override
public void setSeed(int seed) {
  m_Seed = seed;
}
/**
 * Gets the seed used for random number generation.
 *
 * @return the seed for the random number generation
 */
@Override
public int getSeed() {
  return m_Seed;
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/clusterers/SimpleKMeans.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* SimpleKMeans.java
* Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.clusterers;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Random;
import java.util.Vector;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import weka.classifiers.rules.DecisionTableHashKey;
import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.DenseInstance;
import weka.core.DistanceFunction;
import weka.core.EuclideanDistance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.ManhattanDistance;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.SelectedTag;
import weka.core.Tag;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.WeightedInstancesHandler;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.ReplaceMissingValues;
/**
* <!-- globalinfo-start --> Cluster data using the k means algorithm. Can use
* either the Euclidean distance (default) or the Manhattan distance. If the
* Manhattan distance is used, then centroids are computed as the component-wise
* median rather than mean. For more information see:<br/>
* <br/>
* D. Arthur, S. Vassilvitskii: k-means++: the advantages of carefull seeding.
* In: Proceedings of the eighteenth annual ACM-SIAM symposium on Discrete
* algorithms, 1027-1035, 2007.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- technical-bibtex-start --> BibTeX:
*
* <pre>
* @inproceedings{Arthur2007,
* author = {D. Arthur and S. Vassilvitskii},
* booktitle = {Proceedings of the eighteenth annual ACM-SIAM symposium on Discrete algorithms},
* pages = {1027-1035},
* title = {k-means++: the advantages of carefull seeding},
* year = {2007}
* }
* </pre>
* <p/>
* <!-- technical-bibtex-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -N <num>
* Number of clusters.
* (default 2).
* </pre>
*
* <pre>
* -init
* Initialization method to use.
* 0 = random, 1 = k-means++, 2 = canopy, 3 = farthest first.
* (default = 0)
* </pre>
*
* <pre>
* -C
* Use canopies to reduce the number of distance calculations.
* </pre>
*
* <pre>
* -max-candidates <num>
* Maximum number of candidate canopies to retain in memory
* at any one time when using canopy clustering.
* The T2 distance, together with the data characteristics,
* determines how many candidate canopies are formed before
* periodic and final pruning are performed, which might result
* in excess memory consumption. This setting avoids large numbers
* of candidate canopies consuming memory. (default = 100)
* </pre>
*
* <pre>
* -periodic-pruning <num>
* How often to prune low density canopies when using canopy clustering.
* (default = every 10,000 training instances)
* </pre>
*
* <pre>
* -min-density
* Minimum canopy density, when using canopy clustering, below which
* a canopy will be pruned during periodic pruning. (default = 2 instances)
* </pre>
*
* <pre>
* -t2
* The T2 distance to use when using canopy clustering. Values < 0 indicate that
* a heuristic based on attribute std. deviation should be used to set this.
* (default = -1.0)
* </pre>
*
* <pre>
* -t1
* The T1 distance to use when using canopy clustering. A value < 0 is taken as a
* positive multiplier for T2. (default = -1.5)
* </pre>
*
* <pre>
* -V
* Display std. deviations for centroids.
* </pre>
*
* <pre>
* -M
* Don't replace missing values with mean/mode.
* </pre>
*
* <pre>
* -A <classname and options>
* Distance function to use.
* (default: weka.core.EuclideanDistance)
* </pre>
*
* <pre>
* -I <num>
* Maximum number of iterations.
* </pre>
*
* <pre>
* -O
* Preserve order of instances.
* </pre>
*
* <pre>
* -fast
* Enables faster distance calculations, using cut-off values.
* Disables the calculation/output of squared errors/distances.
* </pre>
*
* <pre>
* -num-slots <num>
* Number of execution slots.
* (default 1 - i.e. no parallelism)
* </pre>
*
* <pre>
* -S <num>
* Random number seed.
* (default 10)
* </pre>
*
* <pre>
* -output-debug-info
* If set, clusterer is run in debug mode and
* may output additional info to the console
* </pre>
*
* <pre>
* -do-not-check-capabilities
* If set, clusterer capabilities are not checked before clusterer is built
* (use with caution).
* </pre>
*
* <!-- options-end -->
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
* @see RandomizableClusterer
*/
public class SimpleKMeans extends RandomizableClusterer implements NumberOfClustersRequestable, WeightedInstancesHandler, TechnicalInformationHandler {
/** for serialization. */
static final long serialVersionUID = -3235809600124455376L;
/**
 * replace missing values in training instances.
 */
protected ReplaceMissingValues m_ReplaceMissingFilter;
/**
 * number of clusters to generate.
 */
protected int m_NumClusters = 2;
/**
 * Holds the initial start points, as supplied by the initialization method
 * used
 */
protected Instances m_initialStartPoints;
/**
 * holds the cluster centroids.
 */
protected Instances m_ClusterCentroids;
/**
 * Holds the standard deviations of the numeric attributes in each cluster.
 */
protected Instances m_ClusterStdDevs;
/**
 * For each cluster, holds the frequency counts for the values of each nominal
 * attribute.
 */
protected double[][][] m_ClusterNominalCounts;
/** For each cluster and attribute, the weight of missing values seen. */
protected double[][] m_ClusterMissingCounts;
/**
 * Stats on the full data set, for comparison purposes. For a numeric
 * attribute the value is the mean (Euclidean distance) or the median
 * (Manhattan distance); for a nominal attribute the mode is stored.
 */
protected double[] m_FullMeansOrMediansOrModes;
/** Standard deviations of numeric attributes over the full data set. */
protected double[] m_FullStdDevs;
/** Nominal value counts over the full data set, per attribute. */
protected double[][] m_FullNominalCounts;
/** Missing-value counts over the full data set, per attribute. */
protected double[] m_FullMissingCounts;
/**
 * Display standard deviations for numeric atts.
 */
protected boolean m_displayStdDevs;
/**
 * Replace missing values globally?
 */
protected boolean m_dontReplaceMissing = false;
/**
 * The number of instances in each cluster.
 */
protected double[] m_ClusterSizes;
/**
 * Maximum number of iterations to be executed.
 */
protected int m_MaxIterations = 500;
/**
 * Keep track of the number of iterations completed before convergence.
 */
protected int m_Iterations = 0;
/**
 * Holds the squared errors for all clusters.
 */
protected double[] m_squaredErrors;
/** the distance function used. */
protected DistanceFunction m_DistanceFunction = new EuclideanDistance();
/**
 * Preserve order of instances.
 */
protected boolean m_PreserveOrder = false;
/**
 * Assignments obtained.
 */
protected int[] m_Assignments = null;
/** whether to use fast calculation of distances (using a cut-off). */
protected boolean m_FastDistanceCalc = false;
/** Initialization method codes (see TAGS_SELECTION). */
public static final int RANDOM = 0;
public static final int KMEANS_PLUS_PLUS = 1;
public static final int CANOPY = 2;
public static final int FARTHEST_FIRST = 3;
/** Initialization methods */
public static final Tag[] TAGS_SELECTION = { new Tag(RANDOM, "Random"), new Tag(KMEANS_PLUS_PLUS, "k-means++"), new Tag(CANOPY, "Canopy"), new Tag(FARTHEST_FIRST, "Farthest first") };
/** The initialization method to use */
protected int m_initializationMethod = RANDOM;
/**
 * Whether to reduce the number of distance calculations done by k-means by
 * using canopies
 */
protected boolean m_speedUpDistanceCompWithCanopies = false;
/** Canopies that each centroid falls into (determined by T1 radius) */
protected List<long[]> m_centroidCanopyAssignments;
/** Canopies that each training instance falls into (determined by T1 radius) */
protected List<long[]> m_dataPointCanopyAssignments;
/** The canopy clusterer (if being used) */
protected Canopy m_canopyClusters;
/**
 * The maximum number of candidate canopies to hold in memory at any one time
 * (if using canopy clustering)
 */
protected int m_maxCanopyCandidates = 100;
/**
 * Prune low-density candidate canopies after every x instances have been seen
 * (if using canopy clustering)
 */
protected int m_periodicPruningRate = 10000;
/**
 * The minimum cluster density (according to T2 distance) allowed. Used when
 * periodically pruning candidate canopies (if using canopy clustering)
 */
protected double m_minClusterDensity = 2;
/** The t2 radius to pass through to Canopy */
protected double m_t2 = Canopy.DEFAULT_T2;
/** The t1 radius to pass through to Canopy */
protected double m_t1 = Canopy.DEFAULT_T1;
/** Number of threads to run */
protected int m_executionSlots = 1;
/** For parallel execution mode */
protected transient ExecutorService m_executorPool;
/**
 * Creates a new SimpleKMeans instance. Overrides the inherited default
 * random seed (1) with this clusterer's default of 10.
 */
public SimpleKMeans() {
  super();
  m_SeedDefault = 10;
  setSeed(m_SeedDefault);
}
/**
 * (Re)starts the pool of execution threads, first shutting down any pool
 * that is already running. Pool size is m_executionSlots.
 */
protected void startExecutorPool() {
  ExecutorService current = m_executorPool;
  if (current != null) {
    // Interrupt any tasks still running from a previous build.
    current.shutdownNow();
  }
  m_executorPool = Executors.newFixedThreadPool(m_executionSlots);
}
/** Count of completed parallel tasks (not updated by the code visible here — TODO confirm usage). */
protected int m_completed;
/** Count of failed parallel tasks (not updated by the code visible here — TODO confirm usage). */
protected int m_failed;
/**
 * Returns an instance of a TechnicalInformation object, containing detailed
 * information about the technical background of this class, e.g., paper
 * reference or book this class is based on.
 *
 * @return the technical information about this class
 */
@Override
public TechnicalInformation getTechnicalInformation() {
  TechnicalInformation info = new TechnicalInformation(Type.INPROCEEDINGS);
  info.setValue(Field.AUTHOR, "D. Arthur and S. Vassilvitskii");
  info.setValue(Field.TITLE, "k-means++: the advantages of carefull seeding");
  info.setValue(Field.BOOKTITLE, "Proceedings of the eighteenth annual " + "ACM-SIAM symposium on Discrete algorithms");
  info.setValue(Field.YEAR, "2007");
  info.setValue(Field.PAGES, "1027-1035");
  return info;
}
/**
 * Returns a string describing this clusterer.
 *
 * @return a description of the evaluator suitable for displaying in the
 *         explorer/experimenter gui
 */
public String globalInfo() {
  String description = "Cluster data using the k means algorithm. Can use either "
    + "the Euclidean distance (default) or the Manhattan distance."
    + " If the Manhattan distance is used, then centroids are computed "
    + "as the component-wise median rather than mean."
    + " For more information see:\n\n";
  return description + getTechnicalInformation().toString();
}
/**
 * Returns default capabilities of the clusterer.
 *
 * @return the capabilities of this clusterer
 */
@Override
public Capabilities getCapabilities() {
  Capabilities capabilities = super.getCapabilities();
  capabilities.disableAll();
  // no class attribute required for clustering
  capabilities.enable(Capability.NO_CLASS);
  // supported attribute types
  capabilities.enable(Capability.NOMINAL_ATTRIBUTES);
  capabilities.enable(Capability.NUMERIC_ATTRIBUTES);
  capabilities.enable(Capability.MISSING_VALUES);
  return capabilities;
}
/**
 * Callable that recomputes the centroid of a single cluster, used when
 * centroids are updated in parallel. The result is the new centroid's
 * attribute values, which the caller (launchMoveCentroids) adds to
 * m_ClusterCentroids.
 */
private class KMeansComputeCentroidTask implements Callable<double[]> {
// Instances currently assigned to the cluster whose centroid is computed.
protected Instances m_cluster;
// Index of the centroid to (re)compute.
protected int m_centroidIndex;
public KMeansComputeCentroidTask(final int centroidIndex, final Instances cluster) {
this.m_cluster = cluster;
this.m_centroidIndex = centroidIndex;
}
@Override
public double[] call() throws InterruptedException {
// NOTE(review): the (true, false) flags presumably mean "update nominal/missing
// stats" and "do not add to m_ClusterCentroids directly" — the caller adds the
// returned values itself. Confirm against moveCentroid's definition.
return SimpleKMeans.this.moveCentroid(this.m_centroidIndex, this.m_cluster, true, false);
}
}
/**
 * Launches one centroid-recomputation task per non-empty cluster on the
 * executor pool and appends the resulting centroids to m_ClusterCentroids.
 *
 * @param clusters the instances belonging to each cluster
 * @return the number of clusters that contain no instances
 */
protected int launchMoveCentroids(final Instances[] clusters) {
  int emptyCount = 0;
  List<Future<double[]>> futures = new ArrayList<Future<double[]>>();
  for (int c = 0; c < m_NumClusters; c++) {
    if (clusters[c].numInstances() > 0) {
      futures.add(m_executorPool.submit(new KMeansComputeCentroidTask(c, clusters[c])));
    } else {
      emptyCount++;
    }
  }
  try {
    // Collect the recomputed centroids in submission order.
    for (Future<double[]> centroid : futures) {
      m_ClusterCentroids.add(new DenseInstance(1.0, centroid.get()));
    }
  } catch (Exception ex) {
    ex.printStackTrace();
  }
  return emptyCount;
}
/**
 * Callable that assigns a contiguous range [m_start, m_end) of instances to
 * their closest cluster centroid. Returns true if no assignment in the range
 * changed (i.e. this chunk has converged).
 */
private class KMeansClusterTask implements Callable<Boolean> {
/** First instance index (inclusive) handled by this task. */
protected int m_start;
/** Last instance index (exclusive) handled by this task. */
protected int m_end;
/** The instances to assign. */
protected Instances m_inst;
/** Shared assignment array, updated in place for indices [m_start, m_end). */
protected int[] m_clusterAssignments;
public KMeansClusterTask(final Instances inst, final int start, final int end, final int[] clusterAssignments) {
this.m_start = start;
this.m_end = end;
this.m_inst = inst;
this.m_clusterAssignments = clusterAssignments;
}
@Override
public Boolean call() {
boolean converged = true;
for (int i = this.m_start; i < this.m_end; i++) {
Instance toCluster = this.m_inst.instance(i);
long[] instanceCanopies = SimpleKMeans.this.m_speedUpDistanceCompWithCanopies ? SimpleKMeans.this.m_dataPointCanopyAssignments.get(i) : null;
int newC = this.clusterInstance(toCluster, instanceCanopies);
if (newC != this.m_clusterAssignments[i]) {
converged = false;
}
this.m_clusterAssignments[i] = newC;
}
return converged;
}
/**
 * Finds the closest centroid to the given instance, optionally skipping
 * centroids that share no canopy with the instance.
 *
 * @param inst the instance to assign
 * @param instanceCanopies the canopies the instance falls into (may be null)
 * @return the index of the closest cluster centroid
 */
protected int clusterInstance(final Instance inst, final long[] instanceCanopies) {
// Bug fix: start from positive infinity rather than Integer.MAX_VALUE.
// Integer.MAX_VALUE was used as the initial cut-off passed to the distance
// function; any instance farther than 2^31-1 from every candidate centroid
// would never update bestCluster and would silently be assigned to cluster 0.
double minDist = Double.POSITIVE_INFINITY;
int bestCluster = 0;
for (int i = 0; i < SimpleKMeans.this.m_NumClusters; i++) {
double dist;
if (SimpleKMeans.this.m_speedUpDistanceCompWithCanopies && instanceCanopies != null && instanceCanopies.length > 0) {
try {
if (!Canopy.nonEmptyCanopySetIntersection(SimpleKMeans.this.m_centroidCanopyAssignments.get(i), instanceCanopies)) {
// no shared canopy -> skip the (expensive) distance calculation
continue;
}
} catch (Exception ex) {
ex.printStackTrace();
}
}
// minDist acts as a cut-off: the distance function may return early
// once the partial distance exceeds it.
dist = SimpleKMeans.this.m_DistanceFunction.distance(inst, SimpleKMeans.this.m_ClusterCentroids.instance(i), minDist);
if (dist < minDist) {
minDist = dist;
bestCluster = i;
}
}
return bestCluster;
}
}
/**
 * Splits the data into m_executionSlots contiguous chunks and assigns each
 * chunk to clusters on a separate executor task.
 *
 * @param insts the instances to be clustered
 * @param clusterAssignments the array of cluster assignments, updated in place
 * @return true if k means has converged (no assignment changed)
 * @throws Exception if a problem occurs
 */
protected boolean launchAssignToClusters(final Instances insts, final int[] clusterAssignments) throws Exception {
  int chunkSize = insts.numInstances() / m_executionSlots;
  List<Future<Boolean>> futures = new ArrayList<Future<Boolean>>();
  for (int slot = 0; slot < m_executionSlots; slot++) {
    int start = slot * chunkSize;
    // The last slot picks up any remainder instances.
    int end = (slot == m_executionSlots - 1) ? insts.numInstances() : start + chunkSize;
    futures.add(m_executorPool.submit(new KMeansClusterTask(insts, start, end, clusterAssignments)));
  }
  boolean converged = true;
  for (Future<Boolean> chunkConverged : futures) {
    // Always call get() so every task is awaited, even after non-convergence.
    converged = chunkConverged.get() && converged;
  }
  return converged;
}
/**
 * Generates a clusterer. Has to initialize all fields of the clusterer that
 * are not being set via options.
 *
 * Outline: (1) copy the data and optionally replace missing values;
 * (2) compute full-data statistics for later display; (3) pick initial
 * centroids by the configured method; (4) iterate assign/recompute until
 * converged or m_MaxIterations, dropping empty clusters; (5) compute
 * squared errors, std. deviations and cluster sizes.
 *
 * @param data set of instances serving as training data
 * @throws Exception if the clusterer has not been generated successfully
 */
@Override
public void buildClusterer(final Instances data) throws Exception {
this.m_canopyClusters = null;
// can clusterer handle the data?
this.getCapabilities().testWithFail(data);
this.m_Iterations = 0;
this.m_ReplaceMissingFilter = new ReplaceMissingValues();
// Work on a copy with no class attribute set.
Instances instances = new Instances(data);
instances.setClassIndex(-1);
if (!this.m_dontReplaceMissing) {
this.m_ReplaceMissingFilter.setInputFormat(instances);
instances = Filter.useFilter(instances, this.m_ReplaceMissingFilter);
}
this.m_ClusterNominalCounts = new double[this.m_NumClusters][instances.numAttributes()][];
this.m_ClusterMissingCounts = new double[this.m_NumClusters][instances.numAttributes()];
if (this.m_displayStdDevs) {
this.m_FullStdDevs = instances.variances();
}
// Full-data stats are computed via cluster slot 0, then copied out.
this.m_FullMeansOrMediansOrModes = this.moveCentroid(0, instances, true, false);
this.m_FullMissingCounts = this.m_ClusterMissingCounts[0];
this.m_FullNominalCounts = this.m_ClusterNominalCounts[0];
double sumOfWeights = instances.sumOfWeights();
for (int i = 0; i < instances.numAttributes(); i++) {
if (instances.attribute(i).isNumeric()) {
if (this.m_displayStdDevs) {
this.m_FullStdDevs[i] = Math.sqrt(this.m_FullStdDevs[i]);
}
if (this.m_FullMissingCounts[i] == sumOfWeights) {
this.m_FullMeansOrMediansOrModes[i] = Double.NaN; // mark missing as mean
}
} else {
if (this.m_FullMissingCounts[i] > this.m_FullNominalCounts[i][Utils.maxIndex(this.m_FullNominalCounts[i])]) {
this.m_FullMeansOrMediansOrModes[i] = -1; // mark missing as most common
// value
}
}
}
this.m_ClusterCentroids = new Instances(instances, this.m_NumClusters);
int[] clusterAssignments = new int[instances.numInstances()];
if (this.m_PreserveOrder) {
this.m_Assignments = clusterAssignments;
}
this.m_DistanceFunction.setInstances(instances);
Random RandomO = new Random(this.getSeed());
int instIndex;
HashMap<DecisionTableHashKey, Integer> initC = new HashMap<DecisionTableHashKey, Integer>();
DecisionTableHashKey hk = null;
// When preserving order, initialize from a copy so the original order survives.
Instances initInstances = null;
if (this.m_PreserveOrder) {
initInstances = new Instances(instances);
} else {
initInstances = instances;
}
// Optionally build a canopy clusterer used to prune distance calculations.
if (this.m_speedUpDistanceCompWithCanopies) {
this.m_canopyClusters = new Canopy();
this.m_canopyClusters.setNumClusters(this.m_NumClusters);
this.m_canopyClusters.setSeed(this.getSeed());
this.m_canopyClusters.setT2(this.getCanopyT2());
this.m_canopyClusters.setT1(this.getCanopyT1());
this.m_canopyClusters.setMaxNumCandidateCanopiesToHoldInMemory(this.getCanopyMaxNumCanopiesToHoldInMemory());
this.m_canopyClusters.setPeriodicPruningRate(this.getCanopyPeriodicPruningRate());
this.m_canopyClusters.setMinimumCanopyDensity(this.getCanopyMinimumCanopyDensity());
this.m_canopyClusters.setDebug(this.getDebug());
this.m_canopyClusters.buildClusterer(initInstances);
// System.err.println(m_canopyClusters);
this.m_centroidCanopyAssignments = new ArrayList<long[]>();
this.m_dataPointCanopyAssignments = new ArrayList<long[]>();
}
// Choose the initial centroids according to the selected initialization method.
if (this.m_initializationMethod == KMEANS_PLUS_PLUS) {
this.kMeansPlusPlusInit(initInstances);
this.m_initialStartPoints = new Instances(this.m_ClusterCentroids);
} else if (this.m_initializationMethod == CANOPY) {
this.canopyInit(initInstances);
this.m_initialStartPoints = new Instances(this.m_canopyClusters.getCanopies());
} else if (this.m_initializationMethod == FARTHEST_FIRST) {
this.farthestFirstInit(initInstances);
this.m_initialStartPoints = new Instances(this.m_ClusterCentroids);
} else {
// random: pick distinct instances (by hash key) as start points
for (int j = initInstances.numInstances() - 1; j >= 0; j--) {
instIndex = RandomO.nextInt(j + 1);
hk = new DecisionTableHashKey(initInstances.instance(instIndex), initInstances.numAttributes(), true);
if (!initC.containsKey(hk)) {
this.m_ClusterCentroids.add(initInstances.instance(instIndex));
initC.put(hk, null);
}
initInstances.swap(j, instIndex);
if (this.m_ClusterCentroids.numInstances() == this.m_NumClusters) {
break;
}
}
this.m_initialStartPoints = new Instances(this.m_ClusterCentroids);
}
if (this.m_speedUpDistanceCompWithCanopies) {
// assign canopies to training data
for (int i = 0; i < instances.numInstances(); i++) {
this.m_dataPointCanopyAssignments.add(this.m_canopyClusters.assignCanopies(instances.instance(i)));
}
}
// Initialization may yield fewer distinct centroids than requested.
this.m_NumClusters = this.m_ClusterCentroids.numInstances();
// removing reference
initInstances = null;
int i;
boolean converged = false;
int emptyClusterCount;
Instances[] tempI = new Instances[this.m_NumClusters];
this.m_squaredErrors = new double[this.m_NumClusters];
this.m_ClusterNominalCounts = new double[this.m_NumClusters][instances.numAttributes()][0];
this.m_ClusterMissingCounts = new double[this.m_NumClusters][instances.numAttributes()];
this.startExecutorPool();
// Main k-means loop: assign instances, recompute centroids, until converged.
while (!converged) {
if (this.m_speedUpDistanceCompWithCanopies) {
// re-assign canopies to the current cluster centers
this.m_centroidCanopyAssignments.clear();
for (int kk = 0; kk < this.m_ClusterCentroids.numInstances(); kk++) {
this.m_centroidCanopyAssignments.add(this.m_canopyClusters.assignCanopies(this.m_ClusterCentroids.instance(kk)));
}
}
emptyClusterCount = 0;
this.m_Iterations++;
converged = true;
// Assignment step: sequential for small data / one slot, parallel otherwise.
if (this.m_executionSlots <= 1 || instances.numInstances() < 2 * this.m_executionSlots) {
for (i = 0; i < instances.numInstances(); i++) {
Instance toCluster = instances.instance(i);
int newC = this.clusterProcessedInstance(toCluster, false, true, this.m_speedUpDistanceCompWithCanopies ? this.m_dataPointCanopyAssignments.get(i) : null);
if (newC != clusterAssignments[i]) {
converged = false;
}
clusterAssignments[i] = newC;
}
} else {
converged = this.launchAssignToClusters(instances, clusterAssignments);
}
// update centroids
this.m_ClusterCentroids = new Instances(instances, this.m_NumClusters);
for (i = 0; i < this.m_NumClusters; i++) {
tempI[i] = new Instances(instances, 0);
}
for (i = 0; i < instances.numInstances(); i++) {
tempI[clusterAssignments[i]].add(instances.instance(i));
}
if (this.m_executionSlots <= 1 || instances.numInstances() < 2 * this.m_executionSlots) {
for (i = 0; i < this.m_NumClusters; i++) {
if (tempI[i].numInstances() == 0) {
// empty cluster
emptyClusterCount++;
} else {
this.moveCentroid(i, tempI[i], true, true);
}
}
} else {
emptyClusterCount = this.launchMoveCentroids(tempI);
}
if (this.m_Iterations == this.m_MaxIterations) {
converged = true;
}
// Drop empty clusters; compact bookkeeping arrays if this was the last pass.
if (emptyClusterCount > 0) {
this.m_NumClusters -= emptyClusterCount;
if (converged) {
Instances[] t = new Instances[this.m_NumClusters];
int index = 0;
for (int k = 0; k < tempI.length; k++) {
if (tempI[k].numInstances() > 0) {
t[index] = tempI[k];
for (i = 0; i < tempI[k].numAttributes(); i++) {
this.m_ClusterNominalCounts[index][i] = this.m_ClusterNominalCounts[k][i];
}
index++;
}
}
tempI = t;
} else {
tempI = new Instances[this.m_NumClusters];
}
}
if (!converged) {
this.m_ClusterNominalCounts = new double[this.m_NumClusters][instances.numAttributes()][0];
}
}
// calculate errors
if (!this.m_FastDistanceCalc) {
for (i = 0; i < instances.numInstances(); i++) {
this.clusterProcessedInstance(instances.instance(i), true, false, null);
}
}
if (this.m_displayStdDevs) {
this.m_ClusterStdDevs = new Instances(instances, this.m_NumClusters);
}
this.m_ClusterSizes = new double[this.m_NumClusters];
for (i = 0; i < this.m_NumClusters; i++) {
if (this.m_displayStdDevs) {
double[] vals2 = tempI[i].variances();
for (int j = 0; j < instances.numAttributes(); j++) {
if (instances.attribute(j).isNumeric()) {
vals2[j] = Math.sqrt(vals2[j]);
} else {
vals2[j] = Utils.missingValue();
}
}
this.m_ClusterStdDevs.add(new DenseInstance(1.0, vals2));
}
this.m_ClusterSizes[i] = tempI[i].sumOfWeights();
}
this.m_executorPool.shutdown();
// save memory!
this.m_DistanceFunction.clean();
}
/**
 * Initializes the cluster centroids with the canopy centers of the Canopy
 * clustering method, building a Canopy clusterer first if none exists yet.
 *
 * @param data the training data
 * @throws Exception if a problem occurs
 */
protected void canopyInit(final Instances data) throws Exception {
  if (m_canopyClusters == null) {
    // Build a canopy clusterer configured from this clusterer's settings.
    Canopy canopy = new Canopy();
    m_canopyClusters = canopy;
    canopy.setNumClusters(m_NumClusters);
    canopy.setSeed(getSeed());
    canopy.setT2(getCanopyT2());
    canopy.setT1(getCanopyT1());
    canopy.setMaxNumCandidateCanopiesToHoldInMemory(getCanopyMaxNumCanopiesToHoldInMemory());
    canopy.setPeriodicPruningRate(getCanopyPeriodicPruningRate());
    canopy.setMinimumCanopyDensity(getCanopyMinimumCanopyDensity());
    canopy.setDebug(getDebug());
    canopy.buildClusterer(data);
  }
  m_ClusterCentroids = m_canopyClusters.getCanopies();
}
/**
 * Initialize the cluster centroids using the farthest-first traversal method.
 *
 * @param data the training data
 * @throws Exception if building the FarthestFirst clusterer fails
 */
protected void farthestFirstInit(final Instances data) throws Exception {
  final FarthestFirst farthestFirst = new FarthestFirst();
  farthestFirst.setNumClusters(this.m_NumClusters);
  farthestFirst.buildClusterer(data);
  // adopt the centers chosen by the farthest-first traversal
  this.m_ClusterCentroids = farthestFirst.getClusterCentroids();
}
/**
 * Initialize the cluster centroids using the k-means++ method: the first
 * center is chosen uniformly at random, each subsequent center is sampled
 * with probability proportional to its distance from the nearest center
 * chosen so far (distances maintained incrementally in {@code distances}).
 *
 * @param data the training data
 * @throws Exception if a problem occurs
 */
protected void kMeansPlusPlusInit(final Instances data) throws Exception {
Random randomO = new Random(this.getSeed());
// used as a set: keys are the centers chosen so far, values are ignored
HashMap<DecisionTableHashKey, String> initC = new HashMap<DecisionTableHashKey, String>();
// choose initial center uniformly at random
int index = randomO.nextInt(data.numInstances());
this.m_ClusterCentroids.add(data.instance(index));
DecisionTableHashKey hk = new DecisionTableHashKey(data.instance(index), data.numAttributes(), true);
initC.put(hk, null);
int iteration = 0;
int remainingInstances = data.numInstances() - 1;
if (this.m_NumClusters > 1) {
// proceed with selecting the rest
// distances to the initial randomly chosen center
double[] distances = new double[data.numInstances()];
double[] cumProbs = new double[data.numInstances()];
for (int i = 0; i < data.numInstances(); i++) {
distances[i] = this.m_DistanceFunction.distance(data.instance(i), this.m_ClusterCentroids.instance(iteration));
}
// now choose the remaining cluster centers
for (int i = 1; i < this.m_NumClusters; i++) {
// distances converted to probabilities
double[] weights = new double[data.numInstances()];
System.arraycopy(distances, 0, weights, 0, distances.length);
Utils.normalize(weights);
// build the cumulative distribution for inverse-transform sampling
double sumOfProbs = 0;
for (int k = 0; k < data.numInstances(); k++) {
sumOfProbs += weights[k];
cumProbs[k] = sumOfProbs;
}
cumProbs[data.numInstances() - 1] = 1.0; // make sure there are no
// rounding issues
// choose a random instance
double prob = randomO.nextDouble();
for (int k = 0; k < cumProbs.length; k++) {
if (prob < cumProbs[k]) {
Instance candidateCenter = data.instance(k);
hk = new DecisionTableHashKey(candidateCenter, data.numAttributes(), true);
if (!initC.containsKey(hk)) {
initC.put(hk, null);
this.m_ClusterCentroids.add(candidateCenter);
} else {
// we shouldn't get here because any instance that is a duplicate
// of
// an already chosen cluster center should have zero distance (and
// hence
// zero probability of getting chosen) to that center.
System.err.println("We shouldn't get here....");
}
remainingInstances--;
break;
}
}
iteration++;
if (remainingInstances == 0) {
break;
}
// prepare to choose the next cluster center.
// check distances against the new cluster center to see if it is closer
for (int k = 0; k < data.numInstances(); k++) {
if (distances[k] > 0) {
double newDist = this.m_DistanceFunction.distance(data.instance(k), this.m_ClusterCentroids.instance(iteration));
if (newDist < distances[k]) {
distances[k] = newDist;
}
}
}
}
}
}
/**
 * Move the centroid to its new coordinates, computed from its members (the
 * objects assigned to the centroid's cluster) and the distance function in
 * use: weighted mean per numeric attribute (median for ManhattanDistance),
 * highest-weight value (mode) per nominal attribute.
 *
 * @param centroidIndex index of the centroid whose coordinates are computed
 * @param members the objects that are assigned to the cluster of this
 *          centroid
 * @param updateClusterInfo if true, update the m_ClusterMissingCounts and
 *          m_ClusterNominalCounts arrays
 * @param addToCentroidInstances if true, append the computed coordinates to
 *          the Instances object holding the centroids
 * @return the centroid coordinates
 * @throws InterruptedException if the computation is interrupted
 */
protected double[] moveCentroid(final int centroidIndex, final Instances members, final boolean updateClusterInfo, final boolean addToCentroidInstances) throws InterruptedException {
  double[] vals = new double[members.numAttributes()];
  double[][] nominalDists = new double[members.numAttributes()][];
  double[] weightMissing = new double[members.numAttributes()];
  double[] weightNonMissing = new double[members.numAttributes()];
  // Quickly calculate some relevant statistics
  for (int j = 0; j < members.numAttributes(); j++) {
    if (members.attribute(j).isNominal()) {
      nominalDists[j] = new double[members.attribute(j).numValues()];
    }
  }
  // accumulate weighted sums (numeric) / per-value weights (nominal)
  for (Instance inst : members) {
    for (int j = 0; j < members.numAttributes(); j++) {
      if (inst.isMissing(j)) {
        weightMissing[j] += inst.weight();
      } else {
        weightNonMissing[j] += inst.weight();
        if (members.attribute(j).isNumeric()) {
          vals[j] += inst.weight() * inst.value(j); // Will be overwritten in Manhattan case
        } else {
          nominalDists[j][(int) inst.value(j)] += inst.weight();
        }
      }
    }
  }
  for (int j = 0; j < members.numAttributes(); j++) {
    if (members.attribute(j).isNumeric()) {
      // numeric: weighted mean of the non-missing values
      if (weightNonMissing[j] > 0) {
        vals[j] /= weightNonMissing[j];
      } else {
        vals[j] = Utils.missingValue();
      }
    } else {
      // nominal: pick the mode (highest-weight value). The missing-vs-mode
      // decision used to be re-evaluated on every loop iteration (yielding
      // the same final value); it is now performed once after the maximum
      // has been found.
      double max = -Double.MAX_VALUE;
      int maxIndex = -1;
      for (int i = 0; i < nominalDists[j].length; i++) {
        if (nominalDists[j][i] > max) {
          max = nominalDists[j][i];
          maxIndex = i;
        }
      }
      if (max < weightMissing[j]) {
        // missing weight outweighs the most frequent value
        vals[j] = Utils.missingValue();
      } else {
        vals[j] = maxIndex;
      }
    }
  }
  if (this.m_DistanceFunction instanceof ManhattanDistance) {
    // Need to replace means by medians
    Instances sortedMembers = null;
    int middle = (members.numInstances() - 1) / 2;
    boolean dataIsEven = ((members.numInstances() % 2) == 0);
    if (this.m_PreserveOrder) {
      sortedMembers = members;
    } else {
      sortedMembers = new Instances(members);
    }
    for (int j = 0; j < members.numAttributes(); j++) {
      if ((weightNonMissing[j] > 0) && members.attribute(j).isNumeric()) {
        // singleton special case
        if (members.numInstances() == 1) {
          vals[j] = members.instance(0).value(j);
        } else {
          vals[j] = sortedMembers.kthSmallestValue(j, middle + 1);
          if (dataIsEven) {
            // even count: median is the mean of the two middle values
            vals[j] = (vals[j] + sortedMembers.kthSmallestValue(j, middle + 2)) / 2;
          }
        }
      }
    }
  }
  if (updateClusterInfo) {
    for (int j = 0; j < members.numAttributes(); j++) {
      this.m_ClusterMissingCounts[centroidIndex][j] = weightMissing[j];
      this.m_ClusterNominalCounts[centroidIndex][j] = nominalDists[j];
    }
  }
  if (addToCentroidInstances) {
    this.m_ClusterCentroids.add(new DenseInstance(1.0, vals));
  }
  return vals;
}
/**
 * Clusters an instance that has been through the filters: returns the index
 * of the centroid closest to it under m_DistanceFunction.
 *
 * @param instance the instance to assign a cluster to
 * @param updateErrors if true, add this instance's distance (squared and
 * weighted for Euclidean) to m_squaredErrors for the winning cluster
 * @param useFastDistCalc whether to use the fast distance calculation (the
 * current best distance is passed as a cut-off so the computation
 * can abort early)
 * @param instanceCanopies the canopies covering the instance to be clustered,
 * or null if not using the option to reduce the number of distance
 * computations via canopies
 * @return a cluster number
 */
private int clusterProcessedInstance(final Instance instance, final boolean updateErrors, final boolean useFastDistCalc, final long[] instanceCanopies) {
double minDist = Integer.MAX_VALUE;
int bestCluster = 0;
for (int i = 0; i < this.m_NumClusters; i++) {
double dist;
if (useFastDistCalc) {
if (this.m_speedUpDistanceCompWithCanopies && instanceCanopies != null && instanceCanopies.length > 0) {
try {
// a centroid sharing no canopy with the instance cannot win;
// skip the distance computation entirely
if (!Canopy.nonEmptyCanopySetIntersection(this.m_centroidCanopyAssignments.get(i), instanceCanopies)) {
continue;
}
} catch (Exception ex) {
ex.printStackTrace();
}
// minDist is the cut-off: computation may abort once it is exceeded
dist = this.m_DistanceFunction.distance(instance, this.m_ClusterCentroids.instance(i), minDist);
} else {
dist = this.m_DistanceFunction.distance(instance, this.m_ClusterCentroids.instance(i), minDist);
}
} else {
dist = this.m_DistanceFunction.distance(instance, this.m_ClusterCentroids.instance(i));
}
if (dist < minDist) {
minDist = dist;
bestCluster = i;
}
}
if (updateErrors) {
if (this.m_DistanceFunction instanceof EuclideanDistance) {
// Euclidean distance to Squared Euclidean distance:
// minDist becomes minDist^2 * instance weight
minDist *= minDist * instance.weight();
}
this.m_squaredErrors[bestCluster] += minDist;
}
return bestCluster;
}
/**
 * Classifies a given instance by assigning it to the closest cluster.
 *
 * @param instance the instance to be assigned to a cluster
 * @return the number of the assigned cluster as an integer if the class is
 *         enumerated, otherwise the predicted value
 * @throws Exception if instance could not be classified successfully
 */
@Override
public int clusterInstance(final Instance instance) throws Exception {
  final Instance processed;
  if (this.m_dontReplaceMissing) {
    // missing values are left untouched by configuration
    processed = instance;
  } else {
    // push the instance through the missing-value replacement filter first
    this.m_ReplaceMissingFilter.input(instance);
    this.m_ReplaceMissingFilter.batchFinished();
    processed = this.m_ReplaceMissingFilter.output();
  }
  return this.clusterProcessedInstance(processed, false, true, null);
}
/**
 * Returns the number of clusters.
 *
 * @return the number of clusters generated for a training dataset.
 * @throws Exception if number of clusters could not be returned successfully
 */
@Override
public int numberOfClusters() throws Exception {
// never actually throws here; the throws clause is mandated by the
// overridden Clusterer method signature
return this.m_NumClusters;
}
/**
 * Returns an enumeration describing the available options, including those
 * of the superclass.
 *
 * @return an enumeration of all the available options.
 */
@Override
public Enumeration<Option> listOptions() {
Vector<Option> result = new Vector<Option>();
// basic clustering options
result.addElement(new Option("\tNumber of clusters.\n" + "\t(default 2).", "N", 1, "-N <num>"));
result.addElement(new Option("\tInitialization method to use.\n\t0 = random, 1 = k-means++, " + "2 = canopy, 3 = farthest first.\n\t(default = 0)", "init", 1, "-init"));
// canopy-related options
result.addElement(new Option("\tUse canopies to reduce the number of distance calculations.", "C", 0, "-C"));
result.addElement(new Option("\tMaximum number of candidate canopies to retain in memory\n\t" + "at any one time when using canopy clustering.\n\t" + "T2 distance plus, data characteristics,\n\t"
+ "will determine how many candidate canopies are formed before\n\t" + "periodic and final pruning are performed, which might result\n\t" + "in exceess memory consumption. This setting avoids large numbers\n\t"
+ "of candidate canopies consuming memory. (default = 100)", "-max-candidates", 1, "-max-candidates <num>"));
result.addElement(new Option("\tHow often to prune low density canopies when using canopy clustering. \n\t" + "(default = every 10,000 training instances)", "periodic-pruning", 1, "-periodic-pruning <num>"));
result.addElement(new Option("\tMinimum canopy density, when using canopy clustering, below which\n\t" + " a canopy will be pruned during periodic pruning. (default = 2 instances)", "min-density", 1, "-min-density"));
result.addElement(
new Option("\tThe T2 distance to use when using canopy clustering. Values < 0 indicate that\n\t" + "a heuristic based on attribute std. deviation should be used to set this.\n\t" + "(default = -1.0)", "t2", 1, "-t2"));
result.addElement(new Option("\tThe T1 distance to use when using canopy clustering. A value < 0 is taken as a\n\t" + "positive multiplier for T2. (default = -1.5)", "t1", 1, "-t1"));
// output / preprocessing options
result.addElement(new Option("\tDisplay std. deviations for centroids.\n", "V", 0, "-V"));
result.addElement(new Option("\tDon't replace missing values with mean/mode.\n", "M", 0, "-M"));
result.add(new Option("\tDistance function to use.\n" + "\t(default: weka.core.EuclideanDistance)", "A", 1, "-A <classname and options>"));
result.add(new Option("\tMaximum number of iterations.\n", "I", 1, "-I <num>"));
result.addElement(new Option("\tPreserve order of instances.\n", "O", 0, "-O"));
result.addElement(new Option("\tEnables faster distance calculations, using cut-off values.\n" + "\tDisables the calculation/output of squared errors/distances.\n", "fast", 0, "-fast"));
result.addElement(new Option("\tNumber of execution slots.\n" + "\t(default 1 - i.e. no parallelism)", "num-slots", 1, "-num-slots <num>"));
// append inherited options
result.addAll(Collections.list(super.listOptions()));
return result.elements();
}
/**
 * Returns the tip text for this property.
 *
 * @return tip text for this property suitable for displaying in the
 * explorer/experimenter gui
 */
public String numClustersTipText() {
return "set number of clusters";
}
/**
 * set the number of clusters to generate.
 *
 * @param n the number of clusters to generate
 * @throws Exception if the number of clusters is not strictly positive
 */
@Override
public void setNumClusters(final int n) throws Exception {
if (n <= 0) {
throw new Exception("Number of clusters must be > 0");
}
this.m_NumClusters = n;
}
/**
 * gets the number of clusters to generate.
 *
 * @return the number of clusters to generate
 */
public int getNumClusters() {
return this.m_NumClusters;
}
/**
 * Returns the tip text for this property.
 *
 * @return tip text for this property suitable for displaying in the
 * explorer/experimenter gui
 */
public String initializationMethodTipText() {
return "The initialization method to use. Random, k-means++, Canopy or farthest first";
}
/**
 * Set the initialization method to use
 *
 * @param method the initialization method to use; ignored unless the tag is
 * drawn from this class's TAGS_SELECTION set
 */
public void setInitializationMethod(final SelectedTag method) {
// reference comparison is intentional: only accept tags belonging to
// this class's own TAGS_SELECTION array
if (method.getTags() == TAGS_SELECTION) {
this.m_initializationMethod = method.getSelectedTag().getID();
}
}
/**
 * Get the initialization method to use
 *
 * @return method the initialization method to use
 */
public SelectedTag getInitializationMethod() {
return new SelectedTag(this.m_initializationMethod, TAGS_SELECTION);
}
/**
 * Returns the tip text for this property.
 *
 * @return tip text for this property suitable for displaying in the
 * explorer/experimenter gui
 */
public String reduceNumberOfDistanceCalcsViaCanopiesTipText() {
return "Use canopy clustering to reduce the number of distance calculations " + "performed by k-means";
}
/**
 * Set whether to use canopies to reduce the number of distance computations
 * required
 *
 * @param c true if canopies are to be used to reduce the number of distance
 * computations
 */
public void setReduceNumberOfDistanceCalcsViaCanopies(final boolean c) {
this.m_speedUpDistanceCompWithCanopies = c;
}
/**
 * Get whether to use canopies to reduce the number of distance computations
 * required
 *
 * @return true if canopies are to be used to reduce the number of distance
 * computations
 */
public boolean getReduceNumberOfDistanceCalcsViaCanopies() {
return this.m_speedUpDistanceCompWithCanopies;
}
/**
 * Returns the tip text for this property.
 *
 * @return tip text for this property suitable for displaying in the
 * explorer/experimenter gui
 */
public String canopyPeriodicPruningRateTipText() {
return "If using canopy clustering for initialization and/or speedup " + "this is how often to prune low density canopies during training";
}
/**
 * Set how often to prune low density canopies during training (if using
 * canopy clustering)
 *
 * @param p how often (every p instances) to prune low density canopies
 */
public void setCanopyPeriodicPruningRate(final int p) {
this.m_periodicPruningRate = p;
}
/**
 * Get how often to prune low density canopies during training (if using
 * canopy clustering)
 *
 * @return how often (every p instances) to prune low density canopies
 */
public int getCanopyPeriodicPruningRate() {
return this.m_periodicPruningRate;
}
/**
 * Returns the tip text for this property.
 *
 * @return tip text for this property suitable for displaying in the
 * explorer/experimenter gui
 */
public String canopyMinimumCanopyDensityTipText() {
return "If using canopy clustering for initialization and/or speedup " + "this is the minimum T2-based density " + "below which a canopy will be pruned during periodic pruning";
}
/**
 * Set the minimum T2-based density below which a canopy will be pruned during
 * periodic pruning.
 *
 * @param dens the minimum canopy density
 */
public void setCanopyMinimumCanopyDensity(final double dens) {
this.m_minClusterDensity = dens;
}
/**
 * Get the minimum T2-based density below which a canopy will be pruned during
 * periodic pruning.
 *
 * @return the minimum canopy density
 */
public double getCanopyMinimumCanopyDensity() {
return this.m_minClusterDensity;
}
/**
 * Returns the tip text for this property.
 *
 * @return tip text for this property suitable for displaying in the
 * explorer/experimenter gui
 */
public String canopyMaxNumCanopiesToHoldInMemoryTipText() {
return "If using canopy clustering for initialization and/or speedup " + "this is the maximum number of candidate canopies to " + "retain in main memory during training of the canopy clusterer. "
+ "T2 distance and data characteristics determine how many candidate " + "canopies are formed before periodic and final pruning are performed. There " + "may not be enough memory available if T2 is set too low.";
}
/**
 * Set the maximum number of candidate canopies to retain in memory during
 * training. T2 distance and data characteristics determine how many candidate
 * canopies are formed before periodic and final pruning are performed. There
 * may not be enough memory available if T2 is set too low.
 *
 * @param max the maximum number of candidate canopies to retain in memory
 * during training
 */
public void setCanopyMaxNumCanopiesToHoldInMemory(final int max) {
this.m_maxCanopyCandidates = max;
}
/**
 * Get the maximum number of candidate canopies to retain in memory during
 * training. T2 distance and data characteristics determine how many candidate
 * canopies are formed before periodic and final pruning are performed. There
 * may not be enough memory available if T2 is set too low.
 *
 * @return the maximum number of candidate canopies to retain in memory during
 * training
 */
public int getCanopyMaxNumCanopiesToHoldInMemory() {
return this.m_maxCanopyCandidates;
}
/**
 * Tip text for this property
 *
 * @return the tip text for this property
 */
public String canopyT2TipText() {
return "The T2 distance to use when using canopy clustering. Values < 0 indicate that this should be set using " + "a heuristic based on attribute standard deviation";
}
/**
 * Set the t2 radius to use when canopy clustering is being used as start
 * points and/or to reduce the number of distance calcs
 *
 * @param t2 the t2 radius to use (negative values select the heuristic)
 */
public void setCanopyT2(final double t2) {
this.m_t2 = t2;
}
/**
 * Get the t2 radius to use when canopy clustering is being used as start
 * points and/or to reduce the number of distance calcs
 *
 * @return the t2 radius to use
 */
public double getCanopyT2() {
return this.m_t2;
}
/**
 * Tip text for this property
 *
 * @return the tip text for this property
 */
public String canopyT1TipText() {
return "The T1 distance to use when using canopy clustering. Values < 0 are taken as a positive " + "multiplier for the T2 distance";
}
/**
 * Set the t1 radius to use when canopy clustering is being used as start
 * points and/or to reduce the number of distance calcs
 *
 * @param t1 the t1 radius to use (negative values act as a multiplier of t2)
 */
public void setCanopyT1(final double t1) {
this.m_t1 = t1;
}
/**
 * Get the t1 radius to use when canopy clustering is being used as start
 * points and/or to reduce the number of distance calcs
 *
 * @return the t1 radius to use
 */
public double getCanopyT1() {
return this.m_t1;
}
/**
 * Returns the tip text for this property.
 *
 * @return tip text for this property suitable for displaying in the
 * explorer/experimenter gui
 */
public String maxIterationsTipText() {
return "set maximum number of iterations";
}
/**
 * set the maximum number of iterations to be executed.
 *
 * @param n the maximum number of iterations
 * @throws Exception if the maximum number of iterations is smaller than 1
 */
public void setMaxIterations(final int n) throws Exception {
if (n <= 0) {
throw new Exception("Maximum number of iterations must be > 0");
}
this.m_MaxIterations = n;
}
/**
 * gets the maximum number of iterations to be executed.
 *
 * @return the maximum number of iterations
 */
public int getMaxIterations() {
return this.m_MaxIterations;
}
/**
 * Returns the tip text for this property.
 *
 * @return tip text for this property suitable for displaying in the
 * explorer/experimenter gui
 */
public String displayStdDevsTipText() {
return "Display std deviations of numeric attributes " + "and counts of nominal attributes.";
}
/**
 * Sets whether standard deviations and nominal counts should be displayed in
 * the clustering output.
 *
 * @param stdD true if std. devs and counts should be displayed
 */
public void setDisplayStdDevs(final boolean stdD) {
this.m_displayStdDevs = stdD;
}
/**
 * Gets whether standard deviations and nominal counts should be displayed in
 * the clustering output.
 *
 * @return true if std. devs and counts should be displayed
 */
public boolean getDisplayStdDevs() {
return this.m_displayStdDevs;
}
/**
 * Returns the tip text for this property.
 *
 * @return tip text for this property suitable for displaying in the
 * explorer/experimenter gui
 */
public String dontReplaceMissingValuesTipText() {
// NOTE(review): the string describes the replacement behaviour itself, not
// the "don't" flag sense of the property name; kept as-is for GUI/backward
// compatibility -- confirm against upstream Weka before changing.
return "Replace missing values globally with mean/mode.";
}
/**
 * Sets whether missing values are left as-is (true) instead of being
 * replaced globally with mean/mode (false).
 *
 * @param r true if missing values are NOT to be replaced
 */
public void setDontReplaceMissingValues(final boolean r) {
this.m_dontReplaceMissing = r;
}
/**
 * Gets whether missing values are left as-is (true) instead of being
 * replaced globally with mean/mode (false).
 *
 * @return true if missing values are NOT to be replaced
 */
public boolean getDontReplaceMissingValues() {
return this.m_dontReplaceMissing;
}
/**
 * Returns the tip text for this property.
 *
 * @return tip text for this property suitable for displaying in the
 * explorer/experimenter gui
 */
public String distanceFunctionTipText() {
return "The distance function to use for instances comparison " + "(default: weka.core.EuclideanDistance). ";
}
/**
 * returns the distance function currently in use.
 *
 * @return the distance function
 */
public DistanceFunction getDistanceFunction() {
return this.m_DistanceFunction;
}
/**
 * sets the distance function to use for instance comparison.
 *
 * @param df the new distance function to use; only EuclideanDistance and
 * ManhattanDistance are supported
 * @throws Exception if an unsupported distance function is supplied
 */
public void setDistanceFunction(final DistanceFunction df) throws Exception {
if (!(df instanceof EuclideanDistance) && !(df instanceof ManhattanDistance)) {
throw new Exception("SimpleKMeans currently only supports the Euclidean and Manhattan distances.");
}
this.m_DistanceFunction = df;
}
/**
 * Returns the tip text for this property.
 *
 * @return tip text for this property suitable for displaying in the
 * explorer/experimenter gui
 */
public String preserveInstancesOrderTipText() {
return "Preserve order of instances.";
}
/**
 * Sets whether the order of instances must be preserved.
 *
 * @param r true if the order of instances is to be preserved
 */
public void setPreserveInstancesOrder(final boolean r) {
this.m_PreserveOrder = r;
}
/**
 * Gets whether the order of instances must be preserved.
 *
 * @return true if the order of instances is to be preserved
 */
public boolean getPreserveInstancesOrder() {
return this.m_PreserveOrder;
}
/**
 * Returns the tip text for this property.
 *
 * @return tip text for this property suitable for displaying in the
 * explorer/experimenter gui
 */
public String fastDistanceCalcTipText() {
return "Uses cut-off values for speeding up distance calculation, but " + "suppresses also the calculation and output of the within cluster sum " + "of squared errors/sum of distances.";
}
/**
 * Sets whether to use faster distance calculation.
 *
 * @param value true if faster calculation to be used
 */
public void setFastDistanceCalc(final boolean value) {
this.m_FastDistanceCalc = value;
}
/**
 * Gets whether to use faster distance calculation.
 *
 * @return true if faster calculation is used
 */
public boolean getFastDistanceCalc() {
return this.m_FastDistanceCalc;
}
/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 * explorer/experimenter gui
 */
public String numExecutionSlotsTipText() {
return "The number of execution slots (threads) to use. " + "Set equal to the number of available cpu/cores";
}
/**
 * Set the degree of parallelism to use.
 *
 * @param slots the number of tasks to run in parallel during training
 */
public void setNumExecutionSlots(final int slots) {
this.m_executionSlots = slots;
}
/**
 * Get the degree of parallelism to use.
 *
 * @return the number of tasks to run in parallel during training
 */
public int getNumExecutionSlots() {
return this.m_executionSlots;
}
/**
* Parses a given list of options.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -N <num>
* Number of clusters.
* (default 2).
* </pre>
*
* <pre>
* -init
* Initialization method to use.
* 0 = random, 1 = k-means++, 2 = canopy, 3 = farthest first.
* (default = 0)
* </pre>
*
* <pre>
* -C
* Use canopies to reduce the number of distance calculations.
* </pre>
*
* <pre>
* -max-candidates <num>
* Maximum number of candidate canopies to retain in memory
* at any one time when using canopy clustering.
* T2 distance plus, data characteristics,
* will determine how many candidate canopies are formed before
* periodic and final pruning are performed, which might result
* in exceess memory consumption. This setting avoids large numbers
* of candidate canopies consuming memory. (default = 100)
* </pre>
*
* <pre>
* -periodic-pruning <num>
* How often to prune low density canopies when using canopy clustering.
* (default = every 10,000 training instances)
* </pre>
*
* <pre>
* -min-density
* Minimum canopy density, when using canopy clustering, below which
* a canopy will be pruned during periodic pruning. (default = 2 instances)
* </pre>
*
* <pre>
* -t2
* The T2 distance to use when using canopy clustering. Values < 0 indicate that
* a heuristic based on attribute std. deviation should be used to set this.
* (default = -1.0)
* </pre>
*
* <pre>
* -t1
* The T1 distance to use when using canopy clustering. A value < 0 is taken as a
* positive multiplier for T2. (default = -1.5)
* </pre>
*
* <pre>
* -V
* Display std. deviations for centroids.
* </pre>
*
* <pre>
* -M
* Don't replace missing values with mean/mode.
* </pre>
*
* <pre>
* -A <classname and options>
* Distance function to use.
* (default: weka.core.EuclideanDistance)
* </pre>
*
* <pre>
* -I <num>
* Maximum number of iterations.
* </pre>
*
* <pre>
* -O
* Preserve order of instances.
* </pre>
*
* <pre>
* -fast
* Enables faster distance calculations, using cut-off values.
* Disables the calculation/output of squared errors/distances.
* </pre>
*
* <pre>
* -num-slots <num>
* Number of execution slots.
* (default 1 - i.e. no parallelism)
* </pre>
*
* <pre>
* -S <num>
* Random number seed.
* (default 10)
* </pre>
*
* <pre>
* -output-debug-info
* If set, clusterer is run in debug mode and
* may output additional info to the console
* </pre>
*
* <pre>
* -do-not-check-capabilities
* If set, clusterer capabilities are not checked before clusterer is built
* (use with caution).
* </pre>
*
* <!-- options-end -->
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(final String[] options) throws Exception {
// output / preprocessing flags
this.m_displayStdDevs = Utils.getFlag("V", options);
this.m_dontReplaceMissing = Utils.getFlag("M", options);
// initialization method (0 = random, 1 = k-means++, 2 = canopy, 3 = farthest first)
String initM = Utils.getOption("init", options);
if (initM.length() > 0) {
this.setInitializationMethod(new SelectedTag(Integer.parseInt(initM), TAGS_SELECTION));
}
// canopy-related options
this.m_speedUpDistanceCompWithCanopies = Utils.getFlag('C', options);
String temp = Utils.getOption("max-candidates", options);
if (temp.length() > 0) {
this.setCanopyMaxNumCanopiesToHoldInMemory(Integer.parseInt(temp));
}
temp = Utils.getOption("periodic-pruning", options);
if (temp.length() > 0) {
this.setCanopyPeriodicPruningRate(Integer.parseInt(temp));
}
temp = Utils.getOption("min-density", options);
if (temp.length() > 0) {
this.setCanopyMinimumCanopyDensity(Double.parseDouble(temp));
}
temp = Utils.getOption("t2", options);
if (temp.length() > 0) {
this.setCanopyT2(Double.parseDouble(temp));
}
temp = Utils.getOption("t1", options);
if (temp.length() > 0) {
this.setCanopyT1(Double.parseDouble(temp));
}
// core k-means options
String optionString = Utils.getOption('N', options);
if (optionString.length() != 0) {
this.setNumClusters(Integer.parseInt(optionString));
}
optionString = Utils.getOption("I", options);
if (optionString.length() != 0) {
this.setMaxIterations(Integer.parseInt(optionString));
}
// distance function: "<classname> <options...>"; defaults to Euclidean
String distFunctionClass = Utils.getOption('A', options);
if (distFunctionClass.length() != 0) {
String distFunctionClassSpec[] = Utils.splitOptions(distFunctionClass);
if (distFunctionClassSpec.length == 0) {
throw new Exception("Invalid DistanceFunction specification string.");
}
String className = distFunctionClassSpec[0];
distFunctionClassSpec[0] = "";
this.setDistanceFunction((DistanceFunction) Utils.forName(DistanceFunction.class, className, distFunctionClassSpec));
} else {
this.setDistanceFunction(new EuclideanDistance());
}
this.m_PreserveOrder = Utils.getFlag("O", options);
this.m_FastDistanceCalc = Utils.getFlag("fast", options);
String slotsS = Utils.getOption("num-slots", options);
if (slotsS.length() > 0) {
this.setNumExecutionSlots(Integer.parseInt(slotsS));
}
// superclass consumes its options last; anything left over is an error
super.setOptions(options);
Utils.checkForRemainingOptions(options);
}
/**
 * Gets the current settings of SimpleKMeans. The returned array round-trips
 * through setOptions().
 *
 * @return an array of strings suitable for passing to setOptions()
 */
@Override
public String[] getOptions() {
Vector<String> result = new Vector<String>();
result.add("-init");
result.add("" + this.getInitializationMethod().getSelectedTag().getID());
if (this.m_speedUpDistanceCompWithCanopies) {
result.add("-C");
}
// canopy-related options
result.add("-max-candidates");
result.add("" + this.getCanopyMaxNumCanopiesToHoldInMemory());
result.add("-periodic-pruning");
result.add("" + this.getCanopyPeriodicPruningRate());
result.add("-min-density");
result.add("" + this.getCanopyMinimumCanopyDensity());
result.add("-t1");
result.add("" + this.getCanopyT1());
result.add("-t2");
result.add("" + this.getCanopyT2());
if (this.m_displayStdDevs) {
result.add("-V");
}
if (this.m_dontReplaceMissing) {
result.add("-M");
}
result.add("-N");
result.add("" + this.getNumClusters());
// distance function spec: class name followed by its own options
result.add("-A");
result.add((this.m_DistanceFunction.getClass().getName() + " " + Utils.joinOptions(this.m_DistanceFunction.getOptions())).trim());
result.add("-I");
result.add("" + this.getMaxIterations());
if (this.m_PreserveOrder) {
result.add("-O");
}
if (this.m_FastDistanceCalc) {
result.add("-fast");
}
result.add("-num-slots");
result.add("" + this.getNumExecutionSlots());
// append inherited options
Collections.addAll(result, super.getOptions());
return result.toArray(new String[result.size()]);
}
/**
 * Returns a string describing this clusterer: iteration count, within-cluster
 * error, initial starting points, and a fixed-width table of the final
 * cluster centroids (optionally with std. deviations / nominal counts).
 *
 * @return a description of the clusterer as a string
 */
@Override
public String toString() {
if (this.m_ClusterCentroids == null) {
return "No clusterer built yet!";
}
// Sizing pass 1: widest attribute name (maxAttWidth) and widest formatted
// numeric value (maxWidth) across all clusters, used to align the columns.
int maxWidth = 0;
int maxAttWidth = 0;
boolean containsNumeric = false;
for (int i = 0; i < this.m_NumClusters; i++) {
for (int j = 0; j < this.m_ClusterCentroids.numAttributes(); j++) {
if (this.m_ClusterCentroids.attribute(j).name().length() > maxAttWidth) {
maxAttWidth = this.m_ClusterCentroids.attribute(j).name().length();
}
if (this.m_ClusterCentroids.attribute(j).isNumeric()) {
containsNumeric = true;
// number of digits before the decimal point, via log10 of the value
double width = Math.log(Math.abs(this.m_ClusterCentroids.instance(i).value(j))) / Math.log(10.0);
if (width < 0) {
width = 1;
}
// decimal + # decimal places + 1
width += 6.0;
if ((int) width > maxWidth) {
maxWidth = (int) width;
}
}
}
}
// Sizing pass 2: nominal attributes -- widen columns for the longest value
// label (centroid values) and the longest attribute value name.
for (int i = 0; i < this.m_ClusterCentroids.numAttributes(); i++) {
if (this.m_ClusterCentroids.attribute(i).isNominal()) {
Attribute a = this.m_ClusterCentroids.attribute(i);
for (int j = 0; j < this.m_ClusterCentroids.numInstances(); j++) {
String val = a.value((int) this.m_ClusterCentroids.instance(j).value(i));
if (val.length() > maxWidth) {
maxWidth = val.length();
}
}
for (int j = 0; j < a.numValues(); j++) {
String val = a.value(j) + " ";
if (val.length() > maxAttWidth) {
maxAttWidth = val.length();
}
}
}
}
if (this.m_displayStdDevs) {
// check for maximum width of maximum frequency count
for (int i = 0; i < this.m_ClusterCentroids.numAttributes(); i++) {
if (this.m_ClusterCentroids.attribute(i).isNominal()) {
int maxV = Utils.maxIndex(this.m_FullNominalCounts[i]);
/*
 * int percent = (int)((double)m_FullNominalCounts[i][maxV] /
 * Utils.sum(m_ClusterSizes) * 100.0);
 */
int percent = 6; // max percent width (100%)
String nomV = "" + this.m_FullNominalCounts[i][maxV];
// + " (" + percent + "%)";
// NOTE(review): the guard tests length + percent but the assignment only
// adds 1 -- looks asymmetric; confirm against upstream Weka before changing.
if (nomV.length() + percent > maxWidth) {
maxWidth = nomV.length() + 1;
}
}
}
}
// check for size of cluster sizes
for (double m_ClusterSize : this.m_ClusterSizes) {
String size = "(" + m_ClusterSize + ")";
if (size.length() > maxWidth) {
maxWidth = size.length();
}
}
if (this.m_displayStdDevs && maxAttWidth < "missing".length()) {
maxAttWidth = "missing".length();
}
String plusMinus = "+/-";
maxAttWidth += 2;
if (this.m_displayStdDevs && containsNumeric) {
maxWidth += plusMinus.length();
}
// Enforce minimum widths so the fixed header strings always fit.
if (maxAttWidth < "Attribute".length() + 2) {
maxAttWidth = "Attribute".length() + 2;
}
if (maxWidth < "Full Data".length()) {
maxWidth = "Full Data".length() + 1;
}
if (maxWidth < "missing".length()) {
maxWidth = "missing".length() + 1;
}
StringBuffer temp = new StringBuffer();
temp.append("\nkMeans\n======\n");
temp.append("\nNumber of iterations: " + this.m_Iterations);
if (!this.m_FastDistanceCalc) {
temp.append("\n");
if (this.m_DistanceFunction instanceof EuclideanDistance) {
temp.append("Within cluster sum of squared errors: " + Utils.sum(this.m_squaredErrors));
} else {
temp.append("Sum of within cluster distances: " + Utils.sum(this.m_squaredErrors));
}
}
// Report how the initial centroids were chosen.
temp.append("\n\nInitial starting points (");
switch (this.m_initializationMethod) {
case FARTHEST_FIRST:
temp.append("farthest first");
break;
case KMEANS_PLUS_PLUS:
temp.append("k-means++");
break;
case CANOPY:
temp.append("canopy");
break;
default:
temp.append("random");
}
temp.append("):\n");
if (this.m_initializationMethod != CANOPY) {
temp.append("\n");
for (int i = 0; i < this.m_initialStartPoints.numInstances(); i++) {
temp.append("Cluster " + i + ": " + this.m_initialStartPoints.instance(i)).append("\n");
}
} else {
temp.append(this.m_canopyClusters.toString(false));
}
if (this.m_speedUpDistanceCompWithCanopies) {
temp.append("\nReduced number of distance calculations by using canopies.");
if (this.m_initializationMethod != CANOPY) {
temp.append("\nCanopy T2 radius: " + String.format("%-10.3f", this.m_canopyClusters.getActualT2()));
temp.append("\nCanopy T1 radius: " + String.format("%-10.3f", this.m_canopyClusters.getActualT1())).append("\n");
}
}
if (!this.m_dontReplaceMissing) {
temp.append("\nMissing values globally replaced with mean/mode");
}
// Header block: attribute column, "Full Data" column, one column per cluster.
temp.append("\n\nFinal cluster centroids:\n");
temp.append(this.pad("Cluster#", " ", (maxAttWidth + (maxWidth * 2 + 2)) - "Cluster#".length(), true));
temp.append("\n");
temp.append(this.pad("Attribute", " ", maxAttWidth - "Attribute".length(), false));
temp.append(this.pad("Full Data", " ", maxWidth + 1 - "Full Data".length(), true));
// cluster numbers
for (int i = 0; i < this.m_NumClusters; i++) {
String clustNum = "" + i;
temp.append(this.pad(clustNum, " ", maxWidth + 1 - clustNum.length(), true));
}
temp.append("\n");
// cluster sizes
String cSize = "(" + Utils.sum(this.m_ClusterSizes) + ")";
temp.append(this.pad(cSize, " ", maxAttWidth + maxWidth + 1 - cSize.length(), true));
for (int i = 0; i < this.m_NumClusters; i++) {
cSize = "(" + this.m_ClusterSizes[i] + ")";
temp.append(this.pad(cSize, " ", maxWidth + 1 - cSize.length(), true));
}
temp.append("\n");
// separator rule spanning all columns
temp.append(this.pad("", "=", maxAttWidth + (maxWidth * (this.m_ClusterCentroids.numInstances() + 1) + this.m_ClusterCentroids.numInstances() + 1), true));
temp.append("\n");
// One row per attribute: full-data value, then each cluster's centroid value.
for (int i = 0; i < this.m_ClusterCentroids.numAttributes(); i++) {
String attName = this.m_ClusterCentroids.attribute(i).name();
temp.append(attName);
for (int j = 0; j < maxAttWidth - attName.length(); j++) {
temp.append(" ");
}
String strVal;
String valMeanMode;
// full data
if (this.m_ClusterCentroids.attribute(i).isNominal()) {
if (this.m_FullMeansOrMediansOrModes[i] == -1) { // missing
valMeanMode = this.pad("missing", " ", maxWidth + 1 - "missing".length(), true);
} else {
valMeanMode = this.pad((strVal = this.m_ClusterCentroids.attribute(i).value((int) this.m_FullMeansOrMediansOrModes[i])), " ", maxWidth + 1 - strVal.length(), true);
}
} else {
if (Double.isNaN(this.m_FullMeansOrMediansOrModes[i])) {
valMeanMode = this.pad("missing", " ", maxWidth + 1 - "missing".length(), true);
} else {
valMeanMode = this.pad((strVal = Utils.doubleToString(this.m_FullMeansOrMediansOrModes[i], maxWidth, 4).trim()), " ", maxWidth + 1 - strVal.length(), true);
}
}
temp.append(valMeanMode);
for (int j = 0; j < this.m_NumClusters; j++) {
if (this.m_ClusterCentroids.attribute(i).isNominal()) {
if (this.m_ClusterCentroids.instance(j).isMissing(i)) {
valMeanMode = this.pad("missing", " ", maxWidth + 1 - "missing".length(), true);
} else {
valMeanMode = this.pad((strVal = this.m_ClusterCentroids.attribute(i).value((int) this.m_ClusterCentroids.instance(j).value(i))), " ", maxWidth + 1 - strVal.length(), true);
}
} else {
if (this.m_ClusterCentroids.instance(j).isMissing(i)) {
valMeanMode = this.pad("missing", " ", maxWidth + 1 - "missing".length(), true);
} else {
valMeanMode = this.pad((strVal = Utils.doubleToString(this.m_ClusterCentroids.instance(j).value(i), maxWidth, 4).trim()), " ", maxWidth + 1 - strVal.length(), true);
}
}
temp.append(valMeanMode);
}
temp.append("\n");
if (this.m_displayStdDevs) {
// Std devs/max nominal
String stdDevVal = "";
if (this.m_ClusterCentroids.attribute(i).isNominal()) {
// Do the values of the nominal attribute
Attribute a = this.m_ClusterCentroids.attribute(i);
for (int j = 0; j < a.numValues(); j++) {
// full data
String val = " " + a.value(j);
temp.append(this.pad(val, " ", maxAttWidth + 1 - val.length(), false));
double count = this.m_FullNominalCounts[i][j];
int percent = (int) (this.m_FullNominalCounts[i][j] / Utils.sum(this.m_ClusterSizes) * 100.0);
String percentS = "" + percent + "%)";
percentS = this.pad(percentS, " ", 5 - percentS.length(), true);
stdDevVal = "" + count + " (" + percentS;
stdDevVal = this.pad(stdDevVal, " ", maxWidth + 1 - stdDevVal.length(), true);
temp.append(stdDevVal);
// Clusters
for (int k = 0; k < this.m_NumClusters; k++) {
percent = (int) (this.m_ClusterNominalCounts[k][i][j] / this.m_ClusterSizes[k] * 100.0);
percentS = "" + percent + "%)";
percentS = this.pad(percentS, " ", 5 - percentS.length(), true);
stdDevVal = "" + this.m_ClusterNominalCounts[k][i][j] + " (" + percentS;
stdDevVal = this.pad(stdDevVal, " ", maxWidth + 1 - stdDevVal.length(), true);
temp.append(stdDevVal);
}
temp.append("\n");
}
// missing (if any)
if (this.m_FullMissingCounts[i] > 0) {
// Full data
temp.append(this.pad(" missing", " ", maxAttWidth + 1 - " missing".length(), false));
double count = this.m_FullMissingCounts[i];
int percent = (int) (this.m_FullMissingCounts[i] / Utils.sum(this.m_ClusterSizes) * 100.0);
String percentS = "" + percent + "%)";
percentS = this.pad(percentS, " ", 5 - percentS.length(), true);
stdDevVal = "" + count + " (" + percentS;
stdDevVal = this.pad(stdDevVal, " ", maxWidth + 1 - stdDevVal.length(), true);
temp.append(stdDevVal);
// Clusters
for (int k = 0; k < this.m_NumClusters; k++) {
percent = (int) (this.m_ClusterMissingCounts[k][i] / this.m_ClusterSizes[k] * 100.0);
percentS = "" + percent + "%)";
percentS = this.pad(percentS, " ", 5 - percentS.length(), true);
stdDevVal = "" + this.m_ClusterMissingCounts[k][i] + " (" + percentS;
stdDevVal = this.pad(stdDevVal, " ", maxWidth + 1 - stdDevVal.length(), true);
temp.append(stdDevVal);
}
temp.append("\n");
}
temp.append("\n");
} else {
// Numeric attribute: +/- std dev for full data and each cluster.
// Full data
if (Double.isNaN(this.m_FullMeansOrMediansOrModes[i])) {
stdDevVal = this.pad("--", " ", maxAttWidth + maxWidth + 1 - 2, true);
} else {
stdDevVal = this.pad((strVal = plusMinus + Utils.doubleToString(this.m_FullStdDevs[i], maxWidth, 4).trim()), " ", maxWidth + maxAttWidth + 1 - strVal.length(), true);
}
temp.append(stdDevVal);
// Clusters
for (int j = 0; j < this.m_NumClusters; j++) {
if (this.m_ClusterCentroids.instance(j).isMissing(i)) {
stdDevVal = this.pad("--", " ", maxWidth + 1 - 2, true);
} else {
stdDevVal = this.pad((strVal = plusMinus + Utils.doubleToString(this.m_ClusterStdDevs.instance(j).value(i), maxWidth, 4).trim()), " ", maxWidth + 1 - strVal.length(), true);
}
temp.append(stdDevVal);
}
temp.append("\n\n");
}
}
}
temp.append("\n\n");
return temp.toString();
}
/**
 * Pads a string to a fixed column width.
 *
 * @param source  the string to pad
 * @param padChar the character (as a string) used for padding
 * @param length  the number of pad characters to add; values <= 0 add none
 * @param leftPad true to pad on the left, false to pad on the right
 * @return the padded string
 */
private String pad(final String source, final String padChar, final int length, final boolean leftPad) {
  // Build the run of padding characters first (length <= 0 yields none).
  StringBuilder padding = new StringBuilder();
  for (int i = 0; i < length; i++) {
    padding.append(padChar);
  }
  // Attach the padding on the requested side of the source string.
  return leftPad ? padding.toString() + source : source + padding.toString();
}
/**
 * Returns the centroid instances of the learned clusters.
 *
 * @return the cluster centroids
 */
public Instances getClusterCentroids() {
  return m_ClusterCentroids;
}
/**
 * Returns the standard deviations of the numeric attributes in each cluster.
 *
 * @return the per-cluster standard deviations of the numeric attributes
 */
public Instances getClusterStandardDevs() {
  return m_ClusterStdDevs;
}
/**
 * Returns, for each cluster, the weighted frequency counts for the values of
 * each nominal attribute (indexed [cluster][attribute][value]).
 *
 * @return the nominal value counts
 */
public double[][][] getClusterNominalCounts() {
  return m_ClusterNominalCounts;
}
/**
 * Returns the squared error summed over all clusters.
 *
 * @return the total squared error, or Double.NaN when the fast distance
 *         calculation was used (per-cluster errors are not tracked then)
 * @see #m_FastDistanceCalc
 */
public double getSquaredError() {
  // Errors are not accumulated when the fast distance path is active.
  return this.m_FastDistanceCalc ? Double.NaN : Utils.sum(this.m_squaredErrors);
}
/**
 * Returns the sum of instance weights assigned to each cluster.
 *
 * @return the (weighted) number of instances in each cluster
 */
public double[] getClusterSizes() {
  return m_ClusterSizes;
}
/**
 * Returns the cluster assignment for each training instance.
 *
 * @return array of centroid indexes, one per instance
 * @throws Exception if instance order was not preserved (-O not set) or no
 *           assignments have been made yet
 */
public int[] getAssignments() throws Exception {
  // Assignments are only meaningful when the original instance order was kept.
  if (!m_PreserveOrder) {
    throw new Exception("The assignments are only available when order of instances is preserved (-O)");
  }
  if (m_Assignments == null) {
    throw new Exception("No assignments made.");
  }
  return m_Assignments;
}
/**
 * Returns the revision string extracted from the version-control keyword.
 *
 * @return the revision
 */
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
 * Main method for executing this clusterer from the command line.
 *
 * @param args command-line options; use -h to list all parameters
 */
public static void main(final String[] args) {
  SimpleKMeans clusterer = new SimpleKMeans();
  runClusterer(clusterer, args);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/clusterers/SingleClustererEnhancer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* SingleClustererEnhancer.java
* Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.clusterers;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.Utils;
/**
* Meta-clusterer for enhancing a base clusterer.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public abstract class SingleClustererEnhancer extends AbstractClusterer implements OptionHandler {

  /** for serialization */
  private static final long serialVersionUID = 4893928362926428671L;

  /** the wrapped base clusterer */
  protected Clusterer m_Clusterer = new SimpleKMeans();

  /**
   * Returns the class name of the default base clusterer.
   *
   * @return the default clusterer classname
   */
  protected String defaultClustererString() {
    return SimpleKMeans.class.getName();
  }

  /**
   * Returns an enumeration describing the available options, including the
   * options of the wrapped base clusterer (when it is an OptionHandler).
   *
   * @return an enumeration of all the available options
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> options = new Vector<Option>();
    options.addElement(new Option("\tFull name of base clusterer.\n\t(default: " + defaultClustererString() + ")", "W", 1, "-W"));
    options.addAll(Collections.list(super.listOptions()));
    // Expose the base clusterer's own options as well, if it has any.
    if (m_Clusterer instanceof OptionHandler) {
      options.addElement(new Option("", "", 0, "\nOptions specific to clusterer " + m_Clusterer.getClass().getName() + ":"));
      options.addAll(Collections.list(((OptionHandler) m_Clusterer).listOptions()));
    }
    return options.elements();
  }

  /**
   * Parses a given list of options. -W selects the base clusterer; options
   * after "--" are passed on to it.
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  @Override
  public void setOptions(String[] options) throws Exception {
    String clustererName = Utils.getOption('W', options);
    super.setOptions(options);
    if (clustererName.length() == 0) {
      clustererName = defaultClustererString();
    }
    // First install the clusterer with default options, then re-install it
    // with the remaining (partitioned) options applied.
    setClusterer(AbstractClusterer.forName(clustererName, null));
    setClusterer(AbstractClusterer.forName(clustererName, Utils.partitionOptions(options)));
  }

  /**
   * Gets the current settings of the clusterer.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {
    Vector<String> result = new Vector<String>();
    result.add("-W");
    result.add(getClusterer().getClass().getName());
    Collections.addAll(result, super.getOptions());
    // Base clusterer options follow a "--" separator, if there are any.
    if (getClusterer() instanceof OptionHandler) {
      String[] clustererOptions = ((OptionHandler) getClusterer()).getOptions();
      if (clustererOptions.length > 0) {
        result.add("--");
      }
      Collections.addAll(result, clustererOptions);
    }
    return result.toArray(new String[result.size()]);
  }

  /**
   * Returns the tip text for this property.
   *
   * @return tip text suitable for displaying in the explorer/experimenter gui
   */
  public String clustererTipText() {
    return "The base clusterer to be used.";
  }

  /**
   * Sets the base clusterer.
   *
   * @param value the clusterer to use
   */
  public void setClusterer(Clusterer value) {
    m_Clusterer = value;
  }

  /**
   * Gets the clusterer used as the base clusterer.
   *
   * @return the base clusterer
   */
  public Clusterer getClusterer() {
    return m_Clusterer;
  }

  /**
   * Returns the clusterer specification string: the class name of the base
   * clusterer followed by its options.
   *
   * @return the clusterer string
   */
  protected String getClustererSpec() {
    Clusterer base = getClusterer();
    String spec = base.getClass().getName();
    if (base instanceof OptionHandler) {
      spec += " " + Utils.joinOptions(((OptionHandler) base).getOptions());
    }
    return spec;
  }

  /**
   * Returns the capabilities of the base clusterer (or of this enhancer when
   * no base clusterer is set), with all dependencies enabled.
   *
   * @return the capabilities of this clusterer
   */
  @Override
  public Capabilities getCapabilities() {
    Capabilities result = (getClusterer() == null) ? super.getCapabilities() : getClusterer().getCapabilities();
    // set dependencies
    for (Capability cap : Capability.values()) {
      result.enableDependency(cap);
    }
    return result;
  }

  /**
   * Returns the number of clusters generated by the base clusterer.
   *
   * @return the number of clusters generated for a training dataset
   * @throws Exception if number of clusters could not be returned successfully
   */
  @Override
  public int numberOfClusters() throws Exception {
    return m_Clusterer.numberOfClusters();
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/clusterers/UpdateableClusterer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* UpdateableClusterer.java
* Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
*/
package weka.clusterers;
import weka.core.Instance;
/**
 * Interface to incremental cluster models that can learn using one instance
 * at a time.
 *
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision$
 */
public interface UpdateableClusterer {
/**
 * Adds a single instance to the clusterer, updating the model incrementally.
 *
 * @param newInstance the instance to be added
 * @throws Exception if something goes wrong
 */
public void updateClusterer(Instance newInstance) throws Exception;
/**
 * Signals the end of the updating, allowing the implementation to finalize
 * its model.
 *
 * @throws InterruptedException if the calling thread is interrupted
 */
public void updateFinished() throws InterruptedException;
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/AbstractInstance.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* AbstractInstance.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core;
import java.io.Serializable;
import java.util.Enumeration;
/**
* Abstract class providing common functionality for the original instance
* implementations.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public abstract class AbstractInstance implements Instance, Serializable,
RevisionHandler {
/** for serialization */
static final long serialVersionUID = 1482635194499365155L;
/**
 * The dataset the instance has access to. Null if the instance doesn't have
 * access to any dataset. Only if an instance has access to a dataset, it
 * knows about the actual attribute types.
 */
protected/* @spec_public@ */Instances m_Dataset;
/** The instance's attribute values, in internal floating-point representation. */
protected/* @spec_public non_null@ */double[] m_AttValues;
/** The instance's weight. */
protected double m_Weight;
/** Default max number of digits after the decimal point for numeric values */
public static int s_numericAfterDecimalPoint = 6;
/**
 * Returns the attribute with the given index.
 *
 * @param index the attribute's index
 * @return the attribute at the given position
 * @throws UnassignedDatasetException if instance doesn't have access to a
 *           dataset
 */
// @ requires m_Dataset != null;
@Override
public/* @pure@ */Attribute attribute(int index) {
  final Instances data = m_Dataset;
  if (data == null) {
    throw new UnassignedDatasetException("Instance doesn't have access to a dataset!");
  }
  return data.attribute(index);
}
/**
 * Returns the attribute with the given index in the sparse representation.
 *
 * @param indexOfIndex the index of the attribute's index
 * @return the attribute at the given position
 * @throws UnassignedDatasetException if instance doesn't have access to a
 *           dataset
 */
// @ requires m_Dataset != null;
@Override
public/* @pure@ */Attribute attributeSparse(int indexOfIndex) {
  final Instances data = m_Dataset;
  if (data == null) {
    throw new UnassignedDatasetException("Instance doesn't have access to a dataset!");
  }
  // Translate the sparse position into the real attribute index first.
  return data.attribute(index(indexOfIndex));
}
/**
 * Returns the class attribute.
 *
 * @return the class attribute
 * @throws UnassignedDatasetException if the class is not set or the instance
 *           doesn't have access to a dataset
 */
// @ requires m_Dataset != null;
@Override
public/* @pure@ */Attribute classAttribute() {
  final Instances data = m_Dataset;
  if (data == null) {
    throw new UnassignedDatasetException("Instance doesn't have access to a dataset!");
  }
  return data.classAttribute();
}
/**
 * Returns the class attribute's index.
 *
 * @return the class index as an integer
 * @throws UnassignedDatasetException if instance doesn't have access to a
 *           dataset
 */
// @ requires m_Dataset != null;
// @ ensures \result == m_Dataset.classIndex();
@Override
public/* @pure@ */int classIndex() {
  final Instances data = m_Dataset;
  if (data == null) {
    throw new UnassignedDatasetException("Instance doesn't have access to a dataset!");
  }
  return data.classIndex();
}
/**
 * Tests if an instance's class is missing.
 *
 * @return true if the instance's class is missing
 * @throws UnassignedClassException if the class is not set or the instance
 *           doesn't have access to a dataset
 */
// @ requires classIndex() >= 0;
@Override
public/* @pure@ */boolean classIsMissing() {
  final int clsIdx = classIndex();
  if (clsIdx < 0) {
    throw new UnassignedClassException("Class is not set!");
  }
  return isMissing(clsIdx);
}
/**
 * Returns an instance's class value in internal format (i.e. as a
 * floating-point number).
 *
 * @return the corresponding value as a double (if the corresponding attribute
 *         is nominal (or a string) then it returns the value's index as a
 *         double)
 * @throws UnassignedClassException if the class is not set or the instance
 *           doesn't have access to a dataset
 */
// @ requires classIndex() >= 0;
@Override
public/* @pure@ */double classValue() {
  final int clsIdx = classIndex();
  if (clsIdx < 0) {
    throw new UnassignedClassException("Class is not set!");
  }
  return value(clsIdx);
}
/**
 * Returns the dataset this instance has access to (i.e. obtains information
 * about attribute types from). Null if the instance doesn't have access to a
 * dataset.
 *
 * @return the dataset the instance has access to
 */
// @ ensures \result == m_Dataset;
@Override
public/* @pure@ */Instances dataset() {
return m_Dataset;
}
/**
 * Deletes an attribute at the given position (0 to numAttributes() - 1). Only
 * succeeds if the instance does not have access to any dataset, because
 * otherwise inconsistencies could be introduced.
 *
 * @param position the attribute's position
 * @throws RuntimeException if the instance has access to a dataset
 */
// @ requires m_Dataset != null;
@Override
public void deleteAttributeAt(int position) {
  // Refuse the edit while attached to a dataset -- the header would go stale.
  if (m_Dataset != null) {
    throw new RuntimeException("Instance has access to a dataset!");
  }
  forceDeleteAttributeAt(position);
}
/**
 * Returns an enumeration of all the attributes.
 *
 * @return enumeration of all the attributes
 * @throws UnassignedDatasetException if the instance doesn't have access to a
 *           dataset
 */
// @ requires m_Dataset != null;
@Override
public/* @pure@ */Enumeration<Attribute> enumerateAttributes() {
  final Instances data = m_Dataset;
  if (data == null) {
    throw new UnassignedDatasetException("Instance doesn't have access to a dataset!");
  }
  return data.enumerateAttributes();
}
/**
 * Tests if the headers of two instances are equivalent.
 *
 * @param inst another instance
 * @return true if the header of the given instance is equivalent to this
 *         instance's header
 * @throws UnassignedDatasetException if instance doesn't have access to any
 *           dataset
 */
// @ requires m_Dataset != null;
@Override
public/* @pure@ */boolean equalHeaders(Instance inst) {
  final Instances data = m_Dataset;
  if (data == null) {
    throw new UnassignedDatasetException("Instance doesn't have access to a dataset!");
  }
  return data.equalHeaders(inst.dataset());
}
/**
 * Checks if the headers of two instances are equivalent. If not, returns a
 * message explaining why they differ.
 *
 * @param inst another instance
 * @return null if the header of the given instance is equivalent to this
 *         instance's header, otherwise a message with details on why they
 *         differ
 * @throws UnassignedDatasetException if this instance doesn't have access to
 *           a dataset
 */
@Override
public String equalHeadersMsg(Instance inst) {
  final Instances data = m_Dataset;
  if (data == null) {
    throw new UnassignedDatasetException("Instance doesn't have access to a dataset!");
  }
  return data.equalHeadersMsg(inst.dataset());
}
/**
 * Tests whether an instance has a missing value. Skips the class attribute if
 * set.
 *
 * @return true if instance has a missing value
 * @throws UnassignedDatasetException if instance doesn't have access to any
 *           dataset
 */
// @ requires m_Dataset != null;
@Override
public/* @pure@ */boolean hasMissingValue() {
  if (m_Dataset == null) {
    throw new UnassignedDatasetException("Instance doesn't have access to a dataset!");
  }
  final int clsIdx = classIndex();
  // Scan the stored (sparse) values, ignoring the class attribute.
  for (int i = 0; i < numValues(); i++) {
    if (index(i) != clsIdx && isMissingSparse(i)) {
      return true;
    }
  }
  return false;
}
/**
 * Inserts an attribute at the given position (0 to numAttributes()). Only
 * succeeds if the instance does not have access to any dataset, because
 * otherwise inconsistencies could be introduced.
 *
 * @param position the attribute's position
 * @throws RuntimeException if the instance has access to a dataset
 * @throws IllegalArgumentException if the position is out of range
 */
// @ requires m_Dataset == null;
// @ requires 0 <= position && position <= numAttributes();
@Override
public void insertAttributeAt(int position) {
  if (m_Dataset != null) {
    // Fixed "accesss" typo; message now matches deleteAttributeAt().
    throw new RuntimeException("Instance has access to a dataset!");
  }
  if ((position < 0) || (position > numAttributes())) {
    throw new IllegalArgumentException("Can't insert attribute: index out "
      + "of range");
  }
  forceInsertAttributeAt(position);
}
/**
 * Tests if a specific value is "missing".
 *
 * @param attIndex the attribute's index
 * @return true if the value is "missing"
 */
@Override
public/* @pure@ */boolean isMissing(int attIndex) {
  // Return the predicate directly instead of the redundant
  // if/return-true/return-false construct.
  return Utils.isMissingValue(value(attIndex));
}
/**
 * Tests if a specific value is "missing", given an index in the sparse
 * representation.
 *
 * @param indexOfIndex the index of the attribute's index
 * @return true if the value is "missing"
 */
@Override
public/* @pure@ */boolean isMissingSparse(int indexOfIndex) {
  // Return the predicate directly instead of the redundant
  // if/return-true/return-false construct.
  return Utils.isMissingValue(valueSparse(indexOfIndex));
}
/**
 * Tests if a specific value is "missing". The given attribute has to belong
 * to a dataset.
 *
 * @param att the attribute
 * @return true if the value is "missing"
 */
@Override
public/* @pure@ */boolean isMissing(Attribute att) {
// Delegate to the index-based check.
return isMissing(att.index());
}
/**
 * Returns the number of class labels.
 *
 * @return the number of class labels as an integer if the class attribute is
 *         nominal, 1 otherwise
 * @throws UnassignedDatasetException if instance doesn't have access to any
 *           dataset
 */
// @ requires m_Dataset != null;
@Override
public/* @pure@ */int numClasses() {
  final Instances data = m_Dataset;
  if (data == null) {
    throw new UnassignedDatasetException("Instance doesn't have access to a dataset!");
  }
  return data.numClasses();
}
/**
 * Sets the class value of an instance to be "missing". A deep copy of the
 * vector of attribute values is performed before the value is set to be
 * missing.
 *
 * @throws UnassignedClassException if the class is not set
 * @throws UnassignedDatasetException if the instance doesn't have access to a
 *           dataset
 */
// @ requires classIndex() >= 0;
@Override
public void setClassMissing() {
  final int clsIdx = classIndex();
  if (clsIdx < 0) {
    throw new UnassignedClassException("Class is not set!");
  }
  setMissing(clsIdx);
}
/**
 * Sets the class value of an instance to the given value (internal
 * floating-point format). A deep copy of the vector of attribute values is
 * performed before the value is set.
 *
 * @param value the new attribute value (if the corresponding attribute is
 *          nominal (or a string) then this is the new value's index as a
 *          double)
 * @throws UnassignedClassException if the class is not set
 * @throws UnassignedDatasetException if the instance doesn't have access to a
 *           dataset
 */
// @ requires classIndex() >= 0;
@Override
public void setClassValue(double value) {
  final int clsIdx = classIndex();
  if (clsIdx < 0) {
    throw new UnassignedClassException("Class is not set!");
  }
  setValue(clsIdx, value);
}
/**
 * Sets the class value of an instance to the given value. A deep copy of the
 * vector of attribute values is performed before the value is set.
 *
 * @param value the new class value (if the class is a string attribute and
 *          the value can't be found, the value is added to the attribute)
 * @throws UnassignedClassException if the class is not set
 * @throws UnassignedDatasetException if the dataset is not set
 * @throws IllegalArgumentException if the attribute is not nominal or a
 *           string, or the value couldn't be found for a nominal attribute
 */
// @ requires classIndex() >= 0;
@Override
public final void setClassValue(String value) {
  final int clsIdx = classIndex();
  if (clsIdx < 0) {
    throw new UnassignedClassException("Class is not set!");
  }
  setValue(clsIdx, value);
}
/**
 * Sets the reference to the dataset. Does not check if the instance is
 * compatible with the dataset. Note: the dataset does not know about this
 * instance. If the structure of the dataset's header gets changed, this
 * instance will not be adjusted automatically.
 *
 * @param instances the reference to the dataset
 */
@Override
public final void setDataset(Instances instances) {
m_Dataset = instances;
}
/**
 * Sets a specific value to be "missing". Performs a deep copy of the vector
 * of attribute values before the value is set to be missing.
 *
 * @param attIndex the attribute's index
 */
@Override
public final void setMissing(int attIndex) {
// Delegate to setValue with the designated missing-value marker.
setValue(attIndex, Utils.missingValue());
}
/**
 * Sets a specific value to be "missing". Performs a deep copy of the vector
 * of attribute values before the value is set to be missing. The given
 * attribute has to belong to a dataset.
 *
 * @param att the attribute
 */
@Override
public final void setMissing(Attribute att) {
// Delegate to the index-based variant.
setMissing(att.index());
}
/**
 * Sets a value of a nominal or string attribute to the given value. Performs
 * a deep copy of the vector of attribute values before the value is set.
 *
 * @param attIndex the attribute's index
 * @param value the new attribute value (if the attribute is a string
 *          attribute and the value can't be found, the value is added to the
 *          attribute)
 * @throws UnassignedDatasetException if the dataset is not set
 * @throws IllegalArgumentException if the selected attribute is not nominal
 *           or a string, or the supplied value couldn't be found for a
 *           nominal attribute
 */
// @ requires m_Dataset != null;
@Override
public final void setValue(int attIndex, String value) {
  if (m_Dataset == null) {
    throw new UnassignedDatasetException("Instance doesn't have access to a dataset!");
  }
  // Look the attribute up once and reuse it for all subsequent checks.
  final Attribute att = attribute(attIndex);
  if (!att.isNominal() && !att.isString()) {
    throw new IllegalArgumentException("Attribute neither nominal nor string!");
  }
  int valIdx = att.indexOfValue(value);
  if (valIdx == -1) {
    if (att.isNominal()) {
      // Nominal attributes have a fixed value set; unknown values are errors.
      throw new IllegalArgumentException("Value not defined for given nominal attribute!");
    }
    // String attributes grow on demand.
    att.forceAddValue(value);
    valIdx = att.indexOfValue(value);
  }
  setValue(attIndex, valIdx);
}
  /**
   * Sets a specific value in the instance to the given value (internal
   * floating-point format). Performs a deep copy of the vector of attribute
   * values before the value is set, so if you are planning on calling setValue
   * many times it may be faster to create a new instance using toDoubleArray.
   * The given attribute has to belong to a dataset.
   *
   * @param att the attribute
   * @param value the new attribute value (If the corresponding attribute is
   *          nominal (or a string) then this is the new value's index as a
   *          double).
   */
  @Override
  public final void setValue(Attribute att, double value) {
    // resolve the attribute to its index and delegate
    setValue(att.index(), value);
  }
/**
* Sets a value of an nominal or string attribute to the given value. Performs
* a deep copy of the vector of attribute values before the value is set, so
* if you are planning on calling setValue many times it may be faster to
* create a new instance using toDoubleArray. The given attribute has to
* belong to a dataset.
*
* @param att the attribute
* @param value the new attribute value (If the attribute is a string
* attribute and the value can't be found, the value is added to the
* attribute).
* @throws IllegalArgumentException if the the attribute is not nominal or a
* string, or the value couldn't be found for a nominal attribute
*/
@Override
public final void setValue(Attribute att, String value) {
if (!att.isNominal() && !att.isString()) {
throw new IllegalArgumentException(
"Attribute neither nominal nor string!");
}
int valIndex = att.indexOfValue(value);
if (valIndex == -1) {
if (att.isNominal()) {
throw new IllegalArgumentException(
"Value not defined for given nominal attribute!");
} else {
att.forceAddValue(value);
valIndex = att.indexOfValue(value);
}
}
setValue(att.index(), valIndex);
}
  /**
   * Sets the weight of an instance.
   *
   * @param weight the weight; no validation is performed on the value
   */
  @Override
  public final void setWeight(double weight) {
    m_Weight = weight;
  }
  /**
   * Returns the relational value of a relational attribute.
   *
   * @param attIndex the attribute's index
   * @return the corresponding relation as an Instances object
   * @throws IllegalArgumentException if the attribute is not a relation-valued
   *           attribute
   * @throws UnassignedDatasetException if the instance doesn't belong to a
   *           dataset.
   */
  // @ requires m_Dataset != null;
  @Override
  public final/* @pure@ */Instances relationalValue(int attIndex) {
    if (m_Dataset == null) {
      throw new UnassignedDatasetException(
        "Instance doesn't have access to a dataset!");
    }
    // resolve the index against the dataset header and delegate
    return relationalValue(m_Dataset.attribute(attIndex));
  }
/**
* Returns the relational value of a relational attribute.
*
* @param att the attribute
* @return the corresponding relation as an Instances object, null if missing
* @throws IllegalArgumentException if the attribute is not a relation-valued
* attribute
* @throws UnassignedDatasetException if the instance doesn't belong to a
* dataset.
*/
@Override
public final/* @pure@ */Instances relationalValue(Attribute att) {
int attIndex = att.index();
if (att.isRelationValued()) {
if (isMissing(attIndex)) {
return null;
}
return att.relation((int) value(attIndex));
} else {
throw new IllegalArgumentException("Attribute isn't relation-valued!");
}
}
  /**
   * Returns the value of a nominal, string, date, or relational attribute for
   * the instance as a string.
   *
   * @param attIndex the attribute's index
   * @return the value as a string
   * @throws IllegalArgumentException if the attribute is not a nominal, string,
   *           date, or relation-valued attribute.
   * @throws UnassignedDatasetException if the instance doesn't belong to a
   *           dataset.
   */
  // @ requires m_Dataset != null;
  @Override
  public final/* @pure@ */String stringValue(int attIndex) {
    if (m_Dataset == null) {
      throw new UnassignedDatasetException(
        "Instance doesn't have access to a dataset!");
    }
    // resolve the index against the dataset header and delegate
    return stringValue(m_Dataset.attribute(attIndex));
  }
/**
* Returns the value of a nominal, string, date, or relational attribute for
* the instance as a string.
*
* @param att the attribute
* @return the value as a string
* @throws IllegalArgumentException if the attribute is not a nominal, string,
* date, or relation-valued attribute.
* @throws UnassignedDatasetException if the instance doesn't belong to a
* dataset.
*/
@Override
public final/* @pure@ */String stringValue(Attribute att) {
int attIndex = att.index();
if (isMissing(attIndex)) {
return "?";
}
switch (att.type()) {
case Attribute.NOMINAL:
case Attribute.STRING:
return att.value((int) value(attIndex));
case Attribute.DATE:
return att.formatDate(value(attIndex));
case Attribute.RELATIONAL:
return att.relation((int) value(attIndex)).stringWithoutHeader();
default:
throw new IllegalArgumentException(
"Attribute isn't nominal, string or date!");
}
}
/**
* Returns the description of one instance with any numeric values printed at
* the supplied maximum number of decimal places. If the instance doesn't have
* access to a dataset, it returns the internal floating-point values. Quotes
* string values that contain whitespace characters.
*
* @param afterDecimalPoint the maximum number of digits permitted after the
* decimal point for a numeric value
*
* @return the instance's description as a string
*/
@Override
public final String toStringMaxDecimalDigits(int afterDecimalPoint) {
StringBuffer text = new StringBuffer(toStringNoWeight(afterDecimalPoint));
if (m_Weight != 1.0) {
text.append(",{" + Utils.doubleToString(m_Weight, afterDecimalPoint)
+ "}");
}
return text.toString();
}
  /**
   * Returns the description of one instance. If the instance doesn't have
   * access to a dataset, it returns the internal floating-point values. Quotes
   * string values that contain whitespace characters.
   *
   * @return the instance's description as a string
   */
  @Override
  public String toString() {
    // uses the class-wide default for the number of decimal digits
    return toStringMaxDecimalDigits(s_numericAfterDecimalPoint);
  }
  /**
   * Returns the description of one value of the instance as a string. If the
   * instance doesn't have access to a dataset, it returns the internal
   * floating-point value. Quotes string values that contain whitespace
   * characters, or if they are a question mark.
   *
   * @param attIndex the attribute's index
   * @return the value's description as a string
   */
  @Override
  public final String toString(int attIndex) {
    // uses the class-wide default for the number of decimal digits
    return toString(attIndex, s_numericAfterDecimalPoint);
  }
/**
* Returns the description of one value of the instance as a string. If the
* instance doesn't have access to a dataset, it returns the internal
* floating-point value. Quotes string values that contain whitespace
* characters, or if they are a question mark.
*
* @param attIndex the attribute's index
* @param afterDecimalPoint the maximum number of digits permitted after the
* decimal point for numeric values
* @return the value's description as a string
*/
@Override
public final/* @pure@ */String toString(int attIndex, int afterDecimalPoint) {
StringBuffer text = new StringBuffer();
if (isMissing(attIndex)) {
text.append("?");
} else {
if (m_Dataset == null) {
text.append(Utils.doubleToString(value(attIndex), afterDecimalPoint));
} else {
switch (m_Dataset.attribute(attIndex).type()) {
case Attribute.NOMINAL:
case Attribute.STRING:
case Attribute.DATE:
case Attribute.RELATIONAL:
text.append(Utils.quote(stringValue(attIndex)));
break;
case Attribute.NUMERIC:
text.append(Utils.doubleToString(value(attIndex), afterDecimalPoint));
break;
default:
throw new IllegalStateException("Unknown attribute type");
}
}
}
return text.toString();
}
  /**
   * Returns the description of one value of the instance as a string. If the
   * instance doesn't have access to a dataset it returns the internal
   * floating-point value. Quotes string values that contain whitespace
   * characters, or if they are a question mark. The given attribute has to
   * belong to a dataset.
   *
   * @param att the attribute
   * @return the value's description as a string
   */
  @Override
  public final String toString(Attribute att) {
    // resolve the attribute to its index and delegate
    return toString(att.index());
  }
  /**
   * Returns the description of one value of the instance as a string. If the
   * instance doesn't have access to a dataset it returns the internal
   * floating-point value. Quotes string values that contain whitespace
   * characters, or if they are a question mark. The given attribute has to
   * belong to a dataset.
   *
   * @param att the attribute
   * @param afterDecimalPoint the maximum number of decimal places to print
   * @return the value's description as a string
   */
  @Override
  public final String toString(Attribute att, int afterDecimalPoint) {
    // resolve the attribute to its index and delegate
    return toString(att.index(), afterDecimalPoint);
  }
  /**
   * Returns an instance's attribute value in internal format. The given
   * attribute has to belong to a dataset.
   *
   * @param att the attribute
   * @return the specified value as a double (If the corresponding attribute is
   *         nominal (or a string) then it returns the value's index as a
   *         double).
   */
  @Override
  public/* @pure@ */double value(Attribute att) {
    // resolve the attribute to its index and delegate
    return value(att.index());
  }
  /**
   * Returns an instance's attribute value in internal format, given an index in
   * the sparse representation.
   *
   * @param indexOfIndex the index of the attribute's index
   * @return the specified value as a double (If the corresponding attribute is
   *         nominal (or a string) then it returns the value's index as a
   *         double).
   */
  @Override
  public/* @pure@ */double valueSparse(int indexOfIndex) {
    // direct access into the stored value array, no index translation here
    return m_AttValues[indexOfIndex];
  }
  /**
   * Returns the instance's weight.
   *
   * @return the instance's weight as a double
   */
  @Override
  public final/* @pure@ */double weight() {
    return m_Weight;
  }
  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    // "$Revision$" is presumably expanded by version-control keyword
    // substitution; RevisionUtils strips the keyword markers
    return RevisionUtils.extract("$Revision$");
  }
  /**
   * Deletes an attribute at the given position (0 to numAttributes() - 1).
   * Abstract: storage-specific behavior is supplied by concrete subclasses.
   *
   * @param position the attribute's position
   */
  protected abstract void forceDeleteAttributeAt(int position);
  /**
   * Inserts an attribute at the given position (0 to numAttributes()) and sets
   * its value to be missing. Abstract: storage-specific behavior is supplied
   * by concrete subclasses.
   *
   * @param position the attribute's position
   */
  protected abstract void forceInsertAttributeAt(int position);
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/AdditionalMeasureProducer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* AdditionalMeasureProducer.java
* Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core;
import java.util.Enumeration;
/**
* Interface to something that can produce measures other than those
* calculated by evaluation modules.
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
*/
public interface AdditionalMeasureProducer {

  /**
   * Returns an enumeration of the measure names. Additional measures must
   * follow the naming convention of starting with "measure", e.g.
   * {@code double measureBlah()}.
   *
   * @return an enumeration of the measure names
   */
  Enumeration<String> enumerateMeasures();

  /**
   * Returns the value of the named measure.
   *
   * @param measureName the name of the measure to query for its value
   * @return the value of the named measure
   * @exception IllegalArgumentException if the named measure is not supported
   */
  double getMeasure(String measureName);
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/Aggregateable.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Aggregateable.java
* Copyright (C) 2013 University of Waikato, Hamilton, New Zealand
*/
package weka.core;
/**
* Interface to something that can aggregate an object of the same type with
* itself.
*
* @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
* @version $Revision$
*/
public interface Aggregateable<E> {

  /**
   * Aggregate an object with this one.
   *
   * @param toAggregate the object to aggregate
   * @return the result of aggregation
   * @throws Exception if the supplied object can't be aggregated for some
   *           reason
   */
  E aggregate(E toAggregate) throws Exception;

  /**
   * Call to complete the aggregation process. Allows implementers to do any
   * final processing based on how many objects were aggregated.
   *
   * @throws Exception if the aggregation can't be finalized for some reason
   */
  void finalizeAggregation() throws Exception;
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/AlgVector.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* AlgVector.java
* Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core;
import java.io.Serializable;
import java.util.Random;
/**
* Class for performing operations on an algebraic vector
* of floating-point values.
*
* @author Gabi Schmidberger (gabi@cs.waikato.ac.nz)
* @version $Revision$
*/
public class AlgVector
  implements Cloneable, Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = -4023736016850256591L;

  /**
   * The element values of the vector. NOTE(review): this stays null when the
   * (Instances, Random) constructor is given a format with no numeric
   * attributes; arithmetic methods guard against that, but the plain
   * accessors (getElement, getElements, numElements) do not.
   */
  protected double[] m_Elements;

  /**
   * Constructs a vector of the given length, with all elements set to 0.0.
   *
   * @param n the number of elements
   */
  public AlgVector(int n) {
    m_Elements = new double[n];
    initialize();
  }

  /**
   * Constructs a vector holding a deep copy of the given array.
   *
   * @param array the values for the vector
   */
  public AlgVector(double[] array) {
    // clone() replaces the original element-by-element copy loop
    m_Elements = array.clone();
  }

  /**
   * Constructs a vector using a given data format. The vector has one element
   * per numeric attribute; the other attribute types (nominal, string) are
   * ignored. Elements are initialized with random values.
   *
   * @param format the data format to use
   * @param random for initializing the elements
   * @throws Exception if something goes wrong
   */
  public AlgVector(Instances format, Random random) throws Exception {
    int len = format.numAttributes();
    for (int i = 0; i < format.numAttributes(); i++) {
      if (!format.attribute(i).isNumeric()) {
        len--;
      }
    }
    if (len > 0) {
      m_Elements = new double[len];
      initialize(random);
    }
  }

  /**
   * Constructs a vector from the numeric attribute values of an instance.
   * The other attributes (nominal, string) are ignored.
   *
   * @param instance instance with numeric attributes that the vector is built
   *          from
   * @throws Exception if the instance contains no numeric attributes
   */
  public AlgVector(Instance instance) throws Exception {
    int len = instance.numAttributes();
    for (int i = 0; i < instance.numAttributes(); i++) {
      if (!instance.attribute(i).isNumeric()) {
        len--;
      }
    }
    if (len > 0) {
      m_Elements = new double[len];
      int n = 0;
      for (int i = 0; i < instance.numAttributes(); i++) {
        if (!instance.attribute(i).isNumeric()) {
          continue;
        }
        m_Elements[n] = instance.value(i);
        n++;
      }
    } else {
      throw new IllegalArgumentException("No numeric attributes in data!");
    }
  }

  /**
   * Creates and returns a deep copy of this vector.
   *
   * @return a clone of this instance.
   * @throws CloneNotSupportedException if an error occurs
   */
  public Object clone() throws CloneNotSupportedException {
    AlgVector v = (AlgVector) super.clone();
    // array clone replaces the original manual copy loop
    v.m_Elements = m_Elements.clone();
    return v;
  }

  /**
   * Resets the elements to the default value which is 0.0.
   */
  protected void initialize() {
    for (int i = 0; i < m_Elements.length; i++) {
      m_Elements[i] = 0.0;
    }
  }

  /**
   * Initializes the values with random numbers between 0 and 1.
   *
   * @param random the random number generator to use for initializing
   */
  protected void initialize(Random random) {
    for (int i = 0; i < m_Elements.length; i++) {
      m_Elements[i] = random.nextDouble();
    }
  }

  /**
   * Returns the value of an element of the vector.
   *
   * @param index the element's index
   * @return the value of the element
   */
  public final double getElement(int index) {
    return m_Elements[index];
  }

  /**
   * Returns the number of elements in the vector.
   *
   * @return the number of elements
   */
  public final int numElements() {
    return m_Elements.length;
  }

  /**
   * Sets an element of the vector to the given value.
   *
   * @param index the element's index
   * @param value the new value
   */
  public final void setElement(int index, double value) {
    m_Elements[index] = value;
  }

  /**
   * Sets the elements of the vector to the values of the given array.
   * Performs a deep copy.
   *
   * @param elements an array of doubles
   */
  public final void setElements(double[] elements) {
    // System.arraycopy replaces the original manual copy loop
    System.arraycopy(elements, 0, m_Elements, 0, elements.length);
  }

  /**
   * Gets the elements of the vector and returns them as a deep-copied
   * double array.
   *
   * @return an array of doubles
   */
  public double[] getElements() {
    return m_Elements.clone();
  }

  /**
   * Gets the elements of the vector as an instance. Numeric attributes take
   * consecutive vector elements; nominal attributes get a random label;
   * non-numeric, non-nominal data is ignored so far.
   *
   * @param model the dataset structure to fit the data to
   * @param random in case of nominal values a random label is taken
   * @return the new instance, or null if this vector has no elements
   * @throws Exception if the vector is shorter than the number of numeric
   *           attributes in the model
   */
  public Instance getAsInstance(Instances model, Random random)
    throws Exception {
    Instance newInst = null;
    if (m_Elements != null) {
      newInst = new DenseInstance(model.numAttributes());
      newInst.setDataset(model);
      for (int i = 0, j = 0; i < model.numAttributes(); i++) {
        if (model.attribute(i).isNumeric()) {
          if (j >= m_Elements.length) {
            throw new Exception("Datatypes are not compatible.");
          }
          newInst.setValue(i, m_Elements[j++]);
        }
        if (model.attribute(i).isNominal()) {
          int newVal = (int)
            (random.nextDouble() * (double) (model.attribute(i).numValues()));
          // clamp in case the scaled random value rounds up to numValues()
          if (newVal == (int) model.attribute(i).numValues()) {
            newVal -= 1;
          }
          newInst.setValue(i, newVal);
        }
      }
    }
    return newInst;
  }

  /**
   * Returns the element-wise sum of this vector with another. Does not
   * modify this vector.
   *
   * @param other the vector to add
   * @return a new vector containing the sum, or null if this vector has no
   *         elements
   */
  public final AlgVector add(AlgVector other) {
    AlgVector b = null;
    if (m_Elements != null) {
      int n = m_Elements.length;
      try {
        b = (AlgVector) clone();
      } catch (CloneNotSupportedException ex) {
        b = new AlgVector(n);
      }
      for (int i = 0; i < n; i++) {
        b.m_Elements[i] = m_Elements[i] + other.m_Elements[i];
      }
    }
    return b;
  }

  /**
   * Returns the element-wise difference of this vector minus another. Does
   * not modify this vector. (Method name kept for backward compatibility;
   * "subtract" is the conventional spelling.)
   *
   * @param other the vector to subtract
   * @return a new vector containing the difference vector.
   */
  public final AlgVector substract(AlgVector other) {
    int n = m_Elements.length;
    AlgVector b;
    try {
      b = (AlgVector) clone();
    } catch (CloneNotSupportedException ex) {
      b = new AlgVector(n);
    }
    for (int i = 0; i < n; i++) {
      b.m_Elements[i] = m_Elements[i] - other.m_Elements[i];
    }
    return b;
  }

  /**
   * Returns the inner (or dot) product of this vector with another.
   *
   * @param b the other vector
   * @return the dot product, or 0.0 if this vector has no elements
   */
  public final double dotMultiply(AlgVector b) {
    double sum = 0.0;
    if (m_Elements != null) {
      int n = m_Elements.length;
      for (int i = 0; i < n; i++) {
        sum += m_Elements[i] * b.m_Elements[i];
      }
    }
    return sum;
  }

  /**
   * Multiplies this vector in place by a scalar.
   *
   * @param s the scalar
   */
  public final void scalarMultiply(double s) {
    if (m_Elements != null) {
      int n = m_Elements.length;
      for (int i = 0; i < n; i++) {
        m_Elements[i] = s * m_Elements[i];
      }
    }
  }

  /**
   * Rescales this vector in place to the given Euclidean length.
   *
   * @param len the new length of the vector
   */
  public void changeLength(double len) {
    double factor = this.norm();
    factor = len / factor;
    scalarMultiply(factor);
  }

  /**
   * Returns the Euclidean norm of the vector.
   *
   * @return the norm of the vector, or 0.0 if it has no elements
   */
  public double norm() {
    if (m_Elements != null) {
      double sum = 0.0;
      for (int i = 0; i < m_Elements.length; i++) {
        sum += m_Elements[i] * m_Elements[i];
      }
      // Math.sqrt is clearer (and correctly rounded) vs Math.pow(sum, 0.5)
      return Math.sqrt(sum);
    } else {
      return 0.0;
    }
  }

  /**
   * Norms this vector in place to length 1.0.
   */
  public final void normVector() {
    double len = this.norm();
    this.scalarMultiply(1 / len);
  }

  /**
   * Converts the vector to a comma-separated string followed by a newline.
   *
   * @return the converted string
   */
  public String toString() {
    StringBuilder text = new StringBuilder();
    for (int i = 0; i < m_Elements.length; i++) {
      if (i > 0) {
        text.append(",");
      }
      text.append(Utils.doubleToString(m_Elements[i], 6));
    }
    text.append("\n");
    return text.toString();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }

  /**
   * Main method for testing this class, can take an ARFF file as first
   * argument.
   *
   * @param args commandline options
   * @throws Exception if something goes wrong in testing
   */
  public static void main(String[] args) throws Exception {
    double[] first = {2.3, 1.2, 5.0};
    try {
      AlgVector test = new AlgVector(first);
      System.out.println("test:\n " + test);
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/AllJavadoc.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* AllJavadoc.java
* Copyright (C) 2006-2012,2015 University of Waikato, Hamilton, New Zealand
*/
package weka.core;
import java.util.HashSet;
import java.util.Vector;
/**
* Applies all known Javadoc-derived classes to a source file.
*
* <!-- options-start --> Valid options are:
* <br><br>
*
* <pre>
* -W <classname>
* The class to load.
* </pre>
*
* <pre>
* -nostars
* Suppresses the '*' in the Javadoc.
* </pre>
*
* <pre>
* -dir <dir>
* The directory above the package hierarchy of the class.
* </pre>
*
* <pre>
* -silent
* Suppresses printing in the console.
* </pre>
*
* <!-- options-end -->
*
* @author fracpete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class AllJavadoc extends Javadoc {

  /** holds an instance of every discovered Javadoc generator. */
  protected static Vector<Javadoc> m_Javadocs;

  /** determine all classes derived from Javadoc and instantiate them */
  static {
    // collect the class names, excluding this aggregating class itself
    HashSet<String> classnames = new HashSet<String>(ClassDiscovery.find(
      Javadoc.class, Javadoc.class.getPackage().getName()));
    classnames.remove(AllJavadoc.class.getName());

    // instantiate one generator per discovered class
    m_Javadocs = new Vector<Javadoc>();
    for (String classname : classnames) {
      try {
        m_Javadocs.add((Javadoc) Class.forName(classname).newInstance());
      } catch (Exception e) {
        e.printStackTrace();
      }
    }
  }

  /**
   * sets the classname of the class to generate the Javadoc for, on this
   * object and on every contained generator
   *
   * @param value the new classname
   */
  @Override
  public void setClassname(String value) {
    super.setClassname(value);
    for (Javadoc javadoc : m_Javadocs) {
      javadoc.setClassname(value);
    }
  }

  /**
   * sets whether to prefix the Javadoc with "*", on this object and on every
   * contained generator
   *
   * @param value true if stars are to be used
   */
  @Override
  public void setUseStars(boolean value) {
    super.setUseStars(value);
    for (Javadoc javadoc : m_Javadocs) {
      javadoc.setUseStars(value);
    }
  }

  /**
   * sets whether to suppress output in the console, on this object and on
   * every contained generator
   *
   * @param value true if output is to be suppressed
   */
  @Override
  public void setSilent(boolean value) {
    super.setSilent(value);
    for (Javadoc javadoc : m_Javadocs) {
      javadoc.setSilent(value);
    }
  }

  /**
   * not used in this class; all the work happens in updateJavadoc(String)
   *
   * @param index the index in the start/end tag array
   * @return never returns normally
   * @throws Exception always
   */
  @Override
  protected String generateJavadoc(int index) throws Exception {
    throw new Exception("Not used!");
  }

  /**
   * updates the Javadoc in the given source code by running every contained
   * generator over it in turn
   *
   * @param content the source code
   * @return the updated source code
   * @throws Exception in case the generation fails
   */
  @Override
  protected String updateJavadoc(String content) throws Exception {
    String result = content;
    for (Javadoc javadoc : m_Javadocs) {
      result = javadoc.updateJavadoc(result);
    }
    return result;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }

  /**
   * Parses the given commandline parameters and generates the Javadoc.
   *
   * @param args the commandline parameters for the object
   */
  public static void main(String[] args) {
    runJavadoc(new AllJavadoc(), args);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/Attribute.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Attribute.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core;
import java.io.Serializable;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Date;
import java.util.Enumeration;
import java.util.Hashtable;
import java.util.List;
import java.util.Properties;
/**
* Class for handling an attribute. Once an attribute has been created, it can't
* be changed.
* <p>
*
* The following attribute types are supported:
* <ul>
* <li>numeric: <br/>
* This type of attribute represents a floating-point number.</li>
* <li>nominal: <br/>
* This type of attribute represents a fixed set of nominal values.</li>
* <li>string: <br/>
* This type of attribute represents a dynamically expanding set of nominal
* values. Usually used in text classification.</li>
* <li>date: <br/>
* This type of attribute represents a date, internally represented as
* floating-point number storing the milliseconds since January 1, 1970,
* 00:00:00 GMT. The string representation of the date must be <a
* href="http://www.iso.org/iso/en/prods-services/popstds/datesandtime.html"
* target="_blank"> ISO-8601</a> compliant, the default is
* <code>yyyy-MM-dd'T'HH:mm:ss</code>.</li>
* <li>relational: <br/>
* This type of attribute can contain other attributes and is, e.g., used for
* representing Multi-Instance data. (Multi-Instance data consists of a nominal
* attribute containing the bag-id, then a relational attribute with all the
* attributes of the bag, and finally the class attribute.)</li>
* </ul>
*
* Typical usage (code from the main() method of this class):
* <p>
*
* <code>
* ... <br>
*
* // Create numeric attributes "length" and "weight" <br>
* Attribute length = new Attribute("length"); <br>
* Attribute weight = new Attribute("weight"); <br><br>
*
* // Create list to hold nominal values "first", "second", "third" <br>
* List<String> my_nominal_values = new ArrayList<String>(3); <br>
* my_nominal_values.add("first"); <br>
* my_nominal_values.add("second"); <br>
* my_nominal_values.add("third"); <br><br>
*
* // Create nominal attribute "position" <br>
* Attribute position = new Attribute("position", my_nominal_values);<br>
*
* ... <br>
* </code>
* <p>
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public class Attribute implements Copyable, Serializable, RevisionHandler {
/** for serialization */
static final long serialVersionUID = -742180568732916383L;
/** Constant set for numeric attributes. */
public static final int NUMERIC = 0;
/** Constant set for nominal attributes. */
public static final int NOMINAL = 1;
/** Constant set for attributes with string values. */
public static final int STRING = 2;
/** Constant set for attributes with date values. */
public static final int DATE = 3;
/** Constant set for relation-valued attributes. */
public static final int RELATIONAL = 4;
/** Constant set for symbolic attributes. */
public static final int ORDERING_SYMBOLIC = 0;
/** Constant set for ordered attributes. */
public static final int ORDERING_ORDERED = 1;
/** Constant set for modulo-ordered attributes. */
public static final int ORDERING_MODULO = 2;
/** The keyword used to denote the start of an arff attribute declaration */
public final static String ARFF_ATTRIBUTE = "@attribute";
/** A keyword used to denote a numeric attribute */
public final static String ARFF_ATTRIBUTE_INTEGER = "integer";
/** A keyword used to denote a numeric attribute */
public final static String ARFF_ATTRIBUTE_REAL = "real";
/** A keyword used to denote a numeric attribute */
public final static String ARFF_ATTRIBUTE_NUMERIC = "numeric";
/** The keyword used to denote a string attribute */
public final static String ARFF_ATTRIBUTE_STRING = "string";
/** The keyword used to denote a date attribute */
public final static String ARFF_ATTRIBUTE_DATE = "date";
/** The keyword used to denote a relation-valued attribute */
public final static String ARFF_ATTRIBUTE_RELATIONAL = "relational";
/** The keyword used to denote the end of the declaration of a subrelation */
public final static String ARFF_END_SUBRELATION = "@end";
/** Strings longer than this will be stored compressed (as a SerializedObject). */
protected static final int STRING_COMPRESS_THRESHOLD = 200;
/** The attribute's name. */
protected final/* @ spec_public non_null @ */String m_Name;
/** The attribute's type: one of NUMERIC, NOMINAL, STRING, DATE, RELATIONAL. */
protected/* @ spec_public @ */int m_Type;
/*
 * @ invariant m_Type == NUMERIC || m_Type == DATE || m_Type == STRING ||
 * m_Type == NOMINAL || m_Type == RELATIONAL;
 */
/** The attribute info (null for numeric attributes) */
protected AttributeInfo m_AttributeInfo;
/** The attribute's index; -1 until assigned via setIndex() or an index-taking constructor. */
protected/* @ spec_public @ */int m_Index = -1;
/** The attribute's weight. */
protected double m_Weight = 1.0;
/** The meta data for the attribute; null when no ProtectedProperties were supplied. */
protected AttributeMetaInfo m_AttributeMetaInfo;
/**
 * Constructor for a numeric attribute. Delegates to the metadata-aware
 * constructor with no metadata.
 *
 * @param attributeName the name for the attribute
 */
// @ requires attributeName != null;
// @ ensures m_Name == attributeName;
public Attribute(String attributeName) {
  this(attributeName, (ProtectedProperties)null);
}
/**
 * Constructor for a numeric attribute, where metadata is supplied.
 * Note that m_Type is left at its default, which is NUMERIC (0).
 *
 * @param attributeName the name for the attribute
 * @param metadata the attribute's properties (may be null)
 */
// @ requires attributeName != null;
// @ requires metadata != null;
// @ ensures m_Name == attributeName;
public Attribute(String attributeName, ProtectedProperties metadata) {
  m_Name = attributeName;
  if (metadata != null) {
    m_AttributeMetaInfo = new AttributeMetaInfo(metadata, this);
  }
}
/**
 * Constructor for a numeric or string attribute. Provides an alternative
 * way for creating string attributes. Delegates to the metadata-aware
 * variant with no metadata.
 *
 * @param attributeName the name for the attribute
 * @param createStringAttribute if true, a string attribute will be created, otherwise a numeric one.
 */
// @ requires attributeName != null;
// @ ensures m_Name == attributeName;
public Attribute(String attributeName, boolean createStringAttribute) {
  this(attributeName, createStringAttribute, (ProtectedProperties)null);
}
/**
 * Constructor for a numeric or string attribute, where metadata is supplied.
 * Provides an alternative way for creating string attributes.
 *
 * @param attributeName the name for the attribute
 * @param createStringAttribute if true, a string attribute will be created, otherwise a numeric one.
 * @param metadata the attribute's properties (may be null)
 */
// @ requires attributeName != null;
// @ requires metadata != null;
// @ ensures m_Name == attributeName;
public Attribute(String attributeName, boolean createStringAttribute, ProtectedProperties metadata) {
  m_Name = attributeName;
  if (createStringAttribute) {
    // A string attribute stores its values in a NominalAttributeInfo
    // that starts out with no predefined labels.
    m_Type = STRING;
    m_AttributeInfo = new NominalAttributeInfo((List<String>) null, attributeName);
  }
  m_AttributeMetaInfo = (metadata == null) ? null : new AttributeMetaInfo(metadata, this);
}
/**
 * Constructor for a date attribute. Delegates to the metadata-aware
 * variant with no metadata.
 *
 * @param attributeName the name for the attribute
 * @param dateFormat a string suitable for use with SimpleDateFormatter for
 * parsing dates.
 */
// @ requires attributeName != null;
// @ requires dateFormat != null;
// @ ensures m_Name == attributeName;
public Attribute(String attributeName, String dateFormat) {
  this(attributeName, dateFormat, (ProtectedProperties)null);
}
/**
 * Constructor for a date attribute, where metadata is supplied.
 *
 * @param attributeName the name for the attribute
 * @param dateFormat a string suitable for use with SimpleDateFormatter for
 * parsing dates.
 * @param metadata the attribute's properties (may be null)
 */
// @ requires attributeName != null;
// @ requires dateFormat != null;
// @ requires metadata != null;
// @ ensures m_Name == attributeName;
public Attribute(String attributeName, String dateFormat,
  ProtectedProperties metadata) {
  m_Name = attributeName;
  // The date format string is parsed and held by the attribute info object.
  m_AttributeInfo = new DateAttributeInfo(dateFormat);
  m_Type = DATE;
  m_AttributeMetaInfo = (metadata == null) ? null : new AttributeMetaInfo(metadata, this);
}
/**
 * Constructor for nominal attributes and string attributes. If a null vector
 * of attribute values is passed to the method, the attribute is assumed to be
 * a string. Delegates to the metadata-aware variant with no metadata.
 *
 * @param attributeName the name for the attribute
 * @param attributeValues a vector of strings denoting the attribute values.
 * Null if the attribute is a string attribute.
 */
// @ requires attributeName != null;
// @ ensures m_Name == attributeName;
public Attribute(String attributeName, List<String> attributeValues) {
  this(attributeName, attributeValues, (ProtectedProperties)null);
}
/**
 * Constructor for nominal attributes and string attributes, where metadata is
 * supplied. If a null vector of attribute values is passed to the method, the
 * attribute is assumed to be a string.
 *
 * @param attributeName the name for the attribute
 * @param attributeValues a vector of strings denoting the attribute values.
 * Null if the attribute is a string attribute.
 * @param metadata the attribute's properties (may be null)
 */
// @ requires attributeName != null;
// @ requires metadata != null;
/*
 * @ ensures m_Name == attributeName; ensures m_Index == -1; ensures
 * attributeValues == null && m_Type == STRING || attributeValues != null &&
 * m_Type == NOMINAL && m_Values.size() == attributeValues.size(); signals
 * (IllegalArgumentException ex) (* if duplicate strings in attributeValues
 * *);
 */
public Attribute(String attributeName, List<String> attributeValues,
  ProtectedProperties metadata) {
  m_Name = attributeName;
  m_AttributeInfo = new NominalAttributeInfo(attributeValues, attributeName);
  // A null label list means "string attribute"; otherwise it is nominal.
  m_Type = (attributeValues == null) ? STRING : NOMINAL;
  m_AttributeMetaInfo = (metadata == null) ? null : new AttributeMetaInfo(metadata, this);
}
/**
 * Constructor for relation-valued attributes. Delegates to the
 * metadata-aware variant with no metadata.
 *
 * @param attributeName the name for the attribute
 * @param header an Instances object specifying the header of the relation.
 */
public Attribute(String attributeName, Instances header) {
  this(attributeName, header, (ProtectedProperties)null);
}
/**
 * Constructor for relation-valued attributes, where metadata is supplied.
 *
 * @param attributeName the name for the attribute
 * @param header an Instances object specifying the header of the relation.
 * @param metadata the attribute's properties (may be null)
 * @throws IllegalArgumentException if the header contains instances
 */
public Attribute(String attributeName, Instances header,
  ProtectedProperties metadata) {
  // The header must be structure-only: an actual dataset is not a valid
  // description of a sub-relation's format.
  if (header.numInstances() > 0) {
    throw new IllegalArgumentException("Header for relation-valued "
      + "attribute should not contain " + "any instances");
  }
  m_Name = attributeName;
  m_AttributeInfo = new RelationalAttributeInfo(header);
  m_Type = RELATIONAL;
  m_AttributeMetaInfo = (metadata == null) ? null : new AttributeMetaInfo(metadata, this);
}
/**
 * Produces a shallow copy of this attribute.
 *
 * @return a copy of this attribute with the same index
 */
// @ also ensures \result instanceof Attribute;
@Override
public/* @ pure non_null @ */Object copy() {
  // Delegates to copy(String), reusing this attribute's own name.
  return copy(m_Name);
}
/**
 * Returns an enumeration of all the attribute's values if the attribute is
 * nominal, string, or relation-valued, null otherwise. Compressed string
 * values (stored as SerializedObject) are transparently uncompressed.
 *
 * @return enumeration of all the attribute's values, or null
 */
public final/* @ pure @ */Enumeration<Object> enumerateValues() {
  if (!isNominal() && !isString()) {
    return null;
  }
  final Enumeration<Object> inner =
    new WekaEnumeration<Object>(((NominalAttributeInfo) m_AttributeInfo).m_Values);
  // Wrap the raw enumeration so that compressed values are unwrapped
  // before being handed to the caller.
  return new Enumeration<Object>() {
    @Override
    public boolean hasMoreElements() {
      return inner.hasMoreElements();
    }
    @Override
    public Object nextElement() {
      Object next = inner.nextElement();
      return (next instanceof SerializedObject)
        ? ((SerializedObject) next).getObject()
        : next;
    }
  };
}
/**
 * Tests if given attribute is equal to this attribute. Attribute indices are ignored in the comparison.
 *
 * @param other the Object to be compared to this attribute
 * @return true if the given attribute is equal to this attribute
 */
@Override
public final/* @ pure @ */boolean equals(Object other) {
  // The full comparison logic lives in equalsMsg(); equality holds exactly
  // when no difference message is produced.
  return (equalsMsg(other) == null);
}
/**
 * Returns a hash code for this attribute based on its name.
 * Consistent with equals(): equal attributes necessarily share a name
 * (see equalsMsg), and therefore share this hash code.
 *
 * @return the hash code
 */
@Override
public final/* @ pure @ */int hashCode() {
  return name().hashCode();
}
/**
 * Tests if given attribute is equal to this attribute. If they're not the
 * same a message detailing why they differ will be returned, otherwise null.
 * Attribute indices are ignored in the comparison.
 *
 * @param other the Object to be compared to this attribute
 * @return null if the given attribute is equal to this attribute
 */
public final String equalsMsg(Object other) {
  if (other == null) {
    return "Comparing with null object";
  }
  if (!other.getClass().equals(this.getClass())) {
    return "Object has wrong class";
  }
  Attribute that = (Attribute) other;
  if (!m_Name.equals(that.m_Name)) {
    return "Names differ: " + m_Name + " != " + that.m_Name;
  }
  if (isNominal() && that.isNominal()) {
    // Nominal attributes are equal iff their label lists match exactly,
    // position by position.
    NominalAttributeInfo mine = (NominalAttributeInfo) m_AttributeInfo;
    NominalAttributeInfo theirs = (NominalAttributeInfo) that.m_AttributeInfo;
    if (mine.m_Values.size() != theirs.m_Values.size()) {
      return "Different number of labels: " + mine.m_Values.size() + " != "
        + theirs.m_Values.size();
    }
    for (int i = 0; i < mine.m_Values.size(); i++) {
      if (!mine.m_Values.get(i).equals(theirs.m_Values.get(i))) {
        return "Labels differ at position " + (i + 1) + ": "
          + mine.m_Values.get(i) + " != " + theirs.m_Values.get(i);
      }
    }
    return null;
  }
  if (isRelationValued() && that.isRelationValued()) {
    // Relation-valued attributes compare their sub-relation headers.
    return ((RelationalAttributeInfo) m_AttributeInfo).m_Header
      .equalHeadersMsg(((RelationalAttributeInfo) that.m_AttributeInfo).m_Header);
  }
  if (type() != that.type()) {
    return "Types differ: " + typeToString(this) + " != " + typeToString(that);
  }
  return null;
}
/**
 * Returns a string representation of the attribute type.
 * Convenience overload delegating to {@link #typeToString(int)}.
 *
 * @param att the attribute to return the type string for
 * @return the string representation of the attribute type
 */
public static String typeToString(Attribute att) {
  return typeToString(att.type());
}
/**
 * Returns a string representation of the attribute type.
 *
 * @param type the type of the attribute (NUMERIC, NOMINAL, STRING, DATE,
 * RELATIONAL)
 * @return the string representation of the attribute type
 */
public static String typeToString(int type) {
  // Return directly from each case instead of funnelling through a
  // result variable.
  switch (type) {
  case NUMERIC:
    return "numeric";
  case NOMINAL:
    return "nominal";
  case STRING:
    return "string";
  case DATE:
    return "date";
  case RELATIONAL:
    return "relational";
  default:
    return "unknown(" + type + ")";
  }
}
/**
 * Returns a short string representation of the attribute type.
 * Convenience overload delegating to {@link #typeToStringShort(int)}.
 *
 * @param att the attribute to return the type string for
 * @return the string representation of the attribute type
 */
public static String typeToStringShort(Attribute att) {
  return typeToStringShort(att.type());
}
/**
 * Returns a short (three-character) string representation of the attribute
 * type.
 *
 * @param type the type of the attribute (NUMERIC, NOMINAL, STRING, DATE,
 * RELATIONAL)
 * @return the string representation of the attribute type
 */
public static String typeToStringShort(int type) {
  // Return directly from each case instead of funnelling through a
  // result variable.
  switch (type) {
  case NUMERIC:
    return "Num";
  case NOMINAL:
    return "Nom";
  case STRING:
    return "Str";
  case DATE:
    return "Dat";
  case RELATIONAL:
    return "Rel";
  default:
    return "???";
  }
}
/**
 * Returns the index of this attribute.
 * Returns -1 when the attribute has not been assigned an index yet.
 *
 * @return the index of this attribute
 */
// @ ensures \result == m_Index;
public final/* @ pure @ */int index() {
  return m_Index;
}
/**
 * Returns the index of a given attribute value. (The index of the first
 * occurence of this value.)
 *
 * @param value the value for which the index is to be returned
 * @return the index of the given attribute value if attribute is nominal or a
 * string, -1 if it is not or the value can't be found
 */
public final int indexOfValue(String value) {
  if (!isNominal() && !isString()) {
    return -1;
  }
  // Long values are stored compressed, so the lookup key must be
  // compressed the same way to hit the hashtable entry.
  Object key = value;
  if (value.length() > STRING_COMPRESS_THRESHOLD) {
    try {
      key = new SerializedObject(value, true);
    } catch (Exception ex) {
      System.err.println("Couldn't compress string attribute value -"
        + " searching uncompressed.");
    }
  }
  Integer index = ((NominalAttributeInfo) m_AttributeInfo).m_Hashtable.get(key);
  return (index == null) ? -1 : index.intValue();
}
/**
 * Test if the attribute is nominal.
 *
 * @return true if the attribute is nominal
 */
// @ ensures \result <==> (m_Type == NOMINAL);
public final/* @ pure @ */boolean isNominal() {
  return (m_Type == NOMINAL);
}
/**
 * Tests if the attribute is numeric.
 * Note that date attributes count as numeric here, since date values are
 * stored as milliseconds (doubles).
 *
 * @return true if the attribute is numeric
 */
// @ ensures \result <==> ((m_Type == NUMERIC) || (m_Type == DATE));
public final/* @ pure @ */boolean isNumeric() {
  return ((m_Type == NUMERIC) || (m_Type == DATE));
}
/**
 * Tests if the attribute is relation valued.
 *
 * @return true if the attribute is relation valued
 */
// @ ensures \result <==> (m_Type == RELATIONAL);
public final/* @ pure @ */boolean isRelationValued() {
  return (m_Type == RELATIONAL);
}
/**
 * Tests if the attribute is a string.
 *
 * @return true if the attribute is a string
 */
// @ ensures \result <==> (m_Type == STRING);
public final/* @ pure @ */boolean isString() {
  return (m_Type == STRING);
}
/**
 * Tests if the attribute is a date type.
 *
 * @return true if the attribute is a date type
 */
// @ ensures \result <==> (m_Type == DATE);
public final/* @ pure @ */boolean isDate() {
  return (m_Type == DATE);
}
/**
 * Returns the attribute's name.
 *
 * @return the attribute's name as a string
 */
// @ ensures \result == m_Name;
public final/* @ pure @ */String name() {
  return m_Name;
}
/**
 * Returns the number of attribute values. Returns 0 for attributes that are
 * not either nominal, string, or relation-valued.
 *
 * @return the number of attribute values
 */
public final/* @ pure @ */int numValues() {
  // Relation-valued attributes also keep their values in the (inherited)
  // m_Values list, so the same lookup covers all three value-bearing types.
  if (isNominal() || isString() || isRelationValued()) {
    return ((NominalAttributeInfo) m_AttributeInfo).m_Values.size();
  }
  return 0;
}
/**
 * Returns a description of this attribute in ARFF format. Quotes strings if
 * they contain whitespace characters, or if they are a question mark.
 *
 * @return a description of this attribute as a string
 */
@Override
public final String toString() {
  // StringBuilder instead of StringBuffer: no synchronization needed for a
  // method-local buffer.
  StringBuilder text = new StringBuilder();
  // The weight annotation is identical for every type, so compute it once
  // instead of repeating the ternary in each case.
  String weightSuffix = (weight() != 1.0) ? " {" + weight() + "}" : "";
  text.append(ARFF_ATTRIBUTE).append(" ").append(Utils.quote(m_Name))
    .append(" ");
  switch (m_Type) {
  case NOMINAL:
    // Emit the label set as {a,b,c}, quoting each label.
    text.append('{');
    Enumeration<Object> enu = enumerateValues();
    while (enu.hasMoreElements()) {
      text.append(Utils.quote((String) enu.nextElement()));
      if (enu.hasMoreElements()) {
        text.append(',');
      }
    }
    text.append('}');
    text.append(weightSuffix);
    break;
  case NUMERIC:
    text.append(ARFF_ATTRIBUTE_NUMERIC).append(weightSuffix);
    break;
  case STRING:
    text.append(ARFF_ATTRIBUTE_STRING).append(weightSuffix);
    break;
  case DATE:
    text.append(ARFF_ATTRIBUTE_DATE).append(" ")
      .append(Utils.quote(((DateAttributeInfo) m_AttributeInfo).m_DateFormat.toPattern()));
    text.append(weightSuffix);
    break;
  case RELATIONAL:
    // Sub-relation: the nested attributes are listed line by line,
    // terminated by "@end <name>".
    text.append(ARFF_ATTRIBUTE_RELATIONAL).append(weightSuffix).append("\n");
    Enumeration<Attribute> enm =
      ((RelationalAttributeInfo) m_AttributeInfo).m_Header.enumerateAttributes();
    while (enm.hasMoreElements()) {
      text.append(enm.nextElement()).append("\n");
    }
    text.append(ARFF_END_SUBRELATION).append(" ").append(Utils.quote(m_Name));
    break;
  default:
    text.append("UNKNOWN");
    break;
  }
  return text.toString();
}
/**
 * Returns the attribute's type as an integer.
 *
 * @return the attribute's type.
 */
// @ ensures \result == m_Type;
public final/* @ pure @ */int type() {
  return m_Type;
}
/**
 * Returns the Date format pattern in case this attribute is of type DATE,
 * otherwise an empty string.
 *
 * @return the date format pattern
 * @see java.text.SimpleDateFormat
 */
public final String getDateFormat() {
  if (isDate()) {
    return ((DateAttributeInfo)m_AttributeInfo).m_DateFormat.toPattern();
  } else {
    return "";
  }
}
/**
 * Returns a value of a nominal or string attribute. Returns an empty string
 * if the attribute is neither a string nor a nominal attribute.
 *
 * @param valIndex the value's index
 * @return the attribute's value as a string
 */
public final/* @ non_null pure @ */String value(int valIndex) {
  if (!isNominal() && !isString()) {
    return "";
  }
  Object stored = ((NominalAttributeInfo) m_AttributeInfo).m_Values.get(valIndex);
  // Long values are stored compressed; unwrap before returning.
  return (stored instanceof SerializedObject)
    ? (String) ((SerializedObject) stored).getObject()
    : (String) stored;
}
/**
 * Returns the header info for a relation-valued attribute, null if the
 * attribute is not relation-valued.
 *
 * @return the attribute's value as an Instances object
 */
public final/* @ non_null pure @ */Instances relation() {
  if (!isRelationValued()) {
    return null;
  } else {
    return ((RelationalAttributeInfo)m_AttributeInfo).m_Header;
  }
}
/**
 * Returns a value of a relation-valued attribute. Returns null if the
 * attribute is not relation-valued.
 *
 * @param valIndex the value's index
 * @return the attribute's value as an Instances object
 */
public final/* @ non_null pure @ */Instances relation(int valIndex) {
  if (!isRelationValued()) {
    return null;
  } else {
    return (Instances) ((RelationalAttributeInfo)m_AttributeInfo).m_Values.get(valIndex);
  }
}
/**
 * Constructor for a numeric attribute with a particular index.
 * Delegates to {@link #Attribute(String)} and then stores the index.
 *
 * @param attributeName the name for the attribute
 * @param index the attribute's index
 */
// @ requires attributeName != null;
// @ requires index >= 0;
// @ ensures m_Name == attributeName;
// @ ensures m_Index == index;
public Attribute(String attributeName, int index) {
  this(attributeName);
  m_Index = index;
}
/**
 * Constructor for date attributes with a particular index.
 *
 * @param attributeName the name for the attribute
 * @param dateFormat a string suitable for use with SimpleDateFormatter for
 * parsing dates. Null for a default format string.
 * @param index the attribute's index
 */
// @ requires attributeName != null;
// @ requires index >= 0;
// @ ensures m_Name == attributeName;
// @ ensures m_Index == index;
public Attribute(String attributeName, String dateFormat, int index) {
  this(attributeName, dateFormat);
  m_Index = index;
}
/**
 * Constructor for nominal attributes and string attributes with a particular
 * index. If a null vector of attribute values is passed to the method, the
 * attribute is assumed to be a string.
 *
 * @param attributeName the name for the attribute
 * @param attributeValues a vector of strings denoting the attribute values.
 * Null if the attribute is a string attribute.
 * @param index the attribute's index
 */
// @ requires attributeName != null;
// @ requires index >= 0;
// @ ensures m_Name == attributeName;
// @ ensures m_Index == index;
public Attribute(String attributeName, List<String> attributeValues, int index) {
  this(attributeName, attributeValues);
  m_Index = index;
}
/**
 * Constructor for a relation-valued attribute with a particular index.
 *
 * @param attributeName the name for the attribute
 * @param header the header information for this attribute
 * @param index the attribute's index
 */
// @ requires attributeName != null;
// @ requires index >= 0;
// @ ensures m_Name == attributeName;
// @ ensures m_Index == index;
public Attribute(String attributeName, Instances header, int index) {
  this(attributeName, header);
  m_Index = index;
}
/**
 * Adds a string value to the list of valid strings for attributes of type
 * STRING and returns the index of the string.
 *
 * @param value The string value to add
 * @return the index assigned to the string, or -1 if the attribute is not of
 * type Attribute.STRING
 */
/*
 * @ requires value != null; ensures isString() && 0 <= \result && \result <
 * m_Values.size() || ! isString() && \result == -1;
 */
public int addStringValue(String value) {
  if (!isString()) {
    return -1;
  }
  // Long values are stored compressed; on compression failure we fall back
  // to storing the raw string (best effort, matches lookup behavior).
  Object store = value;
  if (value.length() > STRING_COMPRESS_THRESHOLD) {
    try {
      store = new SerializedObject(value, true);
    } catch (Exception ex) {
      System.err.println("Couldn't compress string attribute value -"
        + " storing uncompressed.");
    }
  }
  NominalAttributeInfo info = (NominalAttributeInfo) m_AttributeInfo;
  Integer index = info.m_Hashtable.get(store);
  if (index != null) {
    // Value already known: return its existing index.
    return index.intValue();
  }
  int intIndex = info.m_Values.size();
  info.m_Values.add(store);
  // Integer.valueOf instead of the deprecated new Integer(...) constructor.
  info.m_Hashtable.put(store, Integer.valueOf(intIndex));
  return intIndex;
}
/**
 * Clear the map and list of values and set them to contain just the supplied
 * value
 *
 * @param value the current (and only) value of this String attribute. If null
 * then just the map is cleared.
 */
public void setStringValue(String value) {
  if (!isString()) {
    return;
  }
  NominalAttributeInfo info = (NominalAttributeInfo) m_AttributeInfo;
  info.m_Hashtable.clear();
  info.m_Values.clear();
  if (value != null) {
    // Re-insert the single value via the normal add path (handles
    // compression of long strings).
    addStringValue(value);
  }
}
/**
 * Adds a string value to the list of valid strings for attributes of type
 * STRING and returns the index of the string. This method is more efficient
 * than addStringValue(String) for long strings, because the (possibly
 * already compressed) stored object is reused as-is.
 *
 * @param src The Attribute containing the string value to add.
 * @param index the index of the string value in the source attribute.
 * @return the index assigned to the string, or -1 if the attribute is not of
 * type Attribute.STRING
 */
/*
 * @ requires src != null; requires 0 <= index && index < src.m_Values.size();
 * ensures isString() && 0 <= \result && \result < m_Values.size() || !
 * isString() && \result == -1;
 */
public int addStringValue(Attribute src, int index) {
  if (!isString()) {
    return -1;
  }
  Object store = ((NominalAttributeInfo) src.m_AttributeInfo).m_Values.get(index);
  NominalAttributeInfo info = (NominalAttributeInfo) m_AttributeInfo;
  Integer oldIndex = info.m_Hashtable.get(store);
  if (oldIndex != null) {
    // Value already known: return its existing index.
    return oldIndex.intValue();
  }
  int intIndex = info.m_Values.size();
  info.m_Values.add(store);
  // Integer.valueOf instead of the deprecated new Integer(...) constructor.
  info.m_Hashtable.put(store, Integer.valueOf(intIndex));
  return intIndex;
}
/**
 * Adds a relation to a relation-valued attribute.
 *
 * @param value The value to add
 * @return the index assigned to the value, or -1 if the attribute is not of
 * type Attribute.RELATIONAL
 * @throws IllegalArgumentException if the value's header is not compatible
 * with this attribute's header
 */
public int addRelation(Instances value) {
  if (!isRelationValued()) {
    return -1;
  }
  if (!((RelationalAttributeInfo) m_AttributeInfo).m_Header.equalHeaders(value)) {
    throw new IllegalArgumentException("Incompatible value for "
      + "relation-valued attribute.\n"
      + ((RelationalAttributeInfo) m_AttributeInfo).m_Header.equalHeadersMsg(value));
  }
  // NOTE(review): the cast to NominalAttributeInfo relies on
  // RelationalAttributeInfo inheriting m_Values/m_Hashtable from it
  // (kept from the original code).
  NominalAttributeInfo info = (NominalAttributeInfo) m_AttributeInfo;
  Integer index = info.m_Hashtable.get(value);
  if (index != null) {
    // Relation already known: return its existing index.
    return index.intValue();
  }
  int intIndex = info.m_Values.size();
  info.m_Values.add(value);
  // Integer.valueOf instead of the deprecated new Integer(...) constructor.
  info.m_Hashtable.put(value, Integer.valueOf(intIndex));
  return intIndex;
}
/**
 * Adds an attribute value. Creates a fresh list of attribute values before
 * adding it.
 *
 * Copy-on-write: the values list and hashtable are cloned first, because
 * copy(String) shares the same NominalAttributeInfo between attribute
 * copies; mutating in place would affect all of them.
 *
 * @param value the attribute value
 */
final void addValue(String value) {
  ((NominalAttributeInfo)m_AttributeInfo).m_Values =
    Utils.cast(((NominalAttributeInfo)m_AttributeInfo).m_Values.clone());
  ((NominalAttributeInfo)m_AttributeInfo).m_Hashtable =
    Utils.cast(((NominalAttributeInfo)m_AttributeInfo).m_Hashtable.clone());
  forceAddValue(value);
}
/**
 * Produces a shallow copy of this attribute with a new name.
 * Note: m_AttributeInfo and m_AttributeMetaInfo are shared (not cloned)
 * between the original and the copy; mutators such as addValue() clone
 * before writing to keep copies independent.
 *
 * @param newName the name of the new attribute
 * @return a copy of this attribute with the same index
 */
// @ requires newName != null;
// @ ensures \result.m_Name == newName;
// @ ensures \result.m_Index == m_Index;
// @ ensures \result.m_Type == m_Type;
public final/* @ pure non_null @ */Attribute copy(String newName) {
  Attribute copy = new Attribute(newName);
  copy.m_Index = m_Index;
  copy.m_Type = m_Type;
  copy.m_AttributeInfo = m_AttributeInfo;
  copy.m_AttributeMetaInfo = m_AttributeMetaInfo;
  copy.m_Weight = m_Weight;
  return copy;
}
/**
 * Removes a value of a nominal, string, or relation-valued attribute. Creates
 * a fresh list of attribute values before removing it (copy-on-write, since
 * the attribute info may be shared between attribute copies).
 *
 * @param index the value's index
 * @throws IllegalArgumentException if the attribute is not of the correct
 * type
 */
// @ requires isNominal() || isString() || isRelationValued();
// @ requires 0 <= index && index < m_Values.size();
final void delete(int index) {
  if (!isNominal() && !isString() && !isRelationValued()) {
    throw new IllegalArgumentException("Can only remove value of "
      + "nominal, string or relation-" + " valued attribute!");
  }
  NominalAttributeInfo info = (NominalAttributeInfo) m_AttributeInfo;
  info.m_Values = Utils.cast(info.m_Values.clone());
  info.m_Values.remove(index);
  if (!isRelationValued()) {
    // Rebuild the value->index map: entries above the removed index shift
    // down by one, the removed entry is dropped.
    Hashtable<Object, Integer> hash = new Hashtable<Object, Integer>(
      info.m_Hashtable.size());
    Enumeration<Object> enu = info.m_Hashtable.keys();
    while (enu.hasMoreElements()) {
      Object key = enu.nextElement();
      Integer valIndexObject = info.m_Hashtable.get(key);
      int valIndex = valIndexObject.intValue();
      if (valIndex > index) {
        // Integer.valueOf instead of the deprecated new Integer(...).
        hash.put(key, Integer.valueOf(valIndex - 1));
      } else if (valIndex < index) {
        hash.put(key, valIndexObject);
      }
    }
    info.m_Hashtable = hash;
  }
}
/**
 * Adds an attribute value without cloning the underlying storage first
 * (unlike addValue()).
 *
 * @param value the attribute value
 */
// @ requires value != null;
// @ ensures m_Values.size() == \old(m_Values.size()) + 1;
final void forceAddValue(String value) {
  // Long values are stored compressed; on compression failure we fall back
  // to storing the raw string.
  Object store = value;
  if (value.length() > STRING_COMPRESS_THRESHOLD) {
    try {
      store = new SerializedObject(value, true);
    } catch (Exception ex) {
      System.err.println("Couldn't compress string attribute value -"
        + " storing uncompressed.");
    }
  }
  NominalAttributeInfo info = (NominalAttributeInfo) m_AttributeInfo;
  info.m_Values.add(store);
  // Integer.valueOf instead of the deprecated new Integer(...) constructor.
  info.m_Hashtable.put(store, Integer.valueOf(info.m_Values.size() - 1));
}
/**
 * Sets the index of this attribute.
 *
 * @param index the index of this attribute
 */
// @ requires 0 <= index;
// @ assignable m_Index;
// @ ensures m_Index == index;
final void setIndex(int index) {
  m_Index = index;
}
/**
 * Sets a value of a nominal attribute or string attribute. Creates a fresh
 * list of attribute values before it is set (copy-on-write, since the
 * attribute info may be shared between attribute copies).
 *
 * @param index the value's index
 * @param string the value
 * @throws IllegalArgumentException if the attribute is not nominal or string.
 */
// @ requires string != null;
// @ requires isNominal() || isString();
// @ requires 0 <= index && index < m_Values.size();
final void setValue(int index, String string) {
  if (m_Type != NOMINAL && m_Type != STRING) {
    throw new IllegalArgumentException("Can only set values for nominal"
      + " or string attributes!");
  }
  NominalAttributeInfo info = (NominalAttributeInfo) m_AttributeInfo;
  info.m_Values = Utils.cast(info.m_Values.clone());
  info.m_Hashtable = Utils.cast(info.m_Hashtable.clone());
  // Long values are stored compressed; on compression failure we fall back
  // to storing the raw string.
  Object store = string;
  if (string.length() > STRING_COMPRESS_THRESHOLD) {
    try {
      store = new SerializedObject(string, true);
    } catch (Exception ex) {
      System.err.println("Couldn't compress string attribute value -"
        + " storing uncompressed.");
    }
  }
  // Replace the old value in both the list and the value->index map.
  info.m_Hashtable.remove(info.m_Values.get(index));
  info.m_Values.set(index, store);
  // Integer.valueOf instead of the deprecated new Integer(...) constructor.
  info.m_Hashtable.put(store, Integer.valueOf(index));
}
/**
 * Sets a value of a relation-valued attribute. Creates a fresh list of
 * attribute values before it is set.
 *
 * @param index the value's index
 * @param data the value
 * @throws IllegalArgumentException if the attribute is not relation-valued.
 */
final void setValue(int index, Instances data) {
  if (!isRelationValued()) {
    throw new IllegalArgumentException("Can only set value for"
      + " relation-valued attributes!");
  }
  RelationalAttributeInfo info = (RelationalAttributeInfo) m_AttributeInfo;
  if (!data.equalHeaders(info.m_Header)) {
    throw new IllegalArgumentException("Can't set relational value. "
      + "Headers not compatible.\n" + data.equalHeadersMsg(info.m_Header));
  }
  // Copy-on-write before mutating the (possibly shared) values list.
  info.m_Values = Utils.cast(info.m_Values.clone());
  info.m_Values.set(index, data);
}
/**
 * Returns the given amount of milliseconds formatted according to the current
 * Date format.
 *
 * @param date the date, represented in milliseconds since January 1, 1970,
 * 00:00:00 GMT, to return as string
 * @return the formatted date
 * @throws IllegalArgumentException if this attribute is not a date attribute
 */
// @ requires isDate();
public/* @pure@ */String formatDate(double date) {
  if (m_Type != DATE) {
    throw new IllegalArgumentException("Can only format date values for date"
      + " attributes!");
  }
  return ((DateAttributeInfo) m_AttributeInfo).m_DateFormat.format(new Date((long) date));
}
/**
 * Parses the given String as Date, according to the current format and
 * returns the corresponding amount of milliseconds.
 *
 * @param string the date to parse
 * @return the date in milliseconds since January 1, 1970, 00:00:00 GMT
 * @throws ParseException if parsing fails
 * @throws IllegalArgumentException if this attribute is not a date attribute
 */
// @ requires isDate();
// @ requires string != null;
public double parseDate(String string) throws ParseException {
  if (m_Type != DATE) {
    throw new IllegalArgumentException("Can only parse date values for date"
      + " attributes!");
  }
  // TODO put in a safety check here if we can't store the value in a
  // double.
  return ((DateAttributeInfo) m_AttributeInfo).m_DateFormat.parse(string).getTime();
}
/**
 * Returns the properties supplied for this attribute. Returns null
 * if there is no meta data for this attribute.
 *
 * @return metadata for this attribute
 */
public final/* @ pure @ */ProtectedProperties getMetadata() {
  if (m_AttributeMetaInfo == null) {
    return null;
  }
  return m_AttributeMetaInfo.m_Metadata;
}
/**
 * Returns the ordering of the attribute. One of the following:
 *
 * ORDERING_SYMBOLIC - attribute values should be treated as symbols.
 * ORDERING_ORDERED - attribute values have a global ordering. ORDERING_MODULO
 * - attribute values have an ordering which wraps.
 *
 * Defaults to ORDERING_ORDERED when no metadata is present.
 *
 * @return the ordering type of the attribute
 */
public final/* @ pure @ */int ordering() {
  if (m_AttributeMetaInfo == null) {
    return ORDERING_ORDERED;
  }
  return m_AttributeMetaInfo.m_Ordering;
}
/**
 * Returns whether the attribute values are equally spaced.
 * Defaults to true when no metadata is present.
 *
 * @return whether the attribute is regular or not
 */
public final/* @ pure @ */boolean isRegular() {
  if (m_AttributeMetaInfo == null) {
    return true;
  }
  return m_AttributeMetaInfo.m_IsRegular;
}
/**
 * Returns whether the attribute can be averaged meaningfully.
 * Defaults to true when no metadata is present.
 *
 * @return whether the attribute can be averaged or not
 */
public final/* @ pure @ */boolean isAveragable() {
  if (m_AttributeMetaInfo == null) {
    return true;
  }
  return m_AttributeMetaInfo.m_IsAveragable;
}
/**
 * Returns whether the attribute has a zeropoint and may be added
 * meaningfully. Defaults to true when no metadata is present.
 *
 * @return whether the attribute has a zeropoint or not
 */
public final/* @ pure @ */boolean hasZeropoint() {
  if (m_AttributeMetaInfo == null) {
    return true;
  }
  return m_AttributeMetaInfo.m_HasZeropoint;
}
/**
 * Returns the attribute's weight.
 *
 * @return the attribute's weight as a double
 */
public final/* @ pure @ */double weight() {
  return m_Weight;
}
/**
 * Sets the new attribute's weight. Does not modify the weight info stored in the
 * attribute's meta data object!
 *
 * @param value the new weight
 */
public void setWeight(double value) {
  m_Weight = value;
}
/**
 * Returns the lower bound of a numeric attribute.
 *
 * @return the lower bound of the specified numeric range; -Double.MAX_VALUE
 *         when no meta data has been supplied
 */
public final/* @ pure @ */double getLowerNumericBound() {
  // Unbounded below when no meta data is present.
  return (m_AttributeMetaInfo == null) ? -Double.MAX_VALUE : m_AttributeMetaInfo.m_LowerBound;
}
/**
 * Returns whether the lower numeric bound of the attribute is open.
 *
 * @return whether the lower numeric bound is open or not (closed); true
 *         when no meta data has been supplied
 */
public final/* @ pure @ */boolean lowerNumericBoundIsOpen() {
  return (m_AttributeMetaInfo == null) || m_AttributeMetaInfo.m_LowerBoundIsOpen;
}
/**
 * Returns the upper bound of a numeric attribute.
 *
 * @return the upper bound of the specified numeric range; Double.MAX_VALUE
 *         when no meta data has been supplied
 */
public final/* @ pure @ */double getUpperNumericBound() {
  // Unbounded above when no meta data is present.
  return (m_AttributeMetaInfo == null) ? Double.MAX_VALUE : m_AttributeMetaInfo.m_UpperBound;
}
/**
 * Returns whether the upper numeric bound of the attribute is open.
 *
 * @return whether the upper numeric bound is open or not (closed); true
 *         when no meta data has been supplied
 */
public final/* @ pure @ */boolean upperNumericBoundIsOpen() {
  return (m_AttributeMetaInfo == null) || m_AttributeMetaInfo.m_UpperBoundIsOpen;
}
/**
 * Determines whether a value lies within the bounds of the attribute.
 *
 * @param value the value to check
 * @return whether the value is in range
 */
public final/* @ pure @ */boolean isInRange(double value) {
  // Dates and missing values are a special case: always in range.
  if (m_Type == DATE || Utils.isMissingValue(value)) {
    return true;
  }
  if (m_Type != NUMERIC) {
    // Non-numeric: the value must map to a valid label index.
    int labelIndex = (int) value;
    return labelIndex >= 0
      && labelIndex < ((NominalAttributeInfo) m_AttributeInfo).m_Hashtable.size();
  }
  // Numeric without declared bounds is unbounded.
  if (m_AttributeMetaInfo == null) {
    return true;
  }
  // Check the lower bound, honouring open (strict) vs. closed intervals.
  boolean belowLower = m_AttributeMetaInfo.m_LowerBoundIsOpen
    ? value <= m_AttributeMetaInfo.m_LowerBound
    : value < m_AttributeMetaInfo.m_LowerBound;
  if (belowLower) {
    return false;
  }
  // Check the upper bound the same way.
  boolean aboveUpper = m_AttributeMetaInfo.m_UpperBoundIsOpen
    ? value >= m_AttributeMetaInfo.m_UpperBound
    : value > m_AttributeMetaInfo.m_UpperBound;
  return !aboveUpper;
}
/**
 * Returns the revision string.
 *
 * @return the revision
 */
@Override
public String getRevision() {
  final String revision = "$Revision$";
  return RevisionUtils.extract(revision);
}
/**
 * Simple main method for testing this class. Exercises date parsing and
 * formatting, nominal attribute creation, copying, the index/type
 * accessors and the relation-valued constructor, printing results to
 * standard out.
 *
 * @param ops the commandline options (currently unused)
 */
// @ requires ops != null;
// @ requires \nonnullelements(ops);
public static void main(String[] ops) {
  try {
    // Exercise the numeric-attribute constructor; result intentionally discarded.
    new Attribute("length");
    Attribute weight = new Attribute("weight");
    // Create date attribute "date" with the given format string
    Attribute date = new Attribute("date", "yyyy-MM-dd HH:mm:ss");
    System.out.println(date);
    double dd = date.parseDate("2001-04-04 14:13:55");
    System.out.println("Test date = " + dd);
    System.out.println(date.formatDate(dd));
    dd = new Date().getTime();
    System.out.println("Date now = " + dd);
    System.out.println(date.formatDate(dd));
    // Create vector to hold nominal values "first", "second", "third"
    List<String> my_nominal_values = new ArrayList<String>(3);
    my_nominal_values.add("first");
    my_nominal_values.add("second");
    my_nominal_values.add("third");
    // Create nominal attribute "position"
    Attribute position = new Attribute("position", my_nominal_values);
    // Print the name of "position"
    System.out.println("Name of \"position\": " + position.name());
    // Print the values of "position"
    Enumeration<Object> attValues = position.enumerateValues();
    while (attValues.hasMoreElements()) {
      String string = (String) attValues.nextElement();
      System.out.println("Value of \"position\": " + string);
    }
    // Shallow copy attribute "position"
    Attribute copy = (Attribute) position.copy();
    // Test if attributes are the same
    System.out.println("Copy is the same as original: "
      + copy.equals(position));
    // Print index of attribute "weight" (should be unset: -1)
    System.out.println("Index of attribute \"weight\" (should be -1): "
      + weight.index());
    // Print index of value "first" of attribute "position"
    System.out
      .println("Index of value \"first\" of \"position\" (should be 0): "
        + position.indexOfValue("first"));
    // Tests type of attribute "position"
    System.out.println("\"position\" is numeric: " + position.isNumeric());
    System.out.println("\"position\" is nominal: " + position.isNominal());
    System.out.println("\"position\" is string: " + position.isString());
    // Prints name of attribute "position"
    System.out.println("Name of \"position\": " + position.name());
    // Prints number of values of attribute "position"
    System.out.println("Number of values for \"position\": "
      + position.numValues());
    // Prints the values (again, via index-based access this time)
    for (int i = 0; i < position.numValues(); i++) {
      System.out.println("Value " + i + ": " + position.value(i));
    }
    // Prints the attribute "position" in ARFF format
    System.out.println(position);
    // Checks type of attribute "position" using constants
    switch (position.type()) {
    case Attribute.NUMERIC:
      System.out.println("\"position\" is numeric");
      break;
    case Attribute.NOMINAL:
      System.out.println("\"position\" is nominal");
      break;
    case Attribute.STRING:
      System.out.println("\"position\" is string");
      break;
    case Attribute.DATE:
      System.out.println("\"position\" is date");
      break;
    case Attribute.RELATIONAL:
      System.out.println("\"position\" is relation-valued");
      break;
    default:
      System.out.println("\"position\" has unknown type");
    }
    // Exercise the relation-valued attribute constructor
    ArrayList<Attribute> atts = new ArrayList<Attribute>(1);
    atts.add(position);
    Instances relation = new Instances("Test", atts, 0);
    Attribute relationValuedAtt = new Attribute("test", relation);
    System.out.println(relationValuedAtt);
  } catch (Exception e) {
    e.printStackTrace();
  }
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/AttributeInfo.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* AttributeInfo.java
* Copyright (C) 2014 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core;
import java.io.Serializable;
/**
 * Marker interface implemented by classes that store information for a
 * particular attribute type (e.g. nominal or relational attributes).
 * No methods are declared here because the stored information -- and
 * therefore the accessors -- depend on the attribute type.
 */
interface AttributeInfo extends Serializable {
  /** Methods depend on the information that is stored. */
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/AttributeLocator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* StringLocator.java
* Copyright (C) 2005-2014 University of Waikato, Hamilton, New Zealand
*/
package weka.core;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.BitSet;
/**
* This class locates and records the indices of a certain type of attributes,
* recursively in case of Relational attributes.
*
* @author fracpete (fracpete at waikato dot ac dot nz)
* @version $Revision$
* @see Attribute#RELATIONAL
*/
public class AttributeLocator
implements Serializable, Comparable<AttributeLocator>, RevisionHandler {
/** for serialization */
private static final long serialVersionUID = -2932848827681070345L;
/** the attribute indices that may be inspected */
protected int[] m_AllowedIndices = null;
/** contains the attribute locations, either true or false Boolean objects */
protected BitSet m_Attributes = null;
/** contains the locator locations, either null or a AttributeLocator reference */
protected ArrayList<AttributeLocator> m_Locators = null;
/** the type of the attribute */
protected int m_Type = -1;
/** the referenced data */
protected Instances m_Data = null;
/** the indices */
protected int[] m_Indices = null;
/** the indices of locator objects */
protected int[] m_LocatorIndices = null;
/**
* Initializes the AttributeLocator with the given data for the specified
* type of attribute. Checks all attributes.
*
* @param data the data to work on
* @param type the type of attribute to locate
*/
public AttributeLocator(Instances data, int type) {
this(data, type, 0, data.numAttributes() - 1);
}
/**
* Initializes the AttributeLocator with the given data for the specified
* type of attribute. Checks only the given range.
*
* @param data the data to work on
* @param type the type of attribute to locate
* @param fromIndex the first index to inspect (including)
* @param toIndex the last index to check (including)
*/
public AttributeLocator(Instances data, int type, int fromIndex, int toIndex) {
super();
int[] indices = new int[toIndex - fromIndex + 1];
for (int i = 0; i < indices.length; i++)
indices[i] = fromIndex + i;
initialize(data, type, indices);
}
/**
* initializes the AttributeLocator with the given data for the specified
* type of attribute. Checks only the given attribute indices.
*
* @param data the data to work on
* @param type the type of attribute to locate
* @param indices the attribute indices to check
*/
public AttributeLocator(Instances data, int type, int[] indices) {
super();
initialize(data, type, indices);
}
/**
* initializes the AttributeLocator
*
* @param data the data to base the search for attributes on
* @param type the type of attribute to look for
* @param indices the indices that are allowed to check
*/
protected void initialize(Instances data, int type, int[] indices) {
m_Data = new Instances(data, 0);
m_Type = type;
m_AllowedIndices = new int[indices.length];
System.arraycopy(indices, 0, m_AllowedIndices, 0, indices.length);
locate();
m_Indices = find(true);
m_LocatorIndices = find(false);
}
/**
* returns the type of attribute that is located
*
* @return the type of attribute
*/
public int getType() {
return m_Type;
}
/**
* returns the indices that are allowed to check for the attribute type
*
* @return the indices that are checked for the attribute type
*/
public int[] getAllowedIndices() {
return m_AllowedIndices;
}
/**
* sets up the structure
*/
protected void locate() {
int i;
m_Attributes = new BitSet(m_AllowedIndices.length);
m_Locators = new ArrayList<AttributeLocator>();
for (i = 0; i < m_AllowedIndices.length; i++) {
if (m_Data.attribute(m_AllowedIndices[i]).type() == Attribute.RELATIONAL)
m_Locators.add(new AttributeLocator(m_Data.attribute(m_AllowedIndices[i]).relation(), getType()));
else
m_Locators.add(null);
m_Attributes.set(i, m_Data.attribute(m_AllowedIndices[i]).type() == getType());
}
}
/**
* returns the underlying data
*
* @return the underlying Instances object
*/
public Instances getData() {
return m_Data;
}
/**
* returns the indices of the searched-for attributes (if TRUE) or the indices
* of AttributeLocator objects (if FALSE)
*
* @param findAtts if true the indices of attributes are located,
* otherwise the ones of AttributeLocator objects
* @return the indices of the attributes or the AttributeLocator objects
*/
protected int[] find(boolean findAtts) {
int i;
int[] result;
ArrayList<Integer> indices;
// determine locations
indices = new ArrayList<Integer>();
if (findAtts) {
for (i = 0; i < m_Attributes.size(); i++) {
if (m_Attributes.get(i))
indices.add(new Integer(i));
}
}
else {
for (i = 0; i < m_Locators.size(); i++) {
if (m_Locators.get(i) != null)
indices.add(new Integer(i));
}
}
// fill array
result = new int[indices.size()];
for (i = 0; i < indices.size(); i++)
result[i] = ((Integer) indices.get(i)).intValue();
return result;
}
/**
* returns actual index in the Instances object.
*
* @param index the index in the m_AllowedIndices array
* @return the actual index in the instances object
*/
public int getActualIndex(int index) {
return m_AllowedIndices[index];
}
/**
* Returns the indices of the attributes. These indices are referring
* to the m_AllowedIndices array, not the actual indices in the Instances
* object.
*
* @return the indices of the attributes
* @see #getActualIndex(int)
*/
public int[] getAttributeIndices() {
return m_Indices;
}
/**
* Returns the indices of the AttributeLocator objects. These indices are
* referring to the m_AllowedIndices array, not the actual indices in the
* Instances object.
*
* @return the indices of the AttributeLocator objects
* @see #getActualIndex(int)
*/
public int[] getLocatorIndices() {
return m_LocatorIndices;
}
/**
* Returns the AttributeLocator at the given index. This index refers to
* the index of the m_AllowedIndices array, not the actual Instances object.
*
* @param index the index of the locator to retrieve
* @return the AttributeLocator at the given index
*/
public AttributeLocator getLocator(int index) {
return (AttributeLocator) m_Locators.get(index);
}
/**
* Compares this object with the specified object for order. Returns a
* negative integer, zero, or a positive integer as this object is less
* than, equal to, or greater than the specified object. Only type and
* indices are checked.
*
* @param o the object to compare with
* @return -1 if less than, 0 if equal, +1 if greater than the
* given object
*/
public int compareTo(AttributeLocator o) {
int result;
int i;
result = 0;
// 1. check type
if (this.getType() < o.getType()) {
result = -1;
}
else if (this.getType() > o.getType()) {
result = 1;
}
else {
// 2. check indices
if (this.getAllowedIndices().length < o.getAllowedIndices().length) {
result = -1;
}
else if (this.getAllowedIndices().length > o.getAllowedIndices().length) {
result = 1;
}
else {
for (i = 0; i < this.getAllowedIndices().length; i++) {
if (this.getAllowedIndices()[i] < o.getAllowedIndices()[i]) {
result = -1;
break;
}
else if (this.getAllowedIndices()[i] > o.getAllowedIndices()[i]) {
result = 1;
break;
}
else {
result = 0;
}
}
}
}
return result;
}
/**
* Indicates whether some other object is "equal to" this one. Only type
* and indices are checked.
*
* @param o the AttributeLocator to check for equality
* @return true if the AttributeLocators have the same type and
* indices
*/
public boolean equals(Object o) {
return (compareTo((AttributeLocator) o) == 0);
}
/**
* returns a string representation of this object
*
* @return a string representation
*/
public String toString() {
return m_Attributes.toString();
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/AttributeMetaInfo.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* AttributeMetaInfo.java
* Copyright (C) 2014 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core;
import java.io.IOException;
import java.io.Serializable;
import java.io.StreamTokenizer;
import java.io.StringReader;
import java.util.Properties;
/**
 * Stores the parsed meta data of an {@link Attribute}: ordering, regularity,
 * averagability, zeropoint, weight and (for numeric attributes) the valid
 * numeric range. Constructed from a {@link ProtectedProperties} object.
 */
public class AttributeMetaInfo implements Serializable, RevisionHandler {

  /** for serialization (added: Serializable classes should declare an
   * explicit serialVersionUID rather than rely on the computed default) */
  private static final long serialVersionUID = -2917441969558440485L;

  /** The attribute's metadata. */
  protected ProtectedProperties m_Metadata;

  /** The attribute's ordering. */
  protected int m_Ordering;

  /** Whether the attribute is regular. */
  protected boolean m_IsRegular;

  /** Whether the attribute is averagable. */
  protected boolean m_IsAveragable;

  /** Whether the attribute has a zeropoint. */
  protected boolean m_HasZeropoint;

  /** The attribute's lower numeric bound. */
  protected double m_LowerBound;

  /** Whether the lower bound is open. */
  protected boolean m_LowerBoundIsOpen;

  /** The attribute's upper numeric bound. */
  protected double m_UpperBound;

  /** Whether the upper bound is open */
  protected boolean m_UpperBoundIsOpen;

  /**
   * Creates the meta info object based on the given meta data.
   *
   * @param metadata the properties to parse
   * @param att the attribute the meta data belongs to (its weight is updated)
   */
  public AttributeMetaInfo(ProtectedProperties metadata, Attribute att) {
    setMetadata(metadata, att);
  }

  /**
   * Sets the metadata for the attribute. Processes the strings stored in the
   * metadata of the attribute so that the properties can be set up for the
   * easy-access metadata methods. Any strings sought that are omitted will
   * cause default values to be set.
   *
   * The following properties are recognised: ordering, averageable, zeropoint,
   * regular, weight, and range.
   *
   * All other properties can be queried and handled appropriately by classes
   * calling the getMetadata() method.
   *
   * @param metadata the metadata
   * @param att the attribute whose type and weight are consulted/updated
   * @throws IllegalArgumentException if the properties are not consistent
   */
  // @ requires metadata != null;
  private void setMetadata(ProtectedProperties metadata, Attribute att) {
    m_Metadata = metadata;

    if (att.m_Type == Attribute.DATE) {
      // dates have fixed semantics, the properties are ignored
      m_Ordering = Attribute.ORDERING_ORDERED;
      m_IsRegular = true;
      m_IsAveragable = false;
      m_HasZeropoint = false;
    } else {

      // get ordering
      String orderString = m_Metadata.getProperty("ordering", "");

      // numeric ordered attributes are averagable and zeropoint by default
      String def;
      if (att.m_Type == Attribute.NUMERIC && orderString.compareTo("modulo") != 0
        && orderString.compareTo("symbolic") != 0) {
        def = "true";
      } else {
        def = "false";
      }

      // determine boolean states
      m_IsAveragable = (m_Metadata.getProperty("averageable", def).compareTo(
        "true") == 0);
      m_HasZeropoint = (m_Metadata.getProperty("zeropoint", def).compareTo(
        "true") == 0);
      // averagable or zeropoint implies regular
      if (m_IsAveragable || m_HasZeropoint) {
        def = "true";
      }
      m_IsRegular = (m_Metadata.getProperty("regular", def).compareTo("true") == 0);

      // determine ordering
      if (orderString.compareTo("symbolic") == 0) {
        m_Ordering = Attribute.ORDERING_SYMBOLIC;
      } else if (orderString.compareTo("ordered") == 0) {
        m_Ordering = Attribute.ORDERING_ORDERED;
      } else if (orderString.compareTo("modulo") == 0) {
        m_Ordering = Attribute.ORDERING_MODULO;
      } else {
        // no explicit ordering given: derive it from the other properties
        if (att.m_Type == Attribute.NUMERIC || m_IsAveragable || m_HasZeropoint) {
          m_Ordering = Attribute.ORDERING_ORDERED;
        } else {
          m_Ordering = Attribute.ORDERING_SYMBOLIC;
        }
      }
    }

    // consistency checks
    if (m_IsAveragable && !m_IsRegular) {
      throw new IllegalArgumentException("An averagable attribute must be"
        + " regular");
    }
    if (m_HasZeropoint && !m_IsRegular) {
      throw new IllegalArgumentException("A zeropoint attribute must be"
        + " regular");
    }
    if (m_IsRegular && m_Ordering == Attribute.ORDERING_SYMBOLIC) {
      throw new IllegalArgumentException("A symbolic attribute cannot be"
        + " regular");
    }
    if (m_IsAveragable && m_Ordering != Attribute.ORDERING_ORDERED) {
      throw new IllegalArgumentException("An averagable attribute must be"
        + " ordered");
    }
    if (m_HasZeropoint && m_Ordering != Attribute.ORDERING_ORDERED) {
      throw new IllegalArgumentException("A zeropoint attribute must be"
        + " ordered");
    }

    // determine weight
    att.m_Weight = 1.0;
    String weightString = m_Metadata.getProperty("weight");
    if (weightString != null) {
      try {
        // parseDouble avoids the needless boxing of Double.valueOf().doubleValue()
        att.m_Weight = Double.parseDouble(weightString);
      } catch (NumberFormatException e) {
        // Check if value is really a number; chain the cause for diagnosis
        throw new IllegalArgumentException("Not a valid attribute weight: '"
          + weightString + "'", e);
      }
    }

    // determine numeric range
    if (att.m_Type == Attribute.NUMERIC) {
      setNumericRange(m_Metadata.getProperty("range"));
    }
  }

  /**
   * Sets the numeric range based on a string. If the string is null the range
   * will default to [-inf,+inf]. A square brace represents a closed interval, a
   * curved brace represents an open interval, and 'inf' represents infinity.
   * Examples of valid range strings: "[-inf,20)","(-13.5,-5.2)","(5,inf]"
   *
   * @param rangeString the string to parse as the attribute's numeric range
   * @throws IllegalArgumentException if the range is not valid
   */
  // @ requires rangeString != null;
  private void setNumericRange(String rangeString) {
    // set defaults
    m_LowerBound = Double.NEGATIVE_INFINITY;
    m_LowerBoundIsOpen = false;
    m_UpperBound = Double.POSITIVE_INFINITY;
    m_UpperBoundIsOpen = false;

    if (rangeString == null) {
      return;
    }

    // set up a tokenizer to parse the string
    StreamTokenizer tokenizer = new StreamTokenizer(new StringReader(
      rangeString));
    tokenizer.resetSyntax();
    tokenizer.whitespaceChars(0, ' ');
    tokenizer.wordChars(' ' + 1, '\u00FF');
    tokenizer.ordinaryChar('[');
    tokenizer.ordinaryChar('(');
    tokenizer.ordinaryChar(',');
    tokenizer.ordinaryChar(']');
    tokenizer.ordinaryChar(')');

    try {

      // get opening brace
      tokenizer.nextToken();
      if (tokenizer.ttype == '[') {
        m_LowerBoundIsOpen = false;
      } else if (tokenizer.ttype == '(') {
        m_LowerBoundIsOpen = true;
      } else {
        throw new IllegalArgumentException("Expected opening brace on range,"
          + " found: " + tokenizer.toString());
      }

      // get lower bound
      tokenizer.nextToken();
      if (tokenizer.ttype != StreamTokenizer.TT_WORD) {
        throw new IllegalArgumentException("Expected lower bound in range,"
          + " found: " + tokenizer.toString());
      }
      if (tokenizer.sval.compareToIgnoreCase("-inf") == 0) {
        m_LowerBound = Double.NEGATIVE_INFINITY;
      } else if (tokenizer.sval.compareToIgnoreCase("+inf") == 0) {
        m_LowerBound = Double.POSITIVE_INFINITY;
      } else if (tokenizer.sval.compareToIgnoreCase("inf") == 0) {
        // bare "inf" on the lower side means negative infinity
        m_LowerBound = Double.NEGATIVE_INFINITY;
      } else {
        try {
          m_LowerBound = Double.parseDouble(tokenizer.sval);
        } catch (NumberFormatException e) {
          throw new IllegalArgumentException("Expected lower bound in range,"
            + " found: '" + tokenizer.sval + "'", e);
        }
      }

      // get separating comma
      if (tokenizer.nextToken() != ',') {
        throw new IllegalArgumentException("Expected comma in range,"
          + " found: " + tokenizer.toString());
      }

      // get upper bound
      tokenizer.nextToken();
      if (tokenizer.ttype != StreamTokenizer.TT_WORD) {
        throw new IllegalArgumentException("Expected upper bound in range,"
          + " found: " + tokenizer.toString());
      }
      if (tokenizer.sval.compareToIgnoreCase("-inf") == 0) {
        m_UpperBound = Double.NEGATIVE_INFINITY;
      } else if (tokenizer.sval.compareToIgnoreCase("+inf") == 0) {
        m_UpperBound = Double.POSITIVE_INFINITY;
      } else if (tokenizer.sval.compareToIgnoreCase("inf") == 0) {
        // bare "inf" on the upper side means positive infinity
        m_UpperBound = Double.POSITIVE_INFINITY;
      } else {
        try {
          m_UpperBound = Double.parseDouble(tokenizer.sval);
        } catch (NumberFormatException e) {
          throw new IllegalArgumentException("Expected upper bound in range,"
            + " found: '" + tokenizer.sval + "'", e);
        }
      }

      // get closing brace
      tokenizer.nextToken();
      if (tokenizer.ttype == ']') {
        m_UpperBoundIsOpen = false;
      } else if (tokenizer.ttype == ')') {
        m_UpperBoundIsOpen = true;
      } else {
        throw new IllegalArgumentException("Expected closing brace on range,"
          + " found: " + tokenizer.toString());
      }

      // check for rubbish on end
      if (tokenizer.nextToken() != StreamTokenizer.TT_EOF) {
        throw new IllegalArgumentException("Expected end of range string,"
          + " found: " + tokenizer.toString());
      }
    } catch (IOException e) {
      // chain the cause instead of flattening it to a message
      throw new IllegalArgumentException("IOException reading attribute range"
        + " string: " + e.getMessage(), e);
    }

    if (m_UpperBound < m_LowerBound) {
      throw new IllegalArgumentException("Upper bound (" + m_UpperBound
        + ") on numeric range is" + " less than lower bound (" + m_LowerBound
        + ")!");
    }
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 10203 $");
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/AttributeStats.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* AttributeStats.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core;
import java.io.Serializable;
/**
 * A utility class that contains summary information on the values that
 * appear in a dataset for a particular attribute.
 *
 * @author <a href="mailto:len@reeltwo.com">Len Trigg</a>
 * @version $Revision$
 */
public class AttributeStats
  implements Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = 4434688832743939380L;

  /** The number of int-like values */
  public int intCount = 0;

  /** The number of real-like values (i.e. have a fractional part) */
  public int realCount = 0;

  /** The number of missing values */
  public int missingCount = 0;

  /** The number of distinct values */
  public int distinctCount = 0;

  /** The number of values that only appear once */
  public int uniqueCount = 0;

  /** The total number of values (i.e. number of instances) */
  public int totalCount = 0;

  /** Stats on numeric value distributions */
  // perhaps Stats should be moved from weka.experiment to weka.core
  public weka.experiment.Stats numericStats;

  /** Counts of each nominal value */
  public int [] nominalCounts;

  /** Weight mass for each nominal value */
  public double[] nominalWeights;

  /**
   * Updates the counters for one more observed distinct value.
   * NOTE: the nominal count/weight for the value is assigned, not
   * accumulated -- callers are expected to report each distinct value
   * exactly once with its final count.
   *
   * @param value the value that has just been seen
   * @param count the number of times the value appeared
   * @param weight the weight mass of the value
   */
  protected void addDistinct(double value, int count, double weight) {
    if (count > 0) {
      if (count == 1) {
        // a value observed exactly once is "unique"
        uniqueCount++;
      }
      // classify the value as integer-like or real-like
      if (Utils.eq(value, (double)((int)value))) {
        intCount += count;
      } else {
        realCount += count;
      }
      if (nominalCounts != null) {
        // for nominal attributes, value doubles as the label index
        nominalCounts[(int)value] = count;
        nominalWeights[(int)value] = weight;
      }
      if (numericStats != null) {
        //numericStats.add(value, count);
        numericStats.add(value, weight);
        numericStats.calculateDerived();
      }
    }
    // distinctCount advances even for a zero count
    distinctCount++;
  }

  /**
   * Returns a human readable representation of this AttributeStats instance:
   * a header row followed by one row of counts and percentages (percentages
   * are relative to totalCount).
   *
   * @return a String representing these AttributeStats.
   */
  public String toString() {

    StringBuffer sb = new StringBuffer();
    // header row
    sb.append(Utils.padLeft("Type", 4)).append(Utils.padLeft("Nom", 5));
    sb.append(Utils.padLeft("Int", 5)).append(Utils.padLeft("Real", 5));
    sb.append(Utils.padLeft("Missing", 12));
    sb.append(Utils.padLeft("Unique", 12));
    sb.append(Utils.padLeft("Dist", 6));
    if (nominalCounts != null) {
      sb.append(' ');
      for (int i = 0; i < nominalCounts.length; i++) {
        sb.append(Utils.padLeft("C[" + i + "]", 5));
      }
    }
    sb.append('\n');

    // NOTE(review): if totalCount is 0 the divisions below yield NaN and
    // Math.round(NaN) == 0, so percentages print as 0% rather than failing
    long percent;
    percent = Math.round(100.0 * intCount / totalCount);
    if (nominalCounts != null) {
      sb.append(Utils.padLeft("Nom", 4)).append(' ');
      sb.append(Utils.padLeft("" + percent, 3)).append("% ");
      sb.append(Utils.padLeft("" + 0, 3)).append("% ");
    } else {
      sb.append(Utils.padLeft("Num", 4)).append(' ');
      sb.append(Utils.padLeft("" + 0, 3)).append("% ");
      sb.append(Utils.padLeft("" + percent, 3)).append("% ");
    }
    percent = Math.round(100.0 * realCount / totalCount);
    sb.append(Utils.padLeft("" + percent, 3)).append("% ");
    sb.append(Utils.padLeft("" + missingCount, 5)).append(" /");
    percent = Math.round(100.0 * missingCount / totalCount);
    sb.append(Utils.padLeft("" + percent, 3)).append("% ");
    sb.append(Utils.padLeft("" + uniqueCount, 5)).append(" /");
    percent = Math.round(100.0 * uniqueCount / totalCount);
    sb.append(Utils.padLeft("" + percent, 3)).append("% ");
    sb.append(Utils.padLeft("" + distinctCount, 5)).append(' ');
    if (nominalCounts != null) {
      for (int i = 0; i < nominalCounts.length; i++) {
        sb.append(Utils.padLeft("" + nominalCounts[i], 5));
      }
    }
    sb.append('\n');
    return sb.toString();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/BatchPredictor.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* BatchPredictor.java
* Copyright (C) 2012 University of Waikato, Hamilton, New Zealand.
*
*/
package weka.core;
/**
 * Interface to something that can produce predictions in a batch manner
 * when presented with a set of Instances.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision$
 */
public interface BatchPredictor {

  /**
   * Set the batch size to use. The implementer will
   * prefer (but not necessarily expect) this many instances
   * to be passed in to distributionsForInstances().
   *
   * @param size the batch size to use (a string, so that option-handling
   *          code can pass it through unparsed)
   */
  void setBatchSize(String size);

  /**
   * Get the batch size to use. The implementer will prefer (but not
   * necessarily expect) this many instances to be passed in to
   * distributionsForInstances(). Allows the preferred batch size
   * to be encapsulated with the client.
   *
   * @return the batch size to use
   */
  String getBatchSize();

  /**
   * Batch scoring method.
   *
   * @param insts the instances to get predictions for
   * @return an array of probability distributions, one for each instance
   * @throws Exception if a problem occurs
   */
  double[][] distributionsForInstances(Instances insts) throws Exception;

  /**
   * Returns true if this BatchPredictor can generate batch predictions
   * in an efficient manner.
   *
   * @return true if batch predictions can be generated efficiently
   */
  boolean implementsMoreEfficientBatchPrediction();
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/BinarySparseInstance.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* BinarySparseInstance.java
* Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core;
import java.util.ArrayList;
import java.util.Enumeration;
/**
* Class for storing a binary-data-only instance as a sparse vector. A sparse
* instance only requires storage for those attribute values that are non-zero.
* Since the objective is to reduce storage requirements for datasets with large
* numbers of default values, this also includes nominal attributes -- the first
* nominal value (i.e. that which has index 0) will not require explicit
* storage, so rearrange your nominal attribute value orderings if necessary.
* Missing values are not supported, and will be treated as 1 (true).
*
* @version $Revision$
*/
public class BinarySparseInstance extends SparseInstance {
/** for serialization */
private static final long serialVersionUID = -5297388762342528737L;
/**
* Constructor that generates a sparse instance from the given instance.
* Reference to the dataset is set to null. (ie. the instance doesn't have
* access to information about the attribute types)
*
* @param instance the instance from which the attribute values and the weight
* are to be copied
*/
public BinarySparseInstance(Instance instance) {
m_Weight = instance.weight();
m_Dataset = null;
m_NumAttributes = instance.numAttributes();
if (instance instanceof SparseInstance) {
m_AttValues = null;
m_Indices = ((SparseInstance) instance).m_Indices;
} else {
int[] tempIndices = new int[instance.numAttributes()];
int vals = 0;
for (int i = 0; i < instance.numAttributes(); i++) {
if (instance.value(i) != 0) {
tempIndices[vals] = i;
vals++;
}
}
m_AttValues = null;
m_Indices = new int[vals];
System.arraycopy(tempIndices, 0, m_Indices, 0, vals);
}
}
/**
* Constructor that copies the info from the given instance. Reference to the
* dataset is set to null. (ie. the instance doesn't have access to
* information about the attribute types)
*
* @param instance the instance from which the attribute info is to be copied
*/
public BinarySparseInstance(SparseInstance instance) {
m_AttValues = null;
m_Indices = instance.m_Indices;
m_Weight = instance.m_Weight;
m_NumAttributes = instance.m_NumAttributes;
m_Dataset = null;
}
/**
* Constructor that generates a sparse instance from the given parameters.
* Reference to the dataset is set to null. (ie. the instance doesn't have
* access to information about the attribute types)
*
* @param weight the instance's weight
* @param attValues a vector of attribute values
*/
public BinarySparseInstance(double weight, double[] attValues) {
m_Weight = weight;
m_Dataset = null;
m_NumAttributes = attValues.length;
int[] tempIndices = new int[m_NumAttributes];
int vals = 0;
for (int i = 0; i < m_NumAttributes; i++) {
if (attValues[i] != 0) {
tempIndices[vals] = i;
vals++;
}
}
m_AttValues = null;
m_Indices = new int[vals];
System.arraycopy(tempIndices, 0, m_Indices, 0, vals);
}
/**
* Constructor that inititalizes instance variable with given values.
* Reference to the dataset is set to null. (ie. the instance doesn't have
* access to information about the attribute types)
*
* @param weight the instance's weight
* @param indices the indices of the given values in the full vector
* @param maxNumValues the maximium number of values that can be stored
*/
public BinarySparseInstance(double weight, int[] indices, int maxNumValues) {
m_AttValues = null;
m_Indices = indices;
m_Weight = weight;
m_NumAttributes = maxNumValues;
m_Dataset = null;
}
/**
* Constructor of an instance that sets weight to one, all values to 1, and
* the reference to the dataset to null. (ie. the instance doesn't have access
* to information about the attribute types)
*
* @param numAttributes the size of the instance
*/
public BinarySparseInstance(int numAttributes) {
m_AttValues = null;
m_NumAttributes = numAttributes;
m_Indices = new int[numAttributes];
for (int i = 0; i < m_Indices.length; i++) {
m_Indices[i] = i;
}
m_Weight = 1;
m_Dataset = null;
}
/**
* Produces a shallow copy of this instance. The copy has access to the same
* dataset. (if you want to make a copy that doesn't have access to the
* dataset, use <code>new BinarySparseInstance(instance)</code>
*
* @return the shallow copy
*/
@Override
public Object copy() {
BinarySparseInstance result = new BinarySparseInstance(this);
result.m_Dataset = m_Dataset;
return result;
}
/**
* Copies the instance but fills up its values based on the given array
* of doubles. The copy has access to the same dataset.
*
* @param values the array with new values
* @return the new instance
*/
public Instance copy(double[] values) {
BinarySparseInstance result = new BinarySparseInstance(this.m_Weight, values);
result.m_Dataset = m_Dataset;
return result;
}
/**
* Merges this instance with the given instance and returns the result.
* Dataset is set to null.
*
* @param inst the instance to be merged with this one
* @return the merged instances
*/
@Override
public Instance mergeInstance(Instance inst) {
int[] indices = new int[numValues() + inst.numValues()];
int m = 0;
for (int j = 0; j < numValues(); j++) {
indices[m++] = index(j);
}
for (int j = 0; j < inst.numValues(); j++) {
if (inst.valueSparse(j) != 0) {
indices[m++] = numAttributes() + inst.index(j);
}
}
if (m != indices.length) {
// Need to truncate
int[] newInd = new int[m];
System.arraycopy(indices, 0, newInd, 0, m);
indices = newInd;
}
return new BinarySparseInstance(1.0, indices, numAttributes()
+ inst.numAttributes());
}
/**
* Does nothing, since we don't support missing values.
*
* @param array containing the means and modes
*/
@Override
public void replaceMissingValues(double[] array) {
// Does nothing, since we don't store missing values.
}
/**
* Sets a specific value in the instance to the given value (internal
* floating-point format). Performs a deep copy of the vector of attribute
* values before the value is set.
*
* @param attIndex the attribute's index
* @param value the new attribute value (If the corresponding attribute is
* nominal (or a string) then this is the new value's index as a
* double).
*/
@Override
public void setValue(int attIndex, double value) {
int index = locateIndex(attIndex);
if ((index >= 0) && (m_Indices[index] == attIndex)) {
if (value == 0) {
int[] tempIndices = new int[m_Indices.length - 1];
System.arraycopy(m_Indices, 0, tempIndices, 0, index);
System.arraycopy(m_Indices, index + 1, tempIndices, index,
m_Indices.length - index - 1);
m_Indices = tempIndices;
}
} else {
if (value != 0) {
int[] tempIndices = new int[m_Indices.length + 1];
System.arraycopy(m_Indices, 0, tempIndices, 0, index + 1);
tempIndices[index + 1] = attIndex;
System.arraycopy(m_Indices, index + 1, tempIndices, index + 2,
m_Indices.length - index - 1);
m_Indices = tempIndices;
}
}
}
/**
* Sets a specific value in the instance to the given value (internal
* floating-point format). Performs a deep copy of the vector of attribute
* values before the value is set.
*
* @param indexOfIndex the index of the attribute's index
* @param value the new attribute value (If the corresponding attribute is
* nominal (or a string) then this is the new value's index as a
* double).
*/
@Override
public void setValueSparse(int indexOfIndex, double value) {
if (value == 0) {
int[] tempIndices = new int[m_Indices.length - 1];
System.arraycopy(m_Indices, 0, tempIndices, 0, indexOfIndex);
System.arraycopy(m_Indices, indexOfIndex + 1, tempIndices, indexOfIndex,
m_Indices.length - indexOfIndex - 1);
m_Indices = tempIndices;
}
}
/**
* Returns the values of each attribute as an array of doubles.
*
* @return an array containing all the instance attribute values
*/
@Override
public double[] toDoubleArray() {
double[] newValues = new double[m_NumAttributes];
for (int i = 0; i < m_Indices.length; i++) {
newValues[m_Indices[i]] = 1.0;
}
return newValues;
}
/**
* Returns the description of one instance in sparse format. If the instance
* doesn't have access to a dataset, it returns the internal floating-point
* values. Quotes string values that contain whitespace characters.
*
* @return the instance's description as a string
*/
@Override
public String toString() {
StringBuffer text = new StringBuffer();
text.append('{');
for (int i = 0; i < m_Indices.length; i++) {
if (i > 0) {
text.append(",");
}
if (m_Dataset == null) {
text.append(m_Indices[i] + " 1");
} else {
if (m_Dataset.attribute(m_Indices[i]).isNominal()
|| m_Dataset.attribute(m_Indices[i]).isString()) {
text.append(m_Indices[i] + " "
+ Utils.quote(m_Dataset.attribute(m_Indices[i]).value(1)));
} else {
text.append(m_Indices[i] + " 1");
}
}
}
text.append('}');
if (m_Weight != 1.0) {
text.append(",{"
+ Utils.doubleToString(m_Weight,
AbstractInstance.s_numericAfterDecimalPoint) + "}");
}
return text.toString();
}
/**
* Returns an instance's attribute value in internal format.
*
* @param attIndex the attribute's index
* @return the specified value as a double (If the corresponding attribute is
* nominal (or a string) then it returns the value's index as a
* double).
*/
@Override
public double value(int attIndex) {
int index = locateIndex(attIndex);
if ((index >= 0) && (m_Indices[index] == attIndex)) {
return 1.0;
} else {
return 0.0;
}
}
/**
* Returns an instance's attribute value in internal format. Does exactly the
* same thing as value() if applied to an Instance.
*
* @param indexOfIndex the index of the attribute's index
* @return the specified value as a double (If the corresponding attribute is
* nominal (or a string) then it returns the value's index as a
* double).
*/
@Override
public final double valueSparse(int indexOfIndex) {
return 1;
}
/**
* Deletes an attribute at the given position (0 to numAttributes() - 1).
*
* @param position the attribute's position
*/
@Override
protected void forceDeleteAttributeAt(int position) {
int index = locateIndex(position);
m_NumAttributes--;
if ((index >= 0) && (m_Indices[index] == position)) {
int[] tempIndices = new int[m_Indices.length - 1];
System.arraycopy(m_Indices, 0, tempIndices, 0, index);
for (int i = index; i < m_Indices.length - 1; i++) {
tempIndices[i] = m_Indices[i + 1] - 1;
}
m_Indices = tempIndices;
} else {
int[] tempIndices = new int[m_Indices.length];
System.arraycopy(m_Indices, 0, tempIndices, 0, index + 1);
for (int i = index + 1; i < m_Indices.length - 1; i++) {
tempIndices[i] = m_Indices[i] - 1;
}
m_Indices = tempIndices;
}
}
/**
* Inserts an attribute at the given position (0 to numAttributes()) and sets
* its value to 1.
*
* @param position the attribute's position
*/
@Override
protected void forceInsertAttributeAt(int position) {
int index = locateIndex(position);
m_NumAttributes++;
if ((index >= 0) && (m_Indices[index] == position)) {
int[] tempIndices = new int[m_Indices.length + 1];
System.arraycopy(m_Indices, 0, tempIndices, 0, index);
tempIndices[index] = position;
for (int i = index; i < m_Indices.length; i++) {
tempIndices[i + 1] = m_Indices[i] + 1;
}
m_Indices = tempIndices;
} else {
int[] tempIndices = new int[m_Indices.length + 1];
System.arraycopy(m_Indices, 0, tempIndices, 0, index + 1);
tempIndices[index + 1] = position;
for (int i = index + 1; i < m_Indices.length; i++) {
tempIndices[i + 1] = m_Indices[i] + 1;
}
m_Indices = tempIndices;
}
}
/**
* Main method for testing this class.
*
* @param options the command line options - ignored
*/
public static void main(String[] options) {
try {
// Create numeric attributes "length" and "weight"
Attribute length = new Attribute("length");
Attribute weight = new Attribute("weight");
// Create vector to hold nominal values "first", "second", "third"
ArrayList<String> my_nominal_values = new ArrayList<String>(3);
my_nominal_values.add("first");
my_nominal_values.add("second");
// Create nominal attribute "position"
Attribute position = new Attribute("position", my_nominal_values);
// Create vector of the above attributes
ArrayList<Attribute> attributes = new ArrayList<Attribute>(3);
attributes.add(length);
attributes.add(weight);
attributes.add(position);
// Create the empty dataset "race" with above attributes
Instances race = new Instances("race", attributes, 0);
// Make position the class attribute
race.setClassIndex(position.index());
// Create empty instance with three attribute values
BinarySparseInstance inst = new BinarySparseInstance(3);
// Set instance's values for the attributes "length", "weight", and
// "position"
inst.setValue(length, 5.3);
inst.setValue(weight, 300);
inst.setValue(position, "first");
// Set instance's dataset to be the dataset "race"
inst.setDataset(race);
// Print the instance
System.out.println("The instance: " + inst);
// Print the first attribute
System.out.println("First attribute: " + inst.attribute(0));
// Print the class attribute
System.out.println("Class attribute: " + inst.classAttribute());
// Print the class index
System.out.println("Class index: " + inst.classIndex());
// Say if class is missing
System.out.println("Class is missing: " + inst.classIsMissing());
// Print the instance's class value in internal format
System.out.println("Class value (internal format): " + inst.classValue());
// Print a shallow copy of this instance
SparseInstance copy = (SparseInstance) inst.copy();
System.out.println("Shallow copy: " + copy);
// Set dataset for shallow copy
copy.setDataset(inst.dataset());
System.out.println("Shallow copy with dataset set: " + copy);
// Print out all values in internal format
System.out.print("All stored values in internal format: ");
for (int i = 0; i < inst.numValues(); i++) {
if (i > 0) {
System.out.print(",");
}
System.out.print(inst.valueSparse(i));
}
System.out.println();
// Set all values to zero
System.out.print("All values set to zero: ");
while (inst.numValues() > 0) {
inst.setValueSparse(0, 0);
}
for (int i = 0; i < inst.numValues(); i++) {
if (i > 0) {
System.out.print(",");
}
System.out.print(inst.valueSparse(i));
}
System.out.println();
// Set all values to one
System.out.print("All values set to one: ");
for (int i = 0; i < inst.numAttributes(); i++) {
inst.setValue(i, 1);
}
for (int i = 0; i < inst.numValues(); i++) {
if (i > 0) {
System.out.print(",");
}
System.out.print(inst.valueSparse(i));
}
System.out.println();
// Unset dataset for copy, delete first attribute, and insert it again
copy.setDataset(null);
copy.deleteAttributeAt(0);
copy.insertAttributeAt(0);
copy.setDataset(inst.dataset());
System.out.println("Copy with first attribute deleted and inserted: "
+ copy);
// Same for second attribute
copy.setDataset(null);
copy.deleteAttributeAt(1);
copy.insertAttributeAt(1);
copy.setDataset(inst.dataset());
System.out.println("Copy with second attribute deleted and inserted: "
+ copy);
// Same for last attribute
copy.setDataset(null);
copy.deleteAttributeAt(2);
copy.insertAttributeAt(2);
copy.setDataset(inst.dataset());
System.out.println("Copy with third attribute deleted and inserted: "
+ copy);
// Enumerate attributes (leaving out the class attribute)
System.out.println("Enumerating attributes (leaving out class):");
Enumeration<Attribute> enu = inst.enumerateAttributes();
while (enu.hasMoreElements()) {
Attribute att = enu.nextElement();
System.out.println(att);
}
// Headers are equivalent?
System.out.println("Header of original and copy equivalent: "
+ inst.equalHeaders(copy));
// Test for missing values
System.out.println("Length of copy missing: " + copy.isMissing(length));
System.out.println("Weight of copy missing: "
+ copy.isMissing(weight.index()));
System.out.println("Length of copy missing: "
+ Utils.isMissingValue(copy.value(length)));
// Prints number of attributes and classes
System.out.println("Number of attributes: " + copy.numAttributes());
System.out.println("Number of classes: " + copy.numClasses());
// Replace missing values
double[] meansAndModes = { 2, 3, 0 };
copy.replaceMissingValues(meansAndModes);
System.out.println("Copy with missing value replaced: " + copy);
// Setting and getting values and weights
copy.setClassMissing();
System.out.println("Copy with missing class: " + copy);
copy.setClassValue(0);
System.out.println("Copy with class value set to first value: " + copy);
copy.setClassValue("second");
System.out.println("Copy with class value set to \"second\": " + copy);
copy.setMissing(1);
System.out.println("Copy with second attribute set to be missing: "
+ copy);
copy.setMissing(length);
System.out.println("Copy with length set to be missing: " + copy);
copy.setValue(0, 0);
System.out.println("Copy with first attribute set to 0: " + copy);
copy.setValue(weight, 1);
System.out.println("Copy with weight attribute set to 1: " + copy);
copy.setValue(position, "second");
System.out.println("Copy with position set to \"second\": " + copy);
copy.setValue(2, "first");
System.out.println("Copy with last attribute set to \"first\": " + copy);
System.out.println("Current weight of instance copy: " + copy.weight());
copy.setWeight(2);
System.out.println("Current weight of instance copy (set to 2): "
+ copy.weight());
System.out.println("Last value of copy: " + copy.toString(2));
System.out.println("Value of position for copy: "
+ copy.toString(position));
System.out.println("Last value of copy (internal format): "
+ copy.value(2));
System.out.println("Value of position for copy (internal format): "
+ copy.value(position));
} catch (Exception e) {
e.printStackTrace();
}
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/Capabilities.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Capabilities.java
* Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
*/
package weka.core;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import java.util.Vector;
import weka.core.converters.ConverterUtils.DataSource;
/**
* A class that describes the capabilites (e.g., handling certain types of
* attributes, missing values, types of classes, etc.) of a specific classifier.
* By default, the classifier is capable of nothing. This ensures that new
* features have to be enabled explicitly.
* <p/>
*
* A common code fragment for making use of the capabilities in a classifier
* would be this:
*
* <pre>
* public void <b>buildClassifier</b>(Instances instances) throws Exception {
* // can the classifier handle the data?
* getCapabilities().<b>testWithFail(instances)</b>;
* ...
* // possible deletion of instances with missing class labels, etc.
* </pre>
*
* For only testing a single attribute, use this:
*
* <pre>
* ...
* Attribute att = instances.attribute(0);
* getCapabilities().<b>testWithFail(att)</b>;
* ...
* </pre>
*
* Or for testing the class attribute (uses the capabilities that are especially
* for the class):
*
* <pre>
* ...
* Attribute att = instances.classAttribute();
* getCapabilities().<b>testWithFail(att, <i>true</i>)</b>;
* ...
* </pre>
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class Capabilities implements Cloneable, Serializable, RevisionHandler {
/** serialversion UID */
static final long serialVersionUID = -5478590032325567849L;
/** the properties file for managing the tests */
public final static String PROPERTIES_FILE = "weka/core/Capabilities.props";
/** the actual properties */
protected static Properties PROPERTIES;
/** Capabilities defined by interfaces */
protected static HashSet<Class> INTERFACE_DEFINED_CAPABILITIES;
/** whether to perform any tests at all */
protected static boolean TEST;
/** whether to perform data based tests */
protected static boolean INSTANCES_TEST;
/** whether to perform attribute based tests */
protected static boolean ATTRIBUTE_TEST;
/** whether to test for missing values */
protected static boolean MISSING_VALUES_TEST;
/** whether to test for missing class values */
protected static boolean MISSING_CLASS_VALUES_TEST;
/** whether to test for minimum number of instances */
protected static boolean MINIMUM_NUMBER_INSTANCES_TEST;
  static {
    // load properties
    if (PROPERTIES == null) {
      try {
        PROPERTIES = Utils.readProperties(PROPERTIES_FILE);
        // Each test flag defaults to true and is additionally gated by the
        // global "Test" switch.
        TEST = Boolean.parseBoolean(PROPERTIES.getProperty("Test", "true"));
        INSTANCES_TEST = Boolean.parseBoolean(PROPERTIES.getProperty("InstancesTest", "true")) && TEST;
        ATTRIBUTE_TEST = Boolean.parseBoolean(PROPERTIES.getProperty("AttributeTest", "true")) && TEST;
        MISSING_VALUES_TEST = Boolean.parseBoolean(PROPERTIES.getProperty("MissingValuesTest", "true")) && TEST;
        MISSING_CLASS_VALUES_TEST = Boolean.parseBoolean(PROPERTIES.getProperty("MissingClassValuesTest", "true")) && TEST;
        MINIMUM_NUMBER_INSTANCES_TEST = Boolean.parseBoolean(PROPERTIES.getProperty("MinimumNumberInstancesTest", "true")) && TEST;
        // Properties whose key ends with "InterfaceCapability" name classes
        // (interfaces) that define capabilities; resolve them via reflection.
        INTERFACE_DEFINED_CAPABILITIES = new HashSet<Class>();
        for (String key : PROPERTIES.stringPropertyNames()) {
          if (key.endsWith("InterfaceCapability")) {
            INTERFACE_DEFINED_CAPABILITIES.add(Class.forName(PROPERTIES.getProperty(key)));
          }
        }
      } catch (Exception e) {
        // Fall back to an empty property set so the class still loads
        e.printStackTrace();
        PROPERTIES = new Properties();
      }
    }
  }
  /** defines an attribute type */
  private final static int ATTRIBUTE = 1;

  /** defines a class type */
  private final static int CLASS = 2;

  /** defines an attribute capability */
  private final static int ATTRIBUTE_CAPABILITY = 4;

  /** defines a class capability */
  private final static int CLASS_CAPABILITY = 8;

  /** defines a other capability */
  private final static int OTHER_CAPABILITY = 16;

  /** enumeration of all capabilities */
  public enum Capability {
    // attributes
    /** can handle nominal attributes */
    NOMINAL_ATTRIBUTES(ATTRIBUTE + ATTRIBUTE_CAPABILITY, "Nominal attributes"),
    /** can handle binary attributes */
    BINARY_ATTRIBUTES(ATTRIBUTE + ATTRIBUTE_CAPABILITY, "Binary attributes"),
    /** can handle unary attributes */
    UNARY_ATTRIBUTES(ATTRIBUTE + ATTRIBUTE_CAPABILITY, "Unary attributes"),
    /** can handle empty nominal attributes */
    EMPTY_NOMINAL_ATTRIBUTES(ATTRIBUTE + ATTRIBUTE_CAPABILITY, "Empty nominal attributes"),
    /** can handle numeric attributes */
    NUMERIC_ATTRIBUTES(ATTRIBUTE + ATTRIBUTE_CAPABILITY, "Numeric attributes"),
    /** can handle date attributes */
    DATE_ATTRIBUTES(ATTRIBUTE + ATTRIBUTE_CAPABILITY, "Date attributes"),
    /** can handle string attributes */
    STRING_ATTRIBUTES(ATTRIBUTE + ATTRIBUTE_CAPABILITY, "String attributes"),
    /** can handle relational attributes */
    RELATIONAL_ATTRIBUTES(ATTRIBUTE + ATTRIBUTE_CAPABILITY, "Relational attributes"),
    /** can handle missing values in attributes */
    MISSING_VALUES(ATTRIBUTE_CAPABILITY, "Missing values"),
    // class
    /** can handle data without class attribute, eg clusterers */
    NO_CLASS(CLASS_CAPABILITY, "No class"),
    /** can handle nominal classes */
    NOMINAL_CLASS(CLASS + CLASS_CAPABILITY, "Nominal class"),
    /** can handle binary classes */
    BINARY_CLASS(CLASS + CLASS_CAPABILITY, "Binary class"),
    /** can handle unary classes */
    UNARY_CLASS(CLASS + CLASS_CAPABILITY, "Unary class"),
    /** can handle empty nominal classes */
    EMPTY_NOMINAL_CLASS(CLASS + CLASS_CAPABILITY, "Empty nominal class"),
    /** can handle numeric classes */
    NUMERIC_CLASS(CLASS + CLASS_CAPABILITY, "Numeric class"),
    /** can handle date classes */
    DATE_CLASS(CLASS + CLASS_CAPABILITY, "Date class"),
    /** can handle string classes */
    STRING_CLASS(CLASS + CLASS_CAPABILITY, "String class"),
    /** can handle relational classes */
    RELATIONAL_CLASS(CLASS + CLASS_CAPABILITY, "Relational class"),
    /** can handle missing values in class attribute */
    MISSING_CLASS_VALUES(CLASS_CAPABILITY, "Missing class values"),
    // other
    /** can handle multi-instance data */
    ONLY_MULTIINSTANCE(OTHER_CAPABILITY, "Only multi-Instance data");

    /** the flags for the capabilities */
    private int m_Flags = 0;

    /** the display string */
    private String m_Display;

    /**
     * initializes the capability with the given flags
     *
     * @param flags "meta-data" for the capability
     * @param display the display string (must be unique!)
     */
    private Capability(final int flags, final String display) {
      this.m_Flags = flags;
      this.m_Display = display;
    }

    /**
     * returns true if the capability is an attribute
     *
     * @return true if the capability is an attribute
     */
    public boolean isAttribute() {
      return ((this.m_Flags & ATTRIBUTE) == ATTRIBUTE);
    }

    /**
     * returns true if the capability is a class
     *
     * @return true if the capability is a class
     */
    public boolean isClass() {
      return ((this.m_Flags & CLASS) == CLASS);
    }

    /**
     * returns true if the capability is an attribute capability
     *
     * @return true if the capability is an attribute capability
     */
    public boolean isAttributeCapability() {
      return ((this.m_Flags & ATTRIBUTE_CAPABILITY) == ATTRIBUTE_CAPABILITY);
    }

    /**
     * returns true if the capability is an other capability
     * (NOTE: javadoc previously described the class-capability check here;
     * this method tests the OTHER_CAPABILITY flag)
     *
     * @return true if the capability is an other capability
     */
    public boolean isOtherCapability() {
      return ((this.m_Flags & OTHER_CAPABILITY) == OTHER_CAPABILITY);
    }

    /**
     * returns true if the capability is a class capability
     * (NOTE: javadoc previously described the other-capability check here;
     * this method tests the CLASS_CAPABILITY flag)
     *
     * @return true if the capability is a class capability
     */
    public boolean isClassCapability() {
      return ((this.m_Flags & CLASS_CAPABILITY) == CLASS_CAPABILITY);
    }

    /**
     * returns the display string of the capability
     *
     * @return the display string
     */
    @Override
    public String toString() {
      return this.m_Display;
    }
  }
/** the object that owns this capabilities instance */
protected CapabilitiesHandler m_Owner;
/** the hashset for storing the active capabilities */
protected HashSet<Capability> m_Capabilities;
/** the hashset for storing dependent capabilities, eg for meta-classifiers */
protected HashSet<Capability> m_Dependencies;
/** the interface-defined capabilities*/
protected HashSet<Class> m_InterfaceDefinedCapabilities;
/** the reason why the test failed, used to throw an exception */
protected Exception m_FailReason = null;
/** the minimum number of instances in a dataset */
protected int m_MinimumNumberInstances = 1;
/** whether to perform any tests at all */
protected boolean m_Test = TEST;
/** whether to perform data based tests */
protected boolean m_InstancesTest = INSTANCES_TEST;
/** whether to perform attribute based tests */
protected boolean m_AttributeTest = ATTRIBUTE_TEST;
/** whether to test for missing values */
protected boolean m_MissingValuesTest = MISSING_VALUES_TEST;
/** whether to test for missing class values */
protected boolean m_MissingClassValuesTest = MISSING_CLASS_VALUES_TEST;
/** whether to test for minimum number of instances */
protected boolean m_MinimumNumberInstancesTest = MINIMUM_NUMBER_INSTANCES_TEST;
  /**
   * initializes the capabilities for the given owner
   *
   * @param owner the object that produced this Capabilities instance
   */
  public Capabilities(final CapabilitiesHandler owner) {
    super();
    this.setOwner(owner);
    this.m_Capabilities = new HashSet<Capability>();
    this.m_Dependencies = new HashSet<Capability>();
    // Incremental (updateable) schemes can start from an empty dataset, so
    // they do not require a minimum number of training instances up front.
    if (owner instanceof weka.classifiers.UpdateableClassifier || owner instanceof weka.clusterers.UpdateableClusterer) {
      this.setMinimumNumberInstances(0);
    }
  }
/**
* Does owner implement CapabilitiesIgnorer and does it not
* want capability checking to be performed?
*/
public boolean doNotCheckCapabilities() {
// Do we actually want to check capabilities?
if ((this.getOwner() != null) && (this.getOwner() instanceof CapabilitiesIgnorer)) {
return ((CapabilitiesIgnorer) this.getOwner()).getDoNotCheckCapabilities();
} else {
return false;
}
}
/**
* Creates and returns a copy of this object.
*
* @return a clone of this object
*/
@Override
public Object clone() {
Capabilities result;
result = new Capabilities(this.m_Owner);
result.assign(this);
return result;
}
/**
* retrieves the data from the given Capabilities object
*
* @param c the capabilities object to initialize with
*/
public void assign(final Capabilities c) {
for (Capability cap : Capability.values()) {
// capability
if (c.handles(cap)) {
this.enable(cap);
} else {
this.disable(cap);
}
// dependency
if (c.hasDependency(cap)) {
this.enableDependency(cap);
} else {
this.disableDependency(cap);
}
}
this.setMinimumNumberInstances(c.getMinimumNumberInstances());
this.m_InterfaceDefinedCapabilities = new HashSet(c.m_InterfaceDefinedCapabilities);
}
/**
* performs an AND conjunction with the capabilities of the given Capabilities
* object and updates itself
*
* @param c the capabilities to AND with
*/
public void and(final Capabilities c) {
for (Capability cap : Capability.values()) {
// capability
if (this.handles(cap) && c.handles(cap)) {
this.m_Capabilities.add(cap);
} else {
this.m_Capabilities.remove(cap);
}
// dependency
if (this.hasDependency(cap) && c.hasDependency(cap)) {
this.m_Dependencies.add(cap);
} else {
this.m_Dependencies.remove(cap);
}
}
// minimum number of instances that both handlers need at least to work
if (c.getMinimumNumberInstances() > this.getMinimumNumberInstances()) {
this.setMinimumNumberInstances(c.getMinimumNumberInstances());
}
HashSet<Class> intersection = new HashSet<Class>();
for (Class cl : c.m_InterfaceDefinedCapabilities) {
if (this.m_InterfaceDefinedCapabilities.contains(cl)) {
intersection.add(cl);
}
}
this.m_InterfaceDefinedCapabilities = intersection;
}
/**
* performs an OR conjunction with the capabilities of the given Capabilities
* object and updates itself
*
* @param c the capabilities to OR with
*/
public void or(final Capabilities c) {
for (Capability cap : Capability.values()) {
// capability
if (this.handles(cap) || c.handles(cap)) {
this.m_Capabilities.add(cap);
} else {
this.m_Capabilities.remove(cap);
}
// dependency
if (this.hasDependency(cap) || c.hasDependency(cap)) {
this.m_Dependencies.add(cap);
} else {
this.m_Dependencies.remove(cap);
}
}
if (c.getMinimumNumberInstances() < this.getMinimumNumberInstances()) {
this.setMinimumNumberInstances(c.getMinimumNumberInstances());
}
this.m_InterfaceDefinedCapabilities.addAll(c.m_InterfaceDefinedCapabilities);
}
/**
* Returns true if the currently set capabilities support at least all of the
* capabilities of the given Capabilities object (checks only the enum!)
*
* @param c the capabilities to support at least
* @return true if all the requested capabilities are supported
*/
public boolean supports(final Capabilities c) {
for (Capability cap : Capability.values()) {
if (c.handles(cap) && !this.handles(cap)) {
return false;
}
}
// Check interface-based capabilities
for (Class cl : c.m_InterfaceDefinedCapabilities) {
if (!this.m_InterfaceDefinedCapabilities.contains(cl)) {
return false;
}
}
return true;
}
/**
* Returns true if the currently set capabilities support (or have a
* dependency) at least all of the capabilities of the given Capabilities
* object (checks only the enum!)
*
* @param c the capabilities (or dependencies) to support at least
* @return true if all the requested capabilities are supported (or at least
* have a dependency)
*/
public boolean supportsMaybe(final Capabilities c) {
for (Capability cap : Capability.values()) {
if (c.handles(cap) && !(this.handles(cap) || this.hasDependency(cap))) {
return false;
}
}
// Check interface-based capabilities
for (Class cl : c.m_InterfaceDefinedCapabilities) {
if (!this.m_InterfaceDefinedCapabilities.contains(cl)) {
return false;
}
}
return true;
}
/**
* sets the owner of this capabilities object
*
* @param value the new owner
*/
public void setOwner(final CapabilitiesHandler value) {
this.m_Owner = value;
this.m_InterfaceDefinedCapabilities = new HashSet<Class>();
if (this.m_Owner != null) {
for (Class c : INTERFACE_DEFINED_CAPABILITIES) {
if (c.isInstance(this.m_Owner)) {
this.m_InterfaceDefinedCapabilities.add(c);
}
}
}
}
/**
* returns the owner of this capabilities object
*
* @return the current owner of this capabilites object
*/
public CapabilitiesHandler getOwner() {
return this.m_Owner;
}
/**
* sets the minimum number of instances that have to be in the dataset
*
* @param value the minimum number of instances
*/
public void setMinimumNumberInstances(final int value) {
if (value >= 0) {
this.m_MinimumNumberInstances = value;
}
}
/**
* returns the minimum number of instances that have to be in the dataset
*
* @return the minimum number of instances
*/
public int getMinimumNumberInstances() {
return this.m_MinimumNumberInstances;
}
/**
* Returns an Iterator over the stored capabilities
*
* @return iterator over the current capabilities
*/
public Iterator<Capability> capabilities() {
return this.m_Capabilities.iterator();
}
/**
* Returns an Iterator over the stored dependencies
*
* @return iterator over the current dependencies
*/
public Iterator<Capability> dependencies() {
return this.m_Dependencies.iterator();
}
/**
* enables the given capability. Enabling NOMINAL_ATTRIBUTES also enables
* BINARY_ATTRIBUTES, UNARY_ATTRIBUTES and EMPTY_NOMINAL_ATTRIBUTES. Enabling
* BINARY_ATTRIBUTES also enables UNARY_ATTRIBUTES and
* EMPTY_NOMINAL_ATTRIBUTES. Enabling UNARY_ATTRIBUTES also enables
* EMPTY_NOMINAL_ATTRIBUTES. But NOMINAL_CLASS only enables BINARY_CLASS,
* since normal schemes in Weka don't work with datasets that have only 1
* class label (or none).
*
* @param c the capability to enable
*/
public void enable(final Capability c) {
// attributes
if (c == Capability.NOMINAL_ATTRIBUTES) {
this.enable(Capability.BINARY_ATTRIBUTES);
} else if (c == Capability.BINARY_ATTRIBUTES) {
this.enable(Capability.UNARY_ATTRIBUTES);
} else if (c == Capability.UNARY_ATTRIBUTES) {
this.enable(Capability.EMPTY_NOMINAL_ATTRIBUTES);
}
// class
else if (c == Capability.NOMINAL_CLASS) {
this.enable(Capability.BINARY_CLASS);
}
this.m_Capabilities.add(c);
}
/**
* enables the dependency flag for the given capability Enabling
* NOMINAL_ATTRIBUTES also enables BINARY_ATTRIBUTES, UNARY_ATTRIBUTES and
* EMPTY_NOMINAL_ATTRIBUTES. Enabling BINARY_ATTRIBUTES also enables
* UNARY_ATTRIBUTES and EMPTY_NOMINAL_ATTRIBUTES. Enabling UNARY_ATTRIBUTES
* also enables EMPTY_NOMINAL_ATTRIBUTES. But NOMINAL_CLASS only enables
* BINARY_CLASS, since normal schemes in Weka don't work with datasets that
* have only 1 class label (or none).
*
* @param c the capability to enable the dependency flag for
*/
public void enableDependency(final Capability c) {
// attributes
if (c == Capability.NOMINAL_ATTRIBUTES) {
this.enableDependency(Capability.BINARY_ATTRIBUTES);
} else if (c == Capability.BINARY_ATTRIBUTES) {
this.enableDependency(Capability.UNARY_ATTRIBUTES);
} else if (c == Capability.UNARY_ATTRIBUTES) {
this.enableDependency(Capability.EMPTY_NOMINAL_ATTRIBUTES);
}
// class
else if (c == Capability.NOMINAL_CLASS) {
this.enableDependency(Capability.BINARY_CLASS);
}
this.m_Dependencies.add(c);
}
/**
* enables all class types
*
* @see #disableAllClasses()
* @see #getClassCapabilities()
*/
public void enableAllClasses() {
for (Capability cap : Capability.values()) {
if (cap.isClass()) {
this.enable(cap);
}
}
}
/**
* enables all class type dependencies
*
* @see #disableAllClassDependencies()
* @see #getClassCapabilities()
*/
public void enableAllClassDependencies() {
for (Capability cap : Capability.values()) {
if (cap.isClass()) {
this.enableDependency(cap);
}
}
}
/**
* enables all attribute types
*
* @see #disableAllAttributes()
* @see #getAttributeCapabilities()
*/
public void enableAllAttributes() {
for (Capability cap : Capability.values()) {
if (cap.isAttribute()) {
this.enable(cap);
}
}
}
/**
* enables all attribute type dependencies
*
* @see #disableAllAttributeDependencies()
* @see #getAttributeCapabilities()
*/
public void enableAllAttributeDependencies() {
for (Capability cap : Capability.values()) {
if (cap.isAttribute()) {
this.enableDependency(cap);
}
}
}
/**
* enables all attribute and class types (including dependencies)
*/
public void enableAll() {
this.enableAllAttributes();
this.enableAllAttributeDependencies();
this.enableAllClasses();
this.enableAllClassDependencies();
this.enable(Capability.MISSING_VALUES);
this.enable(Capability.MISSING_CLASS_VALUES);
}
/**
* disables the given capability Disabling NOMINAL_ATTRIBUTES also disables
* BINARY_ATTRIBUTES, UNARY_ATTRIBUTES and EMPTY_NOMINAL_ATTRIBUTES. Disabling
* BINARY_ATTRIBUTES also disables UNARY_ATTRIBUTES and
* EMPTY_NOMINAL_ATTRIBUTES. Disabling UNARY_ATTRIBUTES also disables
* EMPTY_NOMINAL_ATTRIBUTES. The same hierarchy applies to the class
* capabilities.
*
* @param c the capability to disable
*/
public void disable(final Capability c) {
// attributes
if (c == Capability.NOMINAL_ATTRIBUTES) {
this.disable(Capability.BINARY_ATTRIBUTES);
} else if (c == Capability.BINARY_ATTRIBUTES) {
this.disable(Capability.UNARY_ATTRIBUTES);
} else if (c == Capability.UNARY_ATTRIBUTES) {
this.disable(Capability.EMPTY_NOMINAL_ATTRIBUTES);
}
// class
else if (c == Capability.NOMINAL_CLASS) {
this.disable(Capability.BINARY_CLASS);
} else if (c == Capability.BINARY_CLASS) {
this.disable(Capability.UNARY_CLASS);
} else if (c == Capability.UNARY_CLASS) {
this.disable(Capability.EMPTY_NOMINAL_CLASS);
}
this.m_Capabilities.remove(c);
}
/**
* disables the dependency of the given capability Disabling
* NOMINAL_ATTRIBUTES also disables BINARY_ATTRIBUTES, UNARY_ATTRIBUTES and
* EMPTY_NOMINAL_ATTRIBUTES. Disabling BINARY_ATTRIBUTES also disables
* UNARY_ATTRIBUTES and EMPTY_NOMINAL_ATTRIBUTES. Disabling UNARY_ATTRIBUTES
* also disables EMPTY_NOMINAL_ATTRIBUTES. The same hierarchy applies to the
* class capabilities.
*
* @param c the capability to disable the dependency flag for
*/
public void disableDependency(final Capability c) {
// attributes
if (c == Capability.NOMINAL_ATTRIBUTES) {
this.disableDependency(Capability.BINARY_ATTRIBUTES);
} else if (c == Capability.BINARY_ATTRIBUTES) {
this.disableDependency(Capability.UNARY_ATTRIBUTES);
} else if (c == Capability.UNARY_ATTRIBUTES) {
this.disableDependency(Capability.EMPTY_NOMINAL_ATTRIBUTES);
}
// class
else if (c == Capability.NOMINAL_CLASS) {
this.disableDependency(Capability.BINARY_CLASS);
} else if (c == Capability.BINARY_CLASS) {
this.disableDependency(Capability.UNARY_CLASS);
} else if (c == Capability.UNARY_CLASS) {
this.disableDependency(Capability.EMPTY_NOMINAL_CLASS);
}
this.m_Dependencies.remove(c);
}
/**
* disables all class types
*
* @see #enableAllClasses()
* @see #getClassCapabilities()
*/
public void disableAllClasses() {
for (Capability cap : Capability.values()) {
if (cap.isClass()) {
this.disable(cap);
}
}
}
/**
* disables all class type dependencies
*
* @see #enableAllClassDependencies()
* @see #getClassCapabilities()
*/
public void disableAllClassDependencies() {
for (Capability cap : Capability.values()) {
if (cap.isClass()) {
this.disableDependency(cap);
}
}
}
/**
* disables all attribute types
*
* @see #enableAllAttributes()
* @see #getAttributeCapabilities()
*/
public void disableAllAttributes() {
for (Capability cap : Capability.values()) {
if (cap.isAttribute()) {
this.disable(cap);
}
}
}
/**
* disables all attribute type dependencies
*
* @see #enableAllAttributeDependencies()
* @see #getAttributeCapabilities()
*/
public void disableAllAttributeDependencies() {
for (Capability cap : Capability.values()) {
if (cap.isAttribute()) {
this.disableDependency(cap);
}
}
}
/**
* disables all attribute and class types (including dependencies)
*/
public void disableAll() {
this.disableAllAttributes();
this.disableAllAttributeDependencies();
this.disableAllClasses();
this.disableAllClassDependencies();
this.disable(Capability.MISSING_VALUES);
this.disable(Capability.MISSING_CLASS_VALUES);
this.disable(Capability.NO_CLASS);
}
/**
* returns all class capabilities
*
* @return all capabilities regarding the class
* @see #enableAllClasses()
* @see #disableAllClasses()
*/
public Capabilities getClassCapabilities() {
Capabilities result;
result = new Capabilities(this.getOwner());
for (Capability cap : Capability.values()) {
if (cap.isClassCapability()) {
if (this.handles(cap)) {
result.m_Capabilities.add(cap);
}
}
}
return result;
}
/**
* returns all attribute capabilities
*
* @return all capabilities regarding attributes
* @see #enableAllAttributes()
* @see #disableAllAttributes()
*/
public Capabilities getAttributeCapabilities() {
Capabilities result;
result = new Capabilities(this.getOwner());
for (Capability cap : Capability.values()) {
if (cap.isAttributeCapability()) {
if (this.handles(cap)) {
result.m_Capabilities.add(cap);
}
}
}
return result;
}
/**
* returns all other capabilities, besides class and attribute related ones
*
* @return all other capabilities, besides class and attribute related ones
*/
public Capabilities getOtherCapabilities() {
Capabilities result;
result = new Capabilities(this.getOwner());
for (Capability cap : Capability.values()) {
if (cap.isOtherCapability()) {
if (this.handles(cap)) {
result.m_Capabilities.add(cap);
}
}
}
return result;
}
/**
* returns true if the classifier handler has the specified capability
*
* @param c the capability to test
* @return true if the classifier handler has the capability
*/
public boolean handles(final Capability c) {
return this.m_Capabilities.contains(c);
}
/**
* returns true if the classifier handler has a dependency for the specified
* capability
*
* @param c the capability to test
* @return true if the classifier handler has a dependency for the capability
*/
public boolean hasDependency(final Capability c) {
return this.m_Dependencies.contains(c);
}
/**
* Checks whether there are any dependencies at all
*
* @return true if there is at least one dependency for a capability
*/
public boolean hasDependencies() {
return (this.m_Dependencies.size() > 0);
}
/**
* returns the reason why the tests failed, is null if tests succeeded
*
* @return the reason why the tests failed
*/
public Exception getFailReason() {
return this.m_FailReason;
}
/**
* Generates the message for, e.g., an exception. Adds the classname before
* the actual message and returns that string.
*
* @param msg the actual content of the message, e.g., exception
* @return the new message
*/
protected String createMessage(final String msg) {
String result;
if (this.getOwner() != null) {
result = this.getOwner().getClass().getName();
} else {
result = "<anonymous>";
}
result += ": " + msg;
return result;
}
/**
* Test the given attribute, whether it can be processed by the handler, given
* its capabilities. The method assumes that the specified attribute is not
* the class attribute.
*
* @param att the attribute to test
* @return true if all the tests succeeded
* @see #test(Attribute, boolean)
*/
public boolean test(final Attribute att) {
return this.test(att, false);
}
/**
* Test the given attribute, whether it can be processed by the handler, given
* its capabilities.
*
* @param att the attribute to test
* @param isClass whether this attribute is the class attribute
* @return true if all the tests succeeded
* @see #m_AttributeTest
*/
public boolean test(final Attribute att, final boolean isClass) {
// Do we actually want to check capabilities?
if (this.doNotCheckCapabilities()) {
return true;
}
boolean result;
Capability cap;
Capability capBinary;
Capability capUnary;
Capability capEmpty;
String errorStr;
result = true;
// shall we test the data?
if (!this.m_AttributeTest) {
return result;
}
// for exception
if (isClass) {
errorStr = "class";
} else {
errorStr = "attributes";
}
switch (att.type()) {
case Attribute.NOMINAL:
if (isClass) {
cap = Capability.NOMINAL_CLASS;
capBinary = Capability.BINARY_CLASS;
capUnary = Capability.UNARY_CLASS;
capEmpty = Capability.EMPTY_NOMINAL_CLASS;
} else {
cap = Capability.NOMINAL_ATTRIBUTES;
capBinary = Capability.BINARY_ATTRIBUTES;
capUnary = Capability.UNARY_ATTRIBUTES;
capEmpty = Capability.EMPTY_NOMINAL_ATTRIBUTES;
}
if (this.handles(cap) && (att.numValues() > 2)) {
break;
} else if (this.handles(capBinary) && (att.numValues() == 2)) {
break;
} else if (this.handles(capUnary) && (att.numValues() == 1)) {
break;
} else if (this.handles(capEmpty) && (att.numValues() == 0)) {
break;
}
if (att.numValues() == 0) {
this.m_FailReason = new UnsupportedAttributeTypeException(this.createMessage("Cannot handle empty nominal " + errorStr + "!"));
result = false;
}
if (att.numValues() == 1) {
this.m_FailReason = new UnsupportedAttributeTypeException(this.createMessage("Cannot handle unary " + errorStr + "!"));
result = false;
} else if (att.numValues() == 2) {
this.m_FailReason = new UnsupportedAttributeTypeException(this.createMessage("Cannot handle binary " + errorStr + "!"));
result = false;
} else {
this.m_FailReason = new UnsupportedAttributeTypeException(this.createMessage("Cannot handle multi-valued nominal " + errorStr + "!"));
result = false;
}
break;
case Attribute.NUMERIC:
if (isClass) {
cap = Capability.NUMERIC_CLASS;
} else {
cap = Capability.NUMERIC_ATTRIBUTES;
}
if (!this.handles(cap)) {
this.m_FailReason = new UnsupportedAttributeTypeException(this.createMessage("Cannot handle numeric " + errorStr + "!"));
result = false;
}
break;
case Attribute.DATE:
if (isClass) {
cap = Capability.DATE_CLASS;
} else {
cap = Capability.DATE_ATTRIBUTES;
}
if (!this.handles(cap)) {
this.m_FailReason = new UnsupportedAttributeTypeException(this.createMessage("Cannot handle date " + errorStr + "!"));
result = false;
}
break;
case Attribute.STRING:
if (isClass) {
cap = Capability.STRING_CLASS;
} else {
cap = Capability.STRING_ATTRIBUTES;
}
if (!this.handles(cap)) {
this.m_FailReason = new UnsupportedAttributeTypeException(this.createMessage("Cannot handle string " + errorStr + "!"));
result = false;
}
break;
case Attribute.RELATIONAL:
if (isClass) {
cap = Capability.RELATIONAL_CLASS;
} else {
cap = Capability.RELATIONAL_ATTRIBUTES;
}
if (!this.handles(cap)) {
this.m_FailReason = new UnsupportedAttributeTypeException(this.createMessage("Cannot handle relational " + errorStr + "!"));
result = false;
}
// attributes in the relation of this attribute must be tested
// separately with a different Capabilites object
break;
default:
this.m_FailReason = new UnsupportedAttributeTypeException(this.createMessage("Cannot handle unknown attribute type '" + att.type() + "'!"));
result = false;
}
return result;
}
/**
* Tests the given data, whether it can be processed by the handler, given its
* capabilities. Classifiers implementing the
* <code>MultiInstanceCapabilitiesHandler</code> interface are checked
* automatically for their multi-instance Capabilities (if no bags, then only
* the bag-structure, otherwise only the first bag).
*
* @param data the data to test
* @return true if all the tests succeeded
* @throws InterruptedException
* @see #test(Instances, int, int)
*/
public boolean test(final Instances data) throws InterruptedException {
return this.test(data, 0, data.numAttributes() - 1);
}
/**
* Gets the class for the given name. Return Object if class is not found.
*/
protected static Class getClass(final String name) {
try {
return Class.forName(name);
} catch (Exception ex) {
System.err.println("Class: " + name + " not found in Capabilities");
return new Object().getClass();
}
}
  /**
   * Tests a certain range of attributes of the given data, whether it can be
   * processed by the handler, given its capabilities. Classifiers implementing
   * the <code>MultiInstanceCapabilitiesHandler</code> interface are checked
   * automatically for their multi-instance Capabilities (if no bags, then only
   * the bag-structure, otherwise only the first bag).
   *
   * @param data the data to test
   * @param fromIndex the range of attributes - start (incl.)
   * @param toIndex the range of attributes - end (incl.)
   * @return true if all the tests succeeded; on failure {@link #m_FailReason}
   *         holds the reason
   * @throws InterruptedException if the executing thread was interrupted
   *           while scanning the instances (cooperative-interruption
   *           extension of this fork)
   * @see MultiInstanceCapabilitiesHandler
   * @see #m_InstancesTest
   * @see #m_MissingValuesTest
   * @see #m_MissingClassValuesTest
   * @see #m_MinimumNumberInstancesTest
   */
  public boolean test(final Instances data, final int fromIndex, final int toIndex) throws InterruptedException {
    // Do we actually want to check capabilities?
    if (this.doNotCheckCapabilities()) {
      return true;
    }
    int i;
    int n;
    int m;
    Attribute att;
    Instance inst;
    boolean testClass;
    Capabilities cap;
    boolean missing;
    Iterator<Capability> iter;
    // shall we test the data?
    if (!this.m_InstancesTest) {
      return true;
    }
    // no Capabilities? -> warning (NO_CLASS alone counts as "none set")
    if ((this.m_Capabilities.size() == 0) || ((this.m_Capabilities.size() == 1) && this.handles(Capability.NO_CLASS))) {
      System.err.println(this.createMessage("No capabilities set!"));
    }
    // any attributes in the requested range?
    if (toIndex - fromIndex < 0) {
      this.m_FailReason = new WekaException(this.createMessage("No attributes!"));
      return false;
    }
    // do we need to test the class attribute, i.e., is the class attribute
    // within the range of attributes?
    testClass = (data.classIndex() > -1) && (data.classIndex() >= fromIndex) && (data.classIndex() <= toIndex);
    // attributes: check each type and any non-unit attribute weights
    Class weightedAttributesHandler = getClass("weka.core.WeightedAttributesHandler");
    for (i = fromIndex; i <= toIndex; i++) {
      att = data.attribute(i);
      // class is handled separately
      if (i == data.classIndex()) {
        continue;
      }
      // check attribute types
      if (!this.test(att)) {
        return false;
      }
      // weighted attributes require the WeightedAttributesHandler interface
      if (att.weight() != 1.0) {
        if (INTERFACE_DEFINED_CAPABILITIES.contains(weightedAttributesHandler) && !this.m_InterfaceDefinedCapabilities.contains(weightedAttributesHandler)) {
          this.m_FailReason = new WekaException(this.createMessage("Some attribute weights are not equal to 1 and " + "scheme does not implement the WeightedAttributesHandler interface!"));
          return false;
        }
      }
    }
    // class: a class attribute must be set unless NO_CLASS is handled
    if (!this.handles(Capability.NO_CLASS) && (data.classIndex() == -1)) {
      this.m_FailReason = new UnassignedClassException(this.createMessage("Class attribute not set!"));
      return false;
    }
    // special case: scheme handles NO_CLASS but the data has a class set -
    // then at least one real class capability must remain after removing
    // NO_CLASS
    if (this.handles(Capability.NO_CLASS) && (data.classIndex() > -1)) {
      cap = this.getClassCapabilities();
      cap.disable(Capability.NO_CLASS);
      iter = cap.capabilities();
      if (!iter.hasNext()) {
        this.m_FailReason = new WekaException(this.createMessage("Cannot handle any class attribute!"));
        return false;
      }
    }
    if (testClass && !this.handles(Capability.NO_CLASS)) {
      att = data.classAttribute();
      if (!this.test(att, true)) {
        return false;
      }
      // special handling of RELATIONAL class
      // TODO: store additional Capabilities for this case
      // missing class labels
      if (this.m_MissingClassValuesTest) {
        if (!this.handles(Capability.MISSING_CLASS_VALUES)) {
          // missing class values are not allowed at all
          for (i = 0; i < data.numInstances(); i++) {
            if (data.instance(i).classIsMissing()) {
              this.m_FailReason = new WekaException(this.createMessage("Cannot handle missing class values!"));
              return false;
            }
          }
        } else {
          if (this.m_MinimumNumberInstancesTest) {
            // missing class values are allowed, but enough labeled
            // instances must remain
            int hasClass = 0;
            for (i = 0; i < data.numInstances(); i++) {
              // cooperative interruption point (interruptible-weka fork)
              if (Thread.interrupted()) {
                throw new InterruptedException("Killed WEKA!");
              }
              if (!data.instance(i).classIsMissing()) {
                hasClass++;
              }
            }
            // not enough instances with class labels?
            if (hasClass < this.getMinimumNumberInstances()) {
              this.m_FailReason = new WekaException(this.createMessage("Not enough training instances with class labels (required: " + this.getMinimumNumberInstances() + ", provided: " + hasClass + ")!"));
              return false;
            }
          }
        }
      }
    }
    // missing values and instance weights
    missing = false;
    Class weightedInstancesHandler = getClass("weka.core.WeightedInstancesHandler");
    for (i = 0; i < data.numInstances(); i++) {
      inst = data.instance(i);
      // weighted instances require the WeightedInstancesHandler interface
      if (inst.weight() != 1.0) {
        if (INTERFACE_DEFINED_CAPABILITIES.contains(weightedInstancesHandler) && !this.m_InterfaceDefinedCapabilities.contains(weightedInstancesHandler)) {
          this.m_FailReason = new WekaException(this.createMessage("Some instance weights are not equal to 1 and " + "scheme does not implement the WeightedInstancesHandler interface!"));
          return false;
        }
      }
      if (this.m_MissingValuesTest) {
        if (!this.handles(Capability.MISSING_VALUES)) {
          if (inst instanceof SparseInstance) {
            // sparse: only the explicitly stored values can be missing
            for (m = 0; m < inst.numValues(); m++) {
              n = inst.index(m);
              // out of scope?
              if (n < fromIndex) {
                continue;
              }
              if (n > toIndex) {
                break;
              }
              // skip class
              if (n == inst.classIndex()) {
                continue;
              }
              if (inst.isMissing(n)) {
                missing = true;
                break;
              }
            }
          } else {
            // dense: scan the full attribute range
            for (n = fromIndex; n <= toIndex; n++) {
              // skip class
              if (n == inst.classIndex()) {
                continue;
              }
              if (inst.isMissing(n)) {
                missing = true;
                break;
              }
            }
          }
          if (missing) {
            this.m_FailReason = new NoSupportForMissingValuesException(this.createMessage("Cannot handle missing values!"));
            return false;
          }
        }
      }
    }
    // instances: enough of them?
    if (this.m_MinimumNumberInstancesTest) {
      if (data.numInstances() < this.getMinimumNumberInstances()) {
        this.m_FailReason = new WekaException(this.createMessage("Not enough training instances (required: " + this.getMinimumNumberInstances() + ", provided: " + data.numInstances() + ")!"));
        return false;
      }
    }
    // Multi-Instance? -> check structure (regardless of attribute range!)
    if (this.handles(Capability.ONLY_MULTIINSTANCE)) {
      // number of attributes?
      if (data.numAttributes() != 3) {
        this.m_FailReason = new WekaException(this.createMessage("Incorrect Multi-Instance format, must be 'bag-id, bag, class'!"));
        return false;
      }
      // type of attributes and position of class?
      if (!data.attribute(0).isNominal() || !data.attribute(1).isRelationValued() || (data.classIndex() != data.numAttributes() - 1)) {
        this.m_FailReason = new WekaException(this.createMessage("Incorrect Multi-Instance format, must be 'NOMINAL att, RELATIONAL att, CLASS att'!"));
        return false;
      }
      // check data immediately: delegate the bag content to the owner's
      // multi-instance capabilities (first bag if available, else only the
      // relational structure)
      if (this.getOwner() instanceof MultiInstanceCapabilitiesHandler) {
        MultiInstanceCapabilitiesHandler handler = (MultiInstanceCapabilitiesHandler) this.getOwner();
        cap = handler.getMultiInstanceCapabilities();
        boolean result;
        if (data.numInstances() > 0 && data.attribute(1).numValues() > 0) {
          result = cap.test(data.attribute(1).relation(0));
        } else {
          result = cap.test(data.attribute(1).relation());
        }
        if (!result) {
          this.m_FailReason = cap.m_FailReason;
          return false;
        }
      }
    }
    // passed all tests!
    return true;
  }
/**
* tests the given attribute by calling the test(Attribute,boolean) method and
* throws an exception if the test fails. The method assumes that the
* specified attribute is not the class attribute.
*
* @param att the attribute to test
* @throws Exception in case the attribute doesn't pass the tests
* @see #test(Attribute,boolean)
*/
public void testWithFail(final Attribute att) throws Exception {
this.test(att, false);
}
/**
* tests the given attribute by calling the test(Attribute,boolean) method and
* throws an exception if the test fails.
*
* @param att the attribute to test
* @param isClass whether this attribute is the class attribute
* @throws Exception in case the attribute doesn't pass the tests
* @see #test(Attribute,boolean)
*/
public void testWithFail(final Attribute att, final boolean isClass) throws Exception {
if (!this.test(att, isClass)) {
throw this.m_FailReason;
}
}
/**
* tests the given data by calling the test(Instances,int,int) method and
* throws an exception if the test fails.
*
* @param data the data to test
* @param fromIndex the range of attributes - start (incl.)
* @param toIndex the range of attributes - end (incl.)
* @throws Exception in case the data doesn't pass the tests
* @see #test(Instances,int,int)
*/
public void testWithFail(final Instances data, final int fromIndex, final int toIndex) throws Exception {
if (!this.test(data, fromIndex, toIndex)) {
throw this.m_FailReason;
}
}
/**
* tests the given data by calling the test(Instances) method and throws an
* exception if the test fails.
*
* @param data the data to test
* @throws Exception in case the data doesn't pass the tests
* @see #test(Instances)
*/
public void testWithFail(final Instances data) throws Exception {
if (!this.test(data)) {
throw this.m_FailReason;
}
}
/**
* returns a comma-separated list of all the capabilities, excluding interface-based ones.
* @return the string describing the capabilities
*/
public String listCapabilities() {
Iterator<Capability> iter = this.capabilities();
ArrayList<String> caps = new ArrayList();
while (iter.hasNext()) {
caps.add(iter.next().toString());
}
Collections.sort(caps);
String s = caps.toString();
return s.substring(1, s.length() - 1);
}
/**
* generates a string from the capabilities, suitable to add to the help
* text.
*
* @param title the title for the capabilities
* @return a string describing the capabilities
*/
public String addCapabilities(final String title) {
String result;
String caps;
result = title + "\n";
// class
caps = this.getClassCapabilities().listCapabilities();
if (caps.length() != 0) {
result += "Class -- ";
result += caps;
result += "\n\n";
}
// attribute
caps = this.getAttributeCapabilities().listCapabilities();
if (caps.length() != 0) {
result += "Attributes -- ";
result += caps;
result += "\n\n";
}
// other capabilities
caps = this.getOtherCapabilities().listCapabilities();
if (caps.length() != 0) {
result += "Other -- ";
result += caps;
result += "\n\n";
}
// interface-defined capabilities
ArrayList<String> interfaceNames = new ArrayList<String>();
for (Class c : this.m_InterfaceDefinedCapabilities) {
interfaceNames.add(c.getSimpleName());
}
Collections.sort(interfaceNames);
if (interfaceNames.size() > 0) {
result += "Interfaces -- ";
String s = interfaceNames.toString();
result += s.substring(1, s.length() - 1);
result += "\n\n";
}
// additional stuff
result += "Additional\n";
result += "Minimum number of instances: " + this.getMinimumNumberInstances() + "\n";
result += "\n";
return result;
}
/**
* returns a string representation of the capabilities
*
* @return a string representation of this object
*/
@Override
public String toString() {
Vector<Capability> sorted;
StringBuffer result;
result = new StringBuffer();
// capabilities
sorted = new Vector<Capability>(this.m_Capabilities);
Collections.sort(sorted);
result.append("Capabilities: " + sorted.toString() + "\n");
// dependencies
sorted = new Vector<Capability>(this.m_Dependencies);
Collections.sort(sorted);
result.append("Dependencies: " + sorted.toString() + "\n");
// interface-defined capabilities
ArrayList<String> interfaceNames = new ArrayList<String>();
for (Class c : this.m_InterfaceDefinedCapabilities) {
interfaceNames.add(c.getSimpleName());
}
Collections.sort(interfaceNames);
result.append("Interfaces: " + interfaceNames.toString() + "\n");
// other stuff
result.append("Minimum number of instances: " + this.getMinimumNumberInstances() + "\n");
return result.toString();
}
/**
* turns the capabilities object into source code. The returned source code is
* a block that creates a Capabilities object named 'objectname' and enables
* all the capabilities of this Capabilities object.
*
* @param objectname the name of the Capabilities object being instantiated
* @return the generated source code
*/
public String toSource(final String objectname) {
return this.toSource(objectname, 0);
}
/**
* turns the capabilities object into source code. The returned source code is
* a block that creates a Capabilities object named 'objectname' and enables
* all the capabilities of this Capabilities object.
*
* @param objectname the name of the Capabilities object being instantiated
* @param indent the number of blanks to indent
* @return the generated source code
*/
public String toSource(final String objectname, final int indent) {
StringBuffer result;
String capsName;
String capName;
String indentStr;
int i;
result = new StringBuffer();
capsName = Capabilities.class.getName();
capName = Capabilities.Capability.class.getName().replaceAll("\\$", ".");
indentStr = "";
for (i = 0; i < indent; i++) {
indentStr += " ";
}
// object name
result.append(indentStr + capsName + " " + objectname + " = new " + capsName + "(this);\n");
List<Capability> capsList = new ArrayList<Capability>();
boolean hasNominalAtt = false;
boolean hasBinaryAtt = false;
boolean hasUnaryAtt = false;
boolean hasNominalClass = false;
// capabilities
result.append("\n");
for (Capability cap : Capability.values()) {
// capability
if (this.handles(cap)) {
if (cap == Capability.NOMINAL_ATTRIBUTES) {
hasNominalAtt = true;
}
if (cap == Capability.NOMINAL_CLASS) {
hasNominalClass = true;
}
if (cap == Capability.BINARY_ATTRIBUTES) {
hasBinaryAtt = true;
}
if (cap == Capability.UNARY_ATTRIBUTES) {
hasUnaryAtt = true;
}
if (cap == Capability.EMPTY_NOMINAL_ATTRIBUTES) {
}
capsList.add(cap);
}
}
for (Capability cap : capsList) {
if ((cap == Capability.BINARY_ATTRIBUTES && hasNominalAtt) || (cap == Capability.UNARY_ATTRIBUTES && hasBinaryAtt) || (cap == Capability.EMPTY_NOMINAL_ATTRIBUTES && hasUnaryAtt)
|| (cap == Capability.BINARY_CLASS && hasNominalClass)) {
continue;
}
result.append(indentStr + objectname + ".enable(" + capName + "." + cap.name() + ");\n");
// dependency
if (this.hasDependency(cap)) {
result.append(indentStr + objectname + ".enableDependency(" + capName + "." + cap.name() + ");\n");
}
}
// capabilities
result.append("\n");
// other
result.append("\n");
result.append(indentStr + objectname + ".setMinimumNumberInstances(" + this.getMinimumNumberInstances() + ");\n");
result.append("\n");
return result.toString();
}
/**
* returns a Capabilities object specific for this data. The multi-instance
* capability is not checked as well as the minimum number of instances is not
* set.
*
* @param data the data to base the capabilities on
* @return a data-specific capabilities object
* @throws Exception in case an error occurrs, e.g., an unknown attribute type
*/
public static Capabilities forInstances(final Instances data) throws Exception {
return forInstances(data, false);
}
/**
 * Returns a Capabilities object specific for this data. The minimum number of
 * instances is not set, the check for multi-instance data is optional.
 *
 * @param data the data to base the capabilities on
 * @param multi if true then the multi-instance structure is checked, too
 * @return a data-specific capabilities object
 * @throws Exception in case an error occurs, e.g., an unknown attribute type
 */
public static Capabilities forInstances(final Instances data, final boolean multi) throws Exception {
  Capabilities result;
  Capabilities multiInstance;
  int i;
  int n;
  int m;
  Instance inst;
  boolean missing;
  // start from an empty, owner-less Capabilities object and enable only what
  // the given dataset actually requires
  result = new Capabilities(null);
  result.m_InterfaceDefinedCapabilities = new HashSet<Class>();
  // class attribute: enable the capability that matches its type; for a
  // nominal class the number of distinct values decides unary/binary/nominal
  if (data.classIndex() == -1) {
    result.enable(Capability.NO_CLASS);
  } else {
    switch (data.classAttribute().type()) {
    case Attribute.NOMINAL:
      if (data.classAttribute().numValues() == 1) {
        result.enable(Capability.UNARY_CLASS);
      } else if (data.classAttribute().numValues() == 2) {
        result.enable(Capability.BINARY_CLASS);
      } else {
        result.enable(Capability.NOMINAL_CLASS);
      }
      break;
    case Attribute.NUMERIC:
      result.enable(Capability.NUMERIC_CLASS);
      break;
    case Attribute.STRING:
      result.enable(Capability.STRING_CLASS);
      break;
    case Attribute.DATE:
      result.enable(Capability.DATE_CLASS);
      break;
    case Attribute.RELATIONAL:
      result.enable(Capability.RELATIONAL_CLASS);
      break;
    default:
      throw new UnsupportedAttributeTypeException("Unknown class attribute type '" + data.classAttribute() + "'!");
    }
    // missing class values: one missing label is enough, so stop scanning
    // as soon as the first is found
    for (i = 0; i < data.numInstances(); i++) {
      if (data.instance(i).classIsMissing()) {
        result.enable(Capability.MISSING_CLASS_VALUES);
        break;
      }
    }
  }
  // attributes: any non-default attribute weight requires the scheme to
  // implement WeightedAttributesHandler (recorded as an interface capability)
  Class weightedAttributesHandler = getClass("weka.core.WeightedAttributesHandler");
  for (i = 0; i < data.numAttributes(); i++) {
    // skip class
    if (i == data.classIndex()) {
      continue;
    }
    if (data.attribute(i).weight() != 1.0) {
      result.m_InterfaceDefinedCapabilities.add(weightedAttributesHandler);
    }
    switch (data.attribute(i).type()) {
    case Attribute.NOMINAL:
      // UNARY is always enabled for nominal attributes; BINARY/NOMINAL are
      // added on top depending on the number of values
      result.enable(Capability.UNARY_ATTRIBUTES);
      if (data.attribute(i).numValues() == 2) {
        result.enable(Capability.BINARY_ATTRIBUTES);
      } else if (data.attribute(i).numValues() > 2) {
        result.enable(Capability.NOMINAL_ATTRIBUTES);
      }
      break;
    case Attribute.NUMERIC:
      result.enable(Capability.NUMERIC_ATTRIBUTES);
      break;
    case Attribute.DATE:
      result.enable(Capability.DATE_ATTRIBUTES);
      break;
    case Attribute.STRING:
      result.enable(Capability.STRING_ATTRIBUTES);
      break;
    case Attribute.RELATIONAL:
      result.enable(Capability.RELATIONAL_ATTRIBUTES);
      break;
    default:
      throw new UnsupportedAttributeTypeException("Unknown attribute type '" + data.attribute(i).type() + "'!");
    }
  }
  // missing values and instance weights: scan instances until the first
  // missing (non-class) value is found; sparse instances only store their
  // non-zero entries, hence the different iteration
  missing = false;
  Class weightedInstancesHandler = getClass("weka.core.WeightedInstancesHandler");
  for (i = 0; i < data.numInstances(); i++) {
    inst = data.instance(i);
    if (inst.weight() != 1.0) {
      result.m_InterfaceDefinedCapabilities.add(weightedInstancesHandler);
    }
    if (inst instanceof SparseInstance) {
      for (m = 0; m < inst.numValues(); m++) {
        n = inst.index(m);
        // skip class
        if (n == inst.classIndex()) {
          continue;
        }
        if (inst.isMissing(n)) {
          missing = true;
          break;
        }
      }
    } else {
      for (n = 0; n < data.numAttributes(); n++) {
        // skip class
        if (n == inst.classIndex()) {
          continue;
        }
        if (inst.isMissing(n)) {
          missing = true;
          break;
        }
      }
    }
    if (missing) {
      result.enable(Capability.MISSING_VALUES);
      break;
    }
  }
  // multi-instance data? heuristic: exactly three attributes - nominal
  // bag-id, relational bag, class last; in that case restrict the result to
  // the class capabilities plus the multi-instance attribute capabilities
  if (multi) {
    if ((data.numAttributes() == 3) && (data.attribute(0).isNominal()) // bag-id
      && (data.attribute(1).isRelationValued()) // bag
      && (data.classIndex() == data.numAttributes() - 1)) {
      multiInstance = new Capabilities(null);
      multiInstance.or(result.getClassCapabilities());
      multiInstance.enable(Capability.NOMINAL_ATTRIBUTES);
      multiInstance.enable(Capability.RELATIONAL_ATTRIBUTES);
      multiInstance.enable(Capability.ONLY_MULTIINSTANCE);
      result.assign(multiInstance);
    }
  }
  return result;
}
/**
 * Loads the given dataset and prints the Capabilities necessary to process it.
 * <p/>
 *
 * Valid parameters:
 * <p/>
 *
 * -file filename <br/>
 * the file to load
 *
 * -c index the explicit index of the class attribute (default: none)
 *
 * @param args the commandline arguments
 * @throws Exception if something goes wrong
 */
public static void main(final String[] args) throws Exception {
  if (args.length == 0) {
    System.out.println("\nUsage: " + Capabilities.class.getName() + " -file <dataset> [-c <class index>]\n");
    return;
  }
  // mandatory dataset file
  final String filename = Utils.getOption("file", args);
  if (filename.length() == 0) {
    throw new Exception("No file provided with option '-file'!");
  }
  // optional class index: "first", "last", or a 1-based attribute number;
  // -2 and -3 are internal sentinels for "last" and "not set"
  final String classStr = Utils.getOption("c", args);
  final int classIndex;
  if (classStr.length() == 0) {
    classIndex = -3; // not set
  } else if (classStr.equals("first")) {
    classIndex = 0;
  } else if (classStr.equals("last")) {
    classIndex = -2; // last
  } else {
    classIndex = Integer.parseInt(classStr) - 1;
  }
  // load the data with the requested class attribute
  final DataSource source = new DataSource(filename);
  final Instances data;
  if (classIndex == -3) {
    data = source.getDataSet();
  } else if (classIndex == -2) {
    data = source.getDataSet(source.getStructure().numAttributes() - 1);
  } else {
    data = source.getDataSet(classIndex);
  }
  // determine and print capabilities
  final Capabilities cap = forInstances(data);
  System.out.println("File: " + filename);
  System.out.println("Class index: " + ((data.classIndex() == -1) ? "not set" : "" + (data.classIndex() + 1)));
  System.out.println("Capabilities:");
  final Iterator<Capability> iter = cap.capabilities();
  while (iter.hasNext()) {
    System.out.println("- " + iter.next());
  }
}
/**
 * Returns the revision string.
 *
 * @return the revision
 */
@Override
public String getRevision() {
  // the keyword is expanded by the versioning system; extract just the number
  final String tag = "$Revision$";
  return RevisionUtils.extract(tag);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/CapabilitiesHandler.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* CapabilitiesHandler.java
* Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
*/
package weka.core;
/**
* Classes implementing this interface return their capabilities in regards
* to datasets.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
* @see Capabilities
*/
public interface CapabilitiesHandler {
  /**
   * Returns the capabilities of this object, i.e., which dataset
   * characteristics (attribute types, class types, missing values, ...)
   * it can handle.
   *
   * @return the capabilities of this object
   * @see Capabilities
   */
  public Capabilities getCapabilities();
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/CapabilitiesIgnorer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* CapabilitiesIgnorer.java
* Copyright (C) 2014 University of Waikato, Hamilton, New Zealand
*/
package weka.core;
/**
* Classes implementing this interface make it possible to turn off
* capabilities checking.
*
* @author Eibe Frank
* @version $Revision: 11004 $
* @see Capabilities
*/
public interface CapabilitiesIgnorer {
  /**
   * Returns true if we do not actually want to check
   * capabilities to conserve runtime.
   *
   * @return true if capabilities checking is disabled
   */
  public boolean getDoNotCheckCapabilities();
  /**
   * If argument is true, capabilities are not actually
   * checked to improve runtime.
   *
   * @param flag if true, capabilities checking is skipped
   */
  public void setDoNotCheckCapabilities(boolean flag);
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/CapabilitiesUtils.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* CapabilitiesUtils.java
* Copyright (C) 2017 University of Waikato, Hamilton, NZ
*/
package weka.core;
/**
* Helper class for Capabilities.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class CapabilitiesUtils {
  /**
   * Returns a comma-separated list of all the capabilities, excluding
   * interface-based ones. Simply delegates to the Capabilities object.
   *
   * @param c the capabilities to get a string representation from
   * @return the string describing the capabilities
   */
  public static String listCapabilities(Capabilities c) {
    final String list = c.listCapabilities();
    return list;
  }
  /**
   * Generates a string from the capabilities, suitable to add to the help
   * text. Simply delegates to the Capabilities object.
   *
   * @param title the title for the capabilities
   * @param c the capabilities
   * @return a string describing the capabilities
   */
  public static String addCapabilities(String title, Capabilities c) {
    final String help = c.addCapabilities(title);
    return help;
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/ChebyshevDistance.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ChebyshevDistance.java
* Copyright (C) 2007-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
/**
<!-- globalinfo-start -->
* Implements the Chebyshev distance. The distance between two vectors is the greatest of their differences along any coordinate dimension.<br/>
* <br/>
* For more information, see:<br/>
* <br/>
* Wikipedia. Chebyshev distance. URL http://en.wikipedia.org/wiki/Chebyshev_distance.
* <p/>
<!-- globalinfo-end -->
*
<!-- technical-bibtex-start -->
* BibTeX:
* <pre>
* @misc{missing_id,
* author = {Wikipedia},
* title = {Chebyshev distance},
* URL = {http://en.wikipedia.org/wiki/Chebyshev_distance}
* }
* </pre>
* <p/>
<!-- technical-bibtex-end -->
*
<!-- options-start -->
* Valid options are: <p/>
*
* <pre> -D
* Turns off the normalization of attribute
* values in distance calculation.</pre>
*
* <pre> -R <col1,col2-col4,...>
* Specifies list of columns to used in the calculation of the
* distance. 'first' and 'last' are valid indices.
* (default: first-last)</pre>
*
* <pre> -V
* Invert matching sense of column indices.</pre>
*
<!-- options-end -->
*
* @author Fracpete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class ChebyshevDistance
  extends NormalizableDistance
  implements TechnicalInformationHandler {

  /** for serialization. */
  private static final long serialVersionUID = -7739904999895461429L;

  /**
   * Constructs a Chebyshev Distance object; Instances must still be set.
   */
  public ChebyshevDistance() {
    super();
  }

  /**
   * Constructs a Chebyshev Distance object and automatically initializes the
   * ranges.
   *
   * @param data the instances the distance function should work on
   */
  public ChebyshevDistance(Instances data) {
    super(data);
  }

  /**
   * Returns a string describing this object.
   *
   * @return a description of the evaluator suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return
      "Implements the Chebyshev distance. The distance between two vectors "
      + "is the greatest of their differences along any coordinate dimension.\n\n"
      + "For more information, see:\n\n"
      + getTechnicalInformation().toString();
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result = new TechnicalInformation(Type.MISC);
    result.setValue(Field.AUTHOR, "Wikipedia");
    result.setValue(Field.TITLE, "Chebyshev distance");
    result.setValue(Field.URL, "http://en.wikipedia.org/wiki/Chebyshev_distance");
    return result;
  }

  /**
   * Updates the current distance calculated so far with the new difference
   * between two attributes. The difference between the attributes was
   * calculated with the difference(int,double,double) method.
   *
   * For the Chebyshev (L-infinity) distance this keeps the largest absolute
   * per-attribute difference seen so far.
   *
   * @param currDist the current distance calculated so far
   * @param diff the difference between two new attributes
   * @return the updated distance
   * @see #difference(int, double, double)
   */
  protected double updateDistance(double currDist, double diff) {
    // Math.max/Math.abs replace the original unbraced hand-rolled maximum,
    // behavior is identical
    return Math.max(currDist, Math.abs(diff));
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/Check.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* CheckScheme.java
* Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core;
import java.util.Enumeration;
import java.util.Vector;
/**
* Abstract general class for testing in Weka.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public abstract class Check implements OptionHandler, RevisionHandler {

  /** Debugging mode, gives extra output if true */
  protected boolean m_Debug = false;

  /** Silent mode, for no output at all to stdout */
  protected boolean m_Silent = false;

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> result = new Vector<Option>();
    result.addElement(new Option("\tTurn on debugging output.", "D", 0, "-D"));
    result.addElement(new Option("\tSilent mode - prints nothing to stdout.",
      "S", 0, "-S"));
    return result.elements();
  }

  /**
   * Parses a given list of options.
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  @Override
  public void setOptions(String[] options) throws Exception {
    setDebug(Utils.getFlag('D', options));
    setSilent(Utils.getFlag('S', options));
  }

  /**
   * Gets the current settings of the CheckClassifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {
    Vector<String> result;
    result = new Vector<String>();
    if (getDebug()) {
      result.add("-D");
    }
    if (getSilent()) {
      result.add("-S");
    }
    return result.toArray(new String[result.size()]);
  }

  /**
   * Tries to instantiate a new instance of the given class and checks whether
   * it is an instance of the specified class. For convenience one can also
   * specify a classname prefix (e.g., "weka.classifiers") to avoid long
   * classnames and then instantiate it with the shortened classname (e.g.,
   * "trees.J48").
   *
   * @param prefix the classname prefix (without trailing dot)
   * @param cls the class to check whether the generated object is an instance
   *          of
   * @param classname the classname to instantiate
   * @param options optional options for the object
   * @return the configured object
   * @throws Exception if instantiation fails
   */
  protected Object forName(String prefix, Class<?> cls, String classname,
    String[] options) throws Exception {
    Object result;
    result = null;
    try {
      result = Utils.forName(cls, classname, options);
    } catch (Exception e) {
      // shall we try with prefix?
      if (indicatesMissingClass(e)) {
        try {
          result = Utils.forName(cls, prefix + "." + classname, options);
        } catch (Exception ex) {
          // BUG FIX: the original tested e.getMessage() here (always true at
          // this point), which made the "throw new Exception(ex)" branch
          // unreachable; the second attempt's exception must be inspected
          if (indicatesMissingClass(ex)) {
            throw new Exception("Can't find class called '" + classname
              + "' or '" + prefix + "." + classname + "'!");
          } else {
            throw new Exception(ex);
          }
        }
      } else {
        throw new Exception(e);
      }
    }
    return result;
  }

  /**
   * Checks whether the exception message indicates an unresolved class name.
   * Null-safe: exceptions without a message never match.
   *
   * @param e the exception to inspect
   * @return true if the message contains "can't find"
   */
  private static boolean indicatesMissingClass(Exception e) {
    return (e.getMessage() != null)
      && (e.getMessage().toLowerCase().indexOf("can't find") > -1);
  }

  /**
   * Begin the tests, reporting results to System.out
   */
  public abstract void doTests();

  /**
   * Set debugging mode
   *
   * @param debug true if debug output should be printed
   */
  public void setDebug(boolean debug) {
    m_Debug = debug;
    // disable silent mode, if necessary
    if (getDebug()) {
      setSilent(false);
    }
  }

  /**
   * Get whether debugging is turned on
   *
   * @return true if debugging output is on
   */
  public boolean getDebug() {
    return m_Debug;
  }

  /**
   * Set silent mode, i.e., no output at all to stdout
   *
   * @param value whether silent mode is active or not
   */
  public void setSilent(boolean value) {
    m_Silent = value;
  }

  /**
   * Get whether silent mode is turned on
   *
   * @return true if silent mode is on
   */
  public boolean getSilent() {
    return m_Silent;
  }

  /**
   * prints the given message to stdout, if not silent mode
   *
   * @param msg the text to print to stdout
   */
  protected void print(Object msg) {
    if (!getSilent()) {
      System.out.print(msg);
    }
  }

  /**
   * prints the given message (+ LF) to stdout, if not silent mode
   *
   * @param msg the message to println to stdout
   */
  protected void println(Object msg) {
    print(msg + "\n");
  }

  /**
   * prints a LF to stdout, if not silent mode
   */
  protected void println() {
    print("\n");
  }

  /**
   * runs the CheckScheme with the given options
   *
   * @param check the checkscheme to setup and run
   * @param options the commandline parameters to use
   */
  protected static void runCheck(Check check, String[] options) {
    try {
      try {
        check.setOptions(options);
        Utils.checkForRemainingOptions(options);
      } catch (Exception ex) {
        // build the error + usage text with a StringBuilder instead of
        // repeated string concatenation in the loop
        StringBuilder result = new StringBuilder();
        result.append(ex.getMessage()).append("\n\n")
          .append(check.getClass().getName().replaceAll(".*\\.", ""))
          .append(" Options:\n\n");
        Enumeration<Option> enm = check.listOptions();
        while (enm.hasMoreElements()) {
          Option option = enm.nextElement();
          result.append(option.synopsis()).append("\n")
            .append(option.description()).append("\n");
        }
        throw new Exception(result.toString());
      }
      check.doTests();
    } catch (Exception ex) {
      System.err.println(ex.getMessage());
    }
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/CheckGOE.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* CheckGOE.java
* Copyright (C) 2007-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core;
import weka.gui.ProgrammaticProperty;
import java.beans.BeanInfo;
import java.beans.Introspector;
import java.beans.PropertyDescriptor;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.Vector;
/**
* Simple command line checking of classes that are editable in the GOE.
* <p/>
*
* Usage:
* <p/>
* <code>
* CheckGOE -W classname -- test options
* </code>
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -D
* Turn on debugging output.
* </pre>
*
* <pre>
* -S
* Silent mode - prints nothing to stdout.
* </pre>
*
* <pre>
* -ignored <comma-separated list of properties>
* Skipped properties.
* (default: capabilities,options)
* </pre>
*
* <pre>
* -W
* Full name of the class analysed.
* eg: weka.classifiers.rules.ZeroR
* (default weka.classifiers.rules.ZeroR)
* </pre>
*
* <!-- options-end -->
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class CheckGOE extends Check {

  /** the object to test */
  protected Object m_Object = new weka.classifiers.rules.ZeroR();

  /** whether the tests were successful */
  protected boolean m_Success;

  /**
   * properties that are skipped in the checkToolTips method
   *
   * @see #checkToolTips()
   */
  protected HashSet<String> m_IgnoredProperties = new HashSet<String>();

  /**
   * default constructor
   */
  public CheckGOE() {
    super();
    // set default options
    try {
      setOptions(new String[0]);
    } catch (Exception e) {
      e.printStackTrace();
    }
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> result = new Vector<Option>();
    result.addAll(Collections.list(super.listOptions()));
    result.addElement(new Option("\tSkipped properties.\n"
      + "\t(default: capabilities,options)", "ignored", 1,
      "-ignored <comma-separated list of properties>"));
    result.addElement(new Option("\tFull name of the class analysed.\n"
      + "\teg: weka.classifiers.rules.ZeroR\n"
      + "\t(default weka.classifiers.rules.ZeroR)", "W", 1, "-W"));
    return result.elements();
  }

  /**
   * Parses a given list of options. Recognized in addition to the
   * superclass options: -W (class to analyse) and -ignored
   * (comma-separated list of skipped properties).
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  @Override
  public void setOptions(String[] options) throws Exception {
    String tmpStr;
    super.setOptions(options);
    tmpStr = Utils.getOption('W', options);
    if (tmpStr.length() == 0) {
      tmpStr = weka.classifiers.rules.ZeroR.class.getName();
    }
    setObject(Utils.forName(Object.class, tmpStr, null));
    tmpStr = Utils.getOption("ignored", options);
    if (tmpStr.length() == 0) {
      tmpStr = "capabilities,options";
    }
    setIgnoredProperties(tmpStr);
  }

  /**
   * Gets the current settings of the object.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  @Override
  public String[] getOptions() {
    Vector<String> result = new Vector<String>();
    Collections.addAll(result, super.getOptions());
    result.add("-ignored");
    result.add(getIgnoredProperties());
    if (getObject() != null) {
      result.add("-W");
      result.add(getObject().getClass().getName());
    }
    return result.toArray(new String[result.size()]);
  }

  /**
   * Set the object to work on.
   *
   * @param value the object to use.
   */
  public void setObject(Object value) {
    m_Object = value;
  }

  /**
   * Get the object used in the tests.
   *
   * @return the object used in the tests.
   */
  public Object getObject() {
    return m_Object;
  }

  /**
   * Sets the properties to ignore in checkToolTips(). Comma-separated list.
   *
   * @param value the list of properties
   * @see #checkToolTips()
   */
  public void setIgnoredProperties(String value) {
    m_IgnoredProperties.clear();
    // Collections.addAll replaces the original manual index loop
    Collections.addAll(m_IgnoredProperties, value.split(","));
  }

  /**
   * Get the ignored properties used in checkToolTips() as comma-separated list
   * (sorted).
   *
   * @return the ignored properties
   * @see #checkToolTips()
   */
  public String getIgnoredProperties() {
    // sort the properties, then join them; a StringBuilder replaces the
    // original quadratic string concatenation in a loop
    Vector<String> list = new Vector<String>(m_IgnoredProperties);
    Collections.sort(list);
    StringBuilder result = new StringBuilder();
    for (String prop : list) {
      if (result.length() > 0) {
        result.append(",");
      }
      result.append(prop);
    }
    return result.toString();
  }

  /**
   * returns the success of the tests
   *
   * @return true if the tests were successful
   */
  public boolean getSuccess() {
    return m_Success;
  }

  /**
   * checks whether the object declares a globalInfo method.
   *
   * @return true if the test was passed
   */
  public boolean checkGlobalInfo() {
    boolean result;
    Class<?> cls;
    print("Global info...");
    result = true;
    cls = getObject().getClass();
    // test for globalInfo method
    try {
      cls.getMethod("globalInfo", (Class[]) null);
    } catch (Exception e) {
      result = false;
    }
    if (result) {
      println("yes");
    } else {
      println("no");
    }
    return result;
  }

  /**
   * checks whether the object declares a tip text method for all its
   * properties (getter/setter pairs). Properties in the ignore list, and
   * properties annotated with OptionMetadata or ProgrammaticProperty,
   * are skipped.
   *
   * @return true if the test was passed
   */
  public boolean checkToolTips() {
    boolean result;
    Class<?> cls;
    BeanInfo info;
    PropertyDescriptor[] desc;
    int i;
    Vector<String> missing;
    String suffix;
    print("Tool tips...");
    result = true;
    suffix = "TipText";
    cls = getObject().getClass();
    // get properties via bean introspection
    try {
      info = Introspector.getBeanInfo(cls, Object.class);
      desc = info.getPropertyDescriptors();
    } catch (Exception e) {
      e.printStackTrace();
      desc = null;
    }
    // test for TipText methods
    if (desc != null) {
      missing = new Vector<String>();
      for (i = 0; i < desc.length; i++) {
        // skip property?
        if (m_IgnoredProperties.contains(desc[i].getName())) {
          continue;
        }
        // only full getter/setter pairs are checked
        if ((desc[i].getReadMethod() == null)
          || (desc[i].getWriteMethod() == null)) {
          continue;
        }
        // properties described via OptionMetadata don't need tip texts
        OptionMetadata m = desc[i].getReadMethod().getAnnotation(OptionMetadata.class);
        if (m == null) {
          m = desc[i].getWriteMethod().getAnnotation(OptionMetadata.class);
        }
        if (m != null) {
          continue;
        }
        // programmatic properties don't need tip texts
        ProgrammaticProperty p = desc[i].getReadMethod().getAnnotation(ProgrammaticProperty.class);
        if (p == null) {
          p = desc[i].getWriteMethod().getAnnotation(ProgrammaticProperty.class);
        }
        if (p != null) {
          continue;
        }
        try {
          cls.getMethod(desc[i].getName() + suffix, (Class[]) null);
        } catch (Exception e) {
          result = false;
          missing.add(desc[i].getName() + suffix);
        }
      }
      if (result) {
        println("yes");
      } else {
        println("no (missing: " + missing + ")");
      }
    } else {
      // introspection failed, cannot decide either way
      println("maybe");
    }
    return result;
  }

  /**
   * Runs some diagnostic tests on the object. Output is printed to System.out
   * (if not silent).
   */
  @Override
  public void doTests() {
    println("Object: " + m_Object.getClass().getName() + "\n");
    println("--> Tests");
    m_Success = checkGlobalInfo();
    if (m_Success) {
      m_Success = checkToolTips();
    }
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }

  /**
   * Main method for using the CheckGOE.
   *
   * @param args the options to the CheckGOE
   */
  public static void main(String[] args) {
    CheckGOE check = new CheckGOE();
    runCheck(check, args);
    if (check.getSuccess()) {
      System.exit(0);
    } else {
      System.exit(1);
    }
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/CheckOptionHandler.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* CheckOptionHandler.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;
/**
* Simple command line checking of classes that implement OptionHandler.
* <p/>
*
* Usage:
* <p/>
* <code>
* CheckOptionHandler -W optionHandlerClassName -- test options
* </code>
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -D
* Turn on debugging output.
* </pre>
*
* <pre>
* -S
* Silent mode - prints nothing to stdout.
* </pre>
*
* <pre>
* -W
* Full name of the OptionHandler analysed.
* eg: weka.classifiers.rules.ZeroR
* (default weka.classifiers.rules.ZeroR)
* </pre>
*
* <pre>
* Options specific to option handler weka.classifiers.rules.ZeroR:
* </pre>
*
* <pre>
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
* </pre>
*
* <!-- options-end -->
*
* Options after -- are used as user options in testing the OptionHandler
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class CheckOptionHandler extends Check {
/** the optionhandler to test */
protected OptionHandler m_OptionHandler = new weka.classifiers.rules.ZeroR();
/** the user-supplied options */
protected String[] m_UserOptions = new String[0];
/** whether the tests were successful */
protected boolean m_Success;
/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 */
@Override
public Enumeration<Option> listOptions() {
  // start with the superclass options, then add -W and (if set) the
  // handler-specific options
  final Vector<Option> result = new Vector<Option>(Collections.list(super.listOptions()));
  result.addElement(new Option("\tFull name of the OptionHandler analysed.\n"
    + "\teg: weka.classifiers.rules.ZeroR\n"
    + "\t(default weka.classifiers.rules.ZeroR)", "W", 1, "-W"));
  if (m_OptionHandler != null) {
    result.addElement(new Option("", "", 0,
      "\nOptions specific to option handler "
        + m_OptionHandler.getClass().getName() + ":"));
    result.addAll(Collections.list(m_OptionHandler.listOptions()));
  }
  return result.elements();
}
/**
 * Parses a given list of options. Recognized in addition to the
 * superclass options:
 * <p/>
 *
 * <pre>
 * -W
 *  Full name of the OptionHandler analysed.
 *  eg: weka.classifiers.rules.ZeroR
 *  (default weka.classifiers.rules.ZeroR)
 * </pre>
 *
 * Options after -- are kept as user options for testing the OptionHandler.
 *
 * @param options the list of options as an array of strings
 * @throws Exception if an option is not supported
 */
@Override
public void setOptions(String[] options) throws Exception {
  super.setOptions(options);
  // class to analyse; fall back to ZeroR when -W is absent
  String className = Utils.getOption('W', options);
  if (className.isEmpty()) {
    className = weka.classifiers.rules.ZeroR.class.getName();
  }
  // everything after "--" is treated as user-supplied scheme options
  setUserOptions(Utils.partitionOptions(options));
  setOptionHandler((OptionHandler) Utils.forName(OptionHandler.class, className, null));
}
/**
 * Gets the current settings of the CheckClassifier.
 *
 * @return an array of strings suitable for passing to setOptions
 */
@Override
public String[] getOptions() {
  final Vector<String> result = new Vector<String>();
  Collections.addAll(result, super.getOptions());
  if (getOptionHandler() != null) {
    result.add("-W");
    result.add(getOptionHandler().getClass().getName());
  }
  if (m_OptionHandler != null) {
    // append the handler's own options after the "--" separator
    result.add("--");
    Collections.addAll(result, m_OptionHandler.getOptions());
  }
  return result.toArray(new String[result.size()]);
}
/**
 * Sets the OptionHandler to run the checks against.
 *
 * @param value the OptionHandler to use.
 */
public void setOptionHandler(OptionHandler value) {
  this.m_OptionHandler = value;
}

/**
 * Returns the OptionHandler used in the tests.
 *
 * @return the OptionHandler used in the tests.
 */
public OptionHandler getOptionHandler() {
  return this.m_OptionHandler;
}

/**
 * Sets the user-supplied options (stores a defensive copy).
 *
 * @param value the user-supplied options to use
 */
public void setUserOptions(String[] value) {
  this.m_UserOptions = getCopy(value);
}

/**
 * Returns the current user-supplied options (as a defensive copy).
 *
 * @return the user-supplied options
 */
public String[] getUserOptions() {
  return getCopy(this.m_UserOptions);
}

/**
 * Returns whether the last run of the tests was successful.
 *
 * @return true if the tests were successful
 */
public boolean getSuccess() {
  return this.m_Success;
}
/**
* Prints the given options to a string.
*
* @param options the options to be joined
* @return the options as one long string
*/
protected String printOptions(String[] options) {
if (options == null) {
return ("<null>");
} else {
return Utils.joinOptions(options);
}
}
/**
* Compares the two given sets of options.
*
* @param options1 the first set of options
* @param options2 the second set of options
* @throws Exception if the two sets of options differ
*/
protected void compareOptions(String[] options1, String[] options2)
throws Exception {
if (options1 == null) {
throw new Exception("first set of options is null!");
}
if (options2 == null) {
throw new Exception("second set of options is null!");
}
if (options1.length != options2.length) {
throw new Exception("problem found!\n" + "First set: "
+ printOptions(options1) + '\n' + "Second set: "
+ printOptions(options2) + '\n' + "options differ in length");
}
for (int i = 0; i < options1.length; i++) {
if (!options1[i].equals(options2[i])) {
throw new Exception("problem found!\n" + "\tFirst set: "
+ printOptions(options1) + '\n' + "\tSecond set: "
+ printOptions(options2) + '\n' + '\t' + options1[i] + " != "
+ options2[i]);
}
}
}
/**
* creates a copy of the given options
*
* @param options the options to copy
* @return the copy
*/
protected String[] getCopy(String[] options) {
String[] result;
result = new String[options.length];
System.arraycopy(options, 0, result, 0, options.length);
return result;
}
/**
* returns a new instance of the OptionHandler's class
*
* @return a new instance
*/
protected OptionHandler getDefaultHandler() {
OptionHandler result;
try {
result = m_OptionHandler.getClass().newInstance();
} catch (Exception e) {
e.printStackTrace();
result = null;
}
return result;
}
/**
* returns the default options the default OptionHandler will return
*
* @return the default options
*/
protected String[] getDefaultOptions() {
String[] result;
OptionHandler o;
o = getDefaultHandler();
if (o == null) {
println("WARNING: couldn't create default handler, cannot use default options!");
result = new String[0];
} else {
result = o.getOptions();
}
return result;
}
  /**
   * Checks whether the handler's listOptions() enumeration can be walked
   * (and, in debug mode, printed) without throwing an exception.
   *
   * @return true if the test was passed
   */
  public boolean checkListOptions() {
    boolean result;
    print("ListOptions...");
    try {
      Enumeration<Option> enu = getOptionHandler().listOptions();
      if (getDebug() && enu.hasMoreElements()) {
        println("");
      }
      while (enu.hasMoreElements()) {
        Option option = enu.nextElement();
        if (getDebug()) {
          println(option.synopsis());
          println(option.description());
        }
      }
      println("yes");
      result = true;
    } catch (Exception e) {
      println("no");
      result = false;
      if (getDebug()) {
        println(e);
      }
    }
    return result;
  }
  /**
   * Checks whether the user-supplied options can be processed at all by a
   * freshly constructed default handler.
   *
   * @return true if the test was passed
   */
  public boolean checkSetOptions() {
    boolean result;
    print("SetOptions...");
    try {
      getDefaultHandler().setOptions(getUserOptions());
      println("yes");
      result = true;
    } catch (Exception e) {
      println("no");
      result = false;
      if (getDebug()) {
        println(e);
      }
    }
    return result;
  }
  /**
   * Checks whether the default options can be processed completely or some
   * invalid options are returned by the getOptions() method.
   *
   * @return true if the test was passed
   */
  public boolean checkDefaultOptions() {
    boolean result;
    String[] options;
    print("Default options...");
    options = getDefaultOptions();
    try {
      getDefaultHandler().setOptions(options);
      // setOptions consumes recognized options; anything left is invalid
      Utils.checkForRemainingOptions(options);
      println("yes");
      result = true;
    } catch (Exception e) {
      println("no");
      result = false;
      if (getDebug()) {
        println(e);
      }
    }
    return result;
  }
  /**
   * Checks whether the user-supplied options can be processed completely or
   * some "left-over" options remain.
   *
   * @return true if the test was passed
   */
  public boolean checkRemainingOptions() {
    boolean result;
    String[] options;
    print("Remaining options...");
    options = getUserOptions();
    try {
      getDefaultHandler().setOptions(options);
      if (getDebug()) {
        println("\n remaining: " + printOptions(options));
      }
      println("yes");
      result = true;
    } catch (Exception e) {
      println("no");
      result = false;
      if (getDebug()) {
        println(e);
      }
    }
    return result;
  }
  /**
   * Checks whether the user-supplied options stay the same after setting,
   * getting and re-setting them again (i.e. they are canonical).
   *
   * @return true if the test was passed
   */
  public boolean checkCanonicalUserOptions() {
    boolean result;
    OptionHandler handler;
    String[] userOptions;
    String[] userOptionsCheck;
    print("Canonical user options...");
    try {
      handler = getDefaultHandler();
      handler.setOptions(getUserOptions());
      if (getDebug()) {
        print("\n Getting canonical user options: ");
      }
      userOptions = handler.getOptions();
      if (getDebug()) {
        println(printOptions(userOptions));
      }
      if (getDebug()) {
        println(" Setting canonical user options");
      }
      handler.setOptions(userOptions.clone());
      if (getDebug()) {
        println(" Checking canonical user options");
      }
      userOptionsCheck = handler.getOptions();
      compareOptions(userOptions, userOptionsCheck);
      println("yes");
      result = true;
    } catch (Exception e) {
      println("no");
      result = false;
      if (getDebug()) {
        println(e);
      }
    }
    return result;
  }
  /**
   * Checks whether the option handler can be reset to default options again
   * after the user-supplied options have been set.
   *
   * @return true if the test was passed
   */
  public boolean checkResettingOptions() {
    boolean result;
    String[] defaultOptions;
    String[] defaultOptionsCheck;
    OptionHandler handler;
    print("Resetting options...");
    try {
      if (getDebug()) {
        println("\n Setting user options");
      }
      handler = getDefaultHandler();
      handler.setOptions(getUserOptions());
      defaultOptions = getDefaultOptions();
      if (getDebug()) {
        println(" Resetting to default options");
      }
      handler.setOptions(getCopy(defaultOptions));
      if (getDebug()) {
        println(" Checking default options match previous default");
      }
      defaultOptionsCheck = handler.getOptions();
      compareOptions(defaultOptions, defaultOptionsCheck);
      println("yes");
      result = true;
    } catch (Exception e) {
      println("no");
      result = false;
      if (getDebug()) {
        println(e);
      }
    }
    return result;
  }
/**
* Runs some diagnostic tests on an optionhandler object. Output is printed to
* System.out (if not silent).
*/
@Override
public void doTests() {
println("OptionHandler: " + m_OptionHandler.getClass().getName() + "\n");
if (getDebug()) {
println("--> Info");
print("Default options: ");
println(printOptions(getDefaultOptions()));
print("User options: ");
println(printOptions(getUserOptions()));
}
println("--> Tests");
m_Success = checkListOptions();
if (m_Success) {
m_Success = checkSetOptions();
}
if (m_Success) {
m_Success = checkDefaultOptions();
}
if (m_Success) {
m_Success = checkRemainingOptions();
}
if (m_Success) {
m_Success = checkCanonicalUserOptions();
}
if (m_Success) {
m_Success = checkResettingOptions();
}
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Main method for using the CheckOptionHandler.
*
* @param args the options to the CheckOptionHandler
*/
public static void main(String[] args) {
CheckOptionHandler check = new CheckOptionHandler();
runCheck(check, args);
if (check.getSuccess()) {
System.exit(0);
} else {
System.exit(1);
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/CheckScheme.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* CheckScheme.java
* Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Random;
import java.util.StringTokenizer;
import java.util.Vector;
/**
* Abstract general class for testing schemes in Weka. Derived classes are
* also used for JUnit tests.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
* @see TestInstances
*/
public abstract class CheckScheme
  extends Check {
  /** a class for postprocessing the test-data */
  public static class PostProcessor
    implements RevisionHandler {
    /**
     * Provides a hook for derived classes to further modify the data. Currently,
     * the data is just passed through.
     *
     * @param data the data to process
     * @return the processed data
     */
    public Instances process(Instances data) {
      return data;
    }
    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    public String getRevision() {
      return RevisionUtils.extract("$Revision$");
    }
  }
  /** The number of instances in the datasets */
  protected int m_NumInstances = 20;
  /** the number of nominal attributes */
  protected int m_NumNominal = 2;
  /** the number of numeric attributes */
  protected int m_NumNumeric = 1;
  /** the number of string attributes */
  protected int m_NumString = 1;
  /** the number of date attributes */
  protected int m_NumDate = 1;
  /** the number of relational attributes */
  protected int m_NumRelational = 1;
  /** the number of instances in relational attributes (applies also for bags
   * in multi-instance) */
  protected int m_NumInstancesRelational = 10;
  /** for generating String attributes/classes */
  protected String[] m_Words = TestInstances.DEFAULT_WORDS;
  /** for generating String attributes/classes */
  protected String m_WordSeparators = TestInstances.DEFAULT_SEPARATORS;
  /** for post-processing the data even further */
  protected PostProcessor m_PostProcessor = null;
  /** whether classpath problems occurred */
  protected boolean m_ClasspathProblems = false;
  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration<Option> listOptions() {
    Vector<Option> result = new Vector<Option>();
    result.addAll(Collections.list(super.listOptions()));
    result.addElement(new Option(
      "\tThe number of instances in the datasets (default 20).",
      "N", 1, "-N <num>"));
    result.addElement(new Option(
      "\tThe number of nominal attributes (default 2).",
      "nominal", 1, "-nominal <num>"));
    // NOTE(review): "-nominal-values" is advertised here but is never parsed
    // in setOptions() nor emitted by getOptions() below — confirm whether a
    // subclass is expected to handle it
    result.addElement(new Option(
      "\tThe number of values for nominal attributes (default 1).",
      "nominal-values", 1, "-nominal-values <num>"));
    result.addElement(new Option(
      "\tThe number of numeric attributes (default 1).",
      "numeric", 1, "-numeric <num>"));
    result.addElement(new Option(
      "\tThe number of string attributes (default 1).",
      "string", 1, "-string <num>"));
    result.addElement(new Option(
      "\tThe number of date attributes (default 1).",
      "date", 1, "-date <num>"));
    result.addElement(new Option(
      "\tThe number of relational attributes (default 1).",
      "relational", 1, "-relational <num>"));
    result.addElement(new Option(
      "\tThe number of instances in relational/bag attributes (default 10).",
      "num-instances-relational", 1, "-num-instances-relational <num>"));
    result.addElement(new Option(
      "\tThe words to use in string attributes.",
      "words", 1, "-words <comma-separated-list>"));
    result.addElement(new Option(
      "\tThe word separators to use in string attributes.",
      "word-separators", 1, "-word-separators <chars>"));
    return result.elements();
  }
  /**
   * Parses a given list of options.
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String tmpStr;
    super.setOptions(options);
    tmpStr = Utils.getOption('N', options);
    if (tmpStr.length() != 0)
      setNumInstances(Integer.parseInt(tmpStr));
    else
      setNumInstances(20);
    tmpStr = Utils.getOption("nominal", options);
    if (tmpStr.length() != 0)
      setNumNominal(Integer.parseInt(tmpStr));
    else
      setNumNominal(2);
    tmpStr = Utils.getOption("numeric", options);
    if (tmpStr.length() != 0)
      setNumNumeric(Integer.parseInt(tmpStr));
    else
      setNumNumeric(1);
    tmpStr = Utils.getOption("string", options);
    if (tmpStr.length() != 0)
      setNumString(Integer.parseInt(tmpStr));
    else
      setNumString(1);
    tmpStr = Utils.getOption("date", options);
    if (tmpStr.length() != 0)
      setNumDate(Integer.parseInt(tmpStr));
    else
      setNumDate(1);
    tmpStr = Utils.getOption("relational", options);
    if (tmpStr.length() != 0)
      setNumRelational(Integer.parseInt(tmpStr));
    else
      setNumRelational(1);
    tmpStr = Utils.getOption("num-instances-relational", options);
    if (tmpStr.length() != 0)
      setNumInstancesRelational(Integer.parseInt(tmpStr));
    else
      setNumInstancesRelational(10);
    tmpStr = Utils.getOption("words", options);
    if (tmpStr.length() != 0)
      setWords(tmpStr);
    else
      setWords(new TestInstances().getWords());
    // getOptionPos is used (instead of a length check) so that an explicitly
    // supplied empty separator string is honored
    if (Utils.getOptionPos("word-separators", options) > -1) {
      tmpStr = Utils.getOption("word-separators", options);
      setWordSeparators(tmpStr);
    }
    else {
      setWordSeparators(TestInstances.DEFAULT_SEPARATORS);
    }
  }
  /**
   * Gets the current settings of the CheckScheme.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    Vector<String> result;
    String[] options;
    int i;
    result = new Vector<String>();
    options = super.getOptions();
    for (i = 0; i < options.length; i++)
      result.add(options[i]);
    result.add("-N");
    result.add("" + getNumInstances());
    result.add("-nominal");
    result.add("" + getNumNominal());
    result.add("-numeric");
    result.add("" + getNumNumeric());
    result.add("-string");
    result.add("" + getNumString());
    result.add("-date");
    result.add("" + getNumDate());
    result.add("-relational");
    result.add("" + getNumRelational());
    result.add("-words");
    result.add("" + getWords());
    result.add("-word-separators");
    result.add("" + getWordSeparators());
    return (String[]) result.toArray(new String[result.size()]);
  }
  /**
   * sets the PostProcessor to use
   *
   * @param value the new PostProcessor
   * @see #m_PostProcessor
   */
  public void setPostProcessor(PostProcessor value) {
    m_PostProcessor = value;
  }
  /**
   * returns the current PostProcessor, can be null
   *
   * @return the current PostProcessor
   */
  public PostProcessor getPostProcessor() {
    return m_PostProcessor;
  }
  /**
   * returns TRUE if the classifier returned a "not in classpath" Exception
   *
   * @return true if CLASSPATH problems occurred
   */
  public boolean hasClasspathProblems() {
    return m_ClasspathProblems;
  }
  /**
   * Begin the tests, reporting results to System.out
   */
  public abstract void doTests();
  /**
   * Sets the number of instances to use in the datasets (some classifiers
   * might require more instances).
   *
   * @param value the number of instances to use
   */
  public void setNumInstances(int value) {
    m_NumInstances = value;
  }
  /**
   * Gets the current number of instances to use for the datasets.
   *
   * @return the number of instances
   */
  public int getNumInstances() {
    return m_NumInstances;
  }
  /**
   * sets the number of nominal attributes
   *
   * @param value the number of nominal attributes
   */
  public void setNumNominal(int value) {
    m_NumNominal = value;
  }
  /**
   * returns the current number of nominal attributes
   *
   * @return the number of nominal attributes
   */
  public int getNumNominal() {
    return m_NumNominal;
  }
  /**
   * sets the number of numeric attributes
   *
   * @param value the number of numeric attributes
   */
  public void setNumNumeric(int value) {
    m_NumNumeric = value;
  }
  /**
   * returns the current number of numeric attributes
   *
   * @return the number of numeric attributes
   */
  public int getNumNumeric() {
    return m_NumNumeric;
  }
  /**
   * sets the number of string attributes
   *
   * @param value the number of string attributes
   */
  public void setNumString(int value) {
    m_NumString = value;
  }
  /**
   * returns the current number of string attributes
   *
   * @return the number of string attributes
   */
  public int getNumString() {
    return m_NumString;
  }
  /**
   * sets the number of date attributes
   *
   * @param value the number of date attributes
   */
  public void setNumDate(int value) {
    m_NumDate = value;
  }
  /**
   * returns the current number of date attributes
   *
   * @return the number of date attributes
   */
  public int getNumDate() {
    return m_NumDate;
  }
  /**
   * sets the number of relational attributes
   *
   * @param value the number of relational attributes
   */
  public void setNumRelational(int value) {
    m_NumRelational = value;
  }
  /**
   * returns the current number of relational attributes
   *
   * @return the number of relational attributes
   */
  public int getNumRelational() {
    return m_NumRelational;
  }
  /**
   * sets the number of instances in relational/bag attributes to produce
   *
   * @param value the number of instances
   */
  public void setNumInstancesRelational(int value) {
    m_NumInstancesRelational = value;
  }
  /**
   * returns the current number of instances in relational/bag attributes to produce
   *
   * @return the number of instances
   */
  public int getNumInstancesRelational() {
    return m_NumInstancesRelational;
  }
  /**
   * turns the comma-separated list into an array
   *
   * @param value the list to process
   * @return the list as array
   */
  protected static String[] listToArray(String value) {
    StringTokenizer tok;
    Vector<String> list;
    list = new Vector<String>();
    tok = new StringTokenizer(value, ",");
    while (tok.hasMoreTokens())
      list.add(tok.nextToken());
    return (String[]) list.toArray(new String[list.size()]);
  }
  /**
   * turns the array into a comma-separated list
   *
   * @param value the array to process
   * @return the array as list
   */
  protected static String arrayToList(String[] value) {
    String result;
    int i;
    result = "";
    for (i = 0; i < value.length; i++) {
      if (i > 0)
        result += ",";
      result += value[i];
    }
    return result;
  }
  /**
   * returns a string representation of the attribute type
   *
   * @param type the attribute type to get a string representation for
   * @return the string representation
   */
  public static String attributeTypeToString(int type) {
    String result;
    switch (type) {
      case Attribute.NUMERIC:
        result = "numeric";
        break;
      case Attribute.NOMINAL:
        result = "nominal";
        break;
      case Attribute.STRING:
        result = "string";
        break;
      case Attribute.DATE:
        result = "date";
        break;
      case Attribute.RELATIONAL:
        result = "relational";
        break;
      default:
        result = "???";
    }
    return result;
  }
  /**
   * Sets the comma-separated list of words to use for generating strings. The
   * list must contain at least 2 words, otherwise an exception will be thrown.
   *
   * @param value the list of words
   * @throws IllegalArgumentException if not at least 2 words are provided
   */
  public void setWords(String value) {
    if (listToArray(value).length < 2)
      throw new IllegalArgumentException("At least 2 words must be provided!");
    m_Words = listToArray(value);
  }
  /**
   * returns the words used for assembling strings in a comma-separated list.
   *
   * @return the words as comma-separated list
   */
  public String getWords() {
    return arrayToList(m_Words);
  }
  /**
   * sets the word separators (chars) to use for assembling strings.
   *
   * @param value the characters to use as separators
   */
  public void setWordSeparators(String value) {
    m_WordSeparators = value;
  }
  /**
   * returns the word separators (chars) to use for assembling strings.
   *
   * @return the current separators
   */
  public String getWordSeparators() {
    return m_WordSeparators;
  }
  /**
   * Compare two datasets to see if they differ.
   *
   * @param data1 one set of instances
   * @param data2 the other set of instances
   * @throws Exception if the datasets differ
   */
  protected void compareDatasets(Instances data1, Instances data2)
    throws Exception {
    if (!data2.equalHeaders(data1)) {
      throw new Exception("header has been modified\n" + data2.equalHeadersMsg(data1));
    }
    if (!(data2.numInstances() == data1.numInstances())) {
      throw new Exception("number of instances has changed");
    }
    for (int i = 0; i < data2.numInstances(); i++) {
      Instance orig = data1.instance(i);
      Instance copy = data2.instance(i);
      for (int j = 0; j < orig.numAttributes(); j++) {
        if (orig.isMissing(j)) {
          if (!copy.isMissing(j)) {
            throw new Exception("instances have changed");
          }
        } else if (orig.value(j) != copy.value(j)) {
          throw new Exception("instances have changed");
        }
        if (orig.weight() != copy.weight()) {
          throw new Exception("instance weights have changed");
        }
      }
    }
  }
  /**
   * Add missing values to a dataset.
   *
   * @param data the instances to add missing values to
   * @param level the level of missing values to add (if positive, this
   * is the probability that a value will be set to missing, if negative
   * all but one value will be set to missing (not yet implemented))
   * @param predictorMissing if true, predictor attributes will be modified
   * @param classMissing if true, the class attribute will be modified
   */
  protected void addMissing(Instances data, int level,
      boolean predictorMissing, boolean classMissing) {
    int classIndex = data.classIndex();
    // fixed seed keeps the injected missing-value pattern reproducible
    Random random = new Random(1);
    for (int i = 0; i < data.numInstances(); i++) {
      Instance current = data.instance(i);
      for (int j = 0; j < data.numAttributes(); j++) {
        if (((j == classIndex) && classMissing) ||
            ((j != classIndex) && predictorMissing)) {
          if (random.nextInt(100) < level)
            current.setMissing(j);
        }
      }
    }
  }
  /**
   * Provides a hook for derived classes to further modify the data.
   *
   * @param data the data to process
   * @return the processed data
   * @see #m_PostProcessor
   */
  protected Instances process(Instances data) {
    if (getPostProcessor() == null)
      return data;
    else
      return getPostProcessor().process(data);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/ClassCache.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* ClassCache.java
* Copyright (C) 2010-2014 University of Waikato, Hamilton, New Zealand
*/
package weka.core;
import java.io.File;
import java.io.FileFilter;
import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.Set;
import java.util.jar.Attributes;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;
import java.util.jar.Manifest;
/**
* A singleton that stores all classes on the classpath.
*
* @author fracpete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class ClassCache implements RevisionHandler {
  /**
   * For filtering classes.
   *
   * @author fracpete (fracpete at waikato dot ac dot nz)
   * @version $Revision$
   */
  public static class ClassFileFilter implements FileFilter {
    /**
     * Checks whether the file is a class.
     *
     * @param pathname the file to check
     * @return true if a class file
     */
    @Override
    public boolean accept(File pathname) {
      return pathname.getName().endsWith(".class");
    }
  }
  /**
   * For filtering directories.
   *
   * @author fracpete (fracpete at waikato dot ac dot nz)
   * @version $Revision$
   */
  public static class DirectoryFilter implements FileFilter {
    /**
     * Checks whether the file is a directory.
     *
     * @param pathname the file to check
     * @return true if a directory
     */
    @Override
    public boolean accept(File pathname) {
      return pathname.isDirectory();
    }
  }
  /** whether to output some debug information. */
  public final static boolean VERBOSE = false;
  /** the key for the default package. */
  public final static String DEFAULT_PACKAGE = "DEFAULT";
  /**
   * for caching all classes on the class path (package-name <-> HashSet
   * with classnames).
   */
  protected Hashtable<String, HashSet<String>> m_Cache;
  static {
    // notify if VERBOSE is still on
    if (VERBOSE) {
      System.err.println(ClassCache.class.getName() + ": VERBOSE ON");
    }
  }
  /**
   * Initializes the cache.
   */
  public ClassCache() {
    super();
    initializeNew();
  }
  /**
   * Fixes the classname, turns "/" and "\" into "." and removes ".class".
   *
   * @param classname the classname to process
   * @return the processed classname
   */
  public static String cleanUp(String classname) {
    String result;
    result = classname;
    if (result.indexOf("/") > -1) {
      result = result.replace("/", ".");
    }
    if (result.indexOf("\\") > -1) {
      result = result.replace("\\", ".");
    }
    if (result.endsWith(".class")) {
      result = result.substring(0, result.length() - 6);
    }
    return result;
  }
  /**
   * Extracts the package name from the (clean) classname.
   *
   * @param classname the classname to extract the package from
   * @return the package name, or DEFAULT_PACKAGE for the default package
   */
  public static String extractPackage(String classname) {
    if (classname.indexOf(".") > -1) {
      return classname.substring(0, classname.lastIndexOf("."));
    } else {
      return DEFAULT_PACKAGE;
    }
  }
  /**
   * Adds the classname to the cache.
   *
   * @param classname the classname, automatically removes ".class" and turns
   *          "/" or "\" into "."
   * @return true if adding changed the cache
   */
  public boolean add(String classname) {
    String pkgname;
    HashSet<String> names;
    // classname and package
    classname = cleanUp(classname);
    pkgname = extractPackage(classname);
    // add to cache
    if (!m_Cache.containsKey(pkgname)) {
      m_Cache.put(pkgname, new HashSet<String>());
    }
    names = m_Cache.get(pkgname);
    return names.add(classname);
  }
  /**
   * Removes the classname from the cache.
   *
   * @param classname the classname to remove
   * @return true if the removal changed the cache
   */
  public boolean remove(String classname) {
    String pkgname;
    HashSet<String> names;
    classname = cleanUp(classname);
    pkgname = extractPackage(classname);
    names = m_Cache.get(pkgname);
    if (names != null) {
      return names.remove(classname);
    } else {
      return false;
    }
  }
  /**
   * Fills the class cache with classes in the specified directory.
   *
   * @param prefix the package prefix so far, null for default package
   * @param dir the directory to search
   */
  protected void initFromDir(String prefix, File dir) {
    File[] files;
    // check classes
    files = dir.listFiles(new ClassFileFilter());
    for (File file : files) {
      if (prefix == null) {
        add(file.getName());
      } else {
        add(prefix + "." + file.getName());
      }
    }
    // descend in directories
    files = dir.listFiles(new DirectoryFilter());
    for (File file : files) {
      if (prefix == null) {
        initFromDir(file.getName(), file);
      } else {
        initFromDir(prefix + "." + file.getName(), file);
      }
    }
  }
  /**
   * Fills the class cache with classes in the specified directory.
   *
   * @param dir the directory to search
   */
  protected void initFromDir(File dir) {
    if (VERBOSE) {
      System.out.println("Analyzing directory: " + dir);
    }
    initFromDir(null, dir);
  }
  /**
   * Analyzes the MANIFEST.MF file of a jar whether additional jars are listed
   * in the "Class-Path" key.
   *
   * @param manifest the manifest to analyze
   */
  protected void initFromManifest(Manifest manifest) {
    if (manifest == null) {
      return;
    }
    Attributes atts;
    String cp;
    String[] parts;
    atts = manifest.getMainAttributes();
    cp = atts.getValue("Class-Path");
    if (cp == null) {
      return;
    }
    parts = cp.split(" ");
    for (String part : parts) {
      // split(" ") yields empty tokens for consecutive spaces; skip such
      // tokens instead of returning (the old "return" aborted processing of
      // all remaining Class-Path entries)
      if (part.trim().length() == 0) {
        continue;
      }
      if (part.toLowerCase().endsWith(".jar")) {
        initFromClasspathPart(part);
      }
    }
  }
  /**
   * Fills the class cache with classes from the specified jar.
   *
   * @param file the jar to inspect
   */
  protected void initFromJar(File file) {
    JarEntry entry;
    Enumeration<JarEntry> enm;
    if (VERBOSE) {
      System.out.println("Analyzing jar: " + file);
    }
    if (!file.exists()) {
      System.out.println("Jar does not exist: " + file);
      return;
    }
    // try-with-resources: the previous version never closed the JarFile,
    // leaking one file handle per inspected jar
    try (JarFile jar = new JarFile(file)) {
      enm = jar.entries();
      while (enm.hasMoreElements()) {
        entry = enm.nextElement();
        if (entry.getName().endsWith(".class")) {
          add(entry.getName());
        }
      }
      // follow Class-Path entries declared in the jar's manifest
      initFromManifest(jar.getManifest());
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
  /**
   * Returns all the stored packages.
   *
   * @return the package names
   */
  public Enumeration<String> packages() {
    return m_Cache.keys();
  }
  /**
   * Returns all the classes for the given package.
   *
   * @param pkgname the package to get the classes for
   * @return the classnames (unordered; empty set if the package is unknown)
   */
  public HashSet<String> getClassnames(String pkgname) {
    if (m_Cache.containsKey(pkgname)) {
      return m_Cache.get(pkgname);
    } else {
      return new HashSet<String>();
    }
  }
  /**
   * Analyzes a part of the classpath.
   *
   * @param part the part to analyze
   */
  protected void initFromClasspathPart(String part) {
    File file;
    file = null;
    if (part.startsWith("file:")) {
      part = part.replace(" ", "%20");
      try {
        file = new File(new java.net.URI(part));
      } catch (URISyntaxException e) {
        System.err.println("Failed to generate URI: " + part);
        e.printStackTrace();
      }
    } else {
      file = new File(part);
    }
    if (file == null) {
      System.err.println("Skipping: " + part);
      return;
    }
    // find classes
    if (file.isDirectory()) {
      initFromDir(file);
    } else if (file.exists()) {
      initFromJar(file);
    }
  }
  /**
   * Initializes the cache from the system classloader's URLs. Legacy code
   * path; the constructor uses initializeNew() instead.
   */
  protected void initialize() {
    String part = "";
    URLClassLoader sysLoader;
    URL[] urls;
    m_Cache = new Hashtable<String, HashSet<String>>();
    sysLoader = (URLClassLoader) getClass().getClassLoader();
    urls = sysLoader.getURLs();
    for (URL url : urls) {
      part = url.toString();
      if (VERBOSE) {
        System.out.println("Classpath-part: " + part);
      }
      initFromClasspathPart(part);
    }
  }
  /**
   * Initializes the cache from the Weka package classloader manager: first
   * the parent/Weka classpath entries, then the classes contained in the
   * top-level package jar files.
   */
  protected void initializeNew() {
    m_Cache = new Hashtable<String, HashSet<String>>();
    WekaPackageClassLoaderManager wcl = WekaPackageClassLoaderManager.getWekaPackageClassLoaderManager();
    // parent classloader entries...
    URL[] sysOrWekaCP = wcl.getWekaClassloaderClasspathEntries();
    for (URL url : sysOrWekaCP) {
      String part = url.toString();
      if (VERBOSE) {
        System.out.println("Classpath-part: " + part);
      }
      initFromClasspathPart(part);
    }
    // top-level package jar file class entries
    Set<String> classes = wcl.getPackageJarFileClasses();
    for (String cl : classes) {
      add(cl);
    }
  }
  /**
   * Find all classes that have the supplied matchText String in their suffix.
   *
   * @param matchText the text to match
   * @return an array list of matching fully qualified class names, sorted
   *         when more than one match is found
   */
  public ArrayList<String> find(String matchText) {
    ArrayList<String> result;
    Enumeration<String> packages;
    Iterator<String> names;
    String name;
    result = new ArrayList<String>();
    packages = m_Cache.keys();
    while (packages.hasMoreElements()) {
      names = m_Cache.get(packages.nextElement()).iterator();
      while (names.hasNext()) {
        name = names.next();
        if (name.contains(matchText)) {
          result.add(name);
        }
      }
    }
    if (result.size() > 1) {
      Collections.sort(result);
    }
    return result;
  }
  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
  /**
   * For testing only: prints each cached package and its class count.
   *
   * @param args ignored
   */
  public static void main(String[] args) {
    ClassCache cache = new ClassCache();
    Enumeration<String> packages = cache.packages();
    while (packages.hasMoreElements()) {
      String key = packages.nextElement();
      System.out.println(key + ": " + cache.getClassnames(key).size());
    }
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/ClassDiscovery.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ClassDiscovery.java
* Copyright (C) 2005-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core;
import java.io.File;
import java.lang.reflect.Modifier;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.StringTokenizer;
import java.util.Vector;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;
/**
* This class is used for discovering classes that implement a certain interface
* or a derived from a certain class.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
* @see StringCompare
*/
public class ClassDiscovery implements RevisionHandler {
/** whether to output some debug information. */
public final static boolean VERBOSE = false;
/**
 * for caching queries (key is "classname-packagename", value is the Vector
 * of matching classnames). Lazily created by initCache().
 */
protected static Hashtable<String, Vector<String>> m_Cache;
/** the overall class cache (scan of the classpath); lazily created by initCache(). */
protected static ClassCache m_ClassCache;
/** notify on stderr if VERBOSE is still on */
static {
if (VERBOSE) {
System.err.println(ClassDiscovery.class.getName() + ": VERBOSE ON");
}
}
/**
 * If the given package can be found in this part of the classpath then an URL
 * object is returned, otherwise <code>null</code>.
 *
 * @param classpathPart the part of the classpath (directory or jar) to look
 *          for the package
 * @param pkgname the package to look for (with a leading "/")
 * @return if found, the URL, otherwise null
 */
protected static URL getURL(String classpathPart, String pkgname) {
  String urlStr = null;
  URL result = null;
  try {
    File classpathFile = new File(classpathPart);
    // directory or jar?
    if (classpathFile.isDirectory()) {
      // does the package exist in this directory?
      File file = new File(classpathPart + pkgname);
      if (file.exists()) {
        urlStr = "file:" + classpathPart + pkgname;
      }
    } else {
      // is package actually included in jar?
      JarFile jarfile = new JarFile(classpathPart);
      try {
        String pkgnameTmp = pkgname.substring(1); // remove the leading "/"
        Enumeration<JarEntry> enm = jarfile.entries();
        while (enm.hasMoreElements()) {
          if (enm.nextElement().toString().startsWith(pkgnameTmp)) {
            urlStr = "jar:file:" + classpathPart + "!" + pkgname;
            break;
          }
        }
      } finally {
        // bug fix: always release the jar handle, even if the enumeration
        // throws (previously close() was skipped on exception)
        jarfile.close();
      }
    }
  } catch (Exception e) {
    // ignore: unreadable classpath entries simply yield no URL
  }
  // try to generate URL from url string
  if (urlStr != null) {
    try {
      result = new URL(urlStr);
    } catch (Exception e) {
      System.err.println("Trying to create URL from '" + urlStr
        + "' generates this exception:\n" + e);
      result = null;
    }
  }
  return result;
}
/**
 * Checks the given packages for classes that inherited from the given class,
 * in case it's a class, or implement this class, in case it's an interface.
 *
 * @param classname the class/interface to look for
 * @param pkgnames the packages to search in
 * @return a list with all the found classnames
 */
public static Vector<String> find(String classname, String[] pkgnames) {
  Vector<String> result = new Vector<String>();
  try {
    // resolve the name through the package-aware class loader manager,
    // then delegate to the Class-based overload
    Class<?> cls = WekaPackageClassLoaderManager.forName(classname);
    result = find(cls, pkgnames);
  } catch (Exception e) {
    e.printStackTrace();
  }
  return result;
}
/**
 * Checks the given package for classes that inherited from the given class,
 * in case it's a class, or implement this class, in case it's an interface.
 *
 * @param classname the class/interface to look for
 * @param pkgname the package to search in
 * @return a list with all the found classnames
 */
public static Vector<String> find(String classname, String pkgname) {
  Vector<String> result = new Vector<String>();
  try {
    // resolve the name through the package-aware class loader manager,
    // then delegate to the Class-based overload
    Class<?> cls = WekaPackageClassLoaderManager.forName(classname);
    result = find(cls, pkgname);
  } catch (Exception e) {
    e.printStackTrace();
  }
  return result;
}
/**
 * Checks the given packages for classes that inherited from the given class,
 * in case it's a class, or implement this class, in case it's an interface.
 *
 * @param cls the class/interface to look for
 * @param pkgnames the packages to search in
 * @return a sorted list with all the found classnames (duplicates removed)
 */
public static Vector<String> find(Class<?> cls, String[] pkgnames) {
  // gather matches from every package; a set drops duplicates across packages
  HashSet<String> names = new HashSet<String>();
  for (String pkgname : pkgnames) {
    names.addAll(find(cls, pkgname));
  }
  // sort result
  Vector<String> result = new Vector<String>(names);
  Collections.sort(result, new StringCompare());
  return result;
}
/**
 * Find all classes that have the supplied matchText String in their suffix.
 *
 * @param matchText the text to match
 * @return an array list of matching fully qualified class names.
 */
public static ArrayList<String> find(String matchText) {
  // bug fix: make sure the class cache exists before delegating — every
  // other entry point goes through initCache(), but this one dereferenced
  // m_ClassCache directly and threw a NullPointerException when called first
  initCache();
  return m_ClassCache.find(matchText);
}
/**
 * Checks the given package for classes that inherited from the given class,
 * in case it's a class, or implement this class, in case it's an interface.
 * Results are cached per (class, package) pair; abstract classes are removed
 * from the global class cache as a side effect.
 *
 * @param cls the class/interface to look for
 * @param pkgname the package to search in
 * @return a list with all the found classnames
 */
public static Vector<String> find(Class<?> cls, String pkgname) {
Vector<String> result;
int i;
Class<?> clsNew;
// already cached? (getCache also initializes the caches)
result = getCache(cls, pkgname);
if (result == null) {
if (VERBOSE) {
System.out.println("Searching for '" + cls.getName() + "' in '"
+ pkgname + "':");
}
// start from all class names known for this package
result = new Vector<String>();
if (m_ClassCache.getClassnames(pkgname) != null) {
result.addAll(m_ClassCache.getClassnames(pkgname));
}
// check classes: keep only concrete classes compatible with cls;
// i only advances when the current entry is kept
i = 0;
while (i < result.size()) {
try {
// clsNew = Class.forName(result.get(i));
clsNew = WekaPackageClassLoaderManager.forName(result.get(i));
// no abstract classes
if (Modifier.isAbstract(clsNew.getModifiers())) {
m_ClassCache.remove(result.get(i));
result.remove(i);
}
// must implement interface
else if ((cls.isInterface()) && (!InheritanceUtils.hasInterface(cls, clsNew))) {
result.remove(i);
}
// must be derived from class
else if ((!cls.isInterface()) && (!InheritanceUtils.isSubclass(cls, clsNew))) {
result.remove(i);
} else {
i++;
}
} catch (Exception e) {
// NOTE(review): i is not advanced and the entry is not removed here, so
// an entry that consistently throws would loop forever — confirm upstream
System.out.println("Accessing class '" + result.get(i)
+ "' resulted in error:");
e.printStackTrace();
}
}
// sort result
Collections.sort(result, new StringCompare());
// add to cache
addCache(cls, pkgname, result);
}
return result;
}
/**
 * adds all the sub-directories recursively to the list.
 *
 * @param prefix the path prefix (dotted package-style); null for the root call
 * @param dir the directory to look in for sub-dirs
 * @param list the current list of sub-dirs
 * @return the new list of sub-dirs
 */
protected static HashSet<String> getSubDirectories(String prefix, File dir,
  HashSet<String> list) {
  // build the dotted name of this directory and record it (root is skipped)
  String newPrefix;
  if (prefix == null) {
    newPrefix = "";
  } else if (prefix.length() == 0) {
    newPrefix = dir.getName();
  } else {
    newPrefix = prefix + "." + dir.getName();
  }
  if (newPrefix.length() != 0) {
    list.add(newPrefix);
  }
  // recurse into every sub-directory
  File[] entries = dir.listFiles();
  if (entries != null) {
    for (File entry : entries) {
      if (entry.isDirectory()) {
        list = getSubDirectories(newPrefix, entry, list);
      }
    }
  }
  return list;
}
/**
 * Lists all packages it can find in the classpath.
 *
 * @return a sorted list with all the found packages
 */
public static Vector<String> findPackages() {
  initCache();
  Vector<String> result = new Vector<String>();
  // copy the cached package names into the result list
  for (Enumeration<String> pkgs = m_ClassCache.packages(); pkgs.hasMoreElements();) {
    result.add(pkgs.nextElement());
  }
  Collections.sort(result, new StringCompare());
  return result;
}
/**
 * initializes the cache for the classnames. Lazily creates both the
 * query cache and the classpath-wide class cache on first use.
 */
protected static void initCache() {
if (m_Cache == null) {
m_Cache = new Hashtable<String, Vector<String>>();
}
if (m_ClassCache == null) {
// triggers a scan of the classpath
m_ClassCache = new ClassCache();
}
}
/**
 * adds the list of classnames to the cache, keyed by "classname-packagename".
 *
 * @param cls the class to cache the classnames for
 * @param pkgname the package name the classes were found in
 * @param classnames the list of classnames to cache
 */
protected static void addCache(Class<?> cls, String pkgname,
Vector<String> classnames) {
initCache();
m_Cache.put(cls.getName() + "-" + pkgname, classnames);
}
/**
 * returns the list of classnames associated with this class and package, if
 * available, otherwise null.
 *
 * @param cls the class to get the classnames for
 * @param pkgname the package name for the classes
 * @return the classnames if found, otherwise null
 */
protected static Vector<String> getCache(Class<?> cls, String pkgname) {
initCache();
return m_Cache.get(cls.getName() + "-" + pkgname);
}
/**
 * clears the cache for class/classnames queries (the classpath scan is kept).
 */
public static void clearCache() {
initCache();
m_Cache.clear();
}
/**
 * Calls clearCache() and resets the cache of classes on the classpath (i.e.
 * forces a rescan of the classpath).
 */
public static void clearClassCache() {
clearCache();
// make sure that any new classes are picked up
m_ClassCache = new ClassCache();
}
/**
 * Returns the revision string.
 *
 * @return the revision
 */
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
 * Possible calls:
 * <ul>
 * <li>
 * weka.core.ClassDiscovery packages<br/>
 * Prints all the packages in the current classpath</li>
 * <li>
 * weka.core.ClassDiscovery &lt;classname&gt; &lt;packagename(s)&gt;<br/>
 * Prints the classes it found.</li>
 * </ul>
 *
 * @param args the commandline arguments
 */
public static void main(String[] args) {
  Vector<String> list;
  Vector<String> packages;
  int i;
  StringTokenizer tok;
  if ((args.length == 1) && (args[0].equals("packages"))) {
    // list every package found on the classpath
    list = findPackages();
    for (i = 0; i < list.size(); i++) {
      System.out.println(list.get(i));
    }
  } else if (args.length == 2) {
    // parse the comma-separated package list
    packages = new Vector<String>();
    tok = new StringTokenizer(args[1], ",");
    while (tok.hasMoreTokens()) {
      packages.add(tok.nextToken());
    }
    // search
    list = ClassDiscovery.find(args[0],
      packages.toArray(new String[packages.size()]));
    // print result, if any
    System.out.println("Searching for '" + args[0] + "' in '" + args[1]
      + "':\n" + " " + list.size() + " found.");
    for (i = 0; i < list.size(); i++) {
      System.out.println(" " + (i + 1) + ". " + list.get(i));
    }
  } else {
    System.out.println("\nUsage:");
    System.out.println(ClassDiscovery.class.getName() + " packages");
    System.out.println("\tlists all packages in the classpath");
    System.out.println(ClassDiscovery.class.getName()
      + " <classname> <packagename(s)>");
    System.out
      .println("\tlists classes derived from/implementing 'classname' that");
    // bug fix: the closing parenthesis was missing in the usage text
    System.out
      .println("\tcan be found in 'packagename(s)' (comma-separated list)");
    System.out.println();
    System.exit(1);
  }
}
/**
 * compares two strings. The following order is used:<br/>
 * <ul>
 * <li>case insensitive</li>
 * <li>german umlauts (&auml;, &ouml; etc.) or other non-ASCII letters are
 * treated as special chars</li>
 * <li>special chars &lt; numbers &lt; letters</li>
 * </ul>
 */
public static class StringCompare implements Comparator<String>,
RevisionHandler {
/**
 * appends blanks to the string if its shorter than <code>len</code>.
 * Padding with spaces (a "special char") makes a string sort before any
 * longer string it is a prefix of.
 *
 * @param s the string to pad
 * @param len the minimum length for the string to have
 * @return the padded string
 */
private String fillUp(String s, int len) {
while (s.length() < len) {
s += " ";
}
return s;
}
/**
 * returns the group of the character: 0=special char, 1=number, 2=letter.
 * Note: only lower-case letters map to group 2 — inputs are lower-cased
 * in compare() before this is applied.
 *
 * @param c the character to check
 * @return the group
 */
private int charGroup(char c) {
int result;
result = 0;
if ((c >= 'a') && (c <= 'z')) {
result = 2;
} else if ((c >= '0') && (c <= '9')) {
result = 1;
}
return result;
}
/**
 * Compares its two arguments for order: case-insensitive, ranking by
 * character group first (special chars &lt; numbers &lt; letters) and by
 * character value within the same group.
 *
 * @param o1 the first object
 * @param o2 the second object
 * @return -1 if o1&lt;o2, 0 if o1=o2 and 1 if o1&gt;o2
 */
@Override
public int compare(String o1, String o2) {
String s1;
String s2;
int i;
int result;
int v1;
int v2;
result = 0; // they're equal
// get lower case string
s1 = o1.toString().toLowerCase();
s2 = o2.toString().toLowerCase();
// pad both to the same length with blanks
s1 = fillUp(s1, s2.length());
s2 = fillUp(s2, s1.length());
// decide on the first differing character
for (i = 0; i < s1.length(); i++) {
// same char?
if (s1.charAt(i) == s2.charAt(i)) {
result = 0;
} else {
v1 = charGroup(s1.charAt(i));
v2 = charGroup(s2.charAt(i));
// different type (special, number, letter)?
if (v1 != v2) {
if (v1 < v2) {
result = -1;
} else {
result = 1;
}
} else {
if (s1.charAt(i) < s2.charAt(i)) {
result = -1;
} else {
result = 1;
}
}
break;
}
}
return result;
}
/**
 * Indicates whether some other object is "equal to" this Comparator.
 *
 * @param obj the object to compare with this Comparator
 * @return true if the object is a StringCompare object as well
 */
@Override
public boolean equals(Object obj) {
return (obj instanceof StringCompare);
}
/**
 * Returns the revision string.
 *
 * @return the revision
 */
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/ClassloaderUtil.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ClassloaderUtil.java
* Copyright (C) 2008-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Method;
import java.net.URL;
import java.net.URLClassLoader;
/**
* Utility class that can add jar files to the classpath dynamically.
*
* @author Mark Hall (mhall{[at]}pentaho{[dot]}org
* @version $Revision$
*/
public class ClassloaderUtil implements RevisionHandler {

  /** Parameter types of URLClassLoader.addURL, looked up reflectively. */
  private static final Class<?>[] parameters = new Class[] { URL.class };

  /**
   * Add file to CLASSPATH
   *
   * @param s File name
   * @throws IOException if something goes wrong when adding a file
   */
  public static void addFile(String s) throws IOException {
    addFile(new File(s));
  }

  /**
   * Add file to CLASSPATH
   *
   * @param f File object
   * @throws IOException if something goes wrong when adding a file
   */
  public static void addFile(File f) throws IOException {
    addURL(f.toURI().toURL());
  }

  /**
   * Add URL to CLASSPATH
   *
   * @param u URL
   * @throws IOException if something goes wrong when adding a url
   */
  public static void addURL(URL u) throws IOException {
    // the loader that loaded this class (not necessarily the system loader);
    // no throwaway instance is needed to obtain it
    URLClassLoader sysLoader = (URLClassLoader) ClassloaderUtil.class
      .getClassLoader();
    // skip if the URL is already present (case-insensitive comparison)
    URL urls[] = sysLoader.getURLs();
    for (URL url : urls) {
      if (url.toString().toLowerCase().equals(u.toString().toLowerCase())) {
        System.err.println("URL " + u + " is already in the CLASSPATH");
        return;
      }
    }
    Class<?> sysclass = URLClassLoader.class;
    try {
      // addURL is protected, so it has to be invoked reflectively
      Method method = sysclass.getDeclaredMethod("addURL", parameters);
      method.setAccessible(true);
      method.invoke(sysLoader, new Object[] { u });
    } catch (Throwable t) {
      // bug fix: preserve the original cause instead of discarding it
      throw new IOException("Error, could not add URL to system classloader", t);
    }
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/CommandlineRunnable.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* CommandlineRunnable.java
* Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core;
/**
 * Interface to something that can be run from the command line. Callers are
 * expected to invoke preExecution(), run() and postExecution() in that order.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision$
 */
public interface CommandlineRunnable {
/**
 * Perform any setup stuff that might need to happen before execution.
 *
 * @throws Exception if a problem occurs during setup
 */
void preExecution() throws Exception;
/**
 * Execute the supplied object.
 *
 * @param toRun the object to execute
 * @param options any options to pass to the object
 * @throws Exception if a problem occurs.
 */
void run(Object toRun, String[] options) throws Exception;
/**
 * Perform any teardown stuff that might need to happen after execution.
 *
 * @throws Exception if a problem occurs during teardown
 */
void postExecution() throws Exception;
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/ConjugateGradientOptimization.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ConjugateGradientOptimization.java
* Copyright (C) 2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core;
import java.util.Arrays;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
/**
* This subclass of Optimization.java implements conjugate gradient descent
* rather than BFGS updates, by overriding findArgmin(), with the same tests for
* convergence, and applies the same line search code. Note that constraints are
* NOT actually supported. Using this class instead of Optimization.java can
* reduce runtime when there are many parameters.
*
* Uses the second hybrid method proposed in "An Efficient Hybrid Conjugate
* Gradient Method for Unconstrained Optimization" by Dai and Yuan (2001). See
* also information in the getTechnicalInformation() method.
*
* @author Eibe Frank
* @version $Revision$
*/
public abstract class ConjugateGradientOptimization extends Optimization
implements RevisionHandler {
/**
 * Returns an instance of a TechnicalInformation object, containing detailed
 * information about the technical background of this class, e.g., paper
 * reference or book this class is based on.
 *
 * @return the technical information about this class
 */
@Override
public TechnicalInformation getTechnicalInformation() {
  TechnicalInformation result = new TechnicalInformation(Type.ARTICLE);
  result.setValue(Field.AUTHOR, "Y.H. Dai and Y. Yuan");
  result.setValue(Field.YEAR, "2001");
  result.setValue(Field.TITLE,
    "An Efficient Hybrid Conjugate Gradient Method for Unconstrained Optimization");
  result.setValue(Field.JOURNAL, "Annals of Operations Research");
  result.setValue(Field.VOLUME, "103");
  result.setValue(Field.PAGES, "33-47");
  // bug fix: the second reference's fields must be set on the entry returned
  // by add(); setting them on 'result' overwrote the Dai & Yuan reference
  TechnicalInformation additional = result.add(Type.ARTICLE);
  additional.setValue(Field.AUTHOR, "W.W. Hager and H. Zhang");
  additional.setValue(Field.YEAR, "2006");
  additional.setValue(Field.TITLE,
    "A survey of nonlinear conjugate gradient methods");
  additional.setValue(Field.JOURNAL, "Pacific Journal of Optimization");
  additional.setValue(Field.VOLUME, "2");
  additional.setValue(Field.PAGES, "35-58");
  return result;
}
/**
 * Constructor that sets MAXITS to 2000 by default and the parameter in the
 * second weak Wolfe condition to 0.1.
 */
public ConjugateGradientOptimization() {
setMaxIteration(2000);
m_BETA = 0.1; // To make line search more exact, recommended for non-linear
// CGD
}
/**
 * Main algorithm: conjugate gradient descent with the Dai-Yuan/HS hybrid
 * update, reusing the line search from Optimization. NOTE: constraints are
 * not actually supported — both constraint arrays must contain Double.NaN.
 *
 * @param initX initial point of x, assuming no value's on the bound!
 * @param constraints both arrays must contain Double.NaN
 * @return the solution of x, null if number of iterations not enough
 * @throws Exception if an error occurs
 */
@Override
public double[] findArgmin(double[] initX, double[][] constraints)
throws Exception {
int l = initX.length;
// Initial value of obj. function, gradient and inverse of the Hessian
m_f = objectiveFunction(initX);
if (Double.isNaN(m_f)) {
throw new Exception("Objective function value is NaN!");
}
// Get gradient at initial point
double[] grad = evaluateGradient(initX), oldGrad, oldX, deltaX = new double[l], direct = new double[l], x = new double[l];
// Turn gradient into direction and calculate squared length
double sum = 0;
for (int i = 0; i < grad.length; i++) {
direct[i] = -grad[i];
sum += grad[i] * grad[i];
}
// Same as in Optimization.java
double stpmax = m_STPMX * Math.max(Math.sqrt(sum), l);
boolean[] isFixed = new boolean[initX.length];
DynamicIntArray wsBdsIndx = new DynamicIntArray(initX.length);
double[][] consts = new double[2][initX.length];
// reject any real constraint: only NaN (i.e. unconstrained) is accepted
for (int i = 0; i < initX.length; i++) {
if (!Double.isNaN(constraints[0][i])
|| (!Double.isNaN(constraints[1][i]))) {
throw new Exception("Cannot deal with constraints, sorry.");
}
consts[0][i] = constraints[0][i];
consts[1][i] = constraints[1][i];
x[i] = initX[i];
}
boolean finished = false;
for (int step = 0; step < m_MAXITS; step++) {
if (m_Debug) {
System.err.println("\nIteration # " + step + ":");
}
oldX = x;
oldGrad = grad;
// Make a copy of direction vector because it may get modified in lnsrch
double[] directB = Arrays.copyOf(direct, direct.length);
// Perform a line search based on new direction
m_IsZeroStep = false;
x = lnsrch(x, grad, directB, stpmax, isFixed, constraints, wsBdsIndx);
if (m_IsZeroStep) {
throw new Exception("Exiting due to zero step.");
}
// convergence test on the relative change in x (same as Optimization.java)
double test = 0.0;
for (int h = 0; h < x.length; h++) {
deltaX[h] = x[h] - oldX[h];
double tmp = Math.abs(deltaX[h]) / Math.max(Math.abs(x[h]), 1.0);
if (tmp > test) {
test = tmp;
}
}
if (test < m_Zero) {
if (m_Debug) {
System.err.println("\nDeltaX converged: " + test);
}
finished = true;
break;
}
// Check zero gradient
grad = evaluateGradient(x);
test = 0.0;
for (int g = 0; g < l; g++) {
double tmp = Math.abs(grad[g]) * Math.max(Math.abs(directB[g]), 1.0)
/ Math.max(Math.abs(m_f), 1.0);
if (tmp > test) {
test = tmp;
}
}
if (test < m_Zero) {
if (m_Debug) {
for (int i = 0; i < l; i++) {
System.out.println(grad[i] + " " + directB[i] + " " + m_f);
}
System.err.println("Gradient converged: " + test);
}
finished = true;
break;
}
// Calculate multiplier: hybrid of Hestenes-Stiefel and Dai-Yuan betas
double betaHSNumerator = 0, betaDYNumerator = 0;
double betaHSandDYDenominator = 0;
for (int i = 0; i < grad.length; i++) {
betaDYNumerator += grad[i] * grad[i];
betaHSNumerator += (grad[i] - oldGrad[i]) * grad[i];
betaHSandDYDenominator += (grad[i] - oldGrad[i]) * direct[i];
}
double betaHS = betaHSNumerator / betaHSandDYDenominator;
double betaDY = betaDYNumerator / betaHSandDYDenominator;
if (m_Debug) {
System.err.println("Beta HS: " + betaHS);
System.err.println("Beta DY: " + betaDY);
}
// new direction: steepest descent plus clipped hybrid beta times old direction
for (int i = 0; i < direct.length; i++) {
direct[i] = -grad[i] + Math.max(0, Math.min(betaHS, betaDY))
* direct[i];
}
}
if (finished) {
if (m_Debug) {
System.err.println("Minimum found.");
}
m_f = objectiveFunction(x);
if (Double.isNaN(m_f)) {
throw new Exception("Objective function value is NaN!");
}
return x;
}
// iteration budget exhausted: record last point and signal failure with null
if (m_Debug) {
System.err.println("Cannot find minimum -- too many iterations!");
}
m_X = x;
return null;
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/ContingencyTables.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ContingencyTables.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core;
/**
* Class implementing some statistical routines for contingency tables.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public class ContingencyTables implements RevisionHandler {
/** The natural logarithm of 2 */
public static final double log2 = Math.log(2);
/** Cache of integer logs: INT_N_LOG_N_CACHE[n] holds n * ln(n) for n in [1, 9999] */
private static final double MAX_INT_FOR_CACHE_PLUS_ONE = 10000;
private static final double[] INT_N_LOG_N_CACHE = new double[(int) MAX_INT_FOR_CACHE_PLUS_ONE];
/** Initialize cache (index 0 stays 0, consistent with 0*ln(0) := 0) */
static {
for (int i = 1; i < MAX_INT_FOR_CACHE_PLUS_ONE; i++) {
double d = i;
INT_N_LOG_N_CACHE[i] = d * Math.log(d);
}
}
/**
 * Returns chi-squared probability for a given matrix.
 *
 * @param matrix the contingency table
 * @param yates is Yates' correction to be used?
 * @return the chi-squared probability
 */
public static double chiSquared(final double[][] matrix, final boolean yates) {
  // degrees of freedom: (rows - 1) * (columns - 1)
  final int df = (matrix.length - 1) * (matrix[0].length - 1);
  final double chi = chiVal(matrix, yates);
  return Statistics.chiSquaredProbability(chi, df);
}
/**
 * Computes the chi-squared statistic for a contingency table.
 *
 * @param matrix
 * the contigency table
 * @param useYates
 * is Yates' correction to be used? (only honoured when df == 1)
 * @return the value of the chi-squared statistic
 */
public static double chiVal(final double[][] matrix, final boolean useYates) {
int df, nrows, ncols, row, col;
double[] rtotal, ctotal;
double expect = 0, chival = 0, n = 0;
boolean yates = true;
nrows = matrix.length;
ncols = matrix[0].length;
rtotal = new double[nrows];
ctotal = new double[ncols];
// accumulate row totals, column totals and the grand total
for (row = 0; row < nrows; row++) {
for (col = 0; col < ncols; col++) {
rtotal[row] += matrix[row][col];
ctotal[col] += matrix[row][col];
n += matrix[row][col];
}
}
df = (nrows - 1) * (ncols - 1);
// Yates' correction is only applied for a single degree of freedom.
// NOTE(review): when useYates is false and df <= 0, the early return below
// is skipped because the first branch matches — confirm this is intended.
if ((df > 1) || (!useYates)) {
yates = false;
} else if (df <= 0) {
return 0;
}
chival = 0.0;
// sum the per-cell contributions, skipping all-zero rows/columns
for (row = 0; row < nrows; row++) {
if (Utils.gr(rtotal[row], 0)) {
for (col = 0; col < ncols; col++) {
if (Utils.gr(ctotal[col], 0)) {
// expected count under independence of rows and columns
expect = (ctotal[col] * rtotal[row]) / n;
chival += chiCell(matrix[row][col], expect, yates);
}
}
}
}
return chival;
}
/**
 * Tests if Cochran's criterion is fullfilled for the given contingency table. Rows and columns with
 * all zeros are not considered relevant. Fails if any expected count is below 1,
 * or if more than a fifth of the relevant cells have an expected count below 5.
 *
 * @param matrix
 * the contigency table to be tested
 * @return true if contingency table is ok, false if not
 */
public static boolean cochransCriterion(final double[][] matrix) {
double[] rtotal, ctotal;
double n = 0, expect, smallfreq = 5;
int smallcount = 0, nonZeroRows = 0, nonZeroColumns = 0, nrows, ncols, row, col;
nrows = matrix.length;
ncols = matrix[0].length;
rtotal = new double[nrows];
ctotal = new double[ncols];
// accumulate row totals, column totals and the grand total
for (row = 0; row < nrows; row++) {
for (col = 0; col < ncols; col++) {
rtotal[row] += matrix[row][col];
ctotal[col] += matrix[row][col];
n += matrix[row][col];
}
}
// count the rows and columns that actually contain mass
for (row = 0; row < nrows; row++) {
if (Utils.gr(rtotal[row], 0)) {
nonZeroRows++;
}
}
for (col = 0; col < ncols; col++) {
if (Utils.gr(ctotal[col], 0)) {
nonZeroColumns++;
}
}
// check expected counts of all relevant cells
for (row = 0; row < nrows; row++) {
if (Utils.gr(rtotal[row], 0)) {
for (col = 0; col < ncols; col++) {
if (Utils.gr(ctotal[col], 0)) {
expect = (ctotal[col] * rtotal[row]) / n;
if (Utils.sm(expect, smallfreq)) {
if (Utils.sm(expect, 1)) {
// any expected count below 1 fails immediately
return false;
} else {
smallcount++;
// too many cells (more than 1/smallfreq of them) below smallfreq
if (smallcount > (nonZeroRows * nonZeroColumns) / smallfreq) {
return false;
}
}
}
}
}
}
}
return true;
}
/**
 * Computes Cramer's V for a contingency table.
 *
 * @param matrix the contingency table
 * @return Cramer's V
 */
public static double CramersV(final double[][] matrix) {
  final int nrows = matrix.length;
  final int ncols = matrix[0].length;
  // grand total of all cells
  double n = 0;
  for (final double[] rowVals : matrix) {
    for (int col = 0; col < ncols; col++) {
      n += rowVals[col];
    }
  }
  // smaller table dimension minus one
  final int min = (nrows < ncols ? nrows : ncols) - 1;
  if ((min == 0) || Utils.eq(n, 0)) {
    return 0;
  }
  return Math.sqrt(chiVal(matrix, false) / (n * min));
}
/**
 * Computes the entropy of the given array of counts (in bits).
 *
 * @param array the array
 * @return the entropy
 */
public static double entropy(final double[] array) {
  double negSum = 0;
  double total = 0;
  for (final double count : array) {
    negSum -= lnFunc(count);
    total += count;
  }
  // an empty distribution has zero entropy by convention
  if (Utils.eq(total, 0)) {
    return 0;
  }
  return (negSum + lnFunc(total)) / (total * log2);
}
/**
 * Computes conditional entropy of the rows given the columns (in bits).
 *
 * @param matrix the contingency table
 * @return the conditional entropy of the rows given the columns
 */
public static double entropyConditionedOnColumns(final double[][] matrix) {
  double acc = 0;
  double total = 0;
  final int ncols = matrix[0].length;
  for (int col = 0; col < ncols; col++) {
    double columnSum = 0;
    // accumulate cell terms and the column total in one pass
    for (final double[] rowVals : matrix) {
      acc += lnFunc(rowVals[col]);
      columnSum += rowVals[col];
    }
    acc -= lnFunc(columnSum);
    total += columnSum;
  }
  if (Utils.eq(total, 0)) {
    return 0;
  }
  return -acc / (total * log2);
}
/**
 * Computes conditional entropy of the columns given the rows (in bits).
 *
 * @param matrix the contingency table
 * @return the conditional entropy of the columns given the rows
 */
public static double entropyConditionedOnRows(final double[][] matrix) {
  double acc = 0;
  double total = 0;
  final int ncols = matrix[0].length;
  for (final double[] rowVals : matrix) {
    double rowSum = 0;
    // accumulate cell terms and the row total in one pass
    for (int col = 0; col < ncols; col++) {
      acc += lnFunc(rowVals[col]);
      rowSum += rowVals[col];
    }
    acc -= lnFunc(rowSum);
    total += rowSum;
  }
  if (Utils.eq(total, 0)) {
    return 0;
  }
  return -acc / (total * log2);
}
/**
 * Computes conditional entropy of the columns given the rows of the test matrix with respect to the
 * train matrix. Uses a Laplace prior. Does NOT normalize the entropy.
 *
 * @param train
 * the train matrix
 * @param test
 * the test matrix
 * @param numClasses
 * the number of symbols for Laplace
 * @return the entropy
 */
public static double entropyConditionedOnRows(final double[][] train, final double[][] test, final double numClasses) {
double returnValue = 0, trainSumForRow, testSumForRow, testSum = 0;
for (int i = 0; i < test.length; i++) {
trainSumForRow = 0;
testSumForRow = 0;
for (int j = 0; j < test[0].length; j++) {
// Laplace-smoothed log-probability estimated from the train counts
returnValue -= test[i][j] * Math.log(train[i][j] + 1);
trainSumForRow += train[i][j];
testSumForRow += test[i][j];
}
// NOTE(review): testSum is overwritten each iteration, so only the LAST
// row's test sum is used in the final division — confirm this matches the
// intended (unnormalized) definition before changing it.
testSum = testSumForRow;
returnValue += testSumForRow * Math.log(trainSumForRow + numClasses);
}
return returnValue / (testSum * log2);
}
/**
 * Computes the rows' entropy for the given contingency table (in bits).
 *
 * @param matrix the contingency table
 * @return the rows' entropy
 */
public static double entropyOverRows(final double[][] matrix) {
  double negSum = 0;
  double total = 0;
  final int ncols = matrix[0].length;
  for (final double[] rowVals : matrix) {
    // marginal count of this row
    double rowSum = 0;
    for (int col = 0; col < ncols; col++) {
      rowSum += rowVals[col];
    }
    negSum -= lnFunc(rowSum);
    total += rowSum;
  }
  if (Utils.eq(total, 0)) {
    return 0;
  }
  return (negSum + lnFunc(total)) / (total * log2);
}
/**
* Computes the columns' entropy for the given contingency table.
*
* @param matrix
* the contingency table
* @return the columns' entropy
* @throws InterruptedException
*/
public static double entropyOverColumns(final double[][] matrix) throws InterruptedException {
  // Accumulate -sum_j n_.j*ln(n_.j) over the column totals; adding
  // N*ln(N) below turns this into the (bit-scaled) column entropy.
  double negEntropyTerms = 0;
  double grandTotal = 0;
  for (int col = 0; col < matrix[0].length; col++) {
    double columnTotal = 0;
    for (int row = 0; row < matrix.length; row++) {
      // allow callers to cancel long-running WEKA computations
      if (Thread.interrupted()) {
        throw new InterruptedException("Thread got interrupted, thus, kill WEKA.");
      }
      columnTotal += matrix[row][col];
    }
    negEntropyTerms -= lnFunc(columnTotal);
    grandTotal += columnTotal;
  }
  if (Utils.eq(grandTotal, 0)) {
    return 0; // empty table -> zero entropy by convention
  }
  return (negEntropyTerms + lnFunc(grandTotal)) / (grandTotal * log2);
}
/**
 * Computes gain ratio for contingency table (split on rows). Returns 0 if the split
 * entropy is 0.
 *
 * @param matrix
 *          the contingency table
 * @return the gain ratio
 */
public static double gainRatio(final double[][] matrix) {
  double grandTotal = 0;

  // entropy term of the column (class) distribution before the split
  double preSplitTerm = 0;
  for (int col = 0; col < matrix[0].length; col++) {
    double columnSum = 0;
    for (int row = 0; row < matrix.length; row++) {
      columnSum += matrix[row][col];
    }
    preSplitTerm += lnFunc(columnSum);
    grandTotal += columnSum;
  }
  preSplitTerm -= lnFunc(grandTotal);

  // entropy term after splitting on rows, plus the split entropy term
  double postSplitTerm = 0;
  double splitEntropyTerm = 0;
  for (int row = 0; row < matrix.length; row++) {
    double rowSum = 0;
    for (int col = 0; col < matrix[0].length; col++) {
      postSplitTerm += lnFunc(matrix[row][col]);
      rowSum += matrix[row][col];
    }
    splitEntropyTerm += lnFunc(rowSum);
  }
  postSplitTerm -= splitEntropyTerm;
  splitEntropyTerm -= lnFunc(grandTotal);

  final double infoGain = preSplitTerm - postSplitTerm;
  if (Utils.eq(splitEntropyTerm, 0)) {
    return 0; // degenerate split: every instance lands in one branch
  }
  return infoGain / splitEntropyTerm;
}
/**
* Returns negative base 2 logarithm of multiple hypergeometric probability for a contingency table.
*
* @param matrix
* the contingency table
* @return the log of the hypergeometric probability of the contingency table
*/
public static double log2MultipleHypergeometric(final double[][] matrix) {
  double logProb = 0;
  double grandTotal = 0;

  // + ln(rowTotal!) for every row
  for (final double[] row : matrix) {
    double rowTotal = 0;
    for (final double cell : row) {
      rowTotal += cell;
    }
    logProb += SpecialFunctions.lnFactorial(rowTotal);
    grandTotal += rowTotal;
  }

  // + ln(columnTotal!) for every column
  for (int col = 0; col < matrix[0].length; col++) {
    double columnTotal = 0;
    for (int row = 0; row < matrix.length; row++) {
      columnTotal += matrix[row][col];
    }
    logProb += SpecialFunctions.lnFactorial(columnTotal);
  }

  // - ln(cell!) for every cell, - ln(N!) for the grand total
  for (final double[] row : matrix) {
    for (final double cell : row) {
      logProb -= SpecialFunctions.lnFactorial(cell);
    }
  }
  logProb -= SpecialFunctions.lnFactorial(grandTotal);

  // convert natural log to (negated) base-2 log
  return -logProb / log2;
}
/**
* Reduces a matrix by deleting all zero rows and columns.
*
* @param matrix
* the matrix to be reduced
* @return the matrix with all zero rows and columns deleted
*/
public static double[][] reduceMatrix(final double[][] matrix) {
  final int numRows = matrix.length;
  final int numCols = matrix[0].length;

  // compute row and column sums in a single pass
  final double[] rowSums = new double[numRows];
  final double[] colSums = new double[numCols];
  for (int r = 0; r < numRows; r++) {
    for (int c = 0; c < numCols; c++) {
      rowSums[r] += matrix[r][c];
      colSums[c] += matrix[r][c];
    }
  }

  // count surviving (non-zero-sum) rows and columns
  int keptRows = 0;
  for (int r = 0; r < numRows; r++) {
    if (Utils.gr(rowSums[r], 0)) {
      keptRows++;
    }
  }
  int keptCols = 0;
  for (int c = 0; c < numCols; c++) {
    if (Utils.gr(colSums[c], 0)) {
      keptCols++;
    }
  }

  // copy surviving cells into the compacted matrix
  final double[][] reduced = new double[keptRows][keptCols];
  int destRow = 0;
  for (int r = 0; r < numRows; r++) {
    if (!Utils.gr(rowSums[r], 0)) {
      continue; // all-zero row is dropped
    }
    int destCol = 0;
    for (int c = 0; c < numCols; c++) {
      if (Utils.gr(colSums[c], 0)) {
        reduced[destRow][destCol] = matrix[r][c];
        destCol++;
      }
    }
    destRow++;
  }
  return reduced;
}
/**
* Calculates the symmetrical uncertainty for base 2.
*
* @param matrix
* the contingency table
* @return the calculated symmetrical uncertainty
*
*/
public static double symmetricalUncertainty(final double[][] matrix) {
  double grandTotal = 0;

  // entropy term of the column distribution
  double columnEntropyTerm = 0;
  for (int col = 0; col < matrix[0].length; col++) {
    double columnSum = 0;
    for (int row = 0; row < matrix.length; row++) {
      columnSum += matrix[row][col];
    }
    columnEntropyTerm += lnFunc(columnSum);
    grandTotal += columnSum;
  }
  columnEntropyTerm -= lnFunc(grandTotal);

  // entropy term of the row distribution and the conditional-entropy term
  double rowEntropyTerm = 0;
  double conditionalTerm = 0;
  for (int row = 0; row < matrix.length; row++) {
    double rowSum = 0;
    for (int col = 0; col < matrix[0].length; col++) {
      rowSum += matrix[row][col];
      conditionalTerm += lnFunc(matrix[row][col]);
    }
    rowEntropyTerm += lnFunc(rowSum);
  }
  conditionalTerm -= rowEntropyTerm;
  rowEntropyTerm -= lnFunc(grandTotal);

  final double infoGain = columnEntropyTerm - conditionalTerm;
  if (Utils.eq(columnEntropyTerm, 0) || Utils.eq(rowEntropyTerm, 0)) {
    return 0; // one of the marginals carries no information
  }
  // SU = 2 * IG / (H(col) + H(row))
  return 2.0 * (infoGain / (columnEntropyTerm + rowEntropyTerm));
}
/**
* Computes Goodman and Kruskal's tau-value for a contingency table.
*
* @param matrix
* the contingency table
* @return Goodman and Kruskal's tau-value
*/
public static double tauVal(final double[][] matrix) {
  final int numRows = matrix.length;
  final int numCols = matrix[0].length;
  final double[] columnTotals = new double[numCols];
  double sumOfRowMaxima = 0;
  double grandTotal = 0;

  // per row: find the largest cell; also accumulate column totals and N
  for (int r = 0; r < numRows; r++) {
    double rowMax = 0;
    for (int c = 0; c < numCols; c++) {
      if (Utils.gr(matrix[r][c], rowMax)) {
        rowMax = matrix[r][c];
      }
      columnTotals[c] += matrix[r][c];
      grandTotal += matrix[r][c];
    }
    sumOfRowMaxima += rowMax;
  }

  if (Utils.eq(grandTotal, 0)) {
    return 0; // empty table
  }
  final double largestColumnTotal = columnTotals[Utils.maxIndex(columnTotals)];
  return (sumOfRowMaxima - largestColumnTotal) / (grandTotal - largestColumnTotal);
}
/**
* Help method for computing entropy.
*/
public static double lnFunc(final double num) {
  // by convention 0*ln(0) = 0, and negative inputs are treated the same
  if (num <= 0) {
    return 0;
  }
  // fast path: cached n*ln(n) values for small exact integers
  if (num < MAX_INT_FOR_CACHE_PLUS_ONE) {
    final int n = (int) num;
    if (n == num) {
      return INT_N_LOG_N_CACHE[n];
    }
  }
  return num * Math.log(num);
}
/**
* Computes chi-value for one cell in a contingency table.
*
* @param freq
* the observed frequency in the cell
* @param expected
* the expected frequency in the cell
* @return the chi-value for that cell; 0 if the expected value is too close to zero
*/
private static double chiCell(final double freq, final double expected, final boolean yates) {
  // cell belongs to an empty row or column -> contributes nothing
  if (Utils.smOrEq(expected, 0)) {
    return 0;
  }
  double diff = Math.abs(freq - expected);
  if (yates) {
    // Yates' continuity correction, clamped so the difference never
    // becomes negative
    diff = Math.max(0, diff - 0.5);
  }
  return diff * diff / expected;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
  // delegate parsing of the SCM revision keyword to RevisionUtils
  final String revisionKeyword = "$Revision$";
  return RevisionUtils.extract(revisionKeyword);
}
/**
* Main method for testing this class.
*
* @throws InterruptedException
*/
public static void main(final String[] ops) throws InterruptedException {
  final double[] firstRow = { 10, 5, 20 };
  final double[] secondRow = { 2, 10, 6 };
  final double[] thirdRow = { 5, 10, 10 };
  final double[][] matrix = { firstRow, secondRow, thirdRow };

  dumpMatrix(matrix);
  System.out.println("Chi-squared probability: " + ContingencyTables.chiSquared(matrix, false));
  System.out.println("Chi-squared value: " + ContingencyTables.chiVal(matrix, false));
  System.out.println("Cochran's criterion fullfilled: " + ContingencyTables.cochransCriterion(matrix));
  System.out.println("Cramer's V: " + ContingencyTables.CramersV(matrix));
  System.out.println("Entropy of first row: " + ContingencyTables.entropy(firstRow));
  System.out.println("Entropy conditioned on columns: " + ContingencyTables.entropyConditionedOnColumns(matrix));
  System.out.println("Entropy conditioned on rows: " + ContingencyTables.entropyConditionedOnRows(matrix));
  System.out.println("Entropy conditioned on rows (with Laplace): " + ContingencyTables.entropyConditionedOnRows(matrix, matrix, 3));
  System.out.println("Entropy of rows: " + ContingencyTables.entropyOverRows(matrix));
  System.out.println("Entropy of columns: " + ContingencyTables.entropyOverColumns(matrix));
  System.out.println("Gain ratio: " + ContingencyTables.gainRatio(matrix));
  System.out.println("Negative log2 of multiple hypergeometric probability: " + ContingencyTables.log2MultipleHypergeometric(matrix));
  System.out.println("Symmetrical uncertainty: " + ContingencyTables.symmetricalUncertainty(matrix));
  System.out.println("Tau value: " + ContingencyTables.tauVal(matrix));

  // a table with an all-zero middle row and middle column, to demonstrate
  // reduceMatrix()
  final double[][] sparse = {
    { 1, 0, 1 },
    { 0, 0, 0 },
    { 1, 0, 1 }
  };
  System.out.println("Matrix with empty row and column: ");
  dumpMatrix(sparse);
  System.out.println("Reduced matrix: ");
  dumpMatrix(ContingencyTables.reduceMatrix(sparse));
}

/**
 * Prints a matrix to stdout, one row per line, cells separated by spaces.
 *
 * @param m the matrix to print
 */
private static void dumpMatrix(final double[][] m) {
  for (final double[] row : m) {
    for (final double value : row) {
      System.out.print(value + " ");
    }
    System.out.println();
  }
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/Copyable.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Copyable.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core;
/**
* Interface implemented by classes that can produce "shallow" copies
* of their objects. (As opposed to clone(), which is supposed to
* produce a "deep" copy.)
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public interface Copyable {

  /**
   * This method produces a shallow copy of an object.
   * It does the same as the clone() method in Object, which also produces
   * a shallow copy.
   *
   * @return a shallow copy of this object
   */
  Object copy();
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/Copyright.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Copyright.java
* Copyright (C) 2007-2012 University of Waikato, Hamilton, New Zealand
*/
package weka.core;
import java.util.Calendar;
import java.util.Properties;
/**
* A class for providing centralized Copyright information.
*
* @author fracpete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class Copyright {

  /** the copyright file */
  public final static String PROPERTY_FILE = "weka/core/Copyright.props";

  /** Contains the properties */
  protected static Properties PROPERTIES;

  static {
    PROPERTIES = new Properties();
    try {
      // Use the class literal instead of instantiating Copyright just to
      // obtain a class loader; also close the resource stream after loading.
      java.io.InputStream stream =
        Copyright.class.getClassLoader().getResourceAsStream(PROPERTY_FILE);
      if (stream == null) {
        // resource missing -> fall through to the defaults below
        throw new java.io.IOException("Resource not found: " + PROPERTY_FILE);
      }
      try {
        PROPERTIES.load(stream);
      } finally {
        stream.close();
      }
    }
    catch (Exception e) {
      System.err.println(
	  "Could not read configuration file for the copyright "
	  + "information - using default.");
    }
  }

  /**
   * returns the start year of the copyright
   *
   * @return the start year
   */
  public static String getFromYear() {
    return PROPERTIES.getProperty("FromYear", "1999");
  }

  /**
   * returns the end year of the copyright (i.e., current year)
   *
   * @return the end/current year
   */
  public static String getToYear() {
    return PROPERTIES.getProperty("ToYear", "" + Calendar.getInstance().get(Calendar.YEAR));
  }

  /**
   * returns the entity owning the copyright
   *
   * @return the owner
   */
  public static String getOwner() {
    return PROPERTIES.getProperty("Owner", "The University of Waikato");
  }

  /**
   * returns the address of the owner
   *
   * @return the address
   */
  public static String getAddress() {
    return PROPERTIES.getProperty("Address", "Hamilton, New Zealand");
  }

  /**
   * returns the URL of the owner
   *
   * @return the URL
   */
  public static String getURL() {
    return PROPERTIES.getProperty("URL", "http://www.cs.waikato.ac.nz/~ml/");
  }

  /**
   * Only for testing
   *
   * @param args ignored
   */
  public static void main(String[] args) {
    System.out.println(PROPERTIES);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/CustomDisplayStringProvider.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* CustomDisplayStringProvider.java
* Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand
*/
package weka.core;
/**
* For classes that do not implement the OptionHandler interface and want to
* provide a custom display string in the GenericObjectEditor, which is more
* descriptive than the class name.
*
* @author fracpete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public interface CustomDisplayStringProvider {

  /**
   * Returns the custom display string.
   *
   * @return the string used for displaying this object
   */
  public String toDisplay();
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/DateAttributeInfo.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* DateAttributeInfo.java
* Copyright (C) 2014 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core;
import java.text.SimpleDateFormat;
/**
* Stores information for date attributes.
*/
public class DateAttributeInfo implements AttributeInfo {

  /** Date format specification for date attributes */
  protected SimpleDateFormat m_DateFormat;

  /**
   * Constructs info based on argument.
   *
   * @param dateFormat the SimpleDateFormat pattern to use; if null, the
   *          ISO-8601-style default "yyyy-MM-dd'T'HH:mm:ss" is used
   */
  public DateAttributeInfo(String dateFormat) {
    final String pattern =
      (dateFormat != null) ? dateFormat : "yyyy-MM-dd'T'HH:mm:ss";
    m_DateFormat = new SimpleDateFormat(pattern);
    // strict parsing: reject dates that do not match the pattern exactly
    m_DateFormat.setLenient(false);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/Debug.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Debug.java
* Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
*/
package weka.core;
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.PrintWriter;
import java.io.Serializable;
import java.io.StringWriter;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.logging.FileHandler;
import java.util.logging.Handler;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.logging.SimpleFormatter;
/**
* A helper class for debug output, logging, clocking, etc.
*
* @author fracpete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class Debug
implements Serializable, RevisionHandler {
/** for serialization */
private static final long serialVersionUID = 66171861743328020L;
/** the log level All */
public final static Level ALL = Level.ALL;
/** the log level Config */
public final static Level CONFIG = Level.CONFIG;
/** the log level Fine */
public final static Level FINE = Level.FINE;
/** the log level Finer */
public final static Level FINER = Level.FINER;
/** the log level Finest */
public final static Level FINEST = Level.FINEST;
/** the log level Info */
public final static Level INFO = Level.INFO;
/** the log level Off - i.e., no logging */
public final static Level OFF = Level.OFF;
/** the log level Severe */
public final static Level SEVERE = Level.SEVERE;
/** the log level Warning */
public final static Level WARNING = Level.WARNING;
/** whether logging is enabled */
protected boolean m_Enabled = true;
/** for logging */
protected Log m_Log;
/** for clocking */
protected Clock m_Clock = new Clock();
/**
* A little helper class for clocking and outputting times. It measures the
* CPU time if possible, otherwise it's just based on the system time. In
* case one just wants to measure time (e.g., database queries don't take up
* much CPU time, but still might take a long time to finish), then one can
* disable the use of CPU time as well.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
* @see ThreadMXBean#isThreadCpuTimeEnabled()
*/
public static class Clock
  implements Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = 4622161807307942201L;

  /** the output format in milli-seconds */
  public final static int FORMAT_MILLISECONDS = 0;

  /** the output format in seconds, with fraction of msecs */
  public final static int FORMAT_SECONDS = 1;

  /** the output format in hours:minutes:seconds, with fraction of msecs */
  public final static int FORMAT_HHMMSS = 2;

  /** the output formats */
  public static final Tag[] TAGS_FORMAT = {
    new Tag(FORMAT_MILLISECONDS, "milli-seconds"),
    new Tag(FORMAT_SECONDS, "seconds"),
    new Tag(FORMAT_HHMMSS, "hh:mm:ss")
  };

  /** the format of the output */
  public int m_OutputFormat = FORMAT_SECONDS;

  /** the start time (msec) */
  protected long m_Start;

  /** the end time (msec) */
  protected long m_Stop;

  /** whether the time is still being clocked */
  protected boolean m_Running;

  /** the ID of the thread whose CPU time is measured */
  protected long m_ThreadID;

  /** whether the system can measure the CPU time */
  protected boolean m_CanMeasureCpuTime;

  /** whether to use the CPU time (by default TRUE) */
  protected boolean m_UseCpuTime;

  /** the thread monitor, if the system can measure the CPU time;
   *  transient because MXBeans are not serializable -- re-created lazily
   *  in getThreadMonitor() */
  protected transient ThreadMXBean m_ThreadMonitor;

  /**
   * automatically starts the clock with FORMAT_SECONDS format and CPU
   * time if available
   *
   * @see #m_OutputFormat
   */
  public Clock() {
    this(true);
  }

  /**
   * automatically starts the clock with the given output format and CPU
   * time if available
   *
   * @param format the output format
   * @see #m_OutputFormat
   */
  public Clock(int format) {
    this(true, format);
  }

  /**
   * starts the clock depending on <code>start</code> immediately with the
   * FORMAT_SECONDS output format and CPU time if available
   *
   * @param start whether to start the clock immediately
   * @see #m_OutputFormat
   */
  public Clock(boolean start) {
    this(start, FORMAT_SECONDS);
  }

  /**
   * starts the clock depending on <code>start</code> immediately, using
   * CPU time if available
   *
   * @param start whether to start the clock immediately
   * @param format the format
   * @see #m_OutputFormat
   */
  public Clock(boolean start, int format) {
    m_Running = false;
    m_Start = 0;
    m_Stop = 0;
    m_UseCpuTime = true;
    setOutputFormat(format);
    if (start)
      start();
  }

  /**
   * initializes the clocking: refreshes the thread monitor (and with it
   * the recorded thread ID) and probes whether CPU-time measurement is
   * supported.
   */
  protected void init() {
    // force getThreadMonitor() to re-create the monitor so the thread ID
    // matches the calling thread
    m_ThreadMonitor = null;
    m_ThreadMonitor = getThreadMonitor();

    // can we measure cpu time?
    m_CanMeasureCpuTime = m_ThreadMonitor.isThreadCpuTimeSupported();
  }

  /**
   * whether the measurement is based on the msecs returned from the System
   * class or on the more accurate CPU time. Also depends on whether the
   * usage of the CPU time was disabled or enabled.
   *
   * @return true if the more accurate CPU time of the thread is
   *         used and the use of CPU time hasn't been disabled
   * @see System#currentTimeMillis()
   * @see ThreadMXBean#isThreadCpuTimeEnabled()
   * @see #getUseCpuTime()
   */
  public boolean isCpuTime() {
    return m_UseCpuTime && m_CanMeasureCpuTime;
  }

  /**
   * enables/disables the use of CPU time (if measurement of CPU time is
   * available). The actual use of CPU time still depends on whether the
   * system supports it. Resets the current timer, if running.
   *
   * @param value if true the CPU time is used (if possible)
   */
  public void setUseCpuTime(boolean value) {
    m_UseCpuTime = value;

    // we have to re-initialize the start time, otherwise we would mix two
    // different time bases and get bogus results
    if (m_Running) {
      stop();
      start();
    }
  }

  /**
   * returns whether the use of CPU time is enabled/disabled (regardless of
   * whether the system supports it or not)
   *
   * @return true if the CPU time is used (if possible)
   */
  public boolean getUseCpuTime() {
    return m_UseCpuTime;
  }

  /**
   * Returns a new thread monitor if the current one is null (e.g., due to
   * serialization) or the currently set one. The thread ID is also updated
   * if necessary.
   *
   * @return the thread monitor to use
   */
  protected ThreadMXBean getThreadMonitor() {
    if (m_ThreadMonitor == null) {
      m_ThreadMonitor = ManagementFactory.getThreadMXBean();
      if (m_CanMeasureCpuTime && !m_ThreadMonitor.isThreadCpuTimeEnabled())
        m_ThreadMonitor.setThreadCpuTimeEnabled(true);
      // record the calling thread as the one being measured
      m_ThreadID = Thread.currentThread().getId();
    }

    return m_ThreadMonitor;
  }

  /**
   * returns the current time in msec (CPU user time of the recorded
   * thread if enabled, wall-clock time otherwise)
   *
   * @return the current time
   */
  protected long getCurrentTime() {
    long result;

    if (isCpuTime())
      // getThreadUserTime returns nanoseconds -> convert to msec
      result = getThreadMonitor().getThreadUserTime(m_ThreadID) / 1000000;
    else
      result = System.currentTimeMillis();

    return result;
  }

  /**
   * saves the current system time (or CPU time) in msec as start time
   *
   * @see #m_Start
   */
  public void start() {
    // make sure that we get the right thread ID!
    init();

    m_Start = getCurrentTime();
    m_Stop = m_Start;
    m_Running = true;
  }

  /**
   * saves the current system (or CPU time) in msec as stop time
   *
   * @see #m_Stop
   */
  public void stop() {
    m_Stop = getCurrentTime();
    m_Running = false;
  }

  /**
   * returns the start time
   *
   * @return the start time
   */
  public long getStart() {
    return m_Start;
  }

  /**
   * returns the stop time or, if still running, the current time
   *
   * @return the stop time
   */
  public long getStop() {
    long result;

    if (isRunning())
      result = getCurrentTime();
    else
      result = m_Stop;

    return result;
  }

  /**
   * whether the time is still being clocked
   *
   * @return true if the time is still being clocked
   */
  public boolean isRunning() {
    return m_Running;
  }

  /**
   * sets the format of the output; unrecognized values are rejected with a
   * message on stdout and leave the current format unchanged
   *
   * @param value the format of the output
   * @see #m_OutputFormat
   */
  public void setOutputFormat(int value) {
    if (value == FORMAT_MILLISECONDS)
      m_OutputFormat = value;
    else if (value == FORMAT_SECONDS)
      m_OutputFormat = value;
    else if (value == FORMAT_HHMMSS)
      m_OutputFormat = value;
    else
      System.out.println("Format '" + value + "' is not recognized!");
  }

  /**
   * returns the output format
   *
   * @return the output format
   * @see #m_OutputFormat
   */
  public int getOutputFormat() {
    return m_OutputFormat;
  }

  /**
   * returns the elapsed time, getStop() - getStart(), as string in the
   * configured output format
   *
   * @return the elapsed time as string
   * @see #getStart()
   * @see #getStop()
   */
  public String toString() {
    String result;
    long elapsed;
    long hours;
    long mins;
    long secs;
    long msecs;

    result = "";
    elapsed = getStop() - getStart();

    switch (getOutputFormat()) {
      case FORMAT_HHMMSS:
        hours = elapsed / (3600 * 1000);
        elapsed = elapsed % (3600 * 1000);
        mins = elapsed / (60 * 1000);
        elapsed = elapsed % (60 * 1000);
        secs = elapsed / 1000;
        msecs = elapsed % 1000;

        if (hours > 0)
          result += "" + hours + ":";

        if (mins < 10)
          result += "0" + mins + ":";
        else
          result += "" + mins + ":";

        if (secs < 10)
          result += "0" + secs + ".";
        else
          result += "" + secs + ".";

        // append only the fractional digits of the msecs
        result += Utils.doubleToString(
            (double) msecs / (double) 1000, 3).replaceAll(".*\\.", "");
        break;

      case FORMAT_SECONDS:
        result = Utils.doubleToString((double) elapsed / (double) 1000, 3) + "s";
        break;

      case FORMAT_MILLISECONDS:
        result = "" + elapsed + "ms";
        break;

      default:
        result = "<unknown time format>";
    }

    return result;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
/**
 * A class that can be used for timestamps in files. The toString() method
 * simply returns the associated Date object in a timestamp format. For
 * formatting options, see java.text.SimpleDateFormat.
 *
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision$
 * @see SimpleDateFormat
 */
public static class Timestamp
  implements Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = -6099868388466922753L;

  /** the default format */
  public final static String DEFAULT_FORMAT = "yyyy-MM-dd HH:mm:ss";

  /** the actual date */
  protected Date m_Stamp;

  /** the format of the timestamp */
  protected String m_Format;

  /** handles the format of the output */
  protected SimpleDateFormat m_Formatter;

  /**
   * creates a timestamp for "now" using the default format
   */
  public Timestamp() {
    this(DEFAULT_FORMAT);
  }

  /**
   * creates a timestamp for "now" using the specified format
   *
   * @param format the format of the timestamp
   * @see SimpleDateFormat
   */
  public Timestamp(String format) {
    this(new Date(), format);
  }

  /**
   * creates a timestamp for the given date using the default format
   *
   * @param stamp the associated date/time for the timestamp
   */
  public Timestamp(Date stamp) {
    this(stamp, DEFAULT_FORMAT);
  }

  /**
   * creates a timestamp for the given date and format
   *
   * @param stamp the associated date/time for the timestamp
   * @param format the format of the timestamp
   * @see SimpleDateFormat
   */
  public Timestamp(Date stamp, String format) {
    super();

    m_Stamp = stamp;
    setFormat(format);
  }

  /**
   * sets the format for the timestamp; silently falls back to
   * DEFAULT_FORMAT if the given pattern is invalid
   *
   * @param value the format string
   * @see SimpleDateFormat
   */
  public void setFormat(String value) {
    try {
      m_Formatter = new SimpleDateFormat(value);
      m_Format = value;
    } catch (Exception e) {
      // invalid pattern -> use the default instead
      m_Formatter = new SimpleDateFormat(DEFAULT_FORMAT);
      m_Format = DEFAULT_FORMAT;
    }
  }

  /**
   * returns the current timestamp format
   *
   * @return the current format
   */
  public String getFormat() {
    return m_Format;
  }

  /**
   * returns the associated date/time
   *
   * @return the timestamp value
   */
  public Date getStamp() {
    return m_Stamp;
  }

  /**
   * returns the timestamp as string in the configured format
   *
   * @return the timestamp as string
   */
  public String toString() {
    return m_Formatter.format(getStamp());
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
/**
* A little, simple helper class for logging stuff. Uses simple file access
* and not the java.util.logging stuff (see Log for that). Uses the
* writeToFile methods of the Debug class.
*
* @see Debug.Log
* @see Debug#writeToFile(String, String)
* @see Debug#writeToFile(String, String, boolean)
*/
public static class SimpleLog
  implements Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = -2671928223819510830L;

  /** the file to write to (if null then only stdout is used) */
  protected String m_Filename = null;

  /**
   * default constructor, uses only stdout
   */
  public SimpleLog() {
    this(null);
  }

  /**
   * Creates a logger that writes into the specified file, appending to it
   * by default.
   *
   * @param filename the file to write to, if null then only stdout is used
   */
  public SimpleLog(String filename) {
    this(filename, true);
  }

  /**
   * Creates a logger that writes into the specified file.
   *
   * @param filename the file to write to, if null then only stdout is used
   * @param append if false, the file will be deleted first
   */
  public SimpleLog(String filename, boolean append) {
    super();

    m_Filename = filename;

    Debug.writeToFile(m_Filename, "--> Log started", append);
  }

  /**
   * returns the filename of the log, can be null
   *
   * @return the filename of the log
   */
  public String getFilename() {
    return m_Filename;
  }

  /**
   * logs the given message to the file (if one was given) and always to
   * stdout, prefixed with a timestamp
   *
   * @param message the message to log
   */
  public void log(String message) {
    final String line = new Timestamp() + " " + message;
    if (getFilename() != null) {
      Debug.writeToFile(getFilename(), line);
    }
    System.out.println(line);
  }

  /**
   * a convenience method for dumping the current system info in the
   * log file
   *
   * @see SystemInfo
   */
  public void logSystemInfo() {
    log("SystemInfo:\n" + new SystemInfo().toString());
  }

  /**
   * returns a string representation of the logger
   *
   * @return a string representation of the logger
   */
  public String toString() {
    return "Filename: " + getFilename();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision$");
  }
}
/**
* A helper class for logging stuff. Uses the java.util.logging
* package. If this approach seems an "overkill" (it can create quite a few
* log files if used in different threads), one can use the
* Debug.SimpleLog class.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
* @see Debug.SimpleLog
*/
public static class Log
implements Serializable, RevisionHandler {
/** for serialization */
private static final long serialVersionUID = 1458435732111675823L;
/** the actual logger, if null only stdout is used */
protected transient Logger m_Logger = null;
/** the filename, if any */
protected String m_Filename = null;
/** the size of the file (in bytes) */
protected int m_Size;
/** the number of files for rotating the logs */
protected int m_NumFiles;
/** whether the initialization of the logger failed */
protected boolean m_LoggerInitFailed = false;
/**
* default constructor, uses only stdout
*/
public Log() {
  // null filename -> log to stdout only
  this(null);
}
/**
* creates a logger that logs into the specified file, if null then only
* stdout is used. It uses 1,000,000 bytes for file size and 1 file.
*
* @param filename the file to log into
*/
public Log(String filename) {
  // defaults: 1,000,000 bytes per file, a single log file
  this(filename, 1000000, 1);
}
/**
* creates a logger that logs into the specified file, if null then only
* stdout is used.
*
* @param filename the file to log into
* @param size the size of the files in bytes
* @param numFiles the number of files for rotating
*/
public Log(String filename, int size, int numFiles) {
m_Filename = filename;
m_Size = size;
m_NumFiles = numFiles;
}
/**
* initializes and returns the logger if necessary (e.g., due to
* serialization).
*
* @return the logger, can be null, e.g., if no filename provided
*/
protected Logger getLogger() {
if ( (m_Logger == null) && (!m_LoggerInitFailed) ) {
if (m_Filename != null) {
m_Logger = Logger.getLogger(m_Filename);
Handler fh = null;
try{
fh = new FileHandler(m_Filename, m_Size, m_NumFiles);
fh.setFormatter(new SimpleFormatter());
m_Logger.addHandler(fh);
m_LoggerInitFailed = false;
}
catch(Exception e) {
System.out.println("Cannot init fileHandler for logger:" + e.toString());
m_Logger = null;
m_LoggerInitFailed = true;
}
}
}
return m_Logger;
}
/**
* turns the string representing a level, e.g., "FINE" or "ALL" into
* the corresponding level (case-insensitive). The default is ALL.
*
* @param level the string to return a level for
* @return the corresponding level or the default
*/
public static Level stringToLevel(String level) {
Level result;
if (level.equalsIgnoreCase("ALL"))
result = ALL;
else if (level.equalsIgnoreCase("CONFIG"))
result = CONFIG;
else if (level.equalsIgnoreCase("FINE"))
result = FINE;
else if (level.equalsIgnoreCase("FINER"))
result = FINER;
else if (level.equalsIgnoreCase("FINEST"))
result = FINEST;
else if (level.equalsIgnoreCase("INFO"))
result = INFO;
else if (level.equalsIgnoreCase("OFF"))
result = OFF;
else if (level.equalsIgnoreCase("SEVERE"))
result = SEVERE;
else if (level.equalsIgnoreCase("WARNING"))
result = WARNING;
else
result = ALL;
return result;
}
/**
* returns the filename of the log, can be null
*
* @return the filename of the log
*/
public String getFilename() {
return m_Filename;
}
/**
* returns the size of the files
*
* @return the size of a file
*/
public int getSize() {
return m_Size;
}
/**
* returns the number of files being used
*
* @return the number of files
*/
public int getNumFiles() {
return m_NumFiles;
}
/**
* logs the given message
*
* @param level the level of severity
* @param message the message to log
*/
public void log(Level level, String message) {
log(level, "", message);
}
/**
* prints the given message with the specified level
*
* @param level the level of logging
* @param sourceclass the class that logs the message
* @param message the message to print
*/
public void log(Level level, String sourceclass, String message) {
log(level, sourceclass, "", message);
}
/**
* prints the given message with the specified level
*
* @param level the level of logging
* @param sourceclass the class that logs the message
* @param sourcemethod the method that logs the message
* @param message the message to print
*/
public void log(Level level, String sourceclass, String sourcemethod, String message) {
Logger logger;
logger = getLogger();
if (logger != null)
logger.logp(level, sourceclass, sourcemethod, message);
else
System.out.println(message);
}
/**
* a convenience method for dumping the current system info in the
* log file
*
* @see SystemInfo
*/
public void logSystemInfo() {
log(INFO, "SystemInfo:\n" + new SystemInfo().toString());
}
/**
* returns a string representation of the logger
*
* @return a string representation of the logger
*/
public String toString() {
String result;
result = "Filename: " + getFilename() + ", "
+ "Size: " + getSize() + ", "
+ "# Files: " + getNumFiles();
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
/**
* This extended Random class enables one to print the generated random
* numbers etc., before they are returned. It can either use stdout (default)
* for outputting the logging information or a Log object (level is then
* INFO).
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public static class Random
extends java.util.Random
implements Serializable, RevisionHandler {
/** for serialization */
private static final long serialVersionUID = 1256846887618333956L;
/** whether to output debug information */
protected boolean m_Debug = false;
/** the unique ID for this number generator */
protected long m_ID;
/** for keeping track of unique IDs */
protected static long m_CurrentID;
/** the log to use for outputting the data, otherwise just stdout */
protected Log m_Log = null;
/**
* Creates a new random number generator. With no debugging.
*/
public Random() {
this(false);
}
/**
* Creates a new random number generator using a single long seed.
* With no debugging
*
* @param seed the seed value
*/
public Random(long seed) {
this(seed, false);
}
/**
* Creates a new random number generator. With optional debugging.
*
* @param debug if true, debugging output is enabled
*/
public Random(boolean debug) {
super();
setDebug(debug);
m_ID = nextID();
if (getDebug())
printStackTrace();
}
/**
* Creates a new random number generator using a single long seed.
* With optional debugging
*
* @param seed the seed value
* @param debug if true, debugging output is enabled
*/
public Random(long seed, boolean debug) {
super(seed);
setDebug(debug);
m_ID = nextID();
if (getDebug())
printStackTrace();
}
/**
* sets whether to print the generated random values or not
*
* @param value if true debugging output is enabled
*/
public void setDebug(boolean value) {
m_Debug = value;
}
/**
* returns whether to print the generated random values or not
*
* @return true if debugging output is enabled
*/
public boolean getDebug() {
return m_Debug;
}
/**
* the log to use, if it is null then stdout is used
*
* @param value the log to use
*/
public void setLog(Log value) {
m_Log = value;
}
/**
* the currently used log, if null then stdout is used for outputting
* the debugging information
*
* @return the log, can be null
*/
public Log getLog() {
return m_Log;
}
/**
* returns the next unique ID for a number generator
*
* @return the next unique ID
*/
protected static long nextID() {
m_CurrentID++;
return m_CurrentID;
}
/**
* returns the unique ID of this number generator
*
* @return the unique ID of this number generator
*/
public long getID() {
return m_ID;
}
/**
* prints the given message only if m_Debug is TRUE
*
* @param msg the message to print
* @see #m_Debug
*/
protected void println(String msg) {
if (getDebug()) {
if (getLog() != null)
getLog().log(Level.INFO, m_ID + ": " + msg);
else
System.out.println(m_ID + ": " + msg);
}
}
/**
* prints the current stacktrace
*/
public void printStackTrace() {
Throwable t;
StringWriter writer;
writer = new StringWriter();
// generate stacktrace
t = new Throwable();
t.fillInStackTrace();
t.printStackTrace(new PrintWriter(writer));
println(writer.toString());
}
/**
* Returns the next pseudorandom, uniformly distributed boolean value from
* this random number generator's sequence.
*
* @return random boolean
*/
public boolean nextBoolean() {
boolean result = super.nextBoolean();
println("nextBoolean=" + result);
return result;
}
/**
* Generates random bytes and places them into a user-supplied byte array.
*
* @param bytes array to fill with random bytes
*/
public void nextBytes(byte[] bytes) {
super.nextBytes(bytes);
println("nextBytes=" + Utils.arrayToString(bytes));
}
/**
* Returns the next pseudorandom, uniformly distributed double value between
* 0.0 and 1.0 from this random number generator's sequence.
*
* @return random double
*/
public double nextDouble() {
double result = super.nextDouble();
println("nextDouble=" + result);
return result;
}
/**
* Returns the next pseudorandom, uniformly distributed float value between
* 0.0 and 1.0 from this random number generator's sequence.
*
* @return random float
*/
public float nextFloat() {
float result = super.nextFloat();
println("nextFloat=" + result);
return result;
}
/**
* Returns the next pseudorandom, Gaussian ("normally") distributed double
* value with mean 0.0 and standard deviation 1.0 from this random number
* generator's sequence.
*
* @return random double, gaussian distributed
*/
public double nextGaussian() {
double result = super.nextGaussian();
println("nextGaussian=" + result);
return result;
}
/**
* Returns the next pseudorandom, uniformly distributed int value from this
* random number generator's sequence.
*
* @return random int
*/
public int nextInt() {
int result = super.nextInt();
println("nextInt=" + result);
return result;
}
/**
* Returns a pseudorandom, uniformly distributed int value between 0
* (inclusive) and the specified value (exclusive), drawn from this random
* number generator's sequence.
*
* @param n the upper limit (exclusive)
* @return random int
*/
public int nextInt(int n) {
int result = super.nextInt(n);
println("nextInt(" + n + ")=" + result);
return result;
}
/**
* Returns the next pseudorandom, uniformly distributed long value from this
* random number generator's sequence.
*
* @return random long
*/
public long nextLong() {
long result = super.nextLong();
println("nextLong=" + result);
return result;
}
/**
* Sets the seed of this random number generator using a single long seed.
*
* @param seed the seed value
*/
public void setSeed(long seed) {
super.setSeed(seed);
println("setSeed(" + seed + ")");
}
/**
* returns a string representation of this number generator
*
* @return a string representation
*/
public String toString() {
return this.getClass().getName() + ": " + getID();
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
/**
* contains debug methods
*
* @author Gabi Schmidberger (gabi at cs dot waikato dot ac dot nz)
* @version $Revision$
*/
public static class DBO
implements Serializable, RevisionHandler {
/** for serialization */
static final long serialVersionUID = -5245628124742606784L;
/** enables/disables output of debug information */
public boolean m_verboseOn = false;
/** range of outputtyp */
public Range m_outputTypes = new Range();
/**
* Set the verbose on flag on
*/
public void setVerboseOn() {
m_verboseOn = true;
}
/**
* Initialize ranges, upper limit must be set
*
* @param upper upper limit
*/
public void initializeRanges(int upper) {
m_outputTypes.setUpper(upper);
}
/**
* Return true if the outputtype is set
*
* @param num value that is reserved for a specific outputtype
* @return return true if the output type is set
*/
public boolean outputTypeSet(int num) {
return (m_outputTypes.isInRange(num));
}
/**
* Return true if the debug level is set
* same method as outpuTypeSet but better name
*
* @param num value that is reserved for a specific outputtype
* @return return true if the debug level is set
*/
public boolean dl(int num) {
return (outputTypeSet(num));
}
/**
* Switches the outputs on that are requested from the option O
*
* @param list list of integers, all are used for an output type
*/
public void setOutputTypes(String list) {
if (list.length() > 0) {
m_verboseOn = true;
m_outputTypes.setRanges(list);
m_outputTypes.setUpper(30);
}
}
/**
* Gets the current output type selection
*
* @return a string containing a comma separated list of ranges
*/
public String getOutputTypes() {
return m_outputTypes.getRanges();
}
/**
* prints out text + endofline if verbose is on.
* helps to make debug output commands more visible in text
*
* @param text the text to print
*/
public void dpln(String text) {
if (m_verboseOn) {
System.out.println(text);
}
}
/**
* prints out text + endofline but only if parameter debug type is set.
* helps to make debug output commands more visible in text
*
* @param debugType the type of the output
* @param text the text to print
*/
public void dpln(int debugType, String text) {
if (outputTypeSet(debugType)) {
System.out.println(text);
}
}
/**
* prints out text if verbose is on.
* helps to make debug output commands more visible in text
*
* @param text the text to print
*/
public void dp(String text) {
if (m_verboseOn) {
System.out.print(text);
}
}
/**
* prints out text but only if debug level is set.
* helps to make debug output commands more visible in text
*
* @param debugType the type of the output
* @param text the text to print
*/
public void dp(int debugType, String text) {
if (outputTypeSet(debugType)) {
System.out.print(text);
}
}
/**
* prints out text + endofline.
* helps to make debug output commands more visible in text
*
* @param text the text to print
*/
public static void pln(String text) {
System.out.println(text);
}
/**
* prints out text.
* helps to make debug output commands more visible in text
*
* @param text the text to print
*/
public static void p (String text) {
System.out.print(text);
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
/**
* default constructor, prints only to stdout
*/
public Debug() {
this(null);
}
/**
* logs the output to the specified file (and stdout). Size is 1,000,000 bytes
* and 1 file.
*
* @param filename the name of the log
*/
public Debug(String filename) {
this(filename, 1000000, 1);
}
/**
* logs the output
*
* @param filename the name of the log
* @param size the size of the files in bytes
* @param numFiles the number of files for rotating
*/
public Debug(String filename, int size, int numFiles) {
super();
m_Log = newLog(filename, size, numFiles);
}
  /**
   * turns the string representing a level, e.g., "FINE" or "ALL" into
   * the corresponding level (case-insensitive). The default is ALL.
   * Simply delegates to Log.stringToLevel.
   *
   * @param level the string to return a level for
   * @return the corresponding level or the default
   * @see Log#stringToLevel(String)
   */
  public static Level stringToLevel(String level) {
    return Log.stringToLevel(level);
  }
  /**
   * returns a new Log instance configured with the given parameters
   *
   * @param filename the name of the log
   * @param size the size of the files in bytes
   * @param numFiles the number of files for rotating
   * @return the log instance
   */
  public static Log newLog(String filename, int size, int numFiles) {
    return new Log(filename, size, numFiles);
  }
/**
* prints the given message with level INFO
*
* @param message the message to print
*/
public void log(String message) {
log(INFO, message);
}
/**
* prints the given message with the specified level and an empty sourceclass
*
* @param level the level of logging
* @param message the message to print
*/
public void log(Level level, String message) {
log(level, "", message);
}
/**
* prints the given message with the specified level
*
* @param level the level of logging
* @param sourceclass the class that logs the message
* @param message the message to print
*/
public void log(Level level, String sourceclass, String message) {
log(level, sourceclass, "", message);
}
/**
* prints the given message with the specified level
*
* @param level the level of logging
* @param sourceclass the class that logs the message
* @param sourcemethod the method that logs the message
* @param message the message to print
*/
public void log(Level level, String sourceclass, String sourcemethod, String message) {
if (getEnabled())
m_Log.log(level, sourceclass, sourcemethod, message);
}
  /**
   * sets whether the logging is enabled or not; when disabled, all log
   * calls become no-ops
   *
   * @param value if true logging will be enabled
   */
  public void setEnabled(boolean value) {
    m_Enabled = value;
  }
  /**
   * returns whether the logging is enabled
   *
   * @return true if the logging is enabled
   */
  public boolean getEnabled() {
    return m_Enabled;
  }
/**
* returns a new instance of a clock
*
* @return a new instance of a Clock
*/
public static Clock newClock() {
return new Clock();
}
/**
* returns the instance of the Clock that is internally used
*
* @return the clock that's being used
*/
public Clock getClock() {
return m_Clock;
}
/**
* starts the clock
*/
public void startClock() {
m_Clock.start();
}
/**
* stops the clock and prints the message associated with the time, but only
* if the logging is enabled.
*
* @param message the message to print
* @see #getEnabled()
*/
public void stopClock(String message) {
log(message + ": " + m_Clock);
}
  /**
   * returns a default debug random object, with no particular seed and
   * debugging enabled.
   *
   * @return a new instance of a Random object
   */
  public static java.util.Random newRandom() {
    return new Random(true);
  }
  /**
   * returns a debug random object with the specified seed and debugging
   * enabled.
   *
   * @param seed the seed value
   * @return a new instance of a Random object
   */
  public static java.util.Random newRandom(int seed) {
    return new Random(seed, true);
  }
  /**
   * returns a default timestamp for the current date/time
   *
   * @return a new timestamp
   */
  public static Timestamp newTimestamp() {
    return new Timestamp();
  }
  /**
   * returns the system temp directory (system property "java.io.tmpdir")
   *
   * @return the temp directory
   */
  public static String getTempDir() {
    return System.getProperty("java.io.tmpdir");
  }
  /**
   * returns the home directory of the user (system property "user.home")
   *
   * @return the user's home directory
   */
  public static String getHomeDir() {
    return System.getProperty("user.home");
  }
  /**
   * returns the current working directory of the user (system property
   * "user.dir")
   *
   * @return the user's current working directory
   */
  public static String getCurrentDir() {
    return System.getProperty("user.dir");
  }
  /**
   * Writes the given object to the specified file. The string representation
   * of the object is appended to the file.
   *
   * @param filename the file to write to
   * @param obj the object to write to the file
   * @return true if writing was successful
   */
  public static boolean writeToFile(String filename, Object obj) {
    return writeToFile(filename, obj, true);
  }
  /**
   * Writes the given message to the specified file. The message is appended
   * to the file.
   *
   * @param filename the file to write to
   * @param message the message to write
   * @return true if writing was successful
   */
  public static boolean writeToFile(String filename, String message) {
    return writeToFile(filename, message, true);
  }
  /**
   * Writes the given object to the specified file. The string representation
   * of the object is either appended or replaces the current content of the
   * file.
   *
   * @param filename the file to write to
   * @param obj the object to write to the file
   * @param append whether to append the message or not
   * @return true if writing was successful
   */
  public static boolean writeToFile(String filename, Object obj, boolean append) {
    return writeToFile(filename, obj.toString(), append);
  }
/**
* Writes the given message to the specified file. The message is either
* appended or replaces the current content of the file.
*
* @param filename the file to write to
* @param message the message to write
* @param append whether to append the message or not
* @return true if writing was successful
*/
public static boolean writeToFile(String filename, String message, boolean append) {
boolean result;
BufferedWriter writer;
try {
writer = new BufferedWriter(new FileWriter(filename, append));
writer.write(message);
writer.newLine();
writer.flush();
writer.close();
result = true;
}
catch (Exception e) {
result = false;
}
return result;
}
/**
* writes the serialized object to the speicified file
*
* @param filename the file to serialize the object to
* @param o the object to serialize
* @return true if writing was successful
*/
public static boolean saveToFile(String filename, Object o) {
boolean result;
if (SerializationHelper.isSerializable(o.getClass())) {
try {
SerializationHelper.write(filename, o);
result = true;
}
catch (Exception e) {
result = false;
}
}
else {
result = false;
}
return result;
}
/**
* deserializes the content of the file and returns it, null if an error
* occurred.
*
* @param filename the name of the file to deserialize
* @return the deserialized content, null if problem occurred
*/
public static Object loadFromFile(String filename) {
Object result;
try {
result = SerializationHelper.read(filename);
}
catch (Exception e) {
result = null;
}
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/Defaults.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Defaults.java
* Copyright (C) 2014 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core;
import java.io.Serializable;
import java.util.LinkedHashMap;
import java.util.Map;
/**
 * Base class for providing a set of default settings for an application.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision: $
 */
public class Defaults implements Serializable {

  /** For serialization */
  private static final long serialVersionUID = 1061521489520308096L;

  /** Identifier for this set of defaults */
  protected String m_defaultsID = "";

  /** Maintains the list of default settings, in insertion order */
  protected Map<Settings.SettingKey, Object> m_defaults =
    new LinkedHashMap<Settings.SettingKey, Object>();

  /**
   * Construct a new empty Defaults
   *
   * @param ID the ID for this set of defaults
   */
  public Defaults(String ID) {
    setID(ID);
  }

  /**
   * Construct a new Defaults initialized with the supplied settings
   *
   * @param ID the ID for this set of defaults
   * @param defaults the default settings to use (stored by reference)
   */
  public Defaults(String ID, Map<Settings.SettingKey, Object> defaults) {
    this(ID);
    m_defaults = defaults;
  }

  /**
   * Set the ID for this set of defaults
   *
   * @param ID the ID to use
   */
  public void setID(String ID) {
    m_defaultsID = ID;
  }

  /**
   * Get the ID of this set of defaults
   *
   * @return the ID of this set of defaults
   */
  public String getID() {
    return m_defaultsID;
  }

  /**
   * Get the map of default settings (the live internal map, not a copy)
   *
   * @return the map of default settings
   */
  public Map<Settings.SettingKey, Object> getDefaults() {
    return m_defaults;
  }

  /**
   * Add the supplied defaults to this one. Note that the added defaults now
   * come under the ID of this set of defaults.
   *
   * @param toAdd the defaults to add
   */
  public void add(Defaults toAdd) {
    Map<Settings.SettingKey, Object> incoming = toAdd.getDefaults();
    m_defaults.putAll(incoming);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/DenseInstance.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* DenseInstance.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core;
import java.util.ArrayList;
import java.util.Enumeration;
/**
* Class for handling an instance. All values (numeric, date, nominal, string or
* relational) are internally stored as floating-point numbers. If an attribute
* is nominal (or a string or relational), the stored value is the index of the
* corresponding nominal (or string or relational) value in the attribute's
* definition. We have chosen this approach in favor of a more elegant
* object-oriented approach because it is much faster.
* <p>
*
* Typical usage (code from the main() method of this class):
* <p>
*
* <code>
* ... <br>
*
* // Create empty instance with three attribute values <br>
* Instance inst = new DenseInstance(3); <br><br>
*
* // Set instance's values for the attributes "length", "weight", and "position"<br>
* inst.setValue(length, 5.3); <br>
* inst.setValue(weight, 300); <br>
* inst.setValue(position, "first"); <br><br>
*
* // Set instance's dataset to be the dataset "race" <br>
* inst.setDataset(race); <br><br>
*
* // Print the instance <br>
* System.out.println("The instance: " + inst); <br>
*
* ... <br>
* </code>
* <p>
*
* All methods that change an instance's attribute values are safe, ie. a change
* of an instance's attribute values does not affect any other instances. All
* methods that change an instance's attribute values clone the attribute value
* vector before it is changed. If your application heavily modifies instance
* values, it may be faster to create a new instance from scratch.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public class DenseInstance extends AbstractInstance {
/** for serialization */
static final long serialVersionUID = 1482635194499365122L;
/**
* Constructor that copies the attribute values and the weight from the given
* instance. It does NOT perform a deep copy of the attribute values if the
* instance provided is also of type DenseInstance (it simply copies the
* reference to the array of values), otherwise it does. Reference to the
* dataset is set to null. (ie. the instance doesn't have access to
* information about the attribute types)
*
* @param instance the instance from which the attribute values and the weight
* are to be copied
*/
// @ ensures m_Dataset == null;
public DenseInstance(/* @non_null@ */Instance instance) {
if (instance instanceof DenseInstance) {
m_AttValues = ((DenseInstance) instance).m_AttValues;
} else {
m_AttValues = instance.toDoubleArray();
}
m_Weight = instance.weight();
m_Dataset = null;
}
/**
* Constructor that inititalizes instance variable with given values.
* Reference to the dataset is set to null. (ie. the instance doesn't have
* access to information about the attribute types)
*
* @param weight the instance's weight
* @param attValues a vector of attribute values
*/
// @ ensures m_Dataset == null;
public DenseInstance(double weight, /* @non_null@ */double[] attValues) {
m_AttValues = attValues;
m_Weight = weight;
m_Dataset = null;
}
/**
* Constructor of an instance that sets weight to one, all values to be
* missing, and the reference to the dataset to null. (ie. the instance
* doesn't have access to information about the attribute types)
*
* @param numAttributes the size of the instance
*/
// @ requires numAttributes > 0; // Or maybe == 0 is okay too?
// @ ensures m_Dataset == null;
public DenseInstance(int numAttributes) {
m_AttValues = new double[numAttributes];
for (int i = 0; i < m_AttValues.length; i++) {
m_AttValues[i] = Utils.missingValue();
}
m_Weight = 1;
m_Dataset = null;
}
/**
* Produces a shallow copy of this instance. The copy has access to the same
* dataset. (if you want to make a copy that doesn't have access to the
* dataset, use <code>new DenseInstance(instance)</code>
*
* @return the shallow copy
*/
// @ also ensures \result != null;
// @ also ensures \result instanceof DenseInstance;
// @ also ensures ((DenseInstance)\result).m_Dataset == m_Dataset;
@Override
public/* @pure@ */Object copy() {
DenseInstance result = new DenseInstance(this);
result.m_Dataset = m_Dataset;
return result;
}
/**
* Copies the instance but fills up its values based on the given array
* of doubles. The copy has access to the same dataset.
*
* @param values the array with new values
* @return the new instance
*/
public Instance copy(double[] values) {
DenseInstance result = new DenseInstance(this.m_Weight, values);
result.m_Dataset = m_Dataset;
return result;
}
  /**
   * Returns the index of the attribute stored at the given position. For a
   * dense instance every attribute is stored positionally, so this just
   * returns the given value.
   *
   * @param position the position
   * @return the index of the attribute stored at the given position
   */
  @Override
  public/* @pure@ */int index(int position) {
    return position;
  }
/**
* Merges this instance with the given instance and returns the result.
* Dataset is set to null. The returned instance is of the same type as this
* instance.
*
* @param inst the instance to be merged with this one
* @return the merged instances
*/
@Override
public Instance mergeInstance(Instance inst) {
int m = 0;
double[] newVals = new double[numAttributes() + inst.numAttributes()];
for (int j = 0; j < numAttributes(); j++, m++) {
newVals[m] = value(j);
}
for (int j = 0; j < inst.numAttributes(); j++, m++) {
newVals[m] = inst.value(j);
}
return new DenseInstance(1.0, newVals);
}
  /**
   * Returns the number of attributes (the length of the internal value
   * array).
   *
   * @return the number of attributes as an integer
   */
  // @ ensures \result == m_AttValues.length;
  @Override
  public/* @pure@ */int numAttributes() {
    return m_AttValues.length;
  }
  /**
   * Returns the number of values present. For a dense instance this is
   * always the same as numAttributes().
   *
   * @return the number of values
   */
  // @ ensures \result == m_AttValues.length;
  @Override
  public/* @pure@ */int numValues() {
    return m_AttValues.length;
  }
/**
* Replaces all missing values in the instance with the values contained in
* the given array. A deep copy of the vector of attribute values is performed
* before the values are replaced.
*
* @param array containing the means and modes
* @throws IllegalArgumentException if numbers of attributes are unequal
*/
@Override
public void replaceMissingValues(double[] array) {
if ((array == null) || (array.length != m_AttValues.length)) {
throw new IllegalArgumentException("Unequal number of attributes!");
}
freshAttributeVector();
for (int i = 0; i < m_AttValues.length; i++) {
if (isMissing(i)) {
m_AttValues[i] = array[i];
}
}
}
  /**
   * Sets a specific value in the instance to the given value (internal
   * floating-point format). Performs a deep copy of the vector of attribute
   * values before the value is set (copy-on-write, so other instances sharing
   * the array are unaffected).
   *
   * @param attIndex the attribute's index
   * @param value the new attribute value (If the corresponding attribute is
   *          nominal (or a string) then this is the new value's index as a
   *          double).
   */
  @Override
  public void setValue(int attIndex, double value) {
    freshAttributeVector();
    m_AttValues[attIndex] = value;
  }
  /**
   * Sets a specific value in the instance to the given value (internal
   * floating-point format). Performs a deep copy of the vector of attribute
   * values before the value is set. Does exactly the same thing as setValue(),
   * since a dense instance stores values positionally.
   *
   * @param indexOfIndex the index of the attribute's index
   * @param value the new attribute value (If the corresponding attribute is
   *          nominal (or a string) then this is the new value's index as a
   *          double).
   */
  @Override
  public void setValueSparse(int indexOfIndex, double value) {
    freshAttributeVector();
    m_AttValues[indexOfIndex] = value;
  }
/**
* Returns the values of each attribute as an array of doubles.
*
* @return an array containing all the instance attribute values
*/
@Override
public double[] toDoubleArray() {
double[] newValues = new double[m_AttValues.length];
System.arraycopy(m_AttValues, 0, newValues, 0, m_AttValues.length);
return newValues;
}
/**
* Returns the description of one instance (without weight appended). If the
* instance doesn't have access to a dataset, it returns the internal
* floating-point values. Quotes string values that contain whitespace
* characters.
*
* This method is used by getRandomNumberGenerator() in Instances.java in
* order to maintain backwards compatibility with weka 3.4.
*
* @return the instance's description as a string
*/
@Override
public String toStringNoWeight() {
return toStringNoWeight(AbstractInstance.s_numericAfterDecimalPoint);
}
/**
 * Returns the description of one instance without the weight appended,
 * rendering each attribute value in turn separated by commas. If the
 * instance has no dataset, internal floating-point values are printed.
 * String values containing whitespace are quoted.
 *
 * This method is used by getRandomNumberGenerator() in Instances.java in
 * order to maintain backwards compatibility with weka 3.4.
 *
 * @param afterDecimalPoint maximum number of digits after the decimal point
 *          for numeric values
 *
 * @return the instance's description as a string
 */
@Override
public String toStringNoWeight(int afterDecimalPoint) {
  StringBuilder buf = new StringBuilder();
  String separator = "";
  for (int i = 0; i < m_AttValues.length; i++) {
    // Separator-first pattern: empty before the first value, "," after.
    buf.append(separator).append(toString(i, afterDecimalPoint));
    separator = ",";
  }
  return buf.toString();
}
/**
 * Returns an attribute value in internal floating-point format.
 *
 * @param attIndex the attribute's index
 * @return the specified value as a double (for a nominal or string
 *         attribute this is the index of the value as a double)
 */
@Override
public/* @pure@ */double value(int attIndex) {
  // Dense storage: the attribute index is the array index.
  return m_AttValues[attIndex];
}
/**
 * Deletes the attribute value at the given position
 * (0 to numAttributes() - 1), shrinking the value vector by one.
 *
 * @param position the attribute's position
 */
@Override
protected void forceDeleteAttributeAt(int position) {
  double[] reduced = new double[m_AttValues.length - 1];
  // Values before the deleted slot keep their positions...
  System.arraycopy(m_AttValues, 0, reduced, 0, position);
  // ...and everything after shifts left by one. When the last slot is
  // deleted this is a zero-length copy, i.e. a no-op.
  System.arraycopy(m_AttValues, position + 1, reduced, position,
    m_AttValues.length - position - 1);
  m_AttValues = reduced;
}
/**
 * Inserts an attribute value at the given position (0 to numAttributes())
 * and marks it missing, growing the value vector by one.
 *
 * @param position the attribute's position
 */
@Override
protected void forceInsertAttributeAt(int position) {
  double[] expanded = new double[m_AttValues.length + 1];
  // Copy the prefix unchanged, shift the suffix right by one slot...
  System.arraycopy(m_AttValues, 0, expanded, 0, position);
  System.arraycopy(m_AttValues, position, expanded, position + 1,
    m_AttValues.length - position);
  // ...and fill the freed slot with the missing-value marker.
  expanded[position] = Utils.missingValue();
  m_AttValues = expanded;
}
/**
 * Replaces the attribute vector with a deep copy of itself, so that a
 * subsequent write does not affect any other instance sharing the array
 * (copy-on-write support for the setters).
 */
private void freshAttributeVector() {
  // Go through toDoubleArray() rather than clone() so that subclasses
  // overriding toDoubleArray() keep consistent copy semantics.
  m_AttValues = toDoubleArray();
}
/**
 * Main method for testing this class: builds a small three-attribute
 * dataset ("length", "weight", nominal "position"), exercises the value
 * setters/getters, copy semantics, attribute deletion/insertion and
 * missing-value handling, and prints the result of each step to stdout.
 * Any exception is caught and its stack trace printed.
 *
 * @param options the commandline options - ignored
 */
// @ requires options != null;
public static void main(String[] options) {
try {
// Create numeric attributes "length" and "weight"
Attribute length = new Attribute("length");
Attribute weight = new Attribute("weight");
// Create vector to hold nominal values "first", "second", "third"
ArrayList<String> my_nominal_values = new ArrayList<String>(3);
my_nominal_values.add("first");
my_nominal_values.add("second");
my_nominal_values.add("third");
// Create nominal attribute "position"
Attribute position = new Attribute("position", my_nominal_values);
// Create vector of the above attributes
ArrayList<Attribute> attributes = new ArrayList<Attribute>(3);
attributes.add(length);
attributes.add(weight);
attributes.add(position);
// Create the empty dataset "race" with above attributes
Instances race = new Instances("race", attributes, 0);
// Make position the class attribute
race.setClassIndex(position.index());
// Create empty instance with three attribute values
Instance inst = new DenseInstance(3);
// Set instance's values for the attributes "length", "weight", and
// "position"
inst.setValue(length, 5.3);
inst.setValue(weight, 300);
inst.setValue(position, "first");
// Set instance's dataset to be the dataset "race"
inst.setDataset(race);
// Print the instance
System.out.println("The instance: " + inst);
// Print the first attribute
System.out.println("First attribute: " + inst.attribute(0));
// Print the class attribute
System.out.println("Class attribute: " + inst.classAttribute());
// Print the class index
System.out.println("Class index: " + inst.classIndex());
// Say if class is missing
System.out.println("Class is missing: " + inst.classIsMissing());
// Print the instance's class value in internal format
System.out.println("Class value (internal format): " + inst.classValue());
// Print a shallow copy of this instance
Instance copy = (Instance) inst.copy();
System.out.println("Shallow copy: " + copy);
// Set dataset for shallow copy
copy.setDataset(inst.dataset());
System.out.println("Shallow copy with dataset set: " + copy);
// Unset dataset for copy, delete first attribute, and insert it again
copy.setDataset(null);
copy.deleteAttributeAt(0);
copy.insertAttributeAt(0);
copy.setDataset(inst.dataset());
System.out.println("Copy with first attribute deleted and inserted: "
+ copy);
// Enumerate attributes (leaving out the class attribute)
System.out.println("Enumerating attributes (leaving out class):");
Enumeration<Attribute> enu = inst.enumerateAttributes();
while (enu.hasMoreElements()) {
Attribute att = enu.nextElement();
System.out.println(att);
}
// Headers are equivalent?
System.out.println("Header of original and copy equivalent: "
+ inst.equalHeaders(copy));
// Test for missing values
System.out.println("Length of copy missing: " + copy.isMissing(length));
System.out.println("Weight of copy missing: "
+ copy.isMissing(weight.index()));
// NOTE(review): this label duplicates the one two lines up even though it
// tests via Utils.isMissingValue rather than Instance.isMissing — the
// printed text is presumably a historical copy-paste; confirm before
// changing the output.
System.out.println("Length of copy missing: "
+ Utils.isMissingValue(copy.value(length)));
// Prints number of attributes and classes
System.out.println("Number of attributes: " + copy.numAttributes());
System.out.println("Number of classes: " + copy.numClasses());
// Replace missing values
double[] meansAndModes = { 2, 3, 0 };
copy.replaceMissingValues(meansAndModes);
System.out.println("Copy with missing value replaced: " + copy);
// Setting and getting values and weights
copy.setClassMissing();
System.out.println("Copy with missing class: " + copy);
copy.setClassValue(0);
System.out.println("Copy with class value set to first value: " + copy);
copy.setClassValue("third");
System.out.println("Copy with class value set to \"third\": " + copy);
copy.setMissing(1);
System.out.println("Copy with second attribute set to be missing: "
+ copy);
copy.setMissing(length);
System.out.println("Copy with length set to be missing: " + copy);
copy.setValue(0, 0);
System.out.println("Copy with first attribute set to 0: " + copy);
copy.setValue(weight, 1);
System.out.println("Copy with weight attribute set to 1: " + copy);
copy.setValue(position, "second");
System.out.println("Copy with position set to \"second\": " + copy);
copy.setValue(2, "first");
System.out.println("Copy with last attribute set to \"first\": " + copy);
System.out.println("Current weight of instance copy: " + copy.weight());
copy.setWeight(2);
System.out.println("Current weight of instance copy (set to 2): "
+ copy.weight());
System.out.println("Last value of copy: " + copy.toString(2));
System.out.println("Value of position for copy: "
+ copy.toString(position));
System.out.println("Last value of copy (internal format): "
+ copy.value(2));
System.out.println("Value of position for copy (internal format): "
+ copy.value(position));
} catch (Exception e) {
e.printStackTrace();
}
}
/**
 * Returns the revision string of this class.
 *
 * @return the revision, extracted from the version-control keyword below
 */
@Override
public String getRevision() {
  return RevisionUtils.extract("$Revision$");
}
}
// NOTE(review): the lines that followed the class body here ("Subsets and
// Splits" / "No community queries yet" boilerplate) were dataset-viewer
// extraction residue, not part of the original source file; they have been
// neutralized into this comment so the file remains valid Java.